text stringlengths 957 885k |
|---|
# Copyright (c) 2020 Cisco and/or its affiliates.
# This software is licensed to you under the terms of the Cisco Sample
# Code License, Version 1.1 (the "License"). You may obtain a copy of the
# License at
# https://developer.cisco.com/docs/licenses
# All use of the material herein must be in accordance with the terms of
# the License. All rights not expressly granted by the License are
# reserved. Unless required by applicable law or agreed to separately in
# writing, software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied.
# CLI usage/help text for this script (presumably printed by the argument
# handler / main(), which is not visible in this chunk -- TODO confirm).
readMe= """This is a script to send an email alert if the remaining license time in an org an admin has
access to is less than X days, or if its license capacity is not enough for its current device
count. The alert is sent using an SMTP server; by default Gmail. Use an automation platform
like Zapier to read this email and trigger further actions.
Command line syntax:
python merakilicensealert.py -k <key> [-u <user> -p <pass> -d <dest>] [-s <srv>] [-t <days>]
[-m include_empty]
Mandatory argument:
-k <key> : Your Meraki Dashboard API key
Arguments to enable sending emails. All three must be given to send email:
-u <user> : The username (email address) that will be used to send the alert message
-p <pass> : Password for the email address where the message is sent from
-d <dest> : Recipient email address
Optional arguments:
-s <server> : Server to use for sending SMTP. If omitted, Gmail will be used
-t <days> : Alert threshold in days for generating alert. Default is 90
-m include_empty : Flag: Also send warnings for new orgs with no devices
Example 1, send email for orgs with 180 or less days license remaining:
python merakilicensealert.py -k 1234 -u <EMAIL> -p 4321 -d <EMAIL> -t 180
Example 2, print orgs with 360 or less days remaining to screen:
python merakilicensealert.py -k 1234 -t 360"""
import sys, requests, time, json, os
from typing import Text
from datetime import datetime, date
class c_organizationdata:
    """Mutable record holding one organization's license status.

    Instances start empty and are filled in by checklicensewarning()
    (licensestate, timeremaining, code) and check_sku()/translate_code()
    (licensetype, skulist, status, statustext).
    """

    def __init__(self):
        # All free-text fields start blank.
        self.name = self.id = self.licensestate = ''
        self.status = self.statustext = self.licensetype = ''
        # Days of license left; populated later.
        self.timeremaining = 0
        # Classification code; 4 == STATE_FAILED until proven otherwise.
        self.code = 4
        # Per-SKU dictionaries built by check_sku().
        self.skulist = []
#end class
#Used for time.sleep(API_EXEC_DELAY). Delay added to avoid hitting dashboard API max request rate
API_EXEC_DELAY = 0.21
#connect and read timeouts for the Requests module
REQUESTS_CONNECT_TIMEOUT = 30
REQUESTS_READ_TIMEOUT = 30
#used by merakirequestthrottler(). DO NOT MODIFY
LAST_MERAKI_REQUEST = datetime.now()
# Org classification codes assigned by checklicensewarning() and consumed
# by check_api_key():
STATE_OK = 0        # license healthy and above the alert threshold
STATE_ORANGE = 1    # status OK but expiring within the threshold
STATE_RED = 2       # non-OK status with time remaining below threshold
STATE_EMPTY = 3     # org has no expiration date (e.g. no devices)
STATE_FAILED = 4    # license state could not be retrieved
STATE_REQUIRED = 5  # Dashboard reports a 'License Required' condition
def translate_code(org):
    """Derive an org-level status from its per-SKU status texts.

    Scans org.skulist (built by check_sku(), sorted by time remaining so
    the most urgent SKU comes first) and sets org.status/org.statustext
    to the first 'Expired' or 'Expiring' entry found; otherwise leaves
    the default 'success'/'Ok'.

    Bug fix: the original had no return statement on the all-OK path, so
    a fully healthy org made this function return None and the caller
    (check_api_key) crashed accessing full_org.name. Always return org.

    Args:
        org: a c_organizationdata-like object with a .skulist of dicts
            each carrying a 'statustext' key.
    Returns:
        The same org object, with .status and .statustext set.
    """
    org.status = 'success'
    org.statustext = 'Ok'
    for s in org.skulist:
        if s['statustext'] == 'Expired':
            org.status = 'danger'
            org.statustext = 'Expired'
            return org
        elif s['statustext'] == 'Expiring':
            org.status = 'warning'
            org.statustext = 'Expiring'
            return org
    return org
def merakirequestthrottler(p_requestcount=1):
    """Pace Dashboard API calls so we stay under the rate shaper.

    Sleeps when the previous request was too recent, then records the
    current time in the module-global LAST_MERAKI_REQUEST.
    """
    global LAST_MERAKI_REQUEST
    elapsed = (datetime.now() - LAST_MERAKI_REQUEST).total_seconds()
    required_gap = API_EXEC_DELAY * p_requestcount
    if elapsed < required_gap:
        time.sleep(required_gap)
    LAST_MERAKI_REQUEST = datetime.now()
    return
def getorglist(p_apikey):
    """Return the list of organizations visible to the given API key.

    On an unreachable cloud the script exits with code 2; on a non-OK
    HTTP status a one-element sentinel list [{'id': 'null'}] is returned
    so callers can detect the failure.
    """
    merakirequestthrottler()
    headers = {'X-Cisco-Meraki-API-Key': p_apikey,
               'Content-Type': 'application/json'}
    try:
        r = requests.get('https://dashboard.meraki.com/api/v0/organizations',
                         headers=headers,
                         timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
    except:
        print('ERROR 01: Unable to contact Meraki cloud')
        sys.exit(2)
    if r.status_code != requests.codes.ok:
        # Sentinel record signalling failure to the caller.
        return [{'id': 'null'}]
    return r.json()
def getshardhost():
    """Return the Dashboard API FQDN used for all org-scoped requests."""
    shard_fqdn = "api.meraki.com"
    return shard_fqdn
def getlicensestate(p_apikey, p_shardhost, p_orgid):
    """Fetch the co-termination license state for one organization.

    Performs GET /organizations/{orgid}/licenseState and returns the
    parsed JSON dict.

    Args:
        p_apikey: Meraki Dashboard API key.
        p_shardhost: API FQDN (see getshardhost()).
        p_orgid: Organization ID.
    Raises:
        Exception('ERROR 03: ...') on any connectivity or HTTP error --
        same message the original raised, which callers rely on.
    """
    merakirequestthrottler()
    url = 'https://%s/api/v0/organizations/%s/licenseState' % (p_shardhost, p_orgid)
    headers = {'X-Cisco-Meraki-API-Key': p_apikey,
               'Content-Type': 'application/json'}
    try:
        r = requests.get(url, headers=headers,
                         timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
    except requests.exceptions.RequestException as exc:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being masked as a connectivity error.
        raise Exception('ERROR 03: Unable to contact Meraki cloud') from exc
    if r.status_code != requests.codes.ok:
        raise Exception('ERROR 03: Unable to contact Meraki cloud')
    return r.json()
def calcdaysremaining(p_merakidate):
    """Return whole days between today and a Dashboard co-term date string.

    Args:
        p_merakidate: Date in the Dashboard API license format,
            e.g. 'Mar 16, 2024 UTC'.
    Returns:
        Days remaining as an int; negative if the date is in the past.

    Bug fix: the original parsed str(mdate - today) and took the token
    before the first space, which raises ValueError when the difference
    is exactly zero days (str(timedelta(0)) == '0:00:00' has no space
    before a day count). timedelta.days gives the value directly.
    """
    mdate = datetime.strptime(p_merakidate, '%b %d, %Y UTC').date()
    return (mdate - date.today()).days
def checklicensewarning(p_apikey, p_orglist, p_timethreshold, p_modeincludeempty = False):
    #checks org list for license violations and expiration warnings
    """Classify every org in p_orglist by license health.

    Args:
        p_apikey: Meraki Dashboard API key.
        p_orglist: iterable of c_organizationdata with .id, .name and
            .shardhost already set.
        p_timethreshold: alert threshold in days.
        p_modeincludeempty: when True, orgs with no expiration date
            ('N/A') are treated as already expired so they alert too.
    Returns:
        A new list of c_organizationdata with .licensestate,
        .timeremaining and .code (one of the STATE_* constants) set.
        Per-org query failures are recorded as STATE_FAILED rather than
        aborting the scan.
    """
    filterlist = []
    i = 0
    for org in p_orglist:
        filterlist.append(c_organizationdata())
        filterlist[i].id = org.id
        filterlist[i].name = org.name
        try:
            licensestate = getlicensestate(p_apikey, org.shardhost, org.id)
            filterlist[i].licensestate = licensestate['status']
            # Set time remaining
            if licensestate['expirationDate'] == 'N/A':
                if p_modeincludeempty:
                    # Force an alert for empty/new orgs.
                    timeremaining = 0
                else:
                    if licensestate['status'] != 'License Required':
                        # Just above the threshold -> classified OK below.
                        timeremaining = p_timethreshold + 1
                    else:
                        timeremaining = 0
            else:
                timeremaining = calcdaysremaining(licensestate['expirationDate'])
            filterlist[i].timeremaining = timeremaining
            # Set code
            if licensestate['status'] == "OK":
                if licensestate['expirationDate'] == 'N/A':
                    filterlist[i].code = STATE_EMPTY
                elif timeremaining > p_timethreshold:
                    filterlist[i].code = STATE_OK
                else:
                    filterlist[i].code = STATE_ORANGE
            else:
                if timeremaining < p_timethreshold:
                    filterlist[i].code = STATE_RED
                elif licensestate['status'] != 'N/A':
                    filterlist[i].code = STATE_REQUIRED
                else:
                    filterlist[i].code = STATE_EMPTY
        except:
            # NOTE(review): broad except keeps one bad org from aborting
            # the whole scan; the failure is surfaced as STATE_FAILED.
            filterlist[i].licensestate = 'Failed to connect'
            filterlist[i].timeremaining = 0
            filterlist[i].code = STATE_FAILED
        i += 1
    return(filterlist)
def check_eos(sku):
    """Return end-of-sale/support flags for a SKU from endofsale.json.

    Looks up the SKU in the endofsale.json file next to this script and
    returns a list containing "End of Sale" and/or "End of Support" for
    each deadline already in the past; empty list if the SKU is unknown
    or neither date has passed. Only the first matching entry is used.
    """
    now = datetime.today()
    flags = []
    eos_path = f'{os.path.dirname(__file__)}/endofsale.json'
    with open(eos_path, 'r') as fh:
        catalog = json.load(fh)
    for entry in catalog:
        if entry['SKU'] != sku:
            continue
        if datetime.strptime(entry['eosale'], '%b %d, %Y') < now:
            flags.append("End of Sale")
        if datetime.strptime(entry['eosupport'], '%b %d, %Y') < now:
            flags.append("End of Support")
        return flags
    return flags
def check_sku(apikey, org, deadline):
    """Build org.skulist with per-SKU expiration data for one org.

    Tries the per-device licensing endpoint first
    (GET /organizations/{id}/licenses); if that returns a non-OK status
    the org is assumed to use co-termination licensing and
    /licenseState is queried instead. Each SKU entry carries SKU name,
    expiration, a dd/mm/yyyy datestring, days remaining, device count
    and end-of-sale flags, plus a status/statustext classification
    against `deadline` (days). Sets org.licensetype and org.skulist and
    returns the same org object.

    Raises:
        Exception('ERROR 03: ...') if the cloud cannot be reached.
    """
    merakirequestthrottler()
    result = {
        'licensetype' : 'Per-device',
        'skulist' : []
    }
    try:
        r = requests.get('https://%s/api/v0/organizations/%s/licenses' % (getshardhost(), org.id) , headers={'X-Cisco-Meraki-API-Key': apikey, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
    except:
        raise Exception('ERROR 03: Unable to contact Meraki cloud')
    sku_list = []
    # Co-term licensing
    if r.status_code != requests.codes.ok:
        result['licensetype'] = 'Co-termination'
        try:
            r = requests.get('https://%s/api/v0/organizations/%s/licenseState' % (getshardhost(), org.id) , headers={'X-Cisco-Meraki-API-Key': apikey, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
            r.raise_for_status()
            exp_date = r.json()['expirationDate']
            devices = r.json()['licensedDeviceCounts']
            # Co-term dates look like 'Mar 16, 2024 UTC'.
            mdate = datetime.date(datetime.strptime(exp_date, '%b %d, %Y UTC'))
            # All device types share the single co-term expiration date.
            for d in devices.keys():
                sku_list += [{
                    'SKU' : d,
                    'expiration' : exp_date,
                    'datestring': f"{mdate.day}/{mdate.month}/{mdate.year}",
                    'timeremaining' : calcdaysremaining(exp_date),
                    'amount' : devices[d],
                    'endofsale' : check_eos(d)
                }]
        except:
            raise Exception('ERROR 03: Unable to contact Meraki cloud')
    # Per-device licensing
    else:
        for l in r.json():
            # Aggregate identical (SKU, expiration) licenses into one row.
            added = False
            for sku in sku_list:
                if sku['SKU'] == l['licenseType'] and sku['expiration'] == l['expirationDate']:
                    sku['amount'] += 1
                    added = True
            if not added:
                if l['expirationDate'] is None:
                    # Unactivated license: huge timeremaining sorts it last
                    # and the classification loop below marks it 'Ok'.
                    sku_list += [{
                        'SKU' : l['licenseType'],
                        'expiration' : None,
                        'datestring' : 'Not activated',
                        'timeremaining': 10000000000000,
                        'amount' : 1,
                        'endofsale' : check_eos(l['licenseType'])
                    }]
                else:
                    # Per-device dates are ISO-8601 ('%Y-%m-%dT%H:%M:%SZ').
                    mdate = datetime.date(datetime.strptime(l['expirationDate'], '%Y-%m-%dT%H:%M:%SZ'))
                    today = date.today()
                    # NOTE(review): parses str(timedelta) -- breaks when the
                    # delta is exactly 0 days, same issue calcdaysremaining had.
                    remaining = str(mdate - today).split(' ')[0]
                    sku_list += [{
                        'SKU' : l['licenseType'],
                        'expiration' : l['expirationDate'],
                        'datestring': f"{mdate.day}/{mdate.month}/{mdate.year}",
                        'timeremaining': int(remaining),
                        'amount' : 1,
                        'endofsale' : check_eos(l['licenseType'])
                    }]
    # Most urgent SKUs first; translate_code() depends on this ordering.
    sku_list = sorted(sku_list, key=lambda k: k['timeremaining'])
    for s in sku_list:
        if s['expiration'] is None or int(s['timeremaining']) > deadline:
            s['status'] = 'success'
            s['statustext'] = 'Ok'
        elif int(s['timeremaining']) < 0:
            s['status'] = 'danger'
            s['statustext'] = 'Expired'
        else:
            s['status'] = 'warning'
            s['statustext'] = 'Expiring'
    result['skulist'] = sku_list
    org.licensetype = result['licensetype']
    org.skulist = result['skulist']
    return org
def check_api_key(apikey, threshold, include_empty = False):
    """Scan every org visible to the API key and report license issues.

    Args:
        apikey: Meraki Dashboard API key.
        threshold: alert threshold in days.
        include_empty: forwarded to checklicensewarning(); also alert on
            orgs with no devices.
    Returns:
        List of dicts (name, licensetype, skulist, status, statustext),
        one per org that could be queried; failed/empty orgs are skipped.
    Raises:
        Exception('ERROR 07: ...') if the org list cannot be retrieved.
    """
    # compile list of organizations to be processed
    orglist = []
    orgjson = getorglist(apikey)
    # getorglist() signals HTTP failure with a single sentinel record.
    if orgjson[0]['id'] == 'null':
        raise Exception('ERROR 07: Unable to retrieve org list')
    i = 0
    for record in orgjson:
        orglist.append(c_organizationdata())
        orglist[i].name = record['name']
        orglist[i].id = record['id']
        i += 1
    # get shard host/FQDN where destination org is stored
    for record in orglist:
        record.shardhost = getshardhost()
    # find orgs in license incompliance state
    filterlist = checklicensewarning(apikey, orglist, threshold, include_empty)
    result = []
    for org in filterlist:
        if org.code != STATE_FAILED and org.code != STATE_EMPTY:
            try:
                full_org = translate_code(check_sku(apikey, org, threshold))
                result += [
                    {
                        'name': full_org.name,
                        'licensetype': full_org.licensetype,
                        'skulist': full_org.skulist,
                        'status': full_org.status,
                        'statustext': full_org.statustext
                    }
                ]
            except Exception as e:
                # NOTE(review): debug-style output; a failing org is
                # dropped from the result after printing the error.
                print("fail here")
                print(e)
    return result
|
import testing.parity
import unittest
import json
import urllib.request
import time
import os
class TestParity(unittest.TestCase):
    """Integration tests for testing.parity.ParityServer, a test harness
    that boots a local Parity (Ethereum) node and exposes its JSON-RPC
    endpoint."""

    def test_basic(self):
        """Boot a server, query it over JSON-RPC, then verify clean shutdown."""
        try:
            # start parity server (original comment said "postgresql" --
            # copy-paste artifact; this launches a Parity node)
            parity = testing.parity.ParityServer(network_id=42)
            self.assertIsNotNone(parity)
            params = parity.dsn()
            self.assertEqual('http://localhost:{}'.format(parity.settings['jsonrpc_port']), params['url'])
            self.assertEqual('http://localhost:{}'.format(parity.settings['jsonrpc_port']), parity.url())
            self.assertEqual(42, params['network_id'])
            # A fresh chain must report block number 0.
            result = urllib.request.urlopen(
                urllib.request.Request(
                    parity.url(),
                    headers={'Content-Type': "application/json"},
                    data=json.dumps({
                        "jsonrpc": "2.0",
                        "id": "1234",
                        "method": "eth_blockNumber",
                        "params": []
                    }).encode('utf-8')
                ))
            self.assertEqual(json.load(result)['result'], '0x0')
            # net_version must echo the configured network id.
            result = urllib.request.urlopen(
                urllib.request.Request(
                    parity.url(),
                    headers={'Content-Type': "application/json"},
                    data=json.dumps({
                        "jsonrpc": "2.0",
                        "id": "1234",
                        "method": "net_version",
                        "params": []
                    }).encode('utf-8')
                ))
            self.assertEqual(json.load(result)['result'], str(42))
        finally:
            # shutting down: the OS process must be gone after stop()
            pid = parity.server_pid
            self.assertTrue(parity.is_alive())
            parity.stop()
            time.sleep(1)
            self.assertFalse(parity.is_alive())
            with self.assertRaises(OSError):
                os.kill(pid, 0)  # signal 0 only probes: raises once the process is down

    def test_send_transactions(self):
        """Submit pre-signed raw transactions one by one and wait for each
        to be mined, checking block height, nonce and tx hash at each step."""
        try:
            parity = testing.parity.ParityServer(
                network_id=0x42,
                enable_ws=True,
                min_gas_price=10000000000,
                faucet_private_key=b'}\x82Cl\xbc\x8bc\x8f-\xf9V\xfct{W\\\xdb6D\xf8\x13w\x87\x95\xf6\x8a\x97\x04\x9c\xb8\x0fk')
            address = "0xf0fd3db9396b084d26d4f838eade9f111a715a29"
            # (expected_tx_hash, raw_signed_tx) pairs; nonces start at 0x100000.
            # NOTE(review): "<KEY>" entries are redaction placeholders from the
            # data source, not valid values -- those iterations cannot pass as-is.
            txs = [
                ("0xc337816fd40c6a54f77a1445fa1ea6ab5101294974515312052b670650e2aca9", "0xf8718310000085028fa6ae0082520894000000000000000000000000000000000000000089056bc75e2d631000008081a8a0e0e959ccf4b4baf9b32dda1c210a321e71af96b5f66269fb2beb38298f89e5e8a0207b860442764f685024479d57c99a2c91fcd5b204eb2c728a98320dc9db04a5"),
                ("0x60b9b36033d99d36b52ff94d033f2d99ff1bf15c5c848d8af9b8295c3d19fbd7", "0xf8718310000185028fa6ae0082520894000000000000000000000000000000000000000089056bc75e2d631000008081a8a021025e7ddc5ad4744c2dfeffa84fdddaaa64e9f4681657c731e3cc020b3f9592a00f42710314e49d47c66aa63003d6820cdfc59c72c7a8037cbce5024b81ebaa1a"),
                ("<KEY>", "0xf8718310000285028fa6ae0082520894000000000000000000000000000000000000000089056bc75e2d631000008081a8a06391c32a10f084529e42a2b992f0da95994fcace83e99735765cc758c0d137ffa018683b0cd87c7d231004608e11b81841992b0b5b47e8ea7f31c9e5653d2f0c90"),
                ("0x7e1388c57e625c824902dd8a61e06679d74265d7451b4cd1f3f4be0c2ccd5473", "0xf8718310000385028fa6ae0082520894000000000000000000000000000000000000000089056bc75e2d631000008081a8a0eb6b075192aadbc0899c57fb86ed0cf11709dd116344ea8dd9990c4e81320d62a072798f2facc4b0cb30e65cd429c6aabfed654d9817e85b52021bfdbeea8a2bd5"),
                ("<KEY>", "0xf8718310000485028fa6ae0082520894000000000000000000000000000000000000000089056bc75e2d631000008081a8a08eed90177f77ef1ae1c943522c496f26ed71c0c3e7a54f4bfea9ea1a7eff635ea07d73fa41256f5727694bec32fd2da0046ad804654af26746d61719ff26ccaa5f"),
                ("0x6483459e4be0559f8758b9179b81ae8730406b3c15d36dcab2e14184fd112f7e", "0xf8718310000585028fa6ae0082520894000000000000000000000000000000000000000089056bc75e2d631000008081a8a07435f4729015dbb6c70a8d5d65a19aef73ca3017e3520c5aec1ab8b563ab5585a05a9928ff026a1a1fc9e05d8e6df7c7ae938c6622a8b7daffcb263fe9c99bfd2c"),
                ("0x82d696e461d9da1fc1a15d4e832034172a6ba6256789f8180f4d2efed8fff22e", "0xf8718310000685028fa6ae0082520894000000000000000000000000000000000000000089056bc75e2d631000008081a8a0f58ef89a4f5a09713a6a1d9d06c17e243e50ec609533f9e68600e063526291f6a00b530d5f9651b01c62d03dd34319827f58bdc9adb790a9591ded8ccc15e4aae6"),
                ("0x72a95fb830d413a3e2e2a2ed8eab85012da8eedc80ef2e6e29b81125a701ecaf", "<KEY>"),
                ("0xa39670d948e05a67f78ca2f9b560e136c93a786537093c9bd780695d2b31b24a", "0xf8718310000885028fa6ae0082520894000000000000000000000000000000000000000089056bc75e2d631000008081a7a09f4aa314d0a1b70bffd9448be2bcc79591fa8bcc54219e414e85773315341b96a063be68037a3b5f066db3affc54167edb0d963d20bba6501fa12e0c32e51642f6"),
                ("0xb9491f2876689953c08f62d5f9e56a4ee4fe56db140df285a6da9166f051f992", "0xf8718310000985028fa6ae0082520894000000000000000000000000000000000000000089056bc75e2d631000008081a7a02d2c4480889a5afc0cdf671bd3eee85c2a8553cb9ce45e48e5b380098c20f515a068cc79a5b6a0d097364b8faa21280a5fb6b07af2e4073d28360e666c78a7bbff")
            ]
            for i, tx in enumerate(txs):
                expected_hash, raw_tx = tx
                # Chain height should equal the number of txs mined so far.
                result = urllib.request.urlopen(
                    urllib.request.Request(
                        parity.url(),
                        headers={'Content-Type': "application/json"},
                        data=json.dumps({
                            "jsonrpc": "2.0",
                            "id": "1234",
                            "method": "eth_blockNumber",
                            "params": []
                        }).encode('utf-8')
                    ))
                block_number = int(json.load(result)['result'], 16)
                assert block_number == i
                # Sender nonce advances with each mined tx.
                result = urllib.request.urlopen(
                    urllib.request.Request(
                        parity.url(),
                        headers={'Content-Type': "application/json"},
                        data=json.dumps({
                            "jsonrpc": "2.0",
                            "id": "1234",
                            "method": "eth_getTransactionCount",
                            "params": [address]
                        }).encode('utf-8')
                    ))
                nonce = int(json.load(result)['result'], 16)
                assert nonce == 0x100000 + i
                result = urllib.request.urlopen(
                    urllib.request.Request(
                        parity.url(),
                        headers={'Content-Type': "application/json"},
                        data=json.dumps({
                            "jsonrpc": "2.0",
                            "id": "1234",
                            "method": "eth_sendRawTransaction",
                            "params": [raw_tx]
                        }).encode('utf-8')
                    ))
                tx_hash = json.load(result)['result']
                assert tx_hash == expected_hash
                # Poll until the tx is mined (expected in the very next block),
                # with a 5-second timeout.
                start = time.time()
                while True:
                    result = urllib.request.urlopen(
                        urllib.request.Request(
                            parity.url(),
                            headers={'Content-Type': "application/json"},
                            data=json.dumps({
                                "jsonrpc": "2.0",
                                "id": "1234",
                                "method": "eth_getTransactionByHash",
                                "params": [tx_hash]
                            }).encode('utf-8')
                        ))
                    receipt = json.load(result)['result']
                    if receipt is not None and receipt['blockNumber'] is not None:
                        assert int(receipt['blockNumber'], 16) == block_number + 1
                        break
                    elif receipt is not None:
                        # Known but not yet mined: sanity-check the nonce.
                        assert int(receipt['nonce'], 16) == nonce
                        print(receipt)
                    if time.time() - start > 5.0:
                        assert False, "timeout at tx: #{} {}".format(i, tx_hash)
                    time.sleep(0.5)
        finally:
            parity.stop()
|
<reponame>bching/oppia
# coding: utf-8
#
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for question domain objects."""
from core.domain import exp_domain
from core.domain import question_domain
from core.tests import test_utils
import utils
class QuestionDomainTest(test_utils.GenericTestBase):
    """Tests for Question domain object."""

    def test_to_dict(self):
        """A Question built field-by-field round-trips through to_dict()."""
        expected_object = {
            'question_id': 'col1.random',
            'title': 'abc',
            'question_data': {},
            'question_data_schema_version': 1,
            'collection_id': 'col1',
            'language_code': 'en'
        }
        observed_object = question_domain.Question(
            expected_object['question_id'], expected_object['title'],
            expected_object['question_data'],
            expected_object['question_data_schema_version'],
            expected_object['collection_id'], expected_object['language_code'])
        self.assertDictEqual(expected_object, observed_object.to_dict())

    def test_validation(self):
        """Test to verify validate method of Question domain object."""
        state = exp_domain.State.create_default_state('ABC')
        question_data = state.to_dict()
        test_object = {
            'question_id': 'col1.random',
            'title': 'abc',
            'question_data': question_data,
            'question_data_schema_version': 1,
            'collection_id': 'col1',
            'language_code': 'en'
        }
        question = question_domain.Question(
            test_object['question_id'], test_object['title'],
            test_object['question_data'],
            test_object['question_data_schema_version'],
            test_object['collection_id'], test_object['language_code'])
        # Each stanza below corrupts one field, asserts the matching
        # ValidationError, then restores the field before moving on.
        question.question_id = 123
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected ID to be a string')):
            question.validate()
        question.question_id = 'col1.random'
        question.update_title(1)
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected title to be a string')):
            question.validate()
        question.update_title('ABC')
        question.update_question_data([])
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected question_data to be a dict')):
            question.validate()
        question.update_question_data(question_data)
        question.question_data_schema_version = 'abc'
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected question_data_schema_version to be a integer')):
            question.validate()
        question.question_data_schema_version = 1
        question.collection_id = 123
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected collection_id to be a string')):
            question.validate()
        question.collection_id = 'col1'
        question.language_code = 123
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected language_code to be a string')):
            question.validate()
        # 'abc' is string-typed but not a recognised language code.
        question.update_language_code('abc')
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Invalid language code')):
            question.validate()

    def test_from_dict(self):
        """from_dict() and to_dict() are exact inverses."""
        state = exp_domain.State.create_default_state('ABC')
        question_data = state.to_dict()
        expected_object = {
            'question_id': 'col1.random',
            'title': 'abc',
            'question_data': question_data,
            'question_data_schema_version': 1,
            'collection_id': 'col1',
            'language_code': 'en'
        }
        question = question_domain.Question.from_dict(expected_object)
        self.assertDictEqual(expected_object, question.to_dict())

    def test_create_default_question(self):
        """Test to verify create_default_question method of Question domain
        object."""
        question_id = 'col1.random'
        collection_id = 'col1'
        title = ''
        language_code = 'en'
        question = question_domain.Question.create_default_question(
            question_id, collection_id, title, language_code)
        self.assertEqual(question.question_id, question_id)
        self.assertEqual(question.collection_id, collection_id)
        self.assertEqual(question.question_data_schema_version, 1)
        self.assertEqual(question.question_data, {})
        self.assertEqual(question.title, '')
        self.assertEqual(question.language_code, 'en')

    def test_update_methods(self):
        """Tests update_title, update_question_data and update_language_code
        methods of the question domain object."""
        state = exp_domain.State.create_default_state('ABC')
        question_data = state.to_dict()
        test_object = {
            'question_id': 'col1.random',
            'title': 'abc',
            'question_data': question_data,
            'question_data_schema_version': 1,
            'collection_id': 'col1',
            'language_code': 'en'
        }
        question = question_domain.Question.from_dict(test_object)
        question.update_title('hello')
        self.assertEqual(question.title, 'hello')
        question.update_question_data({})
        self.assertEqual(question.question_data, {})
        question.update_language_code('es')
        self.assertEqual(question.language_code, 'es')
|
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.test import TestCase
from django.contrib.auth.models import Permission
from django.core import mail
from mapentity.factories import SuperUserFactory, UserFactory
from geotrek.common.tests import CommonTest, TranslationResetMixin
from geotrek.common.utils.testdata import get_dummy_uploaded_image_svg, get_dummy_uploaded_image, get_dummy_uploaded_file
from geotrek.feedback import models as feedback_models
from geotrek.feedback import factories as feedback_factories
from rest_framework.test import APIClient
class ReportModelTest(TestCase):
    """Test some custom model"""

    def test_default_no_status(self):
        # With no ReportStatus rows in the database, a new report has no status.
        my_report = feedback_factories.ReportFactory()
        self.assertEqual(my_report.status, None)

    def test_default_status_exists(self):
        # Once a status exists it is picked up as the default for new reports
        # (presumably keyed on the "Nouveau" label -- TODO confirm in model).
        self.default_status = feedback_factories.ReportStatusFactory(label="Nouveau")
        my_report = feedback_factories.ReportFactory()
        self.assertEqual(my_report.status, self.default_status)
class ReportViewsetMailSend(TestCase):
    """Posting a report through the public API triggers outgoing email."""

    def test_mail_send_on_request(self):
        self.client.post(
            '/api/en/reports/report',
            {
                'email': '<EMAIL>',
                'comment': 'Test comment',
                'activity': feedback_factories.ReportActivityFactory.create().pk,
                'problem_magnitude': feedback_factories.ReportProblemMagnitudeFactory.create().pk,
            })
        # Two messages expected; outbox[1] is the acknowledgement to the
        # reporter (presumably outbox[0] notifies managers -- TODO confirm).
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[1].subject, "Geotrek : Signal a mistake")
        self.assertIn("We acknowledge receipt of your feedback", mail.outbox[1].body)
        self.assertEqual(mail.outbox[1].from_email, settings.DEFAULT_FROM_EMAIL)
class ReportViewsTest(CommonTest):
    """Generic CRUD view tests for Report, driven by mapentity's CommonTest."""

    model = feedback_models.Report
    modelfactory = feedback_factories.ReportFactory
    userfactory = SuperUserFactory
    # GeoJSON geometry CommonTest expects for the factory-made object.
    expected_json_geom = {
        'type': 'Point',
        'coordinates': [3.0, 46.5],
    }

    def get_expected_json_attrs(self):
        # Attributes CommonTest expects in the detail JSON for self.obj.
        return {
            'activity': self.obj.activity.pk,
            'category': self.obj.category.pk,
            'comment': self.obj.comment,
            'related_trek': None,
            'email': self.obj.email,
            'status': None,
            'problem_magnitude': self.obj.problem_magnitude.pk
        }

    def get_bad_data(self):
        # Invalid geometry should be rejected with this (translated) message.
        return {'geom': 'FOO'}, _('Invalid geometry value.')

    def get_good_data(self):
        return {
            'geom': '{"type": "Point", "coordinates": [0, 0]}',
            'email': '<EMAIL>',
            'activity': feedback_factories.ReportActivityFactory.create().pk,
            'problem_magnitude': feedback_factories.ReportProblemMagnitudeFactory.create().pk,
        }

    def test_good_data_with_name(self):
        """Test report created if `name` in data"""
        data = self.get_good_data()
        data['name'] = 'Anonymous'
        self.login()
        response = self.client.post(self._get_add_url(), data)
        self.assertEqual(response.status_code, 302)  # redirect on successful create
        obj = self.model.objects.last()
        self.assertEqual(obj.email, data['email'])
        self.logout()
class BaseAPITest(TestCase):
    """Shared setup: a user holding only the add_report permission, plus a
    CSRF-aware login helper."""

    def setUp(self):
        self.user = UserFactory(password='<PASSWORD>')
        perm = Permission.objects.get_by_natural_key('add_report', 'feedback', 'report')
        self.user.user_permissions.add(perm)
        self.login_url = '/login/'

    def login(self):
        # GET first to obtain the CSRF cookie needed by the login POST.
        response = self.client.get(self.login_url)
        csrftoken = response.cookies.get('csrftoken', '')
        # NOTE(review): allow_redirects is a `requests` kwarg, not one the
        # Django test client understands -- it is passed through as extra
        # data and is effectively a no-op here.
        response = self.client.post(self.login_url,
                                    {'username': self.user.username,
                                     'password': '<PASSWORD>',
                                     'csrfmiddlewaretoken': csrftoken},
                                    allow_redirects=False)
        self.assertEqual(response.status_code, 302)
class CreateReportsAPITest(BaseAPITest):
    """Anonymous report creation through the REST endpoint."""

    def setUp(self):
        super(CreateReportsAPITest, self).setUp()
        self.add_url = '/api/en/reports/report'
        self.data = {
            'geom': '{"type": "Point", "coordinates": [3, 46.5]}',
            'email': '<EMAIL>',
            'activity': feedback_factories.ReportActivityFactory.create().pk,
            'problem_magnitude': feedback_factories.ReportProblemMagnitudeFactory.create().pk,
        }

    def post_report_data(self, data):
        # Posts unauthenticated via DRF's APIClient and expects 201 Created.
        client = APIClient()
        response = client.post(self.add_url, data=data,
                               allow_redirects=False)
        self.assertEqual(response.status_code, 201)

    def test_reports_can_be_created_using_post(self):
        self.post_report_data(self.data)
        self.assertTrue(feedback_models.Report.objects.filter(email='<EMAIL>').exists())
        report = feedback_models.Report.objects.get()
        # Input geom is lon/lat; stored values (700000, 6600000) suggest
        # reprojection to a metric SRID -- TODO confirm project settings.
        self.assertAlmostEqual(report.geom.x, 700000)
        self.assertAlmostEqual(report.geom.y, 6600000)

    def test_reports_can_be_created_without_geom(self):
        # The geometry field is optional for API reports.
        self.data.pop('geom')
        self.post_report_data(self.data)
        self.assertTrue(feedback_models.Report.objects.filter(email='<EMAIL>').exists())

    def test_reports_with_file(self):
        # Three uploads of different kinds should all become attachments.
        self.data['file'] = get_dummy_uploaded_file()
        self.data['csv'] = get_dummy_uploaded_image_svg()
        self.data['image'] = get_dummy_uploaded_image()
        self.post_report_data(self.data)
        self.assertTrue(feedback_models.Report.objects.filter(email='<EMAIL>').exists())
        report = feedback_models.Report.objects.get()
        self.assertEqual(report.attachments.count(), 3)
class ListCategoriesTest(TranslationResetMixin, BaseAPITest):
    """Category listing endpoint, including per-language translation."""

    def setUp(self):
        super(ListCategoriesTest, self).setUp()
        # label_it provides the Italian translation checked below.
        self.cat = feedback_factories.ReportCategoryFactory(label_it='Obstaculo')

    def test_categories_can_be_obtained_as_json(self):
        response = self.client.get('/api/en/feedback/categories.json')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data[0]['id'], self.cat.id)
        self.assertEqual(data[0]['label'], self.cat.label)

    def test_categories_are_translated(self):
        # The /it/ endpoint must serve the Italian label.
        response = self.client.get('/api/it/feedback/categories.json')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data[0]['label'], self.cat.label_it)
class ListOptionsTest(TranslationResetMixin, BaseAPITest):
    """Combined options endpoint (activities, categories, magnitudes),
    including per-language translation."""

    def setUp(self):
        super(ListOptionsTest, self).setUp()
        self.activity = feedback_factories.ReportActivityFactory(label_it='Hiking')
        self.cat = feedback_factories.ReportCategoryFactory(label_it='Obstaculo')
        self.pb_magnitude = feedback_factories.ReportProblemMagnitudeFactory(label_it='Possible')

    def test_options_can_be_obtained_as_json(self):
        response = self.client.get('/api/en/feedback/options.json')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['activities'][0]['id'], self.activity.id)
        self.assertEqual(data['activities'][0]['label'], self.activity.label)
        self.assertEqual(data['categories'][0]['id'], self.cat.id)
        self.assertEqual(data['categories'][0]['label'], self.cat.label)
        self.assertEqual(data['magnitudeProblems'][0]['id'], self.pb_magnitude.id)
        self.assertEqual(data['magnitudeProblems'][0]['label'], self.pb_magnitude.label)

    def test_options_are_translated(self):
        # The /it/ endpoint must serve the Italian labels set in setUp().
        response = self.client.get('/api/it/feedback/options.json')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['activities'][0]['label'], self.activity.label_it)
        self.assertEqual(data['categories'][0]['label'], self.cat.label_it)
        self.assertEqual(data['magnitudeProblems'][0]['label'], self.pb_magnitude.label_it)
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Attention models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import layers
from lingvo.core import py_utils
from lingvo.core import quant_utils
from lingvo.core import summary_utils
import numpy as np
from tensorflow.python.framework import function
from tensorflow.python.ops import inplace_ops
# Currently, quantization statistics cannot be accumulated across arbitrary
# defuns, so we allow them to be disabled. A potentially more robust fix is
# to save and merge the attention state across the defun boundary as is
# done in recurrent.py.
def _ConditionalDefun(cond, *args, **kwargs):
def Decorator(f):
if not cond:
return f
return function.Defun(*args, **kwargs)(f)
return Decorator
def _ApplyAttentionDropout(params, x, global_step):
  """Apply attention dropout according to the given parameters.

  If `params.atten_dropout_deterministic` is set to True, the dropout will be
  fully deterministic.

  Args:
    params: The parameters of attention layer.
    x: A float Tensor on which to apply dropout.
    global_step: Required for deterministic dropout.

  Returns:
    A Tensor with the same shape as `x`.
  """
  if params.atten_dropout_prob == 0:
    # Dropout disabled: pass the input through unchanged.
    return x

  if params.atten_dropout_deterministic:
    # Seeds derived from global_step make the dropout mask reproducible.
    seeds = py_utils.GenerateStepSeedPair(params, global_step)
    return py_utils.DeterministicDropout(x, 1.0 - params.atten_dropout_prob,
                                         seeds)
  else:
    # TF1-style dropout: the second argument is the *keep* probability.
    return tf.nn.dropout(
        x, 1.0 - params.atten_dropout_prob, seed=params.random_seed)
def SafeCumprod(x, *args, **kwargs):
  """Computes cumprod of x in logspace using cumsum to avoid underflow.

  The cumprod function and its gradient can result in numerical instabilities
  when its argument has very small and/or zero values. As long as the argument
  is all positive, we can instead compute the cumulative product as
  exp(cumsum(log(x))). This function can be called identically to tf.cumprod.

  Args:
    x: Tensor to take the cumulative product of.
    *args: Passed on to cumsum; these are identical to those in cumprod.
    **kwargs: Passed on to cumsum; these are identical to those in cumprod.

  Returns:
    Cumulative product of x.
  """
  with tf.name_scope(None, 'SafeCumprod', [x]):
    x = tf.convert_to_tensor(x, name='x')
    # Clamp inputs to [tiny, 1] so log() never sees zero; tiny is the
    # smallest positive normal number for x's dtype.
    tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
    return tf.exp(
        tf.cumsum(tf.log(tf.clip_by_value(x, tiny, 1)), *args, **kwargs))
# pyformat: disable
def MonotonicAttentionProb(p_choose_i, previous_attention, mode):
  """Compute monotonic attention distribution from choosing probabilities.
  Monotonic attention implies that the input sequence is processed in an
  explicitly left-to-right manner when generating the output sequence. In
  addition, once an input sequence element is attended to at a given output
  timestep, elements occurring before it cannot be attended to at subsequent
  output timesteps. This function generates attention distributions according
  to these assumptions. For more information, see `Online and Linear-Time
  Attention by Enforcing Monotonic Alignments`.
  Args:
    p_choose_i: Probability of choosing input sequence/memory element i. Should
      be of shape (batch_size, input_sequence_length), and should all be in the
      range [0, 1].
    previous_attention: The attention distribution from the previous output
      timestep. Should be of shape (batch_size, input_sequence_length). For
      the first output timestep, previous_attention[n] should be [1, 0, 0, ...,
      0] for all n in [0, ... batch_size - 1].
    mode: How to compute the attention distribution. Must be one of `recursive`,
      `parallel`, or `hard`.
      * recursive: uses tf.scan to recursively compute the distribution. This is
        slowest but is exact, general, and does not suffer from numerical
        instabilities.
      * parallel: uses parallelized cumulative-sum and cumulative-product
        operations to compute a closed-form solution to the recurrence relation
        defining the attention distribution. This makes it more efficient than
        'recursive', but it requires numerical checks which make the
        distribution non-exact. This can be a problem in particular when
        input_sequence_length is long and/or p_choose_i has entries very close
        to 0 or 1.
      * hard: requires that the probabilities in p_choose_i are all either 0 or
        1, and subsequently uses a more efficient and exact solution.
  Returns:
    A tensor of shape (batch_size, input_sequence_length) representing the
    attention distributions for each sequence in the batch.
  Raises:
    ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
  """
  # pyformat: enable
  # Force things to be tensors
  p_choose_i = tf.convert_to_tensor(p_choose_i, name='p_choose_i')
  previous_attention = tf.convert_to_tensor(
      previous_attention, name='previous_attention')
  if mode == 'recursive':
    batch_size = py_utils.GetShape(p_choose_i)[0]
    # Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]]
    shifted_1mp_choose_i = tf.concat(
        [tf.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1)
    # Compute attention distribution recursively as
    # q[i] = (1 - p_choose_i[i - 1])*q[i - 1] + previous_attention[i]
    # attention[i] = p_choose_i[i]*q[i]
    attention = p_choose_i * tf.transpose(
        tf.scan(
            # Need to use reshape to remind TF of the shape between loop
            # iterations.
            lambda x, yz: tf.reshape(yz[0] * x + yz[1], (batch_size,)),
            # Loop variables yz[0] and yz[1]
            [
                tf.transpose(shifted_1mp_choose_i),
                tf.transpose(previous_attention)
            ],
            # Initial value of x is just zeros
            tf.zeros((batch_size,))))
  elif mode == 'parallel':
    # SafeCumprod computes cumprod in logspace with numeric checks
    cumprod_1mp_choose_i = SafeCumprod(1 - p_choose_i, axis=1, exclusive=True)
    # Compute recurrence relation solution
    attention = p_choose_i * cumprod_1mp_choose_i * tf.cumsum(
        previous_attention /
        # Clip cumprod_1mp to avoid divide-by-zero
        tf.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.),
        axis=1)
  elif mode == 'hard':
    # Remove any probabilities before the index chosen last time step
    p_choose_i *= tf.cumsum(previous_attention, axis=1)
    # Now, use exclusive cumprod to remove probabilities after the first
    # chosen index, like so:
    # p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]
    # cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
    # Product of above: [0, 0, 0, 1, 0, 0, 0, 0]
    attention = p_choose_i * tf.cumprod(1 - p_choose_i, axis=1, exclusive=True)
  else:
    raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
  return attention
class BaseAttentionLayer(quant_utils.QuantizableLayer):
  """A base class for all attention layers.

  Subclasses must implement `PackSource` and `ComputeContextVectorWithSource`.
  The typical calling sequence is `InitForSourcePacked` once per source
  sequence, followed by `ComputeContextVector` at each decoding step.
  """
  @classmethod
  def Params(cls):
    # Knobs shared by all attention subclasses: dropout over the attention
    # weights, packed-input support, and quantization domains.
    p = super(BaseAttentionLayer, cls).Params()
    p.Define('atten_dropout_prob', 0.0,
             'Probability at which we apply dropout to the attention weights.')
    p.Define(
        'atten_dropout_deterministic', False,
        'Whether to dropout in a fully deterministic way, which is more '
        'suitable for TPU.')
    p.Define('packed_input', False,
             'If True, each training example may pack multiple sequences.')
    p.qdomain.Define('softmax', None, 'QDomain for the internal softmax.')
    p.qdomain.Define(
        'fullyconnected', None, 'Fully connected layers are fed '
        'into activation functions which have known input ranges')
    return p
  @base_layer.initializer
  def __init__(self, params):
    """Constructs a BaseAttentionLayer object."""
    if not params.name:
      raise ValueError('params.name is not set.')
    super(BaseAttentionLayer, self).__init__(params)
    # Flipped to True by InitForSourcePacked / SetInitializationSourceState;
    # ComputeContextVector asserts on it.
    self._source_init_done = False
    # Register the pre-softmax logits tensor for quantization tracking.
    self.TrackQTensor('logits', domain='fullyconnected')
  def InitForSourcePacked(self,
                          theta,
                          source_vecs,
                          source_contexts,
                          source_padding,
                          source_segment_id=None):
    """Initialize attention for the given source vectors.
    Must set `_source_init_done` to True in the function.
    Note: `source_segment_id`, if present, should always have the same shape as
    `source_padding`.
    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      source_vecs: A single tensor of shape [time, batch_size, source_dim].
      source_contexts: A single tensor of shape [time, batch_size, some_dim].
      source_padding: A tensor of shape [time, batch_size].
      source_segment_id: A tensor of shape [time, batch_size]. source_segment_id
        is not None for packed inputs where one training example may pack
        multiple sequences.
    Returns:
      A `.NestedMap` object to be passed to ComputeContextVectorWithSource.
      The internal structure of the return value should be considered an
      implementation detail of the attention mechanism and should not be
      inspected or modified by its callers.
    """
    self._source_init_done = True
    self._packed_src = self.PackSource(theta, source_vecs, source_contexts,
                                       source_padding, source_segment_id)
    return self._packed_src
  def PackSource(self,
                 theta,
                 source_vecs,
                 source_contexts,
                 source_padding,
                 source_segment_id=None):
    """Packs source vectors.
    Does not change attention state.
    Note: `source_segment_id`, if present, should always have the same shape as
    `source_padding`.
    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      source_vecs: A single tensor of shape [time, batch_size, source_dim].
      source_contexts: A single tensor of shape [time, batch_size, some_dim].
      source_padding: A tensor of shape [time, batch_size].
      source_segment_id: A tensor of shape [time, batch_size]. source_segment_id
        is not None for packed inputs where one training example may pack
        multiple sequences.
    Returns:
      A `.NestedMap` object to be passed to ComputeContextVectorWithSource.
      The internal structure of the return value should be considered an
      implementation detail of the attention mechanism and should not be
      inspected or modified by its callers.
    """
    raise NotImplementedError('Abstract method.')
  def ComputeContextVectorWithSource(self,
                                     theta,
                                     packed_src,
                                     query_vec,
                                     attention_state=None,
                                     per_step_source_padding=None,
                                     query_segment_id=None):
    """Computes the context vector given the current query output.
    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      packed_src: A `.NestedMap` object returned by PackSource or
        InitForSourcePacked.
      query_vec: a tensor of shape [batch_size, query_dim].
      attention_state: previous attention state.
      per_step_source_padding: Source sequence padding to apply at this step. If
        not None, it should have shape [target_batch_size, source_length].
      query_segment_id: a tensor of shape [batch_size].
    Returns:
      A tuple of 3 elements.
      The attention context vector:
        [batch_size, context_dim]
      The attention probability vector:
        [batch_size, time]
      The new attention mechanism state:
        possibly nested tuple of tensors with dimensions [target_batch, ...]
    """
    raise NotImplementedError('Abstract method.')
  def ComputeContextVector(self,
                           theta,
                           query_vec,
                           attention_state=None,
                           per_step_source_padding=None,
                           query_segment_id=None):
    """Computes the context vector given the current query output.
    Unlike `ComputeContextVectorWithSource` which explicitly asks for the packed
    source tensors, `ComputeContextVector` uses the class' internal variables.
    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      query_vec: a tensor of shape [batch_size, query_dim].
      attention_state: previous attention state.
      per_step_source_padding: Source sequence padding to apply at this step. If
        not None, it should be of shape [target_batch_size, source_length].
      query_segment_id: a tensor of shape [batch_size].
    Returns:
      A tuple of 3 elements.
      * The attention context vector.
      * The attention probability vector.
      * The new attention mechanism state: possibly nested tuple of tensors with
        dimensions [target_batch, ...]
    """
    # Requires a prior InitForSourcePacked / SetInitializationSourceState call.
    assert self._source_init_done
    return self.ComputeContextVectorWithSource(theta, self._packed_src,
                                               query_vec, attention_state,
                                               per_step_source_padding,
                                               query_segment_id)
  def GetInitializationSourceState(self):
    """Gets the attention initialization state.
    The base class only preserves the `concated_source_vecs`,
    `concated_source_contexts` and `source_padding`. If subclasses use more
    state than this and need to interact with inference code that must
    fetch and reload state, this and `SetInitializationSourceState` must
    be overridden.
    Returns:
      A `.NestedMap` of Tensors that can be preserved and reset via
      `SetInitializationSourceState()` at a later point. This allows, for
      example, for attention computations to span session runs.
    """
    assert self._source_init_done
    return self._packed_src
  def SetInitializationSourceState(self, new_init_state):
    """Sets the attention initialization state.
    Args:
      new_init_state: A `.NestedMap` matching what was returned from
        `GetInitializationSourceState`, which will return this layer to that
        initialization state.
    """
    self._source_init_done = True
    # DeepCopy decouples this layer's state from the caller's map.
    self._packed_src = new_init_state.DeepCopy()
  def _PaddedSoftmax(self, logits, padding, narrow_to_asym_bit_depth=False):
    """Performs a softmax as if padding were applied after exponentiation.
    The default implementation uses numerical techniques to approximate this
    with a standard `tf.nn.softmax` (using large negative logits for padded
    values). It defers to a `Defun` that may be replaced on low-range
    implementations with a version that is numerically correct.
    Args:
      logits: Logits.
      padding: Padding (must be the same shape as logits).
      narrow_to_asym_bit_depth: Narrows the bit depth, removing the upper limit
        value. This is to accommodate certain interpreters that would cover a 0
        .... 2**bits - 1 range for quantization.
    Returns:
      Result of the softmax.
    """
    p = self.params
    fns = self.fns
    if logits.dtype.is_complex:
      # Complex logits are reduced to their magnitude before the softmax.
      logits = tf.abs(logits)
    assert logits.dtype.is_floating
    assert hasattr(logits.dtype, 'max')
    # A large finite negative value drives padded positions to ~0 probability.
    # NOTE(review): the -0.7 factor presumably leaves numeric headroom versus
    # using -dtype.max outright -- confirm.
    very_negative_logits = (
        tf.ones_like(logits) * logits.dtype.max *
        tf.constant(-0.7, dtype=logits.dtype))
    if p.is_eval:
      very_negative_logits = self.QTensor('logits', very_negative_logits)
    padded_logits = tf.where(padding > 0.0, very_negative_logits, logits)
    # TFLite hardcodes the range of qsoftmax, setting explicitly to avoid
    # incompatible concats.
    return fns.qsoftmax(
        padded_logits,
        qdomain='softmax',
        narrow_to_asym_bit_depth=narrow_to_asym_bit_depth)
  def _UpdatePaddingWithPackedInputMask(self, padding, source_segment_ids,
                                        query_segment_ids):
    """Creates an attention mask based on source and query segment ids.
    This creates a mask that removes invalid attention, where the query vector
    might assign some weight to neighboring sequences in a packed input example.
    Assumes `n = target_batch // source_batch`.
    Args:
      padding: Padding for logits, a tensor of shape [time, n, source_batch].
      source_segment_ids: a tensor of shape [time, source_batch].
      query_segment_ids: a tensor of shape [target_batch].
    Returns:
      Logits with mask applied.
    """
    # Generating packed input mask for attention padding.
    source_segment_ids = tf.expand_dims(source_segment_ids, 1)
    query_segment_ids = tf.reshape(
        query_segment_ids,
        [1, -1, py_utils.GetShape(source_segment_ids)[2]])
    # Positions whose source and query segment ids differ are forced to
    # padding (1), keeping the original padding where ids match.
    padding = tf.where(
        tf.equal(source_segment_ids, query_segment_ids), padding,
        tf.ones_like(padding))
    return padding
class AdditiveAttention(BaseAttentionLayer):
  """Implements additive attention (also known as "Bahdanau Attention").
  Described in:
  Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
  "Neural Machine Translation by Jointly Learning to Align and Translate."
  ICLR 2015.
  https://arxiv.org/abs/1409.0473
  """
  @classmethod
  def Params(cls):
    """Params for this `AdditiveAttention` class."""
    p = super(AdditiveAttention, cls).Params()
    p.Define('source_dim', 0, 'Number of source nodes.')
    p.Define('query_dim', 0, 'Number of query nodes.')
    p.Define('hidden_dim', 0, 'Number of hidden nodes.')
    # Fill in reasonable default for params init
    p.params_init = py_utils.WeightInit.GaussianSqrtDim()
    p.Define(
        'same_batch_size', False,
        'True iff the source and target sequence has the same batch size.')
    return p
  @base_layer.initializer
  def __init__(self, params):
    """Constructs an `AdditiveAttention` object."""
    super(AdditiveAttention, self).__init__(params)
    p = self.params
    with tf.variable_scope(p.name):
      # Projection of source vectors into the hidden space.
      pc = py_utils.WeightParams(
          shape=[p.source_dim, p.hidden_dim],
          init=p.params_init,
          dtype=p.dtype,
          collections=['AdditiveAttention_vars'])
      self.CreateVariable('source_var', pc, self.AddGlobalVN)
      # Projection of query vectors into the hidden space.
      pc = py_utils.WeightParams(
          shape=[p.query_dim, p.hidden_dim],
          init=p.params_init,
          dtype=p.dtype,
          collections=['AdditiveAttention_vars'])
      self.CreateVariable('query_var', pc, self.AddGlobalVN)
      # The score vector `v` that reduces tanh(source + query) to a logit.
      pc = py_utils.WeightParams(
          shape=[p.hidden_dim],
          init=p.params_init,
          dtype=p.dtype,
          collections=['AdditiveAttention_vars'])
      self.CreateVariable('hidden_var', pc, self.AddGlobalVN)
    # noinline and compiled cannot be set at the same time
    @function.Defun(
        *([py_utils.FPropDtype(p)] * 7), noinline=not py_utils.use_tpu())
    def AttenProbs(concated_source_vecs, source_padding, query_vec_reshaped, v,
                   per_step_source_padding, source_segment_id,
                   query_segment_id):
      """Generates probs."""
      source_batch = py_utils.GetShape(source_padding)[1]
      target_batch = py_utils.GetShape(per_step_source_padding)[0]
      multiplier = target_batch // source_batch
      # Shape of summed is [sl, tb/sb, sb, hidden_dim].
      summed = tf.tanh(concated_source_vecs + query_vec_reshaped)
      # logits is of shape [sl * tb/sb * sb, 1]. Computes dot product
      # between v with every rows in 'summed'. Then we reshape the
      # result to be of shape [sl, tb/sb, sb].
      #
      # Another equivalent way is to do:
      #  logits = tf.reduce_sum(summed *
      #                         tf.reshape(v, [1, 1, 1, hidden_dim]), 3)
      logits = py_utils.Matmul(
          tf.reshape(summed, [-1, p.hidden_dim]),
          tf.reshape(v, [p.hidden_dim, 1]))
      logits = tf.reshape(logits, tf.shape(summed)[:3])
      # Take out the padding states.
      # _source_padding is of shape [source_length, source_batch].
      # reshaped to [source_length, 1, source_batch].
      # per_step_source_padding is reshaped to the same but with 'multiplier'
      # for the second dim.
      source_padding = tf.expand_dims(source_padding, 1)
      per_step_source_padding = tf.reshape(
          tf.transpose(per_step_source_padding), [-1, multiplier, source_batch])
      source_padding += per_step_source_padding
      if p.packed_input:
        source_padding = self._UpdatePaddingWithPackedInputMask(
            source_padding, source_segment_id, query_segment_id)
      # Reshape logits to a matrix of shape [target_batch, source_length] and
      # takes the softmax to compute the probabilities.
      logits = tf.transpose(tf.reshape(logits, [-1, target_batch]))
      source_padding = tf.transpose(
          tf.reshape(source_padding, [-1, target_batch]))
      probs = self._PaddedSoftmax(logits, source_padding)
      return probs
    # Adds the atten function into the graph's library.
    def Atten(v, w, source_padding, source_segment_id, concated_source_vecs,
              concated_source_contexts, query_vec, query_segment_id,
              per_step_source_padding, global_step):
      """Computes the attention context vector.
      Args:
        v: hidden weight. [hidden_dim, 1].
        w: query weight. [query_dim, hidden_dim].
        source_padding: [source_length, source_batch].
        source_segment_id: [source_length, source_batch]
        concated_source_vecs: [source_length, source_batch, hidden_dim].
        concated_source_contexts: [source_batch, source_length, context_dim]
        query_vec: [target_batch, query_dim]
        query_segment_id: [target_batch]
        per_step_source_padding: [target_batch, source_length]
        global_step: Required for deterministic dropout.
      Note: concated_source_vecs are the vectors that are used to compute the
      attention score between the query_vec and each concated_source_vec. The
      concated_source_contexts are the vectors that compose the result. The
      attention context vector is computed as a weighted average of the
      concated_source_contexts, using the scores that were computed using
      concated_source_vecs.
      Returns:
        attention context vectors and probabilities.
      """
      source_batch = py_utils.GetShape(concated_source_vecs)[1]
      target_batch = py_utils.GetShape(query_vec)[0]
      multiplier = target_batch // source_batch
      # concated_source_vecs is reshaped to
      # [source_length, 1, source_batch, hidden_dims]
      concated_source_vecs = tf.expand_dims(concated_source_vecs, 1)
      query_vec_transformed = py_utils.Matmul(query_vec, w)
      # query_vec is reshaped to
      # [1, target_batch/source_batch, source_batch, hidden_dims].
      query_vec_reshaped = tf.reshape(
          query_vec_transformed, [1, multiplier, source_batch, p.hidden_dim])
      # probs is of shape [target_batch, source_length]
      probs = AttenProbs(concated_source_vecs, source_padding,
                         query_vec_reshaped, v, per_step_source_padding,
                         source_segment_id, query_segment_id)
      probs.set_shape(per_step_source_padding.shape)
      # Apply dropout to weights if applicable.
      if not p.is_eval:
        probs = _ApplyAttentionDropout(p, probs, global_step)
      # Reshape probs to be of shape
      # [target_batch/source_batch, source_batch, source_length]
      probs_reshaped = tf.reshape(probs, [multiplier, source_batch, -1])
      # Transpose probs to be of shape
      # [source_batch, target_batch/source_batch, source_length]
      probs_reshaped = tf.transpose(probs_reshaped, [1, 0, 2])
      # Batched matmul
      # [source_batch, target_batch/source_batch, source_length] *
      # [source_batch, source_length, context_dim] =
      # [source_batch, target_batch/source_batch, context_dim]
      summed = tf.matmul(probs_reshaped, concated_source_contexts)
      # summed is of shape
      # [target_batch/source_batch, source_batch, context_dim]
      summed = tf.transpose(summed, [1, 0, 2])
      return tf.reshape(summed, [target_batch, -1]), probs
    # The source batch size equals to the target batch size.
    def AttenSameBatchSize(v, w, source_padding, source_segment_id,
                           concated_source_vecs, concated_source_contexts,
                           query_vec, query_segment_id, per_step_source_padding,
                           global_step):
      """Computes the attention context vector.
      Args:
        v: hidden weight. [hidden_dim].
        w: query weight. [query_dim, hidden_dim].
        source_padding: [sl, b]
        source_segment_id: [sl, b]
        concated_source_vecs: [sl, b, hidden_dim].
        concated_source_contexts: [b, sl, context_dim]
        query_vec: [b, query_dim]
        query_segment_id: [b]
        per_step_source_padding: [b, sl]
        global_step: Required for deterministic dropout.
      Returns:
        attention context vectors and probabilities.
      """
      # TODO(jiaye): support dropout
      if p.atten_dropout_prob != 0:
        raise NotImplementedError('dropout is not supported')
      del global_step
      # [b, hidden_dim]
      query_vec = py_utils.Matmul(query_vec, w)
      # [sl, b]
      @function.Defun(
          *([py_utils.FPropDtype(p)] * 7), noinline=not py_utils.use_tpu())
      def AttenProbs(x, source_padding, y, v, per_step_source_padding,
                     source_segment_id, query_segment_id):
        """Calculates atten probs with padding."""
        # tf.tanh(x+y) shape [sl, b, hidden_dim]
        summed = tf.tanh(x + y)
        # [-1, hidden_dim] * [hidden_dim, 1] = [-1, 1]
        res = py_utils.Matmul(
            tf.reshape(summed, [-1, p.hidden_dim]), tf.expand_dims(v, 1))
        # Reshape res to [sl, b]
        logits = tf.reshape(res, tf.shape(summed)[:2])
        # Take out the padding states. _source_padding is of shape [sl, b].
        source_padding += tf.transpose(per_step_source_padding)
        if p.packed_input:
          source_padding = self._UpdatePaddingWithPackedInputMask(
              tf.expand_dims(source_padding, 1), source_segment_id,
              query_segment_id)
          source_padding = tf.squeeze(source_padding, 1)
        # [b, sl]
        source_padding = tf.transpose(source_padding)
        logits = tf.transpose(logits)
        # softmax to compute the probabilities. [b, sl]
        probs = self._PaddedSoftmax(logits, source_padding)
        return probs
      probs = AttenProbs(concated_source_vecs, source_padding, query_vec, v,
                         per_step_source_padding, source_segment_id,
                         query_segment_id)
      probs.set_shape(per_step_source_padding.shape)
      # contexts[i, :] is a weighted (probs[i, :]) average of
      # concated_source_vecs[i, :, :].
      # Reshaped probs is of shape [b, 1, sl]
      reshaped_probs = tf.expand_dims(probs, 1)
      # [b, 1, sl] * [b, sl, context_dim] = [b, 1, context_dim]
      contexts = tf.matmul(reshaped_probs, concated_source_contexts)
      # Reshaped context is of shape [b, context_dim]
      contexts = tf.squeeze(contexts, axis=1)
      return contexts, probs
    if p.same_batch_size:
      self._ctx_vec = AttenSameBatchSize
    else:
      self._ctx_vec = Atten
    def EncodeSource(src_w, vecs, ctxs):
      # Projects source vectors into the hidden space with `src_w` and
      # transposes contexts to [batch, time, context_dim].
      time, batch = py_utils.GetShape(vecs, 2)
      ctxs = py_utils.HasShape(ctxs, [time, batch, -1])
      transformed_vecs = tf.reshape(
          py_utils.Matmul(tf.reshape(vecs, [-1, p.source_dim]), src_w),
          [time, batch, -1])
      transposed_ctxs = tf.transpose(ctxs, [1, 0, 2])
      return transformed_vecs, transposed_ctxs
    self._encode_source = EncodeSource
  def PackSource(self,
                 theta,
                 source_vecs,
                 source_contexts,
                 source_padding,
                 source_segment_id=None):
    """Packs source vectors.
    Does not change attention state.
    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      source_vecs: A single tensor of shape [time, batch_size, source_dim].
      source_contexts: A single tensor of shape [time, batch_size, some_dim].
      source_padding: A tensor of shape [time, batch_size].
      source_segment_id: A tensor of shape [time, batch_size].
    Returns:
      A NestedMap containing the packed source.
    """
    with tf.name_scope(self.params.name):
      if source_segment_id is None:
        source_segment_id = tf.zeros_like(source_padding)
      (concated_source_vecs, concated_source_contexts) = (
          self._encode_source(theta.source_var, source_vecs, source_contexts))
    return py_utils.NestedMap(
        # [time, batch_size, hidden_dim].
        source_vecs=concated_source_vecs,
        # [batch_size, time, context_dim].
        # Note the mismatch between `source_vecs` and `source_contexts`. In
        # `source_vecs`, time is the first dim, while it is the second dim in
        # `source_contexts`.
        source_contexts=concated_source_contexts,
        # [time, batch_size].
        source_padding=source_padding,
        # [time, batch_size].
        source_segment_id=source_segment_id)
  def ZeroAttentionState(self, source_length, decoder_batch_size):
    """Returns a dummy initial attention state; see class usage."""
    p = self.params
    # This is just a dummy state. The first dimension of the state has to match
    # decoder_batch_size.
    zs = tf.zeros([decoder_batch_size, 1], dtype=py_utils.FPropDtype(p))
    return zs
  def ComputeContextVectorWithSource(self,
                                     theta,
                                     packed_src,
                                     query_vec,
                                     attention_state=None,
                                     per_step_source_padding=None,
                                     query_segment_id=None):
    """Computes the context vector given the current query output.
    Note: `packed_src.source_vecs` are the vectors that are used to compute the
    attention score between the `query_vec` and each `packed_src.source_vecs`.
    The `packed_src.source_contexts` are the vectors that compose the result.
    The attention context vector is computed as a weighted average of the
    `packed_src.source_contexts`, using the scores that were computed using
    `packed_src.source_vecs`.
    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      packed_src: A `.NestedMap` object returned by PackSource or
        InitForSourcePacked.
      query_vec: a tensor of shape [batch_size, query_dim].
      attention_state: previous attention state. It is not used in
        `AdditiveAttention`, and is simply passed through.
      per_step_source_padding: Source sequence padding to apply at this step. If
        not None, it should be of shape [target_batch_size, source_length].
      query_segment_id: a tensor of shape [batch_size]
    Returns:
      A tuple of 3 elements.
      The attention context vector:
        [batch_size, context_dim]
      The attention probability vector:
        [batch_size, time]
      The new attention mechanism state:
        possibly nested tuple of tensors with dimensions [target_batch, ...]
    """
    p = self.params
    concated_source_vecs = packed_src.source_vecs
    concated_source_contexts = packed_src.source_contexts
    source_padding = packed_src.source_padding
    source_segment_id = packed_src.source_segment_id
    query_batch_size = py_utils.GetShape(query_vec)[0]
    source_length = py_utils.GetShape(source_padding)[0]
    if per_step_source_padding is None:
      # Default: no extra per-step padding.
      zero = tf.constant(0.0, dtype=query_vec.dtype)
      per_step_source_padding = tf.fill([query_batch_size, source_length], zero)
    per_step_source_padding = py_utils.HasShape(
        per_step_source_padding, [query_batch_size, source_length])
    # Per-step variational noise on the score vector and query projection.
    hidden = py_utils.AddPerStepVN(p, theta.hidden_var)
    query = py_utils.AddPerStepVN(p, theta.query_var)
    if source_segment_id is None:
      source_segment_id = tf.zeros_like(source_padding)
    if query_segment_id is None:
      query_segment_id = tf.zeros(
          tf.shape(query_vec)[0], dtype=source_padding.dtype)
    ctx_vec, prob = self._ctx_vec(hidden, query, source_padding,
                                  source_segment_id, concated_source_vecs,
                                  concated_source_contexts, query_vec,
                                  query_segment_id, per_step_source_padding,
                                  theta.global_step)
    return ctx_vec, prob, attention_state
class DotProductAttention(BaseAttentionLayer):
"""Implements dot-product attention (also known as "Luong Attention").
Described in:
<NAME>, <NAME>, <NAME>.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015.
https://arxiv.org/abs/1508.04025
"""
@classmethod
def Params(cls):
"""Params for `DotProductAttention`."""
p = super(DotProductAttention, cls).Params()
p.Define('source_dim', 0, 'Number of source nodes.')
p.Define('query_dim', 0, 'Number of query nodes.')
p.Define('hidden_dim', 0, 'Number of hidden nodes.')
return p
  @base_layer.initializer
  def __init__(self, params):
    """Constructs a DotProductAttention object."""
    super(DotProductAttention, self).__init__(params)
    p = self.params
    # TODO(yonghui): relax these constraints.
    assert p.source_dim == p.query_dim
    assert p.source_dim == p.hidden_dim
    with tf.variable_scope(p.name):
      # Learned per-dimension scaling applied to the query vector.
      # NOTE(review): initialized to 0.0 and multiplied into the query
      # directly below, which zeroes the logits at init -- confirm this is
      # intended (other variants apply a transform to the scale first).
      pc = py_utils.WeightParams(
          shape=[p.hidden_dim],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=p.dtype,
          collections=['DotProductAttention_vars'])
      self.CreateVariable('per_dim_scale', pc)
    @function.Defun(
        *[py_utils.FPropDtype(p)] * 7, noinline=not py_utils.use_tpu())
    def AttenProbs(per_dim_scale, source_padding, concated_source_vecs,
                   query_vec, per_step_source_padding, source_segment_id,
                   query_segment_id):
      """Main attention function.
      Args:
        per_dim_scale: [source_dim], a vec to scale individual dims.
        source_padding: [time, source_batch].
        concated_source_vecs: [time, source_batch, source_dim].
        query_vec: [target_batch, source_dim].
        per_step_source_padding: [target_batch, source_length]
        source_segment_id: [time, source_batch].
        query_segment_id: [target_batch].
      Returns:
        logits: [target_batch, source_time].
      target_batch = source_batch * n where n is an integer >= 1.
      In this case query_vec contains:
              -------------------------
              | instance    1         |
              | instance    2         |
           0  |          ...          |
              | instance source_batch |
              -------------------------
              | instance    1         |
              | instance    2         |
           1  |          ...          |
              | instance source_batch |
              -------------------------
                           ...
              -------------------------
              | instance    1         |
              | instance    2         |
      n-1     |          ...          |
              | instance source_batch |
              -------------------------
      One use case is beam search where n = beam size.
      """
      source_padding = tf.transpose(source_padding)
      concated_source_vecs = tf.transpose(concated_source_vecs, [1, 0, 2])
      # Scale the logits by 1/sqrt(source_dim), as in scaled dot-product
      # attention; gradients are stopped through the scale factor.
      logit_scale = tf.stop_gradient(
          tf.rsqrt(
              tf.cast(tf.shape(query_vec)[1], dtype=py_utils.FPropDtype(p))))
      source_batch = tf.shape(concated_source_vecs)[0]
      target_batch = tf.shape(query_vec)[0]
      query_vec *= per_dim_scale
      # The n here refers to the "n" described in the comment above.
      n = target_batch // source_batch
      query_vec = tf.reshape(query_vec, [n, source_batch, -1])
      # => [source_batch, source_dim, n]
      query_vec = tf.transpose(query_vec, [1, 2, 0])
      # => [n, source_batch, source_sequence_len]
      per_step_source_padding = tf.reshape(per_step_source_padding,
                                           [n, source_batch, -1])
      # => [source_batch, source_sequence_len, n]
      per_step_source_padding = tf.transpose(per_step_source_padding, [1, 2, 0])
      # Dot-product part.
      # Calls batch_mat_mul since dim > 2 for per-instance matmul.
      # [source_batch, time, source_dim] * [source_batch, source_dim, n]
      # => [source_batch, time, n]
      logits = tf.matmul(concated_source_vecs, query_vec)
      logits *= logit_scale
      # Exclude padding frames.
      # [source_batch, time] => [source_batch, time, 1]
      source_padding = tf.expand_dims(source_padding, 2)
      source_padding += per_step_source_padding
      if p.packed_input:
        source_padding = tf.transpose(source_padding, [1, 2, 0])
        source_padding = self._UpdatePaddingWithPackedInputMask(
            source_padding, source_segment_id, query_segment_id)
        source_padding = tf.transpose(source_padding, [1, 2, 0])
      else:
        source_padding = tf.transpose(source_padding, [2, 0, 1])
      # => [n, source_batch, time]
      logits = tf.transpose(logits, [2, 0, 1])
      # => [n * source_batch, time].
      # This makes logits store content in the same order as query_vec.
      logits = tf.reshape(logits, [target_batch, -1])
      source_padding = tf.reshape(source_padding, [target_batch, -1])
      probs = self._PaddedSoftmax(logits, source_padding)
      return probs
    def Atten(per_dim_scale, source_padding, source_segment_id,
              concated_source_vecs, concated_source_contexts, query_vec,
              query_segment_id, per_step_source_padding, global_step):
      """Main attention function.
      Args:
        per_dim_scale: [source_dim], a vec to scale individual dims.
        source_padding: [time, source_batch].
        source_segment_id: [time, source_batch].
        concated_source_vecs: [time, source_batch, source_dim].
        concated_source_contexts: [source_batch, time, context_dim].
        query_vec: [target_batch, source_dim].
        query_segment_id: [target_batch].
        per_step_source_padding: [target_batch, source_length]
        global_step: Required for deterministic dropout.
      Note: concated_source_vecs are the vectors that are used to compute the
      attention score between the query_vec and each concated_source_vec. The
      concated_source_contexts are the vectors that compose the result. The
      attention context vector is computed as a weighted average of the
      concated_source_contexts, using the scores that were computed using
      concated_source_vecs.
      Returns:
        context_vector: [target_batch, context_dim].
        probs: [target_batch, time].
      """
      # NOTE(review): the outputs of these assert ops are discarded; in graph
      # mode they may not actually execute -- confirm.
      py_utils.assert_shape_match([tf.shape(concated_source_vecs)[2]],
                                  [tf.shape(query_vec)[1]])
      py_utils.assert_shape_match([tf.shape(concated_source_vecs)[2]],
                                  [p.source_dim])
      source_batch = tf.shape(concated_source_vecs)[1]
      target_batch = tf.shape(query_vec)[0]
      n = target_batch // source_batch
      returned_probs = AttenProbs(per_dim_scale, source_padding,
                                  concated_source_vecs, query_vec,
                                  per_step_source_padding, source_segment_id,
                                  query_segment_id)
      returned_probs.set_shape(per_step_source_padding.shape)
      # => [n, source_batch, time].
      probs = tf.reshape(returned_probs, [n, source_batch, -1])
      # => [source_batch, n, time].
      probs = tf.transpose(probs, [1, 0, 2])
      # Apply dropout to weights if applicable.
      if not p.is_eval:
        probs = _ApplyAttentionDropout(p, probs, global_step)
      # Weight each frame with the probability and sum them.
      # [source_batch, n, time] * [source_batch, time, context_dim]
      # => [source_batch, n, context_dim].
      context_vector = tf.matmul(probs, concated_source_contexts)
      # => [n, source_batch, context_dim].
      context_vector = tf.transpose(context_vector, [1, 0, 2])
      # => [n * source_batch, context_dim].
      context_vector = tf.reshape(context_vector, [target_batch, -1])
      return context_vector, returned_probs
    self._ctx_vec = Atten
def PackSource(self,
               theta,
               source_vecs,
               source_contexts,
               source_padding,
               source_segment_id=None):
  """Prepares source tensors for subsequent attention computations.

  Does not change attention state.

  Args:
    theta: A `.NestedMap` object containing weights' values of this layer and
      its children layers.
    source_vecs: A tensor of shape [time, source_batch, source_dim].
    source_contexts: A tensor of shape [time, source_batch, context_dim].
    source_padding: A tensor of shape [time, source_batch].
    source_segment_id: A tensor of shape [time, source_batch].

  Returns:
    A `.NestedMap` with fields `source_vecs` (shape [time, batch_size,
    hidden_dim]), `source_contexts` (shape [batch_size, time, context_dim])
    and `source_padding` (shape [time, batch_size]).
  """
  # When no segment ids are supplied, treat the whole source as one segment.
  if source_segment_id is None:
    source_segment_id = tf.zeros_like(source_padding)
  packed = py_utils.NestedMap()
  # Keys stay time-major: [time, batch_size, hidden_dim].
  packed.source_vecs = tf.identity(source_vecs)
  # Values are moved to batch-major layout, [batch_size, time, context_dim],
  # so the later weighted sum is a plain batched matmul. Note the deliberate
  # layout mismatch between `source_vecs` (time first) and `source_contexts`
  # (batch first).
  packed.source_contexts = tf.transpose(source_contexts, [1, 0, 2])
  # [time, batch_size].
  packed.source_padding = source_padding
  # [time, batch_size].
  packed.source_segment_id = source_segment_id
  return packed
def ZeroAttentionState(self, source_length, decoder_batch_size):
  """Returns the initial attention state for the decoder.

  This attention mechanism keeps no recurrent state, so a zero placeholder
  of shape [decoder_batch_size, 1] is returned purely so that callers have
  a tensor to thread through the decode loop.
  """
  dtype = self.params.dtype
  return tf.zeros([decoder_batch_size, 1], dtype=dtype)
def ComputeContextVectorWithSource(self,
                                   theta,
                                   packed_src,
                                   query_vec,
                                   attention_state=None,
                                   per_step_source_padding=None,
                                   query_segment_id=None):
  """Computes the context vector given the current query output.

  Args:
    theta: A `.NestedMap` object containing weights' values of this layer and
      its children layers.
    packed_src: A `.NestedMap` object returned by PackSource or
      InitForSourcePacked.
    query_vec: a tensor of shape [target_batch, query_dim], where target_batch
      = n * source_batch (e.g., n = num_hyps_per_beam in beamsearch). Along
      the target_batch dimension, there are n groups of consecutive rows, each
      group containing source_batch rows.
    attention_state: previous attention state. It is not used in
      AdditiveAttention, and is simply passed through.
    per_step_source_padding: Source sequence padding to apply at this step. If
      not None, it should be of shape [target_batch, source_length].
    query_segment_id: Query segment id with shape [target_batch].

  Returns:
    A tuple of 3 elements.
      The attention context vector:
        [batch_size, context_dim]
      The attention probability vector:
        [batch_size, time]
      The new attention mechanism state:
        possibly nested tuple of tensors with dimensions [target_batch, ...]
  """
  concated_source_vecs = packed_src.source_vecs
  concated_source_contexts = packed_src.source_contexts
  source_padding = packed_src.source_padding
  source_segment_id = packed_src.source_segment_id
  query_batch_size = tf.shape(query_vec)[0]
  source_sequence_length = tf.shape(source_padding)[0]
  if per_step_source_padding is None:
    # No extra per-step padding supplied: use an all-zero (no-op) padding.
    zero = tf.constant(0.0, dtype=query_vec.dtype)
    per_step_source_padding = tf.fill(
        [query_batch_size, source_sequence_length], zero)
  per_step_source_padding = py_utils.HasShape(
      per_step_source_padding, [query_batch_size, source_sequence_length])
  if source_segment_id is None:
    # Defensive default: treat the whole source as a single segment.
    source_segment_id = tf.zeros_like(source_padding)
  if query_segment_id is None:
    query_segment_id = tf.zeros(
        tf.shape(query_vec)[0], dtype=source_padding.dtype)

  def ScaleFn(x):
    # Softplus normalized so the per-dim scale equals 1.0 when x == 0
    # (presumably the variable's initialization value — confirm in __init__).
    return tf.nn.softplus(x) / tf.nn.softplus(tf.constant(0.0, dtype=x.dtype))

  # self._ctx_vec is the (possibly defun-wrapped) Atten function built in
  # __init__; it returns the context vector and attention probabilities.
  ctx_vec, prob = self._ctx_vec(
      ScaleFn(theta.per_dim_scale), source_padding, source_segment_id,
      concated_source_vecs, concated_source_contexts, query_vec,
      query_segment_id, per_step_source_padding, theta.global_step)
  # attention_state is unused by this mechanism and passed through unchanged.
  return ctx_vec, prob, attention_state
def _RecursiveReshape(x, shape):
  """Reshapes every rank-2 tensor inside `x` to `shape`.

  `x` may be None, a tensor, or a `.NestedMap` of (possibly nested) tensors.
  Tensors whose static rank is not 2 are returned unchanged; None propagates.
  """
  if x is None:
    return None
  if isinstance(x, py_utils.NestedMap):
    # Recurse into every leaf of the NestedMap.
    return x.Transform(lambda leaf: _RecursiveReshape(leaf, shape))
  if x.shape.ndims == 2:
    return tf.reshape(x, shape)
  return x
class MultiHeadedAttention(BaseAttentionLayer, quant_utils.QuantizableLayer):
  """Attention with multiple attention heads.

  Conceptually, the algorithm works as follows:

  1. Source vectors (attention keys) are first projected to vectors of dim
     p.hidden_dim.
  2. Query vectors are projected to vectors of dim p.hidden_dim as well.
  3. Context vectors (attention values) are not projected.
  4. Source vectors, query vectors and context vectors are all split into
     p.num_attention_heads chunks.
  5. The inner atten mechanism is computed separately on each of the chunks.
  6. Attention contexts from each of the chunk are concatenated to form the
     final context.
  7. Attention probs from each of the chunk are averaged to form the final
     attention prob.
  """

  @classmethod
  def Params(cls):
    """Params for MultiHeadedAttention."""
    p = super(MultiHeadedAttention, cls).Params()
    p.Define('source_dim', 0, 'Number of source nodes.')
    p.Define('query_dim', 0, 'Number of query nodes.')
    p.Define('context_dim', 0, 'Number of context nodes.')
    p.Define('hidden_dim', 0, 'Number of hidden nodes.')
    p.Define('num_attention_heads', 2, 'Num of attention heads.')
    p.Define(
        'use_source_vec_as_attention_value', True,
        'Whether or not to use source_vec as the attention value as well.'
        ' If True, we expect source_vec and source_contexts are the same.')
    p.Define('enable_source_proj', True,
             'If False, source side linear projection is disabled.')
    p.Define('enable_query_proj', True,
             'If False, query side linear projection is disabled.')
    p.Define('inner_atten_params', DotProductAttention.Params(),
             'Params for underlying attention mechanism.')
    p.Define(
        'enable_ctx_pre_proj', False,
        'If True, context is pre-projected before processing into'
        ' hidden_dim.')
    p.Define(
        'enable_ctx_post_proj', False,
        'If True, computed context is post projected into'
        ' ctx_post_proj_dim.')
    p.Define('ctx_post_proj_dim', 0, 'Number of post projection nodes.')
    # Often the attention context output needs to be concated
    # with tensors from another layer. This allows them to share
    # quantization parameters. By convention, all attention layers
    # need to include their context output vectors in this domain.
    p.qdomain.Define('atten_context', None,
                     'Quantization domain for attention context.')
    p.params_init = py_utils.WeightInit.Xavier(scale=1.0)
    return p

  @base_layer.initializer
  def __init__(self, params):
    """Constructs a MultiHeadedAttention object."""
    super(MultiHeadedAttention, self).__init__(params)
    p = self.params
    # hidden_dim is split evenly across the heads, so it must divide exactly.
    assert p.hidden_dim % p.num_attention_heads == 0
    self.TrackQTensor('source_proj_matmul', 'source_proj_add',
                      'query_proj_matmul', 'query_proj_add',
                      'ctx_pre_proj_matmul', 'ctx_pre_proj_add')
    # TODO(suderman): Remove the p.is_eval check below once brop quant within
    # defun is fixed on the training side. This is less than ideal as-is because
    # training will just trend to match downstream quant constraints vs force
    # alignment.
    self.TrackQTensor(
        'ctx_post_proj_matmul', 'ctx_post_proj_add', domain='atten_context')
    # Zero-initialized bias shared by the source and query projections below.
    pc_bias = py_utils.WeightParams(
        shape=[p.hidden_dim],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'])
    with tf.variable_scope(p.name):
      if p.enable_source_proj:
        pc = py_utils.WeightParams(
            shape=[p.source_dim, p.hidden_dim],
            init=p.params_init,
            dtype=p.dtype,
            collections=[self.__class__.__name__ + '_vars'])
        self.CreateVariable('source_proj', pc)
        self.CreateVariable('source_proj_b', pc_bias)
      else:
        # Without a projection the source must already be hidden_dim wide.
        assert p.source_dim == p.hidden_dim
      if p.enable_query_proj:
        pc = py_utils.WeightParams(
            shape=[p.query_dim, p.hidden_dim],
            init=p.params_init,
            dtype=p.dtype,
            collections=[self.__class__.__name__ + '_vars'])
        self.CreateVariable('query_proj', pc)
        self.CreateVariable('query_proj_b', pc_bias)
      else:
        # Without a projection the query must already be hidden_dim wide.
        assert p.query_dim == p.hidden_dim
      # Context pre-projection is only meaningful when contexts differ from
      # the source vectors.
      if p.enable_ctx_pre_proj and not p.use_source_vec_as_attention_value:
        assert p.context_dim
        pc = py_utils.WeightParams(
            shape=[p.context_dim, p.hidden_dim],
            init=p.params_init,
            dtype=p.dtype,
            collections=[self.__class__.__name__ + '_vars'])
        self.CreateVariable('ctx_proj', pc)
        self.CreateVariable('ctx_proj_b', pc_bias)
      if p.enable_ctx_post_proj:
        assert p.ctx_post_proj_dim
        pc = py_utils.WeightParams(
            shape=[p.hidden_dim, p.ctx_post_proj_dim],
            init=p.params_init,
            dtype=p.dtype,
            collections=[self.__class__.__name__ + '_vars'])
        self.CreateVariable('ctx_post_proj', pc)
        pc_bias_post_proj = py_utils.WeightParams(
            shape=[p.ctx_post_proj_dim],
            init=py_utils.WeightInit.Constant(0.0),
            dtype=p.dtype,
            collections=[self.__class__.__name__ + '_vars'])
        self.CreateVariable('ctx_post_proj_b', pc_bias_post_proj)

    # The inner attention operates per-head on hidden_dim / num_heads slices.
    att_dim = p.hidden_dim // p.num_attention_heads
    att_p = p.inner_atten_params.Set(
        source_dim=att_dim,
        query_dim=att_dim,
        hidden_dim=att_dim,
        dtype=p.dtype,
        atten_dropout_prob=p.atten_dropout_prob,
        atten_dropout_deterministic=p.atten_dropout_deterministic,
        packed_input=p.packed_input)
    if not att_p.name:
      att_p.name = 'inner_att'
    self.CreateChild('atten', att_p)

  @py_utils.NameScopeDecorator('MultiHeadedAttention/PackSource')
  def PackSource(self,
                 theta,
                 source_vecs,
                 source_contexts,
                 source_padding,
                 source_segment_id=None):
    """Packs source vectors.

    Does not change attention state.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      source_vecs: A tensor of shape [time, source_batch, source_dim].
      source_contexts: A tensor of shape [time, source_batch, context_dim].
      source_padding: A tensor of shape [time, source_batch].
      source_segment_id: A tensor of shape [time, source_batch].

    Returns:
      A NestedMap representing packed src. It will have the same structure
      as the one returned by the inner atten, except that source_batch will be
      source_batch * num_heads.
    """
    p = self.params
    fns = self.fns
    if not p.enable_source_proj:
      assert p.source_dim == p.hidden_dim
    if not p.enable_query_proj:
      assert p.query_dim == p.hidden_dim
    with tf.name_scope('init__0'):
      if p.use_source_vec_as_attention_value:
        # Keys and values must coincide in this mode; enforce matching shapes.
        source_vecs = py_utils.HasShape(source_vecs, tf.shape(source_contexts))
      time_steps = tf.shape(source_vecs)[0]
      batch_size = tf.shape(source_vecs)[1]
      # source_projected shape [time * source_batch, hidden]
      with tf.name_scope('init__0a'):
        source_vec_depth = tf.shape(source_vecs)[2]
      with tf.name_scope('init__0b'):
        if p.enable_source_proj:
          # Flatten [time, batch, depth] -> [time * batch, depth] so the
          # projection is a single (quantized) matmul.
          source_projected = (
              fns.qbatchmatmul(
                  tf.reshape(source_vecs, [-1, source_vec_depth]),
                  fns.qweight(theta.source_proj),
                  qt='source_proj_matmul'))
          source_projected = fns.qadd(
              source_projected,
              fns.qweight(theta.source_proj_b),
              qt='source_proj_add')
        else:
          source_projected = tf.reshape(source_vecs, [-1, source_vec_depth])
    with tf.name_scope('init__1'):
      hidden_depth = p.hidden_dim
      num_heads = p.num_attention_heads
      # => [time, source_batch * num_heads, hidden / num_heads]
      source_projected = tf.reshape(
          source_projected,
          [time_steps, batch_size * num_heads, hidden_depth // num_heads])
      if p.use_source_vec_as_attention_value:
        source_contexts_reshaped = source_projected
      else:
        if p.enable_ctx_pre_proj:
          source_context_depth = tf.shape(source_contexts)[2]
          source_contexts_projected = fns.qbatchmatmul(
              tf.reshape(source_contexts, [-1, source_context_depth]),
              fns.qweight(theta.ctx_proj),
              qt='ctx_pre_proj_matmul')
          source_contexts_projected = fns.qadd(
              source_contexts_projected,
              fns.qweight(theta.ctx_proj_b),
              qt='ctx_pre_proj_add')
        else:
          source_contexts_projected = source_contexts
        # Split contexts across heads the same way as the keys.
        source_contexts_reshaped = tf.reshape(
            source_contexts_projected, [time_steps, batch_size * num_heads, -1])
    with tf.name_scope('init__2'):
      # Replicate padding (and segment ids) per head so their batch dim
      # matches the head-split keys/values: [time, batch * num_heads].
      source_padding_replicated = tf.reshape(
          tf.tile(
              tf.reshape(source_padding, [time_steps, batch_size, 1]),
              [1, 1, num_heads]), [time_steps, batch_size * num_heads])
      if source_segment_id is None:
        source_segment_id_repl = tf.zeros_like(source_padding_replicated)
      else:
        source_segment_id_repl = tf.reshape(
            tf.tile(
                tf.reshape(source_segment_id, [time_steps, batch_size, 1]),
                [1, 1, num_heads]), [time_steps, batch_size * num_heads])

    return self.atten.PackSource(theta.atten, source_projected,
                                 source_contexts_reshaped,
                                 source_padding_replicated,
                                 source_segment_id_repl)

  @py_utils.NameScopeDecorator('MultiHeadedAttention/ExtendSourcePacked')
  def ExtendSourcePacked(self,
                         theta,
                         new_source_vecs,
                         new_source_contexts,
                         new_source_paddings,
                         new_source_segment_ids,
                         cached_packed_src,
                         t=None):
    """Extend cached source_vecs and source_contexts by one more timestep.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      new_source_vecs: A tensor of shape [source_batch, source_dim].
      new_source_contexts: A tensor of shape [source_batch, context_dim].
        new_source_vecs and new_source_contexts are source_vecs and
        source_contexts for the new timestep to be extended.
      new_source_paddings: If not None, a tensor of shape [source_batch].
        source_padding for the new timestep.
      new_source_segment_ids: If not None, a tensor of shape [source_batch].
        source_segment_id for the new timestep.
      cached_packed_src: a `.NestedMap` object, containing already preprocessed
        source_vecs and source_contexts for the previous t-1 steps. To support
        tf.while_loop on TPU (satisfying static shape requirement), instead of
        using tf.concat to update the cached vectors, the time dimension of
        each cached vector is fixed as the max_sequence_length and inplace
        update op is used to update the information for each time step:
        * source_vecs: A tensor of shape [max_sequence_length, source_batch,
          hidden_dim]. [:t, :, :] contains valid preprocessed source_vecs in
          the previous t - 1 timesteps, the rests are invalid data.
        * source_contexts: A tensor of shape [max_sequence_length,
          source_batch, hidden_dim]. [:t, :, :] contains valid preprocessed
          source_contexts in the previous t - 1 timesteps, the rests are
          invalid data.
        * source_padding: If not None, a tensor of shape [max_sequence_length,
          source_batch, num_heads]. [:t, :, :] contains cached source padding
          for the previous t - 1 timesteps, the rests are invalid data.
        * source_segment_id: If not None, a tensor of shape
          [max_sequence_length, source_batch, num_heads]. [:t, :, :] contains
          cached source segment id for the previous t - 1 timesteps, the rests
          are invalid data.
        When t is None (not running on TPU or the while loop is unrolled):
        * source_vecs: A tensor of shape [t - 1, source_batch, hidden_dim].
        * source_contexts: A tensor of shape [t - 1, source_batch, hidden_dim].
        * source_padding: If not None, a tensor of shape [t - 1, source_batch,
          num_heads], cached source padding for the previous t - 1 timesteps.
        * source_segment_id: If not None, a tensor of shape [t - 1,
          source_batch, num_heads], cached source segment id for the previous
          t - 1 timesteps.
      t: a scalar, the current time step, 0-based.

    Returns:
      Extended cached source_vecs, source_contexts, source_paddings, and
      source_segment_ids. The time dimension of each cached state is fixed:
      'extended_source_vec' is of shape [max_sequence_length, batch_size,
      num_heads * dim];
      'extended_source_context' is of shape [max_sequence_length, batch_size,
      num_heads * dim];
      'source_padding' is of shape [max_sequence_length, batch_size,
      num_heads];
      'source_segment_id' is of shape [max_sequence_length, batch_size,
      num_heads].
      But only [:(t + 1), :, :] contains valid data.
      If t is not given,
      'extended_source_vec' is of shape [t, batch_size, num_heads * dim];
      'extended_source_context' is of shape [t, batch_size, num_heads * dim];
      'source_padding' is of shape [t, batch_size, num_heads];
      'source_segment_id' is of shape [t, batch_size, num_heads].
    """
    batch_size = tf.shape(new_source_vecs)[0]
    if new_source_paddings is None:
      # Default: the new timestep is fully unpadded.
      new_source_paddings = tf.zeros([batch_size], dtype=new_source_vecs.dtype)
    if new_source_segment_ids is None:
      new_source_segment_ids = tf.zeros([batch_size],
                                        dtype=new_source_vecs.dtype)
    # Pack the single new timestep by adding a leading time dim of 1.
    processed_packed_src = self.InitForSourcePacked(
        theta, tf.expand_dims(new_source_vecs, 0),
        tf.expand_dims(new_source_contexts, 0),
        tf.expand_dims(new_source_paddings, 0),
        tf.expand_dims(new_source_segment_ids, 0))
    extended_packed_src = py_utils.NestedMap()
    for key in ('source_vecs', 'source_contexts', 'source_padding',
                'source_segment_id'):
      if cached_packed_src.get(key, None) is None:
        extended_packed_src[key] = None
      else:
        if t is not None:
          # TPU path: write row t of the fixed-size cache in place.
          processed = tf.reshape(processed_packed_src[key], [batch_size, -1])
          extended_packed_src[key] = inplace_ops.alias_inplace_update(
              cached_packed_src[key], t, processed)
        else:
          # Non-TPU path: grow the cache by concatenating along time.
          processed = tf.reshape(processed_packed_src[key], [1, batch_size, -1])
          extended_packed_src[key] = tf.concat(
              [cached_packed_src[key], processed], axis=0)
    return extended_packed_src

  @py_utils.NameScopeDecorator('MultiHeadedAttention/ZeroAttentionState')
  def ZeroAttentionState(self, source_length, decoder_batch_size):
    # Delegate to the inner attention with a per-head batch, then fold the
    # head dimension back into the feature dimension.
    zero_att_state = self.atten.ZeroAttentionState(
        source_length, decoder_batch_size * self.params.num_attention_heads)
    # [batch * num_heads, length] => [batch, num_heads * length].
    zero_att_state = _RecursiveReshape(zero_att_state, [decoder_batch_size, -1])
    return zero_att_state

  @py_utils.NameScopeDecorator(
      'MultiHeadedAttention/ComputeContextVectorWithSource')
  def ComputeContextVectorWithSource(self,
                                     theta,
                                     packed_src,
                                     query_vec,
                                     attention_state=None,
                                     per_step_source_padding=None,
                                     query_segment_id=None):
    """Computes the context vector given the current query output.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      packed_src: A `.NestedMap` object returned by PackSource or
        InitForSourcePacked.
      query_vec: a tensor of shape [target_batch, query_dim].
      attention_state: previous attention state. It is not used in
        AdditiveAttention, and is simply passed through.
      per_step_source_padding: Source sequence padding to apply at this step.
        If not None, it should be of shape [target_batch_size, source_length].
      query_segment_id: a tensor of shape [target_batch].

    Note: concated_source_vecs are the vectors that are used to compute the
    attention score between the query_vec and each concated_source_vec. The
    concated_source_contexts are the vectors that compose the result. The
    attention context vector is computed as a weighted average of the
    concated_source_contexts, using the scores that were computed using
    concated_source_vecs.

    Returns:
      A tuple of 3 elements.
        The attention context vector:
          [batch_size, context_dim]
        The attention probability vector:
          [batch_size, time]
        The new attention mechanism state:
          possibly nested tuple of tensors with dimensions [target_batch, ...]
    """
    p = self.params
    fns = self.fns
    source_padding = packed_src.source_padding
    source_seq_len = tf.shape(source_padding)[0]
    num_heads = p.num_attention_heads
    batch_size = tf.shape(query_vec)[0]

    if p.enable_query_proj:
      query_vec_projected = fns.qbatchmatmul(
          query_vec, fns.qweight(theta.query_proj), qt='query_proj_matmul')
      query_vec_projected = fns.qadd(
          query_vec_projected,
          fns.qweight(theta.query_proj_b),
          qt='query_proj_add')
      # Split the projected query across heads:
      # [batch, hidden] => [batch * num_heads, hidden / num_heads].
      query_vec_projected = tf.reshape(
          query_vec_projected,
          [batch_size * num_heads, p.hidden_dim // num_heads])
    else:
      query_vec_projected = tf.reshape(
          query_vec, [batch_size * num_heads, p.hidden_dim // num_heads])

    query_batch_size = tf.shape(query_vec)[0]
    if query_segment_id is None:
      query_segment_id = tf.zeros(
          query_batch_size * num_heads, dtype=source_padding.dtype)
    else:
      # Replicate segment ids per head to match the head-split batch.
      query_segment_id_repl = tf.tile(
          tf.expand_dims(query_segment_id, 1), [1, num_heads])
      query_segment_id = tf.reshape(query_segment_id_repl, [-1])

    if per_step_source_padding is None:
      zero = tf.constant(0.0, dtype=query_vec.dtype)
      per_step_source_padding = tf.fill([query_batch_size, source_seq_len],
                                        zero)
    per_step_source_padding = py_utils.HasShape(
        per_step_source_padding, [query_batch_size, source_seq_len])
    # Replicate per-step padding per head: [batch * num_heads, source_len].
    per_step_source_padding = tf.reshape(
        tf.tile(per_step_source_padding, [1, num_heads]), [-1, source_seq_len])
    # [batch, num_heads * ...] => [batch * num_heads, ...] for the inner atten.
    attention_state = _RecursiveReshape(attention_state,
                                        [batch_size * num_heads, -1])
    ctx_vec, prob, att_state = self.atten.ComputeContextVectorWithSource(
        theta.atten, packed_src, query_vec_projected, attention_state,
        per_step_source_padding, query_segment_id)
    # Concatenate the per-head contexts back into one vector per example.
    ctx_vec = tf.reshape(ctx_vec, [batch_size, -1])
    if p.enable_ctx_post_proj:
      ctx_vec = fns.qbatchmatmul(
          ctx_vec, fns.qweight(theta.ctx_post_proj), qt='ctx_post_proj_matmul')
      ctx_vec = fns.qadd(
          ctx_vec, fns.qweight(theta.ctx_post_proj_b), qt='ctx_post_proj_add')

    # explicitly name this tensor for potential future reference
    multi_headed_atten_prob = tf.reshape(
        prob, [batch_size, num_heads, -1], name='multi_headed_atten_prob')
    # TODO(laurenzo): Use a better named range function (we want to represent
    # 0..1 probs).
    # Final attention prob is the mean over heads.
    prob = self.QRSoftmax(tf.reduce_mean(multi_headed_atten_prob, 1))
    # Fold the head dim of the state back: [batch, num_heads * ...].
    att_state = _RecursiveReshape(att_state, [batch_size, -1])

    return ctx_vec, prob, att_state

  @py_utils.NameScopeDecorator(
      'MultiHeadedAttention/ComputeContextVectorWithAttenProbs')
  def ComputeContextVectorWithAttenProbs(self, theta, packed_context,
                                         atten_probs):
    """Computes the context vector given the attention probailities.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      packed_context: Concated source contexts with shape [ batch_size *
        num_heads, time, context_dim // num_heads].
      atten_probs: The attention probability vector: [batch_size * num_heads,
        time].

    Returns:
      The attention context vector: [target_batch, source_dim]
      If p.enable_ctx_post_proj is false, source_dim = context_dim,
      otherwise, source_dim = p.ctx_post_proj_dim.
    """
    p = self.params
    num_heads = p.num_attention_heads
    # packed_context: [batch_size * num_head, num_style,
    # hidden_dim / num_head]
    # inp: [batch_size * num_head, num_style]
    packed_context = py_utils.with_dependencies([
        py_utils.assert_shape_match([tf.shape(packed_context)[0]],
                                    [tf.shape(atten_probs)[0]])
    ], packed_context)
    b_size = tf.shape(packed_context)[0] // num_heads
    # Weighted sum over time per head, then concat heads per example.
    ctx_vec = tf.reshape(
        tf.matmul(tf.expand_dims(atten_probs, 1), packed_context), [b_size, -1])
    if p.enable_ctx_post_proj:
      ctx_vec_proj = tf.matmul(ctx_vec, theta.ctx_post_proj)
      ctx_vec_proj += theta.ctx_post_proj_b
    else:
      ctx_vec_proj = ctx_vec
    return ctx_vec_proj, ctx_vec

  def PackCachedSource(self, cached_src):
    """Reshapes a cached source (from ExtendSourcePacked) into packed form.

    Converts the per-example cached layout ([time, batch, num_heads * dim])
    into the per-head layout the inner attention expects
    ([time, batch * num_heads, dim]), transposing the contexts to
    batch-major. Missing padding/segment ids are replaced with zeros.
    """
    p = self.params
    concated_source_vecs = cached_src.source_vecs
    concated_source_contexts = cached_src.source_contexts
    source_padding = cached_src.source_padding
    source_segment_id = cached_src.source_segment_id
    batch_size = tf.shape(concated_source_vecs)[1]
    src_seq_len = tf.shape(concated_source_vecs)[0]
    num_heads = p.num_attention_heads
    packed_src = py_utils.NestedMap()
    packed_src.source_vecs = tf.reshape(
        concated_source_vecs, [src_seq_len, batch_size * num_heads, -1])
    # TODO(yonghui): Rewrite the following with just one transpose.
    packed_src.source_contexts = tf.transpose(
        tf.reshape(concated_source_contexts,
                   [src_seq_len, batch_size * num_heads, -1]), [1, 0, 2])
    if source_padding is not None:
      packed_src.source_padding = tf.reshape(
          source_padding, [src_seq_len, batch_size * num_heads])
    else:
      packed_src.source_padding = tf.zeros(
          [src_seq_len, batch_size * num_heads], dtype=py_utils.FPropDtype(p))
    if source_segment_id is None:
      packed_src.source_segment_id = tf.zeros(
          [src_seq_len, batch_size * num_heads],
          dtype=packed_src.source_padding.dtype)
    else:
      packed_src.source_segment_id = tf.reshape(
          source_segment_id, [src_seq_len, batch_size * num_heads])
    return packed_src

  @py_utils.NameScopeDecorator(
      'MultiHeadedAttention/ComputeContextVectorWithCachedSource')
  def ComputeContextVectorWithCachedSource(self,
                                           theta,
                                           cached_src,
                                           query_vec,
                                           attention_state=None,
                                           per_step_source_padding=None,
                                           query_segment_id=None):
    """Same as the ComputeContextVectorWithSource api above, except values ...

    in source_vecs, source_contexts and source_padding are ordered differently.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      cached_src: A `.NestedMap` object returned by ExtendSourcePacked.
      query_vec: a tensor of shape [target_batch, query_dim].
      attention_state: previous attention state. It is not used in
        AdditiveAttention, and is simply passed through.
      per_step_source_padding: Source sequence padding to apply at this step.
        If not None, it should be of shape [target_batch_size, source_length].
      query_segment_id: a tensor of shape [target_batch].

    Returns:
      The attention context vector: [target_batch, source_dim]
      The attention probability vector: [target_batch, time]
      The new attention mechanism state: possibly nested tuple of tensors with
      dimensions [target_batch....]
    """
    # Re-pack the cache into the inner attention's layout, then delegate.
    return self.ComputeContextVectorWithSource(
        theta, self.PackCachedSource(cached_src), query_vec, attention_state,
        per_step_source_padding, query_segment_id)
class LocationSensitiveAttention(BaseAttentionLayer):
"""An attention that also takes into account previously attended locations.
See section 2.2 of this paper for a description of this technique:
http://papers.nips.cc/paper/5847-attention-based-models-for-speech-recognition.pdf
"""
@classmethod
def Params(cls):
"""Params for this LocationSensitiveAttention class."""
p = super(LocationSensitiveAttention, cls).Params()
p.Define('source_dim', 0, 'Number of source nodes.')
p.Define('location_filter_size', 0,
'Location filter size, should be an odd number e.g. 31.')
p.Define('location_num_filters', 0, 'Number of location filters, e.g. 32.')
p.Define('query_dim', 0, 'Number of query nodes.')
p.Define('hidden_dim', 0, 'Number of hidden nodes.')
p.Define(
'same_batch_size', False,
'True iff the source and target sequence has the same batch size.')
p.Define(
'location_features', ['PREV_PROBS'],
'List signals to run the convolutions on. Possible options are: '
'PREV_PROBS, CUMULATIVE_PROBS.')
# Often the attention context output needs to be concated
# with tensors from another layer. This allows them to share
# quantization parameters. By convention, all attention layers
# need to include their context output vectors in this domain.
p.qdomain.Define('atten_context', None,
'Quantization domain for attention context.')
# Fill in reasonable default for params init
p.params_init = py_utils.WeightInit.GaussianSqrtDim()
return p
@base_layer.initializer
def __init__(self, params):
"""Constructs an LocationSensitiveAttention object."""
super(LocationSensitiveAttention, self).__init__(params)
p = self.params
name = p.name
self._is_quantized = p.qdomain.default is not None
assert not p.packed_input, ('Packed input is not supported yet for '
'LocationSensitiveAttention.')
if p.atten_dropout_prob != 0:
raise NotImplementedError('dropout is not supported')
self.TrackQTensor('atten_conv')
self.TrackQTensor('atten_context', domain='atten_context')
self.TrackQTensor(
'atten_matmul',
'logits_add',
'encode_matmul',
'logits_mul',
'logits_bias',
domain='fullyconnected')
with tf.variable_scope(name):
pc = py_utils.WeightParams(
shape=[p.source_dim, p.hidden_dim],
init=p.params_init,
dtype=p.dtype,
collections=['LocationSensitiveAttention_vars'])
self.CreateVariable('source_var', pc, self.AddGlobalVN)
pc = py_utils.WeightParams(
shape=[p.query_dim, p.hidden_dim],
init=p.params_init,
dtype=p.dtype,
collections=['LocationSensitiveAttention_vars'])
self.CreateVariable('query_var', pc, self.AddGlobalVN)
pc = py_utils.WeightParams(
shape=[p.hidden_dim],
init=p.params_init,
dtype=p.dtype,
collections=['LocationSensitiveAttention_vars'])
self.CreateVariable('hidden_var', pc, self.AddGlobalVN)
assert p.location_filter_size % 2 == 1
assert p.location_num_filters > 0
location_filter_shape = [
p.location_filter_size,
len(p.location_features), p.location_num_filters
]
# TODO(yonghui): Don't hard code how params are initialized.
location_filter_pc = py_utils.WeightParams(
shape=location_filter_shape,
init=py_utils.WeightInit.Uniform(0.05),
dtype=p.dtype,
collections=['LocationSensitiveAttention_vars'])
self.CreateVariable('location_filter_var', location_filter_pc,
self.AddGlobalVN)
location_var_shape = [p.location_num_filters, p.hidden_dim]
location_pc = py_utils.WeightParams(
shape=location_var_shape,
init=py_utils.WeightInit.Uniform(0.05),
dtype=p.dtype,
collections=['LocationSensitiveAttention_vars'])
self.CreateVariable('location_var', location_pc, self.AddGlobalVN)
@_ConditionalDefun(
self._is_quantized, *[p.dtype] * 5, noinline=not py_utils.use_tpu())
def AttenLogits(concated_source_vecs, query_vec_reshaped, hidden_v,
location_feats, location_var):
"""Generates logits."""
fns = self.fns
def CollapseOutDim(x):
return tf.reshape(x, [-1, tf.shape(x)[-1]])
# => [sl, sb, hd]
location_feats = tf.transpose(location_feats, [2, 0, 1])
location_hidden = fns.qmatmul(
CollapseOutDim(location_feats), location_var, qt='logits_mul')
sl = py_utils.GetShape(location_feats)[0]
tb = py_utils.GetShape(location_feats)[1]
hd = py_utils.GetShape(location_var)[1]
location_hidden = tf.reshape(location_hidden, [sl, tb, hd])
sb = py_utils.GetShape(query_vec_reshaped)[2]
bs_mult = py_utils.GetShape(query_vec_reshaped)[1]
location_hidden = tf.reshape(location_hidden, [sl, bs_mult, sb, hd])
# Shape of summed is [sl, tb/sb, sb, hidden_dim].
summed = fns.qadd(
concated_source_vecs, query_vec_reshaped, qt='logits_add')
summed = fns.qadd(summed, location_hidden, qt='logits_bias')
summed = fns.qtanh(summed)
# logits is of shape [sl * tb/sb * sb, 1]. Computes dot product
# between v with every rows in 'summed'. Then we reshape the
# result to be of shape [sl, tb/sb, sb].
logits = fns.qmatmul(
tf.reshape(summed, [-1, p.hidden_dim]),
tf.reshape(hidden_v, [p.hidden_dim, 1]),
qt='logits')
logits = tf.reshape(logits, py_utils.GetShape(summed)[:3])
return logits
@_ConditionalDefun(
not self._is_quantized, *[p.dtype] * 5, noinline=not py_utils.use_tpu())
def AttenLogitsSameBatchSize(concated_source_vecs, query_vec_transformed,
hidden_v, location_feats, location_var):
"""Generates logits.
Optimized code path for when the target and the source have the same batch
size.
Args:
concated_source_vecs: Tensor of shape [sl, batch, dim]
query_vec_transformed: Tensor of shape [batch, dim]
hidden_v: Tensor of shape [dim]
location_feats: Tensor of shape [batch, location_feature_dim, sl]
location_var: Tensor of shape [location_feature_dim, dim]
Returns:
logits in the shape [sl, batch_size].
"""
def CollapseOutDim(x):
return tf.reshape(x, [-1, tf.shape(x)[-1]])
fns = self.fns
# => [sl, sb, hd]
location_feats = tf.transpose(location_feats, [2, 0, 1])
location_hidden = fns.qmatmul(
CollapseOutDim(location_feats), location_var, qt='logits_mul')
sl = tf.shape(location_feats)[0]
tb = tf.shape(location_feats)[1]
hd = tf.shape(location_var)[1]
location_hidden = tf.reshape(location_hidden, [sl, tb, hd])
# Shape of summed is [sl, sb, hidden_dim].
summed = fns.qadd(
concated_source_vecs,
tf.expand_dims(query_vec_transformed, 0),
qt='logits_add')
summed = fns.qadd(summed, location_hidden, qt='logits_bias')
summed = fns.qtanh(summed)
# logits is of shape [sl * sb, 1]. Computes dot product
# between v with every rows in 'summed'. Then we reshape the
# result to be of shape [sl, tb].
logits = fns.qmatmul(
tf.reshape(summed, [-1, p.hidden_dim]),
tf.reshape(hidden_v, [p.hidden_dim, 1]),
qt='logits')
logits = tf.reshape(logits, py_utils.GetShape(summed)[:2])
return logits
    def Atten(hidden_var, query_var, source_padding, concated_source_vecs,
              concated_source_contexts, query_vec, attention_state,
              location_filter_var, location_var, per_step_source_padding):
      """Computes the attention context vector.

      General path: the target batch (tb) may be a multiple of the source
      batch (sb); assumes tb is evenly divisible by sb.

      Returns:
        A (context vector [tb, context_dim], attention probs [tb, sl]) pair.
      """
      p = self.params
      # attention_state shape [batch, len(p.location_features), slen]
      # it contains previous and accumulated attention probabilites.
      attention_state = py_utils.HasShape(attention_state,
                                          [-1, len(p.location_features), -1])
      fns = self.fns
      # [sb, hd, sl]: convolve previous/accumulated probs into features.
      location_feats = self._ApplyConv(attention_state, location_filter_var)
      # concated_source_vecs is of shape [sl, sb, dims]
      # concated_source_contexts is of shape [sb, sl, context_dim]
      # query_vec is of shape [tb, dims]
      sb = py_utils.GetShape(concated_source_vecs)[1]
      tb = py_utils.GetShape(query_vec)[0]
      multiplier = tb // sb
      # concated_source_vecs is reshaped to [sl, 1, sb, hidden_dims]
      concated_source_vecs = tf.expand_dims(concated_source_vecs, 1)
      query_vec_transformed = fns.qmatmul(
          query_vec, query_var, qt='atten_matmul')
      # query_vec is reshaped to [1, tb/sb, sb, hidden_dims].
      query_vec_reshaped = tf.reshape(query_vec_transformed,
                                      [1, multiplier, sb, p.hidden_dim])
      # logits is of shape [sl, tb/sb, sb]
      logits = AttenLogits(concated_source_vecs, query_vec_reshaped, hidden_var,
                           location_feats, location_var)
      # Take out the padding states.
      # _source_padding is of shape [sl, sb].
      # reshaped to [sl, 1, sb].
      source_padding = tf.expand_dims(source_padding, 1)
      per_step_source_padding = tf.reshape(
          tf.transpose(per_step_source_padding), [-1, multiplier, sb])
      # Merge per-step and static padding; QRPadding quantizes the result.
      source_padding = self.QRPadding(
          tf.add(source_padding, per_step_source_padding))
      # Reshape logits to a matrix of shape [tb, sl] and takes the
      # softmax to compute the probabilities.
      logits = tf.transpose(tf.reshape(logits, [-1, tb]))
      source_padding = tf.transpose(tf.reshape(source_padding, [-1, tb]))
      probs = self._PaddedSoftmax(
          logits, source_padding, narrow_to_asym_bit_depth=True)
      # Reshape probs to be of shape [tb/sb, sb, sl].
      probs_reshaped = tf.reshape(probs, [multiplier, sb, -1])
      # Transpose probs to be of shape [sb, tb/sb, sl]
      probs_reshaped = tf.transpose(probs_reshaped, [1, 0, 2])
      # [sb, tb/sb, sl] * [sb, sl, context_dim] = [sb, tb/sb, context_dim]
      summed = fns.qbatchmatmul(
          tf.cast(probs_reshaped, concated_source_contexts.dtype),
          concated_source_contexts,
          qt='atten_context')
      # summed is of shape [tb/sb, sb, context_dim]
      summed = tf.transpose(summed, [1, 0, 2])
      return tf.reshape(summed, [tb, -1]), probs
    def AttenSameBatchSize(hidden_var, query_var, source_padding,
                           concated_source_vecs, concated_source_contexts,
                           query_vec, attention_state, location_filter_var,
                           location_var, per_step_source_padding):
      """Computes the attention context vector.

      Optimized code path for when source and target have the same batch size.

      Returns:
        A (context vector [b, context_dim], attention probs [b, sl]) pair.
      """
      # Per-step padding is not supported on this fast path.
      del per_step_source_padding
      p = self.params
      # attention_state shape [batch, len(p.location_features), slen]
      # it contains previous and accumulated attention probabilites.
      attention_state = py_utils.HasShape(attention_state,
                                          [-1, len(p.location_features), -1])
      fns = self.fns
      # [b, hd, sl]: convolve previous/accumulated probs into features.
      location_feats = self._ApplyConv(attention_state, location_filter_var)
      query_vec_transformed = fns.qmatmul(
          query_vec, query_var, qt='atten_matmul')
      # logits is of shape [sl, sb]
      logits = AttenLogitsSameBatchSize(concated_source_vecs,
                                        query_vec_transformed, hidden_var,
                                        location_feats, location_var)
      # => [sl, tb]
      logits.set_shape(source_padding.shape)
      # Reshape logits to a matrix of shape [tb, sl] and takes the
      # softmax to compute the probabilities.
      logits = tf.transpose(logits)
      source_padding = tf.transpose(source_padding)
      probs = self._PaddedSoftmax(
          logits, source_padding, narrow_to_asym_bit_depth=True)
      # [b, 1, sl] @ [b, sl, context_dim] -> [b, 1, context_dim];
      # squeezed below to [b, context_dim].
      summed = fns.qbatchmatmul(
          tf.cast(tf.expand_dims(probs, 1), concated_source_contexts.dtype),
          concated_source_contexts,
          qt='atten_context')
      return tf.squeeze(summed, 1), probs
    # Pick the optimized same-batch-size implementation when configured.
    if p.same_batch_size:
      self._ctx_vec = AttenSameBatchSize
    else:
      self._ctx_vec = Atten
def EncodeSource(src_w, vecs, ctxs):
fns = self.fns
time, batch = py_utils.GetShape(vecs, 2)
ctxs = py_utils.HasShape(ctxs, [time, batch, -1])
transformed_vecs = tf.reshape(
fns.qmatmul(
tf.reshape(vecs, [-1, p.source_dim]), src_w, qt='encode_matmul'),
[time, batch, -1])
transposed_ctxs = tf.transpose(ctxs, [1, 0, 2])
return transformed_vecs, transposed_ctxs
self._encode_source = EncodeSource
def _ApplyConv(self, attention_state, location_filter_var):
"""Applies the convolution on attention state."""
p = self.params
fns = self.fns
attention_state_f32 = attention_state
location_filter_var_f32 = location_filter_var
if p.dtype != tf.float32:
attention_state_f32 = tf.cast(attention_state, tf.float32)
location_filter_var_f32 = tf.cast(location_filter_var, tf.float32)
data_format = 'NCW'
if not py_utils.use_xla():
# NCW format is not supported on CPU.
attention_state_f32 = tf.transpose(attention_state_f32, [0, 2, 1])
data_format = 'NWC'
location_feats = fns.qconv1d(
attention_state_f32,
location_filter_var_f32,
1,
'SAME',
data_format=data_format,
qt='atten_conv')
if not py_utils.use_xla():
location_feats = tf.transpose(location_feats, [0, 2, 1])
if p.dtype != tf.float32:
location_feats = tf.cast(location_feats, p.dtype)
# [sb, hd, sl]
return location_feats
def PackSource(self,
theta,
source_vecs,
source_contexts,
source_padding,
source_segment_id=None):
with tf.name_scope(self.params.name):
if source_segment_id is None:
source_segment_id = tf.zeros_like(source_padding)
(concated_source_vecs, concated_source_contexts) = (
self._encode_source(
self.QWeight(theta.source_var), source_vecs, source_contexts))
return py_utils.NestedMap(
# [time, batch_size, hidden_dim].
source_vecs=concated_source_vecs,
# [batch_size, time, context_dim].
# Note the mismatch between `source_vecs` and `source_contexts`. In
# `source_vecs`, time is the first dim, while it is the second dim in
# `source_contexts`.
source_contexts=concated_source_contexts,
# [time, batch_size].
source_padding=source_padding,
# [time, batch_size].
source_segment_id=source_segment_id)
def ZeroAttentionState(self, source_length, decoder_batch_size):
p = self.params
dtype = p.dtype.real_dtype
num_features = len(p.location_features)
with tf.name_scope(p.name):
state = tf.concat([
tf.ones([decoder_batch_size, num_features, 1], dtype=dtype),
tf.zeros([decoder_batch_size, num_features, source_length - 1],
dtype=dtype)
], 2)
state = self.QRSoftmax(state)
return state
  def ComputeContextVectorWithSource(self,
                                     theta,
                                     packed_src,
                                     query_vec,
                                     attention_state=None,
                                     per_step_source_padding=None,
                                     query_segment_id=None):
    """Computes the context vector given the current query output.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      packed_src: A `.NestedMap` object returned by PackSource or
        InitForSourcePacked.
      query_vec: a tensor of shape [batch_size, query_dim].
      attention_state: If `params().location_features == ['PREV_PROBS',
        'CUMULATIVE_PROBS']`, then `attention_state` is a tensor of shape
        [batch_size, 2, src_len].

        - attention_state[:, 0, :] contains previous attention probabilities.
        - attention_state[:, 1, :] contains a sum over previous timesteps of
          attention probabilities.
      per_step_source_padding: Source sequence padding to apply at this step.
        If not None, it should be of shape [target_batch_size, source_length].
      query_segment_id: Query segment id with shape [batch_size].

    Note: concated_source_vecs are the vectors that are used to compute the
    attention score between the query_vec and each concated_source_vec. The
    concated_source_contexts are the vectors that compose the result. The
    attention context vector is computed as a weighted average of the
    concated_source_contexts, using the scores that were computed using
    concated_source_vecs.

    Returns:
      A tuple of 3 elements.

      The attention context vector:
        [batch_size, context_dim]
      The attention probability vector:
        [batch_size, time]
      The new attention mechanism state:
        possibly nested tuple of tensors with dimensions [target_batch, ...]
    """
    del query_segment_id
    p = self.params
    concated_source_vecs = packed_src.source_vecs
    concated_source_contexts = packed_src.source_contexts
    source_padding = packed_src.source_padding
    if p.same_batch_size:
      # The same-batch fast path does not support per-step padding.
      assert per_step_source_padding is None
    query_batch_size = py_utils.GetShape(query_vec)[0]
    source_length = py_utils.GetShape(source_padding)[0]
    if per_step_source_padding is None:
      # Default: no extra per-step padding.
      zero = tf.constant(0.0, dtype=query_vec.dtype)
      per_step_source_padding = tf.fill([query_batch_size, source_length], zero)
    per_step_source_padding = py_utils.HasShape(
        per_step_source_padding, [query_batch_size, source_length])
    # Apply per-step variational noise to each weight (no-op if disabled).
    hidden = py_utils.AddPerStepVN(p, theta.hidden_var)
    query = py_utils.AddPerStepVN(p, theta.query_var)
    location_filter = py_utils.AddPerStepVN(p, theta.location_filter_var)
    location = py_utils.AddPerStepVN(p, theta.location_var)
    ctx_vec, prob = self._ctx_vec(hidden, query, source_padding,
                                  concated_source_vecs,
                                  concated_source_contexts, query_vec,
                                  attention_state, location_filter, location,
                                  per_step_source_padding)
    # Build the next attention state from the fresh probabilities.
    new_feats = {'PREV_PROBS': prob}
    if 'CUMULATIVE_PROBS' in p.location_features:
      # Quantization must match the _PaddedSoftmax method.
      cum_prob_index = p.location_features.index('CUMULATIVE_PROBS')
      new_feats['CUMULATIVE_PROBS'] = self.QRSoftmax(
          tf.add(prob, attention_state[:, cum_prob_index, :]),
          narrow_to_asym_bit_depth=True)
    new_attention_state = tf.stack([new_feats[f] for f in p.location_features],
                                   axis=1)
    return ctx_vec, prob, new_attention_state
def MergeSourcePaddingWithPerStepSourcePadding(source_padding,
                                               per_step_source_padding, tb):
  """Merges source padding with per-step source padding.

  Args:
    source_padding: [sl, sb].
    per_step_source_padding: [tb, sl], or None for no per-step padding.
    tb: target batch size.

  Returns:
    A tensor of shape [tb, sl].
  """
  padding_shape = py_utils.GetShape(source_padding)
  sl, sb = padding_shape[0], padding_shape[1]
  if per_step_source_padding is None:
    # No per-step padding supplied: use an all-zeros placeholder.
    zero = tf.constant(0.0, dtype=source_padding.dtype)
    per_step_source_padding = tf.fill([tb, sl], zero)
  per_step_source_padding = py_utils.HasShape(per_step_source_padding, [tb, sl])
  # [1, sb, sl]: source padding, broadcastable over the tb/sb multiplier.
  broadcast_padding = tf.expand_dims(tf.transpose(source_padding), 0)
  # A position is padded if either padding marks it.
  merged = tf.maximum(broadcast_padding,
                      tf.reshape(per_step_source_padding, [-1, sb, sl]))
  return tf.reshape(merged, [tb, -1])
class MonotonicAttention(BaseAttentionLayer):
  """An attention mechanism which enforces monotonic alignments.

  This layer implements the monotonic attention mechanism described in
  "Online and Linear-Time Attention by Enforcing Monotonic Alignments"
  (https://arxiv.org/abs/1704.00784). It is used in exactly the same way as
  AdditiveAttention, but both the attention distribution and the energy
  function are different.

  Rather than using a softmax, this mechanism feeds the attention energy into a
  (hard or soft) sigmoid and treats the output as Bernoulli probabilities
  representing the probability of attending to a given entry in the input
  sequence, processed from left-to-right. Based on this interpretation, the
  resulting distribution over input sequence entries is computed with a dynamic
  program. The intended use is to train with soft sigmoids according to the
  expected output (setting param hard_sigmoid=False), then use hard sigmoids at
  test time to allow for online and linear-time decoding. To encourage the
  train and test-time behavior to be similar, noise can optionally be added to
  the sigmoid activations during training (param pre_sigmoid_noise). For the
  energy function, rather than computing::

    E = dot(v, tanh(dot(W, query) + dot(W, encoder_states)))

  it computes::

    E = dot(g*v/||v||, tanh(dot(W, query) + dot(W, encoder_states) + b)) + r

  where g and r are scalars and b is a vector, and ||v|| is the L2 norm of v.
  These modifications address the fact that the sigmoids in the monotonic
  attention mechanism are sensitive to offset and a bit harder to train
  compared to the softmax function. It can be helpful to initialize the
  energy bias scalar r to a negative value (param hidden_bias_init).
  """

  @classmethod
  def Params(cls):
    """Params for this MonotonicAttention class."""
    p = super(MonotonicAttention, cls).Params()
    p.Define('source_dim', 0, 'Number of source nodes.')
    p.Define('query_dim', 0, 'Number of query nodes.')
    p.Define('hidden_dim', 0, 'Number of hidden nodes.')
    p.Define('pre_sigmoid_noise', 0, 'Standard deviation of pre-sigmoid noise.')
    p.Define('hidden_bias_init', -1, 'Initial value of hidden bias.')
    p.Define('hard_sigmoid', False, 'Whether to use a hard sigmoid.')
    # Fill in reasonable default for params init
    p.params_init = py_utils.WeightInit.GaussianSqrtDim()
    return p

  @base_layer.initializer
  def __init__(self, params):
    """Constructs a MonotonicAttention object."""
    super(MonotonicAttention, self).__init__(params)
    p = self.params
    assert not p.packed_input, ('Packed input not supported for Monotonic '
                                'Attention.')
    if p.atten_dropout_prob != 0:
      raise NotImplementedError('dropout is not supported')

    # When running eval, don't add pre-sigmoid noise, and use a hard sigmoid to
    # match behavior of online decoding.
    if p.is_eval:
      p.pre_sigmoid_noise = 0.
      p.hard_sigmoid = True

    with tf.variable_scope(p.name):
      # source is the weight matrix for the memory/encoder states
      pc = py_utils.WeightParams(
          shape=[p.source_dim, p.hidden_dim],
          init=p.params_init,
          dtype=p.dtype,
          collections=['MonotonicAttention_vars'])
      self.CreateVariable('source_var', pc, self.AddGlobalVN)

      # query is the weight matrix for the query/decoder RNN state
      pc = py_utils.WeightParams(
          shape=[p.query_dim, p.hidden_dim],
          init=p.params_init,
          dtype=p.dtype,
          collections=['MonotonicAttention_vars'])
      self.CreateVariable('query_var', pc, self.AddGlobalVN)

      # hidden is the pre-softmax vector which converts from tanh to scalar
      pc = py_utils.WeightParams(
          shape=[p.hidden_dim],
          init=p.params_init,
          dtype=p.dtype,
          collections=['MonotonicAttention_vars'])
      self.CreateVariable('hidden_var', pc, self.AddGlobalVN)

      # energy_bias is the bias vector which appears inside of tanh
      # Initialize the bias vector to all zeros
      pc = py_utils.WeightParams(
          shape=[p.hidden_dim],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=p.dtype,
          collections=['MonotonicAttention_vars'])
      self.CreateVariable('energy_bias_var', pc)

      # hidden_scale is the weight normalization scale for hidden
      # Initialize so that the initial scale is 1/sqrt(hidden_dim)
      pc = py_utils.WeightParams(
          shape=[],
          init=py_utils.WeightInit.Constant(1 / np.sqrt(p.hidden_dim)),
          dtype=p.dtype,
          collections=['MonotonicAttention_vars'])
      self.CreateVariable('hidden_scale_var', pc)

      # hidden_bias is the bias scalar applied before the sigmoid
      # Use the hidden_bias_init hyperparam to set the initial value
      pc = py_utils.WeightParams(
          shape=[],
          init=py_utils.WeightInit.Constant(p.hidden_bias_init),
          dtype=p.dtype,
          collections=['MonotonicAttention_vars'])
      self.CreateVariable('hidden_bias_var', pc)

    def EncodeSource(src_w, vecs, ctxs):
      # Projects source vecs with src_w; transposes ctxs to batch-major.
      time, batch = py_utils.GetShape(vecs, 2)
      ctxs = py_utils.HasShape(ctxs, [time, batch, -1])
      transformed_vecs = tf.reshape(
          py_utils.Matmul(tf.reshape(vecs, [-1, p.source_dim]), src_w),
          [time, batch, -1])
      transposed_ctxs = tf.transpose(ctxs, [1, 0, 2])
      return transformed_vecs, transposed_ctxs

    self._encode_source = EncodeSource

  def PackSource(self,
                 theta,
                 source_vecs,
                 source_contexts,
                 source_padding,
                 source_segment_id=None):
    """Packs the source tensors into a `.NestedMap` for attention."""
    with tf.name_scope(self.params.name):
      if source_segment_id is None:
        source_segment_id = tf.zeros_like(source_padding)
      (concated_source_vecs, concated_source_contexts) = (
          self._encode_source(theta.source_var, source_vecs, source_contexts))
    return py_utils.NestedMap(
        # [time, batch_size, hidden_dim].
        source_vecs=concated_source_vecs,
        # [batch_size, time, context_dim].
        # Note the mismatch between `source_vecs` and `source_contexts`. In
        # `source_vecs`, time is the first dim, while it is the second dim in
        # `source_contexts`.
        source_contexts=concated_source_contexts,
        # [time, batch_size].
        source_padding=source_padding,
        # [time, batch_size].
        source_segment_id=source_segment_id)

  def ZeroAttentionState(self, source_length, decoder_batch_size):
    """Initial state: all emission probability on source position 0."""
    p = self.params
    dtype = p.dtype
    with tf.name_scope(p.name):
      # Set initial previous attention to [1, 0, ... 0] to avoid special-casing
      emit_probs = tf.one_hot(
          tf.zeros((decoder_batch_size,), dtype=tf.int32),
          source_length,
          dtype=dtype)
      return py_utils.NestedMap(emit_probs=emit_probs)

  def ComputeProbabilities(self, theta, concated_source_vecs,
                           merged_source_padding, query_vec, attention_state):
    """Computes probabilities of emissions."""
    # concated_source_contexts is of shape [sb, sl, context_dim]
    # query_vec is of shape [tb, dims]
    sb = tf.shape(concated_source_vecs)[1]
    tb = tf.shape(query_vec)[0]
    multiplier = tb // sb

    p = self.params

    # noinline and compiled cannot be set at the same time
    @function.Defun(*([p.dtype] * 7), noinline=not py_utils.use_tpu())
    def AttenLogits(concated_source_vecs, query_vec, query_v, energy_b,
                    hidden_v, hidden_g, hidden_b):
      """Computes logits from source, query, and variables.

      Args:
        concated_source_vecs: [sl, sb, hidden_dims].
        query_vec: [tb, query_dim].
        query_v: [query_dim, hidden_dim]
        energy_b: [hidden_dim].
        hidden_v: [hidden_dim].
        hidden_g: [].
        hidden_b: [].

      Returns:
        logits: [tb, sl].
      """
      # Apply query matrix to query. Becomes [tb, hidden_dim].
      query_vec_transformed = py_utils.Matmul(
          query_vec, query_v, name='query_transformation')
      # query_vec is reshaped to [1, tb/sb, sb, hidden_dim].
      query_vec_reshaped = tf.reshape(query_vec_transformed,
                                      [1, multiplier, sb, p.hidden_dim])
      # [sl, 1, sb, hidden_dim].
      concated_source_vecs = tf.expand_dims(concated_source_vecs, 1)
      energy_b = tf.reshape(energy_b, [1, 1, 1, -1])
      # Shape of summed is [sl, tb/sb, sb, hidden_dim].
      summed = tf.tanh(concated_source_vecs + query_vec_reshaped + energy_b)
      # Weight-normalized energy vector: g * v / ||v||.
      hidden_v = hidden_g * tf.nn.l2_normalize(hidden_v, axis=0)
      # logits is of shape [sl * tb/sb * sb, 1]. Computes dot product
      # between v with every rows in 'summed'. Then we reshape the
      # result to be of shape [sl, tb/sb, sb].
      #
      # Another equivalent way is to do:
      #  logits = tf.reduce_sum(summed *
      #                         tf.reshape(v, [1, 1, 1, hidden_dim]), 3)
      logits = py_utils.Matmul(
          tf.reshape(summed, [-1, p.hidden_dim]),
          tf.reshape(hidden_v, [p.hidden_dim, 1]))
      logits += hidden_b
      # [tb, sl].
      logits = tf.transpose(tf.reshape(logits, [-1, tb]), [1, 0])
      return logits

    with tf.name_scope('logits'):
      logits = AttenLogits(concated_source_vecs, query_vec, theta.query_var,
                           theta.energy_bias_var, theta.hidden_var,
                           theta.hidden_scale_var, theta.hidden_bias_var)

    previous_attention = attention_state.emit_probs
    with tf.name_scope('prob'):
      if self.params.hard_sigmoid:
        # If using a hard sigmoid, just compare against 0
        p_choose_i = tf.cast(tf.greater(logits, 0), logits.dtype)
        # Never choose padded values.
        p_choose_i = tf.where(merged_source_padding > 0.0,
                              tf.zeros_like(p_choose_i), p_choose_i)
        # Compute probability distribution assuming hard probabilities
        probs = MonotonicAttentionProb(p_choose_i, previous_attention, 'hard')
      else:
        # Compute pre-sigmoid noise.
        activation_noise = tf.random.stateless_normal(
            py_utils.GetShape(logits),
            py_utils.GenerateStepSeedPair(p, theta.global_step),
            dtype=logits.dtype)
        # Compute sigmoid probabilities.
        p_choose_i = tf.nn.sigmoid(logits + self.params.pre_sigmoid_noise *
                                   activation_noise)
        # Never choose padded values.
        p_choose_i = tf.where(merged_source_padding > 0,
                              tf.zeros_like(p_choose_i), p_choose_i)
        # Compute attention distribution
        probs = MonotonicAttentionProb(p_choose_i, previous_attention,
                                       'parallel')

    # [tb, sl].
    return probs, py_utils.NestedMap(emit_probs=probs)

  def ComputeContextVectorWithSource(self,
                                     theta,
                                     packed_src,
                                     query_vec,
                                     attention_state,
                                     per_step_source_padding=None,
                                     query_segment_id=None):
    """Computes the context vector given the current query output.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      packed_src: A `.NestedMap` object returned by PackSource or
        InitForSourcePacked.
      query_vec: a tensor of shape [batch_size, query_dim].
      attention_state: The attention probs computed at the previous timestep.
      per_step_source_padding: Source sequence padding to apply at this step.
        If not None, it should be of shape [target_batch_size, source_length].
      query_segment_id: a tensor of shape [batch_size].

    Note: concated_source_vecs are the vectors that are used to compute the
    attention score between the query_vec and each concated_source_vec. The
    concated_source_contexts are the vectors that compose the result. The
    attention context vector is computed as a weighted average of the
    concated_source_contexts, using the scores that were computed using
    concated_source_vecs.

    Returns:
      A tuple of 3 elements.

      The attention context vector:
        [batch_size, context_dim]
      The attention probability vector:
        [batch_size, time]
      The attention probability vector:
        (again, to be interpreted as state).
    """
    del query_segment_id
    concated_source_vecs = packed_src.source_vecs
    concated_source_contexts = packed_src.source_contexts
    source_padding = packed_src.source_padding
    sb = tf.shape(concated_source_vecs)[1]
    tb = tf.shape(query_vec)[0]
    multiplier = tb // sb
    merged_source_padding = MergeSourcePaddingWithPerStepSourcePadding(
        source_padding, per_step_source_padding, tb)

    probs, new_state = self.ComputeProbabilities(theta, concated_source_vecs,
                                                 merged_source_padding,
                                                 query_vec, attention_state)

    with tf.name_scope('sum'):
      # Reshape probs to be of shape [tb/sb, sb, sl]
      probs_reshaped = tf.reshape(probs, [multiplier, sb, -1])
      # Transpose probs to be of shape [sb, tb/sb, sl]
      probs_reshaped = tf.transpose(probs_reshaped, [1, 0, 2])
      # Batched matmul
      # [sb, tb/sb, sl] * [sb, sl, context_dim] = [sb, tb/sb, context_dim]
      summed = tf.matmul(probs_reshaped, concated_source_contexts)
      # summed is of shape [tb/sb, sb, context_dim]
      summed = tf.transpose(summed, [1, 0, 2])
      ctx_vec = tf.reshape(summed, [tb, -1])

    return ctx_vec, probs, new_state
class GmmMonotonicAttention(BaseAttentionLayer):
"""A GMM-based monotonic attention module.
Based on "Generating Sequences With Recurrent Neural Networks" by <NAME>.
Eq [46-51] in https://arxiv.org/abs/1308.0850.
"""
  @classmethod
  def Params(cls):
    """Params for this GmmMonotonicAttention class."""
    p = super(GmmMonotonicAttention, cls).Params()
    p.Define('source_dim', 0, 'Number of source nodes.')
    p.Define('query_dim', 0, 'Number of query nodes.')
    p.Define('hidden_dim', 128,
             'Number of hidden units for the MLP that predicts GMM params.')
    p.Define('max_offset', -1,
             'Max offset to move attention pointer, Enabled only when > 0.')
    p.Define('num_mixtures', 5, 'Number of location GMM components.')
    p.Define(
        'normalize_probs', False,
        'Whether to normalize probabilities computed by GMM. Otherwise, '
        'the attention weights (i.e. probabilities) may not add up to '
        '1.0.')
    # TODO(oday): Remove this after all experiments had been migrated.
    p.Define(
        'use_atten_v2', True,
        'Whether to use AttenV2 inner function. This flag should be False '
        'in old checkpoints because the loss calculation may be affected.')
    # TODO(ngyuzh): find a good initialize for both TTS and ASR. Consider split
    # the layer if it's very sensitive to the initialization
    p.params_init = py_utils.WeightInit.Xavier(0.1)
    return p
  @base_layer.initializer
  def __init__(self, params):
    """Constructs a GMM-based monotonic attention module."""
    super(GmmMonotonicAttention, self).__init__(params)
    p = self.params
    if p.atten_dropout_prob != 0:
      raise NotImplementedError('dropout is not supported.')

    # TODO(ngyuzh): Compare Sigmoid and other activation functions.
    with tf.variable_scope(p.name):
      # MLP mapping the query vector to 3 * num_mixtures outputs (the GMM
      # parameters per mixture).
      ff_params = layers.FeedForwardNet.Params().Set(
          name=p.name,
          input_dim=p.query_dim,
          hidden_layer_dims=[p.hidden_dim, p.num_mixtures * 3],
          activation=['SIGMOID', 'NONE'],
          params_init=p.params_init.Copy())
      self.CreateChild('GMM', ff_params)

    def ComputeProbsV2(encoder_positions, priors, means, variances):
      """Computes the location GMM probabilities at all encoder positions.

      Unlike `ComputeProbs(V1)`, this function assumes that the first 2
      dimensions of `priors`, `means`, `variances`, and the return value:
      `multiplier (target_batch / source_batch)` and `source_batch` are
      transposed, and `encoder_positions` has only non-one dimensions.

      Args:
        encoder_positions: [source_batch, source_length]
        priors: [multiplier, source_batch, num_mixtures]
        means: [multiplier, source_batch, num_mixtures]
        variances: [multiplier, source_batch, num_mixtures]

      Returns:
        Calculated probabilities: [multiplier, source_batch, source_length]
      """
      # [multiplier, source_batch, 1, num_mixtures]
      priors = tf.expand_dims(priors, 2)
      means = tf.expand_dims(means, 2)
      variances = tf.expand_dims(variances, 2)
      # Guards against division by zero / rsqrt of zero variance.
      epsilon = 1e-8

      # [source_batch, source_length, 1]
      encoder_positions = tf.expand_dims(encoder_positions, 2)

      # [multiplier, source_batch, source_length, num_mixtures]
      # Gaussian density per mixture, weighted by the mixture prior.
      probs = ((priors * tf.rsqrt(2 * np.pi * variances + epsilon)) *
               tf.exp(-(encoder_positions - means)**2 /
                      (2 * variances + epsilon)))

      # [multiplier, source_batch, source_length]
      return tf.reduce_sum(probs, axis=3)

    # TODO(oday): Remove this after all experiments had been migrated.
    # TODO(ngyuzh): change variance to scale to make it simpler.
    def ComputeProbs(encoder_positions, priors, means, variances):
      """Computes the location GMM probabilities at all encoder positions."""
      # encoder_positions: [batch, 1, timesteps, 1]
      # [batch, tb / sb, 1, num_mixtures]
      priors = tf.expand_dims(priors, 2)
      means = tf.expand_dims(means, 2)
      variances = tf.expand_dims(variances, 2)
      # [batch, tb / sb, timesteps, num_mixtures]
      probs = priors * tf.rsqrt(2 * np.pi * variances + 1e-8) * tf.exp(
          -(encoder_positions - means)**2 / (2 * variances + 1e-8))
      # probs sized [batch, tb / sb, timesteps].
      return tf.reduce_sum(probs, axis=3)

    def AttenV2(source_padding, concated_source_vecs,
                concated_source_contexts, query_vec, priors, means, variances,
                encoder_positions, per_step_source_padding):
      """Computes the attention context vector.

      Args:
        source_padding: [source_length, source_batch]
        concated_source_vecs: [source_length, source_batch, hidden_dim]
        concated_source_contexts: [source_batch, source_length, context_dim]
        query_vec: [target_batch, query_dim]
        priors: [target_batch, num_mixtures]
        means: [target_batch, num_mixtures]
        variances: [target_batch, num_mixtures]
        encoder_positions: [source_batch, source_length]
        per_step_source_padding: [target_batch, source_length]

      Returns:
        Following tensors:
        context vector: [target_batch, context_dim]
        attention probabilities: [target_batch, source_length]
      """
      # Note: shape [target_batch] can be converted to
      # [multiplier, source_batch], not [source_batch, multiplier].
      p = self.params
      source_batch = tf.shape(concated_source_vecs)[1]
      target_batch = tf.shape(query_vec)[0]
      multiplier = target_batch // source_batch

      # [multiplier, source_batch, num_mixtures]
      priors = tf.reshape(priors, [multiplier, source_batch, p.num_mixtures])
      means = tf.reshape(means, [multiplier, source_batch, p.num_mixtures])
      variances = tf.reshape(variances,
                             [multiplier, source_batch, p.num_mixtures])

      # [multiplier, source_batch, source_length]
      probs = ComputeProbsV2(encoder_positions, priors, means, variances)

      # [source_batch, source_length]
      source_padding = tf.transpose(source_padding)
      # [multiplier, source_batch, source_length]
      per_step_source_padding = tf.reshape(per_step_source_padding,
                                           [multiplier, source_batch, -1])
      source_padding += per_step_source_padding
      source_padding = tf.minimum(source_padding, 1.0)

      # [multiplier, source_batch, source_length]
      # Zero out probabilities at padded positions.
      probs *= (1.0 - source_padding)
      if p.normalize_probs:
        probs /= tf.maximum(
            tf.reduce_sum(probs, axis=2, keepdims=True), 1e-12)

      probs = py_utils.AddDebugTensor(probs, name='atten_probs')
      # [multiplier, source_batch]
      summary_utils.histogram('gmm_probs_norm', tf.reduce_sum(probs, axis=2))

      # [source_batch, multiplier, source_length]
      probs_transposed = tf.transpose(probs, [1, 0, 2])

      # Matmul:
      # [source_batch, multiplier, source_length]
      # @ [source_batch, source_length, context_dim]
      # -> [source_batch, multiplier, context_dim]
      context_vector_transposed = tf.matmul(probs_transposed,
                                            concated_source_contexts)
      # [multiplier, source_batch, context_dim]
      context_vector = tf.transpose(context_vector_transposed, [1, 0, 2])

      # [target_batch, context_dim], [target_batch, source_length]
      return (tf.reshape(context_vector, [target_batch, -1]),
              tf.reshape(probs, [target_batch, -1]))

    # TODO(oday): Remove this after all experiments had been migrated.
    # TODO(ngyuzh): remove unnecessary transpose.
    def Atten(source_padding, concated_source_vecs, concated_source_contexts,
              query_vec, priors, means, variances, encoder_positions,
              per_step_source_padding):
      """Computes the attention context vector."""
      # tb: target batch size
      # sb: source batch size
      # concated_source_vecs is of shape [sl, sb, context_dim]
      # query_vec is of shape [tb, dims]
      p = self.params
      sb = tf.shape(concated_source_vecs)[1]
      tb = tf.shape(query_vec)[0]
      multiplier = tb // sb
      # [sb, tb / sb, num_mixtures]
      priors = tf.reshape(priors, [-1, multiplier, p.num_mixtures])
      means = tf.reshape(means, [-1, multiplier, p.num_mixtures])
      variances = tf.reshape(variances, [-1, multiplier, p.num_mixtures])
      probs = ComputeProbs(encoder_positions, priors, means, variances)
      # [sl, tb / sb, sb]
      probs = tf.reshape(tf.transpose(probs, [2, 0, 1]), [-1, multiplier, sb])
      source_padding = tf.expand_dims(source_padding, 1)
      per_step_source_padding = tf.reshape(
          tf.transpose(per_step_source_padding), [-1, multiplier, sb])
      source_padding += per_step_source_padding
      source_padding = tf.minimum(source_padding, 1.0)
      # Zero out probabilities at padded positions.
      probs *= (1.0 - source_padding)
      if p.normalize_probs:
        probs /= tf.maximum(
            tf.reduce_sum(probs, axis=0, keepdims=True), 1e-12)
      summary_utils.histogram('gmm_probs_norm', tf.reduce_sum(probs, axis=0))
      probs = py_utils.AddDebugTensor(probs, name='atten_probs')
      probs = tf.transpose(tf.reshape(probs, [-1, tb]))
      # [tb/sb, sb, sl]
      probs_reshaped = tf.reshape(probs, [multiplier, sb, -1])
      # [sb, tb/sb, sl]
      probs_reshaped = tf.transpose(probs_reshaped, [1, 0, 2])
      # Batched matmul
      # [sb, tb/sb, sl] * [sb, sl, context_dim] = [sb, tb/sb, context_dim]
      context_vector = tf.matmul(probs_reshaped, concated_source_contexts)
      context_vector = tf.transpose(context_vector, [1, 0, 2])
      return tf.reshape(context_vector, [tb, -1]), probs

    # V2 is the default; the V1 path is kept for old checkpoints.
    self._ctx_vec = AttenV2 if p.use_atten_v2 else Atten

    def EncodeSource(vecs, ctxs):
      # TODO(ngyuzh): combine with content-base attention.
      time, batch = py_utils.GetShape(vecs, 2)
      ctxs = py_utils.HasShape(ctxs, [time, batch, -1])
      transposed_ctxs = tf.transpose(ctxs, [1, 0, 2])
      return vecs, transposed_ctxs

    self._encode_source = EncodeSource
def PackSource(self,
theta,
source_vecs,
source_contexts,
source_padding,
source_segment_id=None):
with tf.name_scope(self.params.name):
if source_segment_id is None:
source_segment_id = tf.zeros_like(source_padding)
(concated_source_vecs, concated_source_contexts) = (
self._encode_source(source_vecs, source_contexts))
return py_utils.NestedMap(
# [source_length, source_batch, hidden_dim].
source_vecs=concated_source_vecs,
# [source_batch, source_length, context_dim].
# Note the mismatch between `source_vecs` and `source_contexts`. In
# `source_vecs`, `source_length` is the first dim, while it is the
# second dim in `source_contexts`.
source_contexts=concated_source_contexts,
# [source_length, source_batch].
source_padding=source_padding,
# [source_length, source_batch].
source_segment_id=source_segment_id)
def ZeroAttentionState(self, source_length, decoder_batch_size):
p = self.params
# [target_batch, num_mixtures]
position = tf.zeros([decoder_batch_size, p.num_mixtures], dtype=p.dtype)
position_offsets = tf.zeros([decoder_batch_size, p.num_mixtures],
dtype=p.dtype)
variances = tf.ones([decoder_batch_size, p.num_mixtures], dtype=p.dtype)
priors = tf.zeros([decoder_batch_size, p.num_mixtures], dtype=p.dtype)
# [target_batch, num_mixtures, 4]
return tf.stack([position, position_offsets, variances, priors], axis=2)
def ComputeContextVectorWithSource(self,
                                   theta,
                                   packed_src,
                                   query_vec,
                                   attention_state,
                                   per_step_source_padding=None,
                                   query_segment_id=None):
  """Computes the context vector given the current query output.

  Args:
    theta: A `.NestedMap` object containing weights' values of this layer and
      its children layers.
    packed_src: A `.NestedMap` object returned by PackSource or
      InitForSourcePacked.
    query_vec: a tensor of shape [target_batch, query_dim].
    attention_state: previous attention state, a tensor of shape
      [target_batch, num_mixtures, 4].
      - attention_state[:, :, 0] contains previous location
      - attention_state[:, :, 1] contains previous offset.
      - attention_state[:, :, 2] contains previous variance.
      - attention_state[:, :, 3] contains previous prior.
    per_step_source_padding: Source sequence padding to apply at this step. If
      not None, it should be of shape [target_batch, source_length].
    query_segment_id: a tensor of shape [target_batch].

  Note: concated_source_vecs are the vectors that are used to compute the
  attention score between the query_vec and each concated_source_vec. The
  concated_source_contexts are the vectors that compose the result. The
  attention context vector is computed as a weighted average of the
  concated_source_contexts, using the scores that were computed using
  concated_source_vecs.

  Returns:
    A tuple of 3 elements.
      The attention context vector: [target_batch, context_dim]
      The attention probability vector: [target_batch, source_length]
      The new attention state vector: [target_batch, num_mixtures, 4]
  """
  # Segment ids are not used by this attention type.
  del query_segment_id
  p = self.params
  concated_source_vecs = packed_src.source_vecs
  concated_source_contexts = packed_src.source_contexts
  source_padding = packed_src.source_padding
  target_batch = tf.shape(query_vec)[0]
  source_length = tf.shape(source_padding)[0]
  source_batch = tf.shape(source_padding)[1]
  # [target_batch, source_length]
  if per_step_source_padding is None:
    per_step_source_padding = tf.zeros([target_batch, source_length],
                                       dtype=query_vec.dtype)
  per_step_source_padding = py_utils.HasShape(per_step_source_padding,
                                              [target_batch, source_length])
  # Project the query through the GMM layer. The output packs three values
  # per mixture component: [target_batch, num_mixtures * 3].
  out = self.GMM.FProp(theta.GMM, query_vec)
  # Each split is [target_batch, num_mixtures].
  priors_logits, position_offset_logits, log_variances = tf.split(
      out, 3, axis=1, name='GMM')
  # Clamp before exp() below for numerical stability.
  log_variances = tf.minimum(log_variances, layers.LOG_SCALE_CLAMP_BOUND)
  variances = tf.exp(log_variances)
  summary_utils.histogram('gmm_variances', variances)
  priors = tf.nn.softmax(priors_logits)
  summary_utils.histogram('gmm_weights', priors)
  if p.max_offset > 0:
    # Bounded offsets: sigmoid squashes into (0, 1), scaled up to max_offset.
    position_offset = tf.nn.sigmoid(position_offset_logits)
    position_offset *= p.max_offset
  else:
    # Unbounded positive offsets: the attention window only moves forward.
    position_offset = tf.exp(position_offset_logits)
  summary_utils.histogram('gmm_offsets', position_offset)
  # Advance the mixture means monotonically, capped at the source length.
  new_position = attention_state[:, :, 0] + position_offset
  new_position = tf.minimum(new_position, tf.cast(source_length, tf.float32))
  variances = py_utils.AddDebugTensor(variances, name='variances')
  priors = py_utils.AddDebugTensor(priors, name='priors')
  # Tile and reshape encoder_positions to [source_batch, source_length]
  # so that it can be evaluated by locations GMMs in a vectorized way.
  encoder_positions = tf.expand_dims(
      tf.cast(tf.range(source_length), tf.float32), 0)
  encoder_positions = tf.tile(encoder_positions, [source_batch, 1])
  # TODO(oday): Remove this after all experiments had been migrated.
  if not p.use_atten_v2:
    # Reshape encoder_positions to [source_batch, 1, source_length, 1] to
    # maintain backward compatibility.
    encoder_positions = tf.expand_dims(encoder_positions, 1)
    encoder_positions = tf.expand_dims(encoder_positions, 3)
  # _ctx_vec is either AttenV2 or Atten (selected in the constructor).
  # [target_batch, context_dim], [target_batch, source_length]
  ctx_vec, prob = self._ctx_vec(source_padding, concated_source_vecs,
                                concated_source_contexts, query_vec, priors,
                                new_position, variances, encoder_positions,
                                per_step_source_padding)
  # [target_batch, num_mixtures, 4]
  new_atten_states = tf.stack(
      [new_position, position_offset, variances, priors], axis=2)
  return ctx_vec, prob, new_atten_states
|
# Repository: fossabot/SeeO-K — file: src/weather.py
import requests
import gtts
from bs4 import BeautifulSoup
import re
"""
네이버 날씨 크롤링을 통해, 현재 위치의 날씨와 오전, 오후 강수확률을 갖고와 TTS를 생성해준다.
그리고 강수확률이 50% 이상인지 아닌지를 판단하여, 우산을 챙겨야하는지 아닌지 여부를 판단하고 TTS를 생성해준다.
@author : 이도원
@version 1.0.0
"""
def weather():
    """Scrape Naver weather and generate Korean TTS sound files.

    Creates ../sound_data/weather.wav (location, temperature, conditions,
    rain probabilities), umbrella.wav (umbrella advice, 50% threshold) and
    temperature_gap.wav (jacket advice when the daily range is >= 10).

    :return: the current temperature as a string; used by callers for
        clothing recommendations.
    """
    # Fetch and parse the Naver weather page.
    html = requests.get('https://weather.naver.com/')
    soup = BeautifulSoup(html.text, 'html.parser')
    # Current location name.
    now_address = soup.find('strong', {'class': 'location_name'}).text
    weather_box = soup.find('div', {'class': 'weather_area'})
    # Temperatures can be negative (sub-zero). The previous pattern '\d+'
    # dropped the minus sign, which corrupted the low/high temperatures and
    # the day-range computation in winter.
    temp_pattern = r'-?\d+'
    # Current temperature.
    now_temperature = re.findall(temp_pattern, weather_box.find('strong', {'class': 'current'}).text)[0]
    # Current weather description.
    today_weather = weather_box.find('span', {'class': 'weather before_slash'}).text
    weekly_box = soup.find('ul', {'class': 'week_list'})
    today_info = weekly_box.find('li', {'class': 'week_item today'})
    rain_list = today_info.findAll('span', {'class': "rainfall"})
    # Morning / afternoon rain probabilities (percent, never negative).
    morning_rain_rate = re.findall(r"\d+", rain_list[0].text)[0]
    # Afternoon rain probability.
    afternoon_rain_rate = re.findall(r"\d+", rain_list[1].text)[0]
    # Daily low / high temperatures (sign-aware, see temp_pattern above).
    lowest_temperature = re.findall(temp_pattern, today_info.find('span', {'class': 'lowest'}).text)[0]
    highest_temperature = re.findall(temp_pattern, today_info.find('span', {'class': 'highest'}).text)[0]
    temperature_gap = int(highest_temperature) - int(lowest_temperature)
    # TTS: current location, temperature, conditions and rain probabilities.
    tts = gtts.gTTS(
        text=now_address + "의 현재 온도는" + now_temperature + "도." + "현재 날씨는" + today_weather + "." + "오전 강수확률은 "
        + morning_rain_rate + "% 이고," + "오후 강수확률은" + afternoon_rain_rate + "% 입니다.", lang="ko")
    tts.save("../sound_data/weather.wav")
    # Umbrella advice: needed when either probability reaches 50%.
    if int(morning_rain_rate) >= 50 or int(afternoon_rain_rate) >= 50:
        tts = gtts.gTTS(text="비 올 확률이 50% 이상이기 때문에, 우산을 챙기세요", lang="ko")
    else:
        tts = gtts.gTTS(text="비 올 확률이 50% 미만이기 때문에, 우산을 안챙기셔도 됩니다", lang="ko")
    tts.save("../sound_data/umbrella.wav")
    # Jacket advice: a large daily range means high minus low of 10+ degrees.
    if temperature_gap >= 10:
        tts = gtts.gTTS(text="일교차가 크니, 겉옷을 챙겨주십시오.", lang="ko")
    else:
        tts = gtts.gTTS(text="일교차가 크지 않으니, 겉옷을 안챙기셔도 됩니다", lang="ko")
    tts.save("../sound_data/temperature_gap.wav")
    return now_temperature
|
"""
Copyright (c) 2016, 2017 - o2r project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import json
import os
import sys
import jsonschema
import requests
from lxml import etree
from helpers.helpers import *
def json_validate(c, s, bln_c_http, bln_s_http):
    """Validate a JSON candidate against a JSON schema.

    Args:
        c: path or URL of the candidate document.
        s: path or URL of the schema.
        bln_c_http: True when `c` must be fetched over HTTP.
        bln_s_http: True when `s` must be fetched over HTTP.

    Results are reported via status_note(); nothing is returned.
    """
    global is_debug
    # Load the schema first; without it there is nothing to validate
    # against, so abort on failure instead of falling through and producing
    # a second, misleading error about the candidate (previous behavior).
    try:
        if bln_s_http:
            r = requests.get(s)
            schema = json.loads(r.text)
        else:
            with open(os.path.abspath(s), encoding='utf-8') as schema_file:
                schema = json.load(schema_file)
    except json.decoder.JSONDecodeError as jexc:
        status_note(['!error while parsing ', str(s), ': ', str(jexc)], d=is_debug)
        return
    except jsonschema.exceptions.ValidationError as vexc:
        status_note(['!invalid: ', str(vexc)], d=is_debug)
        return
    except Exception as exc:
        status_note(['!error: ', str(exc)], d=is_debug)
        return
    # Load the candidate and validate it against the schema.
    try:
        if bln_c_http:
            r = requests.get(c)
            candidate = json.loads(r.text)
        else:
            with open(os.path.abspath(c), encoding='utf-8') as candidate_file:
                candidate = json.load(candidate_file)
        jsonschema.validate(candidate, schema)
        status_note(['valid: ', c], d=False)
    except json.decoder.JSONDecodeError as jexc:
        status_note(['!error while parsing ', str(c), ': ', str(jexc)], d=is_debug)
    except jsonschema.exceptions.ValidationError as vexc:
        status_note(['!invalid: ', str(vexc)], d=is_debug)
    except Exception as exc:
        status_note(['!error: ', str(exc)], d=is_debug)
def xml_validate(c, s, bln_c_http, bln_s_http):
    """Validate an XML candidate against an XSD schema.

    Args:
        c: path or URL of the candidate XML document.
        s: path or URL of the schema.
        bln_c_http: True when `c` should be fetched over HTTP.
        bln_s_http: True when `s` should be fetched over HTTP.

    Results are reported via status_note(); nothing is returned.
    """
    global is_debug
    try:
        if bln_s_http:
            # lxml can parse directly from a URL.
            schema = etree.parse(s)
        else:
            with open(os.path.abspath(s), encoding='utf-8') as schema_file:
                schema = etree.parse(schema_file)
        if bln_c_http:
            candidate = etree.parse(c)
        else:
            with open(os.path.abspath(c), encoding='utf-8') as candidate_file:
                candidate = etree.parse(candidate_file)
        xmlschema = etree.XMLSchema(schema)
        # Calling an XMLSchema instance validates the document and returns
        # True/False (no exception on invalid documents).
        if xmlschema(candidate):
            status_note(['valid: ', os.path.basename(c)])
        else:
            status_note(['invalid: ', os.path.basename(c)])
    except etree.XMLSchemaParseError as xexc:
        status_note(['! error: ', str(xexc)], d=is_debug)
    except Exception as exc:
        status_note(['! error: ', str(exc)], d=is_debug)
# main
def start(**kwargs):
    """Entry point: dispatch candidate/schema validation by file extension.

    Keyword Args:
        c: candidate path or URL.
        s: schema path or URL.
        dbg: debug flag forwarded to status_note via the module global.
    """
    global is_debug
    is_debug = kwargs.get('dbg', None)
    schema_path = kwargs.get('s', None)
    candidate_path = kwargs.get('c', None)
    status_note(['checking ', os.path.basename(candidate_path), ' against ', os.path.basename(schema_path)], d=False)
    candidate_is_http = candidate_path.startswith('http')
    schema_is_http = schema_path.startswith('http')
    if candidate_path.endswith('.json'):
        json_validate(candidate_path, schema_path, candidate_is_http, schema_is_http)
    elif candidate_path.endswith('.xml'):
        xml_validate(candidate_path, schema_path, candidate_is_http, schema_is_http)
    else:
        # Unsupported file type: warn and exit with an error status.
        status_note('! warning, could not process this type of file', e=True)
        sys.exit(1)
|
# -*- coding: UTF-8 -*-
# *****************************************************************************
# Copyright (C) 2006-2020 <NAME>. <<EMAIL>>
# Copyright (C) 2020 <NAME>. <<EMAIL>>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
# *****************************************************************************
from __future__ import absolute_import, print_function, unicode_literals
import sys
import unittest
import pyreadline3.logger
from pyreadline3.lineeditor import lineobj
from pyreadline3.lineeditor.history import LineHistory
sys.path.append('../..')
pyreadline3.logger.sock_silent = False
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
RL = lineobj.ReadLineTextBuffer
class Test_prev_next_history(unittest.TestCase):
    """Stepping backwards/forwards through a populated line history."""

    t = "test text"

    def setUp(self):
        self.q = q = LineHistory()
        for entry in ["aaaa", "aaba", "aaca", "akca", "bbb", "ako"]:
            q.add_history(RL(entry))

    def test_previous_history(self):
        hist = self.q
        assert hist.history_cursor == 6
        t_line = RL("")
        # Walk backwards through the history; the cursor sticks at the
        # oldest entry, hence the repeated "aaaa" at the end.
        for expected in ["ako", "bbb", "akca", "aaca", "aaba", "aaaa", "aaaa"]:
            hist.previous_history(t_line)
            assert t_line.get_line_text() == expected

    def test_next_history(self):
        hist = self.q
        hist.beginning_of_history()
        assert hist.history_cursor == 0
        t_line = RL("")
        # Walk forwards; the cursor sticks at the newest entry ("ako").
        for expected in ["aaba", "aaca", "akca", "bbb", "ako", "ako"]:
            hist.next_history(t_line)
            assert t_line.get_line_text() == expected
class Test_prev_next_history1(unittest.TestCase):
    """Prefix search through the history in both directions."""

    t = "test text"

    def setUp(self):
        self.q = q = LineHistory()
        for entry in ["aaaa", "aaba", "aaca", "akca", "bbb", "ako"]:
            q.add_history(RL(entry))

    def test_history_search_backward(self):
        # Dedicated history; " aacax" must not match the "aa" prefix search
        # because it starts with a space.
        q = LineHistory()
        for entry in ["aaaa", "aaba", "aaca", " aacax", "akca", "bbb", "ako"]:
            q.add_history(RL(entry))
        needle = RL("aa", point=2)
        # The search sticks at the oldest match ("aaaa").
        for expected in ["aaca", "aaba", "aaaa", "aaaa"]:
            assert q.history_search_backward(needle).get_line_text() == expected

    def test_history_search_forward(self):
        q = LineHistory()
        for entry in ["aaaa", "aaba", "aaca", " aacax", "akca", "bbb", "ako"]:
            q.add_history(RL(entry))
        q.beginning_of_history()
        needle = RL("aa", point=2)
        # The search sticks at the newest match ("aaca").
        for expected in ["aaba", "aaca", "aaca"]:
            assert q.history_search_forward(needle).get_line_text() == expected
class Test_history_search_incr_fwd_backwd(unittest.TestCase):
    """Incremental (readline-style) history search, both directions."""

    def setUp(self):
        self.q = q = LineHistory()
        for entry in ["aaaa", "aaba", "aaca", "akca", "bbb", "ako"]:
            q.add_history(RL(entry))

    def test_backward_1(self):
        # Repeating the same term moves to the next older match; extending
        # the term keeps the current match when it still applies.
        for term, expected in [("b", "bbb"), ("b", "aaba"), ("bb", "aaba")]:
            self.assertEqual(self.q.reverse_search_history(term), expected)

    def test_backward_2(self):
        for term, expected in [("a", "ako"), ("aa", "aaca"),
                               ("a", "aaca"), ("ab", "aaba")]:
            self.assertEqual(self.q.reverse_search_history(term), expected)

    def test_forward_1(self):
        self.assertEqual(self.q.forward_search_history("a"), "ako")

    def test_forward_2(self):
        # Start from the oldest entry and search forward incrementally; a
        # non-matching extension ("akl") keeps the previous match.
        self.q.history_cursor = 0
        for term, expected in [("a", "aaaa"), ("a", "aaba"), ("ak", "akca"),
                               ("akl", "akca"), ("ak", "akca"), ("ako", "ako")]:
            self.assertEqual(self.q.forward_search_history(term), expected)
class Test_empty_history_search_incr_fwd_backwd(unittest.TestCase):
    """Incremental search over an empty history yields the empty string."""

    def setUp(self):
        self.q = LineHistory()

    def test_backward_1(self):
        history = self.q
        self.assertEqual(history.reverse_search_history("b"), "")

    def test_forward_1(self):
        history = self.q
        self.assertEqual(history.forward_search_history("a"), "")
# ----------------------------------------------------------------------
# utility functions
# ----------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
"""Base Integration for Cortex XSOAR - Unit Tests file
Pytest Unit Tests: all funcion names must start with "test_"
More details: https://xsoar.pan.dev/docs/integrations/unit-testing
You must add at least a Unit Test function for every XSOAR command
you are implementing with your integration
"""
import json
import io
import pytest
import demistomock as demisto
from Microsoft365Defender import Client, fetch_incidents, _query_set_limit, main
def util_load_json(path):
    """Load and deserialize a JSON test-data file.

    Args:
        path: path of the UTF-8 encoded JSON file to read.

    Returns:
        The deserialized JSON content (dict/list/scalar).
    """
    # json.load streams the file directly; io.open is a legacy alias of the
    # builtin open in Python 3.
    with open(path, mode='r', encoding='utf-8') as f:
        return json.load(f)
def test_convert_incident():
    """convert_incident_to_readable: empty input and a full raw incident."""
    from Microsoft365Defender import convert_incident_to_readable
    # None must map to the canonical empty-incident structure.
    expected_empty = util_load_json("./test_data/empty_incident.json")
    assert convert_incident_to_readable(None) == expected_empty
    # A raw API incident converts to its recorded readable form.
    raw_incident = util_load_json("./test_data/raw_incident.json")
    expected_converted = util_load_json("./test_data/converted_incident.json")
    assert convert_incident_to_readable(raw_incident) == expected_converted
def mock_client(mocker, function: str = None, http_response=None):
    """Build a Client with a patched integration context.

    When `http_response` is given, the client method named `function` is
    stubbed to return it.
    """
    mocker.patch.object(
        demisto, 'getIntegrationContext',
        return_value={'current_refresh_token': 'refresh_token', 'access_token': 'access_token'})
    client = Client(app_id='app_id',
                    verify=False,
                    proxy=False,
                    base_url='https://api.security.microsoft.com')
    if http_response:
        mocker.patch.object(client, function, return_value=http_response)
    return client
def check_api_response(results, results_mock):
    """Assert that a CommandResults object matches its recorded mock dict."""
    for field in ('outputs_prefix', 'outputs_key_field',
                  'readable_output', 'outputs'):
        assert getattr(results, field) == results_mock[field]
def test_microsoft_365_defender_incidents_list_command(mocker):
    """incidents-list command returns the recorded CommandResults."""
    from Microsoft365Defender import microsoft_365_defender_incidents_list_command
    canned_response = util_load_json('./test_data/incidents_list_response.json')
    client = mock_client(mocker, 'incidents_list', canned_response)
    output = microsoft_365_defender_incidents_list_command(client, {'limit': 10})
    check_api_response(output, util_load_json('./test_data/incidents_list_results.json'))
def test_microsoft_365_defender_incident_update_command(mocker):
    """incident-update command returns the recorded CommandResults."""
    from Microsoft365Defender import microsoft_365_defender_incident_update_command
    canned_response = util_load_json('./test_data/incident_update_response.json')
    client = mock_client(mocker, 'update_incident', canned_response)
    update_args = {'id': '263', 'tags': 'test1,test2', 'status': 'Active',
                   'classification': 'Unknown', 'determination': 'Other'}
    output = microsoft_365_defender_incident_update_command(client, update_args)
    check_api_response(output, util_load_json('./test_data/incident_update_results.json'))
def test_microsoft_365_defender_advanced_hunting_command(mocker):
    """advanced-hunting command returns the recorded CommandResults."""
    from Microsoft365Defender import microsoft_365_defender_advanced_hunting_command
    canned_response = util_load_json('./test_data/advanced_hunting_response.json')
    client = mock_client(mocker, 'advanced_hunting', canned_response)
    output = microsoft_365_defender_advanced_hunting_command(client, {'query': 'AlertInfo'})
    check_api_response(output, util_load_json('./test_data/advanced_hunting_results.json'))
def fetch_check(mocker, client, last_run, first_fetch_time, fetch_limit, mock_results):
    """Run one fetch cycle and compare the produced incidents to the mocks."""
    mocker.patch.object(demisto, 'getLastRun', return_value=last_run)
    fetched = fetch_incidents(client, first_fetch_time, fetch_limit)
    assert len(fetched) == len(mock_results)
    # Each fetched incident must match its mock on the key fields.
    for got, want in zip(fetched, mock_results):
        for key in ('name', 'occurred', 'rawJSON'):
            assert got[key] == want[key]
def test_fetch_incidents(mocker):
    """
    This test check for 4 fetch cycles.
    First - get all the incidents and fill the queue 127, returns 50
    Second - get 50 incidents from the queue
    Third - tries to fill the queue with new incidents but there are no new ones so returns all the remaining
            incidents in the queue
    Forth - tries to fill the queue with new incidents but there are no new ones so returns empty list
    """
    response_dict = util_load_json('./test_data/fetch_response.json')
    client = Client(
        app_id='app_id',
        verify=False,
        proxy=False,
        base_url='https://api.security.microsoft.com'
    )
    mocker.patch.object(demisto, 'getIntegrationContext',
                        return_value={'current_refresh_token': 'refresh_token', 'access_token': 'access_token'})
    # Each successive call to incidents_list consumes the next canned API
    # response from the recorded list.
    response_list = response_dict['response_list']
    mocker.patch.object(client, 'incidents_list', side_effect=response_list)
    first_fetch_time = "3000 days"
    fetch_limit = 50
    results = util_load_json('./test_data/fetch_results.json')
    # Run the four consecutive fetch cycles described in the docstring; each
    # cycle starts from the previous cycle's recorded last_run state.
    for current_flow in ['first', 'second', 'third', 'forth']:
        fetch_check(mocker, client, response_dict[f'{current_flow}_last_run'], first_fetch_time, fetch_limit,
                    results[f'{current_flow}_result'])
# Each case is (query, limit, expected). Per these fixtures, _query_set_limit
# replaces any existing "limit"/"take" operators and appends the given limit.
@pytest.mark.parametrize('query, limit, result', [("a | b | limit 5", 10, "a | b | limit 10 "),
                                                  ("a | b ", 10, "a | b | limit 10 "),
                                                  ("a | b | limit 1 | take 1", 10, "a | b | limit 10 | limit 10 "),
                                                  ])
def test_query_set_limit(query: str, limit: int, result: str):
    """_query_set_limit rewrites the query to carry the requested limit."""
    assert _query_set_limit(query, limit) == result
def test_params(mocker):
    """
    Given:
        - Configuration parameters
    When:
        - The required parameter app_id is missed.
    Then:
        - Ensure the exception message as expected.
    """
    params_without_app_id = {'_tenant_id': '_tenant_id',
                             'credentials': {'password': '<PASSWORD>'}}
    mocker.patch.object(demisto, 'params', return_value=params_without_app_id)
    mocker.patch.object(demisto, 'error')
    return_error_mock = mocker.patch('Microsoft365Defender.return_error')
    main()
    # main() must surface a clear configuration error instead of crashing.
    assert 'Application ID must be provided.' in return_error_mock.call_args[0][0]
|
import math
import matplotlib.pyplot as plt
import numpy as np
import random
import time
from collections import defaultdict
from euclid import Circle, Point2, Vector2, LineSegment2
from itertools import combinations
from . import svg
class GameObject(object):
    """A circle with position, velocity and mass, confined to the unit box."""

    def __init__(self, position, speed, obj_type, radius=0.03, mass=1.0):
        """Essentially represents circles of different kinds, which have
        position and speed."""
        self.position = position
        self.speed = speed
        self.obj_type = obj_type
        self.mass = mass
        self.radius = radius

    def step(self, dt, viscosity=1.0):
        """Advance by dt seconds, clamp to the box, apply viscous damping."""
        self.position += dt * self.speed
        # Keep the whole circle inside the unit square.
        lo, hi = self.radius, 1.0 - self.radius
        self.position.x = max(lo, min(hi, self.position.x))
        self.position.y = max(lo, min(hi, self.position.y))
        self.speed *= viscosity

    def as_circle(self):
        """Return this object's footprint as a euclid Circle."""
        return Circle(self.position, float(self.radius))
def speed_after_collision(a, b, cor=1.0):
    """Post-collision velocity of `a` after impact with `b`.

    Standard two-body collision formula; `cor` is the coefficient of
    restitution (1.0 = perfectly elastic).
    """
    relative_velocity = cor * (a.speed - b.speed)
    offset = a.position - b.position
    mass_ratio = 2 * b.mass / (a.mass + b.mass)
    scale = mass_ratio * relative_velocity.dot(offset) / offset.magnitude_squared()
    return a.speed - scale * offset
def objects_colliding(a, b):
    """True when the circles `a` and `b` currently overlap."""
    gap_squared = (a.position - b.position).magnitude_squared()
    min_distance = a.radius + b.radius
    return gap_squared < min_distance ** 2
def objects_will_collide(a, b, dt):
    """True when `a` and `b` would overlap after both advance for dt seconds."""
    future_a = a.position + a.speed * dt
    future_b = b.position + b.speed * dt
    return (future_a - future_b).magnitude_squared() < (a.radius + b.radius) ** 2
def wall_collision_soon(a, dt):
    """Return the normal of a wall that `a` touches now or after dt seconds.

    Returns None when no wall collision is imminent. Walls are tested in
    the fixed order left, right, bottom, top (matching the original
    priority), so at most one normal is reported per call.
    """
    now = a.position
    soon = a.position + a.speed * dt
    r = a.radius
    if now.x < r or soon.x < r:
        return Vector2(-1, 0)
    if now.x > 1 - r or soon.x > 1 - r:
        return Vector2(1, 0)
    if now.y < r or soon.y < r:
        return Vector2(0, -1)
    if now.y > 1 - r or soon.y > 1 - r:
        return Vector2(0, 1)
    return None
def resolve_wall_colision(obj, direction, cor=1.0):
    """Bounce `obj` off a wall by colliding it with a huge static stand-in."""
    wall_stand_in = GameObject(obj.position + direction * obj.radius,
                               Vector2(0.0, 0.0), "wall", 0.0, mass=1000000.0)
    obj.speed = speed_after_collision(obj, wall_stand_in, cor=cor)
def correct_penetration(a, b):
    """Push overlapping circles apart symmetrically until they just touch."""
    separation = a.position.distance(b.position)
    overlap = (a.radius + b.radius) - separation
    if overlap > 0:
        # Split the correction evenly between the two objects, along the
        # line connecting their centers.
        push = a.position - b.position
        push.normalize()
        push *= overlap / 2.
        a.position += push
        b.position -= push
class Simulator(object):
    """Physics sandbox: circle objects moving inside the unit square.

    Handles time stepping, wall bounces, pairwise collisions with
    restitution, and SVG rendering of the current state.
    """

    def __init__(self, settings):
        # `settings` must provide "size", "restitution", "viscosity" and a
        # "colors" mapping from obj_type to a drawing color.
        self.objects = []
        self.settings = settings
        self.size = self.settings["size"]
        self.restitution = self.settings["restitution"]
        self.viscosity = self.settings["viscosity"]
        self.game_time_passed = 0.0
        # Hook invoked with both colliding objects; the collision response
        # is applied only when it returns a truthy value.
        self.collision_observer = lambda x, y: True

    def add(self, obj, randomize_position=False, ensure_noncolliding=False):
        """Register `obj`; optionally place it at a random free position."""
        self.objects.append(obj)
        gen = lambda: random.uniform(obj.radius, 1.0 - obj.radius)
        if randomize_position:
            obj.position = Point2(gen(), gen())
        if ensure_noncolliding:
            # Re-roll until the object overlaps nothing else.
            while any(objects_colliding(obj, other)
                      for other in self.objects if other is not obj):
                obj.position = Point2(gen(), gen())

    def remove(self, obj):
        """Unregister `obj` from the simulation."""
        self.objects.remove(obj)

    def randomize_position(self, obj, noncoliding=True, margin=0.0):
        """Move `obj` to a uniform random spot, optionally non-overlapping."""
        gen = lambda: random.uniform(obj.radius + margin, 1.0 - obj.radius - margin)
        obj.position = Point2(gen(), gen())
        while noncoliding and any(objects_colliding(obj, other) for other in self.objects if other is not obj):
            obj.position = Point2(gen(), gen())

    def step(self, dt):
        """Simulate all the objects for a given ammount of time.
        Also resolve collisions with the hero"""
        # Integrate positions first, then fix up wall and object collisions.
        for obj in self.objects:
            obj.step(dt, self.viscosity)
        for obj in self.objects:
            wall_col_dir = wall_collision_soon(obj, dt)
            if wall_col_dir is not None:
                resolve_wall_colision(obj, wall_col_dir, self.restitution)
        for obj1, obj2 in combinations(self.objects, 2):
            if objects_colliding(obj1, obj2) or objects_will_collide(obj1, obj2, dt):
                if self.collision_observer(obj1, obj2):
                    # Both new speeds are computed from the pre-collision
                    # state before either is assigned.
                    obj1.speed, obj2.speed = (
                        speed_after_collision(obj1, obj2, self.restitution),
                        speed_after_collision(obj2, obj1, self.restitution),
                    )
                    if objects_colliding(obj1, obj2):
                        # Separate circles that already interpenetrate.
                        correct_penetration(obj1, obj2)
        self.game_time_passed += dt

    def create_scene(self, stats):
        """Create an SVG scene sized for the board plus a stats footer."""
        scene = svg.Scene((self.size + 20, self.size + 20 + 20 * len(stats)))
        scene.add(svg.Rectangle((10, 10), (self.size, self.size)))
        return scene

    def draw_objects(self, scene):
        """Draw each object as a colored circle (10px board margin)."""
        for obj in self.objects:
            color = self.settings["colors"][obj.obj_type]
            obj_drawing = svg.Circle(obj.position * self.size + Point2(10, 10), obj.radius * self.size, color=color)
            scene.add(obj_drawing)

    def draw_stats(self, scene, stats):
        """Render one line of text per stats entry below the board."""
        offset = self.size + 15
        for txt in stats:
            scene.add(svg.Text((10, offset + 20), txt, 15))
            offset += 20

    def to_svg(self, stats=[]):
        """Return svg representation of the simulator"""
        # NOTE(review): mutable default argument; copied immediately below,
        # so it is safe here.
        stats = stats[:]
        scene = self.create_scene(stats)
        self.draw_objects(scene)
        self.draw_stats(scene, stats)
        return scene
class HeroSimulator(Simulator):
    """Simulator with a controllable hero that senses the world through a
    fan of observation lines (ray casts)."""

    def __init__(self, settings):
        super(HeroSimulator, self).__init__(settings)
        # Box walls as segments, used to clip observation lines.
        self.walls = [
            LineSegment2(Point2(0, 0), Point2(0, 1.0)),
            LineSegment2(Point2(0, 1.0), Point2(1.0, 1.0)),
            LineSegment2(Point2(1.0, 1.0), Point2(1.0, 0)),
            LineSegment2(Point2(1.0, 0), Point2(0, 0))
        ]
        self.observation_lines = self.generate_observation_lines()
        # For each line: where it ends (intersection point) and what it hit
        # (a GameObject, "wall", or None).
        self.line_end_where = {l: l.p2 for l in self.observation_lines}
        self.line_end_who = {l: None for l in self.observation_lines}
        self.hero = GameObject(Point2(0.5, 0.5), Vector2(0.0, 0.0), "hero",
                               radius=self.settings['obj_radius'])
        self.add(self.hero)
        self.update_observation_lines()
        # Per line: one proximity slot per observable type + 4 speed slots.
        self.observation_size = self.settings["num_observation_lines"] * (
            len(self.settings["observable_objects"]) + 4)

    def generate_observation_lines(self):
        """Generate observation segments in settings["num_observation_lines"]
        evenly spaced directions around the origin."""
        result = []
        start = Point2(0.0, 0.0)
        end = Point2(self.settings["observable_distance"],
                     self.settings["observable_distance"])
        for angle in np.linspace(0, 2 * np.pi,
                                 self.settings["num_observation_lines"],
                                 endpoint=False):
            rotation = Point2(math.cos(angle), math.sin(angle))
            current_start = Point2(start[0] * rotation[0], start[1] * rotation[1])
            current_end = Point2(end[0] * rotation[0], end[1] * rotation[1])
            result.append(LineSegment2(current_start, current_end))
        return result

    def add(self, *args, **kwargs):
        """Add an object and refresh the hero's ray casts."""
        super(HeroSimulator, self).add(*args, **kwargs)
        self.update_observation_lines()

    def remove(self, *args, **kwargs):
        """Remove an object and refresh the hero's ray casts."""
        super(HeroSimulator, self).remove(*args, **kwargs)
        self.update_observation_lines()

    def observe(self):
        """Build the hero's observation vector.

        For every observation line: one proximity slot per observable object
        type (1.0 when nothing of that type is seen), followed by the
        observed object's speed (x, y) and the hero's own speed (x, y).

        Returns:
            A [1, observation_size] numpy array.
        """
        observable_distance = self.settings["observable_distance"]
        nl = self.settings["num_observation_lines"]
        observ_obj = self.settings["observable_objects"]
        no = len(observ_obj)
        observation = np.empty((nl, no + 4))
        hero_speed_x, hero_speed_y = self.hero.speed
        for i, line in enumerate(self.observation_lines):
            seen = self.line_end_who[line]
            speed_x, speed_y = 0.0, 0.0
            if seen is not None and type(seen) == GameObject:
                speed_x, speed_y = seen.speed
                seen = seen.obj_type
            # Default every slot to 1.0 ("nothing seen at any distance").
            observation[i] = 1.0
            if seen in observ_obj:
                proximity = self.hero.position.distance(self.line_end_where[line]) / observable_distance
                observation[i, observ_obj.index(seen)] = proximity
            observation[i, no:] = (speed_x, speed_y, hero_speed_x, hero_speed_y)
        return observation.ravel()[np.newaxis, :]

    def update_observation_lines(self):
        """Recompute, for every observation line, what it hits and where."""
        observable_distance = self.settings["observable_distance"]
        relevant_objects = [obj for obj in self.objects
                            if obj.position.distance(self.hero.position) < observable_distance
                            and obj is not self.hero]
        # Objects sorted from closest to furthest so the first hit wins.
        relevant_objects.sort(key=lambda x: x.position.distance(self.hero.position))
        for observation_line in self.observation_lines:
            # Shift the template line to the hero's position.
            l = LineSegment2(self.hero.position + Vector2(*observation_line.p1),
                             self.hero.position + Vector2(*observation_line.p2))
            end_l = l.p2
            observed_object = None
            # If the end of the observation line leaves the box, we see a wall.
            if end_l.x < 0.0 or end_l.x > 1.0 or end_l.y < 0.0 or end_l.y > 1.0:
                observed_object = "wall"
            for obj in relevant_objects:
                if l.distance(obj.position) < obj.radius:
                    observed_object = obj
                    break
            intersection = end_l
            if observed_object == "wall":  # wall seen
                # Best candidate is the intersection between l and a wall
                # that is closest to the hero.
                for wall in self.walls:
                    candidate = l.intersect(wall)
                    if candidate is not None:
                        if (intersection is None or
                                intersection.distance(self.hero.position) >
                                candidate.distance(self.hero.position)):
                            intersection = candidate
            elif observed_object is not None:  # agent seen
                # Clip the line at the near edge of the seen object's circle.
                # BUG FIX: the original read `intersection_segment.pl` (typo
                # for `.p1`), raising AttributeError whenever the first
                # endpoint was the nearer one; it also used the leaked loop
                # variable `obj` instead of `observed_object`.
                intersection_segment = observed_object.as_circle().intersect(l)
                if intersection_segment is not None:
                    if (intersection_segment.p1.distance(self.hero.position) <
                            intersection_segment.p2.distance(self.hero.position)):
                        intersection = intersection_segment.p1
                    else:
                        intersection = intersection_segment.p2
            self.line_end_where[observation_line] = intersection
            self.line_end_who[observation_line] = observed_object

    def step(self, dt):
        """Advance the physics, then refresh the hero's ray casts."""
        super(HeroSimulator, self).step(dt)
        self.update_observation_lines()

    def draw_observation(self, scene):
        """Draw each observation line, colored by what it currently sees."""
        for line in self.observation_lines:
            seen = self.line_end_who[line]
            color = self.settings["colors"][seen.obj_type] if type(seen) is GameObject else 'black'
            line_drawn = svg.Line(self.hero.position * self.size + Point2(10, 10),
                                  self.line_end_where[line] * self.size + Point2(10, 10),
                                  stroke=color)
            scene.add(line_drawn)

    def to_svg(self, stats=[]):
        """Return svg representation of the simulator"""
        stats = stats[:]
        scene = self.create_scene(stats)
        self.draw_observation(scene)
        self.draw_objects(scene)
        self.draw_stats(scene, stats)
        return scene
|
from py4j.java_gateway import JavaGateway, JavaObject, GatewayParameters
from py4j.java_gateway import java_import, get_field
'''
Usage:
from sagas.bots.hanlp_procs import Hanlp, hanlp, hanlp_c
'''
class Hanlp(object):
    """Thin py4j bridge to a JVM running the HanLP NLP library.

    Expects a py4j GatewayServer listening on localhost:2333 (callback port
    2334) whose entry point exposes a `helper()` method.
    """

    def __init__(self):
        host="localhost"
        port=2333
        callback_port=2334
        self.gateway = JavaGateway(python_proxy_port=callback_port,
                                   gateway_parameters=GatewayParameters(address=host, port=port, auto_field=True))
        # Import the HanLP classes into a dedicated JVM view.
        j = self.gateway.new_jvm_view()
        java_import(j, 'com.hankcs.hanlp.*')
        java_import(j, 'java.util.*')
        java_import(j, 'com.hankcs.hanlp.util.*')
        java_import(j, 'com.hankcs.hanlp.utility.*')
        java_import(j, 'com.hankcs.hanlp.corpus.tag.Nature')
        java_import(j, 'com.hankcs.hanlp.corpus.dependency.CoNll.CoNLLWord')
        java_import(j, "com.hankcs.hanlp.tokenizer.NLPTokenizer")
        self.j=j
        self.helper = self.gateway.entry_point.helper()

    def get_pinyin(self, text):
        """Return HanLP's pinyin list for `text`."""
        return self.j.HanLP.convertToPinyinList(text)

    def set_nature(self, nature_name, words):
        """Create (or fetch) the part-of-speech `nature_name` and attach it
        to every word in `words` in HanLP's lexicon."""
        pcNature = self.j.Nature.create(nature_name)
        nature_c = self.j.Nature
        natures = self.gateway.new_array(nature_c, 1)
        natures[0] = pcNature
        for word in words:
            self.j.LexiconUtility.setAttribute(word, natures)

    def describe_rel(self, word, result):
        """Append a readable description of `word`'s dependency relation.

        The DEPREL labels are HanLP's Chinese names: 主谓关系 = subject,
        动宾关系 = verb-object, 标点符号 = punctuation (skipped).
        """
        if word.DEPREL == "主谓关系":
            result.append("\tactor: {}".format(word.LEMMA))
        elif word.DEPREL == "动宾关系":
            result.append("\tobject: {}".format(word.LEMMA))
        elif word.DEPREL == "标点符号":
            pass
        else:
            result.append("\trel.{}({}): {}".format(word.POSTAG, word.DEPREL, word.LEMMA))

    def get_pinyin_tone(self, sentence):
        """Return the sentence's pinyin with tone marks, space-separated."""
        pinyin_list = self.j.HanLP.convertToPinyinList(sentence)
        l = []
        for pinyin in pinyin_list:
            l.append("%s" % pinyin.getPinyinWithToneMark())
        return (" ".join(l))

    def parse_tree(self, sentence):
        """Dependency-parse `sentence` and build a multi-line summary.

        Returns:
            (summary_text, conll): the formatted description and the raw
            CoNLL parse for further processing.
        """
        conll = self.j.HanLP.parseDependency(sentence)
        coreindex = 0
        result = []
        # First pass: find the root (core) word.
        for word in conll.iterator():
            if word.HEAD == self.j.CoNLLWord.ROOT:
                coreindex = word.ID
                result.append("core: {} - {}".format(word.POSTAG, word.LEMMA))
        # Second pass: describe every word attached directly to the root.
        for word in conll.iterator():
            if word.HEAD.ID == coreindex:
                self.describe_rel(word, result)
        result.append("⊕ " + str(self.j.NLPTokenizer.analyze(sentence)))
        result.append("⊙ " + str(self.j.NLPTokenizer.analyze(sentence).translateLabels()))
        result.append("ﺴ " + self.get_pinyin_tone(sentence))
        result.append("☫ " + self.j.HanLP.convertToTraditionalChinese(sentence))
        result.append("% " + sentence)
        return '\n'.join(result), conll

    def print_deps(self, conll):
        """Print the dependency triples (word, relation, head) as a table."""
        from tabulate import tabulate
        table_header = ['a', 'rel', 'b']
        table_data = []
        # Sequential traversal of the parsed sentence.
        # sentence = self.j.HanLP.parseDependency(raw)
        wordArray = conll.getWordArray()
        for word in wordArray:
            # print("%s --(%s)--> %s"%(word.LEMMA, word.DEPREL, word.HEAD.LEMMA))
            table_data.append((word.LEMMA, word.DEPREL, word.HEAD.LEMMA))
        print(tabulate(table_data, headers=table_header, tablefmt='psql'))
# Module-level singletons: a connected gateway wrapper and a shortcut to the
# JVM-side HanLP class. NOTE(review): instantiating here opens the py4j
# connection as an import-time side effect.
hanlp=Hanlp()
hanlp_c=hanlp.j.HanLP
class HanlpProcs(object):
    """Fire CLI entry points wrapping the module-level `hanlp` singleton."""

    def tree(self, sentence):
        """
        $ python -m sagas.bots.hanlp_procs tree '苹果电脑可以运行开源阿尔法狗代码吗'
        :param sentence: sentence to dependency-parse and summarize.
        :return: None (prints the parse summary and dependency table).
        """
        # Register the custom 'tech' part of speech for these product names
        # before parsing.
        hanlp.set_nature('tech', ["苹果电脑", "阿尔法狗"])
        result, conll=hanlp.parse_tree(sentence)
        print(result)
        hanlp.print_deps(conll)

    def backtrace(self, raw, index=0):
        """
        $ python -m sagas.bots.hanlp_procs backtrace '苹果电脑可以运行开源阿尔法狗代码吗'
        :param raw: sentence to parse.
        :param index: index of the word to start the backtrace from.
        :return: None (prints the chain of heads up to the root).
        """
        # Walk from the word at `index` up its chain of heads to the
        # virtual root, printing each hop.
        sentence = hanlp.j.HanLP.parseDependency(raw)
        wordArray = sentence.getWordArray()
        head = wordArray[index]
        while head is not None:
            if head == hanlp.j.CoNLLWord.ROOT:
                print(head.LEMMA)
            else:
                print("%s --(%s)--> " % (head.LEMMA, head.DEPREL))
            head = head.HEAD

    def deps(self, raw):
        """
        $ python -m sagas.bots.hanlp_procs deps '苹果电脑可以运行开源阿尔法狗代码吗'
        :param raw: sentence to parse.
        :return: None (prints each (word, relation, head) triple in order).
        """
        # Sequential traversal of the dependency parse.
        sentence = hanlp.j.HanLP.parseDependency(raw)
        wordArray = sentence.getWordArray()
        for word in wordArray:
            print("%s --(%s)--> %s" % (word.LEMMA, word.DEPREL, word.HEAD.LEMMA))
if __name__ == '__main__':
    import fire
    # python-fire turns HanlpProcs' public methods into CLI subcommands.
    fire.Fire(HanlpProcs)
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Tests for Job class.
"""
__author__ = '<EMAIL> (<NAME>)'
import time
from viewfinder.backend.base import constants
from viewfinder.backend.base.dotdict import DotDict
from viewfinder.backend.db.job import Job
from viewfinder.backend.db.lock import Lock
from viewfinder.backend.db.lock_resource_type import LockResourceType
from base_test import DBBaseTestCase
class JobTestCase(DBBaseTestCase):
    """Tests for Job's lock acquisition and per-run metrics bookkeeping."""

    def testLocking(self):
        """Test basic locking mechanism."""
        # Two Job instances with the same name contend for a single lock.
        job1 = Job(self._client, 'test_job')
        self.assertTrue(self._RunAsync(job1.AcquireLock))
        job2 = Job(self._client, 'test_job')
        self.assertFalse(self._RunAsync(job2.AcquireLock))
        # Abandon job1 lock. We never do this on real jobs, so manually clear the lock.
        self._RunAsync(job1._lock.Abandon, self._client)
        job1._lock = None
        # Set detect_abandonment=False: failure.
        self.assertFalse(self._RunAsync(job2.AcquireLock, detect_abandonment=False))
        self.assertFalse(self._RunAsync(job2.AcquireLock, detect_abandonment=False))
        # Now allow abandoned lock acquisition.
        self.assertTrue(self._RunAsync(job2.AcquireLock))
        self.assertFalse(self._RunAsync(job1.AcquireLock))
        self._RunAsync(job2.ReleaseLock)
        # Job1 grabs the lock again.
        self.assertTrue(self._RunAsync(job1.AcquireLock))
        self._RunAsync(job1.ReleaseLock)

    def testMetrics(self):
        """Test fetching/writing metrics."""
        # Job being tested.
        job = Job(self._client, 'test_job')
        prev_runs = self._RunAsync(job.FindPreviousRuns)
        self.assertEqual(len(prev_runs), 0)
        # Unrelated job with a different name. Run entries should not show up under 'test_job'.
        other_job = Job(self._client, 'other_test_job')
        other_job.Start()
        self._RunAsync(other_job.RegisterRun, Job.STATUS_SUCCESS)
        other_job.Start()
        self._RunAsync(other_job.RegisterRun, Job.STATUS_FAILURE)
        # Calling RegisterRun without first calling Start fails because the start_time is not set.
        self.assertIsNone(job._start_time)
        self.assertRaises(AssertionError, self._RunAsync, job.RegisterRun, Job.STATUS_SUCCESS)
        job.Start()
        self.assertIsNotNone(job._start_time)
        # Overwrite it for easier testing: pretend the run started just over a week ago.
        start_time = job._start_time = int(time.time() - (constants.SECONDS_PER_WEEK + constants.SECONDS_PER_HOUR))
        # Write run summary with extra stats.
        stats = DotDict()
        stats['foo.bar'] = 5
        stats['baz'] = 'test'
        self._RunAsync(job.RegisterRun, Job.STATUS_SUCCESS, stats=stats, failure_msg='foo')
        # start_time is reset to prevent multiple calls to RegisterRun.
        self.assertIsNone(job._start_time)
        self.assertRaises(AssertionError, self._RunAsync, job.RegisterRun, Job.STATUS_SUCCESS)
        end_time = int(time.time())
        # Default search is "runs started in the past week".
        prev_runs = self._RunAsync(job.FindPreviousRuns)
        self.assertEqual(len(prev_runs), 0)
        # Default search is for successful runs.
        prev_runs = self._RunAsync(job.FindPreviousRuns, start_timestamp=(start_time - 10))
        self.assertEqual(len(prev_runs), 1)
        self.assertEqual(prev_runs[0]['start_time'], start_time)
        self.assertAlmostEqual(prev_runs[0]['end_time'], end_time, delta=10)
        self.assertEqual(prev_runs[0]['status'], Job.STATUS_SUCCESS)
        self.assertEqual(prev_runs[0]['stats.foo.bar'], 5)
        self.assertEqual(prev_runs[0]['stats.baz'], 'test')
        # failure_msg does nothing when status is SUCCESS.
        self.assertTrue('failure_msg' not in prev_runs[0])
        # Search for failed runs.
        prev_runs = self._RunAsync(job.FindPreviousRuns, start_timestamp=(start_time - 10), status=Job.STATUS_FAILURE)
        self.assertEqual(len(prev_runs), 0)
        # Create a failed job summary (started an hour ago).
        job.Start()
        start_time2 = job._start_time = int(time.time() - constants.SECONDS_PER_HOUR)
        self._RunAsync(job.RegisterRun, Job.STATUS_FAILURE, failure_msg='stack trace')
        # Find previous runs using a variety of filters.
        prev_runs = self._RunAsync(job.FindPreviousRuns, start_timestamp=(start_time - 10), status=Job.STATUS_SUCCESS)
        self.assertEqual(len(prev_runs), 1)
        self.assertEqual(prev_runs[0]['start_time'], start_time)
        prev_runs = self._RunAsync(job.FindPreviousRuns, start_timestamp=(start_time - 10), status=Job.STATUS_FAILURE)
        self.assertEqual(len(prev_runs), 1)
        self.assertEqual(prev_runs[0]['status'], Job.STATUS_FAILURE)
        self.assertEqual(prev_runs[0]['failure_msg'], 'stack trace')
        self.assertEqual(prev_runs[0]['start_time'], start_time2)
        prev_runs = self._RunAsync(job.FindPreviousRuns, start_timestamp=(start_time - 10))
        self.assertEqual(len(prev_runs), 2)
        self.assertEqual(prev_runs[0]['start_time'], start_time)
        self.assertEqual(prev_runs[1]['start_time'], start_time2)
        prev_runs = self._RunAsync(job.FindPreviousRuns, start_timestamp=(start_time2 - 10))
        self.assertEqual(len(prev_runs), 1)
        self.assertEqual(prev_runs[0]['start_time'], start_time2)
        # limit=1 returns the most recent run only.
        prev_runs = self._RunAsync(job.FindPreviousRuns, start_timestamp=(start_time - 10), limit=1)
        self.assertEqual(len(prev_runs), 1)
        self.assertEqual(prev_runs[0]['start_time'], start_time2)
        # Find last successful run with optional payload key/value.
        prev_success = self._RunAsync(job.FindLastSuccess, start_timestamp=(start_time - 10))
        self.assertIsNone(prev_success) if False else None  # NOTE(review) placeholder removed
        self.assertIsNotNone(prev_success)
        self.assertEqual(prev_success['stats.foo.bar'], 5)
        prev_success = self._RunAsync(job.FindLastSuccess, start_timestamp=(start_time - 10), with_payload_key='stats.baz')
        self.assertIsNotNone(prev_success)
        self.assertEqual(prev_success['stats.foo.bar'], 5)
        prev_success = self._RunAsync(job.FindLastSuccess, start_timestamp=(start_time - 10), with_payload_key='stats.bar')
        self.assertIsNone(prev_success)
        prev_success = self._RunAsync(job.FindLastSuccess, start_timestamp=(start_time - 10),
                                      with_payload_key='stats.baz', with_payload_value='test')
        self.assertIsNotNone(prev_success)
        self.assertEqual(prev_success['stats.foo.bar'], 5)
        prev_success = self._RunAsync(job.FindLastSuccess, start_timestamp=(start_time - 10),
                                      with_payload_key='stats.baz', with_payload_value='test2')
        self.assertIsNone(prev_success)
from nlcontrol.systems.controllers import ControllerBase
from sympy.tensor.array import Array
from simupy.systems.symbolic import MemorylessSystem
class PID(ControllerBase):
    """
    PID(inputs=w)
    PID(ksi0, chi0, psi0, inputs=inputs)

    A nonlinear PID controller can be created using the PID class. This class is based on the ControllerBase object. The nonlinear PID is based on the input vector w(t), containing sympy's dynamicsymbols. The formulation is the following:

    .. math::
        u(t) = \\xi_0(w(t)) + \\chi_0\\left(\\int(w(t),t)\\right) + \\psi_0(w'(t))

    with :math:`.'(t)` indicating the time derivative of the signal. The class object allows the construction of P, PI, PD and PID controllers, by setting chi0 or psi0 to None. The system is based on a MemorylessSystem object from simupy.

    Parameters
    -----------
    args : optional
        ksi0 : array-like
            A list of P-action expressions, containing the input signal.
        chi0 : array-like
            A list of I-action expressions, containing the integral of the input signal.
        psi0 : array-like
            A list of D-action expressions, containing the derivative of the input signal.
    kwargs :
        inputs : array-like or string
            if `inputs` is a string, it is a comma-separated listing of the input names. If `inputs` is array-like it contains the inputs as sympy's dynamic symbols.

    Examples
    ---------
    * Create a classic PD controller with two inputs:
        >>> C = PID(inputs='w1, w2')
        >>> w1, w2, w1dot, w2dot = C.create_variables()
        >>> kp, kd = 1, 5
        >>> ksi0 = [kp * w1, kp * w2]
        >>> psi0 = [kd * w1dot, kd * w2dot]
        >>> C.define_PID(ksi0, None, psi0)

    * Same exercise as above, but with a different constructor:
        >>> from sympy.physics.mechanics import dynamicsymbols
        >>> from sympy import Symbol, diff
        >>> w = dynamicsymbols('w1, w2')
        >>> w1, w2 = tuple(w)
        >>> kp, kd = 1, 5
        >>> ksi0 = [kp * w1, kp * w2]
        >>> psi0 = [kd * diff(w1, Symbol('t')), kd * diff(w2, Symbol('t'))]
        >>> C = PID(ksi0, None, psi0, inputs=w)

    * Formulate a standard I-action chi0:
        >>> from sympy.physics.mechanics import dynamicsymbols
        >>> from sympy import Symbol, integrate
        >>> w = dynamicsymbols('w1, w2')
        >>> w1, w2 = tuple(w)
        >>> ki = 0.5
        >>> chi0 = [ki * integrate(w1, Symbol('t')), ki * integrate(w2, Symbol('t'))]
    """

    def __init__(self, *args, **kwargs):
        if 'inputs' not in kwargs:
            error_text = "[nlcontrol.systems.PID] An 'inputs=' keyword is necessary."
            raise AssertionError(error_text)
        super().__init__(*args, **kwargs)
        self._ksi0 = None  # P-action (potential energy shaper)
        self._psi0 = None  # D-action (damping injection)
        self._chi0 = None  # I-action (integral action)
        # Either no actions (configure later via define_PID), a lone P-action,
        # or the full (P, I, D) triple where I/D may be None.
        if len(args) not in (0, 1, 3):
            error_text = '[nlcontrol.systems.PID] the argument list should contain a P-action vector, or a P-action, I-action, and D-action vector. In the latter case, if I- or D-action is not necessary replace with None.'
            raise ValueError(error_text)
        if len(args) == 3:
            self.define_PID(*args)
        elif len(args) == 1:
            self.define_PID(*args, None, None)

    def __str__(self):
        return """
PID object:
===========
P: {}
I: {}
D: {}
""".format(self.P_action, self.I_action, self.D_action)

    @property
    def P_action(self):
        """list or None: the P-action expressions (ksi0)."""
        return self._ksi0

    @P_action.setter
    def P_action(self, fct):
        # Normalize a bare expression to a one-element list; keep None as-is.
        fct = [fct] if not isinstance(fct, list) and fct is not None else fct
        self._ksi0 = fct

    @property
    def D_action(self):
        """list or None: the D-action expressions (psi0)."""
        return self._psi0

    @D_action.setter
    def D_action(self, fct):
        fct = [fct] if not isinstance(fct, list) and fct is not None else fct
        self._psi0 = fct

    @property
    def I_action(self):
        """list or None: the I-action expressions (chi0)."""
        return self._chi0

    @I_action.setter
    def I_action(self, fct):
        fct = [fct] if not isinstance(fct, list) and fct is not None else fct
        self._chi0 = fct

    def define_PID(self, P, I, D):
        """
        Set all three PID actions with one function, instead of using the setter functions for each individual action. Automatic checking of the dimensions is done as well. The PID's system argument is set to a simupy MemorylessSystem object, containing the proper PID expressions. P, PI, PD and PID controllers can all be formed by setting the appropriate actions to None.

        Parameters
        -----------
        P : list or expression
            A list of expressions or an expression defining ksi0.
        I : list or expression or None
            A list of expressions or an expression defining chi0. If I is None, the controller does not contain an I-action.
        D : list or expression or None
            A list of expressions or an expression defining psi0. If D is None, the controller does not contain a D-action.

        Raises
        -------
        ValueError
            If the I- or D-vector dimension differs from the P-vector dimension.
        """
        # BUGFIX: these normalizations previously tested the undefined name
        # `fct` (copied from the property setters), raising NameError whenever
        # a bare (non-list) expression was passed.
        P = [P] if not isinstance(P, list) and P is not None else P
        dim = len(P)
        self.P_action = P
        if I is None:
            self.I_action = None
        else:
            I = [I] if not isinstance(I, list) and I is not None else I
            if len(I) == dim:
                self.I_action = I
            else:
                error_text = '[nlcontrol.systems.PID] The dimension of the I vector differs from the dimension of the P vector.'
                raise ValueError(error_text)
        if D is None:
            self.D_action = None
        else:
            D = [D] if not isinstance(D, list) and D is not None else D
            if len(D) == dim:
                self.D_action = D
            else:
                error_text = '[nlcontrol.systems.PID] The dimension of the D vector differs from the dimension of the P vector.'
                raise ValueError(error_text)
        self.__create_system__()

    def __create_system__(self):
        """
        Create the inputs and output equations from the P, PI, PD, or PID expressions.

        The input vector interleaves w(t) with its integral and/or derivative
        signals (as provided by ControllerBase's inputs/iinputs/dinputs), and
        the output is the element-wise sum of the active actions.
        """
        if self.I_action is None and self.D_action is None:
            # P-controller
            inputs = self.inputs
            output_equation = Array(self.P_action)
        elif self.I_action is None:
            # PD-controller
            inputs = [val for pair in zip(self.inputs, self.dinputs) for val in pair]
            output_equation = Array([sum(x) for x in zip(self.P_action, self.D_action)])
        elif self.D_action is None:
            # PI-controller
            inputs = [val for pair in zip(self.inputs, self.iinputs) for val in pair]
            output_equation = Array([sum(x) for x in zip(self.P_action, self.I_action)])
        else:
            # PID-controller
            inputs = [val for pair in zip(self.inputs, self.iinputs, self.dinputs) for val in pair]
            output_equation = Array([sum(x) for x in zip(self.P_action, self.I_action, self.D_action)])
        self.system = MemorylessSystem(input_=inputs, output_equation=output_equation)
# repo: ardovm/wxGlade
"""
@copyright: 2019-2020 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
from testsupport_new import WXGladeGUITest
import common, clipboard
import unittest, wx, time
class TestEditing(WXGladeGUITest):
    "Test for e.g. cut/paste; to be extended..."

    def test_crash_on_cut_paste(self):
        "with NEW_STRUCTURE at first there were crashes when cutting the static box sizer"
        basename = 'crash_on_cut_paste'
        infilename = self._get_casefile_path( '%s.wxg'%basename )
        common.main._open_app(infilename, use_progress_dialog=False, add_to_history=False)
        editor = common.root.find_widget_from_path( 'App/frame/sizer_limit/panel_3/sizer_8' )
        common.app_tree.show_toplevel( None, common.root.find_widget_from_path('App/frame') )
        self._process_wx_events()
        parent = editor.parent
        # cut
        data = clipboard.dump_widget(editor)
        editor.remove()
        self._process_wx_events()
        # paste again
        parent.clipboard_paste(data)
        self._process_wx_events()
        # save and check .wxg file: cut+paste should round-trip to the input
        generated_filename = self._get_outputfile_path(infilename)
        common.main._save_app(generated_filename)
        self._compare_files(infilename, generated_filename)
        # generate and check python code
        app = common.root
        expected_filename = self._get_casefile_path( '%s.py'%basename )
        generated_filename = self._get_outputfile_path( '%s.py'%basename )
        app.properties["output_path"].set(generated_filename)
        common.app_tree.root.generate_code()
        self._compare_files(expected_filename, generated_filename, check_mtime=True)

    def sleep(self, dt=1.0):
        # Pump wx events for *dt* seconds so the GUI stays responsive while waiting.
        end = time.time() + dt
        while time.time() < end:
            self._process_wx_events()

    def check_no_overlap(self, editor, rectangles=None):
        # recursively check that all widgets have been created and sizer children do not overlap
        if rectangles is None: rectangles = []
        for child in editor.get_all_children():
            if child.IS_SLOT and child.overlapped: continue
            if editor.IS_SIZER and not child.IS_SIZER:
                rect = child.widget.GetRect()
                for r in rectangles:
                    self.assertFalse( rect.Intersects(r) )
                rectangles.append(rect)
            elif editor.IS_SIZER and child.IS_SIZER:
                # nested sizer shares this sizer's rectangle list
                self.check_no_overlap(child, rectangles)
            # every child is also checked internally with a fresh rectangle list
            self.check_no_overlap(child)

    def test_editing_1(self):
        basename = 'Test_Editing'
        infilename = self._get_casefile_path( '%s.wxg'%basename )
        common.main._open_app(infilename, use_progress_dialog=False, add_to_history=False)
        wx.SafeYield()
        app = common.root  # shortcut
        common.app_tree.show_toplevel( None, app.children[0] )
        # ensure that there's no overlap of elements
        self.check_no_overlap(app.children[0])
        # cut static box sizer
        widget = app.find_widget_from_path("app/frame/notebook_1/panel_1/sizer_2/sizer_1")
        parent = widget.parent
        index = widget.index
        data = self.simulate_cut(widget)
        # paste again
        self.simulate_paste(parent, index, data)
        # insert panel into splitter; change "Scrollable" to test re-creation
        widget = app.find_widget_from_path("app/frame/notebook_1/window_1/SLOT 1")
        import widgets.panel.panel
        panel = widgets.panel.panel.builder(widget.parent, widget.index)
        self.assertTrue(isinstance(panel.widget, wx.Panel))
        panel.properties["scrollable"].set(True, notify=True)
        self.assertTrue(isinstance(panel.widget, wx.ScrolledWindow))
        #panel.widget.GetSize()
        #wx.Size(404, 659)
        #self.sleep(1.0)
        # set span of button inside gridbag sizer
        widget = app.find_widget_from_path("app/frame/notebook_1/window_1/window_1_pane_1/grid_sizer_1/button_3")
        widget.properties["span"].set((2,2), notify=True)
        #self.sleep(1.0)
        # XXX test change_sizer
        ## save and check .wxg file
        #generated_filename = self._get_outputfile_path(infilename)
        #common.main._save_app(generated_filename)
        #self._compare_files(infilename, generated_filename)

    def test_editing_2(self):
        basename = 'Test_Editing2'
        infilename = self._get_casefile_path( '%s.wxg'%basename )
        common.main._open_app(infilename, use_progress_dialog=False, add_to_history=False)
        wx.SafeYield()
        app = common.root  # shortcut
        common.app_tree.show_toplevel( None, app.children[0] )
        # ensure that there's no overlap of elements
        self.check_no_overlap(app.children[0])
        # change font size for static text and asserts it's size change
        text = common.root.find_widget_from_path("app/frame/notebook_1/panel_1/sizer_2/static_text_1")
        rect1 = text.widget.GetRect()
        size1 = text.widget.GetSize()
        font_p = text.properties["font"]
        font_p.set( (42, 'default', 'normal', 'normal', 0, ''), notify=True)
        self.check_no_overlap(text.parent)
        # NOTE(review): this early return disables everything below (a repeat of
        # the cut/paste and panel steps from test_editing_1) — intentional?
        return
        # cut static box sizer
        widget = app.find_widget_from_path("app/frame/notebook_1/panel_1/sizer_2/sizer_1")
        parent = widget.parent
        index = widget.index
        data = self.simulate_cut(widget)
        # paste again
        self.simulate_paste(parent, index, data)
        # insert panel into splitter; change "Scrollable" to test re-creation
        widget = app.find_widget_from_path("app/frame/notebook_1/window_1/SLOT 1")
        import widgets.panel.panel
        panel = widgets.panel.panel.builder(widget.parent, widget.index)
        self.assertTrue(isinstance(panel.widget, wx.Panel))
        panel.properties["scrollable"].set(True, notify=True)
        self.assertTrue(isinstance(panel.widget, wx.ScrolledWindow))
        # set span of button inside gridbag sizer
        widget = app.find_widget_from_path("app/frame/notebook_1/window_1/window_1_pane_1/grid_sizer_1/button_3")
        widget.properties["span"].set((2,2), notify=True)
if __name__ == '__main__':
    # exit=False keeps the interpreter alive after the run (useful when the
    # tests are launched from an interactive session).
    unittest.main(exit=False)
"""
This was only used to copy quotes from streamlabs.
Don't use this!
import asyncio
import random
from twitchAPI.twitch import Twitch
from twitchio.ext import commands
from twitchio.message import Message
from config.config_loader import FiZoneBotConfig
from google_sheet import GoogleSheet
# Used to copy quotes manually from streamlabs.
# This can take up to 20 minutes per 100 quotes.
class CopyStreamlabs(commands.Bot):
current_index = 900
start_index = 900
amount = 35
def __init__(self, config: FiZoneBotConfig):
self.google_sheet = GoogleSheet()
self.twitch = Twitch(
config.twitch_config.client_id,
config.twitch_config.secret
)
super().__init__(
token=config.twitch_config.oauth,
prefix='!',
initial_channels=['fionn']
)
async def event_ready(self):
# We are logged in and ready to chat and use commands...
print(f'Logged in as | {self.nick}')
async def event_message(self, message: Message):
if message.author is not None:
print(f'author: {message.author.name}')
else:
print(f'author: None')
if message.channel is not None:
print(f'channel: {message.channel.name}')
else:
print(f'channel: None')
if message.author is not None:
if message.author.name == 'streamlabs':
if 'Quote #' in message.content:
print(f'message.content: {message.content}')
index = int(message.content.split('#')[1].split(' ')[0])
text = message.content.split('#')[1].split(' ', 1)[1].split('[')[0].rstrip()
contents = message.content.split('[')
game = contents[1].replace(']', '')
date = contents[2].replace(']', '')
self.google_sheet.quotes.update_cell(index, 1, index)
self.google_sheet.quotes.update_cell(index, 2, text)
self.google_sheet.quotes.update_cell(index, 3, game)
self.google_sheet.quotes.update_cell(index, 4, date)
context: commands.Context = await self.get_context(message)
r = random.randrange(0, 4)
s = 10 + r
await context.send(
f'Copied quote {index}. '
f'Waiting {s} seconds to copy the next quote.'
)
print(f'wait {s}s')
await asyncio.sleep(s)
await context.send(f'!quote {index + 1}')
self.current_index = index + 1
pass
# await self.handle_commands(message)
@commands.command(name='sqc')
async def start_quote_collection(self, ctx: commands.Context):
try:
print('start_quote_collection')
index = int(ctx.message.content.replace('!sqc ', ''))
self.start_index = index
print(f'index: {index}')
await ctx.send(f'!quote {index}')
except Exception as e:
print(e)
pass
@commands.command(name='close')
async def close_bot(self, ctx: commands.Context):
if ctx.message.author is not None:
if ctx.message.author.is_mod or ctx.message.author.name.lower() == 'ostof':
await ctx.send('stopping bot')
await self.close()
else:
await ctx.reply('You are unauthorized to stopping the bot yogP')
pass
pass
if __name__ == '__main__':
bot = CopyStreamlabs('oauth:<REDACTED>')  # leaked OAuth token removed — revoke it and load credentials from config
bot.run()
"""
# meirlop/motif_enrichment.py
from timeit import default_timer as timer
import datetime
import logging
from tqdm import tqdm
import pandas as pd
import numpy as np
import statsmodels.api as smapi
import statsmodels.formula.api as sm
from statsmodels.stats.multitest import multipletests as mt
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from .sequence_characterization import get_background, get_frequency_ratio_df
from .motif_scanning import scan_motifs_parallel, format_scan_results
def analyze_scored_fasta_data_with_lr(
    sequence_dict,
    score_dict,
    motif_matrix_dict,
    alphabet = list('ACGT'),
    max_pct_degenerate = 50,
    pval = 0.001,
    pseudocount = 0.001,
    max_k = 2,
    use_length = False,
    use_gc = False,
    user_covariates_df = None,
    padj_method = 'fdr_bh',
    padj_thresh = 0.05,
    min_set_size = 2,
    max_set_size = np.inf,
    progress_wrapper = tqdm,
    n_jobs = 1,
    revcomp = True):
    """Scan scored sequences for motifs and test enrichment by logistic regression.

    Filters degenerate sequences, scans them for the motifs in
    *motif_matrix_dict*, assembles optional covariates (k-mer ratios, length,
    GC, user-supplied), then regresses motif membership on the sequence score.

    Returns a tuple of
    (lr_results_df, lr_input_df, motif_peak_set_dict, scan_results_df).
    """
    start = timer()

    def log_elapsed():
        # Progress logging: elapsed seconds since analysis start, plus wall time.
        print(f'{timer() - start} seconds')
        print(datetime.datetime.now())

    print('importing peak data')
    print(datetime.datetime.now())
    # Keep only sequences whose fraction of non-alphabet characters is below
    # the threshold. Empty sequences are dropped (previously a
    # ZeroDivisionError).
    # NOTE(review): degeneracy is measured on the original-case sequence, so
    # lowercase (e.g. repeat-masked) bases count as degenerate — confirm.
    max_frac_degenerate = max_pct_degenerate / 100
    peak_sequence_dict = {k: v.upper()
                          for k, v in sequence_dict.items()
                          if len(v) > 0
                          and (len([nuc for nuc in v if nuc not in alphabet])
                               / (1.0 * len(v))) < max_frac_degenerate}
    peak_score_dict = {sequence_id: score_dict[sequence_id]
                       for sequence_id in peak_sequence_dict.keys()}
    peak_score_df = dict_to_df(peak_score_dict, 'peak_id', 'peak_score')
    log_elapsed()
    print('scanning for motifs')
    bg = get_background(''.join(peak_sequence_dict.values()),
                        alphabet = alphabet,
                        as_counts = False)
    scan_results = scan_motifs_parallel(motif_matrix_dict,
                                        peak_sequence_dict,
                                        bg = bg,
                                        pval = pval,
                                        pseudocount = pseudocount,
                                        n_jobs = n_jobs,
                                        progress_wrapper = progress_wrapper,
                                        revcomp = revcomp)
    (scan_results_df,
     motif_peak_set_dict) = format_scan_results(scan_results)
    # Assemble optional covariate tables; all are sorted by peak_id so they
    # can be inner-joined on that index below.
    covariate_dfs = []
    if max_k > 0:
        log_elapsed()
        print('calculating kmer frequency ratios')
        frequency_ratio_df = get_frequency_ratio_df(
            peak_sequence_dict,
            alphabet = alphabet,
            max_k = max_k,
            n_jobs = n_jobs,
            remove_redundant = True,
            progress_wrapper = progress_wrapper)
        frequency_ratio_df = (frequency_ratio_df
                              .rename(columns = {'sequence_id': 'peak_id'})
                              .sort_values(by = 'peak_id'))
        print(f'frequency_ratio_df_shape = {frequency_ratio_df.shape}')
        print(f'frequency_ratio_df_columns = {frequency_ratio_df.columns}')
        covariate_dfs.append(frequency_ratio_df)
    if use_length:
        peak_length_dict = {k: len(v) * 1.0
                            for k, v in peak_sequence_dict.items()}
        peak_length_df = (dict_to_df(peak_length_dict,
                                     'peak_id',
                                     'peak_length')
                          .sort_values(by = 'peak_id'))
        print(f'peak_length_df_shape = {peak_length_df.shape}')
        print(f'peak_length_df_columns = {peak_length_df.columns}')
        covariate_dfs.append(peak_length_df)
    if use_gc:
        log_elapsed()
        print('calculating GC ratios')
        gc_ratio_df = get_frequency_ratio_df(
            peak_sequence_dict,
            alphabet = alphabet,
            max_k = 1,
            n_jobs = n_jobs,
            remove_redundant = False,
            progress_wrapper = progress_wrapper)
        gc_ratio_df = (gc_ratio_df
                       .rename(columns = {'sequence_id': 'peak_id'})
                       .sort_values(by = 'peak_id'))
        gc_ratio_df['ratio_gc'] = gc_ratio_df['kmer_ratio_G'] + gc_ratio_df['kmer_ratio_C']
        gc_ratio_df = gc_ratio_df[['peak_id', 'ratio_gc']].copy()
        print(f'gc_ratio_df_shape = {gc_ratio_df.shape}')
        print(f'gc_ratio_df_columns = {gc_ratio_df.columns}')
        covariate_dfs.append(gc_ratio_df)
    if user_covariates_df is not None:
        user_covariates_df_cp = user_covariates_df.copy()
        user_cols = list(user_covariates_df_cp.columns)
        # First column is the peak id; prefix the rest to avoid name collisions.
        renames = {user_cols[0]: 'peak_id'}
        renames.update({colname: 'user_covariate_' + colname
                        for colname in user_cols[1:]})
        user_covariates_df_cp = (user_covariates_df_cp
                                 .rename(columns = renames)
                                 .sort_values(by = 'peak_id'))
        print(f'user_covariates_df_cp_shape = {user_covariates_df_cp.shape}')
        print(f'user_covariates_df_cp_columns = {user_covariates_df_cp.columns}')
        covariate_dfs.append(user_covariates_df_cp)
    covariates_df = None
    lr_input_df = peak_score_df
    print('number of covariates dfs used:')
    print(len(covariate_dfs))
    if len(covariate_dfs) > 0:
        covariates_df = pd.concat([df.set_index('peak_id')
                                   for df in covariate_dfs],
                                  axis = 1,
                                  join = 'inner').reset_index()
        lr_input_df = peak_score_df.merge(covariates_df)
    log_elapsed()
    print('performing logistic regression')
    # Clamp motif set sizes to a sensible fraction of the peak count. BUGFIX:
    # the caller-supplied min_set_size/max_set_size bounds are now applied on
    # top (previously they were silently ignored); behavior is unchanged at
    # the defaults (2, inf).
    n_peaks = len(peak_sequence_dict)
    min_frac_set_size = 10.0**-3
    max_frac_set_size = 1.0 - min_frac_set_size
    adj_min_set_size = max(min_set_size, 3,
                           int(np.round(n_peaks * min_frac_set_size)))
    adj_max_set_size = min(max_set_size, n_peaks - 3,
                           int(np.round(n_peaks * max_frac_set_size)))
    # BUGFIX: forward the caller's progress_wrapper (was hardcoded to tqdm).
    lr_results_df = analyze_peaks_with_lr(
        peak_score_df,
        motif_peak_set_dict,
        covariates_df,
        padj_method = padj_method,
        padj_thresh = padj_thresh,
        min_set_size = adj_min_set_size,
        max_set_size = adj_max_set_size,
        progress_wrapper = progress_wrapper)
    motif_num_peaks_dict = {k: len(set(v)) for k, v in motif_peak_set_dict.items()}
    lr_results_df['num_peaks'] = lr_results_df['motif_id'].map(motif_num_peaks_dict)
    lr_results_df['percent_peaks'] = 100.0 * lr_results_df['num_peaks'] / len(sequence_dict)
    log_elapsed()
    print('returning logistic regression results')
    return lr_results_df, lr_input_df, motif_peak_set_dict, scan_results_df
def dict_to_df(data_dict, key_column, val_column):
    """Convert a mapping into a two-column DataFrame (keys, values)."""
    keys = list(data_dict.keys())
    vals = [data_dict[k] for k in keys]
    return pd.DataFrame({key_column: keys, val_column: vals})
def analyze_peaks_with_lr(peak_score_df,
                          peak_set_dict,
                          peak_covariates_df = None,
                          padj_method = 'fdr_bh',
                          padj_thresh = 0.05,
                          min_set_size = 1,
                          max_set_size = np.inf,
                          progress_wrapper = tqdm):
    """Run one logistic regression per motif peak set and collect statistics.

    Only sets whose size lies in [min_set_size, max_set_size] are tested;
    p-values are adjusted with *padj_method* and results are sorted by
    absolute coefficient (then significance), descending.
    """
    lr_df = preprocess_lr_df(peak_score_df, peak_covariates_df)
    id_col = lr_df.columns[0]
    score_col = lr_df.columns[1]
    cov_cols = list(lr_df.columns[2:])

    def regress_one(motif_id):
        # Drop the trailing fitted-model object; keep only the scalar stats.
        stats = compute_logit_regression_for_peak_set(
            peak_set_dict[motif_id], lr_df, id_col, score_col, cov_cols)
        return (motif_id,) + stats[:-1]

    eligible = [motif_id
                for motif_id, peaks in peak_set_dict.items()
                if min_set_size <= len(peaks) <= max_set_size]
    rows = [regress_one(motif_id) for motif_id in progress_wrapper(eligible)]
    results_df = pd.DataFrame(rows,
                              columns = ['motif_id',
                                         'coef',
                                         'std_err',
                                         'ci_95_pct_lower',
                                         'ci_95_pct_upper',
                                         'pval',
                                         'auc'])
    results_df['padj'] = mt(results_df['pval'], method = padj_method)[1]
    results_df['padj_sig'] = (results_df['padj'] < padj_thresh).astype(int)
    results_df['abs_coef'] = results_df['coef'].abs()
    return (results_df
            .sort_values(by = ['abs_coef', 'padj_sig'], ascending = False)
            .reset_index(drop = True))
def preprocess_lr_df(peak_score_df,
                     peak_covariates_df = None,
                     num_pca_components = 0.99):
    """Standardize score and covariates; PCA-reduce covariates when there are >1.

    Returns a frame whose columns are: peak id, standardized score, then the
    (possibly PCA-reduced, re-standardized) covariates, plus an 'intercept'
    column of ones for the regression.
    """
    if peak_covariates_df is None:
        peak_data_df = peak_score_df.copy()
        covariate_cols = []
    else:
        peak_data_df = peak_score_df.merge(peak_covariates_df)
        covariate_cols = list(peak_covariates_df.columns[1:])
    id_col = peak_score_df.columns[0]
    score_col = peak_data_df.columns[1]
    scaler = StandardScaler(with_mean = True, with_std = True)
    scaled = scaler.fit_transform(peak_data_df[[score_col] + covariate_cols])
    lr_df = pd.DataFrame(scaled, columns = [score_col] + covariate_cols)
    lr_df.insert(0, id_col, peak_data_df[id_col])
    if len(covariate_cols) > 1:
        # Decorrelate the covariates with PCA, then re-standardize the
        # components before handing them to the regression.
        score_part = lr_df[peak_score_df.columns]
        X_df = lr_df[[id_col] + covariate_cols].set_index(id_col)
        pca = PCA(n_components = num_pca_components)
        reduced = pca.fit_transform(X_df)
        reduced = StandardScaler(with_mean = True,
                                 with_std = True).fit_transform(reduced)
        pca_cols = [f'pc_{i}' for i in range(reduced.shape[1])]
        pca_covariates_df = pd.DataFrame(reduced,
                                         index = X_df.index,
                                         columns = pca_cols)
        print(f'Reduced covariates to {len(pca_cols)} principal components')
        if 0 < num_pca_components < 1:
            pct_variance = 100.0 * num_pca_components
            print((f'Components were chosen to explain '
                   f'{pct_variance}% of variance in covariates'))
        lr_df = score_part.merge(pca_covariates_df.reset_index(drop = False))
    lr_df['intercept'] = 1.0
    return lr_df
def compute_logit_regression_for_peak_set(peak_set,
                                          lr_df,
                                          peak_id_colname,
                                          score_colname,
                                          cov_colnames):
    """Logistic regression of peak-set membership on score plus covariates.

    Returns (coef, std_err, ci_95_pct_lower, ci_95_pct_upper, pval, auc,
    fitted_model), all score-coefficient statistics.
    """
    membership = lr_df[peak_id_colname].isin(peak_set)
    X = lr_df[[score_colname] + cov_colnames]
    fit = smapi.Logit(membership, X).fit(disp=0)
    # conf_int() columns 0/1 hold the lower/upper 95% bounds per regressor.
    ci = fit.conf_int()
    ci_lower = ci[0][score_colname]
    ci_upper = ci[1][score_colname]
    auc = roc_auc_score(y_true = membership,
                        y_score = fit.predict(X.values))
    return (fit.params[score_colname],
            fit.bse[score_colname],
            ci_lower,
            ci_upper,
            fit.pvalues[score_colname],
            auc,
            fit)
<filename>noggin/security/ipa.py
from cryptography.fernet import Fernet
from requests import RequestException
import python_freeipa
from python_freeipa.client_legacy import ClientLegacy as IPAClient
from python_freeipa.exceptions import (
ValidationError,
BadRequest,
FreeIPAError,
PWChangeInvalidPassword,
PWChangePolicyError,
)
import random
def parse_group_management_error(data):
    """
    An extension of freeipa's function to handle membermanagers.
    Raises ValidationError when any member/membermanager operation failed.
    TODO: send this upstream.
    """
    if 'failed' not in data:
        return
    failed = data['failed']
    for target in ('member', 'membermanager'):
        if target not in failed:
            continue
        entry = failed[target]
        if entry['group'] or entry['user']:
            raise ValidationError(failed)
class Client(IPAClient):
"""
Subclass the official client to add missing methods that we need.
TODO: send this upstream.
"""
def group_add_member_manager(
    self, group, users=None, groups=None, skip_errors=False, **kwargs
):
    """
    Add member managers to a group.

    :param group: Group name.
    :param users: Users to add.
    :type users: string or list
    :param groups: Groups to add.
    :type groups: string or list
    :param skip_errors: Skip processing errors.
    :type skip_errors: bool
    """
    # Caller-supplied kwargs take precedence over the defaults below.
    params = {'all': True, 'raw': True, 'user': users, 'group': groups, **kwargs}
    data = self._request('group_add_member_manager', group, params)
    if not skip_errors:
        parse_group_management_error(data)
    return data['result']
def otptoken_add(
self, ipatokenowner=None, ipatokenotpalgorithm=None, description=False
):
"""
Add an otptoken for a user.
:param ipatokenowner: the username
:type ipatokenowner: string
:param ipatokenotpalgorithm: the token algorithim
:type ipatokenotpalgorithm: string
:param description: the token's description.
:type description: string
"""
params = {
'ipatokenowner': ipatokenowner,
'ipatokenotpalgorithm': ipatokenotpalgorithm,
'description': description,
}
data = self._request('otptoken_add', [], params)
return data['result']
def otptoken_mod(self, ipatokenuniqueid, ipatokendisabled=False):
"""
Mod an otptoken for a user.
:param ipatokenuniqueid: the unique id of the token
:type ipatokenuniqueid: string
:param ipatokendisabled: whether it should be disabled
:type ipatokendisabled: boolean
"""
params = {
'ipatokenuniqueid': ipatokenuniqueid,
'ipatokendisabled': ipatokendisabled,
}
data = self._request('otptoken_mod', [], params)
return data['result']
def otptoken_del(self, ipatokenuniqueid):
"""
Mod an otptoken for a user.
:param ipatokenuniqueid: the unique id of the token
:type ipatokenuniqueid: string
"""
params = {'ipatokenuniqueid': ipatokenuniqueid}
data = self._request('otptoken_del', [], params)
return data['result']
def otptoken_find(self, ipatokenowner=None):
"""
Find otptokens for a user.
:param ipatokenowner: the username
:type ipatokenowner: string
"""
params = {'ipatokenowner': ipatokenowner}
data = self._request('otptoken_find', [], params)
return data['result']
def otptoken_sync(self, user, password, first_code, second_code, token=None):
"""
Sync an otptoken for a user.
:param user: the user to sync the token for
:type user: string
:param password: <PASSWORD>
:type password: string
:param first_code: the first OTP token
:type first_code: string
:param second_code: the second OTP token
:type second_code: string
:param token: the token description (optional)
:type token: string
"""
data = {
'user': user,
'password': password,
'first_code': first_code,
'second_code': second_code,
'token': token,
}
url = "https://" + self._host + "/ipa/session/sync_token"
try:
response = self._session.post(url=url, data=data, verify=self._verify_ssl)
if response.ok and "Token sync rejected" not in response.text:
return response
else:
raise BadRequest(
message="The username, password or token codes are not correct."
)
except RequestException:
raise BadRequest(message="Something went wrong trying to sync OTP token.")
def batch(self, methods=None, raise_errors=True):
"""
Make multiple ipa calls via one remote procedure call.
:param methods: Nested Methods to execute.
:type methods: dict
:param skip_errors: Raise errors from RPC calls.
:type skip_errors: bool
"""
data = self._request('batch', methods)
for idx, result in enumerate(data['results']):
error = result['error']
if error:
exception = BadRequest(message=error, code=result['error_code'])
if raise_errors:
raise exception
else:
data['results'][idx] = exception
return data
def pwpolicy_add(
self,
group,
krbminpwdlife=None,
krbpwdminlength=None,
cospriority=None,
**kwargs
):
"""
Create the password policy
:param cn: Group name.
:param krbminpwdlife: The minimum password lifetime
:param krbpwdminlength: The minimum password length
"""
params = {
'all': True,
'raw': True,
'krbminpwdlife': krbminpwdlife,
'cospriority': cospriority,
'krbpwdminlength': krbpwdminlength,
}
params.update(kwargs)
data = self._request('pwpolicy_add', group, params)
return data['result']
def change_password(self, username, new_password, old_password, otp=None):
"""
Override change_password to allow an OTP token to be provided.
:param username: User login (username)
:type username: string
:param new_password: <PASSWORD> the user
:type new_password: string
:param old_password: <PASSWORD>
:type old_password: string
:param otp: Users OTP token
:type otp: string
"""
password_url = '{0}/session/change_password'.format(self._base_url)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain',
}
data = {
'user': username,
'new_password': <PASSWORD>,
'old_password': <PASSWORD>,
}
if otp:
data['otp'] = otp
response = self._session.post(
password_url, headers=headers, data=data, verify=self._verify_ssl
)
if not response.ok:
raise FreeIPAError(message=response.text, code=response.status_code)
pwchange_result = response.headers.get('X-IPA-Pwchange-Result', None)
if pwchange_result != 'ok':
if pwchange_result == 'invalid-password':
raise PWChangeInvalidPassword(
message=response.text, code=response.status_code
)
elif pwchange_result == 'policy-error':
policy_error = response.headers.get('X-IPA-Pwchange-Policy-Error', None)
raise PWChangePolicyError(
message=response.text,
code=response.status_code,
policy_error=policy_error,
)
else:
raise FreeIPAError(message=response.text, code=response.status_code)
return response
# Construct an IPA client from app config, but don't attempt to log in with it
# or to form a session of any kind with it. This is useful for one-off cases
# like password resets where a session isn't actually required.
def untouched_ipa_client(app):
    """Build an IPA client from app config without logging in.

    No session of any kind is established; useful for one-off cases like
    password resets where a session isn't required.
    """
    server = random.choice(app.config['FREEIPA_SERVERS'])
    return Client(server, verify_ssl=app.config['FREEIPA_CACERT'])
# Attempt to obtain an IPA session from a cookie.
#
# If we are given a token as a cookie in the request, decrypt it and see if we
# are left with a valid IPA session.
#
# NOTE: You *MUST* check the result of this function every time you call it.
# It will be None if no session was provided or was provided but invalid.
def maybe_ipa_session(app, session):
    """Attempt to rebuild an IPA client from the encrypted session cookie.

    Returns a working Client when the stored session is still valid,
    otherwise None.  Callers MUST check for None.
    """
    encrypted_session = session.get('noggin_session', None)
    server_hostname = session.get('noggin_ipa_server_hostname', None)
    if encrypted_session and server_hostname:
        # Decrypt the stored IPA session cookie with the app's Fernet key.
        fernet = Fernet(app.config['FERNET_SECRET'])
        ipa_session = fernet.decrypt(encrypted_session)
        client = Client(server_hostname, verify_ssl=app.config['FREEIPA_CACERT'])
        client._session.cookies['ipa_session'] = str(ipa_session, 'utf8')
        # We have reconstructed a client, let's send a ping and see if we are
        # successful.
        try:
            ping = client._request('ping')
            client.ipa_version = ping['summary']
        except python_freeipa.exceptions.Unauthorized:
            # The stored session has expired or been revoked.
            return None
        # If there's any other kind of exception, we let it propagate up for the
        # controller (and, more practically, @with_ipa) to handle.
        return client
    return None
# Attempt to log in to an IPA server.
#
# On a successful login, we will encrypt the session token and put it in the
# user's session, returning the client handler to the caller.
#
# On an unsuccessful login, we'll let the exception bubble up.
def maybe_ipa_login(app, session, username, password):
# A session token is bound to a particular server, so we store the server
# in the session and just always use that. Flask sessions are signed, so we
# are safe in later assuming that the server hostname cookie has not been
# altered.
chosen_server = random.choice(app.config['FREEIPA_SERVERS'])
client = Client(chosen_server, verify_ssl=app.config['FREEIPA_CACERT'])
auth = client.login(username, password)
if auth and auth.logged_in:
fernet = Fernet(app.config['FERNET_SECRET'])
encrypted_session = fernet.encrypt(
bytes(client._session.cookies['ipa_session'], 'utf8')
)
session['noggin_session'] = encrypted_session
session['noggin_ipa_server_hostname'] = chosen_server
session['noggin_username'] = username
return client
return None
|
import asyncio
from datetime import datetime
from io import BytesIO
from telethon import events
from telethon.errors import BadRequestError
from telethon.tl.functions.channels import EditBannedRequest
from telethon.tl.types import Channel
import userbot.modules.sql_helper.gban_sql as gban_sql
from userbot import BOTLOG_CHATID
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, DEVS, bot
from userbot.events import register
from userbot.utils import edit_or_reply, geez_cmd, get_user_from_event
from .admin import BANNED_RIGHTS, UNBAN_RIGHTS
async def admin_groups(grp):
    """Return the IDs of every megagroup where the account is creator or
    holds admin rights."""
    group_ids = []
    async for dialog in grp.client.iter_dialogs():
        chat = dialog.entity
        if not isinstance(chat, Channel):
            continue
        if chat.megagroup and (chat.creator or chat.admin_rights):
            group_ids.append(chat.id)
    return group_ids
def mentionuser(name, userid):
    """Build a Telegram markdown mention link for the given user id."""
    return "[" + str(name) + "](tg://user?id=" + str(userid) + ")"
@geez_cmd(pattern="gban(?: |$)(.*)")
@register(pattern=r"^\.cgban(?: |$)(.*)", sudo=True)
async def gban(event):
    """Globally ban the targeted user: record them in the gban table, then
    apply BANNED_RIGHTS in every group where this account is an admin.

    Permission failures per group are reported to BOTLOG_CHATID.
    """
    if event.fwd_from:
        return
    gbun = await edit_or_reply(event, "`Memproses Global Blokir...`")
    start = datetime.now()
    user, reason = await get_user_from_event(event, gbun)
    if not user:
        return
    # Refuse to gban ourselves or the bot developers.
    if user.id == (await event.client.get_me()).id:
        await gbun.edit("**Terjadi Kesalahan, Harap Balas Kepesan Untuk melakukan Global Blokir**")
        return
    if user.id in DEVS:
        await gbun.edit("**Gagal Melakukan Global Blokir, Karna Dia Adalah Pembuat Saya**")
        return
    if gban_sql.is_gbanned(user.id):
        await gbun.edit(
            f"[{user.first_name}](tg://user?id={user.id}) **Sudah Berada Di Daftar Gban List**"
        )
    else:
        gban_sql.freakgban(user.id, reason)
    san = []
    san = await admin_groups(event)
    count = 0
    fiz = len(san)
    if fiz == 0:
        await gbun.edit("**Maaf Anda Tidak Mempunyai Hak Admin Di Group Ini**")
        return
    await gbun.edit(
        f"**Memulai Global Blokir ** [{user.first_name}](tg://user?id={user.id}) **dalam** `{len(san)}` **Grup**"
    )
    # Ban group by group; the short sleep keeps us under flood limits.
    for i in range(fiz):
        try:
            await event.client(EditBannedRequest(san[i], user.id, BANNED_RIGHTS))
            await asyncio.sleep(0.5)
            count += 1
        except BadRequestError:
            await event.client.send_message(
                BOTLOG_CHATID,
                f"**Anda tidak memiliki izin Blokir di :**\n**Grup :** `{event.chat_id}`",
            )
    end = datetime.now()
    timetaken = (end - start).seconds
    if reason:
        await gbun.edit(
            f"**Global Blokir** [{user.first_name}](tg://user?id={user.id}) **di** `{count}` **Grup, dalam waktu** `{timetaken}` **detik**!!\n**Alasan :** `{reason}`"
        )
    else:
        await gbun.edit(
            f"**Global Blokir** [{user.first_name}](tg://user?id={user.id}) **di** `{count}` **Grup, dalam waktu** `{timetaken}` **detik**!!\n**Ditambahkan ke Daftar Global Blokir.**"
        )
@geez_cmd(pattern="ungban(?: |$)(.*)")
@register(pattern=r"^\.cungban(?: |$)(.*)", sudo=True)
async def ungban(event):
    """Lift a global ban: remove the user from the gban table and restore
    UNBAN_RIGHTS in every group where this account is an admin."""
    if event.fwd_from:
        return
    ungbun = await edit_or_reply(event, "`Melepas Global Blokir...`")
    start = datetime.now()
    user, reason = await get_user_from_event(event, ungbun)
    if not user:
        return
    if gban_sql.is_gbanned(user.id):
        gban_sql.freakungban(user.id)
    else:
        # Nothing to do if the user is not on the gban list.
        await ungbun.edit(
            f"[{user.first_name}](tg://user?id={user.id}) **Tidak Berada Di Daftar Global Blokir!!!**"
        )
        return
    san = []
    san = await admin_groups(event)
    count = 0
    fiz = len(san)
    if fiz == 0:
        await ungbun.edit("**Terjadi Kesalahan Karna Anda Bukan lah admin.**")
        return
    await ungbun.edit(
        f"**Memulai Lepas Blokir ** [{user.first_name}](tg://user?id={user.id}) **dalam** `{len(san)}` **Grup**"
    )
    # Unban group by group; the short sleep keeps us under flood limits.
    for i in range(fiz):
        try:
            await event.client(EditBannedRequest(san[i], user.id, UNBAN_RIGHTS))
            await asyncio.sleep(0.5)
            count += 1
        except BadRequestError:
            await event.client.send_message(
                BOTLOG_CHATID,
                f"**Anda tidak memiliki izin Blokir di :**\n**Grup :** `{event.chat_id}`",
            )
    end = datetime.now()
    timetaken = (end - start).seconds
    if reason:
        await ungbun.edit(
            f"**Melepas Global Blokir** [{user.first_name}](tg://user?id={user.id}) **di** `{count}` **Grup dalam waktu** `{timetaken}` **detik**!!\n**Alasan :** `{reason}`"
        )
    else:
        await ungbun.edit(
            f"**Melepas Global Blokir** [{user.first_name}](tg://user?id={user.id}) **di** `{count}` **Grup dalam waktu** `{timetaken}` **detik**!!\n**Dihapus dari Daftar Global Blokir**"
        )
@geez_cmd(pattern="listgban$")
async def gablist(event):
    """Show the current global-ban list.

    Sends the list as a text file when it would exceed Telegram's
    4096-character message limit; otherwise edits it into the chat.
    """
    if event.fwd_from:
        return
    gbanned_users = gban_sql.get_all_gbanned()
    GBANNED_LIST = "**Daftar Global Blokir Anda Saat Ini**\n"
    if len(gbanned_users) > 0:
        for a_user in gbanned_users:
            if a_user.reason:
                GBANNED_LIST += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) **Alasan** `{a_user.reason}`\n"
            else:
                GBANNED_LIST += (
                    f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) `Tanpa Alasan`\n"
                )
        # BUG FIX: compare the MESSAGE length against Telegram's 4096-char
        # cap (the original compared the number of banned users), and stop
        # after sending the file instead of also trying to edit an
        # over-long message into the chat.
        if len(GBANNED_LIST) >= 4096:
            with BytesIO(str.encode(GBANNED_LIST)) as fileuser:
                fileuser.name = "list-gban.txt"
                await event.client.send_file(
                    event.chat_id,
                    fileuser,
                    force_document=True,
                    thumb="userbot/resources/logo.jpg",
                    caption="**Daftar Global Blokir**",
                    allow_cache=False,
                )
            return
    else:
        GBANNED_LIST = "`Belum ada Pengguna yang Di-GlobalBlokir`"
    await edit_or_reply(event, GBANNED_LIST)
@bot.on(events.ChatAction)
async def _(event):
    """Auto-ban gbanned users the moment they join (or are added to) a
    chat where this account has admin rights."""
    if event.user_joined or event.added_by:
        user = await event.get_user()
        chat = await event.get_chat()
        if gban_sql.is_gbanned(user.id) and chat.admin_rights:
            try:
                await event.client.edit_permissions(
                    chat.id,
                    user.id,
                    view_messages=False,
                )
                await event.reply(
                    f"**Global Blokir Pengguna** Bergabung.\n\n** • Akun:** [{user.first_name}](tg://user?id={user.id})\n • **Aksi:** `Blokir`"
                )
            except BaseException:
                # Best-effort: missing rights etc. are silently ignored.
                pass
CMD_HELP.update(
{
"gban": f"**Plugin : **`gban`\
\n\n 𝘾𝙤𝙢𝙢𝙖𝙣𝙙 :** `{cmd}gban` <username/id>\
\n ❍▸ : **Melakukan Banned Secara Global Ke Semua Grup Dimana anda Sebagai Admin.\
\n\n 𝘾𝙤𝙢𝙢𝙖𝙣𝙙 :** `{cmd}ungban` <username/id>\
\n ❍▸ : **Membatalkan Global Banned\
\n\n 𝘾𝙤𝙢𝙢𝙖𝙣𝙙 :** `{cmd}listgban`\
\n ❍▸ : **Menampilkan List Global Banned\
"
}
)
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
###
# (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# Example script: exercise Enclosure Group CRUD operations through the
# HPE OneView API (list, create, rename, script, delete).
from pprint import pprint
from config_loader import try_load_from_file
from hpeOneView.oneview_client import OneViewClient
# Placeholder connection settings; overridden by the config file when present.
config = {
    "ip": "<oneview_ip>",
    "credentials": {
        "userName": "<username>",
        "password": "<password>"
    }
}
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
api_variant = 'Synergy'
oneview_client = OneViewClient(config)
enclosure_groups = oneview_client.enclosure_groups
scopes = oneview_client.scopes
logical_interconnect_groups = oneview_client.logical_interconnect_groups
# Resolve the existing Logical Interconnect Group the new EG will reference.
lig_name = 'LIG'
lig_uri = logical_interconnect_groups.get_by_name(lig_name).data['uri']
# Request body for the Enclosure Group to be created.
eg_options = {
    "name": "EG",
    "interconnectBayMappings": [
        {
            "interconnectBay": 3,
            "logicalInterconnectGroupUri": lig_uri
        },
        {
            "interconnectBay": 6,
            "logicalInterconnectGroupUri": lig_uri
        }
    ],
    "ipAddressingMode": "External",
    "ipRangeUris": [],
    "ipv6AddressingMode": "External",
    "ipv6RangeUris": [],
    "enclosureCount": 3,
    "osDeploymentSettings": {
        "manageOSDeployment": True,
        "deploymentModeSettings": {
            "deploymentMode": "Internal",
            "deploymentNetworkUri": None
        }
    }
}
# Get the first 10 records, sorting by name descending
print("Get the ten first Enclosure Groups, sorting by name descending")
egs = enclosure_groups.get_all(0, 10, sort='name:descending')
pprint(egs)
print("\n## Create the scope")
scope_options = {
    "name": "SampleScopeForTest",
    "description": "Sample Scope description"
}
scope = scopes.get_by_name(scope_options['name'])
if not scope:
    scope = scopes.create(scope_options)
# Get Enclosure Group by scope_uris
# (filtering by scope_uris requires API version 600 or newer)
if oneview_client.api_version >= 600:
    eg_by_scope_uris = enclosure_groups.get_all(scope_uris=scope.data['uri'])
    if len(eg_by_scope_uris) > 0:
        print("Found Enclosure Group by scope_uris: '%s'.\n uri = '%s'" % (eg_by_scope_uris[0]['name'], eg_by_scope_uris[0]['uri']))
        pprint(eg_by_scope_uris)
    else:
        print("No Enclosure Group found.")
# Get by name
enclosure_group = enclosure_groups.get_by_name(eg_options["name"])
if not enclosure_group:
    # Create a Enclosure Group
    print("Create a Enclosure Group")
    # Older APIs (<= 500) additionally require an explicit stackingMode.
    if oneview_client.api_version <= 500:
        options = {"stackingMode": "Enclosure"}
        options.update(eg_options)
        enclosure_group = enclosure_groups.create(options)
    else:
        enclosure_group = enclosure_groups.create(eg_options)
    print("Created enclosure group of name - '{}' with uri - '{}'".format(enclosure_group.data['name'], enclosure_group.data['uri']))
# Get all, with default
print("Get all Enclosure Groups")
egs = enclosure_groups.get_all()
pprint(egs)
# Get by uri
print("Get an Enclosure Group by uri")
eg_byuri = enclosure_groups.get_by_uri(egs[0]["uri"])
pprint(eg_byuri.data)
# Update an Enclosure Group
resource = {"name": "Renamed EG"}
print("Renaming the enclosure Group")
enclosure_group.update(resource)
pprint(enclosure_group.data)
# Update an Enclosure Group Script
if api_variant == 'C7000':
    # update_script is available for API version 300 in Synergy and in all versions in C7000
    print("Update an Enclosure Group Script")
    script = "#TEST COMMAND"
    update_script_result = enclosure_group.update_script(script)
    pprint(update_script_result)
# Gets the configuration script of a Enclosure Group
# get_script is available for API version 300 in Synergy and in all versions in C7000
print("Gets the configuration script of an Enclosure Group")
script = enclosure_group.get_script()
print(script)
# Delete an Enclosure Group
print("Delete the created Enclosure Group")
enclosure_group.delete()
print("Successfully deleted Enclosure Group")
scope.delete()
# Create EG & EG-2 for automation
enclosure_group = enclosure_groups.create(eg_options)
print("Created enclosure group of name - '{}' with uri - '{}'".format(enclosure_group.data['name'], enclosure_group.data['uri']))
eg_options['name'] = "EG-2"
enclosure_group = enclosure_groups.create(eg_options)
print("Created enclosure group of name - '{}' with uri - '{}'".format(enclosure_group.data['name'], enclosure_group.data['uri']))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket as sock
import tkinter as tk
from datetime import datetime
DATE = datetime.now().strftime('%H:%M - %d/%m/%Y')
SEGOE = 'Segoe 11'
class App(tk.Frame):
    """Tkinter front-end for a simple TCP port scanner.

    The user enters a host and an inclusive port range; open ports are
    listed in the text box after pressing SCAN.
    """

    def __init__(self, master = None):
        super().__init__(master)
        self.create_labels()
        self.create_entries()
        self.create_buttons()
        self.create_inserts()

    def create_labels(self):
        """Create the title and the three field labels."""
        self.label = tk.Label(text = 'Port Scanner', font = 'Segoe 20 bold',
                              bg = '#273238', fg = 'white')
        self.label.place(x = '10', y = '10')
        self.label1 = tk.Label(text = 'URL:', font = SEGOE,
                               bg = '#273238', fg = 'white')
        self.label1.place(x = '10', y = '70')
        self.label2 = tk.Label(text = 'Porta Inicial:', font = SEGOE,
                               bg = '#273238', fg = 'white')
        self.label2.place(x = '10', y = '100')
        self.label3 = tk.Label(text = 'Porta Final:', font = SEGOE,
                               bg = '#273238', fg = 'white')
        self.label3.place(x = '10', y = '130')

    def create_entries(self):
        """Create the host/port entry fields and the output text box."""
        # BUG FIX: the original passed the STRING 'entry_text' (etc.) to
        # textvariable, so the StringVar/IntVar objects created here were
        # never actually attached to their entries.
        self.entry_text = tk.StringVar()
        self.ini = tk.Entry(font = SEGOE, bg = '#696969', fg = 'white', width = '30',
                            textvariable = self.entry_text)
        self.ini.place(x = '110', y = '70')
        self.entry_text1 = tk.IntVar()
        self.port_initial = tk.Entry(font = SEGOE, bg = '#696969', fg = 'white', width = '8',
                                     textvariable = self.entry_text1)
        self.port_initial.place(x = '110', y = '100')
        self.entry_text2 = tk.IntVar()
        self.port_final = tk.Entry(font = SEGOE, bg = '#696969', fg = 'white', width = '8',
                                   textvariable = self.entry_text2)
        self.port_final.place(x = '110', y = '130')
        self.textbox = tk.Text(font = ('Segoe 12 bold'), bg = '#696969',
                               fg = 'white', width = '42', height = '15',
                               selectbackground = 'green2', selectforeground = 'gray10')
        self.textbox.place(x = '8', y = '180')

    def create_inserts(self):
        """Populate the entries with example values and show usage hints."""
        text0 = ' site.com.br'
        text1 = ' 0'
        text2 = ' 0000'
        # Clear the '0' rendered by the attached IntVars before inserting
        # the placeholder values, so the display matches the placeholders.
        self.ini.delete(0, tk.END)
        self.port_initial.delete(0, tk.END)
        self.port_final.delete(0, tk.END)
        self.ini.insert(0, text0)
        self.port_initial.insert(0, text1)
        self.port_final.insert(0, text2)
        self.textbox.insert(tk.INSERT, '\n\n Preencha os campos do SCAN!')
        self.textbox.insert(tk.INSERT, '\n\n Quanto mais alto o valor das portas')
        self.textbox.insert(tk.INSERT, '\n\n Maior será a demora.')
        self.textbox.insert(tk.INSERT, '\n\n O escaneamento vair levar tempo.')

    def create_buttons(self):
        """Create the SCAN button that triggers the scan."""
        self.button1 = tk.Button(text = 'SCAN', width = '15', font = 'Arial 11 bold',
                                 bg = '#FF8C00', fg = 'white', command = self.write_on_square)
        self.button1.place(x = '243', y = '500')

    def write_on_square(self):
        """Resolve the target host, report scan metadata, then scan."""
        self.name_solved = self.ini.get()
        self.resposta = sock.gethostbyname(self.name_solved)
        # BUG FIX: timestamp the scan when it starts, not at module import
        # time (the module-level DATE constant was frozen at startup).
        started = datetime.now().strftime('%H:%M - %d/%m/%Y')
        self.textbox.delete(1.0, tk.END)
        self.textbox.insert(tk.INSERT, '\n Resultado do scan:')
        self.textbox.insert(tk.INSERT, '\n\n O scan começou em: ' + str(started))
        self.textbox.insert(tk.INSERT, '\n\n Alvo encontrado: ' + self.name_solved + ' conectado.')
        self.textbox.insert(tk.INSERT, '\n\n Feito o scan de: ' + self.resposta)
        self.scanner()

    def scanner(self):
        """Probe each port in the inclusive [initial, final] range."""
        self.port_i = int(self.port_initial.get())
        self.port_f = int(self.port_final.get())
        try:
            # BUG FIX: include the final port (range() excludes its stop).
            for port in range(self.port_i, self.port_f + 1):
                attach = sock.socket(sock.AF_INET, sock.SOCK_STREAM)
                # BUG FIX: set the timeout on THIS socket. The original
                # called sock.setdefaulttimeout(1) after creating it, which
                # only affects sockets created afterwards.
                attach.settimeout(1)
                result = attach.connect_ex((self.resposta, port))
                self.port = str(port)
                if result == 0:
                    self.textbox.insert(tk.INSERT, '\n\n Open ports: ' + self.port)
                attach.close()
        except KeyboardInterrupt:
            print('\n Exitting Program !!!!')
        except sock.gaierror:
            print('\n Hostname Could Not Be Resolved !!!!')
        except sock.error:
            print('\n Server isnt responding !!!!')
# Build the main window, apply fixed styling, and enter the Tk event loop.
root = tk.Tk()
PortScanner = App(master = root)
PortScanner.master.config(background = '#273238')
# NOTE(review): hard-coded absolute icon path -- this raises if the file is
# missing on another machine; confirm before shipping.
PortScanner.master.iconbitmap(r'/home/rodrigo/pirate.ico')
PortScanner.master.title('PortScanner')
PortScanner.master.geometry('400x540+450+20')
PortScanner.master.resizable(False, False)
PortScanner.master.eval('tk::PlaceWindow . center')
PortScanner.mainloop()
|
import numpy
from convenience import reduce_h, find_pcts_multi
from deuces.deuces import Deck, Card
from itertools import combinations, product
import random
# Precompute every distinct 2-card starting hand: 52 choose 2 = 1326 combos.
all52 = Deck.GetFullDeck()
all_hole_explicit = []
for h in combinations(all52, 2):
    all_hole_explicit += [list(h)]
deck_choose_2 = len(all_hole_explicit)
assert deck_choose_2 == 1326
def _all_hole_cards():
    """Yield the reduced string form of each of the 1326 hole-card combos.

    Suited hands keep their 's' suffix; off-suit hands drop the trailing
    'o', so e.g. 'AKo' is yielded as 'AK'.

    Example:
    AA AA AA AKs AK AQs ... AA AA AK AKs AK AK AQ ...
    Which reflects:
    AsAh AsAd AsAc AsKs AsKh AsQs AhAd AhAc AhKs AhKh AhKd AhKc AhQs
    """
    for combo in all_hole_explicit:
        reduced = reduce_h(combo)
        yield reduced[0:2] if reduced[2] == 'o' else reduced
def numbers_of_hole_cards():
    """Count how many of the 1326 combos map to each of the 169 hands.

    Returns ``[Table, cells]`` where ``Table`` maps hand strings to combo
    counts (e.g. {'AA': 6, 'AKs': 4, 'AQ': 12, ...}) and ``cells`` lists
    the hand strings in first-seen order.
    """
    Table = {}
    cells = []  # records hands in the order they are first seen
    for s in _all_hole_cards():
        # Idiom fix: membership test directly on the dict, not .keys().
        if s in Table:
            Table[s] += 1
        else:
            cells += [s]
            Table[s] = 1
    # Sanity check: every one of the 1326 combos was classified once.
    assert sum(Table.values()) == deck_choose_2
    return [Table, cells]
def numbers_of_hole_cards_random(n):
    """Count hands over ``n`` random 2-card draws (not normalized to n).

    Returns a dict mapping hand strings to observed counts.
    """
    Table = {}
    for _ in range(n):
        # Fresh shuffled deck for every draw so draws are independent.
        d = Deck()
        hole = d.draw(2)
        s = reduce_h(hole)
        if s[2] == 'o':
            s = s[0:2]
        # Idiom fix: dict.get counting instead of `in Table.keys()`.
        Table[s] = Table.get(s, 0) + 1
    return Table
#### Functions for calculating and plotting ranges ####
def range_plot(hands):
"""Take a list of strings describing hands. Return 13 lines of dots
and stars representing the hands on a grid.
"""
M = numpy.array([[0]*13]*13)
ranks = 'AKQJT98765432'
for h in hands:
if 's' in h:
row = ranks.find(h[0])
col = ranks.find(h[1])
else:
row = ranks.find(h[1])
col = ranks.find(h[0])
M[row][col] = 1
M_str = "\n".join(map(str, M)).replace('[','').replace(', ','')
M_str = M_str.replace(']','').replace('0','.').replace('1','*')
return M_str
def add_margins(M_str):
    """Prefix each row of a range plot with its rank and add a header row.

    Useful as the outermost call in a chain such as:
        print(add_margins(range_plot(top_hands_pct(25))))
    """
    ranks = 'AKQJT98765432'
    body = [ranks[i] + ' ' + line for i, line in enumerate(M_str.split('\n'))]
    header = ' A K Q J T 9 8 7 6 5 4 3 2'
    return '\n'.join([header] + body)
def top_hands_pct(p):
    """Return roughly the top p percent of starting hands as strings.

    Walks the ranking table ``HR`` from best to worst and adds a hand
    whenever including its combos brings the running total closer to the
    p-percent target combo count. Returns e.g. ['AA', 'AKs', 'KJ'].
    """
    Table, _cells = numbers_of_hole_cards()
    target = sum(Table.values()) * (p / 100.0)
    collected = 0
    chosen = []
    for entry in HR:
        hand = entry['h']
        # Only take the hand if it moves us nearer the target count.
        if abs(target - collected - Table[hand]) < abs(target - collected):
            chosen += [hand]
            collected += Table[hand]
    return chosen
def find_pcts_range(p1, range_pct, start_b = [], iter = 10000):
    """Equity calculator for hand versus range. Given 1 player's hole
    cards and one range expressed as a percent, and an optional board,
    what is each player's chance of winning (equity)?

    NOTE(review): Python 2 print statements below -- this module runs
    under Python 2 only.  The mutable default ``start_b=[]`` is shared
    across calls and ``iter`` shadows the builtin; both are left as-is
    to preserve the interface.
    """
    main_winlist = [0, 0]
    enum_hands = all_hands_in_range(top_hands_pct(range_pct))
    print " villain hands (before elim) N =",
    print len(enum_hands)
    for i in range(iter):
        # Rejection-sample a villain hand that doesn't collide with the
        # hero's cards or the board.
        p2 = []
        while not p2:
            candidate = random.choice(enum_hands)
            if p1[0] in candidate or p1[1] in candidate or candidate[0] in start_b or candidate[1] in start_b:
                # print ' ng',
                # pr(candidate)
                continue
            p2 = candidate
        ## consider just doing one eval, not call to func?
        winlist = find_pcts_multi([p1, p2], start_b = start_b, iter = 1)
        for i in range(len(winlist)):
            main_winlist[i] += winlist [i]
    # Normalize accumulated win counts into per-player equities.
    for i in range(len(main_winlist)):
        main_winlist[i] /= iter
    return main_winlist
def all_hands_in_range(list_of_str):
    """Expand hand strings ('AA', 'AKs', 'AQ') into explicit deuces combos.

    Returns a list of 2-card lists of deuces Card objects: pairs expand
    to 6 combos, suited hands to 4, off-suit hands to 12.
    """
    suits = 'shdc'
    combos = []
    for hand in list_of_str:
        if hand[0] == hand[1]:
            # Pair: choose 2 of the 4 suits.
            cards = [hand[0] + suit for suit in suits]
            for c1, c2 in combinations(cards, 2):
                combos += [[Card.new(c1), Card.new(c2)]]
        elif 's' in hand:
            # Suited: both cards share one of the 4 suits.
            for suit in suits:
                combos += [[Card.new(hand[0] + suit), Card.new(hand[1] + suit)]]
        else:
            # Off-suit: 4x4 suit pairs minus the 4 suited ones.
            firsts = [hand[0] + suit for suit in suits]
            seconds = [hand[1] + suit for suit in suits]
            for c1, c2 in product(firsts, seconds):
                if c1[1] != c2[1]:
                    combos += [[Card.new(c1), Card.new(c2)]]
    return combos
#### bunch of data ####
# Source for hole card rank table -
# http://www.tightpoker.com/poker_hands.html
# sum(n) = 115591080
HR = [
{'h':'AA', 'e':2.32, 'n':521324},
{'h':'KK', 'e':1.67, 'n':522652},
{'h':'QQ', 'e':1.22, 'n':520663},
{'h':'JJ', 'e':0.86, 'n':521866},
{'h':'AKs', 'e':0.78, 'n':348364},
{'h':'AQs', 'e':0.59, 'n':348759},
{'h':'TT', 'e':0.58, 'n':520705},
{'h':'AK', 'e':0.51, 'n':1048008},
{'h':'AJs', 'e':0.44, 'n':348126},
{'h':'KQs', 'e':0.39, 'n':346772},
{'h':'99', 'e':0.38, 'n':522454},
{'h':'ATs', 'e':0.32, 'n':348013},
{'h':'AQ', 'e':0.31, 'n':1042962},
{'h':'KJs', 'e':0.29, 'n':346582},
{'h':'88', 'e':0.25, 'n':521972},
{'h':'QJs', 'e':0.23, 'n':348870},
{'h':'KTs', 'e':0.20, 'n':348774},
{'h':'A9s', 'e':0.19, 'n':348992},
{'h':'AJ', 'e':0.19, 'n':1045857},
{'h':'QTs', 'e':0.17, 'n':346115},
{'h':'KQ', 'e':0.16, 'n':1045069},
{'h':'77', 'e':0.16, 'n':524345},
{'h':'JTs', 'e':0.15, 'n':348235},
{'h':'A8s', 'e':0.10, 'n':349431},
{'h':'K9s', 'e':0.09, 'n':348286},
{'h':'AT', 'e':0.08, 'n':1047289},
{'h':'A5s', 'e':0.08, 'n':348544},
{'h':'A7s', 'e':0.08, 'n':349949},
{'h':'KJ', 'e':0.08, 'n':1047098},
{'h':'66', 'e':0.07, 'n':520946},
{'h':'T9s', 'e':0.05, 'n':348264},
{'h':'A4s', 'e':0.05, 'n':347862},
{'h':'Q9s', 'e':0.05, 'n':348760},
{'h':'J9s', 'e':0.04, 'n':349965},
{'h':'QJ', 'e':0.03, 'n':1044338},
{'h':'A6s', 'e':0.03, 'n':347677},
{'h':'55', 'e':0.02, 'n':521945},
{'h':'A3s', 'e':0.02, 'n':347895},
{'h':'K8s', 'e':0.01, 'n':350401},
{'h':'KT', 'e':0.01, 'n':1045392},
{'h':'98s', 'e':0.00, 'n':348759},
{'h':'T8s', 'e':-0.00, 'n':347443},
{'h':'K7s', 'e':-0.00, 'n':348341},
{'h':'A2s', 'e':0.00, 'n':347318},
{'h':'87s', 'e':-0.02, 'n':348348},
{'h':'QT', 'e':-0.02, 'n':1047827},
{'h':'Q8s', 'e':-0.02, 'n':348381},
{'h':'44', 'e':-0.03, 'n':523398},
{'h':'A9', 'e':-0.03, 'n':1047672},
{'h':'J8s', 'e':-0.03, 'n':348046},
{'h':'76s', 'e':-0.03, 'n':347540},
{'h':'JT', 'e':-0.03, 'n':1043812},
{'h':'97s', 'e':-0.04, 'n':350158},
{'h':'K6s', 'e':-0.04, 'n':347029},
{'h':'K5s', 'e':-0.05, 'n':349320},
{'h':'K4s', 'e':-0.05, 'n':348681},
{'h':'T7s', 'e':-0.05, 'n':347638},
{'h':'Q7s', 'e':-0.06, 'n':348073},
{'h':'K9', 'e':-0.07, 'n':1045630},
{'h':'65s', 'e':-0.07, 'n':348590},
{'h':'T9', 'e':-0.07, 'n':1045306},
{'h':'86s', 'e':-0.07, 'n':348374},
{'h':'A8', 'e':-0.07, 'n':1042209},
{'h':'J7s', 'e':-0.07, 'n':345009},
{'h':'33', 'e':-0.07, 'n':522632},
{'h':'54s', 'e':-0.08, 'n':348260},
{'h':'Q6s', 'e':-0.08, 'n':349068},
{'h':'K3s', 'e':-0.08, 'n':348865},
{'h':'Q9', 'e':-0.08, 'n':1049468},
{'h':'75s', 'e':-0.09, 'n':349781},
{'h':'22', 'e':-0.09, 'n':524131},
{'h':'J9', 'e':-0.09, 'n':1044150},
{'h':'64s', 'e':-0.09, 'n':349689},
{'h':'Q5s', 'e':-0.09, 'n':350110},
{'h':'K2s', 'e':-0.09, 'n':349276},
{'h':'96s', 'e':-0.09, 'n':349514},
{'h':'Q3s', 'e':-0.10, 'n':348009},
{'h':'J8', 'e':-0.10, 'n':1046506},
{'h':'98', 'e':-0.10, 'n':1044759},
{'h':'T8', 'e':-0.10, 'n':1048779},
{'h':'97', 'e':-0.10, 'n':1046152},
{'h':'A7', 'e':-0.10, 'n':1046587},
{'h':'T7', 'e':-0.10, 'n':1044950},
{'h':'Q4s', 'e':-0.10, 'n':348979},
{'h':'Q8', 'e':-0.11, 'n':1048251},
{'h':'J5s', 'e':-0.11, 'n':348923},
{'h':'T6', 'e':-0.11, 'n':1043014},
{'h':'75', 'e':-0.11, 'n':1047447},
{'h':'J4s', 'e':-0.11, 'n':347508},
{'h':'74s', 'e':-0.11, 'n':350325},
{'h':'K8', 'e':-0.11, 'n':1048167},
{'h':'86', 'e':-0.11, 'n':1047524},
{'h':'53s', 'e':-0.11, 'n':346930},
{'h':'K7', 'e':-0.11, 'n':1043698},
{'h':'63s', 'e':-0.11, 'n':346449},
{'h':'J6s', 'e':-0.11, 'n':347570},
{'h':'85', 'e':-0.11, 'n':1048159},
{'h':'T6s', 'e':-0.11, 'n':348875},
{'h':'76', 'e':-0.11, 'n':1046722},
{'h':'A6', 'e':-0.12, 'n':1046762},
{'h':'T2', 'e':-0.12, 'n':1047032},
{'h':'95s', 'e':-0.12, 'n':348477},
{'h':'84', 'e':-0.12, 'n':1046266},
{'h':'62', 'e':-0.12, 'n':1049495},
{'h':'T5s', 'e':-0.12, 'n':348928},
{'h':'95', 'e':-0.12, 'n':1044601},
{'h':'A5', 'e':-0.12, 'n':1046285},
{'h':'Q7', 'e':-0.12, 'n':1046099},
{'h':'T5', 'e':-0.12, 'n':1048428},
{'h':'87', 'e':-0.12, 'n':1044635},
{'h':'83', 'e':-0.12, 'n':1048550},
{'h':'65', 'e':-0.12, 'n':1045971},
{'h':'Q2s', 'e':-0.12, 'n':348912},
{'h':'94', 'e':-0.12, 'n':1047422},
{'h':'74', 'e':-0.12, 'n':1043278},
{'h':'54', 'e':-0.12, 'n':1046435},
{'h':'A4', 'e':-0.12, 'n':1046931},
{'h':'T4', 'e':-0.12, 'n':1047976},
{'h':'82', 'e':-0.12, 'n':1043638},
{'h':'64', 'e':-0.12, 'n':1043079},
{'h':'42', 'e':-0.12, 'n':1043357},
{'h':'J7', 'e':-0.12, 'n':1046565},
{'h':'93', 'e':-0.12, 'n':1045989},
{'h':'85s', 'e':-0.12, 'n':347928},
{'h':'73', 'e':-0.12, 'n':1047020},
{'h':'53', 'e':-0.12, 'n':1047022},
{'h':'T3', 'e':-0.12, 'n':1043908},
{'h':'63', 'e':-0.12, 'n':1044818},
{'h':'K6', 'e':-0.12, 'n':1045039},
{'h':'J6', 'e':-0.12, 'n':1045991},
{'h':'96', 'e':-0.12, 'n':1047156},
{'h':'92', 'e':-0.12, 'n':1049342},
{'h':'72', 'e':-0.12, 'n':1046167},
{'h':'52', 'e':-0.12, 'n':1049213},
{'h':'Q4', 'e':-0.13, 'n':1045087},
{'h':'K5', 'e':-0.13, 'n':1047359},
{'h':'J5', 'e':-0.13, 'n':1047697},
{'h':'43s', 'e':-0.13, 'n':348802},
{'h':'Q3', 'e':-0.13, 'n':1047649},
{'h':'43', 'e':-0.13, 'n':1047900},
{'h':'K4', 'e':-0.13, 'n':1046562},
{'h':'J4', 'e':-0.13, 'n':1048129},
{'h':'T4s', 'e':-0.13, 'n':350639},
{'h':'Q6', 'e':-0.13, 'n':1046958},
{'h':'Q2', 'e':-0.13, 'n':1046353},
{'h':'J3s', 'e':-0.13, 'n':349254},
{'h':'J3', 'e':-0.13, 'n':1046204},
{'h':'T3s', 'e':-0.13, 'n':349673},
{'h':'A3', 'e':-0.13, 'n':1046970},
{'h':'Q5', 'e':-0.13, 'n':1047946},
{'h':'J2', 'e':-0.13, 'n':1045715},
{'h':'84s', 'e':-0.13, 'n':349390},
{'h':'82s', 'e':-0.14, 'n':348622},
{'h':'42s', 'e':-0.14, 'n':350591},
{'h':'93s', 'e':-0.14, 'n':348835},
{'h':'73s', 'e':-0.14, 'n':349007},
{'h':'K3', 'e':-0.14, 'n':1045968},
{'h':'J2s', 'e':-0.14, 'n':348259},
{'h':'92s', 'e':-0.14, 'n':347868},
{'h':'52s', 'e':-0.14, 'n':348401},
{'h':'K2', 'e':-0.14, 'n':1048521},
{'h':'T2s', 'e':-0.14, 'n':349612},
{'h':'62s', 'e':-0.14, 'n':348033},
{'h':'32', 'e':-0.14, 'n':1044956},
{'h':'A2', 'e':-0.15, 'n':1047979},
{'h':'83s', 'e':-0.15, 'n':349355},
{'h':'94s', 'e':-0.15, 'n':348259},
{'h':'72s', 'e':-0.15, 'n':348368},
{'h':'32s', 'e':-0.15, 'n':349794},
]
|
<reponame>cjgalvin/deepchem
"""Test normalization of input."""
import numpy as np
import deepchem as dc
from deepchem.metrics import to_one_hot
from deepchem.metrics import from_one_hot
from deepchem.metrics import threshold_predictions
from deepchem.metrics import handle_classification_mode
from deepchem.metrics import normalize_prediction_shape
from deepchem.metrics import normalize_weight_shape
def test_one_hot():
  """Check that one-hot encoding round-trips through decoding."""
  labels = np.array([0, 0, 1, 0, 1, 1, 0])
  target = np.array([[1, 0], [1, 0], [0, 1], [1, 0], [0, 1], [0, 1], [1, 0]])
  encoded = to_one_hot(labels)
  assert np.array_equal(target, encoded)
  # Decoding the encoding must give back the original labels.
  decoded = from_one_hot(encoded)
  assert np.array_equal(labels, decoded)
def test_handle_classification_mode_none():
  """A classification mode of None should leave predictions untouched."""
  probs = np.random.rand(10, 2)
  # Normalize rows so each is a valid probability distribution.
  probs = probs / probs.sum(axis=1, keepdims=True)
  probs = np.expand_dims(probs, 1)
  result = handle_classification_mode(probs, None)
  assert result.shape == (10, 1, 2)
  assert np.array_equal(result, probs)
def test_handle_classification_mode_threshold():
  """Thresholding at 0.5 should reduce probabilities to class indices."""
  probs = np.random.rand(10, 2)
  probs = probs / probs.sum(axis=1, keepdims=True)
  probs = np.expand_dims(probs, 1)
  # For a normalized binary row, a 0.5 threshold picks the argmax class.
  want = np.argmax(np.squeeze(probs), axis=1)[:, np.newaxis]
  got = handle_classification_mode(probs, "threshold", threshold_value=0.5)
  assert got.shape == (10, 1)
  assert np.array_equal(got, want)
def test_handle_classification_mode_threshold_nonstandard():
  """A non-default threshold should mark class 1 wherever p(1) >= threshold."""
  probs = np.random.rand(10, 2)
  probs = probs / probs.sum(axis=1, keepdims=True)
  # Build the expected labels before adding the task axis.
  want = np.where(probs[:, 1] >= 0.3, np.ones(10), np.zeros(10))[:, np.newaxis]
  probs = np.expand_dims(probs, 1)
  got = handle_classification_mode(probs, "threshold", threshold_value=0.3)
  assert got.shape == (10, 1)
  assert np.array_equal(got, want)
def test_handle_classification_mode_threshold_one_hot():
  """threshold-one-hot mode should emit one-hot rows for the argmax class."""
  probs = np.random.rand(10, 2)
  probs = probs / probs.sum(axis=1, keepdims=True)
  probs = np.expand_dims(probs, 1)
  want = np.expand_dims(
      to_one_hot(np.argmax(np.squeeze(probs), axis=1), n_classes=2), 1)
  got = handle_classification_mode(probs, "threshold-one-hot",
                                   threshold_value=0.5)
  assert got.shape == (10, 1, 2)
  assert np.array_equal(got, want)
def test_threshold_predictions_binary():
  """Binary thresholding at 0.5 should agree with a plain argmax."""
  probs = np.random.rand(10, 2)
  probs = probs / probs.sum(axis=1, keepdims=True)
  picked = threshold_predictions(probs, 0.5)
  assert picked.shape == (10,)
  assert np.array_equal(picked, np.argmax(probs, axis=1))
def test_threshold_predictions_multiclass():
  """With no threshold given, multiclass prediction reduces to argmax."""
  probs = np.random.rand(10, 5)
  probs = probs / probs.sum(axis=1, keepdims=True)
  picked = threshold_predictions(probs)
  assert picked.shape == (10,)
  assert np.array_equal(picked, np.argmax(probs, axis=1))
def test_normalize_1d_classification_binary():
  """1-d binary labels should normalize to (N, 1, 2) one-hot rows."""
  labels = np.array([0, 0, 1, 0, 1, 1, 0])
  want = np.array([[[1., 0.]], [[1., 0.]], [[0., 1.]], [[1., 0.]],
                   [[0., 1.]], [[0., 1.]], [[1., 0.]]])
  got = normalize_prediction_shape(
      labels, mode="classification", n_tasks=1, n_classes=2)
  assert got.shape == (7, 1, 2)
  assert np.array_equal(want, got)
def test_normalize_1d_classification_multiclass():
  """1-d multiclass labels should normalize to (N, 1, n_classes) one-hots."""
  labels = np.random.randint(5, size=(200,))
  want = np.expand_dims(to_one_hot(labels, n_classes=5), 1)
  got = normalize_prediction_shape(
      labels, mode="classification", n_tasks=1, n_classes=5)
  assert got.shape == (200, 1, 5)
  assert np.array_equal(want, got)
def test_normalize_1d_classification_multiclass_explicit_nclasses():
  """An explicit n_classes larger than the label range should be honored."""
  labels = np.random.randint(5, size=(10,))
  # Labels only span 0..4, but we ask for 10 one-hot columns.
  want = np.expand_dims(to_one_hot(labels, n_classes=10), 1)
  got = normalize_prediction_shape(
      labels, mode="classification", n_classes=10, n_tasks=1)
  assert got.shape == (10, 1, 10)
  assert np.array_equal(want, got)
def test_normalize_2d_classification_binary():
  """A (N, 1) column of binary class indices should one-hot to (N, 1, 2)."""
  labels = np.random.randint(2, size=(10, 1))
  want = np.expand_dims(dc.metrics.to_one_hot(np.squeeze(labels)), 1)
  got = normalize_prediction_shape(
      labels, mode="classification", n_tasks=1, n_classes=2)
  assert got.shape == (10, 1, 2)
  assert np.array_equal(want, got)
def test_normalize_3d_classification_binary():
  """Already-normalized (N, 1, n_classes) input should pass through as-is."""
  labels = np.random.randint(2, size=(10,))
  encoded = np.expand_dims(dc.metrics.to_one_hot(labels, n_classes=2), 1)
  got = normalize_prediction_shape(
      encoded, mode="classification", n_tasks=1, n_classes=2)
  assert got.shape == (10, 1, 2)
  assert np.array_equal(encoded, got)
def test_normalize_1d_regression():
  """A 1-d regression output should gain a trailing task axis."""
  preds = np.random.rand(10)
  want = preds[:, np.newaxis]
  got = normalize_prediction_shape(preds, mode="regression", n_tasks=1)
  assert got.shape == (10, 1)
  assert np.array_equal(want, got)
def test_normalize_2d_regression():
  """A (N, n_tasks) regression output should pass through unchanged."""
  preds = np.random.rand(10, 5)
  got = normalize_prediction_shape(preds, mode="regression", n_tasks=5)
  assert got.shape == (10, 5)
  assert np.array_equal(preds, got)
def test_normalize_3d_regression():
  """A trailing singleton axis on regression output should be squeezed off."""
  preds = np.random.rand(10, 5, 1)
  want = np.squeeze(preds)
  got = normalize_prediction_shape(preds, mode="regression", n_tasks=5)
  assert got.shape == (10, 5)
  assert np.array_equal(want, got)
def test_scalar_weight_normalization():
  """A scalar weight should broadcast to a constant (n_samples, n_tasks) array."""
  weights = normalize_weight_shape(w=5, n_samples=10, n_tasks=5)
  assert weights.shape == (10, 5)
  assert np.all(weights == np.full((10, 5), 5))
def test_1d_weight_normalization():
  """A per-sample weight vector should be replicated across every task."""
  per_sample = np.random.rand(10)
  # One identical weight column per task.
  want = np.column_stack([per_sample] * 5)
  got = normalize_weight_shape(per_sample, n_samples=10, n_tasks=5)
  assert got.shape == (10, 5)
  assert np.all(got == want)
def test_2d_weight_normalization():
  """An already (n_samples, n_tasks)-shaped weight matrix passes through."""
  weights = np.random.rand(10, 5)
  got = normalize_weight_shape(weights, n_samples=10, n_tasks=5)
  assert got.shape == (10, 5)
  assert np.all(got == weights)
|
<reponame>johnche/troll-simulator
import scripts.config as conf
import numpy as np
from scripts.tools import generate_sections
from bokeh.palettes import Category20_16
from bokeh.layouts import column, row, WidgetBox
from bokeh.models.widgets import CheckboxGroup, RadioButtonGroup, PreText, Paragraph
from bokeh.models import ColumnDataSource, Panel, Span, HoverTool
from bokeh.plotting import figure
from pandas import DataFrame, to_datetime
class MixedData:
	"""Bokeh tab showing log data both as multi-line and as scatter plots.

	Holds the full log DataFrame, a per-channel aggregation for line plots,
	and the widgets (checkbox groups, radio buttons) that select which
	channels and which log section are shown.
	"""

	def __init__(self, logdata: DataFrame):
		"""Split markers from data, build datasets and control widgets."""
		self.line_dataset = None
		self.scatter_dataset = None
		self.logdata = logdata
		# Partition markers, filter out span data from full log
		self.span_locations = logdata[logdata.Name.isin(conf.sections)]
		self.logdata = self.logdata[~self.logdata.Name.isin(conf.sections)]
		# Convert to datetime format
		self.logdata.Timestamp = to_datetime(self.logdata.Timestamp, unit='ms')
		# Scatter data
		self.scatter_data = self.logdata
		# Multi line data prep
		self.line_data = self.logdata.groupby('Channel').aggregate({'Timestamp': list, 'Value': list})
		self.id_list = list(self.line_data.index)
		self.id_list.sort()
		# One checkbox group per plot type; both list all channel ids.
		self.line_checkbox = CheckboxGroup(labels=self.id_list)
		self.line_checkbox.on_change('active', self.lineplot_handler)
		self.scatter_checkbox = CheckboxGroup(labels=self.id_list)
		self.scatter_checkbox.on_change('active', self.scatterplot_handler)
		#self.duration = self.logdata.Timestamp.max() -
		# NOTE(review): reaction_times is a hard-coded placeholder — confirm
		# whether real reaction times were meant to be computed from the log.
		self.reaction_times = [1, 2, 3, 4, 5]
		self.extra_data_text = PreText(text='')
		try:
			self.sections = generate_sections(logdata)
			radio_labels = ['Section %s' % i for i in range(len(self.sections))]
			radio_labels.append('All')
			# Default to the last button, i.e. 'All'.
			self.radio_buttons = RadioButtonGroup(labels=radio_labels, active=len(radio_labels)-1)
			self.radio_buttons.on_change('active', self.radio_button_handler)
		except IndexError:
			print('Missing Slide values. Skipping section generation')
			self.radio_buttons = RadioButtonGroup(labels=['Fix your data'])
			# Hint: This duct tape will explode at some point

	def generate_scatter_dataset(self, log_data, active_channels):
		"""Build a ColumnDataSource with one colored row group per active channel.

		NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0),
		and assigning a column on a filtered frame may raise
		SettingWithCopyWarning — consider pandas.concat on .copy() slices.
		"""
		visible_data = DataFrame(columns=['Timestamp', 'Value', 'Channel', 'color'])
		for i, channel in enumerate(active_channels):
			visible_channel = log_data[log_data['Channel'] == channel]
			visible_channel['color'] = conf.SCATTER_COLOR[i]
			visible_data = visible_data.append(visible_channel, sort=True)
		return ColumnDataSource(visible_data)

	def generate_line_dataset(self, log_data, active_channels):
		"""Build a ColumnDataSource of per-channel Timestamp/Value lists for multi_line."""
		visible_data = DataFrame(columns=['Timestamp', 'Value', 'Channel', 'color'])
		for i, channel in enumerate(active_channels):
			# log_data is indexed by channel here (groupby result).
			visible_channel = log_data.loc[channel]
			visible_channel['color'] = conf.LINE_COLOR[i]
			visible_channel['Channel'] = channel
			visible_data = visible_data.append(visible_channel, sort=True)
		return ColumnDataSource(visible_data)

	def generate_figure(self, line_data: ColumnDataSource, scatter_data: ColumnDataSource):
		"""Create the main figure with one multi_line layer and one scatter layer."""
		fig = figure(
			title='Log',
			plot_width=conf.PLOTWIDTH,
			plot_height=conf.PLOTHEIGHT,
			x_axis_label='Timestamp (ms)',
			y_axis_label='Value'
		)
		fig.multi_line('Timestamp', 'Value', source=line_data, line_width=2, color='color', legend='Channel')
		fig.scatter('Timestamp', 'Value', source=scatter_data, legend='Channel', marker='circle', color='color')
		return fig

	def update_extra_data(self):
		"""Refresh the PreText widget with duration and reaction-time stats."""
		self.duration = self.scatter_data.Timestamp.max() - self.scatter_data.Timestamp.min()
		new_text = 'Duration: %s\nReaction times: %s\nMean reaction time: %s' \
				% (self.duration, self.reaction_times, np.mean(self.reaction_times))
		self.extra_data_text.text = new_text

	def update_scatter(self):
		"""Rebuild the scatter dataset from the currently checked channels."""
		active_channels = [self.scatter_checkbox.labels[i] for i in self.scatter_checkbox.active]
		new_dataset = self.generate_scatter_dataset(self.scatter_data, active_channels)
		self.scatter_dataset.data.update(new_dataset.data)

	def update_line(self):
		"""Rebuild the line dataset from the currently checked channels."""
		active_channels = [self.line_checkbox.labels[i] for i in self.line_checkbox.active]
		new_dataset = self.generate_line_dataset(self.line_data, active_channels)
		self.line_dataset.data.update(new_dataset.data)

	def scatterplot_handler(self, attr, old, new):
		# Bokeh on_change callback for the scatter checkbox group.
		self.update_scatter()

	def lineplot_handler(self, attr, old, new):
		# Bokeh on_change callback for the line checkbox group.
		self.update_line()

	def radio_button_handler(self, attr, old, new):
		"""
		Update 'global' scatter and line datasets.
		Update active data scopes with new datasets.
		"""
		if self.radio_buttons.labels[new] == 'All':
			self.scatter_data = self.logdata
			self.line_data = self.logdata.groupby('Channel').aggregate({'Timestamp': list, 'Value': list})
		else:
			# Restrict both datasets to the selected section's time range.
			section = self.sections[new]
			self.scatter_data = self.logdata.loc[section.start:section.end]
			self.line_data = self.scatter_data \
					.groupby('Channel'). \
					aggregate({'Timestamp': list, 'Value': list})
		self.duration = self.scatter_data.Timestamp.max() - self.scatter_data.Timestamp.min()
		self.update_extra_data()
		self.update_scatter()
		self.update_line()

	def tab(self, name):
		"""Assemble and return the full Panel (widgets + figure) for this view."""
		initial_line_channels = [self.line_checkbox.labels[i] for i in self.line_checkbox.active]
		self.line_dataset = self.generate_line_dataset(self.line_data, initial_line_channels)
		initial_scatter_channels = [self.scatter_checkbox.labels[i] for i in self.scatter_checkbox.active]
		self.scatter_dataset = self.generate_scatter_dataset(self.scatter_data, initial_scatter_channels)
		fig = self.generate_figure(self.line_dataset, self.scatter_dataset)
		# For sectioning the figure
		for span_location in self.span_locations.itertuples():
			span = Span(
				location=span_location.Timestamp,
				dimension='height',
				line_color='red',
				line_dash='dashed',
				line_width=2
			)
			fig.add_layout(span)
			# Add hovertool trick
			# (an invisible line at the span position gives HoverTool a renderer)
			hovertool_cheat = fig.line(
				[span_location.Timestamp, span_location.Timestamp],
				[-10, 10],
				line_width=0,
				line_color='red',
				line_dash='dashed'
			)
			hovertool = HoverTool(
				renderers=[hovertool_cheat],
				tooltips=[
					(span_location.Name, str(span_location.Timestamp))
				]
			)
			fig.add_tools(hovertool)
		lineplot_text = Paragraph(text='Lineplot channels')
		scatterplot_text = Paragraph(text='Scatterplot channels')
		self.update_extra_data()
		layout = column(self.radio_buttons, row(
			column(
				lineplot_text,
				self.line_checkbox,
				scatterplot_text,
				self.scatter_checkbox
			),
			fig,
			self.extra_data_text
		)
		)
		return Panel(child=layout, title=name)
|
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from coverage2sql.db import api
from coverage2sql.tests import base
from coverage2sql.tests import coverage2sql_fixtures as fixtures
from coverage2sql.tests import db_test_utils
load_tests = testscenarios.load_tests_apply_scenarios
class TestDatabaseAPI(base.TestCase):
    """Exercise the coverage2sql DB API against each supported backend.

    testscenarios runs every test once per entry in ``scenarios``, with
    ``self.dialect`` set by the active scenario.
    """

    scenarios = [
        ('mysql', {'dialect': 'mysql'}),
        ('postgresql', {'dialect': 'postgres'}),
        ('sqlite', {'dialect': 'sqlite'})
    ]

    def setUp(self):
        super(TestDatabaseAPI, self).setUp()
        self.useFixture(fixtures.LockFixture(self.dialect))
        if not db_test_utils.is_backend_avail(self.dialect):
            # FIX: skipTest() raises SkipTest itself; the original wrapped it
            # in a spurious `raise` of its (None) return value.
            self.skipTest('%s is not available' % self.dialect)
        # Select the per-backend configuration fixture.
        if self.dialect == 'mysql':
            self.useFixture(fixtures.MySQLConfFixture())
        elif self.dialect == 'postgres':
            self.useFixture(fixtures.PostgresConfFixture())
        elif self.dialect == 'sqlite':
            self.useFixture(fixtures.SqliteConfFixture())
        self.useFixture(fixtures.Database())

    def test_create_coverage(self):
        """A created coverage row carries the given project name."""
        cov = api.create_coverage('foo_project')
        self.assertIsNotNone(cov)
        self.assertEqual(cov.project_name, 'foo_project')

    def test_get_coverage_all(self):
        """get_coverage() with no filter returns every stored coverage."""
        api.create_coverage('foo1_project')
        api.create_coverage('foo2_project')
        covs = api.get_coverage()
        self.assertIsNotNone(covs)
        self.assertEqual(len(covs), 2)
        names = [n.project_name for n in covs]
        # FIX: use positional args — the needle=/haystack= keyword names are
        # testtools-specific and break if the base class is plain unittest
        # (whose signature is assertIn(member, container)).
        self.assertIn('foo1_project', names)
        self.assertIn('foo2_project', names)

    def test_get_coverage_with_projenct_name(self):
        """Filtering by project_name returns only that project's coverage."""
        # NOTE(review): method name typo ("projenct") kept — renaming a test
        # method would change the externally visible test id.
        api.create_coverage('foo1_project')
        api.create_coverage('foo2_project')
        covs = api.get_coverage(project_name='foo1_project')
        self.assertIsNotNone(covs)
        self.assertEqual(len(covs), 1)
        self.assertEqual(covs[0].project_name, 'foo1_project')

    def test_get_coverage_with_metadata(self):
        """Coverage metadata is stored and returned verbatim."""
        api.create_coverage('foo1_project', coverage_metadata="foo,bar")
        api.create_coverage('foo2_project', coverage_metadata="bar,foo")
        covs = api.get_coverage(project_name='foo1_project')
        self.assertIsNotNone(covs)
        self.assertEqual(len(covs), 1)
        self.assertEqual(covs[0].project_name, 'foo1_project')
        self.assertEqual(covs[0].coverage_metadata, 'foo,bar')

    def test_add_file_rates(self):
        """add_file_rates persists one row per (filename, line-rate) pair."""
        rates = []
        rates.append({'filename': 'foo/bar0', 'line-rate': '0'})
        rates.append({'filename': 'foo/bar1', 'line-rate': '1'})
        rates.append({'filename': 'foo/bar2', 'line-rate': '0.92'})
        files = api.add_file_rates(1, rates)
        self.assertEqual(3, len(files))
        for r, f in zip(rates, files):
            self.assertEqual(r['filename'], f.filename)
            self.assertEqual(r['line-rate'], f.line_rate)
|
import os
import unittest
from unittest import mock
import click
from click.testing import CliRunner
import aioflask
import aioflask.cli
from .utils import async_test
class TestCli(unittest.TestCase):
    """Tests for aioflask's click CLI integration (async commands, run)."""

    @async_test
    async def test_command_with_appcontext(self):
        """An async command with an app context can read current_app."""
        app = aioflask.Flask('testapp')

        @app.cli.command(with_appcontext=True)
        async def testcmd():
            click.echo(aioflask.current_app.name)

        result = await app.test_cli_runner().invoke(testcmd)
        assert result.exit_code == 0
        assert result.output == "testapp\n"

    @async_test
    async def test_command_without_appcontext(self):
        """Without an app context, touching current_app fails the command."""
        app = aioflask.Flask('testapp')

        @app.cli.command(with_appcontext=False)
        async def testcmd():
            click.echo(aioflask.current_app.name)

        result = await app.test_cli_runner().invoke(testcmd)
        assert result.exit_code == 1
        assert type(result.exception) == RuntimeError

    @async_test
    async def test_with_appcontext(self):
        """The with_appcontext decorator works on a bare click command."""
        @click.command()
        @aioflask.cli.with_appcontext
        async def testcmd():
            click.echo(aioflask.current_app.name)

        app = aioflask.Flask('testapp')
        result = await app.test_cli_runner().invoke(testcmd)
        assert result.exit_code == 0
        assert result.output == "testapp\n"

    @mock.patch('aioflask.cli.uvicorn')
    def test_aiorun(self, uvicorn):
        """run_command maps CLI options and FLASK_DEBUG onto uvicorn.run."""
        app = aioflask.Flask('testapp')
        obj = aioflask.cli.ScriptInfo(app_import_path='app.py',
                                      create_app=lambda: app)
        result = CliRunner().invoke(aioflask.cli.run_command, obj=obj)
        assert result.exit_code == 0
        uvicorn.run.assert_called_with('app:app', factory=False,
                                       host='127.0.0.1', port=5000,
                                       reload=False, workers=1,
                                       log_level='info', ssl_certfile=None,
                                       ssl_keyfile=None)
        result = CliRunner().invoke(aioflask.cli.run_command,
                                    '--host 1.2.3.4 --port 3000', obj=obj)
        assert result.exit_code == 0
        uvicorn.run.assert_called_with('app:app', factory=False,
                                       host='1.2.3.4', port=3000,
                                       reload=False, workers=1,
                                       log_level='info', ssl_certfile=None,
                                       ssl_keyfile=None)
        # FIX: the original restored FLASK_DEBUG/AIOFLASK_USE_DEBUGGER only at
        # the end of the test body, so any failing assertion above the cleanup
        # leaked the env vars into later tests. try/finally guarantees cleanup.
        try:
            os.environ['FLASK_DEBUG'] = 'true'
            result = CliRunner().invoke(aioflask.cli.run_command, obj=obj)
            assert result.exit_code == 0
            uvicorn.run.assert_called_with('app:app', factory=False,
                                           host='127.0.0.1', port=5000,
                                           reload=True, workers=1,
                                           log_level='debug', ssl_certfile=None,
                                           ssl_keyfile=None)
            result = CliRunner().invoke(aioflask.cli.run_command, '--no-reload',
                                        obj=obj)
            assert result.exit_code == 0
            uvicorn.run.assert_called_with('app:app', factory=False,
                                           host='127.0.0.1', port=5000,
                                           reload=False, workers=1,
                                           log_level='debug', ssl_certfile=None,
                                           ssl_keyfile=None)
        finally:
            os.environ.pop('FLASK_DEBUG', None)
            os.environ.pop('AIOFLASK_USE_DEBUGGER', None)

    @mock.patch('aioflask.cli.uvicorn')
    def test_aiorun_with_factory(self, uvicorn):
        """A factory-style import path is passed to uvicorn with factory=True."""
        app = aioflask.Flask('testapp')
        obj = aioflask.cli.ScriptInfo(app_import_path='app:create_app()',
                                      create_app=lambda: app)
        result = CliRunner().invoke(aioflask.cli.run_command, obj=obj)
        assert result.exit_code == 0
        uvicorn.run.assert_called_with('app:create_app', factory=True,
                                       host='127.0.0.1', port=5000,
                                       reload=False, workers=1,
                                       log_level='info', ssl_certfile=None,
                                       ssl_keyfile=None)
|
<gh_stars>10-100
import numpy as np
import torch as th
from torchvision import utils
from utils.helper_functions import *
import utils.visualization as visualization
import utils.filename_templates as TEMPLATES
import utils.helper_functions as helper
import utils.logger as logger
from utils import image_utils
class Test(object):
    """
    Base class for running a trained model over a test set.

    Owns the model, config, data loader, save path, logger and visualizer;
    _test() drives the evaluation loop. Subclasses must implement
    run_network(), move_batch_to_cuda() and get_estimation_and_target().
    """
    def __init__(self, model, config,
                 data_loader, visualizer, test_logger=None, save_path=None, additional_args=None):
        """Store components, move the model to the GPU and set up logging.

        Parameters
        ----------
        model : network to evaluate
        config : dict with at least 'cuda', 'gpu', 'save_dir', 'name'
        data_loader : iterable yielding test batches
        visualizer : callable building the batch visualizer
        test_logger : pre-built logger; a default one is created if None
        save_path : output directory; derived from config if None
        additional_args : optional dict of extra options
        """
        self.downsample = False # Downsampling for Rebuttal
        self.model = model
        self.config = config
        self.data_loader = data_loader
        self.additional_args = additional_args
        # Uses `th` (the visible `import torch as th`); the original used the
        # bare name `torch`, which is only in scope via the wildcard
        # helper_functions import.
        # NOTE(review): the else branch moves the model to the GPU even when
        # config['cuda'] is False — preserved as-is, confirm intent.
        if config['cuda'] and not th.cuda.is_available():
            print('Warning: There\'s no CUDA support on this machine, '
                  'training is performed on CPU.')
        else:
            self.gpu = th.device('cuda:' + str(config['gpu']))
            self.model = self.model.to(self.gpu)
        if save_path is None:
            self.save_path = helper.create_save_path(config['save_dir'].lower(),
                                                     config['name'].lower())
        else:
            self.save_path = save_path
        # BUG FIX: the original tested `if logger is None`, i.e. the imported
        # utils.logger *module*, which is never None — so a default Logger was
        # never created and self.logger silently became None whenever no
        # test_logger was passed. Test the argument instead.
        if test_logger is None:
            self.logger = logger.Logger(self.save_path)
        else:
            self.logger = test_logger
        if isinstance(self.additional_args, dict) and 'name_mapping_test' in self.additional_args.keys():
            visu_add_args = {'name_mapping' : self.additional_args['name_mapping_test']}
        else:
            visu_add_args = None
        self.visualizer = visualizer(data_loader, self.save_path, additional_args=visu_add_args)

    def summary(self):
        """Write a human-readable description of the test setup to the log."""
        self.logger.write_line("====================================== TEST SUMMARY ======================================", True)
        self.logger.write_line("Model:\t\t\t" + self.model.__class__.__name__, True)
        self.logger.write_line("Tester:\t\t" + self.__class__.__name__, True)
        self.logger.write_line("Test Set:\t" + self.data_loader.dataset.__class__.__name__, True)
        self.logger.write_line("\t-Dataset length:\t"+str(len(self.data_loader)), True)
        self.logger.write_line("\t-Batch size:\t\t" + str(self.data_loader.batch_size), True)
        self.logger.write_line("==========================================================================================", True)

    def run_network(self, epoch):
        # Subclasses run the forward pass and store results in the batch.
        raise NotImplementedError

    def move_batch_to_cuda(self, batch):
        # Subclasses move their batch container (dict or list) to self.gpu.
        raise NotImplementedError

    def visualize_sample(self, batch):
        self.visualizer(batch)

    def visualize_sample_dsec(self, batch, batch_idx):
        # DSEC visualizers take the batch index as an extra argument.
        self.visualizer(batch, batch_idx, None)

    def get_estimation_and_target(self, batch):
        # Returns the estimation and target of the current batch
        raise NotImplementedError

    def _test(self):
        """
        Validate after training an epoch

        :return: A log that contains information about validation

        Note:
            The validation metrics in log must have the key 'val_metrics'.
        """
        self.model.eval()
        with th.no_grad():
            for batch_idx, batch in enumerate(self.data_loader):
                # Move Data to GPU
                if next(self.model.parameters()).is_cuda:
                    batch = self.move_batch_to_cuda(batch)
                # Network Forward Pass
                self.run_network(batch)
                print("Sample {}/{}".format(batch_idx + 1, len(self.data_loader)))
                # Visualize
                if hasattr(batch, 'keys') and 'loader_idx' in batch.keys() \
                        or (isinstance(batch,list) and hasattr(batch[0], 'keys') and 'loader_idx' in batch[0].keys()):
                    self.visualize_sample(batch)
                else:
                    # DSEC Special Snowflake
                    self.visualize_sample_dsec(batch, batch_idx)
                    #print('Not Visualizing')
        # Log Generation
        log = {}
        return log
class TestRaftEvents(Test):
    """Tester for RAFT on event volumes: one forward pass per dict batch.

    NOTE(review): this class uses the bare name `torch` although the module
    is imported as `th` above — it relies on `torch` being re-exported by the
    wildcard `utils.helper_functions` import; confirm it is in scope.
    """
    def move_batch_to_cuda(self, batch):
        # Batches are plain dicts of tensors here.
        return move_dict_to_cuda(batch, self.gpu)

    def get_estimation_and_target(self, batch):
        """Return (flow estimate, ground truth[, validity mask]) on CPU.

        When self.downsample is set, ground truth (and mask) are resized by
        a factor 0.5 so they match the downsampled estimate.
        """
        if not self.downsample:
            if 'gt_valid_mask' in batch.keys():
                return batch['flow_est'].cpu().data, (batch['flow'].cpu().data, batch['gt_valid_mask'].cpu().data)
            return batch['flow_est'].cpu().data, batch['flow'].cpu().data
        else:
            f_est = batch['flow_est'].cpu().data
            f_gt = torch.nn.functional.interpolate(batch['flow'].cpu().data, scale_factor=0.5)
            if 'gt_valid_mask' in batch.keys():
                f_mask = torch.nn.functional.interpolate(batch['gt_valid_mask'].cpu().data, scale_factor=0.5)
                return f_est, (f_gt, f_mask)
            return f_est, f_gt

    def run_network(self, batch):
        # RAFT just expects two images as input. cleanest. code. ever.
        if not self.downsample:
            im1 = batch['event_volume_old']
            im2 = batch['event_volume_new']
        else:
            im1 = torch.nn.functional.interpolate(batch['event_volume_old'], scale_factor=0.5)
            im2 = torch.nn.functional.interpolate(batch['event_volume_new'], scale_factor=0.5)
        # The model returns (low-res flow, list of iterative refinements);
        # the final refinement is kept as the estimate.
        _, batch['flow_list'] = self.model(image1=im1,
                                           image2=im2)
        batch['flow_est'] = batch['flow_list'][-1]
class TestRaftEventsWarm(Test):
    """RAFT tester with warm-starting: each sample's flow initializes the next.

    Batches are *lists* of dicts (a short sub-sequence); self.flow_init is
    carried across calls and reset at sequence boundaries (see check_states).
    """
    def __init__(self, model, config,
                 data_loader, visualizer, test_logger=None, save_path=None, additional_args=None):
        """Set up warm-start state; requires batch size 1 for recurrence."""
        super(TestRaftEventsWarm, self).__init__(model, config,
                                                 data_loader, visualizer, test_logger, save_path,
                                                 additional_args=additional_args)
        self.subtype = config['subtype'].lower()
        print('Tester Subtype: {}'.format(self.subtype))
        self.net_init = None  # Hidden state of the refinement GRU
        self.flow_init = None  # Forward-warped flow used to seed the next sample
        self.idx_prev = None  # Sample index of the previous batch (for jump detection)
        self.init_print=False
        assert self.data_loader.batch_size == 1, 'Batch size for recurrent testing must be 1'

    def move_batch_to_cuda(self, batch):
        # Batches are lists of dicts here.
        return move_list_to_cuda(batch, self.gpu)

    def get_estimation_and_target(self, batch):
        """Return (estimate, ground truth[, mask]) of the *last* list entry, on CPU."""
        if not self.downsample:
            if 'gt_valid_mask' in batch[-1].keys():
                return batch[-1]['flow_est'].cpu().data, (batch[-1]['flow'].cpu().data, batch[-1]['gt_valid_mask'].cpu().data)
            return batch[-1]['flow_est'].cpu().data, batch[-1]['flow'].cpu().data
        else:
            f_est = batch[-1]['flow_est'].cpu().data
            f_gt = torch.nn.functional.interpolate(batch[-1]['flow'].cpu().data, scale_factor=0.5)
            if 'gt_valid_mask' in batch[-1].keys():
                f_mask = torch.nn.functional.interpolate(batch[-1]['gt_valid_mask'].cpu().data, scale_factor=0.5)
                return f_est, (f_gt, f_mask)
            return f_est, f_gt

    def visualize_sample(self, batch):
        # Only the last element of the sub-sequence is visualized.
        self.visualizer(batch[-1])

    def visualize_sample_dsec(self, batch, batch_idx):
        self.visualizer(batch[-1], batch_idx, None)

    def check_states(self, batch):
        """Reset warm-start state at sequence boundaries."""
        # 0th case: there is a flag in the batch that tells us to reset the state (DSEC)
        if 'new_sequence' in batch[0].keys():
            if batch[0]['new_sequence'].item() == 1:
                self.flow_init = None
                self.net_init = None
                self.logger.write_line("Resetting States!", True)
        else:
            # During Validation, reset state if a new scene starts (index jump)
            if self.idx_prev is not None and batch[0]['idx'].item() - self.idx_prev != 1:
                self.flow_init = None
                self.net_init = None
                self.logger.write_line("Resetting States!", True)
            self.idx_prev = batch[0]['idx'].item()

    def run_network(self, batch):
        """Run RAFT over every sample in the list, threading flow_init through."""
        self.check_states(batch)
        for l in range(len(batch)):
            # Run Recurrent Network for this sample
            if not self.downsample:
                im1 = batch[l]['event_volume_old']
                im2 = batch[l]['event_volume_new']
            else:
                im1 = torch.nn.functional.interpolate(batch[l]['event_volume_old'], scale_factor=0.5)
                im2 = torch.nn.functional.interpolate(batch[l]['event_volume_new'], scale_factor=0.5)
            flow_low_res, batch[l]['flow_list'] = self.model(image1=im1,
                                                             image2=im2,
                                                             flow_init=self.flow_init)
            batch[l]['flow_est'] = batch[l]['flow_list'][-1]
            # Forward-warp the low-res flow to seed the next sample.
            self.flow_init = image_utils.forward_interpolate_pytorch(flow_low_res)
            batch[l]['flow_init'] = self.flow_init
|
<gh_stars>10-100
import numpy as np
import tensorflow as tf
from ..agent import Agent
from ..registry import register
from .utils import copy_variables_op
from ...utils.logger import log_scalar
from ...models.registry import get_model
from .utils import normalize, one_hot
from .advantage_estimator.registry import get_advantage_estimator
@register
class PPO(Agent):
  """ Proximal Policy Optimization

  Keeps three networks: the trained actor, a critic producing state values,
  and a "target" actor whose logits are passed to the loss function as the
  old-policy logits (the loss itself lives in self._grad_function).
  """

  def __init__(self, sess, hparams):
    # PPO's on-policy rollout buffer must be the simple FIFO memory.
    assert hparams.memory == "simple", "PPO only works with simple memory."
    super().__init__(sess, hparams)
    self.actor = get_model(hparams, register="PPOActor", name="actor")
    self.critic = get_model(hparams, register="PPOCritic", name="critic")
    self.target_actor = get_model(
        hparams, register="PPOActor", name="target_actor")
    self.advantage_estimator = get_advantage_estimator(
        self._hparams.advantage_estimator)
    self.build()

  def act(self, state, worker_id):
    """Return an action for `state` sampled via self._action_function."""
    # Add a batch dimension if a single unbatched state was passed.
    if state.ndim < len(self._hparams.state_shape) + 1:
      state = np.expand_dims(state, axis=0)
    action_distribution = self._sess.run(
        self.probs, feed_dict={self.last_states: state})
    return self._action_function(self._hparams, action_distribution, worker_id)

  def observe(self, last_state, action, reward, done, state, worker_id=0):
    """Record one transition; trigger an update every num_steps samples."""
    action = one_hot(action, self._hparams.num_actions)
    memory = self._memory[worker_id]
    memory.add_sample(
        last_state=last_state,
        action=action,
        reward=reward,
        discount=self._hparams.gamma,
        done=done,
        state=state,
    )
    if memory.size() == self._hparams.num_steps:
      self.update(worker_id)

  def reset(self, worker_id=0):
    # Drop any partially collected rollout for this worker.
    self._memory[worker_id].clear()

  def clone_weights(self):
    # Hard-copy actor weights into the target actor (eager-style).
    self.target_actor.set_weights(self.actor.get_weights())

  def update_targets(self):
    # Graph-op equivalent of clone_weights().
    self._sess.run(self.target_update_op)

  def _build_target_update_op(self):
    with tf.variable_scope("update_target_networks"):
      self.target_update_op = copy_variables_op(
          source=self.actor, target=self.target_actor)

  def build(self):
    """Construct placeholders, forward graph, losses and train ops."""
    self.last_states = tf.placeholder(
        tf.float32, [None] + self._hparams.state_shape, name="last_states")
    self.advantages = tf.placeholder(tf.float32, [None], name="advantages")
    self.discounted_rewards = tf.placeholder(
        tf.float32, [None], name="discounted_rewards")
    # Actions arrive one-hot encoded (see observe()).
    self.actions = tf.placeholder(
        tf.int32, [None, self._hparams.num_actions], name="actions")

    last_states = self.process_states(self.last_states)

    if self._hparams.pixel_input:
      self.cnn_vars = self._state_processor.trainable_weights
    else:
      self.cnn_vars = None

    self.logits = self.actor(last_states)
    self.probs = tf.nn.softmax(self.logits, -1)
    # Old-policy logits from the frozen target actor, for the PPO ratio.
    target_logits = self.target_actor(last_states)
    self.values = self.critic(last_states)[:, 0]

    losses, train_ops = self._grad_function(
        logits={
            "target_logits": target_logits,
            "logits": self.logits
        },
        actions=self.actions,
        advantages=self.advantages,
        values=self.values,
        discounted_rewards=self.discounted_rewards,
        hparams=self._hparams,
        var_list={
            "actor_vars": self.actor.trainable_weights,
            "critic_vars": self.critic.trainable_weights,
            "cnn_vars": self.cnn_vars
        })

    self.actor_loss = losses['actor_loss']
    self.critic_loss = losses['critic_loss']
    self.actor_train_op = train_ops['actor_train_op']
    self.critic_train_op = train_ops['critic_train_op']
    self.state_processor_train_op = train_ops['state_processor_train_op']

    self._build_target_update_op()

  def update(self, worker_id=0):
    """Run num_epochs of minibatch PPO updates on the worker's rollout."""
    if self._hparams.test_only:
      return

    memory = self._memory[worker_id]

    # Append the final next-state so values has one bootstrap entry extra.
    states = np.concatenate((
        memory.get_sequence('last_state'),
        memory.get_sequence('state', indices=[-1]),
    ))

    rewards = memory.get_sequence('reward')
    dones = memory.get_sequence('done')

    values = self._sess.run(self.values, feed_dict={self.last_states: states})

    advantages = self.advantage_estimator(rewards, values, dones, self._hparams)
    # Returns = advantages + baseline values (bootstrap entry excluded).
    discounted_rewards = advantages + values[:-1]
    memory.set_sequence('discounted_reward', discounted_rewards)

    if self._hparams.normalize_reward:
      advantages = normalize(advantages)
    memory.set_sequence('advantage', advantages)

    for _ in range(self._hparams.num_epochs):
      for batch in memory.shuffled_batches(self._hparams.batch_size):
        feed_dict = {
            self.last_states: batch.last_state,
            self.actions: batch.action,
            self.advantages: batch.advantage,
            self.discounted_rewards: batch.discounted_reward,
        }

        self._sess.run(self.state_processor_train_op, feed_dict=feed_dict)

        actor_loss, _ = self._sess.run([self.actor_loss, self.actor_train_op],
                                       feed_dict=feed_dict)
        log_scalar("loss/actor/worker_%d" % worker_id, actor_loss)

        critic_loss, _ = self._sess.run(
            [self.critic_loss, self.critic_train_op], feed_dict=feed_dict)
        log_scalar("loss/critic/worker_%d" % worker_id, critic_loss)

    memory.clear()
    # On-policy: refresh the old-policy (target) actor after every update.
    self.update_targets()
|
<gh_stars>1-10
import pprint as pp
from learning.NetStrucLner import *
from shannon_info_theory.DataEntropy import *
class MB_BasedLner(NetStrucLner):
"""
MB_BasedLner (Markov Blanket Based Learner) is an abstract class for
learning the structure of a bnet by first finding the markov blanket of
each node, then using that MB info to find the neighbors of each node,
then orienting the edges of each node with its neighbors. The procedure
for orienting the edges is not 100% effective and must be patched up (
it might introduce some cycles and leave some edges undecided. A
heuristic is introduced to patch things up.
The first MB based learner was Grow Shrink (referring to growing and
shrinking of the MB) by Margaritis. See Refs. 1 and 2 for his original
paper and his 2003 Thesis at Carnegie Mellon.
Many variations of Grow Shrink were introduced after it. In Quantum Fog,
Grow Shrink and all its variants are subclasses of this class,
MB_BasedLner, and their names all start with 'MB_' for easy
identification and so they all stay together in an alphabetical listing
of files.
Ref. 3, the PhD thesis of Shunkai Fu, was very helpful in writing the
MB_ classes, because it contains pseudo code for most of the MB_
algorithms. However, note that in that pseudo code, whenever it says I <
epsilon, it means that the conditional mutual info I > epsilon.
See Shunkai Fu Thesis if you want to know who invented each MB_
algorithm and in which papers they proposed it for the first time. The
References given below are not necessarily the first papers, but rather
    papers with good pseudo code
References
----------
1. <NAME> and <NAME>, Bayesian Network Induction via Local
Neighborhoods Adv. in Neural Info. Proc. Sys. 12 (MIT Press, 2000)
2. <NAME>, Learning Bayesian Network Model Structure from Data,
Thesis 2003 (Carnegie Mellon Univ.)
3. <NAME>, Efficient Learning of Markov Blanket and Markov Blanket
Classifier, Thesis 2010, UNIVERSITÉ DE MONTRÉAL
4. <NAME>, Andre<NAME>, Using Markov Blankets for
Causal Structure Learning (Journal of Machine Learning Research 9, 2008)
5. <NAME>, NeuroBN at Github
Attributes
----------
alpha : float
threshold used for deciding whether a conditional or unconditional
mutual info is said to be close to zero (independence) or not (
dependence). The error in a data entropy is on the order of ln(n+1)
- ln(n) \approx 1/n where n is the number of samples so 5/n is a
good default value for alpha.
verbose : bool
True for this prints a running commentary to console
vtx_to_MB : dict[str, list[str]]
A dictionary mapping each vertex to a list of the vertices in its
Markov Blanket. (The MB of a node consists of its parents, children
and children's parents, aka spouses).
vtx_to_nbors : dict[str, list[str]]
a dictionary mapping each vertex to a list of its neighbors. The
literature also calls the set of neighbors of a vertex its PC (
parents-children) set.
vtx_to_parents : dict[str, list[str]]
dictionary mapping each vertex to a list of its parents's names
"""
def __init__(self, states_df, alpha, verbose=False,
vtx_to_states=None, learn_later=False):
"""
Constructor
Parameters
----------
states_df : pandas.DataFrame
alpha : float
verbose : bool
vtx_to_states : dict[str, list[str]]
A dictionary mapping each node name to a list of its state names.
This information will be stored in self.bnet. If
vtx_to_states=None, constructor will learn vtx_to_states
from states_df
learn_later : bool
False if you want to call the function learn_struc() inside the
constructor. True if not.
Returns
-------
None
"""
NetStrucLner.__init__(self, False, states_df, vtx_to_states)
self.alpha = alpha
self.verbose = verbose
self.vtx_to_MB = None
self.vtx_to_parents = None
self.vtx_to_nbors = None
if not learn_later:
self.learn_struc()
def learn_struc(self):
"""
This is the orchestra conductor of the symphony. Each of the
functions it calls does a lot. By the end, a whole bnet structure
has been learned from the data and has been stored in self.bnet.
Returns
-------
None
"""
self.find_MB()
self.find_nbors()
self.orient_edges()
self.undo_cycles()
self.orient_undecided_edges()
self.fill_bnet_with_parents(self.vtx_to_parents)
def find_MB(self, vtx=None):
"""
This function finds the MB of vtx and stores it inside vtx_to_MB[
vtx]. If vtx=None, then it will find the MB of all the vertices of
the graph.
This function is overridden by all the subclasses of this class (the
ones with names starting with MB_). All the other functions called
by learn_struc() are the same for most of the subclasses of this
class.
Parameters
----------
vtx : str
Returns
-------
bool
"""
assert False
def find_nbors(self):
"""
Finds for each vtx of the graph, a list of all its neighbors and
puts that info into vtx_to_nbors.
Returns
-------
None
"""
if self.verbose:
print('\nbegin find_nbors')
vertices = self.states_df.columns
# list all vertices in case some have no neighbors
self.vtx_to_nbors = {vtx: [] for vtx in vertices}
for x in vertices:
for y in self.vtx_to_MB[x]:
# x and y are direct neighbors if
# H(x:y|sub_list) >> 0 for all sub_list in super_list
# where super_list is the smaller of the two lists
# MB(x)-y and MB(y)-x
set1 = set(self.vtx_to_MB[x]) - {y}
set2 = set(self.vtx_to_MB[y]) - {x}
if len(set1) < len(set2):
min_set = set1
else:
min_set = set2
# min_set = set1 & set2
super_list = list(min_set)
x_y_are_dep = True
for combi_len in range(len(super_list)):
for sub_list in it.combinations(super_list, combi_len):
mi = DataEntropy.cond_mut_info(self.states_df,
[x], [y], list(sub_list))
if mi < self.alpha:
x_y_are_dep = False
break
if not x_y_are_dep:
break
if x_y_are_dep:
if y not in self.vtx_to_nbors[x]:
self.vtx_to_nbors[x].append(y)
self.vtx_to_nbors[y].append(x)
if self.verbose:
print('vtx_to_nbors=')
pp.pprint(self.vtx_to_nbors, width=1)
print('end find_nbors')
def orient_edges(self):
"""
This function gives an orientation to some (not necessarily all) the
undirected edges implied by vtx_to_nbors. The edge orientation info
found by this function is stored by it in vtx_to_parents.
Returns
-------
None
"""
if self.verbose:
print('\nbegin orient_MB_edges')
vertices = self.states_df.columns
self.vtx_to_parents = {vtx: [] for vtx in vertices}
for x in vertices:
for y in self.vtx_to_nbors[x]:
# set x->y if there exists z in sub_list
# such that H(y:z| sub_list union x) >> 0
# for all sub_list in super_list,
# where super_list is the smaller of the two lists
# MB(y)-{x, z} and MB(z)-{x, y}
z_set = set(self.vtx_to_nbors[x]) - set(self.vtx_to_nbors[y])
z_set = z_set - {y}
y_to_x = False
for z in z_set:
set1 = set(self.vtx_to_MB[y]) - {z}
set2 = set(self.vtx_to_MB[z]) - {y}
if len(set1) < len(set2):
min_set = set1
else:
min_set = set2
# min_set = set1 & set2
super_list = list(min_set)
y_to_x = True
for combi_len in range(len(super_list)):
for sub_list in it.combinations(super_list, combi_len):
mi = DataEntropy.cond_mut_info(self.states_df,
[y], [z], list(set(sub_list) | {x}))
if mi < self.alpha:
y_to_x = False
break
if not y_to_x:
break
if y_to_x:
break
if y_to_x:
if y not in self.vtx_to_parents[x] and \
x not in self.vtx_to_parents[y]:
self.vtx_to_parents[x].append(y)
if self.verbose:
print('vtx_to_parents=')
pp.pprint(self.vtx_to_parents, width=1)
print('end orient_MB_edges')
def new_filled_nx_graph(self):
"""
This function fills nx_graph with the info found in vtx_to_parents.
Returns
-------
networkx.DiGraph
"""
vertices = self.states_df.columns
nx_graph = nx.DiGraph()
for vtx in vertices:
nx_graph.add_node(vtx)
nx_graph.add_edges_from([(pa_vtx, vtx)
for pa_vtx in self.vtx_to_parents[vtx]])
return nx_graph
def undo_cycles(self):
"""
When this function is called in learn_str(), the vtx_to_parents that
has been leaned so far may imply (directed) cycles. This function
uses a reasonable but not rigorous heuristic to reverse the
direction of at least one arrow in each cycle and make it a non-cycle.
Returns
-------
None
"""
if self.verbose:
print('\nbegin undo_cycles')
# vertices = self.states_df.columns
nx_graph = self.new_filled_nx_graph()
dir_edge_to_freq = {}
bad_dir_edges = []
cycles = list(nx.simple_cycles(nx_graph))
num_cyc = len(cycles)
# print('cycles=', cycles)
while num_cyc > 0:
for cyc in cycles:
for dir_edge in cyc:
if dir_edge not in dir_edge_to_freq.keys():
dir_edge_to_freq[dir_edge] = 1
else:
dir_edge_to_freq[dir_edge] += 1
# xx = {'a':100, 'b':300, 'c':5}
# print(max(xx, key=xx.get))
max_freq_edge = max(dir_edge_to_freq,
key=dir_edge_to_freq.get)
print('dir_edge_to_freq=', dir_edge_to_freq)
bad_dir_edges.append(max_freq_edge)
(beg_vtx, end_vtx) = max_freq_edge
self.vtx_to_parents[end_vtx].remove(beg_vtx)
nx_graph.remove_edge(beg_vtx, end_vtx)
cycles = list(nx.simple_cycles(nx_graph))
num_cyc = len(cycles)
for (beg_vtx, end_vtx) in reversed(bad_dir_edges):
self.vtx_to_parents[beg_vtx].append(end_vtx)
if self.verbose:
print('vtx_to_parents=')
pp.pprint(self.vtx_to_parents, width=1)
print('end undo_cycles')
def orient_undecided_edges(self):
"""
When this function is called in learn_str(), the vtx_to_parents that
has been learned so far may not include all of the edges implied by
vtx_to_nbors. Hence, there might still be some undirected edges.
This function uses a reasonable but not rigorous heuristic to orient
those undecided edges.
Returns
-------
None
"""
if self.verbose:
print('\nbegin orient_undecided_edges')
vertices = self.states_df.columns
nx_graph = self.new_filled_nx_graph()
undecided_edges = []
for vtx in vertices:
nbor_set = set(self.vtx_to_nbors[vtx]) - \
set(nx.all_neighbors(nx_graph, vtx))
for nbor in nbor_set:
if (nbor, vtx) not in undecided_edges:
undecided_edges.append((vtx, nbor))
for beg_vtx, end_vtx in undecided_edges:
# add dir_edge to nx_graph in one direction
# and see if it causes cycle
# If it doesn't, then
# add dir edge to vtx_to_parents in same direction
# and if it does add to vtx_to_parents in opposite direction
nx_graph.add_edge(beg_vtx, end_vtx)
try:
cycle_edge_list = nx.find_cycle(nx_graph, source=beg_vtx)
except nx.exception.NetworkXNoCycle:
cycle_edge_list = []
if len(cycle_edge_list) == 0:
self.vtx_to_parents[end_vtx].append(beg_vtx)
else:
self.vtx_to_parents[beg_vtx].append(end_vtx)
# restore nx_graph to original state
nx_graph.remove_edge(beg_vtx, end_vtx)
if self.verbose:
print('undecided edges=', undecided_edges)
print('vtx_to_parents=')
pp.pprint(self.vtx_to_parents, width=1)
print('end orient_undecided_edges')
@staticmethod
def MB_lner_test(LnerClass, verbose=False):
"""
This static method gives a simple example that we use to test
MB_BasedLner and its subclasses (those starting with MB_). The
method takes as input training data generated from 2 graphs (the
classical versions of wetgrass and earthquake) and it outputs a
drawing of the learned structure.
Parameters
----------
LnerClass : MB_BasedLner or subclass
This is either MB_BasedLner without quotes or the name of a
subclass of that class.
verbose : bool
Returns
-------
None
"""
path1 = 'training_data_c/WetGrass.csv'
# true:
# All arrows pointing down
# Cloudy
# / \
# Rain Sprinkler
# \ /
# WetGrass
path2 = 'training_data_c/earthquake.csv'
# true:
# All arrows pointing down
# burglary earthquake
# \ /
# alarm
# / \
# johnCalls maryCalls
for path in [path1, path2]:
print('\n######### new path=', path)
states_df = pd.read_csv(path, dtype=str)
num_sam = len(states_df.index)
alpha = None
if path == path1:
alpha = 4/num_sam
elif path == path2:
alpha = 4/num_sam
lner = LnerClass(states_df, alpha, verbose=verbose)
lner.bnet.draw(algo_num=1)
# nx.draw_networkx(lner.nx_graph)
# plt.axis('off')
# plt.show()
if __name__ == "__main__":
    # trivial smoke test when the module is executed directly
    print(5)
|
"""
The evaluation module for VA-JCR/VA-JCM models. Only works in Python >= 3.5.
Some code is forked from https://github.com/ECHO960/PKU-MMD/blob/master/evaluate.py related to the paper:
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, "PKU-MMD: A large scale benchmark for continuous multi-modal human
action understanding," arXiv preprint arXiv:1703.07475, 2017.
"""
import math
import os
import logging
import pickle
import torch
import torch.nn as nn
import numpy as np
import seaborn as sn
import pandas as pd
import matplotlib.pylab as plt
from scipy.signal import find_peaks
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchnet import meter
from sklearn.metrics import f1_score, accuracy_score, precision_recall_curve
from .misc import write_sequence_labels_to_file, ProgressBar
from .processing import is_model_on_gpu, get_data_at_observation_levels
from global_configs import DatasetProtocol, RNN_NAME, ACTIVATION_NAME, HyperParamType
from utils.misc import load_checkpoint_jcm, load_checkpoint
from dataset.skeleton_abstract import *
from dataset.skeleton_multitask import *
# Make every evaluation run reproducible, silence noisy warnings, and use a
# non-interactive matplotlib backend so figures render on headless machines.
import warnings

torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(0)
# BUGFIX: the `np.warnings` alias was removed in NumPy >= 1.24 and raises
# AttributeError there; it was always just the stdlib `warnings` module.
warnings.filterwarnings('ignore')
plt.switch_backend('agg')
class UntrimmedDatasetEvaluator(object):
    """
    Evaluates a trained model on an untrimmed (continuous) skeleton
    dataset: frame-level scores, localization scores and detection mAP.

    In prediction and true folders:
     - each file contains results for one video.
     - several lines in one file and each line contains: label, start_frame, end_frame, confidence

    NOTE: this docstring used to sit below __slots__, where it was a no-op
    string statement rather than the class docstring; it has been moved up.
    """
    __slots__ = ['dataset', 'dataset_name', 'num_classes', 'input_dim', 'device', 'model',
                 'epoch', 'output_path', 'logger', 'pred_folder', 'true_folder']

    def __init__(self, dataset_path: str,
                 device: torch.device):
        """Deserialize the dataset and derive naming/shape metadata."""
        self.dataset = deserialize_dataset(dataset_path, False)
        self.dataset_name = str(self.dataset)
        protocol = self.dataset.train_test_protocol
        if protocol is not None:
            if protocol == DatasetProtocol.CROSS_SUBJECT:
                protocol_name = 'crossSubject'
            elif protocol == DatasetProtocol.CROSS_VIEW:
                protocol_name = 'crossView'
            else:
                raise NotImplementedError
            self.dataset_name += '_' + protocol_name
        self.num_classes = self.dataset.label_size
        self.input_dim = self.dataset.get_joint_number() * 3  # 3D coordinates per joint
        if self.dataset.has_interaction:
            self.input_dim *= 2  # two skeletons for interaction datasets
        self.device = device
        self.model = None
        self.epoch = 0
        self.output_path = None
        self.true_folder = None
        self.pred_folder = None
        self.logger = None

    def set_model(self, model_path: str, output_path: str):
        """
        Load a checkpoint, create the score output folder (whose name
        encodes the hyper-parameters), configure logging, and dump the
        ground-truth label files for the test set.
        """
        import gc
        self.model, _, _, self.epoch, loss_plotter, hyperparams = load_checkpoint(model_path,
                                                                                  num_classes=self.num_classes,
                                                                                  input_dim=self.input_dim,
                                                                                  device=self.device,
                                                                                  deprecated=False)
        # BUGFIX: str.replace returns a new string; the original code
        # discarded the result, leaving backslash paths un-normalized.
        output_path = output_path.replace('\\', '/')
        model_path = model_path.replace('\\', '/')
        if not output_path.endswith('/'):
            output_path += '/'
        model_filename = model_path.split('/')[-1]
        try:
            loc_gamma = model_filename.index('gamma')
            gamma = float(model_filename[loc_gamma+5:loc_gamma+8])
        except ValueError:
            # no explicit gamma tag in the filename: focal-loss ('FL')
            # checkpoints default to gamma=1, others to 0
            if 'FL' in model_filename:
                gamma = 1.0
            else:
                gamma = 0
        subdir_name = 'scores_%s_%s_%d_va%s_ln%s_fl%s_lbd%s_tl%d_gamma%.1f' % (
            RNN_NAME[hyperparams[HyperParamType.RNN_TYPE]],
            ACTIVATION_NAME[hyperparams[HyperParamType.ACTIVATION_TYPE]],
            self.epoch,
            hyperparams[HyperParamType.ENABLE_VA],
            hyperparams[HyperParamType.USE_LAYER_NORM],
            hyperparams[HyperParamType.USE_FOCAL_LOSS],
            str(hyperparams[HyperParamType.REGRESS_LAMBDA]),
            hyperparams[HyperParamType.TRUNCATED_LENGTH],
            gamma
        )
        dropouts = hyperparams[HyperParamType.DROPOUTS]
        subdir_name += '_dp%d%d%d' % (int(dropouts[0] * 10), int(dropouts[1] * 10), int(dropouts[2] * 10))
        protocol = self.dataset.train_test_protocol
        if protocol == DatasetProtocol.CROSS_SUBJECT:
            subdir_name += '_cs'
        elif protocol == DatasetProtocol.CROSS_VIEW:
            subdir_name += '_cv'
        self.output_path = output_path + subdir_name + '/'
        os.makedirs(self.output_path, exist_ok=True)
        loss_plotter.fig_path = self.output_path + '%s_%d_train_test_curves.png' % (self.dataset_name, self.epoch)
        loss_plotter.draw()
        self.logger = logging.getLogger(self.dataset_name + '_%d' % self.epoch)
        self.logger.setLevel('DEBUG')
        file_log_handler = logging.FileHandler(self.output_path + '%s_%d_eval_logfile'
                                                                  '.log' % (self.dataset_name, self.epoch),
                                               mode='w+')
        self.logger.addHandler(file_log_handler)
        stderr_log_handler = logging.StreamHandler()
        self.logger.addHandler(stderr_log_handler)
        formatter = logging.Formatter('%(asctime)s - %(message)s')
        file_log_handler.setFormatter(formatter)
        stderr_log_handler.setFormatter(formatter)
        null_class = self.num_classes - 1  # last class id is the blank/background class
        self.pred_folder = self.output_path + 'pred_files/'
        self.true_folder = self.output_path + 'true_files/'
        os.makedirs(self.pred_folder, exist_ok=True)
        os.makedirs(self.true_folder, exist_ok=True)
        for idx, (_, label, _) in enumerate(self.dataset.testing_set):
            write_sequence_labels_to_file(label.numpy(),
                                          self.true_folder + '%s_%03d.txt' % (str(self.dataset), idx), null_class)
        self.logger.info('Training logs: %s' % loss_plotter.logs)
        gc.collect()

    def run_evaluation(self):
        """Run the full evaluation (frame-level then detection-level)."""
        assert self.model is not None, 'Model is not initialized before running the evaluation.'
        self._evaluate_basic()
        self._evaluate_advanced()

    def _evaluate_basic(self):
        """Frame-level metrics: F1, SL/EL localization scores, accuracy,
        confusion matrix and forecast precision-recall curves."""
        labels = list(self.dataset.get_labels())
        confusion_mat, raw_f1, sl, el, acc, forecast_prs = \
            evaluate_untrimmed_dataset(self.model, self.dataset, self.device, self.pred_folder)
        filename_prefix = self.output_path + '%s_%d_' % (str(self.dataset), self.epoch)
        sl, el = sl[:-1], el[:-1]  # omit the blank class
        plot_localization_scores(np.mean(sl, axis=0), filename_prefix + 'sl.png')
        plot_localization_scores(np.mean(el, axis=0), filename_prefix + 'el.png')
        with open(filename_prefix + 'sl_el.bin', 'wb+') as f:
            pickle.dump((sl, el), f)
        raw_f1, sl, el = np.round(raw_f1, decimals=4), np.round(sl, decimals=4), np.round(el, decimals=4)
        self.logger.info('Raw F1 Scores: %s (avg: %.3f)' % (list(raw_f1), round(np.mean(raw_f1).item(), 3)))
        self.logger.info('SL Scores at 20%% Threshold: %s (avg: %.3f)' % (list(sl[:, 1]),
                                                                          round(np.mean(sl[:, 1]).item(), 3)))
        self.logger.info('EL Scores at 20%% Threshold: %s (avg: %.3f)' % (list(el[:, 1]),
                                                                          round(np.mean(el[:, 1]).item(), 3)))
        self.logger.info('Average Frame-level Accuracy: %.3f' % acc)
        confusion_mat = np.round(confusion_mat, decimals=2)
        df_cm = pd.DataFrame(confusion_mat, index=[label for label in labels],
                             columns=[label for label in labels])
        width = round(self.num_classes * 1.1)
        height = round(self.num_classes * 0.9)
        cm_filename_without_extension = filename_prefix + 'confusion_mat'
        plot_confusion_mat(df_cm, fig_width=width, fig_height=height, annot_font_size=13, label_font_size=12,
                           label_rotation=30, output_filename=cm_filename_without_extension+'.png')
        with open(cm_filename_without_extension + '.bin', 'wb+') as f:
            pickle.dump(df_cm, f)
        with open(filename_prefix + 'forecast_pr.bin', 'wb+') as f:
            pickle.dump(forecast_prs, f)
        s_pr, e_pr = forecast_prs
        plot_forecast_pr(s_pr[0], s_pr[1], filename_prefix + 'start_forecast_pr.png')
        plot_forecast_pr(e_pr[0], e_pr[1], filename_prefix + 'end_forecast_pr.png')

    def _evaluate_advanced(self):
        """Detection-level metrics: F1, AP, per-class and per-video mAP,
        PR curves at several overlap thresholds, and 2D-AP."""
        v_props = []  # proposal list separated by video
        v_grounds = []  # ground-truth list separated by video
        # ========== find all proposals separated by video========
        for video in os.listdir(self.pred_folder):
            # BUGFIX: close the files deterministically instead of
            # leaking one handle per video
            with open(self.pred_folder + video, 'r') as f:
                prop = f.readlines()
            prop = [prop[x].replace(',', ' ') for x in range(len(prop))]
            prop = [[float(y) for y in prop[x].split()] for x in range(len(prop))]
            with open(self.true_folder + video, 'r') as f:
                ground = f.readlines()
            ground = [ground[x].replace(',', ' ') for x in range(len(ground))]
            ground = [[float(y) for y in ground[x].split()] for x in range(len(ground))]
            # append video name
            for x in prop:
                x.append(video)
            for x in ground:
                x.append(video)
            v_props.append(prop)
            v_grounds.append(ground)
        # ========== find all proposals separated by action categories ========
        # proposal list separated by class
        a_props = [[] for _ in range(self.num_classes)]
        # ground-truth list separated by class
        a_grounds = [[] for _ in range(self.num_classes)]
        for x in range(len(v_props)):
            for y in range(len(v_props[x])):
                a_props[int(v_props[x][y][0])].append(v_props[x][y])
        for x in range(len(v_grounds)):
            for y in range(len(v_grounds[x])):
                a_grounds[int(v_grounds[x][y][0])].append(v_grounds[x][y])
        # ========== find all proposals ========
        all_props = sum(a_props, [])
        all_grounds = sum(a_grounds, [])
        # ========== calculate protocols ========
        overlap_ratios = [0.1, 0.5]
        for overlap_ratio in overlap_ratios:
            self.logger.info('==============================================================\n'
                             'Advanced evaluations for theta = %.1f: ' % overlap_ratio)
            self.logger.info('F1 = %.3f' % get_f1(all_props, overlap_ratio, all_grounds, self.num_classes))
            self.logger.info('AP = %.3f' % get_ap(all_props, overlap_ratio, all_grounds, self.num_classes))
            self.logger.info('mAP_action = %.3f' % (sum([get_ap(a_props[x], overlap_ratio, a_grounds[x],
                                                                self.num_classes)
                                                         for x in range(self.num_classes - 1)])/(self.num_classes - 1)))
            self.logger.info('mAP_video = %.3f' % (sum([get_ap(v_props[x], overlap_ratio, v_grounds[x],
                                                               self.num_classes)
                                                        for x in range(len(v_props))])/len(v_props)))
            fig_path = self.output_path + '%s_%d_theta%.1f.png' % (str(self.dataset), self.epoch, overlap_ratio)
            plot_detect_pr(all_props, overlap_ratio, all_grounds, fig_path, self.num_classes)
        self.logger.info('2DAP = %.3f' % (sum([get_ap(all_props, (ratio + 1) * 0.05, all_grounds, self.num_classes)
                                               for ratio in range(20)]) / 20))
class MultiAttrDatasetEvaluator(object):
    """
    Evaluates a multi-task (action / subject / age) model on a trimmed
    skeleton dataset: per-task accuracies, F1 scores and confusion
    matrices. Subject/age metrics are only reported for the CROSS_SAMPLE
    protocol, where multi-task evaluation is meaningful.
    """

    def __init__(self, dataset_path: str, device: torch.device):
        """Deserialize the multi-task dataset and derive metadata."""
        self.device = device
        self.dataset = deserialize_dataset_multitask(dataset_path, False)
        self.dataset_name = str(self.dataset)
        self.input_dim = self.dataset.get_joint_number() * 3  # 3D coordinates
        if self.dataset.has_interaction:
            self.input_dim *= 2  # two skeletons for interaction datasets
        self.num_classes_tuple = (self.dataset.label_size, self.dataset.subject_label_size, self.dataset.age_label_size)
        self.model = None
        self.epoch = None
        self.output_path = None
        self.logger = None

    def set_model(self, model_path: str, output_path: str):
        """
        Load a multi-task checkpoint, create the score output folder
        (whose name encodes the hyper-parameters) and configure logging.
        """
        self.model, _, _, self.epoch, loss_plotter, hyperparams, _, test_indices = \
            load_checkpoint_jcm(model_path, num_classes=self.num_classes_tuple, input_dim=self.input_dim,
                                device=self.device)
        # the checkpoint must have been trained on the same test split
        assert test_indices == self.dataset.indices_test
        # BUGFIX: str.replace returns a new string; the original code
        # discarded the result, leaving backslash paths un-normalized.
        output_path = output_path.replace('\\', '/')
        model_path = model_path.replace('\\', '/')
        if not output_path.endswith('/'):
            output_path += '/'
        model_filename = model_path.split('/')[-1]
        try:
            loc_gamma = model_filename.index('gamma')
            gamma = float(model_filename[loc_gamma + 5:loc_gamma + 8])
        except ValueError:
            # no explicit gamma tag in the filename: focal-loss ('FL')
            # checkpoints default to gamma=1, others to 0
            if 'FL' in model_filename:
                gamma = 1.0
            else:
                gamma = 0
        subdir_name = 'scores_%s_%s_%d_va%s_ln%s_fl%s_lbd%s_tl%d_gamma%.1f' % (
            RNN_NAME[hyperparams[HyperParamType.RNN_TYPE]],
            ACTIVATION_NAME[hyperparams[HyperParamType.ACTIVATION_TYPE]],
            self.epoch,
            hyperparams[HyperParamType.ENABLE_VA],
            hyperparams[HyperParamType.USE_LAYER_NORM],
            hyperparams[HyperParamType.USE_FOCAL_LOSS],
            str(hyperparams[HyperParamType.REGRESS_LAMBDA]),
            hyperparams[HyperParamType.TRUNCATED_LENGTH],
            gamma
        )
        dropouts = hyperparams[HyperParamType.DROPOUTS]
        subdir_name += '_dp%d%d%d' % (int(dropouts[0] * 10), int(dropouts[1] * 10), int(dropouts[2] * 10))
        protocol = self.dataset.train_test_protocol
        if protocol == DatasetProtocol.CROSS_SUBJECT:
            subdir_name += '_csb'
        elif protocol == DatasetProtocol.CROSS_SAMPLE:
            subdir_name += '_csa'
        elif protocol == DatasetProtocol.CROSS_AGE:
            subdir_name += '_cag'
        elif protocol == DatasetProtocol.CROSS_GENDER:
            subdir_name += '_cgd'
        self.output_path = output_path + subdir_name + '/'
        os.makedirs(self.output_path, exist_ok=True)
        loss_plotter.fig_path = self.output_path + '%s_%d_train_test_curves.png' % (self.dataset_name, self.epoch)
        loss_plotter.draw()
        self.logger = logging.getLogger(self.dataset_name + '_%d' % self.epoch)
        self.logger.setLevel('DEBUG')
        file_log_handler = logging.FileHandler(self.output_path + '%s_%d_eval_logfile'
                                                                  '.log' % (self.dataset_name, self.epoch),
                                               mode='w+')
        self.logger.addHandler(file_log_handler)
        stderr_log_handler = logging.StreamHandler()
        self.logger.addHandler(stderr_log_handler)
        formatter = logging.Formatter('%(asctime)s - %(message)s')
        file_log_handler.setFormatter(formatter)
        stderr_log_handler.setFormatter(formatter)

    def run_evaluation(self):
        """Evaluate all tasks and write accuracies, F1 scores and
        confusion matrices to the output folder."""
        assert self.model is not None, 'Model is not initialized before running the evaluation.'
        enable_multitask = (self.dataset.train_test_protocol == DatasetProtocol.CROSS_SAMPLE)
        action_labels = self.dataset.get_labels()
        subject_labels = range(self.dataset.subject_label_size)  # TODO
        age_labels = range(self.dataset.age_label_size)  # TODO
        confusion_mats, f1s, accs, acc_at_observ_lvls = \
            evaluate_multiattr_dataset(self.model, self.dataset, self.device)
        filename_prefix = self.output_path + '%s_%d_' % (str(self.dataset), self.epoch)
        with open(filename_prefix + 'acc_at_lvls.bin', 'wb+') as f:
            pickle.dump(acc_at_observ_lvls, f)
        # Ignore classes without any predictions (i.e. missing in test set)
        action_conf, subject_conf, age_conf = confusion_mats
        action_ignore_idx = np.where(~action_conf.any(axis=1))[0]
        subject_ignore_idx = np.where(~subject_conf.any(axis=1))[0]
        # Accuracies and F1s
        action_f1, subject_f1, age_f1 = f1s
        action_acc, subject_acc, age_acc = accs
        action_acc_lvls, subject_acc_lvls, age_acc_lvls = np.round(acc_at_observ_lvls, 4)
        action_f1, subject_f1, age_f1 = np.round(action_f1, 4), np.round(subject_f1, 4), np.round(age_f1, 4)
        action_f1 = np.delete(action_f1, action_ignore_idx, 0)
        self.logger.info('Action Accuracies: %s (avg: %.3f)' % (list(action_acc_lvls), action_acc))
        self.logger.info('Action F1 Scores: %s (avg: %.3f)' % (list(action_f1), round(np.mean(action_f1).item(), 3)))
        if enable_multitask:
            subject_f1 = np.delete(subject_f1, subject_ignore_idx, 0)
            self.logger.info('Subject Accuracies: %s (avg: %.3f)' % (list(subject_acc_lvls), subject_acc))
            self.logger.info('Subject F1 Scores: %s (avg: %.3f)' % (list(subject_f1),
                                                                    round(np.mean(subject_f1).item(), 3)))
            self.logger.info('Age Accuracies: %s (avg: %.3f)' % (list(age_acc_lvls), age_acc))
            self.logger.info('Age F1 Scores: %s (avg: %.3f)' % (list(age_f1),
                                                                round(np.mean(age_f1).item(), 3)))
        # Confusion matrices
        action_conf, subject_conf, age_conf = np.round(action_conf, 2), np.round(subject_conf, 2), np.round(age_conf, 2)
        cm_filename_without_extension = filename_prefix + 'confusion_mat'
        action_labels = np.delete(action_labels, action_ignore_idx, 0)
        action_conf = np.delete(action_conf, action_ignore_idx, 0)
        action_conf = np.delete(action_conf, action_ignore_idx, 1)
        act_df = pd.DataFrame(action_conf, index=[label for label in action_labels],
                              columns=[label for label in action_labels])
        plot_confusion_mat(act_df, fig_width=round(self.num_classes_tuple[0] * 1.1),
                           fig_height=round(self.num_classes_tuple[0] * 0.9),
                           annot_font_size=13, label_font_size=12, label_rotation=30,
                           output_filename=cm_filename_without_extension + '_action.png')
        with open(cm_filename_without_extension + '_action.bin', 'wb+') as f:
            pickle.dump(act_df, f)
        if enable_multitask:
            subject_labels = np.delete(subject_labels, subject_ignore_idx, 0)
            subject_conf = np.delete(subject_conf, subject_ignore_idx, 0)
            subject_conf = np.delete(subject_conf, subject_ignore_idx, 1)
            sub_df = pd.DataFrame(subject_conf, index=[label for label in subject_labels],
                                  columns=[label for label in subject_labels])
            plot_confusion_mat(sub_df, fig_width=round(self.num_classes_tuple[1] * 1.1),
                               fig_height=round(self.num_classes_tuple[1] * 0.9),
                               annot_font_size=13, label_font_size=12, label_rotation=0,
                               output_filename=cm_filename_without_extension + '_subject.png')
            with open(cm_filename_without_extension + '_subject.bin', 'wb+') as f:
                pickle.dump(sub_df, f)
            age_df = pd.DataFrame(age_conf, index=[label for label in age_labels],
                                  columns=[label for label in age_labels])
            plot_confusion_mat(age_df, fig_width=round(self.num_classes_tuple[2] * 1.1),
                               fig_height=round(self.num_classes_tuple[2] * 0.9),
                               annot_font_size=13, label_font_size=12, label_rotation=0,
                               output_filename=cm_filename_without_extension + '_age.png')
            with open(cm_filename_without_extension + '_age.bin', 'wb+') as f:
                pickle.dump(age_df, f)
def calc_pr(positive, proposal, ground):
    """
    Calculate precision and recall

    :param positive: number of positive proposals
    :param proposal: number of all proposals
    :param ground: number of ground truths
    :return: (precision, recall); (0, 0) when either count is zero
    """
    if proposal and ground:
        return positive / proposal, positive / ground
    # no proposals or no ground truths: both metrics are defined as zero
    return 0, 0
def match(lst, ratio, ground, num_classes):
    """
    Match proposal and ground truth

    correspond_map: record matching ground truth for each proposal
    count_map: record how many proposals is each ground truth matched by
    index_map: index_list of each video for ground truth

    :param lst: list of proposals(label, start, end, confidence, video_name)
    :param ratio: overlap ratio
    :param ground: list of ground truth(label, start, end, confidence, video_name)
    :param num_classes:
    """
    def overlap(prop, gt):
        # temporal IoU between a proposal and a ground truth; 0 when the
        # label or the video differs
        l_p, s_p, e_p, c_p, v_p = prop
        l_g, s_g, e_g, c_g, v_g = gt
        if v_p != v_g or int(l_p) != int(l_g):
            return 0
        denominator = max(e_p, e_g) - min(s_p, s_g)
        if not denominator:  # avoid division by zero, i.e. one-frame prediction
            return 0
        return (min(e_p, e_g) - max(s_p, s_g)) / denominator
    corres_map = [-1] * len(lst)
    count_map = [0] * len(ground)
    # generate index_map to speed up
    index_map = [[] for _ in range(num_classes)]
    for x in range(len(ground)):
        index_map[int(ground[x][0])].append(x)
    for x in range(len(lst)):
        # only compare against ground truths carrying the same label
        for y in index_map[int(lst[x][0])]:
            if overlap(lst[x], ground[y]) < ratio:
                continue
            # keep the best-overlapping ground truth seen so far.
            # NOTE(review): while corres_map[x] is still -1 this indexes
            # ground[-1] (Python negative indexing). It appears benign —
            # a same-label/video ground truth with higher overlap would
            # win when its own index y is visited — but confirm against
            # the upstream PKU-MMD evaluate.py before changing it.
            if overlap(lst[x], ground[y]) < overlap(lst[x], ground[corres_map[x]]):
                continue
            corres_map[x] = y
        if corres_map[x] != -1:
            count_map[corres_map[x]] += 1
    # a ground truth matched by at least one proposal counts as positive
    positive = sum([(x > 0) for x in count_map])
    return corres_map, count_map, positive
def plot_detect_pr(lst, ratio, ground, output_filename, num_classes):
    """
    plot precision-recall figure of given proposal

    Also pickles the raw (recalls, precisions) lists next to the figure.

    :param lst: list of proposals(label, start, end, confidence, video_name)
    :param ratio: overlap ratio
    :param ground: list of ground truth(label, start, end, confidence, video_name)
    :param output_filename:
    :param num_classes
    """
    lst.sort(key=lambda y: y[3])  # sorted by confidence
    correspond_map, count_map, positive = match(lst, ratio, ground, num_classes)
    number_proposal = len(lst)
    number_ground = len(ground)
    old_precision, old_recall = calc_pr(positive, number_proposal, number_ground)
    recalls = [old_recall]
    precisions = [old_precision]
    # walk from the lowest-confidence proposal upward, removing one
    # proposal per step and recording the resulting (recall, precision)
    for x in range(len(lst)):
        number_proposal -= 1
        if correspond_map[x] == -1:
            continue
        count_map[correspond_map[x]] -= 1
        if count_map[correspond_map[x]] == 0:
            # its ground truth lost its last matching proposal
            positive -= 1
        precision, recall = calc_pr(positive, number_proposal, number_ground)
        # keep the precision envelope monotone (interpolated PR curve)
        if precision > old_precision:
            old_precision = precision
        recalls.append(recall)
        precisions.append(old_precision)
    bin_filename_prefix = output_filename.replace('.png', '')
    with open(bin_filename_prefix + '_recall_precision.bin', 'wb+') as f:
        pickle.dump((recalls, precisions), f)
    fig = plt.figure()
    plt.axis([0, 1, 0, 1])
    plt.plot(recalls, precisions, 'r')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.grid(True)
    plt.title('Action Detection Precision-Recall Curve for theta = %.1f' % ratio)
    if not output_filename.endswith('.png'):
        output_filename += '.png'
    fig.savefig(output_filename)
    # BUGFIX: release the figure; this function is called once per overlap
    # threshold and un-closed figures accumulate in pyplot's registry
    plt.close(fig)
def get_f1(lst, ratio, ground, num_classes):
    """
    f1-score

    :param lst: list of proposals(label, start, end, confidence, video_name)
    :param ratio: overlap ratio
    :param ground: list of ground truth(label, start, end, confidence, video_name)
    :param num_classes:
    :return: F1 score; 0 when precision and recall are both zero
    """
    correspond_map, count_map, positive = match(lst, ratio, ground, num_classes)
    precision, recall = calc_pr(positive, len(lst), len(ground))
    # BUGFIX: guard against 0/0 when no proposal matches any ground truth
    if precision + recall == 0:
        return 0
    score = 2 * precision * recall / (precision + recall)
    return score
def get_ap(lst, ratio, ground, num_classes):
    """
    Interpolated Average Precision

    score = sigma(precision(recall) * delta(recall))
    Note that when overlap ratio < 0.5,
    one ground truth will correspond to many proposals
    In that case, only one positive proposal is counted

    :param lst: list of proposals(label, start, end, confidence, video_name)
    :param ratio: overlap ratio
    :param ground: list of ground truth(label, start, end, confidence, video_name)
    :param num_classes:
    """
    lst.sort(key=lambda x: x[3])  # sorted by confidence
    correspond_map, count_map, positive = match(lst, ratio, ground, num_classes)
    score = 0
    number_proposal = len(lst)
    number_ground = len(ground)
    old_precision, old_recall = calc_pr(positive, number_proposal, number_ground)
    # walk from the lowest-confidence proposal upward, dropping one
    # proposal per step and integrating precision over the recall change
    for x in range(len(lst)):
        number_proposal -= 1
        if correspond_map[x] == -1:
            # unmatched proposal: dropping it only affects precision
            continue
        count_map[correspond_map[x]] -= 1
        if count_map[correspond_map[x]] == 0:
            # its ground truth just lost its last matching proposal
            positive -= 1
        precision, recall = calc_pr(positive, number_proposal, number_ground)
        # interpolation: keep the precision envelope non-decreasing
        if precision > old_precision:
            old_precision = precision
        score += old_precision * (old_recall - recall)
        old_recall = recall
    return score
def plot_confusion_mat(dataframe: pd.DataFrame, fig_width: int, fig_height: int, annot_font_size: int,
                       label_font_size: int, label_rotation: int, output_filename: str):
    """
    Render a confusion-matrix DataFrame as an annotated heatmap and save
    it to output_filename.

    :param dataframe: square confusion matrix, labels as index/columns
    :param fig_width: figure width in inches
    :param fig_height: figure height in inches
    :param annot_font_size: font size for the cell annotations
    :param label_font_size: font size for the tick labels
    :param label_rotation: rotation (degrees) applied to the tick labels
    :param output_filename: path of the image file to write
    """
    plt.figure(figsize=(fig_width, fig_height))
    heatmap = sn.heatmap(dataframe, annot=True, vmin=0, vmax=1, square=True, annot_kws={'size': annot_font_size},
                         xticklabels=True, yticklabels=True)
    heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),
                                 rotation=label_rotation, ha='right', fontsize=label_font_size)
    heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),
                                 rotation=label_rotation, ha='right', fontsize=label_font_size)
    plt.xlabel('Predicted Label', fontsize=label_font_size+1)
    plt.ylabel('True Label', fontsize=label_font_size+1)
    plt.title('Confusion Matrix')
    plt.gcf().subplots_adjust(left=0.3, bottom=0.15)
    cmf = heatmap.get_figure()
    cmf.savefig(output_filename)
    # BUGFIX: close the figure; the evaluators call this several times per
    # run and un-closed figures accumulate in pyplot's registry
    plt.close(cmf)
def evaluate_multiattr_sequence(sequence: torch.Tensor, model: nn.Module):
    """
    Run the model frame by frame over a single sequence and collect the
    per-frame argmax predictions for the three tasks.

    :param sequence: 2-D tensor, one row of features per frame
    :param model: network whose forward returns (_, (action_out,
        subject_out, age_out)); it is fed one frame at a time, so any
        recurrent state inside the model carries across frames
    :return: three int64 tensors of length seq_len holding the predicted
        action, subject and age class ids per frame
    """
    assert sequence.dim() == 2
    seq_len = len(sequence)
    predicted_action_classes = torch.zeros(seq_len, dtype=torch.int64)
    predicted_subject_classes = torch.zeros(seq_len, dtype=torch.int64)
    predicted_age_classes = torch.zeros(seq_len, dtype=torch.int64)
    model.eval()
    with torch.no_grad():
        # torch.autograd.Variable is deprecated and a no-op on modern
        # PyTorch; iterating the tensor directly is equivalent
        for idx, frame in enumerate(sequence):
            # feed each frame as a (batch=1, time=1, features) slice
            _, (action_out, subject_out, age_out) = model(frame.unsqueeze(0).unsqueeze(0))
            predicted_action_classes[idx] = torch.max(action_out, -1)[1].item()
            predicted_subject_classes[idx] = torch.max(subject_out, -1)[1].item()
            predicted_age_classes[idx] = torch.max(age_out, -1)[1].item()
    return predicted_action_classes, predicted_subject_classes, predicted_age_classes
def evaluate_multiattr_dataset(model: nn.Module, dataset: SkeletonDatasetMultiTask, device):
    """Evaluates *model* on the test split of a multi-attribute skeleton dataset.

    For every test sequence, per-frame predictions for the action/subject/age
    heads are produced via evaluate_multiattr_sequence and accumulated into
    global prediction/label vectors, per-head confusion matrices, and accuracy
    sampled at 11 observation levels.

    Returns:
        ((action_cm, subject_cm, age_cm),   # normalized confusion matrices
         (action_f1, subject_f1, age_f1),   # per-class F1 arrays
         (action_acc, subject_acc, age_acc),# overall accuracies
         acc_at_observ_lvls)                # (3, 11): accuracy vs. observation level
    """
    model.to(device)
    num_action_classes = dataset.label_size
    num_subject_classes = dataset.subject_label_size
    num_age_classes = dataset.age_label_size
    # Rows: action/subject/age; columns: 11 observation levels (0%..100%).
    acc_at_observ_lvls = np.zeros((3, 11))
    all_pred_action = torch.zeros(0, dtype=torch.int64)
    all_true_action = torch.zeros(0, dtype=torch.int64)
    all_pred_subject = torch.zeros(0, dtype=torch.int64)
    all_true_subject = torch.zeros(0, dtype=torch.int64)
    all_pred_age = torch.zeros(0, dtype=torch.int64)
    all_true_age = torch.zeros(0, dtype=torch.int64)
    action_confusion = meter.ConfusionMeter(num_action_classes)
    subject_confusion = meter.ConfusionMeter(num_subject_classes)
    age_confusion = meter.ConfusionMeter(num_age_classes)
    num_test_samples = len(dataset.testing_set)
    pg = ProgressBar(80, num_test_samples)
    for idx, (seq, action_label, subject_label, age_label) in enumerate(dataset.testing_set, 0):
        pred_act, pred_sub, pred_age = evaluate_multiattr_sequence(seq.to(device), model)
        # Sample predictions/labels at the 11 observation levels; shape after
        # transpose is (3 heads, 11 levels).
        sampled_pred = get_data_at_observation_levels(np.vstack((pred_act, pred_sub, pred_age)).transpose())
        sampled_true = get_data_at_observation_levels(np.vstack((action_label, subject_label, age_label)).transpose())
        sampled_pred, sampled_true = sampled_pred.transpose(), sampled_true.transpose()
        for category_idx, category_true in enumerate(sampled_true):
            # Boolean comparison adds 1 at each level where the prediction matches.
            acc_at_observ_lvls[category_idx] += (category_true == sampled_pred[category_idx])
        all_true_action = torch.cat((all_true_action, action_label))
        all_pred_action = torch.cat((all_pred_action, pred_act))
        all_true_subject = torch.cat((all_true_subject, subject_label))
        all_pred_subject = torch.cat((all_pred_subject, pred_sub))
        all_true_age = torch.cat((all_true_age, age_label))
        all_pred_age = torch.cat((all_pred_age, pred_age))
        pg.update(idx + 1)
    action_confusion.add(all_pred_action, all_true_action)
    subject_confusion.add(all_pred_subject, all_true_subject)
    age_confusion.add(all_pred_age, all_true_age)
    action_confusion, _ = get_normalized_confusion_matrix_and_f1_score(action_confusion.conf)
    subject_confusion, _ = get_normalized_confusion_matrix_and_f1_score(subject_confusion.conf)
    age_confusion, _ = get_normalized_confusion_matrix_and_f1_score(age_confusion.conf)
    avg_action_f1 = f1_score(all_true_action.numpy(), all_pred_action.numpy(), average=None)
    avg_subject_f1 = f1_score(all_true_subject.numpy(), all_pred_subject.numpy(), average=None)
    avg_age_f1 = f1_score(all_true_age.numpy(), all_pred_age.numpy(), average=None)
    # Convert per-level match counts to per-level accuracy.
    acc_at_observ_lvls /= num_test_samples
    avg_action_acc = accuracy_score(all_true_action.numpy(), all_pred_action.numpy())
    avg_subject_acc = accuracy_score(all_true_subject.numpy(), all_pred_subject.numpy())
    avg_age_acc = accuracy_score(all_true_age.numpy(), all_pred_age.numpy())
    return ((action_confusion, subject_confusion, age_confusion),
            (avg_action_f1, avg_subject_f1, avg_age_f1),
            (avg_action_acc, avg_subject_acc, avg_age_acc),
            acc_at_observ_lvls)
def plot_localization_scores(l_scores: np.ndarray, out_filename: str):
    """Plots and saves the figure of R-based localization scores against thresholds."""
    assert len(l_scores) == 10
    figure = plt.figure()
    thresholds = np.arange(0, 1.0, 0.1)
    plt.plot(thresholds, l_scores, 'g', linewidth=2, markersize=12)
    plt.grid(True)
    current_axes = plt.gca()
    current_axes.set_xlim([0, 0.9])
    current_axes.set_ylim([0, 1])
    plt.xlabel('Threshold')
    plt.ylabel('Localization Score')
    plt.title('Localization Scores at Different Thresholds')
    # Force a .png extension so savefig always writes an image file.
    target = out_filename if out_filename.endswith('.png') else out_filename + '.png'
    figure.savefig(target)
def evaluate_untrimmed_sequence(sequence: torch.Tensor, model: nn.Module, true_confidence: np.ndarray):
    """Evaluates an untrimmed sequence, i.e. a sequence that contains multiple classes.

    Args:
        sequence: (seq_len, feature_dim) tensor, fed to the model frame by frame.
        model: network returning (features, class_scores, regression_out) per frame.
        true_confidence: (seq_len, num_targets, 2) ground-truth start/end confidence.

    Returns:
        Tuple of (per-frame predicted classes, start-localization scores (num_targets, 10),
        end-localization scores (num_targets, 10), per-frame class probabilities,
        indicator array of which actions occur in the sequence, clamped regression output).
    """
    assert sequence.dim() == 2
    seq_len = len(sequence)
    predicted_classes = []
    class_probs = []
    tot_r_out = torch.Tensor()
    model.eval()
    # Aggregates result at each time step
    with torch.no_grad():
        for frame in Variable(sequence):
            _, s_out, r_out = model(frame.unsqueeze(0).unsqueeze(0))
            class_prob, pred_idx = torch.max(s_out, -1)
            predicted_classes.append(pred_idx.item())
            class_probs.append(class_prob.item())
            tot_r_out = torch.cat((tot_r_out, r_out.cpu()))
    tot_r_out = torch.clamp(tot_r_out, min=0, max=1).detach().numpy()
    num_targets = true_confidence.shape[1]
    all_sl = np.zeros((num_targets, 10))
    all_el = np.zeros((num_targets, 10))
    is_action_in_seq = np.zeros(num_targets)
    # Evaluates R-based SL/EL scores for each class except the background class ~
    for i in range(num_targets):
        if np.max(true_confidence[:, i]) > 0:
            # Ground-truth boundaries are the unit peaks of the confidence curves;
            # padding lets find_peaks detect peaks at the sequence edges.
            true_starts = find_peaks(np.pad(true_confidence[:, i, 0], (1, 1), 'constant'),
                                     height=1, distance=8)[0] - 1
            predicted_starts = _localize_boundaries(tot_r_out[:, i, 0], true_starts, seq_len)
            true_ends = find_peaks(np.pad(true_confidence[:, i, 1], (1, 1), 'constant'),
                                   height=1, distance=8)[0] - 1
            # BUGFIX: the end-prediction matrix is now sized by len(true_ends);
            # it was previously sized by len(true_starts), which breaks (or leaves
            # stale -1 columns) whenever the two counts differ.
            predicted_ends = _localize_boundaries(tot_r_out[:, i, 1], true_ends, seq_len)
            sl, el = get_localization_score_arrays(predicted_starts, predicted_ends, true_starts, true_ends)
            all_sl[i] = sl
            all_el[i] = el
            is_action_in_seq[i] += 1  # increment if the sequence contains such action
    return predicted_classes, all_sl, all_el, class_probs, is_action_in_seq, tot_r_out


def _localize_boundaries(confidence_curve: np.ndarray, true_times: np.ndarray, seq_len: int) -> np.ndarray:
    """For each ground-truth boundary time, takes the strongest predicted peak in a
    +/-30-frame window and keeps it at every confidence threshold (0.0..0.9) it
    strictly exceeds; missing detections stay at the -1 sentinel.

    Returns an int matrix of shape (10, len(true_times)).
    """
    # dtype=int replaces the deprecated/removed np.int alias.
    predicted = -np.ones((10, len(true_times)), dtype=int)
    for idx, true_time in enumerate(true_times):
        begin_scan_idx = max(true_time - 30, 0)
        end_scan_idx = min(true_time + 30, seq_len - 1)
        peak = np.argmax(confidence_curve[begin_scan_idx:end_scan_idx + 1]) + begin_scan_idx
        for th in range(10):
            if confidence_curve[peak] > th / 10:
                predicted[th, idx] = peak
    return predicted
def evaluate_untrimmed_dataset(model: nn.Module, dataset: SkeletonDataset, device, output_path):
    """Evaluates untrimmed datasets by averaging sequence-wise results.

    Runs evaluate_untrimmed_sequence on every test sequence, writes the
    per-frame predictions to '<dataset>_<idx>.txt' files under *output_path*,
    and aggregates confusion matrix, F1, accuracy, localization scores and
    forecast precision-recall curves over the whole test set.

    Returns:
        (normalized confusion matrix, per-class F1, averaged start-localization
        scores, averaged end-localization scores, overall accuracy,
        ((start_recalls, start_precisions), (end_recalls, end_precisions))).
    """
    # Streaming datasets only: sequences are loaded one at a time below.
    assert dataset.preloaded is False
    model.to(device)
    num_classes = dataset.label_size
    # By convention the last class is the background/"no action" class.
    null_class = num_classes - 1
    confusion = meter.ConfusionMeter(num_classes)
    predictions = labels = np.array([])
    tot_sl = np.zeros((num_classes, 10))
    tot_el = np.zeros((num_classes, 10))
    tot_train_regress_confidence = np.array([]).reshape((0, num_classes, 2))
    tot_test_regress_confidence = np.array([]).reshape((0, num_classes, 2))
    action_in_seq_counts = np.zeros(num_classes)
    pg = ProgressBar(80, len(dataset.testing_set))
    for idx, (seq, label, confidence) in enumerate(dataset.testing_set, 0):
        confidence = confidence.numpy()
        y_pred, sl, el, class_probs, action_in_seq, r_out = \
            evaluate_untrimmed_sequence(seq.to(device), model, confidence)
        tot_train_regress_confidence = np.concatenate((tot_train_regress_confidence, confidence))
        tot_test_regress_confidence = np.concatenate((tot_test_regress_confidence, r_out))
        y_true = label.cpu().numpy().flatten()
        write_sequence_labels_to_file(y_pred, output_path + '%s_%03d.txt' % (str(dataset), idx),
                                      null_class, class_probs)
        predictions = np.append(predictions, y_pred)
        labels = np.append(labels, y_true)
        tot_sl += sl
        tot_el += el
        action_in_seq_counts += action_in_seq
        confusion.add(torch.LongTensor(y_pred), label)
        pg.update(idx + 1)
    norm_confusion_mat, _ = get_normalized_confusion_matrix_and_f1_score(confusion.conf)
    # Average localization scores only over the sequences that contain each action.
    action_in_seq_counts = np.expand_dims(action_in_seq_counts, -1)
    avg_sl, avg_el = tot_sl / action_in_seq_counts, tot_el / action_in_seq_counts
    avg_f1 = f1_score(labels, predictions, average=None)
    avg_acc = accuracy_score(labels, predictions)
    avg_forecast_prs = calc_forecast_prs(tot_train_regress_confidence, tot_test_regress_confidence)
    return norm_confusion_mat, avg_f1, avg_sl, avg_el, avg_acc, avg_forecast_prs
def get_normalized_confusion_matrix_and_f1_score(confusion_matrix: np.ndarray):
    """Returns the normalized confusion matrix and F1 from a raw confusion matrix.

    Each row is normalized by its ground-truth count; classes with no
    ground-truth samples keep an all-zero row and a zero F1 score.
    """
    n = confusion_matrix.shape[0]
    f1_scores = np.zeros(n)
    normalized = np.zeros([n, n])
    for cls in range(n):
        support = np.sum(confusion_matrix[cls, :])
        if support == 0:
            continue
        normalized[cls, :] = confusion_matrix[cls, :] / support
        # F1 = 2*TP / (|ground truth| + |predicted|) for this class.
        predicted_total = np.sum(confusion_matrix[:, cls])
        f1_scores[cls] = 2. * confusion_matrix[cls, cls] / (support + predicted_total)
    return normalized, f1_scores
def get_localization_score_arrays(predicted_starts: np.ndarray, predicted_ends: np.ndarray,
                                  true_starts: np.ndarray, true_ends: np.ndarray):
    """Returns R-based localization scores at different confidence thresholds.

    Scores decay exponentially with the frame distance between prediction and
    ground truth, scaled by the action duration. NOTE: *predicted_starts* and
    *predicted_ends* are modified in place — negative entries (no detection)
    are pushed far away so that they contribute ~0 to the score.
    """
    assert predicted_starts.shape[-1] == predicted_ends.shape[-1] == true_starts.shape[-1] == true_ends.shape[-1]
    durations = true_starts - true_ends
    durations[durations >= 0] = -1  # invalid ground truths produced by downsampling
    decay_base = np.exp(1 / durations)
    # Missing detections (negative sentinel) are sent far from any ground truth.
    predicted_starts[predicted_starts < 0] = 1e6
    predicted_ends[predicted_ends < 0] = -1e6
    start_scores = np.mean(decay_base ** np.abs(predicted_starts - true_starts), axis=1)
    end_scores = np.mean(decay_base ** np.abs(predicted_ends - true_ends), axis=1)
    return start_scores, end_scores  # both of shape (10, ) for thresholds 0-0.9
def calc_forecast_prs(true_confidence: np.ndarray, pred_confidence: np.ndarray):
    """Evaluates precision-recall for start/end forecasts.

    Args:
        true_confidence: (seq_len, num_classes, 2) ground-truth start/end
            confidence curves; the last class (background) is excluded.
        pred_confidence: same shape, predicted confidence curves.

    Returns:
        ((start_recalls, start_precisions), (end_recalls, end_precisions)),
        each from sklearn's precision_recall_curve with the first point dropped.
    """
    assert true_confidence.shape == pred_confidence.shape
    # Split into per-boundary (start/end) curves, excluding the background class.
    true_start_confidence, true_end_confidence = true_confidence[:, :-1].transpose((2, 0, 1))
    pred_start_confidence, pred_end_confidence = pred_confidence[:, :-1].transpose((2, 0, 1))
    seq_len, num_actions = true_start_confidence.shape
    # One-hot target matrices marking the forecast window before each true boundary.
    # dtype=int replaces the deprecated/removed np.int alias.
    true_start_mat = np.zeros((num_actions, seq_len), dtype=int)
    pred_start_mat = pred_start_confidence.transpose()
    true_end_mat = np.zeros((num_actions, seq_len), dtype=int)
    pred_end_mat = pred_end_confidence.transpose()
    for i in range(num_actions):
        _mark_forecast_windows(true_start_confidence[:, i], true_start_mat[i], seq_len)
        _mark_forecast_windows(true_end_confidence[:, i], true_end_mat[i], seq_len)
    start_precisions, start_recalls, _ = precision_recall_curve(true_start_mat.ravel(),
                                                                pred_start_mat.ravel())
    end_precisions, end_recalls, _ = precision_recall_curve(true_end_mat.ravel(),
                                                            pred_end_mat.ravel())
    # Drop the first PR point, consistent with the original evaluation protocol.
    return (start_recalls[1:], start_precisions[1:]), (end_recalls[1:], end_precisions[1:])


def _mark_forecast_windows(confidence: np.ndarray, target_row: np.ndarray, seq_len: int):
    """Sets *target_row* to 1 over the 15-frame window ending at each ground-truth
    boundary peak in *confidence*; padding lets find_peaks register edge peaks."""
    peaks = find_peaks(np.pad(confidence, (1, 1), 'constant'), height=1, distance=8)[0] - 1
    for peak in peaks:
        begin_scan_idx = max(peak - 15, 0)
        end_scan_idx = min(peak, seq_len - 1)  # strictly forecast, not causal / lagging
        target_row[begin_scan_idx:end_scan_idx + 1] = 1
def plot_forecast_pr(recalls: np.ndarray, precisions: np.ndarray, output_filename: str):
    """Plots precision-recall curve for start/end forecasts."""
    plt.figure()
    plt.plot(recalls, precisions, 'b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.grid(True)
    current_axes = plt.gca()
    current_axes.set_xlim([0, 1])
    current_axes.set_ylim([0, 1])
    plt.title('Action Forecast Precision-Recall Curve')
    # Force a .png extension so savefig always writes an image file.
    target = output_filename if output_filename.endswith('.png') else output_filename + '.png'
    plt.savefig(target)
|
import tensorflow as tf
import tensorflow.contrib as tc
import numpy as np
from baselines.common.minout import minout
from tensorflow.python.framework import ops
class Model(object):
    """Base class for TF1 graph models: groups variable lookups under *name*'s scope."""

    def __init__(self, name):
        self.name = name

    @property
    def vars(self):
        """All global variables created under this model's variable scope."""
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)

    @property
    def trainable_vars(self):
        """Trainable variables created under this model's variable scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

    @property
    def perturbable_vars(self):
        """Trainable variables excluding layer-normalization parameters."""
        return [v for v in self.trainable_vars if 'LayerNorm' not in v.name]
class CollectiveDecActorGridPatrolling(Model):
    """Decentralized collective actor for the grid-patrolling domain.

    For each of the len(nb_actions) agents, builds a small dense network over
    that agent's local observation (the adjacent cells plus the element-wise
    product of the second and third observation segments at those cells) and
    outputs a per-agent softmax action distribution, stacked along axis 1.
    """
    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[] , stateNum = 0, N = 1):
        # NOTE(review): adjacent_list=[] is a mutable default argument; it is only
        # read here so the shared-state hazard does not bite, but confirm.
        super(CollectiveDecActorGridPatrolling, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.layer_norm = layer_norm  # redundant: already assigned above
        self.batch_norm = batch_norm
        self.adjacent_list = adjacent_list
        self.stateNum = stateNum
        self.N = float(N)
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        # self.obs_mask[i,:48] = 1
        # self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        # self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, obs, reuse=False):
        # Builds (or reuses) the actor graph; returns a tensor of softmax action
        # probabilities of shape (batch, num_agents, actions_per_agent).
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # obs = obs*8000.0
            # y = []
            local_obs_joint = []
            #visiable_obs = tf.concat([obs[:, :self.stateNum*2], obs[:, self.stateNum:self.stateNum*2]*self.N*obs[:, self.stateNum*2:]], axis=1)
            for i in range(len(self.nb_actions)):
                # Local observation: adjacent cells' values plus the product of the
                # two later observation segments at those cells.
                local_obs = [obs[:,j] for j in self.adjacent_list[i]]
                local_obs.extend([obs[:, self.stateNum + j]*obs[:, 2*self.stateNum + j] for j in self.adjacent_list[i]])
                local_obs = tf.stack(local_obs, axis=1)
                local_obs_joint.append(local_obs)
                #local_obs = visiable_obs#obs[:, :self.stateNum*2]
                x = local_obs
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x,
                                        hidden_size)  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                x = tf.layers.dense(x, self.nb_actions[i])
                x = tf.nn.softmax(x)
                output.append(x)
            x = tf.stack(output, axis=1)
            # local_obs_joint = tf.stack(local_obs_joint, axis=1)
            return x#{'output':x, 'local_obs':local_obs_joint}
class CollectiveDecSharedActorGrid(Model):
    """Parameter-sharing decentralized collective actor for the grid domain.

    All agents share the same hidden dense layers (created for agent 0, reused
    via name+reuse for agents 1..n); per-agent specialization comes from a
    learned additive 'loc_weight' per hidden layer plus a per-agent output
    bias. Each agent's local observation is obtained by masking the full
    observation with its adjacency mask. Output: per-agent softmax action
    distributions stacked along axis 1.
    """
    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[] , stateNum = 0, N = 1):
        # NOTE(review): adjacent_list=[] is a mutable default argument (read-only here).
        super(CollectiveDecSharedActorGrid, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.layer_norm = layer_norm  # redundant: already assigned above
        self.batch_norm = batch_norm
        self.adjacent_list = adjacent_list
        self.stateNum = stateNum
        self.N = float(N)
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        # self.obs_mask[i,:48] = 1
        # self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        # self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, obs, reuse=False):
        # Agent 0 creates the shared hidden layers; later agents reuse them.
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # obs = obs*8000.0
            # y = []
            i = 0
            # Adjacency mask selects this agent's visible entries in both halves of obs.
            Mask = np.zeros(len(self.nb_actions) * 2)
            for j in self.adjacent_list[i]:
                Mask[j] = 1
                Mask[j + len(self.nb_actions)] = 1
            Mask = tf.Variable(Mask, dtype=tf.float32)
            local_obs = tf.multiply(obs, Mask)
            # local_obs_joint.append(local_obs)
            x = local_obs
            hidden = 0
            loc_weights = []
            # output_biases = []
            for hidden_size in self.hidden_sizes:
                # Per-location additive offset applied after the shared dense layer.
                loc_weight = tf.get_variable('loc_weight_' + str(hidden), [hidden_size], tf.float32, trainable=True)
                loc_weights.append(loc_weight)
                x = tf.layers.dense(x,
                                    hidden_size, name='hidden_layer_' + str(
                        hidden))  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                x = tf.add(x, loc_weight)
                if self.layer_norm:
                    x = tc.layers.layer_norm(x, center=True, scale=True)
                if self.batch_norm:
                    x = tc.layers.batch_norm(x)
                x = self.hidden_nonlinearity(x)
                hidden += 1
            output_bias = tf.get_variable('output_bias_' + str(i), [self.nb_actions[i]], tf.float32, trainable=True)
            # output_biases.append(output_bias)
            x = tf.layers.dense(x, self.nb_actions[i])
            x = tf.add(x, output_bias)
            x = tf.nn.softmax(x)
            output.append(x)
            # Remaining agents reuse the shared hidden layers created above.
            for i in range(1, len(self.nb_actions)):
                Mask = np.zeros(len(self.nb_actions) * 2)
                for j in self.adjacent_list[i]:
                    Mask[j] = 1
                    Mask[j + len(self.nb_actions)] = 1
                Mask = tf.Variable(Mask, dtype=tf.float32)
                local_obs = tf.multiply(obs, Mask)
                # local_obs_joint.append(local_obs)
                x = local_obs
                hidden = 0
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x,
                                        hidden_size, name='hidden_layer_' + str(
                            hidden),
                                        reuse=True)  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                    x = tf.add(x, loc_weights[hidden])
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                    hidden += 1
                output_bias = tf.get_variable('output_bias_' + str(i), [self.nb_actions[i]], tf.float32, trainable=True)
                x = tf.layers.dense(x, self.nb_actions[i])
                x = tf.add(x, output_bias)
                x = tf.nn.softmax(x)
                output.append(x)
            x = tf.stack(output, axis=1)
            # local_obs_joint = tf.stack(local_obs_joint, axis=1)
            return x
class CollectiveDecActorGrid(Model):
    """Decentralized collective actor for the grid domain (per-agent networks).

    Structure mirrors CollectiveDecActorGridPatrolling, differing only in how
    the second part of the local observation is formed.
    """
    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[] , stateNum = 0, N = 1):
        # NOTE(review): adjacent_list=[] is a mutable default argument (read-only here).
        super(CollectiveDecActorGrid, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.layer_norm = layer_norm  # redundant: already assigned above
        self.batch_norm = batch_norm
        self.adjacent_list = adjacent_list
        self.stateNum = stateNum
        self.N = float(N)
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        # self.obs_mask[i,:48] = 1
        # self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        # self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, obs, reuse=False):
        # Builds (or reuses) the actor graph; returns per-agent softmax action
        # probabilities stacked along axis 1.
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # obs = obs*8000.0
            # y = []
            local_obs_joint = []
            #visiable_obs = tf.concat([obs[:, :self.stateNum*2], obs[:, self.stateNum:self.stateNum*2]*self.N*obs[:, self.stateNum*2:]], axis=1)
            for i in range(len(self.nb_actions)):
                local_obs = [obs[:,j] for j in self.adjacent_list[i]]
                # NOTE(review): this multiplies obs[:, stateNum + j] by itself
                # (i.e. squares it); the Patrolling variant multiplies by
                # obs[:, 2*stateNum + j] instead — confirm this is intentional.
                local_obs.extend([obs[:, self.stateNum + j]*obs[:, self.stateNum + j] for j in self.adjacent_list[i]])
                local_obs = tf.stack(local_obs, axis=1)
                local_obs_joint.append(local_obs)
                #local_obs = visiable_obs#obs[:, :self.stateNum*2]
                x = local_obs
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x,
                                        hidden_size)  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                x = tf.layers.dense(x, self.nb_actions[i])
                x = tf.nn.softmax(x)
                output.append(x)
            x = tf.stack(output, axis=1)
            # local_obs_joint = tf.stack(local_obs_joint, axis=1)
            return x#{'output':x, 'local_obs':local_obs_joint}
class CollectiveDecActorGridNObs(Model):
    """Decentralized collective grid actor whose local observation contains only
    the adjacent cells' raw values (no product/secondary observation segment).

    Otherwise identical in structure to CollectiveDecActorGrid: one dense
    network per agent, per-agent softmax outputs stacked along axis 1.
    """
    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[] , stateNum = 0, N = 1):
        # NOTE(review): adjacent_list=[] is a mutable default argument (read-only here).
        super(CollectiveDecActorGridNObs, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.layer_norm = layer_norm  # redundant: already assigned above
        self.batch_norm = batch_norm
        self.adjacent_list = adjacent_list
        self.stateNum = stateNum
        self.N = float(N)
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        # self.obs_mask[i,:48] = 1
        # self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        # self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, obs, reuse=False):
        # Builds (or reuses) the actor graph; returns per-agent softmax action
        # probabilities stacked along axis 1.
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # obs = obs*8000.0
            # y = []
            local_obs_joint = []
            #visiable_obs = tf.concat([obs[:, :self.stateNum*2], obs[:, self.stateNum:self.stateNum*2]*self.N*obs[:, self.stateNum*2:]], axis=1)
            for i in range(len(self.nb_actions)):
                # Only the adjacent cells' raw observation values are used.
                local_obs = [obs[:,j] for j in self.adjacent_list[i]]
                local_obs = tf.stack(local_obs, axis=1)
                local_obs_joint.append(local_obs)
                #local_obs = visiable_obs#obs[:, :self.stateNum*2]
                x = local_obs
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x,
                                        hidden_size)  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                x = tf.layers.dense(x, self.nb_actions[i])
                x = tf.nn.softmax(x)
                output.append(x)
            x = tf.stack(output, axis=1)
            # local_obs_joint = tf.stack(local_obs_joint, axis=1)
            return x#{'output':x, 'local_obs':local_obs_joint}
class CollectiveDecActorGrid0Obs(Model):
    """Observation-independent ("open-loop") collective grid actor.

    Instead of conditioning on observations, each agent's policy is a free
    learned logit vector passed through softmax; __call__ therefore takes no
    *obs* argument. Output shape: (1, num_agents, actions_per_agent).
    """
    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[] , stateNum = 0, N = 1):
        # NOTE(review): most of these arguments are unused by __call__ below;
        # kept for interface parity with the observation-based actors.
        super(CollectiveDecActorGrid0Obs, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.layer_norm = layer_norm  # redundant: already assigned above
        self.batch_norm = batch_norm
        self.adjacent_list = adjacent_list
        self.stateNum = stateNum
        self.N = float(N)
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        # self.obs_mask[i,:48] = 1
        # self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        # self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, reuse=False):
        # One trainable logit vector per agent, softmaxed into a distribution.
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # obs = obs*8000.0
            # y = []
            local_obs_joint = []
            #visiable_obs = tf.concat([obs[:, :self.stateNum*2], obs[:, self.stateNum:self.stateNum*2]*self.N*obs[:, self.stateNum*2:]], axis=1)
            for i in range(len(self.nb_actions)):
                x = tf.get_variable('a_'+str(i),[self.nb_actions[i]], tf.float32,
                                    tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3),
                                    trainable=True)
                x = tf.nn.softmax(x)
                x = tf.reshape(x, shape=[1, self.nb_actions[i]])
                output.append(x)
            x = tf.stack(output, axis=1)
            return x#{'output':x, 'local_obs':local_obs_joint}
class CollectiveDecCriticGrid0Obs(Model):
    """Observation-independent collective grid critic.

    Like CollectiveDecActorGrid0Obs but without the softmax: each agent's
    values are a free learned vector. __call__ takes no *obs* argument and
    returns shape (1, num_agents, actions_per_agent).
    """
    def __init__(self, nb_actions, name='dec_collective_critic', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[] , stateNum = 0, N = 1):
        # NOTE(review): most of these arguments are unused by __call__ below;
        # kept for interface parity with the observation-based critics.
        super(CollectiveDecCriticGrid0Obs, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.layer_norm = layer_norm  # redundant: already assigned above
        self.batch_norm = batch_norm
        self.adjacent_list = adjacent_list
        self.stateNum = stateNum
        self.N = float(N)
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        # self.obs_mask[i,:48] = 1
        # self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        # self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, reuse=False):
        # One trainable value vector per agent (no softmax: raw critic values).
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # obs = obs*8000.0
            # y = []
            local_obs_joint = []
            #visiable_obs = tf.concat([obs[:, :self.stateNum*2], obs[:, self.stateNum:self.stateNum*2]*self.N*obs[:, self.stateNum*2:]], axis=1)
            for i in range(len(self.nb_actions)):
                x = tf.get_variable('a_'+str(i),[self.nb_actions[i]], tf.float32,
                                    tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3),
                                    trainable=True)
                x = tf.reshape(x, shape=[1, self.nb_actions[i]])
                output.append(x)
            x = tf.stack(output, axis=1)
            return x#{'output':x, 'local_obs':local_obs_joint}
class StatelessActor(Model):
    """Single learned softmax distribution over *nb_actions*, independent of
    any observation (open-loop policy)."""

    def __init__(self, nb_actions, name='dec_collective_actor'):
        super(StatelessActor, self).__init__(name=name)
        self.nb_actions = nb_actions

    def __call__(self, reuse=False):
        """Builds (or reuses) the graph; returns a (1, nb_actions) probability row."""
        with tf.variable_scope(self.name) as var_scope:
            if reuse:
                var_scope.reuse_variables()
            logits = tf.get_variable('x', [self.nb_actions], tf.float64,
                                     tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3),
                                     trainable=True)
            probs = tf.nn.softmax(logits)
            return tf.reshape(probs, shape=[1, self.nb_actions])
class CollectiveDecCriticGrid(Model):
    """Decentralized collective critic for the grid domain (per-agent networks).

    Each agent's local observation concatenates the adjacent cells' values from
    the first and second observation segments; the per-agent dense network
    outputs raw action values (no softmax), stacked along axis 1.
    """
    def __init__(self, nb_actions, name='dec_collective_critic', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[] , stateNum = 0):
        # NOTE(review): adjacent_list=[] is a mutable default argument (read-only here).
        super(CollectiveDecCriticGrid, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.layer_norm = layer_norm  # redundant: already assigned above
        self.batch_norm = batch_norm
        self.adjacent_list = adjacent_list
        self.stateNum = stateNum
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        # self.obs_mask[i,:48] = 1
        # self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        # self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, obs, reuse=False):
        # Builds (or reuses) the critic graph; returns per-agent value vectors
        # stacked along axis 1.
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # obs = obs*8000.0
            # y = []
            # local_obs_joint = []
            for i in range(len(self.nb_actions)):
                # Adjacent cells from both observation segments form the local input.
                local_obs = [obs[:,j] for j in self.adjacent_list[i]]
                local_obs.extend([obs[:, self.stateNum + j] for j in self.adjacent_list[i]])
                local_obs = tf.stack(local_obs, axis=1)
                # local_obs_joint.append(local_obs)
                x = local_obs
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x,
                                        hidden_size)  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                x = tf.layers.dense(x, self.nb_actions[i])
                output.append(x)
            x = tf.stack(output, axis=1)
            return x
class CollectiveDecSharedCriticGrid(Model):
    """Parameter-sharing decentralized collective critic for the grid domain.

    Mirrors CollectiveDecSharedActorGrid but without the softmax: all agents
    share the hidden dense layers (created for agent 0, reused for the rest),
    with per-agent additive 'loc_weight' offsets and per-agent output biases.
    Local observations are formed by masking the full observation with each
    agent's adjacency mask.
    """
    def __init__(self, nb_actions, name='dec_collective_critic', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[] , stateNum = 0):
        # NOTE(review): adjacent_list=[] is a mutable default argument (read-only here).
        super(CollectiveDecSharedCriticGrid, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.layer_norm = layer_norm  # redundant: already assigned above
        self.batch_norm = batch_norm
        self.adjacent_list = adjacent_list
        self.stateNum = stateNum
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        # self.obs_mask[i,:48] = 1
        # self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        # self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, obs, reuse=False):
        # Agent 0 creates the shared hidden layers; later agents reuse them.
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # construct shared dense layers
            # indices = np.arange(len(self.nb_actions))
            # loc_indices = tf.one_hot(indices, len(self.nb_actions))
            # batch_size = [0]
            # loc_indices = tf.tile(loc_indices, tf.stack([batch_size, 1, 1]))
            i = 0
            # Adjacency mask selects this agent's visible entries in both halves of obs.
            Mask = np.zeros(len(self.nb_actions)*2)
            for j in self.adjacent_list[i]:
                Mask[j] = 1
                Mask[j + len(self.nb_actions)] = 1
            Mask = tf.Variable(Mask, dtype=tf.float32)
            local_obs = tf.multiply(obs,Mask)
            # local_obs_joint.append(local_obs)
            x = local_obs
            hidden = 0
            loc_weights = []
            # output_biases = []
            for hidden_size in self.hidden_sizes:
                # Per-location additive offset applied after the shared dense layer.
                loc_weight = tf.get_variable('loc_weight_' + str(hidden), [hidden_size], tf.float32, trainable=True)
                loc_weights.append(loc_weight)
                x = tf.layers.dense(x,
                                    hidden_size, name = 'hidden_layer_' + str(hidden))  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                x = tf.add(x, loc_weight)
                if self.layer_norm:
                    x = tc.layers.layer_norm(x, center=True, scale=True)
                if self.batch_norm:
                    x = tc.layers.batch_norm(x)
                x = self.hidden_nonlinearity(x)
                hidden += 1
            output_bias = tf.get_variable('output_bias_' + str(i), [self.nb_actions[i]], tf.float32, trainable=True)
            # output_biases.append(output_bias)
            x = tf.layers.dense(x, self.nb_actions[i])
            x = tf.add(x, output_bias)
            output.append(x)
            # Remaining agents reuse the shared hidden layers created above.
            for i in range(1,len(self.nb_actions)):
                Mask = np.zeros(len(self.nb_actions) * 2)
                for j in self.adjacent_list[i]:
                    Mask[j] = 1
                    Mask[j + len(self.nb_actions)] = 1
                Mask = tf.Variable(Mask, dtype=tf.float32)
                local_obs = tf.multiply(obs , Mask)
                # local_obs_joint.append(local_obs)
                x = local_obs
                hidden = 0
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x,
                                        hidden_size, name='hidden_layer_' + str(
                            hidden), reuse=True)  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                    x = tf.add(x, loc_weights[hidden])
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                    hidden += 1
                output_bias = tf.get_variable('output_bias_' + str(i), [self.nb_actions[i]], tf.float32, trainable=True)
                x = tf.layers.dense(x, self.nb_actions[i])
                x = tf.add(x, output_bias)
                output.append(x)
            x = tf.stack(output, axis=1)
            return x
class CollectiveDecCriticGridNObs(Model):
    """Decentralized collective grid critic whose local observation contains
    only the adjacent cells' raw values (no second observation segment).

    One dense network per agent; outputs raw per-agent value vectors (no
    softmax) stacked along axis 1.
    """
    def __init__(self, nb_actions, name='dec_collective_critic', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[] , stateNum = 0):
        # NOTE(review): adjacent_list=[] is a mutable default argument (read-only here).
        super(CollectiveDecCriticGridNObs, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.layer_norm = layer_norm  # redundant: already assigned above
        self.batch_norm = batch_norm
        self.adjacent_list = adjacent_list
        self.stateNum = stateNum
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        # self.obs_mask[i,:48] = 1
        # self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        # self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, obs, reuse=False):
        # Builds (or reuses) the critic graph; returns per-agent value vectors
        # stacked along axis 1.
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # obs = obs*8000.0
            # y = []
            # local_obs_joint = []
            for i in range(len(self.nb_actions)):
                # Only the adjacent cells' raw observation values are used.
                local_obs = [obs[:,j] for j in self.adjacent_list[i]]
                local_obs = tf.stack(local_obs, axis=1)
                # local_obs_joint.append(local_obs)
                x = local_obs
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x,
                                        hidden_size)  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                x = tf.layers.dense(x, self.nb_actions[i])
                output.append(x)
            x = tf.stack(output, axis=1)
            return x
class CollectiveDecActorTaxi(Model):
    """Decentralized collective actor for the taxi domain.

    Observation layout assumed by the indexing below: the first 48 entries are
    global features, followed by two 81-entry location segments. Each agent's
    softmax over its adjacent locations is scattered back into a dense
    81-entry distribution (zeros at non-adjacent locations); the per-agent
    distributions are stacked along axis 1.
    """
    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[]):
        # NOTE(review): adjacent_list=[] is a mutable default argument (read-only here).
        super(CollectiveDecActorTaxi, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.layer_norm = layer_norm  # redundant: already assigned above
        self.batch_norm = batch_norm
        self.adjacent_list = adjacent_list
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        # self.obs_mask[i,:48] = 1
        # self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        # self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, obs, reuse=False):
        # Builds (or reuses) the actor graph; returns (batch, num_agents, 81)
        # per-agent location distributions.
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # obs = obs*8000.0
            # y = []
            # local_obs_joint = []
            for i in range(len(self.nb_actions)):
                # Adjacent entries of both 81-entry segments, plus the 48 globals.
                local_obs = [obs[:,48 + j] for j in self.adjacent_list[i]]
                local_obs.extend([obs[:, 48 + 81 + j] for j in self.adjacent_list[i]])
                local_obs = tf.stack(local_obs, axis=1)
                local_obs = tf.concat([local_obs, obs[:,:48]], axis=1)
                # local_obs_joint.append(local_obs)
                x = local_obs
                adjacent_num = len(self.adjacent_list[i])
                # local_output = tf.zeros([None, adjacent_num], tf.float64)
                # Dense 81-entry output initialized to (batch-shaped) zeros.
                local_output = [obs[:,0]*0.0]*81
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x,
                                        hidden_size)  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                # y.append(x)
                x = tf.layers.dense(x, adjacent_num)
                x = tf.nn.softmax(x)
                # Scatter the softmax over adjacent slots back into the 81 locations.
                for j in range(len(self.adjacent_list[i])):
                    local_output[self.adjacent_list[i][j]] = x[:,j]
                local_output = tf.stack(local_output, axis=1)
                output.append(local_output)
            x = tf.stack(output, axis=1)
            # x = tf.reshape(x, [-1])
            return x#{'action':x, 'y':y, 'local_obs_joint':local_obs_joint}
class CollectiveDecActorTaxi0Obs(Model):
    """Decentralized taxi actor that conditions only on obs[:, :48].

    Like CollectiveDecActorTaxi but ignores per-zone counts: each zone's
    softmax head is a single dense layer over the 48 global (time) features.
    hidden_sizes / layer_norm / batch_norm are accepted for interface
    compatibility but unused here.
    """

    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=None):
        super(CollectiveDecActorTaxi0Obs, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        # None default instead of a shared mutable default argument.
        self.adjacent_list = [] if adjacent_list is None else adjacent_list

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            for i in range(len(self.nb_actions)):
                x = obs[:, :48]
                adjacent_num = len(self.adjacent_list[i])
                # 81 batch-shaped zero columns the head scatters into.
                local_output = [obs[:, 0] * 0.0] * 81
                x = tf.layers.dense(x, adjacent_num)
                x = tf.nn.softmax(x)
                for j in range(adjacent_num):
                    local_output[self.adjacent_list[i][j]] = x[:, j]
                output.append(tf.stack(local_output, axis=1))
            return tf.stack(output, axis=1)
class CollectiveDecCriticTaxi0Obs(Model):
    """Decentralized taxi critic that conditions only on obs[:, :48].

    Mirror of CollectiveDecActorTaxi0Obs without the softmax: each zone's
    head emits raw values over its adjacent zones, scattered into an
    81-wide vector and stacked along axis 1.
    """

    def __init__(self, nb_actions, name='dec_collective_critic', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=None):
        super(CollectiveDecCriticTaxi0Obs, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        # None default instead of a shared mutable default argument.
        self.adjacent_list = [] if adjacent_list is None else adjacent_list

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            for i in range(len(self.nb_actions)):
                x = obs[:, :48]
                adjacent_num = len(self.adjacent_list[i])
                # 81 batch-shaped zero columns the head scatters into.
                local_output = [obs[:, 0] * 0.0] * 81
                # No softmax: raw per-adjacent-zone values.
                x = tf.layers.dense(x, adjacent_num)
                for j in range(adjacent_num):
                    local_output[self.adjacent_list[i][j]] = x[:, j]
                output.append(tf.stack(local_output, axis=1))
            return tf.stack(output, axis=1)
class CollectiveDecCriticTaxi(Model):
    """Decentralized collective critic for the taxi domain.

    Mirrors CollectiveDecActorTaxi but the per-zone head outputs raw
    (unsoftmaxed) values over the adjacent zones, scattered back into an
    81-wide vector and stacked along axis 1.
    """

    def __init__(self, nb_actions, name='dec_collective_critic', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=None):
        super(CollectiveDecCriticTaxi, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        # None default instead of a shared mutable default argument.
        self.adjacent_list = [] if adjacent_list is None else adjacent_list

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            for i in range(len(self.nb_actions)):
                # Local view: adjacent state counts, adjacent demand counts,
                # then the 48 global time features.
                local_obs = [obs[:, 48 + j] for j in self.adjacent_list[i]]
                local_obs.extend([obs[:, 48 + 81 + j] for j in self.adjacent_list[i]])
                local_obs = tf.stack(local_obs, axis=1)
                local_obs = tf.concat([local_obs, obs[:, :48]], axis=1)
                x = local_obs
                adjacent_num = len(self.adjacent_list[i])
                # 81 batch-shaped zero columns the head scatters into.
                local_output = [obs[:, 0] * 0.0] * 81
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x, hidden_size)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                # No softmax: raw per-adjacent-zone values.
                x = tf.layers.dense(x, adjacent_num)
                for j in range(adjacent_num):
                    local_output[self.adjacent_list[i][j]] = x[:, j]
                output.append(tf.stack(local_output, axis=1))
            return tf.stack(output, axis=1)
class CollectiveDecInitializationTaxi(Model):
    """Learnable initial distribution over taxi zones.

    Holds one free logit vector (variable 'x') of length zone_number and
    returns its softmax.  nb_actions is accepted for interface parity with
    the other networks but is not used.
    """

    def __init__(self, nb_actions, name='dec_collective_initialization_actor', zone_number=81):
        super(CollectiveDecInitializationTaxi, self).__init__(name=name)
        self.zone_number = zone_number

    def __call__(self, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            logits = tf.get_variable(
                'x', [self.zone_number], tf.float32,
                tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
                trainable=True)
            return tf.nn.softmax(logits)
class CollectiveDecActorTaxiDecObs(Model):
    """Decentralized taxi actor over per-zone (rank-3) observations.

    Expects obs indexed as obs[:, i] per zone head (the zero template uses
    obs[:, 0, 0], so obs is assumed (batch, zone, features) -- TODO confirm
    with caller).  Each head maps its slice through the hidden stack to a
    softmax over its adjacent zones, scattered into an 81-wide vector.
    """

    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=None):
        super(CollectiveDecActorTaxiDecObs, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        # None default instead of a shared mutable default argument.
        self.adjacent_list = [] if adjacent_list is None else adjacent_list

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            for i in range(len(self.nb_actions)):
                x = obs[:, i]
                adjacent_num = len(self.adjacent_list[i])
                # 81 batch-shaped zero columns the head scatters into.
                local_output = [obs[:, 0, 0] * 0.0] * 81
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x, hidden_size)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                x = tf.layers.dense(x, adjacent_num)
                x = tf.nn.softmax(x)
                for j in range(adjacent_num):
                    local_output[self.adjacent_list[i][j]] = x[:, j]
                output.append(tf.stack(local_output, axis=1))
            return tf.stack(output, axis=1)
class CollectiveDecActorTaxiN(Model):
    """Variant of CollectiveDecActorTaxi that rescales obs by 8000 first.

    Identical to CollectiveDecActorTaxi except the whole observation is
    multiplied by 8000.0 on entry (presumably undoing a population-count
    normalization, N = 8000 -- TODO confirm with caller).
    """

    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=None):
        super(CollectiveDecActorTaxiN, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        # None default instead of a shared mutable default argument.
        self.adjacent_list = [] if adjacent_list is None else adjacent_list

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # Undo the observation normalization (see class docstring).
            obs = obs * 8000.0
            for i in range(len(self.nb_actions)):
                local_obs = [obs[:, 48 + j] for j in self.adjacent_list[i]]
                local_obs.extend([obs[:, 48 + 81 + j] for j in self.adjacent_list[i]])
                local_obs = tf.stack(local_obs, axis=1)
                local_obs = tf.concat([local_obs, obs[:, :48]], axis=1)
                x = local_obs
                adjacent_num = len(self.adjacent_list[i])
                # 81 batch-shaped zero columns the head scatters into.
                local_output = [obs[:, 0] * 0.0] * 81
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x, hidden_size)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                x = tf.layers.dense(x, adjacent_num)
                x = tf.nn.softmax(x)
                for j in range(adjacent_num):
                    local_output[self.adjacent_list[i][j]] = x[:, j]
                output.append(tf.stack(local_output, axis=1))
            return tf.stack(output, axis=1)
class CollectiveDecActor(Model):
    """Per-agent decentralized actor.

    One MLP softmax head per agent i over its observation slice obs[:, i];
    head outputs are stacked along axis 1.
    """

    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(), hidden_nonlinearity=tf.nn.relu):
        super(CollectiveDecActor, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            for i in range(len(self.nb_actions)):
                x = obs[:, i]
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x, hidden_size)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                x = tf.layers.dense(x, self.nb_actions[i])
                output.append(tf.nn.softmax(x))
            return tf.stack(output, axis=1)
class CollectiveDecCritic(Model):
    """Per-agent decentralized critic.

    One MLP head per agent i over obs[:, i], emitting raw Q-values per
    action (no softmax); heads are stacked along axis 1.
    """

    def __init__(self, nb_actions, name='dec_collective_critic', layer_norm=True, batch_norm=True, hidden_sizes=(), hidden_nonlinearity=tf.nn.relu):
        super(CollectiveDecCritic, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            for i in range(len(self.nb_actions)):
                x = obs[:, i]
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x, hidden_size)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                # Small-uniform init keeps initial Q estimates near zero.
                x = tf.layers.dense(x, self.nb_actions[i],
                                    kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
                output.append(x)
            return tf.stack(output, axis=1)
class CollectiveDecActorMinOut(Model):
    """Per-agent decentralized actor with a minout hidden transform.

    Bug fixes vs. the original:
    - super() was called with the wrong class (CollectiveDecActor), which
      raises TypeError on construction;
    - __call__ iterated self.hidden_sizes, an attribute this class never
      sets (AttributeError), while num_channels / num_maxout_units went
      unused.  The hidden transform now follows the GridCollectiveActor
      pattern: dense(num_channels) -> minout(num_maxout_units).
    """

    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, num_channels=10, num_maxout_units=2, hidden_nonlinearity=tf.nn.relu):
        super(CollectiveDecActorMinOut, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.num_channels = num_channels
        self.num_maxout_units = num_maxout_units
        self.hidden_nonlinearity = hidden_nonlinearity

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            for i in range(len(self.nb_actions)):
                x = obs[:, i]
                # Minout hidden transform (same shape as GridCollectiveActor).
                x = tf.layers.dense(x, self.num_channels,
                                    kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
                x = minout(x, self.num_maxout_units)
                if self.layer_norm:
                    x = tc.layers.layer_norm(x, center=True, scale=True)
                if self.batch_norm:
                    x = tc.layers.batch_norm(x)
                x = tf.layers.dense(x, self.nb_actions[i],
                                    kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
                output.append(tf.nn.softmax(x))
            return tf.stack(output, axis=1)
class CollectiveDecCriticMinOut(Model):
    """Per-agent decentralized critic (MLP heads over obs[:, i]).

    NOTE(review): despite the MinOut name, this class uses the plain
    hidden-nonlinearity stack (no minout call), matching its original body.
    Bug fix: super() was called with the wrong class (CollectiveDecCritic),
    which raised TypeError on construction.
    """

    def __init__(self, nb_actions, name='dec_collective_critic', layer_norm=True, batch_norm=True, hidden_sizes=(), hidden_nonlinearity=tf.nn.relu):
        super(CollectiveDecCriticMinOut, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            for i in range(len(self.nb_actions)):
                x = obs[:, i]
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x, hidden_size,
                                        kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    # Note: original order is norm -> nonlinearity -> batch_norm.
                    x = self.hidden_nonlinearity(x)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                x = tf.layers.dense(x, self.nb_actions[i],
                                    kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
                output.append(x)
            return tf.stack(output, axis=1)
class GridCollectiveCritic(Model):
    """Collective critic for the grid domain.

    Per agent i: dense(num_channels) -> minout(num_maxout_units) ->
    dense(nb_actions[i]); heads stacked along axis 1.
    """

    def __init__(self, nb_actions, name='collective_critic', layer_norm=True, relu_output=True, num_channels=20, num_maxout_units=5):
        super(GridCollectiveCritic, self).__init__(name=name)
        self.layer_norm = layer_norm
        self.nb_actions = nb_actions
        self.relu_output = relu_output
        self.num_channels = num_channels
        self.num_maxout_units = num_maxout_units

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            heads = []
            for idx, n_act in enumerate(self.nb_actions):
                h = tf.layers.dense(obs[:, idx], self.num_channels)
                h = minout(h, self.num_maxout_units)
                heads.append(tf.layers.dense(h, n_act))
            return tf.stack(heads, axis=1)
class GridCollectiveActor(Model):
    """Collective actor for the grid domain.

    Per agent i: dense(num_channels) -> minout(num_maxout_units) ->
    dense(nb_actions[i]) -> softmax; heads stacked along axis 1.
    """

    def __init__(self, nb_actions, name='actor', layer_norm=True, num_channels=20, num_maxout_units=5):
        super(GridCollectiveActor, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.num_maxout_units = num_maxout_units
        self.num_channels = num_channels

    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            # Small-uniform init keeps initial policies near uniform.
            init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
            heads = []
            for idx, n_act in enumerate(self.nb_actions):
                h = tf.layers.dense(obs[:, idx], self.num_channels, kernel_initializer=init)
                h = minout(h, self.num_maxout_units)
                h = tf.layers.dense(h, n_act, kernel_initializer=init)
                heads.append(tf.nn.softmax(h))
            return tf.stack(heads, axis=1)
class TaxiBasicCollectiveCritic(Model):
    """Linear collective critic for the taxi domain.

    Concatenates the time one-hot (obs[:, 0, :H]), the per-zone served
    demand min(agent count, demand), and the flattened action-count
    matrix, then maps to a scalar value with a single dense layer.
    Assumes obs is (batch, zone, features) with demand at column H+1
    -- TODO confirm against the caller.
    """
    def __init__(self, nb_actions, name='collective_critic', layer_norm=True, relu_output = True, hidden_nonlinearity = tf.nn.relu):
        super(TaxiBasicCollectiveCritic, self).__init__(name=name)
        self.layer_norm = layer_norm
        self.nb_actions = nb_actions
        # self.actionNum = np.sum(nb_actions)
        self.relu_output = relu_output
        self.hidden_nonlinearity = hidden_nonlinearity
        # Number of taxi zones and number of time periods.
        self.zoneNum = 81
        self.H = 48
    def __call__(self, obs, action_count, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            #time
            output.append(obs[:,0,:self.H])
            #payment
            immediateRewards = []
            for i in range(len(self.nb_actions)):
                # Agents arriving in zone i = column sum of action counts.
                x = tf.reduce_sum(action_count[:,:,i], axis=1)
                # Served demand proxy: min(agents, demand in zone i).
                immediateRewards.append(tf.reduce_min(tf.stack([x, obs[:, i, self.H + 1]], axis=1), axis=1))
            immediateRewards = tf.stack(immediateRewards, axis=1)
            output.append(immediateRewards)
            #cost
            # .value on a Dimension product: TF1 static-shape API.
            flatten_action_count = tf.reshape(action_count, shape=[-1, (action_count.shape[1]*action_count.shape[2]).value])
            output.append(flatten_action_count)
            output = tf.concat(output, axis=1)
            # Single linear readout to the scalar value estimate.
            x = tf.layers.dense(output, 1, kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))
            return x
class TaxiCollectiveActorPseudoFlowMaxout(Model):
    """Taxi actor conditioned on a learned pseudo passenger flow.

    A learned matrix V (softmax-normalized per row) maps per-zone agent
    counts to predicted flows.  For each zone i the head input is
    demand - (flow contributed by zones other than i), passed through two
    maxout layers, concatenated with the time one-hot, and ended with a
    softmax over that zone's actions.  Assumes obs is
    (batch, zone, features) with counts at columns H and H+1 -- TODO
    confirm against the caller.
    """
    def __init__(self, nb_actions, name='actor', layer_norm=None, num_pieces = 3, num_maxout_units = 18):
        super(TaxiCollectiveActorPseudoFlowMaxout, self).__init__(name=name)
        self.nb_actions = nb_actions
        # layer_norm is a string selector here: 'layer_norm', 'batch_norm'
        # or anything else for no normalization.
        self.layer_norm = None
        if layer_norm== 'layer_norm':
            self.layer_norm = tc.layers.layer_norm
        if layer_norm == 'batch_norm':
            self.layer_norm = tc.layers.batch_norm
        self.zoneNum = 81
        self.H = 48
        self.num_maxout_units = num_maxout_units
        self.num_pieces = num_pieces
    def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # time
            time = obs[:, 0, :self.H]
            normalizedDenseCounter = 0
            normalizedDenseCounter += 1
            state_count = obs[:, :, self.H ]#obs[:,self.H:self.H + self.zoneNum]
            demand_count = obs[:, :, self.H + 1]#obs[:,self.H + self.zoneNum:]
            initializer = tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5)#tf.truncated_normal_initializer(mean=1.0 / 9.0, stddev=1.0 / 90.0, dtype=tf.float64)
            # Learned zone-to-zone flow matrix; rows softmaxed to sum to 1.
            V = tf.get_variable('V_' + str(normalizedDenseCounter), [int(state_count.get_shape()[1]), self.zoneNum], tf.float32, initializer,
                                trainable=True)
            V_norm = tf.nn.softmax(V, dim=1)
            flows = tf.matmul(state_count, V_norm)
            # flows = normalizedDense(obs[:, self.H:self.H + self.zoneNum], self.zoneNum, counter=normalizedDenseCounter)
            for i in range(len(self.nb_actions)):
                #counterpart: the flow from other
                # Subtract zone i's own contribution, leaving flow from others.
                x = flows - tf.matmul(tf.expand_dims(state_count[:,i], axis=1),tf.expand_dims(V_norm[i, :],0))
                x = demand_count - x
                # Two maxout blocks (dense to num_pieces*units, then minout).
                x = tf.layers.dense(x, self.num_pieces*self.num_maxout_units, kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
                x = minout(x, self.num_maxout_units)
                if self.layer_norm!= None:
                    x = self.layer_norm(x)
                x = tf.layers.dense(x, self.num_pieces * self.num_maxout_units,
                                    kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))
                x = minout(x, self.num_maxout_units)
                if self.layer_norm != None:
                    x = self.layer_norm(x)
                x = tf.concat([x, time], axis=1)
                x = tf.layers.dense(x, self.nb_actions[i],
                                    kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))
                x = tf.nn.softmax(x)
                output.append(x)
            x = tf.stack(output, axis=1)
            print('initialized actor network')
            return x
class TaxiCollectiveCriticWithCost(Model):
    """Taxi collective critic: learned trip weights plus movement cost.

    Per zone i the return is trip_weights[i] * min(agents, demand) plus the
    (negative) cost of the actions moving into i; the scalar value is the
    sum over zones.  Returns a dict with the value and diagnostic tensors.
    Assumes obs is (batch, zone, features) with counts at columns H and
    H+1 -- TODO confirm against the caller.
    """
    def __init__(self, nb_actions, costMatrix, name='collective_critic', layer_norm=True, relu_output = True, hidden_sizes = (), hidden_nonlinearity = tf.nn.relu):
        super(TaxiCollectiveCriticWithCost, self).__init__(name=name)
        self.layer_norm = layer_norm
        self.nb_actions = nb_actions
        # self.actionNum = np.sum(nb_actions)
        self.relu_output = relu_output
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.zoneNum = 81
        self.H = 48
        self.costMatrix = costMatrix
    def __call__(self, obs, action_count, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            inputLayer = obs
            output = []
            next_state_count = []
            normalizedDenseCounter = 0
            # customer_flow is built for the (commented-out) future-reward
            # term below; currently unused in the returned value.
            customer_flow = normalizedDense(obs[:, :, self.H + 1], self.zoneNum, counter = normalizedDenseCounter)
            state_count = obs[:, :, self.H]
            demand_count = obs[:, :, self.H + 1]
            normalizedDenseCounter +=1
            #payment
            immediateRewards = []
            # Element-wise negated cost of each action count.
            cost = tf.multiply(action_count, -self.costMatrix)
            output1 = []
            output2 = []
            # One learned payment weight per zone.
            trip_weights = tf.get_variable('r', [len(self.nb_actions)], tf.float32, tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
                                           trainable=True)
            for i in range(len(self.nb_actions)):
                # Agents arriving in zone i.
                x = tf.reduce_sum(action_count[:,:,i], axis=1)
                # next_state_count.append(x - demand_count[:,i] + customer_flow[:,i])
                stateReward = tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1)#tf.stack([tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1)], axis=1)
                stateReward = tf.multiply(trip_weights[i],stateReward)
                output1.append(stateReward)
                output2.append(tf.reduce_sum(cost[:,:,i], axis=1))
                stateReward = tf.reduce_sum(tf.stack([tf.squeeze(stateReward), tf.reduce_sum(cost[:,:,i], axis=1)], axis=1), axis=1)
                immediateRewards.append(stateReward)
                output.append(stateReward)
            immediateRewards = tf.stack(immediateRewards, axis=1)
            # output.append(flatten_action_count)
            # #future reward
            # next_state_count = tf.stack(next_state_count, axis=1)
            # x = normalizedDense(next_state_count, self.zoneNum, counter = normalizedDenseCounter)
            # normalizedDenseCounter += 1
            # d = tf.get_variable("d", [self.zoneNum], trainable=True,
            #                     initializer=tf.random_uniform_initializer(minval=0, maxval=1.0))
            # x = tf.minimum(x, d)
            # output.append(x)
            output = tf.stack(output, axis=1)
            # time
            # output = append(obs[:, 0, :self.H])
            x = tf.reduce_sum(output, axis=1)
            return {'symbolic_val':x, 'state_reward':immediateRewards, 'output1':output1, 'output2':output2, 'cost':cost}
class TaxiCollectiveCriticWithCostAndBiasOld(Model):
    """Older taxi critic: trip weights, a learned bias term, and movement cost.

    Per zone i the return is trip_weights[i] * min(agents, demand) plus a
    learned "future" term min(agents, d[i]) * future_trip_weights[i] plus
    the (negative) movement cost; the scalar value is the sum over zones.
    Assumes obs is (batch, zone, features) with counts at columns H and
    H+1 -- TODO confirm against the caller.
    """
    def __init__(self, nb_actions, costMatrix, name='collective_critic', layer_norm=True, relu_output = True, hidden_sizes = (), hidden_nonlinearity = tf.nn.relu):
        super(TaxiCollectiveCriticWithCostAndBiasOld, self).__init__(name=name)
        self.layer_norm = layer_norm
        self.nb_actions = nb_actions
        # self.actionNum = np.sum(nb_actions)
        self.relu_output = relu_output
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.zoneNum = 81
        self.H = 48
        self.costMatrix = costMatrix
    def __call__(self, obs, action_count, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            inputLayer = obs
            output = []
            next_state_count = []
            normalizedDenseCounter = 0
            # Built for the commented-out future-reward variant; unused below.
            customer_flow = normalizedDense(obs[:, :, self.H + 1], self.zoneNum, counter = normalizedDenseCounter)
            state_count = obs[:, :, self.H]
            demand_count = obs[:, :, self.H + 1]
            normalizedDenseCounter +=1
            #payment
            immediateRewards = []
            futureRewards = []
            cost = tf.multiply(action_count, -self.costMatrix)
            # output1 = []
            # output2 = []
            # Learned per-zone payment weights.
            trip_weights = tf.get_variable('r', [len(self.nb_actions)], tf.float32, tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
                                           trainable=True)
            # future reward
            # Learned per-zone upper bound d and future payment weights.
            d = tf.get_variable("d", [self.zoneNum], trainable=True,
                                initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))
            future_trip_weights = tf.get_variable('rd', [len(self.nb_actions)], tf.float32,
                                                  tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
                                                  trainable=True)
            for i in range(len(self.nb_actions)):
                # Agents arriving in zone i.
                x = tf.reduce_sum(action_count[:,:,i], axis=1)
                # next_state_count.append(x - demand_count[:,i] + customer_flow[:,i])
                stateReward = tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1)#tf.stack([tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1)], axis=1)
                stateReward = tf.multiply(trip_weights[i],stateReward)
                biasReward = tf.minimum(x, d[i])
                future_reward = tf.multiply(biasReward, future_trip_weights[i])
                immediateRewards.append(stateReward)
                stateReward = tf.add(stateReward, future_reward)
                futureRewards.append(stateReward)
                # output1.append(stateReward)
                # output2.append(tf.reduce_sum(cost[:,:,i], axis=1))
                stateReward = tf.add(stateReward, tf.reduce_sum(cost[:,:,i], axis=1))
                output.append(stateReward)
            immediateRewards = tf.stack(immediateRewards, axis=1)
            # output.append(flatten_action_count)
            # #future reward
            # next_state_count = tf.stack(next_state_count, axis=1)
            # x = normalizedDense(next_state_count, self.zoneNum, counter = normalizedDenseCounter)
            # normalizedDenseCounter += 1
            # d = tf.get_variable("d", [self.zoneNum], trainable=True,
            #                     initializer=tf.random_uniform_initializer(minval=0, maxval=1.0))
            # x = tf.minimum(x, d)
            # output.append(x)
            output = tf.stack(output, axis=1)
            # time
            # output = append(obs[:, 0, :self.H])
            x = tf.reduce_sum(output, axis=1)
            return {'symbolic_val':x, 'immediateRewards':immediateRewards, 'cost':cost, 'd':d, 'future_trip_weights':future_trip_weights, 'trip_weights':trip_weights}
class TaxiCollectiveCriticWithCostAndBias(Model):
    """Taxi collective critic with state-dependent trip weights and bounds.

    Shared features (per-zone agent counts, served demand, and the scaled
    time one-hot) feed a small MLP per zone that predicts a trip weight and
    a value upper bound d; zone i's return is
    trip_weight * min(count_i, d_i) + (negative) movement cost, and the
    scalar value is the sum over zones.  Returns a dict with the value and
    diagnostic tensors.  Assumes obs is (batch, zone, features) with demand
    at column H+1 -- TODO confirm against the caller.
    """

    def __init__(self, nb_actions, costMatrix, name='collective_critic', layer_norm=False, batch_norm=False, relu_output=True, hidden_sizes=(), hidden_nonlinearity=tf.nn.relu):
        super(TaxiCollectiveCriticWithCostAndBias, self).__init__(name=name)
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.nb_actions = nb_actions
        self.relu_output = relu_output
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.zoneNum = 81
        self.H = 48
        # Population size used to rescale the normalized time features.
        self.N = 8000.0
        self.costMatrix = costMatrix

    def __call__(self, obs, action_count, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # Time one-hot is stored normalized; rescale by N -- TODO confirm.
            time_period = obs[:, 0, :self.H] * self.N
            demand_count = obs[:, :, self.H + 1]
            state_returns = []
            # Element-wise negated cost of each action count.
            cost = tf.multiply(action_count, -self.costMatrix)
            trip_weights = []
            features = []
            state_count = []
            for i in range(len(self.nb_actions)):
                # Agents arriving in zone i.
                x = tf.reduce_sum(action_count[:, :, i], axis=1)
                state_count.append(x)
                # Served demand = min(agents, demand in zone i).
                served_demand = tf.reduce_min(tf.stack([x, demand_count[:, i]], axis=1), axis=1)
                features.append(x)
                features.append(served_demand)
            features = tf.stack(features, axis=1)
            features = tf.concat([features, time_period], axis=1)
            d = []
            for i in range(len(self.nb_actions)):
                x = features
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x, hidden_size)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x)
                    x = self.hidden_nonlinearity(x)
                # Two scalar heads: the trip weight and the value upper bound.
                trip_weight = tf.layers.dense(x, 1, kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))[:, 0]
                trip_weights.append(trip_weight)
                val_UB = tf.layers.dense(x, 1, kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))[:, 0]
                d.append(val_UB)
                stateReward = tf.multiply(trip_weight, tf.reduce_min(tf.stack([state_count[i], val_UB], axis=1), axis=1))
                stateReward = tf.add(stateReward, tf.reduce_sum(cost[:, :, i], axis=1))
                state_returns.append(stateReward)
                output.append(stateReward)
            d = tf.stack(d, axis=1)
            trip_weights = tf.stack(trip_weights, axis=1)
            output = tf.stack(output, axis=1)
            state_returns = tf.stack(state_returns, axis=1)
            x = tf.reduce_sum(output, axis=1)
            return {'symbolic_val': x, 'state_returns': state_returns, 'cost': cost, 'd': d, 'trip_weights': trip_weights}
class TaxiCollectiveCriticWithCostAndBiasVPN(Model):
    """Taxi critic with a one-step value-prediction-network rollout.

    Immediate term: trip_weights[i] * min(agents, demand) plus movement
    cost.  Future term: served demand is pushed through a time-conditioned,
    softmax-normalized flow matrix to predict next-step counts, which are
    scored by time-conditioned future weights capped at a predicted bound d.
    Assumes flat obs laid out [time(H) | state counts | demand counts],
    normalized by N -- TODO confirm against the caller.
    """
    def __init__(self, nb_actions, costMatrix, name='collective_critic', layer_norm=True, relu_output = True, hidden_sizes = (), hidden_nonlinearity = tf.nn.relu):
        super(TaxiCollectiveCriticWithCostAndBiasVPN, self).__init__(name=name)
        self.layer_norm = layer_norm
        self.nb_actions = nb_actions
        # self.actionNum = np.sum(nb_actions)
        self.relu_output = relu_output
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.zoneNum = 81
        self.H = 48
        # Population size used to undo the observation normalization.
        self.N = 8000.0
        self.costMatrix = costMatrix
    def __call__(self, obs, action_count, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            # Rescale normalized inputs back to raw counts.
            obs = obs*self.N
            action_count = action_count*self.N
            output = []
            inputLayer = obs
            time_period = obs[:, :self.H]#*self.N
            next_state_count = []
            normalizedDenseCounter = 0
            state_count = obs[:, self.H: self.H + self.zoneNum ]
            demand_count = obs[:, self.H + self.zoneNum:]
            normalizedDenseCounter +=1
            #payment
            immediateRewards = []
            future_vals = []
            cost = tf.multiply(action_count, -self.costMatrix)
            # output1 = []
            # output2 = []
            # Learned per-zone payment weights for the immediate term.
            trip_weights = tf.get_variable('r', [len(self.nb_actions)], tf.float32, tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
                                           trainable=True)
            served_demands = []
            for i in range(len(self.nb_actions)):
                # Agents arriving in zone i.
                x = tf.reduce_sum(action_count[:,:,i], axis=1)
                served_demand = tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1)#tf.stack([tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1)], axis=1)
                stateReward = tf.multiply(trip_weights[i],served_demand)
                stateReward = tf.add(stateReward, tf.reduce_sum(cost[:, :, i], axis=1))
                output.append(stateReward)
                immediateRewards.append(stateReward)
                served_demands.append(served_demand)
            immediateRewards = tf.stack(immediateRewards, axis=1)
            # predict passenger flow
            served_demands = tf.stack(served_demands, axis=1)
            initializer = tf.truncated_normal_initializer(mean=1.0 / 9.0, stddev=1.0 / 90.0, dtype=tf.float32)
            # Time-conditioned flow matrix, rows softmaxed to distributions.
            V = tf.layers.dense(time_period,int(state_count.get_shape()[1])* self.zoneNum, kernel_initializer=initializer)
            V = tf.reshape(V, [-1, int(state_count.get_shape()[1]), self.zoneNum])
            # V = tf.get_variable('V_' + str(normalizedDenseCounter), [int(state_count.get_shape()[1]), self.zoneNum],
            #                     tf.float64, initializer,
            #                     trainable=True)
            V_norm = tf.nn.softmax(V, dim=2)
            customer_flow = tf.squeeze(tf.matmul(tf.reshape(served_demands, [-1, 1, self.zoneNum]), V_norm))#tf.matmul(served_demands, V_norm)
            # predict the next upper bound
            d = tf.layers.dense(time_period, self.zoneNum, activation=tf.nn.relu, kernel_initializer=tf.random_uniform_initializer(minval=100, maxval=self.N), name= "d", use_bias=False)
            # d = tf.get_variable("d", [self.zoneNum], trainable=True,
            #                     initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))
            # predict the next payment
            future_trip_weights = tf.layers.dense(time_period, len(self.nb_actions), activation=tf.nn.relu, kernel_initializer=tf.random_uniform_initializer(minval=0, maxval=3e-3), use_bias=False)
            # future_trip_weights = tf.get_variable('rd', [len(self.nb_actions)], tf.float64,
            #                                       tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
            #                                       trainable=True)
            for i in range(len(self.nb_actions)):
                x = tf.reduce_sum(action_count[:, :, i], axis=1)
                #future reward
                # One-step count rollout: arrivals - served + inbound flow.
                next_x = x - served_demands[:,i] + customer_flow[:,i]
                next_state_count.append(next_x)
                future_val = tf.minimum(tf.multiply(next_x, future_trip_weights[:,i]), d[:,i])
                # future_val = tf.multiply(future_served_demand, future_trip_weights[:,i])
                future_vals.append(future_val)
                output.append(future_val)
            future_vals = tf.stack(future_vals, axis=1)
            output = tf.stack(output, axis=1)
            # time
            # output = append(obs[:, 0, :self.H])
            x = tf.reduce_sum(output, axis=1)
            return {'symbolic_val':x, 'V_norm':V_norm, 'customer_flow':customer_flow, 'immediateRewards':immediateRewards, 'cost':cost, 'd':d, 'future_trip_weights':future_trip_weights, 'trip_weights':trip_weights, 'next_vals':future_vals, 'next_state_count':next_state_count}
class TaxiCollectiveCriticWithCostAndBiasDenseVPN(Model):
    """Collective critic for the taxi dispatch problem (dense-feature VPN variant).

    The critic value of a joint action count is the sum of
      * an immediate reward per zone: a learned payment weight times the
        served demand, minus the movement cost from ``costMatrix``, and
      * a learned one-step look-ahead value per zone, bounded above by a
        learned per-zone cap ``d``, computed from dense features of the
        zone's adjacent neighbourhood.

    BUG FIX vs. the original: the look-ahead loop previously multiplied by
    the stale ``next_x`` left over from the preceding loop (i.e. always the
    *last* zone's next count) instead of zone ``i``'s own next count; this
    version uses ``next_state_count[:, i]``, matching the sibling
    ``...AndFutureRelationVPN`` class.
    """

    def __init__(self, nb_actions, costMatrix, name='collective_critic',
                 layer_norm=False, batch_norm=False, relu_output=True,
                 hidden_sizes=(), hidden_nonlinearity=tf.nn.relu,
                 adjacent_list=None):
        """Store configuration.

        Args:
            nb_actions: per-zone action dimensionality; its length is the
                number of zones iterated over when building the graph.
            costMatrix: movement-cost matrix multiplied elementwise with the
                action counts (negated, so costs reduce the reward).
            layer_norm / batch_norm: apply tc.layers layer/batch norm in the
                look-ahead feature network.
            hidden_sizes: widths of the hidden layers of that network.
            adjacent_list: neighbour indices per zone; ``adjacent_list[i]``
                selects the columns fed to zone i's feature network.
        """
        super(TaxiCollectiveCriticWithCostAndBiasDenseVPN, self).__init__(name=name)
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        self.nb_actions = nb_actions
        self.relu_output = relu_output  # kept for interface parity; unused here
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.zoneNum = 81   # number of zones
        self.H = 48         # number of time periods encoded in obs
        self.N = 8000.0     # normalisation constant (presumably fleet size -- TODO confirm)
        self.costMatrix = costMatrix
        # avoid the shared-mutable-default pitfall
        self.adjacent_list = [] if adjacent_list is None else adjacent_list

    def __call__(self, obs, action_count, reuse=False):
        """Build the critic graph.

        Args:
            obs: 2-D batch tensor laid out as
                ``[time (H) | per-zone counts (zoneNum) | per-zone demands]``
                -- layout inferred from the slicing below; assumed normalised
                by ``self.N`` (rescaled on entry). TODO confirm with caller.
            action_count: 3-D tensor whose last axis indexes zones; summed
                over axis 1 to get the vehicles assigned to each zone.
            reuse: reuse variables of a previously-built copy of this graph.

        Returns:
            dict of named tensors; ``'symbolic_val'`` is the scalar critic
            value per batch element.
        """
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            # Undo the caller's normalisation to work with raw counts.
            obs = obs * self.N
            action_count = action_count * self.N
            output = []
            time_period = obs[:, :self.H]
            next_state_count = []
            state_count = obs[:, self.H: self.H + self.zoneNum]
            demand_count = obs[:, self.H + self.zoneNum:]
            # --- immediate reward: payment for served demand minus movement cost
            immediateRewards = []
            future_vals = []
            cost = tf.multiply(action_count, -self.costMatrix)
            trip_weights = tf.get_variable(
                'r', [len(self.nb_actions)], tf.float32,
                tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
                trainable=True)
            served_demands = []
            for i in range(len(self.nb_actions)):
                x = tf.reduce_sum(action_count[:, :, i], axis=1)
                # served demand = min(vehicles assigned to zone i, demand in zone i)
                served_demand = tf.reduce_min(
                    tf.stack([x, demand_count[:, i]], axis=1), axis=1)
                stateReward = tf.multiply(trip_weights[i], served_demand)
                stateReward = tf.add(stateReward, tf.reduce_sum(cost[:, :, i], axis=1))
                output.append(stateReward)
                immediateRewards.append(stateReward)
                served_demands.append(served_demand)
            immediateRewards = tf.stack(immediateRewards, axis=1)
            # --- predict passenger flow: a softmax-normalised, time-conditioned
            # transition matrix redistributes served demand across zones.
            served_demands = tf.stack(served_demands, axis=1)
            initializer = tf.truncated_normal_initializer(
                mean=1.0 / 9.0, stddev=1.0 / 90.0, dtype=tf.float32)
            V = tf.layers.dense(time_period,
                                int(state_count.get_shape()[1]) * self.zoneNum,
                                kernel_initializer=initializer)
            V = tf.reshape(V, [-1, int(state_count.get_shape()[1]), self.zoneNum])
            V_norm = tf.nn.softmax(V, dim=2)
            customer_flow = tf.matmul(
                tf.reshape(served_demands, [-1, 1, self.zoneNum]), V_norm)[:, 0, :]
            # --- time-conditioned per-zone upper bound on the future value
            d = tf.layers.dense(
                time_period, self.zoneNum, activation=tf.nn.relu,
                kernel_initializer=tf.random_uniform_initializer(minval=100, maxval=self.N * 100),
                name="d", use_bias=False)
            # --- next-step zone counts: assigned - served + inflow
            future_trip_weights = []
            for i in range(len(self.nb_actions)):
                x = tf.reduce_sum(action_count[:, :, i], axis=1)
                next_x = x - served_demands[:, i] + customer_flow[:, i]
                next_state_count.append(next_x)
            next_state_count = tf.stack(next_state_count, axis=1)
            # --- look-ahead value per zone from its local neighbourhood
            for i in range(len(self.nb_actions)):
                local_state_count = tf.stack(
                    [next_state_count[:, k] for k in self.adjacent_list[i]], axis=1)
                feature = tf.concat([local_state_count, time_period], axis=1)
                for h in self.hidden_sizes:
                    feature = tf.layers.dense(feature, h)
                    if self.layer_norm:
                        feature = tc.layers.layer_norm(feature, center=True, scale=True)
                    if self.batch_norm:
                        feature = tc.layers.batch_norm(feature)
                    feature = self.hidden_nonlinearity(feature)
                future_trip_weight = tf.layers.dense(
                    feature, 1,
                    kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))[:, 0]
                # FIX: use zone i's own next count (the original used the stale
                # `next_x` from the previous loop, i.e. always the last zone).
                future_val = tf.minimum(
                    tf.multiply(next_state_count[:, i], future_trip_weight), d[:, i])
                future_trip_weights.append(future_trip_weight)
                future_vals.append(future_val)
                output.append(future_val)
            future_trip_weights = tf.stack(future_trip_weights, axis=1)
            future_vals = tf.stack(future_vals, axis=1)
            output = tf.stack(output, axis=1)
            x = tf.reduce_sum(output, axis=1)
            return {'symbolic_val':x, 'V_norm':V_norm, 'customer_flow':customer_flow, 'immediateRewards':immediateRewards, 'cost':cost, 'd':d, 'future_trip_weights':future_trip_weights, 'trip_weights':trip_weights, 'next_vals':future_vals, 'next_state_count':next_state_count}
class TaxiCollectiveCriticWithCostAndBiasAndFutureRelationVPN(Model):
    """Collective critic: immediate reward (served demand minus cost) plus a
    learned per-zone look-ahead value computed from each zone's neighbourhood
    (next counts + thresholds `d` + time), capped by `d`.

    Graph construction is order-sensitive (variable names 'r', 'd' and the
    dense-layer auto-naming must match between reuse passes), so the code is
    left untouched and only documented.
    """
    def __init__(self, nb_actions, costMatrix, adjacent_list, name='collective_critic', layer_norm=False, batch_norm = False, relu_output = True, hidden_sizes = (), hidden_nonlinearity = tf.nn.relu):
        """Store configuration. `nb_actions`'s length is the zone count
        iterated over; `adjacent_list[i]` lists zone i's neighbour indices."""
        super(TaxiCollectiveCriticWithCostAndBiasAndFutureRelationVPN, self).__init__(name=name)
        self.layer_norm = layer_norm
        self.nb_actions = nb_actions
        # self.actionNum = np.sum(nb_actions)
        self.relu_output = relu_output
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.zoneNum = 81   # number of zones
        self.H = 48         # number of time periods in the observation
        self.N = 8000.0     # scale applied to the time slice (presumably fleet size -- TODO confirm)
        self.costMatrix = costMatrix
        self.adjacent_list = adjacent_list
        # NOTE(review): layer_norm is assigned twice (also above); redundant.
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
    def __call__(self, obs, action_count, reuse=False):
        """Build the critic graph.

        `obs` is 3-D: column slice [:H] of row 0 is the time encoding,
        column H is the per-zone count and column H+1 the per-zone demand
        (layout inferred from the slicing below). `action_count` is 3-D with
        zones on the last axis. Returns a dict of named tensors;
        'symbolic_val' is the per-batch critic value.
        """
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            inputLayer = obs
            output = []
            time_period = obs[:, 0, :self.H]*self.N
            next_state_count = []
            normalizedDenseCounter = 0
            state_count = obs[:, :, self.H]
            demand_count = obs[:, :, self.H + 1]
            normalizedDenseCounter +=1
            #payment: learned weight times served demand, minus movement cost
            immediateRewards = []
            future_vals = []
            cost = tf.multiply(action_count, -self.costMatrix)
            # output1 = []
            # output2 = []
            trip_weights = tf.get_variable('r', [len(self.nb_actions)], tf.float32, tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
                                      trainable=True)
            served_demands = []
            for i in range(len(self.nb_actions)):
                x = tf.reduce_sum(action_count[:,:,i], axis=1)
                # served demand = min(vehicles assigned to zone i, demand in zone i)
                served_demand = tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1)#tf.stack([tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1)], axis=1)
                stateReward = tf.multiply(trip_weights[i],served_demand)
                stateReward = tf.add(stateReward, tf.reduce_sum(cost[:, :, i], axis=1))
                output.append(stateReward)
                immediateRewards.append(stateReward)
                served_demands.append(served_demand)
            immediateRewards = tf.stack(immediateRewards, axis=1)
            # predict passenger flow: softmax-normalised, time-conditioned
            # transition matrix redistributes served demand across zones
            served_demands = tf.stack(served_demands, axis=1)
            initializer = tf.truncated_normal_initializer(mean=1.0 / 9.0, stddev=1.0 / 90.0, dtype=tf.float32)
            V = tf.layers.dense(time_period,int(state_count.get_shape()[1])* self.zoneNum, kernel_initializer=initializer)
            V = tf.reshape(V, [-1, int(state_count.get_shape()[1]), self.zoneNum])
            # V = tf.get_variable('V_' + str(normalizedDenseCounter), [int(state_count.get_shape()[1]), self.zoneNum],
            #                 tf.float64, initializer,
            #                 trainable=True)
            V_norm = tf.nn.softmax(V, dim=2)
            customer_flow = tf.matmul(tf.reshape(served_demands, [-1, 1, self.zoneNum]), V_norm)[:,0,:]#tf.matmul(served_demands, V_norm)
            # predict the threshold values (per-zone caps on the future value)
            d = tf.layers.dense(time_period, self.zoneNum, kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5), name= "d")
            # predict the next payment weight per zone from local features
            future_trip_weights = []#tf.layers.dense(time_period, len(self.nb_actions), kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))
            # future_trip_weights = tf.get_variable('rd', [len(self.nb_actions)], tf.float64,
            #                           tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
            #                           trainable=True)
            # next_local_obs = []
            for i in range(len(self.nb_actions)):
                x = tf.reduce_sum(action_count[:, :, i], axis=1)
                #future reward: next count = assigned - served + inflow
                next_x = x - served_demands[:,i] + customer_flow[:,i]
                next_state_count.append(next_x)
            next_state_count = tf.stack(next_state_count, axis=1)
            for i in range(len(self.nb_actions)):
                # features: neighbourhood next counts + neighbourhood caps + time
                local_state_count = tf.stack([next_state_count[:, k] for k in self.adjacent_list[i]], axis=1)
                local_demand_count = tf.stack([d[:, k] for k in self.adjacent_list[i]], axis=1)
                next_local_obs = tf.concat([local_state_count, local_demand_count, time_period], axis=1)
                feature = next_local_obs
                for h in self.hidden_sizes:
                    feature = tf.layers.dense(feature,h)
                    if self.layer_norm:
                        feature = tc.layers.layer_norm(feature, center=True, scale=True)
                    if self.batch_norm:
                        feature = tc.layers.batch_norm(feature)
                    feature = self.hidden_nonlinearity(feature)
                future_trip_weight = tf.layers.dense(feature, 1,
                                              kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))[:,0]
                future_trip_weights.append(future_trip_weight)
                # future value: zone i's next count capped by d[i], times the weight
                future_served_demand = tf.minimum(next_state_count[:,i], d[:,i])
                future_val = tf.multiply(future_served_demand, future_trip_weight)
                future_vals.append(future_val)
                output.append(future_val)
            future_vals = tf.stack(future_vals, axis=1)
            future_trip_weights = tf.stack(future_trip_weights, axis=1)
            output = tf.stack(output, axis=1)
            # time
            # output = append(obs[:, 0, :self.H])
            x = tf.reduce_sum(output, axis=1)
            return {'symbolic_val':x, 'V_norm':V_norm, 'customer_flow':customer_flow, 'immediateRewards':immediateRewards, 'cost':cost, 'd':d, 'future_trip_weights':future_trip_weights, 'trip_weights':trip_weights, 'next_vals':future_vals}
class TaxiCollectiveCriticDenseVPN(Model):
    """Collective critic with a dense look-ahead value network.

    NOTE(review): this class is a byte-for-byte duplicate of
    TaxiCollectiveCriticWithCostAndBiasAndFutureRelationVPN (only the class
    name differs); consider consolidating. Graph construction is
    order-sensitive, so the code is left untouched and only documented.
    """
    def __init__(self, nb_actions, costMatrix, adjacent_list, name='collective_critic', layer_norm=False, batch_norm = False, relu_output = True, hidden_sizes = (), hidden_nonlinearity = tf.nn.relu):
        """Store configuration. `nb_actions`'s length is the zone count
        iterated over; `adjacent_list[i]` lists zone i's neighbour indices."""
        super(TaxiCollectiveCriticDenseVPN, self).__init__(name=name)
        self.layer_norm = layer_norm
        self.nb_actions = nb_actions
        # self.actionNum = np.sum(nb_actions)
        self.relu_output = relu_output
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.zoneNum = 81   # number of zones
        self.H = 48         # number of time periods in the observation
        self.N = 8000.0     # scale applied to the time slice (presumably fleet size -- TODO confirm)
        self.costMatrix = costMatrix
        self.adjacent_list = adjacent_list
        # NOTE(review): layer_norm is assigned twice (also above); redundant.
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
    def __call__(self, obs, action_count, reuse=False):
        """Build the critic graph: immediate reward (served demand payments
        minus movement cost) plus a learned, capped per-zone look-ahead value
        from each zone's neighbourhood. See the sibling class for details.
        Returns a dict of named tensors; 'symbolic_val' is the critic value.
        """
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            inputLayer = obs
            output = []
            time_period = obs[:, 0, :self.H]*self.N
            next_state_count = []
            normalizedDenseCounter = 0
            state_count = obs[:, :, self.H]
            demand_count = obs[:, :, self.H + 1]
            normalizedDenseCounter +=1
            #payment: learned weight times served demand, minus movement cost
            immediateRewards = []
            future_vals = []
            cost = tf.multiply(action_count, -self.costMatrix)
            # output1 = []
            # output2 = []
            trip_weights = tf.get_variable('r', [len(self.nb_actions)], tf.float32, tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
                                      trainable=True)
            served_demands = []
            for i in range(len(self.nb_actions)):
                x = tf.reduce_sum(action_count[:,:,i], axis=1)
                # served demand = min(vehicles assigned to zone i, demand in zone i)
                served_demand = tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1)#tf.stack([tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1)], axis=1)
                stateReward = tf.multiply(trip_weights[i],served_demand)
                stateReward = tf.add(stateReward, tf.reduce_sum(cost[:, :, i], axis=1))
                output.append(stateReward)
                immediateRewards.append(stateReward)
                served_demands.append(served_demand)
            immediateRewards = tf.stack(immediateRewards, axis=1)
            # predict passenger flow: softmax-normalised, time-conditioned
            # transition matrix redistributes served demand across zones
            served_demands = tf.stack(served_demands, axis=1)
            initializer = tf.truncated_normal_initializer(mean=1.0 / 9.0, stddev=1.0 / 90.0, dtype=tf.float32)
            V = tf.layers.dense(time_period,int(state_count.get_shape()[1])* self.zoneNum, kernel_initializer=initializer)
            V = tf.reshape(V, [-1, int(state_count.get_shape()[1]), self.zoneNum])
            # V = tf.get_variable('V_' + str(normalizedDenseCounter), [int(state_count.get_shape()[1]), self.zoneNum],
            #                 tf.float64, initializer,
            #                 trainable=True)
            V_norm = tf.nn.softmax(V, dim=2)
            customer_flow = tf.matmul(tf.reshape(served_demands, [-1, 1, self.zoneNum]), V_norm)[:,0,:]#tf.matmul(served_demands, V_norm)
            # predict the threshold values (per-zone caps on the future value)
            d = tf.layers.dense(time_period, self.zoneNum, kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5), name= "d")
            # predict the next payment weight per zone from local features
            future_trip_weights = []#tf.layers.dense(time_period, len(self.nb_actions), kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))
            # future_trip_weights = tf.get_variable('rd', [len(self.nb_actions)], tf.float64,
            #                           tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5),
            #                           trainable=True)
            # next_local_obs = []
            for i in range(len(self.nb_actions)):
                x = tf.reduce_sum(action_count[:, :, i], axis=1)
                #future reward: next count = assigned - served + inflow
                next_x = x - served_demands[:,i] + customer_flow[:,i]
                next_state_count.append(next_x)
            next_state_count = tf.stack(next_state_count, axis=1)
            for i in range(len(self.nb_actions)):
                # features: neighbourhood next counts + neighbourhood caps + time
                local_state_count = tf.stack([next_state_count[:, k] for k in self.adjacent_list[i]], axis=1)
                local_demand_count = tf.stack([d[:, k] for k in self.adjacent_list[i]], axis=1)
                next_local_obs = tf.concat([local_state_count, local_demand_count, time_period], axis=1)
                feature = next_local_obs
                for h in self.hidden_sizes:
                    feature = tf.layers.dense(feature,h)
                    if self.layer_norm:
                        feature = tc.layers.layer_norm(feature, center=True, scale=True)
                    if self.batch_norm:
                        feature = tc.layers.batch_norm(feature)
                    feature = self.hidden_nonlinearity(feature)
                future_trip_weight = tf.layers.dense(feature, 1,
                                              kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))[:,0]
                future_trip_weights.append(future_trip_weight)
                # future value: zone i's next count capped by d[i], times the weight
                future_served_demand = tf.minimum(next_state_count[:,i], d[:,i])
                future_val = tf.multiply(future_served_demand, future_trip_weight)
                future_vals.append(future_val)
                output.append(future_val)
            future_vals = tf.stack(future_vals, axis=1)
            future_trip_weights = tf.stack(future_trip_weights, axis=1)
            output = tf.stack(output, axis=1)
            # time
            # output = append(obs[:, 0, :self.H])
            x = tf.reduce_sum(output, axis=1)
            return {'symbolic_val':x, 'V_norm':V_norm, 'customer_flow':customer_flow, 'immediateRewards':immediateRewards, 'cost':cost, 'd':d, 'future_trip_weights':future_trip_weights, 'trip_weights':trip_weights, 'next_vals':future_vals}
class TaxiCollectiveCritic(Model):
    """Simpler collective critic: concatenates hand-built features (time,
    per-zone served demand, flattened action counts, capped next-state
    value) and maps them to a scalar with one linear layer.

    Relies on the module-level `normalizedDense` helper; the `counter`
    argument makes each call create a distinct TF variable ('V_0', 'V_1'),
    so call order matters -- code left untouched, only documented.
    """
    def __init__(self, nb_actions, name='collective_critic', layer_norm=True, relu_output = True, hidden_sizes = (), hidden_nonlinearity = tf.nn.relu):
        """Store configuration; `nb_actions`'s length is the zone count."""
        super(TaxiCollectiveCritic, self).__init__(name=name)
        self.layer_norm = layer_norm
        self.nb_actions = nb_actions
        # self.actionNum = np.sum(nb_actions)
        self.relu_output = relu_output
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        self.zoneNum = 81   # number of zones
        self.H = 48         # number of time periods in the observation
    def __call__(self, obs, action_count, reuse=False):
        """Build the critic graph; returns a [batch, 1] value tensor.

        `obs` is 3-D: row 0's first H columns are the time encoding, column
        H the per-zone counts and column H+1 the per-zone demands (layout
        inferred from the slicing below).
        """
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            inputLayer = obs
            output = []
            # time
            output.append(obs[:, 0, :self.H])
            next_state_count = []
            normalizedDenseCounter = 0
            # softmax-normalised linear map of demands -> incoming passenger flow
            customer_flow = normalizedDense(obs[:, :, self.H + 1], self.zoneNum, counter = normalizedDenseCounter)
            state_count = obs[:, :, self.H]
            demand_count = obs[:, :, self.H + 1]
            normalizedDenseCounter +=1
            #payment: served demand per zone = min(assigned vehicles, demand)
            immediateRewards = []
            for i in range(len(self.nb_actions)):
                x = tf.reduce_sum(action_count[:,:,i], axis=1)
                # next count = assigned - demand + inflow
                next_state_count.append(x - demand_count[:,i] + customer_flow[:,i])
                immediateRewards.append(tf.reduce_min(tf.stack([x, demand_count[:,i]], axis=1), axis=1))
            immediateRewards = tf.stack(immediateRewards, axis=1)
            output.append(immediateRewards)
            #cost: raw flattened action counts as features
            flatten_action_count = tf.reshape(action_count, shape=[-1, (action_count.shape[1]*action_count.shape[2]).value])
            output.append(flatten_action_count)
            #future reward: normalised map of next counts, capped by learned d
            next_state_count = tf.stack(next_state_count, axis=1)
            x = normalizedDense(next_state_count, self.zoneNum, counter = normalizedDenseCounter)
            normalizedDenseCounter += 1
            d = tf.get_variable("d", [self.zoneNum], trainable=True,
                            initializer=tf.random_uniform_initializer(minval=0, maxval=1.0))
            x = tf.minimum(x, d)
            output.append(x)
            output = tf.concat(output, axis=1)
            # single linear head over the concatenated features
            x = tf.layers.dense(output, 1, kernel_initializer=tf.random_uniform_initializer(minval=-3e-5, maxval=3e-5))
            return x
class DecActorGrid(Model):
    """Decentralised actor for a grid world with one shared policy network.

    Agent 0 creates the network weights; agents 1..N-1 reuse them via
    `reuse=True` on layers with identical names, so layer naming and call
    order are load-bearing -- code left untouched, only documented.

    NOTE(review): agent 0's input is masked to its adjacency neighbourhood
    while agents 1..N-1 see the full state; confirm this asymmetry is
    intentional.
    """
    def __init__(self, nb_actions, name='dec_collective_actor', layer_norm=True, batch_norm=True, hidden_sizes=(),
                 hidden_nonlinearity=tf.nn.relu, adjacent_list=[] , stateNum = 0, N = 1):
        """Store configuration.

        `adjacent_list[i]` lists the cells adjacent to cell i; it is turned
        into a dense 0/1 adjacency matrix used to mask observations.
        `stateNum` is the number of grid cells, `N` the number of agents.
        """
        super(DecActorGrid, self).__init__(name=name)
        self.nb_actions = nb_actions
        self.layer_norm = layer_norm
        self.hidden_sizes = hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        # NOTE(review): layer_norm is assigned twice (also above); redundant.
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        # dense 0/1 adjacency matrix built from the neighbour lists
        self.adjacent_list = np.zeros((len(adjacent_list),len(adjacent_list)), dtype=np.float32)
        for i in range(len(adjacent_list)):
            for j in adjacent_list[i]:
                self.adjacent_list[i,j] = 1
        self.stateNum = stateNum
        self.N = float(N)
        # self.obs_mask = np.zeros((81, 48 + 81*2))
        # for i in range(81):
        #     self.obs_mask[i,:48] = 1
        #     self.obs_mask[i,48:48+81] = self.adjacent_array[i]
        #     self.obs_mask[i,48 + 81:] = self.adjacent_array[i]
    def __call__(self, obs, reuse=False):
        """Build per-agent softmax policies; returns [batch, N, nb_actions[i]].

        `obs` layout (inferred from the slicing): the first `stateNum`
        columns and the next `stateNum` columns are two per-cell feature
        blocks, and column `2*stateNum + i` holds agent i's cell index.
        """
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            output = []
            # agent 0: creates the shared weights; observation masked to its
            # adjacency neighbourhood
            i = 0
            local_state = tf.one_hot(tf.cast(obs[:, i + self.stateNum * 2],tf.int32), self.stateNum)
            obs_masks = tf.matmul(local_state, self.adjacent_list)
            local_obs = tf.concat([obs[:, :self.stateNum]*obs_masks, obs[:, self.stateNum:self.stateNum*2]*obs_masks, local_state], axis=1)
            x = local_obs
            hidden = 0
            for hidden_size in self.hidden_sizes:
                x = tf.layers.dense(x,
                                hidden_size, name = 'hidden_layer_' + str(hidden))  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                if self.layer_norm:
                    x = tc.layers.layer_norm(x, center=True, scale=True, scope = 'layer_norm_' + str(hidden))
                if self.batch_norm:
                    x = tc.layers.batch_norm(x, name = 'batch_norm_' + str(hidden))
                x = self.hidden_nonlinearity(x)
                hidden += 1
            x = tf.layers.dense(x, int(self.nb_actions[i]), name = 'hidden_layer_' + str(hidden))
            x = tf.nn.softmax(x)
            output.append(x)
            # agents 1..N-1: reuse the weights created above (reuse=True and
            # identical layer names); unmasked observation plus own position
            for i in range(1, int(self.N)):
                local_state = tf.one_hot(tf.cast(obs[:, i + self.stateNum * 2],tf.int32), self.stateNum)
                local_obs = tf.concat([obs[:, :self.stateNum * 2], local_state], axis=1)
                x = local_obs
                hidden = 0
                for hidden_size in self.hidden_sizes:
                    x = tf.layers.dense(x,
                                        hidden_size, name='hidden_layer_' + str(
                            hidden), reuse=True)  # , kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
                    if self.layer_norm:
                        x = tc.layers.layer_norm(x, center=True, scale=True, scope='layer_norm_' + str(hidden), reuse=True)
                    if self.batch_norm:
                        x = tc.layers.batch_norm(x, name='batch_norm_' + str(hidden), reuse=True)
                    x = self.hidden_nonlinearity(x)
                    hidden += 1
                x = tf.layers.dense(x, int(self.nb_actions[i]), name='hidden_layer_' + str(hidden), reuse=True)
                x = tf.nn.softmax(x)
                output.append(x)
            x = tf.stack(output, axis=1)
            # local_obs_joint = tf.stack(local_obs_joint, axis=1)
            return x
def normalizedDense(x, num_units, nonlinearity=None, initializer = tf.random_normal_initializer(0, 0.05), counter = 0):
    """Fully connected layer with a column-softmax-normalised weight matrix.

    Creates (or reuses, depending on the enclosing variable scope) a weight
    variable named ``'V_<counter>'`` and returns ``x @ softmax(V, dim=1)``,
    so each output is a convex combination of the inputs. No bias is added.

    NOTE(review): the ``nonlinearity`` parameter is never used, and the
    passed ``initializer`` is immediately overwritten by the truncated-normal
    initializer below, so both are effectively ignored. Kept in the
    signature for backward compatibility.

    FIX vs. original: removed a stray trailing ``|`` character (extraction
    residue) after the return statement that broke the syntax.
    """
    initializer = tf.truncated_normal_initializer(mean=1.0 / 9.0, stddev=1.0 / 90.0, dtype=tf.float32)
    V = tf.get_variable('V_' +str(counter), [int(x.get_shape()[1]),num_units], tf.float32, initializer, trainable=True)
    # with ops.name_scope(None, "softmax_normalize", [V]) as name:
    #     V = ops.convert_to_tensor(V, name="x")
    V_norm = tf.nn.softmax(V, dim=1)
    return tf.matmul(x, V_norm)
# storops_test/unity/resource/test_pool.py
# coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from unittest import TestCase
import ddt
import mock
from hamcrest import assert_that, equal_to, instance_of, raises
from storops.exception import UnityLunNameInUseError, JobStateError, \
UnityPoolNameInUseError
from storops.unity.enums import RaidTypeEnum, FastVPStatusEnum, \
FastVPRelocationRateEnum, PoolDataRelocationTypeEnum, \
RaidStripeWidthEnum, TierTypeEnum, PoolUnitTypeEnum, \
FSSupportedProtocolEnum, TieringPolicyEnum, JobStateEnum, \
StoragePoolTypeEnum, ESXFilesystemMajorVersionEnum, \
ESXFilesystemBlockSizeEnum, NFSShareDefaultAccessEnum, \
NFSShareSecurityEnum
from storops.unity.resource.disk import UnityDiskGroup, UnityDisk
from storops.unity.resource.lun import UnityLun
from storops.unity.resource.nas_server import UnityNasServer
from storops.unity.resource.pool import UnityPool, UnityPoolList, \
RaidGroupParameter
from storops.unity.resource.snap_schedule import UnitySnapSchedule
from storops.unity.resource.sp import UnityStorageProcessor
from storops.unity.resource.system import UnitySystem
from storops_test.unity.rest_mock import t_rest, patch_rest
__author__ = '<NAME>'
@ddt.ddt
class UnityPoolTest(TestCase):
@patch_rest
def test_properties(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
self.verify_pool_1(pool)
@staticmethod
def verify_pool_1(pool):
assert_that(pool.id, equal_to('pool_1'))
assert_that(pool.raid_type, equal_to(RaidTypeEnum.MIXED))
assert_that(pool.name, equal_to('perfpool1130'))
assert_that(pool.description, equal_to('pp'))
assert_that(pool.size_free, equal_to(9160359936000))
assert_that(pool.size_total, equal_to(9251627991040))
assert_that(pool.size_used, equal_to(91268055040))
assert_that(pool.size_subscribed, equal_to(1392106274816))
assert_that(pool.alert_threshold, equal_to(70))
assert_that(pool.pool_space_harvest_high_threshold, equal_to(95.0))
assert_that(pool.pool_space_harvest_low_threshold, equal_to(85.0))
assert_that(pool.snap_space_harvest_high_threshold, equal_to(25.0))
assert_that(pool.snap_space_harvest_low_threshold, equal_to(20.0))
assert_that(pool.is_fast_cache_enabled, equal_to(False))
assert_that(str(pool.creation_time),
equal_to('2016-02-29 07:34:23+00:00'))
assert_that(pool.is_empty, equal_to(False))
assert_that(pool.is_harvest_enabled, equal_to(True))
assert_that(pool.is_snap_harvest_enabled, equal_to(False))
assert_that(pool.metadata_size_subscribed, equal_to(59324235776))
assert_that(pool.snap_size_subscribed, equal_to(873220538368))
assert_that(pool.metadata_size_used, equal_to(36775657472))
assert_that(pool.snap_size_used, equal_to(24452407296))
assert_that(pool.is_all_flash, equal_to(False))
assert_that(pool.pool_type, equal_to(StoragePoolTypeEnum.TRADITIONAL))
tiers = pool.tiers
assert_that(len(tiers), equal_to(3))
for tier in tiers:
assert_that(tier._cli, equal_to(pool._cli))
assert_that(pool.pool_fast_vp._cli, equal_to(pool._cli))
@patch_rest
def test_pool_fast_vp_properties(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
fast = pool.pool_fast_vp
assert_that(fast.status, equal_to(FastVPStatusEnum.ACTIVE))
assert_that(fast.relocation_rate,
equal_to(FastVPRelocationRateEnum.MEDIUM))
assert_that(fast.type, equal_to(PoolDataRelocationTypeEnum.SCHEDULED))
assert_that(fast.is_schedule_enabled, equal_to(True))
assert_that(str(fast.relocation_duration_estimate),
equal_to('0:00:00'))
assert_that(fast.size_moving_down, equal_to(0))
assert_that(fast.size_moving_up, equal_to(0))
assert_that(fast.size_moving_within, equal_to(0))
assert_that(fast.percent_complete, equal_to(0))
assert_that(fast.data_relocated, equal_to(0))
assert_that(str(fast.last_start_time),
equal_to('2016-03-13 22:00:00+00:00'))
assert_that(str(fast.last_end_time),
equal_to('2016-03-14 06:00:00+00:00'))
@patch_rest
def test_tier_properties(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
tier = next(t for t in pool.tiers if t.name == 'Performance')
assert_that(tier.raid_type, equal_to(RaidTypeEnum.RAID5))
assert_that(tier.stripe_width, equal_to(RaidStripeWidthEnum._5))
assert_that(tier.tier_type, equal_to(TierTypeEnum.PERFORMANCE))
assert_that(tier.size_total, equal_to(1180847570944))
assert_that(tier.size_used, equal_to(3489660928))
assert_that(tier.size_free, equal_to(1177357910016))
assert_that(tier.size_moving_down, equal_to(0))
assert_that(tier.size_moving_up, equal_to(0))
assert_that(tier.size_moving_within, equal_to(0))
assert_that(tier.disk_count, equal_to(5))
@patch_rest
def test_get_all(self):
pools = UnityPoolList(cli=t_rest())
assert_that(len(pools), equal_to(2))
pool = next(pool for pool in pools if pool.id == 'pool_1')
self.verify_pool_1(pool)
@patch_rest
def test_disk_groups(self):
cli = t_rest()
pool0 = UnityPool.get(cli=cli, _id='pool_1')
disk_groups = pool0.disk_groups
assert_that(disk_groups, instance_of(dict))
assert_that(len(disk_groups), equal_to(2))
assert_that(disk_groups['dg_8'], instance_of(list))
assert_that(disk_groups['dg_15'], instance_of(list))
for key in disk_groups:
for disk in disk_groups[key]:
assert_that(disk, instance_of(UnityDisk))
@patch_rest
def test_get_nested_resource_properties(self):
pools = UnityPoolList(cli=t_rest())
pool = next(pool for pool in pools if pool.id == 'pool_1')
tier = next(t for t in pool.tiers if t.name == 'Performance')
unit = next(u for u in tier.pool_units if u.id == 'rg_2')
assert_that(unit.type, equal_to(PoolUnitTypeEnum.RAID_GROUP))
assert_that(unit.tier_type, equal_to(TierTypeEnum.PERFORMANCE))
assert_that(unit.name, equal_to("RAID5, #2, pool:perfpool1130"))
assert_that(unit.description, equal_to('123'))
assert_that(unit.wwn, equal_to(
'06:00:00:00:05:00:00:00:01:00:00:00:00:00:00:64'))
assert_that(unit.size_total, equal_to(1181501882368))
assert_that(unit.pool, instance_of(UnityPool))
@patch_rest
def test_get_nested_resource_filter_by_non_id(self):
pools = UnityPoolList(cli=t_rest())
pool = next(pool for pool in pools if pool.id == 'pool_1')
tier = next(t for t in pool.tiers if t.name == 'Performance')
unit = next(u for u in tier.pool_units if u.description == '123')
assert_that(unit.id, equal_to('rg_2'))
@patch_rest
def test_create_pool(self):
cli = t_rest()
disk_group = UnityDiskGroup.get(cli=cli, _id='dg_15')
raid_group_0 = RaidGroupParameter(
disk_group=disk_group,
disk_num=3, raid_type=RaidTypeEnum.RAID5,
stripe_width=RaidStripeWidthEnum.BEST_FIT)
raid_groups = [raid_group_0]
pool = UnityPool.create(
cli=cli, name='test_pool', description='Unity test pool.',
raid_groups=raid_groups, alert_threshold=15,
is_harvest_enabled=True, is_snap_harvest_enabled=True,
pool_harvest_high_threshold=80, pool_harvest_low_threshold=40,
snap_harvest_high_threshold=80, snap_harvest_low_threshold=40,
is_fast_cache_enabled=True, is_fastvp_enabled=True,
pool_type=StoragePoolTypeEnum.DYNAMIC)
assert_that(pool.id, equal_to('pool_4'))
assert_that(pool.pool_type, equal_to(StoragePoolTypeEnum.DYNAMIC))
assert_that(pool.is_all_flash, equal_to(False))
@patch_rest
def test_create_pool_name_in_use(self):
cli = t_rest()
raid_group_0 = RaidGroupParameter(
disk_group='dg_15',
disk_num=3, raid_type=RaidTypeEnum.RAID5,
stripe_width=RaidStripeWidthEnum.BEST_FIT)
raid_groups = [raid_group_0]
def _inner():
UnityPool.create(
cli=cli, name='duplicate_pool',
description='Unity test pool.',
raid_groups=raid_groups)
assert_that(_inner, raises(UnityPoolNameInUseError))
@patch_rest
def test_extend_pool(self):
cli = t_rest()
raid_group_0 = RaidGroupParameter(
disk_group='dg_8',
disk_num=4,
raid_type=RaidTypeEnum.RAID10,
stripe_width=RaidStripeWidthEnum.BEST_FIT)
raid_groups = [raid_group_0]
pool0 = UnityPool.get(cli=cli, _id='pool_1')
resp = pool0.modify(raid_groups=raid_groups)
assert_that(resp.is_ok(), equal_to(True))
@patch_rest
def test_modify_pool(self):
cli = t_rest()
pool0 = UnityPool.get(cli=cli, _id='pool_30')
resp = pool0.modify(name="new_name",
is_fastvp_enabled=True,
alert_threshold=80)
assert_that(resp.is_ok(), equal_to(True))
@patch_rest
def test_delete_pool(self):
cli = t_rest()
pool = UnityPool.get(cli=cli, _id='pool_4')
resp = pool.delete()
assert_that(resp.is_ok(), equal_to(True))
@patch_rest
def test_create_filesystem_success(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
fs = pool.create_filesystem(
'nas_2', 'fs3', 3 * 1024 ** 3,
proto=FSSupportedProtocolEnum.CIFS,
tiering_policy=TieringPolicyEnum.AUTOTIER_HIGH)
assert_that(fs.get_id(), equal_to('fs_12'))
@patch_rest
def test_create_lun(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
lun = pool.create_lun("LunName", 100)
assert_that(lun, instance_of(UnityLun))
@patch_rest
def test_create_lun_with_same_name(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
def f():
pool.create_lun("openstack_lun")
assert_that(f, raises(UnityLunNameInUseError))
@patch_rest
def test_create_lun_on_spb(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
sp = UnityStorageProcessor(_id='spb', cli=t_rest())
lun = pool.create_lun("LunName", 100, sp=sp)
assert_that(lun, instance_of(UnityLun))
@patch_rest
def test_create_lun_with_muitl_property(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
lun = pool.create_lun("LunName", 100,
description="Hello World", is_thin=True,
is_repl_dst=True,
tiering_policy=TieringPolicyEnum.AUTOTIER_HIGH)
assert_that(lun, instance_of(UnityLun))
@patch_rest
def test_create_lun_with_compression_enabled_v4_2(self):
pool = UnityPool(_id='pool_1', cli=t_rest(version='4.2'))
lun = pool.create_lun("LunName", 100,
is_compression=True)
assert_that(lun, instance_of(UnityLun))
@patch_rest
def test_create_lun_with_compression_enabled(self):
pool = UnityPool(_id='pool_1', cli=t_rest(version='4.3'))
lun = pool.create_lun("LunName", 100,
is_compression=True)
assert_that(lun, instance_of(UnityLun))
@patch_rest
def test_create_lun_with_dedup_enabled(self):
pool = UnityPool(_id='pool_1', cli=t_rest(version='5.0'))
lun = pool.create_lun("LunName", 100,
is_compression=True,
is_advanced_dedup_enabled=True)
assert_that(lun, instance_of(UnityLun))
@patch_rest
def test_create_vmfs(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
vmfs = pool.create_vmfs(vmfs_name="VMFS datastore", size_gb=100)
assert_that(vmfs, instance_of(UnityLun))
@patch_rest
def test_create_vmfs_vmware_iscsi_paramters(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
vmfs = pool.create_vmfs(
vmfs_name="VMFS datastore 2", size_gb=100,
major_version=ESXFilesystemMajorVersionEnum.VMFS_5,
block_size=ESXFilesystemBlockSizeEnum._2MB)
assert_that(vmfs, instance_of(UnityLun))
@patch_rest
def test_create_nfs_share_success(self):
pool = UnityPool(_id='pool_5', cli=t_rest())
nas_server = UnityNasServer.get(cli=t_rest(), _id='nas_6')
job = pool.create_nfs_share(
nas_server,
name='513dd8b0-2c22-4da0-888e-494d320303b6',
size=4294967296)
assert_that(JobStateEnum.COMPLETED, equal_to(job.state))
@patch_rest
def test_create_nfs_share_success_all_params(self):
pool = UnityPool(_id='pool_5', cli=t_rest())
nas_server = UnityNasServer.get(cli=t_rest(), _id='nas_6')
size = 3 * 1024 ** 3
job = pool.create_nfs_share(
nas_server,
name='513dd8b0-2c22-4da0-888e-494d320303b7',
size=size, is_thin=True,
tiering_policy=TieringPolicyEnum.AUTOTIER_HIGH,
default_access=NFSShareDefaultAccessEnum.READ_WRITE,
min_security=NFSShareSecurityEnum.KERBEROS,
no_access_hosts_string='Host_1',
read_only_hosts_string='Host_2',
read_write_hosts_string='Host_3',
read_only_root_hosts_string='Host_5,Host_4',
root_access_hosts_string='Host_6',
anonymous_uid=10001,
anonymous_gid=10002,
export_option=20001)
assert_that(JobStateEnum.COMPLETED, equal_to(job.state))
@patch_rest
def test_create_nfs_share_failed(self):
def f():
pool = UnityPool(_id='pool_1', cli=t_rest())
nas_server = UnityNasServer.get(cli=t_rest(), _id='nas_1')
pool.create_nfs_share(
nas_server,
name='job_share_failed',
size=1)
assert_that(f, raises(JobStateError, 'too small'))
@patch_rest
def test_create_lun_with_snap_schedule(self):
pool = UnityPool(_id='pool_1', cli=t_rest())
schedule = UnitySnapSchedule(_id='snapSch_1', cli=t_rest())
lun = pool.create_lun(
lun_name='lun-with-snap-schedule',
snap_schedule=schedule)
assert_that(lun.get_id(), equal_to('sv_16455'))
@patch_rest
@ddt.data(
{'unity_version': '4.5.0', 'pool_id': 'pool_1', 'is_all_flash': True,
'expected': True},
{'unity_version': '4.5.1', 'pool_id': 'pool_2', 'is_all_flash': False,
'expected': False},
{'unity_version': '5.0.3', 'pool_id': 'pool_1', 'is_all_flash': True,
'expected': True},
{'unity_version': '5.0.3', 'pool_id': 'pool_2', 'is_all_flash': False,
'expected': False}
)
@ddt.unpack
def test_is_compression_supported(self, unity_version, pool_id,
is_all_flash, expected):
cli = t_rest(unity_version)
pool = UnityPool(_id=pool_id, cli=cli)
pool.is_all_flash = is_all_flash
assert_that(pool.is_compression_supported(), equal_to(expected))
    @patch_rest
    # Matrix of (software version, array model, pool, flash type) combinations;
    # advanced dedup requires a new-enough version, a supporting model, and an
    # all-flash pool.
    @ddt.data(
        {'unity_version': '4.3.0', 'unity_model': 'Unity 500',
         'pool_id': 'pool_1', 'is_all_flash': True, 'expected': False},
        {'unity_version': '4.3.1', 'unity_model': 'Unity 650F',
         'pool_id': 'pool_1', 'is_all_flash': True, 'expected': False},
        {'unity_version': '4.5.0', 'unity_model': 'Unity 500',
         'pool_id': 'pool_1', 'is_all_flash': True, 'expected': False},
        {'unity_version': '4.5.0', 'unity_model': 'Unity 500',
         'pool_id': 'pool_2', 'is_all_flash': False, 'expected': False},
        {'unity_version': '4.5.1', 'unity_model': 'Unity 650F',
         'pool_id': 'pool_1', 'is_all_flash': True, 'expected': True},
        {'unity_version': '4.5.1', 'unity_model': 'Unity 650F',
         'pool_id': 'pool_2', 'is_all_flash': False, 'expected': False},
        {'unity_version': '4.5.2', 'unity_model': 'Unity 640F',
         'pool_id': 'pool_2', 'is_all_flash': True, 'expected': False},
        {'unity_version': '4.5.2', 'unity_model': 'Unity 660F',
         'pool_id': 'pool_2', 'is_all_flash': True, 'expected': True},
        {'unity_version': '5.0.0', 'unity_model': 'Unity 500',
         'pool_id': 'pool_1', 'is_all_flash': True, 'expected': False},
        {'unity_version': '5.0.0', 'unity_model': 'Unity 500',
         'pool_id': 'pool_2', 'is_all_flash': False, 'expected': False},
        {'unity_version': '5.0.1', 'unity_model': 'Unity 480',
         'pool_id': 'pool_1', 'is_all_flash': True, 'expected': True},
        {'unity_version': '5.0.1', 'unity_model': 'Unity 480',
         'pool_id': 'pool_2', 'is_all_flash': False, 'expected': False},
        {'unity_version': '5.0.1', 'unity_model': 'Unity 470',
         'pool_id': 'pool_1', 'is_all_flash': True, 'expected': False},
        {'unity_version': '5.0.1', 'unity_model': 'Unity 490',
         'pool_id': 'pool_1', 'is_all_flash': True, 'expected': True},
        {'unity_version': '5.0.3', 'unity_model': 'Unity 880F',
         'pool_id': 'pool_1', 'is_all_flash': True, 'expected': True},
        {'unity_version': '5.0.3', 'unity_model': 'Unity 880F',
         'pool_id': 'pool_2', 'is_all_flash': False, 'expected': False},
    )
    @ddt.unpack
    def test_is_advanced_dedup_supported(self, unity_version, unity_model,
                                         pool_id, is_all_flash, expected):
        """Advanced-dedup support check across version/model/flash matrix."""
        # The system model is an API-backed property; patch it so each case
        # sees the model under test without extra REST fixtures.
        with mock.patch.object(UnitySystem, 'model', create=True,
                               return_value=unity_model,
                               new_callable=mock.PropertyMock):
            cli = t_rest(unity_version)
            pool = UnityPool(_id=pool_id, cli=cli)
            pool.is_all_flash = is_all_flash
            assert_that(pool.is_advanced_dedup_supported(), equal_to(expected))
|
<filename>supplementary_lessons/A_Group/KSEOP/1018_customer_func.py<gh_stars>0
import pymysql as my
class DBMgr:
    """Small CRUD helper around a pymysql connection to the `pythondb` DB.

    Each operation (re)opens a connection via initDB() and closes it when
    finished; rows come back as dicts thanks to DictCursor. All prompts and
    messages are in Korean (user-facing strings are left untouched).
    """
    def __init__(self):
        # Open an initial connection so self.conn always exists.
        self.initDB()
    def initDB(self):
        """(Re)connect to MySQL and return the live connection."""
        self.conn = my.connect(
            host = 'localhost',
            user='root',
            password='<PASSWORD>',
            db='pythondb',
            charset='utf8',
            cursorclass=my.cursors.DictCursor
        )
        return self.conn
    def freeDB(self):
        """Close the connection if one is open."""
        if self.conn:
            self.conn.close()
    def insert_data(self):
        """Prompt for customer fields and INSERT them, repeatedly.

        NOTE(review): the loop breaks only inside the `except` block, so a
        successful insert immediately prompts for the next customer; the
        loop exits only when a DB error occurs. Confirm this is intended.
        """
        while True:
            print('입력창 진입')  # "entering the input screen"
            # while True:
            name = input('이름을 입력하세요')      # prompt: enter name
            age = input('나이를 입력하세요')       # prompt: enter age
            gender = input('성별을 입력하세요')    # prompt: enter gender
            email = input('이메일을 입력하세요')   # prompt: enter email
            ###############################################################################
            # Disabled input validation (name must be a string, age numeric,
            # gender 'M'/'F', email must contain '@' and '.'). As written it
            # was also buggy (`gender != 'M' or 'F'` is always truthy and
            # input() always returns str), which may be why it is commented out.
            # if type(name) != str:
            #     print('이름은 문자만 입력하여 주십시오')
            #     continue
            # elif type(age) != int:
            #     print('나이는 숫자만 입력하여 주십시오')
            #     continue
            # elif gender != 'M' or 'F':
            #     print('성별은 M 또는 F 만 입력하여 주십시오')
            #     continue
            # elif '@' and '.' not in email:
            #     print('이메일 형식이 잘못되었습니다..')
            #     continue
            # break
            ##############################################################################
            try:
                self.initDB()
                # Acquire a cursor and run the parameterized INSERT.
                with self.conn.cursor() as cursor:
                    sql ='''
                    insert into customer_table(name, age, gender, email)
                    values( %s, %s, %s, %s );
                    '''
                    cursor.execute(sql, (name,age,gender,email))
                    # Commit so the row is actually persisted.
                    self.conn.commit()
                    # Affected-row count; INSERT has no fetch* result.
                    result = self.conn.affected_rows()
            except Exception as e:
                result = 0
                print('에러 ->', e)  # "error ->"
                str(e)  # NOTE(review): no-op expression; result is discarded
                break
            if self.conn:  # None is falsy, so this also guards a dead handle.
                self.conn.close()
        # Reached only via the except-branch `break` above.
        return print('삽입 종료')  # "insert finished"; print() returns None
    def select_all_data(self):
        """Fetch all customers and page through them interactively.

        NOTE(review): if the query fails, `rows` stays None and the
        `print(rows[page_num])` below raises TypeError; guard before use.
        """
        rows = None  # query result
        try:
            self.initDB()
            # Acquire a cursor and run the query.
            with self.conn.cursor() as cursor:
                # Prepare the SQL.
                sql ='''
                select * from customer_table ;
                '''
                # Execute it (SELECT takes only the SQL string here).
                cursor.execute(sql)
                # Fetch every row; this is what we display below.
                rows = cursor.fetchall()
        except Exception as e:
            rows = None
            print('에러 ->', e)  # "error ->"
        if self.conn:  # None is falsy, so this also guards a dead handle.
            self.conn.close()
        # Interactive paging over the result set, one customer at a time.
        page_num = 0
        print(rows[page_num])
        while True:
            # Show a customer, then ask N (next) / P (previous) / Q (quit).
            # NOTE(review): the error message below mentions 'D' but the
            # accepted keys are P, N and Q — confirm the intended wording.
            select_UpDown = input('다음 고객 정보를 조회하려면 N키를, 이전 고객을 조회하시려면 P키를, 조회를 종료하려면 Q키를 입력해주세요 :')
            if select_UpDown =='P':
                if page_num == 0 :
                    # Already at the first customer; .format here has no
                    # placeholder to fill (kept as-is).
                    print('첫번째 고객 정보입니다.'.format(page_num+1))
                    print(rows[page_num])
                else:
                    page_num -= 1
                    print('{}번째 고객 정보입니다.'.format(page_num+1))
                    print(rows[page_num])
                continue
            if select_UpDown =='N':
                if page_num == len(rows)-1:
                    # Already at the last customer.
                    print('마지막 고객정보 입니다.')
                    print(rows[page_num])
                else:
                    page_num += 1
                    print('{}번째 고객 정보입니다.'.format(page_num+1))
                    print(rows[page_num])
                continue
            elif select_UpDown =='Q':
                break
            else:
                print('P와 D, Q중 정확하게 입력하여 주세요')
                continue
        return None
    def update_data(self):
        """Prompt for a customer name and update their age and birth date.

        NOTE(review): updates a `birth` column, while insert_data writes
        name/age/gender/email — confirm the table actually has `birth`.
        """
        while True:
            name = input('수정할 고객의 이름을 입력하시오')          # name to update
            age = input('수정할 고객의 나이를 다시 입력하시오')      # new age
            birth = input('수정할 생년월일을 다시 입력하시오')       # new birth date
            try:
                self.initDB()
                # Acquire a cursor and run the parameterized UPDATE.
                with self.conn.cursor() as cursor:
                    sql ='''
                    update customer_table set age =%s, birth =%s where name=%s
                    '''
                    # Execute the update.
                    cursor.execute(sql, (age, birth, name,))
                    # Commit so the change is persisted.
                    self.conn.commit()
                    # Affected-row count; UPDATE has no fetch* result.
                    result = self.conn.affected_rows()
            except Exception as e:
                result = None
                print('에러 ->', e)  # "error ->"
            if self.conn:  # None is falsy, so this also guards a dead handle.
                self.conn.close()
            # Return the affected-row count (exits after one iteration).
            return result
    def del_data(self):
        """Delete the customer with the given name.

        NOTE(review): there is no handling for a name that matches no row;
        fetchall() after DELETE returns an empty tuple either way.
        """
        name = input('삭제할 고객의 이름을 입력하세요')  # name to delete
        rows = None  # query result
        try:
            self.initDB()
            # Acquire a cursor and run the parameterized DELETE.
            with self.conn.cursor() as cursor:
                # Prepare the SQL.
                sql ='''
                delete from customer_table where name = %s
                '''
                # Execute it.
                cursor.execute(sql, (name,))
                # DELETE yields no result set; fetchall() returns ().
                rows = cursor.fetchall()
                self.conn.commit()
        except Exception as e:
            rows = None
            print('에러 ->', e)  # "error ->"
        if self.conn:  # None is falsy, so this also guards a dead handle.
            self.conn.close()
        # Show and return the (empty) result.
        print(rows)
        return rows
        # print('삭제완료')######################################
|
<reponame>Beracah-Group/docker-microservices<filename>services/denting/src/api/v1/denting.py
# import modules, models and configs
from datetime import datetime, timedelta
import re
import jwt
from flask import jsonify, request, abort
# jsonify converts objects to JSON strings
# abort method either accepts an error code or it can accept a Response object
from src.api.__init__ import app, databases
from src.api.v1.models import Denting
from flask import render_template
databases.create_all()
'''
201 ok resulting to creation of something
200 ok
400 bad request
404 not found
401 unauthorized
409 conflict
'''
'''
(UTF) Unicode Transformation Format
its a character encoding
A character in UTF8 can be from 1 to 4 bytes long
UTF-8 is backwards compatible with ASCII
is the preferred encoding for e-mail and web pages
'''
# 404 error handler
@app.errorhandler(404)
def page_not_found(e):
    """Return a JSON body for unknown endpoints instead of Flask's HTML 404."""
    resp = jsonify({'error': 'The request can not be linked to, Please check your endpoint url'})
    resp.status_code = 404
    return resp
# 405 error handler
@app.errorhandler(405)
def method_not_allowed(e):
    """Return a JSON body when a route is hit with an unsupported HTTP verb."""
    resp = jsonify({'error': 'Invalid request method. Please check the request method being used'})
    resp.status_code = 405
    return resp
# 401 error handler
@app.errorhandler(401)
def unauthorized_error(e):
    """Return a JSON 401 response for invalid/missing auth tokens.

    Renamed from ``internal_server_error``: the original name collided with
    the 500 handler defined below, so the later ``def`` silently rebound the
    module-level name. Flask registers the handler object at decoration
    time, so runtime behavior was unaffected, but the duplicate name hid
    this handler from introspection and confused readers.
    """
    response = jsonify({"error": "Token is invalid"})
    response.status_code = 401
    return response
# 500 error handler
@app.errorhandler(500)
def internal_server_error(e):
    """Return a JSON body for unhandled server errors."""
    resp = jsonify({'error': 'Error, Server currently down, please restart the server to use the Denting API'})
    resp.status_code = 500
    return resp
@app.route('/')
def homepage():
    """Serve the welcome page for the Denting API."""
    return render_template('index.html')
# add denting type method
@app.route('/denting/api/v1/dentingpackage', methods=['POST'])
def add_denting_method():
    """Create a new denting package from a JSON body.

    Expects ``package``, ``price`` and ``description`` keys; responds 400 on
    a missing field, 409 on a duplicate package name, 201 on success.
    """
    request.get_json(force=True)
    try:
        d_package = request.json.get('package')
        d_price = request.json.get('price')
        d_description = request.json.get('description')
        # Validate required fields; empty strings/None are all rejected.
        error_msg = None
        if not d_package:
            error_msg = 'denting package has no name'
        elif not d_price:
            error_msg = 'denting package has no price tag'
        elif not d_description:
            error_msg = 'denting package has no description'
        if error_msg:
            response = jsonify({'Error': error_msg})
            response.status_code = 400
            return response
        # Query for the one candidate row instead of loading the whole table
        # and filtering in Python (the original did Denting.query.all()).
        duplicate = Denting.query.filter_by(package=d_package).first()
        if duplicate:
            response = jsonify({'Warning': 'this denting package already exists.'})
            response.status_code = 409
            return response
        d = Denting(package=d_package, price=d_price, description=d_description)
        d.save()
        response = jsonify({'status': 'denting package added successfully'})
        response.status_code = 201
        return response
    except KeyError:
        response = jsonify({'Error': 'Use the name for dict key.'})
        response.status_code = 500
        return response
# get denting package
@app.route('/denting/api/v1/dentingpackage', methods=['GET'])
def retrieve_denting_method():
    """List denting packages, optionally filtered by ``?q=`` and capped by
    ``?limit=`` (default 3, max 100).

    Bug fixes versus the original:
    - ``denting_data.clear()`` inside the serialization loop discarded all
      but the last matching row; every match is now returned.
    - the search tested ``dent.package in search`` (stored name a substring
      of the query — backwards); now ``search in dent.package``.
    - ``limit`` was parsed but never applied; it now caps the result list.
    """
    message = 'No denting packages have been added yet'

    def serialize(data):
        # Single place defining the JSON shape of a Denting row.
        return {
            'id': data.id,
            'package': data.package,
            'price': data.price,
            'description': data.description,
            'date-created': data.date_created,
            'date_modified': data.date_modified,
        }

    limit = int(request.args.get("limit", 3))
    if limit > 100:
        limit = 100
    respons = Denting.query.all()
    if not respons:
        response = jsonify({'error': 'No denting package has been created yet'})
        response.status_code = 200
        return response
    search = request.args.get("q", "")
    if search:
        res = [dent for dent in respons if search in dent.package]
        if not res:
            response = jsonify({'error': 'The denting package you searched does not exist'})
            return response
        denting_data = [serialize(data) for data in res[:limit]]
        response = jsonify(denting_data)
        response.status_code = 200
        return response
    denting_data = [serialize(data) for data in respons[:limit]]
    if not denting_data:
        response = jsonify({'error': message})
        response.status_code = 200
        return response
    response = jsonify(denting_data)
    response.status_code = 200
    return response
# get, update and delete denting package
@app.route('/denting/api/v1/dentingpackage/<int:dent_id>', methods=['GET', 'PUT', 'DELETE'])
def denting_by_id(dent_id):
    """Fetch, update (package name only) or delete one denting package by id.

    Returns 404 when the id matches no row, 200 for GET/DELETE success,
    201 for PUT success.
    """
    # payload = verify_token(request)
    # if isinstance(payload, dict):
    #     user_id = payload['user_id']
    # else:
    #     return payload
    # NOTE(review): loads every row and filters in Python; a
    # Denting.query.filter_by(id=dent_id) would be cheaper.
    res = Denting.query.all()
    denting_data = [dent for dent in res if dent.id == dent_id]
    if request.method == 'GET':
        data = {}
        # NOTE(review): the loop rebinds `data` each iteration, so only the
        # last (in practice the only) matching row survives the loop.
        for data in denting_data:
            data = {
                'id': data.id,
                'package': data.package,
                'price': data.price,
                'description': data.description,
                'date-created': data.date_created,
                'date_modified': data.date_modified,
            }
        # Membership test on the dict's *values*: works because the row id
        # appears among them, and {} has no values when nothing matched.
        if dent_id not in data.values():
            response = jsonify({'warning': 'the denting package does not exist.'})
            response.status_code = 404
            return response
        else:
            response = jsonify(data)
            response.status_code = 200
            return response
    elif request.method == 'DELETE':
        data = {}
        # Same rebinding/serialization pattern as the GET branch above.
        for data in denting_data:
            data = {
                'id': data.id,
                'package': data.package,
                'price': data.price,
                'description': data.description,
                'date-created': data.date_created,
                'date_modified': data.date_modified,
            }
        if dent_id not in data.values():
            response = jsonify({'warning': 'the denting package does not exist.'})
            response.status_code = 404
            return response
        else:
            # Re-fetch the row via the ORM and delete it in one transaction.
            delete = Denting.query.filter_by(id=dent_id).first()
            databases.session.delete(delete)
            databases.session.commit()
            response = jsonify({'Status': 'Denting package deleted successfully.'})
            response.status_code = 200
            return response
    elif request.method == 'PUT':
        request.get_json(force=True)
        data = Denting.query.filter_by(id=dent_id).first()
        if not data:
            response = jsonify({'warning': 'the denting package does not exist.'})
            response.status_code = 404
            return response
        else:
            try:
                # Only the package name is updatable via PUT.
                package = request.json['package']
                data.package = package
                databases.session.commit()
                data = {}
                # Serialize the ORM objects fetched above; they reflect the
                # committed package change since they are the same session
                # objects.
                for data in denting_data:
                    data = {
                        'id': data.id,
                        'package': data.package,
                        'price': data.price,
                        'description': data.description,
                        'date-created': data.date_created,
                        'date_modified': data.date_modified
                    }
                response = jsonify(data)
                response.status_code = 201
                return response
            except KeyError:
                response = jsonify({'error': 'Please use name for dict keys.'})
                response.status_code = 500
                return response
<gh_stars>1000+
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for makani.analysis.aero.hover_model.hover_model."""
import copy
import unittest
from makani.analysis.aero.hover_model import hover_model
import numpy as np
class HoverModelTest(unittest.TestCase):
  """Unit tests for hover_model panel sampling and local apparent wind."""

  # Shared M600a hover-configuration parameters used by all tests below.
  PARAMS = hover_model.GetParams('M600a', '04Hover')

  def testGetPanelSamplingPoints(self):
    # Check that all combinations of x, y, and z appear in the
    # sampling points for a panel aligned with body coordinates.
    panel = {
        'pos_b': [0.0, 0.0, 0.0],
        'chord': 1.0,
        'span': 2.0,
        'dcm_b2s': np.eye(3)
    }
    panel_points_b = hover_model._GetPanelSamplingPoints(
        panel, thickness_ratio=0.5, num_points=(3, 2, 2))
    # Expected stations: 3 chordwise points for chord 1.0, the two span
    # edges for span 2.0, and +/- half the 0.5 * chord thickness.
    for x in (-0.75, -0.25, 0.25):
      for y in (-1.0, 1.0):
        for z in (-0.25, 0.25):
          found = False
          for v in panel_points_b:
            if (abs(v[0] - x) < 1e-9 and abs(v[1] - y) < 1e-9
                and abs(v[2] - z) < 1e-9):
              found = True
          self.assertTrue(found)
    # Check that all combinations of x, y, and z appear in the
    # sampling points for a vertical panel.
    panel = {
        'pos_b': [1.0, 2.0, 3.0],
        'chord': 1.0,
        'span': 2.0,
        # Rotation mapping body to surface coordinates: the panel's span
        # axis points along body z (a vertical panel).
        'dcm_b2s': np.array([[1.0, 0.0, 0.0],
                             [0.0, 0.0, -1.0],
                             [0.0, 1.0, 0.0]])
    }
    panel_points_b = hover_model._GetPanelSamplingPoints(
        panel, thickness_ratio=0.5, num_points=(3, 2, 2))
    # Same expected stations as above, offset by the panel origin and with
    # the y and z extents swapped by the rotation.
    for x in (-0.75, -0.25, 0.25):
      for y in (-0.25, 0.25):
        for z in (-1.0, 1.0):
          found = False
          for v in panel_points_b:
            if (abs(v[0] - (x + panel['pos_b'][0])) < 1e-9
                and abs(v[1] - (y + panel['pos_b'][1])) < 1e-9
                and abs(v[2] - (z + panel['pos_b'][2])) < 1e-9):
              found = True
          self.assertTrue(found)

  def testCalcLocalApparentWind(self):
    # Check simple stationary case without propwash.
    apparent_wind_b = hover_model._CalcLocalApparentWind(
        [0.0, 10.0, 0.0], [0.0, 0.0, 0.0], 5.0, np.array([[np.pi / 2.0]]),
        np.array([[0.0]]), True, self.PARAMS)
    self.assertAlmostEqual(apparent_wind_b[0, 0][0], 0.0)
    self.assertAlmostEqual(apparent_wind_b[0, 0][1], 0.0)
    self.assertAlmostEqual(apparent_wind_b[0, 0][2], -5.0)
    # Check simple rotating case without propwash.
    apparent_wind_b = hover_model._CalcLocalApparentWind(
        [0.0, 10.0, 0.0], [2.0, 0.0, 0.0], 0.0, np.array([[0.0]]),
        np.array([[0.0]]), True, self.PARAMS)
    self.assertAlmostEqual(apparent_wind_b[0, 0][0], 0.0)
    self.assertAlmostEqual(apparent_wind_b[0, 0][1], 0.0)
    self.assertAlmostEqual(apparent_wind_b[0, 0][2], -20.0)
    # Check simple stationary case with only propwash. Compare the
    # velocity to what is expected from basic momentum theory. Note
    # that, even though we evaluate this function at a small axial
    # position, we use the far downstream velocity because this wake
    # model deals with this difference by contracting the radius near
    # the source.
    no_rotation_params = copy.copy(self.PARAMS)
    no_rotation_params['rotor_pitch'] = 0.0
    point_b = copy.copy(self.PARAMS['rotors'][0]['pos'])
    point_b[0] -= 0.01
    apparent_wind_b = hover_model._CalcLocalApparentWind(
        point_b, [0.0, 0.0, 0.0], 0.0, np.array([[0.0]]), np.array([[0.0]]),
        True, no_rotation_params)
    # Far-wake velocity from actuator-disk momentum theory:
    # v = 2 * sqrt(T / (2 * rho * pi * R^2)).
    wake_vel = 2.0 * np.sqrt(
        self.PARAMS['rotors'][0]['thrust']
        / (2.0 * self.PARAMS['phys']['rho']
           * np.pi * self.PARAMS['rotors'][0]['radius']**2.0))
    self.assertAlmostEqual(apparent_wind_b[0, 0][0], -wake_vel, delta=1e-2)
    self.assertAlmostEqual(apparent_wind_b[0, 0][1], 0.0)
    self.assertAlmostEqual(apparent_wind_b[0, 0][2], 0.0)
# Allow running this test module directly (e.g. `python hover_model_test.py`).
if __name__ == '__main__':
  unittest.main()
|
<reponame>chccc1994/Paddle2ONNX
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import copy
import six
import collections
from paddle2onnx.constant import NodeDomain
class Node(object):
    """A single op node in the graph: op type, attributes and named edges.

    Inputs and outputs are stored as lists of layer-name strings; Node
    instances passed in are converted to their ``layer_name``. Node identity
    (hash/equality) is the layer name.
    """

    def __init__(self,
                 op_type,
                 inputs,
                 outputs,
                 attrs,
                 layer_name,
                 domain=NodeDomain.RAW):
        self.domain = domain
        self.type = op_type
        self.attrs = attrs
        self.layer_name = layer_name
        self.set_inputs(inputs)
        self.set_outputs(outputs)

    def __hash__(self):
        # Identity is the (unique) layer name, mirroring __eq__ below.
        return hash(self.layer_name)

    def __eq__(self, other):
        if self.layer_name == other.layer_name:
            return True
        return False

    def __str__(self):
        node_str = ''
        attrs = ''
        for key, value in self.attrs.items():
            attrs += ', ' + key + '=' + str(value)
        node_str += " {} = {}::{}(inputs={}{}) \n".format(
            self.outputs, self.domain, self.type, self.inputs, attrs)
        return node_str

    def input(self, idx=None):
        """Return all input names, or the one at ``idx``."""
        if idx is None:
            return self.inputs
        return self.inputs[idx]

    def output(self, idx=None):
        """Return all output names, or the one at ``idx``."""
        if idx is None:
            return self.outputs
        return self.outputs[idx]

    def attr(self, name):
        """Return the attribute value or None when absent."""
        if name in self.attrs:
            return self.attrs[name]
        return None

    def set_inputs(self, inputs):
        """Normalize ``inputs`` (list / str / Node) to a list of names."""
        if isinstance(inputs, list):
            self.inputs = [
                ipt.layer_name if isinstance(ipt, Node) else ipt
                for ipt in inputs
            ]
        elif isinstance(inputs, six.string_types):
            self.inputs = [inputs]
        elif isinstance(inputs, Node):
            self.inputs = [inputs.layer_name]
        else:
            raise TypeError(
                'Inputs of node must be type: list, Node, or String but got {}'.
                format(type(inputs)))

    def set_outputs(self, outputs):
        """Normalize ``outputs`` (list / str / Node) to a list of names."""
        if isinstance(outputs, list):
            self.outputs = [
                opt.layer_name if isinstance(opt, Node) else opt
                for opt in outputs
            ]
        elif isinstance(outputs, six.string_types):
            self.outputs = [outputs]
        # Bug fix: the original tested `isinstance(ouputs, Node)` (typo),
        # which raised NameError whenever a bare Node was passed.
        elif isinstance(outputs, Node):
            self.outputs = [outputs.layer_name]
        else:
            raise TypeError(
                'Outputs of node must be type: list, Node, or String but got {}'.
                format(type(outputs)))
class Graph(object):
    """Ordered collection of Nodes plus graph inputs/outputs/parameters."""

    def __init__(self):
        self.parameters = {}
        self.node_map = collections.OrderedDict()
        self.input_nodes = list()
        self.output_nodes = list()
        # Per-op-type counters used to mint unique layer names.
        self.op_type_count = dict()

    def __hash__(self):
        # NOTE(review): `self.id` is never assigned anywhere in this class,
        # so hashing or comparing Graphs raises AttributeError. Kept as-is
        # pending a decision on what graph identity should be.
        return hash(self.id)

    def __eq__(self, other):
        if self.id == other.id:
            return True
        return False

    def __str__(self):
        graph_str = 'graph { \n'
        for node in self.input_nodes:
            graph_str += " input: {} \n".format(node.layer_name)
        for node in self.output_nodes:
            graph_str += " output: {} \n \n".format(node.layer_name)
        for name, node in self.node_map.items():
            graph_str += node.__str__()
        graph_str += ' }'
        return graph_str

    def set_output_nodes(self, node_list):
        """Replace the graph's output-node list (must be a list)."""
        if isinstance(node_list, list):
            self.output_nodes = node_list
        else:
            raise TypeError(
                'output_nodes of Graph must be type: list, but got {}'.format(
                    type(node_list)))

    def set_node_map(self, node_map):
        """Replace the node map, then exercise the topological sort."""
        if isinstance(node_map, dict):
            self.node_map = node_map
            # Bug fix: the original called self.generate_topo_sort(), a
            # method that does not exist anywhere in this class, so every
            # call to set_node_map raised AttributeError.
            # get_topo_sort_list() is the sort entry point defined below.
            self.get_topo_sort_list()
        else:
            raise TypeError('node_map of Graph must be type: list, but got {}'.
                            format(type(node_map)))

    def set_input_nodes(self, node_list):
        """Replace the graph's input-node list (must be a list)."""
        if isinstance(node_list, list):
            self.input_nodes = node_list
        else:
            raise TypeError(
                'input_nodes of Graph must be type: list, but got {}'.format(
                    type(node_list)))

    def set_parameters(self, parameters):
        """Replace the parameter dict (must be a dict)."""
        if isinstance(parameters, dict):
            self.parameters = parameters
        else:
            raise TypeError(
                'parameters of Graph must be type: dict, but got {}'.format(
                    type(parameters)))

    def generate_node_name(self, op_type):
        """Return a unique name ``<op_type>_<n>`` for a new node."""
        if op_type in self.op_type_count:
            self.op_type_count[op_type] += 1
        else:
            self.op_type_count[op_type] = 1
        # layer_name need follow https://github.com/onnx/onnx/blob/master/docs/OpConventions.md
        layer_name = op_type + '_' + str(self.op_type_count[op_type] - 1)
        return layer_name

    def insert_node(self, node):
        """Register a node; feed/fetch pseudo-ops are not stored."""
        if node.type not in ['feed', 'fetch']:
            self.node_map[node.layer_name] = node

    def make_node(self,
                  op_type,
                  inputs=None,
                  outputs=None,
                  attrs=None,
                  layer_name=None,
                  domain=None,
                  **kw):
        """Create a Node, register it in the graph, and return it."""
        if layer_name is None:
            layer_name = self.generate_node_name(op_type)
        if attrs is None:
            attrs = kw
        attrs.update(kw)
        if inputs is None:
            inputs = []
        if outputs is None:
            outputs = [layer_name]
        # Bug fix: the original positional call
        # Node(op_type, layer_name, inputs, outputs, attrs, domain)
        # bound layer_name to Node's `inputs` parameter (Node.__init__ order
        # is op_type, inputs, outputs, attrs, layer_name, domain). Keyword
        # arguments make the mapping explicit and correct.
        node = Node(op_type, inputs=inputs, outputs=outputs, attrs=attrs,
                    layer_name=layer_name, domain=domain)
        self.insert_node(node)
        return node

    def update_node(self,
                    node,
                    op_type=None,
                    inputs=None,
                    outputs=None,
                    attrs=None,
                    block=None,
                    move_to_end=True,
                    domain=None,
                    **kw):
        """Mutate an existing node in place; optionally move it to the end
        of the (ordered) node map."""
        if op_type is not None:
            node.type = op_type
        if inputs is not None:
            node.set_inputs(inputs)
        if outputs is not None:
            node.set_outputs(outputs)
        if attrs is None:
            attrs = kw
        attrs.update(kw)
        node.attrs = attrs
        if domain is not None:
            node.domain = domain
        if move_to_end:
            self.node_map.pop(node.layer_name)
            self.node_map[node.layer_name] = node
        return node

    def get_node(self, name, copy=False):
        """Look up a node by name; return a shallow copy when ``copy``."""
        if name not in self.node_map:
            raise TypeError('Node with name:{} not in graph'.format(name))
        if copy:
            # Bug fix: the boolean parameter `copy` shadowed the copy module,
            # so `copy.copy(...)` resolved to an attribute of True and raised
            # AttributeError whenever copy=True. A local module alias fixes
            # this while keeping the public parameter name intact.
            import copy as copy_module
            node = copy_module.copy(self.node_map[name])
        else:
            node = self.node_map[name]
        return node

    def remove_node_by_name(self, name):
        """Remove and return the node with ``name``; raise if absent."""
        if name in self.node_map:
            node = self.node_map.pop(name)
            return node
        raise TypeError('Node with name:{} not in graph'.format(name))

    def remove_node(self, node):
        """Remove a node given either a Node or its layer name."""
        if isinstance(node, Node):
            node = self.remove_node_by_name(node.layer_name)
            return node
        else:
            node = self.remove_node_by_name(node)
            return node

    def get_output_nodes_of_node(self, node):
        # NOTE(review): `self.edge_map` is never created in this class, so
        # the first branch can only raise AttributeError; get_adjacency_map()
        # looks like the intended data source. Kept as-is pending a decision.
        if node in self.edge_map:
            return self.edge_map[node]
        elif self.get_node(node.layer_name, copy=False):
            return []
        else:
            raise KeyError('Node with layer_name {} not in graph.egde_map'.
                           format(node.layer_name))

    def get_adjacency_map(self):
        """Build {producer node -> set of consumer nodes} from edges."""
        adjacency_map = {}
        for layer_name, current_node in self.node_map.items():
            inputs = current_node.inputs
            for ipt in inputs:
                for layer_name, node in self.node_map.items():
                    if current_node == node:
                        continue
                    outputs = node.outputs
                    if ipt in outputs:
                        if node not in adjacency_map:
                            adjacency_map[node] = set([current_node])
                        else:
                            adjacency_map[node].add(current_node)
        return adjacency_map

    def get_topo_sort_list(self):
        """Return the nodes in topological order (inputs first)."""
        topo_sort_list = list()
        adjacency_map = self.get_adjacency_map()
        # Seed with sink nodes (no consumers), then peel producers whose
        # consumers are all placed; reverse at the end for inputs-first order.
        for layer_name, node in self.node_map.items():
            if node not in adjacency_map:
                topo_sort_list.append(node)
        idx = 0
        while idx < len(topo_sort_list):
            current_node = topo_sort_list[idx]
            for input_node, output_nodes in adjacency_map.items():
                if current_node in output_nodes:
                    adjacency_map[input_node].remove(current_node)
                    if len(adjacency_map[input_node]) == 0:
                        topo_sort_list.append(input_node)
            idx += 1
        return topo_sort_list[::-1]
|
# Copyright 2020 InterDigital Communications, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from compressai.models import (
Cheng2020Anchor,
Cheng2020Attention,
FactorizedPrior,
JointAutoregressiveHierarchicalPriors,
MeanScaleHyperprior,
ScaleHyperprior,
)
from compressai.zoo import (
bmshj2018_factorized,
bmshj2018_hyperprior,
cheng2020_anchor,
cheng2020_attn,
mbt2018,
mbt2018_mean,
)
from compressai.zoo.image import _load_model
class TestLoadModel:
    """_load_model must reject unknown architectures and invalid qualities."""

    def test_invalid(self):
        for args in (("yolo", "mse", 1), ("mbt2018", "mse", 0)):
            with pytest.raises(ValueError):
                _load_model(*args)
class TestBmshj2018Factorized:
    """Checks for the bmshj2018-factorized model factory."""

    def test_params(self):
        """Qualities 1-5 build the N=128/M=192 variant, 6-8 the N=192/M=320
        one."""
        for i in range(1, 6):
            net = bmshj2018_factorized(i, metric="mse")
            assert isinstance(net, FactorizedPrior)
            assert net.state_dict()["g_a.0.weight"].size(0) == 128
            assert net.state_dict()["g_a.6.weight"].size(0) == 192
        for i in range(6, 9):
            net = bmshj2018_factorized(i, metric="mse")
            assert isinstance(net, FactorizedPrior)
            assert net.state_dict()["g_a.0.weight"].size(0) == 192
            # Consistency fix: the high-quality range previously omitted the
            # M=320 check that test_pretrained below and every sibling test
            # class perform.
            assert net.state_dict()["g_a.6.weight"].size(0) == 320

    def test_invalid_params(self):
        """Out-of-range qualities and the unsupported 'ssim' metric raise."""
        with pytest.raises(ValueError):
            bmshj2018_factorized(-1)
        with pytest.raises(ValueError):
            bmshj2018_factorized(10)
        with pytest.raises(ValueError):
            bmshj2018_factorized(10, metric="ssim")
        with pytest.raises(ValueError):
            bmshj2018_factorized(1, metric="ssim")

    @pytest.mark.slow
    @pytest.mark.pretrained
    @pytest.mark.parametrize(
        "metric", [("mse",), ("ms-ssim",)]
    )  # bypass weird pytest bug
    def test_pretrained(self, metric):
        """Downloaded weights must match the expected layer widths."""
        metric = metric[0]
        for i in range(1, 6):
            net = bmshj2018_factorized(i, metric=metric, pretrained=True)
            assert net.state_dict()["g_a.0.weight"].size(0) == 128
            assert net.state_dict()["g_a.6.weight"].size(0) == 192
        for i in range(6, 9):
            net = bmshj2018_factorized(i, metric=metric, pretrained=True)
            assert net.state_dict()["g_a.0.weight"].size(0) == 192
            assert net.state_dict()["g_a.6.weight"].size(0) == 320
class TestBmshj2018Hyperprior:
    """Checks for the bmshj2018-hyperprior model factory."""

    def test_params(self):
        """Qualities 1-5 build the N=128/M=192 variant, 6-8 the N=192/M=320
        one."""
        for i in range(1, 6):
            net = bmshj2018_hyperprior(i, metric="mse")
            assert isinstance(net, ScaleHyperprior)
            assert net.state_dict()["g_a.0.weight"].size(0) == 128
            assert net.state_dict()["g_a.6.weight"].size(0) == 192
        for i in range(6, 9):
            net = bmshj2018_hyperprior(i, metric="mse")
            assert isinstance(net, ScaleHyperprior)
            assert net.state_dict()["g_a.0.weight"].size(0) == 192
            assert net.state_dict()["g_a.6.weight"].size(0) == 320

    def test_invalid_params(self):
        """Out-of-range qualities and the unsupported 'ssim' metric raise."""
        with pytest.raises(ValueError):
            bmshj2018_hyperprior(-1)
        with pytest.raises(ValueError):
            bmshj2018_hyperprior(10)
        with pytest.raises(ValueError):
            bmshj2018_hyperprior(10, metric="ssim")
        with pytest.raises(ValueError):
            bmshj2018_hyperprior(1, metric="ssim")

    @pytest.mark.slow
    @pytest.mark.pretrained
    def test_pretrained(self):
        # test we can load the correct models from the urls
        # Bug fix: this class's pretrained test previously exercised
        # bmshj2018_factorized (a copy-paste from TestBmshj2018Factorized),
        # so the hyperprior download path was never covered.
        for i in range(1, 6):
            net = bmshj2018_hyperprior(i, metric="mse", pretrained=True)
            assert net.state_dict()["g_a.0.weight"].size(0) == 128
            assert net.state_dict()["g_a.6.weight"].size(0) == 192
        for i in range(6, 9):
            net = bmshj2018_hyperprior(i, metric="mse", pretrained=True)
            assert net.state_dict()["g_a.0.weight"].size(0) == 192
            assert net.state_dict()["g_a.6.weight"].size(0) == 320
class TestMbt2018Mean:
    """Checks for the mbt2018-mean (mean-scale hyperprior) model factory."""

    def test_parameters(self):
        # (quality range, expected g_a.0 / g_a.6 output channels)
        cases = ((range(1, 5), 128, 192), (range(5, 9), 192, 320))
        for qualities, n0, n6 in cases:
            for q in qualities:
                net = mbt2018_mean(q, metric="mse")
                assert isinstance(net, MeanScaleHyperprior)
                assert net.state_dict()["g_a.0.weight"].size(0) == n0
                assert net.state_dict()["g_a.6.weight"].size(0) == n6

    def test_invalid_params(self):
        """Out-of-range qualities and the unsupported 'ssim' metric raise."""
        for bad_call in (
            lambda: mbt2018_mean(-1),
            lambda: mbt2018_mean(10),
            lambda: mbt2018_mean(10, metric="ssim"),
            lambda: mbt2018_mean(1, metric="ssim"),
        ):
            with pytest.raises(ValueError):
                bad_call()

    @pytest.mark.slow
    @pytest.mark.pretrained
    def test_pretrained(self):
        # test we can load the correct models from the urls
        cases = ((range(1, 5), 128, 192), (range(5, 9), 192, 320))
        for qualities, n0, n6 in cases:
            for q in qualities:
                net = mbt2018_mean(q, metric="mse", pretrained=True)
                assert net.state_dict()["g_a.0.weight"].size(0) == n0
                assert net.state_dict()["g_a.6.weight"].size(0) == n6
class TestMbt2018:
    """Checks for the mbt2018 (joint autoregressive) model factory."""

    def test_ok(self):
        # (quality range, expected g_a.0 / g_a.6 output channels)
        cases = ((range(1, 5), 192, 192), (range(5, 9), 192, 320))
        for qualities, n0, n6 in cases:
            for q in qualities:
                net = mbt2018(q, metric="mse")
                assert isinstance(net, JointAutoregressiveHierarchicalPriors)
                assert net.state_dict()["g_a.0.weight"].size(0) == n0
                assert net.state_dict()["g_a.6.weight"].size(0) == n6

    def test_invalid_params(self):
        """Out-of-range qualities and the unsupported 'ssim' metric raise."""
        for bad_call in (
            lambda: mbt2018(-1),
            lambda: mbt2018(10),
            lambda: mbt2018(10, metric="ssim"),
            lambda: mbt2018(1, metric="ssim"),
        ):
            with pytest.raises(ValueError):
                bad_call()

    @pytest.mark.slow
    @pytest.mark.pretrained
    def test_pretrained(self):
        # test we can load the correct models from the urls
        cases = ((range(1, 5), 192, 192), (range(5, 9), 192, 320))
        for qualities, n0, n6 in cases:
            for q in qualities:
                net = mbt2018(q, metric="mse", pretrained=True)
                assert net.state_dict()["g_a.0.weight"].size(0) == n0
                assert net.state_dict()["g_a.6.weight"].size(0) == n6
class TestCheng2020:
    """Checks for the Cheng2020 anchor/attention model factories."""

    @pytest.mark.parametrize(
        "func,cls",
        (
            (cheng2020_anchor, Cheng2020Anchor),
            (cheng2020_attn, Cheng2020Attention),
        ),
    )
    def test_anchor_ok(self, func, cls):
        # Qualities 1-3 use the 128-channel variant, 4-6 the 192 one.
        for qualities, channels in ((range(1, 4), 128), (range(4, 7), 192)):
            for q in qualities:
                net = func(q, metric="mse")
                assert isinstance(net, cls)
                assert net.state_dict()["g_a.0.conv1.weight"].size(0) == channels

    @pytest.mark.slow
    @pytest.mark.pretrained
    def test_pretrained(self):
        """Downloaded anchor weights must match the expected layer widths."""
        for qualities, channels in ((range(1, 4), 128), (range(4, 7), 192)):
            for q in qualities:
                net = cheng2020_anchor(q, metric="mse", pretrained=True)
                assert net.state_dict()["g_a.0.conv1.weight"].size(0) == channels
|
from dataclasses import dataclass
from abc import ABC, abstractmethod
from schema import Schema
from typing import Dict, Any, Type, List, Sequence, Optional, Union
import numpy as np
import json
"""
Methods for loadings the various open 3D AV datasets into a common format for use throughout this repo
This is setup for the Audi a2d2, Waymo Open Dataset, NuScenes, Argoverse, ApolloScape, Bosch's Boxy Dataset, and Pandaset
Attempts to load the different modules for loading in the datasets, but should just pass if the setup doesn't exist
The loader should get the data from the native format for that loader, transform it to a common label/ format, then
return that to the caller.
"""
from abc import ABC
class Loader(ABC):
    """Base class for AV-dataset loaders.

    Subclasses implement get() (fetch data in the dataset's native format)
    and transform() (convert it to this repo's common format); calling the
    loader chains the two. The constructor reads feature flags from a plain
    config dict; everything defaults to "off".
    """
    point_cloud: bool
    segmentation: bool
    bounding_boxes: bool
    cameras: List[int]
    bus: bool
    include_annotations: bool
    include_reflectance: bool

    def __init__(self, config: Dict[str, Any]):
        self.point_cloud = config.get("include_points", False)
        self.cameras = config.get("include_cameras", [])
        self.segmentation = config.get("include_segmentation", False)
        self.bounding_boxes = config.get("include_bboxes", False)
        self.bus = config.get("include_bus", False)
        self.include_annotations = config.get("include_annotations", False)
        self.include_reflectance = config.get("include_reflectance", False)

    @property
    def name(self) -> str:
        """The loader's class name, used as its identifier."""
        return type(self).__name__

    def __call__(self, scene_id: Any, frame_numbers: Union[int, List[int]]):
        # Bug fix: the original line was missing its closing parenthesis,
        # which made the entire module a SyntaxError.
        return self.transform(self.get(scene_id, frame_numbers))

    @abstractmethod
    def get(self, scene_id: Any, frame_numbers: Union[int, List[int]]):
        """Gets an example, or set of examples """
        raise NotImplementedError

    @abstractmethod
    def transform(self, items: Any) -> Any:
        """ Convert the gathered annotations/frames to the common format"""
        raise NotImplementedError
class NuScenesLoader(Loader):
    """NuScenes has lidar segmentation for some frames, images, bounding boxes"""
    def get(self, scene_id: Any, frame_numbers: Union[int, List[int]]):
        """Fetch NuScenes frames in native format (not implemented yet)."""
        # Bug fix: the stub previously *returned* the NotImplementedError
        # class instead of raising it, so callers got a class object back.
        raise NotImplementedError

    def transform(self, items: Any) -> Any:
        """Convert native NuScenes data to the common format (not implemented yet)."""
        raise NotImplementedError
class A2D2Loader(Loader):
    """A2D2 has lidar segmentation of cameras, and bounding boxes"""
    def __init__(self, config: Dict[str, Any]):
        # Bug fix: was ``self.super.__init__(config)``, which raises
        # AttributeError at construction time; use super() properly.
        super().__init__(config)
        # Specific configuration file for A2D2; use a context manager so
        # the file handle is closed promptly (original leaked it).
        with open(config.get("a2d2_config", ""), 'r') as cfg_file:
            self.config = json.load(cfg_file)

    def get(self, scene_id: Any, frame_numbers: Union[int, List[int]]):
        """Fetch A2D2 frames in native format (not implemented yet)."""
        # Bug fix: raise instead of returning the exception class.
        raise NotImplementedError

    def transform(self, items: Any) -> Any:
        """Convert native A2D2 data to the common format (not implemented yet)."""
        raise NotImplementedError
class ApolloScapeLoader(Loader):
    """ApolloScape has stereo images, cameras, lidar, semantic segmentation of images"""
    def get(self, scene_id: Any, frame_numbers: Union[int, List[int]]):
        """Fetch ApolloScape frames in native format (not implemented yet)."""
        # Bug fix: raise instead of returning the exception class.
        raise NotImplementedError

    def transform(self, items: Any) -> Any:
        """Convert native ApolloScape data to the common format (not implemented yet)."""
        raise NotImplementedError
class ArgoverseLoader(Loader):
    """The Argoverse data includes lidar, images, and bounding boxes, as well as high quality maps"""
    def get(self, scene_id: Any, frame_numbers: Union[int, List[int]]):
        """Fetch Argoverse frames in native format (not implemented yet)."""
        # Bug fix: raise instead of returning the exception class.
        raise NotImplementedError

    def transform(self, items: Any) -> Any:
        """Convert native Argoverse data to the common format (not implemented yet)."""
        raise NotImplementedError
class WaymoLoader(Loader):
    """Waymo Open Dataset includes Lidar and camera information, with both 2D and 3D bounding boxes"""
    def get(self, scene_id: Any, frame_numbers: Union[int, List[int]]):
        """Fetch Waymo Open Dataset frames in native format (not implemented yet)."""
        # Bug fix: raise instead of returning the exception class.
        raise NotImplementedError

    def transform(self, items: Any) -> Any:
        """Convert native Waymo data to the common format (not implemented yet)."""
        raise NotImplementedError
class BoxyLoader(Loader):
    """ Bosch Boxy Dataset is only camera images and 2D bounding boxes with 5MP images, and ~2million vehicles"""
    def get(self, scene_id: Any, frame_numbers: Union[int, List[int]]):
        """Fetch Boxy frames in native format (not implemented yet)."""
        # Bug fix: raise instead of returning the exception class.
        raise NotImplementedError

    def transform(self, items: Any) -> Any:
        """Convert native Boxy data to the common format (not implemented yet)."""
        raise NotImplementedError
|
# Printable ASCII characters (code points 32-126) keyed by code point.
# Equivalent to the original explicit literal, generated instead of spelled out.
CHAR_DICT = {code: chr(code) for code in range(32, 127)}
# Python keywords/builtins recognised by the editor's syntax highlighter.
# NOTE(review): both 'else'/'else:' and 'try'/'try:' variants appear,
# presumably to match tokens with and without the trailing colon — confirm.
COMMANDS = ['print', 'class', 'def', 'if', 'elif', 'else', 'else:', 'try:', 'except:', 'for', 'while', 'return', 'try',
            'except', 'break', 'pass', 'continue', 'del', 'and', 'or', 'global', 'import', 'from', 'yield', 'raise',
            'lambda', 'assert', 'exec', 'finally', 'in', 'is', 'not']
# Inline editor commands accepted by the command interpreter.
# NOTE(review): 'replace' appears twice and both 'cut' and 'cut ' are listed —
# likely unintentional duplicates; membership tests are unaffected, so the
# list is left untouched pending confirmation.
EXECUTABLES = ['color', 'indent', 'unindent', 'delete', 'comment', 'uncomment', 'show', 'hide', 'mark', 'unmark',
               'find', 'whitespace', 'syntax', 'entry', 'run', 'split', 'splitscreen', 'quit', 'load', 'replace',
               'formatting', 'replace', 'new', 'save', 'saveas', 'revert', 'copy', 'paste', 'collapse', 'uncollapse',
               'undo', 'expand', 'debug', 'cut', 'cut ', 'goto', 'color default', 'protect', 'protect with ',
               'commands', 'isave', 'setcolor', 'timestamp', 'live', 'read', 'prev', 'saveprefs', 'select',
               'deselect', 'strip', 'help', 'previous', 'guide', 'pageguide', 'invert', 'setcolors',
               'acceleration', 'accelerate', 'tab', 'tabs']
# Three-letter command abbreviations. NOTE(review): 'col' and 'spl' each map
# to two different commands above ('color'/'collapse', 'split'/'splitscreen'),
# so abbreviation lookup is ambiguous for them — confirm intended resolution.
EXEC_ABBREVIATED = ['col', 'ind', 'uni', 'del', 'com', 'sho', 'hid', 'mar', 'unm', 'fin', 'whi', 'syn', 'ent', 'run',
                    'spl', 'spl', 'qui', 'loa', 'rep', 'new', 'sav', 'rev', 'cop', 'pas', 'col', 'unc', 'und', 'exp',
                    'deb', 'cut', 'got', 'rea', 'pro', 'isa', 'set', 'tim', 'for', 'liv', 'pre', 'hel', 'pag', 'gui',
                    'sel', 'inv']
# Set Help Guide text
HELP_GUIDE = ["####################################################################:",
"########################################################## lexed HELP",
"####################################################################:",
"",
"#####################################################################",
"####################### KEYBOARD NAVIGATION #########################",
"#####################################################################",
"arrow keys Navigate document",
"---------------------------------------------------------------------",
"tab key Indent (tab = 4 spaces)",
"---------------------------------------------------------------------",
"control 'w' Launch SAVE WINDOW, an alternate way to save document",
"---------------------------------------------------------------------",
"control 'e' Launch ENTRY WINDOW, alternate way to enter commands",
"---------------------------------------------------------------------",
"control 'd' If debug mode is on, move to next line with an error",
"---------------------------------------------------------------------",
"control 'f' Launches a search window",
"---------------------------------------------------------------------",
"control 'g' If 'find' has been used, find again (next match)",
"---------------------------------------------------------------------",
"control 'n' Moves to next 'marked' line",
"---------------------------------------------------------------------",
"control 'b' Moves back to previous 'marked' line",
"---------------------------------------------------------------------",
"control 'a' Deselect ALL lines",
"---------------------------------------------------------------------",
"control 'p' Move down ~ 1 page, or one line in splitscreen mode",
"---------------------------------------------------------------------",
"control 'u' Move up ~ 1 page, or one line in splitscreen mode",
"",
"#####################################################################",
"###################### ABOUT INLINE COMMANDS ########################",
"#####################################################################",
"Execute commands by typing them on a blank line in the editor and",
"pressing 'enter'. If there are no blank lines, press the down arrow",
"and one will be created at the end of the document.",
"",
"Commands can optionally be 'protected' with a text string to safeguard",
"against accidental execution. While protection is on, the protect",
"string is displayed in the top right corner of the terminal screen.",
"",
" Example: ##::save myfile.txt",
"",
"#####################################################################",
"######################### COMMANDS (MAIN) ###########################",
"#####################################################################",
"quit Quits lexed",
"---------------------------------------------------------------------",
"new Create empty document",
"---------------------------------------------------------------------",
"load [file]",
"",
" Loads file. If file name not given, file can be chosen",
" from a selection screen.",
"---------------------------------------------------------------------",
"read [file]",
" ",
" Loads file in 'read only' mode, editing not allowed. If",
" file name not given, file can be chosen from a selection",
" screen.",
"---------------------------------------------------------------------",
"save [file]",
"",
" Saves file. Assumes current directory if not specified",
"---------------------------------------------------------------------",
"saveas Opens confirmation window so filepath/name can be edited",
"---------------------------------------------------------------------",
"isave Increments filename & saves (+1 to number before .ext)",
"---------------------------------------------------------------------",
"revert Reverts file to last saved version",
"---------------------------------------------------------------------",
"saveprefs Saves current settings",
"---------------------------------------------------------------------",
"help [module] or [class] or [function]",
"",
" When no arguments given, opens HELP GUIDE. Otherwise",
" shows help or doc-string for given item.",
"---------------------------------------------------------------------",
"run Attempts to run your python code in a separate window.",
" *Currently,this feature only works in Linux with",
" the GNOME TERMINAL.",
"",
"#####################################################################",
"######################## COMMANDS (EDITING) #########################",
"NOTE: most editing commands allow you to act on a single line number,",
"multiple lines, a range of lines, selected lines only, marked lines",
"only, or the current line if executed from the ENTRY WINDOW",
"(control 'e').",
"",
" examples:",
" To copy line 10 ##copy 10",
" To copy multiple lines ##copy 10,15,20",
" To copy range ##copy 10-20",
" To copy marked lines ##copy marked",
" To copy selection (from ENTRY WINDOW) ##copy selection",
" To copy selection (inline command only) ##copy",
" To copy current line (ENTRY WINDOW only) ##copy",
"---------------------------------------------------------------------",
"select [line] or [line1,line2,line3] or [start-end] or ['marked']",
"or [function/class name] or ['all'] or ['up'] or ['down']",
"",
" Select lines by line numbers, by function/class name,",
" or marked lines only.",
"",
" Select up/down automatically selects all lines above",
" or below the current line, stopping when a blank line",
" is reached.",
"",
" NOTE: using select automatically deselects the current",
" selection. You can not add to a selection.",
"---------------------------------------------------------------------",
"deselect [line] or [line1,line2,line3] or [start-end] or ['marked']",
"or [function/class name] or ['all']",
"",
" Deselect specified line(s)",
"---------------------------------------------------------------------",
"invert Inverts selection",
"---------------------------------------------------------------------",
"copy [line] or [line1,line2,line3] or [start-end] or ['marked']",
"or ['selection']",
"",
" Copies line(s) into memory.",
"---------------------------------------------------------------------",
"paste [line]",
"",
" Pastes previously copied text into document.",
" With no arguments, paste occurs at current line.",
" If line is given, paste occurs there (line inserted).",
" From ENTRY WINDOW, text is appended to current line.",
"",
" example: ##paste 20",
"---------------------------------------------------------------------",
"delete [line] or [line1,line2,line3] or [start-end] or ['marked']",
"or ['selection']",
"",
" Deletes specified line(s)",
"---------------------------------------------------------------------",
"cut [line] or [line1,line2,line3] or [start-end] or ['marked']",
"or ['selection']",
"",
" Combines copy and delete into one operation.",
"---------------------------------------------------------------------",
"comment [line] or [line1,line2,line3] or [start-end] or ['marked']",
"or ['selection']",
"",
" Comments specified line(s)",
"---------------------------------------------------------------------",
"uncomment [line] or [line1,line2,line3] or [start-end] or ['marked']",
"or ['selection']",
"",
" Uncomments specified line(s)",
"---------------------------------------------------------------------",
"indent [line] or [line1,line2,line3] or [start-end] or ['marked']",
"or ['selection']",
"",
" Indents specified line(s)",
"---------------------------------------------------------------------",
"unindent [line] or [line1,line2,line3] or [start-end] or ['marked']",
"or ['selection']",
"",
" Unindents specified line(s)",
"---------------------------------------------------------------------",
"replace ['marked']['selected'] text1 with text2",
"",
" Replaces occurrences of 'text1' with 'text2'. Can act on",
" all lines in document or marked/selected lines only",
"",
" examples:",
" ##replace cout with print",
" ##replace marked true with True",
" ##replace selected false with False",
"---------------------------------------------------------------------",
"timestamp Appends current time & date to current line",
"---------------------------------------------------------------------",
"strip Removes trailing whitespace from ALL lines",
"---------------------------------------------------------------------",
"undo Rudimentary undo feature with 1 undo level",
"",
"#####################################################################",
"######################## COMMANDS (NAVIGATION) ######################",
"#####################################################################",
"goto [line] or ['start'] or ['end'] or [function/class name]",
"",
" Move to specified line number or function definition",
"---------------------------------------------------------------------",
"prev Returns to previous line (no args)",
"---------------------------------------------------------------------",
"find [text]",
"",
" Find specified text string and move to line containing it",
"---------------------------------------------------------------------",
"mark [line] or [line1,line2,line3] or [start-end] or [text]",
"",
" Marks/highlights lines with specified text or specified",
" line numbers.",
"---------------------------------------------------------------------",
"unmark [line] or [line1,line2,line3] or [start-end] or [text] or [all]",
"",
" Unmarks line(s) that are marked. Use 'unmark all' to",
" unmark the entire document.",
"---------------------------------------------------------------------",
"collapse [line] or [line1,line2,line3] or [start-end] or ['functions']",
" or ['all'] or ['tab' number] or ['marked'] or ['selection']",
"",
" Collapses lines with greater indentation than specified",
" line(s). Simplifies navigation by hiding the details.",
" It can even collapse lines with a specified indentation.",
" example: ##collapse tab 2",
"---------------------------------------------------------------------",
"expand [line] or [line1,line2,line3] or [start-end] or ['all'] or",
" ['marked'] or ['selection']",
"",
" Expands (uncollapses) specified line(s). Use 'expand all'",
" to expand all lines.",
"",
"#####################################################################",
"###################### COMMANDS (SETTINGS) ##########################",
"#####################################################################",
"NOTE: Changes not saved to settings file until 'saveprefs' is used!",
" Some settings can alternatively be changed by using the",
" commands 'show' & 'hide'.",
"---------------------------------------------------------------------",
"syntax [on/off]",
"(show/hide syntax)",
"",
" Toggles syntax highlighting",
"---------------------------------------------------------------------",
"debug [on/off]",
"(show/hide bugs)",
"",
" Toggles debug mode. Lines with errors marked with ##!",
"---------------------------------------------------------------------",
"formatting [on/off]",
"(show/hide formatting)",
"",
" Toggles comment formatting",
"---------------------------------------------------------------------",
"whitespace [on/off]",
"(show/hide whitespace)",
"",
" Toggles visible whitespace",
"---------------------------------------------------------------------",
"tabs [on/off]",
"(show/hide tabs)",
"",
" Toggles visible indentation (tabs)",
"---------------------------------------------------------------------",
"entry [on/off]",
"(show/hide entry)",
"",
" Toggles entry highlighting (highlights the current line being",
" edited)",
"---------------------------------------------------------------------",
"live [on/off]",
"(show/hide live)",
"",
" Toggles live (real-time) syntax highlighting on entry line",
"---------------------------------------------------------------------",
"split [on/off] or [line]",
"(show/hide splitscreen)",
"",
" Toggles splitscreen. If a line number is the argument,",
" splitscreen display begins at that line",
"---------------------------------------------------------------------",
"pageguide [on/off] or [number]",
"(show/hide guide)",
"",
" Toggles page guide. If a number is the argument,",
" guide occurs after that number of characters.",
" The default page guide is 80 characters wide.",
"",
" NOTE: Terminal width must be at least 88 characters",
" to view 80 character page guide!",
"---------------------------------------------------------------------",
"protect [on/off] or ['with' text]",
"",
" Toggles command protection and can optionally change",
" protect string.",
"",
" example: ##protect with ::",
"---------------------------------------------------------------------",
"commands [on/off]",
"",
" Toggles inline (live) commands.",
" When off, ENTRY WINDOW is the ONLY way to enter commands.",
"---------------------------------------------------------------------",
"auto [on/off]",
"",
" Toggles automation of settings. When on, automatically",
" adjusts settings to match file type.",
" '.py' - debug mode on, syntax highlighting on, protect off",
" other - debug off, syntax highlighting off, protect on",
"---------------------------------------------------------------------",
"acceleration [on/off]",
"",
" Toggles cursor acceleration. While on, cursor speed",
" increases over time as you hold down the arrow keys.",
"---------------------------------------------------------------------",
"color on Manually start color if supported by your terminal",
"---------------------------------------------------------------------",
"color default",
"",
" Set colors to their default values",
"---------------------------------------------------------------------",
"setcolors Opens selection screen to allow you to set colors used",
" with syntax highlighting",
"",
"#####################################################################",
"######################## SPECIAL FEATURES ###########################",
"#####################################################################",
"HELP",
" Similar to the help() feature in the python interpreter, attempts",
" to show the help file/doc string for given module, class, or",
" function. Can only show help for imported modules and classes &",
" functions defined within your code.",
"",
" Navigate up/down using up and down arrow keys, or the 'b' key",
" and spacebar. Left & right arrows will change 'pages'.",
" Press 's' to go to the start of the document.",
" Press 'e' to go to the end.",
" Press 'q' to quit (or any key if help is less than one page).",
"",
" example:",
" ##import os",
" ##help os",
" ##help os.path.exists",
"---------------------------------------------------------------------",
"DEBUG MODE",
" Realtime python debugging. While it won't catch all errors, it",
" should flag simple errors such as incorrect use of the comparison",
" operator '==' or forgetting to indent when necessary.",
"",
" DEBUG MODE (as well as syntax highlighting) can be processor",
" intensive and should be turned off on older machines to improve",
" performance.",
"---------------------------------------------------------------------",
"COMMENT FORMATTING",
" When on, '#' is a regular comment, '##' is a formatted comment.",
" Formats:",
" '##TEXT' - Left Justified",
"##Hello World",
" '###TEXT' - Right Justified",
"###Hello World",
" '##TEXT##' - Centered",
"##Hello World##",
" '##=' - Separator (fills line with last character)",
"##=",
" '##' - empty (same color as centered)",
"###",
" '....##TEXT' - Comment Block (after indent or other characters)",
" ##BLOCK OF TEXT",
"---------------------------------------------------------------------",
"READ MODE",
" In Read Mode, file can be viewed but not edited. Debugging and",
" syntax highlighting (save for comment formatting) are turned off.",
" Navigate up/down using up and down arrow keys, or the 'b' key",
" and spacebar. Left & right arrows will change 'pages'.",
" Press 's' to go to the start of the document.",
" Press 'e' to go to the end.",
" Press 'q' to quit.",
"---------------------------------------------------------------------",
"ENCRYPTION",
" When saving, you can choose to save a password protected file with",
" basic encryption (in theory, the longer the password the better",
" the encryption). Simply save with the extension '.pwe', short for",
" 'lexed encryption'.",
"",
" NOTE: It is recommended that you save a non-encrypted version",
" of your file first. If you forget your password or something",
" goes wrong during the encryption process, your file will not",
" be recoverable! No guarantees are made as to the strength of",
" the encryption, ##USE AT YOUR OWN RISK!",
"",
"#####################################################################",
"############################## FLAGS ################################",
"#####################################################################",
"lexed [OPTIONS] [FILENAME]",
"",
"-t, --text Open in text mode with protection on",
"-s, --string Sets protection string (and turns protection on)",
"-p, --python Open in python mode with syntax and debugging on",
"-c, --color Open in color mode with default colors",
"-m, --mono Open in monochrome mode",
"-i, --inverted Open in monochrome mode with inverted display",
"-n, --nobold Turns off bold text",
"-r, --read Opens specified file in 'read only' mode",
"-h, --help Display Help Guide",
"",
" example:",
" ##lexed --text --string '::' ~/Desktop/myfile.txt",
"",
"####################### Last edited on 07/22/08 01:05:49 PM (Tuesday)"]
|
from datetime import timedelta
import ipaddress
import time
import voluptuous as vol
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.components.fan import (
SUPPORT_SET_SPEED,
FanEntity,
)
from .const import (
DOMAIN,
SPEED_OFF,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_HIGH,
SPEED_MAX,
VALUE_TO_SPEED,
SPEED_TO_VALUE,
SIGNAL_HELIOS_STATE_UPDATE,
SCAN_INTERVAL,
CONF_HOST,
CONF_NAME,
)
import eazyctrl
async def async_setup(hass: HomeAssistant, config: dict):
    """Store the YAML configuration for this domain in ``hass.data``."""
    domain_store = hass.data.setdefault(DOMAIN, {})
    domain_store["config"] = config.get(DOMAIN) or {}
    return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry):
    """Set up a Helios ventilation unit from a config entry.

    Creates the eazyctrl client and state proxy, registers the
    ``fan_boost`` service, forwards setup to the sensor/switch/fan
    platforms, and starts periodic polling.
    """
    host = config_entry.data[CONF_HOST]
    name = config_entry.data[CONF_NAME]
    client = eazyctrl.EazyController(host)
    state_proxy = HeliosStateProxy(hass, client)
    # Bug fix: merge into any existing domain data instead of replacing it —
    # assigning a fresh dict here discarded the "config" key that
    # async_setup had already stored under hass.data[DOMAIN].
    hass.data.setdefault(DOMAIN, {}).update(
        {"client": client, "state_proxy": state_proxy, "name": name})
    def handle_fan_boost(call):
        """Service handler: start boost for `duration` minutes at `speed`;
        a non-positive duration stops boost mode instead."""
        duration = call.data.get('duration', 60)
        speed = call.data.get('speed', 'high')
        if int(duration) > 0:
            hass.data[DOMAIN]['state_proxy'].start_boost_mode(speed, duration)
        else:
            hass.data[DOMAIN]['state_proxy'].stop_boost_mode()
    hass.services.async_register(DOMAIN, "fan_boost", handle_fan_boost)
    # Forward entry setup to each platform this integration provides.
    hass.async_create_task(hass.config_entries.async_forward_entry_setup(config_entry, "sensor"))
    hass.async_create_task(hass.config_entries.async_forward_entry_setup(config_entry, "switch"))
    hass.async_create_task(hass.config_entries.async_forward_entry_setup(config_entry, "fan"))
    # Poll the unit on a fixed interval, and once immediately so entities
    # have data before the first scheduled update.
    async_track_time_interval(hass, state_proxy.async_update, SCAN_INTERVAL)
    await state_proxy.async_update(0)
    return True
class HeliosStateProxy:
    """Caches the state of a Helios ventilation unit and writes commands to it.

    Communicates through an ``eazyctrl`` client using raw variable registers
    (``v00xxx``). Register meanings below are inferred from usage in this
    file — TODO confirm against the Helios easyControls variable list:
      - v00101: operating mode ('0' = auto, '1' = manual)
      - v00091/v00092/v00093/v00094: boost duration / boost speed /
        remaining boost time / boost enable flag
      - v00103: current fan speed in percent
    """
    def __init__(self, hass, client):
        self._hass = hass        # HomeAssistant instance, used for dispatcher signals
        self._client = client    # eazyctrl.EazyController connected to the unit
        self._auto = None        # True when the unit reports automatic mode
        self._speed = None       # last known fan stage
        self._percent = None     # last known speed in percent
        self._boost_time = 0     # last known remaining boost time
    def set_speed(self, speed: str):
        """Switch the unit to manual mode and set the requested fan stage."""
        # '1' presumably selects manual mode (see v00101 note above) — confirm.
        self._client.set_variable('v00101', '1')
        self._auto = False
        self._client.set_feature('fan_stage', SPEED_TO_VALUE[speed])
        self._speed = SPEED_TO_VALUE[speed]
        self.fetchPercent()
    def start_boost_mode(self, speed: str, time: int):
        """Start boost mode at *speed* for *time* (duration units per device spec).

        NOTE(review): the parameter name ``time`` shadows the imported
        ``time`` module within this method; kept for API compatibility.
        """
        self._client.set_variable('v00093', '0')
        self._client.set_variable('v00092', SPEED_TO_VALUE[speed])
        self._client.set_variable('v00091', time)
        self._client.set_variable('v00094', '1')
        self.fetchPercent()
    def stop_boost_mode(self):
        """Disable boost mode on the unit."""
        self._client.set_variable('v00094', '0')
        self.fetchPercent()
    def set_auto_mode(self, enabled: bool):
        """Switch the unit between automatic ('0') and manual ('1') mode."""
        self._client.set_variable('v00101', '0' if enabled else '1')
        self._auto = enabled
        self.fetchPercent()
    def get_speed(self):
        """Return the last known fan stage (cached; no device I/O)."""
        return self._speed
    def get_speed_percent(self):
        """Return the last known speed in percent (cached; no device I/O)."""
        return self._percent
    def is_auto(self) -> bool:
        """Return True when the unit was last seen in automatic mode."""
        return self._auto
    def get_boost_time(self) -> int:
        """Return the last known remaining boost time (cached)."""
        return self._boost_time
    async def async_update(self, event_time):
        """Poll mode and fan stage from the unit, then refresh derived state."""
        self._auto = self._client.get_variable("v00101", 1, conversion=int) == 0
        self._speed = self._client.get_feature('fan_stage')
        self.fetchPercent()
    def fetchPercent(self):
        """Re-read boost time and speed percent, then notify entities.

        NOTE(review): ``time.sleep(2)`` blocks the event loop when reached
        via ``async_update``; the delay presumably lets the unit settle
        after a write — confirm, and consider running in an executor.
        """
        time.sleep(2)
        self._boost_time = self._client.get_variable("v00093", 3, conversion=int)
        self._percent = self._client.get_variable("v00103", 3, conversion=int)
        # Broadcast so all entities backed by this proxy refresh their state.
        async_dispatcher_send(self._hass, SIGNAL_HELIOS_STATE_UPDATE)
|
<reponame>RULCSoft/cloudroast
"""
Copyright 2018 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from cafe.drivers.unittest.decorators import tags
from cafe.engine.clients.ping import PingClient
from cloudcafe.auth.provider import AuthProvider
from cloudcafe.compute.config import ComputeAdminEndpointConfig, \
ComputeAdminUserConfig, ComputeAdminAuthConfig
from cloudcafe.compute.servers_api.client import ServersClient
from cloudcafe.networking.networks.personas import ServerPersona
from cloudcafe.networking.networks.common.tools.connectivity import \
Connectivity
from cloudroast.networking.networks.fixtures import NetworkingComputeFixture
from cloudroast.networking.networks.scenario.common import ScenarioMixin
class TestFixedIPsConnectivity(NetworkingComputeFixture, ScenarioMixin):
    """
    Testing connectivity between servers by adding fixed ips to existing
    servers.
    """
    # Prefix used when naming the network/keypair/servers this test creates.
    NAMES_PREFIX = 'fixed_ips_connectivity'
    # Path where the shared private key is placed inside each guest VM.
    PRIVATE_KEY_PATH = '/root/pkey'
    # Upper bound on polling attempts for guest-side checks (ifconfig/ssh).
    MAX_RETRIES = 5
    # NOTE(review): the admin auth handshake and ServersClient below run at
    # class-definition (import) time rather than in setUpClass — confirm
    # this is intentional, as it performs I/O on import.
    admin_user = ComputeAdminUserConfig()
    compute_admin_endpoint = ComputeAdminEndpointConfig()
    auth_endpoint_config = ComputeAdminAuthConfig()
    access_data = AuthProvider.get_access_data(
        auth_endpoint_config, admin_user)
    compute_service = access_data.get_service(
        compute_admin_endpoint.compute_endpoint_name)
    url = compute_service.get_endpoint(
        compute_admin_endpoint.region).public_url
    servers_client = ServersClient(
        url, access_data.token.id_, 'json', 'json')
    # Template for server-to-server ssh using the transferred private key.
    SSH_COMMAND = ('ssh -o UserKnownHostsFile=/dev/null '
                   '-o StrictHostKeyChecking=no -o ConnectTimeout=60 '
                   '-i {private_key_path} {user}@{ip_address}')
    # Failure message used by the remote-ssh assertions.
    ssh_msg = ('Failed remote ssh connection from '
               'server {0} to server {1}')
    @classmethod
    def setUpClass(cls):
        """Create the shared fixture: an isolated network, a keypair, and two
        servers attached to public, service and isolated networks; then add
        extra fixed IPs to every network on both servers and build personas.
        """
        super(TestFixedIPsConnectivity, cls).setUpClass()
        # Isolated (tenant) IPv4 network used alongside public & service nets.
        network_name = 'network_{0}'.format(cls.NAMES_PREFIX)
        cls.network = cls.create_server_network(name=network_name, ipv4=True)
        cls.delete_networks.append(cls.network.id)
        keypair_name = 'key_{0}'.format(cls.NAMES_PREFIX)
        cls.keypair = cls.create_keypair(name=keypair_name)
        cls.delete_keypairs.append(cls.keypair.name)
        svr_name_1 = 'svr_1_{0}'.format(cls.NAMES_PREFIX)
        svr_name_2 = 'svr_2_{0}'.format(cls.NAMES_PREFIX)
        # Each server is attached to public, service and the isolated network.
        network_ids = [cls.public_network_id, cls.service_network_id,
                       cls.network.id]
        cls.server1 = cls.create_test_server(
            name=svr_name_1, key_name=cls.keypair.name,
            network_ids=network_ids, active_server=False)
        cls.server2 = cls.create_test_server(
            name=svr_name_2, key_name=cls.keypair.name,
            network_ids=network_ids, active_server=False)
        cls.servers = [cls.server1, cls.server2]
        # Expected per-network IPv4 counts: the original fixed IP plus the
        # configured number of additional fixed IPs.
        cls.FIXED_IPS_TO_ADD = cls.net.config.fixed_ips_to_add
        cls.PNET_FIX_IPv4_COUNT = cls.FIXED_IPS_TO_ADD + 1
        cls.SNET_FIX_IPv4_COUNT = cls.FIXED_IPS_TO_ADD + 1
        cls.INET_FIX_IPv4_COUNT = cls.FIXED_IPS_TO_ADD + 1
        cls.TOTAL_INITIAL_IPS_SERVER = 3
        cls.TOTAL_NETWORKS_ATTACHED_TO_SERVER = 3
        cls.TOTAL_IPS_SERVER = cls.TOTAL_INITIAL_IPS_SERVER + \
            (cls.FIXED_IPS_TO_ADD * cls.TOTAL_NETWORKS_ATTACHED_TO_SERVER)
        # Add fixed IPs to servers
        for server in cls.servers:
            cls.add_fixed_ips_network(server, cls.public_network_id,
                                      number_fixed_ips=cls.FIXED_IPS_TO_ADD)
            cls.add_fixed_ips_network(server, cls.service_network_id,
                                      number_fixed_ips=cls.FIXED_IPS_TO_ADD)
            cls.add_fixed_ips_network(server, cls.network.id,
                                      number_fixed_ips=cls.FIXED_IPS_TO_ADD)
        # Personas verify the expected IP counts per network on each server.
        cls.server_persona1 = ServerPersona(
            server=cls.server1, pnet=True, snet=True, inet=True,
            pnet_fix_ipv4_count=cls.PNET_FIX_IPv4_COUNT,
            snet_fix_ipv4_count=cls.SNET_FIX_IPv4_COUNT,
            inet_fix_ipv4_count=cls.INET_FIX_IPv4_COUNT,
            network=cls.network, keypair=cls.keypair, ssh_username='root')
        cls.server_persona2 = ServerPersona(
            server=cls.server2, pnet=True, snet=True, inet=True,
            pnet_fix_ipv4_count=cls.PNET_FIX_IPv4_COUNT,
            snet_fix_ipv4_count=cls.SNET_FIX_IPv4_COUNT,
            inet_fix_ipv4_count=cls.INET_FIX_IPv4_COUNT,
            network=cls.network, keypair=cls.keypair,
            ssh_username='root')
        server_ids = [cls.server_persona1.server.id,
                      cls.server_persona2.server.id]
        cls.delete_servers.extend(server_ids)
        # Place the private key in each guest so servers can ssh to each other.
        cls._transfer_private_key_to_vm(
            cls.server_persona1.remote_client.ssh_client,
            cls.keypair.private_key, cls.PRIVATE_KEY_PATH)
        cls._transfer_private_key_to_vm(
            cls.server_persona2.remote_client.ssh_client,
            cls.keypair.private_key, cls.PRIVATE_KEY_PATH)
@tags('admin', 'positive')
def test_server_ifconfig(self):
"""Testing ifconfig on servers"""
servers = [self.server_persona1, self.server_persona2]
for server in servers:
ips = []
ips.extend(server.pnet_fix_ipv4)
ips.extend(server.snet_fix_ipv4)
ips.extend(server.inet_fix_ipv4)
rm_client = server.remote_client
ifconfig_ips = []
stdout = None
retry_count = 0
while stdout is None or len(ifconfig_ips) != self.TOTAL_IPS_SERVER:
del ifconfig_ips[:]
if retry_count < self.MAX_RETRIES:
ifconfig_output = rm_client.ssh_client.\
execute_shell_command("hostname -I")
stdout = ifconfig_output.stdout
pattern = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
matches = pattern.finditer(stdout)
for match in matches:
ifconfig_ips.append(match.group())
if len(ifconfig_ips) == self.TOTAL_IPS_SERVER or \
retry_count == self.MAX_RETRIES:
break
retry_count += 1
server_ip_not_found = False
for ip in ips:
if ip not in ifconfig_ips:
server_ip_not_found = True
break
self.assertFalse(server_ip_not_found,
msg="server {} ip {} not found in output of "
"ifconfig {}".
format(server, ip, ifconfig_ips))
@tags('admin', 'positive')
def test_public_ping(self):
"""Testing ping on servers with public network"""
msg_err = 'Public ping to IP address {0} - FAILED'
msg_ok = 'Public ping to IP address {0} - OK'
pub_ipv4_addr = []
pub_ipv4_addr.extend(self.server_persona1.pnet_fix_ipv4)
pub_ipv4_addr.extend(self.server_persona2.pnet_fix_ipv4)
all_pub_ips_ping_result = []
failure_flag = False
for ip_addr in pub_ipv4_addr:
ip_addr_reachable = PingClient.ping(ip_addr, 4)
if ip_addr_reachable:
all_pub_ips_ping_result.append(msg_ok.format(ip_addr))
else:
all_pub_ips_ping_result.append(msg_err.format(ip_addr))
failure_flag = True
msg = 'Got connectivity failures. Ping Results: {0}'
# Fail the test if any ping failure is found
self.assertFalse(failure_flag, msg.format(all_pub_ips_ping_result))
    @tags('admin', 'positive')
    def test_remote_public_ping(self):
        """Testing public network remote ping on servers"""
        # Ping persona1's public (pnet) fixed IPs from persona2.
        self._test_remote_ping(port_type='pnet')
@tags('admin', 'positive')
def test_remote_private_ping(self):
"""Testing private network remote ping on servers"""
self._test_remote_ping(port_type='snet')
@tags('admin', 'positive')
def test_remote_isolated_ping(self):
"""Testing isolated network remote ping on servers"""
self._test_remote_ping(port_type='inet')
def _test_remote_ping(self, port_type):
"""Testing remote ping on servers"""
conn = Connectivity(self.server_persona2, self.server_persona1)
icmp_basic = dict(port_type=port_type, protocol='icmp', ip_version=4)
rp = conn.verify_personas_conn(**icmp_basic)
result = rp[0]
ping_result = result['connection']
self.assertTrue(ping_result, rp)
    @tags('admin', 'positive')
    def test_remote_public_ssh(self):
        """Testing Public remote ssh on servers"""
        # SSH to persona1's first public (pnet) fixed IPv4 address.
        self._test_remote_ssh(self.server_persona1.pnet_fix_ipv4[0])
    @tags('admin', 'positive')
    def test_remote_private_ssh(self):
        """Testing ServiceNet remote ssh on servers"""
        # SSH to persona1's first service (snet) fixed IPv4 address.
        self._test_remote_ssh(self.server_persona1.snet_fix_ipv4[0])
    @tags('admin', 'positive')
    def test_remote_isolated_ssh(self):
        """Testing isolated network A remote ssh on servers"""
        # SSH to persona1's first isolated (inet) fixed IPv4 address.
        self._test_remote_ssh(self.server_persona1.inet_fix_ipv4[0])
    def _test_remote_ssh(self, target_ip_addr):
        """Testing remote ssh on servers

        SSH from server persona2 to ``target_ip_addr`` using the private
        key previously transferred to the guest, retrying up to
        MAX_RETRIES times, and assert that a shell prompt is reached.
        """
        rc2 = self.server_persona2.remote_client
        ssh_cmd = self.SSH_COMMAND.format(
            private_key_path=self.PRIVATE_KEY_PATH,
            user=self.server_persona1.ssh_username, ip_address=target_ip_addr)
        stdout = None
        ssh_connection_established = False
        retry_count = 0
        # Success criterion: output ends with a root shell prompt ('# ').
        while stdout is None or not stdout.endswith('# '):
            if retry_count < self.MAX_RETRIES:
                output = rc2.ssh_client.execute_shell_command(ssh_cmd)
                stdout = output.stdout
            retry_count += 1
            if retry_count == self.MAX_RETRIES:
                # NOTE(review): if MAX_RETRIES were 0 this loop would spin
                # forever with stdout still None -- assumes MAX_RETRIES >= 1.
                break
        if stdout.endswith('# '):
            ssh_connection_established = True
        self.assertTrue(ssh_connection_established, self.ssh_msg.format(
            self.server_persona2.pnet_fix_ipv4[0], target_ip_addr))
@classmethod
def add_fixed_ips_network(cls, server, network, number_fixed_ips):
# Add fixed IP's to server
for _ in range(number_fixed_ips):
cls.servers_client.add_fixed_ip(server.id, network)
|
import os
import time
import config
import numpy as np
from PIL import Image
import tensorflow as tf
from dataReader import Reader
from model.yolo3_model import yolo
from collections import defaultdict
from yolo_predict import yolo_predictor
from utils import draw_box, load_weights, letterbox_image, voc_ap
# 指定使用GPU的Index
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_index
def train():
    """
    Introduction
    ------------
    Train the YOLOv3 model: build the tf.data input pipeline, construct the
    network and its loss, then run the optimization loop while writing
    summaries and periodic checkpoints to ``config.model_dir``.
    """
    train_reader = Reader('train', config.data_dir, config.anchors_path, config.num_classes, input_shape = config.input_shape, max_boxes = config.max_boxes)
    train_data = train_reader.build_dataset(config.train_batch_size)
    is_training = tf.placeholder(tf.bool, shape = [])
    iterator = train_data.make_one_shot_iterator()
    images, bbox, bbox_true_13, bbox_true_26, bbox_true_52 = iterator.get_next()
    images.set_shape([None, config.input_shape, config.input_shape, 3])
    bbox.set_shape([None, config.max_boxes, 5])
    # Output grids at strides 32 / 16 / 8 for the three detection scales.
    grid_shapes = [config.input_shape // 32, config.input_shape // 16, config.input_shape // 8]
    bbox_true_13.set_shape([None, grid_shapes[0], grid_shapes[0], 3, 5 + config.num_classes])
    bbox_true_26.set_shape([None, grid_shapes[1], grid_shapes[1], 3, 5 + config.num_classes])
    bbox_true_52.set_shape([None, grid_shapes[2], grid_shapes[2], 3, 5 + config.num_classes])
    draw_box(images, bbox)
    model = yolo(config.norm_epsilon, config.norm_decay, config.anchors_path, config.classes_path, config.pre_train)
    bbox_true = [bbox_true_13, bbox_true_26, bbox_true_52]
    # Bugfix: anchors-per-scale must be an integer; ``/`` yields a float
    # under Python 3, so use floor division.
    output = model.yolo_inference(images, config.num_anchors // 3, config.num_classes, is_training)
    loss = model.yolo_loss(output, bbox_true, model.anchors, config.num_classes, config.ignore_thresh)
    l2_loss = tf.losses.get_regularization_loss()
    loss += l2_loss
    tf.summary.scalar('loss', loss)
    merged_summary = tf.summary.merge_all()
    global_step = tf.Variable(0, trainable = False)
    lr = tf.train.exponential_decay(config.learning_rate, global_step, decay_steps = 2000, decay_rate = 0.8)
    optimizer = tf.train.AdamOptimizer(learning_rate = lr)
    # When training from pre-trained weights, freeze the darknet53 backbone
    # and only optimize variables under the 'yolo' scope.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        if config.pre_train:
            train_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='yolo')
            train_op = optimizer.minimize(loss = loss, global_step = global_step, var_list = train_var)
        else:
            train_op = optimizer.minimize(loss = loss, global_step = global_step)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session(config = tf.ConfigProto(log_device_placement = False)) as sess:
        ckpt = tf.train.get_checkpoint_state(config.model_dir)
        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
            print('restore model', ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            sess.run(init)
            if config.pre_train is True:
                load_ops = load_weights(tf.global_variables(scope = 'darknet53'), config.darknet53_weights_path)
                sess.run(load_ops)
        summary_writer = tf.summary.FileWriter(config.log_dir, sess.graph)
        loss_value = 0
        for epoch in range(config.Epoch):
            for step in range(int(config.train_num / config.train_batch_size)):
                start_time = time.time()
                train_loss, summary, global_step_value, _ = sess.run([loss, merged_summary, global_step, train_op], {is_training : True})
                loss_value += train_loss
                duration = time.time() - start_time
                # Bugfix: throughput is examples per second, i.e.
                # batch_size / duration; the original computed the inverse
                # (seconds per example) and labeled it examples/sec.
                examples_per_sec = config.train_batch_size / float(duration)
                format_str = ('Epoch {} step {}, train loss = {} ( {} examples/sec; {} ''sec/batch)')
                # loss_value / global_step_value is the running average of
                # the loss over all global steps so far.
                print(format_str.format(epoch, step, loss_value / global_step_value, examples_per_sec, duration))
                summary_writer.add_summary(summary = tf.Summary(value = [tf.Summary.Value(tag = "train loss", simple_value = train_loss)]), global_step = step)
                summary_writer.add_summary(summary, step)
                summary_writer.flush()
            # Save a checkpoint every 3 epochs.
            if epoch % 3 == 0:
                checkpoint_path = os.path.join(config.model_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step = global_step)
def eval(model_path, min_Iou = 0.5, yolo_weights = None):
    """
    Introduction
    ------------
    Compute the model's mAP on the validation set, for model evaluation.

    :param model_path: checkpoint path, used when ``yolo_weights`` is None
    :param min_Iou: IoU threshold above which a detection may count as a
        true positive
    :param yolo_weights: optional darknet-format weight file; when given,
        weights are loaded directly instead of restoring a checkpoint

    NOTE(review): this function shadows the ``eval`` builtin; the name is
    kept for backward compatibility with existing callers.
    """
    ground_truth = {}
    class_pred = defaultdict(list)
    gt_counter_per_class = defaultdict(int)
    input_image_shape = tf.placeholder(dtype = tf.int32, shape = (2,))
    input_image = tf.placeholder(shape = [None, 416, 416, 3], dtype = tf.float32)
    predictor = yolo_predictor(config.obj_threshold, config.nms_threshold, config.classes_path, config.anchors_path)
    boxes, scores, classes = predictor.predict(input_image, input_image_shape)
    val_Reader = Reader("val", config.data_dir, config.anchors_path, config.num_classes, input_shape = config.input_shape, max_boxes = config.max_boxes)
    image_files, bboxes_data = val_Reader.read_annotations()
    with tf.Session() as sess:
        if yolo_weights is not None:
            with tf.variable_scope('predict'):
                boxes, scores, classes = predictor.predict(input_image, input_image_shape)
            load_op = load_weights(tf.global_variables(scope = 'predict'), weights_file = yolo_weights)
            sess.run(load_op)
        else:
            saver = tf.train.Saver()
            saver.restore(sess, model_path)
        # Pass 1: collect ground truth and run detection on every image.
        for index in range(len(image_files)):
            val_bboxes = []
            image_file = image_files[index]
            file_id = os.path.split(image_file)[-1].split('.')[0]
            for bbox in bboxes_data[index]:
                left, top, right, bottom, class_id = bbox[0], bbox[1], bbox[2], bbox[3], bbox[4]
                class_name = val_Reader.class_names[int(class_id)]
                bbox = [float(left), float(top), float(right), float(bottom)]
                val_bboxes.append({"class_name" : class_name, "bbox": bbox, "used": False})
                gt_counter_per_class[class_name] += 1
            ground_truth[file_id] = val_bboxes
            # Letterbox to the fixed 416x416 network input and normalize.
            image = Image.open(image_file)
            resize_image = letterbox_image(image, (416, 416))
            image_data = np.array(resize_image, dtype = np.float32)
            image_data /= 255.
            image_data = np.expand_dims(image_data, axis = 0)
            out_boxes, out_scores, out_classes = sess.run(
                [boxes, scores, classes],
                feed_dict = {
                    input_image : image_data,
                    input_image_shape : [image.size[1], image.size[0]]
                })
            print("detect {}/{} found boxes: {}".format(index, len(image_files), len(out_boxes)))
            for o, c in enumerate(out_classes):
                predicted_class = val_Reader.class_names[c]
                box = out_boxes[o]
                score = out_scores[o]
                # Round and clip predicted boxes to the image bounds.
                top, left, bottom, right = box
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
                right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
                bbox = [left, top, right, bottom]
                class_pred[predicted_class].append({"confidence": str(score), "file_id": file_id, "bbox": bbox})
        # Pass 2: compute per-class AP (VOC-style).
        sum_AP = 0.0
        count_true_positives = {}
        for class_index, class_name in enumerate(sorted(gt_counter_per_class.keys())):
            count_true_positives[class_name] = 0
            predictions_data = class_pred[class_name]
            # Total number of predicted boxes for this class.
            nd = len(predictions_data)
            tp = [0] * nd # true positive
            fp = [0] * nd # false positive
            for idx, prediction in enumerate(predictions_data):
                file_id = prediction['file_id']
                ground_truth_data = ground_truth[file_id]
                bbox_pred = prediction['bbox']
                Iou_max = -1
                gt_match = None
                # Find the best-overlapping ground-truth box of this class.
                for obj in ground_truth_data:
                    if obj['class_name'] == class_name:
                        bbox_gt = obj['bbox']
                        bbox_intersect = [max(bbox_pred[0], bbox_gt[0]), max(bbox_gt[1], bbox_pred[1]), min(bbox_gt[2], bbox_pred[2]), min(bbox_gt[3], bbox_pred[3])]
                        intersect_weight = bbox_intersect[2] - bbox_intersect[0] + 1
                        intersect_high = bbox_intersect[3] - bbox_intersect[1] + 1
                        if intersect_high > 0 and intersect_weight > 0:
                            union_area = (bbox_pred[2] - bbox_pred[0] + 1) * (bbox_pred[3] - bbox_pred[1] + 1) + (bbox_gt[2] - bbox_gt[0] + 1) * (bbox_gt[3] - bbox_gt[1] + 1) - intersect_weight * intersect_high
                            Iou = intersect_high * intersect_weight / union_area
                            if Iou > Iou_max:
                                Iou_max = Iou
                                gt_match = obj
                if Iou_max > min_Iou:
                    # Bugfix: test ``gt_match is not None`` BEFORE subscripting
                    # it; the original order evaluated gt_match['used'] first.
                    if gt_match is not None and not gt_match['used']:
                        tp[idx] = 1
                        gt_match['used'] = True
                    else:
                        # duplicate detection of an already-matched GT box
                        fp[idx] = 1
                else:
                    fp[idx] = 1
            # Cumulative sums turn tp/fp into running counts, from which
            # precision and recall per detection rank are derived.
            sum_class = 0
            for idx, val in enumerate(fp):
                fp[idx] += sum_class
                sum_class += val
            sum_class = 0
            for idx, val in enumerate(tp):
                tp[idx] += sum_class
                sum_class += val
            rec = tp[:]
            for idx, val in enumerate(tp):
                rec[idx] = tp[idx] / gt_counter_per_class[class_name]
            prec = tp[:]
            for idx, val in enumerate(tp):
                prec[idx] = tp[idx] / (fp[idx] + tp[idx])
            ap, mrec, mprec = voc_ap(rec, prec)
            sum_AP += ap
        MAP = sum_AP / len(gt_counter_per_class) * 100
        print("The Model Eval MAP: {}".format(MAP))
if __name__ == "__main__":
    train()
    # Compute the model's mAP after training:
    # eval(config.model_dir, yolo_weights = config.yolo3_weights_path)
|
# <gh_stars>0
__author__ = 'artanis'
import os
import sys
import tables
import cv2
import numpy as N
from math import floor, ceil, log
from scipy.ndimage.morphology import distance_transform_edt
from BaseStructuredForests import BaseStructuredForests
from RandomForests import RandomForests
from RobustPCA import robust_pca
from utils import conv_tri, gradient
import pyximport
pyximport.install(build_dir=".pyxbld",
setup_args={"include_dirs": N.get_include()})
from _StructuredForests import predict_core, non_maximum_supr
class StructuredForests(BaseStructuredForests):
    def __init__(self, options, model_dir="model/", rand=None):
        """
        :param options:
            rgbd: 0 for RGB, 1 for RGB + depth
            shrink: amount to shrink channels
            n_orient: number of orientations per gradient scale
            grd_smooth_rad: radius for image gradient smoothing
            grd_norm_rad: radius for gradient normalization
            reg_smooth_rad: radius for reg channel smoothing
            ss_smooth_rad: radius for sim channel smoothing
            p_size: size of image patches
            g_size: size of ground truth patches
            n_cell: number of self similarity cells
            n_pos: number of positive patches per tree
            n_neg: number of negative patches per tree
            fraction: fraction of features to use to train each tree
            n_tree: number of trees in forest to train
            n_class: number of classes (clusters) for binary splits
            min_count: minimum number of data points to allow split
            min_child: minimum number of data points allowed at child nodes
            max_depth: maximum depth of tree
            split: options include 'gini', 'entropy' and 'twoing'
            discretize: optional function mapping structured to class labels
            stride: stride at which to compute edges
            sharpen: sharpening amount (can only decrease after training)
            n_tree_eval: number of trees to evaluate per location
            nms: if true apply non-maximum suppression to edges
        :param model_dir: directory for model
            A trained model will contain
            thrs: threshold corresponding to each feature index
            fids: feature indices for each node
            cids: indices of children for each node
            edge_bnds: begin / end of edge points for each node
            edge_pts: edge points for each node
            n_seg: number of segmentations for each node
            segs: segmentation map for each node
        :param rand: random number generator; defaults to a fresh
            ``RandomState(123)`` per instance
        """
        BaseStructuredForests.__init__(self, options)
        assert self.options["g_size"] % 2 == 0
        assert self.options["stride"] % self.options["shrink"] == 0

        self.model_dir = model_dir
        self.data_dir = os.path.join(self.model_dir, "data")
        self.tree_dir = os.path.join(self.model_dir, "trees")
        self.forest_dir = os.path.join(self.model_dir, "forests")
        self.data_prefix = "data_"
        self.tree_prefix = "tree_"
        self.forest_name = "forest.h5"
        self.comp_filt = tables.Filters(complib="zlib", complevel=1)

        self.trained = False
        try:
            self.load_model()
        except Exception:
            # Bugfix: the original bare ``except:`` also swallowed
            # SystemExit / KeyboardInterrupt; catch only real errors
            # raised by load_model() when no model file exists yet.
            self.model = {}
            print("No model file found. Training is required.")
        # Bugfix: the original default ``rand=N.random.RandomState(123)``
        # was a mutable default argument shared by all instances; create a
        # fresh, identically-seeded generator per instance instead.
        self.rand = N.random.RandomState(123) if rand is None else rand
    def load_model(self):
        """
        Load the trained forest from ``<forest_dir>/forest.h5`` into
        ``self.model`` and mark the instance as trained.

        :return: the model dictionary (thrs, fids, cids, edge_bnds,
            edge_pts, n_seg, segs)

        Propagates pytables/OS errors when the file is missing or
        unreadable (handled by the caller in ``__init__``).
        """
        model_file = os.path.join(self.forest_dir, self.forest_name)
        with tables.open_file(model_file, filters=self.comp_filt) as mfile:
            self.model = {
                "thrs": mfile.get_node("/thrs")[:],
                "fids": mfile.get_node("/fids")[:],
                "cids": mfile.get_node("/cids")[:],
                # these tables are stored 2-D; flatten for linear indexing
                "edge_bnds": mfile.get_node("/edge_bnds")[:].flatten(),
                "edge_pts": mfile.get_node("/edge_pts")[:].flatten(),
                "n_seg": mfile.get_node("/n_seg")[:].flatten(),
                "segs": mfile.get_node("/segs")[:],
            }
        self.trained = True
        return self.model
    def predict(self, src):
        """
        Predict an edge-probability map for the image ``src``.

        :param src: input image array (H x W x channels)
        :return: edge strength map in [0, 1] with the same H x W as ``src``
        """
        stride = self.options["stride"]
        sharpen = self.options["sharpen"]
        shrink = self.options["shrink"]
        p_size = self.options["p_size"]
        g_size = self.options["g_size"]
        n_cell = self.options["n_cell"]
        n_tree_eval = self.options["n_tree_eval"]
        # nms is optional; default off
        nms = self.options["nms"] if "nms" in self.options else False
        thrs = self.model["thrs"]
        fids = self.model["fids"]
        cids = self.model["cids"]
        edge_bnds = self.model["edge_bnds"]
        edge_pts = self.model["edge_pts"]
        n_seg = self.model["n_seg"]
        segs = self.model["segs"]
        p_rad = p_size // 2
        g_rad = g_size // 2
        # Mirror-pad so patches centered at border pixels are defined.
        pad = cv2.copyMakeBorder(src, p_rad, p_rad, p_rad, p_rad,
                                 borderType=cv2.BORDER_REFLECT)
        reg_ch, ss_ch = self.get_shrunk_channels(pad)
        if sharpen != 0:
            pad = conv_tri(pad, 1)
        # Core evaluation (Cython): aggregate the structured predictions of
        # n_tree_eval trees at each stride location.
        dst = predict_core(pad, reg_ch, ss_ch, shrink, p_size, g_size, n_cell,
                           stride, sharpen, n_tree_eval, thrs, fids, cids,
                           n_seg, segs, edge_bnds, edge_pts)
        # Normalization constant per sharpen level; empirically chosen --
        # presumably from the reference Structured Edge Detection
        # implementation (confirm before changing).
        if sharpen == 0:
            alpha = 2.1 * stride ** 2 / g_size ** 2 / n_tree_eval
        elif sharpen == 1:
            alpha = 1.8 * stride ** 2 / g_size ** 2 / n_tree_eval
        else:
            alpha = 1.65 * stride ** 2 / g_size ** 2 / n_tree_eval
        dst = N.minimum(dst * alpha, 1.0)
        # Smooth and crop back to the source image size.
        dst = conv_tri(dst, 1)[g_rad: src.shape[0] + g_rad,
                               g_rad: src.shape[1] + g_rad]
        if nms:
            # Estimate local edge orientation from second derivatives and
            # suppress non-maxima across the edge normal.
            dy, dx = N.gradient(conv_tri(dst, 4))
            _, dxx = N.gradient(dx)
            dyy, dxy = N.gradient(dy)
            orientation = N.arctan2(dyy * N.sign(-dxy) + 1e-5, dxx)
            orientation[orientation < 0] += N.pi
            dst = non_maximum_supr(dst, orientation, 1, 5, 1.02)
        return dst
    def train(self, input_data):
        """
        Run the full training pipeline: sample patches, train each tree,
        merge the trees into one forest, then load the resulting model.
        No-op if a trained model is already loaded.

        :param input_data: sequence of (image, boundaries, segmentations)
            triples, e.g. as produced by ``bsds500_train``
        """
        if self.trained:
            print("Model has been trained. Quit training.")
            return
        self.prepare_data(input_data)
        self.train_tree()
        self.merge_trees()
        self.load_model()
def prepare_data(self, input_data):
"""
Prepare data for model training
"""
n_img = len(input_data)
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
n_tree = self.options["n_tree"]
n_pos = self.options["n_pos"]
n_neg = self.options["n_neg"]
fraction = self.options["fraction"]
p_size = self.options["p_size"]
g_size = self.options["g_size"]
shrink = self.options["shrink"]
p_rad, g_rad = p_size // 2, g_size // 2
n_ftr_dim = N.sum(self.get_ftr_dim())
n_smp_ftr_dim = int(n_ftr_dim * fraction)
rand = self.rand
for i in range(n_tree):
data_file = self.data_prefix + str(i + 1) + ".h5"
data_path = os.path.join(self.data_dir, data_file)
if os.path.exists(data_path):
print("Found Data %d '%s', reusing..." % ((i + 1), data_file))
continue
ftrs = N.zeros((n_pos + n_neg, n_smp_ftr_dim), dtype=N.float32)
lbls = N.zeros((n_pos + n_neg, g_size, g_size), dtype=N.int32)
sids = rand.permutation(n_ftr_dim)[:n_smp_ftr_dim]
total = 0
for j, (img, bnds, segs) in enumerate(input_data):
mask = N.zeros(bnds[0].shape, dtype=bnds[0].dtype)
mask[::shrink, ::shrink] = 1
mask[:p_rad] = mask[-p_rad:] = 0
mask[:, :p_rad] = mask[:, -p_rad:] = 0
n_pos_per_gt = int(ceil(float(n_pos) / n_img / len(bnds)))
n_neg_per_gt = int(ceil(float(n_neg) / n_img / len(bnds)))
for k, boundary in enumerate(bnds):
dis = distance_transform_edt(boundary == 0)
pos_loc = ((dis < g_rad) * mask).nonzero()
pos_loc = zip(pos_loc[0].tolist(), pos_loc[1].tolist())
pos_loc = [pos_loc[item] for item in
rand.permutation(len(pos_loc))[:n_pos_per_gt]]
neg_loc = ((dis >= g_rad) * mask).nonzero()
neg_loc = zip(neg_loc[0].tolist(), neg_loc[1].tolist())
neg_loc = [neg_loc[item] for item in
rand.permutation(len(neg_loc))[:n_neg_per_gt]]
loc = pos_loc + neg_loc
n_loc = min(len(loc), ftrs.shape[0] - total)
loc = [loc[item] for item in rand.permutation(len(loc))[:n_loc]]
if n_loc == 0:
continue
ftr = N.concatenate(self.get_features(img, loc), axis=1)
assert ftr.shape[1] == n_ftr_dim
ftr = ftr[:, sids]
lbl = N.zeros((ftr.shape[0], g_size, g_size), dtype=N.int8)
for m, (x, y) in enumerate(loc):
sub = segs[k][x - g_rad: x + g_rad, y - g_rad: y + g_rad]
sub = N.unique(sub, return_inverse=True)[1]
lbl[m] = sub.reshape((g_size, g_size))
ftrs[total: total + n_loc] = ftr
lbls[total: total + n_loc] = lbl
total += n_loc
sys.stdout.write("Processing Data %d: %d/%d\r" % (i + 1, j + 1, n_img))
sys.stdout.flush()
print()
with tables.open_file(data_path, "w", filters=self.comp_filt) as dfile:
dfile.create_carray("/", "ftrs", obj=ftrs[:total])
dfile.create_carray("/", "lbls", obj=lbls[:total])
dfile.create_carray("/", "sids", obj=sids.astype(N.int32))
print("Saving %d samples to '%s'..." % (total, data_file))
def train_tree(self):
"""
Train a single tree
"""
n_tree = self.options["n_tree"]
if not os.path.exists(self.tree_dir):
os.makedirs(self.tree_dir)
rf = RandomForests(n_class=self.options["n_class"],
min_count=self.options["min_count"],
min_child=self.options["min_child"],
max_depth=self.options["max_depth"],
split=self.options["split"],
discretize=self.options["discretize"],
rand=self.rand)
for i in range(n_tree):
data_file = self.data_prefix + str(i + 1) + ".h5"
data_path = os.path.join(self.data_dir, data_file)
tree_file = self.tree_prefix + str(i + 1) + ".h5"
tree_path = os.path.join(self.tree_dir, tree_file)
if os.path.exists(tree_path):
print("Found Tree %d '%s', reusing..." % ((i + 1), tree_file))
continue
with tables.open_file(data_path, filters=self.comp_filt) as dfile:
ftrs = dfile.get_node("/ftrs")[:]
lbls = dfile.get_node("/lbls")[:]
sids = dfile.get_node("/sids")[:]
forest = rf.train(ftrs, lbls)
thrs, probs, preds, fids, cids, counts, depths = forest[0]
fids[cids > 0] = sids[fids[cids > 0]]
with tables.open_file(tree_path, "w", filters=self.comp_filt) as tfile:
tfile.create_carray("/", "fids", obj=fids)
tfile.create_carray("/", "thrs", obj=thrs)
tfile.create_carray("/", "cids", obj=cids)
tfile.create_carray("/", "probs", obj=probs)
tfile.create_carray("/", "segs", obj=preds)
tfile.create_carray("/", "counts", obj=counts)
tfile.create_carray("/", "depths", obj=depths)
tfile.close()
sys.stdout.write("Processing Tree %d/%d\r" % (i + 1, n_tree))
sys.stdout.flush()
print()
    def merge_trees(self):
        """
        Accumulate trees and merge into final model

        Loads the per-tree tables written by ``train_tree``, pads them to a
        common node count, cleans up tiny segments, precomputes edge point
        lists for the sharpening step, and writes everything to
        ``<forest_dir>/forest.h5``. No-op if the forest file already exists.
        """
        n_tree = self.options["n_tree"]
        g_size = self.options["g_size"]
        if not os.path.exists(self.forest_dir):
            os.makedirs(self.forest_dir)
        forest_path = os.path.join(self.forest_dir, self.forest_name)
        if os.path.exists(forest_path):
            print("Found model, reusing...")
            return
        # Load every tree's tables from disk.
        trees = []
        for i in range(n_tree):
            tree_file = self.tree_prefix + str(i + 1) + ".h5"
            tree_path = os.path.join(self.tree_dir, tree_file)
            with tables.open_file(tree_path, filters=self.comp_filt) as mfile:
                tree = {"fids": mfile.get_node("/fids")[:],
                        "thrs": mfile.get_node("/thrs")[:],
                        "cids": mfile.get_node("/cids")[:],
                        "segs": mfile.get_node("/segs")[:]}
                trees.append(tree)
        # Pad all trees' arrays to the node count of the largest tree.
        max_n_node = 0
        for i in range(n_tree):
            max_n_node = max(max_n_node, trees[i]["fids"].shape[0])
        # merge all fields of all trees
        thrs = N.zeros((n_tree, max_n_node), dtype=N.float64)
        fids = N.zeros((n_tree, max_n_node), dtype=N.int32)
        cids = N.zeros((n_tree, max_n_node), dtype=N.int32)
        segs = N.zeros((n_tree, max_n_node, g_size, g_size), dtype=N.int32)
        for i in range(n_tree):
            tree = trees[i]
            n_node = tree["fids"].shape[0]
            thrs[i, :n_node] = tree["thrs"].flatten()
            fids[i, :n_node] = tree["fids"].flatten()
            cids[i, :n_node] = tree["cids"].flatten()
            segs[i, :n_node] = tree["segs"]
        # remove very small segments (<=5 pixels): reassign their pixels to
        # the median label of the surrounding neighborhood, then renumber.
        n_seg = N.max(segs.reshape((n_tree, max_n_node, g_size ** 2)), axis=2) + 1
        for i in range(n_tree):
            for j in range(max_n_node):
                m = n_seg[i, j]
                if m <= 1:
                    continue
                S = segs[i, j]
                remove = False
                for k in range(m):
                    Sk = (S == k)
                    if N.count_nonzero(Sk) > 5:
                        continue
                    S[Sk] = N.median(S[conv_tri(Sk.astype(N.float64), 1) > 0])
                    remove = True
                if remove:
                    S = N.unique(S, return_inverse=True)[1]
                    segs[i, j] = S.reshape((g_size, g_size))
                    n_seg[i, j] = N.max(S) + 1
        # store compact representations of sparse binary edge patches:
        # for each leaf with >1 segment, record successively dilated edge
        # point lists (one ring per sharpen level).
        n_bnd = self.options["sharpen"] + 1
        edge_pts = []
        edge_bnds = N.zeros((n_tree, max_n_node, n_bnd), dtype=N.int32)
        for i in range(n_tree):
            for j in range(max_n_node):
                if cids[i, j] != 0 or n_seg[i, j] <= 1:
                    continue
                E = gradient(segs[i, j].astype(N.float64))[0] > 0.01
                E0 = 0
                for k in range(n_bnd):
                    # points new at this dilation level (E but not E0)
                    r, c = N.nonzero(E & (~ E0))
                    edge_pts += [r[m] * g_size + c[m] for m in range(len(r))]
                    edge_bnds[i, j, k] = len(r)
                    E0 = E
                    E = conv_tri(E.astype(N.float64), 1) > 0.01
        segs = segs.reshape((-1, segs.shape[-2], segs.shape[-1]))
        edge_pts = N.asarray(edge_pts, dtype=N.int32)
        # prefix-sum so edge_bnds[t] .. edge_bnds[t+1] indexes edge_pts
        edge_bnds = N.hstack(([0], N.cumsum(edge_bnds.flatten()))).astype(N.int32)
        with tables.open_file(forest_path, "w", filters=self.comp_filt) as mfile:
            mfile.create_carray("/", "thrs", obj=thrs)
            mfile.create_carray("/", "fids", obj=fids)
            mfile.create_carray("/", "cids", obj=cids)
            mfile.create_carray("/", "edge_bnds", obj=edge_bnds)
            mfile.create_carray("/", "edge_pts", obj=edge_pts)
            mfile.create_carray("/", "n_seg", obj=n_seg)
            mfile.create_carray("/", "segs", obj=segs)
            # NOTE(review): this explicit close is redundant inside the
            # ``with`` block, which closes the file on exit.
            mfile.close()
def discretize(segs, n_class, n_sample, rand):
    """
    Convert a set of segmentations into a set of labels in [0, n_class - 1]

    :param segs: segmentations (stack of w x w patches)
    :param n_class: number of classes (clusters) for binary splits
    :param n_sample: number of samples for clustering structured labels
    :param rand: random number generator
    :return: (labels, representative segmentation patch(es))
    """
    w = segs[0].shape[0]
    flat = segs.reshape((segs.shape[0], w ** 2))
    # Enumerate all ordered pixel pairs (i, j) of a w*w patch with j > i.
    pair_ids = N.arange(w ** 4, dtype=N.float64)
    first = N.floor(pair_ids / w / w)
    second = pair_ids - first * w * w
    keep = second > first
    first = first[keep]
    second = second[keep]
    # Sample n_sample pixel pairs and build one binary code per patch:
    # 1 where the two pixels fall in the same segment.
    n_sample = min(n_sample, first.shape[0])
    chosen = rand.permutation(first.shape[0])[:n_sample]
    n = flat.shape[0]
    first = first[chosen].astype(N.int32)
    second = second[chosen].astype(N.int32)
    zs = (flat[:, first] == flat[:, second]).astype(N.float64)
    # Center the codes and drop constant columns.
    zs -= N.mean(zs, axis=0)
    zs = zs[:, N.any(zs, axis=0)]
    if N.count_nonzero(zs) == 0:
        # All patches encode identically: a single trivial class.
        lbls = N.ones(n, dtype=N.int32)
        rep = flat[0]
    else:
        # Most representative patch: the code closest to the mean.
        rep = flat[N.argmin(N.sum(zs * zs, axis=1))]
        # Discretize by the signs of the leading robust-PCA dimensions.
        d = min(5, n_sample, int(floor(log(n_class, 2))))
        zs = robust_pca(zs, d, rand=rand)[0]
        lbls = N.zeros(n, dtype=N.int32)
        for i in range(d):
            lbls += (zs[:, i] < 0).astype(N.int32) * 2 ** i
        lbls = N.unique(lbls, return_inverse=True)[1].astype(N.int32)
    return lbls, rep.reshape((-1, w, w))
def bsds500_train(input_root):
    """
    Load the BSDS500 training set.

    :param input_root: directory containing the ``BSDS500`` folder
    :return: list of (image, boundaries, segmentations) triples, where
        boundaries/segmentations hold one array per human annotation
    """
    import scipy.io as SIO
    from skimage import img_as_float
    from skimage.io import imread

    dataset_dir = os.path.join(input_root, "BSDS500", "data")
    image_dir = os.path.join(dataset_dir, "images", "train")
    label_dir = os.path.join(dataset_dir, "groundTruth", "train")

    data = []
    for file_name in os.listdir(label_dir):
        mat = SIO.loadmat(os.path.join(label_dir, file_name))
        annotations = mat["groundTruth"].flatten()
        boundaries = [gt["Boundaries"][0, 0] for gt in annotations]
        segmentations = [gt["Segmentation"][0, 0] for gt in annotations]
        # Image shares the ground-truth basename, with a .jpg extension.
        image = img_as_float(imread(os.path.join(image_dir, file_name[:-3] + "jpg")))
        data.append((image, boundaries, segmentations))
    return data
def bsds500_test(model, input_root, output_root):
    """
    Predict edges for every BSDS500 test image and save each edge map as a
    PNG under ``output_root`` (created if missing).

    :param model: trained StructuredForests instance (uses ``.predict``)
    :param input_root: directory containing the ``BSDS500`` folder
    :param output_root: destination directory for the PNG edge maps
    """
    from skimage import img_as_float, img_as_ubyte
    from skimage.io import imread, imsave

    if not os.path.exists(output_root):
        os.makedirs(output_root)
    image_dir = os.path.join(input_root, "BSDS500", "data", "images", "test")
    file_names = [name for name in os.listdir(image_dir) if name[-3:] == "jpg"]
    n_image = len(file_names)
    print('%d images...' % (n_image))
    for i, file_name in enumerate(file_names):
        print(file_name)
        print(os.path.join(image_dir, file_name))
        src = img_as_float(imread(os.path.join(image_dir, file_name)))
        edge_map = img_as_ubyte(model.predict(src))
        imsave(os.path.join(output_root, file_name[:-3] + "png"), edge_map)
        sys.stdout.write("Processing Image %d/%d\r" % (i + 1, n_image))
        sys.stdout.flush()
    print()
if __name__ == "__main__":
    rand = N.random.RandomState(1)
    # Training configuration; see StructuredForests.__init__ for the
    # meaning of each key.
    options = {
        "rgbd": 0,
        "shrink": 2,
        "n_orient": 4,
        "grd_smooth_rad": 0,
        "grd_norm_rad": 4,
        "reg_smooth_rad": 2,
        "ss_smooth_rad": 8,
        "p_size": 32,
        "g_size": 16,
        "n_cell": 5,
        "n_pos": 10000,
        "n_neg": 10000,
        "fraction": 0.25,
        "n_tree": 8,
        "n_class": 2,
        "min_count": 1,
        "min_child": 8,
        "max_depth": 64,
        "split": "gini",
        # Cluster structured labels into binary-split classes; shares the
        # same RNG as the model for reproducibility.
        "discretize": lambda lbls, n_class:
            discretize(lbls, n_class, n_sample=256, rand=rand),
        "stride": 2,
        "sharpen": 2,
        "n_tree_eval": 4,
        "nms": True,
    }
    model = StructuredForests(options, rand=rand)
    model.train(bsds500_train("toy"))
    bsds500_test(model, "toy", "edges")
|
# <gh_stars>0
import re
from requests import Session
from xml.etree import ElementTree as ET
class SfdcSession(Session):
    """A requests ``Session`` that authenticates against the Salesforce
    SOAP (enterprise) API and keeps the resulting session id and instance
    name for subsequent calls."""

    _DEFAULT_API_VERSION = "37.0"
    _LOGIN_URL = "https://{instance}.salesforce.com"
    _SOAP_API_BASE_URI = "/services/Soap/c/{version}"
    _XML_NAMESPACES = {
        'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
        'mt': 'http://soap.sforce.com/2006/04/metadata',
        'd': 'urn:enterprise.soap.sforce.com'
    }
    _LOGIN_TMPL = \
        """<env:Envelope xmlns:xsd='http://www.w3.org/2001/XMLSchema'
    xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
    xmlns:env='http://schemas.xmlsoap.org/soap/envelope/'>
    <env:Body>
        <sf:login xmlns:sf='urn:enterprise.soap.sforce.com'>
            <sf:username>{username}</sf:username>
            <sf:password>{password}</sf:password>
        </sf:login>
    </env:Body>
</env:Envelope>"""

    def __init__(
            self, username=None, password=None, token=None,
            is_sandbox=False, api_version=_DEFAULT_API_VERSION,
            **kwargs):
        """
        :param username: Salesforce login name
        :param password: login password (bugfix: the default had been
            replaced by a ``<PASSWORD>`` redaction placeholder, which is
            not valid Python; restored to ``None``)
        :param token: optional API security token, appended to the password
        :param is_sandbox: log in against the sandbox ('test') host
        :param api_version: SOAP API version string
        :param kwargs: optionally ``session_id`` and ``instance`` to resume
            an existing session without a fresh login
        """
        super(SfdcSession, self).__init__()
        self._username = username
        self._password = password
        self._token = token
        self._is_sandbox = is_sandbox
        self._api_version = api_version
        self._session_id = kwargs.get("session_id", None)
        self._instance = kwargs.get("instance", None)

    def login(self):
        """Authenticate via the SOAP login call and store the session id
        and instance host name.

        :raises Exception: when Salesforce returns a SOAP Fault
        """
        url = self.construct_url(self.get_soap_api_uri())
        headers = {'Content-Type': 'text/xml', 'SOAPAction': 'login'}
        # Bugfix: restore the redacted assignment -- the security token,
        # when present, is appended to the password.
        password = self._password
        if self._token:
            password += self._token
        data = SfdcSession._LOGIN_TMPL.format(**{'username': self._username, 'password': password})
        r = self.post(url, headers=headers, data=data)
        root = ET.fromstring(r.text.encode('utf8', 'ignore'))
        # Bugfix: compare against None -- an ElementTree Element without
        # children is falsy, so a bare truth test could miss a Fault.
        fault = root.find('soapenv:Body/soapenv:Fault', SfdcSession._XML_NAMESPACES)
        if fault is not None:
            raise Exception("Could not log in. Code: %s Message: %s" % (
                root.find('soapenv:Body/soapenv:Fault/faultcode', SfdcSession._XML_NAMESPACES).text,
                root.find('soapenv:Body/soapenv:Fault/faultstring', SfdcSession._XML_NAMESPACES).text))
        self._session_id = root.find('soapenv:Body/d:loginResponse/d:result/d:sessionId', SfdcSession._XML_NAMESPACES).text
        server_url = root.find('soapenv:Body/d:loginResponse/d:result/d:serverUrl', SfdcSession._XML_NAMESPACES).text
        # Extract the instance subdomain from the returned server URL.
        self._instance = re.search("""https://(.*).salesforce.com/.*""", server_url).group(1)

    def get_server_url(self):
        """Base URL: login/test host before login, instance host after."""
        if not self._instance:
            return SfdcSession._LOGIN_URL.format(**{'instance': 'test' if self._is_sandbox else 'login'})
        return SfdcSession._LOGIN_URL.format(**{'instance': self._instance})

    def get_soap_api_uri(self):
        """SOAP API URI for the configured API version."""
        return SfdcSession._SOAP_API_BASE_URI.format(**{'version': self._api_version})

    def construct_url(self, uri):
        """Join the server base URL with ``uri``."""
        return "%s%s" % (self.get_server_url(), uri)

    def get_api_version(self):
        return self._api_version

    def get_session_id(self):
        return self._session_id

    def is_connected(self):
        """True once an instance host is known (i.e. after login)."""
        return True if self._instance else False
|
#!/usr/bin/python
#
# File: qrmaker.py
# Author: <NAME>
# Email: <EMAIL>
# Date: 29 Mar 2016
#----------------------------------------------------------------------------
# Install notes:
# > sudo apt-get install python-dev
# > sudo pip install reportlab
#----------------------------------------------------------------------------
# Usage: qrmaker.py [-h] -i IMAGE [-s SMALLTEXT] [-b BIGTEXT] [-q QRCODE]
# [-f CSVFILE]
# outFile
#
# positional arguments:
# outFile Name of the PDF output file
#
# optional arguments:
# -h, --help show this help message and exit
# -i IMAGE, --image IMAGE
# Background image
#
# Args for single page PDF document:
# -s SMALLTEXT, --smallText SMALLTEXT
# Text to be displayed in the small text box
# -b BIGTEXT, --bigText BIGTEXT
# Text to be displayed in the large text box
# -q QRCODE, --qrCode QRCODE
# URL or text for QR code
#
# Args for multiple page PDF document:
# -f CSVFILE, --csvFile CSVFILE
# A CSV file containing lines of
# qrText,bigText,smallText
#----------------------------------------------------------------------------
"""
Generate QR code image, composite into another image, then convert to a PDF document.
Use either the -f option, or the -q/-b/-s options. If you use -f, -q/-b/-s are ignored.
The format of the lines in the CSV input file is:
qrText,bigText,smallText
"""
from __future__ import print_function, division
import sys
import argparse
from PIL import Image
import qrcode
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import inch
from reportlab.lib.utils import ImageReader
from reportlab.platypus import Paragraph, Frame
from reportlab.lib.styles import getSampleStyleSheet
import cStringIO
import csv
#----------------------------------------------------------------------------
class Rect(object):
    """An axis-aligned rectangle stored as (xmin, ymin), (xmax, ymax) using
    integer coords; min/max are normalized on construction."""

    def __init__(self, xmin=0, ymin=0, xmax=0, ymax=0):
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax
        self.rearrange()

    def rearrange(self):
        """Ensure min <= max on both axes, swapping where needed."""
        if self.xmax < self.xmin:
            self.xmin, self.xmax = self.xmax, self.xmin
        if self.ymax < self.ymin:
            self.ymin, self.ymax = self.ymax, self.ymin

    def width(self):
        """Horizontal extent (xmax - xmin)."""
        return self.xmax - self.xmin

    def height(self):
        """Vertical extent (ymax - ymin)."""
        return self.ymax - self.ymin

    def box(self):
        """The rectangle as an (xmin, ymin, xmax, ymax) tuple."""
        return (self.xmin, self.ymin, self.xmax, self.ymax)

    def scale(self, factor):
        """Multiply every coordinate by *factor*, in place."""
        for attr in ("xmin", "ymin", "xmax", "ymax"):
            setattr(self, attr, getattr(self, attr) * factor)
#----------------------------------------------------------------------------
class QrMakerApp(object):
QR_BORDER_PIXELS = 4
    def __init__(self):
        # Background image; None until set after argument parsing.
        self.bgImage = None
        # Pixel-coordinate boxes on the background image.
        # NOTE(review): the ``1543 - ...`` arithmetic appears to flip y from
        # a top-left origin to a bottom-left (PDF-style) origin for a
        # 1543-pixel-high background -- confirm against the actual image.
        self.qrBox = Rect(727, 1326, 1033, 1020)
        self.smallTextBox = Rect(29, 1543-1513, 284, 1543-1461)
        self.bigTextBox = Rect(435, 1543-1459, 1033, 1543-1367)
        # Output PDF canvas; None until created.
        self.pdf = None
        # One entry per output page: [qrText, bigText, smallText].
        self.pageData = []
def parseCmdLine(self):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-i", "--image", required=True, help="Background image")
group1 = parser.add_argument_group("Args for single page PDF document")
group1.add_argument("-s", "--smallText", default="", help="Text to be displayed in the small text box")
group1.add_argument("-b", "--bigText", default="", help="Text to be displayed in the large text box")
group1.add_argument("-q", "--qrCode", help="URL or text for QR code")
group2 = parser.add_argument_group("Args for multiple page PDF document")
group2.add_argument("-f", "--csvFile", help="A CSV file containing lines of qrText,bigText,smallText")
parser.add_argument("outFile", help="Name of the PDF output file")
self.args = parser.parse_args()
def readCsvFile(self, csvFileName):
""" Read a CSV file containing lines with 3 fields each.
The fields are: qrText,bigText,smallText
Blank lines are ignored.
Lines with proper content are added to the pageData list.
"""
with open(csvFileName, "rb") as f:
rdr = csv.reader(f)
lineNum = 0
for line in rdr:
lineNum += 1
if len(line): # not blank
if len(line) != 3:
print("Error in line {}: Wrong number of fields".format(lineNum), file=sys.stderr)
else:
self.pageData.append(line)
def makeQrCode(self, qrText):
""" Generate a QR code image, and return it as a pygame image object """
qr = qrcode.QRCode(
version=None,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=20,
border=self.QR_BORDER_PIXELS,
)
qr.add_data(qrText)
qr.make(fit=True)
# Return a PIL image object
img = qr.make_image().convert("RGB")
return img
def addSmallText(self, canvas, text, x, y):
text = "<para align=CENTER><b>{}</b></para>".format(text)
stylesheet=getSampleStyleSheet()
para = Paragraph(text, stylesheet["Normal"])
frame = Frame(x + self.smallTextBox.xmin * inch/154, y + self.smallTextBox.ymin * inch/154,
self.smallTextBox.width()/154.0 * inch, self.smallTextBox.height()/154.0 * inch,
showBoundary=0)
w,h = para.wrap(self.smallTextBox.width(), self.smallTextBox.height())
frame.addFromList([para], canvas)
def addBigText(self, canvas, text, x, y):
text = '<para align="center" spaceBefore="-100"><font size=32><b>{}</b></font></para>'.format(text)
stylesheet=getSampleStyleSheet()
para = Paragraph(text, stylesheet["Normal"])
frame = Frame(x + self.bigTextBox.xmin * inch/154, y + self.bigTextBox.ymin * inch/154,
self.bigTextBox.width()/154.0 * inch, self.bigTextBox.height()/154.0 * inch,
showBoundary=0)
w,h = para.wrap(self.bigTextBox.width(), self.bigTextBox.height())
frame.addFromList([para], canvas)
def loadBgImage(self, path):
""" Load bgImage from a file """
self.bgImage = Image.open(path)
def addQrCode(self, qrText):
""" Create a qrcode image and composite it into the bgImage """
qr = self.makeQrCode(qrText)
qr = qr.resize((self.qrBox.width(), self.qrBox.height()))
self.bgImage.paste(qr, self.qrBox.box())
def makePdfPage(self, bgImgFile, qrText, smText, bigText):
# Load the background image from a file
self.loadBgImage(bgImgFile)
# Write the qrcode into the background image
self.addQrCode(qrText)
# Copy the background image to a memory buffer
# that can be read as an in-memory file
imgdata = cStringIO.StringIO()
self.bgImage.save(imgdata, format='png')
imgdata.seek(0) # rewind the data
# Define the image size on the page
imWidth = 7 * inch
imHeight = 10 * inch
# Put the background image into the PDF page
pgWidth,pgHeight = letter
layoutW,layoutH = self.pdf.drawImage(ImageReader(imgdata), (pgWidth-imWidth)/2, inch/2, width=imWidth, height=imHeight, preserveAspectRatio=True)
# Add the text in the boxes on the page
self.addSmallText(self.pdf, smText, (pgWidth-imWidth)/2, inch/2)
self.addBigText(self.pdf, bigText, (pgWidth-imWidth)/2, inch/2)
# Finalize the page
self.pdf.showPage() # Create a page break
def createPdfDoc(self, path):
""" Create a PDF document object that will be written to path """
self.pdf = Canvas(path, pagesize=letter)
def finalizePdfDoc(self):
""" Write the doc to a file and close it"""
self.pdf.save() # save to file and close
def run(self):
self.parseCmdLine()
if self.args.csvFile:
# Read file into self.pageData
self.readCsvFile(self.args.csvFile)
else:
# Put args into self.pageData
self.pageData.append([self.args.qrCode, self.args.bigText, self.args.smallText])
# Generate the PDF document
print("making pdf")
self.createPdfDoc(self.args.outFile)
for qrText,bigText,smallText in self.pageData:
print("Page: {}, {}, {}".format(qrText, bigText, smallText))
self.makePdfPage(self.args.image, qrText, smallText, bigText)
self.finalizePdfDoc()
print("done making pdf")
if __name__ == '__main__':
    # Build and run the application, then exit with a success status.
    QrMakerApp().run()
    sys.exit(0)
|
from smartcard.System import readers
# Use the first PC/SC reader on the system and open a connection to the
# inserted card (presumably raises if no reader/card is present -- depends
# on the pyscard backend).
r = readers()[0]
c = r.createConnection()
c.connect()
def hexy(l):
    """Format an iterable of byte values as space-separated uppercase hex pairs."""
    # Generator (not list) is enough for join; removed a stale commented-out variant.
    return ' '.join(f'{x:02X}' for x in l)
# Print the card's Answer To Reset as hex for debugging.
print(f"ATR = '{hexy(c.getATR())}'")
# CLA, INS, P1, P2, Lc
# NIST PIV applet AID: RID followed by PIX (application id + version).
NIST_RID = [0xA0, 0x00, 0x00, 0x03, 0x08]
NIST_PIX_PIV_APP = [0x00, 0x00, 0x10, 0x00]
NIST_PIX_PIV_VERSION = [0x01, 0x00]
PIV = NIST_RID + NIST_PIX_PIV_APP + NIST_PIX_PIV_VERSION
# SELECT-by-AID APDU header; the final byte (Lc) is the AID length.
SELECT = [0x00, 0xA4, 0x04, 0x00, len(PIV)]
# [0, 164, 4, 0, 11, 160, 0, 0, 3, 8, 0, 0, 16, 0, 1, 0]
# resp = c.transmit(SELECT + PIV + [0])
# Pre-assembled 260-byte APDU (CLA 0x10 appears to be command chaining,
# INS 0xDB a PUT DATA, P1P2 0x3FFF) whose 0x5C/0x53 TLV payload carries what
# looks like the start of a DER-encoded X.509 certificate ("SSH key",
# O=yubikey-agent) -- the certificate is truncated here.
# NOTE(review): not referenced by the code below; confirm intent before use.
LONG_ASS = list(bytes.fromhex(
    "10DB3FFFFF5C035FC105"
    "5382015B708201523082"
    "014E3081F5A003020102"
    "0211008BAB31CF3EB9F5"
    "6A6F38F05A4D7F556230"
    "0A06082A8648CE3D0403"
    "02302A31163014060355"
    "040A130D797562696B65"
    "792D6167656E74311030"
    "0E060355040B13072864"
    "6576656C293020170D32"
    "30303531363031313732"
    "365A180F323036323035"
    "31363032313732365A30"
    "123110300E0603550403"
    "1307535348206B657930"
    "59301306072A8648CE3D"
    "020106082A8648CE3D03"
    "0107034200044F98632F"
    "53BDABEEBF69733A840F"
    "FD9F9DB3CE5C1E1B8406"
    "6332FF9C440BCE561394"
    "0098E346C2BC3DE65EF2"
    "814BBCEA2B9D47CC9B5E"
    "BE1E2C691DC3534C8914"
    "A3123010300E0603551D"
))
# call e.g. with 0x7E for discovery object,
# 0x7F61 for biometric information template interindustry tag,
# or 0x5FC107 for card capability container, etc.
# except these first two, all tags have length 3,
# and even are [0x5F, 0xC1, ?].
def get_data(object_tag):
    """Send a GET DATA APDU for the given PIV object tag and return the card's reply."""
    # Tag width in bytes: two special short tags, everything else is 3 bytes.
    if object_tag == 0x7E:
        width = 1
    elif object_tag == 0x7F61:
        width = 2
    else:
        width = 3
    tag = list(object_tag.to_bytes(width, byteorder='big'))
    GET_DATA = [0x00, 0xCB, 0x3F, 0xFF]
    # Data field wraps the tag in a 0x5C tag-list TLV; trailing 0x00 is the Le byte.
    apdu = GET_DATA + [len(tag) + 2] + [0x5C, len(tag)] + tag + [0]
    print(f"apdu = {apdu}")
    return c.transmit(apdu)
|
<gh_stars>1-10
#!/usr/bin/env python3
#
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import argparse
import functools
import json
import os
import re
import pathlib
from pathlib import Path
import shutil
import subprocess
from tempfile import TemporaryDirectory
import sys
sys.path.append(str(Path(__file__).parent.parent.parent / 'Scripts'))
from builders.vcpkgbuilder import VcpkgBuilder
import builders.monkeypatch_tempdir_cleanup
class NvClothBuilder(object):
    """Clone, patch, build, and stage NVIDIA NvCloth for one target platform.

    Every subprocess runs inside workingDir with GW_DEPS_ROOT pointed at it,
    which is what the upstream NvCloth build scripts expect.
    """
    def __init__(self, workingDir: pathlib.Path, basePackageSystemDir: pathlib.Path, targetPlatform: str):
        self._workingDir = workingDir
        self._packageSystemDir = basePackageSystemDir
        self._platform = targetPlatform
        # Copy the environment so the GW_DEPS_ROOT override stays local to this builder.
        self._env = dict(os.environ)
        self._env.update(
            GW_DEPS_ROOT=str(workingDir),
        )
        # Pre-bound subprocess helper: every call runs in workingDir with our env.
        self.check_call = functools.partial(subprocess.check_call,
            cwd=self.workingDir,
            env=self.env
        )
    @property
    def workingDir(self):
        """Directory the repository is cloned into and built from."""
        return self._workingDir
    @property
    def packageSystemDir(self):
        """Root of the package-system tree the package is staged under."""
        return self._packageSystemDir
    @property
    def platform(self):
        """Target platform name: 'windows', 'linux', 'android', 'mac', or 'ios'."""
        return self._platform
    @property
    def env(self):
        """Environment dict used for every subprocess invocation."""
        return self._env
    def clone(self, lockToCommit: str):
        """Fetch the NvCloth sources (head of upstream PR #58) and patch them.

        NOTE(review): lockToCommit is currently unused -- the checkout is
        pinned to the 'pr-58' branch fetched below, not to the given hash.
        """
        if not (self.workingDir / '.git').exists():
            self.check_call(
                ['git', 'init',],
            )
            self.check_call(
                ['git', 'remote', 'add', 'origin', 'https://github.com/NVIDIAGameWorks/NvCloth.git',],
            )
        self.check_call(
            ['git', 'fetch', 'origin', '--depth=1', 'pull/58/head:pr-58',],
        )
        self.check_call(
            ['git', 'checkout', 'pr-58',],
        )
        # Remove /LTCG and /GL flags as it's causing compile warnings
        if self.platform == 'windows':
            windows_cmake_file = self.workingDir / 'NvCloth/compiler/cmake/windows/CMakeLists.txt'
            # Path.read_text/write_text replace the open/read/close triples,
            # so the file handles can never leak.
            content = windows_cmake_file.read_text()
            content = re.sub('/LTCG', r'', content, flags = re.M)
            content = re.sub('/GL', r'', content, flags = re.M)
            windows_cmake_file.write_text(content)
        # Remove warnings as errors for iOS
        if self.platform == 'ios':
            ios_cmake_file = self.workingDir / 'NvCloth/compiler/cmake/ios/CMakeLists.txt'
            content = ios_cmake_file.read_text()
            content = re.sub('-Werror', r'', content, flags = re.M)
            ios_cmake_file.write_text(content)
    def build(self):
        """Run the per-platform CMake generate step, then build debug/profile/release."""
        cmake_scripts_path = os.path.abspath(os.path.join(self.packageSystemDir, '../Scripts/cmake'))
        nvcloth_dir = self.workingDir / 'NvCloth'
        ly_3rdparty_path = os.getenv('LY_3RDPARTY_PATH')
        folder_names = {
            #system-name cmake generation, cmake build
            'mac' : ([
                '-G', 'Xcode',
                '-DTARGET_BUILD_PLATFORM=mac',
                '-DNV_CLOTH_ENABLE_CUDA=0', '-DUSE_CUDA=0',
                '-DPX_GENERATE_GPU_PROJECTS=0',
                '-DPX_STATIC_LIBRARIES=1',
                f'-DPX_OUTPUT_DLL_DIR={nvcloth_dir}/bin/osx64-cmake',
                f'-DPX_OUTPUT_LIB_DIR={nvcloth_dir}/lib/osx64-cmake',
                f'-DPX_OUTPUT_EXE_DIR={nvcloth_dir}/bin/osx64-cmake'
            ], []),
            'ios' : ([
                '-G', 'Xcode',
                f'-DCMAKE_TOOLCHAIN_FILE={cmake_scripts_path}/Platform/iOS/Toolchain_ios.cmake',
                '-DPACKAGE_PLATFORM=ios',
                '-DTARGET_BUILD_PLATFORM=ios',
                '-DCMAKE_XCODE_ATTRIBUTE_IPHONEOS_DEPLOYMENT_TARGET="10.0"',
                '-DNV_CLOTH_ENABLE_CUDA=0', '-DUSE_CUDA=0',
                '-DPX_GENERATE_GPU_PROJECTS=0',
                '-DPX_STATIC_LIBRARIES=1',
                f'-DPX_OUTPUT_DLL_DIR={nvcloth_dir}/bin/ios-cmake',
                f'-DPX_OUTPUT_LIB_DIR={nvcloth_dir}/lib/ios-cmake',
                f'-DPX_OUTPUT_EXE_DIR={nvcloth_dir}/bin/ios-cmake'
            ], [
                '--',
                '-destination generic/platform=iOS'
            ]),
            'linux' : ([
                '-G', 'Ninja Multi-Config',
                '-DCMAKE_C_COMPILER=clang-6.0',
                '-DCMAKE_CXX_COMPILER=clang++-6.0',
                '-DTARGET_BUILD_PLATFORM=linux',
                '-DNV_CLOTH_ENABLE_CUDA=0',
                '-DPX_GENERATE_GPU_PROJECTS=0',
                '-DPX_STATIC_LIBRARIES=1',
                f'-DPX_OUTPUT_DLL_DIR={nvcloth_dir}/bin/linux64-cmake',
                f'-DPX_OUTPUT_LIB_DIR={nvcloth_dir}/lib/linux64-cmake',
                f'-DPX_OUTPUT_EXE_DIR={nvcloth_dir}/bin/linux64-cmake'
            ], []),
            'windows' : ([
                '-G', 'Visual Studio 15 2017',
                '-Ax64',
                '-DTARGET_BUILD_PLATFORM=windows',
                '-DNV_CLOTH_ENABLE_DX11=0',
                '-DNV_CLOTH_ENABLE_CUDA=0',
                '-DPX_GENERATE_GPU_PROJECTS=0',
                '-DSTATIC_WINCRT=0',
                '-DPX_STATIC_LIBRARIES=1',
                f'-DPX_OUTPUT_DLL_DIR={nvcloth_dir}/bin/vc141win64-cmake',
                f'-DPX_OUTPUT_LIB_DIR={nvcloth_dir}/lib/vc141win64-cmake',
                f'-DPX_OUTPUT_EXE_DIR={nvcloth_dir}/bin/vc141win64-cmake'
            ], []),
            'android' : ([
                '-G', 'Ninja Multi-Config',
                f'-DCMAKE_TOOLCHAIN_FILE={cmake_scripts_path}/Platform/Android/Toolchain_android.cmake',
                '-DANDROID_ABI=arm64-v8a',
                '-DANDROID_ARM_MODE=arm',
                '-DANDROID_ARM_NEON=TRUE',
                '-DANDROID_NATIVE_API_LEVEL=21',
                f'-DLY_NDK_DIR={ly_3rdparty_path}/android-ndk/r21d',
                '-DPACKAGE_PLATFORM=android',
                '-DPX_STATIC_LIBRARIES=1',
                f'-DPX_OUTPUT_DLL_DIR={nvcloth_dir}/bin/android-arm64-v8a-cmake',
                f'-DPX_OUTPUT_LIB_DIR={nvcloth_dir}/lib/android-arm64-v8a-cmake',
                f'-DPX_OUTPUT_EXE_DIR={nvcloth_dir}/bin/android-arm64-v8a-cmake'
            ], []) # Android needs to have ninja in the path
        }
        # intentionally generate a keyerror if its not a good platform:
        cmake_generation, cmake_build = folder_names[self.platform]
        build_dir = os.path.join(nvcloth_dir, 'build', self.platform)
        os.makedirs(build_dir, exist_ok=True)
        # Generate
        cmake_generate_call = ['cmake', f'{nvcloth_dir}/compiler/cmake/{self.platform}', f'-B{build_dir}']
        if cmake_generation:
            cmake_generate_call += cmake_generation
        print(cmake_generate_call)
        self.check_call(cmake_generate_call)
        # Build
        for config in ('debug', 'profile', 'release'):
            cmake_build_call = ['cmake', '--build', build_dir, '--config', config]
            if cmake_build:
                cmake_build_call += cmake_build
            print(cmake_build_call)
            self.check_call(cmake_build_call)
    def copyBuildOutputTo(self, packageDir: pathlib.Path):
        """Replace packageDir with the built libs, headers, README, and licenses."""
        if packageDir.exists():
            shutil.rmtree(packageDir)
        for dirname in ('NvCloth/lib', 'NvCloth/include', 'NvCloth/extensions/include', 'PxShared/include'):
            shutil.copytree(
                src=self.workingDir / dirname,
                dst=packageDir / dirname,
                symlinks=True,
            )
        shutil.copy2(
            src=self.workingDir / 'README.md',
            dst=packageDir / 'README.md',
        )
        shutil.copy2(
            src=self.workingDir / 'NvCloth/license.txt',
            dst=packageDir / 'NvCloth/license.txt',
        )
        shutil.copy2(
            src=self.workingDir / 'PxShared/license.txt',
            dst=packageDir / 'PxShared/license.txt',
        )
    def writePackageInfoFile(self, packageDir: pathlib.Path, settings: dict):
        """Write settings as PackageInfo.json inside packageDir."""
        with (packageDir / 'PackageInfo.json').open('w') as fh:
            json.dump(settings, fh, indent=4)
def main():
    """Build NvCloth for the requested platform and assemble its package tree."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--platform-name',
        dest='platformName',
        choices=['windows', 'linux', 'android', 'mac', 'ios'],
        default=VcpkgBuilder.defaultPackagePlatformName(),
    )
    args = arg_parser.parse_args()
    package_system_dir = Path(__file__).resolve().parents[1]
    package_source_dir = package_system_dir / 'NvCloth'
    package_root = package_system_dir / f'NvCloth-{args.platformName}'
    # Prefer a platform-specific Find module; fall back to the generic one.
    cmake_find_file = package_source_dir / f'FindNvCloth_{args.platformName}.cmake'
    if not cmake_find_file.exists():
        cmake_find_file = package_source_dir / 'FindNvCloth.cmake'
    with TemporaryDirectory() as tempdir:
        work_dir = Path(tempdir)
        builder = NvClothBuilder(workingDir=work_dir, basePackageSystemDir=package_system_dir, targetPlatform=args.platformName)
        builder.clone('8e100cca5888d09f40f4721cc433f284b1841e65')
        builder.build()
        builder.copyBuildOutputTo(package_root / 'NvCloth')
        # Version v1.1.6-4-gd243404-pr58 describes commit 8e100cc,
        # which is 4 commits above 1.1.6 release (commit d243404),
        # plus pull request 58 applied on top.
        builder.writePackageInfoFile(
            package_root,
            settings={
                'PackageName': f'NvCloth-v1.1.6-4-gd243404-pr58-rev1-{args.platformName}',
                'URL': 'https://github.com/NVIDIAGameWorks/NvCloth.git',
                'License': 'custom',
                'LicenseFile': 'NvCloth/NvCloth/license.txt',
            },
        )
        shutil.copy2(
            src=cmake_find_file,
            dst=package_root / 'FindNvCloth.cmake'
        )
if __name__ == '__main__':
    # Script entry point: build and package NvCloth for the selected platform.
    main()
|
<reponame>poloclub/RECAST
from pathlib import Path
from flask import Flask, request, jsonify
from flask_cors import CORS
import string
import collections
import re
from nltk.corpus import stopwords
import nltk
import os
from typing import Tuple, List
from functools import partial
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils.rnn import pad_sequence
from transformers import (
AutoModelWithLMHead,
AutoTokenizer,
BertTokenizer,
BertModel,
AdamW,
get_linear_schedule_with_warmup,
BertPreTrainedModel,
BertForMaskedLM
)
from model import BertClassifier
from gensim.models.word2vec import Word2Vec
import gensim.downloader as api
from utils import *
# Pretrained word vectors used for synonym lookups.
vectors = api.load("glove-twitter-25")
bert_model_name = 'bert-base-cased'
# Prefer the first GPU when available; all models below move to this device.
device = torch.device('cpu')
if torch.cuda.is_available(): device = torch.device('cuda:0')
tokenizer = BertTokenizer.from_pretrained(bert_model_name)
# 6-way toxicity classifier on top of BERT, with attentions exposed.
model = BertClassifier(BertModel.from_pretrained(
    bert_model_name, output_attentions=True), 6).to(device)
# map_location keeps a GPU-saved checkpoint loadable on CPU-only hosts.
model.load_state_dict(torch.load("finetuned_pytorch_model.bin", map_location=device))
# Masked language model used to propose replacement words.
tokenizer_LM = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model_LM = AutoModelWithLMHead.from_pretrained("distilbert-base-uncased")
# BUG FIX: was model_LM.to('cuda'), which crashed on CPU-only machines even
# though `device` was selected above; use the selected device instead.
model_LM.to(device)
model_LM.eval()
app = Flask(__name__)
CORS(app)
# Per-session server state, keyed by the client-supplied uuid.
history = {}
token_types = {}
overall_memo = {}    # memoizes full scoring responses by input text
text_memo = {}
final_submissions = {}
@app.route("/set_token_type", methods=['POST'])
def set_token_type():
    """Store the token type chosen for the given session uuid."""
    payload = request.json
    token_types[payload["uuid"]] = payload["type"]
    return jsonify({"saved": True})
@app.route("/token_type", methods=['GET'])
def get_token_type():
    """Return the recorded token type for every session uuid."""
    return jsonify(token_types)
@app.route("/submit", methods=['POST'])
def add_user_submission():
    """Record a user's final text submission under their session uuid."""
    # setdefault replaces the membership-check-then-insert dance.
    final_submissions.setdefault(request.json["uuid"], []).append(request.json["submission"])
    return jsonify({"saved": True})
@app.route("/submit", methods=['GET'])
def get_user_submission():
    """Return every final submission recorded so far, keyed by session uuid."""
    return jsonify(final_submissions)
@app.route("/toxicity", methods=['POST'])
def selected_toxicity():
    """Score a text for toxicity, optionally re-scoring with user-selected
    word alternatives substituted in.

    Expects JSON with 'uuid', 'text', and 'alternatives' (word index ->
    candidate replacements).  Returns the original toxicity plus up to 5
    least-toxic complete replacements.
    """
    if "uuid" not in request.json:
        return None
    # store request
    if request.json["uuid"] not in history:
        history[request.json["uuid"]] = {
            "requests": [],
            "responses": []
        }
    history[request.json["uuid"]]["requests"].append(request.json)
    # Sort alternatives by word index so substitution order is deterministic.
    selected_alts = collections.OrderedDict(sorted(request.json["alternatives"].items()))
    swap_idxs = list(selected_alts.keys())
    if len(swap_idxs) > 0:
        # trim the alternatives to at most 3 candidates per word
        all_alts = [selected_alts[key][:3] for key in selected_alts]
        # Every combination of one candidate per swapped word.
        possible_complete_alts = cartesian_product_simple_transpose(np.array(all_alts))
        split_words = request.json['text'].split(" ")
        options = []
        raw_options = []
        # Row 0 of the batch is the unmodified input text.
        tokens = torch.LongTensor(tokenizer.encode(" ".join(split_words), add_special_tokens=True)).to(device)
        options.append(tokens)
        for candidate in possible_complete_alts:
            for idx, word in enumerate(candidate):
                split_words[int(swap_idxs[idx])] = word
            raw_options.append(" ".join(split_words))
            tokens = torch.LongTensor(tokenizer.encode(" ".join(split_words), add_special_tokens=True)).to(device)
            options.append(tokens)
        # Batch all variants together, padding to the longest sequence.
        x = pad_sequence(options, batch_first=True,
                         padding_value=tokenizer.pad_token_id).to(device)
        mask = (x != tokenizer.pad_token_id).float().to(device)
        with torch.no_grad():
            _, outputs, attns, last_layer = model(x, attention_mask=mask)
        model_outs = outputs[:, 0].squeeze().tolist()
        original_toxicity = model_outs[:1]
        candidate_replacements = model_outs[1:]
        # Rank candidates from least to most toxic and keep the best 5.
        # (Removed a duplicated re-assignment of original_toxicity here.)
        sorted_candidates = np.argsort(candidate_replacements)
        updated_toxicity_replacements = []
        for i in sorted_candidates[:5]:
            updated_toxicity_replacements.append(
                [candidate_replacements[i], list(possible_complete_alts[i])])
        return_obj = {
            "originalToxicity": original_toxicity,
            "alternatives": updated_toxicity_replacements
        }
        history[request.json["uuid"]]["responses"].append(return_obj)
        return jsonify(return_obj)
    # No alternatives selected: just score the original text.
    sigmoid_outputs, orig_attentions, orig_tokens = get_probs_and_attention(request.json['text'], model, tokenizer)
    return jsonify({
        "originalToxicity": [sigmoid_outputs[0]["value"]],
        "alternatives": []
    })
@app.route("/history", methods=['GET'])
def get_history():
    """Return the full request/response history for every session uuid."""
    return jsonify(history)
@app.route("/", methods=['POST'])
def get_toxic_labels():
    """Score a text for toxicity and, for high-attention toxic words, suggest
    lower-toxicity replacements.

    Responses are memoized per input text in overall_memo and logged per-uuid
    in history.
    """
    if "uuid" not in request.json:
        return None
    # store request
    if request.json["uuid"] not in history:
        history[request.json["uuid"]] = {
            "requests": [],
            "responses": []
        }
    history[request.json["uuid"]]["requests"].append(request.json)
    # Serve memoized responses for texts we have already scored.
    if request.json['text'] in overall_memo:
        history[request.json["uuid"]]["responses"].append(
            overall_memo[request.json['text']])
        return jsonify(overall_memo[request.json['text']])
    sigmoid_outputs, orig_attentions, orig_tokens = get_probs_and_attention(
        request.json['text'], model, tokenizer)
    # Hoisted: this set was previously rebuilt on every token inside the loops.
    english_stopwords = set(stopwords.words('english'))
    def generate_alternatives(index, tokens, original_score, orig_attentions):
        """Return up to 20 sorted [word, score, attentions] options for the
        token at `index`, or None when no acceptable replacement exists."""
        # Split the token into its leading word and any trailing punctuation.
        selection_regexed = re.findall(r"[\w']+|[.,!?;]", tokens[index])
        selection = [selection_regexed[0].lower()] + ["".join(selection_regexed[1:])]
        if (selection[0] not in vectors.vocab):
            return None
        similar_vectors = get_synonyms(selection[0].lower())
        similar_lm_words = predict_masked_tokens(tokens.copy(), index, tokenizer_LM, model_LM)
        # Fall back to embedding neighbours when no curated synonyms exist.
        if similar_vectors is None:
            similar_vectors = {elem[0]: elem[1] for elem in vectors.most_similar(selection[0].lower(), topn=10)}
        potential_options = {}
        for k in similar_vectors:
            potential_options[k] = similar_vectors[k]
        for k in similar_lm_words:
            stripped = re.sub(r'\W+', '', k)
            # Drop wordpieces, punctuation-only tokens, and stopwords.
            if '#' in stripped or len(stripped) == 0 or stripped.lower() in english_stopwords:
                continue
            if k in potential_options:
                # NOTE(review): this doubles the existing score before adding the
                # LM score -- confirm the intended score-combination rule.
                potential_options[k] += potential_options[k] + similar_lm_words[k]
            else: potential_options[k] = similar_lm_words[k]
        sorted_options = {k: v for k, v in sorted(potential_options.items(), key=lambda item: item[1])}
        local_options = []
        # Try the 20 best-ranked candidates plus outright deletion ("").
        for word in list(sorted_options.keys())[:20] + [""]:
            if '#' in word: continue
            old = tokens[index]
            tokens[index] = word
            alt_sigmoid_outputs, alt_attentions, alt_tokens = get_probs_and_attention(" ".join(tokens), model, tokenizer)
            old_word_sigmoid_out, _, _ = get_probs_and_attention(old, model, tokenizer)
            old_word_score = old_word_sigmoid_out[0]["value"]
            word_sigmoid_out, _, _ = get_probs_and_attention(word, model, tokenizer)
            word_score = word_sigmoid_out[0]["value"]
            # Keep replacements that are non-toxic where the original word was toxic.
            if word_score < .4 and old_word_score >= .4:
                local_options.append(
                    [word + selection[1], word_score, alt_attentions])
            tokens[index] = old
        if len(local_options) == 0:
            return None
        return sorted(local_options, key=lambda x: x[1], reverse=False)
    options = {}
    orig_tokens_copy = orig_tokens.copy()
    # Only look for alternatives when the whole text is at least mildly toxic.
    if sigmoid_outputs[0]["value"] > .25:
        for idx, attention in enumerate(orig_attentions):
            if orig_tokens[idx].lower() in english_stopwords:
                continue
            # Only consider words the classifier attends to strongly.
            if attention > .25:
                alternatives = generate_alternatives(
                    idx, orig_tokens_copy, sigmoid_outputs[0]["value"], orig_attentions)
                if alternatives is not None:
                    options[idx] = alternatives
    response = {
        "mainInputResults": {
            "attentionOutput": orig_attentions,
            "sigmoidOutput": sigmoid_outputs
        },
        "alternatives": options,
    }
    overall_memo[request.json['text']] = response
    history[request.json["uuid"]]["responses"].append(response)
    return jsonify(response)
if __name__ == "__main__":
    # Listen on all interfaces.  Flask documents `port` as an int; the
    # original passed the string '8000', which newer Werkzeug versions reject.
    app.run(host='0.0.0.0', port=8000)
|
<reponame>vis-submissions/vis-short-2019_1027
# Powered by Python 2.7
# To cancel the modifications performed by the script
# on the current graph, click on the undo button.
# Some useful keyboard shortcuts:
# * Ctrl + D: comment selected lines.
# * Ctrl + Shift + D: uncomment selected lines.
# * Ctrl + I: indent selected lines.
# * Ctrl + Shift + I: unindent selected lines.
# * Ctrl + Return: run script.
# * Ctrl + F: find selected text.
# * Ctrl + R: replace selected text.
# * Ctrl + Space: show auto-completion dialog.
from tulip import tlp
# The updateVisualization(centerViews = True) function can be called
# during script execution to update the opened views
# The pauseScript() function can be called to pause the script execution.
# To resume the script execution, you will have to click on the
# "Run script " button.
# The runGraphScript(scriptFile, graph) function can be called to launch
# another edited script on a tlp.Graph object.
# The scriptFile parameter defines the script name to call
# (in the form [a-zA-Z0-9_]+.py)
# The main(graph) function must be defined
# to run the script on the current graph
def main(graph):
    """Style the current Tulip graph (node colors, border widths, degree-based
    sizes, labels) and save a zoomed PNG snapshot of a node-link view.

    NOTE(review): relies on names injected by the Tulip GUI Python console
    (tlpgui, updateVisualization); it will not run standalone.
    """
    # Grab the standard Tulip view properties for this graph.
    viewBorderColor = graph['viewBorderColor']
    viewBorderWidth = graph['viewBorderWidth']
    viewColor = graph['viewColor']
    viewFont = graph['viewFont']
    viewFontSize = graph['viewFontSize']
    viewIcon = graph['viewIcon']
    viewLabel = graph['viewLabel']
    viewLabelBorderColor = graph['viewLabelBorderColor']
    viewLabelBorderWidth = graph['viewLabelBorderWidth']
    viewLabelColor = graph['viewLabelColor']
    viewLabelPosition = graph['viewLabelPosition']
    viewLayout = graph['viewLayout']
    viewMetric = graph['viewMetric']
    viewRotation = graph['viewRotation']
    viewSelection = graph['viewSelection']
    viewShape = graph['viewShape']
    viewSize = graph['viewSize']
    viewSrcAnchorShape = graph['viewSrcAnchorShape']
    viewSrcAnchorSize = graph['viewSrcAnchorSize']
    viewTexture = graph['viewTexture']
    viewTgtAnchorShape = graph['viewTgtAnchorShape']
    viewTgtAnchorSize = graph['viewTgtAnchorSize']
    #for n in graph.getNodes():
    # print(n)
    viewColor = graph.getColorProperty("viewColor")
    # Leaf nodes (degree 1) get a lighter green than interior nodes.
    for n in graph.getNodes():
        if graph.deg(n) == 1:
            viewColor[n] = tlp.Color.SpringGreen
        else:
            viewColor[n] = tlp.Color.JungleGreen
    for n in graph.getNodes():
        viewBorderWidth[n] = 1
    # Compute an anonymous degree property
    degree = tlp.DoubleProperty(graph)
    degreeParams = tlp.getDefaultPluginParameters("Degree")
    graph.applyDoubleAlgorithm("Degree", degree, degreeParams)
    # Map the node sizes to their degree
    # Compute an anonymous degree property
    # NOTE(review): this degree computation exactly duplicates the block above.
    degree = tlp.DoubleProperty(graph)
    degreeParams = tlp.getDefaultPluginParameters("Degree")
    graph.applyDoubleAlgorithm("Degree", degree, degreeParams)
    baseSize = tlp.Size(1,1,1)/13
    for n in graph.getNodes():
        viewSize[n] = baseSize * (graph.deg(n) + 1)
    # NOTE(review): "Size Mapping" recomputes viewSize and may override the
    # per-node sizes assigned just above -- confirm which one is intended.
    sizeMappingParams = tlp.getDefaultPluginParameters("Size Mapping", graph)
    graph.applySizeAlgorithm("Size Mapping", viewSize, sizeMappingParams)
    # Create a Node Link Diagram view without displaying it
    nodeLinkView = tlpgui.createView("Node Link Diagram view", graph, {}, False)
    renderingParams = nodeLinkView.getRenderingParameters()
    # Set border colors values
    # viewBorderColor.setAllNodeValue(tlp.Color.Black)
    # viewLabelColor.setAllNodeValue(tlp.Color.Blue)
    # viewLabelBorderColor.setAllNodeValue(tlp.Color.Blue)
    # Add a border to nodes/edges
    # viewBorderWidth.setAllNodeValue(0.0002)
    # viewBorderWidth.setAllEdgeValue(0.0002)
    # Sets nodes shapes to circle
    viewShape.setAllNodeValue(tlp.NodeShape.Circle)
    # Activate the ordered rendering mode
    # renderingParams.setElementOrdered(True)
    # renderingParams.setElementOrderingProperty(renderingOrderingProp)
    # Activate the "no labels overlaps" mode
    renderingParams.setLabelsDensity(0)
    renderingParams.setMinSizeOfLabel(4)
    renderingParams.setEdge3D(True)
    nodeLinkView.setRenderingParameters(renderingParams)
    #View Center
    #updateVisualization(centerViews = True)
    #nodeLinkView.zoomFactor(1.08)
    #nodeLinkView.saveSnapshot("/local/tree16_view1.png", 2048, 2048)
    #View Zoomed 01
    updateVisualization(centerViews = True)
    nodeLinkView.zoomXY(18, -5, -60)
    nodeLinkView.saveSnapshot("/local/tree16.png", 2048, 2048)
|
<reponame>James2250/SMW-MusicNamer<filename>ConvertMusic.py<gh_stars>0
import shutil
import time
import os.path
import sys
from zipfile import ZipFile
# Mutable module-level state shared by ExtractSongFromZip/ReadROMData/main.
SampleFolderName = ""   # last sample folder copied into ./samples
TxtFileWorkingOn = ""   # song .txt currently being patched (used in log lines)
ZipFileName = ""        # zip archive currently being processed
ListOfTxtFilePaths = [] #text files per zip folder
ListOfTxtFileNames = []
ListOfBrrFiles = []           # .brr sample paths found inside the extracted zip
ListOfCopiedBrrFiles = []     # sample copies placed in ./samples (cleaned per zip)
ListOfCopiedBrrFolders = []   # sample folders copied under ./samples (cleaned per zip)
ListOfCopiedBrrNames = []     # bare filenames of the copied samples
#Taken from AddMusicK
def SNESToPC(addr):
    """Convert a SNES (LoROM) address to a PC file offset, or -1 when the
    address falls outside the mappable ROM area."""
    if addr < 0 or addr > 0xFFFFFF:
        return -1
    if (addr & 0xFE0000) == 0x7E0000:  # banks $7E/$7F are not ROM
        return -1
    if (addr & 0x408000) == 0x000000:  # lower halves of system banks
        return -1
    # Halve the bank portion and keep the in-bank offset.
    return (addr & 0x7F0000) >> 1 | (addr & 0x7FFF)
def ReadROMData(File, OutputFile):
    """Extract the patched song's data block from the ROM and append it as a
    hex string to OutputFile.

    File is the AddMusicK-patched ROM opened in binary mode.  Log lines are
    tagged with the module globals ZipFileName / TxtFileWorkingOn.
    """
    address = SNESToPC(0x0E8000) #addmusic @amk
    address = address + 512 #200 hex header
    File.seek(address, 0) #@amk
    amkTest = File.read(4)
    # AddMusicK writes a signature starting with '@' (ASCII 64) here.
    if amkTest[0] == 64: # @ symbol
        File.read(4) # skip forward to music pointer
        ##---------------------------------##
        # Read a 3-byte little-endian SNES pointer (reassembled high-to-low).
        data = File.read(1).hex()
        data2 = File.read(1).hex()
        data3 = File.read(1).hex()
        addr = data3 + data2 + data #main music pointer
        addr = "0x" + addr
        IntAddr = int(addr, 0)
        IntAddr = SNESToPC(IntAddr)
        IntAddr = IntAddr + 512 #header
        File.seek(IntAddr, 0)
        File.read(30) #specific song pointer we want is 30 bytes ahead, 00 00 00 00 before then
        ##---------------------------------##
        # Second hop: follow the per-song pointer the same way.
        data4 = File.read(1).hex()
        data5 = File.read(1).hex()
        data6 = File.read(1).hex()
        NewAddr = data6 + data5 + data4 #main music pointer
        NewAddr = "0x" + NewAddr
        IntAddr2 = int(NewAddr, 0)
        IntAddr2 = SNESToPC(IntAddr2)
        IntAddr2 = IntAddr2 + 512 #header
        IntAddr2 -= 4 #go back to size after STAR
        File.seek(IntAddr2, 0)
        size1 = int("0x" + File.read(1).hex(), 0) #STAR size 1
        size2 = int("0x" + File.read(1).hex(), 0) #STAR size 2
        File.read(2) #ignore inverse size
        # Sizes are little-endian: low byte then high byte.
        TotalSize = size1 | size2 << 8
        FinalData = (File.read(TotalSize).hex())
        OutputFile.write(ZipFileName + " -- " + TxtFileWorkingOn + " : " + FinalData + "\n")
        File.close()
        # Clean up the temporary ROM copies produced by AddMusicK.
        os.remove("ROM.smc")
        os.remove("ROM.msc")
        os.remove("ROM.smc~")
    else:
        # NOTE(review): File is not closed on this path and the ROM temp
        # files are left behind -- confirm whether that is intended.
        OutputFile.write(ZipFileName + " -- " + TxtFileWorkingOn + " FAILED TO PATCH \n" )
def ExtractSongFromZip(path, OutputFile):
    """Walk an extracted zip tree, stage its .brr samples and song .txt files
    into the AddmusicK working folders, patch a fresh ROM per song, and record
    each result via ReadROMData."""
    global SampleFolderName
    global TxtFileWorkingOn
    brrFolderNames = set()
    for (dirpath, dirnames, filenames) in os.walk(path):
        # Skip macOS metadata folders and prebuilt SPC dumps.
        if "__MACOSX" in dirnames:
            dirnames.remove("__MACOSX")
        if "SPCs" in dirnames:
            dirnames.remove("SPCs")
        for filename in filenames:
            if filename.endswith(".brr"): # we are in samples folder
                ListOfBrrFiles.append(dirpath + "/" + filename)
                ListOfCopiedBrrNames.append(filename)
                #if not SeenBrrFile:
                # Copy each distinct sample folder once into ./samples.
                if os.path.basename(dirpath) not in brrFolderNames:
                    SeenBrrFile = True
                    NewFolder = "./samples/" + os.path.basename(dirpath)
                    shutil.copytree(dirpath, NewFolder)
                    ListOfCopiedBrrFolders.append(NewFolder)
                    brrFolderNames.add(os.path.basename(dirpath))
                    SampleFolderName = NewFolder
            if filename.endswith('.txt'):
                if "readme" in filename.lower(): #ignore
                    continue
                elif "patterns" in filename.lower(): #ignore
                    continue
                else: # .txt file song
                    ListOfTxtFilePaths.append(dirpath + "/" + filename)
                    ListOfTxtFileNames.append(filename)
    count = 0
    brr_count = 0
    # Stage every sample next to AddmusicK and remember the copies for cleanup.
    for File in ListOfBrrFiles:
        shutil.copy(File, "./samples")
        ListOfCopiedBrrFiles.append("./samples/" + ListOfCopiedBrrNames[brr_count])
        brr_count +=1
    for File in ListOfTxtFilePaths:
        TxtFileWorkingOn = ListOfTxtFileNames[count]
        shutil.copy(File, "./music")
        # AddmusicK always patches ./music/Song1.txt, so rename each song into place.
        OldName = "./music/" + ListOfTxtFileNames[count]
        NewName = "./music/Song1.txt"
        if os.path.isfile(NewName):
            os.remove(NewName)
        os.rename(OldName, NewName)
        # Start from a clean copy of the original ROM each time.
        shutil.copy("ROMo.smc", "ROM.smc")
        #-noblock needed so cmd doesn't wait for pressing enter
        os.system('cmd /c "AddmusicK ROM.smc" -noblock')
        # NOTE(review): this rebinds the loop variable File to a file handle;
        # ReadROMData is expected to close it on the success path.
        File = open("ROM.smc", "rb")
        ReadROMData(File, OutputFile)
        count += 1
def main():
    """Process every zip under ./SONGS: extract it, patch each contained song
    into a fresh ROM with AddmusicK, and log the resulting song data (or
    failures) to OutputFile.txt."""
    global ZipFileName
    # Context manager guarantees the log is flushed and closed even on errors.
    with open('OutputFile.txt', 'a', encoding='utf-8') as OutputFile:
        for filename in os.listdir("./SONGS"):
            os.mkdir("./SONGS/TEMP")
            # Reset per-zip bookkeeping state.
            ListOfTxtFileNames.clear()
            ListOfTxtFilePaths.clear()
            ListOfBrrFiles.clear()
            ListOfCopiedBrrNames.clear()
            ListOfCopiedBrrFiles.clear()
            ListOfCopiedBrrFolders.clear()
            if filename.endswith(".zip"):
                ZipFileName = filename
                with ZipFile("./SONGS/" + filename, 'r') as zipObj:
                    try:
                        zipObj.extractall('./SONGS/TEMP')
                        ExtractSongFromZip("./SONGS/TEMP", OutputFile)
                    except OSError:
                        # The bound exception object was never used.
                        OutputFile.write(filename + " FAILED TO OPEN\n")
            # Remove the staged sample copies (but keep the stock sample).
            for File in ListOfCopiedBrrFiles:
                if os.path.isfile(File):
                    if File != "./samples/EMPTY.brr": #don't remove default sample
                        os.remove(File)
            for Folder in ListOfCopiedBrrFolders:
                if os.path.isdir(Folder):
                    shutil.rmtree(Folder)
            shutil.rmtree("./SONGS/TEMP") #can't use os.rmdir, needs empty folder
if __name__== "__main__":
    # Script entry point.
    main()
|
import importlib
import time
import pandas as pd
import data.dataPrep as dataPrep
import data.dataTransform as dataTransform
from utils.loggingUtils import custom_logger, shutdown_logger
from utils.modelUtils import *
from validation.eval import Evaluator
# Presumably the fraction of progress between log lines -- not referenced in
# the visible code; confirm against the training utilities.
LOG_FREQ_PERCENT = 0.25
# Fallback model config used when no config path is passed to load_config().
DEFAULT_MODEL_CONFIG = "experiment_config/sampleConfig.yaml"
def load_config(model_config_path):
    """
    Load all config files.
    :param model_config_path: list of model config file paths; None loads DEFAULT_MODEL_CONFIG
    :return: database config, data config, model configs, sql template config, meta information config,
             history configs, top_k configs
    """
    # `with` closes each file automatically -- the explicit .close() calls the
    # original made inside the with-blocks were redundant.
    with open('experiment_config/dataConfig.yaml', 'r') as data_config_f:
        raw_config = yaml.safe_load(data_config_f.read())
        db_config = raw_config['database']
        data_config = raw_config['data']
    with open('data/templates.yaml', 'r') as f2:
        sql_templates_config = yaml.safe_load(f2.read())
    model_configs, meta_configs, history_configs, top_k_configs = [], [], [], []
    if model_config_path is None:
        model_config_path = [DEFAULT_MODEL_CONFIG]
    # Iterate paths directly instead of indexing with range(len(...)).
    for config_path in model_config_path:
        with open(os.path.abspath(config_path), "r") as model_config_f:
            raw_config = yaml.safe_load(model_config_f.read())
            print("loaded model config from: {}".format(config_path))
            model_configs.append(raw_config['model'])
            meta_configs.append(raw_config['meta'])
            history_configs.append(raw_config['with_history'])
            top_k_configs.append(raw_config['top_k'])
    return db_config, data_config, model_configs, sql_templates_config, meta_configs, history_configs, top_k_configs
def fit_one_model(which_model, which_model_config, model_name, train_features, train_labels, train_set, logger):
    """
    Fit one model given specifications.

    :param which_model: model type (one of SKLEARN_MODELS or "xgboost")
    :param which_model_config: model config for which_model
    :param model_name: name of the model
    :param train_features: training features
    :param train_labels: training labels
    :param train_set: time range of training set [training set label start, label end]
    :param logger: global logger
    :return: the fitted model instance
    :raises NotImplementedError: for unsupported model types
    """
    # Guard clause: reject unsupported model types up front.
    if which_model not in SKLEARN_MODELS and which_model != "xgboost":
        logger.error("sorry we don't support {}!".format(which_model))
        raise NotImplementedError()
    # Resolve the model class dynamically from the `model` package.
    class_name = MODELS_MAP[which_model]
    model_module = importlib.import_module("model.{}".format(class_name))
    model_cls = getattr(model_module, class_name)
    model = model_cls(name=model_name,
                      logger=logger,
                      train_set=train_set,
                      **which_model_config)
    model.fit(data=[train_features, train_labels],
              label_name="formal",
              id_name="handler_id")
    return model
def eval_one_model(model, plot, save_path, test_features, test_labels, logger, eval_year, top_k):
    """
    Evaluate one model on one validation set
    :param model: trained model
    :param plot: whether to plot the precision-recall-at-k curve
    :param save_path: path to save eval results
    :param test_features: validation/testing features
    :param test_labels: validation/testing labels
    :param logger: global logger
    :param eval_year: validation label year
    :param top_k: top fraction of the ranked list to score (e.g. 0.05)
    :return: precision & support at top 5% # TODO: make this variable for different cohort
    """
    # Score the test cohort; predict() yields (handler_id, score) rows.
    score = model.predict(data=test_features,
                          label_name="formal",
                          id_name="handler_id")
    score_df = pd.DataFrame(data=score, columns=['handler_id', 'score'])
    label_df = test_labels[['handler_id', 'formal']]
    logger.debug("evaluating on test set from year {} with {} rows".format(eval_year, label_df.shape[0]))
    eva = Evaluator(scores=score_df,
                    y_true=label_df,
                    save_path=save_path,
                    model_name=model.name,
                    eval_year=eval_year,
                    logger=logger)
    prk = eva.precision_recall_k()
    if plot:
        eva.graph_prk(prk)
    # NOTE(review): only 2014 scores are exported — presumably the final
    # test/reporting year; confirm whether this hard-coded year is still wanted.
    if eval_year == 2014:
        dataPrep.export2csv(os.path.join(save_path, model.name, 'scores_{}.csv'.format(eval_year)), score_df)
    logger.debug("finished evaluation")
    # recall_at_k is computed but deliberately not returned here.
    precision_at_k, support_at_k, recall_at_k = eva.precision_support_recall_at_k(top_k)
    return precision_at_k, support_at_k
def save_one_model(which_model, save_path, which_model_config, model):
    """
    Export one trained model to disk.

    :param which_model: model type (one of SKLEARN_MODELS or "xgboost")
    :param save_path: directory to save model
    :param which_model_config: model config for model type
    :param model: trained model
    :return: None
    :raises NotImplementedError: for unsupported model types
    """
    if which_model in SKLEARN_MODELS:
        model.export_model(path=save_path)
        return
    if which_model != 'xgboost':
        raise NotImplementedError
    # xgboost additionally needs the configured save mode.
    model.export_model(path=save_path,
                       save_mode=which_model_config['model_save_mode'])
def train_routine(which_model, model_name, db_config, data_config, model_config, sql_templates_config, top_k, logger):
    """
    Training routine (train only one model)
    :param which_model: model type
    :param model_name: name of the model
    :param db_config: database config
    :param data_config: data setting config
    :param model_config: model config
    :param sql_templates_config: sql template config
    :param top_k: top fraction of the ranked list used for evaluation
    :param logger: global logger
    :return: None
    """
    # create engine
    db_engine = dataPrep.getEngine(filePath=db_config['db_secret_directory'])
    logger.debug("successfully connected to database engine")
    # get dates
    data_config = dataPrep.inferDates(data_config, logger)
    logger.debug("successfully inferred dates from config")
    logger.info("start preparing train and test/validation features and labels")
    # prepare train features/labels for multiple years and val features/labels for one year
    train_features_all, train_labels, \
        test_features_all, test_labels = dataPrep.prepDataOvertime(data_config, sql_templates_config, db_engine, logger)
    logger.info("Total # training instance:{}, "
                "#testing/val instance:{}".format(train_features_all.shape[0],
                                                  test_features_all.shape[0]))
    # training window spans from the first to the last train label year
    train_set = [data_config['years']['train_label_start'][0], data_config['years']['train_label_start'][-1]]
    model = fit_one_model(which_model=which_model,
                          which_model_config=model_config[which_model],
                          model_name=model_name,
                          train_features=train_features_all,
                          train_labels=train_labels,
                          train_set=train_set,
                          logger=logger)
    if model_config['save_model']:
        save_one_model(which_model=which_model,
                       save_path=model_config['base_dir'],
                       which_model_config=model_config[which_model],
                       model=model)
    if model_config['eval_model']:
        # evaluate on the first validation label year only
        _ = eval_one_model(model=model,
                           save_path=model_config['base_dir'],
                           plot=model_config['plot_prk'],
                           test_features=test_features_all,
                           test_labels=test_labels,
                           logger=logger,
                           eval_year=data_config['years']['val_label_start'][0],
                           top_k=top_k)
def cv_routine_multiple_model(which_model, which_model_grid, db_config, data_config,
                              model_config, sql_templates_config, top_k, logger):
    """
    Temporal cross-validation routine with model grid search.

    Folds use an expanding window: the training set accumulates label years
    while the validation set is always the single matching year.
    :param which_model: type of model running running
    :param which_model_grid: model grid for which_model
    :param db_config: database config
    :param data_config: global data config
    :param model_config: global model config
    :param sql_templates_config: sql feature templates
    :param top_k: top fraction of the ranked list used for evaluation
    :param logger: global logger
    :return: prk matrix
    """
    db_engine = dataPrep.getEngine(filePath=db_config['db_secret_directory'])
    logger.debug("successfully connected to database engine")
    data_config = dataPrep.inferDates(data_config, logger)
    logger.debug("successfully inferred dates from config")
    logger.info("start preparing train and test/validation features and labels")
    train_features_all, test_features_all = None, None
    train_labels, test_labels = None, None
    train_label_start = data_config['years']['train_label_start']
    # one row per grid point, one column per temporal fold
    precision_matrix = np.zeros(shape=(len(which_model_grid), len(train_label_start)))
    support_matrix = np.zeros(shape=(len(which_model_grid), len(train_label_start)))
    which_model_names = []
    which_model_params = []
    save_model = model_config['save_model']
    eval_model = model_config['eval_model']
    start_start_time = time.time()
    for i in range(len(train_label_start)):
        val_label_start = data_config['years']['val_label_start'][i]
        train_set = [data_config['years']['train_label_start'][0], data_config['years']['train_label_start'][i]]
        logger.debug("start preparing train features with labels in {} and "
                     "test/validation with labels in {}".format(train_label_start[i], val_label_start))
        # build the active cohort tables for this fold's train/val years
        train_handlers_table_name = dataPrep.prepCohortOneYear(data_config=data_config,
                                                               sql_template_config=sql_templates_config,
                                                               year_index=i,
                                                               mode="train",
                                                               db_engine=db_engine,
                                                               logger=logger)
        test_handlers_table_name = dataPrep.prepCohortOneYear(data_config=data_config,
                                                              sql_template_config=sql_templates_config,
                                                              year_index=i,
                                                              mode="val",
                                                              db_engine=db_engine,
                                                              logger=logger)
        logger.info("finished generating active cohort tables (train:{}-{}, val:{})".format(train_label_start[0],
                                                                                            train_label_start[i],
                                                                                            val_label_start))
        logger.debug("start preparing training and testing labels")
        train_labels_cur = dataPrep.prepLabelsOneYear(data_config=data_config,
                                                      sql_template_config=sql_templates_config,
                                                      year_index=i,
                                                      mode="train",
                                                      handlers_table_name=train_handlers_table_name,
                                                      db_engine=db_engine)
        test_labels_cur = dataPrep.prepLabelsOneYear(data_config=data_config,
                                                     sql_template_config=sql_templates_config,
                                                     year_index=i,
                                                     mode="val",
                                                     handlers_table_name=test_handlers_table_name,
                                                     db_engine=db_engine)
        num_train, num_test = len(train_labels_cur), len(test_labels_cur)
        logger.info("#training instance:{}, #testing/val instance:{}".format(num_train, num_test))
        train_features_all_cur = dataPrep.prepFeaturesOneYear(data_config=data_config,
                                                              sql_template_config=sql_templates_config,
                                                              year_index=i,
                                                              mode="train",
                                                              handlers_table_name=train_handlers_table_name,
                                                              num_data=num_train,
                                                              db_engine=db_engine,
                                                              logger=logger)
        test_features_all_cur = dataPrep.prepFeaturesOneYear(data_config=data_config,
                                                             sql_template_config=sql_templates_config,
                                                             year_index=i,
                                                             mode="val",
                                                             handlers_table_name=test_handlers_table_name,
                                                             num_data=num_test,
                                                             db_engine=db_engine,
                                                             logger=logger)
        if train_features_all is None:
            # first fold: initialize the cumulative train set
            train_features_all, train_labels = train_features_all_cur, train_labels_cur
            test_features_all, test_labels = test_features_all_cur, test_labels_cur
        else:
            assert train_features_all.shape[1] == train_features_all_cur.shape[1]
            assert test_features_all.shape[1] == test_features_all_cur.shape[1]
            prev_train_len = train_features_all.shape[0]
            # train features/labels are cumulative
            train_features_all = pd.concat([train_features_all, train_features_all_cur], ignore_index=True)
            train_labels = pd.concat([train_labels, train_labels_cur], ignore_index=True)
            assert train_features_all.shape[0] == train_labels.shape[0] == prev_train_len + num_train
            # test/val features/label are different each time since we're only using one year
            test_features_all, test_labels = test_features_all_cur, test_labels_cur
        # fit the imputation/one-hot transform on train, reuse it for test
        processed_train_features, train_onehot = dataTransform.impute_transform(train_features_all, data_config, logger)
        processed_test_features, _ = dataTransform.impute_transform(test_features_all, data_config, logger,
                                                                    train_onehot)
        processed_train_features, processed_test_features = dataTransform.consistent_transform(processed_train_features,
                                                                                              processed_test_features)
        assert processed_train_features.shape[1] == processed_test_features.shape[1]
        logger.info("Total # training instance for this fold:{} [{}, {}], "
                    "#testing/val instance for this fold:{} [{}], "
                    "Total # of features: {}".format(processed_train_features.shape[0],
                                                     train_set[0], train_set[1],
                                                     processed_test_features.shape[0],
                                                     val_label_start,
                                                     processed_train_features.shape[1]))
        curr_best_prk = None
        N = len(which_model_grid)
        # log progress roughly every LOG_FREQ_PERCENT of the grid
        log_N = 1 if N * LOG_FREQ_PERCENT < 1 else int(N * LOG_FREQ_PERCENT)
        start_time = time.time()
        for j in range(N):
            model_grid_j = which_model_grid[j]
            model_name_j = which_model + "_" + str(j)
            if 'model_name' in which_model_grid[j].keys():
                model_name_j = which_model_grid[j]['model_name']
            model = fit_one_model(which_model=which_model,
                                  which_model_config=model_grid_j,
                                  model_name=model_name_j,
                                  train_features=processed_train_features,
                                  train_labels=train_labels,
                                  train_set=train_set,
                                  logger=logger)
            if save_model:
                save_one_model(which_model=which_model,
                               save_path=model_config['base_dir'],
                               which_model_config=model_config[which_model],
                               model=model)
            if eval_model:
                precision_at_k, support_at_k = eval_one_model(model=model,
                                                              plot=model_config['plot_prk'],
                                                              save_path=model_config['base_dir'],
                                                              test_features=processed_test_features,
                                                              test_labels=test_labels,
                                                              eval_year=val_label_start,
                                                              logger=logger,
                                                              top_k=top_k)
                precision_matrix[j, i] = precision_at_k
                support_matrix[j, i] = support_at_k
                if curr_best_prk is None or precision_at_k > curr_best_prk:
                    curr_best_prk = precision_at_k
                    logger.info(
                        "current best prk@top {}%:{} with support@top {}%: {}".format(int(top_k*100),
                                                                                      curr_best_prk,
                                                                                      int(top_k*100),
                                                                                      support_at_k,
                                                                                      ))
            # export the last fold's validation set once requested
            if i == len(train_label_start) - 1 and model_config['save_valid']:
                base_path = model_config['base_dir']
                if not os.path.exists(base_path):
                    os.makedirs(base_path)
                out_path = os.path.join(base_path, "valid_features_{}.csv".format(val_label_start))
                dataPrep.export2csv(out_path, processed_test_features)
                out_path = os.path.join(base_path, 'valid_labels_{}.csv'.format(val_label_start))
                dataPrep.export2csv(out_path, test_labels)
                logger.debug("exported last validation set to {}".format(out_path))
            if (j + 1) % log_N == 0:
                percent = (j + 1) / N
                past_time = time.time() - start_time
                minute_since_start = time.time() - start_start_time
                logger.info("{:.1f}% completed for this fold. "
                            "Avg speed: {:2.2f}s/iteration. "
                            "Total time: {:2.2f} min.".format(percent * 100,
                                                              past_time / log_N,
                                                              minute_since_start / 60))
                start_time = time.time()
            if i == 0:
                # record the grid point identity only once (first fold)
                which_model_names.append(model_name_j)
                which_model_params.append(model_grid_j)
                # save_model_config(os.path.join(model_config['base_dir'], model_name_j), model_grid_j)
    return precision_matrix, support_matrix, which_model_names, which_model_params
def grid_search_routine(model_name, db_config, data_config, model_config, sql_templates_config, top_k, logger):
    """
    Model grid search routine: run temporal CV for every model type listed in
    model_config['which_model'] and collect per-model result matrices.

    :param model_name: name of the model
    :param db_config: database config
    :param data_config: global data config
    :param model_config: global model config
    :param sql_templates_config: sql feature templates
    :param top_k: top fraction of the ranked list used for evaluation
    :param logger: global logger
    :return: dict keyed by model type with precision/support matrices,
             model names and model params
    """
    # TODO: move history setting to model config
    # Shallow copy so that mutating 'base_dir' below does not leak to callers.
    cfg = copy.copy(model_config)
    run_base_dir = os.path.join(cfg['base_dir'], model_name)
    import_grid_directly = cfg['mode'] == "grid_search_import"
    results = dict()
    for model_type in cfg['which_model']:
        grid = parse_hyperparams(cfg[model_type], direct_import=import_grid_directly)
        logger.info("starting grid search for model:{}".format(model_type))
        history_tag = "with" if data_config['with_history'] else "without"
        cfg['base_dir'] = os.path.join(run_base_dir, model_type + "_" + history_tag)
        precisions, supports, grid_names, grid_params = \
            cv_routine_multiple_model(which_model=model_type,
                                      which_model_grid=grid,
                                      db_config=db_config,
                                      data_config=data_config,
                                      model_config=cfg,
                                      sql_templates_config=sql_templates_config,
                                      top_k=top_k,
                                      logger=logger)
        results[model_type] = {
            'precision_overtime': precisions,
            'support_overtime': supports,
            'model_names': grid_names,
            'which_model_params': grid_params,
        }
    return results
def main(model_config_names, db_config, data_config, sql_templates_config, model_configs, meta_configs,
         history_configs, top_k_configs):
    """
    Main routine
    :param history_configs: list of history configs
    :param meta_configs: list of meta configs
    :param model_configs: list of model configs
    :param model_config_names: list of name of model configs
    :param db_config: database config
    :param data_config: data related config
    :param sql_templates_config: feature templates config
    :param top_k_configs: list of top-k configs, parallel to model_configs
    :return:
    """
    time_for_filename = datetime.now().strftime("%y%m%d_%H%M")
    if model_config_names is None:
        model_config_names = [DEFAULT_MODEL_CONFIG]
    for config_idx in range(len(model_config_names)):
        # strip directory prefix and ".yaml" suffix to get a short config name
        model_config_name = model_config_names[config_idx].split("/")[-1][0:-len(".yaml")]
        meta_config = meta_configs[config_idx]
        model_config = model_configs[config_idx]
        history_config = history_configs[config_idx]
        top_k_config = top_k_configs[config_idx]
        np.random.seed(meta_config['random_seed'])
        mode = model_config["mode"]
        if 'grid_search' in mode:
            appendix = 'autogen' if mode == "grid_search_autogen" else 'import'
            name = "grid" + "_" + appendix
            dir_name = name + "_" + time_for_filename + "_" + model_config_name
            base_dir = os.path.join(model_config['base_dir'], dir_name)
            LOGGER = custom_logger(name=name,
                                   dir=base_dir,
                                   save=meta_config['dump_log'],
                                   level=meta_config['log_level'])
            # normalize scalar history/top-k settings to parallel lists
            if not isinstance(history_config, list):
                history_config = [history_config]
                top_k_config = [top_k_config]
            for idx, cur_history in enumerate(history_config):
                data_config['with_history'] = cur_history
                top_k = top_k_config[idx]
                history_str = 'with' if cur_history else 'without'
                if mode == "grid_search_autogen":
                    LOGGER.info("start grid search on cohort {} history "
                                "using config {}".format(history_str, model_config_name))
                else:
                    LOGGER.info("start running model grid on cohort {} history with "
                                "imported params from config {}".format(history_str, model_config_name))
                final_output = grid_search_routine(dir_name, db_config, data_config, model_config,
                                                   sql_templates_config, top_k, LOGGER)
                save_grid_search_result(base_dir, "{}_grid_result_top{}%.csv".format(history_str,
                                                                                     int(100*top_k)), final_output)
                LOGGER.info("finished grid search routine!")
        else:
            # if not grid search, only fit the zeroth model type
            which_model = model_config['which_model'][0]
            model_name = which_model + "_" + datetime.now().strftime("%y%m%d-%H%M")
            # if not grid search mode, only fit one type of model
            if isinstance(history_config, list):
                history_config = history_config[0]
                top_k = top_k_config[0]
            # NOTE(review): when history_config is NOT a list, top_k is never
            # assigned here and the train/cv calls below would raise — confirm
            # whether configs always supply lists in this mode.
            history = 'with' if history_config else 'without'
            data_config['with_history'] = history_config
            LOGGER = custom_logger(name=which_model,
                                   dir=os.path.join(model_config['base_dir'], model_name),
                                   save=meta_config['dump_log'],
                                   level=meta_config['log_level'])
            if mode == 'cv':
                which_model_config = model_config[which_model]
                LOGGER.info("start cross validation of {} model on cohort {} history "
                            "using config {}".format(model_name, history, model_config_name))
                precision_lst = cv_routine_multiple_model(which_model, [which_model_config], db_config, data_config,
                                                          model_config, sql_templates_config, top_k, LOGGER)
                LOGGER.info("Precision%5 for cv folds:{}".format(precision_lst))
            elif mode == 'train':
                LOGGER.info("start training {} model on cohort {} history "
                            "using config {}".format(model_name, history, model_config_name))
                train_routine(which_model, model_name, db_config,
                              data_config, model_config, sql_templates_config, top_k, LOGGER)
            else:
                raise NotImplementedError("unrecognized mode")
        # release log handlers so the next config gets a fresh logger
        shutdown_logger(LOGGER)
        del LOGGER
if __name__ == '__main__':
    # Parse one or more model-config paths, load every config file, then hand
    # everything to main().
    parser = argparse.ArgumentParser(description='Execute pipeline.')
    parser.add_argument('-model_config', '--mc',
                        dest='model_config_filenames',
                        metavar='model_config_filenames', type=str, nargs='+',
                        help='paths to model config file')
    args = parser.parse_args()
    db_config_main, data_config_main, model_configs_main, \
        sql_templates_config_main, meta_configs_main, \
        history_configs_main, top_k_configs_main = load_config(args.model_config_filenames)
    main(args.model_config_filenames, db_config_main, data_config_main, sql_templates_config_main,
         model_configs_main, meta_configs_main, history_configs_main, top_k_configs_main)
|
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
#import matplotlib.cm as CM
import os
import numpy as np
from skimage import io;
import glob;
import cv2 ;
import sys;
#from scipy.misc import imresize
#from scipy.ndimage.filters import convolve
from skimage.measure import label
from skimage import filters
import math
from tqdm import tqdm as tqdm
import torch
import torch.nn as nn
#from scipy import ndimage
#from unet_vgg4_cc import UnetVggCC
#from my_dataset_highres4_jhu_wdots_wname import CrowdDataset
#from TDFMain_pytorch import *
'''
Evaluate the counting error for each category of the JHU++ dataset: low, medium,
high, and weather.

Output files:
    out_jhu_categories.txt: MAE and RMSE per category.

Before running, modify the directories in the __main__ block:
    data_dir: path containing the prediction files.
    root: dataset root.
    gt_dir: ground-truth dot maps.
    out_dir: output directory.
    label_filepath: dataset labels text file containing the categorical labels.
'''
if __name__=="__main__":
    ####################################################################################
    ## Configuration for JHU++ - Test
    ####################################################################################
    '''
    data_dir = './eval/jhu_custom_topo1_patch100_topocount_test'; # contains prediction files
    root = './datasets/jhu/jhu_crowd_v2.0' # dataset root
    gt_dir = os.path.join(root, 'test','ground-truth_dots') # ground truth dot maps
    label_filepath = os.path.join(root, 'test','image_labels.txt') # labels file
    out_dir= './eval/jhu_custom_topo1_patch100_topocount_test'; # output directory
    log_filename = 'out_jhu_categories.txt'
    thresh_low = 0.4
    thresh_high = 0.5
    size_thresh = -1 # if set gets rid of connected components < size_thresh pixels
    '''
    ####################################################################################
    ## Configuration for JHU++ - Validation
    ####################################################################################
    #'''
    data_dir = './eval/jhu_custom_topo1_patch100_topocount_val'  # contains prediction files
    root = './datasets/jhu/jhu_crowd_v2.0'  # dataset root
    gt_dir = os.path.join(root, 'val', 'ground-truth_dots')  # ground truth dot maps
    label_filepath = os.path.join(root, 'val', 'image_labels.txt')  # labels file
    out_dir = './eval/jhu_custom_topo1_patch100_topocount_val'  # output directory
    log_filename = 'out_jhu_categories.txt'
    thresh_low = 0.4   # hysteresis lower threshold on the likelihood map
    thresh_high = 0.5  # hysteresis upper threshold on the likelihood map
    size_thresh = -1   # if > 0, drop connected components smaller than this many pixels
    #'''
    ####################################################################################
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    with open(os.path.join(out_dir, log_filename), 'a+') as log_file:
        mae = 0
        rmse = 0
        files_count = 0
        cat_dict = {'low': 0, 'medium': 1, 'high': 2, 'weather': 3}
        # BUG FIX: these accumulators were integer arrays, but the per-image
        # error is a float (g_count is a float dot-map sum), so adding it into
        # an int array silently truncated the fractional part of every
        # per-category MAE/RMSE. Accumulate in float64 instead.
        mae_cat = np.zeros(4, dtype=np.float64)   # low, medium, high, weather
        rmse_cat = np.zeros(4, dtype=np.float64)  # low, medium, high, weather
        files_count_cat = np.array([0, 0, 0, 0])  # low, medium, high, weather
        # get prediction files paths
        e_soft_map_files = glob.glob(os.path.join(data_dir, '*_likelihood' + '.npy'))
        print('files count', len(e_soft_map_files))
        # load labels file
        labels = np.loadtxt(label_filepath, dtype=str, delimiter=',')
        i = -1
        for file in e_soft_map_files:
            files_count += 1
            i += 1
            print('processing ', i)
            img_name = os.path.basename(file)[:-len('_likelihood.npy')]
            g_dot = np.load(os.path.join(gt_dir, img_name + '.npy'))
            g_count = g_dot.sum()
            e_soft = np.load(file)
            print('img_name', img_name)
            # crop the ground-truth dot map to the prediction's spatial size
            g_dot = g_dot[:e_soft.shape[0], :e_soft.shape[1]]
            label_row = labels[np.where(labels[:, 0] == os.path.splitext(img_name)[0])].squeeze()
            print('label_row', label_row)
            # get topological map from likelihood prediction
            e_hard = filters.apply_hysteresis_threshold(e_soft, thresh_low, thresh_high)
            e_hard2 = (e_hard > 0).astype(np.uint8)
            comp_mask = label(e_hard2)
            e_count = comp_mask.max()
            s_count = 0
            if size_thresh > 0:
                # discard connected components smaller than size_thresh pixels
                for c in range(1, comp_mask.max() + 1):
                    s = (comp_mask == c).sum()
                    if s < size_thresh:
                        e_count -= 1
                        s_count += 1
            # get dot predictions from topological map (centers of connected components)
            e_dot = np.zeros(g_dot.shape)
            e_dot_vis = np.zeros(g_dot.shape)
            contours, hierarchy = cv2.findContours(e_hard2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            e_coord_y = []
            e_coord_x = []
            for idx in range(len(contours)):
                contour_i = contours[idx]
                M = cv2.moments(contour_i)
                # skip degenerate contours with zero area (no centroid)
                if M['m00'] == 0:
                    continue
                cx = round(M['m10'] / M['m00'])
                cy = round(M['m01'] / M['m00'])
                e_dot_vis[cy - 1:cy + 1, cx - 1:cx + 1] = 1
                e_dot[cy, cx] = 1
                e_coord_y.append(cy)
                e_coord_x.append(cx)
            err = e_count - g_count
            mae += abs(err)
            rmse += err ** 2
            log_file.write("image {} e_count {} g_count {} err {} \n".format(img_name, e_count, g_count, err))
            log_file.flush()
            # Bucket by the labelled image count (label_row[1]): low (<51),
            # medium (<501), high (rest). label_row[3] > 0 additionally marks
            # adverse-weather images — an overlapping category.
            if float(label_row[1]) < 51:
                cat = cat_dict['low']
            elif float(label_row[1]) < 501:
                cat = cat_dict['medium']
            else:
                cat = cat_dict['high']
            mae_cat[cat] += abs(err)
            rmse_cat[cat] += err ** 2
            files_count_cat[cat] += 1
            if int(label_row[3]) > 0:
                w = cat_dict['weather']
                mae_cat[w] += abs(err)
                rmse_cat[w] += err ** 2
                files_count_cat[w] += 1
        mae /= files_count
        rmse = math.sqrt(rmse / files_count)
        log_file.write("mae {} rmse {} \n".format(mae, rmse))
        log_file.flush()
        mae_cat = mae_cat / files_count_cat
        rmse_cat = np.sqrt(rmse_cat / files_count_cat)
        for cat in cat_dict.keys():
            log_file.write("cat {} mae {} rmse {} files_count {} \n".format(cat, mae_cat[cat_dict[cat]], rmse_cat[cat_dict[cat]], files_count_cat[cat_dict[cat]]))
            log_file.flush()
        sys.stdout.flush()
        print('Done.')
        print('Check output in: ', os.path.join(out_dir, log_filename))
|
# Repository: pksenpai/Durer
from torchvision import models, transforms
import torch
from PIL import Image
import torch.nn as nn
import streamlit as st
# Generation hyper-parameters shared by both weight sets.
image_size = 64
batch_size = 32
stats = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
latent_size = 150


def denorm(img_tensors):
    """Undo the (mean, std) normalisation: map values from (-1, 1) back to (0, 1)."""
    mean, std = stats[0][0], stats[1][0]
    return img_tensors * std + mean
def _generate_and_show(weights_path):
    """Load a generator checkpoint, sample a batch, save 9 thumbnails and
    display the first 8 in Streamlit.

    :param weights_path: path to a pickled generator module (torch.save format)
    """
    generator = torch.load(weights_path, map_location="cpu")
    generator.eval()
    generator.to(torch.device("cpu"))
    # sample a full batch of latent vectors; only the first 9 images are used
    batch = generator(torch.randn(batch_size, latent_size, 1, 1))
    gen_imgs = denorm(batch.detach())
    to_pil = transforms.ToPILImage()
    # same output filenames as before: new.jpg, new1.jpg, ..., new8.jpg
    paths = ["SAMPLES/new.jpg"] + ["SAMPLES/new{}.jpg".format(k) for k in range(1, 9)]
    for img_idx, path in enumerate(paths):
        to_pil(gen_imgs[img_idx]).resize((70, 70)).save(path, quality=100)
    # NOTE(review): 9 images are saved but only the first 8 are displayed,
    # matching the original behavior — confirm whether new8.jpg is intentional.
    st.image(paths[:8], width=70)


def new():
    """Generate and display one sample grid per trained weight file.

    Fix/cleanup: the original duplicated an identical 37-line body twice
    (once per weight file), each wrapped in a dead `for i in range(1, 2)`
    loop and with nine copy-pasted result variables; behavior is unchanged.
    """
    for weights_path in ("WEIGHTS/w1.pth", "WEIGHTS/w2.pth"):
        _generate_and_show(weights_path)
|
""" Testing DKI """
from __future__ import division, print_function, absolute_import
import numpy as np
import random
import dipy.reconst.dki as dki
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_almost_equal)
from nose.tools import assert_raises
from dipy.sims.voxel import multi_tensor_dki
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
from dipy.data import get_data
from dipy.reconst.dti import (from_lower_triangular, decompose_tensor)
from dipy.reconst.dki import (mean_kurtosis, carlson_rf, carlson_rd,
axial_kurtosis, radial_kurtosis, _positive_evals)
from dipy.core.sphere import Sphere
from dipy.core.geometry import perpendicular_directions
# Real 64-direction acquisition scheme used to build the gradient table.
fimg, fbvals, fbvecs = get_data('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gtab = gradient_table(bvals, bvecs)
# 2 shells for techniques that require multishell data
bvals_2s = np.concatenate((bvals, bvals * 2), axis=0)
bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
gtab_2s = gradient_table(bvals_2s, bvecs_2s)
# Simulation 1. signals of two crossing fibers are simulated
mevals_cross = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087],
                         [0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
angles_cross = [(80, 10), (80, 10), (20, 30), (20, 30)]
fie = 0.49  # intra-axonal water fraction
frac_cross = [fie*50, (1-fie) * 50, fie*50, (1-fie) * 50]
# Noise-free simulated signal, diffusion tensor and kurtosis tensor
signal_cross, dt_cross, kt_cross = multi_tensor_dki(gtab_2s, mevals_cross,
                                                    S0=100,
                                                    angles=angles_cross,
                                                    fractions=frac_cross,
                                                    snr=None)
evals_cross, evecs_cross = decompose_tensor(from_lower_triangular(dt_cross))
# Reference parameter vector: 3 evals + 3x3 evecs + 15 kurtosis elements = 27
crossing_ref = np.concatenate((evals_cross, evecs_cross[0], evecs_cross[1],
                               evecs_cross[2], kt_cross), axis=0)
# Simulation 2. Spherical kurtosis tensor - for white matter this can be a
# biologically implausible scenario; however, this simulation is useful for
# testing the estimation of directional apparent kurtosis and the mean
# kurtosis, since their directional and mean kurtosis ground truth are a
# constant which can easily be calculated analytically.
Di = 0.00099
De = 0.00226
mevals_sph = np.array([[Di, Di, Di], [De, De, De]])
frac_sph = [50, 50]
signal_sph, dt_sph, kt_sph = multi_tensor_dki(gtab_2s, mevals_sph, S0=100,
                                              fractions=frac_sph,
                                              snr=None)
evals_sph, evecs_sph = decompose_tensor(from_lower_triangular(dt_sph))
params_sph = np.concatenate((evals_sph, evecs_sph[0], evecs_sph[1],
                             evecs_sph[2], kt_sph), axis=0)
# Compute ground truth - since KT is spherical, the apparent kurtosis
# coefficient for all gradient directions and the mean kurtosis have to be
# equal to Kref_sphere.
f = 0.5
Dg = f*Di + (1-f)*De
Kref_sphere = 3 * f * (1-f) * ((Di-De) / Dg) ** 2
# Simulation 3. Multi-voxel simulations - a dataset of four voxels is simulated.
# Since the objective of this simulation is to see if procedures are able to
# work with multi-dimensional data, all voxels contain the same crossing signal
# produced in simulation 1.
DWI = np.zeros((2, 2, 1, len(gtab_2s.bvals)))
DWI[0, 0, 0] = DWI[0, 1, 0] = DWI[1, 0, 0] = DWI[1, 1, 0] = signal_cross
multi_params = np.zeros((2, 2, 1, 27))
multi_params[0, 0, 0] = multi_params[0, 1, 0] = crossing_ref
multi_params[1, 0, 0] = multi_params[1, 1, 0] = crossing_ref
def test_positive_evals():
    """_positive_evals flags exactly the voxels whose three eigenvalues are
    all strictly positive."""
    # Eigenvalue sets under test; only voxels (0, 0) and (1, 1) qualify.
    L1 = np.array([[1e-3, 1e-3, 2e-3], [0, 1e-3, 0]])
    L2 = np.array([[3e-3, 0, 2e-3], [1e-3, 1e-3, 0]])
    L3 = np.array([[4e-3, 1e-4, 0], [0, 1e-3, 0]])
    expected = np.array([[True, False, False], [False, True, False]],
                        dtype=bool)
    assert_array_equal(_positive_evals(L1, L2, L3), expected)
def test_split_dki_param():
    """split_dki_param must recover evals/evecs/kt from a fitted model_params."""
    fit = dki.DiffusionKurtosisModel(gtab_2s, fit_method="OLS").fit(DWI)
    evals, evecs, kt = dki.split_dki_param(fit.model_params)
    for recovered, reference in ((evals, fit.evals),
                                 (evecs, fit.evecs),
                                 (kt, fit.kt)):
        assert_array_almost_equal(recovered, reference)
def test_dki_fits():
    """DKI OLS and WLS fits are tested on noise-free crossing-fiber simulations,
    on both a single voxel and a multi-voxel volume."""
    for fit_method in ("OLS", "WLS"):
        model = dki.DiffusionKurtosisModel(gtab_2s, fit_method=fit_method)
        # single-voxel signal must recover the ground-truth parameters
        assert_array_almost_equal(model.fit(signal_cross).model_params,
                                  crossing_ref)
        # multi-voxel volume must recover them in every voxel
        assert_array_almost_equal(model.fit(DWI).model_params, multi_params)
def test_apparent_kurtosis_coef():
    """For a spherical kurtosis tensor the apparent kurtosis coefficient
    equals the analytical reference in every sampled direction."""
    directions = gtab.bvecs[gtab.bvals > 0]
    sph = Sphere(xyz=directions)
    AKC = dki.apparent_kurtosis_coef(params_sph, sph)
    for idx in range(len(directions)):
        assert_array_almost_equal(AKC[idx], Kref_sphere)
def test_dki_predict():
    """Signal prediction from DKI parameters reproduces the simulated data,
    for scalar and per-voxel S0, via the model, the fit and the module
    level function."""
    model = dki.DiffusionKurtosisModel(gtab_2s)
    # single-voxel prediction, scalar S0
    predicted = model.predict(crossing_ref, S0=100)
    assert_array_almost_equal(predicted, signal_cross)
    # multi-voxel prediction, scalar S0
    predicted_multi = model.predict(multi_params, S0=100)
    assert_array_almost_equal(predicted_multi, DWI)
    # multi-voxel prediction with a different S0 in each voxel
    predicted_multi = model.predict(multi_params,
                                    S0=100*np.ones(predicted_multi.shape[:3]))
    assert_array_almost_equal(predicted_multi, DWI)
    # prediction through the DiffusionKurtosisFit object
    fit = model.fit(DWI)
    predicted_multi = fit.predict(gtab_2s, S0=100)
    assert_array_almost_equal(predicted_multi, DWI)
    # refit on the prediction itself and predict again
    fit = model.fit(predicted_multi)
    assert_array_almost_equal(fit.predict(model.gtab, S0=100), DWI)
    # module-level prediction function, scalar S0
    predicted = dki.dki_prediction(crossing_ref, gtab_2s, S0=100)
    assert_array_almost_equal(predicted, signal_cross)
    # module-level prediction function with an S0 volume
    predicted = dki.dki_prediction(multi_params, gtab_2s,
                                   S0=100 * np.ones(multi_params.shape[:3]))
    assert_array_almost_equal(predicted, DWI)
def test_carlson_rf():
    """carlson_rf reproduces published values of Carlson's RF symmetric
    elliptic integral.

    Reference: Carlson, 1994. Numerical computation of real or complex
    elliptic integrals. arXiv:math/9409227 [math.CA]
    """
    # Real arguments, exercised in 2D array format
    x = np.array([[1.0, 0.5], [2.0, 2.0]])
    y = np.array([[2.0, 1.0], [3.0, 3.0]])
    z = np.array([[0.0, 0.0], [4.0, 4.0]])
    # Define the reference outputs
    expected = np.array([[1.3110287771461, 1.8540746773014],
                         [0.58408284167715, 0.58408284167715]])
    assert_array_almost_equal(carlson_rf(x, y, z), expected)
    # Complex arguments
    x = np.array([1j, 1j - 1, 1j, 1j - 1])
    y = np.array([-1j, 1j, -1j, 1j])
    z = np.array([0.0, 0.0, 2, 1 - 1j])
    expected = np.array([1.8540746773014, 0.79612586584234 - 1.2138566698365j,
                         1.0441445654064, 0.93912050218619 - 0.53296252018635j])
    assert_array_almost_equal(carlson_rf(x, y, z, errtol=3e-5), expected)
def test_carlson_rd():
    """carlson_rd reproduces published values of Carlson's RD symmetric
    elliptic integral.

    Reference: Carlson, 1994. Numerical computation of real or complex
    elliptic integrals. arXiv:math/9409227 [math.CA]
    """
    # Real arguments
    x = np.array([0.0, 2.0])
    y = np.array([2.0, 3.0])
    z = np.array([1.0, 4.0])
    expected = np.array([1.7972103521034, 0.16510527294261])
    assert_array_almost_equal(carlson_rd(x, y, z, errtol=1e-5), expected)
    # Complex arguments, exercised in 2D array format
    x = np.array([[1j, 0.0], [0.0, -2 - 1j]])
    y = np.array([[-1j, 1j], [1j-1, -1j]])
    z = np.array([[2.0, -1j], [1j, -1 + 1j]])
    expected = np.array([[0.65933854154220, 1.2708196271910 + 2.7811120159521j],
                         [-1.8577235439239 - 0.96193450888839j,
                          1.8249027393704 - 1.2218475784827j]])
    assert_array_almost_equal(carlson_rd(x, y, z, errtol=1e-5), expected)
def test_Wrotate_single_fiber():
    """Rotating the kurtosis tensor of a randomly oriented single fiber
    into the diffusion-tensor eigenframe recovers the kurtosis tensor of
    the same fiber simulated directly along the x-axis."""
    fiber_evals = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
    f_ic = 0.49
    fracs = [f_ic*100, (1 - f_ic)*100]
    # simulate a single fiber with a random (theta, phi) orientation
    polar = random.uniform(0, 180)
    azimuth = random.uniform(0, 320)
    orientation = [(polar, azimuth), (polar, azimuth)]
    sig, dt, kt = multi_tensor_dki(gtab_2s, fiber_evals, angles=orientation,
                                   fractions=fracs, snr=None)
    evals, evecs = decompose_tensor(from_lower_triangular(dt))
    # rotate kt so the diffusion tensor diagonal is aligned to the x-axis
    kt_rotated = dki.Wrotate(kt, evecs)
    # reference: identical fiber simulated directly along the x-axis
    orientation = (90, 0), (90, 0)
    sig, dt_ref, kt_ref = multi_tensor_dki(gtab_2s, fiber_evals,
                                           angles=orientation,
                                           fractions=fracs, snr=None)
    assert_array_almost_equal(kt_rotated, kt_ref)
def test_Wrotate_crossing_fibers():
    """Rotate the kurtosis tensor of fibers crossing at 70 degrees.

    The diffusion tensor's principal eigenvector lies half-way between the
    two fibers, so after rotation the kurtosis tensor must equal the one of
    fibers each deviating 35 degrees from the x-axis. The smallest diffusion
    eigenvalue is perpendicular to both fibers, pinning the crossing to the
    x-y plane.
    """
    f_ic = 0.49
    fracs = [f_ic*50, (1-f_ic) * 50, f_ic*50, (1-f_ic) * 50]
    fiber_evals = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087],
                            [0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
    # simulate the crossing fibers at 70 degrees (orientations 30 and 20+...)
    orientation = [(90, 30), (90, 30), (20, 30), (20, 30)]
    sig, dt, kt = multi_tensor_dki(gtab_2s, fiber_evals, angles=orientation,
                                   fractions=fracs, snr=None)
    evals, evecs = decompose_tensor(from_lower_triangular(dt))
    # rotate into the diffusion-tensor eigenframe
    kt_rotated = dki.Wrotate(kt, evecs)
    # reference: fibers deviating +/- 35 degrees from the x-axis
    orientation = [(90, 35), (90, 35), (90, -35), (90, -35)]
    sig, dt, kt_ref = multi_tensor_dki(gtab_2s, fiber_evals,
                                       angles=orientation,
                                       fractions=fracs, snr=None)
    assert_array_almost_equal(kt_rotated, kt_ref)
def test_Wcons():
    """dki.Wcons expands the 15 independent kurtosis elements into the full
    symmetric 4D (3x3x3x3) tensor; check against a manual construction."""
    # Construct the 4D kurtosis tensor manualy from the crossing fiber kt
    # simulate
    Wfit = np.zeros([3, 3, 3, 3])

    # Wxxxx
    Wfit[0, 0, 0, 0] = kt_cross[0]

    # Wyyyy
    Wfit[1, 1, 1, 1] = kt_cross[1]

    # Wzzzz
    Wfit[2, 2, 2, 2] = kt_cross[2]

    # Wxxxy
    Wfit[0, 0, 0, 1] = Wfit[0, 0, 1, 0] = Wfit[0, 1, 0, 0] = kt_cross[3]
    Wfit[1, 0, 0, 0] = kt_cross[3]

    # Wxxxz
    Wfit[0, 0, 0, 2] = Wfit[0, 0, 2, 0] = Wfit[0, 2, 0, 0] = kt_cross[4]
    Wfit[2, 0, 0, 0] = kt_cross[4]

    # Wxyyy
    Wfit[0, 1, 1, 1] = Wfit[1, 0, 1, 1] = Wfit[1, 1, 1, 0] = kt_cross[5]
    Wfit[1, 1, 0, 1] = kt_cross[5]

    # Wyyyz (NOTE: the original comment here wrongly said Wxxxz; the indices
    # below are all permutations of y, y, y, z)
    Wfit[1, 1, 1, 2] = Wfit[1, 2, 1, 1] = Wfit[2, 1, 1, 1] = kt_cross[6]
    Wfit[1, 1, 2, 1] = kt_cross[6]

    # Wxzzz
    Wfit[0, 2, 2, 2] = Wfit[2, 2, 2, 0] = Wfit[2, 0, 2, 2] = kt_cross[7]
    Wfit[2, 2, 0, 2] = kt_cross[7]

    # Wyzzz
    Wfit[1, 2, 2, 2] = Wfit[2, 2, 2, 1] = Wfit[2, 1, 2, 2] = kt_cross[8]
    Wfit[2, 2, 1, 2] = kt_cross[8]

    # Wxxyy
    Wfit[0, 0, 1, 1] = Wfit[0, 1, 0, 1] = Wfit[0, 1, 1, 0] = kt_cross[9]
    Wfit[1, 0, 0, 1] = Wfit[1, 0, 1, 0] = Wfit[1, 1, 0, 0] = kt_cross[9]

    # Wxxzz
    Wfit[0, 0, 2, 2] = Wfit[0, 2, 0, 2] = Wfit[0, 2, 2, 0] = kt_cross[10]
    Wfit[2, 0, 0, 2] = Wfit[2, 0, 2, 0] = Wfit[2, 2, 0, 0] = kt_cross[10]

    # Wyyzz
    Wfit[1, 1, 2, 2] = Wfit[1, 2, 1, 2] = Wfit[1, 2, 2, 1] = kt_cross[11]
    Wfit[2, 1, 1, 2] = Wfit[2, 2, 1, 1] = Wfit[2, 1, 2, 1] = kt_cross[11]

    # Wxxyz
    Wfit[0, 0, 1, 2] = Wfit[0, 0, 2, 1] = Wfit[0, 1, 0, 2] = kt_cross[12]
    Wfit[0, 1, 2, 0] = Wfit[0, 2, 0, 1] = Wfit[0, 2, 1, 0] = kt_cross[12]
    Wfit[1, 0, 0, 2] = Wfit[1, 0, 2, 0] = Wfit[1, 2, 0, 0] = kt_cross[12]
    Wfit[2, 0, 0, 1] = Wfit[2, 0, 1, 0] = Wfit[2, 1, 0, 0] = kt_cross[12]

    # Wxyyz
    Wfit[0, 1, 1, 2] = Wfit[0, 1, 2, 1] = Wfit[0, 2, 1, 1] = kt_cross[13]
    Wfit[1, 0, 1, 2] = Wfit[1, 1, 0, 2] = Wfit[1, 1, 2, 0] = kt_cross[13]
    Wfit[1, 2, 0, 1] = Wfit[1, 2, 1, 0] = Wfit[2, 0, 1, 1] = kt_cross[13]
    Wfit[2, 1, 0, 1] = Wfit[2, 1, 1, 0] = Wfit[1, 0, 2, 1] = kt_cross[13]

    # Wxyzz
    Wfit[0, 1, 2, 2] = Wfit[0, 2, 1, 2] = Wfit[0, 2, 2, 1] = kt_cross[14]
    Wfit[1, 0, 2, 2] = Wfit[1, 2, 0, 2] = Wfit[1, 2, 2, 0] = kt_cross[14]
    Wfit[2, 0, 1, 2] = Wfit[2, 0, 2, 1] = Wfit[2, 1, 0, 2] = kt_cross[14]
    Wfit[2, 1, 2, 0] = Wfit[2, 2, 0, 1] = Wfit[2, 2, 1, 0] = kt_cross[14]

    # Function to be tested
    W4D = dki.Wcons(kt_cross)

    # Compare element-wise on the flattened tensors
    Wfit = Wfit.reshape(-1)
    W4D = W4D.reshape(-1)
    assert_array_almost_equal(W4D, Wfit)
def test_spherical_dki_statistics():
    """MK, AK and RK analytical solutions match the expected values of a
    spherical kurtosis tensor in a multi-voxel volume with empty voxels."""
    # Build a 2x2x2 parameter volume: five voxels carry the spherical
    # simulation, the other three stay zero (background).
    MParam = np.zeros((2, 2, 2, 27))
    MRef = np.zeros((2, 2, 2))
    filled_voxels = [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 1, 0)]
    for voxel in filled_voxels:
        MParam[voxel] = params_sph
        MRef[voxel] = Kref_sphere
    # voxels (1, 1, 1), (1, 0, 0) and (1, 0, 1) remain zero in both arrays
    # Mean, radial and axial kurtosis analytical solutions
    for kurtosis_stat in (mean_kurtosis, radial_kurtosis, axial_kurtosis):
        assert_array_almost_equal(kurtosis_stat(MParam), MRef)
def test_compare_MK_method():
    """Analytical MK agrees with the average of directional kurtosis
    sampled over the acquisition sphere."""
    fit = dki.DiffusionKurtosisModel(gtab_2s).fit(signal_cross)
    # analytical solution
    mk_analytical = fit.mk()
    # numerical estimate: mean apparent kurtosis over sampled directions
    sph = Sphere(xyz=gtab.bvecs[gtab.bvals > 0])
    mk_numerical = np.mean(dki.apparent_kurtosis_coef(fit.model_params, sph),
                           axis=-1)
    assert_array_almost_equal(mk_analytical, mk_numerical, decimal=1)
def test_single_voxel_DKI_stats():
    """AK and RK of a randomly oriented single fiber match the analytical
    references (uses the module-level intra-cellular fraction ``fie``)."""
    axial_di, axial_de = 0.00099, 0.00226
    radial_di, radial_de = 0, 0.00087
    # Analytical reference values
    AD = fie*axial_di + (1-fie)*axial_de
    AK = 3 * fie * (1-fie) * ((axial_di-axial_de) / AD) ** 2
    RD = fie*radial_di + (1-fie)*radial_de
    RK = 3 * fie * (1-fie) * ((radial_di-radial_de) / RD) ** 2
    ref_vals = np.array([AD, AK, RD, RK])
    # simulate a fiber with a random orientation
    polar = random.uniform(0, 180)
    azimuth = random.uniform(0, 320)
    orientation = [(polar, azimuth), (polar, azimuth)]
    fiber_evals = np.array([[axial_di, radial_di, radial_di],
                            [axial_de, radial_de, radial_de]])
    fracs = [fie*100, (1-fie)*100]
    signal, dt, kt = multi_tensor_dki(gtab_2s, fiber_evals, S0=100,
                                      angles=orientation,
                                      fractions=fracs, snr=None)
    evals, evecs = decompose_tensor(from_lower_triangular(dt))
    dki_par = np.concatenate((evals, evecs[0], evecs[1], evecs[2], kt), axis=0)
    # Estimates using the module-level dki functions
    func_vals = np.array([dki.axial_diffusivity(evals),
                          axial_kurtosis(dki_par),
                          dki.radial_diffusivity(evals),
                          radial_kurtosis(dki_par)])
    assert_array_almost_equal(func_vals, ref_vals)
    # Estimates using the kurtosis fit object
    dkiF = dki.DiffusionKurtosisModel(gtab_2s).fit(signal)
    fit_vals = np.array([dkiF.ad, dkiF.ak(), dkiF.rd, dkiF.rk()])
    assert_array_almost_equal(fit_vals, ref_vals)
    # MK check (this also exercises the MK singularity L2 == L3)
    sph = Sphere(xyz=gtab.bvecs[gtab.bvals > 0])
    assert_array_almost_equal(dkiF.mk(), np.mean(dkiF.akc(sph)), decimal=1)
def test_compare_RK_methods():
    """Analytical RK equals the mean kurtosis sampled over directions
    perpendicular to the first diffusion eigenvector."""
    fit = dki.DiffusionKurtosisModel(gtab_2s).fit(signal_cross)
    # analytical solution
    rk_analytical = fit.rk()
    # numerical method: sample directions perpendicular to the main axis
    perp_dirs = perpendicular_directions(fit.evecs[:, 0], num=30, half=True)
    perp_sphere = Sphere(xyz=perp_dirs)
    rk_numerical = np.mean(dki.apparent_kurtosis_coef(fit.model_params,
                                                      perp_sphere),
                           axis=-1)
    assert_array_almost_equal(rk_analytical, rk_numerical)
def test_MK_singularities():
    """Analytical MK is checked against the numerical spherical average on
    eigenvalue configurations hitting singularities of the closed form."""
    # To test MK in case that analytical solution was a singularity not covered
    # by other tests
    dkiM = dki.DiffusionKurtosisModel(gtab_2s)

    # test singularity L1 == L2 - this is the case of a prolate diffusion
    # tensor for crossing fibers at 90 degrees (exact and near-exact angles)
    angles_all = np.array([[(90, 0), (90, 0), (0, 0), (0, 0)],
                           [(89.9, 0), (89.9, 0), (0, 0), (0, 0)]])
    for angles_90 in angles_all:
        s_90, dt_90, kt_90 = multi_tensor_dki(gtab_2s, mevals_cross, S0=100,
                                              angles=angles_90,
                                              fractions=frac_cross, snr=None)
        dkiF = dkiM.fit(s_90)
        MK = dkiF.mk()

        sph = Sphere(xyz=gtab.bvecs[gtab.bvals > 0])
        MK_nm = np.mean(dkiF.akc(sph))

        assert_almost_equal(MK, MK_nm, decimal=2)

        # test singularity L1 == L3 and L1 != L2
        # since L1 is defined as the larger eigenvalue and L3 the smallest
        # eigenvalue, this singularity theoretically will never be called,
        # because for L1 == L3, L2 have also to be = L1 and L2.
        # Nevertheless, this test is included since this singularity
        # is relevant for cases where eigenvalues are not ordered

        # artificially revert the eigenvalue and eigenvector order:
        # swap evals L2<->L3 (params 1,2) and the matching eigenvector
        # components (params 4/5, 7/8, 10/11)
        dki_params = dkiF.model_params.copy()
        dki_params[1] = dkiF.model_params[2]
        dki_params[2] = dkiF.model_params[1]
        dki_params[4] = dkiF.model_params[5]
        dki_params[5] = dkiF.model_params[4]
        dki_params[7] = dkiF.model_params[8]
        dki_params[8] = dkiF.model_params[7]
        dki_params[10] = dkiF.model_params[11]
        dki_params[11] = dkiF.model_params[10]
        MK = dki.mean_kurtosis(dki_params)
        MK_nm = np.mean(dki.apparent_kurtosis_coef(dki_params, sph))

        assert_almost_equal(MK, MK_nm, decimal=2)
def test_dki_errors():
    """Error handling of the DKI module.

    Checks that an unknown fit method and a negative min_signal raise
    ValueError, that a valid min_signal fits normally, and that a mask is
    validated against the data shape.

    Fix over the original: the masked-fit expectation is built on a local
    copy, so the shared module-level ``multi_params`` array is no longer
    mutated (the original clobbered it, polluting later tests).
    """
    # first error of DKI module is if a unknown fit method is given
    assert_raises(ValueError, dki.DiffusionKurtosisModel, gtab_2s,
                  fit_method="JOANA")
    # second error of DKI module is if a min_signal is defined as negative
    assert_raises(ValueError, dki.DiffusionKurtosisModel, gtab_2s,
                  min_signal=-1)
    # try case with correct min_signal
    dkiM = dki.DiffusionKurtosisModel(gtab_2s, min_signal=1)
    dkiF = dkiM.fit(DWI)
    assert_array_almost_equal(dkiF.model_params, multi_params)
    # third error is if a given mask do not have same shape as data
    dkiM = dki.DiffusionKurtosisModel(gtab_2s)
    dkiF = dkiM.fit(DWI)
    # a valid mask: exclude voxel [1, 1], whose parameters must come back
    # as zeros; build the expectation on a copy to leave multi_params intact
    mask_correct = dkiF.fa > 0
    mask_correct[1, 1] = False
    expected_params = multi_params.copy()
    expected_params[1, 1] = np.zeros(27)
    dkiF = dkiM.fit(DWI, mask=mask_correct)
    assert_array_almost_equal(dkiF.model_params, expected_params)
    # an incorrect mask (wrong shape) must raise ValueError
    mask_not_correct = np.array([[True, True, False], [True, False, False]])
    assert_raises(ValueError, dkiM.fit, DWI, mask=mask_not_correct)
|
<reponame>plezmo/python-sdk-examples
# Copyright (c) 2019 Gunakar Pvt Ltd
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Gunakar Pvt Ltd/Plezmo nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# * This software must only be used with Plezmo elements manufactured by
# Gunakar Pvt Ltd.
# * Any software provided in binary or object form under this license must not be
# reverse engineered, decompiled, modified and/or disassembled.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Real-time charting of RAW Acceleration data from Motion sensor.
# This example requires a Python 3.7 setup with some basic libraries setup (see the imports below).
# A Motion sensor needs to be available for the experiment.
# A simple experiment setup to study the pendulum behavior with raw-acceleration data is as follows.
# Tie one end of a 2-3 meter thin thread to a Plezmo adapter such that a Motion sensor attached to
# this adapter will hang up-side-down when suspended by the thread. Our pendulum is ready!
# Tie the other end at a suitable place for a good 15-20 degree free swinging pendulum.
# Now, hold the attachment at a swing angle with the thread taught and start this experiment.
# As soon as the chart shows up, release the attachment (with the attached Motion sensor)
# without any extra jerks or force to start the swinging.
# When you want to end the experiment, press 'q' in the chart window.
# This will exit the program and dump a motion-raw-accel-data.csv data file for the session.
# You can do the same charting with any other acceleration experiments.
# Note that this script charts only the Z-axis and Resultant raw samples.
# You can edit the code below to suit your requirements.
# Note: A motion_inverted configuration variable is set to True in the script below to account for
# the inverted position of the motion element suspended as described above. Depending on how you
# setup your experiment you might want to set that to False.
# This program will connect with the specified Motion sensor
# and fetch a batch of RAW z-axis and resultant acceleration values.
# It will then update a chart with these against time.
# The program will continue fetching and showing these lines as long as 'q' is not pressed in the chart context.
import time
import traceback
import sys
import math
import pandas as pd
import matplotlib.animation as animation
import matplotlib.pyplot as plt
# Generic Plezmo SDK imports.
from plezmo import *
import plezmo.utils.logger as pzLogger
import plezmo as pz
import plezmo.elements.element_types as pzType
from plezmo.elements.plezmo_motion import *
# Configurations.
motion_inverted = True  ## Correction for inverted swinging Motion element w.r.t resultant data
cfgsize = (14,6)  # chart figure size, inches
width = 100  # number of data-points shown across the chart
ani_interval = 1  # animation update interval, msec
OutFile = 'motion-raw-accel-data.csv'  # CSV dump written when the chart is closed

# Globals.
x_data, y_data, y2_data = [], [], []  # time, z-axis and resultant chart data
g_paused = False  # toggled by SPACE in the chart window; pauses refresh()

# Global time tracking variables
gTime = 0  # wall-clock time of the last element event (see mark_time)
rTime = 0  # reference start time of the experiment (see ref_time)
# Functions.

# Init bluetooth communication
def init(element_names):
    """Connect to the Plezmo elements named in *element_names*.

    :param element_names: dict with a "motion" key naming the Motion sensor.
    :return: True when every element connects; False otherwise, after
        disconnecting any elements that had already connected.
    """
    # Register global exception handler
    pz.registerExceptionHandler(globalExceptionHandler)
    # Elements to connect
    element_list = [
        {"name": element_names["motion"], "type": pzType.PlezmoElementType.MOTION},
    ]
    connected_names = []
    try:
        # connect to elements one by one, remembering successes so a later
        # failure can roll back the ones already connected
        for element in element_list:
            pz.plezmoApi.connect(element["name"], element["type"])
            connected_names.append(element["name"])
        return True
    except Exception as exc:
        # Disconnect and stop program if connection to an element fails.
        # NOTE: the original reused the name `e` for the element loop
        # variable, the exception, and the rollback loop variable; distinct
        # names keep the rollback unambiguous.
        print(f'Err! Failed to connect to element, ex {exc}')
        #traceback.print_exc()
        # Disconnect already connected elements
        for name in connected_names:
            pz.plezmoApi.disconnect(name)
        return False
def extract_element_names():
    """Read the Motion element name from the command line.

    :return: ``{"motion": <name>}`` on success, ``None`` when the required
        positional argument is missing.
    """
    if len(sys.argv) < 2:
        # was: print('Error 1') — made the failure message actionable
        print('Error: missing required argument <Motion element name>')
        return None
    return {"motion": sys.argv[1]}
# All unhandled exceptions from event handlers will be directed to this handler
def globalExceptionHandler(e):
    """Log an unhandled event-handler exception and keep the program alive."""
    print(f'Err! Got exception {e}')
# Generic 'keepalive' function used by all handlers that keep program running.
def mark_time():
    """Record the wall-clock time of the most recent element event in gTime."""
    global gTime
    gTime = time.time()
def ref_time(set=False):
    """Experiment clock: with set=True record the reference instant,
    otherwise return the seconds elapsed since it.

    Note: the parameter name ``set`` shadows the builtin; kept for
    call-compatibility.
    """
    global rTime
    if set is True:
        rTime = time.time()
        return None
    return round(time.time() - rTime, 6)  # usec resolution
def keypress(event):
    """Chart key handler: SPACE toggles the global pause flag.

    'q' is handled by matplotlib itself (closes the figure window).
    """
    global g_paused
    print(f'Keypress... {event.key}')
    if event.key == ' ':  # SPACE controls pause -- TODO
        g_paused ^= True
def refresh(frame):
    """FuncAnimation callback: fetch a batch of raw acceleration samples
    from the Motion sensor, append them to the dataset, and redraw the
    z-axis and resultant chart lines.

    Returns the updated line artists, as FuncAnimation expects.
    """
    global dataset, width, x_data, y_data, y2_data
    #print(f'Refresh> {dataset.shape}')
    if g_paused:
        print(f'Paused...')
        time.sleep(1)  # slowdown animation checks
        return line, line2
    # pzLog.info(f'Fetching Motion Accl data...')
    rows = []
    for i in range(5):  # fetch a batch of 5 samples per refresh tick
        # For the Pendulum experiment we are looking at the Z-axis and Resultant data
        z = Motion.getAccelerometerData(motion_name, Acceleration.Z)
        r = Motion.getAccelerometerData(motion_name, Acceleration.RESULTANT)
        t = ref_time()
        #print(f' {i:3} {t:10.3f} {z:10} {r:10}')
        # negate z when the sensor hangs up-side-down (motion_inverted)
        row = [ t, -z if motion_inverted else z, r]
        rows.append(row)
    dataset = pd.concat([dataset, pd.DataFrame(rows, columns=['t','z','r'])])
    # keep only the most recent `width` points on screen once enough exist
    if (dataset.shape[0] < width):
        x_data = list(dataset['t'])
        y_data = list(dataset['z'])
        y2_data = list(dataset['r'])
    else:
        x_data = list(dataset['t'][-width:])
        y_data = list(dataset['z'][-width:])
        y2_data = list(dataset['r'][-width:])
    line.set_data(x_data, y_data)
    line2.set_data(x_data, y2_data)
    # rescale the axes to the new data extents
    fig.gca().relim()
    fig.gca().autoscale_view()
    return line, line2
# Handlers that keep the program running.
# Each handler just logs the event and refreshes the keepalive timestamp
# via mark_time().

@pz.PlezmoEventHandler
def move_handler():
    """Motion START event."""
    pzLog.info(f' {motion_name} moved ...')
    return mark_time()

@pz.PlezmoEventHandler
def stop_handler():
    """Motion STOP event."""
    pzLog.info(f' {motion_name} STOP received.')
    return mark_time()

@pz.PlezmoEventHandler
def l_tilt_handler():
    """Left-tilt event."""
    pzLog.info(f' {motion_name} Left tilt')
    return mark_time()

@pz.PlezmoEventHandler
def r_tilt_handler():
    """Right-tilt event."""
    pzLog.info(f' {motion_name} Right tilt')
    return mark_time()

@pz.PlezmoEventHandler
def f_tilt_handler():
    """Front-tilt event."""
    pzLog.info(f' {motion_name} Front tilt')
    return mark_time()

@pz.PlezmoEventHandler
def b_tilt_handler():
    """Back-tilt event."""
    pzLog.info(f' {motion_name} Back tilt')
    return mark_time()

@pz.PlezmoEventHandler
def flat_handler():
    """Sensor-flat event."""
    pzLog.info(f' {motion_name} Flat.')
    return mark_time()
# Main.
pzLog = pzLogger.Logger()
pzLog.info(f'Begin.')

# Parse the required Motion element name from the command line
element_names = extract_element_names()
if element_names is None:
    pz.plezmoApi.close()
    print(f'Err! Need one argument <Motion element>')
    exit(1)
pzLog.info(f'Looking for: {element_names}')

# Init bluetooth communication and connect to elements
retval = init(element_names)
if not retval:
    pz.plezmoApi.close()
    print(f'Err! Could not connect to all the required elements!')
    exit(0)
motion_name = element_names["motion"]

# Register event handlers in a try-except-finally form.
try:
    pz.Motion.onMotion(motion_name, move_handler, Movement.START)
    pz.Motion.onMotion(motion_name, stop_handler, Movement.STOP)
    pz.Motion.onTilt(motion_name, l_tilt_handler, Tilt.LEFT)
    pz.Motion.onTilt(motion_name, r_tilt_handler, Tilt.RIGHT)
    pz.Motion.onTilt(motion_name, f_tilt_handler, Tilt.FRONT)
    pz.Motion.onTilt(motion_name, b_tilt_handler, Tilt.BACK)

    # Accumulated samples: t (time), z (z-axis accel), r (resultant accel)
    dataset = pd.DataFrame(columns=[i for i in range(3)])
    dataset.rename(columns={0:'t',1:'z',2:'r'}, inplace=True)

    # Setup Animated Charting.
    fig = plt.figure(figsize=cfgsize)
    fig.canvas.mpl_connect('key_press_event', keypress)
    line,line2 = plt.plot(x_data, y_data, 'r-', x_data, y2_data, 'b-', linewidth=1)
    plt.legend([line,line2], ['z-axis','Resultant'])
    plt.title('Charting RAW Acceleration from Motion sensor')
    plt.xlabel('Time (secs)')
    plt.ylabel('Accel (mm/sq_sec)')

    ref_time(True)  # start the experiment clock
    ani = animation.FuncAnimation(fig, refresh, interval=ani_interval)
    plt.tight_layout()
    plt.show()  # blocks until the chart window is closed (press 'q')

    # Dump the collected samples for offline analysis
    dataset.reset_index(inplace=True, drop=True)
    dataset.to_csv(OutFile, encoding='utf-8')
except Exception as e:
    print(f'Err! Failed to run commands: ex {e}')
    #traceback.print_exc()
finally:
    pzLog.info(f'End.')
    # Program completed, disconnect elements and quit
    pz.plezmoApi.disconnect(motion_name)
    time.sleep(1)
    pz.plezmoApi.close()
    exit(0)
|
from django.shortcuts import render
from .forms import CommentForm
from django.shortcuts import get_object_or_404, redirect, render
import requests
from main.models import Book, Comment
import json
from django.views.generic.detail import DetailView
from django.contrib.auth.decorators import login_required
import csv
from django.http import HttpResponse
from reportlab.lib.units import inch
import io
from django.http import FileResponse
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
# Create your views here.
@login_required(login_url='login')
def search_by_id(request):
    """Look up a book by ``?id=...`` on the etnassoft API, cache it as a
    Book row, and render the home page.

    Fixes over the original: the inverted ``if exists(): pass / else:
    create`` branch is replaced with ``get_or_create`` (also removing the
    follow-up ``get``), the builtin ``id`` is no longer shadowed, and the
    debug prints are dropped.
    """
    info = {}
    data = None
    if 'id' in request.GET:
        book_id = request.GET['id']
        url = 'https://www.etnassoft.com/api/v1/get/?id=%s' % book_id
        response = requests.get(url)
        if response.json():
            info = response.json()[0]
            # Create the local cache row only when it does not exist yet
            data, _created = Book.objects.get_or_create(
                id_book=int(info['ID']),
                defaults={
                    'title': info['title'],
                    'publication_date': int(info['publisher_date']),
                },
            )
            return render(request, 'main/home.html', {'book': data, 'info': info})
    # No id given, or the API returned nothing: render with no book
    return render(request, 'main/home.html', {'book': data})
@login_required(login_url='login')
def details(request, pk):
    """Book detail page; on POST, store a comment by the current user.

    Fix over the original: ``Comment.objects.create`` already persists the
    row, so the redundant second ``comment.save()`` (an extra UPDATE) is
    removed.
    """
    book = Book.objects.get(id_book=pk)
    if request.method == 'POST':
        cf = CommentForm(request.POST or None)
        if cf.is_valid():
            # create() saves the row; no extra save() needed
            Comment.objects.create(book=book, user=request.user,
                                   text=request.POST.get('text'))
    else:
        cf = CommentForm()
    context = {
        'book': book,
        'comment_form': cf,
    }
    return render(request, 'main/detail.html', context)
@login_required(login_url='login')
def csv_file(request, *args, **kwargs):
    """Export one book's API record as a downloadable CSV attachment.

    Bug fix: the original called ``HttpResponse('text/csv')``, which sets
    'text/csv' as the response *body* (with the default text/html content
    type). The MIME type must be passed as ``content_type``.
    """
    book_id = kwargs.get('pk')
    url = 'https://www.etnassoft.com/api/v1/get/?id=%s' % book_id
    data = requests.get(url)
    info = data.json()[0]
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=book.csv'
    writer = csv.writer(response)
    # one header row (keys) and one value row, in the API's field order
    writer.writerow(info.keys())
    writer.writerow(info.values())
    return response
@login_required(login_url='login')
def pdf_file(request, pk):
    """Render a one-page PDF with the book's basic fields and return it
    as a downloadable attachment."""
    book = Book.objects.get(id_book=pk)
    # Draw into an in-memory byte buffer instead of a temporary file
    buffer = io.BytesIO()
    pdf = canvas.Canvas(buffer, pagesize=letter, bottomup=0)
    # Text object positioned one inch from the page origin
    text = pdf.beginText()
    text.setTextOrigin(inch, inch)
    text.setFont("Helvetica", 14)
    for entry in (f'ID : {book.id_book} ',
                  f'Title: {book.title}',
                  f'Publication date: {book.publication_date}'):
        text.textLine(entry)
    # Finalize the page and flush the canvas into the buffer
    pdf.drawText(text)
    pdf.showPage()
    pdf.save()
    buffer.seek(0)
    return FileResponse(buffer, as_attachment=True, filename='book.pdf')
|
<filename>Lib/site-packages/elementpath/xpath1_parser.py
#
# Copyright (c), 2018-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author <NAME> <<EMAIL>>
#
import re
import math
import decimal
from copy import copy
from .exceptions import ElementPathNameError, MissingContextError
from .datatypes import UntypedAtomic, DayTimeDuration, YearMonthDuration, \
NumericTypeProxy, ArithmeticTypeProxy
from .xpath_context import XPathSchemaContext
from .tdop_parser import Parser
from .namespaces import XML_ID, XML_LANG, XML_NAMESPACE, qname_to_prefixed
from .schema_proxy import AbstractSchemaProxy
from .xpath_token import XPathToken
from .xpath_nodes import NamespaceNode, TypedAttribute, TypedElement, is_etree_element, \
is_xpath_node, is_element_node, is_document_node, is_attribute_node, is_text_node, \
is_comment_node, is_processing_instruction_node, node_name
class XPath1Parser(Parser):
"""
XPath 1.0 expression parser class. A parser instance represents also the XPath static context.
With *variables* you can pass a dictionary with the static context's in-scope variables.
Provide a *namespaces* dictionary argument for mapping namespace prefixes to URI inside
expressions. If *strict* is set to `False` the parser enables also the parsing of QNames,
like the ElementPath library.
:param namespaces: A dictionary with mapping from namespace prefixes into URIs.
:param variables: A dictionary with the static context's in-scope variables.
:param strict: If strict mode is `False` the parser enables parsing of QNames \
in extended format, like the Python's ElementPath library. Default is `True`.
"""
token_base_class = XPathToken
name_pattern = re.compile(r'[^\d\W][\w.\-\xb7\u0300-\u036F\u203F\u2040]*')
SYMBOLS = Parser.SYMBOLS | {
# Axes
'descendant-or-self', 'following-sibling', 'preceding-sibling',
'ancestor-or-self', 'descendant', 'attribute', 'following',
'namespace', 'preceding', 'ancestor', 'parent', 'child', 'self',
# Operators
'and', 'mod', 'div', 'or', '..', '//', '!=', '<=', '>=', '(', ')', '[', ']',
':', '.', '@', ',', '/', '|', '*', '-', '=', '+', '<', '>', '$', '::',
# Node test functions
'node', 'text', 'comment', 'processing-instruction',
# Node set functions
'last', 'position', 'count', 'id', 'name', 'local-name', 'namespace-uri',
# String functions
'string', 'concat', 'starts-with', 'contains',
'substring-before', 'substring-after', 'substring',
'string-length', 'normalize-space', 'translate',
# Boolean functions
'boolean', 'not', 'true', 'false', 'lang',
# Number functions
'number', 'sum', 'floor', 'ceiling', 'round',
# Symbols for ElementPath extensions
'{', '}'
}
DEFAULT_NAMESPACES = {'xml': XML_NAMESPACE}
"""
The default prefix-to-namespace associations of the XPath class. Those namespaces are updated
in the instance with the ones passed with the *namespaces* argument.
"""
# Labels and symbols admitted after a path step
PATH_STEP_LABELS = ('axis', 'kind test')
PATH_STEP_SYMBOLS = {
'(integer)', '(string)', '(float)', '(decimal)', '(name)', '*', '@', '..', '.', '{'
}
schema = None # To simplify the schema bind checks in compatibility with XPath2Parser
def __init__(self, namespaces=None, variables=None, strict=True, *args, **kwargs):
super(XPath1Parser, self).__init__()
self.namespaces = self.DEFAULT_NAMESPACES.copy()
if namespaces is not None:
self.namespaces.update(namespaces)
self.variables = dict(variables if variables is not None else [])
self.strict = strict
@property
def version(self):
"""The XPath version string."""
return '1.0'
@property
def compatibility_mode(self):
"""XPath 1.0 compatibility mode."""
return True
@property
def default_namespace(self):
"""
The default namespace. For XPath 1.0 this value is always `None` because the default
namespace is ignored (see https://www.w3.org/TR/1999/REC-xpath-19991116/#node-tests).
"""
return
@classmethod
def axis(cls, symbol, bp=80):
"""Register a token for a symbol that represents an XPath *axis*."""
def nud_(self):
self.parser.advance('::')
self.parser.next_token.expected(
'(name)', '*', 'text', 'node', 'document-node', 'comment', 'processing-instruction',
'attribute', 'schema-attribute', 'element', 'schema-element'
)
self[:] = self.parser.expression(rbp=bp),
return self
pattern = r'\b%s(?=\s*\:\:|\s*\(\:.*\:\)\s*\:\:)' % symbol
return cls.register(symbol, pattern=pattern, label='axis', lbp=bp, rbp=bp, nud=nud_)
@classmethod
def function(cls, symbol, nargs=None, label='function', bp=90):
"""
Registers a token class for a symbol that represents an XPath *callable* object.
For default a callable labeled as *function* is registered but a different label
can be provided.
"""
def nud_(self):
self.value = None
self.parser.advance('(')
if nargs is None:
del self[:]
while True:
self.append(self.parser.expression(5))
if self.parser.next_token.symbol != ',':
break
self.parser.advance(',')
self.parser.advance(')')
return self
elif nargs == 0:
self.parser.advance(')')
return self
elif isinstance(nargs, (tuple, list)):
min_args, max_args = nargs
else:
min_args = max_args = nargs
k = 0
while k < min_args:
if self.parser.next_token.symbol == ')':
msg = 'Too few arguments: expected at least %s arguments' % min_args
self.wrong_nargs(msg if min_args > 1 else msg[:-1])
self[k:] = self.parser.expression(5),
k += 1
if k < min_args:
if self.parser.next_token.symbol == ')':
msg = 'Too few arguments: expected at least %s arguments' % min_args
self.wrong_nargs(msg if min_args > 1 else msg[:-1])
self.parser.advance(',')
while k < max_args:
if self.parser.next_token.symbol == ',':
self.parser.advance(',')
self[k:] = self.parser.expression(5),
elif k == 0 and self.parser.next_token.symbol != ')':
self[k:] = self.parser.expression(5),
else:
break
k += 1
if self.parser.next_token.symbol == ',':
msg = 'Too many arguments: expected at most %s arguments' % max_args
self.wrong_nargs(msg if max_args > 1 else msg[:-1])
self.parser.advance(')')
return self
pattern = r'\b%s(?=\s*\(|\s*\(\:.*\:\)\()' % symbol
return cls.register(symbol, pattern=pattern, label=label, lbp=bp, rbp=bp, nud=nud_)
    def parse(self, source):
        """
        Parses an XPath expression and returns the root token of the tree.
        After parsing, a static evaluation is attempted; it is silently skipped
        when the expression requires a dynamic context.
        """
        root_token = super(XPath1Parser, self).parse(source)
        try:
            root_token.evaluate()  # Static context evaluation
        except MissingContextError:
            pass
        return root_token
##
# XPath1 definitions: module-level shortcuts for the XPath1Parser registration
# helpers, used by all the symbol and function definitions below.
register = XPath1Parser.register
literal = XPath1Parser.literal
nullary = XPath1Parser.nullary
prefix = XPath1Parser.prefix
infix = XPath1Parser.infix
postfix = XPath1Parser.postfix
method = XPath1Parser.method
function = XPath1Parser.function
axis = XPath1Parser.axis

###
# Simple symbols: closing and separator tokens, parsed but with no behavior
# of their own.
register(',')
register(')')
register(']')
register('::')
register('}')

###
# Literals
literal('(string)')
literal('(float)')
literal('(decimal)')
literal('(integer)')
# Name token: evaluates to the list of matching nodes.
@method(literal('(name)', bp=10))
def evaluate(self, context=None):
    return [x for x in self.select(context)]


@method('(name)')
def select(self, context=None):
    """Selects children-or-self items matching the (possibly expanded) name."""
    if context is None:
        return

    name = self.value
    if isinstance(context, XPathSchemaContext):
        # Bind with the XSD type from a schema
        for item in map(lambda x: self.match_xsd_type(x, name), context.iter_children_or_self()):
            if item:
                yield item
        return

    # Expand an unprefixed name with the default namespace, if any
    if name[0] == '{' or not self.parser.default_namespace:
        tag = name
    else:
        tag = '{%s}%s' % (self.parser.default_namespace, name)

    # With an ElementTree context checks if the token is bound to an XSD type. If not
    # try a match using the element path. If this match fails the xsd_type attribute
    # is set with the schema object to prevent other checks until the schema change.
    if self.xsd_types is self.parser.schema:
        # Untyped selection
        for item in context.iter_children_or_self():
            if is_attribute_node(item, name) or is_element_node(item, tag):
                yield item
    elif self.xsd_types is None or isinstance(self.xsd_types, AbstractSchemaProxy):
        # Try to match the type using the path
        for item in context.iter_children_or_self():
            if is_attribute_node(item, name) or is_element_node(item, tag):
                path = context.get_path(item)

                xsd_component = self.parser.schema.find(path, self.parser.namespaces)
                if xsd_component is not None:
                    self.xsd_types = {tag: xsd_component.type}
                else:
                    # No schema match: remember the schema to skip further lookups
                    self.xsd_types = self.parser.schema

                yield self.get_typed_node(item)
    else:
        # XSD typed selection
        for item in context.iter_children_or_self():
            if is_attribute_node(item, name) or is_element_node(item, tag):
                yield self.get_typed_node(item)
###
# Namespace prefix reference
@method(':', bp=95)
def led(self, left):
    """Builds a 'prefix:name' QName token from its two operands."""
    # The left operand must be a name (or '*' from XPath 2.0 on)
    if self.parser.version == '1.0':
        left.expected('(name)')
    else:
        left.expected('(name)', '*')

    if self.parser.next_token.label not in ('function', 'constructor'):
        self.parser.expected_next('(name)', '*')

    if left.symbol == '(name)':
        namespace = self.get_namespace(left.value)
        self.parser.next_token.bind_namespace(namespace)
    elif left.symbol == '*' and self.parser.next_token.symbol != '(name)':
        self.parser.next_token.wrong_syntax()

    # No whitespace is allowed around ':' in a QName
    if self.parser.is_spaced():
        self.wrong_syntax("a QName cannot contains spaces before or after ':'")

    self[:] = left, self.parser.expression(90)
    self.value = '{}:{}'.format(self[0].value, self[1].value)
    return self


@method(':')
def evaluate(self, context=None):
    # A prefixed function/constructor call delegates to the right operand
    if self[1].label in ('function', 'constructor'):
        return self[1].evaluate(context)
    return [x for x in self.select(context)]


@method(':')
def select(self, context=None):
    if self[1].label in ('function', 'constructor'):
        value = self[1].evaluate(context)
        if isinstance(value, list):
            yield from value
        else:
            yield value
        return

    # Build the extended name: '*:local' for a wildcard prefix,
    # '{uri}local' otherwise.
    if self[0].value == '*':
        name = '*:%s' % self[1].value
    else:
        namespace = self.get_namespace(self[0].value)
        name = '{%s}%s' % (namespace, self[1].value)

    if context is None:
        return
    elif isinstance(context, XPathSchemaContext):
        # Bind with the XSD type from a schema
        for item in map(lambda x: self.match_xsd_type(x, name), context.iter_children_or_self()):
            if item:
                yield item
    elif self.xsd_types is self.parser.schema:
        # Untyped selection
        for item in context.iter_children_or_self():
            if is_attribute_node(item, name) or is_element_node(item, name):
                yield item
    elif self.xsd_types is None or isinstance(self.xsd_types, AbstractSchemaProxy):
        # Try to match the XSD type using the item's path
        for item in context.iter_children_or_self():
            if is_attribute_node(item, name) or is_element_node(item, name):
                path = context.get_path(item)

                xsd_component = self.parser.schema.find(path, self.parser.namespaces)
                if xsd_component is not None:
                    self.add_xsd_type(xsd_component.name, xsd_component.type)
                else:
                    self.xsd_types = self.parser.schema

                yield self.get_typed_node(item)
    else:
        # XSD typed selection
        for item in context.iter_children_or_self():
            if is_attribute_node(item, name) or is_element_node(item, name):
                yield self.get_typed_node(item)
###
# Namespace URI as in ElementPath
@method('{', bp=95)
def nud(self):
    # '{uri}name' is an ElementPath extension, disabled in strict mode
    if self.parser.strict:
        self.unexpected()
    namespace = self.parser.next_token.value + self.parser.raw_advance('}')
    self.parser.advance()

    if self.parser.next_token.label not in ('function', 'constructor'):
        self.parser.expected_next('(name)', '*')
    self.parser.next_token.bind_namespace(namespace)

    # Store the URI as a (string) child token, followed by the name token
    self[:] = self.parser.symbol_table['(string)'](self.parser, namespace), \
        self.parser.expression(90)
    return self


@method('{')
def evaluate(self, context=None):
    if self[1].label == 'function':
        return self[1].evaluate(context)
    else:
        # Rebuild the extended '{uri}name' form
        return '{%s}%s' % (self[0].value, self[1].value)


@method('{')
def select(self, context=None):
    if self[1].label == 'function':
        yield self[1].evaluate(context)
    elif context is not None:
        value = '{%s}%s' % (self[0].value, self[1].value)
        for item in context.iter_children_or_self():
            if is_attribute_node(item, value):
                yield item[1]
            elif is_element_node(item, value):
                yield item
###
# Variables
@method('$', bp=90)
def nud(self):
    self.parser.expected_next('(name)')
    self[:] = self.parser.expression(rbp=90),
    # Prefixed names are not allowed as variable references
    if ':' in self[0].value:
        self[0].wrong_syntax("variable reference requires a simple reference name")
    return self


@method('$')
def evaluate(self, context=None):
    """Looks up the variable: parser-level variables win over the dynamic
    context's variables; an unknown name raises (except in schema contexts)."""
    varname = self[0].value
    if varname in self.parser.variables:
        return self.parser.variables[varname]
    elif context is None:
        return
    elif varname in context.variables:
        return context.variables[varname]
    elif isinstance(context, XPathSchemaContext):
        return
    else:
        raise ElementPathNameError('unknown variable', token=self)
###
# Nullary operators (use only the context)
@method(nullary('*'))
def select(self, context=None):
    """'*' as a wildcard node test, or as the multiply operator when it has operands."""
    if self:
        # Product operator
        item = self.evaluate(context)
        if context is not None:
            context.item = item
        yield item
    elif context is None:
        self.missing_context()
    else:
        # Wildcard literal
        for item in context.iter_children_or_self():
            if context.is_principal_node_kind():
                if hasattr(item, 'type'):
                    self.add_xsd_type(item.name, item.type)
                if is_attribute_node(item):
                    # Attribute nodes are (name, value) pairs: yield the value
                    yield item[1]
                else:
                    yield item


@method(nullary('.'))
def select(self, context=None):
    """Context item selection ('.')."""
    if context is None:
        self.missing_context()
    for item in context.iter_self():
        if item is not None:
            if hasattr(item, 'type') and isinstance(context, XPathSchemaContext):
                self.add_xsd_type(item.name, item.type)
            yield item
        elif is_document_node(context.root):
            # A None item stands for the document node
            yield context.root


@method(nullary('..'))
def select(self, context=None):
    """Parent selection ('..'): yields the parent when it is an element node."""
    if context is None:
        self.missing_context()
    else:
        parent = context.get_parent(context.item)
        if is_element_node(parent):
            context.item = parent
            yield parent
###
# Logical Operators
@method(infix('or', bp=20))
def evaluate(self, context=None):
    # Each operand evaluates on its own context copy, so one operand's
    # selection cannot move the context item seen by the other.
    return self.boolean_value(self[0].evaluate(copy(context))) or \
        self.boolean_value(self[1].evaluate(copy(context)))


@method(infix('and', bp=25))
def evaluate(self, context=None):
    return self.boolean_value(self[0].evaluate(copy(context))) and \
        self.boolean_value(self[1].evaluate(copy(context)))


###
# Comparison operators: existential semantics over the operand sequences
# (true if the relation holds for at least one pair of values).
@method(infix('=', bp=30))
def evaluate(self, context=None):
    return any(op1 == op2 for op1, op2 in self.get_comparison_data(context))


@method(infix('!=', bp=30))
def evaluate(self, context=None):
    return any(op1 != op2 for op1, op2 in self.get_comparison_data(context))


@method(infix('<', bp=30))
def evaluate(self, context=None):
    return any(op1 < op2 for op1, op2 in self.get_comparison_data(context))


@method(infix('>', bp=30))
def evaluate(self, context=None):
    return any(op1 > op2 for op1, op2 in self.get_comparison_data(context))


@method(infix('<=', bp=30))
def evaluate(self, context=None):
    return any(op1 <= op2 for op1, op2 in self.get_comparison_data(context))


@method(infix('>=', bp=30))
def evaluate(self, context=None):
    return any(op1 >= op2 for op1, op2 in self.get_comparison_data(context))


###
# Numerical operators
prefix('+')
prefix('-', bp=70)


@method(infix('+', bp=40))
def evaluate(self, context=None):
    if len(self) == 1:
        # Unary plus
        arg = self.get_argument(context, cls=NumericTypeProxy)
        if arg is not None:
            return +arg
    else:
        op1, op2 = self.get_operands(context, cls=ArithmeticTypeProxy)
        if op1 is not None:
            try:
                return op1 + op2
            except TypeError as err:
                raise self.wrong_type(str(err))


@method(infix('-', bp=40))
def evaluate(self, context=None):
    if len(self) == 1:
        # Unary minus
        arg = self.get_argument(context, cls=NumericTypeProxy)
        if arg is not None:
            return -arg
    else:
        op1, op2 = self.get_operands(context, cls=ArithmeticTypeProxy)
        if op1 is not None:
            try:
                return op1 - op2
            except TypeError as err:
                raise self.wrong_type(str(err))


@method(infix('*', bp=45))
def evaluate(self, context=None):
    if self:
        op1, op2 = self.get_operands(context, cls=ArithmeticTypeProxy)
        if op1 is not None:
            try:
                return op1 * op2
            except TypeError as err:
                raise self.wrong_type(str(err))
    else:
        # This is not a multiplication operator but a wildcard select statement
        return [x for x in self.select(context)]


@method(infix('div', bp=45))
def evaluate(self, context=None):
    # Division by zero yields NaN/inf/-inf instead of raising
    dividend, divisor = self.get_operands(context, cls=ArithmeticTypeProxy)
    if dividend is None:
        return
    elif divisor != 0:
        try:
            return dividend / divisor
        except TypeError as err:
            raise self.wrong_type(str(err))
    elif dividend == 0:
        return float('nan')
    elif dividend > 0:
        return float('inf')
    else:
        return float('-inf')


@method(infix('mod', bp=45))
def evaluate(self, context=None):
    op1, op2 = self.get_operands(context, cls=NumericTypeProxy)
    if op1 is not None:
        try:
            return op1 % op2
        except TypeError as err:
            raise self.wrong_type(str(err))


###
# Union expressions
@method('|', bp=50)
def led(self, left):
    # Only the outermost '|' of a chain deduplicates and sorts the results
    self.cut_and_sort = True
    if left.symbol in {'|', 'union'}:
        left.cut_and_sort = False
    self[:] = left, self.parser.expression(rbp=50)
    return self


@method('|')
def select(self, context=None):
    if context is None:
        return
    elif not self.cut_and_sort:
        for k in range(2):
            yield from self[k].select(context.copy())
    else:
        # Deduplicate and emit in document order
        results = {item for k in range(2) for item in self[k].select(context.copy())}
        yield from context.iter_results(results)
###
# Path expressions
@method('//', bp=75)
@method('/', bp=75)
def nud(self):
    # A lone '/' selects the document root
    if self.parser.next_token.symbol == '(end)' and self.symbol == '/':
        return self
    elif self.parser.next_token.label not in self.parser.PATH_STEP_LABELS:
        self.parser.expected_next(*self.parser.PATH_STEP_SYMBOLS)
    self[:] = self.parser.expression(75),
    return self


@method('//')
@method('/')
def led(self, left):
    if self.parser.next_token.label not in self.parser.PATH_STEP_LABELS:
        self.parser.expected_next(*self.parser.PATH_STEP_SYMBOLS)
    self[:] = left, self.parser.expression(75)
    return self


@method('/')
def select(self, context=None):
    """
    Child path expression. Selects child:: axis as default (when bind to '*' or '(name)').
    """
    if context is None:
        return
    elif not self:
        # Absolute path '/': the document root
        if is_document_node(context.root):
            yield context.root
    elif len(self) == 1:
        # Absolute path '/step': restart the selection from the root
        if is_document_node(context.root) or context.item is context.root:
            context.item = None
            yield from self[0].select(context)
    else:
        # 'left/right': apply the right step to each result of the left step,
        # deduplicating element/attribute results via the 'items' list.
        items = []
        for _ in context.iter_selector(self[0].select):
            if not is_xpath_node(context.item):
                self.wrong_type("left operand must returns XPath nodes: {}".format(context.item))

            for result in self[1].select(context):
                if not is_etree_element(result) and not isinstance(result, tuple):
                    yield result
                elif result in items:
                    pass
                elif isinstance(result, (TypedAttribute, TypedElement)):
                    # Deduplicate typed wrappers by their underlying node
                    if result[0] not in items:
                        items.append(result)
                        yield result
                else:
                    items.append(result)
                    yield result
                    if isinstance(context, XPathSchemaContext):
                        # Propagate the XSD type binding to the right step
                        if isinstance(result, tuple):
                            self[1].add_xsd_type(result[0], result[1].type)
                        elif hasattr(result, 'type'):
                            self[1].add_xsd_type(result.tag, result.type)


@method('//')
def select(self, context=None):
    """Descendant path: expands to descendant-or-self:: before the right step."""
    if context is None:
        return
    elif len(self) == 1:
        # Absolute path '//step'
        if is_document_node(context.root) or context.item is context.root:
            context.item = None
            for _ in context.iter_descendants(axis='descendant-or-self'):
                yield from self[0].select(context)
    else:
        for elem in self[0].select(context):
            if not is_element_node(elem):
                self.wrong_type("left operand must returns element nodes: %r" % elem)
            for _ in context.iter_descendants(item=elem):
                yield from self[1].select(context)


###
# Predicate filters
@method('[', bp=80)
def led(self, left):
    self.parser.next_token.unexpected(']')
    self[:] = left, self.parser.expression()
    self.parser.advance(']')
    return self


@method('[')
def select(self, context=None):
    if context is not None:
        if self[0].label == 'axis':
            selector = self[0].select(context)
        else:
            selector = context.iter_selector(self[0].select)

        for _ in selector:
            predicate = [x for x in self[1].select(context.copy())]
            # A single numeric predicate is a positional test ('[3]'),
            # anything else is filtered by its effective boolean value.
            if len(predicate) == 1 and isinstance(predicate[0], NumericTypeProxy):
                if context.position == predicate[0]:
                    yield context.item
            elif self.boolean_value(predicate):
                yield context.item
###
# Parenthesized expressions
@method('(', bp=100)
def nud(self):
    self.parser.next_token.unexpected(')')
    self[:] = self.parser.expression(),
    self.parser.advance(')')
    return self


@method('(')
def evaluate(self, context=None):
    # Parentheses only group: delegate to the inner expression
    return self[0].evaluate(context)


@method('(')
def select(self, context=None):
    return self[0].select(context)
###
# Axes
@method('@', bp=80)
def nud(self):
    # '@' is the abbreviation of the attribute:: axis
    self.parser.expected_next('*', '(name)', ':', message="invalid attribute specification")
    self[:] = self.parser.expression(rbp=80),
    return self


@method('@')
@method(axis('attribute'))
def select(self, context=None):
    if context is None:
        self.missing_context()
    for _ in context.iter_attributes():
        yield from self[0].select(context)


@method(axis('namespace'))
def select(self, context=None):
    """Yields the in-scope namespaces of the context element as NamespaceNode items."""
    if context is not None and is_element_node(context.item):
        elem = context.item
        namespaces = self.parser.namespaces

        for prefix_, uri in namespaces.items():
            context.item = NamespaceNode(prefix_, uri)
            yield context.item

        if hasattr(elem, 'nsmap'):
            # Add element's namespaces for lxml (and use None for default namespace)
            # noinspection PyUnresolvedReferences
            for prefix_, uri in elem.nsmap.items():
                if prefix_ not in namespaces:
                    context.item = NamespaceNode(prefix_, uri)
                    yield context.item


# Each axis select iterates the corresponding context axis and applies
# the node test (self[0]) on every visited item.
@method(axis('self'))
def select(self, context=None):
    if context is not None:
        for _ in context.iter_self():
            yield from self[0].select(context)


@method(axis('child'))
def select(self, context=None):
    if context is not None:
        for _ in context.iter_children_or_self(child_axis=True):
            yield from self[0].select(context)


@method(axis('parent'))
def select(self, context=None):
    if context is not None:
        for _ in context.iter_parent():
            yield from self[0].select(context)


@method(axis('following-sibling'))
@method(axis('preceding-sibling'))
def select(self, context=None):
    if context is not None:
        for _ in context.iter_siblings(axis=self.symbol):
            yield from self[0].select(context)


@method(axis('ancestor'))
@method(axis('ancestor-or-self'))
def select(self, context=None):
    if context is not None:
        for _ in context.iter_ancestors(axis=self.symbol):
            yield from self[0].select(context)


@method(axis('descendant'))
@method(axis('descendant-or-self'))
def select(self, context=None):
    if context is not None:
        for _ in context.iter_descendants(axis=self.symbol):
            yield from self[0].select(context)


@method(axis('following'))
def select(self, context=None):
    if context is not None:
        for _ in context.iter_followings():
            yield from self[0].select(context)


@method(axis('preceding'))
def select(self, context=None):
    if context is not None and is_element_node(context.item):
        for _ in context.iter_preceding():
            yield from self[0].select(context)
###
# Kind tests (for matching of node types in XPath 1.0 or sequence types in XPath 2.0)
@method(function('node', nargs=0, label='kind test'))
def select(self, context=None):
    if context is not None:
        for item in context.iter_children_or_self():
            if item is None:
                # A None item stands for the document node
                yield context.root
            elif is_xpath_node(item):
                yield item


@method(function('processing-instruction', nargs=(0, 1), label='kind test'))
def evaluate(self, context=None):
    if context and is_processing_instruction_node(context.item):
        return context.item


@method(function('comment', nargs=0, label='kind test'))
def evaluate(self, context=None):
    if context and is_comment_node(context.item):
        return context.item


@method(function('text', nargs=0, label='kind test'))
def select(self, context=None):
    if context is not None:
        for item in context.iter_children_or_self():
            if is_text_node(item):
                yield item


###
# Node set functions
@method(function('last', nargs=0))
def evaluate(self, context=None):
    return context.size if context is not None else 0


@method(function('position', nargs=0))
def evaluate(self, context=None):
    return context.position if context is not None else 0


@method(function('count', nargs=1))
def evaluate(self, context=None):
    return len([x for x in self[0].select(context)])


@method(function('id', nargs=1))
def select(self, context=None):
    if context is not None:
        value = self[0].evaluate(context)
        item = context.item
        if is_element_node(item):
            # Yields the descendant-or-self elements whose xml:id matches
            yield from filter(lambda e: e.get(XML_ID) == value, item.iter())


@method(function('name', nargs=(0, 1)))
@method(function('local-name', nargs=(0, 1)))
@method(function('namespace-uri', nargs=(0, 1)))
def evaluate(self, context=None):
    """Name accessors, dispatched on self.symbol over the '{uri}local' form."""
    name = node_name(self.get_argument(context, default_to_context=True))
    if name is None:
        return ''

    symbol = self.symbol
    if symbol == 'name':
        return qname_to_prefixed(name, self.parser.namespaces)
    elif not name or name[0] != '{':
        # No namespace: the whole name is the local part
        return name if symbol == 'local-name' else ''
    elif symbol == 'local-name':
        return name.split('}')[1]
    elif symbol == 'namespace-uri':
        return name.split('}')[0][1:]
###
# String functions
@method(function('string', nargs=1))
def evaluate(self, context=None):
    return self.string_value(self.get_argument(context))


@method(function('contains', nargs=2))
def evaluate(self, context=None):
    arg1 = self.get_argument(context, default='', cls=str)
    arg2 = self.get_argument(context, index=1, default='', cls=str)
    return arg2 in arg1


@method(function('concat'))
def evaluate(self, context=None):
    return ''.join(self.string_value(self.get_argument(context, index=k))
                   for k in range(len(self)))


@method(function('string-length', nargs=(0, 1)))
def evaluate(self, context=None):
    return len(self.get_argument(context, default_to_context=True, default='', cls=str))


@method(function('normalize-space', nargs=(0, 1)))
def evaluate(self, context=None):
    # XPath 1.0 converts the argument to a string; XPath 2.0 requires xs:string
    if self.parser.version == '1.0':
        arg = self.string_value(self.get_argument(context, default_to_context=True, default=''))
    else:
        arg = self.get_argument(context, default_to_context=True, default='', cls=str)
    return ' '.join(arg.strip().split())


@method(function('starts-with', nargs=2))
def evaluate(self, context=None):
    arg1 = self.get_argument(context, default='', cls=str)
    arg2 = self.get_argument(context, index=1, default='', cls=str)
    return arg1.startswith(arg2)


@method(function('translate', nargs=3))
def evaluate(self, context=None):
    arg = self.get_argument(context, default='', cls=str)
    map_string = self.get_argument(context, index=1, default='', cls=str)
    trans_string = self.get_argument(context, index=2, default='', cls=str)

    maketrans = str.maketrans
    if len(map_string) == len(trans_string):
        return arg.translate(maketrans(map_string, trans_string))
    elif len(map_string) > len(trans_string):
        # Characters of map_string without a counterpart are deleted
        # (third argument of str.maketrans)
        k = len(trans_string)
        return arg.translate(maketrans(map_string[:k], trans_string, map_string[k:]))
    else:
        self.wrong_value("the third argument must have a length less or equal than the second")


@method(function('substring', nargs=(2, 3)))
def evaluate(self, context=None):
    """XPath substring(): 1-based positions, NaN/inf start gives ''."""
    item = self.get_argument(context, default='', cls=str)
    start = self.get_argument(context, index=1)
    try:
        if math.isnan(start) or math.isinf(start):
            return ''
    except TypeError:
        self.wrong_type("the second argument must be xs:numeric")
    else:
        # Convert from 1-based XPath position to 0-based Python index
        start = int(round(start)) - 1

    if len(self) == 2:
        return '' if item is None else item[max(start, 0):]
    else:
        length = self.get_argument(context, index=2)
        try:
            if math.isnan(length) or length <= 0:
                return ''
        except TypeError:
            self.wrong_type("the third argument must be xs:numeric")

        if item is None:
            return ''
        elif math.isinf(length):
            return item[max(start, 0):]
        else:
            stop = start + int(round(length))
            return '' if item is None else item[slice(max(start, 0), max(stop, 0))]


@method(function('substring-before', nargs=2))
@method(function('substring-after', nargs=2))
def evaluate(self, context=None):
    """Returns the part of arg1 before/after the first occurrence of arg2
    ('' when arg2 does not occur)."""
    arg1 = self.get_argument(context, default='', cls=str)
    arg2 = self.get_argument(context, index=1, default='', cls=str)
    if arg1 is None:
        return ''

    index = 0
    try:
        index = arg1.find(arg2)
    except AttributeError:
        self.wrong_type("the first argument must be a string")
    except TypeError:
        self.wrong_type("the second argument must be a string")

    if index < 0:
        return ''
    if self.symbol == 'substring-before':
        return arg1[:index]
    else:
        return arg1[index + len(arg2):]
###
# Boolean functions
@method(function('boolean', nargs=1))
def evaluate(self, context=None):
    return self.boolean_value([x for x in self[0].select(context)])


@method(function('not', nargs=1))
def evaluate(self, context=None):
    return not self.boolean_value([x for x in self[0].select(context)])


@method(function('true', nargs=0))
def evaluate(self, context=None):
    return True


@method(function('false', nargs=0))
def evaluate(self, context=None):
    return False


@method(function('lang', nargs=1))
def evaluate(self, context=None):
    """Tests the xml:lang of the context element (or of the nearest ancestor
    carrying one) against the argument, ignoring any country subtag."""
    if context is None:
        return
    elif not is_element_node(context.item):
        return False
    else:
        try:
            lang = context.item.attrib[XML_LANG].strip()
        except KeyError:
            # NOTE(review): other code here uses context.iter_ancestors(axis=...);
            # confirm that iter_ancestor() exists on this context implementation.
            for elem in context.iter_ancestor():
                if XML_LANG in elem.attrib:
                    lang = elem.attrib[XML_LANG]
                    break
            else:
                return False

        # Compare only the primary subtag (e.g. 'en' in 'en-US')
        if '-' in lang:
            lang, _ = lang.split('-')
        return lang.lower() == self[0].evaluate().lower()
###
# Number functions
@method(function('number', nargs=(0, 1)))
def evaluate(self, context=None):
    arg = self.get_argument(context, default_to_context=True)
    try:
        return float(self.string_value(arg) if is_xpath_node(arg) else arg)
    except (TypeError, ValueError):
        # Non-numeric input converts to NaN instead of raising
        return float('nan')


@method(function('sum', nargs=(1, 2)))
def evaluate(self, context=None):
    values = [self.number_value(x) if isinstance(x, UntypedAtomic) else x
              for x in self[0].select(context)]
    if not values:
        # Empty sequence: return the optional second ('zero') argument
        zero = 0 if len(self) == 1 else self.get_argument(context, index=1)
        return [] if zero is None else zero
    elif any(isinstance(x, float) and math.isnan(x) for x in values):
        return float('nan')

    # Durations sum natively; everything else goes through number_value()
    if any(isinstance(x, DayTimeDuration) for x in values) or \
            all(isinstance(x, YearMonthDuration) for x in values):
        return sum(values)

    try:
        return sum(self.number_value(x) for x in values)
    except TypeError:
        if self.parser.version == '1.0':
            return float('nan')
        raise self.error('FORG0006')


@method(function('ceiling', nargs=1))
@method(function('floor', nargs=1))
def evaluate(self, context=None):
    arg = self.get_argument(context)
    if arg is None:
        # XPath 1.0 returns NaN on the empty sequence, XPath 2.0 an empty sequence
        return float('nan') if self.parser.version == '1.0' else []
    elif is_xpath_node(arg) or self.parser.compatibility_mode:
        arg = self.number_value(arg)

    if isinstance(arg, float) and (math.isnan(arg) or math.isinf(arg)):
        return arg

    try:
        return math.floor(arg) if self.symbol == 'floor' else math.ceil(arg)
    except TypeError as err:
        self.wrong_type(str(err))


@method(function('round', nargs=1))
def evaluate(self, context=None):
    arg = self.get_argument(context)
    if arg is None:
        return float('nan') if self.parser.version == '1.0' else []
    elif is_xpath_node(arg) or self.parser.compatibility_mode:
        arg = self.number_value(arg)

    if isinstance(arg, float) and (math.isnan(arg) or math.isinf(arg)):
        return arg

    try:
        number = decimal.Decimal(arg)
        if number > 0:
            # Positive halves round up ('ROUND_HALF_UP' is decimal.ROUND_HALF_UP)
            return number.quantize(decimal.Decimal('1'), rounding='ROUND_HALF_UP')
        else:
            # NOTE(review): round() on a Decimal rounds halves to even, while
            # XPath round() specifies rounding towards +inf for halves — confirm.
            return round(number)
    except TypeError as err:
        self.wrong_type(str(err))
    except decimal.DecimalException as err:
        self.wrong_value(str(err))


register('(end)')
XPath1Parser.build()  # finalizes the parser class from the registered tokens
|
# Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# Revised BSD License, included in this distribution as LICENSE
"""
CLI program for storing packages to Data.World
"""
# flake8: noqa
import json
import mimetypes
import sys
from os import getcwd
from os.path import basename, join
from metapack import open_package
from metapack.cli.core import err, prt, warn
from metatab import (DEFAULT_METATAB_FILE, MetatabDoc, MetatabError,
resolve_package_metadata_url)
from metatab.util import slugify
from rowgenerators import Url, get_cache
# The datadotworld client is an optional dependency: abort with a helpful
# message when it isn't installed (err() presumably terminates the process).
try:
    import datadotworld as dw
    from datadotworld.client.api import RestApiError
except ImportError:
    err(
        "To run the Metataworld importer, you must first install the datadotworld package. See https://github.com/datadotworld/data.world-py")
def metaworld():
    """CLI entry point: publish a Metatab package to Data.World.

    Parses the command line, resolves the metatab file argument, then either
    prints the existing dataset record (``-i``) or uploads the package.
    Always terminates the process with exit status 0 on success.
    """
    import argparse
    parser = argparse.ArgumentParser(
        prog='metakan',
        description='Publish packages to Data.World')

    parser.add_argument('-i', '--info', default=False, action='store_true',
                        help="Show package information")

    parser.add_argument('metatabfile', nargs='?', default=DEFAULT_METATAB_FILE, help='Path to a Metatab file')

    class MetapackCliMemo(object):
        # Small holder that resolves the metatab file argument into the
        # package/metadata URLs used below.
        def __init__(self, args):
            self.cwd = getcwd()
            self.args = args
            self.cache = get_cache('metapack')

            self.mtfile_arg = args.metatabfile if args.metatabfile else join(self.cwd, DEFAULT_METATAB_FILE)
            self.mtfile_url = Url(self.mtfile_arg)
            self.resource = self.mtfile_url.parts.fragment
            self.package_url, self.mt_file = resolve_package_metadata_url(self.mtfile_url.rebuild_url(False, False))

    m = MetapackCliMemo(parser.parse_args(sys.argv[1:]))

    try:
        doc = MetatabDoc(m.mt_file, cache=m.cache)
    except (IOError, MetatabError) as e:
        err("Failed to open metatab '{}': {}".format(m.mt_file, e))

    if m.args.info:
        package_info(doc)
    else:
        send_to_dw(doc)

    # Fix: use sys.exit() — the bare exit() builtin is installed by the `site`
    # module for interactive use and isn't guaranteed in all runtimes.
    sys.exit(0)
def package_info(doc):
    """Fetch and print the Data.World dataset record for *doc* as indented JSON."""
    api = dw.api_client()
    owner = 'ericbusboom'
    dataset_key = join(owner, slugify(doc.find_first_value("Root.Title")))
    try:
        dataset = api.get_dataset(dataset_key)
        prt(json.dumps(dataset, indent=4))
    except RestApiError as e:
        err(e)
def get_resource_urls(doc):
    """Collects a {filename: url} map for the distributions referenced by *doc*.

    ZIP packages are skipped, XLSX handling is currently disabled (see the
    'if False' guard below), and each CSV package contributes its signed URL
    plus one '.csv' entry per resource of the opened package.
    """
    resources = {}

    for dist in doc.find("Root.Distribution"):

        try:
            package_url, metadata_url = resolve_package_metadata_url(dist.value)
        except Exception as e:
            # Best effort: warn and move on to the next distribution
            warn("Failed for Distribution {}; {}".format(dist.value, e))
            continue

        u = Url(package_url)
        if u.resource_format == 'zip':
            prt("Skipping ZIP package ", package_url)

        elif u.resource_format == 'xlsx':
            # NOTE(review): dead branch — the 'if False' guard disables XLSX
            # uploads; presumably kept for a later re-enable. Confirm intent.
            if False:
                resources[basename(package_url)] = package_url
                prt("Adding XLS package ", package_url)
            pass

        elif u.resource_format == 'csv':

            resources[basename(package_url)] = u.signed_resource_url
            prt("Adding CSV package {}".format(basename(package_url)))

            try:
                p = open_package(package_url)
            except (IOError, MetatabError) as e:
                err("Failed to open package '{}' from reference '{}': {}".format(package_url, dist.value, e))

            for r in p.resources():

                mimetype = mimetypes.guess_type(r.resolved_url)[0]

                # NOTE(review): 'ext' is computed but never used below, and the
                # bare except hides a TypeError when mimetype is None —
                # candidate for removal.
                try:
                    ext = mimetypes.guess_extension(mimetype)[1:]
                except:
                    ext = None

                # '.csv': Data>world currently get the format from the name, not the URL
                resources[r.name + '.csv'] = r.resolved_url

                prt("Adding CSV resource {}".format(r.name))

        else:
            prt('Skipping {}'.format(package_url))

    return resources
def truncate(v, l, suffix=''):
    """Return *v* shortened to at most *l* characters.

    When truncation occurs, *suffix* (e.g. '...') replaces the removed tail so
    the result still fits in *l* characters. Fix: the original computed the
    shortened slice but never appended the suffix, so a non-empty suffix just
    produced a too-short string. Behavior with the default suffix='' is
    unchanged.
    """
    if len(v) <= l:
        return v
    return v[:l - len(suffix)] + suffix
def send_to_dw(doc):
    """Creates or replaces the Data.World dataset corresponding to *doc*."""
    client = dw.api_client()

    # NOTE(review): hard-coded owner account — presumably should come from
    # configuration; confirm.
    username = 'ericbusboom'
    title = doc.find_first_value("Root.Title")
    # The title is truncated to 30 characters for both the key and the record
    key = username + '/' + slugify(truncate(title, 30))

    d = dict(
        title=truncate(title, 30),
        description=doc.find_first_value("Root.Description"),
        summary=doc.markdown,
        visibility='OPEN',
        files=get_resource_urls(doc)
    )

    try:
        ds = client.get_dataset(key)  # Raise an error if the dataset does not exist
        ds = client.replace_dataset(key, **d)
        ds = client.get_dataset(key)
    except RestApiError:
        # The dataset doesn't exist yet: create it
        ds = client.create_dataset(username, **d)
        ds = client.get_dataset(key)
|
from MetadataManagerCore.file.WatchDogFileHandler import WatchDogFileHandler
from MetadataManagerCore.file.FileHandlerManager import FileHandlerManager
from MetadataManagerCore.file.FileSystemWatchDog import FileSystemWatchDog
from MetadataManagerCore.file.WatchDog import WatchDog
from typing import List
from MetadataManagerCore.service.Service import Service, ServiceStatus
from MetadataManagerCore.ftp.SFTPWatchDog import SFTPWatchDog
class WatchDogService(Service):
    """Service that watches a folder for file events — on the local file system
    or over SFTP — and forwards them to configurable file handlers."""

    def __init__(self) -> None:
        super().__init__()

        self.watchedFolder : str = None
        # Fix: annotation corrected from str to List[str] (a list is stored).
        self.watchedFileExtensions : List[str] = None
        self.watchDog : WatchDog = None
        # Fix: this was the bare annotation "self.recursive : False", which
        # never assigned a value and left the attribute undefined until setup().
        self.recursive: bool = False
        self.processExistingFiles = False
        self.fileHandler = None
        self.fileHandlerManager = None

        self.existingFileHandler = None
        self.fileCreatedHandler = None
        self.fileModifiedHandler = None

    def setup(self, watchedFolder: str, watchedFileExtensions: List[str], recursive = False):
        """Stores the watch configuration and resets any previous watch dog."""
        self.watchedFolder = watchedFolder
        self.watchedFileExtensions = watchedFileExtensions
        self.watchDog : WatchDog = None
        self.recursive = recursive

    def setFileHandlers(self, existingFileHandler: WatchDogFileHandler = None,
                        fileCreatedHandler: WatchDogFileHandler = None,
                        fileModifiedHandler: WatchDogFileHandler = None):
        """Registers the handlers and subscribes them to the watch dog events.

        Call after initWatchDog()/initSFTPWatchDog(), since it references
        self.watchDog.
        """
        self.existingFileHandler = existingFileHandler
        self.fileCreatedHandler = fileCreatedHandler
        self.fileModifiedHandler = fileModifiedHandler

        if self.existingFileHandler:
            self.existingFileHandler.watchDog = self.watchDog

        self.setupEvents()

    def initSFTPWatchDog(self, host: str, username: str, password: str, pollingIntervalInSeconds: float):
        """Creates an SFTP-backed watch dog that polls the remote folder."""
        self.watchDog = SFTPWatchDog(self.watchedFolder, host, username, password, self.watchedFileExtensions, self.recursive)
        self.watchDog.setPollingIntervalInSeconds(pollingIntervalInSeconds)
        self.watchDog.onConnectionEstablished.subscribe(self.onSFTPWatchDogConnectionEstablished)

    def initWatchDog(self):
        """Creates a local file system watch dog."""
        self.watchDog = FileSystemWatchDog(self.watchedFolder, self.watchedFileExtensions, self.recursive)

    def setupEvents(self):
        # Subscribe only the handlers that were actually provided.
        if self.fileCreatedHandler:
            self.watchDog.fileCreatedEvent.subscribe(self.fileCreatedHandler)

        if self.fileModifiedHandler:
            self.watchDog.fileModifiedEvent.subscribe(self.fileModifiedHandler)

    def onStatusChanged(self, status: ServiceStatus):
        # Stop the watch dog whenever the service leaves the Running state.
        if status != ServiceStatus.Running:
            if self.watchDog and self.watchDog.running:
                self.watchDog.stop()

    def _run(self):
        if self.watchDog:
            if self.existingFileHandler and isinstance(self.watchDog, FileSystemWatchDog):
                # SFTP watch dogs process existing files after connecting instead
                # (see onSFTPWatchDogConnectionEstablished).
                self.watchDog.processFiles(self.existingFileHandler)

            self.watchDog.run()

    def onSFTPWatchDogConnectionEstablished(self):
        if self.existingFileHandler:
            self.watchDog.processFiles(self.existingFileHandler)

    def asDict(self):
        """Serializes the service configuration, including handlers, to a dict."""
        isSFTP = isinstance(self.watchDog, SFTPWatchDog)
        host = self.watchDog.host if isSFTP else None
        username = self.watchDog.username if isSFTP else None
        password = self.watchDog.password if isSFTP else None
        pollingIntervalInSeconds = self.watchDog.pollingIntervalInSeconds if isSFTP else None
        serviceDict = WatchDogService.constructDict(self.watchedFolder, self.watchedFileExtensions,
                                                    self.recursive, isSFTP, host, username,
                                                    password, pollingIntervalInSeconds)

        # Save file handler info:
        if self.existingFileHandler:
            serviceDict['existingFileHandler'] = self.existingFileHandler.asDict()

        if self.fileCreatedHandler:
            serviceDict['fileCreatedHandler'] = self.fileCreatedHandler.asDict()

        if self.fileModifiedHandler:
            serviceDict['fileModifiedHandler'] = self.fileModifiedHandler.asDict()

        return serviceDict

    @staticmethod
    def constructDict(watchedFolder: str, watchedFileExtensions: List[str], recursive: bool,
                      isSFTP: bool, sftpHost: str, sftpUsername: str, sftpPassword: str,
                      sftpPollingIntervalInSeconds: float,
                      existingFileHandlerDict: dict = None, fileCreatedHandlerDict: dict = None, fileModifiedHandlerDict: dict = None):
        """Builds the serialized configuration dict consumed by setupFromDict()."""
        base = {
            'watchedFolder': watchedFolder,
            'watchedFileExtensions': watchedFileExtensions,
            'isSFTP': isSFTP,
            'recursive': recursive
        }

        if isSFTP:
            base = {
                **base,
                'host': sftpHost,
                'username': sftpUsername,
                # Fix: restored the sftpPassword value (the source contained a
                # "<PASSWORD>" redaction placeholder, a syntax error).
                # NOTE(review): this persists the password in plain text —
                # consider a secret store.
                'password': sftpPassword,
                'pollingIntervalInSeconds': sftpPollingIntervalInSeconds if sftpPollingIntervalInSeconds else 15.0
            }

        if existingFileHandlerDict:
            base['existingFileHandler'] = existingFileHandlerDict

        if fileCreatedHandlerDict:
            base['fileCreatedHandler'] = fileCreatedHandlerDict

        if fileModifiedHandlerDict:
            base['fileModifiedHandler'] = fileModifiedHandlerDict

        return base

    def setupFromDict(self, theDict: dict):
        """Restores the service from a dict produced by asDict()/constructDict()."""
        self.setup(theDict.get('watchedFolder'), theDict.get('watchedFileExtensions'), theDict.get('recursive'))

        if theDict.get('isSFTP'):
            self.initSFTPWatchDog(theDict.get('host'), theDict.get('username'), theDict.get('password'), theDict.get('pollingIntervalInSeconds'))
        else:
            self.initWatchDog()

        # Load and setup file handlers:
        existingFileHandlerDict = theDict.get('existingFileHandler')
        fileCreatedHandlerDict = theDict.get('fileCreatedHandler')
        fileModifiedHandlerDict = theDict.get('fileModifiedHandler')

        self.fileHandlerManager = self.serviceRegistry.fileHandlerManager

        existingFileHandler = self.fileHandlerManager.constructFileHandlerFromDict(existingFileHandlerDict) if existingFileHandlerDict else None
        fileCreatedHandler = self.fileHandlerManager.constructFileHandlerFromDict(fileCreatedHandlerDict) if fileCreatedHandlerDict else None
        fileModifiedHandler = self.fileHandlerManager.constructFileHandlerFromDict(fileModifiedHandlerDict) if fileModifiedHandlerDict else None

        self.setFileHandlers(existingFileHandler, fileCreatedHandler, fileModifiedHandler)
# coding: utf-8
"""
cloudFPGA Resource Manager API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class MantleArchitectureApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def cf_manager_rest_api_get_composable_logic_all_part(self, username, password, part, **kwargs): # noqa: E501
        """Returns all composable logics of the given part that are `IN_USE` # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cf_manager_rest_api_get_composable_logic_all_part(username, password, part, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param str part: The part of the composable logics (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.cf_manager_rest_api_get_composable_logic_all_part_with_http_info(username, password, part, **kwargs) # noqa: E501
        else:
            (data) = self.cf_manager_rest_api_get_composable_logic_all_part_with_http_info(username, password, part, **kwargs) # noqa: E501
            return data
    def cf_manager_rest_api_get_composable_logic_all_part_with_http_info(self, username, password, part, **kwargs): # noqa: E501
        """Returns all composable logics of the given part that are `IN_USE` # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cf_manager_rest_api_get_composable_logic_all_part_with_http_info(username, password, part, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param str part: The part of the composable logics (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['username', 'password', 'part'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method cf_manager_rest_api_get_composable_logic_all_part" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'username' is set
        if ('username' not in params or
                params['username'] is None):
            raise ValueError("Missing the required parameter `username` when calling `cf_manager_rest_api_get_composable_logic_all_part`") # noqa: E501
        # verify the required parameter 'password' is set
        if ('password' not in params or
                params['password'] is None):
            raise ValueError("Missing the required parameter `password` when calling `cf_manager_rest_api_get_composable_logic_all_part`") # noqa: E501
        # verify the required parameter 'part' is set
        if ('part' not in params or
                params['part'] is None):
            raise ValueError("Missing the required parameter `part` when calling `cf_manager_rest_api_get_composable_logic_all_part`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'part' in params:
            path_params['part'] = params['part'] # noqa: E501
        query_params = []
        if 'username' in params:
            query_params.append(('username', params['username'])) # noqa: E501
        if 'password' in params:
            query_params.append(('password', params['password'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/composablelogic/by_part/{part}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None, # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def cf_manager_rest_api_get_composable_logic_all_prp(self, username, password, prp, **kwargs): # noqa: E501
        """Returns all composable logics of the given prp-type that are `IN_USE` # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cf_manager_rest_api_get_composable_logic_all_prp(username, password, prp, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param int prp: The prp-level of the composable logics (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.cf_manager_rest_api_get_composable_logic_all_prp_with_http_info(username, password, prp, **kwargs) # noqa: E501
        else:
            (data) = self.cf_manager_rest_api_get_composable_logic_all_prp_with_http_info(username, password, prp, **kwargs) # noqa: E501
            return data
    def cf_manager_rest_api_get_composable_logic_all_prp_with_http_info(self, username, password, prp, **kwargs): # noqa: E501
        """Returns all composable logics of the given prp-type that are `IN_USE` # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cf_manager_rest_api_get_composable_logic_all_prp_with_http_info(username, password, prp, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param int prp: The prp-level of the composable logics (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['username', 'password', 'prp'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method cf_manager_rest_api_get_composable_logic_all_prp" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'username' is set
        if ('username' not in params or
                params['username'] is None):
            raise ValueError("Missing the required parameter `username` when calling `cf_manager_rest_api_get_composable_logic_all_prp`") # noqa: E501
        # verify the required parameter 'password' is set
        if ('password' not in params or
                params['password'] is None):
            raise ValueError("Missing the required parameter `password` when calling `cf_manager_rest_api_get_composable_logic_all_prp`") # noqa: E501
        # verify the required parameter 'prp' is set
        if ('prp' not in params or
                params['prp'] is None):
            raise ValueError("Missing the required parameter `prp` when calling `cf_manager_rest_api_get_composable_logic_all_prp`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'prp' in params:
            path_params['prp'] = params['prp'] # noqa: E501
        query_params = []
        if 'username' in params:
            query_params.append(('username', params['username'])) # noqa: E501
        if 'password' in params:
            query_params.append(('password', params['password'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/composablelogic/by_prp/{prp}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None, # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def cf_manager_rest_api_get_composable_logic_all_shell_type(self, username, password, shell_type, **kwargs): # noqa: E501
        """Returns all composable logics of the given shell-type that are `IN_USE` # noqa: E501
        If the resulting list is empty, the shell_type is invalid (or no such composalbe logics exist). # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cf_manager_rest_api_get_composable_logic_all_shell_type(username, password, shell_type, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param str shell_type: Name of cloudFPGA Shell (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.cf_manager_rest_api_get_composable_logic_all_shell_type_with_http_info(username, password, shell_type, **kwargs) # noqa: E501
        else:
            (data) = self.cf_manager_rest_api_get_composable_logic_all_shell_type_with_http_info(username, password, shell_type, **kwargs) # noqa: E501
            return data
    def cf_manager_rest_api_get_composable_logic_all_shell_type_with_http_info(self, username, password, shell_type, **kwargs): # noqa: E501
        """Returns all composable logics of the given shell-type that are `IN_USE` # noqa: E501
        If the resulting list is empty, the shell_type is invalid (or no such composalbe logics exist). # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cf_manager_rest_api_get_composable_logic_all_shell_type_with_http_info(username, password, shell_type, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param str shell_type: Name of cloudFPGA Shell (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['username', 'password', 'shell_type'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method cf_manager_rest_api_get_composable_logic_all_shell_type" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'username' is set
        if ('username' not in params or
                params['username'] is None):
            raise ValueError("Missing the required parameter `username` when calling `cf_manager_rest_api_get_composable_logic_all_shell_type`") # noqa: E501
        # verify the required parameter 'password' is set
        if ('password' not in params or
                params['password'] is None):
            raise ValueError("Missing the required parameter `password` when calling `cf_manager_rest_api_get_composable_logic_all_shell_type`") # noqa: E501
        # verify the required parameter 'shell_type' is set
        if ('shell_type' not in params or
                params['shell_type'] is None):
            raise ValueError("Missing the required parameter `shell_type` when calling `cf_manager_rest_api_get_composable_logic_all_shell_type`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'shell_type' in params:
            path_params['shell_type'] = params['shell_type'] # noqa: E501
        query_params = []
        if 'username' in params:
            query_params.append(('username', params['username'])) # noqa: E501
        if 'password' in params:
            query_params.append(('password', params['password'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/composablelogic/by_shell/{shell_type}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None, # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def cf_manager_rest_api_get_composable_logic_dcp(self, username, password, cl_id, **kwargs): # noqa: E501
        """Get the dcp file of a composable logic # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cf_manager_rest_api_get_composable_logic_dcp(username, password, cl_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param int cl_id: ID of a composable logic (Static Shell or Mantles) (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.cf_manager_rest_api_get_composable_logic_dcp_with_http_info(username, password, cl_id, **kwargs) # noqa: E501
        else:
            (data) = self.cf_manager_rest_api_get_composable_logic_dcp_with_http_info(username, password, cl_id, **kwargs) # noqa: E501
            return data
    def cf_manager_rest_api_get_composable_logic_dcp_with_http_info(self, username, password, cl_id, **kwargs): # noqa: E501
        """Get the dcp file of a composable logic # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cf_manager_rest_api_get_composable_logic_dcp_with_http_info(username, password, cl_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param int cl_id: ID of a composable logic (Static Shell or Mantles) (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['username', 'password', 'cl_id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method cf_manager_rest_api_get_composable_logic_dcp" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'username' is set
        if ('username' not in params or
                params['username'] is None):
            raise ValueError("Missing the required parameter `username` when calling `cf_manager_rest_api_get_composable_logic_dcp`") # noqa: E501
        # verify the required parameter 'password' is set
        if ('password' not in params or
                params['password'] is None):
            raise ValueError("Missing the required parameter `password` when calling `cf_manager_rest_api_get_composable_logic_dcp`") # noqa: E501
        # verify the required parameter 'cl_id' is set
        if ('cl_id' not in params or
                params['cl_id'] is None):
            raise ValueError("Missing the required parameter `cl_id` when calling `cf_manager_rest_api_get_composable_logic_dcp`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'cl_id' in params:
            path_params['cl_id'] = params['cl_id'] # noqa: E501
        query_params = []
        if 'username' in params:
            query_params.append(('username', params['username'])) # noqa: E501
        if 'password' in params:
            query_params.append(('password', params['password'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/composablelogic/{cl_id}/dcp', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None, # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def cf_manager_rest_api_get_composable_logic_meta(self, username, password, cl_id, **kwargs): # noqa: E501
        """Get the meta data of a composable logic # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cf_manager_rest_api_get_composable_logic_meta(username, password, cl_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param int cl_id: ID of a composable logic (Static Shell or Mantles) (required)
        :return: Image
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.cf_manager_rest_api_get_composable_logic_meta_with_http_info(username, password, cl_id, **kwargs) # noqa: E501
        else:
            (data) = self.cf_manager_rest_api_get_composable_logic_meta_with_http_info(username, password, cl_id, **kwargs) # noqa: E501
            return data
    def cf_manager_rest_api_get_composable_logic_meta_with_http_info(self, username, password, cl_id, **kwargs): # noqa: E501
        """Get the meta data of a composable logic # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cf_manager_rest_api_get_composable_logic_meta_with_http_info(username, password, cl_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param int cl_id: ID of a composable logic (Static Shell or Mantles) (required)
        :return: Image
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['username', 'password', 'cl_id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method cf_manager_rest_api_get_composable_logic_meta" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'username' is set
        if ('username' not in params or
                params['username'] is None):
            raise ValueError("Missing the required parameter `username` when calling `cf_manager_rest_api_get_composable_logic_meta`") # noqa: E501
        # verify the required parameter 'password' is set
        if ('password' not in params or
                params['password'] is None):
            raise ValueError("Missing the required parameter `password` when calling `cf_manager_rest_api_get_composable_logic_meta`") # noqa: E501
        # verify the required parameter 'cl_id' is set
        if ('cl_id' not in params or
                params['cl_id'] is None):
            raise ValueError("Missing the required parameter `cl_id` when calling `cf_manager_rest_api_get_composable_logic_meta`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'cl_id' in params:
            path_params['cl_id'] = params['cl_id'] # noqa: E501
        query_params = []
        if 'username' in params:
            query_params.append(('username', params['username'])) # noqa: E501
        if 'password' in params:
            query_params.append(('password', params['password'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/composablelogic/{cl_id}/meta', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Image', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
# -- ceph_medic/tests/remote/test_functions.py (repo: gabriellasroman/ceph-medic) --
import os
from ceph_medic.remote import functions
def make_test_file(filename, contents=None):
    """Create *filename* containing *contents*; falsy contents fall back to "foo"."""
    text = contents or "foo"
    with open(filename, 'w') as handle:
        handle.write(text)
def make_test_tree(path, contents=None, tree=None):
    """Create a small fixture tree under *path*: file1.txt and dir1/file2.txt.

    :param path: existing directory to populate
    :param contents: contents for both files; previously this argument was
                     accepted but silently ignored -- it is now forwarded to
                     make_test_file (default None preserves the old "foo" fill)
    :param tree: accepted for backward compatibility but currently unused
    """
    file1 = os.path.join(path, "file1.txt")
    dir1 = os.path.join(path, "dir1")
    file2 = os.path.join(dir1, "file2.txt")
    make_test_file(file1, contents)
    os.mkdir(dir1)
    make_test_file(file2, contents)
class TestStatPath(object):
    """Behavioral checks for functions.stat_path on regular files and directories."""

    def _fresh_file(self, tmpdir, contents=None):
        # Helper: create one throwaway file inside the pytest tmpdir and return its path.
        target = os.path.join(str(tmpdir), 'file')
        make_test_file(target, contents=contents)
        return target

    def test_stat_file_includes_owner(self, tmpdir):
        assert "owner" in functions.stat_path(self._fresh_file(tmpdir))

    def test_stat_file_includes_group(self, tmpdir):
        assert "group" in functions.stat_path(self._fresh_file(tmpdir))

    def test_includes_file_content(self, tmpdir):
        target = self._fresh_file(tmpdir, contents="foo")
        assert functions.stat_path(target, get_contents=True)["contents"] == "foo"

    def test_exception_is_empty_on_success(self, tmpdir):
        assert not functions.stat_path(self._fresh_file(tmpdir))["exception"]

    def test_stat_dir(self, tmpdir):
        assert functions.stat_path(str(tmpdir)) != {}

    def test_no_callables(self, tmpdir):
        # Results must be plain data (remotely serialisable), never functions.
        assert all(not callable(v) for v in functions.stat_path(str(tmpdir)).values())
class TestStatPathErrors(object):
    """stat_path must capture (not raise) errors for missing paths."""

    def test_captures_exceptions(self):
        outcome = functions.stat_path('/does/not/exist')
        exc = outcome['exception']
        assert exc['attributes']['errno'] == '2'
        assert exc['name'] in ('FileNotFoundError', 'OSError')
class AttributeLandMine(object):
    """Test helper whose only attribute raises on access, to exercise error capture."""

    @property
    def explode(self):
        # Reading this attribute must always blow up.
        raise ValueError('Raising on attribute access')
class TestCaptureException(object):
    """capture_exception must degrade gracefully on hostile/unserialisable objects."""

    def test_exceptions_in_errors_are_ignored(self):
        captured = functions.capture_exception(AttributeLandMine())
        assert captured['attributes'] == {'explode': None}

    def test_unserializable_attributes(self, factory):
        boom = factory(unserial=lambda: True)
        captured = functions.capture_exception(boom)
        assert '<function ' in captured['attributes']['unserial']
class TestPathTree(object):
    """Behavioral checks for functions.path_tree traversal and skip options."""

    def _tree(self, tmpdir):
        # Helper: materialise the standard fixture tree and return its root path.
        root = str(tmpdir)
        make_test_tree(root)
        return root

    def test_skip_dirs(self, tmpdir):
        root = self._tree(tmpdir)
        assert "dir1" not in functions.path_tree(root, skip_dirs=['dir1'])["dirs"]

    def test_skip_files(self, tmpdir):
        root = self._tree(tmpdir)
        assert "file1.txt" not in functions.path_tree(root, skip_files=['file1.txt'])["files"]

    def test_includes_path(self, tmpdir):
        root = self._tree(tmpdir)
        assert functions.path_tree(root)["path"] == root

    def test_includes_files(self, tmpdir):
        root = self._tree(tmpdir)
        result = functions.path_tree(root)
        assert "files" in result
        assert os.path.join(root, "file1.txt") in result["files"]

    def test_includes_dirs(self, tmpdir):
        root = self._tree(tmpdir)
        result = functions.path_tree(root)
        assert "dirs" in result
        assert os.path.join(root, "dir1") in result["dirs"]
## \file serialiser.py
from pathlib import Path
from collections import defaultdict
import indigox as ix
import os
__all__ = ["SaveITPFile", "SavePDBFile", "SaveIXDFile", "SaveRTPFile"]
# Correction required to convert GROMOS improper units to GROMACS improper units
# 0.0174532925 is pi/180 (radians per degree), so this divides force constants
# by (rad/deg)^2 -- presumably converting per-radian^2 to per-degree^2 units;
# TODO(review): confirm direction against the GROMACS manual.
improper_correction = 1. / (0.0174532925 * 0.0174532925)
def SaveRTPFile(path, mol, pmol=None):
    """Write *mol* out as a GROMACS residue topology (.rtp) file.

    :param path: destination file path; parent directories are created as needed
    :param mol: the (parameterised) indigox molecule to serialise
    :param pmol: optional parameterisation data; when given, per-atom charge
                 statistics and alternative bonded type codes are emitted as
                 trailing comments
    """
    print("Saving molecule %s to RTP file at path %s." % (mol.GetName(), str(os.path.join(os.getcwd(), path))))
    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)
    file = path.open('w')
    # header printing
    header = """; File generated by the indigox package
; No guarantees are provided for the usefulness of this file
[ {} ]
[ atoms ]"""
    print(header.format(mol.GetAtoms()[0].GetResidueName()), file=file)
    # atom printing
    atm_fmt_str = "{name:>5} {type:>6} {charge:>11.5f} {chargegroup:>5} {extra}"
    for atom in mol.GetAtoms():
        atm_dat = {
            "type": atom.GetType().GetName() if atom.HasType() else "%%%",
            "name": atom.GetName(),
            "chargegroup": atom.GetChargeGroupID() + 1,
            "charge": atom.GetPartialCharge(),
            "extra": ""
        }
        if pmol is not None:
            patom = pmol.GetAtom(atom)
            extra_info = ";"
            if not len(patom.GetMappedCharges()):
                extra_info += " UNMAPPED"
            else:
                mu = patom.MeanCharge()
                eta = patom.MedianCharge()
                sigma = patom.StandardDeviationCharge()
                delta = patom.RedistributedChargeAdded()
                to_add = []
                # Only report the spread when mean/median differ or there is variance.
                if round(mu, 5) != round(eta, 5) or round(sigma, 5) != 0.0:
                    to_add.append("mean: {:.5f}".format(mu))
                    to_add.append("median: {:.5f}".format(eta))
                    to_add.append("stdev: {:.5f}".format(sigma))
                if round(delta, 5) != 0.0:
                    to_add.append("added: {:.5f}".format(delta))
                extra_info += " " + ", ".join(to_add)
            if extra_info == ";":
                atm_dat["extra"] = ""
            else:
                atm_dat["extra"] = extra_info
        print(atm_fmt_str.format(**atm_dat), file=file)
    # bond printing
    print(" [ bonds ]", file=file)
    bnd_fmt_str = "{atoma:>5} {atomb:>5} {typeid} {extra}"
    # Sort so UNMAPPED (typeless) bonds come first, parameterised ones last.
    for bond in sorted(mol.GetBonds(), key=lambda x: x.HasType()):
        bnd_dat = {"atoma" : bond.GetAtoms()[0].GetName(),
                   "atomb" : bond.GetAtoms()[1].GetName(),
                   "extra" : "" }
        if not bond.HasType(): bnd_dat["typeid"] = "UNMAPPED"
        else:
            t = bond.GetType()
            # RTP files use the quartic form; switch to the linked type if needed.
            if t.GetType() != ix.BondType.Quartic: t = t.GetLinkedType()
            bnd_dat["typeid"] = "{:>.4f} {:>.4e}".format(t.GetIdealLength(), t.GetForceConstant())
        if pmol is not None and bond.HasType():
            pbond = pmol.GetBond(bond)
            other = ["gb_{}".format(tm.GetID()) for tm in pbond.GetMappedTypeCounts()]
            bnd_dat["extra"] = ", ".join(x for x in other if x != "gb_{}".format(bond.GetType().GetID()))
        else:
            bnd_dat["extra"] = ""
        if bnd_dat["extra"]:
            bnd_dat["extra"] = "; Other terms: " + bnd_dat["extra"]
        print(bnd_fmt_str.format(**bnd_dat), file=file)
    # exclusions printing: 1-4 pairs across aromatic rotatable bonds are excluded
    print(" [ exclusions ]", file=file)
    pair_fmt_str = "{atoma:>5} {atomb:>5}"
    for dhd in mol.GetDihedrals():
        atoms = dhd.GetAtoms()
        # Only consider proper dihedrals whose three consecutive bonds all exist.
        if not mol.HasBond(atoms[0], atoms[1]):
            continue
        if not mol.HasBond(atoms[1], atoms[2]):
            continue
        if not mol.HasBond(atoms[2], atoms[3]):
            continue
        rot_bond = mol.GetBond(atoms[1], atoms[2])
        if rot_bond.GetOrder() == ix.BondOrder.Aromatic:
            pair_dat = {"atoma" : atoms[3].GetName(),
                        "atomb" : atoms[0].GetName()}
            print(pair_fmt_str.format(**pair_dat), file=file)
    # angle printing
    print(" [ angles ]", file=file)
    ang_fmt_str = "{atoma:>5} {atomb:>5} {atomc:>5} {typeid} {extra}"
    for angle in sorted(mol.GetAngles(), key=lambda x: x.HasType()):
        atoms = angle.GetAtoms()
        ang_dat = {"atoma" : atoms[0].GetName(),
                   "atomb" : atoms[1].GetName(),
                   "atomc" : atoms[2].GetName(),
                   "extra" : "" }
        if not angle.HasType(): ang_dat["typeid"] = "UNMAPPED"
        else:
            t = angle.GetType()
            # RTP files use the cosine-harmonic form; switch via the linked type if needed.
            if t.GetType() != ix.AngleType.CosineHarmonic: t = t.GetLinkedType()
            ang_dat["typeid"] = "{:>.2f} {:>.2f}".format(t.GetIdealAngle(), t.GetForceConstant())
        if pmol is not None and angle.HasType():
            pangle = pmol.GetAngle(angle)
            other = ["ga_{}".format(tm.GetID()) for tm in pangle.GetMappedTypeCounts()]
            ang_dat["extra"] = ", ".join(x for x in other if x != "ga_{}".format(angle.GetType().GetID()))
        if ang_dat["extra"]:
            ang_dat["extra"] = "; Other terms: " + ang_dat["extra"]
        print(ang_fmt_str.format(**ang_dat), file=file)
    # improper printing
    print(" [ impropers ]", file=file)
    dhd_fmt_str = "{atoma:>5} {atomb:>5} {atomc:>5} {atomd:>5} {typeid} {extra}"
    for imp in mol.GetDihedrals():
        if not imp.HasType(): continue
        for t in imp.GetTypes():
            if t.GetType() != ix.DihedralType.Improper: continue
            atoms = imp.GetAtoms()
            imp_dat = {"atoma" : atoms[0].GetName(),
                       "atomb" : atoms[1].GetName(),
                       "atomc" : atoms[2].GetName(),
                       "atomd" : atoms[3].GetName(),
                       # Force constant converted from GROMOS to GROMACS units.
                       "typeid": "{:>.5f} {:>.5f}".format(t.GetIdealAngle(), t.GetForceConstant() * improper_correction),
                       "extra" : "" }
            print(dhd_fmt_str.format(**imp_dat), file=file)
    # dihedral printing: pick one representative dihedral per rotatable bond,
    # plus every additional dihedral that carries a type of its own
    done_dhds = []
    print(" [ dihedrals ]", file=file)
    for bnd in mol.GetBonds():
        dhds = _GetBondDihedrals(bnd)
        if not dhds: continue
        param_dhd = [x for x in dhds if x.HasType()]
        if not param_dhd: done_dhds.append(dhds[0])
        else: done_dhds.extend(param_dhd)
    for dhd in mol.GetDihedrals():
        if dhd.HasType() and dhd not in done_dhds: done_dhds.append(dhd)
    for dhd in sorted(done_dhds, key=lambda x: x.HasType()):
        atoms = dhd.GetAtoms()
        if not dhd.HasType():
            dhd_dat = {"atoma" : atoms[0].GetName(),
                       "atomb" : atoms[1].GetName(),
                       "atomc" : atoms[2].GetName(),
                       "atomd" : atoms[3].GetName(),
                       "extra" : "",
                       "typeid" : "UNMAPPED" }
            print(dhd_fmt_str.format(**dhd_dat), file=file)
        else:
            for t in dhd.GetTypes():
                dhd_dat = {"atoma" : atoms[0].GetName(),
                           "atomb" : atoms[1].GetName(),
                           "atomc" : atoms[2].GetName(),
                           "atomd" : atoms[3].GetName(),
                           "extra" : "" }
                # Only proper dihedral terms are written in this section.
                if t.GetType() == ix.DihedralType.Proper:
                    dhd_dat["typeid"] = "{:.4f} {:.4f} {}".format(t.GetPhaseShift(), t.GetForceConstant(), t.GetMultiplicity())
                    print(dhd_fmt_str.format(**dhd_dat), file=file)
    file.close()
def SaveITPFile(path, mol, pmol=None):
    """Write *mol* as a GROMACS ITP topology file at *path*.

    Sections written: [ moleculetype ], [ atoms ], [ bonds ], [ pairs ],
    [ angles ] and [ dihedrals ].  When *pmol* (a parameterised molecule)
    is given, mapping statistics are appended to each record as ``;``
    comments.

    The public signature is unchanged; the file handle is now managed by a
    ``with`` block so it is closed even if a getter or format call raises
    (the old open()/close() pair leaked the handle on error).
    """
    print("Saving molecule %s to ITP file at path %s." % (mol.GetName(), str(os.path.join(os.getcwd(), path))))
    path = Path(path)
    # Implicit hydrogens are folded into their heavy atom's mass below.
    h_mass = ix.GetPeriodicTable()["H"].GetAtomicMass()
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open('w') as file:
        # header printing
        header = """; File generated by the indigox package
; No guarantees are provided for the usefulness of this file
[ moleculetype ]
; Name nrexcl
TEST 3
[ atoms ]"""
        print(header, file=file)
        # atom printing
        atm_fmt_str = "{index:>5} {type:>6} {residx:>5} {resname:>8} {name:>7} {chargegroup:>5} {charge:>11.5f} {mass:>9.4f} {extra}"
        for atom in mol.GetAtoms():
            atm_dat = {"index": atom.GetIndex() + 1,
                       "type": atom.GetType().GetName() if atom.HasType() else "%%%",
                       "residx": atom.GetResidueID(),
                       "resname": atom.GetResidueName(),
                       "name": atom.GetName(),
                       "chargegroup": atom.GetChargeGroupID() + 1,
                       "charge": atom.GetPartialCharge(),
                       "mass": atom.GetElement().GetAtomicMass() + atom.GetImplicitCount() * h_mass,
                       "extra": ""
                       }
            if pmol is not None:
                patom = pmol.GetAtom(atom)
                extra_info = ";"
                if not len(patom.GetMappedCharges()):
                    extra_info += " UNMAPPED"
                else:
                    mu = patom.MeanCharge()
                    eta = patom.MedianCharge()
                    sigma = patom.StandardDeviationCharge()
                    delta = patom.RedistributedChargeAdded()
                    to_add = []
                    # only report statistics when the mapped charges actually vary
                    if round(mu, 5) != round(eta, 5) or round(sigma, 5) != 0.0:
                        to_add.append("mean: {:.5f}".format(mu))
                        to_add.append("median: {:.5f}".format(eta))
                        to_add.append("stdev: {:.5f}".format(sigma))
                    if round(delta, 5) != 0.0:
                        to_add.append("added: {:.5f}".format(delta))
                    extra_info += " " + ','.join(to_add)
                if extra_info == ";":
                    atm_dat["extra"] = ""
                else:
                    atm_dat["extra"] = extra_info
            print(atm_fmt_str.format(**atm_dat), file=file)
        # bond printing: unparameterised bonds sort first (False < True)
        print("\n[ bonds ]", file=file)
        bnd_fmt_str = "{atoma:>5} {atomb:>5} {typecode:>5} gb_{typeid} {extra}"
        for bond in sorted(mol.GetBonds(), key=lambda x: x.HasType()):
            bnd_dat = {"atoma": bond.GetAtoms()[0].GetIndex() + 1,
                       "atomb": bond.GetAtoms()[1].GetIndex() + 1,
                       "typecode": 2,
                       "typeid": bond.GetType().GetID() if bond.HasType() else "UNMAPPED"
                       }
            if pmol is not None and bond.HasType():
                pbond = pmol.GetBond(bond)
                other = ["gb_{}".format(tm.GetID()) for tm in pbond.GetMappedTypeCounts()]
                bnd_dat["extra"] = ", ".join(x for x in other if x != "gb_{}".format(bnd_dat["typeid"]))
            else:
                bnd_dat["extra"] = ""
            if bnd_dat["extra"]:
                bnd_dat["extra"] = "; Other terms: " + bnd_dat["extra"]
            print(bnd_fmt_str.format(**bnd_dat), file=file)
        # pairs printing: 1-4 pairs along bonded paths, skipping fully
        # aromatic paths
        print("\n[ pairs ]", file=file)
        pair_fmt_str = "{atoma:>5} {atomb:>5} 1"
        for dhd in mol.GetDihedrals():
            atoms = dhd.GetAtoms()
            if not mol.HasBond(atoms[0], atoms[1]):
                continue
            if not mol.HasBond(atoms[1], atoms[2]):
                continue
            if not mol.HasBond(atoms[2], atoms[3]):
                continue
            if ((mol.GetBond(atoms[0], atoms[1]).GetOrder() == ix.BondOrder.Aromatic)
                    and (mol.GetBond(atoms[1], atoms[2]).GetOrder() == ix.BondOrder.Aromatic)
                    and (mol.GetBond(atoms[2], atoms[3]).GetOrder() == ix.BondOrder.Aromatic)):
                continue
            # emit the pair with the lower atom index first
            if atoms[0].GetIndex() > atoms[3].GetIndex():
                pair_dat = {"atoma": atoms[3].GetIndex() + 1,
                            "atomb": atoms[0].GetIndex() + 1}
            else:
                pair_dat = {"atoma": atoms[0].GetIndex() + 1,
                            "atomb": atoms[3].GetIndex() + 1}
            print(pair_fmt_str.format(**pair_dat), file=file)
        # angle printing
        print("\n[ angles ]", file=file)
        ang_fmt_str = "{atoma:>5} {atomb:>5} {atomc:>5} {typecode:>5} ga_{typeid} {extra}"
        for angle in sorted(mol.GetAngles(), key=lambda x: x.HasType()):
            atoms = angle.GetAtoms()
            ang_dat = {"atoma": atoms[0].GetIndex() + 1,
                       "atomb": atoms[1].GetIndex() + 1,
                       "atomc": atoms[2].GetIndex() + 1,
                       "typecode": 2,
                       "typeid": angle.GetType().GetID() if angle.HasType() else "UNMAPPED",
                       "extra": ""
                       }
            if pmol is not None and angle.HasType():
                pangle = pmol.GetAngle(angle)
                other = ["ga_{}".format(tm.GetID()) for tm in pangle.GetMappedTypeCounts()]
                ang_dat["extra"] = ", ".join(x for x in other if x != "ga_{}".format(ang_dat["typeid"]))
            if ang_dat["extra"]:
                ang_dat["extra"] = "; Other terms: " + ang_dat["extra"]
            print(ang_fmt_str.format(**ang_dat), file=file)
        # dihedral printing: keep one representative dihedral per bond,
        # preferring parameterised ones, plus any other typed dihedrals
        print("\n[ dihedrals ]", file=file)
        dhd_fmt_str = "{atoma:>5} {atomb:>5} {atomc:>5} {atomd:>5} {typecode:>5} {typeid} {extra}"
        done_dhds = []
        for bnd in mol.GetBonds():
            dhds = _GetBondDihedrals(bnd)
            if not dhds:
                continue
            param_dhd = [x for x in dhds if x.HasType()]
            if not param_dhd:
                done_dhds.append(dhds[0])
            else:
                done_dhds.extend(param_dhd)
        for dhd in mol.GetDihedrals():
            if dhd.HasType() and dhd not in done_dhds:
                done_dhds.append(dhd)
        for dhd in sorted(done_dhds, key=lambda x: x.HasType()):
            atoms = dhd.GetAtoms()
            if not dhd.HasType():
                dhd_dat = {"atoma": atoms[0].GetIndex() + 1,
                           "atomb": atoms[1].GetIndex() + 1,
                           "atomc": atoms[2].GetIndex() + 1,
                           "atomd": atoms[3].GetIndex() + 1,
                           "extra": "",
                           "typecode": "",
                           "typeid": "UNMAPPED"
                           }
                print(dhd_fmt_str.format(**dhd_dat), file=file)
            else:
                for t in dhd.GetTypes():
                    dhd_dat = {"atoma": atoms[0].GetIndex() + 1,
                               "atomb": atoms[1].GetIndex() + 1,
                               "atomc": atoms[2].GetIndex() + 1,
                               "atomd": atoms[3].GetIndex() + 1,
                               "extra": ""
                               }
                    # gd_ = proper (typecode 1), gi_ = improper (typecode 2)
                    if t.GetType() == ix.DihedralType.Proper:
                        dhd_dat["typecode"] = 1
                        dhd_dat["typeid"] = "gd_{}".format(t.GetID())
                    else:
                        dhd_dat["typecode"] = 2
                        dhd_dat["typeid"] = "gi_{}".format(t.GetID())
                    print(dhd_fmt_str.format(**dhd_dat), file=file)
def _GetBondDihedrals(bond):
atoms = bond.GetAtoms()
params = []
for dhd in atoms[0].GetDihedrals():
dhd_atms = dhd.GetAtoms()
if ((dhd_atms[1] == atoms[0] and dhd_atms[2] == atoms[1])
or (dhd_atms[2] == atoms[0] and dhd_atms[1] == atoms[1])):
params.append(dhd)
return params
def SavePDBFile(path, mol):
    """Write *mol* to a PDB-format file at *path*.

    Emits a TITLE record, one HETATM record per atom (coordinates are
    converted from nm to Angstrom), and CONECT records for every bond
    (listed from both endpoints).  Parent directories are created as
    needed.  The handle is managed by ``with`` so it is closed even if a
    getter raises mid-write (the old open()/close() pair leaked it).
    """
    print("Saving molecule %s to PDB file at path %s." % (mol.GetName(), str(os.path.join(os.getcwd(), path))))
    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open('w') as file:
        header = """TITLE Structure for molecule {}""".format(mol.GetName())
        print(header, file=file)
        for atom in mol.GetAtoms():
            atom_info = ['HETATM']
            atom_info.append('{:>5} '.format(atom.GetIndex() + 1))
            atom_info.append('{:>4} '.format(atom.GetName()))
            atom_info.append('{:>4} '.format(atom.GetResidueName()))  # residue name
            atom_info.append('{:>4} '.format(atom.GetResidueID()))  # residue number
            # convert nm to angstroms
            atom_info.append('{:>8.3f}'.format(atom.GetX() * 10))
            atom_info.append('{:>8.3f}'.format(atom.GetY() * 10))
            atom_info.append('{:>8.3f}'.format(atom.GetZ() * 10))
            atom_info.append('{:>6.2f}'.format(1.00))  # occupancy (fixed)
            atom_info.append('{:>6.2f} '.format(0.00))  # temperature factor (fixed)
            atom_info.append('{:>2}'.format(atom.GetElement().GetSymbol().upper()))
            print(''.join(atom_info), file=file)
        # CONECT records: record each bond under both of its endpoints
        bonds_raw = defaultdict(list)
        for bond in mol.GetBonds():
            atoms = bond.GetAtoms()
            bonds_raw[atoms[0].GetIndex() + 1].append(atoms[1].GetIndex() + 1)
            bonds_raw[atoms[1].GetIndex() + 1].append(atoms[0].GetIndex() + 1)
        for begin in sorted(bonds_raw):
            bond_info = ['CONECT']
            bond_info.append('{:>5}'.format(begin))
            for end in sorted(bonds_raw[begin]):
                bond_info.append('{:>5}'.format(end))
            print(''.join(bond_info), file=file)
def SaveIXDFile(path, mol):
    """Write formal-charge / bond-order data for *mol* to an IXD file.

    Produces one ``ATOM index charge implicit_H_count`` line per atom with a
    non-zero formal charge, then a ``MOLECULE total_charge`` line, then one
    ``BOND a b order`` line per non-single bond.  The handle is managed by
    ``with`` so it is closed even if a getter raises mid-write (the old
    open()/close() pair leaked it).
    """
    print("Saving molecule %s to IXD file at path %s." % (mol.GetName(), str(os.path.join(os.getcwd(), path))))
    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open('w') as file:
        tot_fc = 0
        for atom in mol.GetAtoms():
            if atom.GetFormalCharge() == 0:
                continue
            tot_fc += atom.GetFormalCharge()
            print("ATOM {} {} {}".format(atom.GetIndex() + 1, atom.GetFormalCharge(), atom.GetImplicitCount()), file=file)
        print("MOLECULE {}".format(tot_fc), file=file)
        for bond in mol.GetBonds():
            # single bonds are the default and are not written out
            if bond.GetOrder() == ix.BondOrder.Single:
                continue
            print("BOND {} {} {}".format(bond.GetAtoms()[0].GetIndex() + 1, bond.GetAtoms()[1].GetIndex() + 1, int(bond.GetOrder())), file=file)
# --- file: Ag/BarChartRace.py ---
from manimlib import *
import scipy.stats as ss
import colorsys
def get_coords_from_csvdata(file_name):
    """Read ``<file_name>.csv`` and return its rows as lists of strings.

    Parameters
    ----------
    file_name : str
        Path to the CSV file, given WITHOUT the ``.csv`` extension.

    Returns
    -------
    list[list[str]]
        One inner list per CSV row, every cell as a string.
    """
    import csv
    # The with-statement already closes the file; the explicit close() in
    # the previous version was redundant.
    with open(f'{file_name}.csv', 'r', encoding='UTF-8') as csv_file:
        return list(csv.reader(csv_file))
class TheBars(VGroup, ValueTracker):
    """One horizontal bar of a bar-chart race: the rectangle, a name label
    on its left and a numeric value label on its right.

    The tracked value (ValueTracker) drives both the bar width (via
    ``bar_updater``) and the number label (via its updater).
    """
    CONFIG = {
        "bar_height": None,
        "name_size": 30,
        "bar_origin": 2.6*UP + 4.5*LEFT,
        "bar_length": 9,
        "bar_opacity": 0.9,
        "bar_color": None,
        "bar_stroke_width": 1.238,
        "min_length": 1e-2,
        "num_txt_buff": 0.96,
        "deci_config_nums": {
            "num_decimal_places": 0,
            "font_size": 30,
        }
    }

    def __init__(self, name, value, max_val, **kwargs):
        VGroup.__init__(self, **kwargs)
        ValueTracker.__init__(self, value, **kwargs)
        self.init_bar(name, value, max_val)

    def init_bar(self, name, value, max_val):
        """Build the rectangle and its two labels, and wire the updaters."""
        bar = self.the_bar(self.value_conversion(value, max_val))
        text = Text(str(name), font_size=self.name_size)
        num_txt = DecimalNumber(self.get_value(), **self.deci_config_nums)
        self.bar = bar
        self.text = text
        self.num_txt = num_txt
        self.text.add_updater(self.text_updater)
        self.num_txt.add_updater(self.num_txt_updater)
        self.add(self.bar, self.text, self.num_txt,)

    def the_bar(self, length):
        """Return the bar rectangle with the given on-screen *length*."""
        return Rectangle(
            height=self.bar_height,
            width=length,
            fill_color=self.bar_color,
            # BUG FIX: this keyword was misspelled "fill_opciaty" and was
            # therefore never applied as fill opacity.  The effective
            # opacity is still governed by set_opacity() below.
            fill_opacity=1,
            stroke_width=self.bar_stroke_width,
        ).set_opacity(self.bar_opacity)

    def text_updater(self, text):
        # name label hugs the bar's left edge and fades with it
        text.next_to(self.bar, LEFT, buff=0.25)
        text.set_opacity(self.bar.get_opacity())

    def num_txt_updater(self, deci_txt):
        # value label follows the bar's right edge and shows the tracked value
        deci_txt.set_value(self.get_value())
        deci_txt.next_to(self.bar, RIGHT, buff=self.num_txt_buff)
        deci_txt.set_opacity(self.bar.get_opacity())

    def bar_updater(self, value, max_value):
        """Stretch the bar (about its left edge) to represent *value*."""
        length = self.value_conversion(value, max_value)
        bar = self[0]
        bar_left = bar.get_left()
        bar.stretch_to_fit_width(length, about_point=bar_left)
        self.set_value(value)

    def value_conversion(self, val, max_val):
        """Map *val* to a screen length, never shorter than min_length."""
        return max(self.min_length, val*self.bar_length/max_val)
class TheLines(TheBars):
    """A vertical ruler line with a numeric label, marking one fixed value
    on the horizontal bar axis.  Bar geometry settings are inherited from
    TheBars via CONFIG.
    """
    CONFIG = {
        "lines_height": None,
        "lines_origin": ORIGIN,
        "text_direction": UP,
        "deci_config_ruler": {
            "num_decimal_places": 0,
            "font_size": 50,
        }
    }

    def __init__(self, value, max_val, **kwargs):
        VGroup.__init__(self, **kwargs)
        ValueTracker.__init__(self, value, **kwargs)
        self.init_line(value, max_val)
        self.set_value(value)

    def init_line(self, value, max_val):
        """Create the ruler line and its value label."""
        half_height = self.lines_height / 2
        ruler = Line(start=UP * half_height, end=DOWN * half_height)
        ruler.shift(self.lines_origin)
        # the horizontal position encodes the value on the bar axis
        ruler.move_to(
            self.bar_origin + RIGHT * self.value_conversion(value, max_val),
            coor_mask=np.array([1, 0, 0])
        )
        label = DecimalNumber(value, **self.deci_config_ruler)
        self.line = ruler
        self.line_txt = label
        label.add_updater(self.line_txt_updater)
        self.add(ruler, label)

    def line_txt_updater(self, label):
        """Keep the label beside the line, showing the tracked value."""
        label.set_value(self.get_value())
        label.next_to(self.line, self.text_direction, buff=0.1)
        label.set_opacity(self.line.get_opacity())

    def line_updater(self, value, max_value):
        """Slide the whole group horizontally to represent *value*."""
        self.move_to(
            self.bar_origin + RIGHT * self.value_conversion(value, max_value),
            coor_mask=np.array([1, 0, 0])
        )
        self.set_value(value)
class TheIcons(ImageMobject):
    """Icon image pinned to the right end of *bar*, fading together with it."""

    def __init__(self, path, bar, **kwargs):
        ImageMobject.__init__(self, path, **kwargs)

        def follow_bar(img):
            img.next_to(bar, RIGHT, buff=0.1618)
            img.set_opacity(bar.get_opacity())

        self.add_updater(follow_bar)
class BarChartRace(VGroup):
    """Animated bar-chart race: one TheBars per legend entry, plus optional
    ruler TheLines and per-bar icons.

    NOTE(review): `random` and `np` are assumed to be provided by
    ``from manimlib import *`` at the top of the file — confirm for the
    manimlib version in use.
    """
    CONFIG = {
        "bars_origin": 2.9*UP + 4.5*LEFT,
        "bars_height": 0.5,
        "spacing": 0.6,
        "datas_value_max": None,
        "value_max": 10000,
        "bar_num": 1,
        "value_0": 1e-2,
        "lines_opacity": 0.236,
        "lightness": 0.9,
        "color_seed": 100,
        "star_anim": False,
        "add_lines": True,
        "add_icons": True,
        "path": "GNI_icon/",
    }
    def __init__(
        self,
        legends,
        data_0 = None,
        **kwargs
    ):
        VGroup.__init__(self, **kwargs)
        self.init_bars(legends, data_0,)
        if self.add_lines:
            self.init_lines(data_0)
    def init_bars(self, legends, data_0,):
        """Create one bar per legend, stacked by descending initial value."""
        if data_0 is None:
            data_0 = [self.value_0]*len(legends)
        # rank 0 = largest value; each rank is one `spacing` lower on screen
        rand_serial =len(data_0)-ss.rankdata(data_0, method='ordinal')
        max_value = self.find_max_value(data_0)
        random.seed(self.color_seed)  # reproducible bar colours
        icons = Group()
        for i,legend in enumerate(legends):
            cust_color = self.random_color()
            if self.star_anim:
                # start-animation mode: every bar begins at the floor value
                one_bar = TheBars(
                    legend,
                    self.value_0,
                    max_value,
                    bar_height = self.bars_height,
                    bar_origin = self.bars_origin,
                    bar_color = cust_color,
                )
            else:
                one_bar = TheBars(
                    legend,
                    data_0[i],
                    max_value,
                    bar_height = self.bars_height,
                    bar_origin = self.bars_origin,
                    bar_color = cust_color,
                )
            if self.add_icons:
                the_icon = TheIcons(self.path+str(legend), one_bar[0])
                the_icon.set_width(0.65)
                icons.add(the_icon)
            bottom_down = one_bar.bar_origin + DOWN*self.spacing*(rand_serial[i])
            # bars ranked below the frame are parked just under the bottom edge
            if bottom_down[1] < (BOTTOM+self.bars_height*DOWN)[1]:
                bottom_down = one_bar.bar_origin*RIGHT + BOTTOM + 2*self.bars_height*DOWN
            one_bar.bar.move_to(bottom_down, aligned_edge=LEFT)
            one_bar.set_opacity(0)  # bars are revealed by rank_bars_anim
            self.add(one_bar)
        self.icons = icons
    def init_lines(self, data_0):
        """Create vertical ruler lines at multiples of value_max."""
        self.line_nums = int(self.datas_value_max/self.value_max)
        max_value = self.find_max_value(data_0)
        # num_x grows by a custom progression so lines thin out at larger values
        num_x = [0, 1, 2, 3, 4]
        while num_x[-1]<self.line_nums:
            num_x.append(num_x[-1]+num_x[-3])
            num_x.append(2*num_x[-1]-num_x[-2])
        for i in range(len(num_x)):
            line = TheLines(
                self.value_max*num_x[i],
                max_value,
                lines_height = self.spacing*(self.bar_num-1)+self.bars_height+0.1,
                lines_origin = self[0].bar_origin + (DOWN*self.spacing*(self.bar_num-1))/2,
                bar_origin = self.bars_origin,
            )
            line.set_opacity(0)
            self.add(line)
        self.num_x = num_x
    def rank_bars_anim(self, values):
        """Re-rank, re-stretch and re-mask every bar for the new *values*."""
        rand_serial =len(values)-ss.rankdata(values, method='ordinal')
        # ascending-order variant:
        # rand_serial =ss.rankdata(self.nums)-1
        max_value = self.find_max_value(values)
        for i, the_bar in enumerate(self[:len(values)]):
            if values[i] == 0:
                # zero would collapse the bar; substitute the floor value
                value = self.value_0
            else:
                value = values[i]
            the_bar.bar_updater(value, max_value)
            bottom_down = the_bar.bar_origin + DOWN*self.spacing*(rand_serial[i])
            if bottom_down[1] < (BOTTOM+self.bars_height*DOWN)[1]:
                bottom_down = the_bar.bar_origin*RIGHT + BOTTOM + 2*self.bars_height*DOWN
            the_bar.move_to(bottom_down,coor_mask=np.array([0, 1 ,0]))
            # only the top `bar_num` ranks stay visible
            mask_position = the_bar.bar_origin + DOWN*self.spacing*self.bar_num
            if the_bar.get_center()[1] > mask_position[1]:
                the_bar.set_opacity(the_bar.bar_opacity)
            else:
                the_bar.set_opacity(0)
    def rank_lines_anim(self, values):
        """Slide ruler lines as the axis rescales and thin out the visible ones."""
        max_value = self.find_max_value(values)
        in_lines_index = 0
        for the_line in self[len(values):]:
            the_line.line_updater(the_line.get_value(),max_value)
            # only lines still inside the bar area stay visible
            if the_line.get_center()[0] < (the_line.bar_origin+the_line.bar_length*RIGHT)[0]:
                in_lines_index+=1
                the_line.set_opacity(self.lines_opacity)
            else:
                the_line.set_opacity(0)
        # with five or more visible lines, hide all but a sparse subset
        if in_lines_index>=5:
            if in_lines_index%2 == 1:
                for jk in range(in_lines_index):
                    if jk not in [0, in_lines_index-1, in_lines_index-3]:
                        self[jk+len(values)].set_opacity(0)
            if in_lines_index%2 == 0:
                for jk in range(in_lines_index):
                    if jk not in [0, in_lines_index-1, in_lines_index-2, in_lines_index-4]:
                        self[jk+len(values)].set_opacity(0)
    def find_max_value(self, values):
        # axis maximum never drops below value_max, so early frames don't over-zoom
        return max(*values, self.value_max)
    def random_color(self):
        """Return a random "#rrggbb" colour at the configured lightness."""
        col = list(map(
            lambda x: hex(x).split('x')[1].zfill(2),
            tuple(round(i * 255) for i in colorsys.hsv_to_rgb(
                random.random(),
                random.random(),
                self.lightness)
            )
        )
        )
        return "#%s%s%s"%(col[0],col[1],col[2])
class PlotBarChart(Scene):
    """Scene that animates a bar-chart race from the GNI CSV data file."""

    def construct(self):
        data = get_coords_from_csvdata("Ag/data_files/GNI-data")
        dataArray = np.array(data)
        row = dataArray.shape[0]
        column = dataArray.shape[1]
        print(row, column)
        n_row = row
        star = 0  # first data column to animate
        end = 3   # one past the last data column to animate
        title = dataArray[1:n_row, 1]
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # it was merely an alias for the builtin float, used here instead.
        years = dataArray[0, 2:column].astype(float)
        datas = dataArray[1:n_row, 2:column].astype(float)
        data_nums = [nums for nums in [datas[:, i] for i in range(star, end)]]
        datas_nums_max = datas.max()
        year_nums = years[star:end]
        year_val = ValueTracker(year_nums[0])
        year_text = DecimalNumber(
            year_val.get_value(),
            num_decimal_places=0,
            group_with_commas=False,
            font_size=250,
            color=BLUE,
        ).to_corner(DR).shift(UP*0.5)
        year_text.add_updater(lambda mob: mob.set_value(year_val.get_value()))
        bars = BarChartRace(title, data_nums[0], datas_value_max=datas_nums_max,)
        self.add(bars, year_text, bars.icons,)
        dur_time = 2
        for i, data_num in enumerate(data_nums):
            # NOTE(review): method-plus-args play() call is the legacy manim
            # animation API — confirm it matches the manimlib version in use.
            self.play(
                # bars.rank_lines_anim, data_num,
                bars.rank_bars_anim, data_num,
                year_val.set_value, year_nums[i],
                rate_func=linear,
                run_time=dur_time,
            )
        self.wait(1)
from Common import BS
from MapData import MapDataElement
from PyQt5.QtCore import qDebug
def parseBS(s: str):
    """Parse {{Routemap}} wiki route-diagram markup into rows of cells.

    Cells are separated by ``~~`` / ``! !`` / explicit backslashes, rows by
    newlines.  Cells containing ``!~`` become lists of overlay parts,
    ``leer`` becomes an empty cell, and rows consisting of a single empty
    cell are dropped.  Input without a Routemap template is delegated to
    parseBSOld().
    """
    if "{{Routemap" not in s:
        return parseBSOld(s)
    rows = []
    current = []
    cell_chars = []

    def flush_cell():
        current.append(''.join(cell_chars).strip())
        cell_chars.clear()

    # normalise the two alternative cell separators to a backslash
    normalized = s.replace('~~', '\\').replace('! !', '\\')
    for ch in normalized:
        if ch == '\\':
            flush_cell()
        elif ch == '\n':
            flush_cell()
            rows.append(current)
            current = []
        else:
            cell_chars.append(ch)
    flush_cell()
    rows.append(current)
    for parsed_row in rows:
        for idx in range(len(parsed_row)):
            if parsed_row[idx].count('!~') > 0:
                parsed_row[idx] = parsed_row[idx].split('!~')
            if parsed_row[idx] == 'leer':
                parsed_row[idx] = ''
    return [r for r in rows if not (len(r) == 1 and r[0] == '')]
def parseBSOld(s: str):
    """Parse old-style ``{{BSn|...}}`` route-diagram template lines into rows.

    Each ``BSn`` line contributes one row of *n* cells; ``O...=x`` overlay
    cells are merged into the preceding cell as a list, and any cells past
    the column count are joined into the last column as free text.  Returns
    ``[]`` (after logging via qDebug) if any line fails to parse.
    """
    try:
        rows = []
        for l in s.split('\n'):
            l = l.strip().strip("{}")
            if not l or not l.startswith("BS"):
                continue
            if l.startswith("BS-map"):
                continue
            # BUG FIX: the column count follows "BS" and may have more than
            # one digit (e.g. BS10); the old code read only l[2], silently
            # mis-parsing wide templates.  Read all leading digits instead.
            digits = ""
            for ch in l[2:]:
                if not ch.isdigit():
                    break
                digits += ch
            count = int(digits)  # raises ValueError for plain "BS|", as before
            row = l.split("|")[1:]
            i = 0
            while i < len(row):
                if row[i].startswith("O") and i > 0:
                    # overlay cell: fold its value into the previous cell
                    x = row[i].split('=')[1]
                    row[i - 1] = row[i - 1] + [x] if isinstance(row[i - 1], list) else [row[i - 1], x]
                    row = row[:i] + row[i + 1:]
                else:
                    i = i + 1
            if len(row) > count:
                # excess cells are descriptive text; collapse into last column
                row[count - 1] = ' '.join(row[count:]).strip()
                row = row[:count]
            rows.append(row)
        return rows
    except Exception as e:
        qDebug('parse {}'.format(e).encode('utf-8'))
        return []
def filterBS(rows):
    """Convert parsed BS-template *rows* into a flat list of MapDataElement.

    Cells that createWithXY() accepts become elements; non-icon text cells
    are attached to a neighbouring element as its label.  Rows are then
    horizontally centred against the widest row.  Returns the flat element
    list; *rows* is mutated in place.
    """
    c = []
    rowsEl, maxRowWidth = [], 0
    for y in range(len(rows)):
        x = 0
        rowEl = []
        while x < len(rows[y]):
            d = rows[y][x]
            if not d:
                x = x + 1
                continue
            # if isinstance(d, str) and not (d[0].isascii() and d[0].isalpha()):
            #     x = x + 1
            #     continue
            el = MapDataElement.createWithXY(x, y, d)
            if el:
                c.append(el)
                rowEl.append(el)
            elif x == 0:
                # leading cell is not an icon: accumulate text until the first
                # icon cell in the row, then attach the text to its left side
                d = str(d)
                for xx in range(1, len(rows[y])):
                    el = MapDataElement.createWithXY(x, y, rows[y][xx])
                    if el:
                        el.text, el.textAlign, el.textPlacement = d, 'r', 'l'
                        c.append(el)
                        rowEl.append(el)
                        rows[y][0:xx] = []
                        break
                    else:
                        d = d + str(rows[y][xx])
            elif c:
                # non-icon cell after icons: attach as the last element's
                # right-hand label
                el: MapDataElement = c[-1]
                el.text, el.textAlign, el.textPlacement = str(d), 'l', 'r'
            x = x + 1
        if rowEl:
            maxRowWidth = max(rowEl[-1].x, maxRowWidth)
        rowsEl.append(rowEl)
    maxRowWidth = maxRowWidth // 2 * 2 + 1  # keep it odd
    for row in rowsEl:
        if not row:
            continue
        # centre each row against the widest one
        prepend = (maxRowWidth - row[-1].x) // 2
        if prepend > 0:
            for el in row:
                el.x = el.x + prepend
        if row[-1].text:
            row[-1].textX = (maxRowWidth - row[-1].x) * BS
    return c
if __name__ == "__main__":
print(parseBSOld("""
{{出典の明記|date=2019年9月}}
{{Infobox 鉄道路線
|路線名 = 上海軌道交通3号線(明珠線)
|路線色 = #ffd100
|ロゴ = SMLine3.svg
|ロゴサイズ=100px
|画像 = Shanghai Metro Line 3 AC03 Train WestYangangRoad.jpg
|画像サイズ = 300px
|画像説明 = 03A01型
|通称 = 軽軌(轻轨)
|国 = {{CHN}}
|種類 = [[地下鉄]]
|路線網 = [[上海軌道交通]]
|起点 = [[上海南駅|上海南站駅]]
|終点 = [[江楊北路駅]]
|駅数 = 29[[駅]]
|路線記号 = M3
|路線番号 = 3
|路線色3={{Color|#ffd100|■}}[[黄色]]
|開業 = 2000年12月16日
|所有者 = 上海軌道交通明珠線发展有限公司、上海軌道交通宝山線发展有限公司
|運営者 = [[上海轨道交通]]
|路線構造 = 高架・地上・地下
|路線距離 = 40.340 [[キロメートル|km]]
|営業キロ= ? km
|軌間 = 1,435 [[ミリメートル|mm]] ([[標準軌]])
|線路数 = [[複線]]
|複線区間=全区間
|電化区間=全区間
|電化方式 = [[直流電化|直流]]1,500[[ボルト (単位)|V]] [[架空電車線方式|架線集電方式]]
|最高速度 = 80[[キロメートル毎時|km/h]]
|路線図 = Shanghai Metro Line 3.svg
|路線図名 = 上海軌道交通3号線
}}
{{BS-map|title=営業区間|title-bg=#ffd100|title-color=black|collapse=yes
|map=
{{BS3|uKBHFa|||0|[[江楊北路駅|江楊北路]]||}}
{{BS3|uKRWgl|uKRW+r|||||}}
{{BS3|utSTRa|uKDSTe|||江楊北路車両基地||}}
{{BS5|STRq|umtKRZ|STRq|||||[[中華人民共和国鉄道部|国鉄]]宝鋼支線|}}
{{BS3|utBHF|||3|[[鉄力路駅|鉄力路]]||}}
{{BS3|uhtSTRe||||||}}
{{BS3|uhBHF|||6|[[友誼路駅 (上海市)|友誼路]]||}}
{{BS3|uhBHF|||8|[[宝楊路駅|宝楊路]]||}}
{{BS3|uhBHF|||11|[[水産路駅|水産路]]||}}
{{BS3|uhBHF|||13|[[淞浜路駅|淞浜路]]||}}
{{BS3|uhBHF|||16|[[張華浜駅|張華浜]]||}}
{{BS3|uhBHF|||18|[[淞発路駅|淞発路]]||}}
{{BS3|uhBHF|||22|[[長江南路駅|長江南路]]||}}
{{BS5|STRq|umhKRZ|STRq|||||国鉄南何支線|}}
{{BS3|uhBHF|||24|[[殷高西路駅|殷高西路]]||}}
{{BS3|uhBHF|||27|[[江湾鎮駅|江湾鎮]]||}}
{{BS3|uhBHF|||30|[[大柏樹駅|大柏樹]]||}}
{{BS3|uhBHF|||33|[[赤峰路駅|赤峰路]]||}}
{{BS5|utSTRq|mhKRZt|O2=uBHF|utSTRq|||35|[[虹口足球場駅|虹口足球場]]|{{Color|#009dd9|●}}[[上海軌道交通8号線|8号線]]|}}
{{BS3|uhBHF|||37|[[東宝興路駅|東宝興路]]||}}
{{BS5||uhABZg+l|uhSTRq|uhtSTRaq|utSTR+r|O5=POINTERf@g|||{{Color|#660066|●}}[[上海軌道交通4号線|4号線]]|}}
{{BS5||uhBHFe|||uLSTR|40|[[宝山路駅|宝山路]]||}}
{{BS3|uSTR|ENDEa|utENDEa|||↓{{Color|#660066|●}}4号線共用区間|}}
{{BS3|uBHF|O1=HUBaq|BHF|O2=HUBq|utÜST|O3=HUBlg|43|[[上海駅 (上海地下鉄)|上海火車站]]||}}
{{BS5|utSTRq|uKRZt|mKRZt|utABZg+r|O4=HUB||||{{Color|#EA0437|●}}[[上海軌道交通1号線|1号線]]|}}
{{BS5|STRq|umhKRZa|STRr|utBHF|O4=HUBe||||国鉄[[京滬線]]|}}
{{BS3|uhBHF||uLSTR|46|[[中潭路駅|中潭路]]||}}
{{BS5|utSTRq|mhKRZt|O2=uBHF|utSTRq|||49|[[鎮坪路駅|鎮坪路]]|{{Color|#ff7200|●}}[[上海軌道交通7号線|7号線]]|}}
{{BS5|utSTRq|mhKRZt|O2=uBHF|utSTRq|||52|[[曹楊路駅|曹楊路]]|{{Color|#841c21|●}}[[上海軌道交通11号線|11号線]]|}}
{{BS3|uhBHF|||54|[[金沙江路駅|金沙江路]]||}}
{{BS5|WASSERq|uhKRZW|WASSERq|||||[[呉淞江|蘇州河]]|}}
{{BS5|utSTRq|mhKRZt|O2=uBHF|utSTRq|||57|[[中山公園駅|中山公園]]|{{Color|#87D300|●}}[[上海軌道交通2号線|2号線]]|}}
{{BS3|uhBHF|||59|[[延安西路駅|延安西路]]||}}
{{BS3|uhSTR|||||↑{{Color|#660066|●}}4号線共用区間|}}
{{BS5|utSTRq|mhKRZt|O2=uBHF|utSTRq|||62|[[虹橋路駅|虹橋路]]|{{Color|#c0a8e0|●}}[[上海軌道交通10号線|10号線]]|}}
{{BS3|uhABZgl|uhSTR+r|O2=POINTERf@g||||{{Color|#660066|●}}4号線|}}
{{BS3|uhSTR|htSTRa|O2=utSTRa|||||}}
{{BS5||O1=HUBrg|uhBHF|O2=HUBq|utBHF|O3=HUBeq|||64|[[宜山路駅|宜山路]]||}}
{{BS5|utBHFq|O1=HUBe|uhKRZt|utKRZt|||||{{Color|#78C7EB|●}}[[上海軌道交通9号線|9号線]]|}}
{{BS5||uhSTR|utSTR|uLSTR|||||}}
{{BS5|utSTR+l|uhKRZt|utTHSTt|utSTRr|uLSTR|||{{Color|#EA0437|●}}1号線・{{Color|#660066|●}}4号線 [[上海体育館駅|上海体育館]]|}}
{{BS5|uLSTR|uhBHF|utSTRl|utSTRq|utSTRr|67|[[漕渓路駅|漕渓路]]||}}
{{BS3|uhBHFe|||69|[[竜漕路駅|龍漕路]]||}}
{{BS3|uBHF|||72|[[石竜路駅|石龍路]]||}}
{{BS3|uSTR2|O1=uSTRl|uSTRq|O2=uSTRc3|uSTR+r||||}}
{{BS3|uSTRc1|O1=ENDEa|uSTR+4|uKDSTe|||石龍路留置線|}}
{{BS5|uLSTR|uKRW+l|O2=STR|uKRWgr|||||国鉄連絡線({{Color|#EA0437|●}}1号線へ連絡)|}}
{{BS5|utBHF|O1=HUBaq|BHF|O2=HUBq|uKBHFe|O3=HUBeq|||74|[[上海南駅 (上海地下鉄)|上海南站]]||}}
{{BS5|utSTR|STR||||||国鉄[[滬杭線]]|}}
{{BS5|utSTR|||||||{{Color|#EA0437|●}}1号線|}}
}}
'''上海軌道交通3号線'''(シャンハイきどうこうつう3ごうせん、、{{中文表記|上海轨道交通3号线}}、{{英文表記|Shanghai Metro Line 3}})、別名'''明珠線'''(めいじゅせん、{{中文表記|明珠线}})と宝山線は、[[上海軌道交通]]の[[鉄道路線|路線]]。現地では'''軽軌'''({{Lang|zh|轻轨}})と呼ばれる。
== 概要 ==
[[2000年]]12月26日に開業。北延長第1期工程[[2006年]]12月18日開通後、営業距離は40.3キロになり、一時は上海市内で最も長い地下鉄路線であった。
全区間ATO制御による運転である。地下鉄の仲間であるが、全区間高架線を走る。
=== 路線データ ===
* 路線距離:40.340km
* [[軌間]]:1435mm
* 駅数:29駅(起終点駅含む)
* 複線区間:全線
* 電化区間:全線(直流1500V)
<!--* [[閉塞 (鉄道)|閉塞方式]]:?-->
* 最高速度: 80km/h
* 地上区間:[[上海南駅]] - [[石竜路駅]]東北、[[中潭路駅]]西 - [[宝山路駅]]西、[[鉄力路駅]]西-[[江楊北路駅]]
* 地下区間:[[友誼路駅]]西 - [[鉄力路駅]]西
* 高架区間:[[石竜路駅]]東北 - [[中潭路駅]]西、[[宝山路駅]]西 - [[江湾鎮駅]] - [[友誼路駅]]西
== 歴史 ==
* [[2000年]][[12月16日]]<ref>{{Cite journal|和書 |date = 2001-05-01 |title = Overseas Railway Topics |journal = 鉄道ジャーナル |issue = 5 |volume = 35 |publisher = 鉄道ジャーナル社 |page = 119 }}</ref>:[[上海南駅駅]] - [[江湾鎮駅]]が開通。
* [[2006年]][[12月18日]]:[[江湾鎮駅]] - [[江楊北路駅]]が開通。
* [[2014年]]:新型車両・[[上海軌道交通03A02型]]電車を導入
* [[2015年]][[7月10日]]:新型車両・[[上海軌道交通03A02型]]電車の営業開始
== 車両 ==
{{Seealso|アルストム・メトロポリス}}
; [[上海軌道交通03A01型]]電車
* 製造会社:[[アルストム]]、[[南京浦鎮車輛廠]]
* 設計最高速度:80km/h
* ATO制御
* 車両編成:6両編成(制御車+4×中間電動車+制御車)
* 車両:長さ23.54m、幅3m
* 定員:310人
; [[上海軌道交通03A02型]]電車
* 製造会社:[[中車長春軌道客車]]、[[上海電気]]
* 設計時速:80km/h
* [[編成 (鉄道)|車両編成]]:6両(制御車+中間電動車×4+制御車(Tc+Mp+M+M+Mp+Tc))
* 寸法:長さ23.54m、幅3m
* 定員:310人
== 路線 ==
{| class=wikitable
|-
!colspan="3"|駅名
!rowspan="2" style="border-bottom:solid 3px #ffd100;"|駅間<br/>キロ
!rowspan="2" style="border-bottom:solid 3px #ffd100;"|営業<br/>キロ
!rowspan="2" style="border-bottom:solid 3px #ffd100;"|接続路線・備考
!rowspan="2" style="border-bottom:solid 3px #ffd100;"|所在地
|-
!style="border-bottom:solid 3px #ffd100;"|<small>[[日本語]]</small>
!style="border-bottom:solid 3px #ffd100;"|<small>[[簡体字]][[中国語]]</small>
!style="border-bottom:solid 3px #ffd100;"|<small>[[英語]]</small>
|-
|[[上海南駅駅|上海南站駅]]
|{{lang|zh|上海南站}}
|Shanghai South Railway Station
|align=right|0.000
|align=right|0.000
|上海軌道交通:[[ファイル:SML1.svg|10px|1|link=]] [[上海軌道交通1号線|1号線]]<br />[[中国鉄路総公司]]:[[滬昆線]]・{{Color|gray|■}}[[上海軌道交通22号線|金山支線]]([[上海南駅]])
|align=center rowspan=5|[[徐匯区]]
|-
|[[石竜路駅|石龍路駅]]
|{{lang|zh|石龙路站}}
|Shilong Road
|align=right|1.314
|align=right|1.314
|
|-
|[[竜漕路駅|龍漕路駅]]
|{{lang|zh|龙漕路站}}
|Longcao Road
|align=right|1.484
|align=right|2.798
|上海軌道交通:[[ファイル:SML12.svg|10px|12|link=]] [[上海軌道交通12号線|12号線]]
|-
|[[漕渓路駅]]
|{{lang|zh|漕溪路站}}
|Caoxi Road
|align=right|1.025
|align=right|3.823
|
|-
|[[宜山路駅]]
|{{lang|zh|宜山路站}}
|Yishan Road
|align=right|1.538
|align=right|5.361
|上海軌道交通:[[ファイル:SML4.svg|10px|4|link=]] [[上海軌道交通4号線|4号線]]、[[ファイル:SML9.svg|10px|9|link=]] [[上海軌道交通9号線|9号線]]
|-
|[[虹橋路駅]]
|{{lang|zh|虹桥路站}}
|Hongqiao Road
|align=right|1.309
|align=right|6.670
|上海軌道交通:[[ファイル:SML4.svg|10px|4|link=]] [[上海軌道交通4号線|4号線]]、[[ファイル:SML10.svg|10px|10|link=]] [[上海軌道交通10号線|10号線]]
|align=center rowspan=3|[[長寧区]]
|-
|[[延安西路駅]]
|{{lang|zh|延安西路站}}
|West Yan'an Road
|align=right|1.461
|align=right|8.131
|上海軌道交通:[[ファイル:SML4.svg|10px|4|link=]] 4号線
|-
|[[中山公園駅 (上海市)|中山公園駅]]
|{{lang|zh|中山公园站}}
|Zhongshan Park
|align=right|1.101
|align=right|9.232
|上海軌道交通:[[ファイル:SML2.svg|10px|2|link=]] [[上海軌道交通2号線|2号線]]、[[ファイル:SML4.svg|10px|4|link=]] 4号線
|-
|[[金沙江路駅]]
|{{lang|zh|金沙江路站}}
|Jinshajiang Road
|align=right|1.507
|align=right|10.739
|上海軌道交通:[[ファイル:SML4.svg|10px|4|link=]] 4号線、[[ファイル:SML13.svg|10px|13|link=]] [[上海軌道交通13号線|13号線]]
|align=center rowspan=4|[[普陀区 (上海市)|普陀区]]
|-
|[[曹楊路駅]]
|{{lang|zh|曹杨路站}}
|Caoyang Road
|align=right|0.903
|align=right|11.642
|上海軌道交通:[[ファイル:SML4.svg|10px|4|link=]] 4号線、[[ファイル:SML11.svg|10px|11|link=]] [[上海軌道交通11号線|11号線]]
|-
|[[鎮坪路駅]]
|{{lang|zh|镇坪路站}}
|Zhenping Road
|align=right|1.373
|align=right|13.015
|上海軌道交通:[[ファイル:SML4.svg|10px|4|link=]] 4号線、[[ファイル:SML7.svg|10px|7|link=]] [[上海軌道交通7号線|7号線]]
|-
|[[中潭路駅]]
|{{lang|zh|中潭路站}}
|Zhongtan Road
|align=right|1.473
|align=right|14.488
|上海軌道交通:[[ファイル:SML4.svg|10px|4|link=]] 4号線
|-
|[[上海駅|上海火車站駅]]
|{{lang|zh|上海火车站}}
|Shanghai Railway Station
|align=right|1.727
|align=right|16.215
|上海軌道交通:[[ファイル:SML1.svg|10px|1|link=]] [[上海軌道交通1号線|1号線]](改札外乗換え)、[[ファイル:SML4.svg|10px|4|link=]] 4号線<br/>中国鉄路総公司:[[京滬線]]・[[滬昆線]]・[[滬寧都市間鉄道]]([[上海駅]])
|align=center rowspan=2|[[静安区]]
|-
|[[宝山路駅]]
|{{lang|zh|宝山路站}}
|Baoshan Road
|align=right|2.017
|align=right|18.232
|上海軌道交通:[[ファイル:SML4.svg|10px|4|link=]] 4号線
|-
|[[東宝興路駅]]
|{{lang|zh|东宝兴路站}}
|Dongbaoxing Road
|align=right|1.073
|align=right|19.305
|
|align=center rowspan=5|[[虹口区]]
|-
|[[虹口足球場駅]]
|{{lang|zh|虹口足球场站}}
|Hongkou Football Stadium
|align=right|1.311
|align=right|20.616
|上海軌道交通:[[ファイル:SML8.svg|10px|8|link=]] [[上海軌道交通8号線|8号線]]
|-
|[[赤峰路駅]]
|{{lang|zh|赤峰路站}}
|Chifeng Road
|align=right|1.150
|align=right|21.766
|
|-
|[[大柏樹駅]]
|{{lang|zh|大柏树站}}
|Dabaishu
|align=right|0.911
|align=right|22.677
|
|-
|[[江湾鎮駅]]
|{{lang|zh|江湾镇站}}
|Jiangwan Town
|align=right|1.800
|align=right|24.477
|
|-
|[[殷高西路駅]]
|{{lang|zh|殷高西路站}}
|West Yingao Road
|align=right|1.603
|align=right|26.080
|
|align=center rowspan=10|[[宝山区 (上海市)|宝山区]]
|-
|[[長江南路駅]]
|{{lang|zh|长江南路站}}
|South Changjiang Road
|align=right|1.515
|align=right|27.595
|
|-
|[[淞発路駅]]
|{{lang|zh|淞发路站}}
|Songfa Road
|align=right|1.690
|align=right|29.285
|
|-
|[[張華浜駅]]
|{{lang|zh|张华浜站}}
|Zhanghuabang
|align=right|1.513
|align=right|30.798
|
|-
|[[淞浜路駅]]
|{{lang|zh|淞滨路站}}
|Songbin Road
|align=right|1.540
|align=right|32.338
|
|-
|[[水産路駅]]
|{{lang|zh|水产路站}}
|Shuichan Road
|align=right|1.241
|align=right|33.579
|
|-
|[[宝楊路駅]]
|{{lang|zh|宝杨路站}}
|Baoyang Road
|align=right|1.755
|align=right|35.334
|
|-
|[[友誼路駅 (上海市)|友誼路駅]]
|{{lang|zh|友谊路站}}
|Youyi Road
|align=right|1.029
|align=right|36.363
|
|-
|[[鉄力路駅]]
|{{lang|zh|铁力路站}}
|Tieli Road
|align=right|1.704
|align=right|38.067
|
|-
|[[江楊北路駅]]
|{{lang|zh|江杨北路站}}
|North Jiangyang Road
|align=right|2.081
|align=right|40.148
|
|-
|}
== 脚注 ==
{{脚注ヘルプ}}
{{reflist}}
==関連項目==
{{Commons|Category:Shmetro Line 3}}
*[[中華人民共和国の鉄道]]
*[[淞滬鉄道]]
{{上海軌道交通3号線}}
{{上海軌道交通}}
{{Rail-stub}}
{{China-stub}}
{{DEFAULTSORT:しやんはいきとうこうつう03}}
[[Category:上海軌道交通|路03]]
[[Category:中国の地下鉄路線]]
"""))
raise 1
print(parseBS(r"""
KBHFa~~起點
WASSERq\hKRZW\WASSERq~~ ~~ ~~ ~~天橋
LDER\INT\~~1公里~~中途站~~轉乘高鐵
\KBHFe\BUS~~2公里~~終點~~巴士總站
"""))
raise 1
print(parseBS(r"""
CONTg~~ ~~ ~~ ~~{{RoutemapRoute|Licon=u|[[京沪铁路]]往{{站|北京}}}}
\\ABZg+l\STRq\STR+r~~ ~~ ~~ ~~{{RoutemapRoute|Licon=r|[[京津城际铁路|京津城际]]往{{站|天津}}}}
\\BHF\\LSTR~~0~~'''{{站|北京南}}'''
LSTR+l\LSTRq\ABZgr\\LSTR~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[京沪铁路]]往{{站|上海}}}}
LSTR\\hSTRa@g\\LSTR~~ ~~ ~~ ~~[[北京特大桥]]
LSTR\CONTgq\hABZgr\\LSTR~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|联络线往[[北京动车段]]}}
LSTRl\LSTRq\hKRZ\STR+r\LSTR~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[京沪铁路]]、[[京九铁路]]}}
\CONTgq\hKRZ\ABZgr\LSTR~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[京九铁路]]往{{站|常平}}方向}}
\\hSTRe@f\LSTR\LSTR
\\HST\LSTR\LSTR~~59~~{{站|廊坊}}
\\hSTRa@g\LSTR\LSTR~~ ~~ ~~ ~~[[天津特大桥]]
\CONTgq\hKRZ\ABZql\ABZg+r~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[津霸铁路]]往{{站|霸州}}}}
\\hSTR\\STR~~ ~~ ~~ ~~{{RoutemapRoute|Licon=u|[[京津城际铁路|京津城际]]、[[京沪线]]往{{站|北京南}}}}
\\\hSTR\leer\ABZgl+l\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|Licon=r|[[京津城际铁路|京津城际]]、[[津山铁路]]往{{站|北京南}}}}
\\\hSTR\\ABZg+l\tCONTfq~~ ~~ ~~ ~~{{RoutemapRoute|Ricon=r|[[天津地下直径线]]往{{站|天津}}}}
\\hSTR\\BHF~~ ~~'''{{站|天津西}}'''
\\hABZgl\hSTReq\ABZgr
\\hSTR\KBSTaq\ABZgr~~ ~~ ~~ ~~[[曹庄动车运用所]]
\CONTgq\hKRZhu\hSTRq\hABZgr~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[津保铁路]]往{{站|保定}}}}
\LSTR+l\hKRZ\HSTq\ABZgr~~ ~~ ~~ ~~[[京沪铁路]][[曹庄站]]
\LSTR\hABZg+l\STRq\STRr
LSTR\hSTR\
LSTR\hHST\~~131~~{{站|天津南}}
LSTR\hSTR\
LSTR\hSTR\
LSTRl\hKRZ\LSTR+r~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[京沪铁路]]}}
CONTgq\hKRZ\KRZo+l~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[朔黄铁路]]}}
\hSTRe@f\LSTR
\HST\LSTR~~219~~{{站|沧州西}}
\hSTRa@g\LSTR
LSTR+l\hKRZ\LSTRr~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[京沪铁路]]}}
LSTRe\hSTR\
CONTgq\hKRZ\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[邯黄铁路]]}}
hSTRe@f
CONTgq\ABZg+r\~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[石济客运专线|石济客专]]往{{站|石家庄}}}}
HST~~327~~{{站|德州东}}
\ABZgl\LSTR+r
\CONTgq\KRZo\KRZo\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[德大铁路]]}}
\STR\LSTR
\\ABZg+l\LABZql\LCONTfq~~ ~~ ~~ ~~{{RoutemapRoute|Licon=r|[[石济客运专线|石济客专]]往[[济南东站]]}}
hSTRa@g
CONTgq\hKRZ\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[邯济铁路]]}}
LSTRa!~WASSERq\hKRZW\WASSERq~~ ~~ ~~ ~~[[济南黄河特大桥]]
LSTRe!~LSTRl\hKRZ\STR+r~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[济南线]]}}
CONTgq\hABZg+r\LSTR~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|往[[济南西动车运用所]]}}
LSTRa\hSTRe@f\LSTRe
LSTR\BHF\LSTRa~~419~~'''{{站|济南西}}'''
LSTR\STR\BHF~~ ~~[[济南站|{{小|济南}}]]
LSTR!~LSTRl\hKRZa\LSTR!~LSTRr~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[济南线]]}}
LSTR\hABZgl+l\ABZgr!~hSTRra
LSTRl\hKRZ\LSTR!~LSTR+r~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[京沪铁路]]}}
\\hABZgl+l\KRZh\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|Licon=r|[[胶济客运专线|胶济客专]]往{{站|青岛}}}}
\hSTRe@f\LSTR
\TUNNEL1\LSTR~~ ~~ ~~ ~~济南泰山区间隧道
CONTgq\KRZo\LSTR!~LSTR+r~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[泰肥铁路]]往{{站|湖屯}}}}
\HST\LSTR~~462~~{{站|泰安}}
\hSTRa@g\LSTR
LSTR+l\hKRZ\STRr~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[京沪铁路]]}}
LSTRe\hSTR\
CONTgq\hKRZ\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[瓦日铁路]]}}
CONTgq\hKRZ\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[磁莱铁路]]}}
CONTgq\hKRZ\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[新石铁路]]}}
hSTRe@f
HST~~533~~{{站|曲阜东}}
exCONTgq\KRZolr\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|{{站|兰考南}}|[[鲁南客运专线|鲁南客专]] – {{站|临沂北}}|Ricon=r}}
HST~~589~~{{站|滕州东}}
CONTgq\KRZo\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[枣临铁路]]}}
HST~~625~~{{站|枣庄}}
CONTgq\ABZg+r\~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[郑徐客运专线|郑徐客专]]往{{站|郑州东}}}}
BHF~~688~~'''{{站|徐州东}}'''
dCONTgq\dHSTq\KRZor\CONTfq~~ ~~ ~~ ~~[[陇海铁路]][[大湖站 (江苏省)|大湖站]]
CONTgq\KRZo\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[宿淮铁路]]}}
HST~~767~~{{站|宿州东}}
WASSERq\hKRZWae\WASSERq~~ ~~ ~~ ~~[[蚌埠淮河特大桥]]
LSTRa\STR\
LSTRl\KRZo+r\LSTR+r~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|蚌南联络线往[[京沪铁路]]{{站|蚌埠}}}}
\BHF\LSTRe~~844~~'''{{站|蚌埠南}}'''
CONTgq\ABZgr\~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[合蚌客运专线|合蚌客专]]往{{站|合肥}}}}
HST~~897~~{{站|定远}}
HST~~959~~{{站|滁州}}
<!--合宁铁路现状引入合肥枢纽合肥站;合肥南环线、合肥南站启用后,也可引入合肥南站-->
CONTgq\KRZo+r\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[宁合铁路]]往{{站|合肥}}|永亭线往{{站|永宁镇}}|Ricon=r}}
WASSERq\hKRZWae\WASSERq~~ ~~ ~~ ~~[[南京大胜关长江大桥 (铁路)|南京大胜关长江大桥]]
CONTgq\KRZo\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[宁铜铁路]]}}
\ABZg+l\KBSTeq~~ ~~ ~~ ~~{{RoutemapRoute|Licon=r|[[南京南动车运用所]]}}
CONTgq\ABZg+r\ ~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[宁安城际铁路|宁安城际]]往{{站|安庆}}}}
BHF~~1018~~'''{{站|南京南}}'''
CONTgq\ABZgr\~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[宁杭客运专线|宁杭客专]]往{{站|杭州东}}、{{站|杭州}}}}
\ABZgl\CONTfq~~ ~~ ~~ ~~{{RoutemapRoute|Licon=r|''[[仙宁铁路]]''往[[沪宁城际铁路|沪宁城际]]}}
HST~~1087~~{{站|镇江南}}
\hSTRa@g\vCONTg~~ ~~ ~~ ~~{{RoutemapRoute|Ricon=u|往{{站|北京}}|Licon=u|往{{站|南京}}}}
vSTR+l\hKRZv\vSTRr~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[京沪铁路]]、[[沪宁城际]]}}
vLSTR\hSTRe@f\
vLSTR\HST\~~1112~~{{站|丹阳北}}
vLSTR\hSTRa@g\~~ ~~ ~~ ~~[[丹昆特大桥]]
vLSTR\hHST\~~1144~~{{站|常州北}}
CONTgq\ABZq2\vKRZu\hKRZ\CONTfq\\~~ ~~ ~~ ~~{{RoutemapRoute|上跨[[新长铁路]]}}
vABZg+4-STR\hSTR\
vLSTR\hHST\~~1201~~{{站|无锡东}}
vLSTR\hHST\~~1227~~{{站|苏州北}}
vSTRl-SHI1ro\hKRZ\STR+r
hBHF-L\hBHF-R\LSTR~~1259~~{{站|崑山南}}
vSTR+l-SHI1+ro\hKRZ\STRr
vLSTR\hSTRe@f\
vLSTR\hSTRa@g\~~ ~~ ~~ ~~上海特大桥
\vSTR-ABZgl\KRZl!~hABZgl\hSTR+re\~~ ~~ ~~ ~~{{BSsplit|{{rmri|c2}}黄渡联络线往[[站|上海]]|{{RoutemapRoute|Licon=l|[[沪宁城际铁路|沪宁城际]]往[[站|南京]]}}}}
\\vSTRl-SHI1ro\hKRZ\ABZql+l\BHFq\CONTfq~~ ~~ ~~ ~~{{rmri|left}}[[京沪铁路]]往[[北京站|北京]] <big>'''[[上海站|上海]]'''</big> 往[[上海客技站]]{{rmri|r}}
CONTgq\KRZo+l\hKRZ\STRr\~~ ~~ ~~ ~~{{RoutemapRoute|Licon=l|[[沪昆铁路]]往{{站|昆明}}}}
STR\hSTRe@f\~~ ~~ ~~ ~~
STRl\ABZg+lr\CONTfq~~ ~~ ~~ ~~{{BSsplit|{{RoutemapRoute|Licon=l|虹安联络线往[[站|南京南]]}}|{{RoutemapRoute|Licon=r|虹所出库线往[[上海动车段|上海动车段(虹桥)]]}}|align=right}}
BHF~~1302~~{{nowrap|'''{{站|上海虹桥}}''' [[虹桥综合交通枢纽]]}}{{rint|air|link=上海虹桥国际机场}}
CONTf~~ ~~ ~~ ~~{{RoutemapRoute|Licon=d|[[沪杭高速铁路|沪杭高铁]]往{{站|杭州东}}}}
""")) |
# <gh_stars>1-10
"""
Settings and configuration for django_pds.
this file is re-created from django.conf.__init__.py file
main reason of this settings to lazy load all the configuration
from either django_pds.core.settings file or to load
settings for django_pds defined in django project settings
"""
import importlib
from django.utils.functional import LazyObject, empty
from mongoengine import ImproperlyConfigured
from .core.utils import get_environment
ENVIRONMENT_VARIABLE = "DJANGO_PDS_SETTINGS_MODULE"
DEFAULT_CORE_SETTINGS_MODULE = 'django_pds.core.settings'
# Settings that must hold a list or tuple value; Settings and SettingsOverride
# raise ImproperlyConfigured when one of these is bound to any other type.
list_settings = [
    "SYSTEM_SUPPORTED_ROLES",
    "SECURITY_ATTRIBUTES",
    "SECURITY_ROLES_ATTRIBUTES",
    "SECURITY_IDS_ATTRIBUTES",
    "READ_ONLY_FIELDS",
    "SELECT_NOT_ALLOWED_ENTITIES",
    "READ_NOT_ALLOWED_ATTRIBUTES",
    "MONGO_ENGINE_USABLE_OPERATORS",
    "EDIT_NOT_ALLOWED_ATTRIBUTES_PDS"
]
class LazySettings(LazyObject):
    """Lazy proxy for django_pds settings.

    Attribute access is delegated to a Settings instance that is built on
    first use from django_pds.core.settings and then optionally overridden
    by the module named in the DJANGO_PDS_SETTINGS_MODULE environment
    variable.
    """

    def _setup(self, name=None):
        """Build the wrapped Settings object on first attribute access.

        `name` is the attribute that triggered the load; it is unused but
        kept for signature compatibility with django.conf.LazySettings.
        """
        default_settings_module = DEFAULT_CORE_SETTINGS_MODULE
        self._wrapped = Settings(default_settings_module)
        override_settings_module = get_environment(ENVIRONMENT_VARIABLE, False, None)
        # BUG FIX: removed a leftover debug print of the override module name.
        if override_settings_module:
            override_settings = SettingsOverride(self._wrapped)
            override_settings.override(override_settings_module)

    def __repr__(self):
        # Hardcode the class name as otherwise it yields 'Settings'.
        if self._wrapped is empty:
            return '<django_pds.conf.LazySettings [Unevaluated]>'
        return '<django_pds.conf.LazySettings "%(settings_module)s">' % {
            'settings_module': self._wrapped.SETTINGS_MODULE,
        }

    def __getattr__(self, name):
        """Return the value of a setting and cache it in self.__dict__."""
        if self._wrapped is empty:
            self._setup(name)
        val = getattr(self._wrapped, name)
        self.__dict__[name] = val
        return val

    def __setattr__(self, name, value):
        """
        Set the value of setting. Clear all cached values if _wrapped changes
        (@override_settings does this) or clear single values when set.
        """
        if name == '_wrapped':
            self.__dict__.clear()
        else:
            self.__dict__.pop(name, None)
        super().__setattr__(name, value)

    def __delattr__(self, name):
        """Delete a setting and clear it from cache if needed."""
        super().__delattr__(name)
        self.__dict__.pop(name, None)
class Settings:
    """Loads UPPERCASE attributes of a settings module onto the instance.

    Settings listed in `list_settings` are validated to be list/tuple
    valued; all loaded names are recorded in `_explicit_settings`.
    """

    def __init__(self, settings_module):
        self.SETTINGS_MODULE = settings_module
        module = importlib.import_module(self.SETTINGS_MODULE)
        self._explicit_settings = set()
        uppercase_names = [n for n in dir(module) if n.isupper()]
        for name in uppercase_names:
            value = getattr(module, name)
            if name in list_settings and not isinstance(value, (list, tuple)):
                raise ImproperlyConfigured("The %s setting must be a list or a tuple. " % name)
            setattr(self, name, value)
            self._explicit_settings.add(name)
class SettingsOverride:
    """Copies UPPERCASE attributes of an override module onto a Settings object."""

    def __init__(self, __settings):
        # The double-underscore parameter name is kept for interface
        # compatibility; the attribute is stored name-mangled as
        # _SettingsOverride__settings.
        self.__settings = __settings

    def override(self, override_module):
        """Import `override_module` and apply its uppercase settings,
        validating that list-typed settings hold a list or a tuple."""
        mod = importlib.import_module(override_module)
        for setting in (n for n in dir(mod) if n.isupper()):
            setting_value = getattr(mod, setting)
            if (setting in list_settings and
                    not isinstance(setting_value, (list, tuple))):
                raise ImproperlyConfigured("The %s setting must be a list or a tuple. " % setting)
            setattr(self.__settings, setting, setting_value)
settings = LazySettings()
|
#!/usr/bin/env python
# ############################################################################ #
# NetJobs - a network job synchronizer. #
# #
# Author: <NAME> (<EMAIL>) #
# For: Deepstorage, LLC (deepstorage.net) #
# Version: 1.0 #
# #
# Usage: NetJobs.py [OPTIONS] [PATH] #
# OPTIONS #
# -h Display help message. #
# -s Run in simulator mode (disables networking). #
# -v Run in verbose mode. #
# PATH #
# Relative or absolute path to configuration file (required). #
# #
# Example: $ NetJobs.py -v "C:\NetJobs\testconfig.txt" #
# ############################################################################ #
import sys
import os
import re
import socket
import select
import threading
from collections import deque
# ############################################################################ #
# Constants and global variables. #
# ############################################################################ #
ARGC_MAX = 3
# Regex patterns are raw strings so backslash escapes reach the re module
# verbatim; the previous plain literals (e.g. '\-') relied on Python's
# deprecated invalid-escape fallback. Pattern values are unchanged.
ARGS_REGEX = r'\-[hsv]+'
DELIMETER = ': *'
TEST_LABEL_REGEX = r'^(\w|\.)+ *:.*$'
TEST_SPEC_REGEX = r'^(\w|\.)+ *: *(\d+ *[hms] *: *)?.*\s*$'
TEST_TIMEOUT_REGEX = r'^\-timeout *: *((\d+ *[hms])|(none))\s*$$'
TEST_GENERAL_TIMEOUT_REGEX = r'^\-generaltimeout *: *((\d+ *[hms])|(none))\s*$'
TEST_MIN_HOSTS_REGEX = r'^\-minhosts *: *(\d+|all)\s*$'
TEST_END_REGEX = r'^end\s*$'
TIME_FORMAT_REGEX = r'\d+ *[hms]'
TIMEOUT_NONE = 0
MIN_HOSTS_ALL = -1
AGENT_LISTEN_PORT = 16192
BUFFER_SIZE = 4096
SOCKET_TIMEOUT = 60
START_STRING = '// START //\n'
KILL_STRING = '// KILL //\n'
SUCCESS_STRING = 'SUCCESS'
ERROR_STRING = 'ERROR'
# NOTE: `global` at module level is a no-op; these lines are kept only to
# document the module-level flags reassigned via `global` inside functions.
global stdscr
global verbose
verbose = False
global simulate
simulate = False
# ############################################################################ #
# NetJobs class. #
# ############################################################################ #
class NetJobs:
    "NetJobs main class: parses a job config file and synchronizes remote agents"

    # Class-level defaults; __init__ rebinds the mutable ones so separate
    # instances never share the same list/dict objects (BUG FIX: mutable
    # class attributes were previously shared across instances).
    path_in = ''
    tests = []
    sockets = {}
    listeners = {}

    #
    # Initializer.
    #
    def __init__(self, argv):
        "basic initializer: processes CLI arguments and parses the config file"
        # Fresh mutable containers per instance.
        self.tests = []
        self.sockets = {}
        self.listeners = {}
        # Process CLI arguments.
        self.eval_options(argv)
        if verbose == True:
            print('Setup...')
            print(' "%s" given as configuration file path.' % (self.path_in))
        # Parse configuration file.
        self.parse_config()

    #
    # Evaluate CLI arguments.
    #
    def eval_options(self, argv):
        "evaluate CLI arguments and act on them"
        argc = len(argv)
        sample = re.compile(ARGS_REGEX)
        if argc > ARGC_MAX:
            terminate()
        elif argc == 1:
            # No arguments: prompt interactively for the config path.
            self.path_in = ask_for_path()
        elif argc == 2:
            # One argument: either an option group or the config path.
            if sample.match(argv[1]) is not None:
                self.act_on_options(argv[1])
                self.path_in = ask_for_path()
            else:
                self.path_in = argv[1]
        elif argc == ARGC_MAX:
            # Two arguments: must be options followed by the config path.
            if (sample.match(argv[1]) is None or
                    sample.match(argv[2]) is not None):
                terminate()
            else:
                self.act_on_options(argv[1])
                self.path_in = argv[2]

    #
    # Process optional CLI arguments. Called as part of eval_options.
    #
    def act_on_options(self, args):
        "act on CLI arguments (-h help, -s simulate, -v verbose)"
        if 'h' in args:
            instructions()
            exit(0)
        if 's' in args:
            global simulate
            simulate = True
        if 'v' in args:
            global verbose
            verbose = True
            print('\nVerbose logging enabled.\n')

    #
    # Parse input file.
    #
    def parse_config(self):
        "configure the run according to the configuration file specifications"
        testLabelRegex = re.compile(TEST_LABEL_REGEX)
        testSpecRegex = re.compile(TEST_SPEC_REGEX)
        testTimeoutRegex = re.compile(TEST_TIMEOUT_REGEX)
        testGeneralTimeoutRegex = re.compile(TEST_GENERAL_TIMEOUT_REGEX)
        testMinHostsRegex = re.compile(TEST_MIN_HOSTS_REGEX)
        testEndRegex = re.compile(TEST_END_REGEX)
        try:
            with open(self.path_in, 'r', newline='') as file:
                # Filter out empty lines.
                lines = deque(filter(None, (line.rstrip() for line in file)))
                # Read rest of file.
                while lines:
                    inTestBlock = False
                    # Get test name.
                    line = lines.popleft()
                    tokens = re.split(DELIMETER, line)
                    testName = ''
                    if testLabelRegex.match(line) is None:
                        sys.exit('ERROR: file %s: expected test label but found '\
                                 '"%s"' % (self.path_in, line))
                    else:
                        inTestBlock = True
                        target = None
                        timeout = TIMEOUT_NONE
                        generalTimeout = TIMEOUT_NONE
                        minHosts = MIN_HOSTS_ALL
                        testLabel = tokens[0]
                        # BUG FIX: testName was never assigned, so the
                        # missing-end-marker error below always printed an
                        # empty test name.
                        testName = testLabel
                        specs = {}
                        timeouts = {}
                        # Get test specifications. Go until next test label line.
                        while inTestBlock:
                            # Reached end of file without closing block.
                            if not lines:
                                sys.exit('ERROR: file %s: test "%s" - no end marker'
                                         % (self.path_in, testName))
                            line = lines.popleft()
                            tokens = re.split(DELIMETER, line)
                            # Is it a target/spec line?
                            if testSpecRegex.match(line):
                                target = tokens[0]
                                command = tokens[1]
                                # Add test spec to specs dictionary.
                                specs[target] = command
                                # Set timeout to general (might be overridden).
                                timeouts[target] = generalTimeout
                            # Is it a timeout line?
                            elif testTimeoutRegex.match(line):
                                try:
                                    timeout = evaluate_timeout_string(
                                        tokens[1])
                                    if timeout < 0:
                                        raise ValueError
                                    else:
                                        if target:
                                            timeouts[target] = timeout
                                        else:
                                            sys.exit('ERROR: file %s: timeout specified '\
                                                     'but no current target'
                                                     % self.path_in)
                                except ValueError:
                                    sys.exit('ERROR: file %s: timeout values must be '\
                                             '"none" or integer >= 0'
                                             % self.path_in)
                            # Is it a general timeout line?
                            elif testGeneralTimeoutRegex.match(line):
                                try:
                                    generalTimeout = evaluate_timeout_string(
                                        tokens[1])
                                    if generalTimeout < 0:
                                        raise ValueError
                                except ValueError:
                                    sys.exit('ERROR: file %s: timeout values must be '\
                                             '"none" or integer >= 0'
                                             % self.path_in)
                            # Is it a minhosts line?
                            elif testMinHostsRegex.match(line):
                                if tokens[1] == 'all':
                                    minHosts = MIN_HOSTS_ALL
                                else:
                                    try:
                                        minHosts = int(tokens[1])
                                    except ValueError:
                                        sys.exit('ERROR: file %s: minhosts specification '\
                                                 'must be "all" or integer > 0 '
                                                 % self.path_in)
                            # Is it an end marker?
                            elif testEndRegex.match(line):
                                inTestBlock = False
                                # Add the test configuration to the list.
                                self.tests.append(TestConfig(testLabel,
                                                             generalTimeout,
                                                             minHosts,
                                                             specs,
                                                             timeouts))
                            # Else unknown.
                            else:
                                sys.exit('ERROR: file %s: unable to interpret line "%s"'
                                         % (self.path_in, line))
        # Catch IOError exception and exit.
        except IOError as e:
            sys.exit('file %s: %s' % (self.path_in, e))

    #
    # Prepare remote agents.
    #
    def prepAgents(self, test):
        "connect to each target agent and echo-verify timeout and command specs"
        if verbose:
            print(' Preparing agents...')
        targets = list(test.specs.keys())
        for target in targets:
            # Connect to agent. Skip if in simulation mode.
            if not simulate:
                try:
                    port = AGENT_LISTEN_PORT
                    if test.timeouts[target] == TIMEOUT_NONE:
                        timeout = None
                    else:
                        timeout = test.timeouts[target]
                    # BUG FIX: a separate socket.socket() used to be created
                    # here and immediately discarded (leaked fd);
                    # create_connection() builds its own socket.
                    sock = socket.create_connection((target, port), timeout=timeout)
                    # Perform a simple echo test to make sure it works.
                    # (BUG FIX: these error messages used %d with a string
                    # target, which raised TypeError instead of reporting.)
                    testBytes = bytes('hello, ' + target + '\n', 'UTF-8')
                    sock.sendall(testBytes)
                    response = sock.recv(BUFFER_SIZE)
                    if response != testBytes:
                        sys.exit('ERROR: agent %s failed echo test. Unsure of agent '\
                                 'identity. Terminating.' % target)
                    # Timeout.
                    testBytes = bytes('timeout=' + str(test.timeouts[target]) + '\n', 'UTF-8')
                    sock.sendall(testBytes)
                    response = sock.recv(BUFFER_SIZE)
                    if response != testBytes:
                        sys.exit('ERROR: agent %s failed timeout echo test. Terminating.' % target)
                    # Command.
                    testBytes = bytes(test.specs[target] + '\n', 'UTF-8')
                    sock.sendall(testBytes)
                    response = sock.recv(BUFFER_SIZE)
                    if response != testBytes:
                        sys.exit('ERROR: agent %s failed command echo test. Terminating.' % target)
                    # Good to go.
                    self.sockets[target] = sock
                except socket.timeout as e:
                    self.timeoutHandler(e, target, test, self)
                except socket.error as e:
                    sys.exit('ERROR: failed to open connection to socket for target '\
                             '"%s": %s.' % (target, e))
        if verbose:
            print(' ...finished.\n')

    #
    # Start remote agents.
    #
    def startAgents(self, test):
        "spawn a listener thread per connected agent, then send the start command"
        if verbose:
            print(' Starting agents...')
        for target in list(self.sockets.keys()):
            sock = self.sockets[target]
            # Start a ListenThread to wait for results.
            listener = ListenThread(target, sock, test.timeouts[target],
                                    self, test)
            self.listeners[target] = listener
            listener.start()
            # Send the start command.
            sock.sendall(bytes(START_STRING, 'UTF-8'))
        if verbose:
            print(' ...finished.\n')

    #
    # Wait for remote agent results.
    #
    def waitForResults(self, test):
        "block until every listener thread has reported and joined"
        if verbose:
            print(' Waiting for agent results...')
            print()
        print(' -- %s // RESULTS:' % test.label)
        # Listener threads print results here before joining.
        for listener in self.listeners.values():
            listener.join()
        if verbose:
            print(' ...finished.\n')

    #
    # Clean up after test.
    #
    def cleanUp(self, test):
        "close every agent socket opened for this test"
        if verbose:
            print(' Cleaning up...')
        for sock in list(self.sockets.values()):
            sock.close()
        if verbose:
            print(' ...finished.\n')

    #
    # Timeout handler. Kills all listen threads.
    #
    def timeoutHandler(self, exception, target, test, netJobs):
        "called when a socket timeout occurs"
        # netJobs is accepted for signature compatibility with existing
        # callers that pass the owning instance explicitly; self is used.
        if test.minHosts == MIN_HOSTS_ALL:
            print('ERROR: test requires all hosts but host %s timed out.'
                  % target, file=sys.stderr)
            os._exit(1)
        elif test.timeoutsRemaining is not None:
            if test.timeoutsRemaining < 1:
                print('ERROR: too many timeouts; test requires at least %d '\
                      'host(s).' % test.minHosts, file=sys.stderr)
                self.stopAndKillListeners()
            else:
                test.results[target] = "TIMEOUT"
                test.timeoutsRemaining -= 1

    #
    # Cause all ListenThreads to rejoin.
    #
    def stopAndKillListeners(self):
        "ask every listener thread to stop and signal its agent to kill the job"
        for listener in self.listeners.values():
            listener.kill()

    #
    # Start.
    #
    def start(self):
        "begin execution: run each configured test through prep/start/wait/cleanup"
        if verbose:
            print('\nStarting run...\n')
        for test in self.tests:
            # Reset instance data structures.
            self.sockets = {}
            self.listeners = {}
            if verbose:
                print(' %s...' % test.label)
            # Prepare remote agents.
            self.prepAgents(test)
            # Start remote agents.
            self.startAgents(test)
            # Wait for remote agent return status.
            self.waitForResults(test)
            # Clean up.
            self.cleanUp(test)
        if verbose:
            print('\nFinishing...\n')
# ############################################################################ #
# TestConfig class for storing test configurations. #
# ############################################################################ #
class TestConfig:
    "data structure class for storing test configurations"

    def __init__(self, label, generalTimeout, minHosts, specs, timeouts):
        """Record one test's label, timeout policy, per-target command specs
        (target -> command) and per-target timeouts (target -> seconds)."""
        self.label = label
        self.generalTimeout = generalTimeout
        self.minHosts = minHosts
        self.specs = specs
        self.timeouts = timeouts
        self.results = {}
        # A minHosts of 0 disables the timeout budget; otherwise the budget
        # starts at minHosts and is decremented as targets time out.
        self.timeoutsRemaining = None if minHosts == 0 else minHosts
# ############################################################################ #
# ListenThread class for listening for test results. #
# ############################################################################ #
class ListenThread(threading.Thread):
    "listens for test results for a given agent"

    def __init__(self, target, sock, timeout, netJobs, test):
        """Bind the thread to one agent socket; results are written into
        test.results[target] and timeouts reported via netJobs.timeoutHandler."""
        threading.Thread.__init__(self)
        self.target = target
        self.sock = sock
        self.timeout = timeout
        self.netJobs = netJobs
        self.test = test
        # BUG FIX: run() previously looped on a *local* `running` variable
        # while kill() set self.running, so kill() never stopped the loop.
        # The flag is now an instance attribute used by both.
        self.running = False

    def run(self):
        """Wait (via select) for the agent's status line and record it."""
        if self.timeout == TIMEOUT_NONE:
            self.timeout = None
        self.running = True
        self.test.results[self.target] = ''
        try:
            while self.running:
                # Wait for result to be transmitted from agent.
                ready = select.select([self.sock], [], [], self.timeout)
                if ready[0]:
                    buffer = self.sock.recv(BUFFER_SIZE)
                    if buffer:
                        status = buffer.decode('UTF-8').replace('\n', '')
                        self.test.results[self.target] = status
                        self.running = False
                    else:
                        # Empty read means the peer closed; with a timeout
                        # configured this is treated the same as a timeout.
                        if self.timeout is not None:
                            self.kill()
                            raise socket.timeout()
                else:
                    # select() expired without data: agent timed out.
                    self.kill()
                    raise socket.timeout
        except socket.timeout as e:
            self.test.results[self.target] = "TIMEOUT"
            self.netJobs.timeoutHandler(e, self.target, self.test, self.netJobs)
        finally:
            self.printResult()

    def kill(self):
        """Stop the listen loop and (best-effort) tell the agent to kill the job."""
        self.running = False
        try:
            self.sock.sendall(bytes(KILL_STRING, 'UTF-8'))
        except socket.error:
            # Best-effort: the socket may already be closed or broken.
            pass

    def printResult(self):
        """Print this agent's final recorded result."""
        print(' %s : %s' % (self.target, str(self.test.results[self.target])))
# ############################################################################ #
# Functions. #
# ############################################################################ #
#
# Ask the user to provide the config file path.
#
def ask_for_path():
    "ask user to provide config file path"
    # Blocks on stdin and returns the raw string typed (no validation here;
    # a bad path surfaces later as an IOError in parse_config).
    return input('Please enter the configuration file path: ')
#
# Evaluate timeout string.
#
# Params:
# timeout String to evaluate.
#
# Return:
# Timeout specified (in seconds) or 0 if no timeout.
#
def evaluate_timeout_string(timeout):
    """Evaluate a timeout spec string and return it in seconds.

    'none' yields TIMEOUT_NONE (0). Otherwise the string is value+unit,
    e.g. '5m' or '2h'; unknown units fall back to seconds (unchanged
    behavior). Raises ValueError for a non-integer value part.
    """
    if timeout == 'none':
        return TIMEOUT_NONE
    unit = timeout[-1]        # Last character in string.
    value = int(timeout[:-1]) # Everything except last character in string.
    # BUG FIX: previously compared with `unit is 'h'` — identity comparison
    # against a literal, which only worked via CPython string interning.
    multipliers = {'h': 60 * 60, 'm': 60}
    return value * multipliers.get(unit, 1)
#
# Print CLI usage instructions.
#
def instructions():
    "print usage instructions"
    # One tuple entry per output line; empty strings reproduce the blank
    # spacing lines the original emitted with bare print() calls.
    usage = (
        '',
        r'Usage: NetJobs.py [OPTIONS] [PATH]',
        r'OPTIONS',
        r' -h Display this message.',
        r' -s Run in simulator mode (disables networking).',
        r' -v Run in verbose mode.',
        r'PATH',
        r' Relative or absolute path to source file (required).',
        '',
        r'NetJobs.py -v "C:\NetJobs\testconfig.txt"',
        '',
    )
    for usage_line in usage:
        print(usage_line)
#
# Exit with error and print instructions.
#
def terminate():
    "terminate with error and print instructions"
    # Show usage before exiting with a non-zero status.
    instructions()
    sys.exit(1)
#
# Main.
#
def main():
    "main function"
    # Create NetJobs object to handle the work.
    # (Constructing NetJobs parses sys.argv and the configuration file.)
    jobs = NetJobs(sys.argv)
    # Run.
    jobs.start()
    # Finish.
    if verbose:
        print('All jobs completed.')
    exit(0)
# ############################################################################ #
# Execute main. #
# ############################################################################ #
if __name__ == "__main__":
main()
|
<filename>src/wavcheck/test_timecode.py
# SPDX-FileCopyrightText: 2022 Barndollar Music, Ltd.
#
# SPDX-License-Identifier: Apache-2.0
import unittest
from .timecode import FrameRate, Timecode, parse_framerate_within, parse_timecode_str, tc_to_wall_secs, wall_secs_to_durstr, wall_secs_to_fractional_frame_idx, wall_secs_to_tc_left
def _fr(s: str) -> FrameRate:
    """Parse *s* into a FrameRate, asserting the parse succeeded."""
    frame_rate = parse_framerate_within(s)
    assert frame_rate is not None
    return frame_rate
def _tc(s: str) -> Timecode:
    """Shorthand: parse *s* into a Timecode."""
    return parse_timecode_str(s)
class TestWallSecsToDurStr(unittest.TestCase):
    """Tests wall_secs_to_durstr() formatting, covering rounding at the
    second/minute/hour unit boundaries and the '(-) ' negative prefix."""

    def test_works_for_positive_durations(self):
        self.assertEqual(wall_secs_to_durstr(0), "00s")
        self.assertEqual(wall_secs_to_durstr(0.49999999), "00s")
        self.assertEqual(wall_secs_to_durstr(0.5), "01s")
        self.assertEqual(wall_secs_to_durstr(59.49999999), "59s")
        self.assertEqual(wall_secs_to_durstr(59.5), "1m 00s")
        self.assertEqual(wall_secs_to_durstr(3540), "59m 00s")
        self.assertEqual(wall_secs_to_durstr(3599.49999999), "59m 59s")
        self.assertEqual(wall_secs_to_durstr(3600), "1h 00m 00s")
        self.assertEqual(wall_secs_to_durstr(3765), "1h 02m 45s")
        self.assertEqual(wall_secs_to_durstr(359999.49999999), "99h 59m 59s")
        self.assertEqual(wall_secs_to_durstr(359999.98333333), "100h 00m 00s")

    def test_works_for_negative_durations(self):
        # Negative zero formats like zero, without the '(-) ' prefix.
        self.assertEqual(wall_secs_to_durstr(-0.0), "00s")
        self.assertEqual(wall_secs_to_durstr(-0.49999999), "00s")
        self.assertEqual(wall_secs_to_durstr(-0.5), "(-) 01s")
        self.assertEqual(wall_secs_to_durstr(-59.49999999), "(-) 59s")
        self.assertEqual(wall_secs_to_durstr(-3540), "(-) 59m 00s")
        self.assertEqual(wall_secs_to_durstr(-3765), "(-) 1h 02m 45s")
class TimecodeTestCase(unittest.TestCase):
    """Base class adding a convenience assertion for Timecode values."""

    def assert_tc(self, tc: Timecode, expected: str):
        # Compare via str() so failures show the human-readable timecode.
        self.assertEqual(str(tc), expected)
class TestTimecodeEq(TimecodeTestCase):
    """Tests Timecode equality and inequality operators."""

    def test_works(self):
        self.assertTrue(_tc("01:02:03:04") == _tc("01:02:03:04"))
        self.assertFalse(_tc("01:02:03:04") == _tc("01:02:03:05"))
        self.assertFalse(_tc("01:02:03:04") != _tc("01:02:03:04"))
        self.assertTrue(_tc("01:02:03:04") != _tc("01:02:03:05"))
class TestParseTimecodeStr(TimecodeTestCase):
    """Tests parse_timecode_str() across separator, padding and whitespace
    variants; all should normalize to the HH:MM:SS:FF form."""

    def test_works(self):
        self.assert_tc(parse_timecode_str("01020304"), "01:02:03:04")
        self.assert_tc(parse_timecode_str(" 01.02.03.04 "), "01:02:03:04")
        self.assert_tc(parse_timecode_str("1_02_03_04"), "01:02:03:04")
        self.assert_tc(parse_timecode_str("1:02:03:04"), "01:02:03:04")
        self.assert_tc(parse_timecode_str("1:02:03;04"), "01:02:03:04")
class TestTcToWallSecs(TimecodeTestCase):
    """Tests tc_to_wall_secs() conversion of timecodes to wall-clock seconds
    across drop and non-drop frame rates."""

    def test_works(self):
        self.assertAlmostEqual(1.04, tc_to_wall_secs(
            _tc("00:00:01:02"), _fr("50.00 non-drop")))
        self.assertAlmostEqual(60.02663333, tc_to_wall_secs(
            _tc("00:00:59:29"), _fr("29.97 drop")))
        self.assertAlmostEqual(60.06, tc_to_wall_secs(
            _tc("00:01:00;02"), _fr("29.97 drop")))
        # 44:33:22:11 => 160,402 timecode seconds plus 11 frames:
        self.assertAlmostEqual(160562.86079167, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("23.976 non-drop")))
        self.assertAlmostEqual(160402.45833333, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("24.000 non-drop")))
        self.assertAlmostEqual(160402.44, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("25.000 non-drop")))
        self.assertAlmostEqual(160562.76903333, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("29.970 non-drop")))
        self.assertAlmostEqual(160402.36666667, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("30.000 non-drop")))
        self.assertAlmostEqual(160562.63139583, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("47.952 non-drop")))
        self.assertAlmostEqual(160402.22916667, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("48.000 non-drop")))
        self.assertAlmostEqual(160402.22, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("50.000 non-drop")))
        self.assertAlmostEqual(160562.58551667, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("59.940 non-drop")))
        self.assertAlmostEqual(160402.18333333, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("60.000 non-drop")))
        self.assertAlmostEqual(160402.20863333, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("29.970 drop")))
        self.assertAlmostEqual(160402.02511667, tc_to_wall_secs(
            _tc("44:33:22:11"), _fr("59.940 drop")))
class TestWallSecsToTcLeft(TimecodeTestCase):
    """Tests wall_secs_to_tc_left() — the inverse of tc_to_wall_secs —
    including both long- and short-form frame-rate spellings."""

    def test_works(self):
        self.assert_tc(wall_secs_to_tc_left(
            1.04, _fr("50.00 non-drop")), "00:00:01:02")
        self.assert_tc(wall_secs_to_tc_left(
            60.02663333, _fr("29.97 drop")), "00:00:59:28")
        self.assert_tc(wall_secs_to_tc_left(
            60.02663334, _fr("29.97 drop")), "00:00:59:29")
        self.assert_tc(wall_secs_to_tc_left(
            60.06, _fr("29.97 drop")), "00:01:00:02")
        # 44:33:22:11 => 160,402 timecode seconds plus 11 frames:
        self.assert_tc(wall_secs_to_tc_left(160562.86079167,
                                            _fr("23.976 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160562.86079167,
                                            _fr("23.98 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160402.45833334,
                                            _fr("24.000 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160402.45833334,
                                            _fr("24.00 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(
            160402.44, _fr("25.000 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(
            160402.44, _fr("25.00 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160562.76903334,
                                            _fr("29.970 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160562.76903334,
                                            _fr("29.97 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160402.36666667,
                                            _fr("30.000 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160402.36666667,
                                            _fr("30.00 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160562.63139584,
                                            _fr("47.952 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160562.63139584,
                                            _fr("47.95 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160402.22916667,
                                            _fr("48.000 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160402.22916667,
                                            _fr("48.00 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(
            160402.22, _fr("50.000 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(
            160402.22, _fr("50.00 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160562.58551667,
                                            _fr("59.940 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160562.58551667,
                                            _fr("59.94 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160402.18333334,
                                            _fr("60.000 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(160402.18333334,
                                            _fr("60.00 non-drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(
            160402.20863334, _fr("29.970 drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(
            160402.20863334, _fr("29.97 drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(
            160402.02511667, _fr("59.940 drop")), "44:33:22:11")
        self.assert_tc(wall_secs_to_tc_left(
            160402.02511667, _fr("59.94 drop")), "44:33:22:11")
class TestWallSecsToFractionalFrameIdx(TimecodeTestCase):
    """Tests wall_secs_to_fractional_frame_idx() at quarter-frame steps."""

    def test_works(self):
        self.assertAlmostEqual(
            50.0,
            wall_secs_to_fractional_frame_idx(1.0, _fr("50.000 non-drop")))
        self.assertAlmostEqual(
            50.25,
            wall_secs_to_fractional_frame_idx(1.005, _fr("50.000 non-drop")))
        self.assertAlmostEqual(
            50.5,
            wall_secs_to_fractional_frame_idx(1.01, _fr("50.000 non-drop")))
        self.assertAlmostEqual(
            50.75,
            wall_secs_to_fractional_frame_idx(1.015, _fr("50.000 non-drop")))
        self.assertAlmostEqual(
            51.0,
            wall_secs_to_fractional_frame_idx(1.02, _fr("50.000 non-drop")))
if __name__ == "__main__":
unittest.main()
|
<filename>zwutils/dlso.py
'''
dict list set object utils
'''
import collections
class ZWObject(object):
    """Empty attribute-bag class; instances receive attributes via setattr."""
    pass
def dict2obj(kv):
    """Build a ZWObject whose attributes mirror dict *kv* (None -> empty object)."""
    source = kv or {}
    obj = ZWObject()
    for name, value in source.items():
        setattr(obj, name, value)
    return obj
def obj2dict(o):
    """Return a dict of *o*'s non-underscore attributes (None -> empty ZWObject)."""
    source = o or ZWObject()
    return {name: getattr(source, name)
            for name in dir(source)
            if not name.startswith('_')}
def tbl2dict(h, rs):
    '''Zip a header row with each data row.

    h: list of column names. rs: 2-D list of row values.
    Returns a list of dicts, one per row, pairing each header name with the
    row value in the same position.
    '''
    rows = []
    for record in rs:
        rows.append(dict(zip(h, record)))
    return rows
def extend_attrs(o, kv):
    '''
    Set/overwrite attributes of o from kv, adding new attributes as needed.
    o: dict/obj (None -> fresh ZWObject); kv: dict/obj (None -> no-op).
    Returns the (possibly converted) target object.
    '''
    target = o or ZWObject()
    if isinstance(target, dict):
        target = dict2obj(target)
    updates = kv or {}
    if not isinstance(updates, dict):
        updates = obj2dict(updates)
    for name, value in updates.items():
        setattr(target, name, value)
    return target
def update_attrs(o, kv):
    '''
    Update the values of o's existing attributes from kv; never adds new
    attributes to o. kv: dict/obj. Returns the (possibly converted) target.
    '''
    target = o or ZWObject()
    if isinstance(target, dict):
        target = dict2obj(target)
    updates = kv or {}
    if not isinstance(updates, dict):
        updates = obj2dict(updates)
    # Only overwrite attributes the target already has.
    for name, value in updates.items():
        if hasattr(target, name):
            setattr(target, name, value)
    return target
def upsert_config(parent_cfg, default_cfg, new_cfg, param_cfg):
    '''
    Merge configuration layers onto parent_cfg and return it.

    Precedence (highest wins): param_cfg overwrites new_cfg, which overwrites
    default_cfg, which overwrites parent_cfg. Nested dict values are then
    converted into ZWObject attribute trees.
    '''
    # pcfg = parent_cfg or type('', (), {})()
    pcfg = parent_cfg or ZWObject()
    dcfg = default_cfg or {}
    ncfg = new_cfg or {}
    pmcfg = param_cfg or {}
    # Apply layers lowest-to-highest precedence; later calls overwrite.
    pcfg = extend_attrs(pcfg, dcfg)
    pcfg = extend_attrs(pcfg, ncfg)
    pcfg = extend_attrs(pcfg, pmcfg)
    def change_nest_dict_to_obj(o):
        # Recursively replace dict-valued attributes with ZWObject trees.
        attrs = dir(o)
        attrs = [a for a in attrs if not a.startswith('_')]
        for attr in attrs:
            val = getattr(o, attr)
            if isinstance(val, dict):
                new_val = dict2obj(val)
                new_val = change_nest_dict_to_obj(new_val)
                setattr(o, attr, new_val)
        return o
    change_nest_dict_to_obj(pcfg)
    return pcfg
def list_intersection(a, b, ordered=False):
    """Intersect two lists.

    ordered=True compares position-wise (elements equal at the same index);
    ordered=False returns the set intersection as a list (order unspecified).
    """
    if ordered:
        return [x for x, y in zip(a, b) if x == y]
    return list(set(a).intersection(b))  # choose smaller to a or b?
def list_split(arr, num):
    '''Split arr into (up to) num contiguous parts of near-equal size.

    Returns a list of slices; an empty arr yields []. BUG FIX: the previous
    chunk size int(len/num) + 1 produced fewer than num parts whenever
    len(arr) divided evenly by num (e.g. 6 items / 3 parts gave two chunks
    of 3); ceiling division yields the requested part count.
    '''
    arrlen = len(arr)
    # Ceiling division; clamp to >= 1 so range() never receives step 0.
    step = max(1, -(-arrlen // num))
    return [arr[i:i + step] for i in range(0, arrlen, step)]
def list_uniqify(arr):
    '''Remove case-insensitive duplicates from arr, keeping first-seen order.

    Returned items are title-cased. Derived from
    http://www.peterbe.com/plog/uniqifiers-benchmark
    '''
    seen = set()
    result = []
    for item in arr:
        key = item.lower()
        if key not in seen:
            seen.add(key)
            result.append(item.title())
    return result
def list_compare(a, b):
    """Return True when a and b hold the same elements with the same
    multiplicities, regardless of order (multiset equality)."""
    return collections.Counter(a) == collections.Counter(b)
|
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import seaborn as sns
from scipy.stats import norm
from sklearn.model_selection import train_test_split
# Set seeds
# Seed both NumPy and TensorFlow RNGs so data generation and training are
# reproducible between runs.
the_meaning_of_life = 42
np.random.seed(the_meaning_of_life)
tf.set_random_seed(the_meaning_of_life)
# Create a toy dataset
def build_toy_dataset(N):
    """Generate the inverted-sine toy regression set and split train/test.

    y is uniform on [-10.5, 10.5]; x = 7*sin(0.75*y) + 0.5*y + unit Gaussian
    noise, so the x -> y mapping is multi-valued. Returns
    (x_train, x_test, y_train, y_test) as (n, 1) arrays.
    """
    y_data = np.random.uniform(-10.5, 10.5, N)
    noise = np.random.normal(size=N)  # random noise
    x_data = np.sin(0.75 * y_data) * 7.0 + y_data * 0.5 + noise * 1.0
    x_data = x_data.reshape((N, 1))
    y_data = y_data.reshape((N, 1))
    return train_test_split(x_data, y_data, random_state=the_meaning_of_life)
points = 5000 # number of data points
features = 1 # number of input features
mixture_components = 20 # number of mixture components
layer_size = 15 # units per hidden dense layer
x_train, x_test, y_train, y_test = build_toy_dataset(points)
print("Size of features in training data: {}".format(x_train.shape))
print("Size of output in training data: {}".format(y_train.shape))
print("Size of features in test data: {}".format(x_test.shape))
print("Size of output in test data: {}".format(y_test.shape))
# Visual sanity check of the generated training data.
plt.plot(x_train, y_train, 'or', mew=0, ms=3, alpha=0.2)
plt.title('Training data')
plt.show()
# Setup our tensorflow model
# Define some placeholders for data
x_placeholder = tf.placeholder(tf.float32, [None, features])
y_placeholder = tf.placeholder(tf.float32, [None, features])
# Setup our layers
hidden_layer_1 = tf.layers.dense(x_placeholder, layer_size, activation=tf.nn.relu)
hidden_layer_2 = tf.layers.dense(hidden_layer_1, layer_size, activation=tf.nn.relu)
# Mixture density heads: one mean per component, standard deviations kept
# positive via exp, and mixture weights normalized via softmax.
means = tf.layers.dense(hidden_layer_2, mixture_components, activation=None)
std_deviations = tf.layers.dense(hidden_layer_2, mixture_components, activation=tf.exp)
mixture_weights = tf.layers.dense(hidden_layer_2, mixture_components, activation=tf.nn.softmax)
# Define a normal distribution
oneDivSqrtTwoPI = 1 / np.sqrt(2*np.pi) # Gaussian pdf normalization constant
def tf_mixture_normal(a_point, my_means, my_std_deviations):
    """Gaussian pdf of a_point under each component, in TensorFlow ops."""
    inv_sigma = tf.reciprocal(my_std_deviations)
    z = tf.multiply(tf.subtract(a_point, my_means), inv_sigma)
    exponent = -tf.square(z) / 2
    return tf.multiply(tf.exp(exponent), inv_sigma) * oneDivSqrtTwoPI
def get_loss_func(a_point, my_weights, my_means, my_std_deviations):
    """Mean negative log-likelihood of a_point under the weighted mixture."""
    # Weight each component's density, sum over components, then average
    # the negative log over the batch.
    weighted = tf.multiply(
        tf_mixture_normal(a_point, my_means, my_std_deviations), my_weights)
    total = tf.reduce_sum(weighted, 1, keepdims=True)
    return tf.reduce_mean(-tf.log(total))
# Setup de-facto pointers to loss & training functions
loss_func = get_loss_func(y_placeholder, mixture_weights, means, std_deviations)
train_func = tf.train.AdamOptimizer().minimize(loss_func)
# Session parameters
n_epochs = 2000
loss = np.zeros(n_epochs) # per-epoch training loss history
# Run the session a few times
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(n_epochs):
    # Setup a feed dictionary of data
    cookies = {x_placeholder: x_train, y_placeholder: y_train}
    # Feed the session some cookies (...aka data)
    sess.run(train_func, feed_dict=cookies)
    loss[i] = sess.run(loss_func, feed_dict=cookies)
    # Update the user on progress
    print('Epoch = {}, loss = {}'.format(i, loss[i]))
# Plot the recorded loss history.
plt.figure()
plt.plot(np.arange(0, n_epochs), loss, 'r-')
plt.title('Loss function evolution')
plt.show()
# todo: add random distribution sampling
# Define some functions to pull points from the distribution
def generate_points(my_x_test, my_weights, my_means, my_std_deviations):
    """Draw one random sample per test point from the predicted mixture.

    A mixture component is chosen per point by inverse-transform sampling on
    the mixture weights (uniform deviate against the cumulative weight sum),
    then a value is drawn from that component's normal distribution. Sampling
    (rather than taking the mode) is used because the model is fit to data
    with intrinsic scatter.

    my_x_test: array of test inputs (only its size is used).
    my_weights, my_means, my_std_deviations: (n_points, n_components) arrays.
    Returns a 1-D array of n_points sampled values.
    """
    n_test_points = my_x_test.size
    # Pick out some uniform deviates
    mixtures_to_use = np.random.rand(n_test_points).reshape(n_test_points, 1)
    # Create a cumulative sum of weights
    my_weights_sum = np.cumsum(my_weights, axis=1)
    # Find the first argument that's greater than the uniform deviate (since
    # np.argmax stops at the first instance)
    random_weights_indexes = np.argmax(np.greater(my_weights_sum, mixtures_to_use), axis=1)
    # Grab the selected component's mean and standard deviation per point.
    random_means = my_means[np.arange(0, n_test_points), random_weights_indexes]
    random_std_deviations = my_std_deviations[np.arange(0, n_test_points), random_weights_indexes]
    # (BUG FIX: removed leftover debug prints of the index/mean shapes.)
    # Use these parameters to make some random numbers that are normal distributed
    return np.random.normal(loc=random_means, scale=random_std_deviations)
# Get the stuff with tensorflow
# Evaluate the trained mixture parameters on the held-out test inputs.
weights_test = sess.run(mixture_weights, feed_dict={x_placeholder: x_test})
means_test = sess.run(means, feed_dict={x_placeholder: x_test})
std_deviations_test = sess.run(std_deviations, feed_dict={x_placeholder: x_test})
# Make some points
y_test_random = generate_points(x_test, weights_test, means_test, std_deviations_test)
# Overlay sampled predictions on the held-out data for visual comparison.
plt.figure()
plt.plot(x_test, y_test, 'or', mew=0, ms=3, alpha=0.5, label='Training data')
plt.plot(x_test, y_test_random, 'ob', mew=0, ms=3, alpha=0.5, label='Predictions')
plt.title('Network prediction vs training data')
plt.legend(fancybox=True)
plt.ylim(-25, 25)
plt.show()
sess.close()
|
<reponame>GarimaVishvakarma/intel-chroma
from chroma_core.services.syslog.parser import admin_client_eviction_handler, client_connection_handler, server_security_flavor_handler, client_eviction_handler
from chroma_core.models.event import ClientConnectEvent
from tests.unit.chroma_core.helpers import synthetic_host
from tests.unit.chroma_core.helpers import load_default_profile
from tests.unit.lib.iml_unit_test_case import IMLUnitTestCase
# Canned Lustre syslog lines, keyed by the parser handler that is expected
# to consume them.  Each entry pairs the raw message with the lustre_pid
# embedded in it so the tests below can verify the handler's extraction.
examples = {
    # target_handle_connect() connection messages (MDT, OST and MGS targets)
    client_connection_handler: [
        {
            "lustre_pid": 5629,
            "message": " Lustre: 5629:0:(ldlm_lib.c:877:target_handle_connect()) lustre-MDT0000: connection from 26959b68-1208-1fca-1f07-da2dc872c55f@192.168.122.218@tcp t0 exp 0000000000000000 cur 1317994929 last 0"
        },
        {
            "lustre_pid": 27559,
            "message": " Lustre: 27559:0:(ldlm_lib.c:871:target_handle_connect()) lustre-OST0001: connection from 26959b68-1208-1fca-1f07-da2dc872c55f@192.168.122.218@tcp t0 exp 0000000000000000 cur 1317994930 last 0"
        },
        {
            "lustre_pid": 9150,
            "message": " Lustre: 9150:0:(ldlm_lib.c:871:target_handle_connect()) lustre-OST0000: connection from 26959b68-1208-1fca-1f07-da2dc872c55f@192.168.122.218@tcp t0 exp 0000000000000000 cur 1317994930 last 0"
        },
        {
            "lustre_pid": 31793,
            "message": " Lustre: 31793:0:(ldlm_lib.c:877:target_handle_connect()) MGS: connection from e5232e74-1e61-fad1-b59b-6e4a7d674016@192.168.122.218@tcp t0 exp 0000000000000000 cur 1317994928 last 0"
        }
    ],
    # obd_export_evict_by_uuid() administrative eviction message
    admin_client_eviction_handler: [
        {
            # NOTE(review): "adminstrative" is part of the canned log text --
            # leave it misspelled so it matches the message format under test.
            'message': " Lustre: 2689:0:(genops.c:1379:obd_export_evict_by_uuid()) lustre-OST0001: evicting 26959b68-1208-1fca-1f07-da2dc872c55f at adminstrative request",
            'lustre_pid': 2689
        }
    ],
    # waiting_locks_callback() lock-timeout eviction messages
    client_eviction_handler: [
        {
            # NOTE(review): the trailing "'" inside this message looks like a
            # copy/paste artefact -- confirm against a real log line.
            'message': " LustreError: 0:0:(ldlm_lockd.c:356:waiting_locks_callback()) ### lock callback timer expired after 101s: evicting client at 0@lo ns: mdt-ffff8801cd5be000 lock: ffff880126f8f480/0xe99a593b682aed45 lrc: 3/0,0 mode: PR/PR res: 8589935876/10593 bits 0x3 rrc: 2 type: IBT flags: 0x4000020 remote: 0xe99a593b682aecea expref: 14 pid: 3636 timeout: 4389324308'",
            'lustre_pid': 3636
        },
        {
            'message': " LustreError: 0:0:(ldlm_lockd.c:356:waiting_locks_callback()) ### lock callback timer expired after 151s: evicting client at 10.10.6.127@tcp ns: mdt-ffff880027554000 lock: ffff8800345b9480/0x7e9e6dc241f05651 lrc: 3/0,0 mode: PR/PR res: 8589935619/19678 bits 0x3 rrc: 2 type: IBT flags: 0x4000020 remote: 0xebc1380d8b532fd7 expref: 5104 pid: 23056 timeout: 4313115550",
            'lustre_pid': 23056
        }
    ]
}
class TestHandlers(IMLUnitTestCase):
    """Exercise the syslog parser handlers against canned Lustre log lines."""

    def setUp(self):
        super(TestHandlers, self).setUp()
        load_default_profile()
        self.host = synthetic_host('myaddress')

    def _check_pid_recorded(self, handler):
        # Feed every canned message for *handler* through it and verify that
        # the newest ClientConnectEvent carries the expected lustre_pid.
        for sample in examples[handler]:
            handler(sample['message'], self.host)
            latest = ClientConnectEvent.objects.latest('id')
            self.assertEqual(latest.lustre_pid, sample['lustre_pid'])

    def test_server_security_flavor_handler(self):
        security_samples = [
            {'message': " Lustre: 5629:0:(sec.c:1474:sptlrpc_import_sec_adapt()) import lustre-MDT0000->NET_0x20000c0a87ada_UUID netid 20000: select flavor null"},
            {'message': "Lustre: 20380:0:(sec.c:1474:sptlrpc_import_sec_adapt()) import MGC192.168.122.105@tcp->MGC192.168.122.105@tcp_0 netid 20000: select flavor null"},
        ]
        # These will not create events, but should also not raise exceptions
        for sample in security_samples:
            server_security_flavor_handler(sample['message'], None)
        # TODO: test doing a client connection and then one of these, to see it get correlated

    def test_client_connection_handler(self):
        self._check_pid_recorded(client_connection_handler)

    def test_admin_client_eviction_handler(self):
        self._check_pid_recorded(admin_client_eviction_handler)

    def test_client_eviction_handler(self):
        self._check_pid_recorded(client_eviction_handler)
|
<gh_stars>1000+
"""SSD model builder
Utilities for building network layers are also provided
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from tensorflow.keras.layers import Activation, Dense, Input
from tensorflow.keras.layers import Conv2D, Flatten
from tensorflow.keras.layers import BatchNormalization, Concatenate
from tensorflow.keras.layers import ELU, MaxPooling2D, Reshape
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
import numpy as np
def conv2d(inputs, filters=32, kernel_size=3, strides=1, name=None):
    """Apply a 'same'-padded Conv2D with He-normal kernel init to *inputs*.

    Arguments:
        inputs (tensor): Input feature map
        filters (int): Number of output filters
        kernel_size (int): Convolution kernel size
        strides (int): Convolution stride
        name (str): Optional layer name

    Returns:
        tensor: The convolved feature map
    """
    return Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  kernel_initializer='he_normal',
                  name=name,
                  padding='same')(inputs)
def conv_layer(inputs, filters=32, kernel_size=3, strides=1,
               use_maxpool=True, postfix=None, activation=None):
    """Conv2D -> BatchNorm -> ELU (-> optional MaxPooling2D) building block.

    Layer names are derived by appending *postfix* to 'conv', 'bn', 'elu'
    and 'pool'.  *activation* is accepted for interface compatibility but
    is not used.
    """
    features = conv2d(inputs,
                      filters=filters,
                      kernel_size=kernel_size,
                      strides=strides,
                      name='conv' + postfix)
    features = BatchNormalization(name="bn" + postfix)(features)
    features = ELU(name='elu' + postfix)(features)
    if not use_maxpool:
        return features
    return MaxPooling2D(name='pool' + postfix)(features)
def build_ssd(input_shape,
              backbone,
              n_layers=4,
              n_classes=4,
              aspect_ratios=(1, 2, 0.5)):
    """Build an SSD model given a backbone.

    Arguments:
        input_shape (list): Input image shape
        backbone (model): Keras backbone model; its output(s) are used as
            multi-scale feature maps
        n_layers (int): Number of feature-map scales in the SSD head
        n_classes (int): Number of object classes
        aspect_ratios (tuple): Anchor box aspect ratios

    Returns:
        n_anchors (int): Number of anchor boxes per feature map point
        feature_shapes (list): (h, w, c) shape of each SSD head feature map
        model (Keras model): SSD model
    """
    # One extra (default) anchor per point beyond the listed aspect ratios
    n_anchors = len(aspect_ratios) + 1
    inputs = Input(shape=input_shape)
    # backbone returns a single tensor when n_layers == 1, else a list
    base_outputs = backbone(inputs)
    feature_shapes = []
    out_cls = []
    out_off = []
    for i in range(n_layers):
        # Each conv layer from the backbone is used as a feature map for
        # class and offset predictions (multi-scale predictions).
        conv = base_outputs if n_layers == 1 else base_outputs[i]
        name = "cls" + str(i + 1)
        classes = conv2d(conv,
                         n_anchors * n_classes,
                         kernel_size=3,
                         name=name)
        # offsets: (batch, height, width, n_anchors * 4)
        name = "off" + str(i + 1)
        offsets = conv2d(conv,
                         n_anchors * 4,
                         kernel_size=3,
                         name=name)
        shape = np.array(K.int_shape(offsets))[1:]
        feature_shapes.append(shape)
        # Reshape class predictions to (batch, h * w * n_anchors, n_classes)
        # so softmax can be applied on the last axis.
        name = "cls_res" + str(i + 1)
        classes = Reshape((-1, n_classes),
                          name=name)(classes)
        # Reshape offset predictions to (batch, h * w * n_anchors, 4) for
        # the (smooth) L1 or L2 loss computed on the last axis.
        name = "off_res" + str(i + 1)
        offsets = Reshape((-1, 4),
                          name=name)(offsets)
        # Duplicate-and-concat the offsets so the prediction aligns with the
        # ground truth tensor, which is made of ground truth offsets plus a
        # mask of the same dimension, needed during loss computation.
        offsets = [offsets, offsets]
        name = "off_cat" + str(i + 1)
        offsets = Concatenate(axis=-1,
                              name=name)(offsets)
        # collect offset prediction per scale
        out_off.append(offsets)
        name = "cls_out" + str(i + 1)
        classes = Activation('softmax',
                             name=name)(classes)
        # collect class prediction per scale
        out_cls.append(classes)
    if n_layers > 1:
        # concat all class and offset predictions from each scale
        offsets = Concatenate(axis=1,
                              name="offsets")(out_off)
        classes = Concatenate(axis=1,
                              name="classes")(out_cls)
    else:
        offsets = out_off[0]
        classes = out_cls[0]
    outputs = [classes, offsets]
    model = Model(inputs=inputs,
                  outputs=outputs,
                  name='ssd_head')
    return n_anchors, feature_shapes, model
|
<reponame>DanIulian/minigrid_rl
# AndreiN, 2019
# parts from https://github.com/lcswillems/torch-rl
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import torch_rl
from typing import Optional, Tuple
from models.utils import initialize_parameters
class Model(nn.Module, torch_rl.RecurrentACModel):
    """CNN actor-critic model for PPO with optional LSTM/GRU memory."""

    def __init__(self, cfg, obs_space, action_space, use_memory=False):
        '''Standard CNN model used for PPO

        :param cfg: A parsed config file
        :param obs_space: The dimensions of the observation space 3D vector
        :param action_space: Number of actions the agent can take
        :param use_memory: True if the agent uses LSTM/GRU
        '''
        super().__init__()
        # extract necessary info from config file
        self.memory_type = cfg.memory_type
        self.mem_size = getattr(cfg, "memory_size", 128)
        hidden_size = getattr(cfg, "hidden_size", 128)  # feature size after CNN processing
        k_sizes = getattr(cfg, "k_sizes", [5, 5, 3])  # kernel size for each layer
        s_sizes = getattr(cfg, "s_sizes", [3, 3, 1])  # stride size for each layer
        # Decide which components are enabled
        self.use_memory = use_memory
        # Three-stage conv feature extractor (batch norm left disabled)
        self.image_conv = nn.Sequential(
            nn.Conv2d(3, 16, k_sizes[0], s_sizes[0]),
            #nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.Conv2d(16, 32, k_sizes[1], s_sizes[1]),
            #nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, k_sizes[2], s_sizes[2]),
            #nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )
        print(f"OBS space {obs_space}")
        n = obs_space["image"][0]
        m = obs_space["image"][1]
        # Probe the conv stack with a dummy batch to learn the flattened
        # feature size for this observation shape.
        out_conv_size = self.image_conv(torch.rand((1, obs_space["image"][2], n, m))).size()
        out_feat_size = int(np.prod(out_conv_size))
        self.image_embedding_size = out_feat_size
        self.fc1 = nn.Sequential(
            nn.Linear(self.image_embedding_size, hidden_size),
            # nn.ReLU(inplace=True),
        )
        crt_size = hidden_size
        # Define memory (recurrent cell fed by the fc1 embedding)
        if self.use_memory:
            if self.memory_type == "LSTM":
                self.memory_rnn = nn.LSTMCell(crt_size, self.mem_size)
            else:
                self.memory_rnn = nn.GRUCell(crt_size, self.mem_size)
            crt_size = self.mem_size
        # Resize image embedding
        self.embedding_size = crt_size
        self.fc2_val = nn.Sequential(
            nn.Linear(self.embedding_size, self.mem_size),
            nn.ReLU(inplace=True),
        )
        self.fc2_act = nn.Sequential(
            nn.Linear(self.embedding_size, self.mem_size),
            nn.ReLU(inplace=True),
        )
        # Define action and value heads
        self.vf = nn.Linear(self.mem_size, 1)
        self.pd = nn.Linear(self.mem_size, action_space.n)
        # Initialize parameters correctly
        self.apply(initialize_parameters)

    @property
    def memory_size(self):
        # LSTM stores hidden and cell state concatenated in one flat tensor.
        if self.memory_type == "LSTM":
            return 2 * self.mem_size
        else:
            return self.mem_size

    @property
    def semi_memory_size(self):
        # Size of one half of the flat LSTM memory tensor.
        # NOTE(review): returns embedding_size, which equals mem_size only
        # when use_memory is True -- confirm this property is not used
        # in the memory-less configuration.
        return self.embedding_size

    def forward(self, obs, memory):
        """One step: returns (action distribution, value estimate, new memory)."""
        # NHWC observation -> NCHW layout expected by Conv2d
        x = torch.transpose(torch.transpose(obs.image, 1, 3), 2, 3).contiguous()
        x = self.image_conv(x)
        x = x.reshape(x.shape[0], -1)
        x = self.fc1(x)
        if self.use_memory:
            if self.memory_type == "LSTM":
                # Split the flat memory back into the (h, c) pair
                hidden = (memory[:, :self.semi_memory_size], memory[:, self.semi_memory_size:])
                hidden = self.memory_rnn(x, hidden)  # type: Tuple[torch.Tensor]
                embedding = hidden[0]
                memory = torch.cat(hidden, dim=1)
            else:
                hidden = memory  # type: Optional[torch.Tensor]
                hidden = self.memory_rnn(x, hidden)
                embedding = hidden
                memory = hidden
        else:
            embedding = x
        val = self.fc2_val(embedding)
        act = self.fc2_act(embedding)
        # Value function head
        vpred = self.vf(val).squeeze(1)
        # Action head
        pd = self.pd(act)
        dist = Categorical(logits=F.log_softmax(pd, dim=1))
        return dist, vpred, memory
|
<filename>energy_demand/energy_model.py
"""Energy Model
==============
The main function executing all the submodels of the energy demand model
"""
import uuid
import numpy as np
from energy_demand.geography import region
from energy_demand.geography import WeatherRegion
import energy_demand.rs_model as rs_model
import energy_demand.ss_model as ss_model
import energy_demand.is_model as is_model
import energy_demand.ts_model as ts_model
from energy_demand.profiles import load_factors as load_factors
from energy_demand.profiles import load_profile
from energy_demand.initalisations import helpers
from energy_demand.profiles import generic_shapes
'''# pylint: disable=I0011,C0321,C0301,C0103,C0325,no-member'''
class EnergyModel(object):
    """EnergyModel of a simulation yearly run

    Parameters
    ----------
    region_names : list
        Region names
    data : dict
        Main data dictionary

    Note
    ----
    - All submodels are executed here
    - All aggregation functions of the results are exectued here
    """
    def __init__(self, region_names, data):
        """Constructor

        Runs the four submodels (industry, residential, service, transport)
        in sequence and then aggregates their fuel results.
        """
        print("..start main energy demand function")
        self.curr_yr = data['sim_param']['curr_yr']
        # Non regional load profiles
        data['non_regional_profile_stock'] = self.create_load_profile_stock(data)
        # NOTE(review): each submodel run below recreates self.weather_regions
        # and self.regions, and the submodel method deletes them again to save
        # memory -- the order of these four sections is therefore load-bearing.
        # --------------------
        # Industry SubModel
        # --------------------
        self.weather_regions = self.create_weather_regions(
            data['weather_stations'], data, 'is_submodel')
        self.regions = self.create_regions(
            region_names, data, 'is_submodel')
        self.is_submodel = self.industry_submodel(
            data, data['is_all_enduses'], data['is_sectors'])
        # --------------------
        # Residential SubModel
        # --------------------
        self.weather_regions = self.create_weather_regions(
            data['weather_stations'], data, 'rs_submodel')
        self.regions = self.create_regions(
            region_names, data, 'rs_submodel')
        self.rs_submodel = self.residential_submodel(
            data, data['rs_all_enduses'])
        # --------------------
        # Service SubModel
        # --------------------
        self.weather_regions = self.create_weather_regions(
            data['weather_stations'], data, 'ss_submodel')
        self.regions = self.create_regions(
            region_names, data, 'ss_submodel')
        self.ss_submodel = self.service_submodel(
            data, data['ss_all_enduses'], data['ss_sectors'])
        # --------------------
        # Transport SubModel
        # --------------------
        self.weather_regions = self.create_weather_regions(
            data['weather_stations'], data, 'ts_submodel')
        self.regions = self.create_regions(
            region_names, data, 'ts_submodel')
        self.ts_submodel = self.other_submodels()
        # ---------------------------------------------------------------------
        # Functions to summarise data for all Regions in the EnergyModel class
        # ---------------------------------------------------------------------
        print("...summarise fuel")
        # Sum according to weekend, working day
        # Sum across all regions, all enduse and sectors sum_reg
        self.sum_uk_fueltypes_enduses_y = self.sum_reg('fuel_yh', data['nr_of_fueltypes'], [self.ss_submodel, self.rs_submodel, self.is_submodel, self.ts_submodel], 'sum', 'non_peak')
        self.all_submodels_sum_uk_specfuelype_enduses_y = self.sum_reg('fuel_yh', data['nr_of_fueltypes'], [self.ss_submodel, self.rs_submodel, self.is_submodel, self.ts_submodel], 'no_sum', 'non_peak')
        self.rs_sum_uk_specfuelype_enduses_y = self.sum_reg('fuel_yh', data['nr_of_fueltypes'], [self.rs_submodel], 'no_sum', 'non_peak')
        self.ss_sum_uk_specfuelype_enduses_y = self.sum_reg('fuel_yh', data['nr_of_fueltypes'], [self.ss_submodel], 'no_sum', 'non_peak')
        self.is_sum_uk_specfuelype_enduses_y = self.sum_reg('fuel_yh', data['nr_of_fueltypes'], [self.is_submodel], 'no_sum', 'non_peak')
        self.ts_sum_uk_specfuelype_enduses_y = self.sum_reg('fuel_yh', data['nr_of_fueltypes'], [self.ts_submodel], 'no_sum', 'non_peak')
        # NOTE(review): the two attributes below duplicate the rs/ss
        # 'no_sum'/'non_peak' sums above -- possibly redundant; confirm callers.
        self.rs_tot_fuels_all_enduses_y = self.sum_reg('fuel_yh', data['nr_of_fueltypes'], [self.rs_submodel], 'no_sum', 'non_peak')
        self.ss_tot_fuels_all_enduses_y = self.sum_reg('fuel_yh', data['nr_of_fueltypes'], [self.ss_submodel], 'no_sum', 'non_peak')
        # Sum across all regions for enduse
        self.all_models_tot_fuel_y_enduse_specific_h = self.sum_enduse_all_regions('fuel_yh', [self.rs_submodel, self.ss_submodel, self.is_submodel, self.ts_submodel])
        self.rs_tot_fuel_y_enduse_specific_h = self.sum_enduse_all_regions('fuel_yh', [self.rs_submodel])
        self.ss_tot_fuel_enduse_specific_h = self.sum_enduse_all_regions('fuel_yh', [self.ss_submodel])
        # Sum across all regions, enduses for peak hour
        # NEW
        self.peak_all_models_all_enduses_fueltype = self.sum_reg('fuel_peak_dh', data['nr_of_fueltypes'], [self.rs_submodel, self.ss_submodel, self.is_submodel, self.ts_submodel], 'no_sum', 'peak_dh')
        self.rs_tot_fuel_y_max_allenduse_fueltyp = self.sum_reg('fuel_peak_h', data['nr_of_fueltypes'], [self.rs_submodel], 'no_sum', 'peak_h')
        self.ss_tot_fuel_y_max_allenduse_fueltyp = self.sum_reg('fuel_peak_h', data['nr_of_fueltypes'], [self.ss_submodel], 'no_sum', 'peak_h')
        # Functions for load calculations
        # ---------------------------
        self.rs_fuels_peak_h = self.sum_reg('fuel_peak_h', data['nr_of_fueltypes'], [self.rs_submodel], 'no_sum', 'peak_h')
        self.ss_fuels_peak_h = self.sum_reg('fuel_peak_h', data['nr_of_fueltypes'], [self.ss_submodel], 'no_sum', 'peak_h')
        # Across all enduses calc_load_factor_h
        self.rs_reg_load_factor_h = load_factors.calc_load_factor_h(data, self.rs_tot_fuels_all_enduses_y, self.rs_fuels_peak_h)
        self.ss_reg_load_factor_h = load_factors.calc_load_factor_h(data, self.ss_tot_fuels_all_enduses_y, self.ss_fuels_peak_h)
        # SUMMARISE FOR EVERY REGION AND ENDUSE
        #self.tot_country_fuel_y_load_max_h = self.peak_loads_per_fueltype(data, self.regions, 'rs_reg_load_factor_h')

    @classmethod
    def create_load_profile_stock(cls, data):
        """Assign load profiles which are the same for all regions
        ``non_regional_load_profiles``

        Parameters
        ----------
        data : dict
            Data container

        Returns
        -------
        non_regional_profile_stock : object
            Load profile stock with non regional dependent load profiles
        """
        non_regional_profile_stock = load_profile.LoadProfileStock("non_regional_load_profiles")
        # Lighting (residential)
        non_regional_profile_stock.add_load_profile(
            unique_identifier=uuid.uuid4(),
            technologies=data['assumptions']['technology_list']['rs_lighting'],
            enduses=['rs_lighting'],
            shape_yd=data['rs_shapes_yd']['rs_lighting']['shape_non_peak_yd'],
            shape_yh=data['rs_shapes_dh']['rs_lighting']['shape_non_peak_y_dh'] * data['rs_shapes_yd']['rs_lighting']['shape_non_peak_yd'][:, np.newaxis],
            enduse_peak_yd_factor=data['rs_shapes_yd']['rs_lighting']['shape_peak_yd_factor'],
            shape_peak_dh=data['rs_shapes_dh']['rs_lighting']['shape_peak_dh']
        )
        # rs_cold (residential refrigeration)
        non_regional_profile_stock.add_load_profile(
            unique_identifier=uuid.uuid4(),
            technologies=data['assumptions']['technology_list']['rs_cold'],
            enduses=['rs_cold'],
            shape_yd=data['rs_shapes_yd']['rs_cold']['shape_non_peak_yd'],
            shape_yh=data['rs_shapes_dh']['rs_cold']['shape_non_peak_y_dh'] * data['rs_shapes_yd']['rs_cold']['shape_non_peak_yd'][:, np.newaxis],
            enduse_peak_yd_factor=data['rs_shapes_yd']['rs_cold']['shape_peak_yd_factor'],
            shape_peak_dh=data['rs_shapes_dh']['rs_cold']['shape_peak_dh']
        )
        # rs_cooking
        non_regional_profile_stock.add_load_profile(
            unique_identifier=uuid.uuid4(),
            technologies=data['assumptions']['technology_list']['rs_cooking'],
            enduses=['rs_cooking'],
            shape_yd=data['rs_shapes_yd']['rs_cooking']['shape_non_peak_yd'],
            shape_yh=data['rs_shapes_dh']['rs_cooking']['shape_non_peak_y_dh'] * data['rs_shapes_yd']['rs_cooking']['shape_non_peak_yd'][:, np.newaxis],
            enduse_peak_yd_factor=data['rs_shapes_yd']['rs_cooking']['shape_peak_yd_factor'],
            shape_peak_dh=data['rs_shapes_dh']['rs_cooking']['shape_peak_dh']
        )
        # rs_wet
        non_regional_profile_stock.add_load_profile(
            unique_identifier=uuid.uuid4(),
            technologies=data['assumptions']['technology_list']['rs_wet'],
            enduses=['rs_wet'],
            shape_yd=data['rs_shapes_yd']['rs_wet']['shape_non_peak_yd'],
            shape_yh=data['rs_shapes_dh']['rs_wet']['shape_non_peak_y_dh'] * data['rs_shapes_yd']['rs_wet']['shape_non_peak_yd'][:, np.newaxis],
            enduse_peak_yd_factor=data['rs_shapes_yd']['rs_wet']['shape_peak_yd_factor'],
            shape_peak_dh=data['rs_shapes_dh']['rs_wet']['shape_peak_dh']
        )
        # -- dummy rs technologies (apply enduse specific shape)
        for enduse in data['assumptions']['rs_dummy_enduses']:
            tech_list = helpers.get_nested_dict_key(data['assumptions']['rs_fuel_tech_p_by'][enduse])
            non_regional_profile_stock.add_load_profile(
                unique_identifier=uuid.uuid4(),
                technologies=tech_list,
                enduses=[enduse],
                shape_yd=data['rs_shapes_yd'][enduse]['shape_non_peak_yd'],
                shape_yh=data['rs_shapes_dh'][enduse]['shape_non_peak_y_dh'] * data['rs_shapes_yd'][enduse]['shape_non_peak_yd'][:, np.newaxis],
                enduse_peak_yd_factor=data['rs_shapes_yd'][enduse]['shape_peak_yd_factor'],
                shape_peak_dh=data['rs_shapes_dh'][enduse]['shape_peak_dh']
            )
        # - dummy ss technologies (per sector)
        for enduse in data['assumptions']['ss_dummy_enduses']:
            tech_list = helpers.get_nested_dict_key(data['assumptions']['ss_fuel_tech_p_by'][enduse])
            for sector in data['ss_sectors']:
                non_regional_profile_stock.add_load_profile(
                    unique_identifier=uuid.uuid4(),
                    technologies=tech_list,
                    enduses=[enduse],
                    sectors=[sector],
                    shape_yd=data['ss_shapes_yd'][sector][enduse]['shape_non_peak_yd'],
                    shape_yh=data['ss_shapes_dh'][sector][enduse]['shape_non_peak_y_dh'] * data['ss_shapes_yd'][sector][enduse]['shape_non_peak_yd'][:, np.newaxis],
                    enduse_peak_yd_factor=data['ss_shapes_yd'][sector][enduse]['shape_peak_yd_factor'],
                    shape_peak_dh=data['ss_shapes_dh'][sector][enduse]['shape_peak_dh']
                )
        # dummy is - Flat load profile
        shape_peak_dh, _, shape_peak_yd_factor, shape_non_peak_yd, shape_non_peak_yh = generic_shapes.generic_flat_shape()
        for enduse in data['assumptions']['is_dummy_enduses']:
            tech_list = helpers.get_nested_dict_key(data['assumptions']['is_fuel_tech_p_by'][enduse])
            for sector in data['is_sectors']:
                non_regional_profile_stock.add_load_profile(
                    unique_identifier=uuid.uuid4(),
                    technologies=tech_list,
                    enduses=[enduse],
                    sectors=[sector],
                    shape_yd=shape_non_peak_yd,
                    shape_yh=shape_non_peak_yh,
                    enduse_peak_yd_factor=shape_peak_yd_factor,
                    shape_peak_dh=shape_peak_dh
                )
        return non_regional_profile_stock

    def get_regional_yh(self, nr_of_fueltypes, region_name):
        """Get yh fuel for all fueltype for a specific region of all submodels

        Parameters
        ----------
        region_name : str
            Name of region to get attributes
        nr_of_fueltypes : int
            Number of fueltypes

        Return
        ------
        region_fuel_yh : array
            Summed fuel of a region

        Note
        ----
        - Summing function
        """
        region_fuel_yh = self.sum_reg(
            'fuel_yh',
            nr_of_fueltypes,
            [self.ss_submodel, self.rs_submodel, self.is_submodel, self.ts_submodel],
            'no_sum',
            'non_peak',
            region_name,
            )
        return region_fuel_yh

    def get_fuel_region_all_models_yh(self, nr_of_fueltypes, region_name_to_get, sector_models, attribute_to_get):
        """Summarise fuel yh for a certain region

        Parameters
        ----------
        nr_of_fueltypes : int
            Number of fueltypes
        region_name_to_get : str
            Name of region to read out
        sector_models : list
            Attribute names of submodel runs (looked up via getattr)
        attribute_to_get : str
            Attribute to get

        Note
        ----
        - Summing function
        """
        tot_fuels_all_enduse_yh = np.zeros((nr_of_fueltypes, 365, 24))
        for sector_model in sector_models:
            sector_model_objects = getattr(self, sector_model)
            for model_object in sector_model_objects:
                if model_object.region_name == region_name_to_get:
                    tot_fuels_all_enduse_yh += self.get_fuels_yh(model_object, attribute_to_get)
        return tot_fuels_all_enduse_yh

    def other_submodels(self):
        """Other submodel

        Return
        ------
        submodules : list
            Submodule objects

        Note
        ----
        - The ``regions`` and ``weather_regions`` gets deleted to save memory
        """
        submodules = []
        # Iterate regions, sectors and enduses
        for region_object in self.regions:
            # Create submodule
            submodule = ts_model.OtherModel(
                region_object,
                'generic_transport_enduse'
            )
            # Add to list
            submodules.append(submodule)
        # Free memory: regions are rebuilt before the next submodel run
        del self.regions, self.weather_regions
        print("...finished other submodel")
        return submodules

    def industry_submodel(self, data, enduses, sectors):
        """Industry subsector model

        Parameters
        ----------
        data : dict
            Data containter
        enduses : list
            Enduses of industry submodel
        sectors : list
            Sectors of industry submodel

        Return
        ------
        submodules : list
            Submodule objects

        Note
        ----
        - The ``regions`` and ``weather_regions`` gets deleted to save memory
        """
        print("..industry submodel start")
        submodules = []
        # Iterate regions, sectors and enduses
        for region_object in self.regions:
            for sector in sectors:
                for enduse in enduses:
                    # Create submodule
                    submodule = is_model.IndustryModel(
                        data,
                        region_object,
                        enduse,
                        sector=sector
                    )
                    # Add to list
                    submodules.append(submodule)
        # Free memory: regions are rebuilt before the next submodel run
        del self.regions, self.weather_regions
        return submodules

    def residential_submodel(self, data, enduses, sectors=['dummy_sector']):
        """Create the residential submodules (per enduse and region) and add them to list

        Parameters
        ----------
        data : dict
            Data container
        enduses : list
            All residential enduses
        sectors : dict, default=['dummy_sector']
            Sectors

        Returns
        -------
        submodule_list : list
            List with submodules

        Note
        ----
        - The ``regions`` and ``weather_regions`` gets deleted to save memory
        - NOTE(review): mutable default argument for ``sectors`` -- harmless
          here since it is never mutated, but worth replacing with None.
        """
        print("..residential submodel start")
        submodule_list = []
        # Iterate regions and enduses
        for region_object in self.regions:
            for sector in sectors:
                for enduse in enduses:
                    # Create submodule
                    submodel_object = rs_model.ResidentialModel(
                        data,
                        region_object,
                        enduse,
                        sector
                    )
                    submodule_list.append(submodel_object)
        # To save on memory
        del self.regions, self.weather_regions
        return submodule_list

    def service_submodel(self, data, enduses, sectors):
        """Create the service submodules per enduse, sector and region and add to list

        Parameters
        ----------
        data : dict
            Data container
        enduses : list
            All residential enduses
        sectors : list
            Service sectors

        Returns
        -------
        submodule_list : list
            List with submodules

        Note
        ----
        - The ``regions`` and ``weather_regions`` gets deleted to save memory
        """
        print("..service submodel start")
        _scrap_cnt = 0
        submodule_list = []
        # Iterate regions, sectors and enduses
        for region_object in self.regions:
            for sector in sectors:
                for enduse in enduses:
                    # Create submodule
                    submodule = ss_model.ServiceModel(
                        data,
                        region_object,
                        enduse,
                        sector
                    )
                    # Add to list
                    submodule_list.append(submodule)
                    _scrap_cnt += 1
                    print(" ...running service model {} {}".format(data['sim_param']['curr_yr'], 100.0 / (len(self.regions) * len(sectors) * len(enduses)) * _scrap_cnt))
        # To save on memory
        del self.regions, self.weather_regions
        return submodule_list

    @classmethod
    def create_weather_regions(cls, weather_regions, data, model_type):
        """Create all weather regions

        TODO:
        -stocks
        -load profiles

        Parameters
        ----------
        weather_regions : list
            The names of the Weather Regions
        data : dict
            Data container
        model_type : str
            Submodel type the weather regions are built for
        """
        weather_region_objects = []
        for weather_region_name in weather_regions:
            region_object = WeatherRegion.WeatherRegion(
                weather_region_name=weather_region_name,
                data=data,
                modeltype=model_type
            )
            weather_region_objects.append(region_object)
        return weather_region_objects

    def create_regions(self, region_names, data, submodel_type):
        """Create all regions and add them in a list

        Parameters
        ----------
        region_names : list
            Regions
        data : dict
            Data container
        submodel_type : str
            Type of submodel [rs_submodel, ss_submodel, ...]
        """
        regions = []
        # Iterate all regions
        for region_name in region_names:
            print("...creating region: '{}' {}".format(region_name, submodel_type))
            # Generate region object
            region_object = region.Region(
                region_name=region_name,
                data=data,
                submodel_type=submodel_type,
                weather_regions=self.weather_regions
            )
            # Add region to list
            regions.append(region_object)
        return regions

    def sum_enduse_all_regions(self, attribute_to_get, sector_models):
        """Summarise an enduse attribute across all regions

        Parameters
        ----------
        attribute_to_get : string
            Enduse attribute to summarise
        sector_models : List
            List with sector models

        Return
        ------
        enduse_dict : dict
            Summarise enduses across all regions
        """
        enduse_dict = {}
        for sector_model in sector_models:
            for model_object in sector_model:
                if model_object.enduse not in enduse_dict:
                    enduse_dict[model_object.enduse] = 0
                # Add fuel (flat load shape substituted where applicable)
                enduse_dict[model_object.enduse] += self.get_fuels_yh(
                    model_object, attribute_to_get)
        return enduse_dict

    def sum_reg(self, attribute_to_get, nr_of_fueltypes, sector_models, crit, crit2, region_name=False):
        """Collect hourly data from all regions and sum across all fuel types and enduses

        Parameters
        ----------
        attribute_to_get : str
            Attribue to summarise
        nr_of_fueltypes : int
            Number of fueltypes
        sector_models : list
            Sector models to summarise
        crit : str
            'sum' to collapse to a scalar, 'no_sum' to keep the array
        crit2 : str
            Shape selector: 'peak_h', 'non_peak' or 'peak_dh'
        region_name : str
            Name of region (False sums over all regions)

        Returns
        -------
        fuels : array
            Summarised fuels
        """
        if crit2 == 'peak_h':
            fuels = np.zeros((nr_of_fueltypes))
        elif crit2 == 'non_peak':
            fuels = np.zeros((nr_of_fueltypes, 365, 24))
        elif crit2 == 'peak_dh':
            fuels = np.zeros((nr_of_fueltypes, 24))
        # Iterate all submodel
        for sector_model in sector_models:
            for model_object in sector_model:
                # Select specific region
                if region_name:
                    if model_object.region_name == region_name:
                        fuels += self.get_fuels_yh(model_object, attribute_to_get)
                else:
                    fuels += self.get_fuels_yh(model_object, attribute_to_get)
        # Criteria if fuel is summed or not
        # NOTE(review): the 'no_sum' branch is a no-op self-assignment.
        if crit == 'no_sum':
            fuels = fuels
        elif crit == 'sum':
            fuels = np.sum(fuels)
        return fuels

    def get_fuels_yh(self, model_object, attribute_to_get):
        """Assign yh shape for enduses with flat load profiles

        Parameters
        ----------
        model_object : dict
            Object of submodel run
        attribute_to_get : str
            Attribute to read out

        Returns
        -------
        fuels : array
            Fuels with flat load profile

        Note
        -----
        - For enduses where 'crit_flat_profile' in Enduse Class is True
          a flat load profile is generated. Otherwise, the yh as calculated
          for each enduse is used
        """
        if model_object.enduse_object.crit_flat_profile:
            # Yearly fuel
            fuels_reg_y = model_object.enduse_object.fuel_y
            if attribute_to_get == 'fuel_peak_dh':
                # Flat shape
                shape_peak_dh = np.full((24), 1/24)
                # Because flat shape, the peak day carries 1/365 of the year
                fuels_reg_peak = fuels_reg_y * (1/365)
                fuels = fuels_reg_peak[:, np.newaxis] * shape_peak_dh
            elif attribute_to_get == 'shape_non_peak_y_dh':
                # Flat shape
                shape_non_peak_y_dh = np.full((365, 24), (1.0/24))
                fuels = fuels_reg_y * shape_non_peak_y_dh
            elif attribute_to_get == 'shape_non_peak_yd':
                # Flat shape
                shape_non_peak_yd = np.ones((365)) / 365
                fuels = fuels_reg_y * shape_non_peak_yd
            elif attribute_to_get == 'fuel_yh':
                # Flat shape spread over all 8760 hours
                shape_non_peak_yh = np.full((365, 24), 1/8760)
                # NOTE(review): fueltype count taken from fuel_new_y while the
                # values come from fuel_y -- confirm both have the same length.
                fast_shape_non_peak_yh = np.zeros((model_object.enduse_object.fuel_new_y.shape[0], 365, 24))
                for fueltype, _ in enumerate(fast_shape_non_peak_yh):
                    fast_shape_non_peak_yh[fueltype] = shape_non_peak_yh
                fuels = fuels_reg_y[:, np.newaxis, np.newaxis] * fast_shape_non_peak_yh
        else:
            # If not flat shape, use yh load profile of enduse
            fuels = getattr(model_object.enduse_object, attribute_to_get)
        return fuels
|
import numpy as np
class FeudalAgent:
    """Hierarchy of CellAgents over a square grid (feudal-style RL).

    Each level above the bottom issues an abstract action that becomes the
    master action for the level below; level 0 emits primitive actions.
    """
    NUM_TOP_ACTIONS = 5      # actions available at non-bottom levels
    NUM_BOTTOM_ACTIONS = 4   # primitive actions at grid level

    def __init__(self, size, agents_per_level, make_policy, alpha, gamma):
        # size: side length of the (size x size) grid.
        # NOTE(review): agents_per_level is iterated with reversed(), so
        # hierarchy[0] is the BOTTOM level -- confirm caller ordering.
        self.size = size
        self.hierarchy = []
        self.cells_per_agent = []
        for level_idx, num_agents in enumerate(reversed(agents_per_level)):
            if level_idx == 0:
                # Bottom level: one state per grid cell, primitive actions
                num_cells = size ** 2
                num_actions = self.NUM_BOTTOM_ACTIONS
            else:
                # Higher levels act over the agents of the level below
                num_cells = len(self.hierarchy[-1])
                num_actions = self.NUM_TOP_ACTIONS
            if level_idx == len(agents_per_level) - 1:
                # Topmost level has no master above it
                num_master_actions = 1
            else:
                num_master_actions = self.NUM_TOP_ACTIONS
            assert num_cells % num_agents == 0
            cells_per_agent = num_cells // num_agents
            self.hierarchy.append([
                CellAgent(cells_per_agent, num_actions, num_master_actions, make_policy(num_actions), alpha, gamma)
                for _ in range(num_agents)
            ])
            self.cells_per_agent.append(cells_per_agent)
        # Action selection starts at the top of the hierarchy
        self.current_level = len(self.hierarchy) - 1

    def act(self, x, y, master_action=None):
        """Recursively choose an action from the current level down to level 0.

        NOTE(review): decrements self.current_level on each recursion and
        leaves it at 0; backup() is apparently responsible for walking it
        back up -- confirm the act/backup call protocol.
        """
        print("act level:", self.current_level)  # debug output
        if self.current_level == 0:
            cell = self.get_cell(self.current_level, x, y)
            if master_action is not None:
                cell.master_action = master_action
            action = cell.act(self.multiplex_state_for_cell(self.current_level, x, y))
            return action
        elif master_action is not None:
            cell = self.get_cell(self.current_level, x, y)
            # NOTE(review): redundant inner check -- master_action is already
            # known to be not None in this branch.
            if master_action is not None:
                cell.master_action = master_action
            action = cell.act(self.multiplex_state_for_cell(self.current_level, x, y))
            self.current_level -= 1
            return self.act(x, y, master_action=action)
        else:
            # Top level: no master above, so use the single dummy action 0
            cell = self.get_cell(self.current_level, x, y)
            cell.master_action = 0
            action = cell.act(self.multiplex_state_for_cell(self.current_level, x, y))
            self.current_level -= 1
            return self.act(x, y, master_action=action)

    def backup(self, current_x, current_y, next_x, next_y, action, reward):
        """Propagate a transition up the hierarchy.

        Recurses one level up whenever the move crossed the boundary of the
        parent level's cell.  NOTE(review): no learning update is performed
        here (only the commented-out meta_reward sketch) -- confirm whether
        CellAgent.learn was meant to be called.
        """
        print("backup level:", self.current_level)  # debug output
        cell = self.get_cell(self.current_level, current_x, current_y)
        master_action = cell.master_action
        # NOTE(review): get_cell_x_y(self.current_level + 1, ...) indexes
        # self.hierarchy[level]; at the top level this is out of range and
        # would raise IndexError -- confirm intended bounds.
        current_cell_x, current_cell_y = self.get_cell_x_y(self.current_level + 1, current_x, current_y)
        next_cell_x, next_cell_y = self.get_cell_x_y(self.current_level + 1, next_x, next_y)
        if current_cell_x != next_cell_x or current_cell_y != next_cell_y:
            #if master_action == 4:
            #    meta_reward = int(reward > 0)
            self.current_level = self.current_level + 1
            self.backup(current_x, current_y, next_x, next_y, action, reward)

    def get_cell_x_y(self, level, x, y):
        # Map grid coordinates to the (row, col) of the owning agent at *level*.
        length = int(np.sqrt(len(self.hierarchy[level])))
        length_per_cell = self.size // length
        level_x = x // length_per_cell
        level_y = y // length_per_cell
        return level_x, level_y

    def get_cell(self, level, x, y):
        # Return the agent at *level* responsible for grid position (x, y).
        length = int(np.sqrt(len(self.hierarchy[level])))
        length_per_cell = self.size // length
        level_x = x // length_per_cell
        level_y = y // length_per_cell
        idx = level_x * length + level_y
        return self.hierarchy[level][idx]

    def multiplex_state_for_cell(self, level, x, y):
        # Index of (x, y) within its owning agent's local patch at *level*.
        length = int(np.sqrt(len(self.hierarchy[level])))
        if level == 0:
            length_below = self.size
        else:
            length_below = int(np.sqrt(len(self.hierarchy[level - 1])))
        length_per_cell = length_below // length
        level_x = x // length_per_cell
        level_y = y // length_per_cell
        cell_x = x - level_x * length_per_cell
        cell_y = y - level_y * length_per_cell
        return cell_x * length_per_cell + cell_y
class CellAgent:
    """Tabular Q-learner conditioned on a manager's "master action".

    The Q-table is indexed as ``qs[action, master_action, state]``; the
    manager's current order (set via ``order`` or assigned directly) selects
    the Q-slice used for both acting and learning.
    """
    def __init__(self, num_states, num_actions, num_master_actions, policy, alpha, gamma):
        # One Q-value per (own action, manager order, local state) triple.
        self.qs = np.zeros((num_actions, num_master_actions, num_states))
        self.policy = policy
        self.alpha = alpha
        self.gamma = gamma
        self.master_action = None

    def act(self, state):
        """Ask the policy for an action given this state's Q-values."""
        action_values = self.qs[:, self.master_action, state]
        return self.policy.act(action_values)

    def learn(self, state, action, reward, next_state, done):
        """One tabular Q-learning update under the current master action."""
        if done:
            # Terminal transition: no bootstrapping from the next state.
            target = reward
        else:
            target = reward + self.gamma * np.max(self.qs[:, self.master_action, next_state])
        td_error = target - self.qs[action, self.master_action, state]
        self.qs[action, self.master_action, state] += self.alpha * td_error

    def order(self, master_action):
        """Record the manager's order for subsequent act/learn calls."""
        self.master_action = master_action
|
<reponame>jjwatts/gigantum-client<gh_stars>0
# Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import (Any, List, Dict, Optional)
import requests
import json
from gtmcore.container.container import ContainerOperations
from gtmcore.labbook import LabBook
from natsort import natsorted
from distutils.version import StrictVersion
from distutils.version import LooseVersion
from gtmcore.environment.packagemanager import PackageManager, PackageResult
class PipPackageManager(PackageManager):
    """Class to implement the pip package manager
    """
    def search(self, search_str: str, labbook: LabBook, username: str) -> List[str]:
        """Method to search a package manager for packages based on a string. The string can be a partial string.
        Args:
            search_str: The string to search on
            labbook: Subject LabBook
            username: username of current user
        Returns:
            list(str): The list of package names that match the search string
        """
        # NOTE(review): relies on `pip search`, which the default PyPI index
        # later disabled — confirm the target index still supports it.
        search_result = ContainerOperations.run_command(
            f'pip search {search_str}', labbook, username,
            fallback_image=self.fallback_image(labbook))
        lines = search_result.decode().splitlines()
        # First whitespace-separated token of each output line is the name.
        packages = [x.split(' ')[0] for x in lines]
        return sorted(packages)
    def list_versions(self, package_name: str, labbook: LabBook, username: str) -> List[str]:
        """Method to list all available versions of a package based on the package name
        Args:
            package_name: Name of the package to query
            labbook: Subject LabBook
            username: username of current user
        Returns:
            list(str): Version strings, newest first
        Raises:
            ValueError: if the package is not found in the index
            IOError: if the index could not be queried
        """
        url = f"https://pypi.python.org/pypi/{package_name}/json"
        result = requests.get(url)
        if result.status_code == 404:
            # Didn't find the package
            raise ValueError("Package not found in package index")
        if result.status_code != 200:
            raise IOError("Failed to query package index for package versions. Check internet connection.")
        versions = list(result.json()["releases"].keys())
        # Don't include release candidates that have been pushed to pip
        # NOTE(review): substring test also drops any version merely
        # containing "rc" anywhere in the string.
        versions = [x for x in versions if 'rc' not in x]
        try:
            # First attempt to sort by StrictVersion which enforces a standard version convention
            versions.sort(key=StrictVersion)
        except ValueError as e:
            if 'invalid version number' in str(e):
                try:
                    # If this failed, try LooseVersion, which is much more flexible, but can fail sometimes
                    versions.sort(key=LooseVersion)
                except Exception:
                    # Finally, try natural sorting the version strings if you still have a problem
                    versions = natsorted(versions, key=lambda x: x.replace('.', '~') + 'z')
            else:
                raise e
        # Present newest version first.
        versions.reverse()
        return versions
    def latest_version(self, package_name: str, labbook: LabBook, username: str) -> str:
        """Method to get the latest version string for a package
        Args:
            package_name: Name of the package to query
            labbook: Subject LabBook
            username: username of current user
        Returns:
            str: latest version string
        Raises:
            ValueError: if no versions could be retrieved
        """
        versions = self.list_versions(package_name, labbook, username)
        if versions:
            # list_versions returns newest-first.
            return versions[0]
        else:
            raise ValueError("Could not retrieve version list for provided package name")
    def latest_versions(self, package_names: List[str], labbook: LabBook, username: str) -> List[str]:
        """Method to get the latest version string for a list of packages
        Args:
            package_names: list of names of the packages to query
            labbook: Subject LabBook
            username: username of current user
        Returns:
            list: latest version strings, in the same order as package_names
        """
        return [self.latest_version(pkg, labbook, username) for pkg in package_names]
    def list_installed_packages(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
        """Method to get a list of all packages that are currently installed
        Note, this will return results for the computer/container in which it is executed. To get the properties of
        a LabBook container, a docker exec command would be needed from the Gigantum application container.
        return format is a list of dicts with the format (name: <package name>, version: <version string>)
        Returns:
            list
        """
        packages = ContainerOperations.run_command('pip list --format=json', labbook, username,
                                                   fallback_image=self.fallback_image(labbook))
        return json.loads(packages.decode())
    def list_available_updates(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
        """Method to get a list of all installed packages that could be updated and the new version string
        Note, this will return results for the computer/container in which it is executed. To get the properties of
        a LabBook container, a docker exec command would be needed from the Gigantum application container.
        return format is a list of dicts with the format
        {name: <package name>, version: <currently installed version string>, latest_version: <latest version string>}
        Returns:
            list
        """
        # `-o` limits output to outdated packages only.
        packages = ContainerOperations.run_command('pip list --format=json -o', labbook, username,
                                                   fallback_image=self.fallback_image(labbook))
        return json.loads(packages.decode())
    def validate_packages(self, package_list: List[Dict[str, str]], labbook: LabBook, username: str) \
            -> List[PackageResult]:
        """Method to validate a list of packages, and if needed fill in any missing versions
        Should check both the provided package name and version. If the version is omitted, it should be generated
        from the latest version.
        Args:
            package_list(list): A list of dictionaries of packages to validate
            labbook(str): The labbook instance
            username(str): The username for the logged in user
        Returns:
            namedtuple: namedtuple indicating if the package and version are valid
        """
        result = list()
        for package in package_list:
            # Start pessimistic; flipped to error=False once validated.
            pkg_result = PackageResult(package=package['package'], version=package['version'], error=True)
            try:
                version_list = self.list_versions(package['package'], labbook, username)
            except ValueError:
                # Unknown package name.
                result.append(pkg_result)
                continue
            if not version_list:
                # If here, no versions found for the package...so invalid
                result.append(pkg_result)
            else:
                if package['version']:
                    if package['version'] in version_list:
                        # Both package name and version are valid
                        pkg_result = pkg_result._replace(error=False)
                        result.append(pkg_result)
                    else:
                        # The package version is not in the list, so invalid
                        result.append(pkg_result)
                else:
                    # You need to look up the version and then add
                    try:
                        pkg_result = pkg_result._replace(version=self.latest_version(package['package'],
                                                                                    labbook,
                                                                                    username))
                        pkg_result = pkg_result._replace(error=False)
                    except ValueError:
                        # Can't set the version so just continue
                        pass
                    finally:
                        result.append(pkg_result)
        return result
    def generate_docker_install_snippet(self, packages: List[Dict[str, str]], single_line: bool = False) -> List[str]:
        """Method to generate a docker snippet to install 1 or more packages
        Args:
            packages(list(dict)): A list of package names and versions to install
            single_line(bool): If true, collapse
        Returns:
            list
        """
        # NOTE(review): this expects the key 'name' while validate_packages
        # uses 'package' — confirm both input schemas are intended.
        package_strings = [f"{x['name']}=={x['version']}" for x in packages]
        if single_line:
            return [f"RUN pip install {' '.join(package_strings)}"]
        else:
            docker_strings = [f"RUN pip install {x}" for x in package_strings]
            return docker_strings
|
<reponame>Xilinx/roast-xilinx<filename>roast/component/bif/generate.py<gh_stars>1-10
#
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
import os
import re
import logging
from collections import namedtuple
from roast.xexpect import Xexpect
from roast.component.basebuild import Basebuild
from roast.utils import is_file, copy_file, remove
from roast.providers.bif import BifProvider
log = logging.getLogger(__name__)
# Data containers for bif generation:
#   Header    - a section header line ("image" or "metaheader"), its name and
#               optional extra arguments.
#   Component - one partition entry: its defdata key and parameter strings.
#   Block     - a Header plus its list of Components.
Header = namedtuple("Header", ["header", "name", "args"])
# Defaults cover all three fields: header="image", name=None, args=None.
Header.__new__.__defaults__ = ("image", None, None)
Component = namedtuple("Component", ["name", "params"])
Block = namedtuple("Block", ["header", "components"])
def bifstring(bifstr: str, value: str, config) -> str:
    """Append the file reference resolved from *value* onto *bifstr*.

    ``value`` may carry an ``ext=``, ``rel=`` or ``path=`` marker anywhere in
    the string; otherwise it is appended verbatim.  ``path=`` and plain
    values are also cached in ``config["tmpvalue"]`` as a side effect.
    """
    if "designs" in config:
        # Design name: second-to-last component of the designs path.
        design = config["designs"].split("/")[-2]
    if re.search(r"ext=", value):
        # Reference an output artifact of the design by extension.
        extension = value.split("=")[1]
        bifstr = bifstr + config["designs"] + "outputs/" + f"{design}.{extension}"
    elif re.search(r"rel=", value):
        # Path relative to the images directory.
        bifstr = bifstr + config["imagesDir"] + value.split("=")[1]
    elif re.search(r"path=", value):
        config["tmpvalue"] = value.split("=")[1]
        bifstr = bifstr + config["tmpvalue"]
    else:
        config["tmpvalue"] = value
        bifstr = bifstr + config["tmpvalue"]
    return bifstr
def create_partition(id, type_data, extra_args, image_path) -> str:
    """Render one bif ``partition`` stanza as a string.

    Args:
        id: numeric partition id (emitted in hex).
        type_data: the partition's type/core attribute string.
        extra_args: extra attribute text appended after type_data.
        image_path: path of the image file for this partition.
    """
    stanza_lines = [
        "    partition \n",
        "    {\n",
        f"     id = {hex(id)}\n",
        f"     {type_data} {extra_args}\n",
        f"     file = {image_path}\n",
        "    }\n",
    ]
    return "".join(stanza_lines)
def generate_bif(config, format_type: str) -> None:
    """Write the bif file described by ``config`` into the work directory.

    ``format_type`` selects the "new" (versal-style, subsystem images) or
    "old" (single attribute-list) bif syntax.  The resulting file is also
    copied to ``config["imagesDir"]``.

    Raises:
        Exception: if the bif file does not exist after writing.
    """
    b = BifProvider(seed=config.get("seed"), randomize=config["randomize"])
    # reconstruct bif data structure
    bif = tuple()
    headers, l_components = zip(*config.bif)
    for header, components in zip(headers, l_components):
        # config library returns list so cast back to namedtuple
        header = Header(*header)
        components = [Component(*component) for component in components]
        bif = bif + (Block(header, components),)
    # randomize section order within the allowed constraints
    bif = b.shuffle_sections(bif, config.get("block_constraints", {}))
    bif_path = os.path.join(config["workDir"], config["bifFile"])
    with open(bif_path, "w") as biffile:
        if format_type == "new":
            # Maps component key -> [attribute string, path kind].
            defdata = {
                "plm": ["type = bootloader", "path"],
                "bootimg": ["type = bootimage", "path"],
                "pmccdo": ["type = pmcdata,load=0xF2000000", "path"],
                "cdo": ["type = cdo", "path"],
                "sys_dtb": ["load=0x1000", "path"],
                "linux_image": ["load=0x2000000", "path"],
                "uboot": ["", "path"],
                "puf": ["puf_file", "path"],
                "aie-elf": ["core = aie", "path"],
                "psm": ["core = psm", "psmElf"],
                "a72": ["core = a72-0", "path"],
                "r5": ["core = r5-0", "path"],
                "a72_1": ["core = a72-1", "path"],
                "r5_1": ["core = r5-1", "path"],
                "r5_lock": ["core = r5-lockstep", "path"],
                "sys_dtb_osl": ["offset=0x300000", "load=0x1000", "path"],
                "linux_image_osl": ["offset=0x380000", "load=0x80000", "path"],
                "rootfs_osl": ["offset=0x2080000", "load=0x30000000", "path"],
            }
            # Subsystem name -> subsystem id.
            subsys_data = {
                "pmc_subsys": "0x1c000001",
                "lpd_subsys": "0x4210002",
                "pl_cfi_subsys": "0x18700000",
                "aie_subsys": "0x421c005",
                "fpd_subsys": "0x420c003",
                "subsystem": "0x1c000000",
                "apu_subsys": "0x1c000003",
                "rpulock_subsys": "0x1c000004",
                "rpu0_subsys": "0x1c000005",
                "rpu1_subsys": "0x1c000006",
            }
            biffile.write("new_bif:\n{\n")
            # Device id defaults, overridable via config.
            id_code = "0x04ca8093"
            extended_id_code = "0x01"
            if "id_code" in config:
                id_code = config["id_code"]
            if "extended_id_code" in config:
                extended_id_code = config["extended_id_code"]
            if "bootheader" in config:
                # "{{" marks a format-style template that must be expanded.
                if "{{" in config["bootheader"]:
                    biffile.write(config["bootheader"].format())
                else:
                    biffile.write(config["bootheader"])
            biffile.write(
                f" id_code = {id_code}\n extended_id_code = {extended_id_code} \n id = 0x1\n"
            )
            id = 1
            # write partitions
            for block in bif:
                header = block.header
                components = block.components
                if header.header == "image":
                    subsystem_id = subsys_data[header.name]
                    biffile.write(
                        f" {header.header}\n {{\n name = {header.name}\n id = {subsystem_id}\n"
                    )
                    if header.args:
                        biffile.write(f" {header.args}\n")
                    for component in components:
                        # Partition ids are assigned sequentially from 2.
                        id = id + 1
                        extra_args = bifstring(
                            "", "".join(component.params[:-1]), config
                        )
                        image_path = bifstring("", component.params[-1], config)
                        str_component = create_partition(
                            id, defdata[component.name][0], extra_args, image_path
                        )
                        biffile.write(str_component)
                elif header.header == "metaheader":
                    for component in components:
                        extra_args = bifstring(
                            "", "".join(component.params[:-1]), config
                        )
                        str_component = (
                            f"\n metaheader \n" f" {{\n" f" {extra_args}\n"
                        )
                        biffile.write(str_component)
                biffile.write(" }\n")
            biffile.write("}\n")
        elif format_type == "old":
            # Maps component key -> bif attribute list (last entry is the
            # path kind).
            defdata = {
                "plm": ["bootloader", "path"],
                "pmccdo": ["pmcdata,load=0xF2000000", "path"],
                "cdo": ["partition_type=cdo", "path"],
                "aie-elf": ["destination_cpu=aie", "path"],
                "psm": ["destination_cpu=psm", "psmElf"],
                "a72": ["destination_cpu=a72-0", "path"],
                "a72_1": ["destination_cpu=a72-1", "path"],
                "r5": ["destination_cpu=r5-0", "path"],
                "r5_1": ["destination_cpu=r5-1", "path"],
                "r5_lock": ["destination_cpu=r5-lockstep", "path"],
                "a53": ["destination_cpu=a53-0", "path"],
                "a53_1": ["destination_cpu=a53-1", "path"],
                "a53_2": ["destination_cpu=a53-2", "path"],
                "a53_3": ["destination_cpu=a53-3", "path"],
                "fsbl_a53": ["bootloader", "destination_cpu=a53-0", "path"],
                "fsbl_r5": ["bootloader", "destination_cpu=r5-0", "path"],
                "fsbl_a9": ["bootloader", "path"],
                "pmu": ["destination_cpu=pmu", "path"],
                "atf": [
                    "destination_cpu=a53-0",
                    "exception_level=el-3",
                    "trustzone",
                    "path",
                ],
                "dtb_uboot": ["destination_cpu=a53-0", "load=0x100000", "path"],
                "uboot": ["destination_cpu=a53-0", "exception_level=el-2", "path"],
                "zynq_uboot": ["", "path"],
                "dtb_linux": [
                    "destination_device=ps",
                    "offset=0x200000",
                    "partition_owner=uboot",
                    "path",
                ],
                "linux_image": [
                    "destination_device=ps",
                    "offset=0x280000",
                    "partition_owner=uboot",
                    "path",
                ],
                "bitstream": ["destination_device=pl", "path"],
                "pmufw_image": ["pmufw_image", "path"],
            }
            if config["boot_header"]:
                biffile.write(f"{config['boot_header']}" ":\n{\n")
            else:
                biffile.write("all:\n{\n")
            # NOTE(review): this branch iterates the raw config["bif"], not
            # the shuffled `bif` built above — confirm old-format builds are
            # meant to ignore the randomized ordering.
            for image in config["bif"]:
                comp = image[0]
                value = image[1]
                bifstr = "[{}] ".format(defdata[comp][0])
                bifstr = bifstring(bifstr, value, config)
                bifstr = "\t{}\n".format(bifstr)
                biffile.write(bifstr)
            biffile.write("}\n")
    if is_file(bif_path):
        copy_file(
            bif_path,
            os.path.join(config["imagesDir"], config["bifFile"]),
        )
        log.info("Bif Generated successfully")
    else:
        err_msg = "Bif generation failed"
        log.error(err_msg)
        raise Exception(err_msg)
def generate_pdi(config) -> None:
    """Run bootgen on the generated bif to produce the PDI image.

    Raises:
        Exception: if the PDI file does not exist after bootgen runs.
    """
    console = Xexpect(log)
    pdi_path = os.path.join(config["imagesDir"], config["pdiFile"])
    # Drop any stale PDI so the existence check below reflects this run.
    remove(pdi_path)
    if is_file(config["bifFile"]):
        copy_file(config["bifFile"], config["imagesDir"])
    console.runcmd(f"cd {config['imagesDir']}")
    cmd = " ".join(
        [
            config["bootgenCmd"],
            "-arch",
            config["platform"].lower(),
            config["bootgenExtraArgs"],
            "-log",
            "info",
            "-image",
            config["bifFile"],
            "-o",
            config["pdiFile"],
        ]
    )
    console.runcmd(
        cmd, expected="Bootimage generated successfully", wait_for_prompt=False
    )
    if not is_file(pdi_path):
        err_msg = f"{config['pdiFile']} creation failed"
        log.error(err_msg)
        raise Exception(err_msg)
    log.info("PDI generated successfully")
def gen_boot_bin(config) -> None:
    """Run bootgen on the bif to produce a bootable BIN image.

    Applies any ``config["BOOTGEN_ENV"]`` exports before invoking bootgen.

    Raises:
        Exception: if the BIN file does not exist after bootgen runs.
    """
    console = Xexpect(log)
    bin_path = os.path.join(config["imagesDir"], config["binFile"])
    # Drop any stale image so the existence check below reflects this run.
    remove(bin_path)
    if config["bifFile"]:
        copy_file(config["bifFile"], config["imagesDir"])
    console.runcmd(f"cd {config['imagesDir']}")
    cmd = " ".join(
        [
            config["bootgenCmd"],
            "-arch",
            config["platform"].lower(),
            config["bootgenExtraArgs"],
            "-log",
            "info",
            "-image",
            config["bifFile"],
            "-o",
            config["binFile"],
        ]
    )
    # Export any bootgen-specific environment overrides first.
    if "BOOTGEN_ENV" in config:
        for env in config["BOOTGEN_ENV"]:
            console.runcmd(f"export {env}")
    console.runcmd(
        cmd, expected="Bootimage generated successfully", wait_for_prompt=False
    )
    if not is_file(bin_path):
        err_msg = f"{config['binFile']} creation failed"
        log.error(err_msg)
        raise Exception(err_msg)
    log.info("Bootable Image generate successful")
def pdi(config, format_type: str = "new") -> bool:
    """Generate the bif and then the PDI.

    Returns:
        bool: True on success; False if either step raised (the error is
        logged, not propagated).
    """
    try:
        generate_bif(config, format_type=format_type)
        generate_pdi(config)
    except Exception as err:
        log.error(err)
        return False
    return True
def bin(config, format_type: str = "old") -> bool:
    """Generate the bif and then the bootable BIN image.

    Returns:
        bool: True on success; False if either step raised (the error is
        logged, not propagated).  Note: name shadows the ``bin`` builtin,
        kept for API compatibility.
    """
    try:
        generate_bif(config, format_type=format_type)
        gen_boot_bin(config)
    except Exception as err:
        log.error(err)
        return False
    return True
|
<gh_stars>0
from tkinter import *
import random
from main import *
from mpClasses import *
def init(data):
    """Initialise all multiplayer game state on the model object *data*."""
    data.scored = False
    # Ball sprites used for the HUD player-type indicators.
    data.flatImage = PhotoImage(file="./images/ball03.png")
    data.stripedImage = PhotoImage(file="./images/ball11.png")
    data.setTurn = True
    data.winner = None
    data.player1 = Player("None")
    data.player2 = Player("None")
    data.mode = "mp"
    data.hasScored = False
    data.score = 0
    data.turn = "Player 1"
    data.gameOver = False
    data.start = False
    data.timer = 120
    data.score = 0  # NOTE(review): duplicate assignment (also set above)
    # Mouse/drag bookkeeping for the cue and the power slider.
    data.startX = 0
    data.startY = 0
    data.dragX = 0
    data.dragY = 0
    data.isReleased = False
    data.isDragging = False
    data.slider = Slider()
    data.powerBar = PowerBar()
    data.cueBall = CueBall(300, data.height//2)
    data.cue = Cue((200,100), math.pi)
    data.balls = []
    data.holes = []
    data.boundaries = []
    # Table geometry, in pixels.
    data.r = 15
    data.margin = 100
    data.tableWidth = 800
    data.tableHeight = 400
    data.wallWidth = 30
    startTable(data)
def startTable(data):
    """Create the table boundaries, the six pockets, and the initial rack."""
    margin = data.margin
    width = data.tableWidth
    height = data.tableHeight
    data.boundaries.append(Boundary(margin, margin + width, "x"))
    data.boundaries.append(Boundary(margin, margin + height, "y"))
    # Six pockets: corners plus the midpoints of the two long rails
    # (same creation order as before).
    pocket_spots = [
        (margin, margin),                      # top left
        (margin + width, margin),              # top right
        (margin, margin + height),             # bottom left
        (margin + width // 2, margin),         # top mid
        (margin + width // 2, margin + height),# bottom mid
        (margin + width, margin + height),     # bottom right
    ]
    for px, py in pocket_spots:
        data.holes.append(Hole(px, py, 25))
    # Triangle rack: (class, x, vertical offset from mid-line, ball number).
    midY = data.height // 2
    rack = [
        (FlatBall,    600,   0,  1),
        (StripedBall, 628, -16,  9),
        (FlatBall,    628,  16,  2),
        (StripedBall, 656, -32, 10),
        (EightBall,   656,   0,  8),
        (FlatBall,    656,  32,  3),
        (StripedBall, 684, -48, 11),
        (FlatBall,    684, -16,  7),
        (StripedBall, 684,  16, 14),
        (FlatBall,    684,  48,  4),
        (FlatBall,    712, -64,  5),
        (StripedBall, 712, -32, 13),
        (StripedBall, 712,   0, 15),
        (FlatBall,    712,  32,  6),
        (StripedBall, 712,  64, 12),
    ]
    for ball_cls, x, dy, number in rack:
        data.balls.append(ball_cls(x, midY + dy, number))
def drawTable(canvas, data):
    """Draw the playing surface, the four wooden rails, and the pockets."""
    wall = data.wallWidth
    m = data.margin
    w = data.tableWidth
    h = data.tableHeight
    rail_color = "#3d362b"
    # Cloth / playing surface.
    canvas.create_rectangle(m, m, w + m, h + m, fill="#1c1c1c")
    # Rails, drawn in the same order as before: top, bottom, left, right.
    rails = [
        (m - wall, m - wall, m + w + wall, m),
        (m - wall, m + h, m + w + wall, m + h + wall),
        (m - wall, m - wall, m, m + h + wall),
        (m + w, m - wall, m + w + wall, m + h + wall),
    ]
    for x0, y0, x1, y1 in rails:
        canvas.create_rectangle(x0, y0, x1, y1, fill=rail_color)
    # Pockets.
    for hole in data.holes:
        canvas.create_oval(hole.cx - hole.r, hole.cy - hole.r,
                           hole.cx + hole.r, hole.cy + hole.r,
                           fill=hole.color)
def checkScoredAll(data):
    """Mark a player as having potted all of their ball type, if applicable."""
    flatCount = 0
    stripedCount = 0
    # Count what is still on the table; elif preserves the original
    # precedence should a ball ever match both types.
    for ball in data.balls:
        if isinstance(ball, FlatBall):
            flatCount += 1
        elif isinstance(ball, StripedBall):
            stripedCount += 1
    if flatCount == 0:
        cleared = "Flat"
    elif stripedCount == 0:
        cleared = "Striped"
    else:
        return
    # Credit whichever player owns the cleared ball type.
    if data.player1.ballType == cleared:
        data.player1.hasScoredAll = True
    elif data.player2.ballType == cleared:
        data.player2.hasScoredAll = True
def allBallsStopped(data):
    """Return True when every ball (including the cue ball) is at rest.

    Side effect: clears data.scored when no balls have been potted.
    """
    speedSum = 0
    for ball in data.balls:
        speedSum += ball.speed
    speedSum += data.cueBall.speed
    # NOTE(review): data.qBalls is never assigned in this module's init();
    # confirm it is set elsewhere (e.g. main.py), otherwise this line raises
    # AttributeError on every call.
    if len(data.balls) == data.qBalls:
        data.scored = False
    return speedSum == 0
def switchPlayers(data):
    """Hand the turn to the other player once all balls have come to rest."""
    if data.setTurn or not allBallsStopped(data):
        return
    if not data.scored:
        # No ball potted: the turn passes to the opponent.
        data.turn = "Player 2" if data.turn == "Player 1" else "Player 1"
    data.setTurn = True
def mousePressed(event, data):
    """Record the press point; rest the cue when the cue ball is clicked."""
    data.startX, data.startY = event.x, event.y
    dx = data.cueBall.cx - event.x
    dy = data.cueBall.cy - event.y
    distance = math.sqrt(dx ** 2 + dy ** 2)
    # Only place the cue on a stationary cue ball, clicked within its radius.
    if data.cueBall.speed == 0 and distance <= data.r:
        data.cue.place()
#mouseMotion,leftReleased and leftMoved taken from previous 15-112 classes notes
#http://www.kosbie.net/cmu/fall-15/15-112/notes/notes-tkinter-demos.html
def mouseMotion(event, data):
    """Track the pointer; the cue follows it while not resting on the ball."""
    data.cue.tip = (event.x, event.y)
    if data.cue.isPlaced:
        return
    data.cue.move(event.x, event.y)
def leftReleased(event, data):
    """On slider release, snap the cue tip back behind the resting cue ball."""
    data.isDragging = False
    if data.slider.clickInSlider(event.x, event.y) and data.cue.isPlaced:
        # Position the tip just behind the ball, opposite the cue angle.
        angle = math.pi + data.cue.angle
        reach = data.r + 8
        tip_x = data.cueBall.cx + math.cos(angle) * reach
        tip_y = data.cueBall.cy + math.sin(angle) * reach
        data.cue.move(tip_x, tip_y)
def leftMoved(event, data):
    """Handle dragging of the power slider with the left button held."""
    data.startX, data.startY = event.x, event.y
    # Start a drag only when the press lands inside the slider.
    if not data.isDragging and data.slider.clickInSlider(event.x, event.y):
        data.isDragging = True
    if data.isDragging:
        data.slider.drag(data)
def keyPressed(event, data):
    """Keyboard controls: arrows aim the cue, space lifts it, 'r' restarts."""
    cueBall = data.balls[0]  # kept for parity with the original (unused)
    key = event.keysym
    if key in ("Left", "Right"):
        # Aiming is only allowed while the cue is not resting on the ball.
        if not data.cue.isPlaced:
            data.cue.changeAngle(key)
    elif event.char == " ":
        data.cue.isPlaced = False
    elif event.char == "r":
        init(data)
def timerFired(data):
    """Per-tick update: scoring check, physics, slider reset, turn switch."""
    checkScoredAll(data)
    # if data.turn == "Player 1":
    #     if data.player1.turns > 1:
    #         data.player1.turns -= 1
    #         if data.player1.turns == 0: data.setTurn = True
    # else:
    #     if data.player2.turns > 1:
    #         data.player2.turns -= 1
    #         if data.player2.turns == 0: data.setTurn = True
    # if data.setTurn:
    #     if data.turn == "Player 1":
    #         data.turn = "Player 2"
    #         data.setTurn = False
    #     elif data.turn == "Player 2":
    #         data.turn = "Player 1"
    #         data.setTurn = False
    # Speed the animation up from the framework default (100ms).
    data.timerDelay = 20
    data.cueBall.onTimerFired(data)
    for ball in data.balls:
        ball.onTimerFired(data)
    for hole in data.holes:
        hole.onTimerFired(data)
    data.cue.onTimerFired(data)
    if not data.isDragging:
        data.slider.reset()
    if data.timer <= 0:
        # NOTE(review): data.highscore is never set in this module's init(),
        # and data.timer is never decremented here, so this branch looks
        # unreachable as written — confirm against main.py.
        if data.highscore < data.score:
            file = open("Highscore.txt", "w")
            file.write(str(data.score))
            file.close()
    switchPlayers(data)
def redrawAll(canvas, data):
    """Draw one full frame: table, balls, cue, slider, HUD, or game-over."""
    if not data.gameOver:
        # Background behind the table.
        canvas.create_rectangle(0,0,data.width,data.height, fill="#e5d5b5")
        drawTable(canvas, data)
        data.powerBar.draw(canvas, data)
        # x0,y0 = data.cueBall.cx-data.r, data.cueBall.cy-data.r
        # x1,y1 = data.cueBall.cx + data.r, data.cueBall.cy + data.r
        # canvas.create_oval(x0,y0,x1,y1, fill = data.cueBall.color)
        canvas.create_image(data.cueBall.cx, data.cueBall.cy,
            image = data.cueBall.ballImage)
        for ball in data.balls:
            # x0,y0 = ball.cx-data.r, ball.cy-data.r
            # x1,y1 = ball.cx + data.r, ball.cy + data.r
            # canvas.create_oval(x0,y0,x1,y1, fill = ball.color)
            canvas.create_image(ball.cx,ball.cy,image=ball.ballImage)
        data.slider.drawSlider(canvas, data)
        data.cue.draw(canvas, data)
        # NOTE: in several literals below the backslash sits INSIDE the
        # string, so the next line's leading spaces become part of the font
        # spec (Tk tolerates the extra whitespace).
        if data.cueBall.speed == 0 and not data.cue.isPlaced:
            canvas.create_text(data.width//2,data.margin//2,font=\
                "Helvetica 16 bold",
                text = "Change cue angle using left/right arrow keys, then rest the " +
                "cue by clicking the cue ball")
        elif data.cueBall.speed == 0 and data.cue.isPlaced:
            canvas.create_text(data.width//2,data.margin//2, font = "Helvetica 16\
                bold", text = "Drag the slider to hit the ball. " +
                "The more you drag the slider, the more power you hit the ball with.\n"+
                " Press spacebar to unrest the cue")
        canvas.create_text(30,data.height-25, anchor = "sw", text="P1:",font = "Helvetica 16\
            bold")
        canvas.create_text(data.width-90,data.height-25, anchor = "se", text="P2:", font = "Helvetica 16\
            bold")
        canvas.create_text(data.width-10,10, anchor = "ne", font = "Helvetica 16\
            bold", text="Turn: %s"%(data.turn))
        # HUD ball-type icons once the players have been assigned a type.
        if data.player1.ballType == "Flat":
            canvas.create_image(70, data.height-30, image=data.flatImage)
            canvas.create_image(data.width-70, data.height-30, image=data.stripedImage)
        elif data.player2.ballType == "Flat":
            canvas.create_image(data.width-70, data.height-30, image=data.flatImage)
            canvas.create_image(70, data.height-30, image=data.stripedImage)
    else:
        canvas.create_text(data.width//2, data.height//2, font = "Helvetica 24\
            bold", text = "Game Over! The winner is: %s"%(data.winner) +
            " Press r to restart")
##########################################
# run function from class notes:
# http://www.cs.cmu.edu/~112n18/notes/notes-animations-part1.html
# and http://www.kosbie.net/cmu/fall-15/15-112/notes/notes-tkinter-demos.html
##########################################
def run(root, width=1000, height=600):
    """Build the canvas, wire event handlers, and start the game loop.

    Follows the 15-112 course animation framework (see the links in the
    comment block above this function).
    """
    def redrawAllWrapper(canvas, data):
        canvas.delete(ALL)
        canvas.create_rectangle(0, 0, data.width, data.height,
                                fill='white', width=0)
        redrawAll(canvas, data)
        canvas.update()
    def mouseWrapper(mouseFn, event, canvas, data):
        # Re-entrancy guard: drop events that arrive mid-processing.
        if data.mouseWrapperMutex: return
        data.mouseWrapperMutex = True
        mouseFn(event, data)
        redrawAllWrapper(canvas, data)
        data.mouseWrapperMutex = False
    def mousePressedWrapper(event, canvas, data):
        mousePressed(event, data)
        redrawAllWrapper(canvas, data)
    def keyPressedWrapper(event, canvas, data):
        keyPressed(event, data)
        redrawAllWrapper(canvas, data)
    def mouseMotionWrapper(event, canvas, data):
        mouseMotion(event, data)
        redrawAllWrapper(canvas, data)
    # BUGFIX: these two wrappers previously called undefined names
    # leftMouseReleased / leftMouseMoved (NameError if ever invoked); they
    # now call the handlers that actually exist.  Note they are currently
    # unused — the bindings below route through mouseWrapper instead.
    def leftMouseReleasedWrapper(event, canvas, data):
        leftReleased(event, data)
        redrawAllWrapper(canvas, data)
    def leftMouseMovedWrapper(event, canvas, data):
        leftMoved(event, data)
        redrawAllWrapper(canvas, data)
    def timerFiredWrapper(canvas, data):
        timerFired(data)
        redrawAllWrapper(canvas, data)
        # pause, then call timerFired again
        canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)
    # Set up data and call init
    class Struct(object): pass
    data = Struct()
    data.mouseWrapperMutex = False
    data.width = width
    data.height = height
    data.timerDelay = 100 # milliseconds
    # root2 = Tk()
    init(data)
    # create the root and the canvas
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.configure(bd=0, highlightthickness=0)
    canvas.pack()
    # set up events
    root.bind("<Button-1>", lambda event:
                            mousePressedWrapper(event, canvas, data))
    root.bind("<Key>", lambda event:
                            keyPressedWrapper(event, canvas, data))
    canvas.bind("<Motion>", lambda event:
                            mouseWrapper(mouseMotion, event, canvas, data))
    canvas.bind("<B1-ButtonRelease>", lambda event:
                            mouseWrapper(leftReleased, event, canvas, data))
    canvas.bind("<B1-Motion>", lambda event:
                            mouseWrapper(leftMoved, event, canvas, data))
    timerFiredWrapper(canvas, data)
    # and launch the app
    root.mainloop()  # blocks until window is closed
    print("bye!")
# run() |
#!/usr/bin/env python3
import random
import click
from config import db, log
from model import PeriodicScript, PendingTweet, Config, ResponseScript
from script import compile_script, process_mention
from support import get_twitter_api
@click.command()
@click.option('--debug', '-d', is_flag=True, help='Debug mode. No tweet if set')
@click.option('--count', '-c', default=1, help='Number of tweets to generate')
def generate(debug, count):
    """CLI entry point: queue *count* generated tweets (see do_generate)."""
    return do_generate(debug, count)
def do_generate(debug, count, id=None, is_response=False):
    """Generate *count* tweets from scripts and queue them as PendingTweet rows.

    Args:
        debug: when True, nothing is written to the database.
        count: number of tweets to generate.
        id: specific script id to use; a random periodic script when None.
        is_response: select from ResponseScript instead of PeriodicScript.

    Returns:
        list[str]: the generated tweet texts.
    """
    tweets = []
    for _ in range(count):
        if is_response:
            script = ResponseScript.query.filter_by(id=id).first()
        else:
            # BUGFIX: compare with None explicitly so a legitimate id of 0
            # is not mistaken for "pick one at random" (was `if not id:`).
            if id is None:
                ind = random.randrange(0, PeriodicScript.query.count())
                script = PeriodicScript.query[ind]
            else:
                script = PeriodicScript.query.filter_by(id=id).first()
        # NOTE(review): script may be None for an unknown id; the next line
        # would then raise AttributeError — confirm callers pass valid ids.
        content = script.content.replace('%{상대}', '(상대ID)')
        tweet = compile_script(content)
        pending_tweet = PendingTweet(content=tweet, image_keyword=script.image_keyword)
        if not debug:
            db.session.add(pending_tweet)
        msg = f'pending tweet: {tweet}'
        log.info(msg)
        print(msg)
        tweets.append(tweet)
    if not debug:
        db.session.commit()
    log.info('done')
    print('done')
    return tweets
@click.command()
@click.option('--debug', '-d', is_flag=True, help='Debug mode. No tweet if set')
def tweet(debug):
    """CLI entry point: post the oldest pending tweet (see do_tweet)."""
    return do_tweet(debug)
def do_tweet(debug, tweet=None, script=None):
    """Post one tweet.

    When *script* is given that branch is taken; otherwise the oldest
    PendingTweet is posted (generating one first when the queue is empty).
    In debug mode nothing is posted or deleted.
    """
    if script is not None:
        msg = script
        if not debug:
            api = get_twitter_api()
            # NOTE(review): this branch reads tweet.content, so callers must
            # pass *tweet* alongside *script*; tweet=None would raise here.
            api.update_status(status=tweet.content)
            msg = f'tweeted: {tweet}'
        log.info(msg)
        print(msg)
        return
    if tweet is None:
        tweet = PendingTweet.query.order_by(PendingTweet.added_at.asc()).first()
    if not tweet:
        msg = 'has no pending tweet. generate..'
        print(msg)
        log.warn(msg)
        do_generate(debug, 1)
        # NOTE(review): in debug mode do_generate commits nothing, so this
        # re-query can still return None and tweet.content below would
        # fail — confirm intended.
        tweet = PendingTweet.query.order_by(PendingTweet.added_at.asc()).first()
    if not debug:
        api = get_twitter_api()
        api.update_status(status=tweet.content)
        # Posted successfully: remove it from the queue.
        db.session.delete(tweet)
        db.session.commit()
    msg = f'tweeted: {tweet}'
    log.info(msg)
    print(msg)
@click.command()
@click.option('--debug', '-d', is_flag=True, help='Debug mode. No tweet if set')
def mention(debug):
    """CLI entry point: reply to new mentions (see do_mention)."""
    return do_mention(debug)
def do_mention(debug):
    """Reply to Twitter mentions newer than the last processed mention id.

    Skips the bot's own account, generates a response per mention via
    process_mention, and (unless debug) posts the reply and advances the
    stored 'last_processed_mention_id'. Returns the generated responses.
    """
    since_id = Config.query.filter_by(key='last_processed_mention_id').first()
    # NOTE(review): logged at error level although informational — confirm.
    log.error(f'since_id: {since_id.value}')
    api = get_twitter_api()
    mentioned_tweets = api.mentions_timeline(since_id=int(since_id.value), count=20)
    responses = []
    for mention in mentioned_tweets:
        if mention.user.screen_name == 'nixieko':
            # prevent self mention panic :<
            continue
        tweet = process_mention(mention)
        reply_id = mention.id_str
        if not debug:
            api.update_status(status=tweet.content, in_reply_to_status_id=reply_id)
            since_id.value = reply_id
            db.session.add(since_id)
        for msg in (f'mention: {mention.text}', f'response tweet: {tweet}'):
            log.info(msg)
            print(msg)
        responses.append(tweet)
    return responses
@click.group()
def cli():
    # Root command group; subcommands are attached below via add_command.
    log.debug('cli() called')
# Register the subcommands on the root group and run the CLI when this
# module is executed directly.
cli.add_command(generate)
cli.add_command(tweet)
cli.add_command(mention)
if __name__ == '__main__':
    cli()
|
<filename>indra/pysb_assembler.py
from pysb import Model, Monomer, Parameter
from pysb.core import SelfExporter
from bel import bel_api
from biopax import biopax_api
from trips import trips_api
# Disable PySB's SelfExporter so that creating Monomers/Parameters does not
# inject names into the caller's namespace; components are added to the
# model explicitly below.
SelfExporter.do_export = False
class BaseAgentSet(object):
    """A container for a set of BaseAgents. Wraps a dict of BaseAgent instances."""

    def __init__(self):
        # Map agent name -> BaseAgent instance.
        self.agents = {}

    def get_create_agent(self, name):
        """Return agent with given name, creating it if needed."""
        try:
            agent = self.agents[name]
        except KeyError:
            agent = BaseAgent(name)
            self.agents[name] = agent
        return agent

    def iteritems(self):
        """Iterate over (name, agent) pairs.

        Fix: dict.iteritems() was removed in Python 3; use items() (which
        also exists on Python 2) so this works on both.
        """
        return iter(self.agents.items())

    def __getitem__(self, name):
        return self.agents[name]
class BaseAgent(object):
    """Accumulates site and site-state information for a single monomer."""

    def __init__(self, name):
        self.name = name
        self.sites = []        # ordered list of site names
        self.site_states = {}  # site name -> ordered list of allowed states
        # The list of site/state configurations that lead to this agent
        # being active (where the agent is currently assumed to have only
        # one type of activity)
        self.activating_mods = []

    def create_site(self, site, states=None):
        """Register `site` if new and optionally extend its allowed states."""
        if site not in self.sites:
            self.sites.append(site)
        if states is None:
            return
        self.site_states.setdefault(site, [])
        try:
            state_list = list(states)
        except TypeError:
            # `states` was not iterable; ignore silently, as before.
            return
        self.add_site_states(site, state_list)

    def add_site_states(self, site, states):
        """Append any states not already registered for `site`."""
        for state in states:
            registered = self.site_states[site]
            if state not in registered:
                registered.append(state)

    def add_activating_modification(self, activity_pattern):
        """Record a site/state pattern that activates this agent."""
        self.activating_mods.append(activity_pattern)
def add_default_initial_conditions(model):
    """Give every monomer in `model` a default initial amount of 100."""
    # Iterate over all monomers
    for m in model.monomers:
        set_base_initial_condition(model, m, 100.0)
def set_base_initial_condition(model, monomer, value):
    """Set the initial condition of `monomer` to `value`.

    The initial species uses the first declared state for every site that
    has states and leaves other sites unbound. The amount is stored in a
    parameter named '<monomer>_0'; if that parameter already exists only
    its value is updated.
    """
    # Build up monomer pattern dict
    sites_dict = {}
    for site in monomer.sites:
        if site in monomer.site_states:
            # Default to the first declared state of the site.
            sites_dict[site] = monomer.site_states[site][0]
        else:
            # No states declared: leave the site unbound.
            sites_dict[site] = None
    mp = monomer(**sites_dict)
    pname = monomer.name + '_0'
    try:
        # Parameter already present: just update the amount.
        p = model.parameters[pname]
        p.value = value
    except KeyError:
        p = Parameter(pname, value)
        model.add_component(p)
        model.initial(mp, p)
class PysbAssembler(object):
    """Assembles a PySB Model from a collection of INDRA-style statements."""

    def __init__(self):
        self.statements = []
        self.agent_set = None

    def add_statements(self, stmts):
        """Append `stmts` to the statements to be assembled."""
        self.statements.extend(stmts)

    def make_model(self, initial_conditions=True, policies=None):
        """Build and return the PySB Model for the collected statements."""
        model = Model()
        # Keep track of which policies we're using for this assembly pass.
        self.policies = policies
        self.agent_set = BaseAgentSet()
        # First pass: let each statement declare the monomers/sites it needs.
        for statement in self.statements:
            statement.monomers(self.agent_set, policies=policies)
        # Materialize one Monomer per collected BaseAgent.
        for agent_name, agent in self.agent_set.iteritems():
            model.add_component(Monomer(agent_name, agent.sites, agent.site_states))
        # Second pass: let each statement emit its rules.
        for statement in self.statements:
            statement.assemble(model, self.agent_set, policies=policies)
        if initial_conditions:
            add_default_initial_conditions(model)
        return model
if __name__ == '__main__':
    pa = PysbAssembler()
    # Assemble statements extracted from a BEL RDF dump of the RAS neighborhood.
    bp = bel_api.process_belrdf('data/RAS_neighborhood.rdf')
    pa.add_statements(bp.statements)
    # bp = bel_api.process_ndex_neighborhood("ARAF")
    # pa.add_statements(bp.statements)
    # tp = trips_api.process_text("BRAF phosphorylates MEK1 at Ser222")
    # pa.add_statements(tp.statements)
    model = pa.make_model()
|
<gh_stars>1-10
import queue
import select
import socket
import threading
import uuid
from messages import *
class Game:
    """Tracks the global state of a Liar's Dice game."""

    def __init__(self):
        self.game_started = False  # set True once start_game() runs
        self.player_amount = 0     # number of connected players

    def start_game(self):
        """Mark the game as started and announce it in the log."""
        self.game_started = True
        banner = "----------------------------"
        logging.info(banner)
        logging.info("Let's play The Liar's Dice !")
        logging.info(banner)
        # TODO get access to players
        # Send them a new hand
class Client(threading.Thread):
    """Per-connection worker thread.

    Reads raw data from the client socket, decodes it into Message objects
    and forwards recognized messages to the server's main queue.
    """

    def __init__(self, playerID, client_socket, client_queue, main_queue):
        threading.Thread.__init__(self)
        # NOTE(review): logs an ACK but no ACK is actually sent here —
        # confirm whether a send was intended.
        logging.debug("Sending connection ACK to client")
        self.playerID = playerID            # uuid assigned by the server
        self.client_socket = client_socket  # connected TCP socket
        self.queue = client_queue           # server -> this client messages
        self.main_queue = main_queue        # this client -> server messages

    def run(self):
        self.main_queue.put("Test from client")
        # Receive loop: ends when the peer closes the connection (empty recv).
        while True:
            data = self.client_socket.recv(2048)
            if not data:
                break
            message = decode_message(data)
            if message is not None:
                self.handle_client_message(message)
            else:
                # NOTE(review): `message` is None on this branch, so this
                # logs "None" rather than the raw bytes — `data` was
                # probably intended; confirm.
                logging.info("Received the following data {}".format(message))
                logging.info("This is not a recognized message...")
        # Handle disconnection
        logging.info("Client disconnected...")
        self.client_socket.close()

    def handle_client_message(self, msg):
        """
        Handle the messages received from clients.
        :param msg: The received message from client.
        :type msg: Message
        :return: void
        """
        if msg.is_type('START'):
            logging.info("Start message received from ... {clientID}".format(clientID = self.playerID))
            # Tag the message with the originating player before forwarding.
            msg.id = self.playerID
            try:
                self.main_queue.put(msg, block=False)
            except queue.Full:
                logging.error("Could not send the message to the queue...")
        # elif: # TODO other type of message
        else:
            logging.info("message received: {}".format(msg.to_string()))
            logging.info("This is an unhandled message.. For now !")
            logging.info("What about handling it Jonathan ?!")
class Server:
    """TCP server for the Liar's Dice game.

    Binds on localhost:port, accepts clients until the game starts, spawns
    one Client thread per connection, and polls the main queue for
    messages from those threads. Note: __init__ runs the accept loop and
    therefore blocks.
    """

    def handle_disconnection(self, client_socket):
        self.game.player_amount -= 1

    def handle_client(self, client_socket, playerID):
        self.game.player_amount += 1

    def __init__(self, port):
        self.port = port
        self.game = Game()
        self.main_queue = queue.Queue()  # messages from client threads
        self.queues = dict()             # playerID -> per-client queue
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_address = ('localhost', self.port)
        self.sock.bind(server_address)
        logging.info("Starting PyLiar server on {ip_serv}:{port_serv}".format(ip_serv=server_address[0],
                                                                              port_serv=server_address[1]))
        self.sock.listen(1)
        # NOTE(review): nothing ever sets this True — the loop only ends on
        # an unhandled exception. TODO add a shutdown path.
        exit_gracefully = False
        self.client_sockets = [self.sock]
        while not exit_gracefully:
            # Handling sockets events; 1s timeout so the queue is also polled.
            readable, writable, errored = select.select(self.client_sockets, [], [], 1)
            logging.debug("Selected : %d - %d - %d", len(readable), len(writable), len(errored))
            if len(readable) > 0:
                self.handle_sockets(readable)
            else:
                # Fix: Queue.get(block=False) raises queue.Empty whenever
                # nothing is queued (the usual case on a select timeout),
                # which previously crashed the server loop.
                try:
                    item = self.main_queue.get(block=False)
                    logging.debug("Item value {}".format(item))
                except queue.Empty:
                    pass
        self.sock.close()

    def handle_sockets(self, sockets):
        """Dispatch readable sockets: new connections on the listen socket."""
        for s in sockets:
            if s is self.sock:
                if not self.game.game_started:
                    self.handle_new_connections()
                else:
                    logging.debug("Handle game started and new connection detected.")

    def handle_new_connections(self):
        """Accept a pending connection and spawn a Client thread for it."""
        playerID = uuid.uuid1()
        client_queue = queue.Queue()
        self.queues[playerID] = client_queue
        client_sock, client_address = self.sock.accept()
        logging.debug("Accepted connection from {}".format(client_address[0]))
        client_handler = Client(playerID, client_sock, client_queue, self.main_queue)
        client_handler.start()
        client_queue.put("Test of queue")
|
# Copyright 2016 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import security_testcase
import time
from OpenSSL import SSL
from util import winnforum_testcase, configurable_testcase,\
writeConfig, loadConfig, getCertFilename
class SasCbsdSecurityTestcase(security_testcase.SecurityTestCase):
  """CBSD <-> SAS security test cases (WInnForum SCS.1 through SCS.19).

  SCS.1-5 register over each required TLS cipher suite and expect success.
  SCS.6-16 present defective client certificates (unrecognized CA,
  corrupted, self-signed, non-CBRS root, wrong type, blacklisted, expired,
  revoked CA, ...) or disallowed TLS parameters and expect a TLS handshake
  failure or HTTP 403. SCS.17-19 use a deliberately short-lived certificate
  and verify the SAS rejects the handshake after it expires.
  """
  # Tests changing the SAS UUT state must explicitly call the SasReset().
  @winnforum_testcase
  def test_WINNF_FT_S_SCS_1(self):
    """New registration with TLS_RSA_WITH_AES_128_GCM_SHA256 cipher.
    Checks that SAS UUT response satisfy cipher security conditions.
    Checks that a CBSD registration with this configuration succeed.
    """
    self.doCbsdTestCipher('AES128-GCM-SHA256',
                          getCertFilename("device_a.cert"),
                          getCertFilename("device_a.key"))

  @winnforum_testcase
  def test_WINNF_FT_S_SCS_2(self):
    """New registration with TLS_RSA_WITH_AES_256_GCM_SHA384 cipher.
    Checks that SAS UUT response satisfy specific security conditions.
    Checks that a CBSD registration with this configuration succeed.
    """
    self.doCbsdTestCipher('AES256-GCM-SHA384',
                          getCertFilename("device_a.cert"),
                          getCertFilename("device_a.key"))

  @winnforum_testcase
  def test_WINNF_FT_S_SCS_3(self):
    """New registration with TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 cipher.
    Checks that SAS UUT response satisfy specific security conditions.
    Checks that a CBSD registration with this configuration succeed.
    Note that the test require a SAS UUT
    """
    self.doCbsdTestCipher('ECDHE-ECDSA-AES128-GCM-SHA256',
                          getCertFilename("device_a.cert"),
                          getCertFilename("device_a.key"))

  @winnforum_testcase
  def test_WINNF_FT_S_SCS_4(self):
    """New registration with TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 cipher.
    Checks that SAS UUT response satisfy specific security conditions.
    Checks that a CBSD registration with this configuration succeed.
    """
    self.doCbsdTestCipher('ECDHE-ECDSA-AES256-GCM-SHA384',
                          getCertFilename("device_a.cert"),
                          getCertFilename("device_a.key"))

  @winnforum_testcase
  def test_WINNF_FT_S_SCS_5(self):
    """New registration with TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 cipher.
    Checks that SAS UUT response satisfy specific security conditions.
    Checks that a CBSD registration with this configuration succeed.
    """
    self.doCbsdTestCipher('ECDHE-RSA-AES128-GCM-SHA256',
                          getCertFilename("device_a.cert"),
                          getCertFilename("device_a.key"))

  def generate_SCS_6_default_config(self, filename):
    """Generates the WinnForum configuration for SCS_6"""
    # Create the actual config for client cert/key path
    config = {
        'clientCert': getCertFilename("unrecognized_device.cert"),
        'clientKey': getCertFilename("unrecognized_device.key")
    }
    writeConfig(filename, config)

  @configurable_testcase(generate_SCS_6_default_config)
  def test_WINNF_FT_S_SCS_6(self, config_filename):
    """Unrecognized root of trust certificate presented during registration.
    Checks that SAS UUT response with fatal alert with unknown_ca.
    """
    config = loadConfig(config_filename)
    self.assertTlsHandshakeFailureOrHttp403(client_cert=config['clientCert'],
                                            client_key=config['clientKey'])

  def generate_SCS_7_default_config(self, filename):
    """Generates the WinnForum configuration for SCS_7"""
    # Create the actual config for client cert/key path
    config = {
        'clientCert': getCertFilename("device_corrupted.cert"),
        'clientKey': getCertFilename("device_corrupted.key")
    }
    writeConfig(filename, config)

  @configurable_testcase(generate_SCS_7_default_config)
  def test_WINNF_FT_S_SCS_7(self,config_filename):
    """Corrupted certificate presented during registration.
    Checks that SAS UUT response with fatal alert message.
    """
    config = loadConfig(config_filename)
    self.assertTlsHandshakeFailureOrHttp403(client_cert=config['clientCert'],
                                            client_key=config['clientKey'])

  def generate_SCS_8_default_config(self, filename):
    """Generates the WinnForum configuration for SCS_8"""
    # Create the actual config for client cert/key path
    config = {
        'clientCert': getCertFilename("device_self_signed.cert"),
        'clientKey': getCertFilename("device_a.key")
    }
    writeConfig(filename, config)

  @configurable_testcase(generate_SCS_8_default_config)
  def test_WINNF_FT_S_SCS_8(self,config_filename):
    """Self-signed certificate presented during registration.
    Checks that SAS UUT response with fatal alert message.
    """
    config = loadConfig(config_filename)
    self.assertTlsHandshakeFailureOrHttp403(client_cert=config['clientCert'],
                                            client_key=config['clientKey'])

  def generate_SCS_9_default_config(self, filename):
    """Generates the WinnForum configuration for SCS_9"""
    # Create the actual config for client cert/key path
    config = {
        'clientCert': getCertFilename("non_cbrs_signed_device.cert"),
        'clientKey': getCertFilename("non_cbrs_signed_device.key")
    }
    writeConfig(filename, config)

  @configurable_testcase(generate_SCS_9_default_config)
  def test_WINNF_FT_S_SCS_9(self,config_filename):
    """Non-CBRS trust root signed certificate presented during registration.
    Checks that SAS UUT response with fatal alert message.
    """
    config = loadConfig(config_filename)
    self.assertTlsHandshakeFailureOrHttp403(client_cert=config['clientCert'],
                                            client_key=config['clientKey'])

  def generate_SCS_10_default_config(self, filename):
    """Generates the WinnForum configuration for SCS_10. """
    # Create the actual config for client cert/key path
    config = {
        'clientCert': getCertFilename("device_wrong_type.cert"),
        'clientKey': getCertFilename("server.key")
    }
    writeConfig(filename, config)

  @configurable_testcase(generate_SCS_10_default_config)
  def test_WINNF_FT_S_SCS_10(self,config_filename):
    """Certificate of wrong type presented during registration.
    Checks that SAS UUT response with fatal alert message.
    """
    config = loadConfig(config_filename)
    try:
      self.assertTlsHandshakeFailure(client_cert=config['clientCert'],
                                     client_key=config['clientKey'])
    except AssertionError as e:
      # The handshake may legitimately succeed for a wrong-type certificate;
      # in that case the registration itself must be rejected with
      # responseCode 104.
      self.SasReset()
      # Load Devices
      device_a = json.load(
          open(os.path.join('testcases', 'testdata', 'device_a.json')))
      # Register the devices
      devices = [device_a]
      request = {'registrationRequest': devices}
      response = self._sas.Registration(request, ssl_cert=config['clientCert'],
                                        ssl_key=config['clientKey'])['registrationResponse']
      # Check Registration Response
      self.assertEqual(response[0]['response']['responseCode'], 104)

  def generate_SCS_11_default_config(self, filename):
    """Generate the WinnForum configuration for SCS_11."""
    # Create the configuration for blacklisted client cert/key path
    config = {
        'clientCert': getCertFilename("device_blacklisted.cert"),
        'clientKey': getCertFilename("device_blacklisted.key")
    }
    writeConfig(filename, config)

  @configurable_testcase(generate_SCS_11_default_config)
  def test_WINNF_FT_S_SCS_11(self, config_filename):
    """Blacklisted certificate presented during registration.
    Checks that SAS UUT response with fatal alert message.
    """
    # Read the configuration
    config = loadConfig(config_filename)
    # Tls handshake fails or http 403 response
    self.assertTlsHandshakeFailureOrHttp403(client_cert=config['clientCert'],
                                            client_key=config['clientKey'])
    logging.info("TLS handshake failed as the client certificate has blacklisted")

  def generate_SCS_12_default_config(self, filename):
    """Generates the WinnForum configuration for SCS.12"""
    # Create the actual config for client cert/key path
    config = {
        'clientCert': getCertFilename("device_expired.cert"),
        'clientKey': getCertFilename("device_expired.key")
    }
    writeConfig(filename, config)

  @configurable_testcase(generate_SCS_12_default_config)
  def test_WINNF_FT_S_SCS_12(self,config_filename):
    """Expired certificate presented during registration.
    Checks that SAS UUT response with fatal alert message.
    """
    config = loadConfig(config_filename)
    self.assertTlsHandshakeFailureOrHttp403(client_cert=config['clientCert'],
                                            client_key=config['clientKey'])

  @winnforum_testcase
  def test_WINNF_FT_S_SCS_13(self):
    """ Disallowed TLS method attempted during registration.
    Checks that SAS UUT response with fatal alert message.
    """
    # TLS 1.1 is not allowed by the WInnForum security requirements.
    self.assertTlsHandshakeFailureOrHttp403(
        ssl_method=SSL.TLSv1_1_METHOD,
        client_cert=getCertFilename('device_a.cert'),
        client_key=getCertFilename('device_a.key'))

  @winnforum_testcase
  def test_WINNF_FT_S_SCS_14(self):
    """Invalid ciphersuite presented during registration.
    Checks that SAS UUT response with fatal alert message.
    """
    self.assertTlsHandshakeFailureOrHttp403(
        ciphers='ECDHE-RSA-AES256-GCM-SHA384',
        client_cert=getCertFilename('device_a.cert'),
        client_key=getCertFilename('device_a.key'))

  def generate_SCS_15_default_config(self, filename):
    """ Generates the WinnForum configuration for SCS.15 """
    # Create the actual config for client cert/key path
    config = {
        'clientCert': getCertFilename("device_inapplicable.cert"),
        'clientKey': getCertFilename("device_a.key")
    }
    writeConfig(filename, config)

  @configurable_testcase(generate_SCS_15_default_config)
  def test_WINNF_FT_S_SCS_15(self,config_filename):
    """Certificate with inapplicable fields presented during registration.
    Checks that SAS UUT response with tls connection succedded and response should be 104.
    """
    config = loadConfig(config_filename)
    # Load the keys/certs and check that TLS handshake is valid
    device_a_cert = config['clientCert']
    device_a_key = config['clientKey']
    self.assertTlsHandshakeSucceed(self._sas_admin._base_url, ['AES128-GCM-SHA256'], device_a_cert, device_a_key)
    # Load device and inject fccId and userId
    device_a = json.load(open(os.path.join('testcases', 'testdata', 'device_a.json')))
    self.SasReset()
    self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
    self._sas_admin.InjectUserId({'userId': device_a['userId']})
    # Send registration Request with certs(inapplicable fields) to SAS UUT
    request = {'registrationRequest': [device_a]}
    response = self._sas.Registration(request,ssl_cert=device_a_cert,ssl_key=device_a_key)['registrationResponse'][0]
    # Check registration response
    self.assertEqual(response['response']['responseCode'],104)

  def generate_SCS_16_default_config(self, filename):
    """Generate the WinnForum configuration for SCS_16."""
    # Create the configuration for client cert/key path.
    config = {
        'clientCert': getCertFilename("device_cert_from_revoked_ca.cert"),
        'clientKey': getCertFilename("device_cert_from_revoked_ca.key")
    }
    writeConfig(filename, config)

  @configurable_testcase(generate_SCS_16_default_config)
  def test_WINNF_FT_S_SCS_16(self, config_filename):
    """Certificate signed by a revoked CA presented during registration.
    Checks that SAS UUT response with fatal alert message.
    """
    # Read the configuration
    config = loadConfig(config_filename)
    # Tls handshake fails since CA is revoked
    self.assertTlsHandshakeFailureOrHttp403(client_cert=config['clientCert'],
                                            client_key=config['clientKey'])
    logging.info("TLS handshake failed as the CA certificate has been revoked")

  @winnforum_testcase
  def test_WINNF_FT_S_SCS_17(self):
    """Invalid certificate following an approved registration request.
    Checks that SAS UUT response with fatal alert message.
    """
    # Reset the SAS UUT
    self.SasReset()
    device_cert_name = "short_lived_client"
    cert_duration_minutes = 1  # in minutes
    # Create a short lived certificate signed by intermediate CBSD CA.
    self.createShortLivedCertificate("CBSD", device_cert_name, cert_duration_minutes)
    # Get the absolute path of the short lived certificate created.
    device_cert = getCertFilename(device_cert_name + ".cert")
    device_key = getCertFilename(device_cert_name + ".key")
    # Successful TLS Handshake
    self.assertTlsHandshakeSucceed(self._sas_admin._base_url, ['AES128-GCM-SHA256'],
                                   device_cert, device_key)
    logging.info("TLS Handshake Succeeded")
    # Load the device_a file
    device_a = json.load(open(os.path.join('testcases', 'testdata', 'device_a.json')))
    # Register device and grant device with certs(short lived certificates) to SAS UUT.
    # Ensure the registration and grant requests are successful.
    # The CiphersOverload approach is used in to override the default certificates with
    # the certificates configured for this test case.
    with security_testcase.CiphersOverload(self._sas, self._sas._tls_config.ciphers,
                                           device_cert, device_key):
      self.assertRegistered([device_a])
    logging.info("CBSD device is in registered state")
    # Wait for the short lived certificate to expire.
    wait_timer = (cert_duration_minutes * 60) + 5
    logging.info("Waiting for %s secs so the certificate will become invalid", wait_timer)
    time.sleep(wait_timer)
    # Verify TLS handshake fails
    logging.info("CBSD attempts to re-establish TLS Handshake with SAS UUT")
    self.assertTlsHandshakeFailureOrHttp403(client_cert=device_cert, client_key=device_key)
    logging.info("TLS handshake failed as the client certificate is invalid")

  @winnforum_testcase
  def test_WINNF_FT_S_SCS_18(self):
    """Invalid certificate following an approved grant request.
    Checks that SAS UUT response with fatal alert message.
    """
    # Reset the SAS UUT
    self.SasReset()
    device_cert_name = "short_lived_client"
    cert_duration_minutes = 1  # in minutes
    # Create a short lived certificate signed by intermediate CBSD CA.
    self.createShortLivedCertificate("CBSD", device_cert_name, cert_duration_minutes)
    # Get the absolute path of the short lived certificate created.
    device_cert = getCertFilename(device_cert_name + ".cert")
    device_key = getCertFilename(device_cert_name + ".key")
    # Successful TLS Handshake
    self.assertTlsHandshakeSucceed(self._sas_admin._base_url, ['AES128-GCM-SHA256'], device_cert,
                                   device_key)
    logging.info("TLS Handshake Succeeded")
    # Load the device_a file
    device_a = json.load(open(os.path.join('testcases', 'testdata', 'device_a.json')))
    # Load the grant_0 file
    grant_0 = json.load(open(os.path.join('testcases', 'testdata', 'grant_0.json')))
    # Register device and grant device with certs(short lived certificates) to SAS UUT.
    # Ensure the registration and grant requests are successful.
    # The CiphersOverload approach is used in to override the default certificates with
    # the certificates configured for this test case.
    with security_testcase.CiphersOverload(self._sas, self._sas._tls_config.ciphers,
                                           device_cert, device_key):
      cbsd_ids, grant_ids = self.assertRegisteredAndGranted([device_a], [grant_0])
    logging.info("CBSD is in Granted State")
    # Wait for the short lived certificate to expire.
    wait_timer = (cert_duration_minutes * 60) + 5
    logging.info("Waiting for %s secs so the certificate will become invalid", wait_timer)
    time.sleep(wait_timer)
    # Verify TLS handshake fails.
    logging.info("CBSD attempts to re-establish TLS Handshake with SAS UUT")
    self.assertTlsHandshakeFailureOrHttp403(client_cert=device_cert, client_key=device_key)
    logging.info("TLS handshake failed as the client certificate is invalid")

  @winnforum_testcase
  def test_WINNF_FT_S_SCS_19(self):
    """Invalid certificate following an approved heartbeat request.
    Checks that SAS UUT response with fatal alert message.
    """
    # Reset the SAS UUT
    self.SasReset()
    device_cert_name = "short_lived_client"
    cert_duration_minutes = 1  # in minutes
    # Create a short lived certificate signed by intermediate CBSD CA.
    self.createShortLivedCertificate("CBSD", device_cert_name, cert_duration_minutes)
    # Get the absolute path of the short lived certificate created.
    device_cert = getCertFilename(device_cert_name + ".cert")
    device_key = getCertFilename(device_cert_name + ".key")
    # Successful TLS Handshake.
    self.assertTlsHandshakeSucceed(self._sas_admin._base_url, ['AES128-GCM-SHA256'],
                                   device_cert, device_key)
    logging.info("TLS Handshake Succeeded")
    # Load the device_a file.
    device_a = json.load(open(os.path.join('testcases', 'testdata', 'device_a.json')))
    # Load the grant_0 file.
    grant_0 = json.load(open(os.path.join('testcases', 'testdata', 'grant_0.json')))
    # Register device and grant device with certs(short lived certificates) to SAS UUT.
    # Ensure the registration and grant requests are successful.
    # The CiphersOverload approach is used in to override the default certificates with
    # the certificates configured for this test case.
    with security_testcase.CiphersOverload(self._sas, self._sas._tls_config.ciphers,
                                           device_cert, device_key):
      cbsd_ids, grant_ids = self.assertRegisteredAndGranted([device_a], [grant_0])
      operation_states = ['GRANTED']
      # Send the Heartbeat request for the Grant of CBSD to SAS UUT.
      transmit_expire_time = self.assertHeartbeatsSuccessful(cbsd_ids, grant_ids, operation_states)
    logging.info("CBSD is in HeartBeat Successful State")
    # Wait for the short lived certificate to expire.
    wait_timer = (cert_duration_minutes * 60) + 5
    logging.info("Waiting for %s secs so the certificate will become invalid", wait_timer)
    time.sleep(wait_timer)
    # Verify TLS handshake fails.
    logging.info("CBSD attempts to re-establish TLS Handshake with SAS UUT")
    self.assertTlsHandshakeFailureOrHttp403(client_cert=device_cert, client_key=device_key)
    logging.info("TLS handshake failed as the client certificate is invalid")
|
<reponame>robert-haas/mevis
import os
import mevis as mv
import shared
def test_entrypoint():
    """The mevis CLI is installed: -h succeeds, an unknown flag fails."""
    assert os.system('mevis -h') == 0
    assert os.system('mevis -nonsense') != 0
def test_convert_and_plot(tmpdir):
    """Exercise the CLI end to end: display, export to every supported
    format, overwrite handling, and invalid source/target arguments."""
    atomspace = shared.load_moses_atomspace()
    source = 'some_atomspace.scm'
    with tmpdir.as_cwd():
        mv.store(atomspace, source)
        # Generate a plot and display it
        assert os.system('mevis -i {}'.format(source)) == 0
        # Generate a graph object or plot and export it to a file
        for extension in ('html', 'jpg', 'png', 'svg', 'gml', 'gml.gz', 'gml.bz2'):
            target = 'test.' + extension
            assert not os.path.isfile(target)
            assert os.system('mevis -b d3 -i {} -o {}'.format(source, target)) == 0
            assert os.path.isfile(target)
            # default: overwrite fails
            assert os.system('mevis -b d3 -i {} -o {}'.format(source, target)) != 0
            # --force: overwrite is enforced
            for force, verbose in [('-f', '-v'), ('--force', '--verbose')]:
                assert os.system('mevis -b d3 -i {} -o {} {} {}'.format(source, target, force, verbose)) == 0
        # Invalid source
        assert os.system('mevis -i {} -o {}'.format('nonsense', target)) != 0
        # Invalid target
        assert os.system('mevis -i {} -o {}'.format(source, 'nonsense')) != 0
        # Existing target
        assert os.system('mevis -i {} -o {}'.format(source, target)) != 0
def test_filter(tmpdir):
    """Every known filter target/context/mode combination succeeds;
    malformed filter contexts are rejected."""
    known_targets = [
        'AndLink',
        '''"['NotLink', 'OrLink']"''']
    known_contexts = [
        'atom', 'in', 'out', 'both', 'in-tree', 'out-tree',
        '''"('in', 2)"''', '''"('out', 2)"''', '''"('both', 2)"''']
    known_modes = ['include', 'exclude']
    atomspace = shared.load_moses_atomspace()
    source = 'some_atomspace.scm'
    with tmpdir.as_cwd():
        # Valid target, context and mode
        mv.store(atomspace, source)
        for target_opt in known_targets:
            for context_opt in known_contexts:
                for mode_opt in known_modes:
                    command = 'mevis -i {} -o test.html -f -ft {} -fc {} -fm {}'.format(
                        source, target_opt, context_opt, mode_opt)
                    assert os.system(command) == 0
        # Invalid context
        for bad_context in ['nonsense', '''"('in', -1)"''', '''"('nonsense', 2)"''']:
            command = 'mevis -i {} -o test.html -f -ft atom -fc {}'.format(source, bad_context)
            assert os.system(command) != 0
def test_layout(tmpdir):
    """Every known layout method produces a plot successfully."""
    known_layouts = [
        'dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp',
        'bipartite', 'circular', 'kamada_kawai', 'planar', 'random', 'shell', 'spring',
        'spectral', 'spiral']
    atomspace = shared.load_moses_atomspace()
    source = 'some_atomspace.scm'
    with tmpdir.as_cwd():
        mv.store(atomspace, source)
        for layout_method in known_layouts:
            command = 'mevis -i {} -o test.html -f -l {}'.format(source, layout_method)
            assert os.system(command) == 0
def test_kwargs(tmpdir):
    """--kwargs accepts key=value pairs and rejects malformed or unknown ones."""
    atomspace = shared.load_moses_atomspace()
    source = 'some_atomspace.scm'
    with tmpdir.as_cwd():
        mv.store(atomspace, source)
        # Valid kwargs
        valid_kwargs = [
            'edge_curvature=0.4',
            'node_label_data_source="id" show_node=False',
        ]
        # Invalid kwargs
        invalid_kwargs = [
            '',
            'edge_curvature 0.4',
            'node_label_data_source "id" show_node=False',
            'node_label_data_source="id" show_node False',
            'nonsense=0.4',
        ]
        for kwarg_str, expected_ok in \
                [(k, True) for k in valid_kwargs] + [(k, False) for k in invalid_kwargs]:
            command = 'mevis -i {} -o test.html -f -b d3 -l dot --kwargs {}'.format(source, kwarg_str)
            exit_status = os.system(command)
            if expected_ok:
                assert exit_status == 0
            else:
                assert exit_status != 0
|
from __future__ import division
import os
from ctypes import *
from itertools import count
from openslide import lowlevel
# Module import side effects: report the working directory (debug aid) and
# load the vendor KFB decoding library that ships in lib/ next to this file.
print(os.getcwd())
_dirname_ = os.path.dirname(os.path.abspath(__file__))
_lib = cdll.LoadLibrary(f'{_dirname_}/lib/libkfbslide.so')
class KFBSlideError(Exception):
    """Raised for KFB-slide-specific failures."""
class _KfbSlide(object):
def __init__(self, ptr):
self._as_parameter_ = ptr
self._valid = True
self._close = kfbslide_close
def __del__(self):
if self._valid:
self._close(self)
def invalidate(self):
self._valid = False
@classmethod
def from_param(cls, obj):
if obj.__class__ != cls:
raise ValueError("Not an KfbSlide reference")
if not obj._as_parameter_:
raise ValueError("Passing undefined slide object")
if not obj._valid:
raise ValueError("Passing closed kfbSlide object")
return obj
# check for errors opening an image file and wrap the resulting handle
def _check_open(result, _func, _args):
    """errcheck hook for kfbslide_open: wrap the raw pointer in a _KfbSlide."""
    if result is None:
        raise lowlevel.OpenSlideUnsupportedFormatError(
            "Unsupported or missing image file")
    slide = _KfbSlide(c_void_p(result))
    # err = get_error(slide)
    # if err is not None:
    #     raise lowlevel.OpenSlideError(err)
    return slide
# prevent further operations on slide handle after it is closed
def _check_close(_result, _func, args):
args[0].invalidate()
# check if the library got into an error state after each library call
def _check_error(result, func, args):
    """Default errcheck hook: delegate string conversion to openslide.lowlevel."""
    # err = get_error(args[0])
    # if err is not None:
    #     raise lowlevel.OpenSlideError(err)
    return lowlevel._check_string(result, func, args)
# Convert returned NULL-terminated char** into a list of strings
def _check_name_list(result, func, args):
    """errcheck hook: decode a NULL-terminated char** into a list of str."""
    _check_error(result, func, args)
    names = []
    for idx in count():
        raw = result[idx]
        if not raw:
            break
        names.append(raw.decode('UTF-8', 'replace'))
    return names
# resolve and return an OpenSlide function with the specified properties
def _func(name, restype, argtypes, errcheck=_check_error):
    """Resolve `name` from the native library and configure its ctypes signature."""
    fn = getattr(_lib, name)
    fn.argtypes = argtypes
    fn.restype = restype
    if errcheck is not None:
        fn.errcheck = errcheck
    return fn
# --- ctypes bindings resolved from libkfbslide ---------------------------
# Vendor detection; returns a plain string.
detect_vendor = _func("kfbslide_detect_vendor", c_char_p, [lowlevel._utf8_p],
                      lowlevel._check_string)
# Raw open; wrapped by kfbslide_open() below so it returns a _KfbSlide.
_kfbslide_open = _func("kfbslide_open", c_void_p, [lowlevel._utf8_p], _check_open)
# _kfbslide_set_attrs = _func("kfbslide_set_attrs", None, [_KfbSlide, c_int, c_int, c_int, c_float, c_double, c_float, c_int])
def kfbslide_open(name):
    """Open a .kfb slide file and return an owning _KfbSlide handle."""
    osr = _kfbslide_open(name)
    # height, width, scale, spendTime, scanTime, imgCapRes, blkSize = kfbHeaderInfo.GetHeaderInfo(name)
    # _kfbslide_set_attrs(osr, height, width, scale, spendTime, scanTime, imgCapRes, blkSize)
    return osr
# NOTE(review): this binds openslide's lowlevel._check_close, not the local
# _check_close defined above (which appears unused) — confirm intent.
kfbslide_close = _func("kfbslide_close", None, [_KfbSlide], lowlevel._check_close)
kfbslide_get_level_count = _func("kfbslide_get_level_count", c_int32, [_KfbSlide])
# Writes width/height through out-parameters; wrapped below.
_kfbslide_get_level_dimensions = _func("kfbslide_get_level_dimensions", None,
                                       [_KfbSlide, c_int32, POINTER(c_int64), POINTER(c_int64)])
def kfbslide_get_level_dimensions(osr, level):
    """Return the (width, height) of pyramid *level* as plain ints."""
    width, height = c_int64(), c_int64()
    _kfbslide_get_level_dimensions(osr, level, byref(width), byref(height))
    return (width.value, height.value)
# Downsample queries and the raw region-read entry points.
kfbslide_get_level_downsample = _func("kfbslide_get_level_downsample",
        c_double, [_KfbSlide, c_int32])
kfbslide_get_best_level_for_downsample = _func(
    "kfbslide_get_best_level_for_downsample", c_int32, [_KfbSlide, c_double])
_kfbslide_read_region = _func("kfbslide_read_region", c_bool,
        [_KfbSlide, c_int32, c_int64, c_int64, POINTER(c_int), POINTER(POINTER(c_ubyte))])
_kfbslide_read_roi_region = _func("kfbslide_get_image_roi_stream", c_bool,
        [_KfbSlide, c_int32, c_int64, c_int64, c_int64, c_int64, POINTER(c_int),
        POINTER(POINTER(c_ubyte))])
def kfbslide_read_region(osr, level, pos_x, pos_y):
    """Read the encoded tile at (pos_x, pos_y) on pyramid *level*.

    Returns a 1-D numpy uint8 array holding the raw byte stream as
    produced by the library (presumably a compressed image, not decoded
    pixels -- confirm against callers).

    Raises ValueError (with the failing coordinates) if the library
    reports failure.
    """
    import numpy as np

    data_length = c_int()
    pixel = POINTER(c_ubyte)()
    if not _kfbslide_read_region(osr, level, pos_x, pos_y,
                                 byref(data_length), byref(pixel)):
        # Include the location in the message so failures are diagnosable.
        raise ValueError(
            "Failed to read region at level %d, position (%d, %d)"
            % (level, pos_x, pos_y))
    return np.ctypeslib.as_array(pixel, shape=(data_length.value,))
def kfbslide_read_roi_region(osr, level, pos_x, pos_y, width, height):
    """Read an arbitrary ROI (pos_x, pos_y, width, height) on *level*.

    Returns a 1-D numpy uint8 array with the raw byte stream returned by
    the library.

    Raises ValueError (with the failing ROI) if the library reports
    failure.
    """
    import numpy as np

    data_length = c_int()
    pixel = POINTER(c_ubyte)()
    if not _kfbslide_read_roi_region(osr, level, pos_x, pos_y, width, height,
                                     byref(data_length), byref(pixel)):
        # Include the ROI in the message so failures are diagnosable.
        raise ValueError(
            "Failed to read ROI region at level %d, position (%d, %d), size (%d, %d)"
            % (level, pos_x, pos_y, width, height))
    return np.ctypeslib.as_array(pixel, shape=(data_length.value,))
# Property and associated-image entry points.
kfbslide_property_names = _func("kfbslide_get_property_names", POINTER(c_char_p),
        [_KfbSlide], _check_name_list)
kfbslide_property_value = _func("kfbslide_get_property_value", c_char_p,
        [_KfbSlide, lowlevel._utf8_p])
_kfbslide_get_associated_image_names = _func("kfbslide_get_associated_image_names", POINTER(c_char_p), [_KfbSlide],
        _check_name_list)
def kfbslide_get_associated_image_names(osr):
    """Return the slide's associated image names, truncated at the first
    None entry (defensive: the errcheck hook normally strips the NULL)."""
    out = []
    for entry in _kfbslide_get_associated_image_names(osr):
        if entry is None:
            break
        out.append(entry)
    return out
# Raw binding; wrapped by kfbslide_get_associated_image_dimensions below.
_kfbslide_get_associated_image_dimensions = _func("kfbslide_get_associated_image_dimensions", c_void_p,
        [_KfbSlide, lowlevel._utf8_p, POINTER(c_int64), POINTER(c_int64),
        POINTER(c_int)])
def kfbslide_get_associated_image_dimensions(osr, name):
    """Return ((width, height), byte_length) for associated image *name*."""
    width, height, nbytes = c_int64(), c_int64(), c_int()
    _kfbslide_get_associated_image_dimensions(
        osr, name, byref(width), byref(height), byref(nbytes))
    return (width.value, height.value), nbytes.value
# Raw binding; wrapped by kfbslide_read_associated_image below.
_kfbslide_read_associated_image = _func("kfbslide_read_associated_image", c_void_p,
        [_KfbSlide, lowlevel._utf8_p, POINTER(POINTER(c_ubyte))])
def kfbslide_read_associated_image(osr, name):
    """Read associated image *name* and return it as a lazily-decoded
    PIL Image (the byte stream is wrapped in a BytesIO buffer)."""
    from io import BytesIO

    import numpy as np
    from PIL import Image

    _, data_length = kfbslide_get_associated_image_dimensions(osr, name)
    pixel = POINTER(c_ubyte)()
    _kfbslide_read_associated_image(osr, name, byref(pixel))
    raw = np.ctypeslib.as_array(pixel, shape=(data_length,))
    return Image.open(BytesIO(raw))
|
#
# Autogenerated by Thrift Compiler (0.14.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class Type(object):
    """
    Physical types supported by Parquet, intended to be combined with an
    encoding to control the on-disk format. INT16 is deliberately absent:
    a good encoding of INT32 covers it.
    """
    BOOLEAN = 0
    INT32 = 1
    INT64 = 2
    INT96 = 3
    FLOAT = 4
    DOUBLE = 5
    BYTE_ARRAY = 6
    FIXED_LEN_BYTE_ARRAY = 7

    _VALUES_TO_NAMES = {
        0: "BOOLEAN",
        1: "INT32",
        2: "INT64",
        3: "INT96",
        4: "FLOAT",
        5: "DOUBLE",
        6: "BYTE_ARRAY",
        7: "FIXED_LEN_BYTE_ARRAY",
    }

    # The enum is bijective, so the reverse table is derived mechanically.
    _NAMES_TO_VALUES = {_n: _v for _v, _n in _VALUES_TO_NAMES.items()}
class ConvertedType(object):
    """
    DEPRECATED: common types used by frameworks (e.g. Hive, Pig).
    Superseded by LogicalType; this enum must not be extended. See
    LogicalTypes.md for the ConvertedType <-> LogicalType conversion.
    """
    UTF8 = 0
    MAP = 1
    MAP_KEY_VALUE = 2
    LIST = 3
    ENUM = 4
    DECIMAL = 5
    DATE = 6
    TIME_MILLIS = 7
    TIME_MICROS = 8
    TIMESTAMP_MILLIS = 9
    TIMESTAMP_MICROS = 10
    UINT_8 = 11
    UINT_16 = 12
    UINT_32 = 13
    UINT_64 = 14
    INT_8 = 15
    INT_16 = 16
    INT_32 = 17
    INT_64 = 18
    JSON = 19
    BSON = 20
    INTERVAL = 21

    _VALUES_TO_NAMES = {
        0: "UTF8",
        1: "MAP",
        2: "MAP_KEY_VALUE",
        3: "LIST",
        4: "ENUM",
        5: "DECIMAL",
        6: "DATE",
        7: "TIME_MILLIS",
        8: "TIME_MICROS",
        9: "TIMESTAMP_MILLIS",
        10: "TIMESTAMP_MICROS",
        11: "UINT_8",
        12: "UINT_16",
        13: "UINT_32",
        14: "UINT_64",
        15: "INT_8",
        16: "INT_16",
        17: "INT_32",
        18: "INT_64",
        19: "JSON",
        20: "BSON",
        21: "INTERVAL",
    }

    # The enum is bijective, so the reverse table is derived mechanically.
    _NAMES_TO_VALUES = {_n: _v for _v, _n in _VALUES_TO_NAMES.items()}
class FieldRepetitionType(object):
    """Repetition of a schema field: required, optional, or repeated."""
    REQUIRED = 0
    OPTIONAL = 1
    REPEATED = 2

    _VALUES_TO_NAMES = {
        0: "REQUIRED",
        1: "OPTIONAL",
        2: "REPEATED",
    }

    # The enum is bijective, so the reverse table is derived mechanically.
    _NAMES_TO_VALUES = {_n: _v for _v, _n in _VALUES_TO_NAMES.items()}
class Encoding(object):
    """
    Encodings supported by Parquet; also used for definition/repetition
    levels. Not all encodings are valid for all types. Note value 1 is
    intentionally unused (a retired encoding).
    """
    PLAIN = 0
    PLAIN_DICTIONARY = 2
    RLE = 3
    BIT_PACKED = 4
    DELTA_BINARY_PACKED = 5
    DELTA_LENGTH_BYTE_ARRAY = 6
    DELTA_BYTE_ARRAY = 7
    RLE_DICTIONARY = 8
    BYTE_STREAM_SPLIT = 9

    _VALUES_TO_NAMES = {
        0: "PLAIN",
        2: "PLAIN_DICTIONARY",
        3: "RLE",
        4: "BIT_PACKED",
        5: "DELTA_BINARY_PACKED",
        6: "DELTA_LENGTH_BYTE_ARRAY",
        7: "DELTA_BYTE_ARRAY",
        8: "RLE_DICTIONARY",
        9: "BYTE_STREAM_SPLIT",
    }

    # The enum is bijective, so the reverse table is derived mechanically.
    _NAMES_TO_VALUES = {_n: _v for _v, _n in _VALUES_TO_NAMES.items()}
class CompressionCodec(object):
    """
    Supported compression algorithms. Codecs added in format version X.Y
    can be read by readers based on X.Y and later; support may vary with
    the libraries available at runtime. See Compression.md.
    """
    UNCOMPRESSED = 0
    SNAPPY = 1
    GZIP = 2
    LZO = 3
    BROTLI = 4
    LZ4 = 5
    ZSTD = 6
    LZ4_RAW = 7

    _VALUES_TO_NAMES = {
        0: "UNCOMPRESSED",
        1: "SNAPPY",
        2: "GZIP",
        3: "LZO",
        4: "BROTLI",
        5: "LZ4",
        6: "ZSTD",
        7: "LZ4_RAW",
    }

    # The enum is bijective, so the reverse table is derived mechanically.
    _NAMES_TO_VALUES = {_n: _v for _v, _n in _VALUES_TO_NAMES.items()}
class PageType(object):
    """Kinds of pages that can appear in a Parquet column chunk."""
    DATA_PAGE = 0
    INDEX_PAGE = 1
    DICTIONARY_PAGE = 2
    DATA_PAGE_V2 = 3

    _VALUES_TO_NAMES = {
        0: "DATA_PAGE",
        1: "INDEX_PAGE",
        2: "DICTIONARY_PAGE",
        3: "DATA_PAGE_V2",
    }

    # The enum is bijective, so the reverse table is derived mechanically.
    _NAMES_TO_VALUES = {_n: _v for _v, _n in _VALUES_TO_NAMES.items()}
class BoundaryOrder(object):
    """
    Whether the min/max element lists inside a ColumnIndex are ordered
    and, if so, in which direction.
    """
    UNORDERED = 0
    ASCENDING = 1
    DESCENDING = 2

    _VALUES_TO_NAMES = {
        0: "UNORDERED",
        1: "ASCENDING",
        2: "DESCENDING",
    }

    # The enum is bijective, so the reverse table is derived mechanically.
    _NAMES_TO_VALUES = {_n: _v for _v, _n in _VALUES_TO_NAMES.items()}
class Statistics(object):
    """
    Per-row-group / per-page statistics. All fields are optional.

    max/min are DEPRECATED (signed-comparison order only; kept for old
    readers) -- new files should use max_value/min_value, which follow the
    column's ColumnOrder. Values use PLAIN encoding, except that
    variable-length byte arrays carry no length prefix.
    null_count / distinct_count count null and distinct values.
    """

    def __init__(self, max=None, min=None, null_count=None, distinct_count=None, max_value=None, min_value=None,):
        self.max = max
        self.min = min
        self.null_count = null_count
        self.distinct_count = distinct_count
        self.max_value = max_value
        self.min_value = min_value

    def read(self, iprot):
        """Deserialize from *iprot*; unknown or mistyped fields are skipped."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRING:
                self.max = iprot.readBinary()
            elif fid == 2 and ftype == TType.STRING:
                self.min = iprot.readBinary()
            elif fid == 3 and ftype == TType.I64:
                self.null_count = iprot.readI64()
            elif fid == 4 and ftype == TType.I64:
                self.distinct_count = iprot.readI64()
            elif fid == 5 and ftype == TType.STRING:
                self.max_value = iprot.readBinary()
            elif fid == 6 and ftype == TType.STRING:
                self.min_value = iprot.readBinary()
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize every non-None field to *oprot*, in field-id order."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('Statistics')
        for fname, ftype, fid, value, writer in (
                ('max', TType.STRING, 1, self.max, oprot.writeBinary),
                ('min', TType.STRING, 2, self.min, oprot.writeBinary),
                ('null_count', TType.I64, 3, self.null_count, oprot.writeI64),
                ('distinct_count', TType.I64, 4, self.distinct_count, oprot.writeI64),
                ('max_value', TType.STRING, 5, self.max_value, oprot.writeBinary),
                ('min_value', TType.STRING, 6, self.min_value, oprot.writeBinary),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, ftype, fid)
                writer(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # All fields optional: nothing to check.
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class StringType(object):
    """UTF8 string logical type annotation (empty marker struct)."""

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'StringType' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('StringType')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class UUIDType(object):
    """UUID logical type annotation (empty marker struct)."""

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'UUIDType' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('UUIDType')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class MapType(object):
    """MAP logical type annotation (empty marker struct)."""

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'MapType' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('MapType')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ListType(object):
    """LIST logical type annotation (empty marker struct)."""

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'ListType' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ListType')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class EnumType(object):
    """ENUM logical type annotation (empty marker struct)."""

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'EnumType' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('EnumType')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class DateType(object):
    """DATE logical type annotation (empty marker struct)."""

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'DateType' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('DateType')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class NullType(object):
    """
    Logical type for an always-null column (empty marker struct).

    Used when schema discovery only saw null values, so the physical type
    had to be guessed.
    """

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'NullType' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('NullType')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class DecimalType(object):
    """
    DECIMAL logical type annotation (scale, precision).

    For v1 forward-compatibility, implementations must also set scale and
    precision on the annotated SchemaElement.
    Allowed physical types: INT32, INT64, FIXED, BINARY.
    """

    def __init__(self, scale=None, precision=None,):
        self.scale = scale
        self.precision = precision

    def read(self, iprot):
        """Deserialize from *iprot*; unknown or mistyped fields are skipped."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I32:
                self.scale = iprot.readI32()
            elif fid == 2 and ftype == TType.I32:
                self.precision = iprot.readI32()
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize non-None fields to *oprot*, in field-id order."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('DecimalType')
        if self.scale is not None:
            oprot.writeFieldBegin('scale', TType.I32, 1)
            oprot.writeI32(self.scale)
            oprot.writeFieldEnd()
        if self.precision is not None:
            oprot.writeFieldBegin('precision', TType.I32, 2)
            oprot.writeI32(self.precision)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Both fields are required on the wire."""
        if self.scale is None:
            raise TProtocolException(message='Required field scale is unset!')
        if self.precision is None:
            raise TProtocolException(message='Required field precision is unset!')

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class MilliSeconds(object):
    """Millisecond time-unit marker for logical types (empty struct)."""

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'MilliSeconds' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('MilliSeconds')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class MicroSeconds(object):
    """Microsecond time-unit marker for logical types (empty struct)."""

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'MicroSeconds' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('MicroSeconds')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class NanoSeconds(object):
    """Nanosecond time-unit marker for logical types (empty struct)."""

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'NanoSeconds' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('NanoSeconds')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TimeUnit(object):
    """
    Union-style holder for a time unit: exactly one of MILLIS, MICROS,
    NANOS is expected to be set (the generated code does not enforce it).
    """

    def __init__(self, MILLIS=None, MICROS=None, NANOS=None,):
        self.MILLIS = MILLIS
        self.MICROS = MICROS
        self.NANOS = NANOS

    def read(self, iprot):
        """Deserialize from *iprot*; unknown or mistyped fields are skipped."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRUCT:
                self.MILLIS = MilliSeconds()
                self.MILLIS.read(iprot)
            elif fid == 2 and ftype == TType.STRUCT:
                self.MICROS = MicroSeconds()
                self.MICROS.read(iprot)
            elif fid == 3 and ftype == TType.STRUCT:
                self.NANOS = NanoSeconds()
                self.NANOS.read(iprot)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize every non-None member to *oprot*, in field-id order."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TimeUnit')
        for fname, fid, value in (('MILLIS', 1, self.MILLIS),
                                  ('MICROS', 2, self.MICROS),
                                  ('NANOS', 3, self.NANOS)):
            if value is not None:
                oprot.writeFieldBegin(fname, TType.STRUCT, fid)
                value.write(oprot)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TimestampType(object):
    """
    TIMESTAMP logical type annotation.

    Allowed physical types: INT64.
    Fields: isAdjustedToUTC (bool), unit (TimeUnit) -- both required.
    """

    def __init__(self, isAdjustedToUTC=None, unit=None,):
        self.isAdjustedToUTC = isAdjustedToUTC
        self.unit = unit

    def read(self, iprot):
        """Deserialize from *iprot*; unknown or mistyped fields are skipped."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.BOOL:
                self.isAdjustedToUTC = iprot.readBool()
            elif fid == 2 and ftype == TType.STRUCT:
                self.unit = TimeUnit()
                self.unit.read(iprot)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize non-None fields to *oprot*, in field-id order."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TimestampType')
        if self.isAdjustedToUTC is not None:
            oprot.writeFieldBegin('isAdjustedToUTC', TType.BOOL, 1)
            oprot.writeBool(self.isAdjustedToUTC)
            oprot.writeFieldEnd()
        if self.unit is not None:
            oprot.writeFieldBegin('unit', TType.STRUCT, 2)
            self.unit.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Both fields are required on the wire."""
        if self.isAdjustedToUTC is None:
            raise TProtocolException(message='Required field isAdjustedToUTC is unset!')
        if self.unit is None:
            raise TProtocolException(message='Required field unit is unset!')

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TimeType(object):
    """
    TIME logical type annotation.

    Allowed physical types: INT32 (millis), INT64 (micros, nanos).
    Fields: isAdjustedToUTC (bool), unit (TimeUnit) -- both required.
    """

    def __init__(self, isAdjustedToUTC=None, unit=None,):
        self.isAdjustedToUTC = isAdjustedToUTC
        self.unit = unit

    def read(self, iprot):
        """Deserialize from *iprot*; unknown or mistyped fields are skipped."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.BOOL:
                self.isAdjustedToUTC = iprot.readBool()
            elif fid == 2 and ftype == TType.STRUCT:
                self.unit = TimeUnit()
                self.unit.read(iprot)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize non-None fields to *oprot*, in field-id order."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TimeType')
        if self.isAdjustedToUTC is not None:
            oprot.writeFieldBegin('isAdjustedToUTC', TType.BOOL, 1)
            oprot.writeBool(self.isAdjustedToUTC)
            oprot.writeFieldEnd()
        if self.unit is not None:
            oprot.writeFieldBegin('unit', TType.STRUCT, 2)
            self.unit.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Both fields are required on the wire."""
        if self.isAdjustedToUTC is None:
            raise TProtocolException(message='Required field isAdjustedToUTC is unset!')
        if self.unit is None:
            raise TProtocolException(message='Required field unit is unset!')

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class IntType(object):
    """
    Integer logical type annotation.

    bitWidth must be 8, 16, 32, or 64 (not enforced here).
    Allowed physical types: INT32, INT64.
    Fields: bitWidth (byte), isSigned (bool) -- both required.
    """

    def __init__(self, bitWidth=None, isSigned=None,):
        self.bitWidth = bitWidth
        self.isSigned = isSigned

    def read(self, iprot):
        """Deserialize from *iprot*; unknown or mistyped fields are skipped."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.BYTE:
                self.bitWidth = iprot.readByte()
            elif fid == 2 and ftype == TType.BOOL:
                self.isSigned = iprot.readBool()
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize non-None fields to *oprot*, in field-id order."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('IntType')
        if self.bitWidth is not None:
            oprot.writeFieldBegin('bitWidth', TType.BYTE, 1)
            oprot.writeByte(self.bitWidth)
            oprot.writeFieldEnd()
        if self.isSigned is not None:
            oprot.writeFieldBegin('isSigned', TType.BOOL, 2)
            oprot.writeBool(self.isSigned)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Both fields are required on the wire."""
        if self.bitWidth is None:
            raise TProtocolException(message='Required field bitWidth is unset!')
        if self.isSigned is None:
            raise TProtocolException(message='Required field isSigned is unset!')

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class JsonType(object):
    """Embedded JSON logical type annotation (BINARY physical type)."""

    def read(self, iprot):
        """Consume one field-less struct from *iprot*, skipping any fields."""
        fast = iprot._fast_decode
        if fast is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            fast(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty 'JsonType' struct to *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('JsonType')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        pass

    def __repr__(self):
        parts = ('%s=%r' % item for item in self.__dict__.items())
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class BsonType(object):
    """
    Embedded BSON logical type annotation.

    Allowed for physical types: BINARY. The struct carries no fields;
    its presence alone is the annotation.
    """

    def read(self, iprot):
        """Consume this (empty) struct from *iprot*, skipping any field."""
        # Accelerated C decode path when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _fname, ftype, _fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # No declared fields: everything encountered is skipped.
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this (empty) struct to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('BsonType')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate: the struct has no required fields."""
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class LogicalType(object):
    """
    LogicalType annotations replacing ConvertedType (union semantics:
    at most one member is expected to be set).

    To maintain compatibility, implementations using LogicalType for a
    SchemaElement must also set the corresponding ConvertedType (if any).

    Attributes:
     - STRING, MAP, LIST, ENUM, DECIMAL, DATE, TIME, TIMESTAMP, INTEGER,
       UNKNOWN, JSON, BSON, UUID: one annotation struct per logical type.
    """

    def __init__(self, STRING=None, MAP=None, LIST=None, ENUM=None, DECIMAL=None, DATE=None, TIME=None, TIMESTAMP=None, INTEGER=None, UNKNOWN=None, JSON=None, BSON=None, UUID=None,):
        self.STRING = STRING
        self.MAP = MAP
        self.LIST = LIST
        self.ENUM = ENUM
        self.DECIMAL = DECIMAL
        self.DATE = DATE
        self.TIME = TIME
        self.TIMESTAMP = TIMESTAMP
        self.INTEGER = INTEGER
        self.UNKNOWN = UNKNOWN
        self.JSON = JSON
        self.BSON = BSON
        self.UUID = UUID

    def _members(self):
        # (field id, attribute name, annotation struct class), in wire order.
        # Built lazily so the struct classes are only resolved at runtime,
        # exactly like the original per-field code.
        return (
            (1, 'STRING', StringType),
            (2, 'MAP', MapType),
            (3, 'LIST', ListType),
            (4, 'ENUM', EnumType),
            (5, 'DECIMAL', DecimalType),
            (6, 'DATE', DateType),
            (7, 'TIME', TimeType),
            (8, 'TIMESTAMP', TimestampType),
            (10, 'INTEGER', IntType),
            (11, 'UNKNOWN', NullType),
            (12, 'JSON', JsonType),
            (13, 'BSON', BsonType),
            (14, 'UUID', UUIDType),
        )

    def read(self, iprot):
        """Populate this union from *iprot*; unknown fields are skipped."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        by_fid = {fid: (attr, cls) for fid, attr, cls in self._members()}
        iprot.readStructBegin()
        while True:
            _fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if ftype == TType.STRUCT and fid in by_fid:
                attr, cls = by_fid[fid]
                member = cls()
                member.read(iprot)
                setattr(self, attr, member)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize every set member to *oprot* in field-id order."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('LogicalType')
        for fid, attr, _cls in self._members():
            member = getattr(self, attr)
            if member is not None:
                oprot.writeFieldBegin(attr, TType.STRUCT, fid)
                member.write(oprot)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields; union emptiness is not enforced here."""
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class SchemaElement(object):
    """
    One element of a schema definition.

    - a group (inner node) has no ``type`` and a ``num_children`` count
    - a primitive (leaf) has a ``type`` and no ``num_children``
    Elements are listed in depth-first traversal order.

    Attributes:
     - type: data type; unset for non-leaf nodes
     - type_length: byte length for FIXED_LEN_BYTE_ARRAY, else an optional
       maximum bit length for the values (fixed for the whole file)
     - repetition_type: repetition of the field; unset only for the schema root
     - name: field name (required)
     - num_children: child count used to rebuild nesting from the flat list;
       unset for primitive elements
     - converted_type: DEPRECATED original-model type, superseded by logicalType
     - scale: DEPRECATED decimal scale, superseded by DecimalType in logicalType
     - precision: DEPRECATED decimal precision (same note as scale)
     - field_id: original field id when the source schema supports ids
     - logicalType: LogicalType of this element (replaces ConvertedType, which
       is still required for some logical types in format v1)
    """

    def __init__(self, type=None, type_length=None, repetition_type=None, name=None, num_children=None, converted_type=None, scale=None, precision=None, field_id=None, logicalType=None,):
        self.type = type
        self.type_length = type_length
        self.repetition_type = repetition_type
        self.name = name
        self.num_children = num_children
        self.converted_type = converted_type
        self.scale = scale
        self.precision = precision
        self.field_id = field_id
        self.logicalType = logicalType

    def _i32_fields(self):
        # thrift field id -> attribute name, for every plain I32 field.
        return {
            1: 'type',
            2: 'type_length',
            3: 'repetition_type',
            5: 'num_children',
            6: 'converted_type',
            7: 'scale',
            8: 'precision',
            9: 'field_id',
        }

    def read(self, iprot):
        """Populate this element from *iprot*; unknown fields are skipped."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        i32_by_fid = self._i32_fields()
        iprot.readStructBegin()
        while True:
            _fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if ftype == TType.I32 and fid in i32_by_fid:
                setattr(self, i32_by_fid[fid], iprot.readI32())
            elif fid == 4 and ftype == TType.STRING:
                raw = iprot.readString()
                # Python 2 returns bytes; decode there, pass through on 3.
                self.name = raw.decode('utf-8', errors='replace') if sys.version_info[0] == 2 else raw
            elif fid == 10 and ftype == TType.STRUCT:
                self.logicalType = LogicalType()
                self.logicalType.read(iprot)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this element to *oprot* in field-id order."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('SchemaElement')
        for attr, ttype, fid in (
                ('type', TType.I32, 1),
                ('type_length', TType.I32, 2),
                ('repetition_type', TType.I32, 3),
                ('name', TType.STRING, 4),
                ('num_children', TType.I32, 5),
                ('converted_type', TType.I32, 6),
                ('scale', TType.I32, 7),
                ('precision', TType.I32, 8),
                ('field_id', TType.I32, 9),
                ('logicalType', TType.STRUCT, 10)):
            value = getattr(self, attr)
            if value is None:
                continue
            oprot.writeFieldBegin(attr, ttype, fid)
            if ttype == TType.STRING:
                # Python 2 needs bytes on the wire; 3 writes str directly.
                oprot.writeString(value.encode('utf-8') if sys.version_info[0] == 2 else value)
            elif ttype == TType.STRUCT:
                value.write(oprot)
            else:
                oprot.writeI32(value)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if the required ``name`` is unset."""
        if self.name is None:
            raise TProtocolException(message='Required field name is unset!')
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class DataPageHeader(object):
    """
    Header of a (v1) data page.

    Attributes:
     - num_values: number of values in this page, NULLs included (required)
     - encoding: encoding used for this data page (required)
     - definition_level_encoding: encoding of the definition levels (required)
     - repetition_level_encoding: encoding of the repetition levels (required)
     - statistics: optional Statistics for the data in this page
    """

    def __init__(self, num_values=None, encoding=None, definition_level_encoding=None, repetition_level_encoding=None, statistics=None,):
        self.num_values = num_values
        self.encoding = encoding
        self.definition_level_encoding = definition_level_encoding
        self.repetition_level_encoding = repetition_level_encoding
        self.statistics = statistics

    def _i32_fields(self):
        # thrift field id -> attribute name, for every plain I32 field.
        return {
            1: 'num_values',
            2: 'encoding',
            3: 'definition_level_encoding',
            4: 'repetition_level_encoding',
        }

    def read(self, iprot):
        """Populate this header from *iprot*; unknown fields are skipped."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        i32_by_fid = self._i32_fields()
        iprot.readStructBegin()
        while True:
            _fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if ftype == TType.I32 and fid in i32_by_fid:
                setattr(self, i32_by_fid[fid], iprot.readI32())
            elif fid == 5 and ftype == TType.STRUCT:
                self.statistics = Statistics()
                self.statistics.read(iprot)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this header to *oprot* in field-id order."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('DataPageHeader')
        i32_by_fid = self._i32_fields()
        for fid in (1, 2, 3, 4):
            value = getattr(self, i32_by_fid[fid])
            if value is not None:
                oprot.writeFieldBegin(i32_by_fid[fid], TType.I32, fid)
                oprot.writeI32(value)
                oprot.writeFieldEnd()
        if self.statistics is not None:
            oprot.writeFieldBegin('statistics', TType.STRUCT, 5)
            self.statistics.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is still None."""
        for required in ('num_values', 'encoding', 'definition_level_encoding', 'repetition_level_encoding'):
            if getattr(self, required) is None:
                raise TProtocolException(message='Required field %s is unset!' % required)
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class IndexPageHeader(object):
    """Header of an index page; the struct currently carries no fields."""

    def read(self, iprot):
        """Consume this (empty) struct from *iprot*, skipping any field."""
        # Accelerated C decode path when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _fname, ftype, _fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # No declared fields: everything encountered is skipped.
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this (empty) struct to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('IndexPageHeader')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate: the struct has no required fields."""
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class DictionaryPageHeader(object):
    """
    Header of a dictionary page.

    The dictionary page must be placed at the first position of the column
    chunk if it is partly or completely dictionary encoded; at most one
    dictionary page may appear in a column chunk.

    Attributes:
     - num_values: number of values in the dictionary (required)
     - encoding: encoding used for this dictionary page (required)
     - is_sorted: True if entries are sorted in ascending order
    """

    def __init__(self, num_values=None, encoding=None, is_sorted=None,):
        self.num_values = num_values
        self.encoding = encoding
        self.is_sorted = is_sorted

    def read(self, iprot):
        """Populate this header from *iprot*; unknown fields are skipped."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I32:
                self.num_values = iprot.readI32()
            elif fid == 2 and ftype == TType.I32:
                self.encoding = iprot.readI32()
            elif fid == 3 and ftype == TType.BOOL:
                self.is_sorted = iprot.readBool()
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this header to *oprot* in field-id order."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('DictionaryPageHeader')
        if self.num_values is not None:
            oprot.writeFieldBegin('num_values', TType.I32, 1)
            oprot.writeI32(self.num_values)
            oprot.writeFieldEnd()
        if self.encoding is not None:
            oprot.writeFieldBegin('encoding', TType.I32, 2)
            oprot.writeI32(self.encoding)
            oprot.writeFieldEnd()
        if self.is_sorted is not None:
            oprot.writeFieldBegin('is_sorted', TType.BOOL, 3)
            oprot.writeBool(self.is_sorted)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is still None."""
        for required in ('num_values', 'encoding'):
            if getattr(self, required) is None:
                raise TProtocolException(message='Required field %s is unset!' % required)
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class DataPageHeaderV2(object):
    """
    V2 data page header: levels can be read without decompressing the data.

    Repetition and definition levels are stored uncompressed; only the data
    section after them is compressed when ``is_compressed`` is true.

    Attributes:
     - num_values: number of values, NULLs included (required)
     - num_nulls: number of NULL values; non-null count is
       num_values - num_nulls (required)
     - num_rows: number of rows — pages change on record boundaries (required)
     - encoding: encoding used for the data section (required)
     - definition_levels_byte_length: byte length of definition levels (required)
     - repetition_levels_byte_length: byte length of repetition levels (required)
     - is_compressed: whether the data section is compressed with the
       column's compression_codec; defaults to True when absent
     - statistics: optional Statistics for the data in this page
    """

    def __init__(self, num_values=None, num_nulls=None, num_rows=None, encoding=None, definition_levels_byte_length=None, repetition_levels_byte_length=None, is_compressed=True, statistics=None,):
        self.num_values = num_values
        self.num_nulls = num_nulls
        self.num_rows = num_rows
        self.encoding = encoding
        self.definition_levels_byte_length = definition_levels_byte_length
        self.repetition_levels_byte_length = repetition_levels_byte_length
        # NOTE: default True — a missing field means "compressed".
        self.is_compressed = is_compressed
        self.statistics = statistics

    def _i32_fields(self):
        # thrift field id -> attribute name, for every plain I32 field.
        return {
            1: 'num_values',
            2: 'num_nulls',
            3: 'num_rows',
            4: 'encoding',
            5: 'definition_levels_byte_length',
            6: 'repetition_levels_byte_length',
        }

    def read(self, iprot):
        """Populate this header from *iprot*; unknown fields are skipped."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        i32_by_fid = self._i32_fields()
        iprot.readStructBegin()
        while True:
            _fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if ftype == TType.I32 and fid in i32_by_fid:
                setattr(self, i32_by_fid[fid], iprot.readI32())
            elif fid == 7 and ftype == TType.BOOL:
                self.is_compressed = iprot.readBool()
            elif fid == 8 and ftype == TType.STRUCT:
                self.statistics = Statistics()
                self.statistics.read(iprot)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this header to *oprot* in field-id order."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('DataPageHeaderV2')
        i32_by_fid = self._i32_fields()
        for fid in (1, 2, 3, 4, 5, 6):
            value = getattr(self, i32_by_fid[fid])
            if value is not None:
                oprot.writeFieldBegin(i32_by_fid[fid], TType.I32, fid)
                oprot.writeI32(value)
                oprot.writeFieldEnd()
        if self.is_compressed is not None:
            oprot.writeFieldBegin('is_compressed', TType.BOOL, 7)
            oprot.writeBool(self.is_compressed)
            oprot.writeFieldEnd()
        if self.statistics is not None:
            oprot.writeFieldBegin('statistics', TType.STRUCT, 8)
            self.statistics.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is still None."""
        for required in ('num_values', 'num_nulls', 'num_rows', 'encoding',
                         'definition_levels_byte_length', 'repetition_levels_byte_length'):
            if getattr(self, required) is None:
                raise TProtocolException(message='Required field %s is unset!' % required)
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class SplitBlockAlgorithm(object):
    """Block-based Bloom filter algorithm annotation (carries no fields)."""

    def read(self, iprot):
        """Consume this (empty) struct from *iprot*, skipping any field."""
        # Accelerated C decode path when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _fname, ftype, _fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # No declared fields: everything encountered is skipped.
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this (empty) struct to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('SplitBlockAlgorithm')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate: the struct has no required fields."""
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class BloomFilterAlgorithm(object):
    """
    Union of supported Bloom filter algorithms.

    Attributes:
     - BLOCK: block-based (SplitBlockAlgorithm) Bloom filter.
    """

    def __init__(self, BLOCK=None,):
        self.BLOCK = BLOCK

    def read(self, iprot):
        """Populate this union from *iprot*; unknown fields are skipped."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRUCT:
                self.BLOCK = SplitBlockAlgorithm()
                self.BLOCK.read(iprot)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this union to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('BloomFilterAlgorithm')
        if self.BLOCK is not None:
            oprot.writeFieldBegin('BLOCK', TType.STRUCT, 1)
            self.BLOCK.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields to check."""
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class XxHash(object):
    """
    Hash strategy annotation: 64-bit xxHash, an extremely fast
    non-cryptographic hash algorithm. The struct carries no fields.
    """

    def read(self, iprot):
        """Consume this (empty) struct from *iprot*, skipping any field."""
        # Accelerated C decode path when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _fname, ftype, _fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # No declared fields: everything encountered is skipped.
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this (empty) struct to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('XxHash')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate: the struct has no required fields."""
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class BloomFilterHash(object):
    """
    Union of hash functions usable in a Bloom filter; the hash is taken
    over the plain-encoded column value.

    Attributes:
     - XXHASH: the xxHash strategy.
    """

    def __init__(self, XXHASH=None,):
        self.XXHASH = XXHASH

    def read(self, iprot):
        """Populate this union from *iprot*; unknown fields are skipped."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRUCT:
                self.XXHASH = XxHash()
                self.XXHASH.read(iprot)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this union to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('BloomFilterHash')
        if self.XXHASH is not None:
            oprot.writeFieldBegin('XXHASH', TType.STRUCT, 1)
            self.XXHASH.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields to check."""
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class Uncompressed(object):
    """Marker struct: the Bloom filter data is stored uncompressed."""

    def read(self, iprot):
        """Consume this (empty) struct from *iprot*, skipping any field."""
        # Accelerated C decode path when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _fname, ftype, _fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # No declared fields: everything encountered is skipped.
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this (empty) struct to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('Uncompressed')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate: the struct has no required fields."""
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class BloomFilterCompression(object):
    """
    Union describing how the Bloom filter data is compressed.

    Attributes:
     - UNCOMPRESSED: set (to an Uncompressed marker) when no compression
       is applied.
    """

    def __init__(self, UNCOMPRESSED=None,):
        self.UNCOMPRESSED = UNCOMPRESSED

    def read(self, iprot):
        """Populate this union from *iprot*; unknown fields are skipped."""
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRUCT:
                self.UNCOMPRESSED = Uncompressed()
                self.UNCOMPRESSED.read(iprot)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this union to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('BloomFilterCompression')
        if self.UNCOMPRESSED is not None:
            oprot.writeFieldBegin('UNCOMPRESSED', TType.STRUCT, 1)
            self.UNCOMPRESSED.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields to check."""
        return

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = (self == other)
        return not equal
class BloomFilterHeader(object):
    """
    Bloom filter header is stored at beginning of Bloom filter data of each column
    and followed by its bitset.

    Attributes:
     - numBytes: The size of bitset in bytes *
     - algorithm: The algorithm for setting bits. *
     - hash: The hash function used for Bloom filter. *
     - compression: The compression used in the Bloom filter *

    All four fields are required (enforced by validate(), not __init__).
    """

    def __init__(self, numBytes=None, algorithm=None, hash=None, compression=None,):
        self.numBytes = numBytes
        self.algorithm = algorithm
        self.hash = hash
        self.compression = compression

    def read(self, iprot):
        """Deserialize this struct from the thrift input protocol *iprot*."""
        # Fast path: accelerated C decoder, usable only with a readable C
        # transport and a populated thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk the encoded fields one by one until STOP.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.numBytes = iprot.readI32()
                else:
                    iprot.skip(ftype)  # unexpected wire type: ignore field
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.algorithm = BloomFilterAlgorithm()
                    self.algorithm.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.hash = BloomFilterHash()
                    self.hash.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRUCT:
                    self.compression = BloomFilterCompression()
                    self.compression.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: skip for forward compat
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the thrift output protocol *oprot*."""
        # Fast path via the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('BloomFilterHeader')
        # Fields that are None are simply omitted from the output.
        if self.numBytes is not None:
            oprot.writeFieldBegin('numBytes', TType.I32, 1)
            oprot.writeI32(self.numBytes)
            oprot.writeFieldEnd()
        if self.algorithm is not None:
            oprot.writeFieldBegin('algorithm', TType.STRUCT, 2)
            self.algorithm.write(oprot)
            oprot.writeFieldEnd()
        if self.hash is not None:
            oprot.writeFieldBegin('hash', TType.STRUCT, 3)
            self.hash.write(oprot)
            oprot.writeFieldEnd()
        if self.compression is not None:
            oprot.writeFieldBegin('compression', TType.STRUCT, 4)
            self.compression.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is unset."""
        if self.numBytes is None:
            raise TProtocolException(message='Required field numBytes is unset!')
        if self.algorithm is None:
            raise TProtocolException(message='Required field algorithm is unset!')
        if self.hash is None:
            raise TProtocolException(message='Required field hash is unset!')
        if self.compression is None:
            raise TProtocolException(message='Required field compression is unset!')
        return

    def __repr__(self):
        # key=value listing of every instance attribute.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Equal iff same class and identical attribute dictionaries.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class PageHeader(object):
    """
    Attributes:
     - type: the type of the page: indicates which of the *_header fields is set *
     - uncompressed_page_size: Uncompressed page size in bytes (not including this header) *
     - compressed_page_size: Compressed (and potentially encrypted) page size in bytes, not including this header *
     - crc: The 32bit CRC for the page, to be be calculated as follows:
       - Using the standard CRC32 algorithm
       - On the data only, i.e. this header should not be included. 'Data'
         hereby refers to the concatenation of the repetition levels, the
         definition levels and the column value, in this exact order.
       - On the encoded versions of the repetition levels, definition levels and
         column values
       - On the compressed versions of the repetition levels, definition levels
         and column values where possible;
         - For v1 data pages, the repetition levels, definition levels and column
           values are always compressed together. If a compression scheme is
           specified, the CRC shall be calculated on the compressed version of
           this concatenation. If no compression scheme is specified, the CRC
           shall be calculated on the uncompressed version of this concatenation.
         - For v2 data pages, the repetition levels and definition levels are
           handled separately from the data and are never compressed (only
           encoded). If a compression scheme is specified, the CRC shall be
           calculated on the concatenation of the uncompressed repetition levels,
           uncompressed definition levels and the compressed column values.
           If no compression scheme is specified, the CRC shall be calculated on
           the uncompressed concatenation.
       - In encrypted columns, CRC is calculated after page encryption; the
         encryption itself is performed after page compression (if compressed)
       If enabled, this allows for disabling checksumming in HDFS if only a few
       pages need to be read.
     - data_page_header
     - index_page_header
     - dictionary_page_header
     - data_page_header_v2
    """

    def __init__(self, type=None, uncompressed_page_size=None, compressed_page_size=None, crc=None, data_page_header=None, index_page_header=None, dictionary_page_header=None, data_page_header_v2=None,):
        self.type = type
        self.uncompressed_page_size = uncompressed_page_size
        self.compressed_page_size = compressed_page_size
        self.crc = crc
        # Exactly one of the *_header fields below is expected to correspond
        # to self.type; this generated code does not enforce that itself.
        self.data_page_header = data_page_header
        self.index_page_header = index_page_header
        self.dictionary_page_header = dictionary_page_header
        self.data_page_header_v2 = data_page_header_v2

    def read(self, iprot):
        """Deserialize this struct from the thrift input protocol *iprot*."""
        # Fast path: accelerated C decoder when transport and spec allow it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: dispatch on the field id of each encoded field.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.type = iprot.readI32()
                else:
                    iprot.skip(ftype)  # wrong wire type: ignore
            elif fid == 2:
                if ftype == TType.I32:
                    self.uncompressed_page_size = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I32:
                    self.compressed_page_size = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    self.crc = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRUCT:
                    self.data_page_header = DataPageHeader()
                    self.data_page_header.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRUCT:
                    self.index_page_header = IndexPageHeader()
                    self.index_page_header.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.STRUCT:
                    self.dictionary_page_header = DictionaryPageHeader()
                    self.dictionary_page_header.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 8:
                if ftype == TType.STRUCT:
                    self.data_page_header_v2 = DataPageHeaderV2()
                    self.data_page_header_v2.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)  # unknown field id: forward compatibility
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the thrift output protocol *oprot*."""
        # Fast path via the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('PageHeader')
        # None fields are omitted from the output entirely.
        if self.type is not None:
            oprot.writeFieldBegin('type', TType.I32, 1)
            oprot.writeI32(self.type)
            oprot.writeFieldEnd()
        if self.uncompressed_page_size is not None:
            oprot.writeFieldBegin('uncompressed_page_size', TType.I32, 2)
            oprot.writeI32(self.uncompressed_page_size)
            oprot.writeFieldEnd()
        if self.compressed_page_size is not None:
            oprot.writeFieldBegin('compressed_page_size', TType.I32, 3)
            oprot.writeI32(self.compressed_page_size)
            oprot.writeFieldEnd()
        if self.crc is not None:
            oprot.writeFieldBegin('crc', TType.I32, 4)
            oprot.writeI32(self.crc)
            oprot.writeFieldEnd()
        if self.data_page_header is not None:
            oprot.writeFieldBegin('data_page_header', TType.STRUCT, 5)
            self.data_page_header.write(oprot)
            oprot.writeFieldEnd()
        if self.index_page_header is not None:
            oprot.writeFieldBegin('index_page_header', TType.STRUCT, 6)
            self.index_page_header.write(oprot)
            oprot.writeFieldEnd()
        if self.dictionary_page_header is not None:
            oprot.writeFieldBegin('dictionary_page_header', TType.STRUCT, 7)
            self.dictionary_page_header.write(oprot)
            oprot.writeFieldEnd()
        if self.data_page_header_v2 is not None:
            oprot.writeFieldBegin('data_page_header_v2', TType.STRUCT, 8)
            self.data_page_header_v2.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is unset.

        Only type and the two page sizes are required; crc and the
        per-page-type headers are optional.
        """
        if self.type is None:
            raise TProtocolException(message='Required field type is unset!')
        if self.uncompressed_page_size is None:
            raise TProtocolException(message='Required field uncompressed_page_size is unset!')
        if self.compressed_page_size is None:
            raise TProtocolException(message='Required field compressed_page_size is unset!')
        return

    def __repr__(self):
        # key=value listing of every instance attribute.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Equal iff same class and identical attribute dictionaries.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class KeyValue(object):
    """Wrapper struct to store key values.

    Attributes:
     - key: required string key
     - value: optional string value
    """

    def __init__(self, key=None, value=None,):
        self.key = key
        self.value = value

    def read(self, iprot):
        """Populate this struct from the thrift input protocol *iprot*."""
        use_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if use_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        py2 = sys.version_info[0] == 2
        iprot.readStructBegin()
        while True:
            _, field_type, field_id = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 1 and field_type == TType.STRING:
                raw = iprot.readString()
                # On Python 2 the protocol yields bytes; decode to unicode.
                self.key = raw.decode('utf-8', errors='replace') if py2 else raw
            elif field_id == 2 and field_type == TType.STRING:
                raw = iprot.readString()
                self.value = raw.decode('utf-8', errors='replace') if py2 else raw
            else:
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the thrift output protocol *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        py2 = sys.version_info[0] == 2
        oprot.writeStructBegin('KeyValue')
        if self.key is not None:
            oprot.writeFieldBegin('key', TType.STRING, 1)
            oprot.writeString(self.key.encode('utf-8') if py2 else self.key)
            oprot.writeFieldEnd()
        if self.value is not None:
            oprot.writeFieldBegin('value', TType.STRING, 2)
            oprot.writeString(self.value.encode('utf-8') if py2 else self.value)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise if the required *key* field is missing."""
        if self.key is None:
            raise TProtocolException(message='Required field key is unset!')
        return None

    def __repr__(self):
        parts = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
class SortingColumn(object):
    """Wrapper struct to specify sort order.

    Attributes:
     - column_idx: The column index (in this row group) *
     - descending: If true, indicates this column is sorted in descending order. *
     - nulls_first: If true, nulls will come before non-null values, otherwise,
       nulls go at the end.
    """

    def __init__(self, column_idx=None, descending=None, nulls_first=None,):
        self.column_idx = column_idx
        self.descending = descending
        self.nulls_first = nulls_first

    def read(self, iprot):
        """Populate this struct from the thrift input protocol *iprot*."""
        use_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if use_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, field_type, field_id = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 1 and field_type == TType.I32:
                self.column_idx = iprot.readI32()
            elif field_id == 2 and field_type == TType.BOOL:
                self.descending = iprot.readBool()
            elif field_id == 3 and field_type == TType.BOOL:
                self.nulls_first = iprot.readBool()
            else:
                # Unknown field id or unexpected wire type: skip it.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the thrift output protocol *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('SortingColumn')
        if self.column_idx is not None:
            oprot.writeFieldBegin('column_idx', TType.I32, 1)
            oprot.writeI32(self.column_idx)
            oprot.writeFieldEnd()
        if self.descending is not None:
            oprot.writeFieldBegin('descending', TType.BOOL, 2)
            oprot.writeBool(self.descending)
            oprot.writeFieldEnd()
        if self.nulls_first is not None:
            oprot.writeFieldBegin('nulls_first', TType.BOOL, 3)
            oprot.writeBool(self.nulls_first)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise if any of the three required fields is unset."""
        if self.column_idx is None:
            raise TProtocolException(message='Required field column_idx is unset!')
        if self.descending is None:
            raise TProtocolException(message='Required field descending is unset!')
        if self.nulls_first is None:
            raise TProtocolException(message='Required field nulls_first is unset!')
        return None

    def __repr__(self):
        parts = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
class PageEncodingStats(object):
    """statistics of a given page type and encoding.

    Attributes:
     - page_type: the page type (data/dic/...) *
     - encoding: encoding of the page *
     - count: number of pages of this type with this encoding *
    """

    def __init__(self, page_type=None, encoding=None, count=None,):
        self.page_type = page_type
        self.encoding = encoding
        self.count = count

    def read(self, iprot):
        """Populate this struct from the thrift input protocol *iprot*."""
        use_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if use_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, field_type, field_id = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 1 and field_type == TType.I32:
                self.page_type = iprot.readI32()
            elif field_id == 2 and field_type == TType.I32:
                self.encoding = iprot.readI32()
            elif field_id == 3 and field_type == TType.I32:
                self.count = iprot.readI32()
            else:
                # Unknown field id or unexpected wire type: skip it.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the thrift output protocol *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('PageEncodingStats')
        if self.page_type is not None:
            oprot.writeFieldBegin('page_type', TType.I32, 1)
            oprot.writeI32(self.page_type)
            oprot.writeFieldEnd()
        if self.encoding is not None:
            oprot.writeFieldBegin('encoding', TType.I32, 2)
            oprot.writeI32(self.encoding)
            oprot.writeFieldEnd()
        if self.count is not None:
            oprot.writeFieldBegin('count', TType.I32, 3)
            oprot.writeI32(self.count)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise if any of the three required fields is unset."""
        if self.page_type is None:
            raise TProtocolException(message='Required field page_type is unset!')
        if self.encoding is None:
            raise TProtocolException(message='Required field encoding is unset!')
        if self.count is None:
            raise TProtocolException(message='Required field count is unset!')
        return None

    def __repr__(self):
        parts = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
class ColumnMetaData(object):
    """
    Description for column metadata

    Attributes:
     - type: Type of this column *
     - encodings: Set of all encodings used for this column. The purpose is to validate
       whether we can decode those pages. *
     - path_in_schema: Path in schema *
     - codec: Compression codec *
     - num_values: Number of values in this column *
     - total_uncompressed_size: total byte size of all uncompressed pages in this column chunk (including the headers) *
     - total_compressed_size: total byte size of all compressed, and potentially encrypted, pages
       in this column chunk (including the headers) *
     - key_value_metadata: Optional key/value metadata *
     - data_page_offset: Byte offset from beginning of file to first data page *
     - index_page_offset: Byte offset from beginning of file to root index page *
     - dictionary_page_offset: Byte offset from the beginning of file to first (only) dictionary page *
     - statistics: optional statistics for this column chunk
     - encoding_stats: Set of all encodings used for pages in this column chunk.
       This information can be used to determine if all data pages are
       dictionary encoded for example *
     - bloom_filter_offset: Byte offset from beginning of file to Bloom filter data. *
    """

    def __init__(self, type=None, encodings=None, path_in_schema=None, codec=None, num_values=None, total_uncompressed_size=None, total_compressed_size=None, key_value_metadata=None, data_page_offset=None, index_page_offset=None, dictionary_page_offset=None, statistics=None, encoding_stats=None, bloom_filter_offset=None,):
        self.type = type
        self.encodings = encodings
        self.path_in_schema = path_in_schema
        self.codec = codec
        self.num_values = num_values
        self.total_uncompressed_size = total_uncompressed_size
        self.total_compressed_size = total_compressed_size
        self.key_value_metadata = key_value_metadata
        self.data_page_offset = data_page_offset
        self.index_page_offset = index_page_offset
        self.dictionary_page_offset = dictionary_page_offset
        self.statistics = statistics
        self.encoding_stats = encoding_stats
        self.bloom_filter_offset = bloom_filter_offset

    def read(self, iprot):
        """Deserialize this struct from the thrift input protocol *iprot*."""
        # Fast path: accelerated C decoder when transport and spec allow it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: dispatch on each encoded field's id; skip anything
        # unknown or carrying an unexpected wire type.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.type = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    # list<i32> of encoding enum values
                    self.encodings = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in range(_size0):
                        _elem5 = iprot.readI32()
                        self.encodings.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.LIST:
                    # list<string>; decode to unicode on Python 2 only
                    self.path_in_schema = []
                    (_etype9, _size6) = iprot.readListBegin()
                    for _i10 in range(_size6):
                        _elem11 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
                        self.path_in_schema.append(_elem11)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    self.codec = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I64:
                    self.num_values = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.I64:
                    self.total_uncompressed_size = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.I64:
                    self.total_compressed_size = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 8:
                if ftype == TType.LIST:
                    # list<KeyValue> structs
                    self.key_value_metadata = []
                    (_etype15, _size12) = iprot.readListBegin()
                    for _i16 in range(_size12):
                        _elem17 = KeyValue()
                        _elem17.read(iprot)
                        self.key_value_metadata.append(_elem17)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 9:
                if ftype == TType.I64:
                    self.data_page_offset = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 10:
                if ftype == TType.I64:
                    self.index_page_offset = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 11:
                if ftype == TType.I64:
                    self.dictionary_page_offset = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 12:
                if ftype == TType.STRUCT:
                    self.statistics = Statistics()
                    self.statistics.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 13:
                if ftype == TType.LIST:
                    # list<PageEncodingStats> structs
                    self.encoding_stats = []
                    (_etype21, _size18) = iprot.readListBegin()
                    for _i22 in range(_size18):
                        _elem23 = PageEncodingStats()
                        _elem23.read(iprot)
                        self.encoding_stats.append(_elem23)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 14:
                if ftype == TType.I64:
                    self.bloom_filter_offset = iprot.readI64()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the thrift output protocol *oprot*."""
        # Fast path via the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ColumnMetaData')
        # None fields are omitted from the output entirely.
        if self.type is not None:
            oprot.writeFieldBegin('type', TType.I32, 1)
            oprot.writeI32(self.type)
            oprot.writeFieldEnd()
        if self.encodings is not None:
            oprot.writeFieldBegin('encodings', TType.LIST, 2)
            oprot.writeListBegin(TType.I32, len(self.encodings))
            for iter24 in self.encodings:
                oprot.writeI32(iter24)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.path_in_schema is not None:
            oprot.writeFieldBegin('path_in_schema', TType.LIST, 3)
            oprot.writeListBegin(TType.STRING, len(self.path_in_schema))
            for iter25 in self.path_in_schema:
                # Encode to bytes on Python 2 only.
                oprot.writeString(iter25.encode('utf-8') if sys.version_info[0] == 2 else iter25)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.codec is not None:
            oprot.writeFieldBegin('codec', TType.I32, 4)
            oprot.writeI32(self.codec)
            oprot.writeFieldEnd()
        if self.num_values is not None:
            oprot.writeFieldBegin('num_values', TType.I64, 5)
            oprot.writeI64(self.num_values)
            oprot.writeFieldEnd()
        if self.total_uncompressed_size is not None:
            oprot.writeFieldBegin('total_uncompressed_size', TType.I64, 6)
            oprot.writeI64(self.total_uncompressed_size)
            oprot.writeFieldEnd()
        if self.total_compressed_size is not None:
            oprot.writeFieldBegin('total_compressed_size', TType.I64, 7)
            oprot.writeI64(self.total_compressed_size)
            oprot.writeFieldEnd()
        if self.key_value_metadata is not None:
            oprot.writeFieldBegin('key_value_metadata', TType.LIST, 8)
            oprot.writeListBegin(TType.STRUCT, len(self.key_value_metadata))
            for iter26 in self.key_value_metadata:
                iter26.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.data_page_offset is not None:
            oprot.writeFieldBegin('data_page_offset', TType.I64, 9)
            oprot.writeI64(self.data_page_offset)
            oprot.writeFieldEnd()
        if self.index_page_offset is not None:
            oprot.writeFieldBegin('index_page_offset', TType.I64, 10)
            oprot.writeI64(self.index_page_offset)
            oprot.writeFieldEnd()
        if self.dictionary_page_offset is not None:
            oprot.writeFieldBegin('dictionary_page_offset', TType.I64, 11)
            oprot.writeI64(self.dictionary_page_offset)
            oprot.writeFieldEnd()
        if self.statistics is not None:
            oprot.writeFieldBegin('statistics', TType.STRUCT, 12)
            self.statistics.write(oprot)
            oprot.writeFieldEnd()
        if self.encoding_stats is not None:
            oprot.writeFieldBegin('encoding_stats', TType.LIST, 13)
            oprot.writeListBegin(TType.STRUCT, len(self.encoding_stats))
            for iter27 in self.encoding_stats:
                iter27.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.bloom_filter_offset is not None:
            oprot.writeFieldBegin('bloom_filter_offset', TType.I64, 14)
            oprot.writeI64(self.bloom_filter_offset)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is unset.

        key_value_metadata, index_page_offset, dictionary_page_offset,
        statistics, encoding_stats and bloom_filter_offset are optional.
        """
        if self.type is None:
            raise TProtocolException(message='Required field type is unset!')
        if self.encodings is None:
            raise TProtocolException(message='Required field encodings is unset!')
        if self.path_in_schema is None:
            raise TProtocolException(message='Required field path_in_schema is unset!')
        if self.codec is None:
            raise TProtocolException(message='Required field codec is unset!')
        if self.num_values is None:
            raise TProtocolException(message='Required field num_values is unset!')
        if self.total_uncompressed_size is None:
            raise TProtocolException(message='Required field total_uncompressed_size is unset!')
        if self.total_compressed_size is None:
            raise TProtocolException(message='Required field total_compressed_size is unset!')
        if self.data_page_offset is None:
            raise TProtocolException(message='Required field data_page_offset is unset!')
        return

    def __repr__(self):
        # key=value listing of every instance attribute.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Equal iff same class and identical attribute dictionaries.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class EncryptionWithFooterKey(object):
    """Empty thrift struct used as a marker inside ColumnCryptoMetaData."""

    def read(self, iprot):
        """Consume this (field-less) struct from the input protocol."""
        use_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if use_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, field_type, _ = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            # No declared fields: everything encountered is skipped.
            iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this (field-less) struct to the output protocol."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('EncryptionWithFooterKey')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields to verify."""
        return None

    def __repr__(self):
        parts = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
class EncryptionWithColumnKey(object):
    """Crypto metadata for a column encrypted with its own key.

    Attributes:
     - path_in_schema: Column path in schema *
     - key_metadata: Retrieval metadata of column encryption key *
    """

    def __init__(self, path_in_schema=None, key_metadata=None,):
        self.path_in_schema = path_in_schema
        self.key_metadata = key_metadata

    def read(self, iprot):
        """Populate this struct from the thrift input protocol *iprot*."""
        use_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if use_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, field_type, field_id = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 1 and field_type == TType.LIST:
                self.path_in_schema = []
                _, length = iprot.readListBegin()
                py2 = sys.version_info[0] == 2
                for _ in range(length):
                    raw = iprot.readString()
                    # On Python 2 the protocol yields bytes; decode them.
                    self.path_in_schema.append(
                        raw.decode('utf-8', errors='replace') if py2 else raw)
                iprot.readListEnd()
            elif field_id == 2 and field_type == TType.STRING:
                self.key_metadata = iprot.readBinary()
            else:
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the thrift output protocol *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('EncryptionWithColumnKey')
        if self.path_in_schema is not None:
            oprot.writeFieldBegin('path_in_schema', TType.LIST, 1)
            oprot.writeListBegin(TType.STRING, len(self.path_in_schema))
            py2 = sys.version_info[0] == 2
            for element in self.path_in_schema:
                oprot.writeString(element.encode('utf-8') if py2 else element)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.key_metadata is not None:
            oprot.writeFieldBegin('key_metadata', TType.STRING, 2)
            oprot.writeBinary(self.key_metadata)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise if the required *path_in_schema* field is missing."""
        if self.path_in_schema is None:
            raise TProtocolException(message='Required field path_in_schema is unset!')
        return None

    def __repr__(self):
        parts = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
class ColumnCryptoMetaData(object):
    """Thrift union describing which key protects a column chunk.

    Attributes:
     - ENCRYPTION_WITH_FOOTER_KEY
     - ENCRYPTION_WITH_COLUMN_KEY
    """

    def __init__(self, ENCRYPTION_WITH_FOOTER_KEY=None, ENCRYPTION_WITH_COLUMN_KEY=None,):
        self.ENCRYPTION_WITH_FOOTER_KEY = ENCRYPTION_WITH_FOOTER_KEY
        self.ENCRYPTION_WITH_COLUMN_KEY = ENCRYPTION_WITH_COLUMN_KEY

    def read(self, iprot):
        """Populate this union from the thrift input protocol *iprot*."""
        use_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if use_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _, field_type, field_id = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 1 and field_type == TType.STRUCT:
                self.ENCRYPTION_WITH_FOOTER_KEY = EncryptionWithFooterKey()
                self.ENCRYPTION_WITH_FOOTER_KEY.read(iprot)
            elif field_id == 2 and field_type == TType.STRUCT:
                self.ENCRYPTION_WITH_COLUMN_KEY = EncryptionWithColumnKey()
                self.ENCRYPTION_WITH_COLUMN_KEY.read(iprot)
            else:
                # Unknown field id or unexpected wire type: skip it.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this union to the thrift output protocol *oprot*."""
        fast = oprot._fast_encode
        if fast is not None and self.thrift_spec is not None:
            oprot.trans.write(fast(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ColumnCryptoMetaData')
        if self.ENCRYPTION_WITH_FOOTER_KEY is not None:
            oprot.writeFieldBegin('ENCRYPTION_WITH_FOOTER_KEY', TType.STRUCT, 1)
            self.ENCRYPTION_WITH_FOOTER_KEY.write(oprot)
            oprot.writeFieldEnd()
        if self.ENCRYPTION_WITH_COLUMN_KEY is not None:
            oprot.writeFieldBegin('ENCRYPTION_WITH_COLUMN_KEY', TType.STRUCT, 2)
            self.ENCRYPTION_WITH_COLUMN_KEY.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields to verify."""
        return None

    def __repr__(self):
        parts = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
class ColumnChunk(object):
"""
Attributes:
- file_path: File where column data is stored. If not set, assumed to be same file as
metadata. This path is relative to the current file.
- file_offset: Byte offset in file_path to the ColumnMetaData *
- meta_data: Column metadata for this chunk. This is the same content as what is at
file_path/file_offset. Having it here has it replicated in the file
metadata.
- offset_index_offset: File offset of ColumnChunk's OffsetIndex *
- offset_index_length: Size of ColumnChunk's OffsetIndex, in bytes *
- column_index_offset: File offset of ColumnChunk's ColumnIndex *
- column_index_length: Size of ColumnChunk's ColumnIndex, in bytes *
- crypto_metadata: Crypto metadata of encrypted columns *
- encrypted_column_metadata: Encrypted column metadata for this chunk *
"""
    def __init__(self, file_path=None, file_offset=None, meta_data=None, offset_index_offset=None, offset_index_length=None, column_index_offset=None, column_index_length=None, crypto_metadata=None, encrypted_column_metadata=None,):
        # Plain attribute copies of the constructor arguments; every field
        # defaults to None (required-ness is not enforced here).
        self.file_path = file_path
        self.file_offset = file_offset
        self.meta_data = meta_data
        self.offset_index_offset = offset_index_offset
        self.offset_index_length = offset_index_length
        self.column_index_offset = column_index_offset
        self.column_index_length = column_index_length
        self.crypto_metadata = crypto_metadata
        self.encrypted_column_metadata = encrypted_column_metadata
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.file_path = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.file_offset = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.meta_data = ColumnMetaData()
self.meta_data.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.offset_index_offset = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.offset_index_length = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I64:
self.column_index_offset = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.column_index_length = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRUCT:
self.crypto_metadata = ColumnCryptoMetaData()
self.crypto_metadata.read(iprot)
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRING:
self.encrypted_column_metadata = iprot.readBinary()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ColumnChunk')
if self.file_path is not None:
oprot.writeFieldBegin('file_path', TType.STRING, 1)
oprot.writeString(self.file_path.encode('utf-8') if sys.version_info[0] == 2 else self.file_path)
oprot.writeFieldEnd()
if self.file_offset is not None:
oprot.writeFieldBegin('file_offset', TType.I64, 2)
oprot.writeI64(self.file_offset)
oprot.writeFieldEnd()
if self.meta_data is not None:
oprot.writeFieldBegin('meta_data', TType.STRUCT, 3)
self.meta_data.write(oprot)
oprot.writeFieldEnd()
if self.offset_index_offset is not None:
oprot.writeFieldBegin('offset_index_offset', TType.I64, 4)
oprot.writeI64(self.offset_index_offset)
oprot.writeFieldEnd()
if self.offset_index_length is not None:
oprot.writeFieldBegin('offset_index_length', TType.I32, 5)
oprot.writeI32(self.offset_index_length)
oprot.writeFieldEnd()
if self.column_index_offset is not None:
oprot.writeFieldBegin('column_index_offset', TType.I64, 6)
oprot.writeI64(self.column_index_offset)
oprot.writeFieldEnd()
if self.column_index_length is not None:
oprot.writeFieldBegin('column_index_length', TType.I32, 7)
oprot.writeI32(self.column_index_length)
oprot.writeFieldEnd()
if self.crypto_metadata is not None:
oprot.writeFieldBegin('crypto_metadata', TType.STRUCT, 8)
self.crypto_metadata.write(oprot)
oprot.writeFieldEnd()
if self.encrypted_column_metadata is not None:
oprot.writeFieldBegin('encrypted_column_metadata', TType.STRING, 9)
oprot.writeBinary(self.encrypted_column_metadata)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.file_offset is None:
raise TProtocolException(message='Required field file_offset is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RowGroup(object):
    """
    Thrift struct: one row group of a Parquet file.

    Attributes:
     - columns: Metadata for each column chunk in this row group.
       This list must have the same order as the SchemaElement list in FileMetaData.
     - total_byte_size: Total byte size of all the uncompressed column data in this row group.
     - num_rows: Number of rows in this row group.
     - sorting_columns: If set, specifies a sort ordering of the rows in this RowGroup.
       The sorting columns can be a subset of all the columns.
     - file_offset: Byte offset from beginning of file to first page (data or dictionary)
       in this row group.
     - total_compressed_size: Total byte size of all compressed (and potentially encrypted)
       column data in this row group.
     - ordinal: Row group ordinal in the file.
    """

    def __init__(self, columns=None, total_byte_size=None, num_rows=None, sorting_columns=None, file_offset=None, total_compressed_size=None, ordinal=None,):
        self.columns = columns
        self.total_byte_size = total_byte_size
        self.num_rows = num_rows
        self.sorting_columns = sorting_columns
        self.file_offset = file_offset
        self.total_compressed_size = total_compressed_size
        self.ordinal = ordinal

    def read(self, iprot):
        """Deserialize this struct from protocol *iprot*; unknown fields are skipped."""
        # Fast path: C-accelerated decoder, when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    # list<ColumnChunk>: each element is a nested struct.
                    self.columns = []
                    (_etype38, _size35) = iprot.readListBegin()
                    for _i39 in range(_size35):
                        _elem40 = ColumnChunk()
                        _elem40.read(iprot)
                        self.columns.append(_elem40)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I64:
                    self.total_byte_size = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I64:
                    self.num_rows = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.LIST:
                    # list<SortingColumn>
                    self.sorting_columns = []
                    (_etype44, _size41) = iprot.readListBegin()
                    for _i45 in range(_size41):
                        _elem46 = SortingColumn()
                        _elem46.read(iprot)
                        self.sorting_columns.append(_elem46)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I64:
                    self.file_offset = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.I64:
                    self.total_compressed_size = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.I16:
                    self.ordinal = iprot.readI16()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to protocol *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('RowGroup')
        if self.columns is not None:
            oprot.writeFieldBegin('columns', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.columns))
            for iter47 in self.columns:
                iter47.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.total_byte_size is not None:
            oprot.writeFieldBegin('total_byte_size', TType.I64, 2)
            oprot.writeI64(self.total_byte_size)
            oprot.writeFieldEnd()
        if self.num_rows is not None:
            oprot.writeFieldBegin('num_rows', TType.I64, 3)
            oprot.writeI64(self.num_rows)
            oprot.writeFieldEnd()
        if self.sorting_columns is not None:
            oprot.writeFieldBegin('sorting_columns', TType.LIST, 4)
            oprot.writeListBegin(TType.STRUCT, len(self.sorting_columns))
            for iter48 in self.sorting_columns:
                iter48.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.file_offset is not None:
            oprot.writeFieldBegin('file_offset', TType.I64, 5)
            oprot.writeI64(self.file_offset)
            oprot.writeFieldEnd()
        if self.total_compressed_size is not None:
            oprot.writeFieldBegin('total_compressed_size', TType.I64, 6)
            oprot.writeI64(self.total_compressed_size)
            oprot.writeFieldEnd()
        if self.ordinal is not None:
            oprot.writeFieldBegin('ordinal', TType.I16, 7)
            oprot.writeI16(self.ordinal)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is missing."""
        if self.columns is None:
            raise TProtocolException(message='Required field columns is unset!')
        if self.total_byte_size is None:
            raise TProtocolException(message='Required field total_byte_size is unset!')
        if self.num_rows is None:
            raise TProtocolException(message='Required field num_rows is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Structural equality: same class and same field values.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TypeDefinedOrder(object):
    """
    Empty marker struct signalling that ordering follows the column's
    physical or logical type.
    """

    def read(self, iprot):
        """Consume this (field-less) struct from *iprot*, skipping any payload."""
        if (iprot._fast_decode is not None
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None):
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _name, wire_type, _fid = iprot.readFieldBegin()
            if wire_type == TType.STOP:
                break
            iprot.skip(wire_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this struct (which carries no fields) to *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TypeDefinedOrder')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields; always valid."""
        return

    def __repr__(self):
        rendered = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, rendered)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class ColumnOrder(object):
    """
    Union selecting the ordering used for a column's min_value/max_value
    statistics. Acts as an extensible enum; currently the only member is:

     - TYPE_ORDER: the column uses the order defined by its logical type, or
       by its physical type when no logical type is present. Signed/unsigned
       comparison per type follows the Parquet specification (e.g. UTF8/JSON/
       BSON/ENUM/BYTE_ARRAY compare unsigned byte-wise, INT* signed, UINT*
       unsigned, DECIMAL/DATE/TIME*/TIMESTAMP* signed, INTERVAL unsigned,
       LIST/MAP undefined). For FLOAT/DOUBLE, readers should apply the NaN
       and signed-zero compatibility rules from the spec: ignore NaN min/max,
       and treat +0 max / -0 min as possibly covering the other zero.

    If a reader does not recognise the populated union member, min/max stats
    for the column should be ignored.
    """

    def __init__(self, TYPE_ORDER=None,):
        self.TYPE_ORDER = TYPE_ORDER

    def read(self, iprot):
        """Populate this union from *iprot*; unrecognised fields are skipped."""
        if (iprot._fast_decode is not None
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None):
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _name, wire_type, field_id = iprot.readFieldBegin()
            if wire_type == TType.STOP:
                break
            if field_id == 1 and wire_type == TType.STRUCT:
                self.TYPE_ORDER = TypeDefinedOrder()
                self.TYPE_ORDER.read(iprot)
            else:
                iprot.skip(wire_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this union to *oprot*; an unset member is simply omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ColumnOrder')
        if self.TYPE_ORDER is not None:
            oprot.writeFieldBegin('TYPE_ORDER', TType.STRUCT, 1)
            self.TYPE_ORDER.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields; always valid."""
        return

    def __repr__(self):
        rendered = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, rendered)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class PageLocation(object):
    """
    Location of one page, as recorded in an OffsetIndex.

    Attributes:
     - offset: Offset of the page in the file.
     - compressed_page_size: Size of the page including its header
       (compressed_page_size plus header length).
     - first_row_index: Index within the RowGroup of the first row of the
       page; pages therefore change on record boundaries (r = 0).
    """

    def __init__(self, offset=None, compressed_page_size=None, first_row_index=None,):
        self.offset = offset
        self.compressed_page_size = compressed_page_size
        self.first_row_index = first_row_index

    def read(self, iprot):
        """Populate this struct from *iprot*; unknown fields are skipped."""
        if (iprot._fast_decode is not None
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None):
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _name, wire_type, field_id = iprot.readFieldBegin()
            if wire_type == TType.STOP:
                break
            if field_id == 1 and wire_type == TType.I64:
                self.offset = iprot.readI64()
            elif field_id == 2 and wire_type == TType.I32:
                self.compressed_page_size = iprot.readI32()
            elif field_id == 3 and wire_type == TType.I64:
                self.first_row_index = iprot.readI64()
            else:
                iprot.skip(wire_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this struct to *oprot*; fields left as None are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('PageLocation')
        # Field ids and emission order match the Thrift IDL.
        for fname, wire_type, field_id, value, emit in (
                ('offset', TType.I64, 1, self.offset, oprot.writeI64),
                ('compressed_page_size', TType.I32, 2, self.compressed_page_size, oprot.writeI32),
                ('first_row_index', TType.I64, 3, self.first_row_index, oprot.writeI64),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, wire_type, field_id)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is missing."""
        for fname, value in (('offset', self.offset),
                             ('compressed_page_size', self.compressed_page_size),
                             ('first_row_index', self.first_row_index)):
            if value is None:
                raise TProtocolException(message='Required field %s is unset!' % fname)
        return

    def __repr__(self):
        rendered = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, rendered)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class OffsetIndex(object):
    """
    Page index over one column chunk.

    Attributes:
     - page_locations: PageLocations ordered by increasing PageLocation.offset;
       page_locations[i].first_row_index < page_locations[i+1].first_row_index
       is required.
    """

    def __init__(self, page_locations=None,):
        self.page_locations = page_locations

    def read(self, iprot):
        """Populate this struct from *iprot*; unknown fields are skipped."""
        if (iprot._fast_decode is not None
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None):
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _name, wire_type, field_id = iprot.readFieldBegin()
            if wire_type == TType.STOP:
                break
            if field_id == 1 and wire_type == TType.LIST:
                self.page_locations = []
                _elem_type, count = iprot.readListBegin()
                for _ in range(count):
                    location = PageLocation()
                    location.read(iprot)
                    self.page_locations.append(location)
                iprot.readListEnd()
            else:
                iprot.skip(wire_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this struct to *oprot*; a None list is omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('OffsetIndex')
        if self.page_locations is not None:
            oprot.writeFieldBegin('page_locations', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.page_locations))
            for location in self.page_locations:
                location.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if the required list is missing."""
        if self.page_locations is None:
            raise TProtocolException(message='Required field page_locations is unset!')
        return

    def __repr__(self):
        rendered = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, rendered)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class ColumnIndex(object):
    """
    Per-page min/max index for one column chunk.
    Each <array-field>[i] refers to the page at OffsetIndex.page_locations[i].

    Attributes:
     - null_pages: A list of Boolean values to determine the validity of the
       corresponding min and max values. If true, a page contains only null
       values, and writers have to set the corresponding entries in min_values
       and max_values to byte[0], so that all lists have the same length. If
       false, the corresponding entries in min_values and max_values must be
       valid.
     - min_values: Two lists containing lower and upper bounds for the values
       of each page determined by the ColumnOrder of the column. These may be
       the actual minimum and maximum values found on a page, but can also be
       (more compact) values that do not exist on a page. For example, instead
       of storing "Blart Versenwald III", a writer may set min_values[i]="B",
       max_values[i]="C". Such more compact values must still be valid values
       within the column's logical type. Readers must make sure that list
       entries are populated before using them by inspecting null_pages.
     - max_values: Upper bounds; see min_values.
     - boundary_order: Stores whether both min_values and max_values are
       ordered and, if so, in which direction. This allows readers to perform
       binary searches in both lists. Readers cannot assume that
       max_values[i] <= min_values[i+1], even if the lists are ordered.
     - null_counts: A list containing the number of null values for each page.
    """

    def __init__(self, null_pages=None, min_values=None, max_values=None, boundary_order=None, null_counts=None,):
        self.null_pages = null_pages
        self.min_values = min_values
        self.max_values = max_values
        self.boundary_order = boundary_order
        self.null_counts = null_counts

    def read(self, iprot):
        """Deserialize this struct from protocol *iprot*; unknown fields are skipped."""
        # Fast path: C-accelerated decoder, when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    # list<bool>: one entry per page.
                    self.null_pages = []
                    (_etype59, _size56) = iprot.readListBegin()
                    for _i60 in range(_size56):
                        _elem61 = iprot.readBool()
                        self.null_pages.append(_elem61)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    # list<binary>: per-page lower bounds.
                    self.min_values = []
                    (_etype65, _size62) = iprot.readListBegin()
                    for _i66 in range(_size62):
                        _elem67 = iprot.readBinary()
                        self.min_values.append(_elem67)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.LIST:
                    # list<binary>: per-page upper bounds.
                    self.max_values = []
                    (_etype71, _size68) = iprot.readListBegin()
                    for _i72 in range(_size68):
                        _elem73 = iprot.readBinary()
                        self.max_values.append(_elem73)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    # BoundaryOrder enum value.
                    self.boundary_order = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.LIST:
                    # list<i64>: per-page null counts (optional).
                    self.null_counts = []
                    (_etype77, _size74) = iprot.readListBegin()
                    for _i78 in range(_size74):
                        _elem79 = iprot.readI64()
                        self.null_counts.append(_elem79)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to protocol *oprot*; None fields are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ColumnIndex')
        if self.null_pages is not None:
            oprot.writeFieldBegin('null_pages', TType.LIST, 1)
            oprot.writeListBegin(TType.BOOL, len(self.null_pages))
            for iter80 in self.null_pages:
                oprot.writeBool(iter80)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.min_values is not None:
            oprot.writeFieldBegin('min_values', TType.LIST, 2)
            oprot.writeListBegin(TType.STRING, len(self.min_values))
            for iter81 in self.min_values:
                oprot.writeBinary(iter81)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.max_values is not None:
            oprot.writeFieldBegin('max_values', TType.LIST, 3)
            oprot.writeListBegin(TType.STRING, len(self.max_values))
            for iter82 in self.max_values:
                oprot.writeBinary(iter82)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.boundary_order is not None:
            oprot.writeFieldBegin('boundary_order', TType.I32, 4)
            oprot.writeI32(self.boundary_order)
            oprot.writeFieldEnd()
        if self.null_counts is not None:
            oprot.writeFieldBegin('null_counts', TType.LIST, 5)
            oprot.writeListBegin(TType.I64, len(self.null_counts))
            for iter83 in self.null_counts:
                oprot.writeI64(iter83)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is missing."""
        if self.null_pages is None:
            raise TProtocolException(message='Required field null_pages is unset!')
        if self.min_values is None:
            raise TProtocolException(message='Required field min_values is unset!')
        if self.max_values is None:
            raise TProtocolException(message='Required field max_values is unset!')
        if self.boundary_order is None:
            raise TProtocolException(message='Required field boundary_order is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Structural equality: same class and same field values.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class AesGcmV1(object):
    """
    Parameters of the AES-GCM-V1 encryption algorithm.

    Attributes:
     - aad_prefix: AAD prefix.
     - aad_file_unique: Unique file identifier part of the AAD suffix.
     - supply_aad_prefix: When the file is encrypted with an AAD prefix that
       is not stored in it, readers must supply the prefix themselves.
    """

    def __init__(self, aad_prefix=None, aad_file_unique=None, supply_aad_prefix=None,):
        self.aad_prefix = aad_prefix
        self.aad_file_unique = aad_file_unique
        self.supply_aad_prefix = supply_aad_prefix

    def read(self, iprot):
        """Populate this struct from *iprot*; unknown fields are skipped."""
        if (iprot._fast_decode is not None
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None):
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _name, wire_type, field_id = iprot.readFieldBegin()
            if wire_type == TType.STOP:
                break
            if field_id == 1 and wire_type == TType.STRING:
                self.aad_prefix = iprot.readBinary()
            elif field_id == 2 and wire_type == TType.STRING:
                self.aad_file_unique = iprot.readBinary()
            elif field_id == 3 and wire_type == TType.BOOL:
                self.supply_aad_prefix = iprot.readBool()
            else:
                iprot.skip(wire_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this struct to *oprot*; fields left as None are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AesGcmV1')
        # Field ids and emission order match the Thrift IDL.
        for fname, wire_type, field_id, value, emit in (
                ('aad_prefix', TType.STRING, 1, self.aad_prefix, oprot.writeBinary),
                ('aad_file_unique', TType.STRING, 2, self.aad_file_unique, oprot.writeBinary),
                ('supply_aad_prefix', TType.BOOL, 3, self.supply_aad_prefix, oprot.writeBool),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, wire_type, field_id)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields; always valid."""
        return

    def __repr__(self):
        rendered = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, rendered)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class AesGcmCtrV1(object):
    """
    Parameters of the AES-GCM-CTR-V1 encryption algorithm.

    Attributes:
     - aad_prefix: AAD prefix.
     - aad_file_unique: Unique file identifier part of the AAD suffix.
     - supply_aad_prefix: When the file is encrypted with an AAD prefix that
       is not stored in it, readers must supply the prefix themselves.
    """

    def __init__(self, aad_prefix=None, aad_file_unique=None, supply_aad_prefix=None,):
        self.aad_prefix = aad_prefix
        self.aad_file_unique = aad_file_unique
        self.supply_aad_prefix = supply_aad_prefix

    def read(self, iprot):
        """Populate this struct from *iprot*; unknown fields are skipped."""
        if (iprot._fast_decode is not None
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None):
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _name, wire_type, field_id = iprot.readFieldBegin()
            if wire_type == TType.STOP:
                break
            if field_id == 1 and wire_type == TType.STRING:
                self.aad_prefix = iprot.readBinary()
            elif field_id == 2 and wire_type == TType.STRING:
                self.aad_file_unique = iprot.readBinary()
            elif field_id == 3 and wire_type == TType.BOOL:
                self.supply_aad_prefix = iprot.readBool()
            else:
                iprot.skip(wire_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this struct to *oprot*; fields left as None are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AesGcmCtrV1')
        # Field ids and emission order match the Thrift IDL.
        for fname, wire_type, field_id, value, emit in (
                ('aad_prefix', TType.STRING, 1, self.aad_prefix, oprot.writeBinary),
                ('aad_file_unique', TType.STRING, 2, self.aad_file_unique, oprot.writeBinary),
                ('supply_aad_prefix', TType.BOOL, 3, self.supply_aad_prefix, oprot.writeBool),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, wire_type, field_id)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields; always valid."""
        return

    def __repr__(self):
        rendered = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, rendered)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class EncryptionAlgorithm(object):
    """
    Union selecting the file encryption algorithm: exactly one of the two
    members is expected to be populated.

    Attributes:
     - AES_GCM_V1: parameters for AES-GCM-V1.
     - AES_GCM_CTR_V1: parameters for AES-GCM-CTR-V1.
    """

    def __init__(self, AES_GCM_V1=None, AES_GCM_CTR_V1=None,):
        self.AES_GCM_V1 = AES_GCM_V1
        self.AES_GCM_CTR_V1 = AES_GCM_CTR_V1

    def read(self, iprot):
        """Populate this union from *iprot*; unrecognised fields are skipped."""
        if (iprot._fast_decode is not None
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None):
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            _name, wire_type, field_id = iprot.readFieldBegin()
            if wire_type == TType.STOP:
                break
            if field_id == 1 and wire_type == TType.STRUCT:
                self.AES_GCM_V1 = AesGcmV1()
                self.AES_GCM_V1.read(iprot)
            elif field_id == 2 and wire_type == TType.STRUCT:
                self.AES_GCM_CTR_V1 = AesGcmCtrV1()
                self.AES_GCM_CTR_V1.read(iprot)
            else:
                iprot.skip(wire_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this union to *oprot*; unset members are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('EncryptionAlgorithm')
        # Field ids and emission order match the Thrift IDL.
        for fname, field_id, member in (('AES_GCM_V1', 1, self.AES_GCM_V1),
                                        ('AES_GCM_CTR_V1', 2, self.AES_GCM_CTR_V1)):
            if member is not None:
                oprot.writeFieldBegin(fname, TType.STRUCT, field_id)
                member.write(oprot)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields; always valid."""
        return

    def __repr__(self):
        rendered = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, rendered)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class FileMetaData(object):
"""
Description for file metadata
Attributes:
- version: Version of this file *
- schema: Parquet schema for this file. This schema contains metadata for all the columns.
The schema is represented as a tree with a single root. The nodes of the tree
are flattened to a list by doing a depth-first traversal.
The column metadata contains the path in the schema for that column which can be
used to map columns to nodes in the schema.
The first element is the root *
- num_rows: Number of rows in this file *
- row_groups: Row groups in this file *
- key_value_metadata: Optional key/value metadata *
- created_by: String for application that wrote this file. This should be in the format
<Application> version <App Version> (build <App Build Hash>).
e.g. impala version 1.0 (build 6cf94d29b2b7115df4de2c06e2ab4326d721eb55)
- column_orders: Sort order used for the min_value and max_value fields in the Statistics
objects and the min_values and max_values fields in the ColumnIndex
objects of each column in this file. Sort orders are listed in the order
matching the columns in the schema. The indexes are not necessary the same
though, because only leaf nodes of the schema are represented in the list
of sort orders.
Without column_orders, the meaning of the min_value and max_value fields
in the Statistics object and the ColumnIndex object is undefined. To ensure
well-defined behaviour, if these fields are written to a Parquet file,
column_orders must be written as well.
The obsolete min and max fields in the Statistics object are always sorted
by signed comparison regardless of column_orders.
- encryption_algorithm: Encryption algorithm. This field is set only in encrypted files
with plaintext footer. Files with encrypted footer store algorithm id
in FileCryptoMetaData structure.
- footer_signing_key_metadata: Retrieval metadata of key used for signing the footer.
Used only in encrypted files with plaintext footer.
"""
def __init__(self, version=None, schema=None, num_rows=None, row_groups=None, key_value_metadata=None, created_by=None, column_orders=None, encryption_algorithm=None, footer_signing_key_metadata=None,):
self.version = version
self.schema = schema
self.num_rows = num_rows
self.row_groups = row_groups
self.key_value_metadata = key_value_metadata
self.created_by = created_by
self.column_orders = column_orders
self.encryption_algorithm = encryption_algorithm
self.footer_signing_key_metadata = footer_signing_key_metadata
def read(self, iprot):
    """Deserialize this FileMetaData from the input protocol *iprot*.

    Uses the accelerated C decoder when available; otherwise walks the
    Thrift field stream one field at a time. Unknown field ids and
    mismatched wire types are skipped so newer writers stay readable
    (Thrift forward compatibility).
    """
    # Fast path: C decoder fills the struct in one call from thrift_spec.
    if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
        iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
        return
    iprot.readStructBegin()
    while True:
        (fname, ftype, fid) = iprot.readFieldBegin()
        if ftype == TType.STOP:
            break
        if fid == 1:
            # Field 1: version (i32)
            if ftype == TType.I32:
                self.version = iprot.readI32()
            else:
                iprot.skip(ftype)
        elif fid == 2:
            # Field 2: schema (list<SchemaElement>)
            if ftype == TType.LIST:
                self.schema = []
                (_etype87, _size84) = iprot.readListBegin()
                for _i88 in range(_size84):
                    _elem89 = SchemaElement()
                    _elem89.read(iprot)
                    self.schema.append(_elem89)
                iprot.readListEnd()
            else:
                iprot.skip(ftype)
        elif fid == 3:
            # Field 3: num_rows (i64)
            if ftype == TType.I64:
                self.num_rows = iprot.readI64()
            else:
                iprot.skip(ftype)
        elif fid == 4:
            # Field 4: row_groups (list<RowGroup>)
            if ftype == TType.LIST:
                self.row_groups = []
                (_etype93, _size90) = iprot.readListBegin()
                for _i94 in range(_size90):
                    _elem95 = RowGroup()
                    _elem95.read(iprot)
                    self.row_groups.append(_elem95)
                iprot.readListEnd()
            else:
                iprot.skip(ftype)
        elif fid == 5:
            # Field 5: key_value_metadata (optional list<KeyValue>)
            if ftype == TType.LIST:
                self.key_value_metadata = []
                (_etype99, _size96) = iprot.readListBegin()
                for _i100 in range(_size96):
                    _elem101 = KeyValue()
                    _elem101.read(iprot)
                    self.key_value_metadata.append(_elem101)
                iprot.readListEnd()
            else:
                iprot.skip(ftype)
        elif fid == 6:
            # Field 6: created_by (optional UTF-8 string; bytes on Python 2)
            if ftype == TType.STRING:
                self.created_by = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
            else:
                iprot.skip(ftype)
        elif fid == 7:
            # Field 7: column_orders (optional list<ColumnOrder>)
            if ftype == TType.LIST:
                self.column_orders = []
                (_etype105, _size102) = iprot.readListBegin()
                for _i106 in range(_size102):
                    _elem107 = ColumnOrder()
                    _elem107.read(iprot)
                    self.column_orders.append(_elem107)
                iprot.readListEnd()
            else:
                iprot.skip(ftype)
        elif fid == 8:
            # Field 8: encryption_algorithm (optional struct; only set in
            # encrypted files with a plaintext footer)
            if ftype == TType.STRUCT:
                self.encryption_algorithm = EncryptionAlgorithm()
                self.encryption_algorithm.read(iprot)
            else:
                iprot.skip(ftype)
        elif fid == 9:
            # Field 9: footer_signing_key_metadata (optional binary)
            if ftype == TType.STRING:
                self.footer_signing_key_metadata = iprot.readBinary()
            else:
                iprot.skip(ftype)
        else:
            # Unknown field id: skip the payload for forward compatibility.
            iprot.skip(ftype)
        iprot.readFieldEnd()
    iprot.readStructEnd()
def write(self, oprot):
    """Serialize this FileMetaData to the output protocol *oprot*.

    Uses the accelerated C encoder when available; otherwise emits only
    the fields that are set — fields left as None are omitted from the
    wire entirely.
    """
    # Fast path: C encoder serializes the whole struct from thrift_spec.
    if oprot._fast_encode is not None and self.thrift_spec is not None:
        oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
        return
    oprot.writeStructBegin('FileMetaData')
    if self.version is not None:
        oprot.writeFieldBegin('version', TType.I32, 1)
        oprot.writeI32(self.version)
        oprot.writeFieldEnd()
    if self.schema is not None:
        oprot.writeFieldBegin('schema', TType.LIST, 2)
        oprot.writeListBegin(TType.STRUCT, len(self.schema))
        for iter108 in self.schema:
            iter108.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
    if self.num_rows is not None:
        oprot.writeFieldBegin('num_rows', TType.I64, 3)
        oprot.writeI64(self.num_rows)
        oprot.writeFieldEnd()
    if self.row_groups is not None:
        oprot.writeFieldBegin('row_groups', TType.LIST, 4)
        oprot.writeListBegin(TType.STRUCT, len(self.row_groups))
        for iter109 in self.row_groups:
            iter109.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
    if self.key_value_metadata is not None:
        oprot.writeFieldBegin('key_value_metadata', TType.LIST, 5)
        oprot.writeListBegin(TType.STRUCT, len(self.key_value_metadata))
        for iter110 in self.key_value_metadata:
            iter110.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
    if self.created_by is not None:
        oprot.writeFieldBegin('created_by', TType.STRING, 6)
        # Python 2 needs an explicit UTF-8 encode; Python 3 str passes through.
        oprot.writeString(self.created_by.encode('utf-8') if sys.version_info[0] == 2 else self.created_by)
        oprot.writeFieldEnd()
    if self.column_orders is not None:
        oprot.writeFieldBegin('column_orders', TType.LIST, 7)
        oprot.writeListBegin(TType.STRUCT, len(self.column_orders))
        for iter111 in self.column_orders:
            iter111.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
    if self.encryption_algorithm is not None:
        oprot.writeFieldBegin('encryption_algorithm', TType.STRUCT, 8)
        self.encryption_algorithm.write(oprot)
        oprot.writeFieldEnd()
    if self.footer_signing_key_metadata is not None:
        oprot.writeFieldBegin('footer_signing_key_metadata', TType.STRING, 9)
        oprot.writeBinary(self.footer_signing_key_metadata)
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def validate(self):
    """Raise TProtocolException if any required field is still unset.

    The required fields of FileMetaData are: version, schema, num_rows
    and row_groups; the remaining fields are optional.
    """
    for required in ('version', 'schema', 'num_rows', 'row_groups'):
        if getattr(self, required) is None:
            raise TProtocolException(message='Required field %s is unset!' % (required,))
    return
def __repr__(self):
    """Debug representation listing every attribute as key=value."""
    parts = ('%s=%r' % (key, value) for key, value in self.__dict__.items())
    return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
def __eq__(self, other):
    """Equal iff *other* has the same class and identical attributes."""
    if not isinstance(other, self.__class__):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Inverse of equality."""
    equal = (self == other)
    return not equal
class FileCryptoMetaData(object):
    """
    Crypto metadata for files with encrypted footer *

    Attributes:
     - encryption_algorithm: Encryption algorithm. This field is only used for files
       with encrypted footer. Files with plaintext footer store algorithm id
       inside footer (FileMetaData structure).
     - key_metadata: Retrieval metadata of key used for encryption of footer,
       and (possibly) columns *
    """

    def __init__(self, encryption_algorithm=None, key_metadata=None,):
        # Required-ness of encryption_algorithm is enforced by validate().
        self.encryption_algorithm = encryption_algorithm
        self.key_metadata = key_metadata

    def read(self, iprot):
        """Deserialize from *iprot*; unknown fields are skipped."""
        # Fast path: C decoder fills the struct in one call from thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                # Field 1: encryption_algorithm (required struct)
                if ftype == TType.STRUCT:
                    self.encryption_algorithm = EncryptionAlgorithm()
                    self.encryption_algorithm.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                # Field 2: key_metadata (optional binary)
                if ftype == TType.STRING:
                    self.key_metadata = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to *oprot*; fields left as None are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('FileCryptoMetaData')
        if self.encryption_algorithm is not None:
            oprot.writeFieldBegin('encryption_algorithm', TType.STRUCT, 1)
            self.encryption_algorithm.write(oprot)
            oprot.writeFieldEnd()
        if self.key_metadata is not None:
            oprot.writeFieldBegin('key_metadata', TType.STRING, 2)
            oprot.writeBinary(self.key_metadata)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if the required field is unset."""
        if self.encryption_algorithm is None:
            raise TProtocolException(message='Required field encryption_algorithm is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# ---------------------------------------------------------------------------
# Thrift runtime metadata: per-struct field specs consumed by the optional
# C fast-path encoder/decoder.  Each field tuple is
#   (field id, TType, field name, type arguments, default value)
# and index 0 is always None because Thrift field ids start at 1.
# ---------------------------------------------------------------------------
all_structs.append(Statistics)
Statistics.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'max', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'min', 'BINARY', None, ),  # 2
    (3, TType.I64, 'null_count', None, None, ),  # 3
    (4, TType.I64, 'distinct_count', None, None, ),  # 4
    (5, TType.STRING, 'max_value', 'BINARY', None, ),  # 5
    (6, TType.STRING, 'min_value', 'BINARY', None, ),  # 6
)
all_structs.append(StringType)
StringType.thrift_spec = (
)
all_structs.append(UUIDType)
UUIDType.thrift_spec = (
)
all_structs.append(MapType)
MapType.thrift_spec = (
)
all_structs.append(ListType)
ListType.thrift_spec = (
)
all_structs.append(EnumType)
EnumType.thrift_spec = (
)
all_structs.append(DateType)
DateType.thrift_spec = (
)
all_structs.append(NullType)
NullType.thrift_spec = (
)
all_structs.append(DecimalType)
DecimalType.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'scale', None, None, ),  # 1
    (2, TType.I32, 'precision', None, None, ),  # 2
)
all_structs.append(MilliSeconds)
MilliSeconds.thrift_spec = (
)
all_structs.append(MicroSeconds)
MicroSeconds.thrift_spec = (
)
all_structs.append(NanoSeconds)
NanoSeconds.thrift_spec = (
)
all_structs.append(TimeUnit)
TimeUnit.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'MILLIS', [MilliSeconds, None], None, ),  # 1
    (2, TType.STRUCT, 'MICROS', [MicroSeconds, None], None, ),  # 2
    (3, TType.STRUCT, 'NANOS', [NanoSeconds, None], None, ),  # 3
)
all_structs.append(TimestampType)
TimestampType.thrift_spec = (
    None,  # 0
    (1, TType.BOOL, 'isAdjustedToUTC', None, None, ),  # 1
    (2, TType.STRUCT, 'unit', [TimeUnit, None], None, ),  # 2
)
all_structs.append(TimeType)
TimeType.thrift_spec = (
    None,  # 0
    (1, TType.BOOL, 'isAdjustedToUTC', None, None, ),  # 1
    (2, TType.STRUCT, 'unit', [TimeUnit, None], None, ),  # 2
)
all_structs.append(IntType)
IntType.thrift_spec = (
    None,  # 0
    (1, TType.BYTE, 'bitWidth', None, None, ),  # 1
    (2, TType.BOOL, 'isSigned', None, None, ),  # 2
)
all_structs.append(JsonType)
JsonType.thrift_spec = (
)
all_structs.append(BsonType)
BsonType.thrift_spec = (
)
all_structs.append(LogicalType)
LogicalType.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'STRING', [StringType, None], None, ),  # 1
    (2, TType.STRUCT, 'MAP', [MapType, None], None, ),  # 2
    (3, TType.STRUCT, 'LIST', [ListType, None], None, ),  # 3
    (4, TType.STRUCT, 'ENUM', [EnumType, None], None, ),  # 4
    (5, TType.STRUCT, 'DECIMAL', [DecimalType, None], None, ),  # 5
    (6, TType.STRUCT, 'DATE', [DateType, None], None, ),  # 6
    (7, TType.STRUCT, 'TIME', [TimeType, None], None, ),  # 7
    (8, TType.STRUCT, 'TIMESTAMP', [TimestampType, None], None, ),  # 8
    None,  # 9 (id 9 is reserved/unused in the Parquet logical-type union)
    (10, TType.STRUCT, 'INTEGER', [IntType, None], None, ),  # 10
    (11, TType.STRUCT, 'UNKNOWN', [NullType, None], None, ),  # 11
    (12, TType.STRUCT, 'JSON', [JsonType, None], None, ),  # 12
    (13, TType.STRUCT, 'BSON', [BsonType, None], None, ),  # 13
    (14, TType.STRUCT, 'UUID', [UUIDType, None], None, ),  # 14
)
all_structs.append(SchemaElement)
SchemaElement.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'type', None, None, ),  # 1
    (2, TType.I32, 'type_length', None, None, ),  # 2
    (3, TType.I32, 'repetition_type', None, None, ),  # 3
    (4, TType.STRING, 'name', 'UTF8', None, ),  # 4
    (5, TType.I32, 'num_children', None, None, ),  # 5
    (6, TType.I32, 'converted_type', None, None, ),  # 6
    (7, TType.I32, 'scale', None, None, ),  # 7
    (8, TType.I32, 'precision', None, None, ),  # 8
    (9, TType.I32, 'field_id', None, None, ),  # 9
    (10, TType.STRUCT, 'logicalType', [LogicalType, None], None, ),  # 10
)
all_structs.append(DataPageHeader)
DataPageHeader.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'num_values', None, None, ),  # 1
    (2, TType.I32, 'encoding', None, None, ),  # 2
    (3, TType.I32, 'definition_level_encoding', None, None, ),  # 3
    (4, TType.I32, 'repetition_level_encoding', None, None, ),  # 4
    (5, TType.STRUCT, 'statistics', [Statistics, None], None, ),  # 5
)
all_structs.append(IndexPageHeader)
IndexPageHeader.thrift_spec = (
)
all_structs.append(DictionaryPageHeader)
DictionaryPageHeader.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'num_values', None, None, ),  # 1
    (2, TType.I32, 'encoding', None, None, ),  # 2
    (3, TType.BOOL, 'is_sorted', None, None, ),  # 3
)
all_structs.append(DataPageHeaderV2)
DataPageHeaderV2.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'num_values', None, None, ),  # 1
    (2, TType.I32, 'num_nulls', None, None, ),  # 2
    (3, TType.I32, 'num_rows', None, None, ),  # 3
    (4, TType.I32, 'encoding', None, None, ),  # 4
    (5, TType.I32, 'definition_levels_byte_length', None, None, ),  # 5
    (6, TType.I32, 'repetition_levels_byte_length', None, None, ),  # 6
    (7, TType.BOOL, 'is_compressed', None, True, ),  # 7 (defaults to True)
    (8, TType.STRUCT, 'statistics', [Statistics, None], None, ),  # 8
)
all_structs.append(SplitBlockAlgorithm)
SplitBlockAlgorithm.thrift_spec = (
)
all_structs.append(BloomFilterAlgorithm)
BloomFilterAlgorithm.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'BLOCK', [SplitBlockAlgorithm, None], None, ),  # 1
)
all_structs.append(XxHash)
XxHash.thrift_spec = (
)
all_structs.append(BloomFilterHash)
BloomFilterHash.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'XXHASH', [XxHash, None], None, ),  # 1
)
all_structs.append(Uncompressed)
Uncompressed.thrift_spec = (
)
all_structs.append(BloomFilterCompression)
BloomFilterCompression.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'UNCOMPRESSED', [Uncompressed, None], None, ),  # 1
)
all_structs.append(BloomFilterHeader)
BloomFilterHeader.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'numBytes', None, None, ),  # 1
    (2, TType.STRUCT, 'algorithm', [BloomFilterAlgorithm, None], None, ),  # 2
    (3, TType.STRUCT, 'hash', [BloomFilterHash, None], None, ),  # 3
    (4, TType.STRUCT, 'compression', [BloomFilterCompression, None], None, ),  # 4
)
all_structs.append(PageHeader)
PageHeader.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'type', None, None, ),  # 1
    (2, TType.I32, 'uncompressed_page_size', None, None, ),  # 2
    (3, TType.I32, 'compressed_page_size', None, None, ),  # 3
    (4, TType.I32, 'crc', None, None, ),  # 4
    (5, TType.STRUCT, 'data_page_header', [DataPageHeader, None], None, ),  # 5
    (6, TType.STRUCT, 'index_page_header', [IndexPageHeader, None], None, ),  # 6
    (7, TType.STRUCT, 'dictionary_page_header', [DictionaryPageHeader, None], None, ),  # 7
    (8, TType.STRUCT, 'data_page_header_v2', [DataPageHeaderV2, None], None, ),  # 8
)
all_structs.append(KeyValue)
KeyValue.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'key', 'UTF8', None, ),  # 1
    (2, TType.STRING, 'value', 'UTF8', None, ),  # 2
)
all_structs.append(SortingColumn)
SortingColumn.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'column_idx', None, None, ),  # 1
    (2, TType.BOOL, 'descending', None, None, ),  # 2
    (3, TType.BOOL, 'nulls_first', None, None, ),  # 3
)
all_structs.append(PageEncodingStats)
PageEncodingStats.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'page_type', None, None, ),  # 1
    (2, TType.I32, 'encoding', None, None, ),  # 2
    (3, TType.I32, 'count', None, None, ),  # 3
)
all_structs.append(ColumnMetaData)
ColumnMetaData.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'type', None, None, ),  # 1
    (2, TType.LIST, 'encodings', (TType.I32, None, False), None, ),  # 2
    (3, TType.LIST, 'path_in_schema', (TType.STRING, 'UTF8', False), None, ),  # 3
    (4, TType.I32, 'codec', None, None, ),  # 4
    (5, TType.I64, 'num_values', None, None, ),  # 5
    (6, TType.I64, 'total_uncompressed_size', None, None, ),  # 6
    (7, TType.I64, 'total_compressed_size', None, None, ),  # 7
    (8, TType.LIST, 'key_value_metadata', (TType.STRUCT, [KeyValue, None], False), None, ),  # 8
    (9, TType.I64, 'data_page_offset', None, None, ),  # 9
    (10, TType.I64, 'index_page_offset', None, None, ),  # 10
    (11, TType.I64, 'dictionary_page_offset', None, None, ),  # 11
    (12, TType.STRUCT, 'statistics', [Statistics, None], None, ),  # 12
    (13, TType.LIST, 'encoding_stats', (TType.STRUCT, [PageEncodingStats, None], False), None, ),  # 13
    (14, TType.I64, 'bloom_filter_offset', None, None, ),  # 14
)
all_structs.append(EncryptionWithFooterKey)
EncryptionWithFooterKey.thrift_spec = (
)
all_structs.append(EncryptionWithColumnKey)
EncryptionWithColumnKey.thrift_spec = (
    None,  # 0
    (1, TType.LIST, 'path_in_schema', (TType.STRING, 'UTF8', False), None, ),  # 1
    (2, TType.STRING, 'key_metadata', 'BINARY', None, ),  # 2
)
all_structs.append(ColumnCryptoMetaData)
ColumnCryptoMetaData.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'ENCRYPTION_WITH_FOOTER_KEY', [EncryptionWithFooterKey, None], None, ),  # 1
    (2, TType.STRUCT, 'ENCRYPTION_WITH_COLUMN_KEY', [EncryptionWithColumnKey, None], None, ),  # 2
)
all_structs.append(ColumnChunk)
ColumnChunk.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'file_path', 'UTF8', None, ),  # 1
    (2, TType.I64, 'file_offset', None, None, ),  # 2
    (3, TType.STRUCT, 'meta_data', [ColumnMetaData, None], None, ),  # 3
    (4, TType.I64, 'offset_index_offset', None, None, ),  # 4
    (5, TType.I32, 'offset_index_length', None, None, ),  # 5
    (6, TType.I64, 'column_index_offset', None, None, ),  # 6
    (7, TType.I32, 'column_index_length', None, None, ),  # 7
    (8, TType.STRUCT, 'crypto_metadata', [ColumnCryptoMetaData, None], None, ),  # 8
    (9, TType.STRING, 'encrypted_column_metadata', 'BINARY', None, ),  # 9
)
all_structs.append(RowGroup)
RowGroup.thrift_spec = (
    None,  # 0
    (1, TType.LIST, 'columns', (TType.STRUCT, [ColumnChunk, None], False), None, ),  # 1
    (2, TType.I64, 'total_byte_size', None, None, ),  # 2
    (3, TType.I64, 'num_rows', None, None, ),  # 3
    (4, TType.LIST, 'sorting_columns', (TType.STRUCT, [SortingColumn, None], False), None, ),  # 4
    (5, TType.I64, 'file_offset', None, None, ),  # 5
    (6, TType.I64, 'total_compressed_size', None, None, ),  # 6
    (7, TType.I16, 'ordinal', None, None, ),  # 7
)
all_structs.append(TypeDefinedOrder)
TypeDefinedOrder.thrift_spec = (
)
all_structs.append(ColumnOrder)
ColumnOrder.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'TYPE_ORDER', [TypeDefinedOrder, None], None, ),  # 1
)
all_structs.append(PageLocation)
PageLocation.thrift_spec = (
    None,  # 0
    (1, TType.I64, 'offset', None, None, ),  # 1
    (2, TType.I32, 'compressed_page_size', None, None, ),  # 2
    (3, TType.I64, 'first_row_index', None, None, ),  # 3
)
all_structs.append(OffsetIndex)
OffsetIndex.thrift_spec = (
    None,  # 0
    (1, TType.LIST, 'page_locations', (TType.STRUCT, [PageLocation, None], False), None, ),  # 1
)
all_structs.append(ColumnIndex)
ColumnIndex.thrift_spec = (
    None,  # 0
    (1, TType.LIST, 'null_pages', (TType.BOOL, None, False), None, ),  # 1
    (2, TType.LIST, 'min_values', (TType.STRING, 'BINARY', False), None, ),  # 2
    (3, TType.LIST, 'max_values', (TType.STRING, 'BINARY', False), None, ),  # 3
    (4, TType.I32, 'boundary_order', None, None, ),  # 4
    (5, TType.LIST, 'null_counts', (TType.I64, None, False), None, ),  # 5
)
all_structs.append(AesGcmV1)
AesGcmV1.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'aad_prefix', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'aad_file_unique', 'BINARY', None, ),  # 2
    (3, TType.BOOL, 'supply_aad_prefix', None, None, ),  # 3
)
all_structs.append(AesGcmCtrV1)
AesGcmCtrV1.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'aad_prefix', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'aad_file_unique', 'BINARY', None, ),  # 2
    (3, TType.BOOL, 'supply_aad_prefix', None, None, ),  # 3
)
all_structs.append(EncryptionAlgorithm)
EncryptionAlgorithm.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'AES_GCM_V1', [AesGcmV1, None], None, ),  # 1
    (2, TType.STRUCT, 'AES_GCM_CTR_V1', [AesGcmCtrV1, None], None, ),  # 2
)
all_structs.append(FileMetaData)
FileMetaData.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'version', None, None, ),  # 1
    (2, TType.LIST, 'schema', (TType.STRUCT, [SchemaElement, None], False), None, ),  # 2
    (3, TType.I64, 'num_rows', None, None, ),  # 3
    (4, TType.LIST, 'row_groups', (TType.STRUCT, [RowGroup, None], False), None, ),  # 4
    (5, TType.LIST, 'key_value_metadata', (TType.STRUCT, [KeyValue, None], False), None, ),  # 5
    (6, TType.STRING, 'created_by', 'UTF8', None, ),  # 6
    (7, TType.LIST, 'column_orders', (TType.STRUCT, [ColumnOrder, None], False), None, ),  # 7
    (8, TType.STRUCT, 'encryption_algorithm', [EncryptionAlgorithm, None], None, ),  # 8
    (9, TType.STRING, 'footer_signing_key_metadata', 'BINARY', None, ),  # 9
)
all_structs.append(FileCryptoMetaData)
FileCryptoMetaData.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'encryption_algorithm', [EncryptionAlgorithm, None], None, ),  # 1
    (2, TType.STRING, 'key_metadata', 'BINARY', None, ),  # 2
)
# Resolve forward references inside the specs, then drop the helper list.
fix_spec(all_structs)
del all_structs
|
<reponame>ecmwf/metview-docs
"""
ODB - TEMP Wind
"""
# (C) Copyright 2017- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import metview as mv
# -------------------------------------------------------------------------
# Demonstrates how to plot TEMP wind data from ODB onto a map
# ------------------------------------------------------------------------

# Read the ODB database: use the local file when present, otherwise
# download it from the Metview example gallery.
filename = "temp.odb"
if mv.exist(filename):
    db = mv.read(filename)
else:
    db = mv.gallery.load_dataset(filename)

# define pressure level (hPa; the ODB stores vertco_reference_1 in Pa)
lev = 250

# define query for u wind component (varno=3)
q_u = """select lat@hdr as lat,
lon@hdr as lon,
obsvalue as val
where varno=3 and vertco_reference_1={}""".format(
    lev * 100
)

# define query for v wind component (varno=4)
q_v = """select lat@hdr as lat,
lon@hdr as lon,
obsvalue as val
where varno=4 and vertco_reference_1={}""".format(
    lev * 100
)

# define query for metadata (analysis date/time, used in the title)
q_meta = "select DISTINCT andate, antime"

# filter u
f_u = mv.odb_filter(odb_query=q_u, odb_data=db)
# filter v
f_v = mv.odb_filter(odb_query=q_v, odb_data=db)
# filter metadata
f_m = mv.odb_filter(odb_query=q_meta, odb_data=db)

# read the odb columns into vectors
lat = mv.values(f_u, "lat")
lon = mv.values(f_u, "lon")
u = mv.values(f_u, "val")
v = mv.values(f_v, "val")

# read values for the title
andate = mv.values(f_m, "andate")[0]
antime = mv.values(f_m, "antime")[0]

# create geopoints from the odb data
gpt = mv.create_geo(
    type="xy_vector", latitudes=lat, longitudes=lon, levels=lev, values=u, value2s=v
)

# define wind plotting style (arrows coloured by speed, blue -> red)
colour_wind = mv.mwind(
    legend="on",
    wind_advanced_method="on",
    wind_arrow_unit_velocity=50.0,
    wind_thinning_factor=1.0,
    wind_advanced_colour_selection_type="interval",
    wind_advanced_colour_level_interval=5,
    wind_advanced_colour_min_value=0,
    wind_advanced_colour_max_level_colour="red",
    wind_advanced_colour_min_level_colour="blue",
    wind_advanced_colour_direction="clockwise",
)

# define coastlines (dark shaded land/sea background)
coast = mv.mcoast(
    map_coastline_colour="RGB(0.5,0.5,0.5)",
    map_coastline_resolution="low",
    map_coastline_land_shade="on",
    map_coastline_land_shade_colour="RGB(0.21,0.21,0.21)",
    map_coastline_sea_shade="on",
    map_coastline_sea_shade_colour="RGB(0.53,0.57,0.58)",
    map_grid_colour="RGB(0.38,0.37,0.37)",
)

# define title
title = mv.mtext(
    text_font_size=0.4,
    text_line_1="Land TEMP wind Date={:.0f} Time={:.0f} Level={:.0f} hPa".format(
        andate, antime, lev
    ),
)

# define the output plot file
mv.setoutput(mv.pdf_output(output_name="odb_temp_wind"))

# generate the plot
mv.plot(coast, gpt, colour_wind, title)
|
<reponame>dmtvanzanten/ezdxf<filename>tests/test_09_cython_acceleration/test_906_acc_bspline.py
# Copyright (c) 2021, <NAME>
# License: MIT License
import pytest

# Skip the whole module when the compiled Cython extension is unavailable.
pytest.importorskip('ezdxf.acc.bspline')
from ezdxf.math._bspline import Basis as PyBasis, Evaluator as PyEvaluator
from ezdxf.acc.bspline import Basis as CyBasis, Evaluator as CyEvaluator
from ezdxf.math import linspace, Vec3, close_vectors
# Shared test data: an order-4 B-spline over 10 control points with a
# uniform knot vector; WEIGHTS turns it into a rational (NURBS) variant.
COUNT = 10   # number of control points
ORDER = 4    # spline order (degree + 1)
KNOTS = tuple(range(COUNT + ORDER))
WEIGHTS = [1.0, 0.7, 0.6, 0.5, 0.5, 0.5, 0.5, 0.6, 0.7, 1.0]
POINTS = [
    Vec3(0.181, 0.753, 0.15),
    Vec3(0.527, 0.944, 0.176),
    Vec3(1.326, 1.015, 0.184),
    Vec3(1.086, 0.607, 0.185),
    Vec3(1.286, 1.468, 0.255),
    Vec3(1.451, 0.596, 0.175),
    Vec3(1.282, 0.703, 0.17),
    Vec3(0.79, 0.77, 0.169),
    Vec3(1.622, 0.831, 0.172),
    Vec3(1.099, 0.922, 0.163)
]
@pytest.fixture
def t_vector():
    """20 evenly spaced parameter values spanning the whole knot range."""
    samples = linspace(0, max(KNOTS), 20)
    return list(samples)
@pytest.fixture
def py_basis():
    """Unweighted basis, pure-Python reference implementation."""
    basis = PyBasis(KNOTS, ORDER, COUNT)
    return basis
@pytest.fixture
def cy_basis():
    """Unweighted basis, accelerated Cython implementation."""
    basis = CyBasis(KNOTS, ORDER, COUNT)
    return basis
@pytest.fixture
def py_wbasis():
    """Weighted (rational) basis, pure-Python reference implementation."""
    basis = PyBasis(KNOTS, ORDER, COUNT, WEIGHTS)
    return basis
@pytest.fixture
def cy_wbasis():
    """Weighted (rational) basis, accelerated Cython implementation."""
    basis = CyBasis(KNOTS, ORDER, COUNT, WEIGHTS)
    return basis
def test_find_span(py_basis, cy_basis, t_vector):
    """Both implementations must locate the same knot span for every t."""
    for t in t_vector:
        assert cy_basis.find_span(t) == py_basis.find_span(t)
def test_cython_knots(cy_basis):
    """Knot data must stay stable across repeated attribute access."""
    last_knot = KNOTS[-1]
    for _attempt in range(10):
        assert cy_basis.knots == KNOTS
        assert cy_basis.max_t == last_knot
def test_basis_funcs(py_basis, cy_basis, t_vector):
    """basis_funcs of both implementations must agree at every t."""
    for t in t_vector:
        expected = py_basis.basis_funcs(py_basis.find_span(t), t)
        result = cy_basis.basis_funcs(cy_basis.find_span(t), t)
        assert expected == result
def test_basis_vector(py_basis, cy_basis, t_vector):
    """Full basis vectors must match (Cython result is listified first)."""
    for t in t_vector:
        assert py_basis.basis_vector(t) == list(cy_basis.basis_vector(t))
def test_weighted_basis_vector(py_wbasis, cy_wbasis, t_vector):
    """Weighted basis vectors must match between implementations."""
    for t in t_vector:
        assert py_wbasis.basis_vector(t) == list(cy_wbasis.basis_vector(t))
def test_basis_funcs_derivatives(py_basis, cy_basis, t_vector):
    """Second-order basis derivative tables must match exactly."""
    for t in t_vector:
        expected = py_basis.basis_funcs_derivatives(py_basis.find_span(t), t, 2)
        result = cy_basis.basis_funcs_derivatives(cy_basis.find_span(t), t, 2)
        assert expected == result
def test_weighted_basis_funcs_derivatives(py_wbasis, cy_wbasis, t_vector):
    """Weighted second-order derivative tables must match exactly."""
    for t in t_vector:
        expected = py_wbasis.basis_funcs_derivatives(py_wbasis.find_span(t), t, 2)
        result = cy_wbasis.basis_funcs_derivatives(cy_wbasis.find_span(t), t, 2)
        assert expected == result
@pytest.fixture
def py_eval(py_basis):
    """Pure-Python curve evaluator over the shared control points."""
    evaluator = PyEvaluator(py_basis, POINTS)
    return evaluator
@pytest.fixture
def cy_eval(cy_basis):
    """Cython curve evaluator over the shared control points."""
    evaluator = CyEvaluator(cy_basis, POINTS)
    return evaluator
def test_point_evaluator(py_eval, cy_eval, t_vector):
    """Curve points sampled by both evaluators must be identical."""
    assert list(py_eval.points(t_vector)) == list(cy_eval.points(t_vector))
def test_derivative_evaluator(py_eval, cy_eval, t_vector):
    """Curve derivatives up to order 2 must be identical."""
    assert list(py_eval.derivatives(t_vector, 2)) == list(cy_eval.derivatives(t_vector, 2))
@pytest.fixture
def py_weval(py_wbasis):
    """Pure-Python evaluator of the rational (weighted) curve."""
    evaluator = PyEvaluator(py_wbasis, POINTS)
    return evaluator
@pytest.fixture
def cy_weval(cy_wbasis):
    """Cython evaluator of the rational (weighted) curve."""
    evaluator = CyEvaluator(cy_wbasis, POINTS)
    return evaluator
def test_weighted_point_evaluator(py_weval, cy_weval, t_vector):
    """Rational curve points must be identical between implementations."""
    assert list(py_weval.points(t_vector)) == list(cy_weval.points(t_vector))
def test_weighted_derivative_evaluator(py_weval, cy_weval, t_vector):
    """Rational derivatives are compared approximately (float noise)."""
    expected = list(py_weval.derivatives(t_vector, 2))
    result = list(cy_weval.derivatives(t_vector, 2))
    for d1, d2 in zip(expected, result):
        assert close_vectors(d1, d2)
if __name__ == '__main__':
    # Allow running this test module directly, without the pytest CLI.
    pytest.main([__file__])
|
# encoding: utf-8
# module Tekla.Structures.Model calls itself Model
# from Tekla.Structures.Model,Version=2017.0.0.0,Culture=neutral,PublicKeyToken=2f04dbe497b71114
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class Object(object):
    # Auto-generated stub for Tekla.Structures.Model.Object; the property is
    # a placeholder — the real implementation lives in the .NET assembly.
    Identifier=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Identifier(self: Object) -> Identifier
    Set: Identifier(self: Object)=value
    """
class ModelObject(Object):
    # Auto-generated stub for Tekla.Structures.Model.ModelObject.  All method
    # bodies are placeholders (`pass`); the docstrings record the .NET
    # signatures, including tuple returns for C# `out` parameters.
    def CompareTo(self,obj):
        """ CompareTo(self: ModelObject,obj: object) -> int """
        pass
    def Delete(self):
        """ Delete(self: ModelObject) -> bool """
        pass
    def DeleteInstance(self,*args):
        """ DeleteInstance(self: ModelObject) -> bool """
        pass
    def Equals(self,*__args):
        """ Equals(self: ModelObject,other: ModelObject) -> bool """
        pass
    def GetAllReportProperties(self,stringNames,doubleNames,integerNames,values):
        """ GetAllReportProperties(self: ModelObject,stringNames: ArrayList,doubleNames: ArrayList,integerNames: ArrayList,values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetAllUserProperties(self,values):
        """ GetAllUserProperties(self: ModelObject,values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetChildren(self):
        """ GetChildren(self: ModelObject) -> ModelObjectEnumerator """
        pass
    def GetCoordinateSystem(self):
        """ GetCoordinateSystem(self: ModelObject) -> CoordinateSystem """
        pass
    def GetDoubleReportProperties(self,names,values):
        """ GetDoubleReportProperties(self: ModelObject,names: ArrayList,values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetDoubleUserProperties(self,values):
        """ GetDoubleUserProperties(self: ModelObject,values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetDynamicStringProperty(self,name,value):
        """ GetDynamicStringProperty(self: ModelObject,name: str,value: str) -> (bool,str) """
        pass
    def GetFatherComponent(self):
        """ GetFatherComponent(self: ModelObject) -> BaseComponent """
        pass
    def GetHierarchicObjects(self):
        """ GetHierarchicObjects(self: ModelObject) -> ModelObjectEnumerator """
        pass
    def GetIntegerReportProperties(self,names,values):
        """ GetIntegerReportProperties(self: ModelObject,names: ArrayList,values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetIntegerUserProperties(self,values):
        """ GetIntegerUserProperties(self: ModelObject,values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetPhase(self,phase):
        """ GetPhase(self: ModelObject) -> (bool,Phase) """
        pass
    def GetReportProperty(self,name,value):
        """
        GetReportProperty(self: ModelObject,name: str,value: int) -> (bool,int)
        GetReportProperty(self: ModelObject,name: str,value: float) -> (bool,float)
        GetReportProperty(self: ModelObject,name: str,value: str) -> (bool,str)
        """
        pass
    def GetStringReportProperties(self,names,values):
        """ GetStringReportProperties(self: ModelObject,names: ArrayList,values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetStringUserProperties(self,values):
        """ GetStringUserProperties(self: ModelObject,values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetUserProperty(self,name,value):
        """
        GetUserProperty(self: ModelObject,name: str,value: int) -> (bool,int)
        GetUserProperty(self: ModelObject,name: str,value: float) -> (bool,float)
        GetUserProperty(self: ModelObject,name: str,value: str) -> (bool,str)
        """
        pass
    def Insert(self):
        """ Insert(self: ModelObject) -> bool """
        pass
    def Modify(self):
        """ Modify(self: ModelObject) -> bool """
        pass
    def Select(self):
        """ Select(self: ModelObject) -> bool """
        pass
    def SetDynamicStringProperty(self,name,value):
        """ SetDynamicStringProperty(self: ModelObject,name: str,value: str) -> bool """
        pass
    def SetLabel(self,label):
        """ SetLabel(self: ModelObject,label: str) -> bool """
        pass
    def SetPhase(self,phase):
        """ SetPhase(self: ModelObject,phase: Phase) -> bool """
        pass
    def SetUserProperty(self,name,value):
        """
        SetUserProperty(self: ModelObject,name: str,value: int) -> bool
        SetUserProperty(self: ModelObject,name: str,value: float) -> bool
        SetUserProperty(self: ModelObject,name: str,value: str) -> bool
        """
        pass
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    IsUpToDate=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsUpToDate(self: ModelObject) -> bool
    """
    ModificationTime=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ModificationTime(self: ModelObject) -> Nullable[DateTime]
    """
    # Placeholder for the nested .NET enum type.
    ModelObjectEnum=None
class Assembly(ModelObject):
    """ Assembly() """
    # Auto-generated stub for Tekla.Structures.Model.Assembly; bodies are
    # placeholders for the .NET implementation.
    def Add(self,*__args):
        """
        Add(self: Assembly,Assembly: Assembly) -> bool
        Add(self: Assembly,Assemblables: ArrayList) -> bool
        Add(self: Assembly,Object: IAssemblable) -> bool
        """
        pass
    def CompareTo(self,*__args):
        """ CompareTo(self: Assembly,AssemblyToCompare: Assembly) -> bool """
        pass
    def Delete(self):
        """ Delete(self: Assembly) -> bool """
        pass
    def GetAssembly(self):
        """ GetAssembly(self: Assembly) -> Assembly """
        pass
    def GetAssemblyType(self):
        """ GetAssemblyType(self: Assembly) -> AssemblyTypeEnum """
        pass
    def GetFatherPour(self):
        """ GetFatherPour(self: Assembly) -> PourObject """
        pass
    def GetMainPart(self):
        """ GetMainPart(self: Assembly) -> ModelObject """
        pass
    def GetSecondaries(self):
        """ GetSecondaries(self: Assembly) -> ArrayList """
        pass
    def GetSubAssemblies(self):
        """ GetSubAssemblies(self: Assembly) -> ArrayList """
        pass
    def Insert(self):
        """ Insert(self: Assembly) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Assembly) -> bool """
        pass
    def Remove(self,Object):
        """ Remove(self: Assembly,Object: ModelObject) -> bool """
        pass
    def Select(self):
        """ Select(self: Assembly) -> bool """
        pass
    def SetMainPart(self,Part):
        """ SetMainPart(self: Assembly,Part: Part) -> bool """
        pass
    def __add__(self,*args):
        """ x.__add__(y) <==> x+yx.__add__(y) <==> x+yx.__add__(y) <==> x+y """
        pass
    AssemblyNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: AssemblyNumber(self: Assembly) -> NumberingSeries
    Set: AssemblyNumber(self: Assembly)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: Assembly) -> str
    Set: Name(self: Assembly)=value
    """
    # Placeholder for the nested .NET enum type.
    AssemblyTypeEnum=None
class BaseComponent(ModelObject):
    """ BaseComponent() """
    # Auto-generated stub for Tekla.Structures.Model.BaseComponent; bodies
    # are placeholders for the .NET implementation.
    def AddAttributesToStack(self,*args):
        """ AddAttributesToStack(self: BaseComponent) -> bool """
        pass
    def GetAttribute(self,AttrName,*__args):
        """
        GetAttribute(self: BaseComponent,AttrName: str,DValue: float) -> (bool,float)
        GetAttribute(self: BaseComponent,AttrName: str,Value: int) -> (bool,int)
        GetAttribute(self: BaseComponent,AttrName: str,StrValue: str) -> (bool,str)
        """
        pass
    def LoadAttributesFromFile(self,Filename):
        """ LoadAttributesFromFile(self: BaseComponent,Filename: str) -> bool """
        pass
    def LoadComponentAttributes(self,*args):
        """ LoadComponentAttributes(self: BaseComponent) -> bool """
        pass
    def SetAttribute(self,AttrName,*__args):
        """ SetAttribute(self: BaseComponent,AttrName: str,DValue: float)SetAttribute(self: BaseComponent,AttrName: str,Value: int)SetAttribute(self: BaseComponent,AttrName: str,StrValue: str) """
        pass
    InputPolygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: BaseComponent) -> str
    Set: Name(self: BaseComponent)=value
    """
    Number=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Number(self: BaseComponent) -> int
    Set: Number(self: BaseComponent)=value
    """
    PrimaryObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    SecondaryObjects=property(lambda self: object(),lambda self,v: None,lambda self: None)
    # Sentinel constants mirrored from the .NET class.
    ClassFromAttributeFile=-100
    ConnectionCodeFromAttributeFile='CodeFromAttrFile'
    CUSTOM_OBJECT_NUMBER=-1
    PLUGIN_OBJECT_NUMBER=-100000
# Auto-generated stub: 'Reinforcement' — base class for reinforcement model
# objects (BaseRebarGroup below derives from it). Exposes rebar-geometry
# queries plus grade/offset/numbering properties.
class Reinforcement(ModelObject):
 # no doc
 def GetFatherPour(self):
  """ GetFatherPour(self: Reinforcement) -> PourObject """
  pass
 def GetNumberOfRebars(self):
  """ GetNumberOfRebars(self: Reinforcement) -> int """
  pass
 def GetRebarGeometries(self,withHooks):
  """ GetRebarGeometries(self: Reinforcement,withHooks: bool) -> ArrayList """
  pass
 def GetRebarGeometriesWithoutClashes(self,withHooks):
  """ GetRebarGeometriesWithoutClashes(self: Reinforcement,withHooks: bool) -> ArrayList """
  pass
 def GetSingleRebar(self,index,withHooks):
  """ GetSingleRebar(self: Reinforcement,index: int,withHooks: bool) -> RebarGeometry """
  pass
 def GetSingleRebarWithoutClash(self,index,withHooks):
  """ GetSingleRebarWithoutClash(self: Reinforcement,index: int,withHooks: bool) -> RebarGeometry """
  pass
 def GetSolid(self):
  """ GetSolid(self: Reinforcement) -> Solid """
  pass
 def IsGeometryValid(self):
  """ IsGeometryValid(self: Reinforcement) -> bool """
  pass
 Class=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Class(self: Reinforcement) -> int
 Set: Class(self: Reinforcement)=value
 """
 EndPointOffsetType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EndPointOffsetType(self: Reinforcement) -> RebarOffsetTypeEnum
 Set: EndPointOffsetType(self: Reinforcement)=value
 """
 EndPointOffsetValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EndPointOffsetValue(self: Reinforcement) -> float
 Set: EndPointOffsetValue(self: Reinforcement)=value
 """
 Father=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Father(self: Reinforcement) -> ModelObject
 Set: Father(self: Reinforcement)=value
 """
 FromPlaneOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: FromPlaneOffset(self: Reinforcement) -> float
 Set: FromPlaneOffset(self: Reinforcement)=value
 """
 Grade=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Grade(self: Reinforcement) -> str
 Set: Grade(self: Reinforcement)=value
 """
 InputPointDeformingState=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: InputPointDeformingState(self: Reinforcement) -> DeformingType
 Set: InputPointDeformingState(self: Reinforcement)=value
 """
 Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Name(self: Reinforcement) -> str
 Set: Name(self: Reinforcement)=value
 """
 NumberingSeries=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: NumberingSeries(self: Reinforcement) -> NumberingSeries
 Set: NumberingSeries(self: Reinforcement)=value
 """
 OnPlaneOffsets=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: OnPlaneOffsets(self: Reinforcement) -> ArrayList
 Set: OnPlaneOffsets(self: Reinforcement)=value
 """
 RadiusValues=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: RadiusValues(self: Reinforcement) -> ArrayList
 Set: RadiusValues(self: Reinforcement)=value
 """
 StartPointOffsetType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: StartPointOffsetType(self: Reinforcement) -> RebarOffsetTypeEnum
 Set: StartPointOffsetType(self: Reinforcement)=value
 """
 StartPointOffsetValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: StartPointOffsetValue(self: Reinforcement) -> float
 Set: StartPointOffsetValue(self: Reinforcement)=value
 """
 # Placeholder for the nested .NET enum type Reinforcement.RebarOffsetTypeEnum.
 RebarOffsetTypeEnum=None
# Auto-generated stub: 'BaseRebarGroup' — reinforcement specialization adding
# start/end points, hooks, spacing and exclusion properties. Properties are
# placeholders; the bare strings document their .NET types.
class BaseRebarGroup(Reinforcement):
 # no doc
 EndFromPlaneOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EndFromPlaneOffset(self: BaseRebarGroup) -> float
 Set: EndFromPlaneOffset(self: BaseRebarGroup)=value
 """
 EndHook=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EndHook(self: BaseRebarGroup) -> RebarHookData
 Set: EndHook(self: BaseRebarGroup)=value
 """
 EndPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EndPoint(self: BaseRebarGroup) -> Point
 Set: EndPoint(self: BaseRebarGroup)=value
 """
 ExcludeType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ExcludeType(self: BaseRebarGroup) -> ExcludeTypeEnum
 Set: ExcludeType(self: BaseRebarGroup)=value
 """
 FromPlaneOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: FromPlaneOffset(self: BaseRebarGroup) -> float
 Set: FromPlaneOffset(self: BaseRebarGroup)=value
 """
 Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Size(self: BaseRebarGroup) -> str
 Set: Size(self: BaseRebarGroup)=value
 """
 Spacings=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Spacings(self: BaseRebarGroup) -> ArrayList
 Set: Spacings(self: BaseRebarGroup)=value
 """
 SpacingType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: SpacingType(self: BaseRebarGroup) -> RebarGroupSpacingTypeEnum
 Set: SpacingType(self: BaseRebarGroup)=value
 """
 StartFromPlaneOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: StartFromPlaneOffset(self: BaseRebarGroup) -> float
 Set: StartFromPlaneOffset(self: BaseRebarGroup)=value
 """
 StartHook=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: StartHook(self: BaseRebarGroup) -> RebarHookData
 Set: StartHook(self: BaseRebarGroup)=value
 """
 StartPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: StartPoint(self: BaseRebarGroup) -> Point
 Set: StartPoint(self: BaseRebarGroup)=value
 """
 # Placeholders for the nested .NET enum types on BaseRebarGroup.
 ExcludeTypeEnum=None
 RebarGroupSpacingTypeEnum=None
# Auto-generated stub: 'BaseRebarModifier' — modifier object attached to a
# RebarSet (see Father property) with the standard Insert/Modify/Delete/Select
# lifecycle plus internal *Instance/*Struct helpers.
class BaseRebarModifier(ModelObject):
 # no doc
 def CreateInstance(self,*args):
  """ CreateInstance(self: BaseRebarModifier) -> bool """
  pass
 def Delete(self):
  """ Delete(self: BaseRebarModifier) -> bool """
  pass
 def FromStruct(self,*args):
  """ FromStruct(self: BaseRebarModifier,dotStrip: dotRebarStrip_t) -> dotRebarStrip_t """
  pass
 def Insert(self):
  """ Insert(self: BaseRebarModifier) -> bool """
  pass
 def Modify(self):
  """ Modify(self: BaseRebarModifier) -> bool """
  pass
 def ModifyInstance(self,*args):
  """ ModifyInstance(self: BaseRebarModifier) -> bool """
  pass
 def Select(self):
  """ Select(self: BaseRebarModifier) -> bool """
  pass
 def SelectInstance(self,*args):
  """ SelectInstance(self: BaseRebarModifier) -> bool """
  pass
 def ToStruct(self,*args):
  """ ToStruct(self: BaseRebarModifier,dotStrip: dotRebarStrip_t) -> dotRebarStrip_t """
  pass
 Curve=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Curve(self: BaseRebarModifier) -> Contour
 Set: Curve(self: BaseRebarModifier)=value
 """
 Father=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Father(self: BaseRebarModifier) -> RebarSet
 Set: Father(self: BaseRebarModifier)=value
 """
# Auto-generated stub: 'BaseWeld' — base class for weld objects joining a
# MainObject/SecondaryObject pair. The many Above/Below property pairs mirror
# the two sides of a weld symbol; bare strings document each .NET type.
class BaseWeld(ModelObject):
 # no doc
 def GetSolid(self):
  """ GetSolid(self: BaseWeld) -> Solid """
  pass
 def GetWeldGeometries(self):
  """ GetWeldGeometries(self: BaseWeld) -> ArrayList """
  pass
 AngleAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: AngleAbove(self: BaseWeld) -> float
 Set: AngleAbove(self: BaseWeld)=value
 """
 AngleBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: AngleBelow(self: BaseWeld) -> float
 Set: AngleBelow(self: BaseWeld)=value
 """
 AroundWeld=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: AroundWeld(self: BaseWeld) -> bool
 Set: AroundWeld(self: BaseWeld)=value
 """
 ConnectAssemblies=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ConnectAssemblies(self: BaseWeld) -> bool
 Set: ConnectAssemblies(self: BaseWeld)=value
 """
 ContourAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ContourAbove(self: BaseWeld) -> WeldContourEnum
 Set: ContourAbove(self: BaseWeld)=value
 """
 ContourBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ContourBelow(self: BaseWeld) -> WeldContourEnum
 Set: ContourBelow(self: BaseWeld)=value
 """
 EffectiveThroatAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EffectiveThroatAbove(self: BaseWeld) -> float
 Set: EffectiveThroatAbove(self: BaseWeld)=value
 """
 EffectiveThroatBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EffectiveThroatBelow(self: BaseWeld) -> float
 Set: EffectiveThroatBelow(self: BaseWeld)=value
 """
 ElectrodeClassification=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ElectrodeClassification(self: BaseWeld) -> WeldElectrodeClassificationEnum
 Set: ElectrodeClassification(self: BaseWeld)=value
 """
 ElectrodeCoefficient=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ElectrodeCoefficient(self: BaseWeld) -> float
 Set: ElectrodeCoefficient(self: BaseWeld)=value
 """
 ElectrodeStrength=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ElectrodeStrength(self: BaseWeld) -> float
 Set: ElectrodeStrength(self: BaseWeld)=value
 """
 FinishAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: FinishAbove(self: BaseWeld) -> WeldFinishEnum
 Set: FinishAbove(self: BaseWeld)=value
 """
 FinishBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: FinishBelow(self: BaseWeld) -> WeldFinishEnum
 Set: FinishBelow(self: BaseWeld)=value
 """
 IncrementAmountAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: IncrementAmountAbove(self: BaseWeld) -> int
 Set: IncrementAmountAbove(self: BaseWeld)=value
 """
 IncrementAmountBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: IncrementAmountBelow(self: BaseWeld) -> int
 Set: IncrementAmountBelow(self: BaseWeld)=value
 """
 IntermittentType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: IntermittentType(self: BaseWeld) -> WeldIntermittentTypeEnum
 Set: IntermittentType(self: BaseWeld)=value
 """
 LengthAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: LengthAbove(self: BaseWeld) -> float
 Set: LengthAbove(self: BaseWeld)=value
 """
 LengthBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: LengthBelow(self: BaseWeld) -> float
 Set: LengthBelow(self: BaseWeld)=value
 """
 MainObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: MainObject(self: BaseWeld) -> ModelObject
 Set: MainObject(self: BaseWeld)=value
 """
 NDTInspection=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: NDTInspection(self: BaseWeld) -> WeldNDTInspectionEnum
 Set: NDTInspection(self: BaseWeld)=value
 """
 PitchAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: PitchAbove(self: BaseWeld) -> float
 Set: PitchAbove(self: BaseWeld)=value
 """
 PitchBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: PitchBelow(self: BaseWeld) -> float
 Set: PitchBelow(self: BaseWeld)=value
 """
 Placement=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Placement(self: BaseWeld) -> WeldPlacementTypeEnum
 Set: Placement(self: BaseWeld)=value
 """
 PrefixAboveLine=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: PrefixAboveLine(self: BaseWeld) -> str
 Set: PrefixAboveLine(self: BaseWeld)=value
 """
 PrefixBelowLine=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: PrefixBelowLine(self: BaseWeld) -> str
 Set: PrefixBelowLine(self: BaseWeld)=value
 """
 Preparation=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Preparation(self: BaseWeld) -> WeldPreparationTypeEnum
 Set: Preparation(self: BaseWeld)=value
 """
 ProcessType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ProcessType(self: BaseWeld) -> WeldProcessTypeEnum
 Set: ProcessType(self: BaseWeld)=value
 """
 ReferenceText=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ReferenceText(self: BaseWeld) -> str
 Set: ReferenceText(self: BaseWeld)=value
 """
 RootFaceAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: RootFaceAbove(self: BaseWeld) -> float
 Set: RootFaceAbove(self: BaseWeld)=value
 """
 RootFaceBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: RootFaceBelow(self: BaseWeld) -> float
 Set: RootFaceBelow(self: BaseWeld)=value
 """
 RootOpeningAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: RootOpeningAbove(self: BaseWeld) -> float
 Set: RootOpeningAbove(self: BaseWeld)=value
 """
 RootOpeningBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: RootOpeningBelow(self: BaseWeld) -> float
 Set: RootOpeningBelow(self: BaseWeld)=value
 """
 SecondaryObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: SecondaryObject(self: BaseWeld) -> ModelObject
 Set: SecondaryObject(self: BaseWeld)=value
 """
 ShopWeld=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ShopWeld(self: BaseWeld) -> bool
 Set: ShopWeld(self: BaseWeld)=value
 """
 SizeAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: SizeAbove(self: BaseWeld) -> float
 Set: SizeAbove(self: BaseWeld)=value
 """
 SizeBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: SizeBelow(self: BaseWeld) -> float
 Set: SizeBelow(self: BaseWeld)=value
 """
 Standard=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Standard(self: BaseWeld) -> str
 Set: Standard(self: BaseWeld)=value
 """
 StitchWeld=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: StitchWeld(self: BaseWeld) -> bool
 Set: StitchWeld(self: BaseWeld)=value
 """
 TypeAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: TypeAbove(self: BaseWeld) -> WeldTypeEnum
 Set: TypeAbove(self: BaseWeld)=value
 """
 TypeBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: TypeBelow(self: BaseWeld) -> WeldTypeEnum
 Set: TypeBelow(self: BaseWeld)=value
 """
 WeldNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: WeldNumber(self: BaseWeld) -> int
 """
 WeldNumberPrefix=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: WeldNumberPrefix(self: BaseWeld) -> str
 Set: WeldNumberPrefix(self: BaseWeld)=value
 """
 # Placeholders for the nested .NET enum types on BaseWeld.
 WeldContourEnum=None
 WeldElectrodeClassificationEnum=None
 WeldFinishEnum=None
 WeldIntermittentTypeEnum=None
 WeldNDTInspectionEnum=None
 WeldPlacementTypeEnum=None
 WeldPreparationTypeEnum=None
 WeldProcessTypeEnum=None
 WeldTypeEnum=None
# Auto-generated stub: 'Part' — base class for physical parts (Beam, BentPlate,
# Brep below derive from it). Provides queries for related objects (bolts,
# welds, pours, reinforcements, ...) and solid/centerline geometry.
class Part(ModelObject):
 """ Part() """
 def CompareTo(self,*__args):
  """ CompareTo(self: Part,partToCompare: Part) -> bool """
  pass
 def GetAssembly(self):
  """ GetAssembly(self: Part) -> Assembly """
  pass
 def GetBolts(self):
  """ GetBolts(self: Part) -> ModelObjectEnumerator """
  pass
 def GetBooleans(self):
  """ GetBooleans(self: Part) -> ModelObjectEnumerator """
  pass
 def GetCenterLine(self,withCutsFittings):
  """ GetCenterLine(self: Part,withCutsFittings: bool) -> ArrayList """
  pass
 def GetComponents(self):
  """ GetComponents(self: Part) -> ModelObjectEnumerator """
  pass
 def GetDSTVCoordinateSystem(self):
  """ GetDSTVCoordinateSystem(self: Part) -> CoordinateSystem """
  pass
 def GetPartMark(self):
  """ GetPartMark(self: Part) -> str """
  pass
 def GetPours(self):
  """ GetPours(self: Part) -> ModelObjectEnumerator """
  pass
 def GetReferenceLine(self,withCutsFittings):
  """ GetReferenceLine(self: Part,withCutsFittings: bool) -> ArrayList """
  pass
 def GetReinforcements(self):
  """ GetReinforcements(self: Part) -> ModelObjectEnumerator """
  pass
 def GetSolid(self,*__args):
  """
  GetSolid(self: Part,formingStates: FormingStates) -> Solid
  GetSolid(self: Part,solidCreationType: SolidCreationTypeEnum) -> Solid
  GetSolid(self: Part) -> Solid
  """
  pass
 def GetSurfaceObjects(self):
  """ GetSurfaceObjects(self: Part) -> ModelObjectEnumerator """
  pass
 def GetSurfaceTreatments(self):
  """ GetSurfaceTreatments(self: Part) -> ModelObjectEnumerator """
  pass
 def GetWelds(self):
  """ GetWelds(self: Part) -> ModelObjectEnumerator """
  pass
 AssemblyNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: AssemblyNumber(self: Part) -> NumberingSeries
 Set: AssemblyNumber(self: Part)=value
 """
 CastUnitType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: CastUnitType(self: Part) -> CastUnitTypeEnum
 Set: CastUnitType(self: Part)=value
 """
 Class=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Class(self: Part) -> str
 Set: Class(self: Part)=value
 """
 DeformingData=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: DeformingData(self: Part) -> DeformingData
 Set: DeformingData(self: Part)=value
 """
 Finish=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Finish(self: Part) -> str
 Set: Finish(self: Part)=value
 """
 Material=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Material(self: Part) -> Material
 Set: Material(self: Part)=value
 """
 Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Name(self: Part) -> str
 Set: Name(self: Part)=value
 """
 PartNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: PartNumber(self: Part) -> NumberingSeries
 Set: PartNumber(self: Part)=value
 """
 Position=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Position(self: Part) -> Position
 Set: Position(self: Part)=value
 """
 PourPhase=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: PourPhase(self: Part) -> int
 Set: PourPhase(self: Part)=value
 """
 Profile=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Profile(self: Part) -> Profile
 Set: Profile(self: Part)=value
 """
 # Placeholder for the nested .NET enum type Part.CastUnitTypeEnum.
 CastUnitTypeEnum=None
# Auto-generated stub: 'Beam' — a Part defined by a start and an end point
# with per-end offsets. __new__ records the .NET constructor overloads;
# the 'self' parameter name on a @staticmethod __new__ is generator style.
class Beam(Part):
 """
 Beam()
 Beam(beamType: BeamTypeEnum)
 Beam(startPoint: Point,endPoint: Point)
 """
 def Delete(self):
  """ Delete(self: Beam) -> bool """
  pass
 def Insert(self):
  """ Insert(self: Beam) -> bool """
  pass
 def Modify(self):
  """ Modify(self: Beam) -> bool """
  pass
 def Select(self):
  """ Select(self: Beam) -> bool """
  pass
 @staticmethod
 def __new__(self,*__args):
  """
  __new__(cls: type)
  __new__(cls: type,beamType: BeamTypeEnum)
  __new__(cls: type,startPoint: Point,endPoint: Point)
  """
  pass
 EndPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EndPoint(self: Beam) -> Point
 Set: EndPoint(self: Beam)=value
 """
 EndPointOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EndPointOffset(self: Beam) -> Offset
 Set: EndPointOffset(self: Beam)=value
 """
 StartPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: StartPoint(self: Beam) -> Point
 Set: StartPoint(self: Beam)=value
 """
 StartPointOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: StartPointOffset(self: Beam) -> Offset
 Set: StartPointOffset(self: Beam)=value
 """
 Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Type(self: Beam) -> BeamTypeEnum
 """
 # Placeholder for the nested .NET enum type Beam.BeamTypeEnum.
 BeamTypeEnum=None
# Auto-generated stub: 'BentPlate' — a Part whose shape is defined by a
# ConnectiveGeometry; Thickness is read-only (no Set line in its doc string).
class BentPlate(Part):
 """ BentPlate() """
 def Delete(self):
  """ Delete(self: BentPlate) -> bool """
  pass
 def Insert(self):
  """ Insert(self: BentPlate) -> bool """
  pass
 def Modify(self):
  """ Modify(self: BentPlate) -> bool """
  pass
 def Select(self):
  """ Select(self: BentPlate) -> bool """
  pass
 Geometry=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Geometry(self: BentPlate) -> ConnectiveGeometry
 Set: Geometry(self: BentPlate)=value
 """
 Thickness=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Thickness(self: BentPlate) -> float
 """
# Auto-generated stub: 'BentPlateGeometrySolver' — helper (plain object, not a
# ModelObject) whose methods build/edit a BentPlate's ConnectiveGeometry:
# add/remove legs, modify polygons, radii and cylindrical surfaces, or split.
class BentPlateGeometrySolver(object):
 """ BentPlateGeometrySolver() """
 def AddLeg(self,geometry,*__args):
  """
  AddLeg(self: BentPlateGeometrySolver,geometry: ConnectiveGeometry,segment1: LineSegment,polygon: Contour,segment2: LineSegment) -> ConnectiveGeometry
  AddLeg(self: BentPlateGeometrySolver,geometry: ConnectiveGeometry,segment1: LineSegment,polygon: Contour,segment2: LineSegment,radius: float) -> ConnectiveGeometry
  AddLeg(self: BentPlateGeometrySolver,geometry: ConnectiveGeometry,polygon: Contour) -> ConnectiveGeometry
  AddLeg(self: BentPlateGeometrySolver,geometry: ConnectiveGeometry,polygon: Contour,radius: float) -> ConnectiveGeometry
  """
  pass
 def ModifyCylindricalSurface(self,geometry,cylindricalSection,surface):
  """ ModifyCylindricalSurface(self: BentPlateGeometrySolver,geometry: ConnectiveGeometry,cylindricalSection: GeometrySection,surface: CylindricalSurface) -> ConnectiveGeometry """
  pass
 def ModifyPolygon(self,geometry,polygonSection,points):
  """ ModifyPolygon(self: BentPlateGeometrySolver,geometry: ConnectiveGeometry,polygonSection: GeometrySection,points: Contour) -> ConnectiveGeometry """
  pass
 def ModifyRadius(self,geometry,cylindricalSection,radius):
  """ ModifyRadius(self: BentPlateGeometrySolver,geometry: ConnectiveGeometry,cylindricalSection: GeometrySection,radius: float) -> ConnectiveGeometry """
  pass
 def RemoveLeg(self,geometry,legSection):
  """ RemoveLeg(self: BentPlateGeometrySolver,geometry: ConnectiveGeometry,legSection: GeometrySection) -> ConnectiveGeometry """
  pass
 def Split(self,geometry,geometrySection):
  """ Split(self: BentPlateGeometrySolver,geometry: ConnectiveGeometry,geometrySection: GeometrySection) -> IList[ConnectiveGeometry] """
  pass
 # Placeholder for the nested .NET type BentPlateGeometrySolver.OperationStatus.
 OperationStatus=None
# Auto-generated stub: 'BoltGroup' — base class for bolt groups (BoltArray,
# BoltCircle, BoltXYList below derive from it). Properties cover bolt
# size/standard/type, hole and washer/nut flags, positions and tolerances.
class BoltGroup(ModelObject):
 """ BoltGroup() """
 def AddOtherPartToBolt(self,M):
  """ AddOtherPartToBolt(self: BoltGroup,M: Part) -> bool """
  pass
 def GetFatherPour(self):
  """ GetFatherPour(self: BoltGroup) -> PourObject """
  pass
 def GetOtherPartsToBolt(self):
  """ GetOtherPartsToBolt(self: BoltGroup) -> ArrayList """
  pass
 def GetSolid(self,withHighAccuracy=None):
  """
  GetSolid(self: BoltGroup,withHighAccuracy: bool) -> Solid
  GetSolid(self: BoltGroup) -> Solid
  """
  pass
 def RemoveOtherPartToBolt(self,M):
  """ RemoveOtherPartToBolt(self: BoltGroup,M: Part) -> bool """
  pass
 Bolt=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Bolt(self: BoltGroup) -> bool
 Set: Bolt(self: BoltGroup)=value
 """
 BoltPositions=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: BoltPositions(self: BoltGroup) -> ArrayList
 """
 BoltSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: BoltSize(self: BoltGroup) -> float
 Set: BoltSize(self: BoltGroup)=value
 """
 BoltStandard=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: BoltStandard(self: BoltGroup) -> str
 Set: BoltStandard(self: BoltGroup)=value
 """
 BoltType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: BoltType(self: BoltGroup) -> BoltTypeEnum
 Set: BoltType(self: BoltGroup)=value
 """
 ConnectAssemblies=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ConnectAssemblies(self: BoltGroup) -> bool
 Set: ConnectAssemblies(self: BoltGroup)=value
 """
 CutLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: CutLength(self: BoltGroup) -> float
 Set: CutLength(self: BoltGroup)=value
 """
 EndPointOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EndPointOffset(self: BoltGroup) -> Offset
 Set: EndPointOffset(self: BoltGroup)=value
 """
 ExtraLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ExtraLength(self: BoltGroup) -> float
 Set: ExtraLength(self: BoltGroup)=value
 """
 FirstPosition=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: FirstPosition(self: BoltGroup) -> Point
 Set: FirstPosition(self: BoltGroup)=value
 """
 Hole1=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Hole1(self: BoltGroup) -> bool
 Set: Hole1(self: BoltGroup)=value
 """
 Hole2=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Hole2(self: BoltGroup) -> bool
 Set: Hole2(self: BoltGroup)=value
 """
 Hole3=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Hole3(self: BoltGroup) -> bool
 Set: Hole3(self: BoltGroup)=value
 """
 Hole4=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Hole4(self: BoltGroup) -> bool
 Set: Hole4(self: BoltGroup)=value
 """
 Hole5=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Hole5(self: BoltGroup) -> bool
 Set: Hole5(self: BoltGroup)=value
 """
 HoleType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: HoleType(self: BoltGroup) -> BoltHoleTypeEnum
 Set: HoleType(self: BoltGroup)=value
 """
 Length=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Length(self: BoltGroup) -> float
 Set: Length(self: BoltGroup)=value
 """
 Nut1=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Nut1(self: BoltGroup) -> bool
 Set: Nut1(self: BoltGroup)=value
 """
 Nut2=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Nut2(self: BoltGroup) -> bool
 Set: Nut2(self: BoltGroup)=value
 """
 OtherPartsToBolt=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: OtherPartsToBolt(self: BoltGroup) -> ArrayList
 """
 PartToBeBolted=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: PartToBeBolted(self: BoltGroup) -> Part
 Set: PartToBeBolted(self: BoltGroup)=value
 """
 PartToBoltTo=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: PartToBoltTo(self: BoltGroup) -> Part
 Set: PartToBoltTo(self: BoltGroup)=value
 """
 Position=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Position(self: BoltGroup) -> Position
 Set: Position(self: BoltGroup)=value
 """
 RotateSlots=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: RotateSlots(self: BoltGroup) -> BoltRotateSlotsEnum
 Set: RotateSlots(self: BoltGroup)=value
 """
 SecondPosition=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: SecondPosition(self: BoltGroup) -> Point
 Set: SecondPosition(self: BoltGroup)=value
 """
 Shape=property(lambda self: object(),lambda self,v: None,lambda self: None)
 SlottedHoleX=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: SlottedHoleX(self: BoltGroup) -> float
 Set: SlottedHoleX(self: BoltGroup)=value
 """
 SlottedHoleY=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: SlottedHoleY(self: BoltGroup) -> float
 Set: SlottedHoleY(self: BoltGroup)=value
 """
 StartPointOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: StartPointOffset(self: BoltGroup) -> Offset
 Set: StartPointOffset(self: BoltGroup)=value
 """
 ThreadInMaterial=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ThreadInMaterial(self: BoltGroup) -> BoltThreadInMaterialEnum
 Set: ThreadInMaterial(self: BoltGroup)=value
 """
 Tolerance=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Tolerance(self: BoltGroup) -> float
 Set: Tolerance(self: BoltGroup)=value
 """
 Washer1=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Washer1(self: BoltGroup) -> bool
 Set: Washer1(self: BoltGroup)=value
 """
 Washer2=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Washer2(self: BoltGroup) -> bool
 Set: Washer2(self: BoltGroup)=value
 """
 Washer3=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Washer3(self: BoltGroup) -> bool
 Set: Washer3(self: BoltGroup)=value
 """
 # Placeholders for the nested .NET enum types on BoltGroup.
 BoltHoleTypeEnum=None
 BoltRotateSlotsEnum=None
 BoltShapeEnum=None
 BoltThreadInMaterialEnum=None
 BoltTypeEnum=None
# Auto-generated stub: 'BoltArray' — a BoltGroup whose bolt positions are
# defined by editable lists of X and Y distances (Add/Get/Set/Remove pairs
# operate on those two distance lists by index).
class BoltArray(BoltGroup):
 """ BoltArray() """
 def AddBoltDistX(self,DistX):
  """ AddBoltDistX(self: BoltArray,DistX: float) -> bool """
  pass
 def AddBoltDistY(self,DistY):
  """ AddBoltDistY(self: BoltArray,DistY: float) -> bool """
  pass
 def Delete(self):
  """ Delete(self: BoltArray) -> bool """
  pass
 def GetBoltDistX(self,Index):
  """ GetBoltDistX(self: BoltArray,Index: int) -> float """
  pass
 def GetBoltDistXCount(self):
  """ GetBoltDistXCount(self: BoltArray) -> int """
  pass
 def GetBoltDistY(self,Index):
  """ GetBoltDistY(self: BoltArray,Index: int) -> float """
  pass
 def GetBoltDistYCount(self):
  """ GetBoltDistYCount(self: BoltArray) -> int """
  pass
 def Insert(self):
  """ Insert(self: BoltArray) -> bool """
  pass
 def Modify(self):
  """ Modify(self: BoltArray) -> bool """
  pass
 def RemoveBoltDistX(self,Index):
  """ RemoveBoltDistX(self: BoltArray,Index: int) -> bool """
  pass
 def RemoveBoltDistY(self,Index):
  """ RemoveBoltDistY(self: BoltArray,Index: int) -> bool """
  pass
 def Select(self):
  """ Select(self: BoltArray) -> bool """
  pass
 def SetBoltDistX(self,Index,DistX):
  """ SetBoltDistX(self: BoltArray,Index: int,DistX: float) -> bool """
  pass
 def SetBoltDistY(self,Index,DistY):
  """ SetBoltDistY(self: BoltArray,Index: int,DistY: float) -> bool """
  pass
 Shape=property(lambda self: object(),lambda self,v: None,lambda self: None)
# Auto-generated stub: 'BoltCircle' — a BoltGroup laid out on a circle,
# parameterized by Diameter and NumberOfBolts.
class BoltCircle(BoltGroup):
 """ BoltCircle() """
 def Delete(self):
  """ Delete(self: BoltCircle) -> bool """
  pass
 def Insert(self):
  """ Insert(self: BoltCircle) -> bool """
  pass
 def Modify(self):
  """ Modify(self: BoltCircle) -> bool """
  pass
 def Select(self):
  """ Select(self: BoltCircle) -> bool """
  pass
 Diameter=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Diameter(self: BoltCircle) -> float
 Set: Diameter(self: BoltCircle)=value
 """
 NumberOfBolts=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: NumberOfBolts(self: BoltCircle) -> float
 Set: NumberOfBolts(self: BoltCircle)=value
 """
 Shape=property(lambda self: object(),lambda self,v: None,lambda self: None)
# Auto-generated stub: 'BoltXYList' — a BoltGroup positioned by explicit
# per-bolt X/Y coordinate lists, edited via the Add/Get/Count pairs below.
class BoltXYList(BoltGroup):
 """ BoltXYList() """
 def AddBoltDistX(self,DistX):
  """ AddBoltDistX(self: BoltXYList,DistX: float) -> bool """
  pass
 def AddBoltDistY(self,DistY):
  """ AddBoltDistY(self: BoltXYList,DistY: float) -> bool """
  pass
 def Delete(self):
  """ Delete(self: BoltXYList) -> bool """
  pass
 def GetBoltDistX(self,Index):
  """ GetBoltDistX(self: BoltXYList,Index: int) -> float """
  pass
 def GetBoltDistXCount(self):
  """ GetBoltDistXCount(self: BoltXYList) -> int """
  pass
 def GetBoltDistY(self,Index):
  """ GetBoltDistY(self: BoltXYList,Index: int) -> float """
  pass
 def GetBoltDistYCount(self):
  """ GetBoltDistYCount(self: BoltXYList) -> int """
  pass
 def Insert(self):
  """ Insert(self: BoltXYList) -> bool """
  pass
 def Modify(self):
  """ Modify(self: BoltXYList) -> bool """
  pass
 def Select(self):
  """ Select(self: BoltXYList) -> bool """
  pass
 Shape=property(lambda self: object(),lambda self,v: None,lambda self: None)
class Boolean(ModelObject):
    """ Boolean() """
    # Auto-generated stub: base class for boolean (cut/fit) model operations;
    # real implementation is in the mirrored .NET assembly (unverified).
    Father=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Father(self: Boolean) -> ModelObject
    Set: Father(self: Boolean)=value
    """
class BooleanPart(Boolean):
    """ BooleanPart() """
    # Auto-generated stub: bodies are placeholders; the real implementation is in
    # the mirrored .NET assembly (appears to be Tekla Structures Open API -- unverified).
    def Delete(self):
        """ Delete(self: BooleanPart) -> bool """
        pass
    def Insert(self):
        """ Insert(self: BooleanPart) -> bool """
        pass
    def Modify(self):
        """ Modify(self: BooleanPart) -> bool """
        pass
    def Select(self):
        """ Select(self: BooleanPart) -> bool """
        pass
    def SetOperativePart(self,Part):
        """ SetOperativePart(self: BooleanPart,Part: Part) -> bool """
        pass
    OperativePart=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: OperativePart(self: BooleanPart) -> Part
    Set: OperativePart(self: BooleanPart)=value
    """
    Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Type(self: BooleanPart) -> BooleanTypeEnum
    Set: Type(self: BooleanPart)=value
    """
    BooleanOperativeClassName='BlOpCl'
    # Nested .NET enum placeholder; the stub generator emits None for nested types.
    BooleanTypeEnum=None
class Brep(Part):
    """
    Brep()
    Brep(startPoint: Point,endPoint: Point)
    """
    # Auto-generated stub: bodies are placeholders; the real implementation is in
    # the mirrored .NET assembly (appears to be Tekla Structures Open API -- unverified).
    def Delete(self):
        """ Delete(self: Brep) -> bool """
        pass
    def Insert(self):
        """ Insert(self: Brep) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Brep) -> bool """
        pass
    def Select(self):
        """ Select(self: Brep) -> bool """
        pass
    @staticmethod
    def __new__(self,startPoint=None,endPoint=None):
        """
        __new__(cls: type)
        __new__(cls: type,startPoint: Point,endPoint: Point)
        """
        pass
    EndPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndPoint(self: Brep) -> Point
    Set: EndPoint(self: Brep)=value
    """
    EndPointOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndPointOffset(self: Brep) -> Offset
    Set: EndPointOffset(self: Brep)=value
    """
    StartPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartPoint(self: Brep) -> Point
    Set: StartPoint(self: Brep)=value
    """
    StartPointOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartPointOffset(self: Brep) -> Offset
    Set: StartPointOffset(self: Brep)=value
    """
class Chamfer(object):
    """
    Chamfer()
    Chamfer(X: float,Y: float,Type: ChamferTypeEnum)
    """
    # Auto-generated stub: a value object describing a chamfer (X/Y sizes, DZ1/DZ2,
    # Type); real implementation is in the mirrored .NET assembly (unverified).
    @staticmethod
    def __new__(self,X=None,Y=None,Type=None):
        """
        __new__(cls: type)
        __new__(cls: type,X: float,Y: float,Type: ChamferTypeEnum)
        """
        pass
    DZ1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: DZ1(self: Chamfer) -> float
    Set: DZ1(self: Chamfer)=value
    """
    DZ2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: DZ2(self: Chamfer) -> float
    Set: DZ2(self: Chamfer)=value
    """
    Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Type(self: Chamfer) -> ChamferTypeEnum
    Set: Type(self: Chamfer)=value
    """
    X=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: X(self: Chamfer) -> float
    Set: X(self: Chamfer)=value
    """
    Y=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Y(self: Chamfer) -> float
    Set: Y(self: Chamfer)=value
    """
    # Nested .NET enum placeholder; the stub generator emits None for nested types.
    ChamferTypeEnum=None
class ChangeData(object):
    # Auto-generated stub: read-only record pairing a changed ModelObject with the
    # kind of change (ChangeTypeEnum); no doc in the source assembly.
    Object=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Object(self: ChangeData) -> ModelObject
    """
    Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Type(self: ChangeData) -> ChangeTypeEnum
    """
    # Nested .NET enum placeholder; the stub generator emits None for nested types.
    ChangeTypeEnum=None
class CircleRebarGroup(BaseRebarGroup):
    """ CircleRebarGroup() """
    # Auto-generated stub: bodies are placeholders; the real implementation is in
    # the mirrored .NET assembly (appears to be Tekla Structures Open API -- unverified).
    def Delete(self):
        """ Delete(self: CircleRebarGroup) -> bool """
        pass
    def Insert(self):
        """ Insert(self: CircleRebarGroup) -> bool """
        pass
    def Modify(self):
        """ Modify(self: CircleRebarGroup) -> bool """
        pass
    def Select(self):
        """ Select(self: CircleRebarGroup) -> bool """
        pass
    Polygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Polygon(self: CircleRebarGroup) -> Polygon
    Set: Polygon(self: CircleRebarGroup)=value
    """
    StirrupType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StirrupType(self: CircleRebarGroup) -> CircleRebarGroupStirrupTypeEnum
    Set: StirrupType(self: CircleRebarGroup)=value
    """
    # Nested .NET enum placeholder; the stub generator emits None for nested types.
    CircleRebarGroupStirrupTypeEnum=None
class ClashCheckData(object):
    """ ClashCheckData() """
    # Auto-generated stub: read-only record for one detected clash -- the two
    # objects involved, the overlap amount, and the clash type (unverified details).
    Object1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Object1(self: ClashCheckData) -> ModelObject
    """
    Object2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Object2(self: ClashCheckData) -> ModelObject
    """
    Overlap=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Overlap(self: ClashCheckData) -> float
    """
    Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Type(self: ClashCheckData) -> ClashTypeEnum
    """
    # Nested .NET enum placeholder; the stub generator emits None for nested types.
    ClashTypeEnum=None
class ClashCheckHandler(object):
    # Auto-generated stub: bodies are placeholders; the real implementation is in
    # the mirrored .NET assembly (no doc emitted by the generator).
    def GetIntersectionBoundingBoxes(self,ID1,ID2):
        """ GetIntersectionBoundingBoxes(self: ClashCheckHandler,ID1: Identifier,ID2: Identifier) -> ArrayList """
        pass
    def RunClashCheck(self):
        """ RunClashCheck(self: ClashCheckHandler) -> bool """
        pass
    def StopClashCheck(self):
        """ StopClashCheck(self: ClashCheckHandler) -> bool """
        pass
class Component(BaseComponent):
    """
    Component()
    Component(I: ComponentInput)
    """
    # Auto-generated stub: bodies are placeholders; the real implementation is in
    # the mirrored .NET assembly (appears to be Tekla Structures Open API -- unverified).
    def Delete(self):
        """ Delete(self: Component) -> bool """
        pass
    def GetAssembly(self):
        """ GetAssembly(self: Component) -> Assembly """
        pass
    def GetComponentInput(self):
        """ GetComponentInput(self: Component) -> ComponentInput """
        pass
    def GetComponents(self):
        """ GetComponents(self: Component) -> ModelObjectEnumerator """
        pass
    def Insert(self):
        """ Insert(self: Component) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Component) -> bool """
        pass
    def Select(self):
        """ Select(self: Component) -> bool """
        pass
    def SetComponentInput(self,I):
        """ SetComponentInput(self: Component,I: ComponentInput) -> bool """
        pass
    @staticmethod
    def __new__(self,I=None):
        """
        __new__(cls: type)
        __new__(cls: type,I: ComponentInput)
        """
        pass
    # Property placeholders emitted without Get/Set doc by the generator.
    InputPolygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    PrimaryObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    SecondaryObjects=property(lambda self: object(),lambda self,v: None,lambda self: None)
class ComponentInput(object):
    """ ComponentInput() """
    # Auto-generated stub: collection of input objects/positions for a Component.
    # Implements the .NET ICollection surface (CopyTo/GetEnumerator/Count/
    # IsSynchronized/SyncRoot) plus Python __iter__/__len__; bodies are placeholders.
    def AddInputObject(self,M):
        """ AddInputObject(self: ComponentInput,M: ModelObject) -> bool """
        pass
    def AddInputObjects(self,Objects):
        """ AddInputObjects(self: ComponentInput,Objects: ArrayList) -> bool """
        pass
    def AddInputPolygon(self,P):
        """ AddInputPolygon(self: ComponentInput,P: Polygon) -> bool """
        pass
    def AddOneInputPosition(self,P):
        """ AddOneInputPosition(self: ComponentInput,P: Point) -> bool """
        pass
    def AddTwoInputPositions(self,Position1,Position2):
        """ AddTwoInputPositions(self: ComponentInput,Position1: Point,Position2: Point) -> bool """
        pass
    def CopyTo(self,array,index):
        """ CopyTo(self: ComponentInput,array: Array,index: int) """
        pass
    def GetEnumerator(self):
        """ GetEnumerator(self: ComponentInput) -> IEnumerator """
        pass
    def __iter__(self,*args):
        """ __iter__(self: IEnumerable) -> object """
        pass
    def __len__(self,*args):
        """ x.__len__() <==> len(x) """
        pass
    Count=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Count(self: ComponentInput) -> int
    """
    IsSynchronized=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsSynchronized(self: ComponentInput) -> bool
    """
    SyncRoot=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SyncRoot(self: ComponentInput) -> object
    """
class Connection(BaseComponent):
    """ Connection() """
    # Auto-generated stub: bodies are placeholders; the real implementation is in
    # the mirrored .NET assembly (appears to be Tekla Structures Open API -- unverified).
    def Delete(self):
        """ Delete(self: Connection) -> bool """
        pass
    def GetPrimaryObject(self):
        """ GetPrimaryObject(self: Connection) -> ModelObject """
        pass
    def GetSecondaryObjects(self):
        """ GetSecondaryObjects(self: Connection) -> ArrayList """
        pass
    def Insert(self):
        """ Insert(self: Connection) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Connection) -> bool """
        pass
    def Select(self):
        """ Select(self: Connection) -> bool """
        pass
    def SetPrimaryObject(self,M):
        """ SetPrimaryObject(self: Connection,M: ModelObject) -> bool """
        pass
    def SetSecondaryObject(self,M):
        """ SetSecondaryObject(self: Connection,M: ModelObject) -> bool """
        pass
    def SetSecondaryObjects(self,Secondaries):
        """ SetSecondaryObjects(self: Connection,Secondaries: ArrayList) -> bool """
        pass
    AutoDirectionType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: AutoDirectionType(self: Connection) -> AutoDirectionTypeEnum
    Set: AutoDirectionType(self: Connection)=value
    """
    Class=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Class(self: Connection) -> int
    Set: Class(self: Connection)=value
    """
    Code=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Code(self: Connection) -> str
    Set: Code(self: Connection)=value
    """
    InputPolygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    PositionType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PositionType(self: Connection) -> PositionTypeEnum
    Set: PositionType(self: Connection)=value
    """
    PrimaryObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    SecondaryObjects=property(lambda self: object(),lambda self,v: None,lambda self: None)
    Status=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Status(self: Connection) -> ConnectionStatusEnum
    """
    UpVector=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: UpVector(self: Connection) -> Vector
    Set: UpVector(self: Connection)=value
    """
class ConnectiveGeometry(object):
    """ ConnectiveGeometry(contour: Contour) """
    # Auto-generated stub: graph-like view over a Contour's geometry sections;
    # bodies are placeholders (real implementation in the mirrored .NET assembly).
    def GetConnection(self,geometrySection1,geometrySection2):
        """ GetConnection(self: ConnectiveGeometry,geometrySection1: GeometrySection,geometrySection2: GeometrySection) -> IList[LineSegment] """
        pass
    def GetGeometryEnumerator(self):
        """ GetGeometryEnumerator(self: ConnectiveGeometry) -> GeometrySectionEnumerator """
        pass
    def GetGeometryLegSections(self):
        """ GetGeometryLegSections(self: ConnectiveGeometry) -> IList[GeometrySection] """
        pass
    def GetNeighborSections(self,geometrySection):
        """ GetNeighborSections(self: ConnectiveGeometry,geometrySection: GeometrySection) -> IList[GeometrySection] """
        pass
    def IsEmpty(self):
        """ IsEmpty(self: ConnectiveGeometry) -> bool """
        pass
    @staticmethod
    def __new__(self,contour):
        """ __new__(cls: type,contour: Contour) """
        pass
    # Sentinel constant mirrored from the .NET type.
    InvalidGeometrySectionIndex=-1
class ConnectiveGeometryException(Exception):
    """
    ConnectiveGeometryException()
    ConnectiveGeometryException(status: OperationStatus,errorMessage: str)
    """
    # Auto-generated stub: base exception for connective-geometry failures; the
    # more specific subclasses below derive from this type.
    @staticmethod
    def __new__(self,status=None,errorMessage=None):
        """
        __new__(cls: type)
        __new__(cls: type,status: OperationStatus,errorMessage: str)
        """
        pass
    # Nested .NET type placeholder; the stub generator emits None for nested types.
    OperationStatus=None
class Contour(object):
    """ Contour() """
    # Auto-generated stub: ordered list of ContourPoints with a polygon conversion;
    # bodies are placeholders (real implementation in the mirrored .NET assembly).
    def AddContourPoint(self,Point):
        """ AddContourPoint(self: Contour,Point: ContourPoint) """
        pass
    def CalculatePolygon(self,polygon):
        """ CalculatePolygon(self: Contour) -> (bool,Polygon) """
        pass
    ContourPoints=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ContourPoints(self: Contour) -> ArrayList
    Set: ContourPoints(self: Contour)=value
    """
class ContourPlate(Part):
    """ ContourPlate() """
    # Auto-generated stub: bodies are placeholders; the real implementation is in
    # the mirrored .NET assembly (appears to be Tekla Structures Open API -- unverified).
    def AddContourPoint(self,contourPoint):
        """ AddContourPoint(self: ContourPlate,contourPoint: ContourPoint) -> bool """
        pass
    def Delete(self):
        """ Delete(self: ContourPlate) -> bool """
        pass
    def Insert(self):
        """ Insert(self: ContourPlate) -> bool """
        pass
    def Modify(self):
        """ Modify(self: ContourPlate) -> bool """
        pass
    def Select(self):
        """ Select(self: ContourPlate) -> bool """
        pass
    Contour=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Contour(self: ContourPlate) -> Contour
    Set: Contour(self: ContourPlate)=value
    """
    Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Type(self: ContourPlate) -> ContourPlateTypeEnum
    """
    # Nested .NET enum placeholder; the stub generator emits None for nested types.
    ContourPlateTypeEnum=None
class ContourPoint(Point):
    """
    ContourPoint()
    ContourPoint(P: Point,C: Chamfer)
    """
    # Auto-generated stub: a Point with an associated Chamfer; bodies are
    # placeholders (real implementation in the mirrored .NET assembly).
    def SetPoint(self,P):
        """ SetPoint(self: ContourPoint,P: Point) """
        pass
    @staticmethod
    def __new__(self,P=None,C=None):
        """
        __new__(cls: type)
        __new__(cls: type,P: Point,C: Chamfer)
        """
        pass
    Chamfer=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Chamfer(self: ContourPoint) -> Chamfer
    Set: Chamfer(self: ContourPoint)=value
    """
class ControlCircle(ModelObject):
    """
    ControlCircle()
    ControlCircle(point1: Point,point2: Point,point3: Point)
    """
    # Auto-generated stub: construction circle defined by three points; bodies are
    # placeholders (real implementation in the mirrored .NET assembly).
    def Delete(self):
        """ Delete(self: ControlCircle) -> bool """
        pass
    def Insert(self):
        """ Insert(self: ControlCircle) -> bool """
        pass
    def Modify(self):
        """ Modify(self: ControlCircle) -> bool """
        pass
    def Select(self):
        """ Select(self: ControlCircle) -> bool """
        pass
    @staticmethod
    def __new__(self,point1=None,point2=None,point3=None):
        """
        __new__(cls: type)
        __new__(cls: type,point1: Point,point2: Point,point3: Point)
        """
        pass
    Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Color(self: ControlCircle) -> ControlCircleColorEnum
    Set: Color(self: ControlCircle)=value
    """
    Extension=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Extension(self: ControlCircle) -> float
    Set: Extension(self: ControlCircle)=value
    """
    Point1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Point1(self: ControlCircle) -> Point
    Set: Point1(self: ControlCircle)=value
    """
    Point2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Point2(self: ControlCircle) -> Point
    Set: Point2(self: ControlCircle)=value
    """
    Point3=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Point3(self: ControlCircle) -> Point
    Set: Point3(self: ControlCircle)=value
    """
    # Nested .NET enum placeholder; the stub generator emits None for nested types.
    ControlCircleColorEnum=None
class ControlLine(ModelObject):
    """
    ControlLine()
    ControlLine(Line: LineSegment,IsMagnetic: bool)
    """
    # Auto-generated stub: construction line (optionally magnetic); bodies are
    # placeholders (real implementation in the mirrored .NET assembly).
    def Delete(self):
        """ Delete(self: ControlLine) -> bool """
        pass
    def Insert(self):
        """ Insert(self: ControlLine) -> bool """
        pass
    def Modify(self):
        """ Modify(self: ControlLine) -> bool """
        pass
    def Select(self):
        """ Select(self: ControlLine) -> bool """
        pass
    @staticmethod
    def __new__(self,Line=None,IsMagnetic=None):
        """
        __new__(cls: type)
        __new__(cls: type,Line: LineSegment,IsMagnetic: bool)
        """
        pass
    Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Color(self: ControlLine) -> ControlLineColorEnum
    Set: Color(self: ControlLine)=value
    """
    Extension=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Extension(self: ControlLine) -> float
    Set: Extension(self: ControlLine)=value
    """
    IsMagnetic=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsMagnetic(self: ControlLine) -> bool
    Set: IsMagnetic(self: ControlLine)=value
    """
    Line=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Line(self: ControlLine) -> LineSegment
    Set: Line(self: ControlLine)=value
    """
    # Nested .NET enum placeholder; the stub generator emits None for nested types.
    ControlLineColorEnum=None
class ControlPlane(ModelObject):
    """
    ControlPlane()
    ControlPlane(P: Plane,IsMagnetic: bool)
    """
    # Auto-generated stub: construction plane (optionally magnetic); bodies are
    # placeholders (real implementation in the mirrored .NET assembly).
    def Delete(self):
        """ Delete(self: ControlPlane) -> bool """
        pass
    def Insert(self):
        """ Insert(self: ControlPlane) -> bool """
        pass
    def Modify(self):
        """ Modify(self: ControlPlane) -> bool """
        pass
    def Select(self):
        """ Select(self: ControlPlane) -> bool """
        pass
    @staticmethod
    def __new__(self,P=None,IsMagnetic=None):
        """
        __new__(cls: type)
        __new__(cls: type,P: Plane,IsMagnetic: bool)
        """
        pass
    IsMagnetic=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsMagnetic(self: ControlPlane) -> bool
    Set: IsMagnetic(self: ControlPlane)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: ControlPlane) -> str
    Set: Name(self: ControlPlane)=value
    """
    Plane=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Plane(self: ControlPlane) -> Plane
    Set: Plane(self: ControlPlane)=value
    """
class ControlPoint(ModelObject):
    """
    ControlPoint()
    ControlPoint(existPoint: Point)
    """
    # Auto-generated stub: construction point wrapping an existing Point; bodies
    # are placeholders (real implementation in the mirrored .NET assembly).
    def Delete(self):
        """ Delete(self: ControlPoint) -> bool """
        pass
    def Insert(self):
        """ Insert(self: ControlPoint) -> bool """
        pass
    def Modify(self):
        """ Modify(self: ControlPoint) -> bool """
        pass
    def Select(self):
        """ Select(self: ControlPoint) -> bool """
        pass
    @staticmethod
    def __new__(self,existPoint=None):
        """
        __new__(cls: type)
        __new__(cls: type,existPoint: Point)
        """
        pass
    Point=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Point(self: ControlPoint) -> Point
    Set: Point(self: ControlPoint)=value
    """
class CurvedRebarGroup(BaseRebarGroup):
    """ CurvedRebarGroup() """
    # Auto-generated stub: bodies are placeholders; the real implementation is in
    # the mirrored .NET assembly (appears to be Tekla Structures Open API -- unverified).
    def Delete(self):
        """ Delete(self: CurvedRebarGroup) -> bool """
        pass
    def Insert(self):
        """ Insert(self: CurvedRebarGroup) -> bool """
        pass
    def Modify(self):
        """ Modify(self: CurvedRebarGroup) -> bool """
        pass
    def Select(self):
        """ Select(self: CurvedRebarGroup) -> bool """
        pass
    Polygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Polygon(self: CurvedRebarGroup) -> Polygon
    Set: Polygon(self: CurvedRebarGroup)=value
    """
class CustomPart(BaseComponent):
    """
    CustomPart()
    CustomPart(StartPoint: Point,EndPoint: Point)
    """
    # Auto-generated stub: bodies are placeholders; the real implementation is in
    # the mirrored .NET assembly (appears to be Tekla Structures Open API -- unverified).
    def Delete(self):
        """ Delete(self: CustomPart) -> bool """
        pass
    def GetAssembly(self):
        """ GetAssembly(self: CustomPart) -> Assembly """
        pass
    def GetComponents(self):
        """ GetComponents(self: CustomPart) -> ModelObjectEnumerator """
        pass
    def GetStartAndEndPositions(self,StartPoint,EndPoint):
        """ GetStartAndEndPositions(self: CustomPart,StartPoint: Point,EndPoint: Point) -> (bool,Point,Point) """
        pass
    def Insert(self):
        """ Insert(self: CustomPart) -> bool """
        pass
    def Modify(self):
        """ Modify(self: CustomPart) -> bool """
        pass
    def Select(self):
        """ Select(self: CustomPart) -> bool """
        pass
    def SetInputPositions(self,StartPoint,EndPoint):
        """ SetInputPositions(self: CustomPart,StartPoint: Point,EndPoint: Point) -> bool """
        pass
    @staticmethod
    def __new__(self,StartPoint=None,EndPoint=None):
        """
        __new__(cls: type)
        __new__(cls: type,StartPoint: Point,EndPoint: Point)
        """
        pass
    InputPolygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    Position=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position(self: CustomPart) -> Position
    Set: Position(self: CustomPart)=value
    """
    PrimaryObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    SecondaryObjects=property(lambda self: object(),lambda self,v: None,lambda self: None)
class CutPlane(Boolean):
    """ CutPlane() """
    # Auto-generated stub: boolean operation that cuts by a plane; bodies are
    # placeholders (real implementation in the mirrored .NET assembly).
    def Delete(self):
        """ Delete(self: CutPlane) -> bool """
        pass
    def Insert(self):
        """ Insert(self: CutPlane) -> bool """
        pass
    def Modify(self):
        """ Modify(self: CutPlane) -> bool """
        pass
    def Select(self):
        """ Select(self: CutPlane) -> bool """
        pass
    Plane=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Plane(self: CutPlane) -> Plane
    Set: Plane(self: CutPlane)=value
    """
class CylindricalSurface(object):
    """ CylindricalSurface(endFaceNormal1: Vector,endFaceNormal2: Vector,sideBoundary1: LineSegment,sideBoundary2: LineSegment) """
    # Auto-generated stub: geometric description of a cylindrical surface segment;
    # bodies are placeholders (real implementation in the mirrored .NET assembly).
    @staticmethod
    def __new__(self,endFaceNormal1,endFaceNormal2,sideBoundary1,sideBoundary2):
        """ __new__(cls: type,endFaceNormal1: Vector,endFaceNormal2: Vector,sideBoundary1: LineSegment,sideBoundary2: LineSegment) """
        pass
    EndFaceNormal1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndFaceNormal1(self: CylindricalSurface) -> Vector
    """
    EndFaceNormal2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndFaceNormal2(self: CylindricalSurface) -> Vector
    """
    IntersectionLine=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IntersectionLine(self: CylindricalSurface) -> Line
    """
    InwardCurved=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: InwardCurved(self: CylindricalSurface) -> bool
    """
    Radius=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Radius(self: CylindricalSurface) -> float
    """
    SideBoundary1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SideBoundary1(self: CylindricalSurface) -> LineSegment
    Set: SideBoundary1(self: CylindricalSurface)=value
    """
    SideBoundary2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SideBoundary2(self: CylindricalSurface) -> LineSegment
    Set: SideBoundary2(self: CylindricalSurface)=value
    """
class CylindricalSurfaceNode(object):
    """ CylindricalSurfaceNode(surface: CylindricalSurface) """
    # Auto-generated stub: visitor-pattern geometry node wrapping a
    # CylindricalSurface; bodies are placeholders.
    def AcceptVisitor(self,visitor):
        """ AcceptVisitor(self: CylindricalSurfaceNode,visitor: IGeometryNodeVisitor) """
        pass
    def Clone(self):
        """ Clone(self: CylindricalSurfaceNode) -> IGeometryNode """
        pass
    @staticmethod
    def __new__(self,surface):
        """ __new__(cls: type,surface: CylindricalSurface) """
        pass
    IsAutomatic=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsAutomatic(self: CylindricalSurfaceNode) -> bool
    """
    Surface=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Surface(self: CylindricalSurfaceNode) -> CylindricalSurface
    """
class DeformingData(object):
    """ DeformingData() """
    # Auto-generated stub: value object holding deformation parameters
    # (angles, cambering, shortening); property placeholders only.
    Angle=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Angle(self: DeformingData) -> float
    Set: Angle(self: DeformingData)=value
    """
    Angle2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Angle2(self: DeformingData) -> float
    Set: Angle2(self: DeformingData)=value
    """
    Cambering=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Cambering(self: DeformingData) -> float
    Set: Cambering(self: DeformingData)=value
    """
    Shortening=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Shortening(self: DeformingData) -> float
    Set: Shortening(self: DeformingData)=value
    """
class Detail(BaseComponent):
    """ Detail() """
    # Auto-generated stub: bodies are placeholders; the real implementation is in
    # the mirrored .NET assembly (appears to be Tekla Structures Open API -- unverified).
    def Delete(self):
        """ Delete(self: Detail) -> bool """
        pass
    def GetPrimaryObject(self):
        """ GetPrimaryObject(self: Detail) -> ModelObject """
        pass
    def GetReferencePoint(self):
        """ GetReferencePoint(self: Detail) -> Point """
        pass
    def Insert(self):
        """ Insert(self: Detail) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Detail) -> bool """
        pass
    def Select(self):
        """ Select(self: Detail) -> bool """
        pass
    def SetPrimaryObject(self,M):
        """ SetPrimaryObject(self: Detail,M: ModelObject) -> bool """
        pass
    def SetReferencePoint(self,ReferencePoint):
        """ SetReferencePoint(self: Detail,ReferencePoint: Point) -> bool """
        pass
    AutoDirectionType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: AutoDirectionType(self: Detail) -> AutoDirectionTypeEnum
    Set: AutoDirectionType(self: Detail)=value
    """
    Class=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Class(self: Detail) -> int
    Set: Class(self: Detail)=value
    """
    Code=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Code(self: Detail) -> str
    Set: Code(self: Detail)=value
    """
    DetailType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: DetailType(self: Detail) -> DetailTypeEnum
    Set: DetailType(self: Detail)=value
    """
    InputPolygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    PositionType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PositionType(self: Detail) -> PositionTypeEnum
    Set: PositionType(self: Detail)=value
    """
    PrimaryObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    SecondaryObjects=property(lambda self: object(),lambda self,v: None,lambda self: None)
    Status=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Status(self: Detail) -> ConnectionStatusEnum
    """
    UpVector=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: UpVector(self: Detail) -> Vector
    Set: UpVector(self: Detail)=value
    """
class EdgeChamfer(Boolean):
    """
    EdgeChamfer()
    EdgeChamfer(FirstEnd: Point,SecondEnd: Point)
    """
    # Auto-generated stub: boolean edge-chamfer operation between two end points;
    # bodies are placeholders (real implementation in the mirrored .NET assembly).
    def Delete(self):
        """ Delete(self: EdgeChamfer) -> bool """
        pass
    def Insert(self):
        """ Insert(self: EdgeChamfer) -> bool """
        pass
    def Modify(self):
        """ Modify(self: EdgeChamfer) -> bool """
        pass
    def Select(self):
        """ Select(self: EdgeChamfer) -> bool """
        pass
    @staticmethod
    def __new__(self,FirstEnd=None,SecondEnd=None):
        """
        __new__(cls: type)
        __new__(cls: type,FirstEnd: Point,SecondEnd: Point)
        """
        pass
    Chamfer=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Chamfer(self: EdgeChamfer) -> Chamfer
    Set: Chamfer(self: EdgeChamfer)=value
    """
    FirstBevelDimension=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: FirstBevelDimension(self: EdgeChamfer) -> float
    Set: FirstBevelDimension(self: EdgeChamfer)=value
    """
    FirstChamferEndType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: FirstChamferEndType(self: EdgeChamfer) -> ChamferEndTypeEnum
    Set: FirstChamferEndType(self: EdgeChamfer)=value
    """
    FirstEnd=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: FirstEnd(self: EdgeChamfer) -> Point
    Set: FirstEnd(self: EdgeChamfer)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: EdgeChamfer) -> str
    Set: Name(self: EdgeChamfer)=value
    """
    SecondBevelDimension=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SecondBevelDimension(self: EdgeChamfer) -> float
    Set: SecondBevelDimension(self: EdgeChamfer)=value
    """
    SecondChamferEndType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SecondChamferEndType(self: EdgeChamfer) -> ChamferEndTypeEnum
    Set: SecondChamferEndType(self: EdgeChamfer)=value
    """
    SecondEnd=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SecondEnd(self: EdgeChamfer) -> Point
    Set: SecondEnd(self: EdgeChamfer)=value
    """
    # Nested .NET enum placeholder; the stub generator emits None for nested types.
    ChamferEndTypeEnum=None
class Events(MarshalByRefObject):
    """ Events() """
    # Auto-generated stub: remoting-based event hub -- On* callbacks, Register/
    # UnRegister, and event/delegate placeholders (set to None by the generator).
    # Real implementation is in the mirrored .NET assembly (unverified).
    def InitializeLifetimeService(self):
        """ InitializeLifetimeService(self: Events) -> object """
        pass
    def OnClashCheckDone(self,eventName,parameters):
        """ OnClashCheckDone(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnClashDetected(self,eventName,parameters):
        """ OnClashDetected(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnCommandStatusChange(self,eventName,parameters):
        """ OnCommandStatusChange(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnDbCommit(self,eventName,parameters):
        """ OnDbCommit(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnModelLoad(self,eventName,parameters):
        """ OnModelLoad(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnModelObjectChanged(self,eventName,parameters):
        """ OnModelObjectChanged(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnModelObjectNumbered(self,eventName,parameters):
        """ OnModelObjectNumbered(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnModelSave(self,eventName,parameters):
        """ OnModelSave(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnModelSaveAs(self,eventName,parameters):
        """ OnModelSaveAs(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnNumbering(self,eventName,parameters):
        """ OnNumbering(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnSelectionChange(self,eventName,parameters):
        """ OnSelectionChange(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnTeklaStructuresExit(self,eventName,parameters):
        """ OnTeklaStructuresExit(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def OnTrackEvent(self,eventName,parameters):
        """ OnTrackEvent(self: Events,eventName: str,*parameters: Array[object]) """
        pass
    def Register(self):
        """ Register(self: Events) """
        pass
    def UnRegister(self):
        """ UnRegister(self: Events) """
        pass
    # .NET event and delegate placeholders (populated at runtime in the real assembly).
    ClashCheckDone=None
    ClashCheckDoneDelegate=None
    ClashDetected=None
    ClashDetectedDelegate=None
    CommandStatusChange=None
    CommandStatusChangeDelegate=None
    ModelChanged=None
    ModelChangedDelegate=None
    ModelLoad=None
    ModelLoadDelegate=None
    ModelObjectChanged=None
    ModelObjectChangedDelegate=None
    ModelObjectNumbered=None
    ModelObjectNumberedDelegate=None
    ModelSave=None
    ModelSaveAs=None
    ModelSaveAsDelegate=None
    ModelSaveDelegate=None
    Numbering=None
    NumberingDelegate=None
    SelectionChange=None
    SelectionChangeDelegate=None
    TeklaStructuresExit=None
    TeklaStructuresExitDelegate=None
    TrackEvent=None
    TrackEventDelegate=None
# Marker subclass of ConnectiveGeometryException (no added members).
class ExtensionIntersectsWithPlateException(ConnectiveGeometryException):
    """ ExtensionIntersectsWithPlateException() """
# Marker subclass of ConnectiveGeometryException (no added members).
class FacePerpendicularToIntersectionLineException(ConnectiveGeometryException):
    """ FacePerpendicularToIntersectionLineException() """
# Marker subclass of ConnectiveGeometryException (no added members).
class FacesAtAnObtuseAngleException(ConnectiveGeometryException):
    """ FacesAtAnObtuseAngleException() """
# Marker subclass of ConnectiveGeometryException (no added members).
class FacesTooNearEachOtherException(ConnectiveGeometryException):
    """ FacesTooNearEachOtherException() """
class Fitting(Boolean):
    """ Fitting() """
    # Auto-generated stub: boolean fitting operation defined by a plane; bodies
    # are placeholders (real implementation in the mirrored .NET assembly).
    def Delete(self):
        """ Delete(self: Fitting) -> bool """
        pass
    def Insert(self):
        """ Insert(self: Fitting) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Fitting) -> bool """
        pass
    def Select(self):
        """ Select(self: Fitting) -> bool """
        pass
    Plane=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Plane(self: Fitting) -> Plane
    Set: Plane(self: Fitting)=value
    """
# Marker subclass of ConnectiveGeometryException (no added members).
class GeneralConnectiveGeometryException(ConnectiveGeometryException):
    """ GeneralConnectiveGeometryException() """
class GeometrySection(object):
    # Auto-generated stub: read-only pair of a geometry node and its index
    # (no doc emitted by the generator).
    GeometryNode=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: GeometryNode(self: GeometrySection) -> IGeometryNode
    """
    Index=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Index(self: GeometrySection) -> int
    """
class GeometrySectionEnumerator(object):
    # Auto-generated stub: .NET IEnumerator surface (MoveNext/Reset/Current) plus
    # Python iteration hooks; bodies are placeholders.
    def MoveNext(self):
        """ MoveNext(self: GeometrySectionEnumerator) -> bool """
        pass
    def next(self,*args):
        """ next(self: object) -> object """
        pass
    def Reset(self):
        """ Reset(self: GeometrySectionEnumerator) """
        pass
    def __iter__(self,*args):
        """ __iter__(self: IEnumerator) -> object """
        pass
    Current=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Current(self: GeometrySectionEnumerator) -> GeometrySection
    """
# Generated stub for a model object with the standard CRUD quartet
# (Delete/Insert/Modify/Select, all `pass` placeholders) and placeholder
# property descriptors whose real get/set types are recorded in the
# docstrings that follow each property.
class Grid(ModelObject):
    """ Grid() """
    def Delete(self):
        """ Delete(self: Grid) -> bool """
        pass
    def Insert(self):
        """ Insert(self: Grid) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Grid) -> bool """
        pass
    def Select(self):
        """ Select(self: Grid) -> bool """
        pass
    Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Color(self: Grid) -> int
    Set: Color(self: Grid)=value
    """
    CoordinateX=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CoordinateX(self: Grid) -> str
    Set: CoordinateX(self: Grid)=value
    """
    CoordinateY=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CoordinateY(self: Grid) -> str
    Set: CoordinateY(self: Grid)=value
    """
    CoordinateZ=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CoordinateZ(self: Grid) -> str
    Set: CoordinateZ(self: Grid)=value
    """
    ExtensionForMagneticArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionForMagneticArea(self: Grid) -> float
    Set: ExtensionForMagneticArea(self: Grid)=value
    """
    ExtensionLeftX=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionLeftX(self: Grid) -> float
    Set: ExtensionLeftX(self: Grid)=value
    """
    ExtensionLeftY=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionLeftY(self: Grid) -> float
    Set: ExtensionLeftY(self: Grid)=value
    """
    ExtensionLeftZ=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionLeftZ(self: Grid) -> float
    Set: ExtensionLeftZ(self: Grid)=value
    """
    ExtensionRightX=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionRightX(self: Grid) -> float
    Set: ExtensionRightX(self: Grid)=value
    """
    ExtensionRightY=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionRightY(self: Grid) -> float
    Set: ExtensionRightY(self: Grid)=value
    """
    ExtensionRightZ=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionRightZ(self: Grid) -> float
    Set: ExtensionRightZ(self: Grid)=value
    """
    IsMagnetic=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsMagnetic(self: Grid) -> bool
    Set: IsMagnetic(self: Grid)=value
    """
    LabelX=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LabelX(self: Grid) -> str
    Set: LabelX(self: Grid)=value
    """
    LabelY=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LabelY(self: Grid) -> str
    Set: LabelY(self: Grid)=value
    """
    LabelZ=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LabelZ(self: Grid) -> str
    Set: LabelZ(self: Grid)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: Grid) -> str
    Set: Name(self: Grid)=value
    """
# Generated stub for a model object. Two constructor overloads are documented
# in the class docstring and mirrored by the static `__new__` placeholder;
# CRUD methods and property descriptors are placeholders as in the rest of
# this generated file.
class GridPlane(ModelObject):
    """
    GridPlane()
    GridPlane(Plane: Plane,Label: str)
    """
    def Delete(self):
        """ Delete(self: GridPlane) -> bool """
        pass
    def Insert(self):
        """ Insert(self: GridPlane) -> bool """
        pass
    def Modify(self):
        """ Modify(self: GridPlane) -> bool """
        pass
    def Select(self):
        """ Select(self: GridPlane) -> bool """
        pass
    @staticmethod
    def __new__(self,Plane=None,Label=None):
        """
        __new__(cls: type)
        __new__(cls: type,Plane: Plane,Label: str)
        """
        pass
    Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Color(self: GridPlane) -> int
    Set: Color(self: GridPlane)=value
    """
    DrawingVisibility=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: DrawingVisibility(self: GridPlane) -> bool
    Set: DrawingVisibility(self: GridPlane)=value
    """
    ExtensionAbove=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionAbove(self: GridPlane) -> float
    Set: ExtensionAbove(self: GridPlane)=value
    """
    ExtensionBelow=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionBelow(self: GridPlane) -> float
    Set: ExtensionBelow(self: GridPlane)=value
    """
    ExtensionForMagneticArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionForMagneticArea(self: GridPlane) -> float
    Set: ExtensionForMagneticArea(self: GridPlane)=value
    """
    ExtensionLeft=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionLeft(self: GridPlane) -> float
    Set: ExtensionLeft(self: GridPlane)=value
    """
    ExtensionRight=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtensionRight(self: GridPlane) -> float
    Set: ExtensionRight(self: GridPlane)=value
    """
    Father=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Father(self: GridPlane) -> Grid
    Set: Father(self: GridPlane)=value
    """
    IsMagnetic=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsMagnetic(self: GridPlane) -> bool
    Set: IsMagnetic(self: GridPlane)=value
    """
    Label=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Label(self: GridPlane) -> str
    Set: Label(self: GridPlane)=value
    """
    Plane=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Plane(self: GridPlane) -> Plane
    Set: Plane(self: GridPlane)=value
    """
# Generated stub for a hierarchy-definition model object: CRUD placeholders
# plus AddObjects/RemoveObjects taking an ArrayList, and a static `__new__`
# mirroring two constructor overloads. Properties are placeholder descriptors.
class HierarchicDefinition(ModelObject):
    """
    HierarchicDefinition()
    HierarchicDefinition(ID: Identifier)
    """
    def AddObjects(self,Objects):
        """ AddObjects(self: HierarchicDefinition,Objects: ArrayList) -> bool """
        pass
    def Delete(self):
        """ Delete(self: HierarchicDefinition) -> bool """
        pass
    def Insert(self):
        """ Insert(self: HierarchicDefinition) -> bool """
        pass
    def Modify(self):
        """ Modify(self: HierarchicDefinition) -> bool """
        pass
    def RemoveObjects(self,Objects):
        """ RemoveObjects(self: HierarchicDefinition,Objects: ArrayList) -> bool """
        pass
    def Select(self):
        """ Select(self: HierarchicDefinition) -> bool """
        pass
    @staticmethod
    def __new__(self,ID=None):
        """
        __new__(cls: type)
        __new__(cls: type,ID: Identifier)
        """
        pass
    CustomType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CustomType(self: HierarchicDefinition) -> str
    Set: CustomType(self: HierarchicDefinition)=value
    """
    Drawable=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Drawable(self: HierarchicDefinition) -> bool
    Set: Drawable(self: HierarchicDefinition)=value
    """
    Father=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Father(self: HierarchicDefinition) -> HierarchicDefinition
    Set: Father(self: HierarchicDefinition)=value
    """
    HierarchicChildren=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: HierarchicChildren(self: HierarchicDefinition) -> ArrayList
    Set: HierarchicChildren(self: HierarchicDefinition)=value
    """
    HierarchyIdentifier=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: HierarchyIdentifier(self: HierarchicDefinition) -> str
    Set: HierarchyIdentifier(self: HierarchicDefinition)=value
    """
    HierarchyType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: HierarchyType(self: HierarchicDefinition) -> HierarchicDefinitionTypeEnum
    Set: HierarchyType(self: HierarchicDefinition)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: HierarchicDefinition) -> str
    Set: Name(self: HierarchicDefinition)=value
    """
# Generated stub for a .NET enum: member values are listed in the docstring;
# the attributes are `None` placeholders (real values come from the assembly).
class HierarchicDefinitionTypeEnum(Enum):
    """ enum HierarchicDefinitionTypeEnum,values: DOT_HIERARCHIC_CUSTOM_TYPE (0),DOT_HIERARCHIC_LOGICAL_BUILDING_AREA (1),DOT_HIERARCHIC_OBJECT_TYPE (2),DOT_HIERARCHIC_TASK_SCENARIO (4),DOT_HIERARCHIC_TASK_WORK_TYPE (3) """
    DOT_HIERARCHIC_CUSTOM_TYPE=None
    DOT_HIERARCHIC_LOGICAL_BUILDING_AREA=None
    DOT_HIERARCHIC_OBJECT_TYPE=None
    DOT_HIERARCHIC_TASK_SCENARIO=None
    DOT_HIERARCHIC_TASK_WORK_TYPE=None
    value__=None
# Generated stub paralleling HierarchicDefinition: CRUD plus
# AddObjects/RemoveObjects placeholders, static `__new__` for two
# constructor overloads, and placeholder property descriptors.
class HierarchicObject(ModelObject):
    """
    HierarchicObject()
    HierarchicObject(ID: Identifier)
    """
    def AddObjects(self,Objects):
        """ AddObjects(self: HierarchicObject,Objects: ArrayList) -> bool """
        pass
    def Delete(self):
        """ Delete(self: HierarchicObject) -> bool """
        pass
    def Insert(self):
        """ Insert(self: HierarchicObject) -> bool """
        pass
    def Modify(self):
        """ Modify(self: HierarchicObject) -> bool """
        pass
    def RemoveObjects(self,Objects):
        """ RemoveObjects(self: HierarchicObject,Objects: ArrayList) -> bool """
        pass
    def Select(self):
        """ Select(self: HierarchicObject) -> bool """
        pass
    @staticmethod
    def __new__(self,ID=None):
        """
        __new__(cls: type)
        __new__(cls: type,ID: Identifier)
        """
        pass
    Definition=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Definition(self: HierarchicObject) -> HierarchicDefinition
    Set: Definition(self: HierarchicObject)=value
    """
    Father=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Father(self: HierarchicObject) -> HierarchicObject
    Set: Father(self: HierarchicObject)=value
    """
    HierarchicChildren=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: HierarchicChildren(self: HierarchicObject) -> ArrayList
    Set: HierarchicChildren(self: HierarchicObject)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: HierarchicObject) -> str
    Set: Name(self: HierarchicObject)=value
    """
# Generated stub for a .NET interface: one method placeholder plus the
# generator's boilerplate __init__.
class IAssemblable:
    # no doc
    def GetAssembly(self):
        """ GetAssembly(self: IAssemblable) -> Assembly """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
# Generated stub for a visitor-pattern node interface: AcceptVisitor/Clone
# placeholders plus a read-only IsAutomatic property placeholder.
class IGeometryNode:
    # no doc
    def AcceptVisitor(self,visitor):
        """ AcceptVisitor(self: IGeometryNode,visitor: IGeometryNodeVisitor) """
        pass
    def Clone(self):
        """ Clone(self: IGeometryNode) -> IGeometryNode """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    IsAutomatic=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsAutomatic(self: IGeometryNode) -> bool
    """
# Generated stub for the visitor interface; the Visit docstring records the
# two .NET overloads (CylindricalSurfaceNode / PolygonNode) fused together.
class IGeometryNodeVisitor:
    # no doc
    def Visit(self,node):
        """ Visit(self: IGeometryNodeVisitor,node: CylindricalSurfaceNode)Visit(self: IGeometryNodeVisitor,node: PolygonNode) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
# Generated stub: two accessor placeholders; InputTypeEnum is a None
# placeholder for a nested .NET enum type.
class InputItem(object):
    # no doc
    def GetData(self):
        """ GetData(self: InputItem) -> object """
        pass
    def GetInputType(self):
        """ GetInputType(self: InputItem) -> InputTypeEnum """
        pass
    InputTypeEnum=None
# Generated stub: marker exception subtype; no members beyond those inherited.
class InvalidFacePointsException(ConnectiveGeometryException):
    """ InvalidFacePointsException() """
# Generated stub: marker exception subtype; no members beyond those inherited.
class InvalidRadiusException(ConnectiveGeometryException):
    """ InvalidRadiusException() """
# Generated stub: common base for the Load* family below (LoadArea, LoadLine,
# LoadPoint, LoadTemperature, LoadUniform). Contains only placeholder property
# descriptors; the trailing None attributes are placeholders for nested enum
# types (LoadAttachmentEnum, LoadPartNamesEnum, LoadSpanningEnum).
class Load(ModelObject):
    """ Load() """
    AutomaticPrimaryAxisWeight=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: AutomaticPrimaryAxisWeight(self: Load) -> bool
    Set: AutomaticPrimaryAxisWeight(self: Load)=value
    """
    BoundingBoxDx=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: BoundingBoxDx(self: Load) -> float
    Set: BoundingBoxDx(self: Load)=value
    """
    BoundingBoxDy=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: BoundingBoxDy(self: Load) -> float
    Set: BoundingBoxDy(self: Load)=value
    """
    BoundingBoxDz=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: BoundingBoxDz(self: Load) -> float
    Set: BoundingBoxDz(self: Load)=value
    """
    CreateFixedSupportConditionsAutomatically=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CreateFixedSupportConditionsAutomatically(self: Load) -> bool
    Set: CreateFixedSupportConditionsAutomatically(self: Load)=value
    """
    FatherId=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: FatherId(self: Load) -> Identifier
    Set: FatherId(self: Load)=value
    """
    Group=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Group(self: Load) -> LoadGroup
    Set: Group(self: Load)=value
    """
    LoadAttachment=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LoadAttachment(self: Load) -> LoadAttachmentEnum
    Set: LoadAttachment(self: Load)=value
    """
    LoadDispersionAngle=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LoadDispersionAngle(self: Load) -> float
    Set: LoadDispersionAngle(self: Load)=value
    """
    PartFilter=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PartFilter(self: Load) -> str
    Set: PartFilter(self: Load)=value
    """
    PartNames=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PartNames(self: Load) -> LoadPartNamesEnum
    Set: PartNames(self: Load)=value
    """
    PrimaryAxisDirection=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PrimaryAxisDirection(self: Load) -> Vector
    Set: PrimaryAxisDirection(self: Load)=value
    """
    Spanning=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Spanning(self: Load) -> LoadSpanningEnum
    Set: Spanning(self: Load)=value
    """
    Weight=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Weight(self: Load) -> float
    Set: Weight(self: Load)=value
    """
    LoadAttachmentEnum=None
    LoadPartNamesEnum=None
    LoadSpanningEnum=None
# Generated stub: area load subtype of Load. CRUD placeholders, placeholder
# property descriptors, and a None placeholder for the nested
# AreaLoadFormEnum type.
class LoadArea(Load):
    """ LoadArea() """
    def Delete(self):
        """ Delete(self: LoadArea) -> bool """
        pass
    def Insert(self):
        """ Insert(self: LoadArea) -> bool """
        pass
    def Modify(self):
        """ Modify(self: LoadArea) -> bool """
        pass
    def Select(self):
        """ Select(self: LoadArea) -> bool """
        pass
    DistanceA=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: DistanceA(self: LoadArea) -> float
    Set: DistanceA(self: LoadArea)=value
    """
    LoadForm=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LoadForm(self: LoadArea) -> AreaLoadFormEnum
    Set: LoadForm(self: LoadArea)=value
    """
    P1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: P1(self: LoadArea) -> Vector
    Set: P1(self: LoadArea)=value
    """
    P2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: P2(self: LoadArea) -> Vector
    Set: P2(self: LoadArea)=value
    """
    P3=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: P3(self: LoadArea) -> Vector
    Set: P3(self: LoadArea)=value
    """
    P4=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: P4(self: LoadArea) -> Vector
    Set: P4(self: LoadArea)=value
    """
    Position1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position1(self: LoadArea) -> Point
    Set: Position1(self: LoadArea)=value
    """
    Position2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position2(self: LoadArea) -> Point
    Set: Position2(self: LoadArea)=value
    """
    Position3=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position3(self: LoadArea) -> Point
    Set: Position3(self: LoadArea)=value
    """
    AreaLoadFormEnum=None
# Generated stub: load-group model object. CRUD placeholders, placeholder
# property descriptors, and None placeholders for three nested types
# (Colors, LoadGroupDirection, LoadGroupType).
class LoadGroup(ModelObject):
    """ LoadGroup() """
    def Delete(self):
        """ Delete(self: LoadGroup) -> bool """
        pass
    def Insert(self):
        """ Insert(self: LoadGroup) -> bool """
        pass
    def Modify(self):
        """ Modify(self: LoadGroup) -> bool """
        pass
    def Select(self):
        """ Select(self: LoadGroup) -> bool """
        pass
    Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Color(self: LoadGroup) -> Colors
    Set: Color(self: LoadGroup)=value
    """
    Compatible=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Compatible(self: LoadGroup) -> int
    Set: Compatible(self: LoadGroup)=value
    """
    Direction=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Direction(self: LoadGroup) -> LoadGroupDirection
    Set: Direction(self: LoadGroup)=value
    """
    GroupName=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: GroupName(self: LoadGroup) -> str
    Set: GroupName(self: LoadGroup)=value
    """
    GroupType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: GroupType(self: LoadGroup) -> LoadGroupType
    Set: GroupType(self: LoadGroup)=value
    """
    Incompatible=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Incompatible(self: LoadGroup) -> int
    Set: Incompatible(self: LoadGroup)=value
    """
    Colors=None
    LoadGroupDirection=None
    LoadGroupType=None
# Generated stub: line load subtype of Load. CRUD placeholders, placeholder
# property descriptors, and a None placeholder for the nested
# LineLoadFormEnum type.
class LoadLine(Load):
    """ LoadLine() """
    def Delete(self):
        """ Delete(self: LoadLine) -> bool """
        pass
    def Insert(self):
        """ Insert(self: LoadLine) -> bool """
        pass
    def Modify(self):
        """ Modify(self: LoadLine) -> bool """
        pass
    def Select(self):
        """ Select(self: LoadLine) -> bool """
        pass
    DistanceA=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: DistanceA(self: LoadLine) -> float
    Set: DistanceA(self: LoadLine)=value
    """
    DistanceB=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: DistanceB(self: LoadLine) -> float
    Set: DistanceB(self: LoadLine)=value
    """
    LoadForm=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LoadForm(self: LoadLine) -> LineLoadFormEnum
    Set: LoadForm(self: LoadLine)=value
    """
    P1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: P1(self: LoadLine) -> Vector
    Set: P1(self: LoadLine)=value
    """
    P2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: P2(self: LoadLine) -> Vector
    Set: P2(self: LoadLine)=value
    """
    Position1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position1(self: LoadLine) -> Point
    Set: Position1(self: LoadLine)=value
    """
    Position2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position2(self: LoadLine) -> Point
    Set: Position2(self: LoadLine)=value
    """
    Torsion1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Torsion1(self: LoadLine) -> float
    Set: Torsion1(self: LoadLine)=value
    """
    Torsion2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Torsion2(self: LoadLine) -> float
    Set: Torsion2(self: LoadLine)=value
    """
    LineLoadFormEnum=None
# Generated stub: point load subtype of Load. CRUD placeholders plus
# placeholder property descriptors.
class LoadPoint(Load):
    """ LoadPoint() """
    def Delete(self):
        """ Delete(self: LoadPoint) -> bool """
        pass
    def Insert(self):
        """ Insert(self: LoadPoint) -> bool """
        pass
    def Modify(self):
        """ Modify(self: LoadPoint) -> bool """
        pass
    def Select(self):
        """ Select(self: LoadPoint) -> bool """
        pass
    Moment=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Moment(self: LoadPoint) -> Vector
    Set: Moment(self: LoadPoint)=value
    """
    P=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: P(self: LoadPoint) -> Vector
    Set: P(self: LoadPoint)=value
    """
    Position=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position(self: LoadPoint) -> Point
    Set: Position(self: LoadPoint)=value
    """
# Generated stub: temperature load subtype of Load. CRUD placeholders plus
# placeholder property descriptors (several shadow same-named properties on
# the Load base, as the generator emitted them per-type).
class LoadTemperature(Load):
    """ LoadTemperature() """
    def Delete(self):
        """ Delete(self: LoadTemperature) -> bool """
        pass
    def Insert(self):
        """ Insert(self: LoadTemperature) -> bool """
        pass
    def Modify(self):
        """ Modify(self: LoadTemperature) -> bool """
        pass
    def Select(self):
        """ Select(self: LoadTemperature) -> bool """
        pass
    AutomaticPrimaryAxisWeight=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: AutomaticPrimaryAxisWeight(self: LoadTemperature) -> bool
    Set: AutomaticPrimaryAxisWeight(self: LoadTemperature)=value
    """
    CreateFixedSupportConditionsAutomatically=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CreateFixedSupportConditionsAutomatically(self: LoadTemperature) -> bool
    Set: CreateFixedSupportConditionsAutomatically(self: LoadTemperature)=value
    """
    InitialAxialElongation=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: InitialAxialElongation(self: LoadTemperature) -> float
    Set: InitialAxialElongation(self: LoadTemperature)=value
    """
    LoadDispersionAngle=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LoadDispersionAngle(self: LoadTemperature) -> float
    Set: LoadDispersionAngle(self: LoadTemperature)=value
    """
    Position1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position1(self: LoadTemperature) -> Point
    Set: Position1(self: LoadTemperature)=value
    """
    Position2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position2(self: LoadTemperature) -> Point
    Set: Position2(self: LoadTemperature)=value
    """
    PrimaryAxisDirection=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PrimaryAxisDirection(self: LoadTemperature) -> Vector
    Set: PrimaryAxisDirection(self: LoadTemperature)=value
    """
    Spanning=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Spanning(self: LoadTemperature) -> LoadSpanningEnum
    Set: Spanning(self: LoadTemperature)=value
    """
    TemperatureChangeForAxialElongation=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: TemperatureChangeForAxialElongation(self: LoadTemperature) -> float
    Set: TemperatureChangeForAxialElongation(self: LoadTemperature)=value
    """
    TemperatureDifferentialSideToSide=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: TemperatureDifferentialSideToSide(self: LoadTemperature) -> float
    Set: TemperatureDifferentialSideToSide(self: LoadTemperature)=value
    """
    TemperatureDifferentialTopToBottom=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: TemperatureDifferentialTopToBottom(self: LoadTemperature) -> float
    Set: TemperatureDifferentialTopToBottom(self: LoadTemperature)=value
    """
    Weight=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Weight(self: LoadTemperature) -> float
    Set: Weight(self: LoadTemperature)=value
    """
# Generated stub: uniform load subtype of Load. CRUD placeholders plus
# placeholder property descriptors.
class LoadUniform(Load):
    """ LoadUniform() """
    def Delete(self):
        """ Delete(self: LoadUniform) -> bool """
        pass
    def Insert(self):
        """ Insert(self: LoadUniform) -> bool """
        pass
    def Modify(self):
        """ Modify(self: LoadUniform) -> bool """
        pass
    def Select(self):
        """ Select(self: LoadUniform) -> bool """
        pass
    DistanceA=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: DistanceA(self: LoadUniform) -> float
    Set: DistanceA(self: LoadUniform)=value
    """
    P1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: P1(self: LoadUniform) -> Vector
    Set: P1(self: LoadUniform)=value
    """
    Polygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Polygon(self: LoadUniform) -> Polygon
    Set: Polygon(self: LoadUniform)=value
    """
# Generated stub: composite weld type grouping child BaseWeld objects around
# a designated main weld. All methods are placeholders; `Select` documents
# two .NET overloads (with/without a ChildWeld argument) and the static
# `__new__` mirrors the single BaseWeld-taking constructor.
class LogicalWeld(BaseWeld):
    """ LogicalWeld(MainWeld: BaseWeld) """
    def AddWeld(self,Weld):
        """ AddWeld(self: LogicalWeld,Weld: BaseWeld) -> bool """
        pass
    def Delete(self):
        """ Delete(self: LogicalWeld) -> bool """
        pass
    def Explode(self):
        """ Explode(self: LogicalWeld) -> bool """
        pass
    def GetMainWeld(self):
        """ GetMainWeld(self: LogicalWeld) -> BaseWeld """
        pass
    def Insert(self):
        """ Insert(self: LogicalWeld) -> bool """
        pass
    def Modify(self):
        """ Modify(self: LogicalWeld) -> bool """
        pass
    def RemoveWeld(self,Weld):
        """ RemoveWeld(self: LogicalWeld,Weld: BaseWeld) -> bool """
        pass
    def Select(self,ChildWeld=None):
        """
        Select(self: LogicalWeld,ChildWeld: BaseWeld) -> bool
        Select(self: LogicalWeld) -> bool
        """
        pass
    def SetMainWeld(self,Weld):
        """ SetMainWeld(self: LogicalWeld,Weld: BaseWeld) -> bool """
        pass
    @staticmethod
    def __new__(self,MainWeld):
        """ __new__(cls: type,MainWeld: BaseWeld) """
        pass
# Generated stub: single-property value type; MaterialString is a placeholder
# descriptor (real type `str` per the docstring).
class Material(object):
    """ Material() """
    MaterialString=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: MaterialString(self: Material) -> str
    Set: MaterialString(self: Material)=value
    """
# Generated stub: main model-connection facade. All methods are placeholders
# whose docstrings record the real signatures (CommitChanges has two
# overloads, with and without a commit message).
class Model(object):
    """ Model() """
    def CommitChanges(self,Message=None):
        """
        CommitChanges(self: Model,Message: str) -> bool
        CommitChanges(self: Model) -> bool
        """
        pass
    def GetClashCheckHandler(self):
        """ GetClashCheckHandler(self: Model) -> ClashCheckHandler """
        pass
    def GetConnectionStatus(self):
        """ GetConnectionStatus(self: Model) -> bool """
        pass
    def GetGUIDByIdentifier(self,identifier):
        """ GetGUIDByIdentifier(self: Model,identifier: Identifier) -> str """
        pass
    def GetIdentifierByGUID(self,guid):
        """ GetIdentifierByGUID(self: Model,guid: str) -> Identifier """
        pass
    def GetInfo(self):
        """ GetInfo(self: Model) -> ModelInfo """
        pass
    def GetModelObjectSelector(self):
        """ GetModelObjectSelector(self: Model) -> ModelObjectSelector """
        pass
    def GetPhases(self):
        """ GetPhases(self: Model) -> PhaseCollection """
        pass
    def GetProjectInfo(self):
        """ GetProjectInfo(self: Model) -> ProjectInfo """
        pass
    def GetWorkPlaneHandler(self):
        """ GetWorkPlaneHandler(self: Model) -> WorkPlaneHandler """
        pass
    def SelectModelObject(self,ID):
        """ SelectModelObject(self: Model,ID: Identifier) -> ModelObject """
        pass
# Generated stub: model lifecycle handler (create/open/save/close). All
# methods are placeholders with the real signatures in their docstrings.
class ModelHandler(object):
    """ ModelHandler() """
    def Close(self):
        """ Close(self: ModelHandler) """
        pass
    def CreateNewMultiUserModel(self,ModelName,ModelFolder,ServerName):
        """ CreateNewMultiUserModel(self: ModelHandler,ModelName: str,ModelFolder: str,ServerName: str) -> bool """
        pass
    def CreateNewSingleUserModel(self,ModelName,ModelFolder,Template):
        """ CreateNewSingleUserModel(self: ModelHandler,ModelName: str,ModelFolder: str,Template: str) -> bool """
        pass
    def IsModelAutoSaved(self,ModelFolder):
        """ IsModelAutoSaved(self: ModelHandler,ModelFolder: str) -> bool """
        pass
    def IsModelSaved(self):
        """ IsModelSaved(self: ModelHandler) -> bool """
        pass
    def Open(self,ModelFolder,OpenAutoSaved):
        """ Open(self: ModelHandler,ModelFolder: str,OpenAutoSaved: bool) -> bool """
        pass
    def Save(self,Comment,User):
        """ Save(self: ModelHandler,Comment: str,User: str) -> bool """
        pass
# Generated stub: model metadata holder. Placeholder property descriptors;
# SharedModel and SingleUserModel document no setter, i.e. read-only in the
# real API.
class ModelInfo(object):
    # no doc
    CurrentPhase=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CurrentPhase(self: ModelInfo) -> int
    Set: CurrentPhase(self: ModelInfo)=value
    """
    ModelName=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ModelName(self: ModelInfo) -> str
    Set: ModelName(self: ModelInfo)=value
    """
    ModelPath=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ModelPath(self: ModelInfo) -> str
    Set: ModelPath(self: ModelInfo)=value
    """
    NorthDirection=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: NorthDirection(self: ModelInfo) -> float
    Set: NorthDirection(self: ModelInfo)=value
    """
    SharedModel=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SharedModel(self: ModelInfo) -> bool
    """
    SingleUserModel=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SingleUserModel(self: ModelInfo) -> bool
    """
# Generated stub mirroring a .NET enumerator over model objects: IEnumerator
# methods plus Python iteration hooks, a read-only Current property, a
# SelectInstances flag, the class attribute AutoFetch (default False), and a
# None placeholder for the nested EnumeratorTypeEnum type.
class ModelObjectEnumerator(object):
    # no doc
    def GetEnumerator(self):
        """ GetEnumerator(self: ModelObjectEnumerator) -> IEnumerator """
        pass
    def GetSize(self):
        """ GetSize(self: ModelObjectEnumerator) -> int """
        pass
    def MoveNext(self):
        """ MoveNext(self: ModelObjectEnumerator) -> bool """
        pass
    def next(self,*args):
        """ next(self: object) -> object """
        pass
    def Reset(self):
        """ Reset(self: ModelObjectEnumerator) """
        pass
    def __iter__(self,*args):
        """ __iter__(self: IEnumerator) -> object """
        pass
    Current=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Current(self: ModelObjectEnumerator) -> ModelObject
    """
    SelectInstances=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SelectInstances(self: ModelObjectEnumerator) -> bool
    Set: SelectInstances(self: ModelObjectEnumerator)=value
    """
    AutoFetch=False
    EnumeratorTypeEnum=None
# Generated stub: query facade returning ModelObjectEnumerator results.
# GetAllObjectsWithType uses the generator's `*__args` convention because the
# real .NET method has two overloads (Type array / ModelObjectEnum).
class ModelObjectSelector(object):
    # no doc
    def GetAllObjects(self):
        """ GetAllObjects(self: ModelObjectSelector) -> ModelObjectEnumerator """
        pass
    def GetAllObjectsWithType(self,*__args):
        """
        GetAllObjectsWithType(self: ModelObjectSelector,TypeFilter: Array[Type]) -> ModelObjectEnumerator
        GetAllObjectsWithType(self: ModelObjectSelector,Enum: ModelObjectEnum) -> ModelObjectEnumerator
        """
        pass
    def GetEnumerator(self):
        """ GetEnumerator(self: ModelObjectSelector) -> ModelObjectEnumerator """
        pass
    def GetFilteredObjectsWithType(self,Enum,FilterName):
        """ GetFilteredObjectsWithType(self: ModelObjectSelector,Enum: ModelObjectEnum,FilterName: str) -> ModelObjectEnumerator """
        pass
    def GetObjectsByBoundingBox(self,MinPoint,MaxPoint):
        """ GetObjectsByBoundingBox(self: ModelObjectSelector,MinPoint: Point,MaxPoint: Point) -> ModelObjectEnumerator """
        pass
    def GetObjectsByFilter(self,FilterExpression):
        """ GetObjectsByFilter(self: ModelObjectSelector,FilterExpression: FilterExpression) -> ModelObjectEnumerator """
        pass
    def GetObjectsByFilterName(self,FilterName):
        """ GetObjectsByFilterName(self: ModelObjectSelector,FilterName: str) -> ModelObjectEnumerator """
        pass
# Generated stub: (prefix, start-number) value type with two constructor
# overloads mirrored by the static `__new__` placeholder.
class NumberingSeries(object):
    """
    NumberingSeries()
    NumberingSeries(Prefix: str,Number: int)
    """
    @staticmethod
    def __new__(self,Prefix=None,Number=None):
        """
        __new__(cls: type)
        __new__(cls: type,Prefix: str,Number: int)
        """
        pass
    Prefix=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Prefix(self: NumberingSeries) -> str
    Set: Prefix(self: NumberingSeries)=value
    """
    StartNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartNumber(self: NumberingSeries) -> int
    Set: StartNumber(self: NumberingSeries)=value
    """
# Generated stub: variant of NumberingSeries whose StartNumber is a .NET
# Nullable[int] per the property docstring.
class NumberingSeriesNullable(object):
    """
    NumberingSeriesNullable()
    NumberingSeriesNullable(prefix: str,number: int)
    """
    @staticmethod
    def __new__(self,prefix=None,number=None):
        """
        __new__(cls: type)
        __new__(cls: type,prefix: str,number: int)
        """
        pass
    Prefix=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Prefix(self: NumberingSeriesNullable) -> str
    Set: Prefix(self: NumberingSeriesNullable)=value
    """
    StartNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartNumber(self: NumberingSeriesNullable) -> Nullable[int]
    Set: StartNumber(self: NumberingSeriesNullable)=value
    """
# Generated stub: 3-component offset value type (Dx/Dy/Dz floats per the
# docstrings); properties are placeholder descriptors.
class Offset(object):
    """ Offset() """
    Dx=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Dx(self: Offset) -> float
    Set: Dx(self: Offset)=value
    """
    Dy=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Dy(self: Offset) -> float
    Set: Dy(self: Offset)=value
    """
    Dz=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Dz(self: Offset) -> float
    Set: Dz(self: Offset)=value
    """
# Generated stub: model phase. Three constructor overloads mirrored by the
# static `__new__`; Get/SetUserProperty docstrings record the int/float/str
# overload triples (GetUserProperty returns a (bool, value) tuple in the
# IronPython binding). All bodies are placeholders.
class Phase(object):
    """
    Phase()
    Phase(PhaseNumber: int)
    Phase(PhaseNumber: int,PhaseName: str,PhaseComment: str,IsCurrentPhase: int)
    """
    def Delete(self):
        """ Delete(self: Phase) -> bool """
        pass
    def GetUserProperty(self,Name,Value):
        """
        GetUserProperty(self: Phase,Name: str,Value: int) -> (bool,int)
        GetUserProperty(self: Phase,Name: str,Value: float) -> (bool,float)
        GetUserProperty(self: Phase,Name: str,Value: str) -> (bool,str)
        """
        pass
    def Insert(self):
        """ Insert(self: Phase) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Phase) -> bool """
        pass
    def Select(self):
        """ Select(self: Phase) -> bool """
        pass
    def SetUserProperty(self,Name,Value):
        """
        SetUserProperty(self: Phase,Name: str,Value: int) -> bool
        SetUserProperty(self: Phase,Name: str,Value: float) -> bool
        SetUserProperty(self: Phase,Name: str,Value: str) -> bool
        """
        pass
    @staticmethod
    def __new__(self,PhaseNumber=None,PhaseName=None,PhaseComment=None,IsCurrentPhase=None):
        """
        __new__(cls: type)
        __new__(cls: type,PhaseNumber: int)
        __new__(cls: type,PhaseNumber: int,PhaseName: str,PhaseComment: str,IsCurrentPhase: int)
        """
        pass
    IsCurrentPhase=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsCurrentPhase(self: Phase) -> int
    Set: IsCurrentPhase(self: Phase)=value
    """
    PhaseComment=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PhaseComment(self: Phase) -> str
    Set: PhaseComment(self: Phase)=value
    """
    PhaseName=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PhaseName(self: Phase) -> str
    Set: PhaseName(self: Phase)=value
    """
    PhaseNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PhaseNumber(self: Phase) -> int
    Set: PhaseNumber(self: Phase)=value
    """
# Auto-generated stub for a .NET collection of Phase objects; exposes the
# standard .NET ICollection/IEnumerable surface (CopyTo, GetEnumerator,
# Count, IsSynchronized, SyncRoot) plus Python iteration dunders.
class PhaseCollection(object):
    # no doc
    def CopyTo(self,Array,Index):
        """ CopyTo(self: PhaseCollection,Array: Array,Index: int) """
        pass
    def GetEnumerator(self):
        """ GetEnumerator(self: PhaseCollection) -> IEnumerator """
        pass
    def __iter__(self,*args):
        """ __iter__(self: IEnumerable) -> object """
        pass
    def __len__(self,*args):
        """ x.__len__() <==> len(x) """
        pass
    # Read-only placeholder properties (no setter behavior in the stub).
    Count=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Count(self: PhaseCollection) -> int
    """
    IsSynchronized=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsSynchronized(self: PhaseCollection) -> bool
    """
    SyncRoot=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SyncRoot(self: PhaseCollection) -> object
    """
class Plane(object):
    """ Plane() """

    # Stubbed .NET properties: each getter returns a fresh placeholder
    # object, while the setter and deleter are deliberate no-ops. The
    # declared .NET types (Vector/Point) are recorded in the docstrings.
    AxisX = property(
        lambda self: object(),
        lambda self, v: None,
        lambda self: None,
    )
    """Get: AxisX(self: Plane) -> Vector
    Set: AxisX(self: Plane)=value
    """

    AxisY = property(
        lambda self: object(),
        lambda self, v: None,
        lambda self: None,
    )
    """Get: AxisY(self: Plane) -> Vector
    Set: AxisY(self: Plane)=value
    """

    Origin = property(
        lambda self: object(),
        lambda self, v: None,
        lambda self: None,
    )
    """Get: Origin(self: Plane) -> Point
    Set: Origin(self: Plane)=value
    """
# Auto-generated stub: geometry-error exception type derived from
# ConnectiveGeometryException (declared elsewhere in this file).
class PlateIntersectsWithIntersectionLineException(ConnectiveGeometryException):
    """ PlateIntersectsWithIntersectionLineException() """
# Auto-generated stub for a polygonal beam model object (subclass of Part).
# Bodies are placeholders; real behavior is in the wrapped .NET assembly.
class PolyBeam(Part):
    """
    PolyBeam()
    PolyBeam(polyBeamType: PolyBeamTypeEnum)
    """
    def AddContourPoint(self,contourPoint):
        """ AddContourPoint(self: PolyBeam,contourPoint: ContourPoint) -> bool """
        pass
    def Delete(self):
        """ Delete(self: PolyBeam) -> bool """
        pass
    def GetPolybeamCoordinateSystems(self):
        """ GetPolybeamCoordinateSystems(self: PolyBeam) -> ArrayList """
        pass
    def Insert(self):
        """ Insert(self: PolyBeam) -> bool """
        pass
    def Modify(self):
        """ Modify(self: PolyBeam) -> bool """
        pass
    def Select(self):
        """ Select(self: PolyBeam) -> bool """
        pass
    @staticmethod
    def __new__(self,polyBeamType=None):
        # None default emulates the parameterless .NET constructor overload.
        """
        __new__(cls: type)
        __new__(cls: type,polyBeamType: PolyBeamTypeEnum)
        """
        pass
    Contour=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Contour(self: PolyBeam) -> Contour
    Set: Contour(self: PolyBeam)=value
    """
    # Type is read-only per its docstring (no Set line).
    Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Type(self: PolyBeam) -> PolyBeamTypeEnum
    """
    # Placeholder for a nested .NET enum type; real values come from the assembly.
    PolyBeamTypeEnum=None
class Polygon(object):
    """ Polygon() """

    # Stubbed .NET property: the getter yields a placeholder object and
    # the setter/deleter do nothing; the declared type is ArrayList.
    Points = property(
        lambda self: object(),
        lambda self, v: None,
        lambda self: None,
    )
    """Get: Points(self: Polygon) -> ArrayList
    Set: Points(self: Polygon)=value
    """

    # Point-count bounds as declared by the generating API.
    MAX_POLYGON_POINTS = 99
    MIN_POLYGON_POINTS = 3
# Auto-generated stub: geometry-node wrapper around a Contour, with a
# visitor hook and clone support (IGeometryNode on the .NET side).
class PolygonNode(object):
    """ PolygonNode(contour: Contour,isAutomaticNode: bool) """
    def AcceptVisitor(self,visitor):
        """ AcceptVisitor(self: PolygonNode,visitor: IGeometryNodeVisitor) """
        pass
    def Clone(self):
        """ Clone(self: PolygonNode) -> IGeometryNode """
        pass
    @staticmethod
    def __new__(self,contour,isAutomaticNode):
        """ __new__(cls: type,contour: Contour,isAutomaticNode: bool) """
        pass
    # Read-only placeholder properties (docstrings have no Set line).
    Contour=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Contour(self: PolygonNode) -> Contour
    """
    IsAutomatic=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: IsAutomatic(self: PolygonNode) -> bool
    """
# Auto-generated stub: weld defined along a polygon (subclass of BaseWeld),
# with the standard CRUD surface (Delete/Insert/Modify/Select).
class PolygonWeld(BaseWeld):
    """ PolygonWeld() """
    def Delete(self):
        """ Delete(self: PolygonWeld) -> bool """
        pass
    def GetLogicalWeld(self,LogicalWeld):
        # Returns a (success, LogicalWeld) pair per the stub signature.
        """ GetLogicalWeld(self: PolygonWeld,LogicalWeld: LogicalWeld) -> (bool,LogicalWeld) """
        pass
    def Insert(self):
        """ Insert(self: PolygonWeld) -> bool """
        pass
    def Modify(self):
        """ Modify(self: PolygonWeld) -> bool """
        pass
    def Select(self):
        """ Select(self: PolygonWeld) -> bool """
        pass
    Polygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Polygon(self: PolygonWeld) -> Polygon
    Set: Polygon(self: PolygonWeld)=value
    """
# Auto-generated stub: polymesh/FacetedBrep conversion and validation
# helpers. Mostly static utilities on the .NET side; bodies are placeholders.
class Polymesh(object):
    """ Polymesh() """
    @staticmethod
    def CompareFingerprints(fingerprint1,fingerprint2):
        """ CompareFingerprints(fingerprint1: str,fingerprint2: str) -> bool """
        pass
    @staticmethod
    def Convert(input):
        """ Convert(input: dotPolymesh_t) -> (List[FacetedBrep],dotPolymesh_t) """
        pass
    @staticmethod
    def ConvertInvalidInfoFromStruct(input):
        """ ConvertInvalidInfoFromStruct(input: dotPolymeshValidateInvalidInfo_t) -> (List[KeyValuePair[int,PolymeshHealthCheckEnum]],dotPolymeshValidateInvalidInfo_t) """
        pass
    @staticmethod
    def ConvertToStruct(brep,output):
        """ ConvertToStruct(brep: FacetedBrep,output: dotPolymesh_t) -> dotPolymesh_t """
        pass
    @staticmethod
    def Fingerprint(brep):
        """ Fingerprint(brep: FacetedBrep) -> str """
        pass
    def FromStruct(self,input):
        """ FromStruct(self: Polymesh,input: dotPolymesh_t) -> dotPolymesh_t """
        pass
    @staticmethod
    def GetSolidBrep(inBrep,outBrep):
        # NOTE(review): the stub signature documents only inBrep even though
        # the Python wrapper takes two parameters — mirrors the generator output.
        """ GetSolidBrep(inBrep: FacetedBrep) -> (bool,FacetedBrep) """
        pass
    def ToStruct(self,output):
        """ ToStruct(self: Polymesh,output: dotPolymesh_t) -> dotPolymesh_t """
        pass
    @staticmethod
    def Validate(brep,checkCriteria,invalidInfo):
        """ Validate(brep: FacetedBrep,checkCriteria: PolymeshCheckerFlags,invalidInfo: List[KeyValuePair[int,PolymeshHealthCheckEnum]]) -> (bool,List[KeyValuePair[int,PolymeshHealthCheckEnum]]) """
        pass
    Brep=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Brep(self: Polymesh) -> FacetedBrep
    Set: Brep(self: Polymesh)=value
    """
    # Placeholders for nested .NET enum/flag types.
    PolymeshCheckerFlags=None
    PolymeshHealthCheckEnum=None
# Auto-generated stub: .NET IEnumerator over polymeshes, with both the
# .NET surface (MoveNext/Reset/Current) and Python iteration hooks.
class PolymeshEnumerator(object):
    # no doc
    def MoveNext(self):
        """ MoveNext(self: PolymeshEnumerator) -> bool """
        pass
    def next(self,*args):
        # Python 2 iterator protocol hook provided by the generator.
        """ next(self: object) -> object """
        pass
    def Reset(self):
        """ Reset(self: PolymeshEnumerator) """
        pass
    def __iter__(self,*args):
        """ __iter__(self: IEnumerator) -> object """
        pass
    Current=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Current(self: PolymeshEnumerator) -> object
    """
# Auto-generated stub: part positioning record — depth/plane/rotation
# enums with matching numeric offsets. Pure data holder on the .NET side.
class Position(object):
    """ Position() """
    Depth=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Depth(self: Position) -> DepthEnum
    Set: Depth(self: Position)=value
    """
    DepthOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: DepthOffset(self: Position) -> float
    Set: DepthOffset(self: Position)=value
    """
    Plane=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Plane(self: Position) -> PlaneEnum
    Set: Plane(self: Position)=value
    """
    PlaneOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PlaneOffset(self: Position) -> float
    Set: PlaneOffset(self: Position)=value
    """
    Rotation=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Rotation(self: Position) -> RotationEnum
    Set: Rotation(self: Position)=value
    """
    RotationOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: RotationOffset(self: Position) -> float
    Set: RotationOffset(self: Position)=value
    """
    # Placeholders for the nested .NET enum types referenced above.
    DepthEnum=None
    PlaneEnum=None
    RotationEnum=None
# Auto-generated stub: pour break model object (subclass of ModelObject)
# with CRUD methods plus internal *InstanceDelegate hooks that marshal a
# dotPolymeshObject_t struct to/from the native layer.
class PourBreak(ModelObject):
    """ PourBreak() """
    def CreateInstanceDelegate(self,*args):
        """ CreateInstanceDelegate(self: PourBreak,pourBreak: dotPolymeshObject_t) -> (int,dotPolymeshObject_t) """
        pass
    def Delete(self):
        """ Delete(self: PourBreak) -> bool """
        pass
    def Insert(self):
        """ Insert(self: PourBreak) -> bool """
        pass
    def Modify(self):
        """ Modify(self: PourBreak) -> bool """
        pass
    def ModifyInstanceDelegate(self,*args):
        """ ModifyInstanceDelegate(self: PourBreak,pourBreak: dotPolymeshObject_t) -> (int,dotPolymeshObject_t) """
        pass
    def Select(self):
        """ Select(self: PourBreak) -> bool """
        pass
    def SelectInstanceDelegate(self,*args):
        """ SelectInstanceDelegate(self: PourBreak,pourBreak: dotPolymeshObject_t) -> (int,dotPolymeshObject_t) """
        pass
    # ModelObjectType has no signature docstring in the generated output.
    ModelObjectType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    Polymesh=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Polymesh(self: PourBreak) -> FacetedBrep
    Set: Polymesh(self: PourBreak)=value
    """
# Auto-generated stub: concrete pour model object (subclass of ModelObject).
# Getter methods return enumerators/geometry for the pour's contents.
class PourObject(ModelObject):
    """ PourObject() """
    def Delete(self):
        """ Delete(self: PourObject) -> bool """
        pass
    def GetAssembly(self):
        """ GetAssembly(self: PourObject) -> Assembly """
        pass
    def GetObjects(self):
        """ GetObjects(self: PourObject) -> ModelObjectEnumerator """
        pass
    def GetParts(self):
        """ GetParts(self: PourObject) -> ModelObjectEnumerator """
        pass
    def GetPourPolymeshes(self):
        """ GetPourPolymeshes(self: PourObject) -> PolymeshEnumerator """
        pass
    def GetSolid(self):
        """ GetSolid(self: PourObject) -> Solid """
        pass
    def GetSurfaceObjects(self):
        """ GetSurfaceObjects(self: PourObject) -> ModelObjectEnumerator """
        pass
    def Insert(self):
        """ Insert(self: PourObject) -> bool """
        pass
    def Modify(self):
        """ Modify(self: PourObject) -> bool """
        pass
    def Select(self):
        """ Select(self: PourObject) -> bool """
        pass
    Class=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Class(self: PourObject) -> int
    Set: Class(self: PourObject)=value
    """
    ConcreteMixture=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ConcreteMixture(self: PourObject) -> str
    Set: ConcreteMixture(self: PourObject)=value
    """
    PourNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PourNumber(self: PourObject) -> str
    Set: PourNumber(self: PourObject)=value
    """
    PourType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PourType(self: PourObject) -> str
    Set: PourType(self: PourObject)=value
    """
# Auto-generated stub: profile-string value object with static
# format/parse helpers (str -> str on the .NET side).
class Profile(object):
    """ Profile() """
    @staticmethod
    def FormatProfileString(profileString):
        """ FormatProfileString(profileString: str) -> str """
        pass
    @staticmethod
    def ParseProfileString(profileString):
        """ ParseProfileString(profileString: str) -> str """
        pass
    ProfileString=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ProfileString(self: Profile) -> str
    Set: ProfileString(self: Profile)=value
    """
# Auto-generated stub: project metadata record (name, addresses, dates,
# sharing paths) with typed user-property accessors. Note it exposes
# Modify/Set* but no Insert/Delete — the project record always exists.
class ProjectInfo(object):
    # no doc
    def GetDoubleUserProperties(self,Values):
        """ GetDoubleUserProperties(self: ProjectInfo,Values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetIntegerUserProperties(self,Values):
        """ GetIntegerUserProperties(self: ProjectInfo,Values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetStringUserProperties(self,Values):
        """ GetStringUserProperties(self: ProjectInfo,Values: Hashtable) -> (bool,Hashtable) """
        pass
    def GetUserProperty(self,Name,Value):
        """
        GetUserProperty(self: ProjectInfo,Name: str,Value: float) -> (bool,float)
        GetUserProperty(self: ProjectInfo,Name: str,Value: int) -> (bool,int)
        GetUserProperty(self: ProjectInfo,Name: str,Value: str) -> (bool,str)
        """
        pass
    def Modify(self):
        """ Modify(self: ProjectInfo) -> bool """
        pass
    def SetUserProperty(self,Name,Value):
        """
        SetUserProperty(self: ProjectInfo,Name: str,Value: int) -> bool
        SetUserProperty(self: ProjectInfo,Name: str,Value: float) -> bool
        SetUserProperty(self: ProjectInfo,Name: str,Value: str) -> bool
        """
        pass
    # Placeholder property descriptors; declared types are in the docstrings.
    Address=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Address(self: ProjectInfo) -> str
    Set: Address(self: ProjectInfo)=value
    """
    Builder=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Builder(self: ProjectInfo) -> str
    Set: Builder(self: ProjectInfo)=value
    """
    Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Description(self: ProjectInfo) -> str
    Set: Description(self: ProjectInfo)=value
    """
    Designer=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Designer(self: ProjectInfo) -> str
    Set: Designer(self: ProjectInfo)=value
    """
    EndDate=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndDate(self: ProjectInfo) -> str
    Set: EndDate(self: ProjectInfo)=value
    """
    GUID=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: GUID(self: ProjectInfo) -> str
    Set: GUID(self: ProjectInfo)=value
    """
    Info1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Info1(self: ProjectInfo) -> str
    Set: Info1(self: ProjectInfo)=value
    """
    Info2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Info2(self: ProjectInfo) -> str
    Set: Info2(self: ProjectInfo)=value
    """
    ModelSharingLocalPath=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ModelSharingLocalPath(self: ProjectInfo) -> DirectoryInfo
    Set: ModelSharingLocalPath(self: ProjectInfo)=value
    """
    ModelSharingServerPath=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ModelSharingServerPath(self: ProjectInfo) -> Uri
    Set: ModelSharingServerPath(self: ProjectInfo)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: ProjectInfo) -> str
    Set: Name(self: ProjectInfo)=value
    """
    Object=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Object(self: ProjectInfo) -> str
    Set: Object(self: ProjectInfo)=value
    """
    ProjectNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ProjectNumber(self: ProjectInfo) -> str
    Set: ProjectNumber(self: ProjectInfo)=value
    """
    StartDate=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartDate(self: ProjectInfo) -> str
    Set: StartDate(self: ProjectInfo)=value
    """
# Auto-generated stub: rebar end-detail modifier (subclass of
# BaseRebarModifier) carrying nullable hook/threading data.
class RebarEndDetailModifier(BaseRebarModifier):
    """ RebarEndDetailModifier() """
    RebarHook=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: RebarHook(self: RebarEndDetailModifier) -> RebarHookDataNullable
    Set: RebarHook(self: RebarEndDetailModifier)=value
    """
    RebarThreading=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: RebarThreading(self: RebarEndDetailModifier) -> RebarThreadingDataNullable
    Set: RebarThreading(self: RebarEndDetailModifier)=value
    """
# Auto-generated stub: computed rebar geometry (read-only: bending radii,
# bar diameter, and the bar's polyline shape).
class RebarGeometry(object):
    # no doc
    BendingRadiuses=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: BendingRadiuses(self: RebarGeometry) -> ArrayList
    """
    Diameter=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Diameter(self: RebarGeometry) -> float
    """
    Shape=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Shape(self: RebarGeometry) -> PolyLine
    """
# Auto-generated stub: rebar group model object (subclass of
# BaseRebarGroup) with CRUD methods, polygons, and stirrup type.
class RebarGroup(BaseRebarGroup):
    """ RebarGroup() """
    def Delete(self):
        """ Delete(self: RebarGroup) -> bool """
        pass
    def Insert(self):
        """ Insert(self: RebarGroup) -> bool """
        pass
    def Modify(self):
        """ Modify(self: RebarGroup) -> bool """
        pass
    def Select(self):
        """ Select(self: RebarGroup) -> bool """
        pass
    Polygons=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Polygons(self: RebarGroup) -> ArrayList
    Set: Polygons(self: RebarGroup)=value
    """
    StirrupType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StirrupType(self: RebarGroup) -> RebarGroupStirrupTypeEnum
    Set: StirrupType(self: RebarGroup)=value
    """
    # Placeholder for the nested .NET enum type.
    RebarGroupStirrupTypeEnum=None
# Auto-generated stub: guideline for a rebar set — a curve (Contour),
# an integer Id, and a RebarSpacing definition along the curve.
class RebarGuideline(object):
    """ RebarGuideline() """
    Curve=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Curve(self: RebarGuideline) -> Contour
    Set: Curve(self: RebarGuideline)=value
    """
    Id=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Id(self: RebarGuideline) -> int
    Set: Id(self: RebarGuideline)=value
    """
    Spacing=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Spacing(self: RebarGuideline) -> RebarSpacing
    Set: Spacing(self: RebarGuideline)=value
    """
# Auto-generated stub: rebar hook parameters (angle/length/radius as
# floats, plus a hook-shape enum).
class RebarHookData(object):
    """ RebarHookData() """
    Angle=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Angle(self: RebarHookData) -> float
    Set: Angle(self: RebarHookData)=value
    """
    Length=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Length(self: RebarHookData) -> float
    Set: Length(self: RebarHookData)=value
    """
    Radius=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Radius(self: RebarHookData) -> float
    Set: Radius(self: RebarHookData)=value
    """
    Shape=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Shape(self: RebarHookData) -> RebarHookShapeEnum
    Set: Shape(self: RebarHookData)=value
    """
    # Placeholder for the nested .NET enum type.
    RebarHookShapeEnum=None
# Auto-generated stub: nullable variant of RebarHookData — every field is
# a .NET Nullable[...] so "unset" can be represented in modifiers.
class RebarHookDataNullable(object):
    """ RebarHookDataNullable() """
    Angle=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Angle(self: RebarHookDataNullable) -> Nullable[float]
    Set: Angle(self: RebarHookDataNullable)=value
    """
    Length=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Length(self: RebarHookDataNullable) -> Nullable[float]
    Set: Length(self: RebarHookDataNullable)=value
    """
    Radius=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Radius(self: RebarHookDataNullable) -> Nullable[float]
    Set: Radius(self: RebarHookDataNullable)=value
    """
    Shape=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Shape(self: RebarHookDataNullable) -> Nullable[RebarHookShapeEnum]
    Set: Shape(self: RebarHookDataNullable)=value
    """
# Auto-generated stub: one leg face of a rebar set, defined by a Contour
# with offset/layer-order/reversal attributes.
class RebarLegFace(object):
    """
    RebarLegFace()
    RebarLegFace(contour: Contour)
    """
    @staticmethod
    def __new__(self,contour=None):
        # None default emulates the parameterless .NET constructor overload.
        """
        __new__(cls: type)
        __new__(cls: type,contour: Contour)
        """
        pass
    # NOTE(review): "AdditonalOffset" is misspelled in the upstream API;
    # kept as-is because the name is part of the public interface.
    AdditonalOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: AdditonalOffset(self: RebarLegFace) -> float
    Set: AdditonalOffset(self: RebarLegFace)=value
    """
    Contour=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Contour(self: RebarLegFace) -> Contour
    Set: Contour(self: RebarLegFace)=value
    """
    Id=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Id(self: RebarLegFace) -> int
    Set: Id(self: RebarLegFace)=value
    """
    LayerOrderNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LayerOrderNumber(self: RebarLegFace) -> int
    Set: LayerOrderNumber(self: RebarLegFace)=value
    """
    Reversed=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Reversed(self: RebarLegFace) -> bool
    Set: Reversed(self: RebarLegFace)=value
    """
# Auto-generated stub: rebar mesh reinforcement object (subclass of
# Reinforcement). CRUD methods plus a large set of geometry/spacing
# properties in the longitudinal and cross directions.
class RebarMesh(Reinforcement):
    """ RebarMesh() """
    def Delete(self):
        """ Delete(self: RebarMesh) -> bool """
        pass
    def Insert(self):
        """ Insert(self: RebarMesh) -> bool """
        pass
    def Modify(self):
        """ Modify(self: RebarMesh) -> bool """
        pass
    def Select(self):
        """ Select(self: RebarMesh) -> bool """
        pass
    CatalogName=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CatalogName(self: RebarMesh) -> str
    Set: CatalogName(self: RebarMesh)=value
    """
    CrossBarLocation=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CrossBarLocation(self: RebarMesh) -> RebarMeshCrossBarLocationEnum
    Set: CrossBarLocation(self: RebarMesh)=value
    """
    CrossDistances=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CrossDistances(self: RebarMesh) -> ArrayList
    Set: CrossDistances(self: RebarMesh)=value
    """
    CrossSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CrossSize(self: RebarMesh) -> str
    Set: CrossSize(self: RebarMesh)=value
    """
    CutByFatherPartCuts=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CutByFatherPartCuts(self: RebarMesh) -> bool
    Set: CutByFatherPartCuts(self: RebarMesh)=value
    """
    EndFromPlaneOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndFromPlaneOffset(self: RebarMesh) -> float
    Set: EndFromPlaneOffset(self: RebarMesh)=value
    """
    EndHook=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndHook(self: RebarMesh) -> RebarHookData
    Set: EndHook(self: RebarMesh)=value
    """
    EndPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndPoint(self: RebarMesh) -> Point
    Set: EndPoint(self: RebarMesh)=value
    """
    FromPlaneOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: FromPlaneOffset(self: RebarMesh) -> float
    Set: FromPlaneOffset(self: RebarMesh)=value
    """
    LeftOverhangCross=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LeftOverhangCross(self: RebarMesh) -> float
    Set: LeftOverhangCross(self: RebarMesh)=value
    """
    LeftOverhangLongitudinal=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LeftOverhangLongitudinal(self: RebarMesh) -> float
    Set: LeftOverhangLongitudinal(self: RebarMesh)=value
    """
    Length=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Length(self: RebarMesh) -> float
    Set: Length(self: RebarMesh)=value
    """
    LongitudinalDistances=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LongitudinalDistances(self: RebarMesh) -> ArrayList
    Set: LongitudinalDistances(self: RebarMesh)=value
    """
    LongitudinalSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LongitudinalSize(self: RebarMesh) -> str
    Set: LongitudinalSize(self: RebarMesh)=value
    """
    LongitudinalSpacingMethod=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LongitudinalSpacingMethod(self: RebarMesh) -> RebarMeshSpacingMethodEnum
    Set: LongitudinalSpacingMethod(self: RebarMesh)=value
    """
    MeshType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: MeshType(self: RebarMesh) -> RebarMeshTypeEnum
    Set: MeshType(self: RebarMesh)=value
    """
    Polygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Polygon(self: RebarMesh) -> Polygon
    Set: Polygon(self: RebarMesh)=value
    """
    RightOverhangCross=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: RightOverhangCross(self: RebarMesh) -> float
    Set: RightOverhangCross(self: RebarMesh)=value
    """
    RightOverhangLongitudinal=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: RightOverhangLongitudinal(self: RebarMesh) -> float
    Set: RightOverhangLongitudinal(self: RebarMesh)=value
    """
    StartFromPlaneOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartFromPlaneOffset(self: RebarMesh) -> float
    Set: StartFromPlaneOffset(self: RebarMesh)=value
    """
    StartHook=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartHook(self: RebarMesh) -> RebarHookData
    Set: StartHook(self: RebarMesh)=value
    """
    StartPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartPoint(self: RebarMesh) -> Point
    Set: StartPoint(self: RebarMesh)=value
    """
    Width=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Width(self: RebarMesh) -> float
    Set: Width(self: RebarMesh)=value
    """
    # Placeholders for the nested .NET enum types referenced above.
    RebarMeshCrossBarLocationEnum=None
    RebarMeshSpacingMethodEnum=None
    RebarMeshTypeEnum=None
# Auto-generated stub: concrete rebar property bundle (bending radius,
# class, grade, name, numbering series, size).
class RebarProperties(object):
    """ RebarProperties() """
    BendingRadius=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: BendingRadius(self: RebarProperties) -> float
    Set: BendingRadius(self: RebarProperties)=value
    """
    Class=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Class(self: RebarProperties) -> int
    Set: Class(self: RebarProperties)=value
    """
    Grade=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Grade(self: RebarProperties) -> str
    Set: Grade(self: RebarProperties)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: RebarProperties) -> str
    Set: Name(self: RebarProperties)=value
    """
    NumberingSeries=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: NumberingSeries(self: RebarProperties) -> NumberingSeries
    Set: NumberingSeries(self: RebarProperties)=value
    """
    Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Size(self: RebarProperties) -> str
    Set: Size(self: RebarProperties)=value
    """
# Auto-generated stub: nullable variant of RebarProperties — numeric
# fields are Nullable[...] so modifiers can leave values unchanged.
class RebarPropertiesNullable(object):
    """ RebarPropertiesNullable() """
    BendingRadius=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: BendingRadius(self: RebarPropertiesNullable) -> Nullable[float]
    Set: BendingRadius(self: RebarPropertiesNullable)=value
    """
    Class=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Class(self: RebarPropertiesNullable) -> Nullable[int]
    Set: Class(self: RebarPropertiesNullable)=value
    """
    Grade=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Grade(self: RebarPropertiesNullable) -> str
    Set: Grade(self: RebarPropertiesNullable)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: RebarPropertiesNullable) -> str
    Set: Name(self: RebarPropertiesNullable)=value
    """
    NumberingSeries=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: NumberingSeries(self: RebarPropertiesNullable) -> NumberingSeriesNullable
    Set: NumberingSeries(self: RebarPropertiesNullable)=value
    """
    Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Size(self: RebarPropertiesNullable) -> str
    Set: Size(self: RebarPropertiesNullable)=value
    """
# Auto-generated stub: modifier that overrides rebar properties on part of
# a rebar set (subclass of BaseRebarModifier); carries nullable values.
class RebarPropertyModifier(BaseRebarModifier):
    """ RebarPropertyModifier() """
    RebarProperties=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: RebarProperties(self: RebarPropertyModifier) -> RebarPropertiesNullable
    Set: RebarProperties(self: RebarPropertyModifier)=value
    """
# Auto-generated stub: rebar set model object (subclass of ModelObject)
# composed of guidelines and leg faces; Get* methods return enumerators
# over the attached modifiers/additions/generated reinforcement.
class RebarSet(ModelObject):
    """ RebarSet() """
    def Delete(self):
        """ Delete(self: RebarSet) -> bool """
        pass
    def GetRebarModifiers(self):
        """ GetRebarModifiers(self: RebarSet) -> ModelObjectEnumerator """
        pass
    def GetRebarSetAdditions(self):
        """ GetRebarSetAdditions(self: RebarSet) -> ModelObjectEnumerator """
        pass
    def GetReinforcements(self):
        """ GetReinforcements(self: RebarSet) -> ModelObjectEnumerator """
        pass
    def Insert(self):
        """ Insert(self: RebarSet) -> bool """
        pass
    def Modify(self):
        """ Modify(self: RebarSet) -> bool """
        pass
    def Select(self):
        """ Select(self: RebarSet) -> bool """
        pass
    Guidelines=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Guidelines(self: RebarSet) -> List[RebarGuideline]
    Set: Guidelines(self: RebarSet)=value
    """
    LayerOrderNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LayerOrderNumber(self: RebarSet) -> int
    Set: LayerOrderNumber(self: RebarSet)=value
    """
    LegFaces=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LegFaces(self: RebarSet) -> List[RebarLegFace]
    Set: LegFaces(self: RebarSet)=value
    """
    RebarProperties=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: RebarProperties(self: RebarSet) -> RebarProperties
    Set: RebarProperties(self: RebarSet)=value
    """
# Auto-generated stub: additional leg faces attached to a parent RebarSet
# (the parent is exposed via the Father property).
class RebarSetAddition(ModelObject):
    """ RebarSetAddition() """
    def Delete(self):
        """ Delete(self: RebarSetAddition) -> bool """
        pass
    def Insert(self):
        """ Insert(self: RebarSetAddition) -> bool """
        pass
    def Modify(self):
        """ Modify(self: RebarSetAddition) -> bool """
        pass
    def Select(self):
        """ Select(self: RebarSetAddition) -> bool """
        pass
    Father=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Father(self: RebarSetAddition) -> RebarSet
    Set: Father(self: RebarSetAddition)=value
    """
    LegFaces=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LegFaces(self: RebarSetAddition) -> List[RebarLegFace]
    Set: LegFaces(self: RebarSetAddition)=value
    """
# Auto-generated stub: spacing definition along a rebar guideline —
# start/end offsets (with automatic flags and offset-type enums) and a
# list of spacing zones.
class RebarSpacing(object):
    """ RebarSpacing() """
    EndOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndOffset(self: RebarSpacing) -> float
    Set: EndOffset(self: RebarSpacing)=value
    """
    EndOffsetIsAutomatic=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndOffsetIsAutomatic(self: RebarSpacing) -> bool
    Set: EndOffsetIsAutomatic(self: RebarSpacing)=value
    """
    EndOffsetType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndOffsetType(self: RebarSpacing) -> OffsetEnum
    Set: EndOffsetType(self: RebarSpacing)=value
    """
    StartOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartOffset(self: RebarSpacing) -> float
    Set: StartOffset(self: RebarSpacing)=value
    """
    StartOffsetIsAutomatic=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartOffsetIsAutomatic(self: RebarSpacing) -> bool
    Set: StartOffsetIsAutomatic(self: RebarSpacing)=value
    """
    StartOffsetType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartOffsetType(self: RebarSpacing) -> OffsetEnum
    Set: StartOffsetType(self: RebarSpacing)=value
    """
    Zones=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Zones(self: RebarSpacing) -> List[RebarSpacingZone]
    Set: Zones(self: RebarSpacing)=value
    """
    # Placeholder for the nested .NET enum type.
    OffsetEnum=None
# Auto-generated stub: one zone within a RebarSpacing — length, number of
# spaces, and per-zone spacing, each paired with a type enum.
class RebarSpacingZone(object):
    """ RebarSpacingZone() """
    Length=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Length(self: RebarSpacingZone) -> float
    Set: Length(self: RebarSpacingZone)=value
    """
    LengthType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LengthType(self: RebarSpacingZone) -> LengthEnum
    Set: LengthType(self: RebarSpacingZone)=value
    """
    NumberOfSpaces=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: NumberOfSpaces(self: RebarSpacingZone) -> int
    Set: NumberOfSpaces(self: RebarSpacingZone)=value
    """
    NumberOfSpacesType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: NumberOfSpacesType(self: RebarSpacingZone) -> SpacingEnum
    Set: NumberOfSpacesType(self: RebarSpacingZone)=value
    """
    Spacing=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Spacing(self: RebarSpacingZone) -> float
    Set: Spacing(self: RebarSpacingZone)=value
    """
    SpacingType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SpacingType(self: RebarSpacingZone) -> SpacingEnum
    Set: SpacingType(self: RebarSpacingZone)=value
    """
    # Placeholders for the nested .NET enum types referenced above.
    LengthEnum=None
    SpacingEnum=None
# Auto-generated stub: splice joining two rebar groups (subclass of
# ModelObject) with lap-length/offset/clearance parameters.
class RebarSplice(ModelObject):
    """
    RebarSplice(InputRebar1: RebarGroup,InputRebar2: RebarGroup)
    RebarSplice()
    """
    def Delete(self):
        """ Delete(self: RebarSplice) -> bool """
        pass
    def Insert(self):
        """ Insert(self: RebarSplice) -> bool """
        pass
    def Modify(self):
        """ Modify(self: RebarSplice) -> bool """
        pass
    def Select(self):
        """ Select(self: RebarSplice) -> bool """
        pass
    @staticmethod
    def __new__(self,InputRebar1=None,InputRebar2=None):
        # None defaults emulate the parameterless .NET constructor overload.
        """
        __new__(cls: type,InputRebar1: RebarGroup,InputRebar2: RebarGroup)
        __new__(cls: type)
        """
        pass
    BarPositions=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: BarPositions(self: RebarSplice) -> RebarSpliceBarPositionsEnum
    Set: BarPositions(self: RebarSplice)=value
    """
    Clearance=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Clearance(self: RebarSplice) -> float
    Set: Clearance(self: RebarSplice)=value
    """
    LapLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LapLength(self: RebarSplice) -> float
    Set: LapLength(self: RebarSplice)=value
    """
    Offset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Offset(self: RebarSplice) -> float
    Set: Offset(self: RebarSplice)=value
    """
    # NOTE: declared as the broader Reinforcement type in the docstrings,
    # even though the constructor overload takes RebarGroup arguments.
    RebarGroup1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: RebarGroup1(self: RebarSplice) -> Reinforcement
    Set: RebarGroup1(self: RebarSplice)=value
    """
    RebarGroup2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: RebarGroup2(self: RebarSplice) -> Reinforcement
    Set: RebarGroup2(self: RebarSplice)=value
    """
    Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Type(self: RebarSplice) -> RebarSpliceTypeEnum
    Set: Type(self: RebarSplice)=value
    """
    # Placeholders for the nested .NET enum types.
    RebarSpliceBarPositionsEnum=None
    RebarSpliceTypeEnum=None
# Auto-generated .NET interop stub: property placeholders only.
class RebarSplitter(BaseRebarModifier):
    """ RebarSplitter() """
    BarsToSplit=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: BarsToSplit(self: RebarSplitter) -> BarsToSplitEnum
    Set: BarsToSplit(self: RebarSplitter)=value
    """
    LapLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LapLength(self: RebarSplitter) -> float
    Set: LapLength(self: RebarSplitter)=value
    """
    LapPlacement=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LapPlacement(self: RebarSplitter) -> LapPlacementEnum
    Set: LapPlacement(self: RebarSplitter)=value
    """
    LapSide=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LapSide(self: RebarSplitter) -> LapSideEnum
    Set: LapSide(self: RebarSplitter)=value
    """
    LapType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LapType(self: RebarSplitter) -> LapTypeEnum
    Set: LapType(self: RebarSplitter)=value
    """
    SplitOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: SplitOffset(self: RebarSplitter) -> float
    Set: SplitOffset(self: RebarSplitter)=value
    """
    StaggerOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StaggerOffset(self: RebarSplitter) -> float
    Set: StaggerOffset(self: RebarSplitter)=value
    """
    StaggerType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StaggerType(self: RebarSplitter) -> StaggerTypeEnum
    Set: StaggerType(self: RebarSplitter)=value
    """
    # Nested enum types are generated as None placeholders.
    BarsToSplitEnum=None
    LapPlacementEnum=None
    LapSideEnum=None
    LapTypeEnum=None
    StaggerTypeEnum=None
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class RebarStrand(Reinforcement):
    """ RebarStrand() """
    def Delete(self):
        """ Delete(self: RebarStrand) -> bool """
        pass
    def Insert(self):
        """ Insert(self: RebarStrand) -> bool """
        pass
    def Modify(self):
        """ Modify(self: RebarStrand) -> bool """
        pass
    def Select(self):
        """ Select(self: RebarStrand) -> bool """
        pass
    EndPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndPoint(self: RebarStrand) -> Point
    Set: EndPoint(self: RebarStrand)=value
    """
    # Read-only in the wrapped API (no Set line in the generated signature).
    OnPlaneOffsets=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: OnPlaneOffsets(self: RebarStrand) -> ArrayList
    """
    Patterns=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Patterns(self: RebarStrand) -> ArrayList
    Set: Patterns(self: RebarStrand)=value
    """
    PullPerStrand=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PullPerStrand(self: RebarStrand) -> float
    Set: PullPerStrand(self: RebarStrand)=value
    """
    Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Size(self: RebarStrand) -> str
    Set: Size(self: RebarStrand)=value
    """
    StartPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartPoint(self: RebarStrand) -> Point
    Set: StartPoint(self: RebarStrand)=value
    """
    Unbondings=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Unbondings(self: RebarStrand) -> ArrayList
    Set: Unbondings(self: RebarStrand)=value
    """
# Auto-generated .NET interop stub: property placeholders only.
class RebarThreadingDataNullable(object):
    """ RebarThreadingDataNullable() """
    ExtraFabricationLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ExtraFabricationLength(self: RebarThreadingDataNullable) -> Nullable[float]
    Set: ExtraFabricationLength(self: RebarThreadingDataNullable)=value
    """
    Length=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Length(self: RebarThreadingDataNullable) -> Nullable[float]
    Set: Length(self: RebarThreadingDataNullable)=value
    """
    ThreadingType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ThreadingType(self: RebarThreadingDataNullable) -> str
    Set: ThreadingType(self: RebarThreadingDataNullable)=value
    """
# Auto-generated .NET interop stub: method bodies are placeholders ('pass');
# the docstrings record the wrapped API signatures.
class ReferenceModel(ModelObject):
    """
    ReferenceModel()
    ReferenceModel(filename: str,position: Point,scale: float)
    """
    def Delete(self):
        """ Delete(self: ReferenceModel) -> bool """
        pass
    def GetChildren(self):
        """ GetChildren(self: ReferenceModel) -> ModelObjectEnumerator """
        pass
    def GetConvertedObjects(self):
        """ GetConvertedObjects(self: ReferenceModel) -> ModelObjectEnumerator """
        pass
    def GetCurrentRevision(self):
        """ GetCurrentRevision(self: ReferenceModel) -> Revision """
        pass
    def GetReferenceModelObjectByExternalGuid(self,externalGuid):
        """ GetReferenceModelObjectByExternalGuid(self: ReferenceModel,externalGuid: str) -> ReferenceModelObject """
        pass
    def GetRevisions(self):
        """ GetRevisions(self: ReferenceModel) -> List[Revision] """
        pass
    def Insert(self):
        """ Insert(self: ReferenceModel) -> bool """
        pass
    def Modify(self):
        """ Modify(self: ReferenceModel) -> bool """
        pass
    def RefreshFile(self):
        """ RefreshFile(self: ReferenceModel) -> bool """
        pass
    def Select(self):
        """ Select(self: ReferenceModel) -> bool """
        pass
    def SetAsCurrentRevision(self,*__args):
        """
        SetAsCurrentRevision(self: ReferenceModel,modelId: int,revisionId: int) -> bool
        SetAsCurrentRevision(self: ReferenceModel,revision: Revision) -> bool
        """
        pass
    @staticmethod
    def __new__(self,filename=None,position=None,scale=None):
        """
        __new__(cls: type)
        __new__(cls: type,filename: str,position: Point,scale: float)
        """
        pass
    # Read-only in the wrapped API (no Set line in the generated signature).
    ActiveFilePath=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ActiveFilePath(self: ReferenceModel) -> str
    """
    BasePointGuid=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: BasePointGuid(self: ReferenceModel) -> Guid
    Set: BasePointGuid(self: ReferenceModel)=value
    """
    Filename=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Filename(self: ReferenceModel) -> str
    Set: Filename(self: ReferenceModel)=value
    """
    ModelGUID=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ModelGUID(self: ReferenceModel) -> Guid
    Set: ModelGUID(self: ReferenceModel)=value
    """
    Position=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position(self: ReferenceModel) -> Point
    Set: Position(self: ReferenceModel)=value
    """
    ProjectGUID=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ProjectGUID(self: ReferenceModel) -> Guid
    Set: ProjectGUID(self: ReferenceModel)=value
    """
    Rotation=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Rotation(self: ReferenceModel) -> float
    Set: Rotation(self: ReferenceModel)=value
    """
    Scale=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Scale(self: ReferenceModel) -> float
    Set: Scale(self: ReferenceModel)=value
    """
    VersionGUID=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: VersionGUID(self: ReferenceModel) -> Guid
    Set: VersionGUID(self: ReferenceModel)=value
    """
    Visibility=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Visibility(self: ReferenceModel) -> VisibilityEnum
    Set: Visibility(self: ReferenceModel)=value
    """
    # Nested types are generated as None placeholders.
    Revision=None
    VisibilityEnum=None
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class ReferenceModelObject(ModelObject):
    """
    ReferenceModelObject(ReferenceModelId: int,ID: Identifier)
    ReferenceModelObject()
    """
    def Delete(self):
        """ Delete(self: ReferenceModelObject) -> bool """
        pass
    def GetFather(self):
        """ GetFather(self: ReferenceModelObject) -> ReferenceModelObject """
        pass
    def GetReferenceModel(self):
        """ GetReferenceModel(self: ReferenceModelObject) -> ReferenceModel """
        pass
    def Insert(self):
        """ Insert(self: ReferenceModelObject) -> bool """
        pass
    def Modify(self):
        """ Modify(self: ReferenceModelObject) -> bool """
        pass
    def Select(self):
        """ Select(self: ReferenceModelObject) -> bool """
        pass
    @staticmethod
    def __new__(self,ReferenceModelId=None,ID=None):
        """
        __new__(cls: type,ReferenceModelId: int,ID: Identifier)
        __new__(cls: type)
        """
        pass
# Auto-generated .NET interop stub: method bodies are placeholders ('pass');
# the docstrings record the wrapped API signatures.
class Seam(BaseComponent):
    """ Seam() """
    def Delete(self):
        """ Delete(self: Seam) -> bool """
        pass
    def GetInputPolygon(self):
        """ GetInputPolygon(self: Seam) -> Polygon """
        pass
    def GetPrimaryObject(self):
        """ GetPrimaryObject(self: Seam) -> ModelObject """
        pass
    def GetSecondaryObjects(self):
        """ GetSecondaryObjects(self: Seam) -> ArrayList """
        pass
    def GetStartAndEndPositions(self,StartPoint,EndPoint):
        """ GetStartAndEndPositions(self: Seam,StartPoint: Point,EndPoint: Point) -> (bool,Point,Point) """
        pass
    def Insert(self):
        """ Insert(self: Seam) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Seam) -> bool """
        pass
    def Select(self):
        """ Select(self: Seam) -> bool """
        pass
    def SetInputPolygon(self,InputPolygon):
        """ SetInputPolygon(self: Seam,InputPolygon: Polygon) -> bool """
        pass
    def SetInputPositions(self,StartPoint,EndPoint):
        """ SetInputPositions(self: Seam,StartPoint: Point,EndPoint: Point) -> bool """
        pass
    def SetPrimaryObject(self,M):
        """ SetPrimaryObject(self: Seam,M: ModelObject) -> bool """
        pass
    def SetSecondaryObject(self,M):
        """ SetSecondaryObject(self: Seam,M: ModelObject) -> bool """
        pass
    def SetSecondaryObjects(self,Secondaries):
        """ SetSecondaryObjects(self: Seam,Secondaries: ArrayList) -> bool """
        pass
    AutoDirectionType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: AutoDirectionType(self: Seam) -> AutoDirectionTypeEnum
    Set: AutoDirectionType(self: Seam)=value
    """
    AutoPosition=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: AutoPosition(self: Seam) -> bool
    Set: AutoPosition(self: Seam)=value
    """
    Class=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Class(self: Seam) -> int
    Set: Class(self: Seam)=value
    """
    Code=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Code(self: Seam) -> str
    Set: Code(self: Seam)=value
    """
    # The generator emitted no signature strings for the next three.
    InputPolygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    PrimaryObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    SecondaryObjects=property(lambda self: object(),lambda self,v: None,lambda self: None)
    Status=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Status(self: Seam) -> ConnectionStatusEnum
    """
    UpVector=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: UpVector(self: Seam) -> Vector
    Set: UpVector(self: Seam)=value
    """
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class SingleRebar(Reinforcement):
    """ SingleRebar() """
    def Delete(self):
        """ Delete(self: SingleRebar) -> bool """
        pass
    def GetRebarSet(self):
        """ GetRebarSet(self: SingleRebar) -> RebarSet """
        pass
    def Insert(self):
        """ Insert(self: SingleRebar) -> bool """
        pass
    def Modify(self):
        """ Modify(self: SingleRebar) -> bool """
        pass
    def Select(self):
        """ Select(self: SingleRebar) -> bool """
        pass
    EndHook=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndHook(self: SingleRebar) -> RebarHookData
    Set: EndHook(self: SingleRebar)=value
    """
    Polygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Polygon(self: SingleRebar) -> Polygon
    Set: Polygon(self: SingleRebar)=value
    """
    Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Size(self: SingleRebar) -> str
    Set: Size(self: SingleRebar)=value
    """
    StartHook=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartHook(self: SingleRebar) -> RebarHookData
    Set: StartHook(self: SingleRebar)=value
    """
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class Solid(object):
    # no doc
    def GetAllIntersectionPoints(self,point1,point2,point3):
        """ GetAllIntersectionPoints(self: Solid,point1: Point,point2: Point,point3: Point) -> IEnumerator """
        pass
    def GetCutPart(self,CuttingPart):
        """ GetCutPart(self: Solid,CuttingPart: Solid) -> ShellEnumerator """
        pass
    def GetEdgeEnumerator(self):
        """ GetEdgeEnumerator(self: Solid) -> EdgeEnumerator """
        pass
    def GetFaceEnumerator(self):
        """ GetFaceEnumerator(self: Solid) -> FaceEnumerator """
        pass
    def Intersect(self,*__args):
        """
        Intersect(self: Solid,point1: Point,point2: Point,point3: Point) -> ArrayList
        Intersect(self: Solid,point1: Point,point2: Point) -> ArrayList
        Intersect(self: Solid,line: LineSegment) -> ArrayList
        """
        pass
    def IntersectAllFaces(self,point1,point2,point3):
        """ IntersectAllFaces(self: Solid,point1: Point,point2: Point,point3: Point) -> IEnumerator """
        pass
    # Read-only in the wrapped API (no Set line in the generated signatures).
    MaximumPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: MaximumPoint(self: Solid) -> Point
    """
    MinimumPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: MinimumPoint(self: Solid) -> Point
    """
    # Nested enum type generated as a None placeholder.
    SolidCreationTypeEnum=None
# Auto-generated .NET interop stub: property placeholders only.
class StrandUnbondingData(object):
    """ StrandUnbondingData() """
    FromEnd=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: FromEnd(self: StrandUnbondingData) -> float
    Set: FromEnd(self: StrandUnbondingData)=value
    """
    FromStart=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: FromStart(self: StrandUnbondingData) -> float
    Set: FromStart(self: StrandUnbondingData)=value
    """
    MiddleToEnd=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: MiddleToEnd(self: StrandUnbondingData) -> float
    Set: MiddleToEnd(self: StrandUnbondingData)=value
    """
    MiddleToStart=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: MiddleToStart(self: StrandUnbondingData) -> float
    Set: MiddleToStart(self: StrandUnbondingData)=value
    """
    StrandIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StrandIndex(self: StrandUnbondingData) -> int
    Set: StrandIndex(self: StrandUnbondingData)=value
    """
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class SurfaceObject(ModelObject):
    """ SurfaceObject() """
    def CreateInstanceDelegate(self,*args):
        """ CreateInstanceDelegate(self: SurfaceObject,surface: dotSurfaceObject_t) -> (int,dotSurfaceObject_t) """
        pass
    def Delete(self):
        """ Delete(self: SurfaceObject) -> bool """
        pass
    def Insert(self):
        """ Insert(self: SurfaceObject) -> bool """
        pass
    def Modify(self):
        """ Modify(self: SurfaceObject) -> bool """
        pass
    def ModifyInstanceDelegate(self,*args):
        """ ModifyInstanceDelegate(self: SurfaceObject,surface: dotSurfaceObject_t) -> (int,dotSurfaceObject_t) """
        pass
    def Select(self):
        """ Select(self: SurfaceObject) -> bool """
        pass
    def SelectInstanceDelegate(self,*args):
        """ SelectInstanceDelegate(self: SurfaceObject,surface: dotSurfaceObject_t) -> (int,dotSurfaceObject_t) """
        pass
    Class=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Class(self: SurfaceObject) -> str
    Set: Class(self: SurfaceObject)=value
    """
    CreateHoles=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CreateHoles(self: SurfaceObject) -> bool
    Set: CreateHoles(self: SurfaceObject)=value
    """
    Father=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Father(self: SurfaceObject) -> ModelObject
    Set: Father(self: SurfaceObject)=value
    """
    # The generator emitted no signature string for this one.
    ModelObjectType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: SurfaceObject) -> str
    Set: Name(self: SurfaceObject)=value
    """
    Polymesh=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Polymesh(self: SurfaceObject) -> FacetedBrep
    Set: Polymesh(self: SurfaceObject)=value
    """
    Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Type(self: SurfaceObject) -> str
    Set: Type(self: SurfaceObject)=value
    """
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class SurfaceTreatment(ModelObject):
    """ SurfaceTreatment() """
    def Delete(self):
        """ Delete(self: SurfaceTreatment) -> bool """
        pass
    def Insert(self):
        """ Insert(self: SurfaceTreatment) -> bool """
        pass
    def Modify(self):
        """ Modify(self: SurfaceTreatment) -> bool """
        pass
    def Select(self):
        """ Select(self: SurfaceTreatment) -> bool """
        pass
    Class=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Class(self: SurfaceTreatment) -> str
    Set: Class(self: SurfaceTreatment)=value
    """
    Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Color(self: SurfaceTreatment) -> SurfaceColorEnum
    Set: Color(self: SurfaceTreatment)=value
    """
    CutByFatherBooleans=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: CutByFatherBooleans(self: SurfaceTreatment) -> bool
    Set: CutByFatherBooleans(self: SurfaceTreatment)=value
    """
    EndPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: EndPoint(self: SurfaceTreatment) -> Point
    Set: EndPoint(self: SurfaceTreatment)=value
    """
    Father=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Father(self: SurfaceTreatment) -> Part
    Set: Father(self: SurfaceTreatment)=value
    """
    Material=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Material(self: SurfaceTreatment) -> Material
    Set: Material(self: SurfaceTreatment)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: SurfaceTreatment) -> str
    Set: Name(self: SurfaceTreatment)=value
    """
    Polygon=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Polygon(self: SurfaceTreatment) -> Contour
    Set: Polygon(self: SurfaceTreatment)=value
    """
    Position=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position(self: SurfaceTreatment) -> Position
    Set: Position(self: SurfaceTreatment)=value
    """
    StartPoint=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: StartPoint(self: SurfaceTreatment) -> Point
    Set: StartPoint(self: SurfaceTreatment)=value
    """
    Thickness=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Thickness(self: SurfaceTreatment) -> float
    Set: Thickness(self: SurfaceTreatment)=value
    """
    Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Type(self: SurfaceTreatment) -> SurfaceTypeEnum
    Set: Type(self: SurfaceTreatment)=value
    """
    TypeName=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: TypeName(self: SurfaceTreatment) -> str
    Set: TypeName(self: SurfaceTreatment)=value
    """
    # Nested enum types are generated as None placeholders.
    SurfaceColorEnum=None
    SurfaceTypeEnum=None
# Auto-generated .NET interop stub: method bodies are placeholders ('pass');
# the docstrings record the wrapped API signatures.
class Task(ModelObject):
    """
    Task()
    Task(ID: Identifier)
    """
    def AddObjectsToTask(self,ModelObjects):
        """ AddObjectsToTask(self: Task,ModelObjects: ArrayList) -> bool """
        pass
    def Delete(self):
        """ Delete(self: Task) -> bool """
        pass
    @staticmethod
    def GetAllTasksOfSelectedObjects():
        """ GetAllTasksOfSelectedObjects() -> ModelObjectEnumerator """
        pass
    def GetDependencies(self):
        """ GetDependencies(self: Task) -> ModelObjectEnumerator """
        pass
    def GetFathers(self):
        """ GetFathers(self: Task) -> ModelObjectEnumerator """
        pass
    def Insert(self):
        """ Insert(self: Task) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Task) -> bool """
        pass
    def RemoveObjectsFromTask(self,ModelObjects):
        """ RemoveObjectsFromTask(self: Task,ModelObjects: ArrayList) -> bool """
        pass
    def Select(self):
        """ Select(self: Task) -> bool """
        pass
    @staticmethod
    def __new__(self,ID=None):
        """
        __new__(cls: type)
        __new__(cls: type,ID: Identifier)
        """
        pass
    ActualEndDate=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ActualEndDate(self: Task) -> DateTime
    Set: ActualEndDate(self: Task)=value
    """
    ActualStartDate=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ActualStartDate(self: Task) -> DateTime
    Set: ActualStartDate(self: Task)=value
    """
    ActualWorkAmount=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: ActualWorkAmount(self: Task) -> float
    Set: ActualWorkAmount(self: Task)=value
    """
    Completeness=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Completeness(self: Task) -> int
    Set: Completeness(self: Task)=value
    """
    Critical=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Critical(self: Task) -> bool
    Set: Critical(self: Task)=value
    """
    Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Description(self: Task) -> str
    Set: Description(self: Task)=value
    """
    Local=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Local(self: Task) -> bool
    Set: Local(self: Task)=value
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: Task) -> str
    Set: Name(self: Task)=value
    """
    PlannedEndDate=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PlannedEndDate(self: Task) -> DateTime
    Set: PlannedEndDate(self: Task)=value
    """
    PlannedStartDate=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PlannedStartDate(self: Task) -> DateTime
    Set: PlannedStartDate(self: Task)=value
    """
    PlannedWorkAmount=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PlannedWorkAmount(self: Task) -> float
    Set: PlannedWorkAmount(self: Task)=value
    """
    Scenario=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Scenario(self: Task) -> HierarchicObject
    Set: Scenario(self: Task)=value
    """
    Url=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Url(self: Task) -> str
    Set: Url(self: Task)=value
    """
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class TaskDependency(ModelObject):
    """
    TaskDependency()
    TaskDependency(primary: Task,secondary: Task)
    """
    def Delete(self):
        """ Delete(self: TaskDependency) -> bool """
        pass
    def Insert(self):
        """ Insert(self: TaskDependency) -> bool """
        pass
    def Modify(self):
        """ Modify(self: TaskDependency) -> bool """
        pass
    def Select(self):
        """ Select(self: TaskDependency) -> bool """
        pass
    @staticmethod
    def __new__(self,primary=None,secondary=None):
        """
        __new__(cls: type)
        __new__(cls: type,primary: Task,secondary: Task)
        """
        pass
    DependencyType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: DependencyType(self: TaskDependency) -> DependencyTypeEnum
    Set: DependencyType(self: TaskDependency)=value
    """
    Lag=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Lag(self: TaskDependency) -> int
    Set: Lag(self: TaskDependency)=value
    """
    Local=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Local(self: TaskDependency) -> bool
    Set: Local(self: TaskDependency)=value
    """
    Primary=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Primary(self: TaskDependency) -> Task
    Set: Primary(self: TaskDependency)=value
    """
    Secondary=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Secondary(self: TaskDependency) -> Task
    Set: Secondary(self: TaskDependency)=value
    """
    # Nested enum type generated as a None placeholder.
    DependencyTypeEnum=None
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class TaskWorktype(ModelObject):
    """ TaskWorktype() """
    def Delete(self):
        """ Delete(self: TaskWorktype) -> bool """
        pass
    def Insert(self):
        """ Insert(self: TaskWorktype) -> bool """
        pass
    def Modify(self):
        """ Modify(self: TaskWorktype) -> bool """
        pass
    def Select(self):
        """ Select(self: TaskWorktype) -> bool """
        pass
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Name(self: TaskWorktype) -> str
    Set: Name(self: TaskWorktype)=value
    """
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class TransformationPlane(object):
    """
    TransformationPlane()
    TransformationPlane(CoordinateSystem: CoordinateSystem)
    TransformationPlane(Origo: Point,Xvector: Vector,Yvector: Vector)
    """
    def ToString(self):
        """ ToString(self: TransformationPlane) -> str """
        pass
    @staticmethod
    def __new__(self,*__args):
        """
        __new__(cls: type)
        __new__(cls: type,CoordinateSystem: CoordinateSystem)
        __new__(cls: type,Origo: Point,Xvector: Vector,Yvector: Vector)
        """
        pass
    # Read-only in the wrapped API (no Set line in the generated signatures).
    TransformationMatrixToGlobal=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: TransformationMatrixToGlobal(self: TransformationPlane) -> Matrix
    """
    TransformationMatrixToLocal=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: TransformationMatrixToLocal(self: TransformationPlane) -> Matrix
    """
# Auto-generated stub exception type; no members beyond the docstring.
class UndefinedCurveDirectionException(ConnectiveGeometryException):
    """ UndefinedCurveDirectionException() """
# Auto-generated stub exception type; no members beyond the docstring.
class UnsupportedChamferException(ConnectiveGeometryException):
    """ UnsupportedChamferException() """
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class Weld(BaseWeld):
    """ Weld() """
    def Delete(self):
        """ Delete(self: Weld) -> bool """
        pass
    def GetLogicalWeld(self,LogicalWeld):
        """ GetLogicalWeld(self: Weld,LogicalWeld: LogicalWeld) -> (bool,LogicalWeld) """
        pass
    def Insert(self):
        """ Insert(self: Weld) -> bool """
        pass
    def Modify(self):
        """ Modify(self: Weld) -> bool """
        pass
    def Select(self):
        """ Select(self: Weld) -> bool """
        pass
    Direction=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Direction(self: Weld) -> Vector
    Set: Direction(self: Weld)=value
    """
    Position=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: Position(self: Weld) -> WeldPositionEnum
    Set: Position(self: Weld)=value
    """
    # Nested enum type generated as a None placeholder.
    WeldPositionEnum=None
# Auto-generated .NET interop stub: method bodies are placeholders ('pass').
class WorkPlaneHandler(object):
    # no doc
    def GetCurrentTransformationPlane(self):
        """ GetCurrentTransformationPlane(self: WorkPlaneHandler) -> TransformationPlane """
        pass
    def SetCurrentTransformationPlane(self,TransformationPlane):
        """ SetCurrentTransformationPlane(self: WorkPlaneHandler,TransformationPlane: TransformationPlane) -> bool """
        pass
# variables with complex values
|
# input - output from sh int addr
# output - list of words containing ip/prefix
from robot.api import logger
def Find_IPV4_In_Text(text):
    """Find and return all IPv4 addresses in the given string.

    A word is treated as an IPv4 address if it contains exactly three
    dots and one slash (i.e. an address with a /prefix length).

    :param text: string to search.
    :type text: str
    :return: IPv4 addresses found in string.
    :rtype: list of str
    """
    return [
        word for word in text.split()
        if word.count('.') == 3 and word.count('/') == 1
    ]
def Find_IPV6_In_Text(text):
    """Find and return all IPv6 addresses in the given string.

    A word is treated as an IPv6 address if it contains at least two
    colons and exactly one slash (i.e. an address with a /prefix length).

    :param text: string to search.
    :type text: str
    :return: IPv6 addresses found in string.
    :rtype: list of str
    """
    return [
        word for word in text.split()
        if word.count(':') >= 2 and word.count('/') == 1
    ]
# input - output from sh hardware interface_name
# output - first word containing a MAC address, or '' if none is found
def Find_MAC_In_Text(text):
    """Find and return the first MAC address in the given string.

    A word is treated as a MAC address if it contains exactly five colons.

    :param text: string to search.
    :type text: str
    :return: first MAC address found, or '' when there is none.
    :rtype: str
    """
    candidates = (word for word in text.split() if word.count(':') == 5)
    return next(candidates, '')
# input - output from sh ip arp command
# output - True if a matching entry is found, else False
def parse_arp(info, intf, ip, mac):
    """Search the VPP ARP table dump for a specific entry.

    :param info: ARP list from VPP console.
    :param intf: VPP-internal name of the interface configured with this ARP entry.
    :param ip: IP address of the ARP entry.
    :param mac: MAC address of the ARP entry.
    :type info: str
    :type intf: str
    :type ip: str
    :type mac: str
    :returns: True if a matching entry is found, else False
    :rtype: bool
    """
    needles = (intf, ip, mac)
    for line in info.splitlines():
        if all(needle in line for needle in needles):
            print("ARP Found:"+line)
            return True
    logger.debug("ARP not Found")
    return False
# input - output from the neighbor table dump (show ip neighbors)
# output - True if a matching entry is found, else False
def parse_neighbor(info, intf, ip, mac):
    """Search the VPP neighbor table dump for a specific entry.

    :param info: Neighbor list from VPP console.
    :param intf: VPP-internal name of the interface configured with this neighbor.
    :param ip: IP address of the neighbor entry.
    :param mac: MAC address of the neighbor entry.
    :type info: str
    :type intf: str
    :type ip: str
    :type mac: str
    :returns: True if a matching entry is found, else False
    :rtype: bool
    """
    needles = (intf, ip, mac)
    for line in info.splitlines():
        if all(needle in line for needle in needles):
            print("Neighbor Found:"+line)
            return True
    logger.debug("Neighbor not Found")
    return False
# input - output from the STN rule dump (not the ARP table)
# output - (ip_address, iface, next_node) tuple
def parse_stn_rule(info):
    """Parse an STN rule dump from the VPP console.

    Each matched line is expected to look like "<key>... <value>"; the first
    token selects the field and the second token is its value.

    :param info: STN rule output from the VPP console.
    :type info: str
    :returns: (ip_address, iface, next_node) of the rule.
    :rtype: tuple of str
    :raises KeyError: if any of the three fields is absent from *info*.
    """
    state = {}
    for line in info.splitlines():
        # Split once per line instead of up to four times as before.
        tokens = line.strip().split()
        try:
            if "address" in tokens[0]:
                state['ip_address'] = tokens[1]
            elif "iface" in tokens[0]:
                state['iface'] = tokens[1]
            elif "next_node" in tokens[0]:
                state['next_node'] = tokens[1]
        except IndexError:
            # Blank lines or lines without a value token are skipped,
            # matching the original behaviour.
            pass
    return state['ip_address'], state['iface'], state['next_node']
def parse_memif_info(info):
    """Parse a memif interface dump from the VPP console.

    :param info: memif dump (socket table followed by interface details).
    :type info: str
    :returns: state items such as "id=...", "socket=...", plus
        "enabled=0/1", "role=master/slave" and "connected=0/1" defaults.
    :rtype: list of str
    """
    state = []
    sockets_line = []
    for line in info.splitlines():
        tokens = line.strip().split()
        # Skip blank and whitespace-only lines. Previously only `if line:`
        # guarded the indexing below, so a line of spaces raised an
        # uncaught IndexError on `line.strip().split()[0]`.
        if not tokens:
            continue
        try:
            # Socket-table rows start with a numeric socket id; remember the
            # whole line so it can be matched against each interface below.
            int(tokens[0])
            sockets_line.append(line)
        except ValueError:
            pass
        if tokens[0] == "flags":
            if "admin-up" in line:
                state.append("enabled=1")
            if "slave" in line:
                state.append("role=slave")
            if "connected" in line:
                state.append("connected=1")
        if tokens[0] == "socket-id":
            try:
                socket_id = int(tokens[1])
                state.append("id=" + tokens[3])
                for sock_line in sockets_line:
                    sock_tokens = sock_line.strip().split()
                    try:
                        if int(sock_tokens[0]) == socket_id:
                            state.append("socket=" + sock_tokens[-1])
                    except ValueError:
                        pass
            except ValueError:
                pass
    # Defaults for anything the dump did not report explicitly.
    if "enabled=1" not in state:
        state.append("enabled=0")
    if "role=slave" not in state:
        state.append("role=master")
    if "connected=1" not in state:
        state.append("connected=0")
    return state
|
#!/usr/bin/python3
import xml.etree.ElementTree as ET
from datetime import datetime
class LocationTable():
    """Collection of Meetpunt measurement points plus the timestamp of the
    last configuration change, convertible to a DATEX II site table."""

    def __init__(self, tijd=None, meetpunten=None):
        """Create a table.

        :param tijd: datetime of the last configuration change
            (default: the current time, evaluated per call).
        :param meetpunten: list of Meetpunt objects (default: a new empty list).
        """
        super(LocationTable, self).__init__()
        # None sentinels replace the former `tijd=datetime.now(), meetpunten=[]`
        # defaults: a datetime default is frozen at import time, and a mutable
        # list default is shared between all instances created with it.
        self.tijd_laatste_config_wijziging = datetime.now() if tijd is None else tijd
        self.meetpunten = [] if meetpunten is None else meetpunten

    def __iter__(self):
        """Iterate over the contained Meetpunt objects."""
        return iter(self.meetpunten)

    @classmethod
    def fromXmlString(cls, string):
        """Build a LocationTable from a configuration XML string."""
        root = ET.fromstring(string)
        meetpunten = []
        try:
            time = datetime.fromisoformat(root.find('tijd_laatste_config_wijziging').text)
        except AttributeError:  # datetime.datetime.fromisoformat only >= Python 3.7
            time = datetime.now()
        for meetpunt in root.iter('meetpunt'):
            mp = Meetpunt.fromXml(meetpunt)
            meetpunten.append(mp)
        return cls(time, meetpunten)

    def groupByLve(self):
        """Returns a dict of {lve_id: [mp1, mp2, ...], lve_id2: ...}"""
        meetpunten = {}
        for mp in self.meetpunten:
            lve = mp.lve_nr
            meetpunten.setdefault(lve, []).append(mp)  # add key or create dict and add key
        return meetpunten

    def groupByBeschrijvende_id(self):
        """Group points by the middle part of their beschrijvende_id.

        Each value is a list of (prefix_char, position_char, suffix, mp) tuples.
        """
        meetpunten = {}
        for mp in self.meetpunten:
            a, b, c, d = mp.beschrijvende_id[0], mp.beschrijvende_id[1:4], mp.beschrijvende_id[4], mp.beschrijvende_id[5:7]
            meetpunten.setdefault(b, []).append((a, c, d, mp))  # add key or create dict and add key
        return meetpunten

    def byId(self):
        """Returns a dict of {unieke_id: mp1_object, unieke_id2: mp2_object, ...}"""
        return {mp.unieke_id: mp for mp in self.meetpunten}

    def toDatexXml(self):
        """Render the table as a DATEX II d2LogicalModel Element."""
        d2lm = ET.Element('d2LogicalModel', {
            'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xmlns': 'http://datex2.eu/schema/2/2_0',
            'modelBaseVersion': '2',
            'xsi:schemaLocation': 'http://datex2.eu/schema/2/2_0 http://datex2.eu/schema/2/2_3/DATEXIISchema_2_2_3.xsd'
        })
        ex = ET.SubElement(d2lm, 'exchange')
        sup_id = ET.SubElement(ex, 'supplierIdentification')
        country = ET.SubElement(sup_id, 'country')
        country.text = 'be'
        nid = ET.SubElement(sup_id, 'nationalIdentifier')
        nid.text = 'required'
        pub = ET.SubElement(d2lm, 'payloadPublication', {
            'xsi:type': 'MeasurementSiteTablePublication',
            'lang': 'nl'
        })
        pubt = ET.SubElement(pub, 'publicationTime')
        pubt.text = self.tijd_laatste_config_wijziging.isoformat()
        sup_id = ET.SubElement(pub, 'publicationCreator')
        country = ET.SubElement(sup_id, 'country')
        country.text = 'be'
        nid = ET.SubElement(sup_id, 'nationalIdentifier')
        nid.text = 'required'
        hi = ET.SubElement(pub, 'headerInformation')
        conf = ET.SubElement(hi, 'confidentiality')
        conf.text = 'noRestriction'
        instat = ET.SubElement(hi, 'informationStatus')
        instat.text = 'technicalExercise'  # 'real'
        # The table version is derived from the config-change timestamp so a
        # new configuration automatically yields a new version string.
        mst = ET.SubElement(pub, 'measurementSiteTable', {
            'id': 'config',
            'version': str(int(self.tijd_laatste_config_wijziging.timestamp()))
        })
        for mp in self:
            mst.append(mp.toDatexXml())
        return d2lm
class Meetpunt():
    """One traffic measurement site (loop detector) on a specific lane.

    Instances are normally built via :meth:`fromXml` from the configuration
    XML and serialized to a DATEX II ``measurementSiteRecord`` with
    :meth:`toDatexXml`. Most attributes (``rijstrook``, ``lve_nr``, ...) are
    assigned externally rather than in ``__init__``.
    """

    def __init__(self, unieke_id):
        super(Meetpunt, self).__init__()
        # Unique numeric id of the site in the source configuration.
        self.unieke_id = unieke_id

    @property
    def datex_lane_type(self):
        """Map the local 'Rijstrook' lane code to a DATEX II lane enum value.

        Returns None when the code has no DATEX II equivalent.

        BUGFIX: exact single-letter codes are checked before the 'R<n>'
        prefix. Previously a plain 'R' fell into the prefix branch, where
        ``int('')`` raised ValueError and the 'hardShoulder' branch was
        unreachable.
        """
        if self.rijstrook == 'R':
            return 'hardShoulder'
        elif self.rijstrook == 'B':
            return 'busLane'
        elif self.rijstrook == 'S':
            return 'rushHourLane'
        elif self.rijstrook.startswith('P'):
            return 'emergencyLane'
        elif self.rijstrook.startswith('R'):
            try:
                lane_nr = int(self.rijstrook[1:])
            except ValueError:
                # Non-numeric suffix: no DATEX II mapping.
                return None
            if lane_nr >= 10:
                # Source numbering starts at 10; DATEX II lanes are
                # 'lane1', 'lane2', ...
                return 'lane' + str(lane_nr - 9)
            return None
        return None

    @property
    def datex_direction(self):
        """Return 'opposite' for contraflow ('TR…') lane codes, else None."""
        if self.rijstrook.startswith('TR'):
            return 'opposite'
        return None

    def __str__(self):
        # Human-readable dump of all externally assigned attributes.
        return """
Meetpunt {0}:
beschrijvende_id: {1}
volledige_naam: {2}
Ident_8: {3}
lve_nr: {4}
Kmp_Rsys: {5}
Rijstrook: {6}
lengtegraad_EPSG_4326: {7}
breedtegraad_EPSG_4326: {8}
""".format(self.unieke_id,
           self.beschrijvende_id,
           self.volledige_naam,
           self.ident_8,
           self.lve_nr,
           self.kmp_Rsys,
           self.rijstrook,
           self.lengtegraad_EPSG_4326,
           self.breedtegraad_EPSG_4326)

    @classmethod
    def fromXml(cls, mp_xml):
        """Build a Meetpunt from one configuration XML element.

        Decimal commas in the source data are normalized to points before
        float conversion; a missing or unparsable Kmp_Rsys becomes None.
        """
        mp = cls(int(mp_xml.get('unieke_id')))
        mp.beschrijvende_id = mp_xml.find('beschrijvende_id').text
        mp.volledige_naam = mp_xml.find('volledige_naam').text
        mp.ident_8 = mp_xml.find('Ident_8').text
        mp.lve_nr = int(mp_xml.find('lve_nr').text)
        try:
            mp.kmp_Rsys = float(mp_xml.find('Kmp_Rsys').text.replace(',', '.'))
        except (ValueError, AttributeError):
            # AttributeError covers a missing element (.text on None).
            mp.kmp_Rsys = None
        mp.rijstrook = mp_xml.find('Rijstrook').text
        mp.lengtegraad_EPSG_4326 = float(mp_xml.find('lengtegraad_EPSG_4326').text.replace(',', '.'))
        mp.breedtegraad_EPSG_4326 = float(mp_xml.find('breedtegraad_EPSG_4326').text.replace(',', '.'))
        return mp

    def toDatexXml(self, versiontime=None):
        """Serialize this site to a DATEX II ``measurementSiteRecord`` element.

        versiontime: optional datetime.datetime record-version stamp;
            defaults to the time of the call. (BUGFIX: the default used to be
            ``datetime.now()`` in the signature, which is evaluated once at
            import time, not per call.)
        """
        if versiontime is None:
            versiontime = datetime.now()
        msr = ET.Element('measurementSiteRecord')
        msr.set('id', str(self.unieke_id))
        msr.set('version', '1')
        mer = ET.SubElement(msr, 'measurementEquipmentReference')
        mer.text = str(self.lve_nr)
        msn = ET.SubElement(msr, 'measurementSiteName')
        msn_val = ET.SubElement(ET.SubElement(msn, 'values'), 'value')
        msn_val.set('lang', 'nl')  # default publication language
        msn_val.text = self.volledige_naam
        msnol = ET.SubElement(msr, 'measurementSiteNumberOfLanes')
        msnol.text = '1'
        msi = ET.SubElement(msr, 'measurementSiteIdentification')
        msi.text = self.beschrijvende_id
        # Vehicle-length classes (bounds in metres) used by the loop data.
        LENGTH_CHARACTERISTICS_BY_CATEGORY = {
            1: (('greaterThan', 0), ('lessThan', 1.00)),
            2: (('greaterThan', 1.00), ('lessThan', 4.90)),
            3: (('greaterThan', 4.90), ('lessThan', 6.90)),
            4: (('greaterThan', 6.90), ('lessThan', 12.00)),
            5: (('greaterThan', 12.00),)
        }
        VALUE_TYPES = ('trafficFlow', 'trafficSpeed')
        # One characteristics entry per (length class, value type);
        # DATEX II indices are 1-based: (category-1)*len(VALUE_TYPES) + i.
        calc_index = 0
        for category, length_characteristics in LENGTH_CHARACTERISTICS_BY_CATEGORY.items():
            i = 0
            for value_type in VALUE_TYPES:
                i += 1
                calc_index = (len(VALUE_TYPES)*(category-1)) + i
                msc = ET.SubElement(msr, 'measurementSpecificCharacteristics', {'index': str(calc_index)})
                msc = ET.SubElement(msc, 'measurementSpecificCharacteristics')
                ET.SubElement(msc, 'period').text = '60.0'
                ET.SubElement(msc, 'specificLane').text = self.datex_lane_type
                ET.SubElement(msc, 'specificMeasurementValueType').text = value_type
                svc = ET.SubElement(msc, 'specificVehicleCharacteristics')
                for comparison_op, length in length_characteristics:
                    lc = ET.SubElement(svc, 'lengthCharacteristic')
                    ET.SubElement(lc, 'comparisonOperator').text = comparison_op
                    ET.SubElement(lc, 'vehicleLength').text = str(length)
        # Trailing entry (index right after the last one above): overall
        # traffic concentration, with no vehicle-length restriction.
        msc = ET.SubElement(msr, 'measurementSpecificCharacteristics', {'index': str(calc_index+1)})
        msc = ET.SubElement(msc, 'measurementSpecificCharacteristics')
        ET.SubElement(msc, 'period').text = '60.0'
        ET.SubElement(msc, 'specificLane').text = self.datex_lane_type
        ET.SubElement(msc, 'specificMeasurementValueType').text = 'trafficConcentration'
        svc = ET.SubElement(msc, 'specificVehicleCharacteristics')
        msl = ET.SubElement(msr, 'measurementSiteLocation')
        msl.set('xsi:type', 'Point')
        pc = ET.SubElement(ET.SubElement(msl, 'pointByCoordinates'), 'pointCoordinates')
        # Coordinates are WGS84 (EPSG:4326); DATEX II expects ETRS89, but
        # using WGS84 interchangeably introduces errors below ~1 m.
        lat = ET.SubElement(pc, 'latitude')
        lat.text = str(self.breedtegraad_EPSG_4326)
        lon = ET.SubElement(pc, 'longitude')
        lon.text = str(self.lengtegraad_EPSG_4326)
        return msr
|
<gh_stars>100-1000
import os
from StockAnalysisSystem.core.Utility.common import *
from StockAnalysisSystem.core.Utility.TagsLib import *
from StockAnalysisSystem.core.Utility.df_utility import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.core.Utility.WaitingWindow import *
from StockAnalysisSystem.core.Utility.AnalyzerUtility import *
from StockAnalysisSystem.core.Utility.relative_import import RelativeImport
with RelativeImport(__file__):
from BlackListUi import *
from StockChartUi import StockChartUi
from StockMemoEditor import StockMemoEditor
# ------------------------------------------------ Memo Extra Interface ------------------------------------------------
class MemoExtra:
    """Interface for an extra action column shown in the stock-memo UI.

    Subclasses override the entry hooks and the *_text methods; the base
    class only remembers the owning memo UI handed in via set_memo_ui().
    """

    def __init__(self):
        self.__memo_ui = None

    def set_memo_ui(self, memo_ui):
        """Attach the owning memo UI instance."""
        self.__memo_ui = memo_ui

    # def notify_memo_ui_update(self):
    #     self.__memo_ui.update_list()

    def title_text(self) -> str:
        """Column title for this extra."""
        pass

    def global_entry(self):
        """Hook: the table-wide (global) entry was activated."""
        pass

    def global_entry_text(self) -> str:
        """Label for the global entry."""
        pass

    def security_entry(self, security: str):
        """Hook: the per-security entry was activated."""
        pass

    def security_entry_text(self, security: str) -> str:
        """Label for the per-security entry."""
        pass
class DummyMemoExtra(MemoExtra):
    """Placeholder MemoExtra that only prints its entry points; handy for UI tests."""

    def __init__(self, title_text: str):
        super(DummyMemoExtra, self).__init__()
        self.__title_text = title_text

    def global_entry(self):
        print('global_entry')

    def security_entry(self, security: str):
        print('security_entry')

    def title_text(self) -> str:
        return self.__title_text

    def global_entry_text(self) -> str:
        return 'DummyMemoExtra'

    def security_entry_text(self, security: str) -> str:
        return self.__title_text + ': ' + security
# ---------------------------------------------------- Memo Extras -----------------------------------------------------
# --------------------------------- Editor ---------------------------------
class MemoExtra_MemoContent(MemoExtra):
    """Memo column: opens the memo editor and previews the newest memo text."""

    def __init__(self, memo_context: dict):
        self.__memo_context = memo_context
        # BUGFIX: guard every lookup — the original dereferenced
        # memo_context.get('sas_if') unguarded while guarding 'editor',
        # crashing when memo_context is None.
        self.__sas_if: sasIF = self.__memo_context.get('sas_if') \
            if self.__memo_context is not None else None
        self.__memo_editor: StockMemoEditor = self.__memo_context.get('editor') \
            if self.__memo_context is not None else None
        super(MemoExtra_MemoContent, self).__init__()

    def global_entry(self):
        pass

    def security_entry(self, security: str):
        """Open the memo editor pre-selected on *security*'s first memo."""
        if self.__memo_editor is not None:
            self.__memo_editor.select_security(security)
            self.__memo_editor.select_memo_by_list_index(0)
            self.__memo_editor.exec()

    def title_text(self) -> str:
        return 'Memo'

    def global_entry_text(self) -> str:
        return ''

    def security_entry_text(self, security: str) -> str:
        """Return a short (<=30 chars + ellipsis) preview of the latest memo."""
        if self.__sas_if is None:
            return '-'
        df = self.__sas_if.stock_memo_get_record(security)
        if df is not None and not df.empty:
            # BUGFIX: sort_values() returns a new frame; the original
            # discarded it, so the preview could show an arbitrary memo
            # instead of the most recent one.
            df = df.sort_values('time')
            brief = df.iloc[-1]['brief']
            content = df.iloc[-1]['content']
            text = brief if str_available(brief) else content
            # Truncate with ellipsis: https://stackoverflow.com/a/2873416/12929244
            return text[:30] + (text[30:] and '...')
        return ''
# --------------------------------- History ---------------------------------
class MemoExtra_MemoHistory(MemoExtra):
    """Chart column: opens the memo-history chart window for one security."""

    def __init__(self, memo_context: dict):
        super(MemoExtra_MemoHistory, self).__init__()
        self.__memo_context = memo_context
        # One shared chart window, re-targeted per security on demand.
        self.__memo_history = StockChartUi(self.__memo_context)

    def global_entry(self):
        pass

    def security_entry(self, security: str):
        """Point the chart window at *security* and bring it up."""
        self.__memo_history.show_security(security, True)
        self.__memo_history.setVisible(True)

    def title_text(self) -> str:
        return 'Chart'

    def global_entry_text(self) -> str:
        return ''

    def security_entry_text(self, security: str) -> str:
        return 'View'
# ---------------------------------- Tags ----------------------------------
class MemoExtra_StockTags(MemoExtra):
    """Tags column: shows a security's tags and edits them via the TagsUi dialog."""

    # Tags always offered in the picker, in addition to any already stored.
    PRESET_TAGS = ['黑名单', '灰名单', '关注']

    def __init__(self, memo_context: dict):
        self.__memo_context = memo_context
        self.__sas_if: sasIF = self.__memo_context.get('sas_if')
        super(MemoExtra_StockTags, self).__init__()
        # self.__stock_tags: Tags = self.__memo_context.get('tags')
        self.__stock_tags_ui: TagsUi = TagsUi()
        self.__stock_tags_ui.on_ensure(self.__on_tags_ui_ensure)
        self.__current_stock = ''
        # self.__stock_tags.set_obj_tags('', MemoExtra_StockTags.PRESET_TAGS)

    def __on_tags_ui_ensure(self):
        """Persist the user's tag selection for the stock being edited."""
        selection = self.__stock_tags_ui.get_selected_tags()
        self.__stock_tags_ui.close()
        self.__sas_if.stock_memo_tags_of_securities(self.__current_stock, selection)
        self.__sas_if.stock_memo_save_tags()
        # self.__stock_tags.set_obj_tags(self.__current_stock, tags)
        # self.__stock_tags.save()
        # self.__memo_context.broadcast_data_updated('tags')

    def global_entry(self):
        pass

    def security_entry(self, security: str):
        """Open the tag picker pre-loaded with all known + preset tags."""
        known = self.__sas_if.stock_memo_all_tags()
        current = self.__sas_if.stock_memo_tags_of_securities(security)
        self.__stock_tags_ui.reload_tags(list(set(known + MemoExtra_StockTags.PRESET_TAGS)))
        self.__stock_tags_ui.select_tags(current)
        self.__stock_tags_ui.setVisible(True)
        self.__current_stock = security

    def title_text(self) -> str:
        return 'Tags'

    def global_entry_text(self) -> str:
        return ''

    def security_entry_text(self, security: str) -> str:
        """Render the security's tags as a single display string."""
        return Tags.tags_to_str(self.__sas_if.stock_memo_tags_of_securities(security))
# -------------------------------- Analysis --------------------------------
from StockAnalysisSystem.core.Utility.ui_utility import *
from StockAnalysisSystem.core.Utility.TableViewEx import *
class AnalyzerSelector(QDialog):
    """Modal dialog listing analyzers with checkboxes; the caller reads
    is_ok() and get_select_strategy() after exec()."""

    TABLE_HEADER_ANALYZER = ['', 'Strategy', 'Comments', 'UUID']

    def __init__(self, analyzer_utility):
        super(AnalyzerSelector, self).__init__()
        self.__analyzer_utility = analyzer_utility
        # BUGFIX: start pessimistic. This flag was initialised to True and
        # never reset, so Cancel (or closing the window) still reported OK
        # and the caller ran the analysis anyway.
        self.__ok = False
        self.__table_analyzer = TableViewEx()
        self.__button_ok = QPushButton('OK')
        self.__button_cancel = QPushButton('Cancel')
        self.init_ui()

    def init_ui(self):
        """Lay out the analyzer table with OK/Cancel buttons underneath."""
        layout = QVBoxLayout()
        self.setLayout(layout)
        layout.addWidget(self.__table_analyzer)
        layout.addLayout(horizon_layout([QLabel(''), self.__button_ok, self.__button_cancel], [8, 1, 1]))
        self.__table_analyzer.SetCheckableColumn(0)
        self.__table_analyzer.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
        self.__button_ok.clicked.connect(self.__on_button_ok)
        self.__button_cancel.clicked.connect(self.__on_button_cancel)
        self.setMinimumSize(800, 600)
        self.setWindowTitle('Select Analyzer')
        self.load_analyzer()

    def __on_button_ok(self):
        self.__ok = True
        self.close()

    def __on_button_cancel(self):
        self.__ok = False
        self.close()

    def is_ok(self) -> bool:
        """True only when the dialog was dismissed via the OK button."""
        return self.__ok

    def load_analyzer(self):
        """(Re)populate the table from the analyzer utility."""
        self.__table_analyzer.Clear()
        self.__table_analyzer.SetRowCount(0)
        self.__table_analyzer.SetColumn(AnalyzerSelector.TABLE_HEADER_ANALYZER)
        analyzer_info = self.__analyzer_utility.analyzer_info()
        for analyzer_uuid, analyzer_name, analyzer_detail, _ in analyzer_info:
            self.__table_analyzer.AppendRow(['', analyzer_name, analyzer_detail, analyzer_uuid])

    def get_select_strategy(self):
        """Return the UUIDs of the checked analyzers (may be empty)."""
        analyzer_list = []
        for i in range(self.__table_analyzer.RowCount()):
            if self.__table_analyzer.GetItemCheckState(i, 0) == QtCore.Qt.Checked:
                uuid = self.__table_analyzer.GetItemText(i, 3)
                analyzer_list.append(uuid)
        return analyzer_list
class MemoExtra_Analysis(MemoExtra):
    """Analysis column: runs user-selected analyzers on one security and
    shows the result table in a dialog."""

    def __init__(self, memo_context: dict):
        # NOTE(review): other MemoExtra subclasses treat memo_context as a
        # plain dict (.get('sas_if')), but this class calls
        # memo_context.get_sas() — confirm which context object is passed in.
        self.__memo_context = memo_context
        super(MemoExtra_Analysis, self).__init__()

    def global_entry(self):
        pass

    def security_entry(self, security: str):
        """Let the user pick analyzers, run them off the UI thread, show a report."""
        strategy_entry = self.__memo_context.get_sas().get_strategy_entry()
        selector = AnalyzerSelector(strategy_entry)
        selector.exec()
        if not selector.is_ok():
            return
        analyzers = selector.get_select_strategy()
        if len(analyzers) == 0:
            return
        # Run the analysis in a single worker thread so the waiting dialog
        # stays responsive while the computation proceeds.
        with futures.ThreadPoolExecutor(max_workers=1) as executor:
            future: futures.Future = executor.submit(self.__analysis, security, analyzers)
            if not WaitingWindow.wait_future('分析计算中...', future, None):
                return
            # wait_future returned True, so the future is finished; a zero
            # timeout cannot block here.
            df = future.result(0)
        if df is None:
            return
        # analyzer_info = strategy_entry.analyzer_info()
        # analyzers = [uuid for uuid, _, _, _ in analyzer_info]
        #
        # result = strategy_entry.analysis_advance(security, analyzers, (years_ago(5), now()))
        #
        # df = analysis_result_list_to_single_stock_report(result, security)
        # df = df.fillna('-')
        # df = df.rename(columns=strategy_entry.strategy_name_dict())
        table = QTableWidget()
        table.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
        table.setMinimumSize(800, 600)
        write_df_to_qtable(df, table, True)
        dlg = WrapperQDialog(table)
        dlg.setWindowTitle('Analysis Result')
        dlg.exec()

    def title_text(self) -> str:
        return 'Analysis'

    def global_entry_text(self) -> str:
        return ''

    def security_entry_text(self, security: str) -> str:
        return 'Go'

    def __analysis(self, security: str, analyzers: [str]) -> pd.DataFrame:
        """Worker-thread body: run *analyzers* over the last 5 years of data
        for *security* and return a display-ready report DataFrame."""
        strategy_entry = self.__memo_context.get_sas().get_strategy_entry()
        result = strategy_entry.analysis_advance(security, analyzers, (years_ago(5), now()))
        df = analysis_result_list_to_single_stock_report(result, security)
        df = df.fillna('-')
        df = df.rename(columns=strategy_entry.strategy_name_dict())
        return df
# -------------------------------- Analysis --------------------------------
class MemoExtra_BlackList(MemoExtra):
    """Global-only extra: opens the black-list management dialog."""

    def __init__(self, memo_context: dict):
        super(MemoExtra_BlackList, self).__init__()
        self.__memo_context = memo_context

    def global_entry(self):
        """Show the black-list UI wrapped in a modal dialog."""
        dlg = WrapperQDialog(BlackListUi(self.__memo_context))
        dlg.exec()

    def security_entry(self, security: str):
        pass

    def title_text(self) -> str:
        return ''

    def global_entry_text(self) -> str:
        return 'Black List'

    def security_entry_text(self, security: str) -> str:
        return ''
|
__author__ = 'mnowotka'
#-----------------------------------------------------------------------------------------------------------------------
from chembl_beaker.beaker import app
from bottle import request
from chembl_beaker.beaker.core_apps.conversions.impl import _ctab2smiles, _smiles2ctab, _inchi2ctab, _ctab2smarts
from chembl_beaker.beaker.core_apps.conversions.impl import _ctab2inchi, _inchi2inchiKey
from chembl_beaker.beaker.core_apps.conversions.impl import _canonicalize_smiles, _ctab2inchiKey
from chembl_beaker.beaker.core_apps.conversions.impl import _smiles2inchi, _smiles2inchiKey
from chembl_beaker.beaker.utils.io import _parseFlag
import base64
#-----------------------------------------------------------------------------------------------------------------------
def ctab2smilesView(data, params):
    """Translate request *params* into _ctab2smiles keyword arguments and run
    the conversion on the raw molfile text in *data*."""
    # Boolean options with their defaults; parsed through _parseFlag so
    # query-string values like '0'/'1' work.
    flag_defaults = {
        'sanitize': True,
        'removeHs': True,
        'strictParsing': True,
        'includeHeader': True,
        'isomericSmiles': False,
        'kekuleSmiles': False,
    }
    kwargs = {name: _parseFlag(params.get(name, default))
              for name, default in flag_defaults.items()}
    kwargs['delimiter'] = params.get('delimiter', ' ')
    kwargs['nameHeader'] = params.get('nameHeader', 'Name')
    return _ctab2smiles(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smiles/<ctab>', method=['OPTIONS', 'GET'], name="ctab2smiles")
def ctab2smiles(ctab):
    """
    Converts CTAB to SMILES format. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation
    of multiple molfiles.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}ctab2smiles/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}ctab2smiles/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")?isomericSmiles=1
        curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=1"
        curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=0"
        curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=1&sanitize=1"
        curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat explicitHs.mol | base64 -w 0 | tr "+/" "-_")"?removeHs=0"
    """
    # The path segment is urlsafe-base64; decode it back to raw molfile text.
    data = base64.urlsafe_b64decode(ctab)
    return ctab2smilesView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smiles', method=['OPTIONS', 'POST'], name="ctab2smiles")
def ctab2smiles():
    """
    Converts CTAB to SMILES format. CTAB is either single molfile or SDF file.
    cURL examples:
        curl -X POST -F "file=@isomeric.mol" ${BEAKER_ROOT_URL}ctab2smiles
        curl -X POST -F "file=@isomeric.mol" -F "isomericSmiles=1" ${BEAKER_ROOT_URL}ctab2smiles
        curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=0" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smiles
        curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=0" -F "sanitize=0" ${BEAKER_ROOT_URL}ctab2smiles
        curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=1" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smiles
        curl -X POST -F "file=@explicitHs.mol" -F "removeHs=0" ${BEAKER_ROOT_URL}ctab2smiles
    """
    # Prefer an uploaded file part when present, else the raw request body.
    # NOTE(review): request.files.values()[0] is Python-2 style indexing —
    # under Python 3 dict views are not subscriptable; verify target runtime.
    data = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return ctab2smilesView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def ctab2smartsView(data, params):
    """Collect molfile-parsing options from *params* and convert the CTAB
    text in *data* to SMARTS."""
    flag_defaults = {
        'sanitize': True,
        'removeHs': True,
        'strictParsing': True,
        'isomericSmiles': False,
    }
    kwargs = {name: _parseFlag(params.get(name, default))
              for name, default in flag_defaults.items()}
    return _ctab2smarts(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smarts/<ctab>', method=['OPTIONS', 'GET'], name="ctab2smarts")
def ctab2smarts(ctab):
    """
    Converts CTAB to SMARTS format. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation
    of multiple molfiles.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}ctab2smarts/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}ctab2smarts/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")?isomericSmiles=1
        curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?sanitize=1"
        curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?sanitize=0"
        curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat explicitHs.mol | base64 -w 0 | tr "+/" "-_")"?removeHs=0"
    """
    # The path segment is urlsafe-base64; decode it back to raw molfile text.
    data = base64.urlsafe_b64decode(ctab)
    return ctab2smartsView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smarts', method=['OPTIONS', 'POST'], name="ctab2smarts")
def ctab2smarts():
    """
    Converts CTAB to SMARTS format. CTAB is either single molfile or SDF file.
    cURL examples:
        curl -X POST -F "file=@isomeric.mol" ${BEAKER_ROOT_URL}ctab2smarts
        curl -X POST -F "file=@isomeric.mol" -F "isomericSmiles=1" ${BEAKER_ROOT_URL}ctab2smarts
        curl -X POST -F "file=@non_kekule.mol" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smarts
        curl -X POST -F "file=@non_kekule.mol" -F "sanitize=0" ${BEAKER_ROOT_URL}ctab2smarts
        curl -X POST -F "file=@explicitHs.mol" -F "removeHs=0" ${BEAKER_ROOT_URL}ctab2smarts
    """
    # Prefer an uploaded file part when present, else the raw request body.
    # NOTE(review): request.files.values()[0] is Python-2 style indexing —
    # under Python 3 dict views are not subscriptable; verify target runtime.
    data = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return ctab2smartsView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def smiles2ctabView(data, params):
    """Build _smiles2ctab keyword arguments from request *params* and convert
    the SMILES text in *data* to CTAB."""
    kwargs = {
        'computeCoords': _parseFlag(params.get('computeCoords', True)),
        'delimiter': params.get('delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
    }
    # Without an explicit titleLine flag, assume a header only when the
    # payload literally begins with the canonical "SMILES Name" header line.
    kwargs['titleLine'] = (
        False
        if params.get('titleLine') is None and not data.startswith('SMILES Name')
        else _parseFlag(params.get('titleLine', True))
    )
    return _smiles2ctab(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2ctab/<smiles>', method=['OPTIONS', 'GET'], name="smiles2ctab")
def smiles2ctab(smiles):
    """
    Converts SMILES to CTAB. This method accepts urlsafe_base64 encoded string containing single or multiple SMILES
    optionally containing header line, specific to *.smi format.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET "${BEAKER_ROOT_URL}smiles2ctab/"$(cat rules.smi | base64 -w 0 | tr "+/" "-_")"?computeCoords=0"
        curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")
    """
    # The path segment is urlsafe-base64; decode it back to raw SMILES text.
    data = base64.urlsafe_b64decode(smiles)
    return smiles2ctabView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2ctab', method=['OPTIONS', 'POST'], name="smiles2ctab")
def smiles2ctab():
    """
    Converts SMILES to CTAB. This method accepts single or multiple SMILES or *.smi file.
    cURL examples:
        curl -X POST -F "file=@aspirin_with_header.smi" ${BEAKER_ROOT_URL}smiles2ctab
        curl -X POST -F "file=@aspirin_no_header.smi" ${BEAKER_ROOT_URL}smiles2ctab
        curl -X POST -F "file=@rules.smi" -F "computeCoords=0" ${BEAKER_ROOT_URL}smiles2ctab
        curl -X POST -F "file=@mcs.smi" ${BEAKER_ROOT_URL}smiles2ctab
        curl -X POST -F "file=@mcs_no_header.smi" ${BEAKER_ROOT_URL}smiles2ctab
    """
    # Prefer an uploaded file part when present, else the raw request body.
    # NOTE(review): request.files.values()[0] is Python-2 style indexing —
    # under Python 3 dict views are not subscriptable; verify target runtime.
    data = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return smiles2ctabView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def smiles2inchiView(data, params):
    """Build _smiles2inchi keyword arguments from request *params* and convert
    the SMILES text in *data* to InChI."""
    kwargs = {
        'computeCoords': _parseFlag(params.get('computeCoords', False)),
        'delimiter': params.get('delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
    }
    # Without an explicit titleLine flag, assume a header only when the
    # payload literally begins with the canonical "SMILES Name" header line.
    kwargs['titleLine'] = (
        False
        if params.get('titleLine') is None and not data.startswith('SMILES Name')
        else _parseFlag(params.get('titleLine', True))
    )
    return _smiles2inchi(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchi/<smiles>', method=['OPTIONS', 'GET'], name="smiles2inchi")
def smiles2inchi(smiles):
    """
    Converts SMILES to InChi. This method accepts urlsafe_base64 encoded string containing single or multiple SMILES
    optionally containing header line, specific to *.smi format.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")
    """
    # The path segment is urlsafe-base64; decode it back to raw SMILES text.
    data = base64.urlsafe_b64decode(smiles)
    return smiles2inchiView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchi', method=['OPTIONS', 'POST'], name="smiles2inchi")
def smiles2inchi():
    """
    Converts SMILES to InChi. This method accepts single or multiple SMILES or *.smi file.
    cURL examples:
        curl -X POST -F "file=@aspirin_with_header.smi" ${BEAKER_ROOT_URL}smiles2inchi
        curl -X POST -F "file=@aspirin_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchi
        curl -X POST -F "file=@mcs.smi" ${BEAKER_ROOT_URL}smiles2inchi
        curl -X POST -F "file=@mcs_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchi
    """
    # Prefer an uploaded file part when present, else the raw request body.
    # NOTE(review): request.files.values()[0] is Python-2 style indexing —
    # under Python 3 dict views are not subscriptable; verify target runtime.
    data = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return smiles2inchiView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def smiles2inchiKeyView(data, params):
    """Build _smiles2inchiKey keyword arguments from request *params* and
    convert the SMILES text in *data* to InChI keys."""
    kwargs = {
        'computeCoords': _parseFlag(params.get('computeCoords', False)),
        'delimiter': params.get('delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
    }
    # Without an explicit titleLine flag, assume a header only when the
    # payload literally begins with the canonical "SMILES Name" header line.
    kwargs['titleLine'] = (
        False
        if params.get('titleLine') is None and not data.startswith('SMILES Name')
        else _parseFlag(params.get('titleLine', True))
    )
    return _smiles2inchiKey(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchiKey/<smiles>', method=['OPTIONS', 'GET'], name="smiles2inchiKey")
def smiles2inchiKey(smiles):
    """
    Converts SMILES to InChi Key. This method accepts urlsafe_base64 encoded string containing single or multiple SMILES
    optionally containing header line, specific to *.smi format.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")
    """
    # The path segment is urlsafe-base64; decode it back to raw SMILES text.
    data = base64.urlsafe_b64decode(smiles)
    return smiles2inchiKeyView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchiKey', method=['OPTIONS', 'POST'], name="smiles2inchiKey")
def smiles2inchiKey():
    """
    Converts SMILES to InChi Key. This method accepts single or multiple SMILES or *.smi file.
    cURL examples:
        curl -X POST -F "file=@aspirin_with_header.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
        curl -X POST -F "file=@aspirin_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
        curl -X POST -F "file=@mcs.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
        curl -X POST -F "file=@mcs_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
    """
    # Prefer an uploaded file part when present, else the raw request body.
    # NOTE(review): request.files.values()[0] is Python-2 style indexing —
    # under Python 3 dict views are not subscriptable; verify target runtime.
    data = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return smiles2inchiKeyView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def canonicalizeSmilesView(data, params):
    """Assemble _canonicalize_smiles keyword arguments from request *params*
    and canonicalize the SMILES text in *data*."""
    kwargs = {
        'computeCoords': _parseFlag(params.get('computeCoords', False)),
        'in_delimiter': params.get('in_delimiter', ' '),
        'out_delimiter': params.get('out_delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
        'nameHeader': params.get('nameHeader', 'Name'),
        'includeHeader': _parseFlag(params.get('includeHeader', True)),
        'isomericSmiles': _parseFlag(params.get('isomericSmiles', False)),
        'kekuleSmiles': _parseFlag(params.get('kekuleSmiles', False)),
    }
    # Without an explicit titleLine flag, assume a header only when the
    # payload literally begins with the canonical "SMILES Name" header line.
    kwargs['titleLine'] = (
        False
        if params.get('titleLine') is None and not data.startswith('SMILES Name')
        else _parseFlag(params.get('titleLine', True))
    )
    return _canonicalize_smiles(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/canonicalizeSmiles/<smiles>', method=['OPTIONS', 'GET'], name="canonicalizeSmiles")
def canonicalizeSmiles(smiles):
    """
    Converts SMILES to canonical SMILES. This method accepts urlsafe_base64 encoded string containing single or multiple
    SMILES optionally containing header line, specific to *.smi format.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}canonicalizeSmiles/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}canonicalizeSmiles/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")"?out_delimiter=|&nameHeader=foo"
        curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat non_kekule.smi | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=0"
        curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat non_kekule.smi | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=1"
        curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat non_kekule.smi | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=1&sanitize=1"
        curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat isomeric.smi | base64 -w 0 | tr "+/" "-_")"?isomericSmiles=1"
    """
    # The path segment is urlsafe-base64; decode it back to raw SMILES text.
    data = base64.urlsafe_b64decode(smiles)
    return canonicalizeSmilesView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/canonicalizeSmiles', method=['OPTIONS', 'POST'], name="canonicalizeSmiles")
def canonicalizeSmiles():
    """
    Converts SMILES to canonical SMILES. This method accepts single or multiple SMILES or *.smi file.
    cURL examples:
        curl -X POST --data-binary @aspirin_no_header.smi ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST --data-binary @aspirin_with_header.smi ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@aspirin_with_header.smi" -F "out_delimiter=|" -F "nameHeader=foo" ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@non_kekule.smi" -F "kekuleSmiles=0" -F "sanitize=0" ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@non_kekule.smi" -F "kekuleSmiles=0" -F "sanitize=1" ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@non_kekule.smi" -F "kekuleSmiles=1" -F "sanitize=1" ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@isomeric.smi" ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@isomeric.smi" -F "isomericSmiles=1" ${BEAKER_ROOT_URL}canonicalizeSmiles
    """
    # Prefer an uploaded file part when present, else the raw request body.
    # NOTE(review): request.files.values()[0] is Python-2 style indexing —
    # under Python 3 dict views are not subscriptable; verify target runtime.
    data = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return canonicalizeSmilesView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2ctab/<inchi>', method=['OPTIONS', 'GET'], name="inchi2ctab")
def inchi2ctab(inchi):
    """
    Converts InChi to CTAB. This method accepts urlsafe_base64 encoded string containing one or multiple InChis.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}inchi2ctab/$(cat aspirin.inchi | base64 -w 0 | tr "+/" "-_")
    """
    # The path segment is urlsafe-base64; decode it back to raw InChI text.
    inchis = base64.urlsafe_b64decode(inchi)
    return _inchi2ctab(inchis)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2ctab', method=['OPTIONS', 'POST'], name="inchi2ctab")
def inchi2ctab():
    """
    Converts InChi to CTAB. This method accepts one or multiple InChis.
    cURL examples:
        curl -X POST --data-binary @aspirin.inchi ${BEAKER_ROOT_URL}inchi2ctab
        curl -X POST -F "file=@aspirin.inchi" ${BEAKER_ROOT_URL}inchi2ctab
    """
    # Prefer an uploaded file part when present, else the raw request body.
    # NOTE(review): request.files.values()[0] is Python-2 style indexing —
    # under Python 3 dict views are not subscriptable; verify target runtime.
    inchis = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return _inchi2ctab(inchis)
#-----------------------------------------------------------------------------------------------------------------------
def ctab2inchiView(data, params):
    """Build keyword options from request params and run the CTAB->InChi conversion."""
    # Each flag defaults to True and is normalized via _parseFlag.
    flag_names = ('sanitize', 'removeHs', 'strictParsing')
    kwargs = {name: _parseFlag(params.get(name, True)) for name in flag_names}
    return _ctab2inchi(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchi/<ctab>', method=['OPTIONS', 'GET'], name="ctab2inchi")
def ctab2inchi(ctab):
    """
    Converts CTAB to InChis. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation
    of multiple molfiles.
    cURL examples:

        curl -X GET ${BEAKER_ROOT_URL}ctab2inchi/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_")
    """
    # Decode the URL-safe base64 payload before delegating to the shared view.
    decoded = base64.urlsafe_b64decode(ctab)
    return ctab2inchiView(decoded, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchi', method=['OPTIONS', 'POST'], name="ctab2inchi")
def ctab2inchi():
    """
    Converts CTAB to InChis. CTAB is either single molfile or SDF file.
    cURL examples:

        curl -X POST --data-binary @aspirin.mol ${BEAKER_ROOT_URL}ctab2inchi
        curl -X POST -F "file=@aspirin.mol" ${BEAKER_ROOT_URL}ctab2inchi
    """
    # Accept either a multipart file upload or the raw request body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return ctab2inchiView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def ctab2inchiKeyView(data, params):
    """Build keyword options from request params and run the CTAB->InChiKey conversion."""
    # Each flag defaults to True and is normalized via _parseFlag.
    flag_names = ('sanitize', 'removeHs', 'strictParsing')
    kwargs = {name: _parseFlag(params.get(name, True)) for name in flag_names}
    return _ctab2inchiKey(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchiKey/<ctab>', method=['OPTIONS', 'GET'], name="ctab2inchiKey")
def ctab2inchiKey(ctab):
    """
    Converts CTAB to InChi Keys. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation
    of multiple molfiles.
    cURL examples:

        curl -X GET ${BEAKER_ROOT_URL}ctab2inchiKey/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_")
    """
    # Decode the URL-safe base64 payload before delegating to the shared view.
    decoded = base64.urlsafe_b64decode(ctab)
    return ctab2inchiKeyView(decoded, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchiKey', method=['OPTIONS', 'POST'], name="ctab2inchiKey")
def ctab2inchiKey():
    """
    Converts CTAB to InChi Keys. CTAB is either single molfile or SDF file.
    cURL examples:

        curl -X POST --data-binary @aspirin.mol ${BEAKER_ROOT_URL}ctab2inchiKey
        curl -X POST -F "file=@aspirin.mol" ${BEAKER_ROOT_URL}ctab2inchiKey
    """
    # Accept either a multipart file upload or the raw request body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return ctab2inchiKeyView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2inchiKey/<inchi>', method=['OPTIONS', 'GET'], name="inchi2inchiKey")
def inchi2inchiKey(inchi):
    """
    Converts InChis to InChiKeys. This method accepts urlsafe_base64 encoded string containing one or multiple InChis.
    cURL examples:

        curl -X GET ${BEAKER_ROOT_URL}inchi2inchiKey/$(cat aspirin.inchi | base64 -w 0 | tr "+/" "-_")
    """
    # Decode the URL-safe base64 payload before conversion.
    decoded = base64.urlsafe_b64decode(inchi)
    return _inchi2inchiKey(decoded)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2inchiKey', method=['OPTIONS', 'POST'], name="inchi2inchiKey")
def inchi2inchiKey():
    """
    Converts InChis to InChiKeys. This method accepts one or multiple InChis.
    cURL examples:

        curl -X POST --data-binary @aspirin.inchi ${BEAKER_ROOT_URL}inchi2inchiKey
        curl -X POST -F "file=@aspirin.inchi" ${BEAKER_ROOT_URL}inchi2inchiKey
    """
    # Accept either a multipart file upload or the raw request body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return _inchi2inchiKey(payload)
#-----------------------------------------------------------------------------------------------------------------------
|
import re
"""
Automatic utility for generating raylib function headers. Simply put
raylib.h in the working directory of this script and execute.
Tested with raylib version 3.7.0
"""
# Mapping of C primitive type names to their Zig equivalents.  C integer
# types have platform-dependent sizes, so we emit Zig's c_* types, which
# resolve to the correct system-specific width at compile time.
C_TO_ZIG = {
    "bool": "bool",
    "char": "u8",
    "double": "f64",
    "float": "f32",
    "int": "c_int",
    "long": "c_long",
    "unsigned char": "u8",
    "unsigned int": "c_uint",
}


def c_to_zig_type(c: str) -> str:
    """Translate a C type name to Zig, preserving a leading ``const`` qualifier."""
    qualifier = ""
    if "const " in c:
        qualifier = "const "
        c = c.replace("const ", "")
    # Unknown types (raylib structs etc.) pass through unchanged.
    return qualifier + C_TO_ZIG.get(c, c)
def fix_pointer(name: str, t: str):
    """Move leading '*'s from a declarator name onto its type as Zig pointers.

    Each '*' stripped from ``name`` prepends one ``[*c]`` to ``t``; void
    pointers are special-cased to ``anyopaque`` since Zig has no void pointer.
    Returns the cleaned ``(name, type)`` pair.
    """
    depth = len(name) - len(name.lstrip("*"))
    name = name[depth:]
    t = "[*c]" * depth + t
    special = {
        "[*c]const void": "*const anyopaque",
        "[*c]void": "*anyopaque",
    }
    return name, special.get(t, t)
def fix_enums(arg_name, arg_type, func_name):
    """Substitute known raylib enum types for plain ``int`` parameters.

    Raylib's C API passes the underlying ints rather than the enums, so we
    recover the enum type from the parameter (and sometimes function) name.
    """
    if arg_type != "int":
        return arg_type
    by_arg_name = {
        "key": "KeyboardKey",
        "button": "MouseButton",
        "gesture": "Gestures",
    }
    if arg_name in by_arg_name:
        return by_arg_name[arg_name]
    # "mode" is only a CameraMode for this one specific function.
    if arg_name == "mode" and func_name == "SetCameraMode":
        return "CameraMode"
    return arg_type
def parse_header(header_name: str, output_file: str, prefix: str):
    """Parse a raylib C header and write Zig ``extern`` declarations.

    Args:
        header_name: path of the C header to read (e.g. ``"raylib.h"``).
        output_file: path of the Zig file to write.
        prefix: marker introducing an exported function (e.g. ``"RLAPI "``).
    """
    zig_functions = []
    zig_heads = []
    zig_types = set()
    leftover = ""  # accumulates a declaration that spans multiple lines

    # FIX: use context managers so both file handles are closed even on
    # error; the original leaked `header` and `zigheader`.
    with open(header_name, mode="r") as header:
        for line in header:
            # Collect typedef'd names so matching imports can be emitted.
            if line.startswith("typedef struct"):
                zig_types.add(line.split(' ')[2])
            elif line.startswith("typedef enum"):
                # don't trip the general typedef case
                pass
            elif line.startswith("typedef "):
                zig_types.add(line.split(' ')[2].replace(';', '').strip())

            if not line.startswith(prefix):
                continue

            line = line.split(";", 1)[0]
            if leftover:
                line = leftover + line
                leftover = ""

            line = line.replace("* ", " *")
            line = line.replace(",", ", ")
            # collapse double spaces introduced by the comma expansion
            line = line.replace("  ", " ")

            # each (.*) is some variable value
            result = re.search(
                prefix + "(.*) (.*)start_arg(.*)end_arg(.*)",
                line.replace("(", "start_arg").replace(")", "end_arg"),
            )
            if result is None:
                # Declaration continues on the next line; stash and retry.
                leftover += line
                continue

            # get whats in the (.*)'s
            return_type = result.group(1)
            func_name = result.group(2)
            arguments = result.group(3)

            return_type = c_to_zig_type(return_type)
            func_name, return_type = fix_pointer(func_name, return_type)

            zig_arguments = []
            for arg in arguments.split(", "):
                if arg == "void":
                    break
                if arg == "...":
                    zig_arguments.append("...")
                    continue
                # everything but the last element (for stuff like "const Vector3")
                arg_type = " ".join(arg.split(" ")[0:-1])
                arg_name = arg.split(" ")[-1]  # last element should be the name
                arg_type = fix_enums(arg_name, arg_type, func_name)
                arg_type = c_to_zig_type(arg_type)
                arg_name, arg_type = fix_pointer(arg_name, arg_type)
                zig_types.add(arg_type.replace("const ", ""))
                zig_arguments.append(arg_name + ": " + arg_type)

            zig_arguments = ", ".join(zig_arguments)
            zig_heads.append("pub extern fn " + func_name + "(" + zig_arguments + ") " + return_type + ";")

    with open(output_file, mode="w") as zigheader:
        print("""const rl = @import("raylib-zig.zig");\n""", file=zigheader)
        # Import every referenced non-primitive, non-pointer type from rl.
        print("\n".join(sorted(f"const {t} = rl.{t};" for t in zig_types if ('*' not in t) and (t not in C_TO_ZIG.values()))), file=zigheader)
        print("", file=zigheader)
        print("\n".join(zig_heads), file=zigheader)
        print("", file=zigheader)
        print("\n".join(zig_functions), file=zigheader)
if __name__ == "__main__":
    # Guard generation behind a main check so importing this module (e.g.
    # to reuse the helpers) does not immediately read/write files.
    parse_header("raylib.h", "raylib-wa.zig", "RLAPI ")
    parse_header("raymath.h", "raylib-zig-math.zig", "RMAPI ")
|
<gh_stars>1-10
"""
Definition of the `fiftyone` command-line interface (CLI).
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import argparse
from collections import defaultdict
import io
import json
import os
import subprocess
import sys
import time
import argcomplete
from tabulate import tabulate
import eta.core.serial as etas
import eta.core.utils as etau
import fiftyone as fo
import fiftyone.constants as foc
import fiftyone.core.dataset as fod
import fiftyone.core.session as fos
import fiftyone.core.utils as fou
import fiftyone.utils.data as foud
import fiftyone.zoo as foz
# Table style passed to ``tabulate`` for all CLI table output.
_TABLE_FORMAT = "simple"

# Maximum width of the "value" column when printing constants; longer values
# are truncated with a trailing " ..." by _render_constant_value().
_MAX_CONSTANT_VALUE_COL_WIDTH = 79
class Command(object):
    """Abstract interface for CLI commands.

    Subclasses must implement :meth:`setup`, and should implement
    :meth:`execute` when they do more than register subparsers.
    """

    @staticmethod
    def setup(parser):
        """Configures the command-line arguments for this command.

        Args:
            parser: an `argparse.ArgumentParser` instance
        """
        raise NotImplementedError("subclass must implement setup()")

    @staticmethod
    def execute(parser, args):
        """Runs the command on the parsed arguments.

        Args:
            parser: the `argparse.ArgumentParser` instance for the command
            args: an `argparse.Namespace` instance containing the arguments
                for the command
        """
        raise NotImplementedError("subclass must implement execute()")
class FiftyOneCommand(Command):
    """The FiftyOne command-line interface."""

    @staticmethod
    def setup(parser):
        subparsers = parser.add_subparsers(title="available commands")
        # Registration order determines the order shown in --help
        for name, command in (
            ("config", ConfigCommand),
            ("constants", ConstantsCommand),
            ("convert", ConvertCommand),
            ("datasets", DatasetsCommand),
            ("app", AppCommand),
            ("zoo", ZooCommand),
        ):
            _register_command(subparsers, name, command)

    @staticmethod
    def execute(parser, args):
        # Bare `fiftyone` invocation just prints usage
        parser.print_help()
class ConfigCommand(Command):
    """Tools for working with your FiftyOne config.

    Examples::

        # Print your entire config
        fiftyone config

        # Print a specific config field
        fiftyone config <field>

        # Print the location of your config
        fiftyone config --locate
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "field", nargs="?", metavar="FIELD", help="a config field to print"
        )
        parser.add_argument(
            "-l",
            "--locate",
            action="store_true",
            help="print the location of your config on disk",
        )

    @staticmethod
    def execute(parser, args):
        if args.locate:
            # Only report the path when a config file actually exists
            config_path = foc.FIFTYONE_CONFIG_PATH
            if os.path.isfile(config_path):
                print(config_path)
            else:
                print("No config file found at '%s'.\n" % config_path)

            return

        if not args.field:
            # No field requested: dump the whole config
            print(fo.config)
            return

        field = getattr(fo.config, args.field)
        print(field if etau.is_str(field) else etas.json_to_str(field))
class ConstantsCommand(Command):
    """Print constants from `fiftyone.constants`.

    Examples::

        # Print all constants
        fiftyone constants

        # Print a specific constant
        fiftyone constants <CONSTANT>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "constant",
            nargs="?",
            metavar="CONSTANT",
            help="the constant to print",
        )

    @staticmethod
    def execute(parser, args):
        if args.constant:
            print(getattr(foc, args.constant))
            return

        # No constant given: dump every public ALL-CAPS module attribute
        public_constants = {
            name: value
            for name, value in vars(foc).items()
            if not name.startswith("_") and name == name.upper()
        }
        _print_constants_table(public_constants)
def _print_constants_table(d):
    """Print the given ``{name: value}`` dict as a two-column table, sorted by name."""
    rows = sorted(
        (name, _render_constant_value(value)) for name, value in d.items()
    )
    print(tabulate(rows, headers=["constant", "value"], tablefmt=_TABLE_FORMAT))
def _render_constant_value(value):
    """Stringify a constant, truncating it to the max column width with an ellipsis."""
    value = str(value)
    max_width = _MAX_CONSTANT_VALUE_COL_WIDTH
    if max_width is not None and len(value) > max_width:
        # Reserve 4 characters for the " ..." suffix
        value = value[: max_width - 4] + " ..."

    return value
class ConvertCommand(Command):
    """Convert datasets on disk between supported formats.

    Examples::

        # Convert an image classification directory tree to TFRecords format
        fiftyone convert \\
            --input-dir /path/to/image-classification-directory-tree \\
            --input-type fiftyone.types.ImageClassificationDirectoryTree \\
            --output-dir /path/for/tf-image-classification-dataset \\
            --output-type fiftyone.types.TFImageClassificationDataset

        # Convert a COCO detection dataset to CVAT image format
        fiftyone convert \\
            --input-dir /path/to/coco-detection-dataset \\
            --input-type fiftyone.types.COCODetectionDataset \\
            --output-dir /path/for/cvat-image-dataset \\
            --output-type fiftyone.types.CVATImageDataset
    """

    @staticmethod
    def setup(parser):
        # All four arguments share the same flag/metavar/help shape
        for flag, metavar, help_str in (
            ("--input-dir", "INPUT_DIR", "the directory containing the dataset"),
            ("--input-type", "INPUT_TYPE", "the fiftyone.types.Dataset type of the input dataset"),
            ("--output-dir", "OUTPUT_DIR", "the directory to which to write the output dataset"),
            ("--output-type", "OUTPUT_TYPE", "the fiftyone.types.Dataset type to output"),
        ):
            parser.add_argument(flag, metavar=metavar, help=help_str)

    @staticmethod
    def execute(parser, args):
        # Resolve the dataset type strings into their fiftyone.types classes
        foud.convert_dataset(
            input_dir=args.input_dir,
            input_type=etau.get_class(args.input_type),
            output_dir=args.output_dir,
            output_type=etau.get_class(args.output_type),
        )
class DatasetsCommand(Command):
    """Tools for working with FiftyOne datasets."""

    @staticmethod
    def setup(parser):
        subparsers = parser.add_subparsers(title="available commands")
        # Registration order determines the order shown in --help
        for name, command in (
            ("list", DatasetsListCommand),
            ("info", DatasetsInfoCommand),
            ("create", DatasetsCreateCommand),
            ("head", DatasetsHeadCommand),
            ("tail", DatasetsTailCommand),
            ("stream", DatasetsStreamCommand),
            ("export", DatasetsExportCommand),
            ("draw", DatasetsDrawCommand),
            ("delete", DatasetsDeleteCommand),
        ):
            _register_command(subparsers, name, command)

    @staticmethod
    def execute(parser, args):
        # Bare `fiftyone datasets` invocation just prints usage
        parser.print_help()
class DatasetsListCommand(Command):
    """List FiftyOne datasets.

    Examples::

        # List available datasets
        fiftyone datasets list
    """

    @staticmethod
    def setup(parser):
        # This command takes no arguments
        pass

    @staticmethod
    def execute(parser, args):
        datasets = fod.list_datasets()
        if not datasets:
            print("No datasets found")
            return

        for dataset in datasets:
            print(dataset)
class DatasetsInfoCommand(Command):
    """Print information about FiftyOne datasets.

    Examples::

        # Print information about the given dataset
        fiftyone datasets info <name>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset",
        )

    @staticmethod
    def execute(parser, args):
        # Printing a dataset shows its summary info
        print(fod.load_dataset(args.name))
class DatasetsCreateCommand(Command):
    """Tools for creating FiftyOne datasets.

    Examples::

        # Create a dataset from the given data on disk
        fiftyone datasets create \\
            --name <name> --dataset-dir <dataset-dir> --type <type>

        # Create a dataset from the given samples JSON file
        fiftyone datasets create --json-path <json-path>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "-n", "--name", metavar="NAME", help="a name for the dataset",
        )
        parser.add_argument(
            "-d", "--dataset-dir", metavar="DATASET_DIR",
            help="the directory containing the dataset",
        )
        parser.add_argument(
            "-j", "--json-path", metavar="JSON_PATH",
            help="the path to a samples JSON file to load",
        )
        parser.add_argument(
            "-t", "--type", metavar="TYPE",
            help="the fiftyone.types.Dataset type of the dataset",
        )

    @staticmethod
    def execute(parser, args):
        # Resolve the dataset type string, if one was provided
        dataset_type = etau.get_class(args.type) if args.type else None

        if args.dataset_dir:
            dataset = fod.Dataset.from_dir(
                args.dataset_dir, dataset_type, name=args.name
            )
        elif args.json_path:
            dataset = fod.Dataset.from_json(args.json_path, name=args.name)
        else:
            raise ValueError(
                "Either `dataset_dir` or `json_path` must be provided"
            )

        # Persist the dataset so it survives beyond this process
        dataset.persistent = True
        print("Dataset '%s' created" % dataset.name)
class DatasetsHeadCommand(Command):
    """Prints the first few samples in a FiftyOne dataset.

    Examples::

        # Print the first few samples in a dataset
        fiftyone datasets head <name>

        # Print the given number of samples from the head of a dataset
        fiftyone datasets head <name> --num-samples <num-samples>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset"
        )
        parser.add_argument(
            "-n", "--num-samples", metavar="NUM_SAMPLES", type=int, default=3,
            help="the number of samples to print",
        )

    @staticmethod
    def execute(parser, args):
        dataset = fod.load_dataset(args.name)
        for sample in dataset.head(num_samples=args.num_samples):
            print(sample)
class DatasetsTailCommand(Command):
    """Prints the last few samples in a FiftyOne dataset.

    Examples::

        # Print the last few samples in a dataset
        fiftyone datasets tail <name>

        # Print the given number of samples from the tail of a dataset
        fiftyone datasets tail <name> --num-samples <num-samples>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset"
        )
        parser.add_argument(
            "-n", "--num-samples", metavar="NUM_SAMPLES", type=int, default=3,
            help="the number of samples to print",
        )

    @staticmethod
    def execute(parser, args):
        dataset = fod.load_dataset(args.name)
        for sample in dataset.tail(num_samples=args.num_samples):
            print(sample)
class DatasetsStreamCommand(Command):
    """Stream samples in a FiftyOne dataset to the terminal.

    Examples::

        # Stream the samples of the dataset to the terminal
        fiftyone datasets stream <name>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset"
        )

    @staticmethod
    def execute(parser, args):
        dataset = fod.load_dataset(args.name)

        # @todo support Windows and other environments without `less`
        # Look at pydoc.pager() for inspiration?
        #
        # BUG FIX: the original passed this list with `shell=True`. On POSIX,
        # a list + shell=True makes the shell execute bare `less` and treats
        # the flags as positional shell arguments, so none of the flags were
        # applied. Passing the list with the default shell=False runs
        # `less -F -R -S -X -K` as intended (and avoids the shell entirely).
        p = subprocess.Popen(
            ["less", "-F", "-R", "-S", "-X", "-K"],
            stdin=subprocess.PIPE,
        )

        try:
            with io.TextIOWrapper(p.stdin, errors="backslashreplace") as pipe:
                for sample in dataset:
                    pipe.write(str(sample) + "\n")

            p.wait()
        except (KeyboardInterrupt, OSError):
            # User quit the pager (broken pipe) or interrupted; exit quietly
            pass
class DatasetsExportCommand(Command):
    """Export FiftyOne datasets to disk in supported formats.

    Examples::

        # Export the dataset to disk in the specified format
        fiftyone datasets export <name> \\
            --export-dir <export-dir> --type <type> --label-field <label-field>

        # Export the dataset to disk in JSON format
        fiftyone datasets export <name> --json-path <json-path>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset to export",
        )
        parser.add_argument(
            "-d", "--export-dir", metavar="EXPORT_DIR",
            help="the directory in which to export the dataset",
        )
        parser.add_argument(
            "-j", "--json-path", metavar="JSON_PATH",
            help="the path to export the dataset in JSON format",
        )
        parser.add_argument(
            "-f", "--label-field", metavar="LABEL_FIELD",
            help="the name of the label field to export",
        )
        parser.add_argument(
            "-t", "--type", metavar="TYPE",
            help="the fiftyone.types.Dataset type in which to export",
        )

    @staticmethod
    def execute(parser, args):
        # Resolve the dataset type string, if one was provided
        dataset_type = etau.get_class(args.type) if args.type else None
        dataset = fod.load_dataset(args.name)

        if args.export_dir:
            dataset.export(
                args.export_dir,
                label_field=args.label_field,
                dataset_type=dataset_type,
            )
            print("Dataset '%s' exported to '%s'" % (args.name, args.export_dir))
        elif args.json_path:
            dataset.write_json(args.json_path)
            print("Dataset '%s' exported to '%s'" % (args.name, args.json_path))
        else:
            raise ValueError(
                "Either `export_dir` or `json_path` must be provided"
            )
class DatasetsDrawCommand(Command):
    """Writes annotated versions of samples in FiftyOne datasets to disk.

    Examples::

        # Write annotated versions of the samples in the dataset with the
        # specified labels overlaid to disk
        fiftyone datasets draw <name> \\
            --anno-dir <anno-dir> --label-fields <label-fields>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset to annotate",
        )
        parser.add_argument(
            "-d",
            "--anno-dir",
            metavar="ANNO_DIR",
            help="the directory in which to write the annotated data",
        )
        parser.add_argument(
            "-f",
            "--label-fields",
            # FIX: metavar was misspelled "LABEL_FIELDs" in --help output
            metavar="LABEL_FIELDS",
            help="a comma-separated list of label fields to export",
        )

    @staticmethod
    def execute(parser, args):
        name = args.name
        anno_dir = args.anno_dir
        label_fields = args.label_fields

        dataset = fod.load_dataset(name)

        if label_fields is not None:
            # Parse the comma-separated CLI value into a list of field names
            label_fields = [f.strip() for f in label_fields.split(",")]

        dataset.draw_labels(anno_dir, label_fields=label_fields)
        print("Annotations written to '%s'" % anno_dir)
class DatasetsDeleteCommand(Command):
    """Delete FiftyOne datasets.

    Examples::

        # Delete the dataset with the given name
        fiftyone datasets delete <name>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset",
        )

    @staticmethod
    def execute(parser, args):
        name = args.name
        fod.delete_dataset(name)
        print("Dataset '%s' deleted" % name)
class AppCommand(Command):
    """Tools for working with the FiftyOne App."""

    @staticmethod
    def setup(parser):
        subparsers = parser.add_subparsers(title="available commands")
        # Registration order determines the order shown in --help
        for name, command in (
            ("launch", AppLaunchCommand),
            ("view", AppViewCommand),
            ("connect", AppConnectCommand),
        ):
            _register_command(subparsers, name, command)

    @staticmethod
    def execute(parser, args):
        # Bare `fiftyone app` invocation just prints usage
        parser.print_help()
class AppLaunchCommand(Command):
    """Launch the FiftyOne App.

    Examples::

        # Launch the app with the given dataset
        fiftyone app launch <name>

        # Launch a remote app session
        fiftyone app launch <name> --remote
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset to open",
        )
        parser.add_argument(
            "-p", "--port", metavar="PORT", default=5151, type=int,
            help="the port number to use",
        )
        parser.add_argument(
            "-r", "--remote", action="store_true",
            help="whether to launch a remote app session",
        )

    @staticmethod
    def execute(parser, args):
        dataset = fod.load_dataset(args.name)
        session = fos.launch_app(
            dataset=dataset, port=args.port, remote=args.remote
        )
        # Block until the session ends so the CLI process stays alive
        _watch_session(session, remote=args.remote)
def _watch_session(session, remote=False):
try:
if remote:
print("\nTo exit, press ctrl + c\n")
while True:
time.sleep(60)
else:
print("\nTo exit, close the app or press ctrl + c\n")
session.wait()
except KeyboardInterrupt:
pass
class AppViewCommand(Command):
    """View datasets in the App without persisting them to the database.

    Examples::

        # View a dataset stored on disk in the app
        fiftyone app view --dataset-dir <dataset-dir> --type <type>

        # View a zoo dataset in the app
        fiftyone app view --zoo-dataset <name> --splits <split1> ...

        # View a dataset stored in JSON format on disk in the app
        fiftyone app view --json-path <json-path>

        # View the dataset in a remote app session
        fiftyone app view ... --remote
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "-n", "--name", metavar="NAME", help="a name for the dataset"
        )
        parser.add_argument(
            "-d", "--dataset-dir", metavar="DATASET_DIR",
            help="the directory containing the dataset to view",
        )
        parser.add_argument(
            "-t", "--type", metavar="TYPE",
            help="the fiftyone.types.Dataset type of the dataset",
        )
        parser.add_argument(
            "-z", "--zoo-dataset", metavar="NAME",
            help="the name of a zoo dataset to view",
        )
        parser.add_argument(
            "-s", "--splits", metavar="SPLITS", nargs="+",
            help="the dataset splits to load",
        )
        parser.add_argument(
            "-j", "--json-path", metavar="JSON_PATH",
            help="the path to a samples JSON file to view",
        )
        parser.add_argument(
            "-p", "--port", metavar="PORT", default=5151, type=int,
            help="the port number to use",
        )
        parser.add_argument(
            "-r", "--remote", action="store_true",
            help="whether to launch a remote app session",
        )

    @staticmethod
    def execute(parser, args):
        if args.zoo_dataset:
            # Source 1: a dataset from the FiftyOne zoo
            dataset = foz.load_zoo_dataset(
                args.zoo_dataset,
                splits=args.splits,
                dataset_dir=args.dataset_dir,
            )
        elif args.dataset_dir:
            # Source 2: a dataset directory on disk
            dataset = fod.Dataset.from_dir(
                args.dataset_dir, etau.get_class(args.type), name=args.name
            )
        elif args.json_path:
            # Source 3: a samples JSON file on disk
            dataset = fod.Dataset.from_json(args.json_path, name=args.name)
        else:
            raise ValueError(
                "Either `zoo_dataset`, `dataset_dir`, or `json_path` must be "
                "provided"
            )

        session = fos.launch_app(
            dataset=dataset, port=args.port, remote=args.remote
        )
        # Block until the session ends so the CLI process stays alive
        _watch_session(session, remote=args.remote)
class AppConnectCommand(Command):
    """Connect to a remote FiftyOne App.

    Examples::

        # Connect to a remote app with port forwarding already configured
        fiftyone app connect

        # Connect to a remote app session
        fiftyone app connect --destination <destination> --port <port>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "-d",
            "--destination",
            metavar="DESTINATION",
            type=str,
            help="the destination to connect to, e.g., [username@]hostname",
        )
        parser.add_argument(
            "-p",
            "--port",
            metavar="PORT",
            default=5151,
            type=int,
            help="the remote port to connect to",
        )

    @staticmethod
    def execute(parser, args):
        # If a destination is given, set up an SSH tunnel forwarding local
        # port 5151 to the remote port before launching the local App.
        # Otherwise assume forwarding is already configured and just launch.
        if args.destination:
            # The ssh flags used below rely on an SSH control socket, which
            # is not available on Windows.
            if sys.platform.startswith("win"):
                raise RuntimeError(
                    "This command is currently not supported on Windows."
                )

            # Control socket path used to manage (and later tear down) the
            # background ssh tunnel.
            control_path = os.path.join(
                foc.FIFTYONE_CONFIG_DIR, "tmp", "ssh.sock"
            )
            etau.ensure_basedir(control_path)

            # Port forwarding
            # -f/-N: background, no remote command; -M/-S: create a master
            # connection on the control socket; -L: forward local 5151 to
            # the remote App port.
            ret = subprocess.call(
                [
                    "ssh",
                    "-f",
                    "-N",
                    "-M",
                    "-S",
                    control_path,
                    "-L",
                    "5151:127.0.0.1:%d" % args.port,
                    args.destination,
                ]
            )
            if ret != 0:
                print("ssh failed with exit code %r" % ret)
                return

            def stop_port_forward():
                # Ask the ssh master connection to exit, closing the tunnel.
                subprocess.call(
                    [
                        "ssh",
                        "-S",
                        control_path,
                        "-O",
                        "exit",
                        args.destination,
                    ],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                )

            # Ensure the tunnel is torn down when this process exits.
            fou.call_on_exit(stop_port_forward)

        session = fos.launch_app()
        _watch_session(session)
class ZooCommand(Command):
    """Tools for working with the FiftyOne Dataset Zoo."""

    @staticmethod
    def setup(parser):
        subparsers = parser.add_subparsers(title="available commands")
        # Registration order determines the order shown in --help
        for name, command in (
            ("list", ZooListCommand),
            ("find", ZooFindCommand),
            ("info", ZooInfoCommand),
            ("download", ZooDownloadCommand),
            ("load", ZooLoadCommand),
        ):
            _register_command(subparsers, name, command)

    @staticmethod
    def execute(parser, args):
        # Bare `fiftyone zoo` invocation just prints usage
        parser.print_help()
class ZooListCommand(Command):
    """List datasets in the FiftyOne Dataset Zoo.

    Examples::

        # List available datasets
        fiftyone zoo list

        # List available datasets, using the specified base directory to search
        # for downloaded datasets
        fiftyone zoo list --base-dir <base-dir>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "-b",
            "--base-dir",
            metavar="BASE_DIR",
            help=(
                "a custom base directory in which to search for downloaded "
                "datasets"
            ),
        )

    @staticmethod
    def execute(parser, args):
        all_datasets = foz._get_zoo_datasets()
        all_sources = foz._get_zoo_dataset_sources()
        downloaded = foz.list_downloaded_zoo_datasets(base_dir=args.base_dir)
        _print_zoo_dataset_list(all_datasets, all_sources, downloaded)
def _print_zoo_dataset_list(all_datasets, all_sources, downloaded_datasets):
    """Print a table of zoo datasets: one row per (dataset, split), with a
    download indicator, its on-disk location, and per-source availability.

    Args:
        all_datasets: dict mapping source -> {name: ZooDataset class}
        all_sources: list of source names; the first is marked "(*)" as the
            default source in the header
        downloaded_datasets: dict mapping name -> (dataset_dir, info) for
            datasets found on disk
    """
    # Invert to name -> {source: ZooDataset instance}
    available_datasets = defaultdict(dict)
    for source, datasets in all_datasets.items():
        for name, zoo_dataset_cls in datasets.items():
            available_datasets[name][source] = zoo_dataset_cls()

    records = []

    # Iterate over available datasets
    for name in sorted(available_datasets):
        dataset_sources = available_datasets[name]

        # Check for downloaded splits
        if name in downloaded_datasets:
            dataset_dir, info = downloaded_datasets[name]
        else:
            dataset_dir, info = None, None

        # Get available splits across all sources; "" stands for "no splits"
        splits = set()
        for zoo_dataset in dataset_sources.values():
            if zoo_dataset.has_splits:
                splits.update(zoo_dataset.supported_splits)
            else:
                splits.add("")

        # Iterate over available splits
        for split in sorted(splits):
            # Get available sources for the split
            srcs = []
            for source in all_sources:
                if source not in dataset_sources:
                    srcs.append("")
                    continue

                zoo_dataset = dataset_sources[source]
                if split and zoo_dataset.has_split(split):
                    srcs.append("\u2713")
                elif not split and not zoo_dataset.has_splits:
                    srcs.append("\u2713")
                else:
                    srcs.append("")

            # Get split directory
            if not split and dataset_dir:
                split_dir = dataset_dir
            elif split and info and info.is_split_downloaded(split):
                # NOTE(review): `zoo_dataset` here is the leaked loop variable
                # from the sources loop above, i.e. the last source checked --
                # presumably any source can resolve the split dir; confirm.
                split_dir = zoo_dataset.get_split_dir(dataset_dir, split)
            else:
                split_dir = ""

            is_downloaded = "\u2713" if split_dir else ""

            records.append(
                (name, split, is_downloaded, split_dir) + tuple(srcs)
            )

    # First source is the default and is flagged with "(*)" in the header
    headers = (
        ["name", "split", "downloaded", "dataset_dir"]
        + ["%s (*)" % all_sources[0]]
        + all_sources[1:]
    )
    table_str = tabulate(records, headers=headers, tablefmt=_TABLE_FORMAT)
    print(table_str)
class ZooFindCommand(Command):
    """Locate the downloaded zoo dataset on disk.

    Examples::

        # Print the location of the downloaded zoo dataset on disk
        fiftyone zoo find <name>

        # Print the location of a specific split of the dataset
        fiftyone zoo find <name> --split <split>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset"
        )
        parser.add_argument(
            "-s", "--split", metavar="SPLIT", help="a dataset split",
        )

    @staticmethod
    def execute(parser, args):
        # Resolves the on-disk location of the (split of the) dataset
        print(foz.find_zoo_dataset(args.name, split=args.split))
class ZooInfoCommand(Command):
    """Print information about downloaded zoo datasets.

    Examples::

        # Print information about a downloaded zoo dataset
        fiftyone zoo info <name>

        # Print information about the zoo dataset downloaded to the specified
        # base directory
        fiftyone zoo info <name> --base-dir <base-dir>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset"
        )
        parser.add_argument(
            "-b",
            "--base-dir",
            metavar="BASE_DIR",
            help=(
                "a custom base directory in which to search for downloaded "
                "datasets"
            ),
        )

    @staticmethod
    def execute(parser, args):
        name = args.name

        # The dataset's class docstring serves as its description
        zoo_dataset = foz.get_zoo_dataset(name)
        print("***** Dataset description *****\n%s" % zoo_dataset.__doc__)

        # Check if dataset is downloaded
        downloaded = foz.list_downloaded_zoo_datasets(base_dir=args.base_dir)

        if zoo_dataset.has_splits:
            print("***** Supported splits *****")
            print("%s\n" % ", ".join(zoo_dataset.supported_splits))

        print("***** Dataset location *****")
        if name not in downloaded:
            print("Dataset '%s' is not downloaded" % name)
        else:
            dataset_dir, info = downloaded[name]
            print(dataset_dir)
            print("\n***** Dataset info *****")
            print(info)
class ZooDownloadCommand(Command):
    """Download zoo datasets.

    Examples::

        # Download the entire zoo dataset
        fiftyone zoo download <name>

        # Download the specified split(s) of the zoo dataset
        fiftyone zoo download <name> --splits <split1> ...

        # Download to the zoo dataset to a custom directory
        fiftyone zoo download <name> --dataset-dir <dataset-dir>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset"
        )
        parser.add_argument(
            "-s", "--splits", metavar="SPLITS", nargs="+",
            help="the dataset splits to download",
        )
        parser.add_argument(
            "-d", "--dataset-dir", metavar="DATASET_DIR",
            help="a custom directory to which to download the dataset",
        )

    @staticmethod
    def execute(parser, args):
        foz.download_zoo_dataset(
            args.name, splits=args.splits, dataset_dir=args.dataset_dir
        )
class ZooLoadCommand(Command):
    """Load zoo datasets as persistent FiftyOne datasets.

    Examples::

        # Load the zoo dataset with the given name
        fiftyone zoo load <name>

        # Load the specified split(s) of the zoo dataset
        fiftyone zoo load <name> --splits <split1> ...

        # Load the zoo dataset with a custom name
        fiftyone zoo load <name> --dataset-name <dataset-name>

        # Load the zoo dataset from a custom directory
        fiftyone zoo load <name> --dataset-dir <dataset-dir>
    """

    @staticmethod
    def setup(parser):
        parser.add_argument(
            "name", metavar="NAME", help="the name of the dataset"
        )
        parser.add_argument(
            "-s", "--splits", metavar="SPLITS", nargs="+",
            help="the dataset splits to load",
        )
        parser.add_argument(
            "-n", "--dataset-name", metavar="DATASET_NAME",
            help="a custom name to give the FiftyOne dataset",
        )
        parser.add_argument(
            "-d", "--dataset-dir", metavar="DATASET_DIR",
            help="a custom directory in which the dataset is downloaded",
        )

    @staticmethod
    def execute(parser, args):
        dataset = foz.load_zoo_dataset(
            args.name,
            splits=args.splits,
            dataset_name=args.dataset_name,
            dataset_dir=args.dataset_dir,
        )
        # Persist the dataset so it survives beyond this process
        dataset.persistent = True
        print("Dataset '%s' created" % dataset.name)
def _print_dict_as_json(d):
print(json.dumps(d, indent=4))
def _print_dict_as_table(d):
    """Print the dict ``d`` to stdout as a two-column (key, value) table."""
    rows = list(d.items())
    print(tabulate(rows, headers=["key", "value"], tablefmt=_TABLE_FORMAT))
def _has_subparsers(parser):
for action in parser._actions:
if isinstance(action, argparse._SubParsersAction):
return True
return False
def _iter_subparsers(parser):
for action in parser._actions:
if isinstance(action, argparse._SubParsersAction):
for subparser in action.choices.values():
yield subparser
class _RecursiveHelpAction(argparse._HelpAction):
    """Argparse action that prints help for a parser and all of its subparsers."""

    def __call__(self, parser, *args, **kwargs):
        # Print the full help tree, then exit (same contract as `--help`)
        self._recurse(parser)
        parser.exit()

    @staticmethod
    def _recurse(parser):
        # Banner line, then this parser's help, then depth-first into children
        print("\n%s\n%s" % ("*" * 79, parser.format_help()))
        for subparser in _iter_subparsers(parser):
            _RecursiveHelpAction._recurse(subparser)
def _register_main_command(command, version=None, recursive_help=True):
    """Build the top-level argparse parser for ``command``.

    Args:
        command: a Command class providing ``setup(parser)`` and
            ``execute(parser, args)``; its docstring becomes the description
        version: optional version string; when given, adds ``-v/--version``
        recursive_help: whether to add ``--all-help`` when the command
            registered subcommands

    Returns:
        the configured ``argparse.ArgumentParser``
    """
    parser = argparse.ArgumentParser(description=command.__doc__.rstrip())
    # Route parsed args back to the command's execute() handler
    parser.set_defaults(execute=lambda args: command.execute(parser, args))
    command.setup(parser)
    if version:
        parser.add_argument(
            "-v",
            "--version",
            action="version",
            version=version,
            help="show version info",
        )
    if recursive_help and _has_subparsers(parser):
        parser.add_argument(
            "--all-help",
            action=_RecursiveHelpAction,
            # fixed user-visible typo: was "recurisvely"
            help="show help recursively and exit",
        )
    # Enable shell tab-completion for the CLI
    argcomplete.autocomplete(parser)
    return parser
def _register_command(parent, name, command, recursive_help=True):
parser = parent.add_parser(
name,
help=command.__doc__.splitlines()[0],
description=command.__doc__.rstrip(),
formatter_class=argparse.RawTextHelpFormatter,
)
parser.set_defaults(execute=lambda args: command.execute(parser, args))
command.setup(parser)
if recursive_help and _has_subparsers(parser):
parser.add_argument(
"--all-help",
action=_RecursiveHelpAction,
help="show help recurisvely and exit",
)
return parser
def main():
    """Executes the `fiftyone` tool with the given command-line args."""
    # Build the top-level parser (adds --version and shell completion)
    parser = _register_main_command(FiftyOneCommand, version=foc.VERSION_LONG)
    args = parser.parse_args()
    # Each (sub)command stored its handler via parser.set_defaults(execute=...)
    args.execute(args)
|
#!/usr/bin/env python3
#
# Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# A script to kill hanging process. The tool will return non-zero if any
# process was actually found.
#
import optparse
import os
import signal
import subprocess
import sys
import utils
# Current platform key ('win32', 'linux', or 'macos'), used to index the
# per-platform tables below.
os_name = utils.GuessOS()

# Single ps invocation that works on both linux and macos; %s is the pid.
POSIX_INFO = 'ps -p %s -o args'

# Per-platform map from logical process name to the executable name/image
# name to search for. A name missing from a platform's map is never killed
# on that platform.
EXECUTABLE_NAMES = {
    'win32': {
        'chrome': 'chrome.exe',
        'dart': 'dart.exe',
        'dart_precompiled_runtime': 'dart_precompiled_runtime.exe',
        'firefox': 'firefox.exe',
        'gen_snapshot': 'gen_snapshot.exe',
        'git': 'git.exe',
        'iexplore': 'iexplore.exe',
        'vctip': 'vctip.exe',
        'mspdbsrv': 'mspdbsrv.exe',
    },
    'linux': {
        'chrome': 'chrome',
        'dart': 'dart',
        'dart_precompiled_runtime': 'dart_precompiled_runtime',
        'firefox': 'firefox',
        'gen_snapshot': 'gen_snapshot',
        'flutter_tester': 'flutter_tester',
        'git': 'git',
    },
    'macos': {
        'chrome': 'Chrome',
        'chrome_helper': 'Chrome Helper',
        'dart': 'dart',
        'dart_precompiled_runtime': 'dart_precompiled_runtime',
        'firefox': 'firefox',
        'gen_snapshot': 'gen_snapshot',
        'git': 'git',
        'safari': 'Safari',
    }
}

# Command template (pid substituted in) that prints a process's command line.
INFO_COMMAND = {
    'win32': 'wmic process where Processid=%s get CommandLine',
    'macos': POSIX_INFO,
    'linux': POSIX_INFO,
}

# Command template (pid substituted in) that dumps a process's stack; None
# means stack dumping is unsupported on that platform.
STACK_INFO_COMMAND = {
    'win32': None,
    'macos': '/usr/bin/sample %s 1 4000 -mayDie',
    'linux': '/usr/bin/eu-stack -p %s',
}
def GetOptions():
    """Parse the command-line flags that select which process groups to kill."""
    parser = optparse.OptionParser('usage: %prog [options]')
    true_or_false = ['True', 'False']
    # (flag, default, help) triples; order matters for --help output.
    flag_specs = [
        ("--kill_dart", 'True', "Kill all dart processes"),
        ("--kill_vc", 'True', "Kill all git processes"),
        ("--kill_vsbuild", 'False',
         "Kill all visual studio build related processes"),
        ("--kill_browsers", 'False', "Kill all browser processes"),
    ]
    for flag, default, help_text in flag_specs:
        parser.add_option(
            flag,
            default=default,
            type='choice',
            choices=true_or_false,
            help=help_text)
    options, _ = parser.parse_args()
    return options
def GetPidsPosix(process_name):
    """Return pids (as strings) of processes whose command ends with process_name."""
    # One command that works on both linux and macos; on linux alone we could
    # just run: pidof process_name
    cmd = 'ps -e -o pid= -o comm='
    # Sample output:
    #   1 /sbin/launchd
    #   80943 /Applications/Safari.app/Contents/MacOS/Safari
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True,
                            universal_newlines=True)
    output, _ = proc.communicate()
    pids = []
    for line in output.splitlines():
        fields = line.split()
        # On mac, ps reports the full path for non-system binaries, so match
        # on the suffix of the re-joined command column.
        if len(fields) >= 2 and " ".join(fields[1:]).endswith(process_name):
            pids.append(fields[0])
    return pids
def GetPidsWindows(process_name):
    """Return pids (as strings) of Windows processes with the given image name."""
    cmd = 'tasklist /FI "IMAGENAME eq %s" /NH' % process_name
    # Sample output:
    # dart.exe    4356 Console   1     6,800 K
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True,
                            universal_newlines=True)
    output, _ = proc.communicate()
    pids = []
    for line in output.splitlines():
        fields = line.split()
        # Column 0 is the image name; column 1 is the pid.
        if len(fields) > 2 and fields[0] == process_name:
            pids.append(fields[1])
    return pids
def GetPids(process_name):
    """Dispatch pid lookup to the platform-specific implementation."""
    if os_name == "win32":
        return GetPidsWindows(process_name)
    return GetPidsPosix(process_name)
def PrintPidStackInfo(pid):
    """Print a stack dump of the given pid, if this platform supports one."""
    # Falsy entry (None on win32, or an unknown platform) means "no dump".
    command_pattern = STACK_INFO_COMMAND.get(os_name, False)
    if not command_pattern:
        return
    proc = subprocess.Popen(command_pattern % pid,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True,
                            universal_newlines=True)
    stdout, stderr = proc.communicate()
    print("  Stack:")
    for line in stdout.splitlines():
        print("    %s" % line)
    if stderr.splitlines():
        print("  Stack (stderr):")
        for line in stderr.splitlines():
            print("    %s" % line)
def PrintPidInfo(pid, dump_stacks):
    """Print the pid and command line of a hanging process (and optionally its stack)."""
    # We assume that the list command will return lines in the format:
    #   EXECUTABLE_PATH ARGS
    # There may be blank strings in the output
    proc = subprocess.Popen(INFO_COMMAND[os_name] % pid,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True,
                            universal_newlines=True)
    output, _ = proc.communicate()
    # Drop the header row from the listing.
    lines = output.splitlines()[1:]
    print("Hanging process info:")
    print("  PID: %s" % pid)
    for line in lines:
        # wmic will output a bunch of empty strings, we ignore these
        if line:
            print("  Command line: %s" % line)
    if dump_stacks:
        PrintPidStackInfo(pid)
def KillPosix(pid):
    """Force-kill the process with the given pid string (best effort).

    Args:
        pid (str): The pid to kill, as produced by GetPids().
    """
    try:
        os.kill(int(pid), signal.SIGKILL)
    except (OSError, ValueError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). The process may already be dead from
        # killing another process, or the pid string may be malformed; this
        # is a best-effort kill, so ignore both cases.
        pass
def KillWindows(pid):
    """Force-kill the given pid on Windows via taskkill."""
    # os.kill is not available until python 2.7
    proc = subprocess.Popen("taskkill /F /PID %s" % pid,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True,
                            universal_newlines=True)
    proc.communicate()
def Kill(name, dump_stacks=False):
    """Kill every running process registered under `name` for this platform.

    Returns:
        int: How many matching processes were found (and killed).
    """
    # Names not registered for this OS are silently skipped.
    if name not in EXECUTABLE_NAMES[os_name]:
        return 0
    print("***************** Killing %s *****************" % name)
    matching = GetPids(EXECUTABLE_NAMES[os_name][name])
    for pid in matching:
        # Log the victim (and optionally its stack) before killing it.
        PrintPidInfo(pid, dump_stacks)
        if os_name == "win32":
            KillWindows(pid)
        else:
            KillPosix(pid)
        print("Killed pid: %s" % pid)
    if not matching:
        print(" No %s processes found." % name)
    return len(matching)
def KillBrowsers():
    """Kill browser processes; returns how many were found (chrome excluded)."""
    found = Kill('firefox')
    # We don't give error on killing chrome. It happens quite often that the
    # browser controller fails in killing chrome, so we silently do it here.
    Kill('chrome')
    for browser in ('chrome_helper', 'iexplore', 'safari'):
        found += Kill(browser)
    return found
def KillVCSystems():
    """Kill version-control related processes (currently just git)."""
    return Kill('git')
def KillVSBuild():
    """Kill lingering Visual Studio build helper processes."""
    # Left-to-right evaluation preserves the original kill order.
    return Kill('vctip') + Kill('mspdbsrv')
def KillDart():
    """Kill Dart toolchain processes, dumping stacks for postmortem debugging."""
    total = 0
    for name in ("dart", "gen_snapshot",
                 "dart_precompiled_runtime", "flutter_tester"):
        total += Kill(name, dump_stacks=True)
    return total
def Main():
    """Kill the requested process groups.

    Returns:
        int: Number of processes found/killed (non-zero means something was
        hanging). On win32 the dart count is intentionally excluded.
    """
    options = GetOptions()
    status = 0
    if options.kill_dart == 'True':
        if os_name == "win32":
            # TODO(24086): Add result of KillDart into status once pub hang is fixed.
            KillDart()
        else:
            status += KillDart()
    if options.kill_vc == 'True':
        status += KillVCSystems()
    # VS build helpers only exist on Windows.
    if options.kill_vsbuild == 'True' and os_name == 'win32':
        status += KillVSBuild()
    if options.kill_browsers == 'True':
        status += KillBrowsers()
    return status
if __name__ == '__main__':
    # Exit code = number of hanging processes found (non-zero => some existed).
    sys.exit(Main())
|
import math
from configs import Config
def default_configs(name=None, phi=0, batch_size=4, image_size=512):
    """Build the default training configuration for an EfficientNet model.

    Bug fix: callers (see ``efficientdet_model_param_dict``) pass ``phi=...``
    with no ``name``, but the old signature ``(name, batch_size, image_size)``
    rejected ``phi``, and the body referenced an undefined ``phi`` when
    selecting the pretrained weights — every call raised. ``phi`` is now a
    real parameter, and ``name`` is derived from it when omitted; the old
    positional call style still works.

    Args:
        name (str, optional): Model name, e.g. "EfficientNetB0". Derived from
            ``phi`` when omitted.
        phi (int, optional): EfficientNet scaling coefficient; selects the
            pretrained weights file. Defaults to 0.
        batch_size (int, optional): Batch size for the train/val datasets.
        image_size (int, optional): Square input resolution for Resize.

    Returns:
        Config: The populated configuration object.
    """
    if name is None:
        name = "EfficientNetB%d" % phi
    h = Config()
    h.dtype = "float32"
    # backbone
    h.model = dict(model=name,
                   convolution="conv2d",
                   dropblock=None,
                   # dropblock=dict(keep_prob=None,
                   #                block_size=None)
                   normalization=dict(normalization="batch_norm",
                                      momentum=0.99,
                                      epsilon=1e-3,
                                      axis=-1,
                                      trainable=True),
                   activation=dict(activation="relu"),
                   strides=[2, 1, 2, 2, 2, 1, 2, 1],
                   dilation_rates=[1, 1, 1, 1, 1, 1, 1, 1],
                   output_indices=[-1, ],
                   frozen_stages=[-1, ],
                   num_classes=1)
    # loss (binary classification from logits)
    h.use_sigmoid = True
    h.loss = dict(loss="BinaryCrossEntropy", weight=1., from_logits=True, reduction="none")
    h.weight_decay = 4e-5
    # dataset
    h.num_classes = 1
    h.train = dict(dataset=dict(dataset="SmokingDataset",
                                batch_size=batch_size,
                                dataset_dir="/data/bail/smoking/train",
                                training=True,
                                augmentations=[
                                    dict(Resize=dict(input_size=image_size)),
                                    # dict(RandAugment=dict(num_layers=2,
                                    #                       magnitude=10.,
                                    #                       cutout_const=40.,
                                    #                       translate_const=100.))
                                ]))
    h.val = dict(dataset=dict(dataset="SmokingDataset",
                              batch_size=batch_size,
                              dataset_dir="/data/bail/smoking/val",
                              training=False,
                              augmentations=[
                                  dict(Resize=dict(input_size=image_size))
                              ]))
    # train: weights file is chosen by the scaling coefficient `phi`
    h.pretrained_weights_path = "/data/bail/pretrained_weights/efficientnet-b%d.h5" % phi
    h.optimizer = dict(optimizer="SGD", momentum=0.9)
    h.lookahead = None
    h.learning_rate_scheduler = dict(scheduler="CosineDecay",
                                     initial_learning_rate=0.016,
                                     warmup_steps=800,
                                     warmup_learning_rate=0.001,
                                     train_steps=40001)
    h.checkpoint_dir = "checkpoints/%s" % name
    h.summary_dir = "logs/%s" % name
    h.gradient_clip_norm = 0.
    h.log_every_n_steps = 100
    h.save_ckpt_steps = 2000
    h.val_every_n_steps = 2000
    return h
# Per-variant keyword arguments forwarded to default_configs() by
# get_efficientnet_config(): scaling coefficient, batch size, and input size.
efficientdet_model_param_dict = {
    "EfficientNetB0": dict(phi=0, batch_size=32, image_size=224),
    "EfficientNetB1": dict(phi=1, batch_size=32, image_size=240),
    "EfficientNetB2": dict(phi=2, batch_size=4, image_size=260),
    "EfficientNetB3": dict(phi=3, batch_size=4, image_size=300),
    "EfficientNetB4": dict(phi=4, batch_size=4, image_size=380),
    "EfficientNetB5": dict(phi=5, batch_size=4, image_size=456),
    "EfficientNetB6": dict(phi=6, batch_size=4, image_size=528),
    "EfficientNetB7": dict(phi=7, batch_size=4, image_size=600),
}
def get_efficientnet_config(model_name="EfficientNetB0"):
    """Return the default Config for the named EfficientNet variant.

    Raises:
        KeyError: If `model_name` is not in `efficientdet_model_param_dict`.
    """
    return default_configs(**efficientdet_model_param_dict[model_name])
if __name__ == "__main__":
    # Fixed NameError: this previously called get_efficientdet_config(),
    # which is not defined in this module; the defined builder is
    # get_efficientnet_config().
    print(get_efficientnet_config("EfficientNetB0"))
|
# coding: utf-8
"""
蓝鲸用户管理 API
蓝鲸用户管理后台服务 API # noqa: E501
OpenAPI spec version: v2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from bkuser_sdk.api_client import ApiClient
class BatchApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def v2_batch_departments_multiple_retrieve_profiles(self, department_ids, **kwargs):  # noqa: E501
        """v2_batch_departments_multiple_retrieve_profiles  # noqa: E501

        Batch-fetch the profiles (users) of the given departments.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.v2_batch_departments_multiple_retrieve_profiles(department_ids, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str department_ids: comma-separated list of department ids (required)
        :param bool recursive:
        :return: list[Profile]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # Async callers get the request thread; sync callers get unwrapped data.
        if kwargs.get('async_req'):
            return self.v2_batch_departments_multiple_retrieve_profiles_with_http_info(department_ids, **kwargs)  # noqa: E501
        else:
            (data) = self.v2_batch_departments_multiple_retrieve_profiles_with_http_info(department_ids, **kwargs)  # noqa: E501
            return data

    def v2_batch_departments_multiple_retrieve_profiles_with_http_info(self, department_ids, **kwargs):  # noqa: E501
        """v2_batch_departments_multiple_retrieve_profiles  # noqa: E501

        Batch-fetch the profiles (users) of the given departments.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.v2_batch_departments_multiple_retrieve_profiles_with_http_info(department_ids, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str department_ids: comma-separated list of department ids (required)
        :param bool recursive:
        :return: list[Profile]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['department_ids', 'recursive']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() captures self/department_ids/kwargs; kwargs entries are
        # validated against the whitelist above before being merged in.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method v2_batch_departments_multiple_retrieve_profiles" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'department_ids' is set
        if ('department_ids' not in params or
                params['department_ids'] is None):
            raise ValueError("Missing the required parameter `department_ids` when calling `v2_batch_departments_multiple_retrieve_profiles`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'department_ids' in params:
            query_params.append(('department_ids', params['department_ids']))  # noqa: E501
        if 'recursive' in params:
            query_params.append(('recursive', params['recursive']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/api/v2/batch/departments/profiles/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Profile]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def v2_batch_profiles_delete(self, body, **kwargs):  # noqa: E501
        """v2_batch_profiles_delete  # noqa: E501

        Batch-delete profiles (users).  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.v2_batch_profiles_delete(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param list[UpdateProfile] body: (required)
        :return: Empty
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # Async callers get the request thread; sync callers get unwrapped data.
        if kwargs.get('async_req'):
            return self.v2_batch_profiles_delete_with_http_info(body, **kwargs)  # noqa: E501
        else:
            (data) = self.v2_batch_profiles_delete_with_http_info(body, **kwargs)  # noqa: E501
            return data

    def v2_batch_profiles_delete_with_http_info(self, body, **kwargs):  # noqa: E501
        """v2_batch_profiles_delete  # noqa: E501

        Batch-delete profiles (users).  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.v2_batch_profiles_delete_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param list[UpdateProfile] body: (required)
        :return: Empty
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Validate kwargs against the whitelist, then merge into params.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method v2_batch_profiles_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `v2_batch_profiles_delete`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/api/v2/batch/profiles/', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Empty',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def v2_batch_profiles_partial_update(self, body, **kwargs):  # noqa: E501
        """v2_batch_profiles_partial_update  # noqa: E501

        Batch-update profiles (users).  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.v2_batch_profiles_partial_update(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param list[UpdateProfile] body: (required)
        :return: list[Profile]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # Async callers get the request thread; sync callers get unwrapped data.
        if kwargs.get('async_req'):
            return self.v2_batch_profiles_partial_update_with_http_info(body, **kwargs)  # noqa: E501
        else:
            (data) = self.v2_batch_profiles_partial_update_with_http_info(body, **kwargs)  # noqa: E501
            return data

    def v2_batch_profiles_partial_update_with_http_info(self, body, **kwargs):  # noqa: E501
        """v2_batch_profiles_partial_update  # noqa: E501

        Batch-update profiles (users).  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.v2_batch_profiles_partial_update_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param list[UpdateProfile] body: (required)
        :return: list[Profile]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Validate kwargs against the whitelist, then merge into params.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method v2_batch_profiles_partial_update" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `v2_batch_profiles_partial_update`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/api/v2/batch/profiles/', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Profile]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def v2_batch_profiles_read(self, **kwargs):  # noqa: E501
        """v2_batch_profiles_read  # noqa: E501

        Batch-fetch profiles (users).  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.v2_batch_profiles_read(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: list[Profile]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # Async callers get the request thread; sync callers get unwrapped data.
        if kwargs.get('async_req'):
            return self.v2_batch_profiles_read_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.v2_batch_profiles_read_with_http_info(**kwargs)  # noqa: E501
            return data

    def v2_batch_profiles_read_with_http_info(self, **kwargs):  # noqa: E501
        """v2_batch_profiles_read  # noqa: E501

        Batch-fetch profiles (users).  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.v2_batch_profiles_read_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: list[Profile]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Validate kwargs against the whitelist, then merge into params.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method v2_batch_profiles_read" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/api/v2/batch/profiles/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Profile]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
|
import unittest
import numpy as np
import tensorflow as tf
from kgcnn.layers.gather import GatherNodes
class TestTopKLayerDisjoint(unittest.TestCase):
    # NOTE(review): the class name mentions TopK but both tests exercise
    # GatherNodes — the name looks copied from another test module; confirm
    # before renaming (renaming changes test discovery/reporting).

    # Fixture: per-graph node features for two ragged graphs (8 and 15 nodes).
    n1 = [[[1.0], [6.0], [1.0], [6.0], [1.0], [1.0], [6.0], [6.0]],
          [[6.0], [1.0], [1.0], [1.0], [7.0], [1.0], [6.0], [8.0], [6.0], [1.0], [6.0], [7.0], [1.0], [1.0], [1.0]]]
    # Fixture: per-graph edge index pairs [source, target].
    ei1 = [[[0, 1], [1, 0], [1, 6], [2, 3], [3, 2], [3, 5], [3, 7], [4, 7], [5, 3], [6, 1], [6, 7], [7, 3], [7, 4],
            [7, 6]],
           [[0, 6], [0, 8], [0, 9], [1, 11], [2, 4], [3, 4], [4, 2], [4, 3], [4, 6], [5, 10], [6, 0], [6, 4], [6, 14],
            [7, 8], [8, 0], [8, 7], [8, 11], [9, 0], [10, 5], [10, 11], [10, 12], [10, 13], [11, 1], [11, 8], [11, 10],
            [12, 10], [13, 10], [14, 6]]]
    # Fixture: per-edge scalar weights (unused by these two tests).
    e1 = [[[0.408248290463863], [0.408248290463863], [0.3333333333333334], [0.35355339059327373], [0.35355339059327373],
           [0.35355339059327373], [0.25], [0.35355339059327373], [0.35355339059327373], [0.3333333333333334],
           [0.2886751345948129], [0.25], [0.35355339059327373], [0.2886751345948129]],
          [[0.25], [0.25], [0.35355339059327373], [0.35355339059327373], [0.35355339059327373], [0.35355339059327373],
           [0.35355339059327373], [0.35355339059327373], [0.25], [0.3162277660168379], [0.25], [0.25],
           [0.35355339059327373], [0.35355339059327373], [0.25], [0.35355339059327373], [0.25], [0.35355339059327373],
           [0.3162277660168379], [0.22360679774997896], [0.3162277660168379], [0.3162277660168379],
           [0.35355339059327373], [0.25], [0.22360679774997896], [0.3162277660168379], [0.3162277660168379],
           [0.35355339059327373]]]

    def test_gather_nodes_concat(self):
        # GatherNodes with default concat: output per edge is the source and
        # target node features concatenated -> shape (28 edges, 2*1) for graph 1.
        node = tf.ragged.constant(self.n1, ragged_rank=1, inner_shape=(1,))
        edgeind = tf.ragged.constant(self.ei1, ragged_rank=1, inner_shape=(2,))
        gathered_nodes_concat = GatherNodes()([node, edgeind])
        # Reference computed with plain numpy fancy indexing on graph 1.
        np_gather = np.reshape(np.array(self.n1[1])[np.array(self.ei1[1])], (28, 2*1))
        test = np.sum(np.abs(np.array(gathered_nodes_concat[1]) - np_gather)) < 1e-6
        self.assertTrue(test)

    def test_gather_nodes(self):
        # concat_axis=None keeps the gathered pair axis instead of concatenating.
        node = tf.ragged.constant(self.n1, ragged_rank=1, inner_shape=(1,))
        edgeind = tf.ragged.constant(self.ei1, ragged_rank=1, inner_shape=(2,))
        gathered_nodes = GatherNodes(concat_axis=None)([node, edgeind])
        np_gather = np.array(self.n1[1])[np.array(self.ei1[1])]
        test = np.sum(np.abs(np.array(gathered_nodes[1]) - np_gather)) < 1e-6
        self.assertTrue(test)

    # def test_gather_empty(self):
    #     node = tf.ragged.constant(self.n1, ragged_rank=1, inner_shape=(1,))
    #
    #     ei2 = tf.RaggedTensor.from_row_lengths(tf.constant([],dtype=tf.int64),tf.constant([0,0],dtype=tf.int64))
    #     gather_empty = GatherNodes(concat_axis=False)([node,ei2])
    #     gather_empty_concat = GatherNodes(concat_axis=True)([node, ei2])
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
<filename>experimentum/Experiments/Experiment.py
# -*- coding: utf-8 -*-
"""Run experiments and save the results for future analysis.
Writing Experiments
-------------------
Experiments are created in the `experiments` directory and they must adhere to
the following naming convention: `{NAME}Experiment.py`.
All Experiments extend the :py:class:`.Experiment` class. Experiments
contain a :py:meth:`~.Experiment.reset` and :py:meth:`~.Experiment.run`
method. Within the :py:meth:`~.Experiment.reset` method, you should
reset/initialize the data structures and values you want to use in each test run.
The :py:meth:`~.Experiment.run` method should contain the code you want to test.
It should return a dictionary with the values you want to save.
The Reset Method
----------------
As mentioned before, the :py:meth:`~.Experiment.reset` method should reset/initialize the
data structures and values you want to use in each test run.
Let's take a look at a basic experiment. Within any of your experiment methods,
you always have access to the :py:attr:`~.Experiment.app` attribute which
provides access to the main app class and to the :py:attr:`~.Experiment.config` which
contains the content of the :py:attr:`~.Experiment.config_file`::
from experimentum.Experiments import Experiment
import random
class FooExperiment(Experiment):
config_file = 'foo.json'
def reset(self):
# use app to create an instance of a custom aliased class
self.user = self.config.get('user')
self.some_class = self.app.make('some_class', self.user)
self.rand = random.randint(0, 10)
The Run Method
--------------
As mentioned before the :py:meth:`~.Experiment.run` method should contain the code
you want to test and return a dictionary with the values you want to save.
Let's take a look at a basic experiment, assuming that you added a ``rand`` attribute to
your TestCaseRepository with a migration::
from experimentum.Experiments import Experiment
import random
class FooExperiment(Experiment):
config_file = 'foo.json'
def run(self):
with self.performance.point('Task to measure some Rscript algo') as point:
script = self.call('some_script.r') # prints json to stdout as return value.
algo_result = script.get_json()
script.process.wait() # Wait for child process to terminate.
# Add a custom performance entry
return {
'rand': self.rand,
'performances': [{
'label': 'Custom Rscript measuring',
'time': algo_result.get('time'),
'memory': 0,
'peak_memory': 0,
'level': 0,
'type': 'custom'
}]
}
"""
from __future__ import print_function
import os
import glob
import subprocess
import json
from datetime import datetime
from six import add_metaclass
from abc import abstractmethod, ABCMeta
from experimentum.Config import Config
from experimentum.Experiments import Performance
from experimentum.cli import print_progress, print_failure
from experimentum.utils import get_basenames, load_class, find_files
class Script(object):
    """Call another script to run algorithms for your experiment.

    Example::

        script = Script(['Rscript', 'myalgo.r', 'arg1', 'arg2'], verbose=True, shell=True)
        algo_result = script.get_json()
        script.process.wait()  # Wait for child process to terminate.

    Attributes:
        process (subprocess.Popen): Called script.
        output (bytes): Last line the script printed to stdout (raw bytes,
            since the pipe is opened in binary mode; see the ``decode`` in
            ``__init__``), or None if nothing was read.
    """

    def __init__(self, cmd, verbose=False, shell=False, stdout=subprocess.PIPE):
        """Get the return vaue of a process (i.e, the last print statement).

        .. Warning::
            Passing ``shell=True`` can be a security hazard if combined with untrusted input.
            See the warning under `Frequently Used Arguments
            <https://docs.python.org/2/library/subprocess.html#frequently-used-arguments>`_
            for details.

        Args:
            cmd (str, list): Command which you want to call.
            verbose (bool, optional): Defaults to False. Print the cmd output or not.
            shell (bool, optional): Defaults to False. Specifices whether to use the
                shell as the program to execute.
            stdout (int, optional): Defaults to subprocess.PIPE. Specify standard output.
        """
        self.process = subprocess.Popen(cmd, stdout=stdout, shell=shell)
        self.output = None
        # poll will return the exit code if the process is completed otherwise it returns null
        while self.process.poll() is None:
            line = self.process.stdout.readline()
            # An empty read means the pipe closed; stop even if poll() is still None.
            if not line:
                break
            self.output = line  # last print statement
            if verbose:
                print(line.rstrip().decode('utf-8'))

    def get_json(self):
        """Decode JSON of process output.

        Returns:
            object: Process output
        """
        # json.loads accepts the raw bytes stored in self.output directly.
        return json.loads(self.get_text())

    def get_text(self):
        """Get the text of the process output.

        Returns:
            bytes: Raw last output line of the process (None if none was read).
        """
        return self.output
@add_metaclass(ABCMeta)
class Experiment(object):
"""Run experiments and save the results in the data store.
Attributes:
app (App): Main Application Class.
performance (Performance): Performance Profiler.
config (Config): Hold the experiment configuration.
show_progress (bool): Flag to show/hide the progress bar.
hide_performance (bool): Flag to show/hide the performance table.
config_file (str): Config file to load.
repos (dict): Experiment and Testcase Repo to save results.
"""
config_file = None
def __init__(self, app, path):
    """Init the experiment.

    Args:
        app (App): Main Application class
        path (str): Path to experiments folder
    """
    self.app = app
    # Fresh profiler and config holder per experiment instance
    self.performance = Performance()
    self.config = Config()
    # Display flags toggled externally before running
    self.show_progress = False
    self.hide_performance = False
    # Result repositories; populated later (None until wired up)
    self.repos = {'experiment': None, 'testcase': None}
    self._path = path
@staticmethod
def get_experiments(path):
    """[DEPRECATED] Get experiment names from exp files/classes.

    Args:
        path (str): Path to experiments folder.

    Returns:
        list: Names of experiments
    """
    # TODO: Deprecated remove!
    print('[DEPRECATED]: Remove Experiments.get_experiments usage!!')
    # Each non-private *.py file counts as one experiment; stripping the
    # 'experiment.py' suffix from the lowercased basename yields its name.
    pattern = os.path.join(path, '[!_]*.py')
    return [
        os.path.basename(filename).lower().replace('experiment.py', '')
        for filename in glob.glob(pattern)
    ]
@staticmethod
def get_status(app):
"""Get status information about experiments.
Args:
app (App): Main Service Provider/Container.
Returns:
dict: Dictionary with experiment status
"""
# Load experiment classes
path = app.config.get('app.experiments.path', 'experiments')
exps = get_basenames(app.root, path, 'experiment.py')
data = {exp.lower(): {'count': 0, 'name': exp} for exp in exps}
# Load experiment stats
repo = app.repositories.get('ExperimentRepository')
rows = repo.all()
for exp in rows:
idx = exp.name.lower()
# Exp file does not exist anymore
if idx not in data:
data[idx] = {'count': 0, 'name': exp.name, 'missing': True}
data[idx]['count'] += 1
if exp.config_file:
data[idx]['config_file'] = exp.config_file
return data
@staticmethod
def load(app, path, name):
"""Load and initialize an experiment class.
Args:
app (App): Main app calss
path (str): Path to experiments folder.
name (str): Name of experiment.
Returns:
Experiment: Loaded experiment.
"""
# Find Experiment Files
files = find_files(app.root, path, name, remove='experiment.py')
if not files:
print_failure(
'Could not find experiment named "{}" under path "{}"'.format(name, path),
exit_code=1
)
# Load Experiment class if possible
experiment = load_class(files[0], 'experiments', Experiment)
return experiment(app, path)
@staticmethod
def call(cmd, verbose=False, shell=False):
"""Call another script to run algorithms for your experiment.
.. Warning::
Passing ``shell=True`` can be a security hazard if combined with untrusted input.
See the warning under `Frequently Used Arguments
<https://docs.python.org/2/library/subprocess.html#frequently-used-arguments>`_
for details.
Args:
cmd (str, list): Command which you want to call.
verbose (bool, optional): Defaults to False. Print the cmd output or not.
shell (bool, optional): Defaults to False. Specifices whether to use the
shell as the program to execute.
Returns:
Script: Executed script to get output from
"""
return Script(cmd, verbose, shell)
def boot(self):
"""Boot up the experiment, e.g. load config etc."""
# Load Config/Args for experiment
if self.config_file:
try:
with open(os.path.join(self._path, self.config_file), 'r') as cfg:
self.config.set(json.load(cfg))
except Exception as exc:
print_failure(exc, 2)
# Load Experiment and testcase repos
try:
self.repos['experiment'] = self.app.repositories.get('ExperimentRepository')
self.repos['testcase'] = self.app.repositories.get('TestCaseRepository')
self.repos['experiment'] = self.repos['experiment'].from_dict({
'name': self.__class__.__name__.replace('Experiment', ''),
'start': datetime.now(),
'config_file': self.config_file,
'config_content': json.dumps(self.config.all()),
'tests': []
})
self.repos['experiment'].create()
except Exception as exc:
print_failure(exc, 2)
def start(self, steps=10):
"""Start the test runs of the experiment.
Args:
steps (int, optional): Defaults to 10. How many tests runs should be executed.
"""
# Booting
with self.performance.point('Booting Experiment'):
self.boot()
# Running tests
for iteration in self.performance.iterate(1, steps):
# Reset test state
result = None
self.reset()
# Run experiment
with self.performance.point('Runing Experiment'):
result = self.run()
# Save Results
if result:
self.save(result, iteration)
else:
msg = 'Experiment returned an empty result. Are you sure this is correct?'
self.app.log.warning(msg)
print('[WARNING]: ' + msg)
if self.show_progress:
print_progress(iteration, steps, prefix='Progress:', suffix='Complete')
# Finished Experiment
self.repos['experiment'].finished = datetime.now()
self.repos['experiment'].update()
if self.hide_performance is False:
self.performance.results()
def save(self, result, iteration):
"""Save the test results in the data store.
Args:
result (dict): Result of experiment test run.
iteration (int): Number of test run iteration.
"""
data = {
'experiment_id': self.repos['experiment'].id,
'iteration': iteration,
'performances': []
}
data.update(result)
data['performances'].extend(self.performance.export())
try:
self.repos['testcase'].from_dict(data).create()
except Exception as exc:
for msg in str(exc).split('\n'):
print_failure(msg)
raise SystemExit(-1)
@abstractmethod
def reset(self):
"""Reset data structured and values used in the run method."""
raise NotImplementedError('Must implement reset method.')
@abstractmethod
def run(self):
"""Run a test of the experiment."""
raise NotImplementedError('Must implement run method.')
|
<reponame>lilinghell/devops
from django.db import models
from django.utils.translation import ugettext_lazy as _
from applications.models import Application
from projects.models import Project
from common.mixin import BaseModelMixin
from common.models import Attachment
class FeatureImpactDesign(BaseModelMixin):
    """Impact analysis design.

    Placeholder model: no fields beyond those inherited from BaseModelMixin.
    """
    pass
class InterfaceGroup(BaseModelMixin):
    """API group design.

    Tree-structured grouping of API interfaces (via the self-referencing
    ``parent``), scoped to an application and optionally a project.
    """
    name = models.CharField(max_length=128, verbose_name="组名")
    # Deleting a parent keeps the children but nulls their parent link.
    parent = models.ForeignKey('self', null=True, related_name="child_group",
                               on_delete=models.SET_NULL,
                               verbose_name=_("父节点信息"))
    description = models.TextField(null=True, verbose_name="描述")
    application = models.ForeignKey(Application, verbose_name="归属应用", related_name="interface_group_application",
                                    on_delete=models.CASCADE)
    project = models.ForeignKey(Project, verbose_name="归属项目",
                                related_name="project_group", null=True, on_delete=models.CASCADE)

    class Meta:
        db_table = "interface_group"
        # Group names are unique within one application.
        unique_together = ['name', 'application']
class Interfaces(BaseModelMixin):
    """API interface design.

    One designed HTTP endpoint, grouped into an InterfaceGroup and owned
    by an application (and optionally a project).
    """
    METHOD_GET = "GET"
    METHOD_POST = "POST"
    METHOD_PUT = "PUT"
    METHOD_PATCH = "PATCH"
    METHOD_DELETE = "DELETE"
    METHOD_CHOICE = (
        (METHOD_GET, _("GET")), (METHOD_POST, _("POST")), (METHOD_PUT, _("PUT")), (METHOD_PATCH, _("PATCH")), (METHOD_DELETE, _("DELETE")))
    # Status "0": development finished
    STATUS_0 = "0"
    # Status "1": in development
    STATUS_1 = "1"
    STATUS_CHOICE = ((STATUS_0, _("0")), (STATUS_1, _("1")))
    name = models.CharField(max_length=128, verbose_name="接口名")
    url = models.CharField(max_length=64, verbose_name="url")
    # CharFields must define a 'max_length' attribute
    method = models.CharField(choices=METHOD_CHOICE,
                              max_length=32, verbose_name="http方法")
    description = models.TextField(null=True, verbose_name="描述")
    version = models.CharField(max_length=16, verbose_name="版本")
    status = models.CharField(choices=STATUS_CHOICE,
                              max_length=2, verbose_name="状态")
    open = models.BooleanField(default=True, verbose_name="是否开放")
    # Cascade delete: removing the application removes its interfaces.
    application = models.ForeignKey(
        Application, related_name="interface_application", on_delete=models.CASCADE)
    group = models.ForeignKey(
        InterfaceGroup, related_name="interface_group", on_delete=models.CASCADE)
    project = models.ForeignKey(Project, verbose_name="归属项目",
                                related_name="project_interface", null=True, on_delete=models.CASCADE)
    info = models.TextField(null=True, blank=True, verbose_name=_("接口信息"))
    # Since Django 2.0, ForeignKey/OneToOneField require on_delete; it defines
    # how referencing rows stay consistent when the referenced row is deleted.

    class Meta:
        db_table = "interfaces"
        ordering = ["-created_at"]
        # A flat single-field list means one constraint: url must be unique.
        # NOTE(review): unique=True on the 'url' field would express this more
        # directly — confirm before changing (it would alter migrations).
        unique_together = ['url']
class InterfaceTest(BaseModelMixin):
    """Stored request/response sample used to test an interface."""
    interface = models.ForeignKey(
        Interfaces, related_name="interface_test_interface", on_delete=models.CASCADE)
    body = models.TextField(verbose_name="请求body")
    response = models.TextField(null=True, blank=True, verbose_name="response")

    class Meta:
        db_table = "interface_test"
        ordering = ["-created_at"]
        # At most one test record per interface.
        unique_together = ['interface']
class InterfaceDictionary(BaseModelMixin):
    """Data dictionary entry: a named field with a JSON-schema-style type."""
    TYPE_STRING = "string"
    TYPE_NUMBER = "number"
    TYPE_BOOLEAN = "boolean"
    TYPE_INTEGER = "integer"
    TYPE_ARRAY = "array"
    TYPE_CHOICE = ((TYPE_STRING, _("string")), (TYPE_NUMBER, _("number")),
                   (TYPE_BOOLEAN, _("boolean")), (TYPE_INTEGER, _("integer")), (TYPE_ARRAY, _("array")))
    name = models.CharField(max_length=128, verbose_name=_("名称"))
    # CharFields must define a 'max_length' attribute
    type = models.CharField(choices=TYPE_CHOICE,
                            verbose_name=_("类型"), max_length=20)
    description = models.TextField(null=True, blank=True, verbose_name=_("描述"))

    class Meta:
        db_table = "interface_dictionary"
        ordering = ["-created_at"]
        unique_together = ['name', 'type']  # composite uniqueness constraint
class EsbInterfaces(BaseModelMixin):
    """ESB (Enterprise Service Bus) service/operation registration."""
    service_name = models.CharField(max_length=128, verbose_name="服务名")
    service_desc = models.CharField(max_length=128, verbose_name="服务描述")
    operation_name = models.CharField(max_length=128, verbose_name="操作名")
    operation_desc = models.CharField(max_length=128, verbose_name="操作描述")
    attachments = models.ManyToManyField(Attachment, related_name="esb_interface_attachments", blank=True,
                                         verbose_name=_("ESB服务附件"))
    server_name = models.CharField(
        max_length=128, verbose_name="服务方", null=True, blank=True)
    url = models.URLField(verbose_name="url", null=True, blank=True)

    class Meta:
        db_table = "interface_esb"
        ordering = ["-created_at"]
        # Operation names are globally unique.
        unique_together = ['operation_name']
|
<reponame>georgia-tech-db/Eva
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch, MagicMock, call
from eva.binder.binder_utils import (create_video_metadata,
handle_if_not_exists,
bind_table_info)
from eva.catalog.column_type import ColumnType, NdArrayType
class BinderUtilsTest(unittest.TestCase):
    """Unit tests for eva.binder.binder_utils (catalog access is mocked)."""

    @patch('eva.binder.binder_utils.CatalogManager')
    def test_bind_table_info(self, mock):
        """bind_table_info looks up catalog metadata and attaches it to the node."""
        video = MagicMock()
        catalog = mock.return_value
        catalog.get_dataset_metadata.return_value = 'obj'
        bind_table_info(video)
        catalog.get_dataset_metadata.assert_called_with(video.database_name,
                                                        video.table_name)
        self.assertEqual(video.table_obj, 'obj')

    @patch('eva.binder.binder_utils.CatalogManager')
    def test_bind_table_info_raise(self, mock):
        """A missing catalog entry (None metadata) makes bind_table_info raise."""
        with self.assertRaises(RuntimeError):
            video = MagicMock()
            catalog = mock.return_value
            catalog.get_dataset_metadata.return_value = None
            bind_table_info(video)

    @patch('eva.binder.binder_utils.CatalogManager')
    @patch('eva.binder.binder_utils.ColumnDefinition')
    @patch('eva.binder.binder_utils.ColConstraintInfo')
    @patch('eva.binder.binder_utils.create_column_metadata')
    @patch('eva.binder.binder_utils.generate_file_path')
    def test_create_video_metadata(self, m_gfp, m_ccm, m_cci, m_cd, m_cm):
        """create_video_metadata builds id/data columns and registers the dataset."""
        # NOTE: patch decorators apply bottom-up, so m_gfp is
        # generate_file_path, ..., m_cm is CatalogManager.
        catalog_ins = MagicMock()
        expected = 'video_metadata'
        name = 'eva'
        uri = 'tmp'
        m_gfp.return_value = uri
        m_ccm.return_value = 'col_metadata'
        m_cci.return_value = 'cci'
        m_cd.return_value = 1
        m_cm.return_value = catalog_ins
        catalog_ins.create_metadata.return_value = expected
        # Expected column definitions: an INTEGER 'id' and a UINT8 NDARRAY 'data'.
        calls = [call('id', ColumnType.INTEGER, None, [],
                      'cci'),
                 call('data', ColumnType.NDARRAY, NdArrayType.UINT8,
                      [None, None, None])]
        actual = create_video_metadata(name)
        m_gfp.assert_called_once_with(name)
        m_ccm.assert_called_once_with([1, 1])
        m_cci.assert_called_once_with(unique=True)
        m_cd.assert_has_calls(calls)
        catalog_ins.create_metadata.assert_called_with(
            name, uri, 'col_metadata', identifier_column='id', is_video=True)
        self.assertEqual(actual, expected)

    @patch('eva.binder.binder_utils.CatalogManager.check_table_exists')
    def test_handle_if_not_exists_raises_error(self, check_mock):
        """Existing table + if_not_exists=False must raise RuntimeError."""
        check_mock.return_value = True
        with self.assertRaises(RuntimeError):
            handle_if_not_exists(check_mock, False)

    @patch('eva.binder.binder_utils.CatalogManager.check_table_exists')
    def test_handle_if_not_exists_return_True(self, check_mock):
        """Existing table + if_not_exists=True returns True instead of raising."""
        check_mock.return_value = True
        self.assertTrue(handle_if_not_exists(check_mock, True))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import numpy as np
def find_smallest_positive(alist):
    """Return the smallest strictly positive value in *alist*.

    Args:
        alist (list): Numeric values; may be empty or contain no positives.

    Returns:
        The smallest value > 0, or -1 when no positive value exists.
    """
    # Single pass instead of the original two-pass scan ("find first
    # positive, then find the minimum positive"): collect positives once
    # and take their minimum. Behavior is identical, including -1 for
    # empty or all-non-positive input.
    positives = [x for x in alist if x > 0]
    return min(positives) if positives else -1
def rebase_to_smallest_positive(alist):
    """Offset every value so the smallest positive entry maps to zero.

    Returns:
        list: ``alist`` shifted down by its smallest positive value, or
        None when the list contains no positive value.
    """
    offset = find_smallest_positive(alist)
    if offset == -1:
        return None
    return [value - offset for value in alist]
def compute_maximum_subarray(score_vector=None):
    """Locate the maximum-sum contiguous subarray (Kadane's algorithm).

    Args:
        score_vector (list): Non-empty list of numeric scores.

    Returns:
        tuple: Inclusive (begin, end) indices of the best-scoring window.
    """
    best_start = run_start = best_end = 0
    running = best = score_vector[0]
    for idx in range(1, len(score_vector)):
        value = score_vector[idx]
        # A negative running sum can never help: restart the window here.
        if running < 0:
            running = value
            run_start = idx
        else:
            running += value
        # Record the window only when it strictly beats the best so far.
        if running > best:
            best = running
            best_start = run_start
            best_end = idx
    return best_start, best_end
def compute_iterated_maximum_subarray(seq=None, score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
    """Repeatedly extract maximal-scoring subarrays of *seq*.

    Generator: each round finds the maximum-sum window of *score* (Kadane),
    expands it by *margin* on both sides, yields it, zeroes the consumed
    scores in place, and repeats until the best window is shorter than
    *min_subarray_size*.

    Args:
        seq: Sequence of items (e.g. labels) parallel to *score*.
        score (list): Numeric importances; mutated in place by this generator.
        min_subarray_size (int): Stop when the best window is shorter than this.
        max_subarray_size (int): Windows longer than this trigger a score
            rebase instead of zeroing; -1 disables the size limit for yielding.
        output (str): 'minimal' yields only the joined string; any other value
            yields a dict with full details (indices, size, seq, score).
        margin (int): Symmetric border expansion around the raw window.

    Yields:
        dict: Subarray description (keys depend on *output*).
    """
    # NOTE(review): 'original_score' initially aliases 'score', so the
    # in-place zeroing below also zeroes it — presumably intended so already
    # extracted regions contribute 0 to later 'score' sums; confirm. After a
    # rebase, the alias is broken (rebase builds a new list).
    original_score = score
    while True:
        # find (begin,end) of subarray in each element
        begin, end = compute_maximum_subarray(score_vector=score)
        # check that the retrieved subarray is larger than min_subarray_size
        if end - begin < min_subarray_size - 1:
            break
        else:
            # extract maximum subarray
            # NOTE: in order to account for border effects we expand on the left and on the right by 'margin'
            first = max(0, begin - margin)
            # NOTE: we return + 1 for the rightmost position to be compliant with the 'one after the end' semantics
            last = min(len(seq), end + margin + 1)
            subarray = seq[first: last]
            subarray_size = len(subarray)
            if max_subarray_size == -1 or subarray_size <= max_subarray_size:
                # store data: sum pre-extraction importances over the raw window
                acc = 0
                for x in original_score[begin: end + 1]:
                    acc += x
                if output == 'minimal':
                    subarray = {'subarray_string': ''.join(subarray)}
                else:
                    subarray = {'subarray_string': ''.join(subarray), 'subarray': subarray, 'begin': first,
                                'end': last, 'size': subarray_size, 'seq': seq, 'score': acc}
                yield subarray
            # NOTE(review): when max_subarray_size == -1, this condition is
            # always true (size > -1), so the zeroing branch never runs and
            # every round rebases instead — looks suspicious; confirm whether
            # '-1' callers rely on this before changing.
            if subarray_size > max_subarray_size:
                # if the subarray is too large then rebase the score list, i.e. offset by the smallest positive value
                score = rebase_to_smallest_positive(score)
                if score is None:
                    break
            else:
                # remove current subarray by zeroing importance values of subarray
                score[first: last] = [0.0] * subarray_size
        # iterate after removal of current subarray
def extract_sequence_and_score(graph=None):
    """Flatten a position-annotated graph into a label sequence and score list.

    Every node must carry a 'position' attribute; nodes sharing a position
    must share the same 'label'. The per-position score is the mean of the
    nodes' 'importance' attributes (0 when absent).

    Returns:
        tuple: (seq, score) lists indexed by position.
    """
    # Bucket node ids by their 'position' attribute.
    position_ids = defaultdict(list)
    for node_id in graph.nodes():
        if 'position' not in graph.node[node_id]:
            # no position attributes in graph, use the vertex id instead
            raise Exception('Missing "position" attribute in node:%s %s' % (node_id, graph.node[node_id]))
        position_ids[graph.node[node_id]['position']].append(node_id)
    # Build the label sequence and the averaged importance per position.
    seq = [None] * len(position_ids)
    score = [0] * len(position_ids)
    for position in sorted(position_ids):
        node_ids = position_ids[position]
        labels = [graph.node[nid].get('label', 'N/A') for nid in node_ids]
        # All nodes at one position must agree on the label.
        assert all(label == labels[0] for label in labels), \
            'ERROR: non identical labels referring to same position: %s %s' % (position, labels)
        seq[position] = labels[0]
        importances = [graph.node[nid].get('importance', 0) for nid in node_ids]
        score[position] = np.mean(importances)
    return seq, score
def compute_max_subarrays_sequence(seq=None, score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
    """Yield maximal subarrays for an explicit sequence/score pair.

    Thin generator wrapper around compute_iterated_maximum_subarray.
    """
    subarrays = compute_iterated_maximum_subarray(
        seq=seq,
        score=score,
        min_subarray_size=min_subarray_size,
        max_subarray_size=max_subarray_size,
        output=output,
        margin=margin)
    for item in subarrays:
        yield item
def compute_max_subarrays(graph=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
    """Yield maximal subarrays extracted from a position-annotated graph."""
    sequence, importances = extract_sequence_and_score(graph)
    results = compute_max_subarrays_sequence(
        seq=sequence,
        score=importances,
        min_subarray_size=min_subarray_size,
        max_subarray_size=max_subarray_size,
        output=output,
        margin=margin)
    for item in results:
        yield item
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.