text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
from __future__ import unicode_literals
import json
import time
from urllib.parse import urlencode
from xml.dom import minidom
import requests
from bs4 import BeautifulSoup
from . import database_tasks, report_tasks, alg_utils
# Credential material for the Credible BH web portal, fetched from the
# project's secret store (alg_utils is project-local) at import time.
CREDIBLE_USERNAME = alg_utils.get_secret('credible', 'username')
CREDIBLE_PASSWORD = alg_utils.get_secret('credible', 'password')
CREDIBLE_DOMAIN = alg_utils.get_secret('credible', 'domain')
CREDIBLE_USER_ID = alg_utils.get_secret('credible', 'emp_id')
CREDIBLE_DOMAIN_ID = alg_utils.get_secret('credible', 'domain_id')
def update_employee_email(emp_id, emp_email):
    """Update the e-mail address on a Credible employee record.

    The employee's current username and profile code are looked up first
    because the update endpoint requires them to be re-posted alongside
    the new address.  Logs out when done.
    """
    username = report_tasks.get_emp_field(emp_id, 'username')[0][0]
    profile_code = report_tasks.get_emp_field(emp_id, 'profile_code')[0][0]
    cookies = get_cbh_cookie()
    payload = {
        'emp_id': emp_id,
        'action': 'update',
        'username': username,
        'email': emp_email,
        'profile_code': profile_code,
    }
    requests.post(
        'https://ww7.crediblebh.com/employee/user_update.asp',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        data=urlencode(payload),
        cookies=cookies,
    )
    logout(cookies)
def batch_adjust_notes(results):
    """Apply a batch of arbitration decisions to clinical service notes.

    ``results`` is an iterable of single-key dicts mapping a service id to
    an arbitration package (or the sentinel 0, which is skipped).  Each
    service is either approved or red-X'ed, then the adjustment is verified
    and the outcome recorded.  Logs out when the batch is done.
    """
    cookie_jar = get_cbh_cookie()
    session = requests.Session()
    for result_dict in results:
        if result_dict != 0:  # 0 is a "nothing to do" placeholder
            service_id = next(iter(result_dict.keys()))
            arbitration_package = result_dict[service_id]
            service_package = arbitration_package['service_package']
            arbitration = arbitration_package['arbitration']
            if arbitration['approve']:
                approve_service(service_package, session, cookie_jar)
            if not arbitration['approve']:
                red_x_package = arbitration['red_x_package']
                set_single_red_x(service_package, red_x_package, session, cookie_jar)
            # Verify the adjustment actually took effect before recording
            # completion status in the local database.
            success = report_tasks.check_service_adjustment(service_id)
            completed = 0
            if success:
                completed = 1
            database_tasks.record_arbitration_end(service_package['clientvisit_id'], time.time(), completed)
    logout(cookie_jar)
def approve_batch_services(services_package, cookie_jar):
    """Approve every service in *services_package* under one HTTP session.

    Bug fix: approve_service() takes (service_package, session, cookie_jar);
    the old call passed only two arguments and always raised TypeError.
    """
    session = requests.Session()
    for service_id in services_package:
        approve_service(services_package[service_id], session, cookie_jar)
def approve_service(service_package, session, cookie_jar):
    """Approve a single client-visit service via the view endpoint.

    Returns 0 when the shared login failed (cookie_jar == 0), else the
    cookie jar so callers can keep reusing it.
    """
    service_url = 'https://ww7.crediblebh.com/visit/clientvisit_view.asp?action=approve&clientvisit_id='
    if cookie_jar == 0:  # bug fix: was `is 0`, an identity test on a literal
        return 0
    clientvisit_id = service_package['clientvisit_id']
    # str() guards against an integer id breaking the URL concatenation.
    service_url += str(clientvisit_id)
    session.get(
        service_url,
        cookies=cookie_jar
    )
    return cookie_jar
def set_batch_red_x(red_x_packages, cookie_jar):
    """Red-X every service in *red_x_packages* under one HTTP session.

    Bug fix: set_single_red_x() requires a requests.Session argument that
    the old call omitted, so this function always raised TypeError.
    """
    session = requests.Session()
    for service_id in red_x_packages:
        red_x_package = red_x_packages[service_id]['red_x_package']
        service_package = red_x_packages[service_id]['service_package']
        set_single_red_x(
            service_package=service_package,
            red_x_package=red_x_package,
            session=session,
            cookie_jar=cookie_jar
        )
def set_single_red_x(service_package, red_x_package, session, cookie_jar):
    """Mark one service with a manual red X and attach the explanation note.

    Returns 0 when the shared login failed (cookie_jar == 0), else the
    posted form data (useful for logging/inspection by callers).
    """
    service_url = 'https://ww7.crediblebh.com/visit/clientvisit_update.asp'
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    if cookie_jar == 0:  # bug fix: was `is 0`, an identity test on a literal
        return 0
    data = {
        'clientvisit_id': service_package['clientvisit_id'],
        'action': 'update',
        'dd_billmatrix': service_package['matrix_id'],
        'dd_location': service_package['location_id'],
        'recipient_id': service_package['recipient_id'],
        'manual_redx': '1',
        'manual_redx_note': str(red_x_package)
    }
    session.post(
        service_url,
        headers=header,
        data=urlencode(data),
        cookies=cookie_jar
    )
    return data
def update_admin_time_type_batch(fixable_punch_dict):
    """Fix the punch type on a batch of admin-time entries, then log out.

    *fixable_punch_dict* maps admintime_id -> {'punch_type': ..., 'TimeIn': ...}.
    """
    cookies = get_cbh_cookie()
    for admintime_id, punch in fixable_punch_dict.items():
        update_admin_time_type(
            admintime_id=admintime_id,
            punch_type=punch['punch_type'],
            time_in=punch['TimeIn'],
            cookie_jar=cookies,
        )
    logout(cookies)
def update_admin_time_type(admintime_id, punch_type, time_in, cookie_jar):
    """Change the type of one admin-time punch and mark it approved.

    Returns 0 when the shared login failed (cookie_jar == 0), else 1.
    Does not log out; the caller owns the session.
    """
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    if cookie_jar == 0:  # bug fix: was `is 0`, an identity test on a literal
        return 0
    update_admin_time_url = 'https://ww7.crediblebh.com/employee/admintime_update.asp'
    data = {
        'admintime_id': admintime_id,
        'action': 'update',
        'admintype_id': punch_type,
        'TimeIn': time_in,
        'appr': '1'
    }
    requests.post(
        update_admin_time_url,
        headers=header,
        data=urlencode(data),
        cookies=cookie_jar
    )
    return 1
def add_employee(first_name, last_name):
    """Create a new employee record with placeholder phone and program team."""
    cookies = get_cbh_cookie()
    payload = {
        'action': 'add',
        'first_name': first_name,
        'last_name': last_name,
        'mobile_phone': '000-000-0000',
        'programteam': 'T-52',
    }
    requests.post(
        'https://ww7.crediblebh.com/employee/emp_update.asp',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        data=urlencode(payload),
        cookies=cookies,
    )
    logout(cookie_jar=cookies)
def update_employee_page(emp_id, field_name, value):
    """Set a single field on an employee's profile page, then log out."""
    cookies = get_cbh_cookie()
    payload = {
        'emp_id': emp_id,
        'action': 'update',
        field_name: value,
    }
    requests.post(
        'https://ww7.crediblebh.com/employee/emp_update.asp',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        data=urlencode(payload),
        cookies=cookies,
    )
    logout(cookies)
def update_employee_page_batch(emp_id, update_data):
    """Post several employee-profile field updates in one request.

    *update_data* maps form field names to their new values; it is merged
    into the POST body alongside the employee id.  Logs out when done.
    """
    url = 'https://ww7.crediblebh.com/employee/emp_update.asp'
    cookie_jar = get_cbh_cookie()
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    data = {
        'emp_id': emp_id
    }
    # Idiom fix: dict.update replaces the manual key-by-key copy loop.
    data.update(update_data)
    requests.post(
        url,
        headers=headers,
        data=urlencode(data),
        cookies=cookie_jar
    )
    logout(cookie_jar)
def update_admin_time(admintime_id, date, end_time):
    """Correct the date and out-time of one admin-time entry and approve it.

    Returns 0 when login fails, 1 otherwise.  Logs out before returning.
    """
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    cookie_jar = get_cbh_cookie()
    if cookie_jar == 0:  # bug fix: was `is 0`, an identity test on a literal
        return 0
    update_admin_time_url = 'https://ww7.crediblebh.com/employee/admintime_update.asp'
    # Normalize AM/PM markers to lowercase (presumably what the endpoint
    # expects — the original code did the same replacement).
    end_time = end_time.replace('AM', 'am')
    end_time = end_time.replace('PM', 'pm')
    data = {
        'admintime_id': admintime_id,
        'action': 'update',
        'rev_datein': date,
        'rev_timeout': end_time,
        'appr': '1'
    }
    requests.post(
        update_admin_time_url,
        headers=header,
        data=urlencode(data),
        cookies=cookie_jar
    )
    logout(cookie_jar)
    return 1
def add_admin_time_batch(emp_id, punches):
    """Add a batch of admin-time punches for one employee.

    Each punch dict supplies date, time_in, time_out, punch_type_id and
    notes.  Returns 0 when login fails, 1 otherwise; logs out when done.
    """
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    cookie_jar = get_cbh_cookie()
    if cookie_jar == 0:  # bug fix: was `is 0`, an identity test on a literal
        return 0
    add_admin_time_url = 'https://ww7.crediblebh.com/employee/admintime_add.asp'
    for punch in punches:
        data = {
            'action': 'add',
            'emp_id': emp_id,
            'DateIn': punch['date'],
            'TimeIn': punch['time_in'],
            'TimeOut': punch['time_out'],
            'admintype_id': punch['punch_type_id'],
            'notes': punch['notes']
        }
        requests.post(
            add_admin_time_url,
            headers=header,
            data=urlencode(data),
            cookies=cookie_jar
        )
    logout(cookie_jar)
    return 1
def add_admin_time(emp_id, date, time_in, time_out, punch_type, notes):
    """Add a single admin-time punch for an employee.

    Returns 0 when login fails, 1 otherwise.  Logs out before returning.
    """
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    cookie_jar = get_cbh_cookie()
    if cookie_jar == 0:  # bug fix: was `is 0`, an identity test on a literal
        return 0
    add_admin_time_url = 'https://ww7.crediblebh.com/employee/admintime_add.asp'
    data = {
        'action': 'add',
        'emp_id': emp_id,
        'DateIn': date,
        'TimeIn': time_in,
        'TimeOut': time_out,
        'admintype_id': punch_type,
        'notes': notes
    }
    requests.post(
        add_admin_time_url,
        headers=header,
        data=urlencode(data),
        cookies=cookie_jar
    )
    logout(cookie_jar=cookie_jar)
    return 1
def get_missed_psych_appointments(client_id, start_date, end_date):
    """Return the missed-psych count for a client between two dates.

    Returns 0 when the report contains no <missed_psych> element.

    Bug fix: the old code indexed ``[0]`` first and then called ``len()``
    on a single minidom Element (which raises TypeError) and compared the
    result with ``is``; test the NodeList length instead, then index.
    """
    url = 'https://reportservices.crediblebh.com/reports/ExportService.asmx/ExportDataSet'
    key = database_tasks.get_report_key('z_alg_getMissedPsych')
    payload = {
        'connection': key,
        'start_date': '',
        'end_date': '',
        'custom_param1': client_id,
        'custom_param2': start_date,
        'custom_param3': end_date
    }
    cr = requests.get(url, params=payload)
    raw_xml = cr.content
    document = minidom.parseString(raw_xml).childNodes[0]
    elements = document.getElementsByTagName("missed_psych")
    if len(elements) == 0:
        return 0
    return elements[0].firstChild.data
def get_next_psych_appointment(client_id):
    """Return a client's next psych appointment, or 'none scheduled'.

    The report pads the value with a dummy time/date prefix which is
    stripped before returning.

    Bug fix: the old code accessed ``elements.firstChild`` on a minidom
    NodeList, which has no such attribute — index the first element.
    It also used ``is 0`` for the length comparison.
    """
    url = 'https://reportservices.crediblebh.com/reports/ExportService.asmx/ExportDataSet'
    key = database_tasks.get_report_key('z_alg_getNextPsych')
    payload = {
        'connection': key,
        'start_date': '',
        'end_date': '',
        'custom_param1': client_id,
        'custom_param2': '',
        'custom_param3': ''
    }
    cr = requests.get(url, params=payload)
    raw_xml = cr.content
    document = minidom.parseString(raw_xml).childNodes[0]
    elements = document.getElementsByTagName("psych_appt")
    if len(elements) == 0:
        return 'none scheduled'
    number = elements[0].firstChild.data
    number = number.replace('12:00AM ', '')
    number = number.replace('Jan 1 1900 ', '')
    return number
def login_credible():
    """Smoke-test the login flow: fetch the home page and print its body."""
    cookies = get_cbh_cookie()
    response = requests.get('https://crediblebh.com', cookies=cookies)
    print(response.content)
def update_single_client_batch(client_id, updates, cookie_jar):
    """Post one client's field updates using an already-open session.

    Returns 0 when the shared login failed (cookie_jar == 0), else 1.
    Does not log out; the caller owns the session.
    """
    if cookie_jar == 0:  # bug fix: was `is 0`, an identity test on a literal
        return 0
    url = 'https://ww7.crediblebh.com/client/client_update.asp'
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    params = {'p': CREDIBLE_DOMAIN_ID, 'e': CREDIBLE_USER_ID}
    starter_data = {
        'client_id': client_id,
        'action': 'update',
    }
    data = starter_data.copy()
    data.update(updates)
    requests.post(url, data=urlencode(data), cookies=cookie_jar, params=params, headers=header)
    return 1
def update_client_batch(client_data_packages):
    """Apply profile-field updates to many clients under one login session.

    *client_data_packages* maps client_id -> {profile_field: value}.
    """
    cookies = get_cbh_cookie()
    for client_id, fields in client_data_packages.items():
        update_single_client_batch(
            client_id=client_id,
            updates=dict(fields),  # fresh copy, as the original loop built
            cookie_jar=cookies,
        )
    logout(cookie_jar=cookies)
def update_client(client_id, field, value):
    """Set a single profile field on a client record.

    Returns 0 when login fails, 1 otherwise.  Logs out before returning.
    """
    cookie_jar = get_cbh_cookie()
    if cookie_jar == 0:  # bug fix: was `is 0`, an identity test on a literal
        return 0
    url = 'https://ww7.crediblebh.com/client/client_update.asp'
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    params = {'p': CREDIBLE_DOMAIN_ID, 'e': CREDIBLE_USER_ID}
    data = {
        'client_id': client_id,
        'action': 'update',
        field: value
    }
    requests.post(url, data=urlencode(data), cookies=cookie_jar, params=params, headers=header)
    logout(cookie_jar)
    return 1
# client_id_packets is a list of dict
# of the format {'client_id': client_id, 'reason': reason}
def close_client_batch(client_id_packets):
    """Close many clients under one session.

    *client_id_packets* is a list of {'client_id': ..., 'reason': ...}
    dicts.  Returns a receipt list of [client_id, result] pairs.
    """
    receipt = []
    cookies = get_cbh_cookie()
    for packet in client_id_packets:
        outcome = close_single_client_in_batch(
            client_id=packet['client_id'],
            reason=packet['reason'],
            cookie_jar=cookies,
        )
        receipt.append([packet['client_id'], outcome])
    logout(cookie_jar=cookies)
    return receipt
def close_single_client_in_batch(client_id, reason, cookie_jar):
    """Mark one client CLOSED (reason stored in the text24 field).

    Returns 0 when the shared login failed (cookie_jar == 0), else 1.
    Does not log out; the caller owns the session.
    """
    if cookie_jar == 0:  # bug fix: was `is 0`, an identity test on a literal
        return 0
    url = 'https://ww7.crediblebh.com/client/client_update.asp'
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    params = {'p': CREDIBLE_DOMAIN_ID, 'e': CREDIBLE_USER_ID}
    data = {
        'client_id': client_id,
        'action': 'update',
        'client_status': 'CLOSED',
        'text24': reason
    }
    requests.post(url, data=urlencode(data), cookies=cookie_jar, params=params, headers=header)
    return 1
def close_client(client_id, reason):
    """Mark a client CLOSED in its own session (login + post + logout).

    Returns 0 when login fails, 1 otherwise.
    """
    cookie_jar = get_cbh_cookie()
    if cookie_jar == 0:  # bug fix: was `is 0`, an identity test on a literal
        return 0
    url = 'https://ww7.crediblebh.com/client/client_update.asp'
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    params = {'p': CREDIBLE_DOMAIN_ID, 'e': CREDIBLE_USER_ID}
    data = {
        'client_id': client_id,
        'action': 'update',
        'client_status': 'CLOSED',
        'text24': reason
    }
    requests.post(url, data=urlencode(data), cookies=cookie_jar, params=params, headers=header)
    logout(cookie_jar)
    return 1
def add_insurance(client_id, payer_id, insurance_id, start_date, end_date):
    """Suggest (but do NOT perform) adding an insurance entry for a client.

    The actual POST is intentionally commented out, so this function only
    returns a human-readable suggestion string; ``header``, ``url``,
    ``params`` and ``data`` are kept prepared for when the call below is
    re-enabled.
    """
    # cookie_jar = get_cbh_cookie()
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    url = 'https://ww7.crediblebh.com/client/list_ins.asp'
    params = {'p': CREDIBLE_DOMAIN_ID, 'e': CREDIBLE_USER_ID}
    data = {
        'action': 'add',
        'client_id': client_id,
        'billing_ord': 1,
        'payer_id': payer_id,
        'group_no': '',
        'ins_id': insurance_id,
        'start_date': start_date,
        'end_date': end_date,
        'copay': '',
        'credential_group': '',
        'auth_required': '',
        'is_pending': 0,
        'employeeorschool': '',
        'notes': '',
        'btn_add': 'Add Insurance',
    }
    return 'suggest adding insurance for client: ' + str(client_id) + ', payer_id: ' + str(payer_id)
    # insurance_post = requests.post(url, data=urlencode(data), cookies=cookie_jar, params=params, headers=header)
def delete_insurance(client_id, clientins_id):
    """Prepare (but do NOT perform) deletion of a client insurance entry.

    The POST is intentionally commented out; the function currently has no
    effect and returns None.  ``header``, ``url``, ``params`` and ``data``
    are kept ready for when the call is re-enabled.
    """
    # cookie_jar = get_cbh_cookie()
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    url = 'https://ww7.crediblebh.com/client/list_ins.asp'
    params = {'p': CREDIBLE_DOMAIN_ID, 'e': CREDIBLE_USER_ID}
    data = {
        'action': 'delete',
        'client_id': client_id,
        'clientins_id': clientins_id,
        'sfilter': 1,
    }
    # insurance_post = requests.post(url, data=urlencode(data), cookies=cookie_jar, params=params, headers=header)
def deactivate_insurance(client_id, clientins_id):
    """Suggest (but do NOT perform) inactivating a client insurance entry.

    The POST is intentionally commented out; only the suggestion string is
    returned.  ``data`` carries the Inactivate button so the disabled call
    would deactivate rather than delete.
    """
    # cookie_jar = get_cbh_cookie()
    header = {'Content-Type': 'application/x-www-form-urlencoded'}
    url = 'https://ww7.crediblebh.com/client/list_ins.asp'
    params = {'p': CREDIBLE_DOMAIN_ID, 'e': CREDIBLE_USER_ID}
    data = {
        'action': 'delete',
        'client_id': client_id,
        'clientins_id': clientins_id,
        'sfilter': 1,
        'btn_inactive': 'Inactivate',
    }
    return 'suggest removing insurance listing: ' + str(clientins_id) + ' for client: ' + str(client_id)
    # insurance_post = requests.post(url, data=urlencode(data), cookies=cookie_jar, params=params, headers=header)
def get_client_insurance(client_id):
    """Return {payer_id: [clientins_id, payer_name]} for a client.

    Walks the export-service XML via sibling hops: each ``nextSibling``
    pair (spacer, element) advances one column of the report row.  The
    spacers are presumably the text nodes minidom inserts between
    elements — verify against the service's actual XML if this breaks.
    """
    insurances = dict()
    url = 'https://reportservices.crediblebh.com/reports/ExportService.asmx/ExportDataSet'
    key = database_tasks.get_report_key('_alg insurance_check')
    payload = {
        'connection': key,
        'start_date': '',
        'end_date': '',
        'custom_param1': client_id,
        'custom_param2': '',
        'custom_param3': ''
    }
    cr = requests.get(url, params=payload)
    raw_xml = cr.content
    document = minidom.parseString(raw_xml).childNodes[0]
    elements = document.getElementsByTagName("clientins_id")
    for element in elements:
        clientins_id = element.firstChild.data
        spacer = element.nextSibling  # node between <clientins_id> and payer
        payer_element = spacer.nextSibling
        payer_id = payer_element.firstChild.data
        second_spacer = payer_element.nextSibling
        payer_name_element = second_spacer.nextSibling
        payer_name = payer_name_element.firstChild.data
        insurances[payer_id] = [clientins_id, payer_name]
    return insurances
def get_cbh_cookie():
    """Log in to Credible BH and return the authenticated cookie jar.

    Performs the two-step flow (API authentication, then the redirecting
    index page that issues the web cookies).  Retries up to three times;
    returns 0 when every attempt fails.

    Bug fix: the old handler read ``except KeyError or ConnectionError``,
    which evaluates to ``except KeyError`` only — connection failures
    escaped the retry loop.  It also never caught requests' own
    ConnectionError, which is not a subclass of the builtin one, and an
    empty redirect history would raise an unhandled IndexError.
    """
    attempts = 0
    while attempts < 3:
        try:
            jar = requests.cookies.RequestsCookieJar()
            api_url = "https://login-api.crediblebh.com/api/Authenticate/CheckLogin"
            index_url = "https://ww7.crediblebh.com/index.aspx"
            first_payload = {'UserName': CREDIBLE_USERNAME,
                             'Password': CREDIBLE_PASSWORD,
                             'DomainName': CREDIBLE_DOMAIN}
            headers = {'DomainName': CREDIBLE_DOMAIN}
            post = requests.post(api_url, json=first_payload, headers=headers)
            response_json = post.json()
            session_cookie = response_json['SessionCookie']
            jar.set('SessionId', session_cookie, domain='.crediblebh.com', path='/')
            second_payload = {'SessionId': session_cookie}
            second_post = requests.post(index_url, data=second_payload, cookies=jar)
            # The web cookies ride on the first redirect response.
            cbh_response = second_post.history[0]
            return cbh_response.cookies
        except (KeyError, IndexError, ConnectionError,
                requests.exceptions.ConnectionError):
            attempts += 1
    return 0
def get_client_ids():
    """Return {client_id (int): status} for every client in the report.

    Uses the '_alg celery clients' export.  Sibling hops skip the node
    minidom places between <client_id> and the status element (presumably
    a whitespace text node — verify against the service output).
    """
    client_ids = dict()
    url = 'https://reportservices.crediblebh.com/reports/ExportService.asmx/ExportDataSet'
    key = database_tasks.get_report_key('_alg celery clients')
    payload = {
        'connection': key,
        'start_date': '',
        'end_date': '',
        'custom_param1': '',
        'custom_param2': '',
        'custom_param3': ''
    }
    cr = requests.get(url, params=payload)
    raw_xml = cr.content
    document = minidom.parseString(raw_xml).childNodes[0]
    elements = document.getElementsByTagName("client_id")
    for element in elements:
        spacer = element.nextSibling  # node between id and status elements
        status_element = spacer.nextSibling
        status = status_element.firstChild.data
        number = int(element.firstChild.data)
        client_ids[number] = status
    return client_ids
def get_medicaid_number(credible_id):
    """Fetch a client's Medicaid id via the export web service.

    NOTE(review): the connection key is hard-coded here instead of being
    resolved through database_tasks.get_report_key like the neighbouring
    report calls — consider moving it into the secret/report-key store.
    """
    url = 'https://reportservices.crediblebh.com/reports/ExportService.asmx/ExportDataSet'
    key = 'fDFeukXL8A6v2xAzAQR2S2w2SQh9w9k0bo6GtZyZ8FCmtX0P7bwOfAn6pV39HONE'
    payload = {
        'connection': key,
        'start_date': '',
        'end_date': '',
        'custom_param1': credible_id,
        'custom_param2': '',
        'custom_param3': ''
    }
    cr = requests.get(url, params=payload)
    raw_xml = cr.content
    document = minidom.parseString(raw_xml).childNodes[0]
    # Assumes exactly one <medicaidid> element is present in the response.
    elements = document.getElementsByTagName("medicaidid")[0]
    number = elements.firstChild.data
    return number
def logout(cookie_jar):
    """Terminate the Credible session associated with *cookie_jar*."""
    requests.get(
        'https://ww7.crediblebh.com/secure/logout.aspx',
        cookies=cookie_jar,
    )
def update_report_keys():
    """Ensure every export report has a web-service user; record its key.

    Reports with no assigned export user are assigned user '1' (their key
    will be available on a later run); reports that already have one get
    their name, SQL and connection key stored locally.

    Returns 1 when every report was already assigned, else 0.

    Bug fix: the assignment check used ``export_user_id is ''``, an
    identity comparison against a string literal that CPython does not
    guarantee to hold — compare with ``==``.
    """
    all_updated = True
    get_web_users_url = 'https://reports.crediblebh.com/services/exports_services.asmx/GetAssignedExportsUsers'
    assign_web_user = 'https://reports.crediblebh.com/services/exports_services.asmx/AssignExportUser'
    cookie_jar = get_cbh_cookie()
    ws_json_request = requests.get(get_web_users_url, cookies=cookie_jar)
    entries = json.loads(ws_json_request.text)
    sql_data = report_tasks.get_report_as_dict(686, ['', '', ''])
    for entry in entries['data']:
        export_user_id = entry['exportwsuser_id']
        report_id = entry['exportbuilder_id']
        if export_user_id == '':
            params = {
                'exportwsuser_id': '1',
                'exportbuilder_id': report_id
            }
            requests.post(assign_web_user, cookies=cookie_jar, data=params)
            all_updated = False
        else:
            # Flatten the stored SQL so it survives the flat storage format.
            sql = sql_data[report_id]['custom_query'].replace('\n', ' ')
            report_data = {
                'report_id': report_id,
                'report_name': entry['export_name'],
                'report_sql': sql,
                'report_key': entry['connection']
            }
            database_tasks.add_report(
                report_data=report_data
            )
    logout(cookie_jar)
    if all_updated:
        return 1
    return 0
def get_ws_reports():
    """Collect id, name, SQL and key for every web-service export report.

    Report 686 serves as the master listing; a per-report key is then
    scraped individually.  Sibling hops skip the nodes minidom places
    between elements (presumably whitespace text nodes — verify against
    the service output).
    """
    returned_data = []
    first_key = get_key(686)
    all_keys_data = get_first_report(first_key)
    exportbuilder_ids = all_keys_data.getElementsByTagName('exportbuilder_id')
    for element in exportbuilder_ids:
        id_number = element.firstChild.data
        spacer = element.nextSibling  # node between id and name elements
        report_name_element = spacer.nextSibling
        report_name = report_name_element.firstChild.data
        second_spacer = report_name_element.nextSibling
        report_sql_element = second_spacer.nextSibling
        report_sql = report_sql_element.firstChild.data
        new_key = get_key(id_number)
        key_data = {
            'report_id': id_number,
            'report_name': report_name,
            'report_sql': report_sql,
            'report_key': new_key
        }
        returned_data.append(key_data)
        print(key_data)
    return returned_data
def get_key(key_id):
    """Scrape the web-services export key for report *key_id*.

    Two GETs are made: the cookies returned by the first are merged into
    the jar before retrying (presumably a load-balancer/session cookie the
    page requires — confirm against the live site).
    """
    params = {'exportbuilder_id': str(key_id)}
    cookie_jar = get_cbh_cookie()
    cookie_jar.set('Domain', 'your_domain_name', domain='.crediblebh.com', path='/')
    url = 'https://reports.crediblebh.com/reports/web_services_export.aspx'
    key_get = requests.get(url, cookies=cookie_jar, params=params)
    nsc_cookie = key_get.cookies
    requests.cookies.merge_cookies(cookie_jar, nsc_cookie)
    key_get_2 = requests.get(url, cookies=cookie_jar, params=params)
    content = key_get_2.content
    document = BeautifulSoup(content, "html.parser")
    table_data = document.find('td')
    # NOTE(review): magic child index into the first <td>; brittle if the
    # page layout changes.
    return table_data.contents[12]
def get_first_report(first_key):
    """Fetch the master key-listing report and return its parsed XML root."""
    payload = {
        'connection': first_key,
        'start_date': '',
        'end_date': '',
        'custom_param1': '',
        'custom_param2': '',
        'custom_param3': '',
    }
    response = requests.get(
        'https://reportservices.crediblebh.com/reports/ExportService.asmx/ExportDataSet',
        params=payload,
    )
    return minidom.parseString(response.content).childNodes[0]
| jkcubeta/algernon | src/overwatch/src/overwatch/alg_tasks/credible_tasks.py | Python | apache-2.0 | 23,349 | [
"VisIt"
] | 8f519a092efcf7176278a2b4dec914f9185dd5b0926e24233a9ac3247f5acbac |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bowtie(MakefilePackage):
    """Bowtie is an ultrafast, memory-efficient short read aligner
    for short DNA sequences (reads) from next-gen sequencers."""

    homepage = "https://sourceforge.net/projects/bowtie-bio/"
    url = "https://github.com/BenLangmead/bowtie/archive/v1.2.0.tar.gz"

    version('1.3.0', sha256='d7c2d982a67884909f284a0ff150b56b20127cd7a1ced461c3c4c03e6a6374c5')
    version('1.2.3', sha256='86402114caeacbb3a3030509cb59f0b7e96361c7b3ee2dd50e2cd68200898823')
    # The bowtie project git tagged and GitHub released a v1.2.2,
    # discovered/fixed a bug, git tagged a v1.2.2_p1 and moved the
    # 1.2.2 release to use it rather than making a new `1.2.2_p1`
    # release.
    #
    # We point both of the Spack versions at the same tarball so they
    # build the binaries that are on the release page as v1.2.2
    version('1.2.2_p1', sha256='e1b02b2e77a0d44a3dd411209fa1f44f0c4ee304ef5cc83f098275085740d5a1')
    version('1.2.2', sha256='e1b02b2e77a0d44a3dd411209fa1f44f0c4ee304ef5cc83f098275085740d5a1', url="https://github.com/BenLangmead/bowtie/archive/v1.2.2_p1.tar.gz")
    version('1.2.1.1', sha256='1b38408b88f61d18d7ff28b2470a8cfeefccb3fc59fd46e4cc62e23874e52c20')
    version('1.2.1', sha256='b2a7c8c879cb08f00a82665bee43e1d4861de44a87912c54d168e44c90869728')
    version('1.2.0', sha256='dc4e7951b8eca56ce7714c47fd4e84f72badd5312ee9546c912af1963570f894')
    # Keeping the old 1.2 version around for reproducibility, it's not
    # clearly identical to 1.2.0.
    version('1.2', sha256='b1052de4253007890f6436e6361d40148bc2a5a9dd01827bb9f34097747e65f8', url='https://downloads.sourceforge.net/project/bowtie-bio/bowtie/1.2.0/bowtie-1.2-source.zip')

    # 1.2.2 and 1.2.2_p1 fail to build with %gcc@8.3.0
    # with and without issue-87 patch
    conflicts('%gcc@8:', when='@1.2.2')
    conflicts('%gcc@8:', when='@1.2.2_p1')

    variant('tbb', default=False, description='Use Intel thread building block')

    depends_on('tbb', when='+tbb')
    depends_on('zlib')

    # See: https://github.com/BenLangmead/bowtie/issues/87, a
    # different fix is in the FreeBSD ports/package tree
    # https://svnweb.freebsd.org/ports?view=revision&revision=483954
    patch('issue-87.patch', when='@:1.2.2 %gcc@8.0.0:')

    # correspond to 'aarch64' architecture
    # reference: https://github.com/BenLangmead/bowtie/pull/13
    patch('for_aarch64.patch', when='@1.2.0:1.2 target=aarch64:')

    # measures for narrowing error
    patch('fix_narrowing_err.patch', when='@1.2.1:1.2.3')
    patch('fix_narrowing_err_1.3.0.patch', when='@1.3.0:')

    def edit(self, spec, prefix):
        # Point the Makefile at Spack's compiler wrappers.  The C++
        # pattern is rewritten to a 'CPP =' assignment — presumably the
        # variable name bowtie's Makefile uses for the C++ compiler;
        # confirm against the upstream Makefile before changing.
        makefile = FileFilter('Makefile')
        makefile.filter('CC = .*', 'CC = ' + env['CC'])
        makefile.filter('CXX = .*', 'CPP = ' + env['CXX'])

    def build(self, spec, prefix):
        # Pass NO_TBB=1 to disable Intel TBB when the variant is off.
        if '+tbb' in spec:
            make()
        else:
            make('NO_TBB=1')

    def install(self, spec, prefix):
        make('prefix={0}'.format(self.prefix), 'install')
| LLNL/spack | var/spack/repos/builtin/packages/bowtie/package.py | Python | lgpl-2.1 | 3,221 | [
"Bowtie"
] | 5414b925e18257b7a32349a3f51eab7abc31592dc9834bb6576d389a7fce143d |
"""
Gaussian foreground simultaion
Simulate gaussian foregrounds, in particular those in the style of Santos, Cooray and Knox [1]_.
These are foregrounds that can be represented with a covariance
function that seperates with angle and frequency.
References
----------
.. [1] http://arxiv.org/abs/astro-ph/0408515
"""
import numpy as np
from cora.core import maps, gaussianfield
from cora.util import nputil
from cora.util import cubicspline as cs
class ForegroundMap(maps.Sky3d):
    r"""Simulate foregrounds with a separable angular and frequency
    covariance.

    Used to simulate foregrounds that can be modeled by angular
    covariances of the form:

    .. math:: C_l(\nu,\nu') = A_l B(\nu, \nu')
    """

    # True once the frequency weights / angular field have been cached.
    _weight_gen = False

    def angular_ps(self, l):
        r"""The angular function A_l. Must be a vectorized function
        taking either np.ndarrays or scalars.
        """
        pass

    def frequency_covariance(self, nu1, nu2):
        # Abstract hook: subclasses return B(nu1, nu2).
        pass

    def angular_powerspectrum(self, l, nu1, nu2):
        # Separable covariance: C_l(nu1, nu2) = A_l * B(nu1, nu2).
        return self.angular_ps(l) * self.frequency_covariance(nu1, nu2)

    def generate_weight(self, regen=False):
        r"""Pregenerate the k weights array.

        Parameters
        ----------
        regen : boolean, optional
            If True, force regeneration of the weights, to be used if
            parameters have been changed,
        """
        if self._weight_gen and not regen:
            return
        # Frequency-frequency covariance evaluated on the map's frequency
        # grid, then factored via nputil.matrix_root_manynull (which also
        # reports the number of effectively correlated frequency modes).
        f1, f2 = np.meshgrid(self.nu_pixels, self.nu_pixels)
        ch = self.frequency_covariance(f1, f2)
        self._freq_weight, self._num_corr_freq = nputil.matrix_root_manynull(ch)
        rf = gaussianfield.RandomFieldA2.like_map(self)
        ## Construct a lambda function to evalutate the array of
        ## k-vectors.
        rf.powerspectrum = lambda karray: self.angular_ps(
            (karray ** 2).sum(axis=2) ** 0.5
        )
        self._ang_field = rf
        self._weight_gen = True

    def getfield(self):
        # Draw one realisation: one angular Gaussian field is generated,
        # then correlated frequency slices are built by weighting
        # independent normal deviates with the frequency matrix root.
        self.generate_weight()
        aff = np.fft.rfftn(self._ang_field.getfield())
        s2 = (self._num_corr_freq,) + aff.shape
        norm = np.tensordot(
            self._freq_weight, np.random.standard_normal(s2), axes=(1, 0)
        )
        return np.fft.irfft(np.fft.ifft(norm * aff[np.newaxis, :, :], axis=1), axis=2)
class ForegroundSCK(ForegroundMap):
    r"""Base class for SCK style foregrounds.

    Need to set the four attributes `A`, `alpha`, `beta` and
    `zeta`. This class also allows calculation of the angular
    correlation function. The units are the temperature in K.

    See Also
    --------
    Synchrotron
    ExtraGalacticFreeFree
    GalacticFreeFree
    PointSources
    """

    # Pivot frequency and pivot multipole for the power laws below.
    nu_0 = 130.0
    l_0 = 1000.0

    # Cached interpolator for the angular correlation function.
    _cf_int = None

    def angular_ps(self, larray):
        r"""Angular power spectrum A (l / l_0)^{-beta}, zero at l = 0.

        Bug fix: the previous version wrote placeholder values into the
        *caller's* array (``larray[mask0] = 1.0``) and never restored
        them; we now operate on a copy so the input is left untouched.
        """
        if isinstance(larray, np.ndarray):
            larray = larray.copy()
            mask0 = larray == 0
            larray[mask0] = 1.0  # placeholder avoids 0 ** (-beta)
        psarray = self.A * (larray / self.l_0) ** (-self.beta)
        if isinstance(larray, np.ndarray):
            psarray[mask0] = 0.0  # zero out the masked l = 0 entries
        return psarray

    def frequency_covariance(self, nu1, nu2):
        # B(nu1, nu2) = sigma(nu1) sigma(nu2) * correlation(nu1, nu2)
        return (
            self.frequency_variance(nu1) * self.frequency_variance(nu2)
        ) ** 0.5 * self.frequency_correlation(nu1, nu2)

    def frequency_variance(self, nu):
        r"""Variance on a single frequency slice."""
        return (nu / self.nu_0) ** (-2 * self.alpha)

    def frequency_correlation(self, nu1, nu2):
        r"""Correlation between two frequency slices."""
        return np.exp(-0.5 * (np.log(nu1 / nu2) / self.zeta) ** 2)

    def frequency_correlation_dlog(self, dlognu):
        r"""Correlation between two slices as a function of the seperation of the
        log of the two frequencies.

        This is useful because it is stationary in the log of the
        frequency and makes certain calculations easier.

        Parameters
        ----------
        dlognu : array_like
            The seperation between the log of the two frequencies
            (log(nu1) - log(nu2)).

        Returns
        -------
        acf: array_like
        """
        return np.exp(-(dlognu ** 2) / (2 * self.zeta ** 2))

    def angular_correlation(self, tarray):
        r"""The 2-point angular correlation function.

        This will tabulate across the range [0, pi] on the first run,
        and will subsequently interpolate all calls.

        Parameters
        ----------
        tarray : array_like
            The angular seperations (in radians) to calculate at.

        Returns
        -------
        acf : array_like
        """
        if not self._cf_int:
            from scipy.special import lpn

            @np.vectorize
            def cf(theta):
                # Legendre sum of the angular power spectrum, truncated
                # at l = 10000.
                nmax = 10000
                nmin = 1
                larr = np.arange(nmin, nmax + 1).astype(np.float64)
                pl = lpn(nmax, np.cos(theta))[0][nmin:]
                return ((2 * larr + 1.0) * pl * self.angular_ps(larr)).sum() / (
                    4 * np.pi
                )

            tarr = np.linspace(0, np.pi, 1000)
            cfarr = cf(tarr)
            self._cf_int = cs.Interpolater(tarr, cfarr)

        return self._cf_int(tarray)
class Synchrotron(ForegroundSCK):
    # SCK parameters for synchrotron emission: amplitude A at the pivot
    # (l_0, nu_0), frequency spectral index alpha, angular index beta and
    # frequency-decorrelation length zeta (see ForegroundSCK).
    A = 7.00e-4
    alpha = 2.80
    beta = 2.4
    zeta = 4.0
class ExtraGalacticFreeFree(ForegroundSCK):
    # SCK parameters for extragalactic free-free emission (see
    # ForegroundSCK for the meaning of A, alpha, beta, zeta).
    A = 1.40e-8
    alpha = 2.10
    beta = 1.0
    zeta = 35.0
class GalacticFreeFree(ForegroundSCK):
    # SCK parameters for Galactic free-free emission (see ForegroundSCK
    # for the meaning of A, alpha, beta, zeta).
    A = 8.80e-8
    alpha = 2.15
    beta = 3.0
    zeta = 35.0
class PointSources(ForegroundSCK):
    # SCK parameters for unresolved point sources (see ForegroundSCK for
    # the meaning of A, alpha, beta, zeta).
    A = 5.70e-5
    alpha = 2.07
    beta = 1.1
    zeta = 1.0
| radiocosmology/cora | cora/foreground/gaussianfg.py | Python | mit | 5,626 | [
"Gaussian"
] | 9a5a8b6e6c114b24a1ca6f25dbafaabfd9c3352ffa3225d4022f23c18c7888a8 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
#
# Copyright (C) 2015 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
from dateutil import parser
from grimoire.elk.enrich import Enrich, metadata
class TelegramEnrich(Enrich):
    """Enricher turning raw Telegram messages into flat ElasticSearch items."""

    def get_field_author(self):
        # The author identity lives under the "from" key of a message.
        return "from"

    def get_elastic_mappings(self):
        """Mapping that keeps an analyzed copy of the message text."""
        mapping = """
        {
            "properties": {
                "text_analyzed": {
                    "type": "string",
                    "index":"analyzed"
                }
            }
        } """
        return {"items": mapping}

    def get_sh_identity(self, item, identity_field=None):
        """Build a SortingHat identity from a Telegram "from" dict.

        *item* may be either a full raw item (a dict containing 'data')
        or a bare "from" sub-dict.
        """
        identity = {}
        from_ = item
        # Robustness fix: check the type before probing for the 'data'
        # key so non-dict inputs fall through to the bare-dict branch.
        if isinstance(item, dict) and 'data' in item:
            from_ = item['data']['message'][identity_field]
        identity['username'] = from_['username']
        identity['email'] = None
        identity['name'] = from_['username']
        if 'first_name' in from_:
            identity['name'] = from_['first_name']
        return identity

    def get_identities(self, item):
        """ Return the identities from an item """
        identities = []
        message = item['data']['message']
        identities.append(self.get_sh_identity(message['from']))
        return identities

    @metadata
    def get_rich_item(self, item):
        """Flatten one raw Telegram item into an enriched item dict."""
        eitem = {}
        # metadata fields to copy verbatim (None when absent)
        copy_fields = ["metadata__updated_on", "metadata__timestamp",
                       "ocean-unique-id", "origin"]
        for f in copy_fields:
            eitem[f] = item[f] if f in item else None
        eitem['update_id'] = item['data']['update_id']
        # The real data
        message = item['data']['message']
        # data fields to copy (None when absent)
        for f in ["message_id", "sticker"]:
            eitem[f] = message[f] if f in message else None
        # Fields whose names are translated.
        # Bug fix: the target key for "date" used to be "sent_date,"
        # (stray comma inside the string), producing a malformed field.
        map_fields = {"text": "message",
                      "date": "sent_date"}
        for f in map_fields:
            eitem[map_fields[f]] = message[f] if f in message else None
        if "text" in message:
            eitem["text_analyzed"] = message["text"]
        eitem['chat_id'] = message['chat']['id']
        if 'title' in message['chat']:
            eitem['chat_title'] = message['chat']['title']
        eitem['chat_type'] = message['chat']['type']
        eitem['from_id'] = message['from']['id']
        eitem['author'] = message['from']['first_name']
        eitem['author_id'] = message['from']['id']
        if 'last_name' in message['from']:
            eitem['author_last_name'] = message['from']['last_name']
        if 'username' in message['from']:
            eitem['username'] = message['from']['username']
        if 'reply_to_message' in message:
            reply = message['reply_to_message']
            eitem['reply_to_message_id'] = reply['message_id']
            eitem['reply_to_sent_date'] = reply['date']
            # A reply can quote either a text message or a sticker.
            if 'text' in reply:
                eitem['reply_to_message'] = reply['text']
            elif 'sticker' in reply:
                eitem['reply_to_message'] = reply['sticker']
            eitem['reply_to_chat_id'] = reply['chat']['id']
            eitem['reply_to_chat_title'] = reply['chat']['title']
            eitem['reply_to_chat_type'] = reply['chat']['type']
            eitem['reply_to_author_id'] = reply['from']['id']
            eitem['reply_to_author'] = reply['from']['first_name']
            if 'last_name' in reply['from']:
                eitem['reply_to_author_last_name'] = reply['from']['last_name']
            if 'username' in reply['from']:
                eitem['reply_to_username'] = reply['from']['username']
        if self.sortinghat:
            eitem.update(self.get_item_sh(item))
        eitem.update(self.get_grimoire_fields(item["metadata__updated_on"], "telegram"))
        return eitem
| sanacl/GrimoireELK | grimoire/elk/telegram.py | Python | gpl-3.0 | 5,129 | [
"Elk"
] | 6d9deecba7cf1c3fd976e26df9f0306c4466087d61adf648c5f37d048be9575f |
#!/usr/bin/env python
# fig 3b processing
# description: set up reads for allele-specific ATAC (quasar) calls
# data: /srv/scratch/dskim89/ggr/ggr.atac.2019-06-06.ase
import os
import glob
import pandas as pd
import numpy as np
def filter_bam_file(in_bam, out_bam):
    """Filter a BAM file down to high-confidence reads for ASE analysis.

    Keeps only alignments with MAPQ >= 40, no indels in the CIGAR string,
    at most one mismatch in the MD tag, and no mismatch within 5 bp of the
    read start. The filtered BAM is then indexed.

    Args:
        in_bam: path to the input BAM file.
        out_bam: path for the filtered output BAM (index written alongside).

    Returns:
        None. Side effects only: writes out_bam and its .bai index, prints
        the shell commands it runs.
    """
    # Extract the header separately so it can be re-attached after the
    # awk filters (which only see body lines) strip it away.
    header_bam = "{}.header.bam".format(out_bam.split(".bam")[0])
    make_header = "samtools view -H {} > {}".format(
        in_bam, header_bam)
    os.system(make_header)

    # filter
    cmd = (
        "samtools view -q 40 {} | "  # q40 filter
        "awk '$6 !~ /D|I/' | "  # check CIGAR, remove indels
        "awk '$12 ~ /^MD:Z:([0-9]*[A-Z][0-9]*){{0,1}}[0-9]*$/' | "  # check MD, only allow 1 mismatch through
        "awk '$12 !~ /^MD:Z:[0-5][A-Z][0-9]*/' | "  # just throw out reads with mismatch within 5bp of start
        "cat {} - | "
        "samtools view -bS - > {}").format(
            in_bam, header_bam, out_bam)
    # print() with a single argument behaves identically under Python 2
    # and is required syntax under Python 3 (was a Py2-only print statement).
    print(cmd)
    os.system(cmd)

    # index the file
    index_cmd = "samtools index {}".format(out_bam)
    print(index_cmd)
    os.system(index_cmd)

    # clean up the temporary header file
    os.system("rm {}".format(header_bam))

    return None
def build_quasar_input_file(
        snp_file,
        bam_file,
        out_file,
        fasta_file,
        quasar_preprocess_script):
    """Build the QuASAR input file for one filtered BAM.

    Pipeline: samtools mpileup restricted to the 1KG SNP positions ->
    filtered, sorted BED of usable pileup positions intersected back with
    the SNP file -> QuASAR preprocessing R script, which is expected to
    write out_file. Each stage is skipped when its output already exists,
    so the function is resumable.

    Args:
        snp_file: gzipped BED of filtered 1KG SNPs (used for -l and intersectBed).
        bam_file: filtered BAM file to pile up.
        out_file: QuASAR input file the R script is expected to produce.
        fasta_file: reference genome FASTA for mpileup.
        quasar_preprocess_script: R script converting the pileup BED.

    Returns:
        None. Raises AssertionError if out_file was not produced.
    """
    # pileup, restricted to SNP positions
    pileup_file = "{}.1KG_filt.pileup.gz".format(
        bam_file.split(".bam")[0])
    pileup_cmd = "samtools mpileup -f {} -l {} {} | gzip -c > {}".format(
        fasta_file, snp_file, bam_file, pileup_file)
    if not os.path.isfile(pileup_file):
        # print() single-arg form: identical output on Py2, valid on Py3
        # (was a Py2-only print statement).
        print(pileup_cmd)
        os.system(pileup_cmd)

    # convert to BED with filtering: require coverage, drop reference
    # skips (<>), insertions/deletions, and pileup deletion markers (*)
    pileup_bed_file = "{}.1KG_filt.pileup.bed.gz".format(
        bam_file.split(".bam")[0])
    to_bed_cmd = (
        "zcat {} | "
        "awk -v OFS='\t' "
        "'{{ if ($4>0 && $5 !~ /[^\^][<>]/ && "
        "$5 !~ /\+[0-9]+[ACGTNacgtn]+/ && "
        "$5 !~ /-[0-9]+[ACGTNacgtn]+/ "
        "&& $5 !~ /[^\^]\*/) "
        "print $1,$2-1,$2,$3,$4,$5,$6 }}' | "
        "sortBed -i stdin | "
        "intersectBed -a stdin -b {} -wo | "
        "cut -f 1-7,11-14 | "
        "gzip > {}").format(
            pileup_file, snp_file, pileup_bed_file)
    if not os.path.isfile(pileup_bed_file):
        print(to_bed_cmd)
        os.system(to_bed_cmd)

    # and then process to quasar input file
    quasar_preprocess_cmd = "R --vanilla --args {} < {}".format(
        pileup_bed_file,
        quasar_preprocess_script)
    if not os.path.isfile(out_file):
        print(quasar_preprocess_cmd)
        os.system(quasar_preprocess_cmd)
    # the R script must have produced the expected file
    assert os.path.isfile(out_file)

    return None
def main():
    """Run allele-specific expression (ASE) calling on GGR ATAC data.

    Steps:
      1) download the 1KG filtered SNP file (if missing)
      2) stringently filter each ATAC BAM (filter_bam_file)
      3) build QuASAR input files (build_quasar_input_file)
      4) run QuASAR per replicate (b1, b2)
      5) merge replicate results, flag significant variants at FDR 0.10,
         and write summary tables plus a VCF carrying sig/beta in INFO.
    """
    # server specific inputs
    WORK_DIR = "."
    FASTA = "/mnt/data/annotations/by_release/hg19.GRCh37/hg19.genome.fa"
    BAM_DIR = "/mnt/lab_data/kundaje/projects/skin/data/bds/processed.atac.2019-06-04.bams_bp-position-matched"
    SNP_FILE_URL = "http://genome.grid.wayne.edu/centisnps/files/1KG_SNPs_filt.bed.gz"
    quasar_preprocess_script = "/users/dskim89/git/ggr-project/figs/fig_3.motifs_and_tfs/fig_3-b.1b.convertPileupToQuasar.R"
    quasar_script = "/users/dskim89/git/ggr-project/figs/fig_3.motifs_and_tfs/fig_3-b.1c.run_quasar.R"

    # pull bam files
    bam_files = sorted(glob.glob("{}/*.b*bam".format(BAM_DIR)))

    # get the snp file
    snp_file = "1KG_SNPs_filt.bed.gz"
    if not os.path.isfile(snp_file):
        download_snp_file = "wget {}".format(SNP_FILE_URL)
        os.system(download_snp_file)

    # first filter BAMs: up the filter quality, remove reads with indels,
    # only keep reads that match perfectly or have 1 mismatch
    filt_bams = []
    for bam_file in bam_files:
        out_bam = "{}/{}.ase_filt.bam".format(
            WORK_DIR, os.path.basename(bam_file).split(".bam")[0])
        if not os.path.isfile(out_bam):
            filter_bam_file(bam_file, out_bam)
        filt_bams.append(out_bam)

    # then set up files for quasar
    quasar_inputs = []
    for bam_file in filt_bams:
        quasar_input_file = "{}.1KG_filt.quasar.in.gz".format(
            bam_file.split(".bam")[0])
        if not os.path.isfile(quasar_input_file):
            build_quasar_input_file(
                snp_file, bam_file, quasar_input_file, FASTA,
                quasar_preprocess_script)
        quasar_inputs.append(quasar_input_file)

    # run each individual through quasar, generate individual results
    rep1_results_file = "b1.results_ALL.txt"
    if not os.path.isfile(rep1_results_file):
        rep1_inputs = [filename for filename in quasar_inputs if "b1" in filename]
        run_quasar = "{} b1 {}".format(
            quasar_script,
            " ".join(rep1_inputs))
        print(run_quasar)
        os.system(run_quasar)

    rep2_results_file = "b2.results_ALL.txt"
    if not os.path.isfile(rep2_results_file):
        rep2_inputs = [filename for filename in quasar_inputs if "b2" in filename]
        # BUGFIX: previously hard-coded "Rscript run_quasar.R b2 ..."
        # (relative path, cwd-dependent); now uses the configured
        # quasar_script, consistent with the b1 branch above.
        run_quasar = "{} b2 {}".format(
            quasar_script,
            " ".join(rep2_inputs))
        print(run_quasar)
        os.system(run_quasar)

    # merge the two rep files for a final file with sig variants
    rep1_data = pd.read_csv(rep1_results_file, sep="\t", index_col=0)
    rep2_data = pd.read_csv(rep2_results_file, sep="\t", index_col=0)
    all_data = rep1_data.merge(rep2_data, how="outer", left_index=True, right_index=True)
    all_data = all_data.fillna(0)
    all_data = all_data[sorted(all_data.columns)]
    # index looks like "<rsid>_..." ; split out the rsid later
    id_fields = all_data.index.to_series().str.split("_", n=3, expand=True)

    # flag variants significant in ANY timepoint (qval present and < FDR)
    fdr = 0.10
    qval_columns = [colname for colname in all_data.columns if "qval" in colname]
    qvals = all_data[qval_columns]
    sig = np.any((qvals < fdr) & (qvals != 0), axis=-1).astype(int)
    all_data["sig"] = sig

    # strongest effect size across timepoints
    beta_columns = [colname for colname in all_data.columns if "beta" in colname]
    betas = all_data[beta_columns]
    # NOTE(review): the *2 assumes beta/qval columns alternate after the
    # column sort, so beta columns sit at even positions — TODO confirm.
    all_data["beta_max_idx"] = np.argmax(np.abs(betas.values), axis=-1) * 2
    betas_max = []
    for i in range(all_data.shape[0]):
        betas_max.append(all_data.values[i, all_data["beta_max_idx"].values[i]])
    all_data["beta_max"] = betas_max

    # pull in ref/alt from snp file
    all_data["rsid"] = id_fields[0].values
    snps = pd.read_csv(
        snp_file, sep="\t",
        header=None,
        names=["chr", "start", "stop", "rsid", "ref", "alt", "maf"])
    all_data = all_data.merge(snps, how="left", on="rsid")

    # save this out, plus a reduced version
    all_data_file = "all.results_ALL.txt"
    all_data.to_csv(all_data_file, sep="\t")
    all_reduced_file = "all.results.slim.txt"
    all_reduced = all_data[["sig", "beta_max"]]
    all_reduced.to_csv(all_reduced_file, sep="\t")

    # convert to VCF format, keeping sig and beta val in INFO.
    # .copy() makes an explicit frame so the column writes below cannot
    # hit pandas chained-assignment (SettingWithCopy) behavior.
    vcf_file = "all.1KG_filt.vcf"
    vcf_data = all_data[
        ["chr", "start", "rsid", "ref", "alt"]].copy()
    vcf_data["chr"] = vcf_data["chr"].str.replace("chr", "").values
    vcf_data.columns = ["#CHROM", "POS", "ID", "REF", "ALT"]
    vcf_data["QUAL"] = 1
    vcf_data["FILTER"] = 1
    vcf_data["INFO"] = "sig=" + all_data["sig"].map(str) + ";beta=" + all_data["beta_max"].map(str)
    vcf_data.to_csv(vcf_file, sep="\t", index=False)

    print(np.sum(all_data["sig"].values))

    return
# Only run the pipeline when executed as a script, not on import.
if __name__ == "__main__":
    main()
| vervacity/ggr-project | figs/fig_2.modelling/fig_3-b.1a.process.py | Python | mit | 7,702 | [
"ASE"
] | 1087694ec72d21c97001eefe62e931e057baff978fa761bb9ad8577baff673b9 |
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.special import logsumexp
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import deprecated
from .utils.extmath import safe_sparse_dot
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted, check_non_negative
from .utils.validation import _check_sample_weight
# Public estimator classes exported by this module.
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB', 'ComplementNB',
           'CategoricalNB']
class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):
    """Abstract base class for naive Bayes estimators."""

    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X.

        That is, ``log P(c) + log P(x|c)`` for all rows x of X, as an
        array-like of shape (n_classes, n_samples). X is forwarded as-is
        by predict, predict_proba and predict_log_proba.
        """

    @abstractmethod
    def _check_X(self, X):
        """Validate X with the subclass-specific checks.

        Only used in the predict* methods.
        """

    def predict(self, X):
        """
        Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        C : ndarray of shape (n_samples,)
            Predicted target values for X.
        """
        check_is_fitted(self)
        log_joint = self._joint_log_likelihood(self._check_X(X))
        # the class with the highest joint log likelihood wins
        best_class_indices = np.argmax(log_joint, axis=1)
        return self.classes_[best_class_indices]

    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        C : array-like of shape (n_samples, n_classes)
            Log-probability of the samples for each class in the model,
            with columns ordered as in :term:`classes_`.
        """
        check_is_fitted(self)
        log_joint = self._joint_log_likelihood(self._check_X(X))
        # normalize by the marginal P(x) = P(f_1, ..., f_n)
        log_marginal = logsumexp(log_joint, axis=1)
        return log_joint - log_marginal[:, np.newaxis]

    def predict_proba(self, X):
        """
        Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        C : array-like of shape (n_samples, n_classes)
            Probability of the samples for each class in the model,
            with columns ordered as in :term:`classes_`.
        """
        return np.exp(self.predict_log_proba(X))
class GaussianNB(_BaseNB):
    """
    Gaussian Naive Bayes (GaussianNB)

    Can perform online updates to model parameters via :meth:`partial_fit`.
    For details on algorithm used to update feature means and variance online,
    see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:

        http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

    Read more in the :ref:`User Guide <gaussian_naive_bayes>`.

    Parameters
    ----------
    priors : array-like of shape (n_classes,)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    var_smoothing : float, default=1e-9
        Portion of the largest variance of all features that is added to
        variances for calculation stability.

        .. versionadded:: 0.20

    Attributes
    ----------
    class_count_ : ndarray of shape (n_classes,)
        number of training samples observed in each class.

    class_prior_ : ndarray of shape (n_classes,)
        probability of each class.

    classes_ : ndarray of shape (n_classes,)
        class labels known to the classifier.

    epsilon_ : float
        absolute additive value to variances.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    sigma_ : ndarray of shape (n_classes, n_features)
        Variance of each feature per class.

        .. deprecated:: 1.0
            `sigma_` is deprecated in 1.0 and will be removed in 1.2.
            Use `var_` instead.

    var_ : ndarray of shape (n_classes, n_features)
        Variance of each feature per class.

        .. versionadded:: 1.0

    theta_ : ndarray of shape (n_classes, n_features)
        mean of each feature per class.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([1, 1, 1, 2, 2, 2])
    >>> from sklearn.naive_bayes import GaussianNB
    >>> clf = GaussianNB()
    >>> clf.fit(X, Y)
    GaussianNB()
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    >>> clf_pf = GaussianNB()
    >>> clf_pf.partial_fit(X, Y, np.unique(Y))
    GaussianNB()
    >>> print(clf_pf.predict([[-0.8, -1]]))
    [1]
    """

    def __init__(self, *, priors=None, var_smoothing=1e-9):
        self.priors = priors
        self.var_smoothing = var_smoothing

    def fit(self, X, y, sample_weight=None):
        """Fit Gaussian Naive Bayes according to X, y

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Weights applied to individual samples (1. for unweighted).

            .. versionadded:: 0.17
               Gaussian Naive Bayes supports fitting with *sample_weight*.

        Returns
        -------
        self : object
        """
        X, y = self._validate_data(X, y)
        # fit() is a full refit: _refit=True discards any previously
        # accumulated state before running the online update once.
        return self._partial_fit(X, y, np.unique(y), _refit=True,
                                 sample_weight=sample_weight)

    def _check_X(self, X):
        """Validate X, used only in predict* methods."""
        return self._validate_data(X, reset=False)

    @staticmethod
    def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
        """Compute online update of Gaussian mean and variance.

        Given starting sample count, mean, and variance, a new set of
        points X, and optionally sample weights, return the updated mean and
        variance. (NB - each dimension (column) in X is treated as independent
        -- you get variance, not covariance).

        Can take scalar mean and variance, or vector mean and variance to
        simultaneously update a number of independent Gaussians.

        See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:

            http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

        Parameters
        ----------
        n_past : int
            Number of samples represented in old mean and variance. If sample
            weights were given, this should contain the sum of sample
            weights represented in old mean and variance.

        mu : array-like of shape (number of Gaussians,)
            Means for Gaussians in original set.

        var : array-like of shape (number of Gaussians,)
            Variances for Gaussians in original set.

        X : array-like of shape (n_samples, number of Gaussians)
            New data points to fold into the running statistics.

        sample_weight : array-like of shape (n_samples,), default=None
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        total_mu : array-like of shape (number of Gaussians,)
            Updated mean for each Gaussian over the combined set.

        total_var : array-like of shape (number of Gaussians,)
            Updated variance for each Gaussian over the combined set.
        """
        # nothing to fold in: keep the old statistics unchanged
        if X.shape[0] == 0:
            return mu, var

        # Compute (potentially weighted) mean and variance of new datapoints
        if sample_weight is not None:
            n_new = float(sample_weight.sum())
            new_mu = np.average(X, axis=0, weights=sample_weight)
            new_var = np.average((X - new_mu) ** 2, axis=0,
                                 weights=sample_weight)
        else:
            n_new = X.shape[0]
            new_var = np.var(X, axis=0)
            new_mu = np.mean(X, axis=0)

        # no prior data: the batch statistics ARE the running statistics
        if n_past == 0:
            return new_mu, new_var

        n_total = float(n_past + n_new)

        # Combine mean of old and new data, taking into consideration
        # (weighted) number of observations
        total_mu = (n_new * new_mu + n_past * mu) / n_total

        # Combine variance of old and new data, taking into consideration
        # (weighted) number of observations. This is achieved by combining
        # the sum-of-squared-differences (ssd)
        old_ssd = n_past * var
        new_ssd = n_new * new_var
        # cross term corrects for the shift between the two group means
        total_ssd = (old_ssd + new_ssd +
                     (n_new * n_past / n_total) * (mu - new_mu) ** 2)
        total_var = total_ssd / n_total

        return total_mu, total_var

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.

        This is especially useful when the whole dataset is too big to fit in
        memory at once.

        This method has some performance and numerical stability overhead,
        hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as fitting in the memory budget) to
        hide the overhead.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like of shape (n_samples,)
            Target values.

        classes : array-like of shape (n_classes,), default=None
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        sample_weight : array-like of shape (n_samples,), default=None
            Weights applied to individual samples (1. for unweighted).

            .. versionadded:: 0.17

        Returns
        -------
        self : object
        """
        return self._partial_fit(X, y, classes, _refit=False,
                                 sample_weight=sample_weight)

    def _partial_fit(self, X, y, classes=None, _refit=False,
                     sample_weight=None):
        """Actual implementation of Gaussian NB fitting.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like of shape (n_samples,)
            Target values.

        classes : array-like of shape (n_classes,), default=None
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        _refit : bool, default=False
            If true, act as though this were the first time we called
            _partial_fit (ie, throw away any past fitting and start over).

        sample_weight : array-like of shape (n_samples,), default=None
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
        """
        # clearing classes_ makes _check_partial_fit_first_call treat this
        # as a fresh fit
        if _refit:
            self.classes_ = None

        first_call = _check_partial_fit_first_call(self, classes)
        X, y = self._validate_data(X, y, reset=first_call)
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)

        # If the ratio of data variance between dimensions is too small, it
        # will cause numerical errors. To address this, we artificially
        # boost the variance by epsilon, a small fraction of the standard
        # deviation of the largest dimension.
        # NOTE: epsilon is recomputed from the current batch on every call.
        self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()

        if first_call:
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_features = X.shape[1]
            n_classes = len(self.classes_)
            self.theta_ = np.zeros((n_classes, n_features))
            self.var_ = np.zeros((n_classes, n_features))

            self.class_count_ = np.zeros(n_classes, dtype=np.float64)

            # Initialise the class prior
            # Take into account the priors
            if self.priors is not None:
                priors = np.asarray(self.priors)
                # Check that the provide prior match the number of classes
                if len(priors) != n_classes:
                    raise ValueError('Number of priors must match number of'
                                     ' classes.')
                # Check that the sum is 1
                if not np.isclose(priors.sum(), 1.0):
                    raise ValueError('The sum of the priors should be 1.')
                # Check that the prior are non-negative
                if (priors < 0).any():
                    raise ValueError('Priors must be non-negative.')
                self.class_prior_ = priors
            else:
                # Initialize the priors to zeros for each class
                self.class_prior_ = np.zeros(len(self.classes_),
                                             dtype=np.float64)
        else:
            if X.shape[1] != self.theta_.shape[1]:
                msg = "Number of features %d does not match previous data %d."
                raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
            # Put epsilon back in each time
            # (var_ stores raw variance + epsilon; strip it before the
            # online update and re-add it below)
            self.var_[:, :] -= self.epsilon_

        classes = self.classes_

        unique_y = np.unique(y)
        unique_y_in_classes = np.in1d(unique_y, classes)

        if not np.all(unique_y_in_classes):
            raise ValueError("The target label(s) %s in y do not exist in the "
                             "initial classes %s" %
                             (unique_y[~unique_y_in_classes], classes))

        # fold each class's batch into its running mean/variance
        for y_i in unique_y:
            i = classes.searchsorted(y_i)
            X_i = X[y == y_i, :]

            if sample_weight is not None:
                sw_i = sample_weight[y == y_i]
                N_i = sw_i.sum()
            else:
                sw_i = None
                N_i = X_i.shape[0]

            new_theta, new_sigma = self._update_mean_variance(
                self.class_count_[i], self.theta_[i, :], self.var_[i, :],
                X_i, sw_i)

            self.theta_[i, :] = new_theta
            self.var_[i, :] = new_sigma
            self.class_count_[i] += N_i

        # re-add the smoothing term stripped above
        self.var_[:, :] += self.epsilon_

        # Update if only no priors is provided
        if self.priors is None:
            # Empirical prior, with sample_weight taken into account
            self.class_prior_ = self.class_count_ / self.class_count_.sum()

        return self

    def _joint_log_likelihood(self, X):
        # log P(c) + sum_j log N(x_j | theta_cj, var_cj) for each class c
        joint_log_likelihood = []
        for i in range(np.size(self.classes_)):
            jointi = np.log(self.class_prior_[i])
            # Gaussian log density: normalization term ...
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.var_[i, :]))
            # ... plus the squared-deviation term, summed over features
            n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
                                 (self.var_[i, :]), 1)
            joint_log_likelihood.append(jointi + n_ij)

        joint_log_likelihood = np.array(joint_log_likelihood).T
        return joint_log_likelihood

    # NOTE(review): the concatenated message renders as "removed in1.2."
    # (missing space between the two string literals) — fix in a code change.
    @deprecated(  # type: ignore
        "Attribute sigma_ was deprecated in 1.0 and will be removed in"
        "1.2. Use var_ instead."
    )
    @property
    def sigma_(self):
        # backward-compatible alias for var_
        return self.var_
# Smallest permitted additive-smoothing value; _BaseDiscreteNB._check_alpha
# clips smaller alphas up to this to avoid numeric errors (e.g. log(0)).
_ALPHA_MIN = 1e-10
class _BaseDiscreteNB(_BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data

    Any estimator based on this class should provide:

    __init__
    _joint_log_likelihood(X) as per _BaseNB

    Subclasses additionally supply _count (accumulate statistics from a
    batch) and _update_feature_log_prob (recompute smoothed log
    probabilities from the accumulated counts).
    """

    def _check_X(self, X):
        """Validate X, used only in predict* methods."""
        return self._validate_data(X, accept_sparse='csr', reset=False)

    def _check_X_y(self, X, y, reset=True):
        """Validate X and y in fit methods."""
        return self._validate_data(X, y, accept_sparse='csr', reset=reset)

    def _update_class_log_prior(self, class_prior=None):
        """Recompute class_log_prior_ from a given prior, the empirical
        class counts, or a uniform distribution (in that priority order)."""
        n_classes = len(self.classes_)
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError("Number of priors must match number of"
                                 " classes.")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            with warnings.catch_warnings():
                # silence the warning when count is 0 because class was not yet
                # observed
                warnings.simplefilter("ignore", RuntimeWarning)
                log_class_count = np.log(self.class_count_)

            # empirical prior, with sample_weight taken into account
            self.class_log_prior_ = (log_class_count -
                                     np.log(self.class_count_.sum()))
        else:
            # uniform prior: log(1 / n_classes) for every class
            self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))

    def _check_alpha(self):
        """Validate the smoothing parameter and clip it to _ALPHA_MIN.

        Returns the (possibly clipped) alpha to use for smoothing.
        """
        if np.min(self.alpha) < 0:
            raise ValueError('Smoothing parameter alpha = %.1e. '
                             'alpha should be > 0.' % np.min(self.alpha))
        if isinstance(self.alpha, np.ndarray):
            if not self.alpha.shape[0] == self.n_features_in_:
                raise ValueError("alpha should be a scalar or a numpy array "
                                 "with shape [n_features]")
        if np.min(self.alpha) < _ALPHA_MIN:
            warnings.warn('alpha too small will result in numeric errors, '
                          'setting alpha = %.1e' % _ALPHA_MIN)
            return np.maximum(self.alpha, _ALPHA_MIN)
        return self.alpha

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.

        This is especially useful when the whole dataset is too big to fit in
        memory at once.

        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like of shape (n_samples,)
            Target values.

        classes : array-like of shape (n_classes,), default=None
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        sample_weight : array-like of shape (n_samples,), default=None
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
        """
        first_call = not hasattr(self, "classes_")
        X, y = self._check_X_y(X, y, reset=first_call)
        _, n_features = X.shape

        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_classes = len(classes)
            self._init_counters(n_classes, n_features)

        Y = label_binarize(y, classes=self.classes_)
        if Y.shape[1] == 1:
            # binarizer collapses binary problems to one column; expand back
            if len(self.classes_) == 2:
                Y = np.concatenate((1 - Y, Y), axis=1)
            else:  # degenerate case: just one class
                Y = np.ones_like(Y)

        if X.shape[0] != Y.shape[0]:
            msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
            raise ValueError(msg % (X.shape[0], y.shape[0]))

        # label_binarize() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently
        Y = Y.astype(np.float64, copy=False)
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)
            sample_weight = np.atleast_2d(sample_weight)
            Y *= sample_weight.T

        class_prior = self.class_prior

        # Count raw events from data before updating the class log prior
        # and feature log probas
        self._count(X, Y)

        # XXX: OPTIM: we could introduce a public finalization method to
        # be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior any call to predict[_[log_]proba]
        # to avoid computing the smooth log probas at each call to partial fit
        alpha = self._check_alpha()
        self._update_feature_log_prob(alpha)
        self._update_class_log_prior(class_prior=class_prior)
        return self

    def fit(self, X, y, sample_weight=None):
        """Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
        """
        X, y = self._check_X_y(X, y)
        _, n_features = X.shape

        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        if Y.shape[1] == 1:
            # binarizer collapses binary problems to one column; expand back
            if len(self.classes_) == 2:
                Y = np.concatenate((1 - Y, Y), axis=1)
            else:  # degenerate case: just one class
                Y = np.ones_like(Y)

        # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently;
        # this means we also don't have to cast X to floating point
        if sample_weight is not None:
            Y = Y.astype(np.float64, copy=False)
            sample_weight = _check_sample_weight(sample_weight, X)
            sample_weight = np.atleast_2d(sample_weight)
            Y *= sample_weight.T

        class_prior = self.class_prior

        # Count raw events from data before updating the class log prior
        # and feature log probas
        n_classes = Y.shape[1]
        self._init_counters(n_classes, n_features)
        self._count(X, Y)
        alpha = self._check_alpha()
        self._update_feature_log_prob(alpha)
        self._update_class_log_prior(class_prior=class_prior)
        return self

    def _init_counters(self, n_classes, n_features):
        """Reset the cumulative class and feature counters to zero."""
        self.class_count_ = np.zeros(n_classes, dtype=np.float64)
        self.feature_count_ = np.zeros((n_classes, n_features),
                                       dtype=np.float64)

    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "Attribute coef_ was deprecated in "
        "version 0.24 and will be removed in 1.1 (renaming of 0.26).")
    @property
    def coef_(self):
        # linear-model view of the feature log probabilities
        return (self.feature_log_prob_[1:]
                if len(self.classes_) == 2 else self.feature_log_prob_)

    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "Attribute intercept_ was deprecated in "
        "version 0.24 and will be removed in 1.1 (renaming of 0.26).")
    @property
    def intercept_(self):
        # linear-model view of the class log priors
        return (self.class_log_prior_[1:]
                if len(self.classes_) == 2 else self.class_log_prior_)

    def _more_tags(self):
        return {'poor_score': True}

    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "Attribute n_features_ was deprecated in version 1.0 and will be "
        "removed in 1.2. Use 'n_features_in_' instead."
    )
    @property
    def n_features_(self):
        return self.n_features_in_
class MultinomialNB(_BaseDiscreteNB):
    """
    Naive Bayes classifier for multinomial models.

    Suited for classification with discrete features (e.g., word counts
    for text classification). The multinomial distribution normally
    requires integer feature counts, but fractional counts such as tf-idf
    may also work in practice.

    Read more in the :ref:`User Guide <multinomial_naive_bayes>`.

    Parameters
    ----------
    alpha : float, default=1.0
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).

    fit_prior : bool, default=True
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.

    class_prior : array-like of shape (n_classes,), default=None
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_count_ : ndarray of shape (n_classes,)
        Number of samples encountered for each class during fitting,
        weighted by the sample weight when provided.

    class_log_prior_ : ndarray of shape (n_classes,)
        Smoothed empirical log probability for each class.

    classes_ : ndarray of shape (n_classes,)
        Class labels known to the classifier.

    coef_ : ndarray of shape (n_classes, n_features)
        Mirrors ``feature_log_prob_`` for interpreting `MultinomialNB`
        as a linear model.

        .. deprecated:: 0.24
            ``coef_`` is deprecated in 0.24 and will be removed in 1.1
            (renaming of 0.26).

    feature_count_ : ndarray of shape (n_classes, n_features)
        Number of samples encountered for each (class, feature) during
        fitting, weighted by the sample weight when provided.

    feature_log_prob_ : ndarray of shape (n_classes, n_features)
        Empirical log probability of features given a class, ``P(x_i|y)``.

    intercept_ : ndarray of shape (n_classes,)
        Mirrors ``class_log_prior_`` for interpreting `MultinomialNB`
        as a linear model.

        .. deprecated:: 0.24
            ``intercept_`` is deprecated in 0.24 and will be removed in 1.1
            (renaming of 0.26).

    n_features_ : int
        Number of features of each sample.

        .. deprecated:: 1.0
            Attribute `n_features_` was deprecated in version 1.0 and will be
            removed in 1.2. Use `n_features_in_` instead.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    Examples
    --------
    >>> import numpy as np
    >>> rng = np.random.RandomState(1)
    >>> X = rng.randint(5, size=(6, 100))
    >>> y = np.array([1, 2, 3, 4, 5, 6])
    >>> from sklearn.naive_bayes import MultinomialNB
    >>> clf = MultinomialNB()
    >>> clf.fit(X, y)
    MultinomialNB()
    >>> print(clf.predict(X[2:3]))
    [3]

    Notes
    -----
    For the rationale behind the names `coef_` and `intercept_`, i.e.
    naive Bayes as a linear classifier, see J. Rennie et al. (2003),
    Tackling the poor assumptions of naive Bayes text classifiers, ICML.

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
    """

    def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _more_tags(self):
        # multinomial counts cannot be negative
        return {'requires_positive_X': True}

    def _count(self, X, Y):
        """Accumulate raw (weighted) class and feature counts for a batch."""
        check_non_negative(X, "MultinomialNB (input X)")
        batch_feature_counts = safe_sparse_dot(Y.T, X)
        batch_class_counts = Y.sum(axis=0)
        self.feature_count_ += batch_feature_counts
        self.class_count_ += batch_class_counts

    def _update_feature_log_prob(self, alpha):
        """Recompute smoothed per-class feature log probabilities."""
        smoothed_counts = self.feature_count_ + alpha
        smoothed_totals = smoothed_counts.sum(axis=1).reshape(-1, 1)
        self.feature_log_prob_ = np.log(smoothed_counts) - np.log(smoothed_totals)

    def _joint_log_likelihood(self, X):
        """Calculate the (unnormalized) posterior log probability of X."""
        feature_scores = safe_sparse_dot(X, self.feature_log_prob_.T)
        return feature_scores + self.class_log_prior_
class ComplementNB(_BaseDiscreteNB):
    """The Complement Naive Bayes classifier described in Rennie et al. (2003).

    The Complement Naive Bayes classifier was designed to correct the "severe
    assumptions" made by the standard Multinomial Naive Bayes classifier. It is
    particularly suited for imbalanced data sets.

    Read more in the :ref:`User Guide <complement_naive_bayes>`.

    .. versionadded:: 0.20

    Parameters
    ----------
    alpha : float, default=1.0
        Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).
    fit_prior : bool, default=True
        Only used in edge case with a single class in the training set.
    class_prior : array-like of shape (n_classes,), default=None
        Prior probabilities of the classes. Not used.
    norm : bool, default=False
        Whether or not a second normalization of the weights is performed. The
        default behavior mirrors the implementations found in Mahout and Weka,
        which do not follow the full algorithm described in Table 9 of the
        paper.

    Attributes
    ----------
    class_count_ : ndarray of shape (n_classes,)
        Number of samples encountered for each class during fitting. This
        value is weighted by the sample weight when provided.
    class_log_prior_ : ndarray of shape (n_classes,)
        Smoothed empirical log probability for each class. Only used in edge
        case with a single class in the training set.
    classes_ : ndarray of shape (n_classes,)
        Class labels known to the classifier
    coef_ : ndarray of shape (n_classes, n_features)
        Mirrors ``feature_log_prob_`` for interpreting `ComplementNB`
        as a linear model.
        .. deprecated:: 0.24
            ``coef_`` is deprecated in 0.24 and will be removed in 1.1
            (renaming of 0.26).
    feature_all_ : ndarray of shape (n_features,)
        Number of samples encountered for each feature during fitting. This
        value is weighted by the sample weight when provided.
    feature_count_ : ndarray of shape (n_classes, n_features)
        Number of samples encountered for each (class, feature) during fitting.
        This value is weighted by the sample weight when provided.
    feature_log_prob_ : ndarray of shape (n_classes, n_features)
        Empirical weights for class complements.
    intercept_ : ndarray of shape (n_classes,)
        Mirrors ``class_log_prior_`` for interpreting `ComplementNB`
        as a linear model.
        .. deprecated:: 0.24
            ``intercept_`` is deprecated in 0.24 and will be removed in 1.1
            (renaming of 0.26).
    n_features_ : int
        Number of features of each sample.
        .. deprecated:: 1.0
            Attribute `n_features_` was deprecated in version 1.0 and will be
            removed in 1.2. Use `n_features_in_` instead.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24

    Examples
    --------
    >>> import numpy as np
    >>> rng = np.random.RandomState(1)
    >>> X = rng.randint(5, size=(6, 100))
    >>> y = np.array([1, 2, 3, 4, 5, 6])
    >>> from sklearn.naive_bayes import ComplementNB
    >>> clf = ComplementNB()
    >>> clf.fit(X, y)
    ComplementNB()
    >>> print(clf.predict(X[2:3]))
    [3]

    References
    ----------
    Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
    Tackling the poor assumptions of naive bayes text classifiers. In ICML
    (Vol. 3, pp. 616-623).
    https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
    """

    def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None,
                 norm=False):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior
        self.norm = norm

    def _more_tags(self):
        # Complement counts require non-negative feature values.
        return {'requires_positive_X': True}

    def _count(self, X, Y):
        """Count feature occurrences."""
        check_non_negative(X, "ComplementNB (input X)")
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)
        # Per-feature totals over all classes; needed to form the
        # complement counts in _update_feature_log_prob.
        self.feature_all_ = self.feature_count_.sum(axis=0)

    def _update_feature_log_prob(self, alpha):
        """Apply smoothing to raw counts and compute the weights."""
        # Complement count: smoothed total for each feature minus the
        # count observed within the class itself.
        comp_count = self.feature_all_ + alpha - self.feature_count_
        logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
        # _BaseNB.predict uses argmax, but ComplementNB operates with argmin.
        if self.norm:
            summed = logged.sum(axis=1, keepdims=True)
            feature_log_prob = logged / summed
        else:
            feature_log_prob = -logged
        self.feature_log_prob_ = feature_log_prob

    def _joint_log_likelihood(self, X):
        """Calculate the class scores for the samples in X."""
        jll = safe_sparse_dot(X, self.feature_log_prob_.T)
        if len(self.classes_) == 1:
            # Degenerate single-class case: fall back to the prior.
            jll += self.class_log_prior_
        return jll
class BernoulliNB(_BaseDiscreteNB):
    """Naive Bayes classifier for multivariate Bernoulli models.

    Like MultinomialNB, this classifier is suitable for discrete data. The
    difference is that while MultinomialNB works with occurrence counts,
    BernoulliNB is designed for binary/boolean features.

    Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.

    Parameters
    ----------
    alpha : float, default=1.0
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    binarize : float or None, default=0.0
        Threshold for binarizing (mapping to booleans) of sample features.
        If None, input is presumed to already consist of binary vectors.
    fit_prior : bool, default=True
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like of shape (n_classes,), default=None
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_count_ : ndarray of shape (n_classes,)
        Number of samples encountered for each class during fitting. This
        value is weighted by the sample weight when provided.
    class_log_prior_ : ndarray of shape (n_classes,)
        Log probability of each class (smoothed).
    classes_ : ndarray of shape (n_classes,)
        Class labels known to the classifier
    coef_ : ndarray of shape (n_classes, n_features)
        Mirrors ``feature_log_prob_`` for interpreting `BernoulliNB`
        as a linear model.
    feature_count_ : ndarray of shape (n_classes, n_features)
        Number of samples encountered for each (class, feature)
        during fitting. This value is weighted by the sample weight when
        provided.
    feature_log_prob_ : ndarray of shape (n_classes, n_features)
        Empirical log probability of features given a class, P(x_i|y).
    intercept_ : ndarray of shape (n_classes,)
        Mirrors ``class_log_prior_`` for interpreting `BernoulliNB`
        as a linear model.
    n_features_ : int
        Number of features of each sample.
        .. deprecated:: 1.0
            Attribute `n_features_` was deprecated in version 1.0 and will be
            removed in 1.2. Use `n_features_in_` instead.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24

    Examples
    --------
    >>> import numpy as np
    >>> rng = np.random.RandomState(1)
    >>> X = rng.randint(5, size=(6, 100))
    >>> Y = np.array([1, 2, 3, 4, 4, 5])
    >>> from sklearn.naive_bayes import BernoulliNB
    >>> clf = BernoulliNB()
    >>> clf.fit(X, Y)
    BernoulliNB()
    >>> print(clf.predict(X[2:3]))
    [3]

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
    Text Categorization, pp. 41-48.
    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
    naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
    """

    def __init__(self, *, alpha=1.0, binarize=.0, fit_prior=True,
                 class_prior=None):
        self.alpha = alpha
        self.binarize = binarize
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _check_X(self, X):
        """Validate X, used only in predict* methods."""
        X = super()._check_X(X)
        if self.binarize is not None:
            # Map features to {0, 1} with the configured threshold.
            X = binarize(X, threshold=self.binarize)
        return X

    def _check_X_y(self, X, y, reset=True):
        """Validate X and y and binarize X, used at fit time."""
        X, y = super()._check_X_y(X, y, reset=reset)
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        return X, y

    def _count(self, X, Y):
        """Count and smooth feature occurrences."""
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)

    def _update_feature_log_prob(self, alpha):
        """Apply smoothing to raw counts and recompute log probabilities"""
        smoothed_fc = self.feature_count_ + alpha
        # alpha * 2 because each Bernoulli feature has two outcomes (0/1).
        smoothed_cc = self.class_count_ + alpha * 2
        self.feature_log_prob_ = (np.log(smoothed_fc) -
                                  np.log(smoothed_cc.reshape(-1, 1)))

    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        n_features = self.feature_log_prob_.shape[1]
        n_features_X = X.shape[1]
        if n_features_X != n_features:
            raise ValueError("Expected input with %d features, got %d instead"
                             % (n_features, n_features_X))
        # log P(x_i = 0 | y), derived from log P(x_i = 1 | y).
        neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
        # Compute neg_prob . (1 - X).T as sum(neg_prob) - X . neg_prob
        jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
        jll += self.class_log_prior_ + neg_prob.sum(axis=1)
        return jll
class CategoricalNB(_BaseDiscreteNB):
    """Naive Bayes classifier for categorical features

    The categorical Naive Bayes classifier is suitable for classification with
    discrete features that are categorically distributed. The categories of
    each feature are drawn from a categorical distribution.

    Read more in the :ref:`User Guide <categorical_naive_bayes>`.

    Parameters
    ----------
    alpha : float, default=1.0
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    fit_prior : bool, default=True
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like of shape (n_classes,), default=None
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.
    min_categories : int or array-like of shape (n_features,), default=None
        Minimum number of categories per feature.
        - integer: Sets the minimum number of categories per feature to
          `n_categories` for each features.
        - array-like: shape (n_features,) where `n_categories[i]` holds the
          minimum number of categories for the ith column of the input.
        - None (default): Determines the number of categories automatically
          from the training data.
        .. versionadded:: 0.24

    Attributes
    ----------
    category_count_ : list of arrays of shape (n_features,)
        Holds arrays of shape (n_classes, n_categories of respective feature)
        for each feature. Each array provides the number of samples
        encountered for each class and category of the specific feature.
    class_count_ : ndarray of shape (n_classes,)
        Number of samples encountered for each class during fitting. This
        value is weighted by the sample weight when provided.
    class_log_prior_ : ndarray of shape (n_classes,)
        Smoothed empirical log probability for each class.
    classes_ : ndarray of shape (n_classes,)
        Class labels known to the classifier
    feature_log_prob_ : list of arrays of shape (n_features,)
        Holds arrays of shape (n_classes, n_categories of respective feature)
        for each feature. Each array provides the empirical log probability
        of categories given the respective feature and class, ``P(x_i|y)``.
    n_features_ : int
        Number of features of each sample.
        .. deprecated:: 1.0
            Attribute `n_features_` was deprecated in version 1.0 and will be
            removed in 1.2. Use `n_features_in_` instead.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    n_categories_ : ndarray of shape (n_features,), dtype=np.int64
        Number of categories for each feature. This value is
        inferred from the data or set by the minimum number of categories.
        .. versionadded:: 0.24

    Examples
    --------
    >>> import numpy as np
    >>> rng = np.random.RandomState(1)
    >>> X = rng.randint(5, size=(6, 100))
    >>> y = np.array([1, 2, 3, 4, 5, 6])
    >>> from sklearn.naive_bayes import CategoricalNB
    >>> clf = CategoricalNB()
    >>> clf.fit(X, y)
    CategoricalNB()
    >>> print(clf.predict(X[2:3]))
    [3]
    """

    def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None,
                 min_categories=None):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior
        self.min_categories = min_categories

    def fit(self, X, y, sample_weight=None):
        """Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features. Here, each feature of X is
            assumed to be from a different categorical distribution.
            It is further assumed that all categories of each feature are
            represented by the numbers 0, ..., n - 1, where n refers to the
            total number of categories for the given feature. This can, for
            instance, be achieved with the help of OrdinalEncoder.
        y : array-like of shape (n_samples,)
            Target values.
        sample_weight : array-like of shape (n_samples,), default=None
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
        """
        return super().fit(X, y, sample_weight=sample_weight)

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once.
        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features. Here, each feature of X is
            assumed to be from a different categorical distribution.
            It is further assumed that all categories of each feature are
            represented by the numbers 0, ..., n - 1, where n refers to the
            total number of categories for the given feature. This can, for
            instance, be achieved with the help of OrdinalEncoder.
        y : array-like of shape (n_samples,)
            Target values.
        classes : array-like of shape (n_classes,), default=None
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like of shape (n_samples,), default=None
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
        """
        return super().partial_fit(X, y, classes,
                                   sample_weight=sample_weight)

    def _more_tags(self):
        return {'requires_positive_X': True}

    def _check_X(self, X):
        """Validate X, used only in predict* methods."""
        # Dense integer input only: category codes index count arrays below.
        X = self._validate_data(X, dtype='int', accept_sparse=False,
                                force_all_finite=True, reset=False)
        check_non_negative(X, "CategoricalNB (input X)")
        return X

    def _check_X_y(self, X, y, reset=True):
        """Validate X and y, used at fit time."""
        X, y = self._validate_data(X, y, dtype='int', accept_sparse=False,
                                   force_all_finite=True, reset=reset)
        check_non_negative(X, "CategoricalNB (input X)")
        return X, y

    def _init_counters(self, n_classes, n_features):
        self.class_count_ = np.zeros(n_classes, dtype=np.float64)
        # One zero-column count array per feature; columns are grown lazily
        # in _count as new categories are observed.
        self.category_count_ = [np.zeros((n_classes, 0))
                                for _ in range(n_features)]

    @staticmethod
    def _validate_n_categories(X, min_categories):
        # rely on max for n_categories categories are encoded between 0...n-1
        n_categories_X = X.max(axis=0) + 1
        min_categories_ = np.array(min_categories)
        if min_categories is not None:
            if not np.issubdtype(min_categories_.dtype, np.signedinteger):
                raise ValueError(
                    f"'min_categories' should have integral type. Got "
                    f"{min_categories_.dtype} instead."
                )
            n_categories_ = np.maximum(n_categories_X,
                                       min_categories_,
                                       dtype=np.int64)
            # NOTE(review): an array-like min_categories of the wrong length
            # will raise a broadcast error in np.maximum above before this
            # friendlier message is reached — confirm intended ordering.
            if n_categories_.shape != n_categories_X.shape:
                raise ValueError(
                    f"'min_categories' should have shape ({X.shape[1]},"
                    f") when an array-like is provided. Got"
                    f" {min_categories_.shape} instead."
                )
            return n_categories_
        else:
            return n_categories_X

    def _count(self, X, Y):
        """Accumulate per-feature (class, category) counts in place."""
        def _update_cat_count_dims(cat_count, highest_feature):
            diff = highest_feature + 1 - cat_count.shape[1]
            if diff > 0:
                # we append a column full of zeros for each new category
                return np.pad(cat_count, [(0, 0), (0, diff)], 'constant')
            return cat_count

        def _update_cat_count(X_feature, Y, cat_count, n_classes):
            for j in range(n_classes):
                mask = Y[:, j].astype(bool)
                # int64 Y means the labels are unweighted one-hot; otherwise
                # the column carries the sample weights.
                if Y.dtype.type == np.int64:
                    weights = None
                else:
                    weights = Y[mask, j]
                counts = np.bincount(X_feature[mask], weights=weights)
                indices = np.nonzero(counts)[0]
                cat_count[j, indices] += counts[indices]

        self.class_count_ += Y.sum(axis=0)
        self.n_categories_ = self._validate_n_categories(
            X, self.min_categories)
        for i in range(self.n_features_in_):
            X_feature = X[:, i]
            self.category_count_[i] = _update_cat_count_dims(
                self.category_count_[i], self.n_categories_[i] - 1)
            _update_cat_count(X_feature, Y,
                              self.category_count_[i],
                              self.class_count_.shape[0])

    def _update_feature_log_prob(self, alpha):
        """Smooth the per-feature counts and compute log P(x_i | y)."""
        feature_log_prob = []
        for i in range(self.n_features_in_):
            smoothed_cat_count = self.category_count_[i] + alpha
            smoothed_class_count = smoothed_cat_count.sum(axis=1)
            feature_log_prob.append(
                np.log(smoothed_cat_count) -
                np.log(smoothed_class_count.reshape(-1, 1)))
        self.feature_log_prob_ = feature_log_prob

    def _joint_log_likelihood(self, X):
        """Sum per-feature log probabilities and add the class log prior."""
        self._check_n_features(X, reset=False)
        jll = np.zeros((X.shape[0], self.class_count_.shape[0]))
        for i in range(self.n_features_in_):
            indices = X[:, i]
            jll += self.feature_log_prob_[i][:, indices].T
        total_ll = jll + self.class_log_prior_
        return total_ll
| kevin-intel/scikit-learn | sklearn/naive_bayes.py | Python | bsd-3-clause | 51,132 | [
"Gaussian"
] | 12595aee76faff77db5b6248f0819da33b1ebb680ecde8169b1ee47a735af6b5 |
# Visual smoke test for agpy.smooth: smoothing a rectangle must not shift
# its position. Figure 11 shows the test image, its smoothed version,
# their difference, and the smoothing kernel.
from agpy import convolve, smooth
from pylab import *

test_image = zeros([512, 512])
# point
test_image[204, 204] = 1
# box
test_image[280:297, 280:297] = 1 / 256.

shiftest_image = zeros([300, 320])
shiftest_image[100:120, 180:220] = 1.0

# print() call form instead of the Python-2-only print statement, so the
# script at least parses (and this line runs) under both Python 2 and 3.
print("Testing for a shift")

figure(11)
clf()
smoothed, kernel = smooth(shiftest_image, return_kernel=True)
subplot(221)
title("shiftest_image")
imshow(shiftest_image)
subplot(222)
title("smoothed")
imshow(smoothed)
subplot(223)
title("shiftest_image-smoothed")
imshow(shiftest_image - smoothed)
subplot(224)
title("kernel")
imshow(kernel)
"""
figure(0)
clf()
smoothed_gp8_sm25 ,kernel_gp8_25 = smooth(test_image,25.5,'gaussian',nwidths=8,return_kernel=True)
smoothed_gp9_sm25 ,kernel_gp9_25 = smooth(test_image,25.5,'gaussian',nwidths=9,return_kernel=True)
smoothed_gp10_sm25,kernel_gp10_25 = smooth(test_image,25.5,'gaussian',nwidths=10,return_kernel=True)
smoothed_gpmax_sm25,kernel_gpmax_25 = smooth(test_image,25.5,'gaussian',nwidths='max',return_kernel=True)
subplot(221)
imshow(log10(smoothed_gp9_sm25))
colorbar()
subplot(222)
imshow(log10(smoothed_gpmax_sm25))
colorbar()
subplot(223)
title("9-max diff")
imshow(log10(abs(smoothed_gp9_sm25-smoothed_gpmax_sm25)))
colorbar()
subplot(224)
title("9-8 diff")
imshow(log10(abs(smoothed_gp9_sm25-smoothed_gp8_sm25)))
colorbar()
print "Location of the maximum pixel (should be the same for each image) and the shape of the kernel (should be even)"
print "original: ",argmax(test_image)
print "gp8: " ,argmax(smoothed_gp8_sm25), kernel_gp8_25.shape
print "gp9: " ,argmax(smoothed_gp9_sm25), kernel_gp9_25.shape
print "gp10: ",argmax(smoothed_gp10_sm25),kernel_gp10_25.shape
print "gpmax: ",argmax(smoothed_gpmax_sm25),kernel_gpmax_25.shape
print "Maximum value of the kernel (should be ~same) and number of pixels equal to the maximum (should be 1)"
print "npeak gp8: ", (kernel_gp8_25.max()) ,sum((kernel_gp8_25.max()) ==kernel_gp8_25)
print "npeak gp9: ", (kernel_gp9_25.max()) ,sum((kernel_gp9_25.max()) ==kernel_gp9_25)
print "npeak gp10: ",(kernel_gp10_25.max()),sum((kernel_gp10_25.max())==kernel_gp10_25)
print "npeak gpmax: ",(kernel_gpmax_25.max()),sum((kernel_gpmax_25.max())==kernel_gpmax_25)
"""
"""
print "\n\nDemonstration that you need to ignore_zeros when padding (figure 10)"
figure(10)
clf()
testimage = ones([10,20]) # make a flat image
testimage[5,5] += 1 # add some contrast
smtestimage = smooth(testimage)
smtestimage_nopsfpad = smooth(testimage,psf_pad=False,force_ignore_zeros_off=True)
smtestimage_nofftpad = smooth(testimage,fft_pad=False,force_ignore_zeros_off=True)
smtestimage_ignorenan = smooth(testimage,interp_nan=True,force_ignore_zeros_off=True)
smtestimage_ignorezeros = smooth(testimage,ignore_zeros=True,force_ignore_zeros_off=True)
smtestimage_noz_nopsfpad = smooth(testimage,psf_pad=False,ignore_zeros=True)
smtestimage_noz_nofftpad = smooth(testimage,fft_pad=False,ignore_zeros=True)
smtestimage_noz_nopad = smooth(testimage,fft_pad=False,psf_pad=False,ignore_zeros=True)
smtestimage_nopad = smooth(testimage,fft_pad=False,psf_pad=False,force_ignore_zeros_off=True)
subplot(331)
title("smtestimage_nopad")
imshow(smtestimage_nopad)
subplot(332)
title("smtestimage (default)")
imshow(smtestimage)
subplot(333)
title("smtestimage_nopsfpad")
imshow(smtestimage_nopsfpad)
subplot(334)
title("smtestimage_nofftpad")
imshow(smtestimage_nofftpad)
subplot(335)
title("smtestimage_ignorenan")
imshow(smtestimage_ignorenan)
subplot(336)
title("smtestimage_ignorezeros")
imshow(smtestimage_ignorezeros)
subplot(337)
title("smtestimage_noz_nopad")
imshow(smtestimage_noz_nopad)
subplot(338)
title("smtestimage_noz_nopsfpad")
imshow(smtestimage_noz_nopsfpad)
subplot(339)
title("smtestimage_noz_nofftpad")
imshow(smtestimage_noz_nofftpad)
"""
"""
for ii,smoothsize in enumerate([10,100]): #20,50,100,128]):
figure(ii+1)
clf()
subplot(221)
imshow(smooth(test_image,smoothsize,'brickwall',silent=False))
title('Brickwall Filter (Airy pattern)')
colorbar()
subplot(222)
imshow(smooth(test_image,smoothsize,'gaussian',silent=False))
title('Gaussian')
colorbar()
subplot(223)
imshow(smooth(test_image,smoothsize,'tophat',silent=False))
title('Tophat')
colorbar()
subplot(224)
imshow(smooth(test_image,smoothsize,'boxcar',silent=False))
title('Boxcar')
colorbar()
print "smoothsize: ",smoothsize
print "brickwall sum: ", smooth(test_image,smoothsize,'brickwall').sum()
print "gaussian sum: ", smooth(test_image,smoothsize,'gaussian').sum()
print "tophat sum: ", smooth(test_image,smoothsize,'tophat').sum()
print "boxcar sum: ", smooth(test_image,smoothsize,'boxcar').sum()
print "nofilter sum: ", test_image.sum()
draw()
"""
| jcesardasilva/agpy | tests/test_smooth.py | Python | mit | 4,734 | [
"Gaussian"
] | ae6385334163293d4a921e9f16ac7aa5f027c7896f0febd01e840a28d945ddc1 |
from __future__ import print_function
__author__ = """Alex "O." Holcombe, Charles Ludowici, """ ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
import time, sys, platform, os
from math import atan, atan2, pi, cos, sin, sqrt, ceil, radians, degrees
import numpy as np
import psychopy, psychopy.info
import copy
from psychopy import visual, sound, monitors, logging, gui, event, core, data
try:
from helpersAOH import accelerateComputer, openMyStimWindow
except Exception as e:
print(e); print('Problem loading helpersAOH. Check that the file helpersAOH.py in the same directory as this file')
print('Current directory is ',os.getcwd())
# Eye-tracking support is opt-in; flip this flag to attempt loading the
# EyeLink wrapper. On any import failure we fall back to no eye tracking.
eyeTracking = False
if eyeTracking:
    try:
        import eyelinkEyetrackerForPsychopySUPA3
    except Exception as e:
        print(e)
        print('Problem loading eyelinkEyetrackerForPsychopySUPA3. Check that the file eyelinkEyetrackerForPsychopySUPA3.py in the same directory as this file')
        print('While a different version of pylink might make your eyetracking code work, your code appears to generally be out of date. Rewrite your eyetracker code based on the SR website examples')
        #Psychopy v1.83.01 broke this, pylink version prevents EyelinkEyetrackerForPsychopySUPA3 stuff from importing. But what really needs to be done is to change eyetracking code to more modern calls, as indicated on SR site
        eyeTracking = False
expname= "dot-jump"
demo = False; exportImages = False
autopilot = False
subject='test'
###############################
### Setup the screen parameters ##############################################################################################
##
# Display configuration, first-run dialog, and refresh-rate verification.
allowGUI = False
units='deg' #'cm'
fullscrn=False
waitBlank=False
if True: #just so I can indent all the below
    refreshRate= 85 *1.0; #160 #set to the framerate of the monitor
    fullscrn=True; #show in small window (0) or full screen (1)
    scrn=True #which screen to display the stimuli. 0 is home screen, 1 is second screen
    # create a dialog from dictionary
    infoFirst = { 'Autopilot':autopilot, 'Check refresh etc':True, 'Use second screen':scrn, 'Fullscreen (timing errors if not)': fullscrn, 'Screen refresh rate': refreshRate }
    OK = gui.DlgFromDict(dictionary=infoFirst,
        title='MOT',
        order=['Autopilot','Check refresh etc', 'Use second screen', 'Screen refresh rate', 'Fullscreen (timing errors if not)'],
        tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating',
            'Use second Screen': ''},
        )
    if not OK.OK:
        print('User cancelled from dialog box'); logging.info('User cancelled from dialog box'); core.quit()
    # Unpack the user's choices back into the run parameters.
    autopilot = infoFirst['Autopilot']
    checkRefreshEtc = infoFirst['Check refresh etc']
    scrn = infoFirst['Use second screen']
    print('scrn = ',scrn, ' from dialog box')
    fullscrn = infoFirst['Fullscreen (timing errors if not)']
    refreshRate = infoFirst['Screen refresh rate']
    #monitor parameters
    widthPix = 1280 #1440 #monitor width in pixels
    heightPix =1024 #900 #monitor height in pixels
    monitorwidth = 40.5 #28.5 #monitor width in centimeters
    viewdist = 55.; #cm
    pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
    bgColor = [-1,-1,-1] #black background
    monitorname = 'testMonitor' # 'mitsubishi' #in psychopy Monitors Center
    mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#fetch the most recent calib for this monitor
    mon.setSizePix( (widthPix,heightPix) )
    myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
    myWin.setRecordFrameIntervals(False)
    trialsPerCondition = 2 #default value
    refreshMsg2 = ''
    if not checkRefreshEtc:
        refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
        refreshRateWrong = False
    else: #checkRefreshEtc
        # Measure the actual refresh rate and compare against the assumed one.
        runInfo = psychopy.info.RunTimeInfo(
                win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
                refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
                verbose=True, ## True means report on everything
                userProcsDetailed=True  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
                )
        print('Finished runInfo- which assesses the refresh and processes of this computer')
        refreshMsg1 = 'Median frames per second ='+ str( np.round(1000./runInfo["windowRefreshTimeMedian_ms"],1) )
        refreshRateTolerancePct = 3
        pctOff = abs( (1000./runInfo["windowRefreshTimeMedian_ms"]-refreshRate) / refreshRate)
        refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
        if refreshRateWrong:
            refreshMsg1 += ' BUT'
            refreshMsg1 += ' program assumes ' + str(refreshRate)
            refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
        else:
            refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
    myWinRes = myWin.size
    myWin.allowGUI =True
    myWin.close() #have to close window to show dialog box
##
### END Setup of the screen parameters ##############################################################################################
####################################
askUserAndConfirmExpParams = True
if autopilot:
    subject = 'autoTest'
###############################
### Ask user exp params ##############################################################################################
## askUserAndConfirmExpParams
# Second dialog: subject code, trial counts, and (in autopilot) the
# simulated-response offsets; also surfaces refresh/resolution warnings.
if askUserAndConfirmExpParams:
    dlgLabelsOrdered = list() #new dialog box
    myDlg = gui.Dlg(title=expname, pos=(200,400))
    if not autopilot:
        myDlg.addField('Subject code :', subject)
        dlgLabelsOrdered.append('subject')
    else:
        myDlg.addField('Subject code :', subject)
        dlgLabelsOrdered.append('subject')
        # Autopilot-only fields controlling the simulated response.
        myDlg.addField('autoPilotTime:', 0, tip='Auto response time relative to cue')
        myDlg.addField('randomTime:',False, tip = 'Add (rounded) gaussian N(0,2) error to time offset?')
        myDlg.addField('autoPilotSpace:',0, tip='Auto response position relative to cue')
        myDlg.addField('randomSpace:',False, tip = 'Add (rounded) gaussian N(0,2) error to space offset?')
        dlgLabelsOrdered.append('autoPilotTime')
        dlgLabelsOrdered.append('randomTime')
        dlgLabelsOrdered.append('autoPilotSpace')
        dlgLabelsOrdered.append('randomSpace')
    myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
    dlgLabelsOrdered.append('trialsPerCondition')
    pctCompletedBreak = 50
    myDlg.addText(refreshMsg1, color='Black')
    if refreshRateWrong:
        myDlg.addText(refreshMsg2, color='Red')
    msgWrongResolution = ''
    if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
        msgWrongResolution = 'Instead of desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels, screen apparently '+ str(myWinRes[0])+ 'x'+ str(myWinRes[1])
        myDlg.addText(msgWrongResolution, color='Red')
        print(msgWrongResolution); logging.info(msgWrongResolution)
    myDlg.addText('Note: to abort press ESC at response time', color='DimGrey') #works in PsychoPy1.84
    #myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) #color names not working for some pre-1.84 versions
    myDlg.show()
    if myDlg.OK: #unpack information from dialogue box
        thisInfo = myDlg.data #this will be a list of data returned from each field added in order
        name=thisInfo[dlgLabelsOrdered.index('subject')]
        if len(name) > 0: #if entered something
            subject = name #change subject default name to what user entered
        trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
        print('trialsPerCondition=',trialsPerCondition)
        logging.info('trialsPerCondition ='+str(trialsPerCondition))
        if autopilot:
            autoSpace = thisInfo[dlgLabelsOrdered.index('autoPilotSpace')]
            autoTime = thisInfo[dlgLabelsOrdered.index('autoPilotTime')]
            randomTime = thisInfo[dlgLabelsOrdered.index('randomTime')]
            randomSpace = thisInfo[dlgLabelsOrdered.index('randomSpace')]
    else:
        print('User cancelled from dialog box.'); logging.info('User cancelled from dialog box')
        logging.flush()
        core.quit()
### Ask user exp params
## END askUserAndConfirmExpParams ###############################
##############################################################################################
# Data directory, log file, and experiment-code snapshot for this session.
if os.path.isdir('.'+os.sep+'dataRaw'):
    dataDir='dataRaw'
else:
    msg= 'dataRaw directory does not exist, so saving data in present working directory'
    print(msg); logging.info(msg)
    dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
fileNameWithPath = dataDir+os.sep+subject+ '_' + expname+timeAndDateStr
if not demo and not exportImages:
    # Archive the exact script that produced this subject's data.
    # NOTE(review): 'cp' makes this Unix-only — confirm target platforms.
    saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileNameWithPath + '.py'
    os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
    logF = logging.LogFile(fileNameWithPath+'.log',
        filemode='w',#if you set this to 'a' it will append instead of overwriting
        level=logging.INFO)#info, data, warnings, and errors will be sent to this logfile
if demo or exportImages:
    logging.console.setLevel(logging.ERROR) #only show this level's and higher messages
logging.console.setLevel(logging.WARNING) #DEBUG means set the console to receive nearly all messges, INFO is for everything else, INFO, EXP, DATA, WARNING and ERROR
if refreshRateWrong:
    logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
# Threshold (ms) above which a frame is considered dropped/long.
longerThanRefreshTolerance = 0.27
longFrameLimit = round(1000./refreshRate*(1.0+longerThanRefreshTolerance),3) # round(1000/refreshRate*1.5,2)
msg = 'longFrameLimit='+ str(longFrameLimit) +' Recording trials where one or more interframe interval exceeded this figure '
logging.info(msg); print(msg)
if msgWrongResolution != '':
    logging.error(msgWrongResolution)
# Re-open the stimulus window (it was closed to show the dialogs above).
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
runInfo = psychopy.info.RunTimeInfo(
        win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
        refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
        verbose=True, ## True means report on everything
        userProcsDetailed=True  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
        )
msg = 'second window opening runInfo mean ms='+ str( runInfo["windowRefreshTimeAvg_ms"] )
logging.info(msg); print(msg)
logging.info(runInfo)
logging.info('gammaGrid='+str(mon.getGammaGrid()))
logging.info('linearizeMethod='+str(mon.getLinearizeMethod()))
####Functions. Save time by automating processes like stimulus creation and ordering
############################################################################
def oneFrameOfStim(n, itemFrames, SOAFrames, cueFrames, cuePos, trialObjects):
    """Draw whatever belongs on frame `n` of the stimulus stream.

    Each item in `trialObjects` owns one SOA-long slot of the stream; the item
    is visible for the first `itemFrames` frames of its slot. While the cue
    window is active the cue is drawn on top of the current item.
    Always returns True.
    """
    cueOnset = cuePos * SOAFrames        # first frame on which the cue appears
    cueOffset = cueOnset + cueFrames     # first frame after the cue disappears
    # Floored quotient selects which item's SOA slot this frame falls in.
    currentItem = trialObjects[n // SOAFrames]
    if n % SOAFrames < itemFrames:       # within the item's on-period of its slot
        myWin.color = bgColor
        currentItem.draw()
        if cueOnset <= n < cueOffset:
            cue.draw()                   # cue drawn after (on top of) the item
    return True
#objects: Stimuli to display or
#cue: cue stimulus or stimuli
#timing parameters: Could be item duration, soa and isi. i.e. if SOA+Duration % n == 0: stimulus.setColor(stimulusColor)
#bgColor and stimulusColor: if displaying and hiding stimuli, i.e. for RSVP
#movementVector: direction and distance of movement if moving stimuli
def oneTrial(stimuli):
    """Run one trial: shuffle the dots into a random temporal order and play
    the stream, collecting a frame timestamp after every flip.

    Returns (True, shuffledStimuli, dotOrder, frameTimes) where `dotOrder`
    maps stream position -> original spatial index.
    """
    order = np.arange(len(stimuli))
    np.random.shuffle(order)
    print(order)
    shuffled = [stimuli[idx] for idx in order]
    frameTimes = []
    # Double flip: sync to the raster (unless not in blocking mode) and give
    # the CPU a chance to finish other tasks.
    myWin.flip(); myWin.flip()
    startTime = trialClock.getTime()
    for frameN in range(trialFrames):
        fixation.draw()
        oneFrameOfStim(frameN, itemFrames, SOAFrames, cueFrames, cuePos, shuffled)
        myWin.flip()
        frameTimes.append(trialClock.getTime() - startTime)
    return True, shuffled, order, frameTimes
def getResponse(trialStimuli):
    """Collect the response for one trial, either simulated (autopilot) or by
    mouse click on the dot the participant believes was cued.

    Returns (accuracy, selectedItem, expStop, mousePos). `expStop` becomes
    True when the participant presses space/escape to cancel the experiment.
    """
    if autopilot:
        # Temporal position of the simulated response within the stream.
        autoResponseIdx = cuePos + autoTime
        if randomTime:
            autoResponseIdx += int(round( np.random.normal(0,2) ))
        # ROBUSTNESS: wrap in case temporal jitter pushed the index past the
        # end of the stream (would previously raise IndexError).
        autoResponseIdx %= len(trialStimuli)
        itemAtTemporalSelection = trialStimuli[autoResponseIdx]
        # Serial spatial position of that item among the unshuffled stimuli,
        # so the spatial offset can be applied relative to it.
        unshuffledPositions = [dot.pos.tolist() for dot in stimuli]
        itemSpatial = unshuffledPositions.index(itemAtTemporalSelection.pos.tolist())
        itemSpatial = itemSpatial + autoSpace
        if randomSpace:
            itemSpatial += int(round( np.random.normal(0,2) ))
        # BUGFIX: wrap with modulo nDots. The old loop subtracted 23 (nDots-1)
        # while valid indices are 0..23, so index 24 wrapped to 1 instead of 0.
        itemSpatial = itemSpatial % nDots
        # Check that the item selected in space is the cued item in time; with
        # zero offsets this equals cuePos.
        selectionTemporal = trialStimuli.index(stimuli[itemSpatial])
        accuracy = cuePos == selectionTemporal
        mousePos = (stimuli[itemSpatial].pos[0], stimuli[itemSpatial].pos[1])
        expStop = False
        item = stimuli[itemSpatial]
        return accuracy, item, expStop, mousePos
    else:
        myMouse = event.Mouse(visible = False, win=myWin)
        responded = False
        expStop = False
        event.clearEvents()
        mousePos = (1e6, 1e6)
        escape = event.getKeys()  # flush any keypresses made during the trial
        myMouse.setPos((0,0))
        myMouse.setVisible(True)
        while not responded:
            for item in trialStimuli:
                item.draw()
            instruction.draw()
            myWin.flip()
            button = myMouse.getPressed()
            mousePos = myMouse.getPos()
            escapeKey = event.getKeys()
            if button[0]:
                print('click detected')
                responded = True
                print('getResponse mousePos:', mousePos)
            elif len(escapeKey) > 0:
                # BUGFIX: PsychoPy returns lowercase key names ('escape'), so
                # the old comparison against 'ESCAPE' could never match.
                if escapeKey[0] in ('space', 'escape', 'ESCAPE'):
                    expStop = True
                    responded = True
                    return False, np.random.choice(trialStimuli), expStop, (0,0)
        # Pick the stimulus nearest to the click.
        clickDistances = []
        for item in trialStimuli:
            dx = mousePos[0] - item.pos[0]
            dy = mousePos[1] - item.pos[1]
            clickDistances.append(sqrt(dx**2 + dy**2))
        if not expStop:
            minDistanceIdx = clickDistances.index(min(clickDistances))
            accuracy = minDistanceIdx == cuePos
            item = trialStimuli[minDistanceIdx]
            myMouse.setVisible(False)
            return accuracy, item, expStop, mousePos
def drawStimuli(nDots, radius, center, stimulusObject, sameEachTime = True):
    """Lay out nDots stimuli evenly spaced around a circle.

    nDots: number of positions around the circle
    radius: circle radius, in window units
    center: (x, y) coordinates of the circle's centre
    stimulusObject: a single stimulus (shallow-copied per position) when
        sameEachTime is True, otherwise a list/tuple of exactly nDots stimuli
    sameEachTime: if True, every position receives a copy of stimulusObject

    Returns the list of positioned stimuli, or None (after printing a message)
    on bad arguments.
    """
    if len(center) != 2:
        print('Center coords must be list of length 2')
        return None
    if not sameEachTime and not isinstance(stimulusObject, (list, tuple)):
        print('You want different objects in each position, but your stimuli is not a list or tuple')
        return None
    if not sameEachTime and isinstance(stimulusObject, (list, tuple)) and len(stimulusObject)!=nDots:
        print('You want different objects in each position, but the number of positions does not equal the number of items')
        return None
    spacing = 360./nDots
    stimuli = []
    for dot in range(nDots):
        angle = dot*spacing
        # Use exact coordinates for multiples of 90 deg, because floating point
        # trig gives e.g. cos(pi/2) = 6.123e-17 rather than exactly 0.
        if angle == 0:
            xpos, ypos = radius, 0
        elif angle == 90:
            xpos, ypos = 0, radius
        elif angle == 180:
            xpos, ypos = -radius, 0
        elif angle == 270:
            xpos, ypos = 0, -radius
        else:
            xpos = radius*cos(radians(angle))
            ypos = radius*sin(radians(angle))
        if sameEachTime:
            stim = copy.copy(stimulusObject)
        else:
            stim = stimulusObject[dot]
        # BUGFIX: `center` was validated but never applied; offset every dot by
        # it. Backward compatible: the existing call site passes center=(0,0).
        stim.pos = (center[0] + xpos, center[1] + ypos)
        stimuli.append(stim)
    return stimuli
def checkTiming(ts):
    """Count how many interframe intervals in `ts` (cumulative times, s)
    exceeded the tolerance over the nominal frame duration; print a warning
    when any did, and return the count."""
    frameTimeTolerance = .3  #proportion longer than refreshRate that will not count as a miss
    intervalsMs = np.diff(ts) * 1000
    limitMs = np.round(1000/refreshRate*(1.0+frameTimeTolerance), 2)
    longIdxs = np.where(intervalsMs > limitMs)[0]
    nLong = len(longIdxs)
    if nLong:
        print(nLong, 'frames of', trialFrames, 'were longer than', str(1000/refreshRate*(1.0+frameTimeTolerance)))
    return nLong
##Set up stimuli
# One template circle; drawStimuli() copies it to every position on the ring.
stimulus = visual.Circle(myWin, radius = .2, fillColor = (1,1,1) )
nDots = 24
radius = 4
center = (0,0)
sameEachTime = True
#(nDots, radius, center, stimulusObject, sameEachTime = True)
stimuli = drawStimuli(nDots, radius, center, stimulus, sameEachTime)
#print(stimuli)
#print('length of stimuli object', len(stimuli))
######Create visual objects, noise masks, response prompts etc. ###########
######Draw your stimuli here if they don't change across trials, but other parameters do (like timing or distance)
######If you want to automate your stimuli. Do it in a function below and save clutter.
######For instance, maybe you want random pairs of letters. Write a function!
###########################################################################
fixSize = .1
fixation= visual.Circle(myWin, radius = fixSize , fillColor = (1,1,1), units=units)
# The cue is an outline ring drawn just outside the dot circle.
cue = visual.Circle(myWin, radius = radius + 2, fillColor = None, lineColor = (1,1,1), units = units)
instruction = visual.TextStim(myWin,pos=(0, radius+1),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units=units)
instructionText = 'Click the dot that was on screen with the cue.'
###Trial timing parameters (all in milliseconds, converted to frames below)
SOAMS = 12
itemMS = 12
ISIMS = SOAMS - itemMS
trialMS = SOAMS * nDots
cueMS = itemMS
# Convert the millisecond parameters to whole frames at the measured refresh rate.
SOAFrames = int(np.floor(SOAMS/(1000./refreshRate)))
itemFrames = int(np.floor(itemMS/(1000./refreshRate)))
ISIFrames = int(np.floor(ISIMS/(1000./refreshRate)))
trialFrames = int(nDots*SOAFrames)
cueFrames = int(np.floor(cueMS/(1000./refreshRate)))
print('cueFrames=',cueFrames)
print('itemFrames=',itemFrames)
print('refreshRate =', refreshRate)
print('cueMS from frames =', cueFrames*(1000./refreshRate))
print('num of SOAs in the trial:', trialFrames/SOAFrames)
##Factorial design
numResponsesPerTrial = 1 #default. Used to create headers for dataFile
stimList = []
#cuePositions = [dot for dot in range(nDots) if dot not in [0,nDots-1]]
# Only one cue position is currently used.
cuePositions = [10]
print('cuePositions: ',cuePositions)
#cuePositions = cuePositions[2:(nDots-3)] #drop the first and final two dots
#Set up the factorial design (list of all conditions)
for cuePos in cuePositions:
    stimList.append({'cuePos':cuePos})
trials = data.TrialHandler(stimList, nReps = trialsPerCondition)
#print(trials)
####Create output file###
#########################################################################
dataFile = open(fileNameWithPath + '.txt', 'w')
numResponsesPerTrial = 1
#headers for initial datafile rows, they don't get repeated. These appear in the file in the order they appear here.
oneOffHeaders = [
    'subject',
    'task',
    'staircase',
    'trialNum'
    ]
for header in oneOffHeaders:
    print(header, '\t', end='', file=dataFile)
#Headers for duplicated datafile rows. These are repeated using numResponsesPerTrial. For instance, we might have two responses in a trial.
duplicatedHeaders = [
    'responseSpatialPos',
    'responseX',
    'responseY',
    'correctX',
    'correctY',
    'clickX',
    'clickY',
    'accuracy',
    'responsePosInStream',
    'correctPosInStream'
    ]
if numResponsesPerTrial == 1:
    for header in duplicatedHeaders:
        print(header, '\t', end='', file=dataFile)
elif numResponsesPerTrial > 1:
    # Suffix each repeated header with the response number.
    for response in range(numResponsesPerTrial):
        for header in duplicatedHeaders:
            print(header+str(response), '\t', end='', file=dataFile)
# One column per dot for the (shifted) stream order of that trial.
for pos in range(nDots):
    print('position'+str(pos),'\t',end='',file=dataFile)
#Headers done. Do a new line
print('longFrames',file=dataFile)
expStop = False
trialNum=0; numTrialsCorrect=0; expStop=False; framesSaved=0;
print('Starting experiment of',trials.nTotal,'trials. Current trial is trial ',trialNum)
#NextRemindCountText.setText( str(trialNum) + ' of ' + str(trials.nTotal) )
#NextRemindCountText.draw()
myWin.flip()
#end of header
trialClock = core.Clock()
stimClock = core.Clock()
if eyeTracking:
    if getEyeTrackingFileFromEyetrackingMachineAtEndOfExperiment:
        eyeMoveFile=('EyeTrack_'+subject+'_'+timeAndDateStr+'.EDF')
    tracker=Tracker_EyeLink(myWin,trialClock,subject,1, 'HV5',(255,255,255),(0,0,0),False,(widthPix,heightPix))
# Main trial loop: run until all trials are done or the participant cancels.
while trialNum < trials.nTotal and expStop==False:
    if trials.nTotal > 100 and trialNum > 0:
        # NOTE(review): float-modulo equality is only exactly 0 at a few
        # values, so this progress message will rarely trigger — confirm intent.
        if(float(trialNum)/trials.nTotal)%.25 == 0:
            instruction.text = instructionText+ '\n You have completed ' + str(trialNum) + ' of ' + str(trials.nTotal) + ' trials.'
    else:
        instruction.text = instructionText
    fixation.draw()
    myWin.flip()
    if not autopilot:
        core.wait(1)  # brief fixation period before the stream starts
    trial = trials.next()
    # print('trial idx is',trials.thisIndex)
    cuePos = trial.cuePos
    # print(cuePos)
    print("Doing trialNum",trialNum)
    trialDone, trialStimuli, trialStimuliOrder, ts = oneTrial(stimuli)
    #Shift positions so that the list starts at 1, which is positioned at (0,radius), and increases clockwise. This is what the MM code expects
    MMPositions = list() #Mixture modelling positions
    for dotPos in trialStimuliOrder:
        if dotPos < (nDots/4):
            MMPositions.append(dotPos + 19)
        elif dotPos >= (nDots/4):
            MMPositions.append(dotPos -5)
    nBlips = checkTiming(ts)
    # print(trialStimuliOrder)
    if trialDone:
        accuracy, response, expStop, clickPos = getResponse(trialStimuli)
        responseCoord = response.pos.tolist()
        spatialRelativeToXAxis = [item.pos.tolist() for item in stimuli]
        try:
            responseSpatialRelativeToXAxis = spatialRelativeToXAxis.index(responseCoord)
        except ValueError:
            # NOTE(review): if this fires, responseSpatialRelativeToXAxis is
            # left unbound and the comparison below raises NameError — confirm
            # whether this case can actually occur.
            print('coord not in list')
        # Apply the same +19 / -5 shift used for MMPositions above.
        if responseSpatialRelativeToXAxis < (nDots/4):
            responseSpatial = responseSpatialRelativeToXAxis + 19
        elif responseSpatialRelativeToXAxis >= (nDots/4):
            responseSpatial = responseSpatialRelativeToXAxis - 5
        trialPositions = [item.pos.tolist() for item in trialStimuli]
        responseTemporal = trialPositions.index(responseCoord)
        # print('trial positions in sequence:',trialPositions)
        # print('position of item nearest to click:',responseSpatial)
        # print('Position in sequence of item nearest to click:',responseTemporal)
        correctSpatial = trialStimuli[cuePos].pos
        correctTemporal = cuePos
        # One tab-separated data row per trial (column order matches the headers).
        print(subject,'\t',
            'dot-jump','\t',
            'False','\t',
            trialNum,'\t',
            responseSpatial,'\t',
            responseCoord[0],'\t',
            responseCoord[1],'\t',
            correctSpatial[0],'\t',
            correctSpatial[1],'\t',
            clickPos[0],'\t',
            clickPos[1],'\t',
            accuracy,'\t',
            responseTemporal,'\t',
            correctTemporal,'\t',
            end='',
            file = dataFile
            )
        for dot in range(nDots):
            print(MMPositions[dot], '\t',end='', file=dataFile)
        print(nBlips, file=dataFile)
    # Advance and flush once per loop iteration (also avoids an infinite loop
    # if a trial ever reports not done).
    trialNum += 1
    dataFile.flush()
if expStop:
    print('Participant cancelled experiment on trial', trialNum)
dataFile.flush()
| alexholcombe/dot-jump | dataRaw/Fixed Cue/test_dot-jump25Oct2016_11-42.py | Python | gpl-3.0 | 25,593 | [
"Gaussian"
] | cbf37047c5f8f000d1743cf8e827eb24d1eca671cb253509fccb5c3b67c6fcd8 |
# Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2003 Bryce "Zooko" Wilcox-O'Hearn
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
__revision__ = "$Id: Conversation.py,v 1.19 2003/03/09 18:54:57 zooko Exp $"
# Python Standard Library modules
import threading
import types
import traceback
import sys
from sha import sha
from time import time
import exceptions
import socket
import os
import types
from traceback import print_exc
# pyutil modules
from pyutil import Cache
from pyutil import DoQ, nummedobj
from pyutil.debugprint import debugprint, debugstream
from pyutil.humanreadable import hr
from pyutil import assertutil
assertutil.hr = hr
from pyutil.assertutil import _assert, precondition, postcondition
# EGTP modules
from egtp.CommHints import HINT_EXPECT_RESPONSE, HINT_EXPECT_MORE_TRANSACTIONS, HINT_EXPECT_NO_MORE_COMMS, HINT_EXPECT_TO_RESPOND, HINT_THIS_IS_A_RESPONSE, HINT_NO_HINT
from egtp import CommStrat, MojoMessage
from egtp import idlib, mencode, mojosixbit, std
from egtp.crypto import modval, randsource
import MojoTransaction
# Python 2-era boolean aliases (this module predates widespread use of the
# bool builtin).
true = 1
false = 0
# Response-status codes stored in ConversationManager._map_inmsgid_to_info.
HANDLED = 0
EXPECTING_RESPONSE = 1
# NOTE(review): appears unused in this chunk — confirm before removing.
TUNING_FACTOR=float(2**8)
def is_mojo_message(thingie):
    """Return a true value iff `thingie` looks like a decoded Mojo message.

    A Mojo message is a dict carrying a 'mojo header' and/or 'mojo message' key.
    """
    # `in` instead of the deprecated dict.has_key(), and isinstance() instead
    # of a type() identity test; both forms work on Python 2.2+ and Python 3.
    return isinstance(thingie, dict) and (('mojo header' in thingie) or ('mojo message' in thingie))
class DebugDiagLoop(DoQ.DoQLoop, nummedobj.NummedObj):
    """Debug-only DoQ loop that polls a ConversationManager every 60 seconds
    and logs a line whenever one of its internal tables reaches a new maximum
    size (high-water marks).

    NOTE(review): ConversationManager stores its callback table as
    `self.__callback_functions`, which name-mangles to
    `_ConversationManager__callback_functions`; the `self.cm._callback_functions`
    accesses below therefore look like they would raise AttributeError at
    runtime — confirm against the version of ConversationManager this was
    written for.
    """
    def __init__(self, cm):
        # High-water marks for each monitored table.
        self.maxcbs = 0    # callback functions
        self.maxpcbs = 0   # post-timeout callback functions
        self.maxoms = 0    # NOTE(review): presumably "outstanding messages"; see note below
        self.maxli2i = 0   # inmsgid -> info map
        self.maxfps = 0    # counterparty -> freshness proof cache
        DoQ.DoQLoop.__init__(self)
        nummedobj.NummedObj.__init__(self)
        self.cm = cm
        self.schedule_event()
    def event(self):
        # Each branch logs only when its table has grown past its previous max.
        if len(self.cm._callback_functions) > self.maxcbs:
            self.maxcbs = len(self.cm._callback_functions)
            debugprint("%s.event(): new max len(self.cm._callback_functions): %s, len(self.cm._posttimeout_callback_functions): %s, len(self.cm._map_inmsgid_to_info): %s, len(self.cm._map_cid_to_freshness_proof): %s\n", args=(self, len(self.cm._callback_functions), len(self.cm._posttimeout_callback_functions), len(self.cm._map_inmsgid_to_info), len(self.cm._map_cid_to_freshness_proof),), v=3, vs="Conversation")
        if len(self.cm._posttimeout_callback_functions) > self.maxpcbs:
            self.maxpcbs = len(self.cm._posttimeout_callback_functions)
            debugprint("%s.event(): len(self.cm._callback_functions): %s, new max len(self.cm._posttimeout_callback_functions): %s, len(self.cm._map_inmsgid_to_info): %s, len(self.cm._map_cid_to_freshness_proof): %s\n", args=(self, len(self.cm._callback_functions), len(self.cm._posttimeout_callback_functions), len(self.cm._map_inmsgid_to_info), len(self.cm._map_cid_to_freshness_proof),), v=3, vs="Conversation")
        # NOTE(review): this third check re-tests _callback_functions against
        # maxoms — it looks like a copy-paste remnant that was meant to watch a
        # different table; confirm before changing.
        if len(self.cm._callback_functions) > self.maxoms:
            self.maxoms = len(self.cm._callback_functions)
            debugprint("%s.event(): len(self.cm._callback_functions): %s, len(self.cm._posttimeout_callback_functions): %s, new max len(self.cm._callback_functions): %s, len(self.cm._map_inmsgid_to_info): %s, len(self.cm._map_cid_to_freshness_proof): %s\n", args=(self, len(self.cm._callback_functions), len(self.cm._posttimeout_callback_functions), len(self.cm._callback_functions), len(self.cm._map_inmsgid_to_info), len(self.cm._map_cid_to_freshness_proof),), v=3, vs="Conversation")
        if len(self.cm._map_inmsgid_to_info) > self.maxli2i:
            self.maxli2i = len(self.cm._map_inmsgid_to_info)
            debugprint("%s.event(): len(self.cm._callback_functions): %s, len(self.cm._posttimeout_callback_functions): %s, new max len(self.cm._map_inmsgid_to_info): %s, len(self.cm._map_cid_to_freshness_proof): %s\n", args=(self, len(self.cm._callback_functions), len(self.cm._posttimeout_callback_functions), len(self.cm._map_inmsgid_to_info), len(self.cm._map_cid_to_freshness_proof),), v=3, vs="Conversation")
        if len(self.cm._map_cid_to_freshness_proof) > self.maxfps:
            self.maxfps = len(self.cm._map_cid_to_freshness_proof)
            debugprint("%s.event(): len(self.cm._callback_functions): %s, len(self.cm._posttimeout_callback_functions): %s, len(self.cm._map_inmsgid_to_info): %s, new max len(self.cm._map_cid_to_freshness_proof): %s\n", args=(self, len(self.cm._callback_functions), len(self.cm._posttimeout_callback_functions), len(self.cm._map_inmsgid_to_info), len(self.cm._map_cid_to_freshness_proof),), v=3, vs="Conversation")
        self.schedule_event(delay=60)
class ConversationManager:
    def __init__(self, MTM):
        """Create a conversation manager bound to the given MojoTransaction
        manager (`MTM`), which is used to dispatch handlers and send replies."""
        self._MTM = MTM
        # maps first message ids to (counterparty_id, callback function, notes for callback, conversation type, post_timeout_callback_func, timeoutcheckerschedtime,)
        self.__callback_functions = {}
        # maps message id's of messages who's responses have timed out to (recipient_id, conversation type, time of timeout, post timeout callback function, notes [only if post timeout callback function])
        # Reminder: this indirectly holds references to the original outgoing message body as well as the metainfo used to send it. That is often large.
        self._posttimeout_callback_functions = Cache.LRUCache(maxsize=128)
        # maps message id to (binary counterparty_id, message type, response status)
        # where status is HANDLED or EXPECTING_RESPONSE
        # This data structure is used between receiving a specific initiating message and
        # sending the response to it. It is not used for incoming response messages.
        self._map_inmsgid_to_info = {}
        # This is a map from counterparty_id to the binary msgId of the last message you
        # received from that counterparty. We use the msgId in outgoing messages, thus proving
        # to the counterparty that our messages are fresh (i.e., they are not replay attacks or
        # other sneaky trickery). Actually our current software does not check the incoming
        # freshness proofs, but this code ensures that when we ship a new, replay-attack-proof
        # version which _does_ verify freshness proofs, then older apps which are running _this_
        # version of our software will be able to interoperate with it.
        self._map_cid_to_freshness_proof = Cache.LRUCache(maxsize=128)
        # Python 2 long literal; used only in (commented-out) debugging code.
        self._in_message_num = 0L # used only in debugging
def shutdown(self):
debugprint("self._map_inmsgid_to_info: %s\n", args=(self._map_inmsgid_to_info,), v=6, vs="debug")
self.__callback_functions = {}
self._posttimeout_callback_functions.clear()
self._map_inmsgid_to_info = {}
self._map_cid_to_freshness_proof.clear()
    def initiate_and_return_first_message(self, counterparty_id, conversationtype, firstmsgbody, outcome_func, timeout = 300, notes = None, mymetainfo=None, post_timeout_outcome_func=None):
        """
        Build the first message of a new conversation and (if a response is
        expected) register callbacks and a timeout for it.

        @param outcome_func: called with the response body (or failure); if
            None, no response tracking is set up at all
        @param timeout: seconds to wait before failing the conversation with
            reason 'timeout'

        @precondition: `counterparty_id' must be an id.: idlib.is_sloppy_id(counterparty_id): "id: %s" % hr(id)

        @return: a tuple of (message_id, binary_message_string)
        """
        assert idlib.is_sloppy_id(counterparty_id), "precondition: `counterparty_id' must be an id." + " -- " + "id: %s" % hr(id)
        counterparty_id = idlib.canonicalize(counterparty_id, "broker")
        # Our `freshness proof' is the hash of the last message that we saw from this counterparty.
        # If we don't have such a last message, we leave it blank.
        # Currently, Mojo Nation brokers accept any freshnessproof and don't check to be
        # sure that it is really a proof of freshness, so currently either of these cases
        # will work. In the future Mojo Nation brokers will start rejecting messages with
        # blank freshnessproofs and insisting on a hash of a recent message. This case is
        # handled (even in the current version: v0.920) with a special handler inside
        # MojoTransaction. --Zooko 2000-09-27
        # XXXX Zooko: add the freshness challenge handler inside MojoTransaction! --Zooko 2000-09-29
        message = MojoMessage.makeInitialMessage(msgtype=conversationtype, msgbody=firstmsgbody, recipient_id=idlib.to_binary(counterparty_id), nonce=idlib.new_random_uniq(), freshnessproof=self._map_cid_to_freshness_proof.get(counterparty_id), mymetainfo=mymetainfo)
        msgId = idlib.make_id(message, 'msg')
        # only keep track of the message details if we have a response message handler
        if outcome_func:
            # Schedule a timeout checker; its handle is stored so it can be
            # cancelled when the response arrives.
            timeoutcheckerschedtime = DoQ.doq.add_task(self.fail_conversation, kwargs={'msgId': msgId, 'failure_reason': 'timeout', 'istimeout': 1}, delay=timeout)
            self.__callback_functions[msgId] = (counterparty_id, outcome_func, notes, conversationtype, post_timeout_outcome_func, timeoutcheckerschedtime ,)
        return msgId, message
    def fail_conversation(self, msgId, failure_reason='generic failure', istimeout=0):
        """
        Abort the outstanding conversation started by `msgId` and invoke its
        outcome callback with `failure_reason`.

        If `istimeout` is true and a post-timeout callback was registered, the
        conversation is remembered in `_posttimeout_callback_functions` so a
        late-arriving response can still be delivered.
        """
        initial = self.__callback_functions.get(msgId)
        if initial is None:
            # This means that a response has been received, so it shouldn't fail.
            return
        del self.__callback_functions[msgId]
        (recipient_id, callback_function, notes, conversationtype, post_timeout_callback_function, timeoutcheckerschedtime,) = initial
        if not istimeout:
            # only include these in the failed_conversation map for later calling if it was a timeout
            post_timeout_callback_function = None
        if post_timeout_callback_function is None:
            # only needed for post timeout callback functions
            post_timeout_notes = None
        else:
            post_timeout_notes = notes
        self._posttimeout_callback_functions[msgId] = (recipient_id, conversationtype, time(), post_timeout_callback_function, post_timeout_notes,)
        # Deliver the failure on the DoQ thread, directly if we're already on it.
        if DoQ.doq.is_currently_doq():
            callback_function(failure_reason=failure_reason, notes=notes)
        else:
            DoQ.doq.add_task(callback_function, kwargs = {'failure_reason': failure_reason, 'notes': notes})
def is_unsatisfied_message(self, msgId):
"""
@return: `true' if and only if the msg identified by `msgId' has been sent out to a
counterparty, and it is a message to which we expect a response, and we have not
yet received a response, and we have not yet timed out and given up hope of
getting a response
"""
return self.__callback_functions.has_key(msgId)
def drop_request_state(self, firstmsgId):
"""
cleanup all internal state about received request firstmsgId
@precondition firstmsgId is required to be a canonical id.: idlib.is_id(firstmsgId)
@precondition firstmsgId is required to me the id of a message about which we are currently holding state.: self._map_inmsgid_to_info.has_key(firstmsgId)
"""
assert idlib.is_id(firstmsgId), "precondition: " + "firstmsgId is required to be a canonical id."
assert self._map_inmsgid_to_info.has_key(firstmsgId), "precondition: " + "firstmsgId is required to me the id of a message about which we are currently holding state."
del self._map_inmsgid_to_info[firstmsgId]
    def send_response(self, prevmsgId, msgbody, mymetainfo=None, hint=HINT_NO_HINT):
        """
        Send the reply to a previously received initiating message, dropping
        the pending-request record for it.

        @param msgbody: the message body to be sent back

        @precondition: `prevmsgId' must be a binary id.: idlib.is_binary_id(prevmsgId): "prevmsgId: %s" % hr(prevmsgId)
        @precondition: `msgbody' must be either None or else the full msg dict, containing either a "mojo header" subdict or a "mojo message" subdict or both.: (not msgbody) or is_mojo_message(msgbody): "msgbody: %s" % hr(msgbody)
        @precondition: internal1: self._map_inmsgid_to_info.get(prevmsgId) is not None: "prevmsgId: %s" % hr(prevmsgId)
        @precondition: internal2: (type(self._map_inmsgid_to_info.get(prevmsgId)) in (types.TupleType, types.ListType)): "self._map_inmsgid_to_info.get(prevmsgId): %s :: %s" % (hr(self._map_inmsgid_to_info.get(prevmsgId)), hr(type(self._map_inmsgid_to_info.get(prevmsgId))))
        @precondition: internal3: self._map_inmsgid_to_info.get(prevmsgId)[2] == EXPECTING_RESPONSE: "self._map_inmsgid_to_info.get(prevmsgId): %s" % hr(self._map_inmsgid_to_info.get(prevmsgId))
        @precondition: internal4: idlib.is_binary_id(self._map_inmsgid_to_info.get(prevmsgId)[0]): "self._map_inmsgid_to_info.get(prevmsgId)[0]: %s :: %s" % (hr(self._map_inmsgid_to_info.get(prevmsgId)[0]), hr(type(self._map_inmsgid_to_info.get(prevmsgId)[0])))
        """
        assert idlib.is_binary_id(prevmsgId), "precondition: `prevmsgId' must be a binary id." + " -- " + "prevmsgId: %s" % hr(prevmsgId)
        assert (not msgbody) or is_mojo_message(msgbody), "precondition: `msgbody' must be either None or else the full msg dict, containing either a \"mojo header\" subdict or a \"mojo message\" subdict or both." + " -- " + "msgbody: %s" % hr(msgbody)
        assert self._map_inmsgid_to_info.get(prevmsgId) is not None, "precondition: internal1" + " -- " + "prevmsgId: %s" % hr(prevmsgId)
        assert (type(self._map_inmsgid_to_info.get(prevmsgId)) in (types.TupleType, types.ListType)), "precondition: internal2" + " -- " + "self._map_inmsgid_to_info.get(prevmsgId): %s :: %s" % (hr(self._map_inmsgid_to_info.get(prevmsgId)), hr(type(self._map_inmsgid_to_info.get(prevmsgId))))
        assert self._map_inmsgid_to_info.get(prevmsgId)[2] == EXPECTING_RESPONSE, "precondition: internal3" + " -- " + "self._map_inmsgid_to_info.get(prevmsgId): %s" % hr(self._map_inmsgid_to_info.get(prevmsgId))
        assert idlib.is_binary_id(self._map_inmsgid_to_info.get(prevmsgId)[0]), "precondition: internal4" + " -- " + "self._map_inmsgid_to_info.get(prevmsgId)[0]: %s :: %s" % (hr(self._map_inmsgid_to_info.get(prevmsgId)[0]), hr(type(self._map_inmsgid_to_info.get(prevmsgId)[0])))
        counterparty_id, inmsgtype, status = self._map_inmsgid_to_info.get(prevmsgId)
        assert idlib.is_binary_id(counterparty_id), "`counterparty_id' must be a binary id." + " -- " + "counterparty_id: %s" % hr(counterparty_id)
        # Forget the pending-request record before sending the reply.
        self.drop_request_state(prevmsgId)
        # Response type is the request type with ' response' appended; the
        # reply carries our freshness proof for this counterparty.
        msgstr = MojoMessage.makeResponseMessage(inmsgtype + ' response', msgbody, prevmsgId, freshnessproof=self._map_cid_to_freshness_proof.get(counterparty_id), mymetainfo=mymetainfo)
        self._MTM.send_message_with_lookup(counterparty_id, msgstr, hint=hint | HINT_THIS_IS_A_RESPONSE)
def _process(self, msg, msgId, counterparty_id, commstrat=None):
"""
This gets called for all incoming messages, by `handle_raw_message()'. It verifies the
message's integrity and either send it to a handler func to generate a response or send
it to the appropriate callback func.
@return: one of MojoTransaction.NO_RESPONSE,
MojoTransaction.ASYNC_RESPONSE, or an instance of
MojoTransaction.ResponseAndCommHints
@raises MojoMessage.BadFormatError: if the message isn't
properly formatted in MojoMessage format
@precondition: `counterparty_id' must be an id.: idlib.is_sloppy_id(counterparty_id): "counterparty_id: %s" % hr(counterparty_id)
@postcondition: Result is required to not be `None'.: result is not None: "result: %s" % hr(result)
@postcondition: Result is required to be either MojoTransaction.NO_RESPONSE or MojoTransaction.ASYNC_RESPONSE or else an instance of MojoTransaction.ResponseAndCommHints.: (result in (MojoTransaction.NO_RESPONSE, MojoTransaction.ASYNC_RESPONSE,)) or isinstance(result, MojoTransaction.ResponseAndCommHints): "result: %s" % hr(result)
"""
assert idlib.is_sloppy_id(counterparty_id), "precondition: `counterparty_id' must be an id." + " -- " + "counterparty_id: %s" % hr(counterparty_id)
counterparty_id = idlib.canonicalize(counterparty_id, "broker")
# begin DEBUG do not uncomment this in normal code
# save all uncompressed incoming messages to unique files to be used for post analysis
# such as real world mdecode() performance tweaking, etc.
# (note: this writes them in the current directory, normally localweb/webroot)
##_dbg_fname = 'message.%05d' % self._in_message_num
##_dbg_f = open(_dbg_fname, 'wb')
##_dbg_f.write(msg)
##_dbg_f.close()
##self._in_message_num += 1 # note: python 2.0 syntax
# end DEBUG
reference = MojoMessage.getReference(msg)
nonce = MojoMessage.getNonce(msg)
recipient_id = MojoMessage.getRecipient(msg)
senders_metainfo = MojoMessage.getSendersMetaInfo(msg)
extra_metainfo = MojoMessage.getExtraMetaInfo(msg)
if nonce is not None:
# this is a first message
if (reference is not None) or (recipient_id is None):
debugprint("WARNING: a Mojo Message arrived with inconsistent conversation markers: nonce: %s, reference: %s, recipient_id: %s\n", args=(nonce, reference, recipient_id), v=1, vs="conversation")
return MojoTransaction.NO_RESPONSE
if not idlib.is_sloppy_id(nonce):
debugprint("WARNING: a Mojo Message arrived with badly formed nonce: %s\n", args=(nonce,), v=1, vs="conversation")
return MojoTransaction.NO_RESPONSE
conversationtype = MojoMessage.getType(msg)
# We now have a hint -- we're expecting to respond to this.
if commstrat:
commstrat.hint = commstrat.hint | HINT_EXPECT_TO_RESPOND
commstrat.hintnumexpectedsends = commstrat.hintnumexpectedsends + 1
if self._map_inmsgid_to_info.has_key(msgId):
# This can only happen if we have already started processing this unique message.
return MojoTransaction.ASYNC_RESPONSE
self._map_inmsgid_to_info[msgId] = (counterparty_id, MojoMessage.getType(msg), EXPECTING_RESPONSE)
# Reminder: do not somehow change this handle_initiating_message call to be on the DoQ in the future without changing
# the MTM.__in_message_for_you logic used in fast relay to prevent nested 'message for you' messages. -greg
result = self._MTM.handle_initiating_message(counterparty_id, conversationtype, MojoMessage.getBody(msg), firstmsgId=msgId)
if result is MojoTransaction.NO_RESPONSE:
self.drop_request_state(msgId)
if result is None:
result = MojoTransaction.NO_RESPONSE
if not isinstance(result, MojoTransaction.ResponseAndCommHints) and not result in (MojoTransaction.NO_RESPONSE, MojoTransaction.ASYNC_RESPONSE,):
result = MojoTransaction.ResponseAndCommHints(result)
postcondition((result in (MojoTransaction.NO_RESPONSE, MojoTransaction.ASYNC_RESPONSE,)) or isinstance(result, MojoTransaction.ResponseAndCommHints), "Result is required to be either MojoTransaction.NO_RESPONSE or MojoTransaction.ASYNC_RESPONSE or else an instance of MojoTransaction.ResponseAndCommHints.", result=result)
return result
else:
# this is a response message
if (reference is None) or (recipient_id is not None):
debugprint("WARNING: a Mojo Message arrived with inconsistent conversation markers: nonce: %s, reference: %s, recipient_id: %s\n", args=(nonce, reference, recipient_id), v=1, vs="conversation")
return MojoTransaction.NO_RESPONSE
responsetype = MojoMessage.getType(msg)
# Make sure that this is a response to a message that we sent, from the person to whom we sent it.
initial = self.__callback_functions.get(reference)
if initial is not None:
(recipient_id, callback_function, notes, conversationtype, post_timeout_callback_function, timeoutcheckerschedtime,) = initial
del self.__callback_functions[reference]
DoQ.doq.remove_task(timeoutcheckerschedtime)
else:
# If it wasn't in the `__callback_functions' dict, it might be in the `_posttimeout_callback_functions' dict.
recipient_id, conversationtype, msgtimeout, post_timeout_callback_function, post_timeout_notes = self._posttimeout_callback_functions.get(reference, (None, None, None, None, None,))
if recipient_id is not None:
del self._posttimeout_callback_functions[reference] # proactively clean up this cache
if not idlib.equal(recipient_id, counterparty_id):
return MojoTransaction.NO_RESPONSE
# log the late response
if conversationtype is not None:
debugprint("received %s to msgId %s %s from %s %s seconds after the timeout\n", args=(responsetype, reference, conversationtype, counterparty_id, "%0.2f" % (time() - msgtimeout)), v=2, vs='Conversation')
if post_timeout_callback_function:
# recipient_id and conversationtype were set above
callback_function = post_timeout_callback_function
notes = post_timeout_notes
debugprint("post timeout callback for response of type %s to msgId %s\n", args=(responsetype, reference,), v=5, vs='Conversation')
else:
return MojoTransaction.NO_RESPONSE
if not idlib.equal(counterparty_id, recipient_id):
return MojoTransaction.NO_RESPONSE
if conversationtype + ' response' != responsetype:
debugprint("message of unexpected type %s from %s in response to a %s\n", args=(responsetype, counterparty_id, conversationtype), v=3, vs='Conversation')
return MojoTransaction.NO_RESPONSE
# Do hints about expecting responses...
if commstrat:
if (commstrat.hint & HINT_EXPECT_RESPONSE) and (commstrat.hintnumexpectedresponses > 0):
commstrat.hintnumexpectedresponses = commstrat.hintnumexpectedresponses - 1
if commstrat.hintnumexpectedresponses == 0:
# No longer expecting response!
commstrat.hint = commstrat.hint & (~ HINT_EXPECT_RESPONSE)
else:
pass
callback_function(outcome=MojoMessage.getBody(msg), notes=notes)
return MojoTransaction.NO_RESPONSE
def handle_raw_message(self, counterparty_id, inmsg, commstrat=None):
    """
    This gets called for all incoming messages. It calls `self._process()' and sends the
    response returned, if any, back to the counterparty.

    @param counterparty_id: id of the peer the message came from (any "sloppy" id form;
        normalized to binary below)
    @param inmsg: the raw incoming message bytes
    @param commstrat: optional CommStrat.Crypto instance used to persist response hints
        for this counterparty; may be None
    """
    counterparty_id = idlib.to_binary(counterparty_id) # also asserts is_sloppy_id(counterparty_id)
    if commstrat:
        assert isinstance(commstrat, CommStrat.Crypto)
    elif not hasattr(self, '_complained'):
        # Complain only once per instance about the missing persistent CommStrat.
        self._complained = true
        debugprint("complaint: I wish that I had a reference to a persistent CommStrat object that represents this particular way of talking to this counterparty, so that I could use it to store hints to optimize behaviour. Oh well. counterparty_id: %s\n", args=(counterparty_id,), v=5, vs="debug")
    try: # for BadFormatErrors
        # The message id doubles as a freshness proof for this counterparty (stored below).
        msgId = idlib.make_id(inmsg, 'msg')
        def debugprintreceive(counterparty_id=counterparty_id, inmsg=inmsg, msgId=msgId):
            # Log the incoming message with detail proportional to the current verbosity level.
            printstr="<=== %s: %s\n"
            theseargs=[counterparty_id, inmsg]
            maxverb = debugstream.max_verbosity
            if maxverb >= 5:
                debugprint("<=== %s: receiving: %s, %s bytes uncomp\n", args=(counterparty_id, inmsg, len(inmsg)), v=5, vs="conversation")
            elif maxverb >= 4:
                # XXX note, this slows sending down A LOT! calling MojoMessage.getType() calls mdecode() & template check on the message. The mdecode part is -extremely- slow (esp on big messages). -greg 11-oct-2000
                # if we want to display this quickly, we'll need to pass the type in from before we called mencode()
                debugprint("<=== %s: receiving: (id: %s, type: %s, %s bytes uncomp, ...)\n", args=(counterparty_id, msgId, MojoMessage.getType(inmsg), len(inmsg)), v=4, vs="conversation") # semi-verbose
            elif maxverb >= 3:
                debugprint("<=== %s: receiving: (id: %s, %s bytes uncomp, ...)\n", args=(counterparty_id, msgId, len(inmsg)), v=3, vs="conversation") # semi-verbose
        debugprintreceive()
        self._map_cid_to_freshness_proof[counterparty_id] = msgId
        result = self._process(inmsg, msgId, counterparty_id, commstrat)
        postcondition((result in (MojoTransaction.NO_RESPONSE, MojoTransaction.ASYNC_RESPONSE,)) or isinstance(result, MojoTransaction.ResponseAndCommHints), "Result is required to be either MojoTransaction.NO_RESPONSE or MojoTransaction.ASYNC_RESPONSE or else an instance of MojoTransaction.ResponseAndCommHints.", result=result)
        # Only synchronous, non-empty results are sent back immediately.
        if result not in (MojoTransaction.ASYNC_RESPONSE, MojoTransaction.NO_RESPONSE):
            self.send_response(msgId, result.response, hint=result.commhints)
    except MojoMessage.BadFormatError, le:
        # Malformed messages are logged and dropped; no response is sent.
        debugprint("_process(): BadFormatError in message from %s, msg: %s, error: %s\n", args=(counterparty_id, inmsg, le,), v=2, vs="conversation")
| zooko/egtp_new | egtp/Conversation.py | Python | lgpl-2.1 | 25,617 | [
"VisIt"
] | 1c33b4e82e54e0eec9e9154e5ce588b97845998b7660ccc3610df5f9b28eeea7 |
########################################################################
# File: Request.py
# Date: 2012/07/16 13:43:45
########################################################################
"""
:mod: Request
.. module: Request
:synopsis: request implementation
request implementation
"""
# Disable invalid names warning
# pylint: disable=C0103
__RCSID__ = "$Id$"
# # imports
import datetime
from types import StringTypes
import json
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.private.JSONUtils import RMSEncoder
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from types import NoneType
########################################################################
class Request( object ):
  """
  :param int RequestID: requestID
  :param str Name: request' name
  :param str OwnerDN: request's owner DN
  :param str OwnerGroup: request owner group
  :param str Setup: DIRAC setup
  :param str SourceComponent: whatever
  :param int JobID: jobID
  :param datetime.datetime CreationTime: UTC datetime
  :param datetime.datetime SubmissionTime: UTC datetime
  :param datetime.datetime LastUpdate: UTC datetime
  :param datetime.datetime NotBefore: UTC datetime
  :param str Status: request's status
  :param TypedList.TypedList operations: list of operations

  It is managed by SQLAlchemy, so the RequestID should never be set by hand (except when constructed from
  JSON of course...)
  """

  # All request states accepted by the Status setter; FINAL_STATES are terminal.
  ALL_STATES = ( "Waiting", "Failed", "Done", "Scheduled", "Assigned", "Canceled" )
  FINAL_STATES = ( "Done", "Failed", "Canceled" )

  # Format used to (de)serialize all datetime attributes.
  _datetimeFormat = '%Y-%m-%d %H:%M:%S'

  def __init__( self, fromDict = None ):
    """c'tor

    :param self: self reference
    :param fromDict : if false, new request. Can be json string that represents the object, or the dictionary directly
    """
    # Cached reference to the first Waiting/Scheduled operation; refreshed by _notify().
    self.__waiting = None

    now = datetime.datetime.utcnow().replace( microsecond = 0 )
    self._CreationTime = now
    self._SubmitTime = now
    self._LastUpdate = now
    # the time before which the request should not be executed
    # If None, no delay
    self._NotBefore = now

    self._Status = "Done"
    self.JobID = 0
    self.Error = None
    self.DIRACSetup = None
    self.OwnerDN = None
    self.RequestName = None
    self.OwnerGroup = None
    self.SourceComponent = None

    self.dmsHelper = DMSHelpers()

    # Default the owner to the current proxy identity, if a valid proxy is available.
    proxyInfo = getProxyInfo()
    if proxyInfo["OK"]:
      proxyInfo = proxyInfo["Value"]
      if proxyInfo["validGroup"] and proxyInfo["validDN"]:
        self.OwnerDN = proxyInfo["identity"]
        self.OwnerGroup = proxyInfo["group"]

    # Ordered list of Operation objects; execution order == list order.
    self.__operations__ = []

    # fromDict may be a dict, a JSON string, or falsy (fresh request).
    fromDict = fromDict if isinstance( fromDict, dict )\
               else json.loads( fromDict ) if isinstance( fromDict, StringTypes )\
               else {}

    if "Operations" in fromDict:
      for opDict in fromDict.get( "Operations", [] ):
        self += Operation( opDict )
      del fromDict["Operations"]

    for key, value in fromDict.items():
      # The JSON module forces the use of UTF-8, which is not properly
      # taken into account in DIRAC.
      # One would need to replace all the '== str' with 'in StringTypes'
      if type( value ) in StringTypes:
        value = value.encode()
      if value:
        setattr( self, key, value )

    self._notify()

  def _notify( self ):
    """ simple state machine for sub request statuses

    Recomputes each Operation's Order/parent link and derives the overall
    request Status from the ordered operation statuses.
    """
    # # update operations statuses
    self.__waiting = None

    # Update the Order in Operation, and set the parent
    for i in range( len( self.__operations__ ) ):
      self.__operations__[i].Order = i
      self.__operations__[i]._parent = self

    rStatus = "Waiting"
    opStatusList = [ ( op.Status, op ) for op in self ]

    while opStatusList:
      # # Scan all status in order!
      opStatus, op = opStatusList.pop( 0 )

      # # Failed -> Failed
      if opStatus == "Failed":
        rStatus = "Failed"
        break

      # Scheduled -> Scheduled
      if opStatus == "Scheduled":
        if self.__waiting == None:
          self.__waiting = op
          rStatus = "Scheduled"
      # # First operation Queued becomes Waiting if no Waiting/Scheduled before
      elif opStatus == "Queued":
        if self.__waiting == None:
          self.__waiting = op
          op._setWaiting( self )
          rStatus = "Waiting"
      # # First operation Waiting is next to execute, others are queued
      elif opStatus == "Waiting":
        rStatus = "Waiting"
        if self.__waiting == None:
          self.__waiting = op
        else:
          op._setQueued( self )
      # # All operations Done -> Done
      elif opStatus == "Done" and self.__waiting == None:
        rStatus = "Done"
        self.Error = ''
    self.Status = rStatus

  def getWaiting( self ):
    """ get waiting operation if any """
    # # update states
    self._notify()
    return S_OK( self.__waiting )

  # # Operation arithmetics
  def __contains__( self, operation ):
    """ in operator

    :param self: self reference
    :param Operation.Operation subRequest: a subRequest
    """
    return bool( operation in self.__operations__ )

  def __iadd__( self, operation ):
    """ += operator for subRequest

    :param self: self reference
    :param Operation.Operation operation: sub-request to add
    """
    if operation not in self:
      self.__operations__.append( operation )
      operation._parent = self
      self._notify()
    return self

  def insertBefore( self, newOperation, existingOperation ):
    """ insert :newOperation: just before :existingOperation:

    :param self: self reference
    :param Operation.Operation newOperation: Operation to be inserted
    :param Operation.Operation existingOperation: previous Operation sibling
    """
    if existingOperation not in self:
      return S_ERROR( "%s is not in" % existingOperation )
    if newOperation in self:
      return S_ERROR( "%s is already in" % newOperation )
    self.__operations__.insert( self.__operations__.index( existingOperation ), newOperation )
    self._notify()
    return S_OK()

  def insertAfter( self, newOperation, existingOperation ):
    """ insert :newOperation: just after :existingOperation:

    :param self: self reference
    :param Operation.Operation newOperation: Operation to be inserted
    :param Operation.Operation existingOperation: next Operation sibling
    """
    if existingOperation not in self:
      return S_ERROR( "%s is not in" % existingOperation )
    if newOperation in self:
      return S_ERROR( "%s is already in" % newOperation )
    self.__operations__.insert( self.__operations__.index( existingOperation ) + 1, newOperation )
    self._notify()
    return S_OK()

  def addOperation( self, operation ):
    """ add :operation: to list of Operations

    :param self: self reference
    :param Operation.Operation operation: Operation to be inserted
    """
    if operation in self:
      return S_ERROR( "This operation is already in!!!" )
    self += operation
    return S_OK()

  def isEmpty( self ):
    """ Evaluate if the request is empty
    """
    return len( self.__operations__ ) == 0

  def __iter__( self ):
    """ iterator for sub-request """
    return self.__operations__.__iter__()

  def __getitem__( self, i ):
    """ [] op for sub requests """
    return self.__operations__.__getitem__( i )

  def __setitem__( self, i, value ):
    """ self[i] = val """
    self.__operations__.__setitem__( i, value )
    self._notify()

  def __delitem__( self, i ):
    """ del self[i]"""
    self.__operations__.__delitem__( i )
    self._notify()

  def indexOf( self, subReq ):
    """ return index of subReq (execution order), or -1 if not present """
    return self.__operations__.index( subReq ) if subReq in self else -1

  def __nonzero__( self ):
    """ for comparisons

    A Request is always truthy, even when empty (Python 2 protocol).
    """
    return True

  def __len__( self ):
    """ nb of subRequests """
    return len( self.__operations__ )

  def __str__( self ):
    """ str operator """
    return self.toJSON()['Value']

  def subStatusList( self ):
    """ list of statuses for all operations """
    return [ subReq.Status for subReq in self ]

  @property
  def CreationTime( self ):
    """ creation time getter """
    return self._CreationTime

  @CreationTime.setter
  def CreationTime( self, value = None ):
    """ creation time setter

    Accepts a datetime.datetime or a string in _datetimeFormat (fractional
    seconds, if any, are dropped).
    """
    if type( value ) not in ( [datetime.datetime] + list( StringTypes ) ) :
      raise TypeError( "CreationTime should be a datetime.datetime!" )
    if type( value ) in StringTypes:
      value = datetime.datetime.strptime( value.split( "." )[0], self._datetimeFormat )
    self._CreationTime = value

  @property
  def SubmitTime( self ):
    """ request's submission time getter """
    return self._SubmitTime

  @SubmitTime.setter
  def SubmitTime( self, value = None ):
    """ submission time setter (datetime.datetime or _datetimeFormat string) """
    if type( value ) not in ( [datetime.datetime] + list( StringTypes ) ):
      raise TypeError( "SubmitTime should be a datetime.datetime!" )
    if type( value ) in StringTypes:
      value = datetime.datetime.strptime( value.split( "." )[0], self._datetimeFormat )
    self._SubmitTime = value

  @property
  def NotBefore( self ):
    """ Getter for NotBefore time"""
    return self._NotBefore

  @NotBefore.setter
  def NotBefore( self, value = None ):
    """ Setter for the NotBefore time (None, datetime.datetime or _datetimeFormat string) """
    if type( value ) not in ( [NoneType] + [datetime.datetime] + list( StringTypes ) ):
      raise TypeError( "NotBefore should be a datetime.datetime!" )
    if type( value ) in StringTypes:
      value = datetime.datetime.strptime( value.split( "." )[0], self._datetimeFormat )
    self._NotBefore = value

  def delayNextExecution( self, deltaTime ):
    """This helper sets the NotBefore attribute in deltaTime minutes
    in the future

    :param deltaTime : time in minutes before next execution
    """
    now = datetime.datetime.utcnow().replace( microsecond = 0 )
    extraDelay = datetime.timedelta( minutes = deltaTime )
    self._NotBefore = now + extraDelay
    return S_OK()

  @property
  def LastUpdate( self ):
    """ last update getter """
    return self._LastUpdate

  @LastUpdate.setter
  def LastUpdate( self, value = None ):
    """ last update setter (datetime.datetime or _datetimeFormat string) """
    if type( value ) not in ( [datetime.datetime] + list( StringTypes ) ):
      raise TypeError( "LastUpdate should be a datetime.datetime!" )
    if type( value ) in StringTypes:
      value = datetime.datetime.strptime( value.split( "." )[0], self._datetimeFormat )
    self._LastUpdate = value

  @property
  def Status( self ):
    """ status getter

    NOTE: reading Status triggers _notify(), i.e. it recomputes the state
    machine as a side effect.
    """
    self._notify()
    return self._Status

  @Status.setter
  def Status( self, value ):
    """ status setter """
    if value not in Request.ALL_STATES:
      raise ValueError( "Unknown status: %s" % str( value ) )
    # If the status moved to Failed or Done, update the lastUpdate time
    if value in ( 'Done', 'Failed' ):
      if value != self._Status:
        self.LastUpdate = datetime.datetime.utcnow().replace( microsecond = 0 )
    if value == 'Done':
      self.Error = ''
    self._Status = value

  @property
  def Order( self ):
    """ ro execution order getter

    Index of the first Waiting operation, or the number of operations if none.
    """
    self._notify()
    opStatuses = [ op.Status for op in self.__operations__ ]
    return opStatuses.index( "Waiting" ) if "Waiting" in opStatuses else len( opStatuses )

  def toJSON( self ):
    """ Returns the JSON formated string that describes the request """
    jsonStr = json.dumps( self, cls = RMSEncoder )
    return S_OK( jsonStr )

  def _getJSONData( self ):
    """ Returns the data that have to be serialized by JSON """
    attrNames = ['RequestID', "RequestName", "OwnerDN", "OwnerGroup",
                 "Status", "Error", "DIRACSetup", "SourceComponent",
                 "JobID", "CreationTime", "SubmitTime", "LastUpdate", "NotBefore"]
    jsonData = {}
    for attrName in attrNames :
      # RequestID might not be set since it is managed by SQLAlchemy
      if not hasattr( self, attrName ):
        continue
      value = getattr( self, attrName )
      if isinstance( value, datetime.datetime ):
        # We convert date time to a string
        jsonData[attrName] = value.strftime( self._datetimeFormat )
      else:
        jsonData[attrName] = value
    jsonData['Operations'] = self.__operations__
    return jsonData

  def getDigest( self ):
    """ return digest for request

    One line per operation: colon-joined Type/Status/Order plus optional
    TargetSE, Catalog and a first-LFN summary.
    """
    digest = ['Name:' + self.RequestName]
    for op in self:
      # NOTE(review): op.Type appears twice in this tuple — looks like a
      # copy/paste slip (one occurrence was perhaps meant to be another
      # attribute); verify before changing, as downstream parsers may rely
      # on the current layout.
      opDigest = [ str( item ) for item in ( op.Type, op.Type, op.Status, op.Order ) ]
      if op.TargetSE:
        opDigest.append( op.TargetSE )
      if op.Catalog:
        opDigest.append( op.Catalog )
      if len( op ):
        opFile = op[0]
        extraFilesStr = "...+<%d files>" % ( len( op ) - 1 ) if (len(op) > 1 ) else ''
        opDigest.append( opFile.LFN + extraFilesStr )
      digest.append( ":".join( opDigest ) )
    return S_OK( "\n".join( digest ) )

  def optimize( self ):
    """ Merges together the operations that can be merged. They need to have the following arguments equal:
    * Type
    * Arguments
    * SourceSE
    * TargetSE
    * Catalog
    It also makes sure that the maximum number of Files in an Operation is never overcome.

    CAUTION: this method is meant to be called before inserting into the DB.
    So if the RequestID is not 0, we don't touch

    :return S_ERROR if the Request should not be optimized (because already in the DB
            S_OK(True) if a optimization was carried out
            S_OK(False) if no optimization were carried out
    """
    # If the RequestID is not the default one (0), it probably means
    # the Request is already in the DB, so we don't touch anything
    if hasattr( self, 'RequestID' ) and getattr( self, 'RequestID' ):
      return S_ERROR( "Cannot optimize because Request seems to be already in the DB (RequestID %s)" % getattr( self, 'RequestID' ) )
    # Set to True if the request could be optimized
    optimized = False
    # Recognise Failover request series
    repAndRegList = []
    removeRepList = []
    i = 0
    while i < len( self.__operations__ ):
      insertNow = True
      if i < len( self.__operations__ ) - 1:
        op1 = self.__operations__[i]
        op2 = self.__operations__[i + 1]
        # A ReplicateAndRegister immediately followed by a RemoveReplica on the
        # exact same file set is treated as a failover pair: both are pulled
        # out and re-inserted grouped/sorted by TargetSE.
        if getattr( op1, 'Type' ) == 'ReplicateAndRegister' and \
           getattr( op2, 'Type' ) == 'RemoveReplica':
          fileSetA = set( list( f.LFN for f in op1 ) )
          fileSetB = set( list( f.LFN for f in op2 ) )
          if fileSetA == fileSetB:
            # Source is useless if failover
            if self.dmsHelper.isSEFailover( op1.SourceSE ):
              op1.SourceSE = ''
            repAndRegList.append( ( op1.TargetSE, op1 ) )
            removeRepList.append( ( op2.TargetSE, op2 ) )
            del self.__operations__[i]
            del self.__operations__[i]
            # If we are at the end of the request, we must insert the new operations
            insertNow = ( i == len( self.__operations__ ) )
      # print i, self.__operations__[i].Type if i < len( self.__operations__ ) else None, len( repAndRegList ), insertNow
      if insertNow:
        if repAndRegList:
          # We must insert the new operations there
          # i.e. insert before operation i (if it exists)
          # Replication first, removeReplica next
          optimized = True
          insertBefore = self.__operations__[i] if i < len( self.__operations__ ) else None
          # print 'Insert new operations before', insertBefore
          for op in \
              [op for _targetSE, op in sorted( repAndRegList )] + \
              [op for _targetSE, op in sorted( removeRepList )]:
            _res = self.insertBefore( op, insertBefore ) if insertBefore else self.addOperation( op )
            # Skip the newly inserted operation
            i += 1
          repAndRegList = []
          removeRepList = []
        else:
          # Skip current operation
          i += 1
      else:
        # Just to show that in that case we don't increment i
        pass

    # List of attributes that must be equal for operations to be merged
    attrList = ["Type", "Arguments", "SourceSE", "TargetSE", "Catalog" ]
    i = 0
    while i < len( self.__operations__ ):
      while i < len( self.__operations__ ) - 1:
        # Some attributes need to be the same
        attrMismatch = False
        for attr in attrList:
          if getattr( self.__operations__[i], attr ) != getattr( self.__operations__[i + 1], attr ):
            attrMismatch = True
            break
        if attrMismatch:
          break
        # We do not do the merge if there are common files in the operations
        fileSetA = set( list( f.LFN for f in self.__operations__[i] ) )
        fileSetB = set( list( f.LFN for f in self.__operations__[i + 1] ) )
        if fileSetA & fileSetB:
          break
        # There is a maximum number of files one can add into an operation
        # (Operation.__iadd__ raises RuntimeError when the limit is reached).
        try:
          while len( self.__operations__[i + 1] ):
            fileToMove = self.__operations__[i + 1][0]
            self.__operations__[i] += fileToMove
            # If the object is mapped to SQLAlchemy object with a relationship
            # having the delete-orphan option, the fileToMove will have
            # already disappeared from the original operation. Silly...
            # If not, we have to remove it manually
            if len( self.__operations__[i + 1] )\
               and ( self.__operations__[i + 1][0] == fileToMove ):
              del self.__operations__[i + 1][0]
            optimized = True
          del self.__operations__[i + 1]
        except RuntimeError:
          i += 1
      i += 1
    return S_OK( optimized )
| vmendez/DIRAC | RequestManagementSystem/Client/Request.py | Python | gpl-3.0 | 17,938 | [
"DIRAC"
] | dcc0b67f72bd62387b14dccaea4db593572043915f2ac2c75079258c935f3424 |
import urllib.request
import sys
import json
import time
import difflib
# List of countries which we want appear to be coming from - list below selected based on top 20 countries by GDP
# Add/remove per your requirements - second field is ISO_3166 country code
# Country codes can be obtained from: https://en.wikipedia.org/wiki/ISO_3166-1#Current_codes
# If you have preferred proxy for a particular country, place it in the 3rd field in format IP:port and it will be used
# Each row: [friendly country name, ISO-3166 code, "ip:port" or "" to auto-obtain]
proxylist_matrix = [
    ["United States", "US", ""],
    ["China", "CN", ""],
    # ["Japan", "JP", ""],
    ["Germany", "DE", ""],
    ["United Kingdom", "GB", ""],
    # ["France", "FR", ""],
    # ["India", "IN", ""],
    # ["Italy", "IT", ""],
    # ["Brazil", "BR", ""],
    # ["Canada", "CA", ""],
    # ["South Korea", "KR", ""],
    # ["Russia", "RU", ""],
    # ["Australia", "AU", ""],
    # ["Spain", "ES", ""],
    # ["Mexico", "MX", ""],
    # ["Indonesia", "ID", ""],
    # ["Turkey", "TR", ""],
    # ["Netherlands", "NL", ""],
    # ["Switzerland", "CH", ""],
    # ["Saudi Arabia", "SA", ""],
    # ["Argentina", "AR", ""]
]

# User Agents below obtained from http://www.networkinghowtos.com/howto/common-user-agent-list/
# Add / remove per your requirements - first field is friendly name, second field is user-agent you want to send
# NOTE: the first row is used as the control/baseline user-agent in stage 2.
useragent_matrix = [
    ["Google Chrome", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"],
    ["Mozilla Firefox","Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0"],
    ["Microsoft Edge","Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393"],
    ["Microsoft Internet Explorer 6 / IE 6","Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)"],
    ["Microsoft Internet Explorer 7 / IE 7","Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)"],
    ["Microsoft Internet Explorer 8 / IE 8","Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)"],
    ["Microsoft Internet Explorer 9 / IE 9","Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; Trident/5.0)"],
    ["Microsoft Internet Explorer 10 / IE 10","Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0; MDDCJS)"],
    ["Microsoft Internet Explorer 11 / IE 11","Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko"],
    ["Apple iPad","Mozilla/5.0 (iPad; CPU OS 8_4_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12H321 Safari/600.1.4"],
    ["Apple iPhone","Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1"],
    ["Googlebot (Google Search Engine Bot)","Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"],
    ["Bing Bot (Bing Search Engine Bot)","Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"],
    ["Samsung Phone","Mozilla/5.0 (Linux; Android 6.0.1; SAMSUNG SM-G570Y Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/4.0 Chrome/44.0.2403.133 Mobile Safari/537.36"],
    ["Samsung Galaxy Note 3","Mozilla/5.0 (Linux; Android 5.0; SAMSUNG SM-N900 Build/LRX21V) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/2.1 Chrome/34.0.1847.76 Mobile Safari/537.36"],
    ["Samsung Galaxy Note 4","Mozilla/5.0 (Linux; Android 6.0.1; SAMSUNG SM-N910F Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/4.0 Chrome/44.0.2403.133 Mobile Safari/537.36"],
    ["Google Nexus","Mozilla/5.0 (Linux; U; Android-4.0.3; en-us; Galaxy Nexus Build/IML74K) AppleWebKit/535.7 (KHTML, like Gecko) CrMo/16.0.912.75 Mobile Safari/535.7"],
    ["HTC","Mozilla/5.0 (Linux; Android 7.0; HTC 10 Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.83 Mobile Safari/537.36"],
    ["Curl","curl/7.35.0"],
    ["Wget","Wget/1.15 (linux-gnu)"],
    ["Lynx","Lynx/2.8.8pre.4 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.12.23"]
]
# Shameless welcome banner
print ("########### SHAPE_SHIFTER v1.0 - Adam Kramer 2017 #############")

# If user has not provided an argument, then inform them what is required
if ((len(sys.argv)) < 2):
    print ("Usage: shape_shift.py <url> - where url is the site to be analysed")
    sys.exit(1)

# STAGE 1
# 1. Find working proxies for each of the required countries
# 2. Test proxies by connecting to them and having 3rd party site geolocate the IP to verify it matches requirements
print ("################### STAGE 1 - SETUP PHASE #####################")

# Iterate through each of the required countries in the proxy list matrix
for i in range(len(proxylist_matrix)):

    # If the proxy field (3rd field) is not empty - skip attempt to obtain, as user has hard coded their preferred proxy in
    if (proxylist_matrix[i][2] != ""):
        print("[INFO] Skipping - IP field in script for " + proxylist_matrix[i][0] + " is not blank")
        continue

    # Loop will ensure we re-try until we get a working proxy server for the respective country
    while True:

        # Clear any proxies that have been set so far
        proxy_handler = urllib.request.ProxyHandler({})
        proxy_opener = urllib.request.build_opener(proxy_handler)
        urllib.request.install_opener(proxy_opener)

        # Inform user which proxy server we are attempting to obtain
        print ("[INFO] Attempting to obtain proxy server which geolocates to " + proxylist_matrix[i][0])

        # Connect to website which provides proxies and obtain json response
        try:
            # There are plenty of websites which let you obtain proxies via API query
            # I have selected the website which was the top google result - replace per your preference
            resp = urllib.request.urlopen("https://gimmeproxy.com/api/getProxy?user-agent=true&anonymityLevel=1&supportsHttps=true&country=" + proxylist_matrix[i][1], timeout = 5)

        # Broad error catching - if website can't be accessed then keep trying after 5 second pause
        # (KeyboardInterrupt/SystemExit are re-raised so Ctrl-C still works)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            print ("[ERROR] Error during proxy request - pausing for 5 seconds, then attempting retry")
            time.sleep(5)
            continue

        # Read output which is returned from proxy list website, decode json and extract IP/port details
        resp_json_decoded = json.loads(resp.read())
        proxylist_matrix[i][2] = resp_json_decoded['ip'] + ":" + resp_json_decoded['port']
        print ("[INFO] Proxy server obtained for " + proxylist_matrix[i][0] + " - " + proxylist_matrix[i][2])

        # Set obtained details as proxy currently in use
        proxy_handler = urllib.request.ProxyHandler({'http': proxylist_matrix[i][2], 'https' : proxylist_matrix[i][2]})
        proxy_opener = urllib.request.build_opener(proxy_handler)
        urllib.request.install_opener(proxy_opener)

        # Connect to 3rd party website to verify that the proxy is working, and that the geolocation is per requirements
        try:
            resp = urllib.request.urlopen("https://ip-api.io/json/" + resp_json_decoded['ip'], timeout = 5)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            print ("[ERROR] Error caught during testing attempt - requesting new proxy")
            continue

        # Response should contain required country name - if not, we will continue to next loop iteration and try another proxy
        test_resp_data = resp.read()
        if (proxylist_matrix[i][0] in str(test_resp_data)):
            print ("[INFO] " + proxylist_matrix[i][0] + " proxy tested (including verifying geolocation), appears ok")
            # break means we're happy, and we're leaving the infinite loop and onto obtaining proxy for next country
            break

        print ("[INFO] Testing showed proxy server not responding, or geolocation incorrect - requesting a new one")

# Inform user that we have obtained working proxies for all requested countries
print ("[INFO] Obtained all requested proxies - progressing to testing phase")
print ("################### STAGE 2 - TESTING PHASE #####################")
print ("[INFO] Each response will be compared against your default IP and user-agent " + useragent_matrix[0][0])

# Obtain a control value based on default IP / first user-agent in the list
proxy_handler = urllib.request.ProxyHandler({})
proxy_opener = urllib.request.build_opener(proxy_handler)
urllib.request.install_opener(proxy_opener)

# Set user-agent as first item from user-agent array
custom_headers = {}
custom_headers['User-Agent'] = useragent_matrix[0][1]
req = urllib.request.Request(sys.argv[1], headers = custom_headers)

# Send request and receive response into the array
# (an HTTP error body is deliberately kept as the control value - the site
# may be serving errors to some visitors on purpose)
while True:
    try:
        resp = urllib.request.urlopen(req, timeout = 5)
        control_website_response = resp.read()
        break
    except (KeyboardInterrupt, SystemExit):
        raise
    except urllib.error.HTTPError as e:
        print ("[INFO] Detected HTTP error: " + str(e.code) + " - this may be intentional(?)")
        control_website_response = e.read()
        break
    except:
        print ("[ERROR] Could not request website when connecting via default IP, retrying...")
        time.sleep(5)

# Iterate through each requested country
for i in range(len(proxylist_matrix)):

    # Inform user that we are connecting to current iterations proxy
    print ("[INFO] Connecting to proxy server obtained for " + proxylist_matrix[i][0] + " - " + proxylist_matrix[i][2])

    # Connect to proxy
    proxy_handler = urllib.request.ProxyHandler({'http': proxylist_matrix[i][2], 'https' : proxylist_matrix[i][2]})
    proxy_opener = urllib.request.build_opener(proxy_handler)
    urllib.request.install_opener(proxy_opener)

    # Iterate through each required user-agent
    for j in range(len(useragent_matrix)):
        print ("[INFO] Sending query with User-Agent: " + useragent_matrix[j][0])

        # Set user-agent as current iteration from array
        custom_headers = {}
        custom_headers['User-Agent'] = useragent_matrix[j][1]
        req = urllib.request.Request(sys.argv[1], headers = custom_headers)

        # Send request and receive response into the array
        try:
            resp = urllib.request.urlopen(req, timeout = 5)
            current_website_response = resp.read()
        except (KeyboardInterrupt, SystemExit):
            raise
        except urllib.error.HTTPError as e:
            print ("[INFO] Detected HTTP error: " + str(e.code) + " - this may be intentional(?)")
            current_website_response = e.read()
        except:
            print ("[ERROR] Could not request when connecting via " + proxylist_matrix[i][0] + " proxy")
            continue

        # Check whether current response is different from the control value
        if current_website_response != control_website_response:

            # Inform user that we've found one that is different from the control value
            print ("[ALERT] DIFFERENCE detected when using geolocation: " + proxylist_matrix[i][0] + " & user-agent: " + useragent_matrix[j][0])
            print ("[ALERT] Details of the differences:")

            # Show the user the DIFF details
            test_data_1 = control_website_response.decode(errors="ignore").splitlines()
            test_data_2 = current_website_response.decode(errors="ignore").splitlines()
            differ_instance = difflib.Differ()
            diff_data = differ_instance.compare(test_data_1, test_data_2)
            print('\n'.join(diff_data))
| adamkramer/shape_shift | shape_shift.py | Python | gpl-3.0 | 11,221 | [
"Galaxy"
] | e6eb3ae9556e5b783b70aacecdbb30ffbde35733ffca99011f2a9306b9d2f922 |
import math
import numpy as np
import pysam
import clodius.tiles.bigwig as ctbw
def abs2genomic(chromsizes, start_pos, end_pos):
    """Split an absolute genomic interval into per-chromosome chunks.

    Given chromosome lengths and a half-open interval [start_pos, end_pos)
    on the concatenated genome coordinate axis, yield one
    ``(chrom_index, rel_start, rel_end)`` triple per chromosome the
    interval touches. The first chunk starts at the interval's offset
    within its chromosome; intermediate chunks cover whole chromosomes;
    the last chunk ends at the interval's offset within the final
    chromosome (and may be zero-length if end_pos falls exactly on a
    chromosome boundary).
    """
    offsets = np.r_[0, np.cumsum(chromsizes)]
    first_cid, last_cid = (
        np.searchsorted(offsets, [start_pos, end_pos], side="right") - 1
    )

    rel_start = start_pos - offsets[first_cid]
    # Emit every fully/partially covered chromosome before the last one.
    for cid in range(first_cid, last_cid):
        yield cid, rel_start, chromsizes[cid]
        rel_start = 0
    # Final (possibly empty) chunk within the last chromosome.
    yield last_cid, rel_start, end_pos - offsets[last_cid]
def load_reads(samfile, start_pos, end_pos, chrom_order=None):
    """
    Sample reads from the specified region, assuming that the chromosomes
    are ordered in some fashion. Returns an list of pysam reads

    Parameters:
    -----------
    samfile: pysam.AlignmentFile
        A pysam entry into an indexed bam file
    start_pos: int
        The start position of the sampled region
    end_pos: int
        The end position of the sampled region
    chrom_order: ['chr1', 'chr2',...]
        A listing of chromosome names to use as the order

    Returns
    -------
    reads: [read1, read2...]
        The list of in the sampled regions
        (actually a dict of parallel lists keyed by
        id/from/to/md/chrName/chrOffset/cigar - see `results` below)
    """
    # if chromorder is not None...
    # specify the chromosome order for the fetched reads

    references = np.array(samfile.references)
    lengths = np.array(samfile.lengths)
    ref_lengths = dict(zip(references, lengths))

    # we're going to create a natural ordering for references
    # e.g. (chr1, chr2,..., chr10, chr11...chr22,chrX, chrY, chrM...)
    references = ctbw.natsorted(references)
    lengths = [ref_lengths[r] for r in references]
    abs_chrom_offsets = np.r_[0, np.cumsum(lengths)]

    if chrom_order:
        # NOTE(review): `lengths` is a plain Python list at this point, so
        # indexing it with the array returned by np.nonzero would raise a
        # TypeError - this branch looks untested; verify before relying on
        # chrom_order.
        chrom_order = np.array(chrom_order)
        chrom_order_ixs = np.nonzero(np.in1d(references, chrom_order))
        lengths = lengths[chrom_order_ixs]

    # Parallel lists, one entry per mapped read in the requested region.
    results = {
        "id": [],
        "from": [],
        "to": [],
        "md": [],
        "chrName": [],
        "chrOffset": [],
        "cigar": [],
    }

    for cid, start, end in abs2genomic(lengths, start_pos, end_pos):
        chr_offset = int(abs_chrom_offsets[cid])
        seq_name = f"{references[cid]}"
        reads = samfile.fetch(seq_name, start, end)

        for read in reads:
            # Skip unmapped reads - they have no reference coordinates.
            if read.is_unmapped:
                continue

            # NOTE: an earlier (commented-out) version also decoded per-base
            # differences from read.get_aligned_pairs(with_seq=True); see the
            # pysam source/issue #163 for the (qpos, rpos, ref_base) semantics
            # if that ever needs to be restored.

            # `except: raise` is a no-op guard kept from earlier debugging;
            # any failure here propagates to the caller.
            try:
                results["id"] += [read.query_name]
                results["from"] += [int(read.reference_start + chr_offset)]
                results["to"] += [int(read.reference_end + chr_offset)]
                results["chrName"] += [read.reference_name]
                results["chrOffset"] += [chr_offset]
                results["cigar"] += [read.cigarstring]
            except:
                raise

            # The MD tag is optional; fall back to an empty string.
            try:
                results["md"] += [read.get_tag("MD")]
            except KeyError:
                results["md"] += [""]
                continue

    return results
def tileset_info(filename):
    """
    Return the tileset info for a BAM file.

    Parameters
    ----------
    filename: str
        The path of the BAM file to describe.

    Returns
    -------
    tileset_info: dict
        {'min_pos': [0],
         'max_pos': [total_length],
         'max_width': tile_size * 2 ** max_zoom,
         'tile_size': 256,
         'chromsizes': [(name, length), ...],
         'max_zoom': int}
    """
    samfile = pysam.AlignmentFile(filename)
    total_length = sum(samfile.lengths)

    tile_size = 256
    # smallest zoom depth at which one tile covers the whole genome
    max_zoom = math.ceil(math.log(total_length / tile_size) / math.log(2))

    # NOTE: the original built numpy arrays and a name->length dict only to
    # reproduce samfile.lengths in the same order; use the header order directly.
    chromsizes = list(zip(samfile.references, [int(length) for length in samfile.lengths]))

    tileset_info = {
        "min_pos": [0],
        "max_pos": [total_length],
        "max_width": tile_size * 2 ** max_zoom,
        "tile_size": tile_size,
        "chromsizes": chromsizes,
        "max_zoom": max_zoom,
    }
    return tileset_info
def tiles(filename, tile_ids, index_filename=None, max_tile_width=None):
    """
    Generate tiles from a BAM file.

    Parameters
    ----------
    filename: str
        The path of the BAM file from which tiles are generated
    tile_ids: [str,...]
        A list of tile_ids (e.g. xyx.0.0) identifying the tiles
        to be retrieved
    index_filename: str
        The name of the file containing the index
    max_tile_width: int
        How wide can each tile be before we return no data. This
        can be used to limit the amount of data returned.

    Returns
    -------
    tile_list: [(tile_id, tile_data),...]
        A list of tile_id, tile_data tuples
    """
    generated_tiles = []
    tsinfo = tileset_info(filename)
    samfile = pysam.AlignmentFile(filename, index_filename=index_filename)
    try:
        for tile_id in tile_ids:
            tile_id_parts = tile_id.split("|")[0].split(".")
            tile_position = list(map(int, tile_id_parts[1:3]))
            tile_width = tsinfo["max_width"] / 2 ** int(tile_position[0])

            if max_tile_width and tile_width >= max_tile_width:
                # this tile is larger than the max allowed; preserve the
                # historical behavior of returning only the error tile
                return [
                    (
                        tile_id,
                        {
                            "error": f"Tile too large, no data returned. Max tile size: {max_tile_width}"
                        },
                    )
                ]
            else:
                start_pos = int(tile_position[1]) * tile_width
                end_pos = start_pos + tile_width
                tile_value = load_reads(samfile, start_pos=start_pos, end_pos=end_pos)
                generated_tiles += [(tile_id, tile_value)]
    finally:
        # the original leaked the file handle; close it on every exit path
        samfile.close()
    return generated_tiles
| hms-dbmi/clodius | clodius/tiles/bam.py | Python | mit | 6,802 | [
"pysam"
] | af643255900873b9b0ff009cb3fbeb31dfe8c2c60783a887cadd02dbacf381d8 |
#!/usr/bin/env python
"""
Print list of users with proxies.
Example:
$ dirac-admin-users-with-proxy
* vhamar
DN : /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar
group : dirac_admin
not after : 2011-06-29 12:04:25
persistent : False
-
DN : /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar
group : dirac_pilot
not after : 2011-06-29 12:04:27
persistent : False
-
DN : /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar
group : dirac_user
not after : 2011-06-29 12:04:30
persistent : True
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
__RCSID__ = "$Id$"
class Params(object):
    """Container for the command-line options of this script."""

    # defaults for the CLI switches
    limited = False
    proxyPath = False
    proxyLifeTime = 3600

    def setProxyLifeTime(self, arg):
        """Parse an HH:MM string into seconds and store it in proxyLifeTime."""
        try:
            pieces = [piece.strip() for piece in arg.split(":")]
            hours = int(pieces[0])
            minutes = int(pieces[1])
            self.proxyLifeTime = hours * 3600 + minutes * 60
        except Exception:
            print("Can't parse %s time! Is it a HH:MM?" % arg)
            return DIRAC.S_ERROR("Can't parse time argument")
        return DIRAC.S_OK()

    def registerCLISwitches(self):
        """Register the script's switches with the DIRAC Script machinery."""
        Script.registerSwitch("v:", "valid=", "Required HH:MM for the users", self.setProxyLifeTime)
@DIRACScript()
def main():
    """
    Print, per user, every proxy stored in the ProxyDB that is still valid
    for longer than the requested lifetime (default 1 hour, override with -v).
    """
    params = Params()
    params.registerCLISwitches()
    Script.parseCommandLine(ignoreErrors=True)

    result = gProxyManager.getDBContents()
    if not result['OK']:
        print("Can't retrieve list of users: %s" % result['Message'])
        DIRAC.exit(1)

    # records are (userName, userDN, userGroup, expirationDate, persistent)
    records = result['Value']['Records']
    dataDict = {}
    now = Time.dateTime()
    for record in records:
        userName, userDN, userGroup, expirationDate, persistent = record
        dt = expirationDate - now
        secsLeft = dt.days * 86400 + dt.seconds
        # keep only proxies valid for longer than the requested lifetime
        if secsLeft > params.proxyLifeTime:
            dataDict.setdefault(userName, []).append((userDN, userGroup, expirationDate, persistent))

    for userName, proxies in dataDict.items():
        print("* %s" % userName)
        for iP, data in enumerate(proxies):
            print(" DN : %s" % data[0])
            print(" group : %s" % data[1])
            print(" not after : %s" % Time.toString(data[2]))
            print(" persistent : %s" % data[3])
            # separator between proxies of the same user
            if iP < len(proxies) - 1:
                print(" -")
    DIRAC.exit(0)


if __name__ == "__main__":
    main()
| yujikato/DIRAC | src/DIRAC/FrameworkSystem/scripts/dirac_admin_users_with_proxy.py | Python | gpl-3.0 | 2,701 | [
"DIRAC"
] | 080239ae148250dfa7f72e3d9cd3617e8bed7cf4a558c03391bbabf51b8e03bd |
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from collections import deque
import logging
import os
import re
import nltk
# True when running against NLTK 3.x, whose parse-tree API differs from 2.x
# (Tree.label() vs Tree.node); consulted wherever the parse tree is walked.
nltk3 = nltk.__version__.startswith('3')
import commoncode
from textcode import analysis
from cluecode import copyrights_hint

logger = logging.getLogger(__name__)
# opt-in verbose logging, enabled through an environment variable
if os.environ.get('SC_COPYRIGHT_DEBUG'):
    import sys
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    logger.setLevel(logging.DEBUG)
"""
Detect and collect copyright statements.
The process consists in:
- prepare and cleanup text
- identify regions of text that may contain copyright (using hints)
- tag the text for parts-of-speech (POS) to identify various copyright
statements parts such as dates, names ("named entities"), etc. This is done
using NLTK POS tagging
- feed the tagged text to a parsing grammar describing actual copyright
statements
- yield copyright statements, years, holder and authors with start and end line
from the parse tree, eventually performing some minor cleanups.
"""
def detect_copyrights(location):
    """
    Yield tuples of:
    (copyrights list, authors list, years list, holders list, start line, end line)
    detected in file at location.
    """
    detector = CopyrightDetector()
    text_lines = analysis.text_lines(location)
    for candidate in candidate_lines(text_lines):
        result = detector.detect(candidate)
        copyrights, authors, years, holders, _start, _end = result
        # only yield regions where something was actually detected
        if copyrights or authors or years or holders:
            yield result
def detect(location):
    """
    Return lists of detected copyrights, authors, years and holders
    in file at location.
    Deprecated legacy entry point.
    """
    copyrights = []
    authors = []
    years = []
    holders = []
    for detected in detect_copyrights(location):
        found_cp, found_auth, found_yr, found_hold = detected[:4]
        copyrights.extend(found_cp)
        authors.extend(found_auth)
        years.extend(found_yr)
        holders.extend(found_hold)
    return copyrights, authors, years, holders
# FIXME: multi-tokens patterns are likely not behaving as expected
# FIXME: patterns could be greatly simplified
# Ordered (regex, POS tag) pairs: the RegexpTagger assigns each token the tag
# of the FIRST regex that matches, so order matters; the final catch-all tags
# everything else as a common noun (NN).
patterns = [
    # TODO: this needs to be simplified:
    # TODO: in NLTK 3.0 this will fail because of this bug:
    # https://github.com/nltk/nltk/issues/1025
    # JUNK are things to ignore
    # All Rights Reserved. should be a terminator/delimiter.
    (r'^([Aa]ll [Rr]ights? [Rr]eserved|ALL RIGHTS? RESERVED|[Aa]ll|ALL)$', 'JUNK'),
    (r'^([Rr]eserved|RESERVED)[,]?$', 'JUNK'),
    # found in crypto certificates and LDAP
    (r'^(O=|OU=|OU|XML)$', 'JUNK'),
    (r'^(Parser|Dual|Crypto|NO|PART|[Oo]riginall?y?|[Rr]epresentations?\.?)$', 'JUNK'),
    (r'^(Refer|Apt|Agreement|Usage|Please|Based|Upstream|Files?|Filename:?|Description:?|Holder?s|HOLDER?S|[Pp]rocedures?|You|Everyone)$', 'JUNK'),
    (r'^(Rights?|Unless|rant|Subject|Acknowledgements?|Special)$', 'JUNK'),
    (r'^(Derivative|Work|[Ll]icensable|[Ss]ince|[Ll]icen[cs]e[\.d]?|[Ll]icen[cs]ors?|under|COPYING)$', 'JUNK'),
    (r'^(TCK|Use|[Rr]estrictions?|[Ii]ntroduction)$', 'JUNK'),
    (r'^([Ii]ncludes?|[Vv]oluntary|[Cc]ontributions?|[Mm]odifications?)$', 'JUNK'),
    (r'^(CONTRIBUTORS?|OTHERS?|Contributors?\:)$', 'JUNK'),
    (r'^(Company:|For|File|Last|[Rr]eleased?|[Cc]opyrighting)$', 'JUNK'),
    (r'^Authori.*$', 'JUNK'),
    (r'^[Bb]uild$', 'JUNK'),
    #
    (r'^Copyleft|LegalCopyright|AssemblyCopyright|Distributed$', 'JUNK'),
    # Bare C char is COPYRIGHT SIGN
    # (r'^C$', 'COPY'),
    # exceptions to composed proper nouns, mostly debian copyright-related
    # FIXME: may be lowercase instead?
    (r'^(Title:?|Debianized-By:?|Upstream-Maintainer:?|Content-MD5)$', 'JUNK'),
    (r'^(Upstream-Author:?|Packaged-By:?)$', 'JUNK'),
    # NOT a copyright symbol (ie. "copyrighted."): treat as NN
    (r'^[Cc](opyright(s|ed)?|OPYRIGHT(S|ED))\.$', 'NN'),
    # copyright word or symbol
    # note the leading @ .... this may be a source of problems
    (r'.?(@?([Cc]opyright)s?:?|[(][Cc][)]|(COPYRIGHT)S?:?)', 'COPY'),
    # copyright in markup, until we strip markup: apache'>Copyright
    (r'[A-Za-z0-9]+[\'">]+[Cc]opyright', 'COPY'),
    # company suffix
    (r'^([Ii]nc[.]?|[I]ncorporated|[Cc]ompany|Limited|LIMITED).?$', 'COMP'),
    # company suffix
    (r'^(INC(ORPORATED|[.])?|CORP(ORATION|[.])?|FOUNDATION|GROUP|COMPANY|[(]tm[)]).?$|[Ff]orum.?', 'COMP'),
    # company suffix
    (r'^([cC]orp(oration|[.])?|[fF]oundation|[Aa]lliance|Working|[Gg]roup|[Tt]echnolog(y|ies)|[Cc]ommunit(y|ies)|[Mm]icrosystems.?|[Pp]roject|[Tt]eams?|[Tt]ech).?$', 'COMP'),
    # company suffix : LLC, LTD, LLP followed by one extra char
    (r'^([Ll][Ll][CcPp]|[Ll][Tt][Dd])\.,$', 'COMP'),
    (r'^([Ll][Ll][CcPp]|[Ll][Tt][Dd])\.?,?$', 'COMP'),
    (r'^([Ll][Ll][CcPp]|[Ll][Tt][Dd])\.$', 'COMP'),
    # company suffix : SA, SAS, AG, AB, AS, CO, labs followed by a dot
    (r'^(S\.?A\.?S?|Sas|sas|A[GBS]|Labs?|[Cc][Oo]\.|Research|INRIA).?$', 'COMP'),
    # (german) company suffix
    (r'^[Gg][Mm][Bb][Hh].?$', 'COMP'),
    # university
    (r'^[Uu]niv([.]|ersit(y|e|at?|ad?))$', 'UNI'),
    # institutes
    (r'^[Ii]nstitut(s|o|os|e|es|et|a|at|as|u|i)?$', 'NNP'),
    # "holders" is considered as a common noun
    (r'^([Hh]olders?|HOLDERS?|[Rr]espective)$', 'NN'),
    # (r'^[Cc]ontributors?\.?', 'NN'),
    # "authors" or "contributors" is interesting, and so a tag of its own
    (r'^[Aa]uthors?$', 'AUTH'),
    (r'^[Aa]uthor\(s\)$', 'AUTH'),
    (r'^[Cc]ontribut(ors?|ing)\.?$', 'AUTH'),
    # commiters is interesting, and so a tag of its own
    (r'[Cc]ommitters?', 'COMMIT'),
    # same for maintainer, developed, etc...
    (r'^(([Rr]e)?[Cc]oded|[Mm]odified|[Mm]ai?nt[ea]ine(d|r)|[Ww]ritten|[Dd]eveloped)$', 'AUTH2'),
    # author
    (r'@author', 'AUTH'),
    # of
    (r'^[Oo][Ff]|[Dd][Ee]$', 'OF'),
    # in
    (r'^in$', 'IN'),
    # by
    (r'^by$', 'BY'),
    # conjunction: and
    (r'^([Aa]nd|&)$', 'CC'),
    # conjunction: or. Even though or is not conjunctive ....
    # (r'^or$', 'CC'),
    # conjunction: or. Even though or is not conjunctive ....
    # (r'^,$', 'CC'),
    # ie. in things like "Copyright (c) 2012 John Li and others"
    (r'^others$', 'OTH'),
    # in year ranges: dash, or 'to': "1990-1995", "1990/1995" or "1990 to 1995"
    (r'^([-/]|to)$', 'DASH'),
    # explicitly ignoring these words: FIXME: WHY?
    (r'^([Tt]his|THIS|[Pp]ermissions?|PERMISSIONS?|All)$', 'NN'),
    # in dutch/german names, like Marco van Basten, or Klemens von Metternich
    # and Spanish/French Da Siva and De Gaulle
    (r'^(([Vv][ao]n)|[Dd][aeu])$', 'VAN'),
    # year
    (r'^[(]?(19|20)[0-9]{2}((\s)*([,-]|to)(\s)*(19|20)?[0-9]{2})*[)]?', 'YR'),
    # cardinal numbers
    (r'^-?[0-9]+(.[0-9]+)?.?$', 'CD'),
    # exceptions to proper nouns
    (r'^(The|Commons|AUTHOR|software)$', 'NN'),
    # composed proper nouns, ie. Jean-Claude or ST-Microelectronics
    # FIXME: what about a variant with spaces around the dash?
    (r'^[A-Z][a-zA-Z]*[-][A-Z]?[a-zA-Z]+.?$', 'NNP'),
    # proper nouns with digits
    (r'^[A-Z][a-z0-9]+.?$', 'NNP'),
    # saxon genitive, ie. Philippe's
    (r"^[A-Z][a-z]+[']s$", 'NNP'),
    # dotted name, ie. P.
    (r"^([A-Z][.]?|[A-Z]+[\.])$", 'PN'),
    # proper noun with some separator and trailing comma
    (r"^[A-Z]+[.][A-Z][a-z]+[,]?$", 'NNP'),
    # proper noun with apostrophe ': D'Orleans, D'Arcy, T'so, Ts'o
    (r"^[A-Z][[a-z]?['][A-Z]?[a-z]+[,.]?$", 'NNP'),
    # proper noun with apostrophe ': d'Itri
    (r"^[a-z]['][A-Z]?[a-z]+[,\.]?$", 'NNP'),
    # all CAPS word, at least 1 char long such as MIT, including an optional trailing comma or dot
    (r'^[A-Z0-9]+[,]?$', 'CAPS'),
    # all caps word 3 chars and more, enclosed in parens
    (r'^\([A-Z0-9]{2,}\)$', 'CAPS'),
    # proper noun:first CAP, including optional trailing comma
    (r'^[A-Z][a-zA-Z0-9]+[,]?$', 'NNP'),
    # email
    (r'[a-zA-Z0-9\+_\-\.\%]+@[a-zA-Z0-9][a-zA-Z0-9\+_\-\.\%]*\.[a-zA-Z]{2,5}?', 'EMAIL'),
    # email eventually in parens or brackets. The closing > or ) is optional
    (r'[\<\(][a-zA-Z0-9\+_\-\.\%]+@[a-zA-Z0-9][a-zA-Z0-9\+_\-\.\%]*\.[a-zA-Z]{2,5}?[\>\)]?', 'EMAIL'),
    # URLS such as ibm.com
    # TODO: add more extensions?
    (r'<?a?.(href)?.[a-z0-9A-Z\-\.\_]+\.(com|net|info|org|us|io|edu|co\.[a-z][a-z]|eu|biz)', 'URL'),
    # derived from regex in cluecode.finder
    (r'<?a?.(href)?.('
     r'(?:http|ftp|sftp)s?://[^\s<>\[\]"]+'
     r'|(?:www|ftp)\.[^\s<>\[\]"]+'
     r')', 'URL'),
    # AT&T (the company), needed special handling
    (r'^AT&T$', 'ATT'),
    # comma as a conjunction
    (r'^,$', 'CC'),
    # .\ is not a noun
    (r'^\.\\$', 'JUNK'),
    # nouns (default)
    (r'.+', 'NN'),
]
# Comments in the Grammar are lines that start with #
# NLTK RegexpParser chunking grammar: rules are applied top to bottom over the
# POS-tagged token stream, building COPYRIGHT/COPYRIGHT2/AUTHOR chunks out of
# the intermediate NAME/COMPANY/YR-RANGE groupings. The string content is
# runtime data and must not be edited casually.
grammar = """
COPY: {<COPY>}
YR-RANGE: {<YR>+ <CC> <YR>}
YR-RANGE: {<YR> <DASH>* <YR|CD>+}
YR-RANGE: {<CD>? <YR>+}
YR-RANGE: {<YR>+ }
NAME: {<NNP> <VAN|OF> <NN*> <NNP>}
NAME: {<NNP> <PN> <VAN> <NNP>}
# the Regents of the University of California
COMPANY: {<BY>? <NN> <NNP> <OF> <NN> <UNI> <OF> <COMPANY|NAME|NAME2|NAME3><COMP>?}
# "And" some name
ANDCO: {<CC>+ <NN> <NNP>+<UNI|COMP>?}
ANDCO: {<CC>+ <NNP> <NNP>+<UNI|COMP>?}
ANDCO: {<CC>+ <COMPANY|NAME|NAME2|NAME3>+<UNI|COMP>?}
COMPANY: {<COMPANY|NAME|NAME2|NAME3> <ANDCO>+}
# rare "Software in the public interest, Inc."
COMPANY: {<COMP> <CD> <COMP>}
COMPANY: {<NNP> <IN><NN> <NNP> <NNP>+<COMP>?}
COMPANY: {<NNP> <CC> <NNP> <COMP>}
COMPANY: {<NNP|CAPS> <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <COMP> <COMP>?}
COMPANY: {<UNI|NNP> <VAN|OF> <NNP>+ <UNI>?}
COMPANY: {<NNP>+ <UNI>}
COMPANY: {<COMPANY> <CC> <COMPANY>}
COMPANY: {<ATT> <COMP>?}
COMPANY: {<COMPANY> <CC> <NNP>}
# Group 42, Inc
# Typical names
NAME: {<NNP|PN>+ <NNP>+}
NAME: {<NNP> <PN>? <NNP>+}
NAME: {<NNP> <NNP>}
NAME: {<NNP> <NN> <EMAIL>}
NAME: {<NNP> <PN|VAN>? <PN|VAN>? <NNP>}
NAME: {<NNP> <NN> <NNP>}
NAME: {<NNP> <COMMIT>}
NAME: {<NN> <NNP> <ANDCO>}
NAME: {<NN>? <NNP> <CC> <NAME>}
NAME: {<NN>? <NNP> <OF> <NN>? <NNP> <NNP>?}
NAME: {<NAME> <CC> <NAME>}
COMPANY: {<NNP> <IN> <NN>? <COMPANY>}
NAME2: {<NAME> <EMAIL>}
NAME3: {<YR-RANGE> <NAME2|COMPANY>+}
NAME: {<NAME|NAME2>+ <OF> <NNP> <OF> <NN>? <COMPANY>}
NAME: {<NAME|NAME2>+ <CC|OF>? <NAME|NAME2|COMPANY>}
NAME3: {<YR-RANGE> <NAME>+}
NAME: {<NNP> <OF> <NNP>}
NAME: {<NAME> <NNP>}
NAME: {<NN|NNP|CAPS>+ <CC> <OTH>}
NAME: {<NNP> <CAPS>}
NAME: {<CAPS> <DASH>? <NNP|NAME>}
NAME: {<NNP> <CD> <NNP>}
NAME: {<COMP> <NAME>+}
NAME: {<NNP|CAPS>+ <AUTH>}
# Companies
COMPANY: {<NAME|NAME2|NAME3|NNP>+ <OF> <NN>? <COMPANY|COMP>}
COMPANY: {<NNP> <COMP> <COMP>}
COMPANY: {<NN>? <COMPANY|NAME|NAME2> <CC> <COMPANY|NAME|NAME2>}
COMPANY: {<COMP|NNP> <NN> <COMPANY> <NNP>+}
COMPANY: {<COMPANY> <CC> <AUTH>}
COMPANY: {<NN> <COMP>+}
COMPANY: {<URL>}
# Trailing Authors
COMPANY: {<NAME|NAME2|NAME3|NNP>+ <AUTH>}
# "And" some name
ANDCO: {<CC> <NNP> <NNP>+}
ANDCO: {<CC> <COMPANY|NAME|NAME2|NAME3>+}
COMPANY: {<COMPANY|NAME|NAME2|NAME3> <ANDCO>+}
NAME: {<NNP> <ANDCO>+}
NAME: {<BY> <NN> <AUTH>}
# Various forms of copyright statements
COPYRIGHT: {<COPY> <NAME> <COPY> <YR-RANGE>}
COPYRIGHT: {<COPY> <COPY> <BY>? <COMPANY|NAME*|YR-RANGE>* <BY>? <EMAIL>+}
COPYRIGHT: {<COPY> <BY>? <COMPANY|NAME*|YR-RANGE>* <BY>? <EMAIL>+}
COPYRIGHT: {<COPY> <COPY> <NAME|NAME2|NAME3> <CAPS> <YR-RANGE>}
COPYRIGHT: {<COPY> <NAME|NAME2|NAME3> <CAPS> <YR-RANGE>}
COPYRIGHT: {<COPY> <COPY> <NAME|NAME2|NAME3>+ <YR-RANGE>*}
COPYRIGHT: {<COPY> <NAME|NAME2|NAME3>+ <YR-RANGE>*}
COPYRIGHT: {<COPY> <COPY> <CAPS|NNP>+ <CC> <NN> <COPY> <YR-RANGE>?}
COPYRIGHT: {<COPY> <CAPS|NNP>+ <CC> <NN> <COPY> <YR-RANGE>?}
COPYRIGHT: {<COPY> <COPY> <BY>? <COMPANY|NAME*>+ <YR-RANGE>*}
COPYRIGHT: {<COPY> <BY>? <COMPANY|NAME*>+ <YR-RANGE>*}
COPYRIGHT: {<NNP>? <COPY> <COPY> (<YR-RANGE>+ <BY>? <NN>? <COMPANY|NAME|NAME2>+ <EMAIL>?)+}
COPYRIGHT: {<NNP>? <COPY> (<YR-RANGE>+ <BY>? <NN>? <COMPANY|NAME|NAME2>+ <EMAIL>?)+}
COPYRIGHT: {<COPY> <COPY> <NN> <NAME> <YR-RANGE>}
COPYRIGHT: {<COPY> <NN> <NAME> <YR-RANGE>}
COPYRIGHT: {<COPY> <COPY> <COMP>+}
COPYRIGHT: {<COPY> <COPY> <NN>+ <COMPANY|NAME|NAME2>+}
COPYRIGHT: {<COPY> <COPY> <NN> <NN>? <COMP> <YR-RANGE>?}
COPYRIGHT: {<COPY> <NN> <NN>? <COMP> <YR-RANGE>?}
COPYRIGHT: {<COPY> <COPY> <NN> <NN>? <COMP> <YR-RANGE>?}
COPYRIGHT: {<COPY> <NN> <NN>? <COMPANY> <YR-RANGE>?}
COPYRIGHT: {<COPY> <COPY> <YR-RANGE|NNP> <CAPS|BY>? <NNP|YR-RANGE|NAME>+}
COPYRIGHT: {<COPY> <YR-RANGE|NNP> <CAPS|BY>? <NNP|YR-RANGE|NAME>+}
COPYRIGHT: {<COPY> <COPY> <NNP>+}
# Copyright (c) 1995, 1996 The President and Fellows of Harvard University
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NN> <NNP> <ANDCO>}
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NN> <AUTH>}
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <BY> <NN> <NN> <NAME>}
COPYRIGHT2: {<COPY> <YR-RANGE> <BY> <NN> <NN> <NAME>}
COPYRIGHT2: {<COPY> <COPY><NN>? <COPY> <YR-RANGE> <BY> <NN>}
COPYRIGHT2: {<COPY> <NN>? <COPY> <YR-RANGE> <BY> <NN>}
COPYRIGHT2: {<COPY> <COPY><NN> <YR-RANGE> <BY> <NAME>}
COPYRIGHT2: {<COPY> <NN> <YR-RANGE> <BY> <NAME>}
COPYRIGHT2: {<COPY> <COPY><YR-RANGE> <DASH> <NAME2|NAME>}
COPYRIGHT2: {<COPY> <YR-RANGE> <DASH> <NAME2|NAME>}
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NNP> <NAME>}
COPYRIGHT2: {<COPY> <YR-RANGE> <NNP> <NAME>}
COPYRIGHT2: {<NAME> <COPY> <YR-RANGE>}
COPYRIGHT2: {<COPY> <COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>*}
COPYRIGHT2: {<COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>*}
COPYRIGHT2: {<COPY> <COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <COMPANY>}
COPYRIGHT2: {<COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <COMPANY>}
COPYRIGHT2: {<COPY> <COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <DASH> <COMPANY>}
COPYRIGHT2: {<COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <DASH> <COMPANY>}
COPYRIGHT2: {<NNP|NAME|COMPANY> <COPYRIGHT2>}
COPYRIGHT: {<COPYRIGHT> <NN> <COMPANY>}
COPYRIGHT: {<COPY> <COPY> <BY>? <NN> <COMPANY>}
COPYRIGHT: {<COPY> <BY>? <NN> <COMPANY>}
COPYRIGHT: {<COMPANY> <NN> <NAME> <COPYRIGHT2>}
COPYRIGHT: {<COPYRIGHT2> <COMP> <COMPANY>}
COPYRIGHT: {<COMPANY> <NN> <COPYRIGHT2>}
COPYRIGHT: {<COPYRIGHT2> <NNP> <CC> <COMPANY>}
# copyrights in the style of Scilab/INRIA
COPYRIGHT: {<NNP> <NN> <COPY> <NNP>}
COPYRIGHT: {<NNP> <COPY> <NNP>}
# Authors
AUTH: {<AUTH2>+ <BY>}
AUTHOR: {<AUTH>+ <NN>? <COMPANY|NAME|YR-RANGE>* <BY>? <EMAIL>+}
AUTHOR: {<AUTH>+ <NN>? <COMPANY|NAME|NAME2>+ <YR-RANGE>*}
AUTHOR: {<AUTH>+ <YR-RANGE>+ <BY>? <COMPANY|NAME|NAME2>+}
AUTHOR: {<AUTH>+ <YR-RANGE|NNP> <NNP|YR-RANGE>+}
AUTHOR: {<AUTH>+ <NN|CAPS>? <YR-RANGE>+}
AUTHOR: {<COMPANY|NAME|NAME2>+ <AUTH>+ <YR-RANGE>+}
AUTHOR: {<YR-RANGE> <NAME|NAME2>+}
AUTHOR: {<NAME2>+}
AUTHOR: {<AUTHOR> <CC> <NN>? <AUTH>}
AUTHOR: {<BY> <EMAIL>}
ANDAUTH: {<CC> <AUTH|NAME>+}
AUTHOR: {<AUTHOR> <ANDAUTH>+}
# Compounded statements usings authors
# found in some rare cases with a long list of authors.
COPYRIGHT: {<COPY> <BY> <AUTHOR>+ <YR-RANGE>*}
COPYRIGHT: {<AUTHOR> <COPYRIGHT2>}
COPYRIGHT: {<AUTHOR> <YR-RANGE>}
"""
def strip_numbers(s):
    """
    Return `s` with every space-separated word made only of digits removed.
    Falsy values (None, the empty string) are returned unchanged.

    Note: the original docstring claimed exception handling that the code
    never had; a truthy non-string argument will raise AttributeError.
    """
    if s:
        s = u' '.join([x for x in s.split(' ') if not x.isdigit()])
    return s
def strip_some_punct(s):
    """
    Return `s` stripped from some leading and trailing punctuation
    characters; falsy values are returned unchanged.
    """
    if not s:
        return s
    # strip commas, quotes, braces, colons and semicolons from both ends,
    # then a leading close-paren and some trailing connector characters
    stripped = s.strip(''','"}:;''')
    stripped = stripped.lstrip(')')
    stripped = stripped.rstrip('&(-_')
    return stripped
def fix_trailing_space_dot(s):
    """
    Return `s` with a trailing " ." (space then period) collapsed into a
    plain ".". Other values are returned unchanged.

    The original docstring was a copy-paste of strip_some_punct's and did
    not describe this function.
    """
    if s and s.endswith(' .'):
        s = s[:-2] + '.'
    return s
def strip_unbalanced_parens(s, parens='()'):
    """
    Return `s` with every unbalanced paren-like character replaced by a
    single space.

    `parens` is a two-character string giving the opening and closing
    characters to balance, such as (), <>, [] or {}. Balanced pairs are
    left untouched; an opener with no matching closer, or a closer with no
    matching opener, is replaced by one space character.
    """
    opener, closer = parens
    if opener not in s and closer not in s:
        return s

    # positions of characters to blank out
    to_blank = set()
    # stack of currently-unmatched opener positions
    pending_openers = []
    for pos, ch in enumerate(s):
        if ch == opener:
            pending_openers.append(pos)
        elif ch == closer:
            if pending_openers:
                pending_openers.pop()
            else:
                # closer with no opener
                to_blank.add(pos)
    # any opener left on the stack never found its closer
    to_blank.update(pending_openers)

    replaced = [' ' if pos in to_blank else ch for pos, ch in enumerate(s)]
    # preserve the input's string type (str vs unicode in legacy code)
    return type(s)('').join(replaced)
def refine_copyright(c):
    """
    Refine a detected copyright string and return it.
    FIXME: the grammar should not allow this to happen.
    """
    c = strip_some_punct(c)
    c = fix_trailing_space_dot(c)
    c = strip_unbalanced_parens(c, '()')
    c = strip_unbalanced_parens(c, '<>')
    c = strip_unbalanced_parens(c, '[]')
    c = strip_unbalanced_parens(c, '{}')
    # FIXME: this should be in the grammar, but is hard to get there right
    # these are often artifacts of markup
    c = c.replace('Copyright Copyright', 'Copyright')
    c = c.replace('Copyright copyright', 'Copyright')
    c = c.replace('copyright copyright', 'Copyright')
    c = c.replace('copyright Copyright', 'Copyright')
    c = c.replace('copyright\'Copyright', 'Copyright')
    c = c.replace('copyright"Copyright', 'Copyright')
    c = c.replace('copyright\' Copyright', 'Copyright')
    c = c.replace('copyright" Copyright', 'Copyright')
    s = c.split()
    # guard: everything may have been stripped away above, leaving no words;
    # the original raised IndexError on s[-1] in that case
    if not s:
        return ''
    # fix trailing garbage, captured by the grammar
    if s[-1] in ('Parts', 'Any',):
        s = s[:-1]
    # this is hard to catch otherwise, unless we split the author
    # vs copyright grammar in two. Note that AUTHOR and Authors should be kept
    if s and s[-1] == 'Author':
        s = s[:-1]
    s = u' '.join(s)
    return s.strip()
def refine_author(c):
    """
    Refine a detected author string and return it.
    FIXME: the grammar should not allow this to happen.
    """
    c = strip_some_punct(c)
    c = strip_numbers(c)
    c = strip_unbalanced_parens(c, '()')
    c = strip_unbalanced_parens(c, '<>')
    c = strip_unbalanced_parens(c, '[]')
    c = strip_unbalanced_parens(c, '{}')
    c = c.split()
    # guard: everything may have been stripped away above, leaving no words;
    # the original raised IndexError on c[0] in that case
    if not c:
        return ''
    # this is hard to catch otherwise, unless we split the author vs copyright grammar in two
    if c[0].lower() == 'author':
        c = c[1:]
    c = u' '.join(c)
    return c.strip()
def refine_date(c):
    """
    Refine a detected date or date range by stripping stray punctuation.
    FIXME: the grammar should not allow this to happen.
    """
    return strip_some_punct(c)
# Known junk "copyright" strings that the grammar detects but that are not
# actual copyright statements. Kept at module level as a frozenset so it is
# built once instead of on every is_junk() call.
_JUNK_COPYRIGHTS = frozenset([
    'copyrighted by their authors',
    'copyrighted by their authors.',
    'copyright holder or other authorized',
    'copyright holder who authorizes',
    'copyright holder has authorized',
    'copyright holder nor the author',
    'copyright holder(s) or the author(s)',
    'copyright owner or entity authorized',
    'copyright owner or contributors',
    'copyright for a new language file should be exclusivly the authors',
    'copyright holder or said author',
    'copyright holder, or any author',
    'copyrighted material, only this license, or another one contracted with the authors',
    'copyright notices, authorship',
    'copyright holder means the original author(s)',
    "copyright notice. timevar.def's author",
    "copyright holder or simply that it is author-maintained'.",
    "copyright holder or simply that is author-maintained'.",
    '(c) if you bring a patent claim against any contributor',
    'copyright-check writable-files m4-check author_mark_check',
    # 'copyrighting it yourself or claiming authorship'
])


def is_junk(c):
    """
    Return True if string `c` is a junk copyright that cannot be resolved
    otherwise by the parsing.
    It would be best not to have to resort to this, but this is practical.
    """
    return c.lower() in _JUNK_COPYRIGHTS
class CopyrightDetector(object):
    """
    Class to detect copyrights and authorship.

    Uses an NLTK regex POS tagger (driven by the module-level `patterns`)
    and an NLTK regex chunker (driven by the module-level `grammar`) to
    build a parse tree of copyright statements from tokenized text lines.
    """
    def __init__(self):
        self.tagger = nltk.RegexpTagger(patterns)
        self.chunker = nltk.RegexpParser(grammar)

    @staticmethod
    def as_str(node):
        """
        Return a parse tree node as a space-normalized string.
        """
        node_string = ' '.join(k for k, _ in node.leaves())
        return u' '.join(node_string.split())

    def detect(self, numbered_lines):
        """
        Return a tuple of
        (copyrights, authors, years, holders, start_line, end_line)
        detected in a sequence of (line number, line text) tuples.
        start_line and end_line are None when no tokens were found.
        """
        numbered_lines = list(numbered_lines)
        numbers = [n for n, _l in numbered_lines]
        start_line = min(numbers)
        end_line = max(numbers)
        logger.debug('CopyrightDetector:detect:lines numbers: %(start_line)d->%(end_line)d' % locals())
        tokens = self.get_tokens(numbered_lines)
        # we accumulate detected items in these synchronized lists
        # this could be a single list of namedtuples
        # or a list of dicts instead
        copyrights, authors, years, holders = [], [], [], []
        if not tokens:
            return copyrights, authors, years, holders, None, None
        # first, POS tag each token using token regexes
        tagged_text = self.tagger.tag(tokens)
        logger.debug('CopyrightDetector:tagged_text: ' + str(tagged_text))
        # then build a parse tree based on tagged tokens
        tree = self.chunker.parse(tagged_text)
        logger.debug('CopyrightDetector:parse tree: ' + str(tree))

        def collect_year_and_holder(detected_copyright):
            """
            Walk the a parse sub-tree starting with the `detected_copyright`
            node collecting all years and holders.
            """
            for copyr in detected_copyright:
                if isinstance(copyr, nltk.tree.Tree):
                    logger.debug('n: ' + str(copyr))
                    node_text = CopyrightDetector.as_str(copyr)
                    # NLTK 3 renamed Tree.node to Tree.label()
                    if 'YR-RANGE' in (copyr.label() if nltk3 else copyr.node):
                        years.append(refine_date(node_text))
                    elif ('NAME' == (copyr.label() if nltk3 else copyr.node)
                          or 'COMPANY' in (copyr.label() if nltk3 else copyr.node)):
                        # FIXME : this would wreck things like 23andme
                        # where a company name contains numbers
                        holders.append(refine_author(node_text))
                        logger.debug('CopyrightDetector: node_text: ' + node_text)
                    collect_year_and_holder(copyr)

        # then walk the parse tree, collecting copyrights, years and authors
        for tree_node in tree:
            if isinstance(tree_node, nltk.tree.Tree):
                node_text = CopyrightDetector.as_str(tree_node)
                if 'COPYRIGHT' in (tree_node.label() if nltk3 else tree_node.node):
                    if node_text and node_text.strip():
                        refined = refine_copyright(node_text)
                        if not is_junk(refined):
                            copyrights.append(refined)
                            collect_year_and_holder(tree_node)
                elif (tree_node.label() if nltk3 else tree_node.node) == 'AUTHOR':
                    authors.append(refine_author(node_text))

        return copyrights, authors, years, holders, start_line, end_line

    def get_tokens(self, numbered_lines):
        """
        Return an iterable of tokens from lines of text.
        """
        tokens = []
        # simple tokenization: spaces and some punctuation
        splitter = re.compile('[\\t =;]+')
        for _line_number, line in numbered_lines:
            line = line.strip()
            if line:
                line = prepare_text_line(line)
                if line :
                    line = strip_markup(line)
                    if line and line.strip():
                        for tok in splitter.split(line):
                            # strip trailing quotes and ignore empties
                            tok = tok.strip("' ")
                            if not tok:
                                continue
                            # strip trailing colons: why?
                            tok = tok.rstrip(':').strip()
                            # strip leading @: : why?
                            tok = tok.lstrip('@').strip()
                            if tok and tok not in (':',):
                                tokens.append(tok)
        logger.debug('CopyrightDetector:tokens: ' + repr(list(tokens)))
        return tokens
def is_candidate(line):
    """
    Return a truthy value when a line is a candidate line for copyright
    detection, i.e. it has content and carries a copyright hint marker.
    """
    lowered = prepare_text_line(line.lower())
    return (has_content(lowered)
            and any(marker in lowered for marker in copyrights_hint.statement_markers))
def has_content(line):
    """
    Return the line stripped from all non-word characters (white space and
    punctuation): truthy when the line has any remaining content.

    Note: digits are word characters, so a digits-only line IS content;
    the original docstring wrongly claimed digits were ignored.
    """
    return re.sub(r'\W+', '', line)
def is_all_rights_reserved(line):
    """
    Return True if a line ends with "all rights reserved"-like statements.
    """
    cleaned = prepare_text_line(line)
    # collapse to word characters only, then normalize case before comparing
    cleaned = re.sub(r'\W+', '', cleaned)
    cleaned = cleaned.strip().lower()
    return cleaned.endswith(('rightreserved', 'rightsreserved'))
def candidate_lines(lines):
    """
    Yield lists of candidate lines where each list element is a tuple of
    (line number, line text).
    A candidate line is a line of text that may contain copyright statements.
    A few lines before and after a candidate line are also included.

    Implemented as a small state machine: `in_copyright` counts how many
    trailing context lines are still wanted after a candidate line, and
    `previous` remembers the last non-candidate line so one line of leading
    context can be attached when a candidate appears.
    """
    candidates = deque()
    previous = None
    # used as a state and line counter
    in_copyright = 0
    for line_number, line in enumerate(lines):
        # the first line number is ONE, not zero
        numbered_line = (line_number + 1, line)
        if is_candidate(line):
            # the state is now "in copyright"
            in_copyright = 2
            # we keep one line before a candidate line if any
            if previous:
                candidates.append(previous)
                previous = None
            # we keep the candidate line and yield if we reached the end
            # of a statement
            candidates.append(numbered_line)
            if is_all_rights_reserved(line):
                yield list(candidates)
                candidates.clear()
                in_copyright = 0
        else:
            if in_copyright:
                # if the previous line was a candidate
                # then we keep one line after that candidate line
                if has_content(line):
                    candidates.append(numbered_line)
                    # and decrement our state
                    in_copyright -= 1
                else:
                    if candidates:
                        yield list(candidates)
                        candidates.clear()
                    in_copyright = 0
            else:
                # if are neither a candidate line nor the line just after
                # then we yield the accumulated lines if any
                if candidates:
                    yield list(candidates)
                    candidates.clear()
                # and we keep track of this line as "previous"
                if has_content(line):
                    previous = numbered_line
                else:
                    previous = None
    # finally
    if candidates:
        yield list(candidates)
# Matches HTML/XML-like tags. Compiled once at import time instead of on
# every strip_markup() call, which sat on the tokenization hot path.
_HTML_TAG_RE = re.compile(
    r'<'
    r'[(--)\?\!\%\/]?'
    r'[a-zA-Z0-9#\"\=\s\.\;\:\%\&?!,\+\*\-_\/]+'
    r'\/?>',
    re.MULTILINE | re.UNICODE
)


def strip_markup(text):
    """
    Return `text` with markup tags replaced by a single space.
    Falsy values are returned unchanged.
    """
    if text:
        text = _HTML_TAG_RE.sub(' ', text)
    return text
# Common words that may start with a capital letter and be mistaken for a
# named entity (e.g. a company name) by the POS tagger; they are lowercased
# before tagging by lowercase_well_known_word().
# NOTE: fixed a missing comma after 'License.' which made Python concatenate
# it with 'Licensee' into the bogus entry 'License.Licensee', so neither
# 'License.' nor 'Licensee' ever matched.
COMMON_WORDS = set([
    'Unicode',
    'Modified',
    'NULL',
    'FALSE', 'False',
    'TRUE', 'True',
    'Last',
    'Predefined',
    'If',
    'Standard',
    'Version', 'Versions',
    'Package', 'PACKAGE',
    'Powered',
    'Licensed', 'License', 'License.', 'Licensee', 'License:', 'License-Alias:',
    'Legal',
    'Entity',
    'Indemnification.',
    'AS', 'IS',
    'See',
    'This',
    'Java',
    'DoubleClick',
    'DOM', 'SAX', 'URL',
    'Operating System',
    'Original Software',
    'Berkeley Software Distribution',
    'Software Release', 'Release',
    'IEEE Std',
    'BSD',
    'POSIX',
    'Derivative Works',
    'Intellij IDEA',
    'README', 'NEWS',
    'ChangeLog', 'CHANGElogger', 'Changelog',
    'Redistribution',
])
def lowercase_well_known_word(text):
    """
    Return text with certain words lowercased.

    Rationale: some common words can start with a capital letter and be
    mistaken for a named entity, because capitalized words are often
    company names. Words are matched against the COMMON_WORDS set.
    """
    processed_lines = []
    for original_line in text.splitlines(True):
        tokens = [
            token.lower() if token in COMMON_WORDS else token
            for token in original_line.split()
        ]
        processed_lines.append(' '.join(tokens))
    return '\n'.join(processed_lines)
# FIXME: instead of using functions, use plain re and let the re cache do its work
def IGNORED_PUNCTUATION_RE():
    """Return a regex matching runs of punctuation that is ignored."""
    return re.compile(r'[*#"%\[\]\{\}`]+',
                      re.IGNORECASE | re.MULTILINE | re.UNICODE)
def ASCII_LINE_DECO_RE():
    """Return a regex matching ASCII line decorations such as ---- or ====."""
    decorations = r'[-_=!\\*]{2,}'
    return re.compile(decorations)
def ASCII_LINE_DECO2_RE():
    """Return a regex matching runs of three or more slashes."""
    slashes = r'/{3,}'
    return re.compile(slashes)
def WHITESPACE_RE():
    """Return a regex matching runs of one or more spaces."""
    spaces = r' +'
    return re.compile(spaces)
def MULTIQUOTES_RE():
    """Return a regex matching runs of two or more single quotes."""
    quotes = r"\'{2,}"
    return re.compile(quotes)
# TODO: add debian <s> </s> POS name taggings
def DEBIAN_COPYRIGHT_TAGS_RE():
    """Return a regex matching Debian copyright <s> tags (these are removed,
    not replaced by a space)."""
    tags = r"(\<s\>|\<s\\/>)"
    return re.compile(tags)
def prepare_text_line(line):
    """
    Prepare a line of text for copyright detection.

    Strips comment markers and ASCII decorations, normalizes (c) signs,
    quotes, a few HTML entities and whitespace, transliterates the text to
    ASCII and lowercases well-known common words. Returns the cleaned line.
    """
    # FIXME: maintain the original character positions

    # strip whitespace
    line = line.strip()
    # strip comment markers
    # common comment characters
    line = line.strip('\\/*#%;')
    # un common comment line prefix in dos
    # NOTE: regex literals are raw strings: '\@' and '\.' are invalid escape
    # sequences in plain string literals and warn on recent Pythons.
    line = re.sub(r'^rem ', ' ', line)
    line = re.sub(r'^\@rem ', ' ', line)
    # un common comment line prefix in autotools am/in
    line = re.sub(r'^dnl ', ' ', line)
    # un common comment line prefix in man pages
    line = re.sub(r'^\.\"', ' ', line)
    # un common pipe chars in some ascii art
    line = line.replace('|', ' ')
    # normalize copyright signs and spacing around them
    line = line.replace('(C)', ' (c) ')
    line = line.replace('(c)', ' (c) ')
    # the case of \251 is tested by 'weirdencoding.h'
    line = line.replace(u'\251', u' (c) ')
    # also handle common mojibake (mis-decoded) forms of the (c) sign
    line = line.replace('©', ' (c) ')
    line = line.replace('©', ' (c) ')
    line = line.replace('©', ' (c) ')
    line = line.replace(u'\xa9', ' (c) ')
    # FIXME: what is \xc2???
    line = line.replace(u'\xc2', '')
    # TODO: add more HTML entities replacements
    # see http://www.htmlhelp.com/reference/html40/entities/special.html
    # convert html entities CR LF to space
    line = line.replace(u' ', ' ')
    line = line.replace(u' ', ' ')
    line = line.replace(u' ', ' ')
    # normalize (possibly repeated) quotes to unique single quote '
    # backticks ` and "
    line = line.replace(u'`', "'")
    line = line.replace(u'"', "'")
    line = re.sub(MULTIQUOTES_RE(), "'", line)
    # quotes to space? but t'so will be wrecked
    # line = line.replace(u"'", ' ')
    # some trailing garbage ')
    line = line.replace("')", ' ')
    # note that we do not replace the debian tag by a space: we remove it
    line = re.sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)
    line = re.sub(IGNORED_PUNCTUATION_RE(), ' ', line)
    # tabs to spaces
    line = line.replace('\t', ' ')
    # normalize spaces around commas
    line = line.replace(' , ', ', ')
    # remove ASCII "line decorations"
    # such as in --- or === or !!! or *****
    line = re.sub(ASCII_LINE_DECO_RE(), ' ', line)
    line = re.sub(ASCII_LINE_DECO2_RE(), ' ', line)
    # Replace escaped literal \0 \n \r \t that may exist as-is by a space
    # such as in code literals: a="\\n some text"
    line = line.replace('\\r', ' ')
    line = line.replace('\\n', ' ')
    line = line.replace('\\t', ' ')
    line = line.replace('\\0', ' ')
    # TODO: Why?
    # replace contiguous spaces with only one occurrence
    # line = re.sub(WHITESPACE_RE(), ' ', text)
    # normalize to ascii text
    line = commoncode.text.toascii(line)
    # logger.debug("ascii_only_text: " + text)
    # strip verbatim back slash and comment signs again at both ends of a line
    # FIXME: this is done at the start of this function already
    line = line.strip('\\/*#%;')
    # normalize to use only LF as line endings so we can split correctly
    # and keep line endings
    line = commoncode.text.unixlinesep(line)
    # why?
    line = lowercase_well_known_word(line)
    return line
| vinodpanicker/scancode-toolkit | src/cluecode/copyrights.py | Python | apache-2.0 | 35,651 | [
"VisIt"
] | f6d00d3b7289b3ff7743d3899db98c2f74efb57d8975198235b226ed16464dc2 |
# Disable the AST optimizer on this module: these tests must inspect the
# optimizer's output on unoptimized input ASTs.
__fatoptimizer__ = {'enabled': False}
import ast
import fatoptimizer.convert_const
import fatoptimizer.namespace
import fatoptimizer.optimizer
import fatoptimizer.tools
import io
import re
import sys
from fatoptimizer.tools import UNSET
import textwrap
import unittest
from unittest import mock
if sys.version_info < (3, 5):
    # RecursionError was introduced in Python 3.5
    fatoptimizer.tools.RecursionError = RuntimeError
# Decorator that skips a test unless running on Python 3.5 or newer.
need_python35 = unittest.skipIf(sys.version_info < (3, 5), "need python 3.5")
if not hasattr(ast, 'Constant'):
    # backport ast.Constant of the PEP 511
    class Constant(ast.AST):
        """Backport of the ast.Constant literal node for old Pythons."""
        _attributes = ('lineno', 'col_offset')
        _fields = ('value',)
        def __init__(self, value):
            self.value = value
    ast.Constant = Constant
def format_code(code):
    """Dedent `code` and strip its leading/trailing whitespace."""
    dedented = textwrap.dedent(code)
    return dedented.strip()
def compile_ast(source):
    """Dedent and strip `source`, then parse it into an ast.Module."""
    cleaned = textwrap.dedent(source).strip()
    return ast.parse(cleaned, '<string>', 'exec')
def compile_ast_expr(source):
    """Parse `source`, which must contain exactly one expression statement,
    and return the expression's AST node."""
    module = ast.parse(source, '<string>', 'exec')
    assert isinstance(module, ast.Module)
    statements = module.body
    assert len(statements) == 1
    statement = statements[0]
    assert isinstance(statement, ast.Expr)
    return statement.value
def specialize_constant(node, value):
    """Build the legacy AST literal node (NameConstant/Num/Str/Bytes/Tuple)
    holding `value`, copying line numbers from `node`.

    Tuples are converted recursively; any other type raises ValueError.
    """
    if value is None or isinstance(value, bool):
        result = ast.NameConstant(value=value)
    elif isinstance(value, (int, float, complex)):
        result = ast.Num(n=value)
    elif isinstance(value, str):
        result = ast.Str(s=value)
    elif isinstance(value, bytes):
        result = ast.Bytes(s=value)
    elif isinstance(value, tuple):
        items = [specialize_constant(node, item) for item in value]
        result = ast.Tuple(elts=items, ctx=ast.Load())
    else:
        raise ValueError("unknown constant: %r" % value)
    fatoptimizer.tools.copy_lineno(node, result)
    return result
def builtin_guards(*names):
    """Return source code for a list holding a GuardBuiltins guard on the
    sorted `names`."""
    joined = ', '.join(repr(name) for name in sorted(names))
    return '[__fat__.GuardBuiltins(%s)]' % (joined,)
class SpecializeConstant(ast.NodeTransformer):
    """Replace ast.Constant nodes with their legacy literal node equivalents."""

    def visit_Constant(self, node):
        # frozenset has no legacy literal node: keep the Constant untouched
        if isinstance(node.value, frozenset):
            return node
        return specialize_constant(node, node.value)
class AstToolsTests(unittest.TestCase):
    """Tests for the ast.Call inspection helpers in fatoptimizer.tools."""
    def test_get_starargs(self):
        # get_starargs() returns the *args node of a call, or None
        tree = compile_ast('func()')
        node = fatoptimizer.tools.get_starargs(tree.body[0].value)
        self.assertIsNone(node)
        tree = compile_ast('func(arg, *varargs)')
        node = fatoptimizer.tools.get_starargs(tree.body[0].value)
        self.assertIsInstance(node, ast.Name)
        self.assertEqual(node.id, 'varargs')
        # a non-Call node must raise ValueError
        tree = compile_ast('func()')
        with self.assertRaises(ValueError):
            fatoptimizer.tools.get_starargs(tree)
    def test_get_keywords(self):
        # get_keywords() returns the list of ast.keyword nodes of a call
        tree = compile_ast('func()')
        keywords = fatoptimizer.tools.get_keywords(tree.body[0].value)
        self.assertFalse(keywords)
        tree = compile_ast('func(x=1, y=2)')
        keywords = fatoptimizer.tools.get_keywords(tree.body[0].value)
        self.assertEqual(len(keywords), 2)
        self.assertIsInstance(keywords[0], ast.keyword)
        self.assertEqual(keywords[0].arg, 'x')
        self.assertIsInstance(keywords[1], ast.keyword)
        self.assertEqual(keywords[1].arg, 'y')
        # a **kwargs argument is an ast.keyword node with arg=None
        tree = compile_ast('func(arg, *varargs, **kwargs)')
        keywords = fatoptimizer.tools.get_keywords(tree.body[0].value)
        self.assertEqual(len(keywords), 1)
        self.assertIsInstance(keywords[0], ast.keyword)
        self.assertIsNone(keywords[0].arg)
        tree = compile_ast('func()')
        with self.assertRaises(ValueError):
            fatoptimizer.tools.get_keywords(tree)
    def test_get_varkeywords(self):
        # get_varkeywords() returns the **kwargs value node of a call, if any
        tree = compile_ast('func()')
        keywords = fatoptimizer.tools.get_varkeywords(tree.body[0].value)
        self.assertFalse(keywords)
        tree = compile_ast('func(x=1, y=2)')
        keywords = fatoptimizer.tools.get_varkeywords(tree.body[0].value)
        self.assertFalse(keywords)
        tree = compile_ast('func(arg, *varargs, **kwargs)')
        varkwds = fatoptimizer.tools.get_varkeywords(tree.body[0].value)
        self.assertIsInstance(varkwds, ast.Name)
        self.assertEqual(varkwds.id, 'kwargs')
        tree = compile_ast('func()')
        with self.assertRaises(ValueError):
            fatoptimizer.tools.get_varkeywords(tree)
class VariableVisitorTests(unittest.TestCase):
    """Tests for fatoptimizer.namespace.VariableVisitor.

    Each check compiles a code snippet and compares the local/global/nonlocal
    variable names found by the visitor against the expected sets.
    """
    def check_vars(self, code, local_variables, global_variables=None,
                   nonlocal_variables=None,
                   get_node=None):
        # get_node selects which node of the parsed module to analyze;
        # by default the first top-level statement is used.
        tree = compile_ast(code)
        self.assertIsInstance(tree, ast.Module)
        if get_node:
            node = get_node(tree)
        else:
            node = tree.body[0]
        visitor = fatoptimizer.namespace.VariableVisitor("<string>")
        visitor.find_variables(node)
        self.assertEqual(visitor.local_variables, local_variables)
        if global_variables is not None:
            self.assertEqual(visitor.global_variables, global_variables)
        else:
            self.assertEqual(visitor.global_variables, set())
        if nonlocal_variables is not None:
            self.assertEqual(visitor.nonlocal_variables, nonlocal_variables)
        else:
            self.assertEqual(visitor.nonlocal_variables, set())
    def test_module(self):
        code = """
        global x
        y = 1
        """
        self.check_vars(code, {'y'}, {'x'}, get_node=lambda tree: tree)
    def test_for(self):
        code = """
        def func(arg):
            for x in arg:
                pass
            for y, z in arg:
                pass
        """
        self.check_vars(code, {'arg', 'x', 'y', 'z'})
    def test_local(self):
        code = """
        x = 1
        def func():
            x = 2
        """
        self.check_vars(code, {'x'}, get_node=lambda tree: tree.body[1])
    def test_func_args(self):
        code = """
        def func(arg1, arg2, *varargs, **kwargs):
            pass
        """
        self.check_vars(code, {'arg1', 'arg2', 'varargs', 'kwargs'})
        code = """
        def func(*varargs):
            pass
        """
        self.check_vars(code, {'varargs'})
        code = """
        def func(**kw):
            pass
        """
        self.check_vars(code, {'kw'})
    @need_python35
    def test_nested(self):
        # names bound inside nested scopes (functions, comprehensions) must
        # not leak into the outer function's local variables
        code = """
        def func(arg):
            def func2(arg2):
                var2 = arg2
            async def afunc3(arg3):
                var3 = arg3
            var = [None for listcomp in range(3)]
            var = {None: None for dictcomp in range(3)}
            var = {None for setcomp in range(3)}
            var = (None for genexp in range(3))
        """
        self.check_vars(code, {'arg', 'func2', 'afunc3', 'var'})
    def test_assign(self):
        code = """
        def func():
            a, b = 1, 2
            *c, d = (3, 4)
            e.f = 5
            g[:2] = [6, 7]
        """
        self.check_vars(code, {'a', 'b', 'c', 'd'}, {'e', 'g'})
    def test_assign_complex(self):
        code = """
        def func(arg):
            first, *obj.attr[0], last = arg
            obj.attr[0].attr2[1] = arg
        """
        self.check_vars(code, {'arg', 'first', 'last'}, {'obj'})
        code = """
        def func(arg):
            obj.meth().y = arg
        """
        self.check_vars(code, {'arg'}, {'obj'})
    def test_modify_globals(self):
        # assignments whose target cannot be resolved to a name must raise
        code = """
        def set_global(key, arg):
            globals()[key] = arg
        """
        with self.assertRaises(fatoptimizer.namespace.ComplexAssignment):
            self.check_vars(code, set())
        code = """
        def assign(checksig):
            type(mock)._mock_check_sig = checksig
        """
        with self.assertRaises(fatoptimizer.namespace.ComplexAssignment):
            self.check_vars(code, set())
    def test_global(self):
        code = """
        x = 1
        def func1():
            global x
            x = 2
        """
        self.check_vars(code, set(), {'x'}, get_node=lambda tree: tree.body[1])
    def test_nonlocal(self):
        code = """
        def func1():
            nonlocal x
            x = 2
        """
        self.check_vars(code, set(), nonlocal_variables={'x'})
    def test_late_global(self):
        # a global statement applies to the whole scope, even before its line
        code = """
        def func1():
            x = 6
            global x
        """
        self.check_vars(code, set(), {'x'})
    def test_function_def(self):
        code = """
        def func():
            x = 1
            def nested():
                pass
        """
        self.check_vars(code, {'x', 'nested'})
    def test_import(self):
        code = """
        def func():
            from sys import flags
            from sys import ps1 as PS1
            import os
            import subprocess, email
        """
        self.check_vars(code, {'flags', 'PS1', 'os', 'subprocess', 'email'})
    def test_with(self):
        code = """
        def func():
            with open(name1) as fp1, open(name2) as fp2, open(name3):
                pass
            with cm() as (a, b):
                pass
            with cm() as self.attr:
                pass
        """
        self.check_vars(code, {'fp1', 'fp2', 'a', 'b'}, {'self'})
        code = """
        obj = object()
        def func():
            global obj
            with cm() as obj.attr:
                pass
        """
        self.check_vars(code, set(), {'obj'}, get_node=lambda tree: tree.body[1])
    def test_augassign(self):
        code = """
        def func():
            # artificial example, it raises UnboundLocalError
            x += 1
        """
        self.check_vars(code, {'x'})
    def test_nested_func(self):
        code = """
        def func(self):
            def func2():
                self.attr = 1
        """
        self.check_vars(code, set(), {'self'},
                        get_node=lambda tree: tree.body[0].body[0])
class BaseAstTests(unittest.TestCase):
    """Base class providing AST optimize-and-compare helpers for the tests."""
    maxDiff = 15000
    def setUp(self):
        if hasattr(sys, 'ast_transformers'):
            # Disable the AST hook (if any)
            old_transformers = sys.ast_transformers
            self.addCleanup(setattr, sys, 'ast_transformers', old_transformers )
            sys.ast_transformers = []
        # Disable all optimizations by default
        self.config = fatoptimizer.Config()
        self.config.disable_all()
    def assertAstEqual(self, tree1, tree2):
        # ast objects don't support comparison,
        # so compare their text representation
        tree1 = SpecializeConstant().visit(tree1)
        text1 = fatoptimizer.pretty_dump(tree1)
        text2 = fatoptimizer.pretty_dump(tree2)
        self.assertEqual(text1, text2)
    def optimize(self, source):
        """Parse source and run the optimizer on it with self.config."""
        tree = compile_ast(source)
        return fatoptimizer.optimize(tree, "<string>", self.config)
    def check_optimize(self, source1, source2):
        """Check that optimizing source1 yields the source2 AST."""
        tree1 = self.optimize(source1)
        if isinstance(source2, ast.AST):
            tree2 = ast.Module(body=[source2])
        else:
            tree2 = compile_ast(source2)
        self.assertAstEqual(tree1, tree2)
    def check_optimize_func(self, expr, result):
        """Check that `return (expr)` in a function is optimized to `result`,
        given either as source text or as a replacement AST node."""
        before = "def func(): return (%s)" % expr
        tree1 = self.optimize(before)
        if isinstance(result, ast.AST):
            after = "def func(): return 0"
            tree2 = compile_ast(after)
            tree2.body[0].body[0].value = result
        else:
            after = "def func(): return (%s)" % result
            tree2 = compile_ast(after)
        self.assertAstEqual(tree1, tree2)
    def check_dont_optimize(self, source, result=None):
        if result is None:
            result = source
        self.check_optimize(source, result)
    def check_dont_optimize_func(self, expr, result=None):
        if result is None:
            result = expr
        self.check_optimize_func(expr, result)
    def indent(self, source, level=1):
        # NOTE: indentation unit is a single space per level
        source = format_code(source)
        indent = ' ' * level
        return '\n'.join(indent + line for line in source.splitlines())
    def format_specialize(self, before, specialized, guards,
                          template=None):
        """Return (original_code, expected_code): the expected code wraps the
        specialized function body in the __fat__.specialize() boilerplate."""
        before = textwrap.dedent(before).strip()
        specialized = textwrap.dedent(specialized).strip()
        if not template:
            template = """
                {import_fat}
                {code}
            """
        template = format_code(template)
        code1 = before
        code2 = textwrap.dedent("""
            import fat as __fat__
            {before}
            _ast_optimized = func
            {specialized}
            __fat__.specialize(_ast_optimized, func.__code__, {guards})
            func = _ast_optimized
            del _ast_optimized
        """).strip()
        code2 = code2.format(before=before,
                             specialized=specialized,
                             guards=guards)
        return (code1, code2)
    def check_specialize(self, *args, **kw):
        code1, code2 = self.format_specialize(*args, **kw)
        self.check_optimize(code1, code2)
    def check_func_specialize(self, source, specialized, guards,
                              replace_consts='', template=None):
        """Check that the body `source` of func() is specialized into
        `specialized` (source text or an AST node) under `guards`."""
        source = self.indent(source)
        before = textwrap.dedent("""
        def func():
        {source}
        """).strip()
        before = before.format(source=source)
        if isinstance(specialized, ast.AST):
            specialized_ast = specialized
            # placeholder constant, replaced below by the AST node
            specialized = 'def func(): return 8421028141204'
        else:
            specialized_ast = None
            specialized = self.indent(specialized)
            specialized = "def func():\n" + specialized
        if replace_consts:
            specialized += ('\nfunc.__code__ = __fat__.replace_consts(func.__code__, %s)'
                            % replace_consts)
        code1, code2 = self.format_specialize(before, specialized, guards, template=template)
        tree1 = self.optimize(code1)
        tree2 = compile_ast(code2)
        if specialized_ast:
            # import, def func, _ast_optimized = func, [def func]
            node = tree2.body[3]
            assert node.body[0].value.n == 8421028141204
            node.body[:] = [specialized_ast]
        self.assertAstEqual(tree1, tree2)
    def check_builtin_func(self, func, source, specialized):
        """check_func_specialize() guarded on the builtin named `func`."""
        self.check_func_specialize(source, specialized,
                                   guards=builtin_guards(func))
class FunctionsTests(BaseAstTests):
    """Tests for get_constant() and _new_constant() in fatoptimizer.tools."""
    def test_get_constant(self):
        def get_constant(source):
            filename = "test"
            tree = compile_ast_expr(source)
            tree = fatoptimizer.convert_const.ConvertConstant(filename).visit(tree)
            return fatoptimizer.tools.get_constant(tree)
        self.assertEqual(get_constant('True'), True)
        self.assertEqual(get_constant('False'), False)
        self.assertEqual(get_constant('None'), None)
        self.assertEqual(get_constant('1'), 1)
        self.assertEqual(get_constant(r'"unicode \u20ac"'), "unicode \u20ac")
        self.assertEqual(get_constant(r'b"bytes \xff"'), b"bytes \xff")
        self.assertEqual(get_constant('(1, 2, 3)'), (1, 2, 3))
        # unsupported types
        self.assertIs(get_constant('[1, 2]'), UNSET)
        self.assertIs(get_constant('{1, 2}'), UNSET)
        self.assertIs(get_constant('{"key": "value"}'), UNSET)
    def new_constant(self, value):
        # helper: build a literal AST node for value with dummy line numbers
        node = ast.Num(n=1, lineno=1, col_offset=1)
        return fatoptimizer.tools._new_constant(node, value)
    def test_new_constant_primitive(self):
        self.assertAstEqual(self.new_constant(None),
                            compile_ast_expr('None'))
        self.assertAstEqual(self.new_constant(False),
                            compile_ast_expr('False'))
        self.assertAstEqual(self.new_constant(True),
                            compile_ast_expr('True'))
        self.assertAstEqual(self.new_constant(2),
                            compile_ast_expr('2'))
        self.assertAstEqual(self.new_constant(4.0),
                            compile_ast_expr('4.0'))
        self.assertAstEqual(self.new_constant(4.0j),
                            compile_ast_expr('4.0j'))
        self.assertAstEqual(self.new_constant("unicode \u20ac"),
                            compile_ast_expr(r'"unicode \u20ac"'))
        self.assertAstEqual(self.new_constant(b"bytes \xff"),
                            compile_ast_expr(r'b"bytes \xff"'))
    def test_new_constant_containers(self):
        self.assertAstEqual(self.new_constant((1, 2)),
                            compile_ast_expr('(1, 2)'))
        self.assertAstEqual(self.new_constant([1, 2]),
                            compile_ast_expr('[1, 2]'))
        self.assertAstEqual(self.new_constant({"key": "value"}),
                            compile_ast_expr('{"key": "value"}'))
        self.assertAstEqual(self.new_constant({"a": 1, "b": 2, "c": 3, "d": 4}),
                            compile_ast_expr(repr({"a": 1, "b": 2, "c": 3, "d": 4})))
class CallPureBuiltinTests(BaseAstTests):
    """Tests for constant-folding calls to pure builtins on constant args."""
    def setUp(self):
        super().setUp()
        from fatoptimizer.builtins import add_pure_builtins
        add_pure_builtins(self.config)
    def test_builtin_abs(self):
        self.check_builtin_func('abs',
            'return abs(-3)',
            'return 3')
    def test_builtin_ascii(self):
        self.check_builtin_func('ascii',
            'return ascii(3)',
            'return "3"')
    def test_builtin_bool(self):
        self.check_builtin_func('bool',
            'return bool("x")',
            'return True')
    def test_builtin_bin(self):
        self.check_builtin_func('bin',
            'return bin(15)',
            'return "0b1111"')
    def test_builtin_bytes(self):
        self.check_builtin_func('bytes',
            "return bytes(b'abc')",
            "return b'abc'")
        self.check_builtin_func('bytes',
            "return bytes((65, 66, 67))",
            "return b'ABC'")
        # calls that would raise at runtime must not be folded
        self.check_dont_optimize_func("bytes('unicode')")
        self.check_dont_optimize_func("bytes((-1,))")
        self.check_dont_optimize_func("bytes((256,))")
    def test_builtin_chr(self):
        self.check_builtin_func('chr',
            'return chr(65)',
            'return "A"')
        # out of the valid Unicode code point range
        self.check_dont_optimize_func('chr(-1)')
        self.check_dont_optimize_func('chr(0x110000)')
    def test_builtin_complex(self):
        self.check_builtin_func('complex',
            'return complex("1.0j")',
            'return 1.0j')
        self.check_builtin_func('complex',
            'return complex(3j)',
            'return 3j')
        self.check_builtin_func('complex',
            'return complex(0, 2)',
            'return 2j')
        self.check_dont_optimize_func("complex('xyz')")
        self.check_dont_optimize_func("complex('1.0', 2)")
    def test_builtin_dict(self):
        self.check_builtin_func('dict',
            "return dict(((1, 2), (3, 4)))",
            "return {1: 2, 3: 4}")
        self.check_builtin_func('dict',
            "return dict({1: 2, 3: 4})",
            "return {1: 2, 3: 4}")
        self.check_builtin_func('dict',
            "return dict()",
            "return {}")
        # unhashable key
        self.check_dont_optimize_func("dict({['list']: 'value'})")
    def test_builtin_divmod(self):
        self.check_builtin_func('divmod',
            'return divmod(100, 3)',
            'return (33, 1)')
        # division by zero
        self.check_dont_optimize_func("divmod(1, 0)")
        self.check_dont_optimize_func("divmod(2.0, 0.0)")
    def test_builtin_float(self):
        self.check_builtin_func('float',
            'return float("1.0")',
            'return 1.0')
        self.check_builtin_func('float',
            'return float(2)',
            'return 2.0')
        self.check_builtin_func('float',
            'return float(3.0)',
            'return 3.0')
        self.check_dont_optimize_func("float('xyz')")
    def test_builtin_frozenset(self):
        # frozenset results are kept as ast.Constant nodes
        self.check_builtin_func('frozenset',
            "return frozenset(('abc',))",
            ast.Return(ast.Constant(value=frozenset(('abc',)))))
        self.check_builtin_func('frozenset',
            "return frozenset()",
            ast.Return(ast.Constant(value=frozenset())))
        # unhashable member
        self.check_dont_optimize_func('frozenset(([],))')
    def test_builtin_hex(self):
        self.check_builtin_func('hex',
            'return hex(16)',
            'return "0x10"')
    def test_builtin_int(self):
        self.check_builtin_func('int',
            'return int(123)',
            'return 123')
        self.check_builtin_func('int',
            'return int(123.0)',
            'return 123')
        self.check_builtin_func('int',
            'return int("123")',
            'return 123')
        self.check_dont_optimize_func("int(1j)")
        self.check_dont_optimize_func("int('xyz')")
    def test_builtin_len(self):
        self.check_builtin_func('len', 'return len("abc")', 'return 3')
    def test_builtin_list(self):
        self.check_builtin_func('list',
            'return list("abc")',
            'return ["a", "b", "c"]')
    def test_builtin_oct(self):
        self.check_builtin_func('oct',
            'return oct(83)',
            'return "0o123"')
    def test_builtin_ord(self):
        self.check_builtin_func('ord', 'return ord("A")', 'return 65')
        self.check_builtin_func('ord', 'return ord(b"A")', 'return 65')
        self.check_dont_optimize_func("ord(123)")
        self.check_dont_optimize_func("ord('')")
        self.check_dont_optimize_func("ord('xyz')")
    def test_builtin_max(self):
        self.check_builtin_func('max', 'return max(4, 6)', 'return 6')
        # bytes and str don't compare
        self.check_dont_optimize_func("max(b'bytes', 'unicode')")
    def test_builtin_min(self):
        self.check_builtin_func('min', 'return min(4, 6)', 'return 4')
        self.check_dont_optimize_func("min(b'bytes', 'unicode')")
    def test_builtin_repr(self):
        self.check_builtin_func('repr',
            'return repr("abc")',
            'return "\'abc\'"')
    def test_builtin_pow(self):
        # int
        self.check_builtin_func('pow',
            'return pow(2, 8)',
            'return 256')
        # float
        self.check_builtin_func('pow',
            'return pow(16.0, 0.5)',
            'return 4.0')
        # int modulo
        self.check_builtin_func('pow',
            'return pow(10, 3, 7)',
            'return 6')
    def test_builtin_round(self):
        self.check_builtin_func('round',
            'return round(1.5)',
            'return 2')
    def test_builtin_set(self):
        self.check_builtin_func('set',
            "return set(('abc',))",
            "return {'abc'}")
        self.check_dont_optimize_func('set(([],))')
    def test_builtin_str(self):
        self.check_builtin_func('str',
            'return str(123)',
            'return "123"')
        self.check_builtin_func('str',
            'return str("hello")',
            'return "hello"')
    def test_builtin_sum(self):
        self.check_builtin_func('sum',
            'return sum((1, 2, 3))',
            'return 6')
        self.check_dont_optimize_func('sum(([],))')
    def test_builtin_tuple(self):
        self.check_builtin_func('tuple',
            'return tuple("abc")',
            'return ("a", "b", "c")')
    def test_config_argtype(self):
        # restricting the accepted argument types disables the optimization
        self.check_builtin_func('str',
            'return str(123)',
            'return "123"')
        self.config._pure_builtins['str'].arg_types = (str,)
        self.check_dont_optimize("""
            def func():
                return str(123)
        """)
    def test_pow_max_bits(self):
        # results larger than max_int_bits are not folded
        self.config.max_int_bits = 16
        self.check_builtin_func('pow',
            'return pow(2, 15)',
            'return 32768')
        self.check_dont_optimize("""
            def func():
                return pow(2, 16)
        """)
        self.config.max_int_bits = 17
        self.check_builtin_func('pow',
            'return pow(2, 16)',
            'return 65536')
    def test_config_pure_builtins(self):
        # removing a builtin from the pure set disables its folding
        self.check_builtin_func('str',
            'return str(123)',
            'return "123"')
        del self.config._pure_builtins['str']
        self.check_dont_optimize("""
            def func():
                return str(123)
        """)
class ConfigTests(BaseAstTests):
    """Tests for the size limits of constant folding in fatoptimizer.Config."""
    def setUp(self):
        super().setUp()
        self.config.constant_folding = True
    def test_config_max_int_bits(self):
        self.config.max_int_bits = 16
        self.check_optimize("""
            def func():
                return 1 << 15
        """, """
            def func():
                return 32768
        """)
        # 2**16 needs 17 bits: above the limit, not folded
        self.check_dont_optimize("""
            def func():
                return 1 << 16
        """)
    def test_config_max_bytes_len(self):
        self.config.max_bytes_len = 3
        self.check_optimize("""
            def func():
                return b'x' * 3
        """, """
            def func():
                return b'xxx'
        """)
        self.check_dont_optimize("""
            def func():
                return b'x' * 4
        """)
    def test_config_max_str_len(self):
        self.config.max_str_len = 3
        self.check_optimize("""
            def func():
                return 'x' * 3
        """, """
            def func():
                return 'xxx'
        """)
        self.check_dont_optimize("""
            def func():
                return 'x' * 4
        """)
    # FIXME: fix this test
    #def test_config_max_constant_size(self):
    #    size = fatoptimizer.tools.get_constant_size('abc')
    #    self.config.max_constant_size = size
    #    self.check_builtin_func('str',
    #        'return str(123)',
    #        'return "123"')
    #    self.config.max_constant_size = size - 1
    #    self.check_dont_optimize("""
    #        def func():
    #            return str(1234)
    #    """)
class OptimizerTests(BaseAstTests):
    """Tests for the specialization machinery: import injection, guards,
    nested and reentrant function definitions."""
    def setUp(self):
        super().setUp()
        from fatoptimizer.builtins import add_pure_builtins
        add_pure_builtins(self.config)
    def check_add_import(self, before='', after=''):
        # The `after` snippet is placed above the injected fat import (the
        # import is added after it); `before` is placed below it (the import
        # is added before it).
        template = ("""
            %s
            {import_fat}
            %s
            {code}
        """ % (after, before))
        code1 = textwrap.dedent("""
            def func():
                print(chr(65))
        """)
        code2 = """
        def func():
            print("A")
        """
        self.check_specialize(code1, code2,
                              guards=builtin_guards('chr'),
                              template=template)
    def test_add_import_after_docstring(self):
        self.check_add_import(after='"docstring"')
    def test_add_import_after_import_future(self):
        self.check_add_import(after='from __future__ import print_function')
    def test_add_import_before_import_sys(self):
        self.check_add_import(before='import sys')
    def test_builtin_chr(self):
        self.check_func_specialize(
            "return chr(65)",
            'return "A"',
            guards=builtin_guards('chr'))
    def test_reentrant_functiondef(self):
        # Test reentrant call to visit_FunctionDef() (func2()) when we already
        # decided to specialized the function func()
        self.check_func_specialize("""
            res = chr(65)
            def func2():
                return 2
            return res
        """, """
            res = "A"
            def func2():
                return 2
            return res
        """,
        guards=builtin_guards('chr'))
    def test_generic_visitor(self):
        # Test that visitor visits ast.Call arguments
        self.check_func_specialize("""
            print(chr(65))
        """, """
            print("A")
        """, guards=builtin_guards('chr'))
    def test_combined_called(self):
        # optimize str(obj) where obj is not a constant, but a call
        # which will be optimized to a constant
        self.check_func_specialize(
            'return str(ord("A"))',
            "return '65'",
            guards=builtin_guards('ord', 'str'))
    def test_duplicate_guards(self):
        # check that duplicated guards are removed
        self.check_func_specialize(
            "return ord('A') + ord('B')",
            "return 65 + 66",
            guards=builtin_guards('ord'))
    def test_decorator(self):
        # FIXME: support decorators
        self.check_dont_optimize("""
            @decorator
            def func():
                return ord('A')
        """)
    def test_method(self):
        template = format_code("""
            {import_fat}
            class MyClass:
            {code}
        """)
        self.check_specialize("""
            def func(self):
                return chr(65)
        """, """
            def func(self):
                return "A"
        """, guards=builtin_guards('chr'), template=template)
    def test_nested_functiondef(self):
        template = format_code("""
            {import_fat}
            def create_func():
            {code}
                return func
        """)
        self.check_specialize("""
            def func():
                return chr(65)
        """, """
            def func():
                return "A"
        """, guards=builtin_guards('chr'), template=template)
class OptimizerVariableTests(BaseAstTests):
    """Tests that specialization respects global/local variable scoping."""
    def setUp(self):
        super().setUp()
        from fatoptimizer.builtins import add_pure_builtins
        add_pure_builtins(self.config)
    def test_global(self):
        template = format_code("""
            {import_fat}
            x = 1
            def create_func():
                x = 2
            {code}
        """)
        self.check_func_specialize("""
            global x
            return ord("A") + x
        """, """
            global x
            return 65 + x
        """, guards=builtin_guards('ord'), template=template)
    def test_late_global(self):
        # the global statement applies to the whole function scope,
        # even to uses before its line
        template = format_code("""
            {import_fat}
            x = 1
            def create_func():
                x = 2
            {code}
        """)
        self.check_func_specialize("""
            copy_to_local = x
            global x
            return ord("A") + x
        """, """
            copy_to_local = x
            global x
            return 65 + x
        """, guards=builtin_guards('ord'), template=template)
    def test_assign(self):
        template = format_code("""
            {import_fat}
            def create_func():
                x = 1
            {code}
        """)
        self.check_func_specialize("""
            # assignement: x is local to nested
            x = 2
            return ord("A") + x
        """, """
            x = 2
            return 65 + x
        """, guards=builtin_guards('ord'), template=template)
class ReplaceVariableTests(BaseAstTests):
    """Tests for fatoptimizer.optimizer.ReplaceVariable: substituting a name
    with a constant inside a function definition."""
    def check_replace(self, name_mapping, source1, source2):
        tree1 = compile_ast(source1)
        filename = '<string>'
        replace = fatoptimizer.optimizer.ReplaceVariable(filename, name_mapping)
        tree1.body[0] = replace.replace_func_def(tree1.body[0])
        tree2 = compile_ast(source2)
        self.assertAstEqual(tree1, tree2)
    def test_replace(self):
        self.check_replace({'x': 7},
            """
            def func():
                x()
                return x
            """,
            """
            def func():
                7()
                return 7
            """)
    def test_list_comprehension(self):
        # names bound in nested scopes (comprehensions, lambdas) must NOT be
        # replaced: only the direct function body is rewritten
        self.check_replace({'x': 7},
            """
            def func():
                y = x
                listcomp = [x() for i in range(3)]
                dictcomp = {x(): None for i in range(3)}
                setcomp = {x() for i in range(3)}
                gen = (x() for i in range(3))
                lam = lambda x: str(x)
            """,
            """
            def func():
                y = 7
                listcomp = [x() for i in range(3)]
                dictcomp = {x(): None for i in range(3)}
                setcomp = {x() for i in range(3)}
                gen = (x() for i in range(3))
                lam = lambda x: str(x)
            """)
class CopyBuiltinToConstantTests(BaseAstTests):
def setUp(self):
super().setUp()
self.config.copy_builtin_to_constant = True
self.config._copy_builtin_to_constant.add('max')
self.guards = builtin_guards('max')
def test_simple(self):
self.check_func_specialize("""
return max(x, y)
""", """
return 'LOAD_GLOBAL max'(x, y)
""",
self.guards,
replace_consts="{'LOAD_GLOBAL max': max}")
def test_called_twice(self):
self.check_func_specialize("""
a = max(x, y)
b = max(x, y)
""", """
a = 'LOAD_GLOBAL max'(x, y)
b = 'LOAD_GLOBAL max'(x, y)
""",
self.guards,
replace_consts="{'LOAD_GLOBAL max': max}")
def test_disabled(self):
self.config.copy_builtin_to_constant = False
self.check_dont_optimize("""
def func(x, y):
return max(x, y)
""")
def test_global(self):
# don't optimize because global 'max' name is overriden
self.check_dont_optimize("""
def func(x, y):
# don't do that at home, kids!
global max
max = min
return max(x, y)
""")
def test_local_name(self):
self.check_func_specialize("""
global_max = 1
return max(x, y)
""", """
global_max = 1
return 'LOAD_GLOBAL max'(x, y)
""",
self.guards,
replace_consts="{'LOAD_GLOBAL max': max}")
self.check_func_specialize("""
global_max = 1
global_max2 = 2
return max(x, y)
""", """
global_max = 1
global_max2 = 2
return 'LOAD_GLOBAL max'(x, y)
""",
self.guards,
replace_consts="{'LOAD_GLOBAL max': max}")
# FIXME: specialize nested function?
#def test_nested_func_before(self):
# self.config._copy_builtin_to_constant.add('int')
# self.check_optimize("""
# def func():
# def func2(x):
# return int(x)
# y = func2(4)
# return int(y)
# """, """
# import fat as __fat__
# def func():
# def func2(x):
# return int(x)
# y = func2(4)
# return int(y)
# _ast_optimized = func
# def func():
# def func2(x):
# return int(x)
# _ast_optimized = func2
# def func2(x):
# return 'LOAD_GLOBAL int'(x)
# func2.__code__ = __fat__.replace_consts(func2.__code__, {{'LOAD_GLOBAL int': 'LOAD_GLOBAL int#2'}})
# __fat__.specialize(_ast_optimized, func2.__code__, {guards})
# func2 = _ast_optimized
# del _ast_optimized
# y = func2(4)
# return 'LOAD_GLOBAL int#2'(y)
# func.__code__ = __fat__.replace_consts(func.__code__, {{'LOAD_GLOBAL int#2': int}})
# __fat__.specialize(_ast_optimized, func.__code__, {guards})
# func = _ast_optimized
# del _ast_optimized
# """.format(guards=builtin_guards('int')))
# FIXME: specialize nested function?
#def test_nested_func_after(self):
# self.config._copy_builtin_to_constant.add('len')
# self.check_optimize("""
# def func(arg):
# len(arg)
# def func2(x):
# len(x)
# """, """
# import fat as __fat__
# def func(arg):
# len(arg)
# def func2(x):
# len(x)
# _ast_optimized = func
# def func(arg):
# 'LOAD_GLOBAL len'(arg)
# def func2(x):
# len(x)
# _ast_optimized = func2
# def func2(x):
# 'LOAD_GLOBAL len#2'(x)
# func2.__code__ = __fat__.replace_consts(func2.__code__, {{'LOAD_GLOBAL len#2': 'LOAD_GLOBAL len'}})
# __fat__.specialize(_ast_optimized, func2.__code__, {guards})
# func2 = _ast_optimized
# del _ast_optimized
# func.__code__ = __fat__.replace_consts(func.__code__, {{'LOAD_GLOBAL len': len}})
# __fat__.specialize(_ast_optimized, func.__code__, {guards})
# func = _ast_optimized
# del _ast_optimized
# """.format(guards=builtin_guards('len')))
def test_repr_global(self):
# In func()/method(), repr() builtin cannot be copied to constant,
# because the call to __fat__.replace_consts(func.__code__, {'...': repr}) would
# load the local repr() function instead of the builtin repr()
# function.
self.config._copy_builtin_to_constant.add('repr')
self.check_dont_optimize("""
def repr(obj):
return 'local'
def func(obj):
return repr(obj)
""")
self.check_dont_optimize("""
class MyClass:
@staticmethod
def repr(obj):
return 'local'
def method(self, obj):
return repr(obj)
""")
def test_local_func(self):
self.config._copy_builtin_to_constant.add('sum')
self.check_dont_optimize("""
def func():
def sum(*args):
return local
return sum([1, 2, 3])
""")
    def test_super(self):
        """super() used inside a method must not be copied to a constant."""
        self.config._copy_builtin_to_constant.add('super')
        self.check_dont_optimize("""
            class MyClass(ParentClass):
                def method(self):
                    super().method()
        """)
class UnrollLoopTests(BaseAstTests):
    """Tests for the loop-unrolling optimization (config.unroll_loops).

    Bug fix: two methods were both named test_unroll_tuple; the second
    definition shadowed the first, so the flat-tuple test never ran.
    The second method is renamed to test_unroll_nested_tuple.
    """
    def setUp(self):
        super().setUp()
        # allow unrolling loops of up to 16 iterations
        self.config.unroll_loops = 16
    def test_unroll_tuple(self):
        # a loop over a constant tuple is replaced by one
        # "i = item; body" sequence per item
        self.check_optimize("""
            def func():
                for i in (True, 3, "text"):
                    print(i)
        """, """
            def func():
                i = True
                print(i)
                i = 3
                print(i)
                i = "text"
                print(i)
        """)
    def test_unroll_nested_tuple(self):
        # renamed from a duplicate test_unroll_tuple which shadowed the
        # method above; items that are themselves tuples are unrolled too
        self.check_optimize("""
            def func():
                for i in (
                    (True, 'a'),
                    (False, 'b'),
                ):
                    print(i)
        """, """
            def func():
                i = (True, 'a')
                print(i)
                i = (False, 'b')
                print(i)
        """)
    def test_not_builtin_range(self):
        # range shadowed at module or function scope: don't unroll
        self.check_dont_optimize("""
            range = lambda x: (x,)
            def func():
                for i in range(2):
                    print(i)
        """)
        self.check_dont_optimize("""
            def func():
                range = lambda x: (x,)
                for i in range(2):
                    print(i)
        """)
    def test_not_range_int(self):
        # range() argument is not an int: don't unroll
        self.check_dont_optimize("""
            def func():
                for i in range(2.0):
                    print(i)
        """)
    def test_unroll_range(self):
        # range() with a guard (check_builtin_func) is unrolled when
        # simplify_iterable is also enabled
        self.config.simplify_iterable = True
        self.check_builtin_func('range', """
            for i in range(2):
                print(i)
        """, """
            i = 0
            print(i)
            i = 1
            print(i)
        """)
    def test_else(self):
        # the else block of a fully unrolled loop always runs: inline it
        self.check_optimize("""
            def func():
                for i in (3,):
                    print(i)
                else:
                    print("else")
        """, """
            def func():
                i = 3
                print(i)
                print("else")
        """)
    def test_dont_optimize(self):
        # break/continue/raise change control flow: unrolling is unsafe
        self.check_dont_optimize("""
            def func():
                for i in range(3):
                    print(i)
                    break
        """)
        self.check_dont_optimize("""
            def func():
                for i in range(3):
                    print(i)
                    continue
        """)
        self.check_dont_optimize("""
            def func():
                for i in range(3):
                    if i == 1:
                        raise ValueError
                    print(i)
        """)
class UnrollComprehensionTests(BaseAstTests):
    """Unroll list/set/dict comprehensions over constant iterables into
    constant containers (requires constant folding)."""
    def setUp(self):
        super().setUp()
        self.config.unroll_loops = 16
        self.config.constant_folding = True
    def test_config_disable(self):
        # unroll_loops=0 disables the optimization
        self.config.unroll_loops = 0
        self.check_dont_optimize('[i for i in (1, 2, 3)]')
    def test_config_max_loops(self):
        # unroll_loops bounds how many iterations may be unrolled
        self.config.unroll_loops = 3
        self.check_optimize('[i for i in (1, 2, 3)]',
                            '[1, 2, 3]')
        self.check_dont_optimize('[i for i in (1, 2, 3, 4)]')
    def test_listcomp(self):
        self.check_optimize('[i for i in (1, 2, 3)]',
                            '[1, 2, 3]')
        self.check_optimize('[i*2 for i in "abc"]',
                            '["aa", "bb", "cc"]')
    def test_setcomp(self):
        self.check_optimize('{i for i in (1, 2, 3)}',
                            '{1, 2, 3}')
        self.check_optimize('{i*2 for i in "abc"}',
                            '{"aa", "bb", "cc"}')
    def test_dictcomp(self):
        self.check_optimize('{i:i for i in (1, 2, 3)}',
                            '{1: 1, 2: 2, 3: 3}')
        self.check_optimize('{i:i*2 for i in (1, 2, 3)}',
                            '{1: 2, 2: 4, 3: 6}')
class NodeVisitorTests(BaseAstTests):
    """Error handling of fatoptimizer.tools.NodeVisitor/NodeTransformer:
    visitor exceptions are wrapped once with file/line context, while
    OptimizerError passes through unwrapped."""
    def check_call_visitor(self, visitor):
        # the visitor's internal error must be re-raised with location
        # info and a compact dump of the node being visited
        tree = ast.parse("1+1")
        with self.assertRaises(Exception) as cm:
            visitor.visit(tree)
        binop = tree.body[0].value
        what = ast.dump(binop)
        self.assertEqual(str(cm.exception),
                         'error at <string>:1 on visiting %s: bug' % what)
        # Test truncature of the AST dump
        with mock.patch('fatoptimizer.tools.COMPACT_DUMP_MAXLEN', 5):
            with self.assertRaises(Exception) as cm:
                visitor.visit(tree)
            what = 'BinOp(...)'
            self.assertEqual(str(cm.exception),
                             'error at <string>:1 on visiting %s: bug' % what)
    def test_visitor_call_visitor(self):
        class BuggyVisitor(fatoptimizer.tools.NodeVisitor):
            def visit_Module(self, node):
                # visit_Module() calls indirectly visit_BinOp(),
                # but the exception must only be wrapped once
                self.generic_visit(node)
            def visit_BinOp(self, node):
                raise Exception("bug")
        visitor = BuggyVisitor("<string>")
        self.check_call_visitor(visitor)
    def test_transformer_call_visitor(self):
        class BuggyTransformer(fatoptimizer.tools.NodeTransformer):
            def visit_Module(self, node):
                # visit_Module() calls indirectly visit_BinOp(),
                # but the exception must only be wrapped once
                self.generic_visit(node)
            def visit_BinOp(self, node):
                raise Exception("bug")
        visitor = BuggyTransformer("<string>")
        self.check_call_visitor(visitor)
    def check_pass_optimizer_error(self, visitor):
        tree = ast.parse("1+1")
        with self.assertRaises(fatoptimizer.OptimizerError) as cm:
            # visit() must not wrap OptimizerError into a generic Exception
            visitor.visit(tree)
    def test_visitor_pass_optimizer_error(self):
        class BuggyVisitor(fatoptimizer.tools.NodeVisitor):
            def visit_Module(self, node):
                # visit_Module() calls indirectly visit_BinOp()
                self.generic_visit(node)
            def visit_BinOp(self, node):
                raise fatoptimizer.OptimizerError
        visitor = BuggyVisitor("<string>")
        self.check_pass_optimizer_error(visitor)
    def test_transformer_pass_optimizer_error(self):
        class BuggyTransformer(fatoptimizer.tools.NodeTransformer):
            def visit_Module(self, node):
                # visit_Module() calls indirectly visit_BinOp()
                self.generic_visit(node)
            def visit_BinOp(self, node):
                raise fatoptimizer.OptimizerError
        visitor = BuggyTransformer("<string>")
        self.check_pass_optimizer_error(visitor)
class NamespaceTests(BaseAstTests):
    """Tests of the optimizer's namespace tracking: after optimizing a
    function, ns._variables maps local names to their known constant
    value, or UNSET when only the name (not the value) is known.

    Bug fix: the final try/finally check was duplicated verbatim; the
    redundant second copy has been removed.
    """
    def get_namespace(self, code):
        """Optimize the first function of *code* and return the resulting
        namespace object."""
        filename = "test"
        tree = compile_ast(code)
        tree = fatoptimizer.convert_const.ConvertConstant(filename).visit(tree)
        self.assertIsInstance(tree, ast.Module)
        func_def = tree.body[0]
        self.assertIsInstance(func_def, ast.FunctionDef)
        parent = fatoptimizer.optimizer.Optimizer(self.config, filename)
        optimizer = fatoptimizer.optimizer.FunctionOptimizerStage1(self.config, filename, parent=parent)
        optimizer.optimize(func_def)
        return optimizer.namespace
    def check_namespace(self, code, expected):
        # namespace must be fully known and match *expected*
        ns = self.get_namespace(code)
        self.assertEqual(ns._variables, expected)
        self.assertFalse(ns._unknown_state)
    def check_unknown_namespace(self, code):
        # namespace tracking gave up: unknown state, no tracked variables
        ns = self.get_namespace(code)
        self.assertTrue(ns._unknown_state, ns._variables)
        self.assertEqual(ns._variables, {})
    def test_assign(self):
        code = """
            def func():
                x = 1
        """
        self.check_namespace(code, {'x': 1})
    def test_assign_attr(self):
        # setting an attribute invalidates the known value of the object
        code = """
            def func(obj):
                x = obj
                x.y = 2
        """
        self.check_namespace(code, {'x': UNSET})
        code = """
            def func(obj):
                obj.attr = 1
                x = 1
                return x
        """
        self.check_namespace(code, {'obj': UNSET, 'x': 1})
        code = """
            def func(obj, value):
                obj.attr = value
                x = 1
                return x
        """
        self.check_namespace(code, {'obj': UNSET, 'x': 1})
    def test_assign_subscript(self):
        # item assignment invalidates the known value
        code = """
            def func(obj):
                x = obj
                x[:3] = 2
        """
        self.check_namespace(code, {'x': UNSET})
    def test_aug_assign(self):
        # augmented assignment result is not tracked
        code = """
            def func():
                x = 5
                x += 5
        """
        self.check_namespace(code, {'x': UNSET})
    def test_aug_assign_attr(self):
        code = """
            def func(obj):
                x = obj
                x.y += 7
        """
        self.check_namespace(code, {'x': UNSET})
    def test_for(self):
        # values set in a loop body/else are conditional: UNSET
        code = """
            def func(obj):
                for x in obj:
                    y = 1
                else:
                    z = 3
        """
        self.check_namespace(code, {'x': UNSET, 'y': UNSET, 'z': UNSET})
    def test_while(self):
        code = """
            def func(obj):
                while obj:
                    obj.method()
                    x = 1
                else:
                    y = 2
        """
        self.check_namespace(code, {'x': UNSET, 'y': UNSET})
    def test_loop_unrolling(self):
        # after unrolling, the loop variable has a known final value
        self.config.unroll_loops = 16
        code = """
            def func():
                for x in (5,):
                    pass
        """
        self.check_namespace(code, {'x': 5})
    def test_function_def(self):
        # a nested def binds the name, but the value isn't a constant
        code = """
            def func():
                def g():
                    pass
        """
        self.check_namespace(code, {'g': UNSET})
    @need_python35
    def test_async_function_def(self):
        code = """
            def func():
                async def g():
                    pass
        """
        self.check_namespace(code, {'g': UNSET})
    def test_class_def(self):
        code = """
            def func():
                class MyClass:
                    pass
        """
        self.check_namespace(code, {'MyClass': UNSET})
    def test_with(self):
        # "with ... as target" binds all target names with unknown values
        code = """
            def func(cb, cb2, f):
                with cb() as (a, b), cb2() as c, f:
                    pass
        """
        self.check_namespace(code, {'a': UNSET, 'b': UNSET, 'c': UNSET})
        code = """
            def func(cb):
                with cb() as (a, *b):
                    pass
        """
        self.check_namespace(code, {'a': UNSET, 'b': UNSET})
    def test_import(self):
        # "import a.b" binds the top-level package name
        code = """
            def func():
                import sys
                import os.path
                import posix as _posix
        """
        self.check_namespace(code, {'sys': UNSET, 'os': UNSET, '_posix': UNSET})
    def test_import_from(self):
        code = """
            def func():
                from sys import ps1
                from os.path import exists as path_exists
        """
        self.check_namespace(code, {'ps1': UNSET, 'path_exists': UNSET})
    def test_delete(self):
        # del removes names from the namespace
        code = """
            def func():
                x = 1
                del x
        """
        self.check_namespace(code, {})
        code = """
            def func():
                x = 1
                y = 2
                z = 3
                del x, y
        """
        self.check_namespace(code, {'z': 3})
        code = """
            def func():
                a = 1
                b = 2
                c = 3
                d = 4
                del (a, b), c
        """
        self.check_namespace(code, {'d': 4})
    def test_cond_delete(self):
        # conditional del: the name may or may not exist afterwards
        code = """
            def func(cb):
                x = 1
                try:
                    cb()
                    del x
                finally:
                    pass
        """
        self.check_namespace(code, {'x': UNSET})
    def test_if(self):
        # different values on the two branches: UNSET
        code = """
            def func(cond):
                if cond:
                    x = 1
                else:
                    x = 2
                return x
        """
        self.check_namespace(code, {'x': UNSET})
    def test_nested_cond_block(self):
        code = """
            def func(cond, cond2):
                if cond:
                    if cond2:
                        x = 1
                    else:
                        x = 2
                else:
                    x = 3
                return x
        """
        self.check_namespace(code, {'x': UNSET})
    def test_try(self):
        # only an unconditional finally-block assignment yields a known
        # value; try/except paths are conditional
        # try/except
        code = """
            def func(cb):
                try:
                    cb()
                    x = 1
                except:
                    x = 2
        """
        self.check_namespace(code, {'x': UNSET})
        # try/except/finally
        code = """
            def func(cb):
                try:
                    cb()
                    x = 1
                except:
                    x = 2
                finally:
                    x = 3
        """
        self.check_namespace(code, {'x': 3})
        # try/except/else/finally
        code = """
            def func(cb):
                try:
                    cb()
                    x = 1
                except:
                    x = 2
                else:
                    x = 3
                finally:
                    x = 4
        """
        self.check_namespace(code, {'x': 4})
        # try/finally
        code = """
            def func(cb):
                try:
                    cb()
                    x = 1
                finally:
                    x = 3
        """
        self.check_namespace(code, {'x': 3})
class ConstantPropagationTests(BaseAstTests):
    """Tests for constant propagation: replace reads of a variable whose
    value is a known constant with the constant itself."""
    def setUp(self):
        super().setUp()
        self.config.constant_propagation = True
    def test_module(self):
        # optimization must also work at the module scope
        self.check_optimize("""
            x = 1
            y = x
        """, """
            x = 1
            y = 1
        """)
    def test_basic(self):
        self.check_optimize("""
            def func():
                x = 1
                y = x
                return y
        """, """
            def func():
                x = 1
                y = 1
                return 1
        """)
    def test_assign_targets(self):
        # chained assignment: both targets get the constant
        self.check_optimize("""
            def func():
                x = y = 1
                return (x, y)
        """, """
            def func():
                x = y = 1
                return (1, 1)
        """)
    def test_tuple(self):
        # constant tuples propagate too
        self.check_optimize("""
            def func():
                x = (8, 9)
                y = x
                return y
        """, """
            def func():
                x = (8, 9)
                y = (8, 9)
                return (8, 9)
        """)
    def test_with(self):
        # "with ... as x" rebinds x with an unknown value: don't propagate
        self.check_dont_optimize("""
            def func():
                x = 1
                with func2() as x:
                    pass
                return x
        """)
    def test_for(self):
        # variables reassigned inside a loop are not constant
        self.check_dont_optimize("""
            i = 0
            for x in (3, 5):
                i = i + 1
            y = i
        """)
        self.check_dont_optimize("""
            def func():
                i = 0
                for x in (3, 5):
                    i = i + 1
                return i
        """)
    def test_while(self):
        # the read before the loop propagates; reads after the loop
        # body reassignment do not
        self.check_optimize("""
            i = 0
            y = i
            while i < 10:
                i = i + 1
            z = i
        """, """
            i = 0
            y = 0
            while i < 10:
                i = i + 1
            z = i
        """)
        self.check_optimize("""
            def func():
                i = 0
                y = i
                while i < 10:
                    i = i + 1
                z = i
        """, """
            def func():
                i = 0
                y = 0
                while i < 10:
                    i = i + 1
                z = i
        """)
    def test_delete(self):
        # a deleted name must not be replaced by its old value
        self.check_dont_optimize("""
            def func():
                x = 0
                del x
                return x
        """)
    def test_constant_folding(self):
        # Test constant propagation + constant folding
        self.config.constant_folding = True
        self.check_optimize("""
            def func():
                x = 1
                y = x + 1
                return y
        """, """
            def func():
                x = 1
                y = 2
                return 2
        """)
    def test_complex_assign(self):
        # complex assignment targets are not tracked
        self.check_dont_optimize("""
            def func(x):
                x.y().z = 1
                return x
        """)
        self.check_dont_optimize("""
            def func(x):
                x.attr = 1
                return x
        """)
        self.check_dont_optimize("""
            def func():
                x, *y = (1, 2)
                return x, y
        """)
class BaseConstantFoldingTests(BaseAstTests):
    """Base class for the constant-folding tests: enables the
    constant_folding option."""
    def setUp(self):
        super().setUp()
        self.config.constant_folding = True
class ConstantFoldingBinOpTests(BaseConstantFoldingTests):
    """Folding of binary operations on constant operands; operations that
    would raise at runtime or exceed configured limits are left alone."""
    def test_disabled(self):
        self.check_optimize_func("1 + 1", "2")
        self.config.constant_folding = False
        self.check_dont_optimize_func("1 + 1")
    def test_not_constant(self):
        self.check_dont_optimize_func("x + 1")
        self.check_dont_optimize_func("1 + x")
    def test_shift_error(self):
        # negative shift count raises ValueError at runtime: don't fold
        self.check_dont_optimize_func("1 << -3",
                                      ast.BinOp(left=ast.Num(n=1), op=ast.LShift(), right=ast.Num(-3)))
        self.check_dont_optimize_func("1 >> -3",
                                      ast.BinOp(left=ast.Num(n=1), op=ast.RShift(), right=ast.Num(-3)))
    def test_float_binopts(self):
        # bitwise/shift operators are invalid on floats
        self.check_dont_optimize_func('1.0 << 2')
        self.check_dont_optimize_func('1.0 >> 2')
        self.check_dont_optimize_func('1.0 & 2')
        self.check_dont_optimize_func('1.0 | 2')
        self.check_dont_optimize_func('1.0 ^ 2')
    def test_complex_binopts(self):
        # these operators are invalid or not folded on complex numbers
        self.check_dont_optimize_func('1.0j ** 2')
        self.check_dont_optimize_func('1.0j // 2')
        self.check_dont_optimize_func('1.0j % 2')
        self.check_dont_optimize_func('1.0j << 2')
        self.check_dont_optimize_func('1.0j >> 2')
        self.check_dont_optimize_func('1.0j & 2')
        self.check_dont_optimize_func('1.0j | 2')
        self.check_dont_optimize_func('1.0j ^ 2')
    def test_division_by_zero(self):
        self.check_dont_optimize_func("1 // 0")
        self.check_dont_optimize_func("1.0 // 0.0")
        self.check_dont_optimize_func("1 / 0")
        self.check_dont_optimize_func("1.0 / 0.0")
        self.check_dont_optimize_func("1.0j / 0.0j")
    def test_formatting(self):
        # FIXME: optimize bytes%args and str%args
        self.check_dont_optimize_func("b'hello %s' % b'world'")
        self.check_dont_optimize_func("'hello %s' % 'world'")
    def test_add(self):
        self.check_optimize_func("2 + 3", "5")
        self.check_optimize_func("2.0 + 3.0", "5.0")
        self.check_optimize_func("2.0j + 3.0j", "5.0j")
        self.check_optimize_func("(1, 2) + (3,)", "(1, 2, 3)")
        # results longer than max_str_len/max_bytes_len are not folded
        self.config.max_str_len = 2
        self.check_optimize_func("'a' + 'b'", "'ab'")
        self.check_dont_optimize_func("'a' + 'bc'")
        self.config.max_bytes_len = 2
        self.check_optimize_func("b'a' + b'b'", "b'ab'")
        self.check_dont_optimize_func("b'a' + b'bc'")
    def test_sub(self):
        self.check_optimize_func("3 - 2", "1")
        self.check_optimize_func("3.0 - 2.0", "1.0")
        self.check_optimize_func("3.0j - 2.0j", "1.0j")
    def test_mul(self):
        self.check_optimize_func("2 * 3", "6")
        self.check_optimize_func("2.0 * 3.0", "6.0")
        self.check_optimize_func("2.0j * 3.0", "6j")
        self.check_optimize_func("'a' * 3", "'aaa'")
        self.check_optimize_func("b'x' * 3", "b'xxx'")
        self.check_optimize_func("(1, 2) * 2", "(1, 2, 1, 2)")
        self.check_optimize_func("3 * 'a'", "'aaa'")
        self.check_optimize_func("3 * b'x'", "b'xxx'")
        self.check_optimize_func("2 * (1, 2)", "(1, 2, 1, 2)")
    def test_floor_div(self):
        self.check_optimize_func("10 // 3", "3")
        self.check_optimize_func("10.0 // 3.0", "3.0")
    def test_div(self):
        self.check_optimize_func("5 / 2", "2.5")
        self.check_optimize_func("5.0 / 2.0", "2.5")
        self.check_optimize_func("5.0j / 2.0", "2.5j")
    def test_mod(self):
        self.check_optimize_func("5 % 2", "1")
        self.check_optimize_func("5.0 % 2.0", "1.0")
    def test_pow(self):
        self.check_optimize_func("2 ** 3", "8")
        self.check_optimize_func("2.0 ** 3.0", "8.0")
        # complex
        self.check_dont_optimize_func("2.0j ** 3.0")
        self.check_dont_optimize_func("2.0 ** 3.0j")
        # 0 ** -1
        self.check_dont_optimize_func("0 ** -1",
                                      ast.BinOp(left=ast.Num(n=0), op=ast.Pow(), right=ast.Num(-1)))
        self.check_dont_optimize_func("0.0 ** -1",
                                      ast.BinOp(left=ast.Num(n=0.0), op=ast.Pow(), right=ast.Num(-1)))
    def test_pow_max_int_bits(self):
        # results wider than max_int_bits are not folded
        self.config.max_int_bits = 16
        self.check_optimize_func('2 ** 15', '32768')
        self.check_dont_optimize_func("2 ** 16")
        self.config.max_int_bits = 17
        self.check_optimize_func('2 ** 15', '32768')
    def test_shift(self):
        self.check_optimize_func("1 << 3", "8")
        self.check_optimize_func("16 >> 2", "4")
    def test_bits(self):
        self.check_optimize_func("3 & 1", "1")
        self.check_optimize_func("1 | 2", "3")
        self.check_optimize_func("3 ^ 3", "0")
class ConstantFoldingUnaryOpTests(BaseConstantFoldingTests):
    """Folding of unary operations (+, -, ~, not) on constants, and the
    "not <compare>" operator-inversion rewrite."""
    def test_not_constant(self):
        self.check_dont_optimize_func("-x")
        self.check_dont_optimize_func("+x")
        self.check_dont_optimize_func("~x")
        self.check_dont_optimize_func("not x")
    def test_uadd(self):
        self.check_optimize_func("+3", "3")
        self.check_optimize_func("+3.0", "3.0")
        self.check_optimize_func("+3.0j", "3.0j")
        self.check_dont_optimize_func("+'abc'")
    def test_usub(self):
        self.check_optimize_func("-3", ast.Num(n=-3))
        self.check_optimize_func("-3.0", ast.Num(n=-3.0))
        self.check_optimize_func("-3.0j", ast.Num(n=-3.0j))
        self.check_dont_optimize_func("-'abc'")
    def test_invert(self):
        self.check_optimize_func("~3", ast.Num(n=-4))
        self.check_dont_optimize_func("~3.0")
        self.check_dont_optimize_func("~3.0j")
        self.check_dont_optimize_func("~'abc'")
    def test_not(self):
        self.check_optimize_func("not 3", "False")
        self.check_optimize_func("not 3.0", "False")
        self.check_optimize_func("not 3.0j", "False")
        self.check_dont_optimize_func("not 'abc'")
    def test_not_compare(self):
        # "not" of is/in comparisons is rewritten to the inverse operator;
        # ordering/equality operators are left alone
        self.check_optimize_func("not(x is y)", "x is not y")
        self.check_optimize_func("not(x is not y)", "x is y")
        self.check_optimize_func("not(x in y)", "x not in y")
        self.check_optimize_func("not(x not in y)", "x in y")
        self.check_dont_optimize_func("not(x < y)")
        self.check_dont_optimize_func("not(x <= y)")
        self.check_dont_optimize_func("not(x > y)")
        self.check_dont_optimize_func("not(x >= y)")
        self.check_dont_optimize_func("not(x == y)")
        self.check_dont_optimize_func("not(x != y)")
        self.check_dont_optimize_func("not(x < y < y)")
# NOTE(review): class name has a typo ("Subscrit" -> "Subscript"); kept as-is
# to avoid changing the public identifier.
class ConstantFoldingSubscritTests(BaseConstantFoldingTests):
    """Folding of constant subscript and slice expressions."""
    def test_not_constant(self):
        self.check_dont_optimize_func("x[k]")
        self.check_dont_optimize_func("'abc'[k]")
        self.check_dont_optimize_func("x[0]")
        self.check_dont_optimize_func("x[0:stop]")
        self.check_dont_optimize_func("x[start:10]")
        self.check_dont_optimize_func("x[:10]")
    def test_subscript_index(self):
        self.check_optimize_func("'abc'[0]", "'a'")
        self.check_optimize_func("'abc'[-2]", "'b'")
        self.check_optimize_func("'abcde'[::2]", "'ace'")
        self.check_optimize_func("b'ABC'[0]", "65")
        self.check_optimize_func("(10, 20, 30, 40)[-1]", "40")
        # list
        self.check_optimize_func("[10, 20, 30][0]", "10")
        # dict with int and str keys
        self.check_optimize_func("{9: 'x', 3: 'y'}[9]", "'x'")
        self.check_optimize_func("{'x': 9, 'y': 3}['x']", "9")
        # don't optimize
        self.check_dont_optimize_func("2[1]")
        self.check_dont_optimize_func("'abc'[1.0]")
        self.check_dont_optimize_func("{10, 20, 30}[1]")
        self.check_dont_optimize_func("{1: 2, 3: 4}[['x']]") # list key
        self.check_dont_optimize_func("{1: 2}[8]") # KeyError
    def test_subscript_slice(self):
        self.check_optimize_func("'abc'[:2]", "'ab'")
        self.check_optimize_func("'abc'[-2:]", "'bc'")
        self.check_optimize_func("b'ABC'[:2]", "b'AB'")
        self.check_optimize_func("(10, 20, 30, 40)[:2]", "(10, 20)")
        # list
        self.check_optimize_func("[10, 20, 30][:2]", "[10, 20]")
        # wrong types
        self.check_dont_optimize_func("'abc'[1.0:]")
        self.check_dont_optimize_func("'abc'[:2.0]")
        self.check_dont_optimize_func("'abc'[::3.0]")
        self.check_dont_optimize_func("{10, 20, 30}[:2]")
        self.check_dont_optimize_func("{1:2, 3:4}[:2]")
class ConstantFoldingCompareTests(BaseConstantFoldingTests):
    """Folding of constant comparisons and membership tests, plus the
    list->tuple / set->frozenset rewrite of "in" containers."""
    def test_not_constant(self):
        self.check_dont_optimize_func("a in b")
        self.check_dont_optimize_func("'x' in b")
        self.check_dont_optimize_func("a in 'xyz'")
        self.check_dont_optimize_func("a < b")
        self.check_dont_optimize_func("'x' < b")
        self.check_dont_optimize_func("a < 'xyz'")
    def test_contains_type_error(self):
        # these membership tests raise TypeError at runtime: don't fold
        self.check_dont_optimize_func("1 in 'abc'")
        self.check_dont_optimize_func("'x' in 2")
        self.check_dont_optimize_func("b'bytes' in 'unicode'")
        self.check_dont_optimize_func("'unicode' in b'bytes'")
    def test_contains(self):
        # str
        self.check_optimize_func("'a' in 'abc'", "True")
        self.check_optimize_func("'a' not in 'abc'", "False")
        # bytes
        self.check_optimize_func("65 in b'ABC'", "True")
        # tuple
        self.check_optimize_func("2 in (1, 2, 3)", "True")
        self.check_optimize_func("2 not in (1, 2, 3)", "False")
        # list
        self.check_optimize_func("2 in [1, 2, 3]", "True")
        # set
        self.check_optimize_func("2 in {1, 2, 3}", "True")
    def test_compare(self):
        self.check_optimize_func("1 < 2", "True")
        self.check_optimize_func("1 <= 2", "True")
        self.check_optimize_func("1 == 2", "False")
        self.check_optimize_func("1 != 2", "True")
        self.check_optimize_func("1 > 2", "False")
        self.check_optimize_func("1 >= 2", "False")
        # comparison between bytes and str can raise BytesWarning depending
        # on runtime option
        self.check_dont_optimize_func('"x" == b"x"')
        self.check_dont_optimize_func('b"x" == "x"')
        self.check_dont_optimize_func('"x" != b"x"')
        self.check_dont_optimize_func('b"x" != "x"')
        # bytes < str raises TypeError
        self.check_dont_optimize_func('b"bytes" < "str"')
    def test_is(self):
        self.check_optimize_func("None is None", "True")
    def test_contains_to_const(self):
        # list => tuple
        self.check_optimize_func("x in [1, 2]", "x in (1, 2)")
        # set => frozenset
        const = ast.Constant(value=frozenset({1, 2}))
        node = ast.Compare(left=ast.Name(id='x', ctx=ast.Load()),
                           ops=[ast.In()],
                           comparators=[const])
        self.check_optimize_func("x in {1, 2}", node)
        # [] is not a constant: don't optimize
        self.check_dont_optimize_func("x in [1, [], 2]")
        self.check_dont_optimize_func("x in {1, [], 2}")
class ConstantFoldingCondBlock(BaseConstantFoldingTests):
    """Constant folding must also be applied inside conditional blocks
    (if/for/while/try bodies)."""
    def test_if(self):
        self.check_optimize("""
            if test:
                x = 1 + 1
            else:
                x = 2 + 2
        """, """
            if test:
                x = 2
            else:
                x = 4
        """)
    def test_for(self):
        self.check_optimize("""
            for i in range(5):
                i += 1 + 1
        """, """
            for i in range(5):
                i += 2
        """)
    def test_while(self):
        self.check_optimize("""
            x = 0
            while x < 2:
                x += 1 +1
        """, """
            x = 0
            while x < 2:
                x += 2
        """)
    def test_try(self):
        self.check_optimize("""
            try:
                x = 1 + 1
            except:
                x = 2 + 2
            else:
                x = 3 + 3
            finally:
                x = 4 + 4
        """, """
            try:
                x = 2
            except:
                x = 4
            else:
                x = 6
            finally:
                x = 8
        """)
class NewOptimizerTests(BaseAstTests):
    """Optimizations must recurse into every scope-creating construct:
    functions, classes, comprehensions, generators and lambdas."""
    def setUp(self):
        super().setUp()
        self.config.constant_propagation = True
        self.config.constant_folding = True
    def test_FunctionDef(self):
        self.check_optimize("""
            x = 1
            def func():
                return 2 + 3 + x
        """, """
            x = 1
            def func():
                return 5 + x
        """)
    @need_python35
    def test_AsyncFunctionDef(self):
        self.check_optimize("""
            x = 1
            async def func():
                return 2 + 3 + x
        """, """
            x = 1
            async def func():
                return 5 + x
        """)
    def test_ClassDef(self):
        self.check_optimize("""
            x = 1
            class MyClass:
                y = 2 + 3 + x
        """, """
            x = 1
            class MyClass:
                y = 5 + x
        """)
    def test_DictComp(self):
        self.check_optimize("""
            x = 1
            y = {k: 2 + 3 + x for k in "abc"}
        """, """
            x = 1
            y = {k: 5 + x for k in "abc"}
        """)
    def test_ListComp(self):
        self.check_optimize("""
            x = 1
            y = [2 + 3 + x for k in "abc"]
        """, """
            x = 1
            y = [5 + x for k in "abc"]
        """)
    def test_SetComp(self):
        self.check_optimize("""
            x = 1
            y = {2 + 3 + x for k in "abc"}
        """, """
            x = 1
            y = {5 + x for k in "abc"}
        """)
    def test_GeneratorExp(self):
        self.check_optimize("""
            x = 1
            y = (2 + 3 + x for k in "abc")
        """, """
            x = 1
            y = (5 + x for k in "abc")
        """)
    def test_Lambda(self):
        self.check_optimize("""
            x = 1
            y = lambda: 2 + 3 + x
        """, """
            x = 1
            y = lambda: 5 + x
        """)
class ReplaceBuiltinConstantTests(BaseAstTests):
    """Replace the __debug__ builtin constant with its current value when
    replace_builtin_constant is enabled."""
    def test_constants(self):
        self.config.replace_builtin_constant = True
        self.check_optimize("__debug__", str(__debug__))
        self.config.replace_builtin_constant = False
        self.check_dont_optimize("__debug__")
class RemoveDeadCodeConstantTestTests(BaseAstTests):
    """Dead-code removal: constant if/while tests, unreachable statements
    after return, empty try/else blocks, empty loop iterables.

    NOTE(review): the logger-based tests assert exact "<string>:LINE"
    messages, so the line layout of the embedded code strings must not
    change. The nested get_logs() helper is duplicated in three tests;
    left as-is to keep this block byte-identical.
    """
    def setUp(self):
        super().setUp()
        self.config.remove_dead_code = True
    def test_config(self):
        # remove_dead_code=False disables the optimization
        self.config.remove_dead_code = False
        self.check_dont_optimize("if False: x = 1")
    def test_if(self):
        self.check_optimize("if False: x = 1", "pass")
        self.check_optimize("if True: x = 1", "x = 1")
        # removing the yield would change the function kind: don't touch
        self.check_dont_optimize("""
            def func():
                if 0:
                    yield
        """)
    def test_if_else(self):
        self.check_optimize("""
            if True:
                x = 1
            else:
                x = 2
        """, """
            x = 1
        """)
        self.check_optimize("""
            if False:
                x = 1
            else:
                y = 2
        """, """
            y = 2
        """)
    def test_while(self):
        self.check_optimize("while 0: x = 1", "pass")
        self.check_dont_optimize("while 1: x = 1")
    def test_while_else(self):
        self.check_optimize("""
            while 0:
                x = 1
            else:
                y = 2
        """, """
            y = 2
        """)
    def test_return(self):
        # statements after an unconditional return are removed
        self.check_optimize("""
            def func():
                x = 1
                return 1
                return 2
        """, """
            def func():
                x = 1
                return 1
        """)
        # removing the yield-from would change the function kind
        self.check_dont_optimize("""
            def func(obj):
                return
                if 0:
                    yield from obj
        """)
    def test_return_logger(self):
        # removed statements must be reported to the configured logger
        self.config.logger = io.StringIO()
        def get_logs():
            logger = self.config.logger
            logger.seek(0)
            return logger.readlines()
        self.check_optimize("""
            def func():
                x = 1
                return 1
                return 2
        """, """
            def func():
                x = 1
                return 1
        """)
        self.assertEqual(get_logs(),
                         ['<string>:4: fatoptimizer: Remove unreachable code: '
                          'Return(value=Constant(value=2))\n'])
    def test_try_dont_remove(self):
        self.check_dont_optimize("""
            try:
                pass
            except Exception:
                yield 3
        """)
    def test_try_dont_remove_illegal(self):
        # must raise SyntaxError
        self.check_dont_optimize("""
            for x in (1, 2):
                try:
                    pass
                except Exception:
                    func2()
                finally:
                    # continue is final is illegal
                    continue
        """)
        self.check_dont_optimize("""
            for x in (1, 2):
                try:
                    pass
                except Exception:
                    func2()
                else:
                    func3()
                finally:
                    # continue is final is illegal
                    continue
        """)
        self.check_dont_optimize("""
            for x in (1, 2):
                try:
                    pass
                except Exception:
                    try:
                        func2()
                    finally:
                        # continue is final is illegal
                        continue
        """)
    def test_try_empty_else(self):
        # else block is empty, body block is not empty
        # without final block
        self.check_optimize("""
            try:
                func1()
            except Exception:
                func2()
            else:
                pass
        """, """
            try:
                func1()
            except Exception:
                func2()
        """)
        # with final block
        self.check_optimize("""
            try:
                func1()
            except Exception:
                func2()
            else:
                pass
            finally:
                func3()
        """, """
            try:
                func1()
            except Exception:
                func2()
            finally:
                func3()
        """)
    def test_try_empty_body_non_empty_else(self):
        # try block is empty, else block is non empty
        # without final block
        self.check_optimize("""
            try:
                pass
            except Exception:
                func2()
            else:
                func3()
        """, """
            func3()
        """)
        # with final block
        self.check_optimize("""
            try:
                pass
            except Exception:
                func2()
            else:
                func3()
            finally:
                func4()
        """, """
            try:
                func3()
            finally:
                func4()
        """)
    def test_try_empty_body_empty_else(self):
        # try and else blocks are empty
        self.check_optimize("""
            try:
                pass
            except Exception:
                func2()
            else:
                pass
        """, """
            pass
        """)
        self.check_optimize("""
            try:
                pass
            except Exception:
                func2()
            else:
                pass
            finally:
                func3()
        """, """
            func3()
        """)
    def test_try_logger_empty_else(self):
        self.config.logger = io.StringIO()
        def get_logs():
            logger = self.config.logger
            logger.seek(0)
            return logger.readlines()
        self.check_optimize("""
            try:
                func1()
            except Exception:
                func2()
            else:
                pass
                pass
            finally:
                func3()
        """, """
            try:
                func1()
            except Exception:
                func2()
            finally:
                func3()
        """)
        self.assertEqual(get_logs(),
                         ['<string>:6: fatoptimizer: Remove dead code (empty else block in try/except): Pass()\n',
                          '<string>:7: fatoptimizer: Remove dead code (empty else block in try/except): Pass()\n'])
    def test_try_logger_empty_body(self):
        self.config.logger = io.StringIO()
        def get_logs():
            logger = self.config.logger
            logger.seek(0)
            return logger.readlines()
        self.check_optimize("""
            try:
                pass
            except Exception:
                func2()
            finally:
                func3()
        """, """
            func3()
        """)
        self.assertEqual(get_logs(),
                         ['<string>:2: fatoptimizer: Remove dead code '
                          '(empty try block): Pass()\n',
                          '<string>:3: fatoptimizer: Remove dead code '
                          "(empty try block): "
                          "ExceptHandler(type=Name(id='Exception', "
                          "ctx=Load()), name=None, "
                          "body=[Expr(value=Call(func=Name(id='(...)\n"])
    def test_for_empty_iter(self):
        self.check_optimize("for x in (): print(x)", "pass")
        self.check_optimize("""
            for x in ():
                print(x)
            else:
                y = 1
        """, """
            y = 1
        """)
    def test_if_empty_else(self):
        self.check_optimize("""
            if test:
                if_block
            else:
                pass
        """, """
            if test:
                if_block
        """)
        # empty body with an else: invert the condition
        self.check_optimize("""
            if test:
                pass
            else:
                else_block
        """, """
            if not test:
                else_block
        """)
        self.check_dont_optimize("""
            if test:
                pass
        """)
    def test_while_empty_else(self):
        self.check_optimize("""
            while test:
                body
            else:
                pass
        """, """
            while test:
                body
        """)
        self.check_dont_optimize("""
            while test:
                pass
            else:
                else_block
        """)
    def test_for_empty_else(self):
        self.check_optimize("""
            for obj in seq:
                body
            else:
                pass
        """, """
            for obj in seq:
                body
        """)
        self.check_dont_optimize("""
            for obj in seq:
                pass
            else:
                else_block
        """)
class SimplifyIterableTests(BaseAstTests):
    """Replace for-loop iterables with simpler constants: empty containers
    with (), list with tuple, set with frozenset, range() with a tuple."""
    def setUp(self):
        super().setUp()
        self.config.simplify_iterable = True
    def test_config(self):
        self.config.simplify_iterable = False
        self.check_dont_optimize('''
            for x in {}: pass
        ''')
    def test_replace_with_empty_tuple(self):
        # empty list
        self.check_optimize('for x in []: pass',
                            'for x in (): pass')
        # empty dict
        self.check_optimize('for x in {}: pass',
                            'for x in (): pass')
        # need a guard on set() builtin
        self.check_dont_optimize('for x in set(): pass')
    def test_replace_with_constant(self):
        # list => tuple
        self.check_optimize('for x in [1, 2, 3]: pass',
                            'for x in (1, 2, 3): pass')
        # set => frozenset
        self.check_optimize('for x in {1, 2, 3}: pass',
                            ast.For(target=ast.Name(id='x', ctx=ast.Store()),
                                    iter=ast.Constant(frozenset((1, 2, 3))),
                                    body=[ast.Pass()],
                                    orelse=[]))
        # don't optimize if items are not constants
        self.check_dont_optimize('for x in [1, x]: pass')
        self.check_dont_optimize('for x in {1, x}: pass')
    def test_range(self):
        # guarded range() calls are expanded to constant tuples
        self.check_builtin_func('range', '''
            for x in range(3):
                pass
        ''', '''
            for x in (0, 1, 2):
                pass
        ''')
        self.check_builtin_func('range', '''
            for x in range(5, 7):
                pass
        ''', '''
            for x in (5, 6):
                pass
        ''')
        self.check_builtin_func('range', '''
            for x in range(0, 10, 2):
                pass
        ''', '''
            for x in (0, 2, 4, 6, 8):
                pass
        ''')
class InliningTests(BaseAstTests):
    """Tests for function inlining (config.inlining): calls to simple
    functions are replaced by the callee's (renamed) return expression."""
    def setUp(self):
        super().setUp()
        self.config.inlining = True
    def test_config(self):
        self.config.inlining = False
        self.check_dont_optimize('''
            def g(x):
                return 42
            def f(x):
                return g(x) + 3
        ''')
    def test_trivial(self):
        self.check_optimize('''
            def g(x):
                return 42
            def f(x):
                return g(x) + 3
        ''', '''
            def g(x):
                return 42
            def f(x):
                return 42 + 3
        ''')
    def test_nested_function(self):
        self.check_optimize('''
            def f(x):
                def g(x):
                    return 100
                return g(x) + 3
        ''', '''
            def f(x):
                def g(x):
                    return 100
                return 100 + 3
        ''')
    # It shouldn't matter if the caller is defined before the callee,
    # but currently it does
    @unittest.expectedFailure
    def test_out_of_order(self):
        self.check_optimize('''
            def f(x):
                return g(x) + 3
            def g(x):
                return 42
        ''', '''
            def f(x):
                return 42 + 3
            def g(x):
                return 42
        ''')
    def test_simple(self):
        self.check_optimize('''
            def g(x):
                return x * x
            def f(x):
                return g(x) + 3
        ''', '''
            def g(x):
                return x * x
            def f(x):
                return (x * x) + 3
        ''')
    def test_constant(self):
        # constant arguments are substituted into the inlined body
        self.check_optimize('''
            def g(x):
                return x * x
            def f(x):
                return g(7) + 3
        ''', '''
            def g(x):
                return x * x
            def f(x):
                return (7 * 7) + 3
        ''')
    def test_self_recursive(self):
        self.check_dont_optimize('''
            def f(x):
                return f(x)
        ''')
    @unittest.expectedFailure
    def test_mutually_recursive(self):
        self.check_dont_optimize('''
            def f(x):
                return g(x)
            def g(x):
                return f(x)
        ''')
    def test_not_enough_positional_args(self):
        self.check_dont_optimize('''
            def g(x):
                return x * x
            def f(x):
                return g() + 3
        ''')
    def test_too_many_positional_args(self):
        self.check_dont_optimize('''
            def g(x):
                return x * x
            def f(p, q, r):
                return g(p, q, r) + 3
        ''')
    @unittest.expectedFailure
    def test_starargs(self):
        self.check_optimize('''
            def g(*args):
                return args[0]
            def f(x):
                return g(1, 2, 3) + 3
        ''', '''
            def g(*args):
                return args[0]
            def f(x):
                return (1, 2, 3)[0] + 3
        ''')
    def test_keyword_args(self):
        self.check_optimize('''
            def g(foo, bar):
                return foo * bar
            def f(x, y):
                return g(foo=x, bar=y) + 3
        ''', '''
            def g(foo, bar):
                return foo * bar
            def f(x, y):
                return (x * y) + 3
        ''')
    def test_keyword_args_reversed(self):
        # keyword arguments are mapped by name, not position
        self.check_optimize('''
            def g(foo, bar):
                return foo * bar
            def f(x, y):
                return g(bar=x, foo=y) + 3
        ''', '''
            def g(foo, bar):
                return foo * bar
            def f(x, y):
                return (y * x) + 3
        ''')
    @unittest.expectedFailure
    def test_kwargs(self):
        self.check_optimize('''
            def g(**kwargs):
                return args['foo']
            def f(x):
                return g(foo=42) + 3
        ''', '''
            def g(**kwargs):
                return args['foo']
            def f(x):
                return {'foo':42}['foo'] + 3
        ''')
    def test_remap_varnames(self):
        # callee parameter names are remapped to caller argument names
        self.check_optimize('''
            def g(y):
                return y * y
            def f(x):
                return g(x) + 3
        ''', '''
            def g(y):
                return y * y
            def f(x):
                return (x * x) + 3
        ''')
    def test_callee_uses_locals(self):
        # locals() in the callee makes inlining observable: don't inline
        self.check_dont_optimize('''
            def g1(y):
                return locals()
            def f1(x):
                return g1(x)
        ''')
    def test_caller_uses_locals(self):
        # locals() in the caller is fine
        self.check_optimize('''
            def g2(y):
                return y * y
            def f2(x):
                a = g2(x)
                print(locals())
                return a
        ''', '''
            def g2(y):
                return y * y
            def f2(x):
                a = x * x
                print(locals())
                return a
        ''')
    def test_compound_expression(self):
        self.check_optimize('''
            def discriminant(a, b, c):
                return (b * b) - (4 * a * c)
            def count_real_solutions(a, b, c):
                d = discriminant(a, b, c)
                if d > 0:
                    return 2
                elif d == 0:
                    return 1
                else:
                    return 0
        ''', '''
            def discriminant(a, b, c):
                return (b * b) - (4 * a * c)
            def count_real_solutions(a, b, c):
                d = (b * b) - (4 * a * c)
                if d > 0:
                    return 2
                elif d == 0:
                    return 1
                else:
                    return 0
        ''')
    def test_pass(self):
        # a pass-only callee inlines to None
        self.check_optimize('''
            def noop(a, b, c):
                pass
            def caller_of_noop(x):
                a = noop(x, 4, 'foo')
        ''', '''
            def noop(a, b, c):
                pass
            def caller_of_noop(x):
                a = None
        ''')
class ModuleConfigTests(BaseAstTests):
# Tests that a module-level __fatoptimizer__ dict overrides the
# optimizer configuration for that module.
def get_config(self, config_dict):
# Build a module whose only statement declares the config, run the
# module optimizer on it, and return the resulting configuration.
source = '__fatoptimizer__ = %r' % config_dict
optimizer = fatoptimizer.optimizer.ModuleOptimizer(self.config, 'test')
tree = ast.parse(source)
optimizer.optimize(tree)
return optimizer.config
def test_enabled(self):
self.assertEqual(self.config.enabled, True)
config = self.get_config({'enabled': False})
self.assertEqual(config.enabled, False)
def test_max(self):
# The per-module dict must override limits set on the base config.
self.config.max_int_bits = 1
self.config.max_bytes_len = 2
config = self.get_config({
'max_int_bits': 10,
'max_bytes_len': 20,
})
self.assertEqual(config.max_int_bits, 10)
self.assertEqual(config.max_bytes_len, 20)
class CompleteTests(BaseAstTests):
# Whole-pipeline tests: run with every optimization enabled so that
# several passes (constant propagation, dead-code elimination) combine.
def setUp(self):
super().setUp()
self.config.enable_all()
def test_cond_block(self):
# test that ast.If.test is optimized: a and b variables must be
# replaced with their values. The if condition should be replaced with
# False, and so the whole if is removed.
self.check_optimize('''
a = 5
b = a
if a != b: print(a)
''', '''
a = 5
b = 5
pass
''')
class MiscTests(unittest.TestCase):
def test_version(self):
# Keep fatoptimizer.__version__ in sync with the version declared in
# setup.py (imported as a plain module from the project root).
import setup
self.assertEqual(fatoptimizer.__version__, setup.VERSION)
class CallPureMethodTests(BaseAstTests):
# Constant folding of method calls on constant objects ("pure methods"
# registered by fatoptimizer.methods.add_pure_methods).
def setUp(self):
super().setUp()
from fatoptimizer.methods import add_pure_methods
add_pure_methods(self.config)
def test_bytes_decode(self):
# test number of arguments
self.check_optimize(r'b"abc".decode()',
r'"abc"')
self.check_optimize(r'b"abc".decode("ascii")',
r'"abc"')
self.check_optimize(r'b"ab\xff".decode("ascii", "replace")',
r'"ab\ufffd"')
# test encoding aliases
self.check_optimize(r'b"abc".decode("ASCII")',
r'"abc"')
self.check_optimize(r'b"abc".decode("latin1")',
r'"abc"')
self.check_optimize(r'b"abc".decode("utf8")',
r'"abc"')
# test decode error
self.check_dont_optimize(r'b"ab\xff".decode("ascii")')
# unsupported encoding/errors
self.check_dont_optimize(r'b"ab\xff".decode("big5")')
self.check_dont_optimize(r'b"ab\xff".decode("ascii", "surrogateescape")')
# NOTE(review): despite its name this method folds *str* methods (it
# duplicates the first half of test_str below) -- confirm whether
# bytes-method folding was meant to be exercised here instead.
def test_bytes(self):
self.check_optimize(r'"ABC".lower()', '"abc"')
self.check_optimize(r'"ABC".upper()', '"ABC"')
self.check_optimize(r'"abc".capitalize()', '"Abc"')
self.check_optimize(r'"aBc".swapcase()', '"AbC"')
self.check_optimize(r'"ABC".casefold()', '"abc"')
self.check_optimize(r'"abc".isalpha()', 'True')
self.check_optimize(r'"abc123".isalnum()', 'True')
self.check_optimize(r'"1".isdecimal()', 'True')
self.check_optimize(r'"1".isdigit()', 'True')
self.check_optimize(r'"abc".islower()', 'True')
self.check_optimize(r'"1".isnumeric()', 'True')
self.check_optimize(r'"ABC".isupper()', 'True')
self.check_optimize(r'"1".isidentifier()', 'False')
self.check_optimize(r'"def".isidentifier()', 'True')
self.check_optimize(r'"A Title".istitle()', 'True')
self.check_optimize(r'" ".isspace()', 'True')
self.check_optimize(r'"AbC".swapcase()', '"aBc"')
self.check_optimize(r'"hello world".title()', '"Hello World"')
def test_float(self):
self.check_optimize(r'(5.0).is_integer()', 'True')
self.check_optimize(r'(1.5).as_integer_ratio()', '(3, 2)')
self.check_optimize(r'(1.5).hex()', '"0x1.8000000000000p+0"')
def test_int(self):
self.check_optimize(r'(1023).bit_length()', '10')
def test_str_encode(self):
# test number of arguments
self.check_optimize(r'"abc".encode()',
'b"abc"')
self.check_optimize(r'"abc".encode("ascii")',
r'b"abc"')
self.check_optimize(r'"ab\xff".encode("ascii", "replace")',
r'b"ab?"')
# test encode error
self.check_dont_optimize(r'"ab\xff".encode("ascii")')
# unsupported encoding/errors
self.check_dont_optimize(r'"ab\xff".encode("big5")')
self.check_dont_optimize(r'"ab\xff".encode("ascii", "backslashreplace")')
def test_str(self):
self.check_optimize(r'"ABC".lower()', '"abc"')
self.check_optimize(r'"ABC".upper()', '"ABC"')
self.check_optimize(r'"abc".capitalize()', '"Abc"')
self.check_optimize(r'"aBc".swapcase()', '"AbC"')
self.check_optimize(r'"ABC".casefold()', '"abc"')
self.check_optimize(r'"abc".isalpha()', 'True')
self.check_optimize(r'"abc123".isalnum()', 'True')
self.check_optimize(r'"1".isdecimal()', 'True')
self.check_optimize(r'"1".isdigit()', 'True')
self.check_optimize(r'"abc".islower()', 'True')
self.check_optimize(r'"1".isnumeric()', 'True')
self.check_optimize(r'"ABC".isupper()', 'True')
self.check_optimize(r'"1".isidentifier()', 'False')
self.check_optimize(r'"def".isidentifier()', 'True')
self.check_optimize(r'"A Title".istitle()', 'True')
self.check_optimize(r'" ".isspace()', 'True')
self.check_optimize(r'"AbC".swapcase()', '"aBc"')
self.check_optimize(r'"hello world".title()', '"Hello World"')
self.check_optimize(r'"abc".center(5)', '" abc "')
self.check_optimize(r'"hello".count("l")', '2')
self.check_optimize(r'"abc".endswith("c")', 'True')
self.check_optimize(r'"01\t012\t0123\t01234".expandtabs()', '"01 012 0123 01234"')
self.check_optimize(r'"Python".find("Py")', '0')
self.check_optimize(r'"Python".index("Py")', '0')
self.check_optimize(r'"jgh\ffh".isprintable()', 'False')
self.check_optimize(r'"ABC".isupper()', 'True')
self.check_optimize(r'"ABC".ljust(5)', '"ABC "')
self.check_optimize(r'" spacious ".lstrip()', '"spacious "')
#FIXME: tox err:the first two maketrans arguments must have equal length
#self.check_optimize(r'"hello".maketrans("o","h")', '{111: 104}')
self.check_optimize(r'"hello".partition("l")', '("he", "l", "lo")')
self.check_optimize(r'"hello".replace("l","d")', '"heddo"')
self.check_optimize(r'"hello".rfind("l")', '3')
self.check_optimize(r'"hello".rfind("l")', '3')
self.check_optimize(r'"hello".rjust(8)', '" hello"')
self.check_optimize(r'"hello".partition("l")', '("he", "l", "lo")')
self.check_optimize(r'"abbc".rsplit("b")', '["a", "", "c"]')
self.check_optimize(r'" spacious ".rstrip()', '" spacious"')
self.check_optimize(r'"abbc".split("b")', '["a", "", "c"]')
self.check_optimize(r'"ab c\n\nde fg\rkl\r\n".splitlines()', '["ab c", "", "de fg", "kl"]')
self.check_optimize(r'" spacious ".strip()', '"spacious"')
self.check_optimize(r'"abc".zfill(6)', '"000abc"')
# Run the whole test suite when the file is executed directly.
if __name__ == "__main__":
unittest.main()
| haypo/fatoptimizer | test_fatoptimizer.py | Python | mit | 95,707 | [
"VisIt"
] | 06da45e18f18360dcb899bd0445650f5b5a3398861add1d858213645f149556f |
# -*- coding: utf-8 -*-
from pygsp import utils
from . import Filter # prevent circular import in Python < 3.5
class Gabor(Filter):
r"""Design a filter bank with a kernel centered at each frequency.
Design a filter bank from translated versions of a mother filter.
The mother filter is translated to each eigenvalue of the Laplacian.
That is equivalent to convolutions with deltas placed at those eigenvalues.
In classical image processing, a Gabor filter is a sinusoidal wave
multiplied by a Gaussian function (here, the kernel). It analyzes whether
there are any specific frequency content in the image in specific
directions in a localized region around the point of analysis. This
implementation for graph signals allows arbitrary (but isotropic) kernels.
This filter bank can be used to compute the frequency content of a signal
at each vertex. After filtering, one obtains a vertex-frequency
representation :math:`Sf(i,k)` of a signal :math:`f` as
.. math:: Sf(i, k) = \langle g_{i,k}, f \rangle,
where :math:`g_{i,k}` is the mother kernel centered on eigenvalue
:math:`\lambda_k` and localized on vertex :math:`v_i`.
While :math:`g_{i,k}` should ideally be localized in both the spectral and
vertex domains, that is impossible for some graphs due to the localization
of some eigenvectors. See :attr:`pygsp.graphs.Graph.coherence`.
Parameters
----------
graph : :class:`pygsp.graphs.Graph`
kernel : :class:`pygsp.filters.Filter`
Kernel function to be centered at each graph frequency (eigenvalue of
the graph Laplacian).
See Also
--------
Modulation : Another way to translate a filter in the spectral domain.
Notes
-----
The eigenvalues of the graph Laplacian (i.e., the Fourier basis) are needed
to center the kernels.
Examples
--------
Filter bank's representation in Fourier and time (path graph) domains.
>>> import matplotlib.pyplot as plt
>>> G = graphs.Path(N=7)
>>> G.compute_fourier_basis()
>>> G.set_coordinates('line1D')
>>>
>>> g1 = filters.Expwin(G, band_min=None, band_max=0, slope=3)
>>> g2 = filters.Rectangular(G, band_min=-0.05, band_max=0.05)
>>> g3 = filters.Heat(G, scale=10)
>>>
>>> fig, axes = plt.subplots(3, 2, figsize=(10, 10))
>>> for g, ax in zip([g1, g2, g3], axes):
... g = filters.Gabor(G, g)
... s = g.localize(G.N // 2, method='exact')
... _ = g.plot(ax=ax[0], sum=False)
... _ = G.plot(s, ax=ax[1])
>>> fig.tight_layout()
"""
def __init__(self, graph, kernel):
# The mother kernel must be a single filter built on the same graph,
# otherwise translating it to this graph's eigenvalues is meaningless.
if kernel.n_filters != 1:
raise ValueError('A kernel must be one filter. The passed '
'filter bank {} has {}.'.format(
kernel, kernel.n_filters))
if kernel.G is not graph:
raise ValueError('The graph passed to this filter bank must '
'be the one used to build the mother kernel.')
# One translated kernel per eigenvalue. The default argument i=i
# freezes the loop index in each lambda (avoids the late-binding
# closure pitfall); graph.e[i] is the i-th Laplacian eigenvalue.
kernels = []
for i in range(graph.n_vertices):
kernels.append(lambda x, i=i: kernel.evaluate(x - graph.e[i]))
super(Gabor, self).__init__(graph, kernels)
def filter(self, s, method='exact', order=None):
"""TODO: indirection will be removed when poly filtering is merged."""
# The method/order arguments are deliberately ignored for now:
# 'exact' filtering is forced until polynomial filtering is merged.
return super(Gabor, self).filter(s, method='exact')
| epfl-lts2/pygsp | pygsp/filters/gabor.py | Python | bsd-3-clause | 3,450 | [
"Gaussian"
] | 7b5d9958d3d3a7c3659db4eb7f5d6de4b21b8a5cb63af8bcc815f1e0b0d73ae8 |
#!/usr/bin/env python
import vtk, os
from pymicro.crystal.lattice import Crystal
from pymicro.view.scene3d import Scene3D
from pymicro.view.vtk_utils import *
from vtk.util.colors import gold
'''
Create a 3d scene with unit cells of a primitive tetragonal crystal
lattice. The associated basis consists of one Au atom at (0., 0., 0.)
and one Cu atom at (0.5, 0.5, 0.5), so that the unit cell contains
2 atoms (tP2 Pearson symbol). 4 units cells are shown.
'''
# RGB triple used for the Cu atoms (Au uses vtk.util.colors.gold).
copper = (1.000000, 0.780392, 0.494117)
# Tetragonal lattice parameters (presumably in nm -- TODO confirm
# against pymicro's unit convention).
a = 0.2867
c = 0.411
# NOTE(review): `Lattice` is not imported explicitly above; presumably
# provided by the star import from pymicro.view.vtk_utils -- confirm,
# or import it from pymicro.crystal.lattice directly.
tl = Lattice.tetragonal(a, c)
# NOTE(review): pokes the private _centering attribute to force a
# primitive ('P') cell.
tl._centering = 'P'
# A, B, C are the three lattice vectors of the unit cell.
[A, B, C] = tl._matrix
origin = (0., 0., 0.)
# Two-atom basis (Au + Cu) -> tP2 structure, rendered as 2x2x1 cells.
AuCu = Crystal(tl, basis=[(0., 0., 0.), (0.5, 0.5, 0.5)], basis_labels=['Au', 'Cu'], basis_sizes=[0.5, 0.5],
basis_colors=[gold, copper])
AuCu_actor = crystal_3d(AuCu, origin, m=2, n=2, p=1, hide_outside=True)
# create the 3D scene
base_name = os.path.splitext(__file__)[0]
s3d = Scene3D(display=False, ren_size=(800, 800), name=base_name)
s3d.add(AuCu_actor)
# Camera: z axis up, focal point at the middle of the cell assembly.
cam = setup_camera(size=A + B + C)
cam.SetViewUp(0, 0, 1)
cam.SetFocalPoint(A + B + C / 2)
cam.SetPosition(4.0, 1.5, 1.5)
cam.Dolly(2.0)
s3d.set_camera(cam)
s3d.render()
# thumbnail for the image gallery
from matplotlib import image
image_name = base_name + '.png'
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
| heprom/pymicro | examples/3d_visualisation/AuCu_crystal.py | Python | mit | 1,319 | [
"CRYSTAL",
"VTK"
] | be8babe69c6595a3d04f7f67171bbc3ed8aad056ea7e3a04b887e5a6d88ef729 |
import argparse
import os
import shutil
import struct
import sys
import tempfile
import time
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.static import File
from twisted.python import log
import mhef.n3ds
import proxy
def _read_padded_text(quest, length, min_lines):
    """Read a UTF-16 text block of ``length`` bytes from ``quest`` and
    return its lines joined by '|', right-padded with ' ' lines up to
    ``min_lines`` (the DLC page format expects a fixed line count)."""
    lines = quest.read(length).decode('utf-16').strip('\x00').split('\n')
    if len(lines) < min_lines:
        lines.extend([' '] * (min_lines - len(lines)))
    return '|'.join(lines)


def make_quests(path, cipher, language, quest_files):
    """Encrypt quest files into ``path`` and build the quest index page.

    Parameters
    ----------
    path : str
        Directory that receives the encrypted ``m*.mib`` quest files.
    cipher : mhef.n3ds.DLCCipher
        Cipher used to encrypt the quest files and the index page.
    language : str
        Game language code; 'fre', 'spa', 'ger' and 'ita' select their
        own text block, everything else falls back to block 0.
    quest_files : sequence of str
        Paths of the decrypted quest files to serve.

    Returns
    -------
    bytes
        The encrypted, BOM-prefixed UTF-8 quest-info page.
    """
    # Language -> index into the per-quest language offset table.
    lang_id = {'fre': 1, 'spa': 2, 'ger': 3, 'ita': 4}.get(language, 0)
    quests_page = ''
    for i, quest_file in enumerate(quest_files):
        # Context managers close all files (the original leaked the
        # handle of each output file via open(...).write(...)).
        with open(quest_file, 'rb') as quest:
            # Offset of the quest info record, then the record itself.
            info_offset = struct.unpack('I', quest.read(4))
            quest.seek(info_offset[0])
            info = struct.unpack('8I2H3B33x5H', quest.read(82))
            # info[7] points at the table of per-language text offsets.
            quest.seek(info[7])
            language_offset = struct.unpack('5I', quest.read(20))
            quest.seek(language_offset[lang_id])
            text_offset = struct.unpack('7I', quest.read(28))
            # The seven text blocks are stored back to back, so each
            # block's length is the difference of consecutive offsets;
            # the last block ends where the offset table itself starts.
            quest.seek(text_offset[0])
            title = quest.read(text_offset[1] - text_offset[0]).decode('utf-16').strip('\x00')
            success = _read_padded_text(quest, text_offset[2] - text_offset[1], 2)
            failure = _read_padded_text(quest, text_offset[3] - text_offset[2], 2)
            summary = _read_padded_text(quest, text_offset[4] - text_offset[3], 7)
            main_monsters = _read_padded_text(quest, text_offset[5] - text_offset[4], 2)
            client = quest.read(text_offset[6] - text_offset[5]).decode('utf-16').strip('\x00')
            sub_quest = quest.read(language_offset[lang_id] - text_offset[6]).decode('utf-16').strip('\x00')
            quests_page += time.strftime('%Y%m%d') + u'{:02d}|{:05d}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}\n'.format(i, info[8], title, info[0], info[9], 0, info[10], info[5], info[2], info[1], info[13], info[14], info[15], info[16], info[17], info[11], info[12], success, sub_quest, failure, main_monsters, client, summary)
            # Re-encrypt the whole quest file under its quest id (info[8]).
            quest.seek(0)
            with open(os.path.join(path, 'm{:05d}.mib'.format(info[8])), 'wb') as out:
                out.write(cipher.encrypt(quest.read()))
    return cipher.encrypt(b'\xef\xbb\xbf' + quests_page.encode('utf-8'))
def _write_dlc(path, name, data):
    """Write ``data`` to file ``name`` inside ``path``, closing the file."""
    with open(os.path.join(path, name), 'wb') as out:
        out.write(data)


def make_root(root, region, language, event, challenge):
    """Build the DLC directory tree for one game region/language.

    Parameters
    ----------
    root : str
        Directory under which the region-specific DLC sub-tree is created.
    region : str
        One of 'JPN', 'USA', 'EUR', 'KOR' or 'TWN'; selects both the
        encryption keys and the directory layout. Anything else falls
        back to the Japanese layout (same as the original if/elif chain).
    language : str
        Language suffix embedded in the generated file names.
    event, challenge : sequence of str or None
        Decrypted quest files to serve; when falsy, a "no quests"
        placeholder page is written instead.
    """
    # Region -> (mhef key constant, DLC sub-directory).
    key, subdir = {
        'USA': (mhef.n3ds.MH4G_NA, '3ds/mh4g_us_'),
        'EUR': (mhef.n3ds.MH4G_EU, '3ds/mh4g_eu_'),
        'KOR': (mhef.n3ds.MH4G_KR, '3ds/mh4g_kr_'),
        'TWN': (mhef.n3ds.MH4G_TW, 'redgiant/dl/pro_tw'),
    }.get(region, (mhef.n3ds.MH4G_JP, '3ds/mh4g_nihon'))
    # BUG FIX: the original consulted the module-global ``args.region``
    # here, silently ignoring the ``region`` parameter.
    cipher = mhef.n3ds.DLCCipher(key)
    path = os.path.join(root, subdir)
    os.makedirs(path)
    # Same three-line banner for every info page.
    default_info = cipher.encrypt(time.strftime('%Y%m%d00|1|0| |Monster Hunter Quest Server\n%Y%m%d00|2|0| |Version BETA 2 \n%Y%m%d00|3|0| |github.com/svanheulen/mhqs '))
    for info_name in ('Notice', 'Otomo', 'Quest', 'Special'):
        _write_dlc(path, 'DLC_Info_{}_{}.txt'.format(info_name, language), default_info)
    # Empty shop pages (all-zero field lists expected by the game).
    _write_dlc(path, 'DLC_EShopInfo.txt', cipher.encrypt('0|0|0|0|0|0|0'))
    _write_dlc(path, 'DLC_ShopAmulInfo_{}.txt'.format(language), cipher.encrypt('0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0'))
    _write_dlc(path, 'DLC_ShopEquiInfo_{}.txt'.format(language), cipher.encrypt('0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0'))
    _write_dlc(path, 'DLC_ShopItemInfo_{}.txt'.format(language), cipher.encrypt('0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0'))
    _write_dlc(path, 'DLC_Info_List_{}.txt'.format(language), cipher.encrypt(time.strftime('%Y%m%d00|0|Content Preview')))
    _write_dlc(path, time.strftime('DLC_Info_%Y%m%d00_{}.txt'.format(language)), cipher.encrypt(time.strftime('%y/%m/%d|0|Information:|Monster Hunter Quest Server|This software is licensed|under GPLv3. Please visit|github.com/svanheulen/mhqs|for more information.| | | | | ')))
    # Placeholder page used whenever a quest category has no content.
    default_quests = cipher.encrypt('0|0| |0|0|0|0|0|0|0|98|98|98|98|98|0|0| | | | | | | | | | | | | | | ')
    _write_dlc(path, 'DLC_EventQuestInfo_{}.txt'.format(language),
               make_quests(path, cipher, language, event) if event else default_quests)
    _write_dlc(path, 'DLC_ChallengeQuestInfo_{}.txt'.format(language),
               make_quests(path, cipher, language, challenge) if challenge else default_quests)
    _write_dlc(path, 'DLC_EpisodeQuestInfo_{}.txt'.format(language), default_quests)
    _write_dlc(path, 'DLC_EpisodeQuestInfo2_{}.txt'.format(language), cipher.encrypt(' | | | | |0|0|0'))
    _write_dlc(path, 'DLC_OtomoInfo_{}.txt'.format(language), cipher.encrypt('0|| | |0|0|0|0|0|0|0|0|0|0|0| '))
    _write_dlc(path, 'DLC_Special_{}.txt'.format(language), cipher.encrypt('0||0| '))
# Command-line interface: region/language pick the cipher keys and DLC
# paths; the quest file lists are optional.
parser = argparse.ArgumentParser(description='Runs a proxy for serving custom MH4U DLC quests.')
parser.add_argument('region', choices=('JPN', 'USA', 'EUR', 'KOR', 'TWN'), help='your game region')
parser.add_argument('language', choices=('jpn', 'eng', 'fre', 'spa', 'ger', 'ita', 'kor'), help='your game language')
parser.add_argument('--event', nargs='+', help='the decrypted event quest files to serve')
parser.add_argument('--challenge', nargs='+', help='the decrypted challenge quest files to serve')
args = parser.parse_args()
# Generate the DLC tree in a throw-away directory, serve it over HTTP
# (port 8081) behind a tunnelling proxy (port 8080).
root = tempfile.mkdtemp()
try:
make_root(root, args.region, args.language, args.event, args.challenge)
log.startLogging(sys.stderr)
reactor.listenTCP(8080, proxy.TunnelProxyFactory())
reactor.listenTCP(8081, Site(File(root)))
reactor.run()
finally:
# Always remove the generated tree, even if the reactor fails.
shutil.rmtree(root)
| svanheulen/mhqs | mh4u_proxy.py | Python | gpl-3.0 | 7,086 | [
"VisIt"
] | 43c77f3f596f0bb2b07e555e021ae1245b32e71f52e2da6dc543afb2b968867a |
import types
class SeqioFilter( list ):
"""This class is to allow filtering of the Biopython SeqIO record
SeqIO.parse returns a generator object so anytime you want to perform
an action on it, you must iterate through the entire list. This
class add the ability to filter and return only a subset of the
features.
Note:
To use simply pass a SeqIO.parse object to it and then when
the object is called a keyword is passed to it and only those
features matching the keyword are returned.
Example:
record = SeqioFilter(SeqIO.parse(infile)):
#no change to standard SeqIO calls
for entry in record:
print(entry.id, entry.seq)
#now we can get only certain features
for cds in record.get_feature('CDS'):
print(cds)
"""
def __init__( self, content ):
# Iteration cursor for the custom __iter__/__next__ protocol below.
self.n = 0
# Map record id -> list position so get_entry() is O(1).
self.get_n = dict()
for n, item in enumerate(content):
self.attach_methods(item)
self.get_n[item.id] = n
self.append(item)
def __iter__(self):
# NOTE(review): the cursor (self.n) is shared state, so two
# simultaneous iterations over the same object would interfere.
return self
def __next__(self):
try:
item = self[self.n]
except IndexError:
# Reset the cursor so the collection can be iterated again.
self.n = 0
raise StopIteration()
self.n += 1
return item
def __call__( self, keyword='' ):
# NOTE(review): stub -- the class docstring describes keyword-based
# filtering on call, but no filtering is implemented here.
pass
def get_entry(self, id):
# O(1) lookup of a record by its SeqIO id.
return self[self.get_n[id]]
def attach_methods(self, target):
"""This method allows attaching new methods to the SeqIO entry object
Args:
target: is the SeqIO object that will be attaching a method to
"""
# NOTE(review): the class docstring example calls
# record.get_feature('CDS'), but only entry.get_features(...) is
# attached here -- confirm the intended API.
def get_features(target, feature_type):
# Yield features, optionally restricted to one feature type; an
# empty feature_type yields everything.
for feature in target.features:
feature.id = " ".join(feature.qualifiers.get('locus_tag', [str(feature.location)]))
feature.function = " ".join(feature.qualifiers.get('product',['unknown']))
# Convert to 1-based start; swap so start > stop on the reverse strand.
feature.start = int(feature.location.start) + 1
feature.stop = int(feature.location.end)
if feature.strand < 0:
feature.start, feature.stop = feature.stop, feature.start
if not feature_type or feature.type == feature_type:
yield feature
# Bind the generator as a method on this specific record instance.
target.get_features = types.MethodType(get_features,target)
| linsalrob/EdwardsLab | roblib/seqio_filter.py | Python | mit | 2,355 | [
"Biopython"
] | 29c3554077ec5f32851af5355bb9daeb30b4f0eb4b63c3f10fe9d4bd870aa7bb |
from rdkit.Chem.rdMolDescriptors import CalcNumRotatableBonds
from ._base import Descriptor
from .BondCount import BondCount
__all__ = ("RotatableBondsCount", "RotatableBondsRatio")
class RotatableBondsBase(Descriptor):
# Shared plumbing for the rotatable-bond descriptors below.
__slots__ = ()
# Work on the heavy-atom graph: explicit hydrogens are never rotatable.
explicit_hydrogens = False
@classmethod
def preset(cls, version):
# A single parameterless instance in the preset descriptor list.
yield cls()
def parameters(self):
# No constructor parameters.
return ()
class RotatableBondsCount(RotatableBondsBase):
r"""rotatable bonds count descriptor(rdkit wrapper)."""
since = "1.0.0"
__slots__ = ()
def description(self):
return "rotatable bonds count"
def __str__(self):
# Short column name used in mordred output tables.
return "nRot"
def calculate(self):
# Delegate the actual counting to RDKit.
return CalcNumRotatableBonds(self.mol)
rtype = int
class RotatableBondsRatio(RotatableBondsBase):
r"""rotatable bonds ratio descriptor.
.. math::
{\rm RotRatio} = \frac{N_{\rm rotatable bonds}}{N_{\rm bonds}}
:returns: NaN when :math:`N_{\rm bonds} = 0`
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "rotatable bonds ratio"
def __str__(self):
return "RotRatio"
def dependencies(self):
# nB counts heavy-atom bonds only, consistent with
# explicit_hydrogens = False on the base class.
return {"nB": BondCount("heavy"), "nRot": RotatableBondsCount()}
def calculate(self, nRot, nB):
# rethrow_zerodiv turns division by zero (no bonds) into the NaN
# result promised by the class docstring.
with self.rethrow_zerodiv():
return float(nRot) / float(nB)
rtype = float
| mordred-descriptor/mordred | mordred/RotatableBond.py | Python | bsd-3-clause | 1,368 | [
"RDKit"
] | ec93e2194c20f96879a5fd20fe046a54deea3be5a5b5bc5cb35f7dff21565ddd |
# Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Spaces of functions with common domain and range."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import super
from inspect import isfunction
import numpy as np
from odl.operator.operator import Operator, _dispatch_call_args
from odl.set import (RealNumbers, ComplexNumbers, Set, Field, LinearSpace,
LinearSpaceElement)
from odl.util import (
is_real_dtype, is_complex_floating_dtype, dtype_repr,
complex_dtype, real_dtype,
is_valid_input_array, is_valid_input_meshgrid,
out_shape_from_array, out_shape_from_meshgrid, vectorize)
from odl.util.utility import preload_first_arg
__all__ = ('FunctionSet', 'FunctionSetElement',
'FunctionSpace', 'FunctionSpaceElement')
def _default_in_place(func, x, out, **kwargs):
"""Default in-place evaluation method."""
out[:] = func(x, **kwargs)
return out
def _default_out_of_place(func, x, **kwargs):
"""Default out-of-place evaluation built on an in-place-only ``func``:
allocate the output array, let ``func`` fill it, and return it."""
# Infer the output shape from the vectorized input form (point array
# or meshgrid); scalar inputs cannot be handled this way.
if is_valid_input_array(x, func.domain.ndim):
out_shape = out_shape_from_array(x)
elif is_valid_input_meshgrid(x, func.domain.ndim):
out_shape = out_shape_from_meshgrid(x)
else:
raise TypeError('cannot use in-place method to implement '
'out-of-place non-vectorized evaluation')
# With a "lazy" space dtype (None), fall back to the promoted dtype
# of the input components.
dtype = func.space.out_dtype
if dtype is None:
dtype = np.result_type(*x)
out = np.empty(out_shape, dtype=dtype)
func(x, out=out, **kwargs)
return out
def _broadcast_to(array, shape):
"""Wrapper for the numpy function broadcast_to.
Added since we dont require numpy 1.10 and hence cant guarantee that this
exists.
"""
array = np.asarray(array)
try:
return np.broadcast_to(array, shape)
except AttributeError:
# The above requires numpy 1.10, fallback impl else
shape = [m if n == 1 and m != 1 else 1
for n, m in zip(array.shape, shape)]
return array + np.zeros(shape, dtype=array.dtype)
class FunctionSet(Set):
"""A general set of functions with common domain and range."""
def __init__(self, domain, range, out_dtype=None):
"""Initialize a new instance.
Parameters
----------
domain : `Set`
The domain of the functions.
range : `Set`
The range of the functions.
out_dtype : optional
Data type of the return value of a function in this space.
Can be given in any way `numpy.dtype` understands, e.g. as
string ('bool') or data type (bool).
If no data type is given, a "lazy" evaluation is applied,
i.e. an adequate data type is inferred during function
evaluation.
"""
if not isinstance(domain, Set):
raise TypeError('`domain` {!r} not a `Set` instance'
''.format(domain))
if not isinstance(range, Set):
raise TypeError('`range` {!r} not a `Set` instance'
''.format(range))
self.__domain = domain
self.__range = range
# Normalized to numpy.dtype; None keeps lazy dtype inference.
self.__out_dtype = None if out_dtype is None else np.dtype(out_dtype)
@property
def domain(self):
"""Common domain of all functions in this set."""
return self.__domain
@property
def range(self):
"""Common range of all functions in this set."""
return self.__range
@property
def out_dtype(self):
"""Output data type of this function.
If ``None``, the output data type is not uniquely pre-defined.
"""
return self.__out_dtype
def element(self, fcall=None, vectorized=True):
"""Create a `FunctionSet` element.
Parameters
----------
fcall : callable, optional
The actual instruction for out-of-place evaluation.
It must return a `FunctionSet.range` element or a
`numpy.ndarray` of such (vectorized call).
vectorized : bool
Whether ``fcall`` supports vectorized evaluation.
Returns
-------
element : `FunctionSetElement`
The new element, always supports vectorization
See Also
--------
odl.discr.grid.TensorGrid.meshgrid : efficient grids for function
evaluation
"""
# NOTE(review): the fcall=None default always fails this callable()
# check, so element() without arguments raises TypeError -- confirm
# whether a default element was intended.
if not callable(fcall):
raise TypeError('`fcall` {!r} is not callable'.format(fcall))
elif fcall in self:
# Elements of this set are passed through unchanged.
return fcall
else:
# Wrap scalar-only callables so every element is vectorized.
if not vectorized:
fcall = vectorize(fcall)
return self.element_type(self, fcall)
# NOTE(review): __eq__ is defined without __hash__; unless a base class
# restores it, instances are unhashable in Python 3 -- confirm intended.
def __eq__(self, other):
"""Return ``self == other``.
Returns
-------
equals : bool
``True`` if ``other`` is a `FunctionSet` with same
`FunctionSet.domain` and `FunctionSet.range`, ``False`` otherwise.
"""
if other is self:
return True
return (isinstance(other, FunctionSet) and
self.domain == other.domain and
self.range == other.range and
self.out_dtype == other.out_dtype)
def __contains__(self, other):
"""Return ``other in self``.
Returns
-------
equals : bool
``True`` if ``other`` is a `FunctionSetElement`
whose `FunctionSetElement.space` attribute
equals this space, ``False`` otherwise.
"""
return (isinstance(other, self.element_type) and
self == other.space)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r})'.format(self.__class__.__name__,
self.domain, self.range)
def __str__(self):
"""Return ``str(self)``."""
return '{}({}, {})'.format(self.__class__.__name__,
self.domain, self.range)
@property
def element_type(self):
"""`FunctionSetElement`"""
return FunctionSetElement
class FunctionSetElement(Operator):
"""Representation of a `FunctionSet` element."""
def __init__(self, fset, fcall):
"""Initialize a new instance.
Parameters
----------
fset : `FunctionSet`
Set of functions this element lives in.
fcall : callable
The actual instruction for out-of-place evaluation.
It must return a `FunctionSet.range` element or a
`numpy.ndarray` of such (vectorized call).
"""
self.__space = fset
super().__init__(self.space.domain, self.space.range, linear=False)
# Determine which type of implementation fcall is
if isinstance(fcall, FunctionSetElement):
call_has_out, call_out_optional, _ = _dispatch_call_args(
bound_call=fcall._call)
# Numpy Ufuncs and similar objects (e.g. Numba DUfuncs)
elif hasattr(fcall, 'nin') and hasattr(fcall, 'nout'):
if fcall.nin != 1:
raise ValueError('ufunc {} has {} input parameter(s), '
'expected 1'
''.format(fcall.__name__, fcall.nin))
if fcall.nout > 1:
raise ValueError('ufunc {} has {} output parameter(s), '
'expected at most 1'
''.format(fcall.__name__, fcall.nout))
call_has_out = call_out_optional = (fcall.nout == 1)
elif isfunction(fcall):
call_has_out, call_out_optional, _ = _dispatch_call_args(
unbound_call=fcall)
elif callable(fcall):
call_has_out, call_out_optional, _ = _dispatch_call_args(
bound_call=fcall.__call__)
else:
raise TypeError('type {!r} not callable')
self._call_has_out = call_has_out
self._call_out_optional = call_out_optional
if not call_has_out:
# Out-of-place-only
self._call_in_place = preload_first_arg(self, 'in-place')(
_default_in_place)
self._call_out_of_place = fcall
elif call_out_optional:
# Dual-use
self._call_in_place = self._call_out_of_place = fcall
else:
# In-place-only
self._call_in_place = fcall
self._call_out_of_place = preload_first_arg(self, 'out-of-place')(
_default_out_of_place)
@property
def space(self):
"""Space or set this function belongs to."""
return self.__space
@property
def out_dtype(self):
"""Output data type of this function.
If ``None``, the output data type is not uniquely pre-defined.
"""
return self.space.out_dtype
def _call(self, x, out=None, **kwargs):
"""Raw evaluation method."""
if out is None:
return self._call_out_of_place(x, **kwargs)
else:
self._call_in_place(x, out=out, **kwargs)
def __call__(self, x, out=None, **kwargs):
"""Return ``self(x[, out, **kwargs])``.
Parameters
----------
x : `domain` `element-like`, `meshgrid` or `numpy.ndarray`
Input argument for the function evaluation. Conditions
on ``x`` depend on its type:
element-like: must be a castable to a domain element
meshgrid: length must be ``space.ndim``, and the arrays must
be broadcastable against each other.
array: shape must be ``(d, N)``, where ``d`` is the number
of dimensions of the function domain
out : `numpy.ndarray`, optional
Output argument holding the result of the function
evaluation, can only be used for vectorized
functions. Its shape must be equal to
``np.broadcast(*x).shape``.
Other Parameters
----------------
bounds_check : bool
If ``True``, check if all input points lie in the function
domain in the case of vectorized evaluation. This requires
the domain to implement `Set.contains_all`.
Default: ``True``
Returns
-------
out : range element or array of elements
Result of the function evaluation. If ``out`` was provided,
the returned object is a reference to it.
Raises
------
TypeError
If ``x`` is not a valid vectorized evaluation argument
If ``out`` is not a range element or a `numpy.ndarray`
of range elements
ValueError
If evaluation points fall outside the valid domain
"""
bounds_check = kwargs.pop('bounds_check', True)
if bounds_check and not hasattr(self.domain, 'contains_all'):
raise AttributeError('bounds check not possible for '
'domain {}, missing `contains_all()` '
'method'.format(self.domain))
if bounds_check and not hasattr(self.range, 'contains_all'):
raise AttributeError('bounds check not possible for '
'range {}, missing `contains_all()` '
'method'.format(self.range))
ndim = getattr(self.domain, 'ndim', None)
# Check for input type and determine output shape
if is_valid_input_meshgrid(x, ndim):
out_shape = out_shape_from_meshgrid(x)
scalar_out = False
# Avoid operations on tuples like x * 2 by casting to array
if ndim == 1:
x = x[0][None, ...]
elif is_valid_input_array(x, ndim):
x = np.asarray(x)
out_shape = out_shape_from_array(x)
scalar_out = False
# For 1d, squeeze the array
if ndim == 1 and x.ndim == 2:
x = x.squeeze()
elif x in self.domain:
x = np.atleast_2d(x).T # make a (d, 1) array
out_shape = (1,)
scalar_out = (out is None)
else:
# Unknown input
txt_1d = ' or (n,)' if ndim == 1 else ''
raise TypeError('Argument {!r} not a valid vectorized '
'input. Expected an element of the domain '
'{domain}, an array-like with shape '
'({domain.ndim}, n){} or a length-{domain.ndim} '
'meshgrid tuple.'
''.format(x, txt_1d, domain=self.domain))
# Check bounds if specified
if bounds_check:
if not self.domain.contains_all(x):
raise ValueError('input contains points outside '
'the domain {}'.format(self.domain))
# Call the function and check out shape, before or after
if out is None:
if ndim == 1:
try:
out = self._call(x, **kwargs)
except (TypeError, IndexError):
# TypeError is raised if a meshgrid was used but the
# function expected an array (1d only). In this case we try
# again with the first meshgrid vector.
# IndexError is raised in expressions like x[x > 0] since
# "x > 0" evaluates to 'True', i.e. 1, and that index is
# out of range for a meshgrid tuple of length 1 :-). To get
# the real errors with indexing, we check again for the
# same scenario (scalar output when not valid) as in the
# first case.
out = self._call(x[0], **kwargs)
# squeeze to remove extra axes.
out = np.squeeze(out)
else:
out = self._call(x, **kwargs)
# Cast to proper dtype if needed, also convert to array if out
# is scalar.
out = np.asarray(out, self.out_dtype)
if out_shape != (1,) and out.shape != out_shape:
# Try to broadcast the returned element if possible
out = _broadcast_to(out, out_shape)
else:
if not isinstance(out, np.ndarray):
raise TypeError('output {!r} not a `numpy.ndarray` '
'instance')
if out_shape != (1,) and out.shape != out_shape:
raise ValueError('output shape {} not equal to shape '
'{} expected from input'
''.format(out.shape, out_shape))
if self.out_dtype is not None and out.dtype != self.out_dtype:
raise ValueError('`out.dtype` ({}) does not match out_dtype '
'({})'.format(out.dtype, self.out_dtype))
if ndim == 1:
# TypeError for meshgrid in 1d, but expected array (see above)
try:
self._call(x, out=out, **kwargs)
except TypeError:
self._call(x[0], out=out, **kwargs)
else:
self._call(x, out=out, **kwargs)
# Check output values
if bounds_check:
if not self.range.contains_all(out):
raise ValueError('output contains points outside '
'the range {}'
''.format(self.range))
# Numpy does not implement __complex__ for arrays (in contrast to
# __float__), so we have to fish out the scalar ourselves.
return self.range.element(out.ravel()[0]) if scalar_out else out
    def assign(self, other):
        """Assign ``other`` to ``self``.
        This is implemented without `FunctionSpace.lincomb` to ensure that
        ``self == other`` evaluates to True after ``self.assign(other)``.
        Parameters
        ----------
        other : `FunctionSetElement`
            Element of the same space whose evaluation functions are
            copied into this element.
        Raises
        ------
        TypeError
            If ``other`` is not an element of ``self.space``.
        """
        if other not in self.space:
            raise TypeError('`other` {!r} is not an element of the space '
                            '{} of this function'
                            ''.format(other, self.space))
        # Copy the evaluation machinery directly; ``__eq__`` compares
        # exactly these four attributes, so equality holds afterwards.
        self._call_in_place = other._call_in_place
        self._call_out_of_place = other._call_out_of_place
        self._call_has_out = other._call_has_out
        self._call_out_optional = other._call_out_optional
def copy(self):
"""Create an identical (deep) copy of this element."""
result = self.space.element()
result.assign(self)
return result
    def __eq__(self, other):
        """Return ``self == other``.
        Returns
        -------
        equals : bool
            ``True`` if ``other`` is a `FunctionSetElement` with
            ``other.space == self.space``, and the functions for
            evaluation of ``self`` and ``other`` are the same, ``False``
            otherwise.
        """
        if other is self:
            return True
        if not isinstance(other, FunctionSetElement):
            return False
        # We cannot blindly compare since functions may have been wrapped
        if (self._call_has_out != other._call_has_out or
                self._call_out_optional != other._call_out_optional):
            return False
        if self._call_has_out:
            # Out-of-place can be wrapped in this case, so we compare only
            # the in-place methods.
            funcs_equal = self._call_in_place == other._call_in_place
        else:
            # Just the opposite of the first case
            funcs_equal = self._call_out_of_place == other._call_out_of_place
        return self.space == other.space and funcs_equal
def __str__(self):
"""Return ``str(self)``."""
if self._call_has_out:
func = self._call_in_place
else:
func = self._call_out_of_place
return '{}: {} --> {}'.format(func, self.domain, self.range)
    def __repr__(self):
        """Return ``repr(self)``."""
        # Same selection logic as ``__str__``: prefer the in-place
        # evaluation function when one was provided.
        if self._call_has_out:
            func = self._call_in_place
        else:
            func = self._call_out_of_place
        return '{!r}.element({!r})'.format(self.space, func)
class FunctionSpace(FunctionSet, LinearSpace):
    """A vector space of functions."""
    def __init__(self, domain, field=None, out_dtype=None):
        """Initialize a new instance.
        Parameters
        ----------
        domain : `Set`
            The domain of the functions
        field : `Field`, optional
            The range of the functions, usually the `RealNumbers` or
            `ComplexNumbers`. If not given, the field is either inferred
            from ``out_dtype``, or, if the latter is also ``None``, set
            to ``RealNumbers()``.
        out_dtype : optional
            Data type of the return value of a function in this space.
            Can be given in any way `numpy.dtype` understands, e.g. as
            string (``'float64'``) or data type (``float``).
            By default, ``'float64'`` is used for real and ``'complex128'``
            for complex spaces.
        """
        if not isinstance(domain, Set):
            raise TypeError('`domain` {!r} not a Set instance'.format(domain))
        if field is not None and not isinstance(field, Field):
            raise TypeError('`field` {!r} not a `Field` instance'
                            ''.format(field))
        # Data type: check if consistent with field, take default for None
        dtype, dtype_in = np.dtype(out_dtype), out_dtype
        # Default for both None
        if field is None and out_dtype is None:
            field = RealNumbers()
            out_dtype = np.dtype('float64')
        # field None, dtype given -> infer field
        elif field is None:
            if is_real_dtype(dtype):
                field = RealNumbers()
            elif is_complex_floating_dtype(dtype):
                field = ComplexNumbers()
            else:
                raise ValueError('{} is not a scalar data type'
                                 ''.format(dtype_in))
        # field given -> infer dtype if not given, else check consistency
        elif field == RealNumbers():
            if out_dtype is None:
                out_dtype = np.dtype('float64')
            elif not is_real_dtype(dtype):
                raise ValueError('{} is not a real data type'
                                 ''.format(dtype_in))
        elif field == ComplexNumbers():
            if out_dtype is None:
                out_dtype = np.dtype('complex128')
            elif not is_complex_floating_dtype(dtype):
                raise ValueError('{} is not a complex data type'
                                 ''.format(dtype_in))
        # Else: keep out_dtype=None, which results in lazy dtype determination
        LinearSpace.__init__(self, field)
        FunctionSet.__init__(self, domain, field, out_dtype)
        # Init cache attributes for real / complex variants
        if self.field == RealNumbers():
            self.__real_out_dtype = self.out_dtype
            self.__real_space = self
            self.__complex_out_dtype = complex_dtype(self.out_dtype,
                                                     default=np.dtype(object))
            self.__complex_space = None
        elif self.field == ComplexNumbers():
            self.__real_out_dtype = real_dtype(self.out_dtype)
            self.__real_space = None
            self.__complex_out_dtype = self.out_dtype
            self.__complex_space = self
        else:
            # Non-numeric field: no real/complex counterpart spaces exist
            self.__real_out_dtype = None
            self.__real_space = None
            self.__complex_out_dtype = None
            self.__complex_space = None
    @property
    def real_out_dtype(self):
        """The real dtype corresponding to this space's `out_dtype`."""
        return self.__real_out_dtype
    @property
    def complex_out_dtype(self):
        """The complex dtype corresponding to this space's `out_dtype`."""
        return self.__complex_out_dtype
    @property
    def real_space(self):
        """The space corresponding to this space's `real_dtype`."""
        return self.astype(self.real_out_dtype)
    @property
    def complex_space(self):
        """The space corresponding to this space's `complex_dtype`."""
        return self.astype(self.complex_out_dtype)
    def element(self, fcall=None, vectorized=True):
        """Create a `FunctionSpace` element.
        Parameters
        ----------
        fcall : callable, optional
            The actual instruction for out-of-place evaluation.
            It must return a `FunctionSet.range` element or a
            `numpy.ndarray` of such (vectorized call).
            If fcall is a `FunctionSetElement`, it is wrapped
            as a new `FunctionSpaceElement`.
        vectorized : bool
            Whether ``fcall`` supports vectorized evaluation.
        Returns
        -------
        element : `FunctionSpaceElement`
            The new element, always supports vectorization
        Notes
        -----
        If you specify ``vectorized=False``, the function is decorated
        with a vectorizer, which makes two elements created this way
        from the same function being regarded as *not equal*.
        """
        if fcall is None:
            return self.zero()
        elif fcall in self:
            return fcall
        else:
            if not callable(fcall):
                raise TypeError('`fcall` {!r} is not callable'.format(fcall))
            if not vectorized:
                # NOTE(review): the vectorizer always uses float64 or
                # complex128, ignoring a possibly narrower ``out_dtype``
                # -- confirm intended.
                if self.field == RealNumbers():
                    dtype = 'float64'
                else:
                    dtype = 'complex128'
                fcall = vectorize(otypes=[dtype])(fcall)
            return self.element_type(self, fcall)
    def zero(self):
        """Function mapping everything to zero.
        This function is the additive unit in the function space.
        Since `FunctionSpace.lincomb` may be slow, we implement this function
        directly.
        """
        def zero_vec(x, out=None):
            """Zero function, vectorized."""
            if is_valid_input_meshgrid(x, self.domain.ndim):
                out_shape = out_shape_from_meshgrid(x)
            elif is_valid_input_array(x, self.domain.ndim):
                out_shape = out_shape_from_array(x)
            else:
                raise TypeError('invalid input type')
            if out is None:
                return np.zeros(out_shape, dtype=self.out_dtype)
            else:
                # In-place variant: fill ``out`` and return None by
                # convention.
                out.fill(0)
        return self.element_type(self, zero_vec)
    def one(self):
        """Function mapping everything to one.
        This function is the multiplicative unit in the function space.
        """
        def one_vec(x, out=None):
            """One function, vectorized."""
            if is_valid_input_meshgrid(x, self.domain.ndim):
                out_shape = out_shape_from_meshgrid(x)
            elif is_valid_input_array(x, self.domain.ndim):
                out_shape = out_shape_from_array(x)
            else:
                raise TypeError('invalid input type')
            if out is None:
                return np.ones(out_shape, dtype=self.out_dtype)
            else:
                # In-place variant: fill ``out`` and return None by
                # convention.
                out.fill(1)
        return self.element_type(self, one_vec)
    def __eq__(self, other):
        """Return ``self == other``.
        Returns
        -------
        equals : bool
            ``True`` if ``other`` is a `FunctionSpace` with same
            `FunctionSpace.domain` and `FunctionSpace.range`,
            ``False`` otherwise.
        """
        if other is self:
            return True
        return (isinstance(other, FunctionSpace) and
                FunctionSet.__eq__(self, other))
    def _astype(self, out_dtype):
        """Internal helper for ``astype``."""
        return type(self)(self.domain, out_dtype=out_dtype)
    def astype(self, out_dtype):
        """Return a copy of this space with new ``out_dtype``.
        Parameters
        ----------
        out_dtype : optional
            Output data type of the returned space. Can be given in any
            way `numpy.dtype` understands, e.g. as string ('complex64')
            or data type (complex). None is interpreted as 'float64'.
        Returns
        -------
        newspace : `FunctionSpace`
            The version of this space with given data type
        """
        out_dtype = np.dtype(out_dtype)
        if out_dtype == self.out_dtype:
            return self
        # Caching for real and complex versions (exact dtype mappings)
        if out_dtype == self.real_out_dtype:
            if self.__real_space is None:
                self.__real_space = self._astype(out_dtype)
            return self.__real_space
        elif out_dtype == self.complex_out_dtype:
            if self.__complex_space is None:
                self.__complex_space = self._astype(out_dtype)
            return self.__complex_space
        else:
            return self._astype(out_dtype)
    def _lincomb(self, a, x1, b, x2, out):
        """Raw linear combination of ``x1`` and ``x2``.
        Notes
        -----
        The additions and multiplications are implemented via simple
        Python functions, so non-vectorized versions are slow.
        """
        # Store to allow aliasing (``out`` may be ``x1`` or ``x2``)
        x1_call_oop = x1._call_out_of_place
        x1_call_ip = x1._call_in_place
        x2_call_oop = x2._call_out_of_place
        x2_call_ip = x2._call_in_place
        def lincomb_call_out_of_place(x):
            """Linear combination, out-of-place version."""
            # Due to vectorization, at least one call must be made to
            # ensure the correct final shape. The rest is optimized as
            # far as possible.
            if a == 0 and b != 0:
                out = np.asarray(x2_call_oop(x), dtype=self.out_dtype)
                if b != 1:
                    out *= b
            elif b == 0:  # Contains the case a == 0
                out = np.asarray(x1_call_oop(x), dtype=self.out_dtype)
                if a != 1:
                    out *= a
            else:
                out = np.asarray(x1_call_oop(x), dtype=self.out_dtype)
                if a != 1:
                    out *= a
                tmp = np.asarray(x2_call_oop(x), dtype=self.out_dtype)
                if b != 1:
                    tmp *= b
                out += tmp
            return out
        def lincomb_call_in_place(x, out):
            """Linear combination, in-place version."""
            if not isinstance(out, np.ndarray):
                raise TypeError('in-place evaluation only possible if output '
                                'is of type `numpy.ndarray`')
            # TODO: this could be optimized for the case when x1 and x2
            # are identical
            if a == 0 and b == 0:
                out *= 0
            elif a == 0 and b != 0:
                x2_call_ip(x, out)
                if b != 1:
                    out *= b
            elif b == 0 and a != 0:
                x1_call_ip(x, out)
                if a != 1:
                    out *= a
            else:
                tmp = np.empty_like(out)
                x1_call_ip(x, out)
                x2_call_ip(x, tmp)
                if a != 1:
                    out *= a
                if b != 1:
                    tmp *= b
                out += tmp
            return out
        # Install the combined evaluation functions on ``out``
        out._call_out_of_place = lincomb_call_out_of_place
        out._call_in_place = lincomb_call_in_place
        out._call_has_out = out._call_out_optional = True
        return out
    def _multiply(self, x1, x2, out):
        """Raw pointwise multiplication of two functions.
        Notes
        -----
        The multiplication is implemented with a simple Python
        function, so the non-vectorized versions are slow.
        """
        # Store to allow aliasing
        x1_call_oop = x1._call_out_of_place
        x1_call_ip = x1._call_in_place
        x2_call_oop = x2._call_out_of_place
        x2_call_ip = x2._call_in_place
        def product_call_out_of_place(x):
            """Product out-of-place evaluation function."""
            return np.asarray(x1_call_oop(x) * x2_call_oop(x),
                              dtype=self.out_dtype)
        def product_call_in_place(x, out):
            """Product in-place evaluation function."""
            tmp = np.empty_like(out, dtype=self.out_dtype)
            x1_call_ip(x, out)
            x2_call_ip(x, tmp)
            out *= tmp
            return out
        out._call_out_of_place = product_call_out_of_place
        out._call_in_place = product_call_in_place
        out._call_has_out = out._call_out_optional = True
        return out
    def _divide(self, x1, x2, out):
        """Raw pointwise division of two functions."""
        # Store to allow aliasing
        x1_call_oop = x1._call_out_of_place
        x1_call_ip = x1._call_in_place
        x2_call_oop = x2._call_out_of_place
        x2_call_ip = x2._call_in_place
        def quotient_call_out_of_place(x):
            """Quotient out-of-place evaluation function."""
            return np.asarray(x1_call_oop(x) / x2_call_oop(x),
                              dtype=self.out_dtype)
        def quotient_call_in_place(x, out):
            """Quotient in-place evaluation function."""
            tmp = np.empty_like(out, dtype=self.out_dtype)
            x1_call_ip(x, out)
            x2_call_ip(x, tmp)
            out /= tmp
            return out
        out._call_out_of_place = quotient_call_out_of_place
        out._call_in_place = quotient_call_in_place
        out._call_has_out = out._call_out_optional = True
        return out
    def _scalar_power(self, x, p, out):
        """Raw p-th power of a function, p integer or general scalar."""
        x_call_oop = x._call_out_of_place
        x_call_ip = x._call_in_place
        def pow_posint(x, n):
            """Recursion to calculate the n-th power out-of-place."""
            if isinstance(x, np.ndarray):
                y = x.copy()
                return ipow_posint(y, n)
            else:
                return x ** n
        def ipow_posint(x, n):
            """Recursion to calculate the n-th power in-place."""
            # Exponentiation by squaring: O(log n) multiplications
            if n == 1:
                return x
            elif n % 2 == 0:
                x *= x
                return ipow_posint(x, n // 2)
            else:
                tmp = x.copy()
                x *= x
                ipow_posint(x, n // 2)
                x *= tmp
                return x
        def power_call_out_of_place(x):
            """Power out-of-place evaluation function."""
            if p == 0:
                return self.one()
            elif p == int(p) and p >= 1:
                return np.asarray(pow_posint(x_call_oop(x), int(p)),
                                  dtype=self.out_dtype)
            else:
                return np.power(x_call_oop(x), p).astype(self.out_dtype)
        def power_call_in_place(x, out):
            """Power in-place evaluation function."""
            # NOTE(review): when p == 0 this assigns one() but does not
            # return, so ``out`` is immediately overwritten by the
            # ``x_call_ip`` call below -- looks like a missing early
            # return; confirm intended.
            if p == 0:
                out.assign(self.one())
            x_call_ip(x, out)
            if p == int(p) and p >= 1:
                return ipow_posint(out, int(p))
            else:
                out **= p
            return out
        out._call_out_of_place = power_call_out_of_place
        out._call_in_place = power_call_in_place
        out._call_has_out = out._call_out_optional = True
        return out
    def _realpart(self, x):
        """Function returning the real part of a result."""
        x_call_oop = x._call_out_of_place
        def realpart_oop(x):
            return np.asarray(x_call_oop(x), dtype=self.out_dtype).real
        if is_real_dtype(self.out_dtype):
            # Real functions are their own real part
            return x
        else:
            rdtype = real_dtype(self.out_dtype)
            rspace = self.astype(rdtype)
            return rspace.element(realpart_oop)
    def _imagpart(self, x):
        """Function returning the imaginary part of a result."""
        x_call_oop = x._call_out_of_place
        def imagpart_oop(x):
            return np.asarray(x_call_oop(x), dtype=self.out_dtype).imag
        if is_real_dtype(self.out_dtype):
            # Real functions have zero imaginary part
            return self.zero()
        else:
            rdtype = real_dtype(self.out_dtype)
            rspace = self.astype(rdtype)
            return rspace.element(imagpart_oop)
    def _conj(self, x):
        """Function returning the complex conjugate of a result."""
        x_call_oop = x._call_out_of_place
        def conj_oop(x):
            return np.asarray(x_call_oop(x), dtype=self.out_dtype).conj()
        if is_real_dtype(self.out_dtype):
            # Real functions are their own conjugate
            return x
        else:
            return self.element(conj_oop)
    @property
    def examples(self):
        """Return example functions in the space.
        Example functions include:
        Zero
        One
        Heaviside function
        Hypercube characteristic function
        Hypersphere characteristic function
        Gaussian
        Linear gradients
        """
        # Get the points and calculate some statistics on them
        mins = self.domain.min()
        maxs = self.domain.max()
        means = (maxs + mins) / 2.0
        stds = (maxs - mins) / 4.0
        ndim = getattr(self.domain, 'ndim', None)
        # Zero and One
        yield ('Zero', self.zero())
        try:
            yield ('One', self.one())
        except NotImplementedError:
            pass
        # Indicator function in first dimension
        def _step_fun(x):
            if ndim == 1:
                return x > means[0]
            else:
                return (x[0] > means[0]) + 0 * sum(x[1:])  # fix size
        yield ('Step', self.element(_step_fun))
        # Indicator function on hypercube
        def _cube_fun(x):
            if ndim > 1:
                result = True
                for points, mean, std in zip(x, means, stds):
                    result = np.logical_and(result, points < mean + std)
                    result = np.logical_and(result, points > mean - std)
            else:
                result = np.logical_and(x < means + stds,
                                        x > means - stds)
            return result
        yield ('Cube', self.element(_cube_fun))
        # Indicator function on hypersphere
        if self.domain.ndim > 1:  # Only if ndim > 1, don't duplicate cube
            def _sphere_fun(x):
                if ndim == 1:
                    x = (x,)
                r = 0
                for points, mean, std in zip(x, means, stds):
                    r = r + (points - mean) ** 2 / std ** 2
                return r < 1.0
            yield ('Sphere', self.element(_sphere_fun))
        # Gaussian function
        def _gaussian_fun(x):
            if ndim == 1:
                x = (x,)
            r2 = 0
            for points, mean, std in zip(x, means, stds):
                r2 = r2 + (points - mean) ** 2 / ((std / 2) ** 2)
            return np.exp(-r2)
        yield ('Gaussian', self.element(_gaussian_fun))
        # Gradient in each dimensions
        # NOTE(review): ``dim`` is captured late-binding by the closure;
        # evaluating a yielded gradient element after the loop has
        # advanced uses the newer ``dim`` value -- confirm intended.
        for dim in range(self.domain.ndim):
            def _gradient_fun(x):
                if ndim == 1:
                    x = (x,)
                s = 0
                for ind in range(len(x)):
                    if ind == dim:
                        s = s + (x[ind] - mins[ind]) / (maxs[ind] - mins[ind])
                    else:
                        s = s + x[ind] * 0  # Correct broadcast size
                return s
            yield ('grad {}'.format(dim), self.element(_gradient_fun))
        # Gradient in all dimensions
        if self.domain.ndim > 1:  # Only if ndim > 1, don't duplicate grad 0
            def _all_gradient_fun(x):
                if ndim == 1:
                    x = (x,)
                s = 0
                for ind in range(len(x)):
                    s = s + (x[ind] - mins[ind]) / (maxs[ind] - mins[ind])
                return s
            yield ('Grad all', self.element(_all_gradient_fun))
    @property
    def element_type(self):
        """`FunctionSpaceElement`"""
        return FunctionSpaceElement
    def __repr__(self):
        """Return ``repr(self)``."""
        inner_str = '{!r}'.format(self.domain)
        dtype_str = dtype_repr(self.out_dtype)
        if self.field == RealNumbers():
            if self.out_dtype == np.dtype('float64'):
                pass
            else:
                inner_str += ', out_dtype={}'.format(dtype_str)
        elif self.field == ComplexNumbers():
            if self.out_dtype == np.dtype('complex128'):
                inner_str += ', field={!r}'.format(self.field)
            else:
                inner_str += ', out_dtype={}'.format(dtype_str)
        else:  # different field, name explicitly
            inner_str += ', field={!r}'.format(self.field)
            inner_str += ', out_dtype={}'.format(dtype_str)
        return '{}({})'.format(self.__class__.__name__, inner_str)
    def __str__(self):
        """Return ``str(self)``."""
        inner_str = '{}'.format(self.domain)
        dtype_str = dtype_repr(self.out_dtype)
        if self.field == RealNumbers():
            if self.out_dtype == np.dtype('float64'):
                pass
            else:
                inner_str += ', out_dtype={}'.format(dtype_str)
        elif self.field == ComplexNumbers():
            if self.out_dtype == np.dtype('complex128'):
                inner_str += ', field={!r}'.format(self.field)
            else:
                inner_str += ', out_dtype={}'.format(dtype_str)
        else:  # different field, name explicitly
            inner_str += ', field={!r}'.format(self.field)
            inner_str += ', out_dtype={}'.format(dtype_str)
        return '{}({})'.format(self.__class__.__name__, inner_str)
class FunctionSpaceElement(LinearSpaceElement, FunctionSetElement):
    """Representation of a `FunctionSpace` element."""
    def __init__(self, fspace, fcall):
        """Initialize a new instance.
        Parameters
        ----------
        fspace : `FunctionSpace`
            Set of functions this element lives in.
        fcall : callable
            The actual instruction for out-of-place evaluation.
            It must return an `FunctionSet.range` element or a
            ``numpy.ndarray`` of such (vectorized call).
        """
        if not isinstance(fspace, FunctionSpace):
            raise TypeError('`fspace` {!r} not a `FunctionSpace` '
                            'instance'.format(fspace))
        LinearSpaceElement.__init__(self, fspace)
        FunctionSetElement.__init__(self, fspace, fcall)
    # Tradeoff: either we subclass LinearSpaceElement first and override the
    # 3 methods in FunctionSetElement (as below) which LinearSpaceElement
    # also has, or we switch inheritance order and need to override all magic
    # methods from LinearSpaceElement which are not in-place. This is due to
    # the fact that FunctionSetElement inherits from Operator which defines
    # some of those magic methods, and those do not work in this case.
    __eq__ = FunctionSetElement.__eq__
    assign = FunctionSetElement.assign
    copy = FunctionSetElement.copy
    # Power functions are more general than the ones in LinearSpace
    def __pow__(self, p):
        """`f.__pow__(p) <==> f ** p`."""
        # Delegate to the space, writing into a fresh element
        out = self.space.element()
        self.space._scalar_power(self, p, out=out)
        return out
    def __ipow__(self, p):
        """`f.__ipow__(p) <==> f **= p`."""
        # In-place power: this element is both input and output
        return self.space._scalar_power(self, p, out=self)
    @property
    def real(self):
        """Pointwise real part of this function."""
        return self.space._realpart(self)
    @property
    def imag(self):
        """Pointwise imaginary part of this function."""
        return self.space._imagpart(self)
    def conj(self):
        """Pointwise complex conjugate of this function."""
        return self.space._conj(self)
    def __repr__(self):
        """Return ``repr(self)``."""
        # NOTE(review): returns a constant string rather than an evaluable
        # representation -- presumably a placeholder; confirm intended.
        return 'FunctionSpaceElement'
if __name__ == '__main__':
    # pylint: disable=wrong-import-position
    # Run this module's doctests when executed as a script.
    from odl.util.testutils import run_doctests
    run_doctests()
| bgris/ODL_bgris | lib/python3.5/site-packages/odl/space/fspace.py | Python | gpl-3.0 | 43,234 | [
"Gaussian"
] | 5b82e699073b0dc5d32d54c19ca959fec73de1405f161c35a43a37ed5421ea4e |
"""
Module for utilities related to simulation
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import next
from builtins import dict
from builtins import map
from builtins import str
try:
basestring
except NameError:
basestring = str
from future import standard_library
standard_library.install_aliases()
from time import time
import hashlib
import array
from numbers import Number
from collections import OrderedDict
from neuron import h# Import NEURON
from ..specs import Dict, ODict
#------------------------------------------------------------------------------
# Convert dict strings to utf8 so can be saved in HDF5 format
#------------------------------------------------------------------------------
def cellByGid(gid):
    """
    Return the local cell object with the given global id.

    Parameters
    ----------
    gid : int
        Global id of the cell to look up among ``sim.net.cells``
        on this node.
        **Default:** *required*

    Returns
    -------
    The first matching cell object, or ``None`` if no local cell has
    this gid.
    """
    from .. import sim
    for candidate in sim.net.cells:
        if candidate.gid == gid:
            return candidate
    return None
#------------------------------------------------------------------------------
# Get cells list for recording based on set of conditions
#------------------------------------------------------------------------------
def getCellsList(include, returnGids=False):
    """
    Resolve a list of inclusion conditions into cell objects (or gids).

    Parameters
    ----------
    include : list
        Conditions specifying which cells to select. Each entry may be:
        ``'all'``/``'allCells'`` (all local cells), an int (a cell gid),
        a str (an entire population label), or a tuple/list
        ``(popLabel, index_or_indices)`` selecting cells by relative
        index within a population.
        **Default:** *required*
    returnGids : bool
        If ``True``, return the deduplicated gid list (order not
        guaranteed) instead of cell objects.
        **Default:** ``False``
    """
    from .. import sim
    # Relative-index conditions need the tags of *all* cells, which in a
    # parallel run requires a gather across nodes.
    if sim.nhosts > 1 and any(isinstance(cond, tuple) or isinstance(cond,list) for cond in include): # Gather tags from all cells
        allCellTags = sim._gatherAllCellTags()
    else:
        allCellTags = {cell.gid: cell.tags for cell in sim.net.cells}
    cellGids = []
    cells = []
    for condition in include:
        if condition in ['all', 'allCells']: # all cells + Netstims
            # Shortcut: 'all' overrides every other condition
            cells = list(sim.net.cells)
            return cells
        elif isinstance(condition, int): # cell gid
            cellGids.append(condition)
        elif isinstance(condition, basestring): # entire pop
            cellGids.extend(list(sim.net.pops[condition].cellGids))
        elif isinstance(condition, tuple) or isinstance(condition, list): # subset of a pop with relative indices
            # Deduplicate and sort gids so relative indices are stable
            cellsPop = [gid for gid,tags in allCellTags.items() if tags['pop']==condition[0]]
            cellsPop = list(set(cellsPop))
            cellsPop.sort()
            if isinstance(condition[1], list):
                cellGids.extend([gid for i,gid in enumerate(cellsPop) if i in condition[1]])
            elif isinstance(condition[1], int):
                cellGids.extend([gid for i,gid in enumerate(cellsPop) if i==condition[1]])
    cellGids = list(set(cellGids)) # unique values
    if returnGids:
        return cellGids
    else:
        cells = [cell for cell in sim.net.cells if cell.gid in cellGids]
        return cells
#------------------------------------------------------------------------------
# Timing - Stop Watch
#------------------------------------------------------------------------------
def timing(mode, processName):
    """
    Start or stop a named stopwatch stored in ``sim.timingData``.

    Parameters
    ----------
    mode : str
        ``'start'`` records the current time under ``processName``;
        ``'stop'`` replaces the stored start time with the elapsed time
        in seconds. Any other value is ignored.
        **Default:** *required*
    processName : str
        Key under which the timing is stored in ``sim.timingData``.
        **Default:** *required*
    """
    from .. import sim
    if not hasattr(sim, 'timingData'):
        sim.timingData = {}
    # Timing is only recorded when enabled in the simulation config.
    if not getattr(sim.cfg, 'timing', False):
        return
    # In parallel runs only the master node (rank 0) keeps timing data;
    # before sim.rank exists (early setup) timing is recorded regardless.
    if hasattr(sim, 'rank') and sim.rank != 0:
        return
    if mode == 'start':
        sim.timingData[processName] = time()
    elif mode == 'stop':
        sim.timingData[processName] = time() - sim.timingData[processName]
#------------------------------------------------------------------------------
# Print netpyne version
#------------------------------------------------------------------------------
def version(show=True):
    """
    Return the netpyne version string.

    Parameters
    ----------
    show : bool
        If ``True``, also print the version string.
        **Default:** ``True``
    """
    from netpyne import __version__ as netpyne_version
    if show:
        print(netpyne_version)
    return netpyne_version
#------------------------------------------------------------------------------
# Print github version
#------------------------------------------------------------------------------
def gitChangeset(show=True):
    """
    Return the git changeset hash of the installed netpyne, or '' if
    unavailable (not a git checkout, git missing, unexpected output).

    Parameters
    ----------
    show : bool
        If ``True``, print the latest git log entry of the netpyne repo.
        **Default:** ``True``
    """
    import netpyne, os, subprocess
    currentPath = os.getcwd()
    try:
        netpynePath = os.path.dirname(netpyne.__file__)
        os.chdir(netpynePath)
        if show:
            os.system('git log -1')
        # `git describe` prints e.g. 'v0.9.1-42-gabc1234\n'; take the third
        # '-'-separated component and strip the leading 'g' and trailing
        # newline.  check_output returns *bytes* on Python 3, so decode
        # before splitting (the previous bytes.split('-') raised TypeError,
        # which the bare except silently turned into an empty changeset).
        described = subprocess.check_output(
            ["git", "describe"], stderr=subprocess.DEVNULL)
        changeset = described.decode('utf-8').split('-')[2][1:-1]
    except Exception:
        # Best-effort: any failure yields an empty changeset string
        changeset = ''
    finally:
        # Always restore the caller's working directory
        os.chdir(currentPath)
    return changeset
#------------------------------------------------------------------------------
# Hash function for string
#------------------------------------------------------------------------------
def hashStr(obj):
    """
    Return a deterministic 32-bit integer hash of a string.

    The first 8 hex digits of the MD5 digest of the UTF-8 encoded string
    are interpreted as a base-16 integer, giving a value that is stable
    across runs and hosts (unlike the builtin ``hash``).

    Parameters
    ----------
    obj : str
        String to hash.
        **Default:** *required*
    """
    digest = hashlib.md5(obj.encode('utf-8')).hexdigest()
    return int(digest[:8], 16)
#------------------------------------------------------------------------------
# Hash function for list of values
#------------------------------------------------------------------------------
def hashList(obj):
    """
    Return a deterministic 32-bit integer hash of a list of integers.

    The values are packed into an unsigned-long array, MD5-hashed, and
    the first 8 hex digits of the digest are interpreted as a base-16
    integer.

    Parameters
    ----------
    obj : list of int
        Non-negative integers to hash.
        **Default:** *required*
    """
    # str('L') keeps the typecode a native str on both Python 2 and 3
    # (the module enables unicode_literals).
    packed = array.array(str('L'), obj)
    return int(hashlib.md5(packed).hexdigest()[:8], 16)
#------------------------------------------------------------------------------
# Initialize the stim randomizer
#------------------------------------------------------------------------------
def _init_stim_randomizer(rand, stimType, gid, seed):
    """Seed a NEURON Random123 stream reproducibly from the stimulation
    type, cell gid and simulation seed."""
    from .. import sim
    # hashStr makes the stimType string a stable integer stream id
    rand.Random123(sim.hashStr(stimType), gid, seed)
#------------------------------------------------------------------------------
# Fast function to find unique elements in sequence and preserve order
#------------------------------------------------------------------------------
def unique(seq):
    """
    Return a list of the unique elements of ``seq``, preserving the
    order of first appearance.

    Parameters
    ----------
    seq : iterable
        Sequence of hashable items.
        **Default:** *required*
    """
    # OrderedDict.fromkeys keeps one entry per distinct item, in
    # first-seen order.
    return list(OrderedDict.fromkeys(seq))
#------------------------------------------------------------------------------
# Check memory
#------------------------------------------------------------------------------
def checkMemory():
    """
    Print memory diagnostics (NEURON sections, NetCons, NetStims and
    peak resident memory) on the master node only.
    """
    from .. import sim
    # print memory diagnostic info
    if sim.rank == 0: # and checkMemory:
        import resource
        print('\nMEMORY -----------------------')
        print('Sections: ')
        print(h.topology())
        print('NetCons: ')
        print(len(h.List("NetCon")))
        print('NetStims:')
        print(len(h.List("NetStim")))
        # ru_maxrss: peak resident set size of this process (platform-
        # dependent units: kilobytes on Linux, bytes on macOS)
        print('\n Memory usage: %s \n' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
        # import objgraph
        # objgraph.show_most_common_types()
        print('--------------------------------\n')
#------------------------------------------------------------------------------
# Replace item with specific key from dict or list (used to remove h objects)
#------------------------------------------------------------------------------
def copyReplaceItemObj(obj, keystart, newval, objCopy='ROOT', exclude_list=[]):
    """
    Return a copy of a nested list/dict structure in which the value of
    every dict key starting with ``keystart`` is replaced by ``newval``
    (used e.g. to strip unpicklable HOC object references).

    Parameters
    ----------
    obj : list or dict
        Structure to copy.
        **Default:** *required*
    keystart : str
        Prefix of the dict keys whose values are replaced.
        **Default:** *required*
    newval : <type>
        Replacement value.
        **Default:** *required*
    objCopy : str or list or dict
        Internal recursion accumulator; the sentinel ``'ROOT'`` marks
        the top-level call.
        **Default:** ``'ROOT'``
    exclude_list : list
        Keys exempt from replacement even if they match ``keystart``.
        **Default:** ``[]``
    """
    if type(obj) == list:
        if objCopy=='ROOT':
            objCopy = []
        for item in obj:
            if isinstance(item, list):
                objCopy.append([])
                copyReplaceItemObj(item, keystart, newval, objCopy[-1], exclude_list)
            elif isinstance(item, (dict, Dict)):
                objCopy.append({})
                copyReplaceItemObj(item, keystart, newval, objCopy[-1], exclude_list)
            else:
                objCopy.append(item)
    elif isinstance(obj, (dict, Dict)):
        if objCopy == 'ROOT':
            objCopy = Dict()
        # NOTE(review): matching keys whose values are lists/dicts are
        # recursed into (copied) rather than replaced, because the
        # container branches come first -- confirm intended.
        for key,val in obj.items():
            if type(val) in [list]:
                objCopy[key] = []
                copyReplaceItemObj(val, keystart, newval, objCopy[key], exclude_list)
            elif isinstance(val, (dict, Dict)):
                objCopy[key] = {}
                copyReplaceItemObj(val, keystart, newval, objCopy[key], exclude_list)
            elif key.startswith(keystart) and key not in exclude_list:
                objCopy[key] = newval
            else:
                objCopy[key] = val
    return objCopy
#------------------------------------------------------------------------------
# Remove item with specific key from dict or list (used to remove h objects)
#------------------------------------------------------------------------------
def copyRemoveItemObj(obj, keystart, objCopy='ROOT', exclude_list=[]):
    """
    Return a copy of a nested list/dict structure with every dict entry
    whose key starts with ``keystart`` omitted (used to drop h objects).

    Parameters
    ----------
    obj : list or dict
        Structure to copy.
        **Default:** *required*
    keystart : str
        Prefix of the dict keys to omit from the copy.
        **Default:** *required*
    objCopy : str or list or dict
        Internal recursion accumulator; the sentinel ``'ROOT'`` marks
        the top-level call.
        **Default:** ``'ROOT'``
    exclude_list : list
        Keys kept even if they match ``keystart``.
        **Default:** ``[]``
    """
    if type(obj) == list:
        if objCopy=='ROOT':
            objCopy = []
        for item in obj:
            if isinstance(item, list):
                objCopy.append([])
                copyRemoveItemObj(item, keystart, objCopy[-1], exclude_list)
            elif isinstance(item, (dict, Dict)):
                objCopy.append({})
                copyRemoveItemObj(item, keystart, objCopy[-1], exclude_list)
            else:
                objCopy.append(item)
    elif isinstance(obj, (dict, Dict)):
        if objCopy == 'ROOT':
            objCopy = Dict()
        for key,val in obj.items():
            if type(val) in [list]:
                objCopy[key] = []
                copyRemoveItemObj(val, keystart, objCopy[key], exclude_list)
            elif isinstance(val, (dict, Dict)):
                objCopy[key] = {}
                copyRemoveItemObj(val, keystart, objCopy[key], exclude_list)
            elif key.startswith(keystart) and key not in exclude_list:
                # NOTE(review): pop on the *copy* is a no-op since the key
                # was never added; the net effect (key omitted) is still
                # correct.
                objCopy.pop(key, None)
            else:
                objCopy[key] = val
    return objCopy
#------------------------------------------------------------------------------
# Replace item with specific key from dict or list (used to remove h objects)
#------------------------------------------------------------------------------
def replaceItemObj(obj, keystart, newval, exclude_list=[]):
    """
    Recursively replace, in place, the value of every dict key starting
    with ``keystart`` (and not listed in ``exclude_list``) by ``newval``
    in a nested list/dict structure.

    Parameters
    ----------
    obj : list or dict
        Structure to modify in place (also returned for convenience).
        **Default:** *required*
    keystart : str
        Prefix of the dict keys whose values are replaced.
        **Default:** *required*
    newval : <type>
        Replacement value.
        **Default:** *required*
    exclude_list : list
        Keys exempt from replacement even if they match ``keystart``.
        **Default:** ``[]``
    """
    # Note: exact type checks (plain list/dict only) are intentional and
    # mirror the sibling copy* helpers.
    if type(obj) == list:
        for entry in obj:
            if type(entry) in (list, dict):
                replaceItemObj(entry, keystart, newval, exclude_list)
    elif type(obj) == dict:
        for key in list(obj.keys()):
            value = obj[key]
            if type(value) in (list, dict):
                replaceItemObj(value, keystart, newval, exclude_list)
            if key.startswith(keystart) and key not in exclude_list:
                obj[key] = newval
    return obj
#------------------------------------------------------------------------------
# Recursivele replace dict keys
#------------------------------------------------------------------------------
def replaceKeys(obj, oldkey, newkey):
    """
    Recursively rename dict keys: every key equal to `oldkey`, at any nesting
    level inside lists/dicts, is replaced with `newkey` (in place).

    Parameters
    ----------
    obj : list or dict-like
        Structure to traverse and modify in place.
    oldkey : hashable
        Key to be renamed.
    newkey : hashable
        Replacement key.

    Returns
    -------
    obj
        The same object, modified in place.
    """
    mapping_types = (dict, Dict, ODict, OrderedDict)
    if type(obj) == list:
        for element in obj:
            if isinstance(element, (list,) + mapping_types):
                replaceKeys(element, oldkey, newkey)
    elif isinstance(obj, mapping_types):
        # snapshot the keys: entries may be popped/inserted while traversing
        for current in list(obj.keys()):
            contents = obj[current]
            if isinstance(contents, (list,) + mapping_types):
                replaceKeys(contents, oldkey, newkey)
            if current == oldkey:
                obj[newkey] = obj.pop(oldkey)
    return obj
#------------------------------------------------------------------------------
# Replace functions from dict or list with function string (so can be pickled)
#------------------------------------------------------------------------------
def replaceFuncObj(obj):
    """
    Recursively substitute the placeholder string 'func' for any value that
    exposes a 'func_name' attribute, so the structure can be pickled.

    Parameters
    ----------
    obj : list or dict
        Structure to modify in place; nested plain lists/dicts are traversed.

    Returns
    -------
    obj
        The same object, modified in place.
    """
    if type(obj) == list:
        for element in obj:
            if type(element) in (list, dict):
                replaceFuncObj(element)
    elif type(obj) == dict:
        for key in obj:
            value = obj[key]
            if type(value) in (list, dict):
                replaceFuncObj(value)
            # membership in dir() rather than hasattr(), since hasattr()
            # would create the key inside Dict() objects
            if 'func_name' in dir(value):
                obj[key] = 'func'  # funcSource
    return obj
#------------------------------------------------------------------------------
# Replace None from dict or list with [](so can be saved to .mat)
#------------------------------------------------------------------------------
def replaceNoneObj(obj):
    """
    Recursively replace None values (and empty dicts) with [] so the
    structure can be saved to .mat format.

    Parameters
    ----------
    obj : list or dict-like
        Structure to modify in place; nested containers are traversed.

    Returns
    -------
    obj
        The same object, modified in place.
    """
    container_types = (list, dict, Dict, ODict)
    if type(obj) == list:  # or type(obj) == tuple:
        for element in obj:
            if isinstance(element, container_types):
                replaceNoneObj(element)
    elif isinstance(obj, (dict, Dict, ODict)):
        for key in list(obj.keys()):
            value = obj[key]
            if isinstance(value, container_types):
                replaceNoneObj(value)
            if value == None:
                obj[key] = []
            elif value == {}:
                # .mat cannot represent empty dicts either
                obj[key] = []
    return obj
#------------------------------------------------------------------------------
# Replace Dict with dict and Odict with OrderedDict
#------------------------------------------------------------------------------
def replaceDictODict(obj):
    """
    Recursively convert netpyne Dict objects to plain dict, and ODict objects
    to collections.OrderedDict, modifying containers in place.

    Parameters
    ----------
    obj : list or dict-like
        Structure to convert; nested lists/dicts are traversed.

    Returns
    -------
    obj
        The same object, modified in place.
    """
    if type(obj) == list:
        for i, item in enumerate(obj):
            # write the converted container back into the list; the previous
            # code only rebound the loop variable (`item = item.todict()`),
            # so list elements were never actually converted
            if type(item) == Dict:
                item = item.todict()
                obj[i] = item
            elif type(item) == ODict:
                item = item.toOrderedDict()
                obj[i] = item
            if type(item) in [list, dict, OrderedDict]:
                replaceDictODict(item)
    elif type(obj) in [dict, OrderedDict, Dict, ODict]:
        for key, val in obj.items():
            if type(val) == Dict:
                obj[key] = val.todict()
            elif type(val) == ODict:
                obj[key] = val.toOrderedDict()
            # NOTE(review): the recursion check below still inspects the
            # pre-conversion `val`, so a freshly converted value is not
            # re-traversed here; presumably todict()/toOrderedDict() convert
            # nested containers themselves — confirm
            if type(val) in [list, dict, OrderedDict]:
                replaceDictODict(val)
    return obj
#------------------------------------------------------------------------------
# Rename objects
#------------------------------------------------------------------------------
def rename(obj, old, new, label=None):
"""
Function for/to <short description of `netpyne.sim.utils.rename`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
old : <type>
<Short description of old>
**Default:** *required*
new : <type>
<Short description of new>
**Default:** *required*
label : <``None``?>
<Short description of label>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
"""
try:
return obj.rename(old, new, label)
except:
if type(obj) == dict and old in obj:
obj[new] = obj.pop(old) # replace
return True
else:
return False
#------------------------------------------------------------------------------
# Replace tuples with str
#------------------------------------------------------------------------------
def tupleToList(obj):
    """
    Recursively convert tuples to lists (in place), so the structure can be
    serialized to formats that have no tuple type.

    Parameters
    ----------
    obj : list or dict-like
        Structure to modify in place; nested containers are traversed.

    Returns
    -------
    obj
        The same object, modified in place.
    """
    if type(obj) == list:
        for i, item in enumerate(obj):
            if type(item) in [list, dict]:
                tupleToList(item)
            elif type(item) == tuple:
                # assign by position directly; the previous
                # `obj[obj.index(item)]` was an O(n) search per element and
                # fragile in the presence of duplicate tuples
                obj[i] = list(item)
    elif isinstance(obj, (dict, ODict)):
        for key, val in obj.items():
            if isinstance(val, (list, dict, ODict)):
                tupleToList(val)
            elif type(val) == tuple:
                obj[key] = list(val)
    return obj
#------------------------------------------------------------------------------
# Replace Decimal with float
#------------------------------------------------------------------------------
def decimalToFloat(obj):
    """
    Recursively convert decimal.Decimal values to float (in place), so the
    structure can be serialized (e.g. to JSON or .mat).

    Parameters
    ----------
    obj : list or dict
        Structure to modify in place; nested lists/dicts are traversed.
        Tuples are immutable and cannot be fixed in place, so Decimals
        inside tuples are left untouched (as before).

    Returns
    -------
    obj
        The same object, modified in place.
    """
    from decimal import Decimal
    if type(obj) == list:
        for i, item in enumerate(obj):
            if type(item) in [list, dict]:
                decimalToFloat(item)
            elif type(item) == Decimal:
                obj[i] = float(item)
    elif isinstance(obj, dict):
        for key, val in obj.items():
            if isinstance(val, (list, dict)):
                decimalToFloat(val)
            elif type(val) == Decimal:
                obj[key] = float(val)
    return obj
#------------------------------------------------------------------------------
# Recursively remove items of an object (used to avoid mem leaks)
#------------------------------------------------------------------------------
def clearObj(obj):
    """
    Recursively delete the contents of nested containers (helps avoid
    memory leaks between simulations).

    Note: for dicts every key is removed; for lists the elements are only
    traversed — `del element` merely drops the local reference.

    Parameters
    ----------
    obj : list or dict-like
        Structure whose contents are cleared in place.

    Returns
    -------
    obj
        The same (now emptied, if dict-like) object.
    """
    container_types = (list, dict, Dict, ODict)
    if type(obj) == list:
        for element in obj:
            if isinstance(element, container_types):
                clearObj(element)
            del element
    elif isinstance(obj, (dict, Dict, ODict)):
        for key in list(obj.keys()):
            contents = obj[key]
            if isinstance(contents, container_types):
                clearObj(contents)
            del obj[key]
    return obj
#------------------------------------------------------------------------------
# Support funcs to load from mat
#------------------------------------------------------------------------------
def _mat2dict(obj):
    """
    Recursively convert data loaded from a .mat file into nested plain dicts.

    Handles three input shapes: plain dicts (the top level returned by
    scipy.io.loadmat), scipy mat_struct records, and numpy arrays (converted
    to lists). Values under the keys 'conns', 'stims' and 'synMechs' are
    wrapped in a 1-element list when needed, because MATLAB collapses
    single-element struct arrays into a bare struct.

    NOTE(review): relies on the scipy-private path
    ``scipy.io.matlab.mio5_params.mat_struct``, which is deprecated/removed
    in newer scipy releases — confirm against the installed scipy version.
    """
    import scipy.io as spio
    import numpy as np
    if isinstance(obj, dict):
        # dict as returned by loadmat: convert mat_structs/arrays per entry
        out = {}
        for key in obj:
            if isinstance(obj[key], spio.matlab.mio5_params.mat_struct):
                if key in ['conns', 'stims', 'synMechs']:
                    out[key] = [_mat2dict(obj[key])]  # convert to 1-element list
                else:
                    out[key] = _mat2dict(obj[key])
            elif isinstance(obj[key], np.ndarray):
                out[key] = _mat2dict(obj[key])
            else:
                out[key] = obj[key]
    elif isinstance(obj, spio.matlab.mio5_params.mat_struct):
        # MATLAB struct record: walk its fields via _fieldnames
        out = {}
        for key in obj._fieldnames:
            val = obj.__dict__[key]
            if isinstance(val, spio.matlab.mio5_params.mat_struct):
                if key in ['conns', 'stims', 'synMechs']:
                    out[key] = [_mat2dict(val)]  # convert to 1-element list
                else:
                    out[key] = _mat2dict(val)
            elif isinstance(val, np.ndarray):
                out[key] = _mat2dict(val)
            else:
                out[key] = val
    elif isinstance(obj, np.ndarray):
        # numpy array: convert to a list, recursing into structured elements
        out = []
        for item in obj:
            if isinstance(item, spio.matlab.mio5_params.mat_struct) or isinstance(item, np.ndarray):
                out.append(_mat2dict(item))
            else:
                out.append(item)
    else:
        # scalars and anything else pass through unchanged
        out = obj
    return out
#------------------------------------------------------------------------------
# Convert dict strings to utf8 so can be saved in HDF5 format
#------------------------------------------------------------------------------
def _dict2utf8(obj):
#unidict = {k.decode('utf8'): v.decode('utf8') for k, v in strdict.items()}
#print obj
import collections
if isinstance(obj, basestring):
return obj.decode('utf8')
elif isinstance(obj, collections.Mapping):
for key in list(obj.keys()):
if isinstance(key, Number):
obj[str(key).decode('utf8')] = obj[key]
obj.pop(key)
return dict(list(map(_dict2utf8, iter(obj.items()))))
elif isinstance(obj, collections.Iterable):
return type(obj)(list(map(_dict2utf8, obj)))
else:
return obj
#------------------------------------------------------------------------------
# Clear all sim objects in memory
#------------------------------------------------------------------------------
def clearAll():
    """
    Free all simulation objects held in memory by the current sim.

    Clears NEURON gids, per-node cells/simData, data gathered on the master
    node (rank 0), matplotlib figures, and — when present — rxd
    (reaction-diffusion) state, then deletes sim.net and forces a GC pass.
    Intended to be called between consecutive simulations to avoid mem leaks.
    """
    from .. import sim
    import numpy as np
    # clean up
    sim.pc.barrier()
    sim.pc.gid_clear() # clear previous gid settings
    # clean cells and simData in all nodes
    if hasattr(sim, 'net'):
        sim.clearObj([cell.__dict__ if hasattr(cell, '__dict__') else cell for cell in sim.net.cells])
    if hasattr(sim, 'simData'):
        if 'stims' in list(sim.simData.keys()):
            sim.clearObj([stim for stim in sim.simData['stims']])
        for key in list(sim.simData.keys()): del sim.simData[key]
    if hasattr(sim, 'net'):
        # `del` on loop variables only drops local references; the real
        # cleanup is the clearObj calls above and `del sim.net` below
        for c in sim.net.cells: del c
        for p in sim.net.pops: del p
        del sim.net.params
    # clean cells and simData gathered in master node
    if hasattr(sim, 'rank'):
        if sim.rank == 0:
            if hasattr(sim, 'net'):
                if hasattr(sim.net, 'allCells'):
                    sim.clearObj([cell.__dict__ if hasattr(cell, '__dict__') else cell for cell in sim.net.allCells])
            if hasattr(sim, 'allSimData'):
                for key in list(sim.allSimData.keys()): del sim.allSimData[key]
                # NOTE(review): 'stims' can never be present here — all keys
                # were deleted just above; likely intended to run before the
                # deletion loop (cf. the simData handling earlier) — confirm
                if 'stims' in list(sim.allSimData.keys()):
                    sim.clearObj([stim for stim in sim.allSimData['stims']])
            if hasattr(sim, 'net'):
                for c in sim.net.allCells: del c
                for p in sim.net.allPops: del p
                del sim.net.allCells
            if hasattr(sim, 'allSimData'):
                del sim.allSimData
            import matplotlib
            # NOTE(review): assumes matplotlib.pyplot was already imported
            # elsewhere (plain `import matplotlib` does not expose .pyplot) —
            # confirm
            matplotlib.pyplot.clf()
            matplotlib.pyplot.close('all')
    # clean rxd components
    if hasattr(sim.net, 'rxd'):
        sim.clearObj(sim.net.rxd)
        if 'rxd' not in globals():
            try:
                from neuron import crxd as rxd
            except:
                pass
        #try:
        # unregister reactions and species (weakref lists, hence the r()/s() calls)
        for r in rxd.rxd._all_reactions[:]:
            if r():
                rxd.rxd._unregister_reaction(r)
        for s in rxd.species._all_species:
            if s():
                s().__del__()
        # reset rxd module-level registries and cached pointers
        rxd.region._all_regions = []
        rxd.region._region_count = 0
        rxd.region._c_region_lookup = None
        rxd.species._species_counts = 0
        rxd.section1d._purge_cptrs()
        rxd.initializer.has_initialized = False
        rxd.rxd.free_conc_ptrs()
        rxd.rxd.free_curr_ptrs()
        rxd.rxd.rxd_include_node_flux1D(0, None, None, None)
        rxd.species._has_1d = False
        rxd.species._has_3d = False
        rxd.rxd._zero_volume_indices = np.ndarray(0, dtype=np.int_)
        rxd.set_solve_type(dimension=1)
        # clear reactions in case next sim does not use rxd
        rxd.rxd.clear_rates()
        for obj in rxd.__dict__:
            sim.clearObj(obj)
        #except:
        #    pass
    if hasattr(sim, 'net'):
        del sim.net
    import gc; gc.collect()
#------------------------------------------------------------------------------
# Create a subclass of json.JSONEncoder to convert numpy types in Python types
#------------------------------------------------------------------------------
import json
import numpy as np
class NpSerializer(json.JSONEncoder):
    """
    JSON encoder that converts numpy scalar/array types to native Python
    types so they can be serialized.

    Usage: ``json.dumps(obj, cls=NpSerializer)``
    """

    def default(self, obj):
        """Convert numpy integers, floats, booleans and arrays; defer otherwise."""
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.bool_):
            # np.bool_ is not a subclass of np.integer; without this branch
            # numpy booleans raise TypeError during serialization
            return bool(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(NpSerializer, self).default(obj)
| Neurosim-lab/netpyne | netpyne/sim/utils.py | Python | mit | 28,991 | [
"NEURON"
] | f991b135a581bf4080b12d843169ac286b879583cd9355a80cf827a2915944fd |
"""Axis binary sensor platform tests."""
from unittest.mock import Mock
from homeassistant import config_entries
from homeassistant.components import axis
import homeassistant.components.binary_sensor as binary_sensor
from homeassistant.setup import async_setup_component
# Example event payloads as emitted by the Axis device event stream:
# a PIR motion sensor (state "0" = no motion) and a VMD motion-detection
# camera profile (value "1" = active).
EVENTS = [
    {
        "operation": "Initialized",
        "topic": "tns1:Device/tnsaxis:Sensor/PIR",
        "source": "sensor",
        "source_idx": "0",
        "type": "state",
        "value": "0",
    },
    {
        "operation": "Initialized",
        "topic": "tnsaxis:CameraApplicationPlatform/VMD/Camera1Profile1",
        "type": "active",
        "value": "1",
    },
]
# Minimal config-entry data for a fake Axis device at 1.2.3.4.
ENTRY_CONFIG = {
    axis.CONF_DEVICE: {
        axis.config_flow.CONF_HOST: "1.2.3.4",
        axis.config_flow.CONF_USERNAME: "user",
        axis.config_flow.CONF_PASSWORD: "pass",
        axis.config_flow.CONF_PORT: 80,
    },
    axis.config_flow.CONF_MAC: "1234ABCD",
    axis.config_flow.CONF_MODEL: "model",
    axis.config_flow.CONF_NAME: "model 0",
}
# Entry options: events enabled, camera disabled, no trigger debounce time.
ENTRY_OPTIONS = {
    axis.CONF_CAMERA: False,
    axis.CONF_EVENTS: True,
    axis.CONF_TRIGGER_TIME: 0,
}
async def setup_device(hass):
    """Load the Axis binary sensor platform.

    Builds a config entry and AxisNetworkDevice by hand (bypassing the normal
    config flow), wires the event-stream callback, and forwards the entry to
    the binary_sensor platform. Returns the device so tests can inject events.
    """
    from axis import AxisDevice

    loop = Mock()

    config_entry = config_entries.ConfigEntry(
        1,
        axis.DOMAIN,
        "Mock Title",
        ENTRY_CONFIG,
        "test",
        config_entries.CONN_CLASS_LOCAL_PUSH,
        system_options={},
        options=ENTRY_OPTIONS,
    )
    device = axis.AxisNetworkDevice(hass, config_entry)
    device.api = AxisDevice(loop=loop, **config_entry.data[axis.CONF_DEVICE])
    hass.data[axis.DOMAIN] = {device.serial: device}
    device.api.enable_events(event_callback=device.async_event_callback)

    await hass.config_entries.async_forward_entry_setup(config_entry, "binary_sensor")
    # To flush out the service call to update the group
    await hass.async_block_till_done()

    return device
async def test_platform_manually_configured(hass):
    """Test that nothing happens when platform is manually configured.

    The axis integration is config-entry only, so a YAML platform entry must
    set up successfully (returns True) without creating any axis data.
    """
    assert (
        await async_setup_component(
            hass, binary_sensor.DOMAIN, {"binary_sensor": {"platform": axis.DOMAIN}}
        )
        is True
    )

    assert axis.DOMAIN not in hass.data
async def test_no_binary_sensors(hass):
    """Test that no sensors in Axis results in no sensor entities.

    No events are injected after setup, so no entities should be created.
    """
    await setup_device(hass)

    assert len(hass.states.async_all()) == 0
async def test_binary_sensors(hass):
    """Test that sensors are loaded properly.

    Injects the two fixture EVENTS (PIR off, VMD profile on) and verifies
    the resulting entity states and friendly names.
    """
    device = await setup_device(hass)

    for event in EVENTS:
        device.api.stream.event.manage_event(event)
    await hass.async_block_till_done()

    assert len(hass.states.async_all()) == 2

    pir = hass.states.get("binary_sensor.model_0_pir_0")
    assert pir.state == "off"
    assert pir.name == "model 0 PIR 0"

    vmd4 = hass.states.get("binary_sensor.model_0_vmd4_camera1profile1")
    assert vmd4.state == "on"
    assert vmd4.name == "model 0 VMD4 Camera1Profile1"
| leppa/home-assistant | tests/components/axis/test_binary_sensor.py | Python | apache-2.0 | 3,072 | [
"VMD"
] | 4a318f0bb285a7a2e3d5f119b1702fb50b1e657dd4b71763b190e245ccf9c23a |
#!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
'''
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instructions on installing the
Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
Run for Specific Host
-----------------------
When run for a specific host using the --host option, a resource group is
required. For a specific host, this script returns the following variables:
{
"ansible_host": "XXX.XXX.XXX.XXX",
"computer_name": "computer_name2",
"fqdn": null,
"id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
"image": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "latest"
},
"location": "westus",
"mac_address": "00-0D-3A-31-2C-EC",
"name": "object-name",
"network_interface": "interface-name",
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
"network_security_group": null,
"network_security_group_id": null,
"os_disk": {
"name": "object-name",
"operating_system_type": "Linux"
},
"plan": null,
"powerstate": "running",
"private_ip": "172.26.3.6",
"private_ip_alloc_method": "Static",
"provisioning_state": "Succeeded",
"public_ip": "XXX.XXX.XXX.XXX",
"public_ip_alloc_method": "Static",
"public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
"public_ip_name": "object-name",
"resource_group": "galaxy-production",
"security_group": "object-name",
"security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
"tags": {
"db": "database"
},
"type": "Microsoft.Compute/virtualMachines",
"virtual_machine_size": "Standard_DS4"
}
Groups
------
When run in --list mode, instances are grouped by the following categories:
- azure
- location
- resource_group
- security_group
- tag key
- tag key_value
Control groups using azure_rm.ini or set environment variables:
AZURE_GROUP_BY_RESOURCE_GROUP=yes
AZURE_GROUP_BY_LOCATION=yes
AZURE_GROUP_BY_SECURITY_GROUP=yes
AZURE_GROUP_BY_TAG=yes
Select hosts within specific resource groups by assigning a comma separated list to:
AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
Select hosts for specific tag key by assigning a comma separated list of tag keys to:
AZURE_TAGS=key1,key2,key3
Select hosts for specific locations:
AZURE_LOCATIONS=eastus,westus,eastus2
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
AZURE_TAGS=key1:value1,key2:value2
If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
AZURE_INCLUDE_POWERSTATE=no
azure_rm.ini
----------------------
As mentioned above you can control execution using environment variables or an .ini file. A sample
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_rm') with a .ini extension. This provides you with the flexibility of copying and customizing this
script and having matching .ini files. Go forth and customize your Azure inventory!
Powerstate:
-----------
The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
up. If the value is anything other than 'running', the machine is down, and will be unreachable.
Examples:
---------
Execute /bin/uname on all instances in the galaxy-qa resource group
$ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
Use the inventory script to print instance specific information
$ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
Use with a playbook
$ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
Company: Ansible by Red Hat
Version: 1.0.0
'''
import argparse
import ConfigParser
import json
import os
import re
import sys
from distutils.version import LooseVersion
from os.path import expanduser
HAS_AZURE = True
HAS_AZURE_EXC = None
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.network_management_client import NetworkManagementClient
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
from azure.mgmt.compute.compute_management_client import ComputeManagementClient
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
# Credential attribute names mapped to the environment variables that can
# supply them; the same attribute names are accepted as keys inside a
# ~/.azure/credentials profile section.
AZURE_CREDENTIAL_ENV_MAPPING = dict(
    profile='AZURE_PROFILE',
    subscription_id='AZURE_SUBSCRIPTION_ID',
    client_id='AZURE_CLIENT_ID',
    secret='AZURE_SECRET',
    tenant='AZURE_TENANT',
    ad_user='AZURE_AD_USER',
    password='AZURE_PASSWORD'
)

# Filtering/grouping settings mapped to the environment variables (and
# azure_rm.ini keys) that can override them.
AZURE_CONFIG_SETTINGS = dict(
    resource_groups='AZURE_RESOURCE_GROUPS',
    tags='AZURE_TAGS',
    locations='AZURE_LOCATIONS',
    include_powerstate='AZURE_INCLUDE_POWERSTATE',
    group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
    group_by_location='AZURE_GROUP_BY_LOCATION',
    group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
    group_by_tag='AZURE_GROUP_BY_TAG'
)

# Minimum supported version of the azure.mgmt.compute SDK.
AZURE_MIN_VERSION = "0.30.0rc5"
def azure_id_to_dict(id):
    """
    Parse an Azure resource id (e.g.
    '/subscriptions/<id>/resourceGroups/<name>/providers/...') into a dict
    mapping each path-segment type to its value.

    Azure resource ids alternate segment-type/segment-name, so the path is
    consumed in pairs. The previous implementation advanced by 1 instead of
    2, producing overlapping pairs in which values also became keys — which
    could collide with (and overwrite) legitimate entries.
    """
    pieces = re.sub(r'^\/', '', id).split('/')
    result = {}
    index = 0
    while index < len(pieces) - 1:
        result[pieces[index]] = pieces[index + 1]
        index += 2  # segments alternate type/name: step in pairs
    return result
class AzureRM(object):
    """
    Thin wrapper around the Azure SDK management clients.

    Resolves credentials with the precedence: command-line parameters ->
    environment variables -> [default] profile in ~/.azure/credentials,
    authenticates (service principal or AD user/password), and lazily
    instantiates the compute, network and resource-manager clients.
    """

    def __init__(self, args):
        # args: argparse.Namespace produced by AzureInventory._parse_cli_args
        self._args = args
        self._compute_client = None
        self._resource_client = None
        self._network_client = None

        self.debug = False
        if args.debug:
            self.debug = True

        self.credentials = self._get_credentials(args)
        if not self.credentials:
            self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                      "or define a profile in ~/.azure/credentials.")

        if self.credentials.get('subscription_id', None) is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        # service principal (client_id/secret/tenant) takes precedence over
        # Active Directory user/password authentication
        if self.credentials.get('client_id') is not None and \
           self.credentials.get('secret') is not None and \
           self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                                 secret=self.credentials['secret'],
                                                                 tenant=self.credentials['tenant'])
        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
            self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
        else:
            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                      "Credentials must include client_id, secret and tenant or ad_user and password.")

    def log(self, msg):
        """Print a debug message when --debug was given."""
        if self.debug:
            print (msg + u'\n')

    def fail(self, msg):
        """Abort by raising; the caller (AzureInventory) converts this to sys.exit."""
        raise Exception(msg)

    def _get_profile(self, profile="default"):
        """Read credentials for `profile` from the ~/.azure/credentials ini file.

        Returns a credentials dict, or None when the profile provides neither
        a client_id nor an ad_user.
        """
        path = expanduser("~")
        path += "/.azure/credentials"
        try:
            config = ConfigParser.ConfigParser()
            config.read(path)
        except Exception as exc:
            self.fail("Failed to access {0}. Check that the file exists and you have read "
                      "access. {1}".format(path, str(exc)))
        credentials = dict()
        for key in AZURE_CREDENTIAL_ENV_MAPPING:
            try:
                credentials[key] = config.get(profile, key, raw=True)
            except:
                # key simply absent from this profile section
                pass

        if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
            return credentials

        return None

    def _get_env_credentials(self):
        """Collect credentials from environment variables.

        AZURE_PROFILE redirects to the named ini profile; otherwise the
        individual AZURE_* variables are used directly.
        NOTE: .iteritems() is Python 2 only (file targets Python 2).
        """
        env_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
            env_credentials[attribute] = os.environ.get(env_variable, None)

        if env_credentials['profile'] is not None:
            credentials = self._get_profile(env_credentials['profile'])
            return credentials

        if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
            return env_credentials

        return None

    def _get_credentials(self, params):
        # Get authentication credentials.
        # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.

        self.log('Getting credentials')

        arg_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
            arg_credentials[attribute] = getattr(params, attribute)

        # try module params
        if arg_credentials['profile'] is not None:
            self.log('Retrieving credentials with profile parameter.')
            credentials = self._get_profile(arg_credentials['profile'])
            return credentials

        if arg_credentials['client_id'] is not None:
            self.log('Received credentials from parameters.')
            return arg_credentials

        # try environment
        env_credentials = self._get_env_credentials()
        if env_credentials:
            self.log('Received credentials from env.')
            return env_credentials

        # try default profile from ~./azure/credentials
        default_credentials = self._get_profile()
        if default_credentials:
            self.log('Retrieved default profile credentials from ~/.azure/credentials.')
            return default_credentials

        return None

    def _register(self, key):
        """One-time registration of a resource provider (e.g. 'Microsoft.Network')."""
        try:
            # We have to perform the one-time registration here. Otherwise, we receive an error the first
            # time we attempt to use the requested client.
            resource_client = self.rm_client
            resource_client.providers.register(key)
        except Exception as exc:
            self.fail("One-time registration of {0} failed - {1}".format(key, str(exc)))

    @property
    def network_client(self):
        """Lazily created NetworkManagementClient (registers Microsoft.Network once)."""
        self.log('Getting network client')
        if not self._network_client:
            self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
            self._register('Microsoft.Network')
        return self._network_client

    @property
    def rm_client(self):
        """Lazily created ResourceManagementClient."""
        self.log('Getting resource manager client')
        if not self._resource_client:
            self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
        return self._resource_client

    @property
    def compute_client(self):
        """Lazily created ComputeManagementClient (registers Microsoft.Compute once)."""
        self.log('Getting compute client')
        if not self._compute_client:
            self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
            self._register('Microsoft.Compute')
        return self._compute_client
class AzureInventory(object):
    def __init__(self):
        """Parse CLI args, authenticate, apply settings, then print the inventory and exit."""
        self._args = self._parse_cli_args()

        try:
            rm = AzureRM(self._args)
        except Exception as e:
            sys.exit("{0}".format(str(e)))

        self._compute_client = rm.compute_client
        self._network_client = rm.network_client
        self._resource_client = rm.rm_client
        self._security_groups = None

        # filtering/grouping defaults; overridable via ini file or env vars
        self.resource_groups = []
        self.tags = None
        self.locations = None
        self.replace_dash_in_groups = False
        self.group_by_resource_group = True
        self.group_by_location = True
        self.group_by_security_group = True
        self.group_by_tag = True
        self.include_powerstate = True

        self._inventory = dict(
            _meta=dict(
                hostvars=dict()
            ),
            azure=[]
        )

        self._get_settings()

        # command-line filters override ini/env settings
        if self._args.resource_groups:
            self.resource_groups = self._args.resource_groups.split(',')

        if self._args.tags:
            self.tags = self._args.tags.split(',')

        if self._args.locations:
            self.locations = self._args.locations.split(',')

        if self._args.no_powerstate:
            self.include_powerstate = False

        self.get_inventory()
        print (self._json_format_dict(pretty=self._args.pretty))
        sys.exit(0)
def _parse_cli_args(self):
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file for an Azure subscription')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty print JSON output(default: False)')
parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials')
parser.add_argument('--subscription_id', action='store',
help='Azure Subscription Id')
parser.add_argument('--client_id', action='store',
help='Azure Client Id ')
parser.add_argument('--secret', action='store',
help='Azure Client Secret')
parser.add_argument('--tenant', action='store',
help='Azure Tenant Id')
parser.add_argument('--ad-user', action='store',
help='Active Directory User')
parser.add_argument('--password', action='store',
help='password')
parser.add_argument('--resource-groups', action='store',
help='Return inventory for comma separated list of resource group names')
parser.add_argument('--tags', action='store',
help='Return inventory for comma separated list of tag key:value pairs')
parser.add_argument('--locations', action='store',
help='Return inventory for comma separated list of locations')
parser.add_argument('--no-powerstate', action='store_true', default=False,
help='Do not include the power state of each virtual host')
return parser.parse_args()
    def get_inventory(self):
        """Fetch VMs (for the requested resource groups, or subscription-wide) and load them."""
        if len(self.resource_groups) > 0:
            # get VMs for requested resource groups
            for resource_group in self.resource_groups:
                try:
                    virtual_machines = self._compute_client.virtual_machines.list(resource_group)
                except Exception as exc:
                    sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group,
                                                                                                    str(exc)))
                # NOTE(review): this branch only filters on host/tags, not on
                # self.locations like the subscription-wide branch below — confirm
                if self._args.host or self.tags:
                    selected_machines = self._selected_machines(virtual_machines)
                    self._load_machines(selected_machines)
                else:
                    self._load_machines(virtual_machines)
        else:
            # get all VMs within the subscription
            try:
                virtual_machines = self._compute_client.virtual_machines.list_all()
            except Exception as exc:
                sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
            if self._args.host or self.tags or self.locations:
                selected_machines = self._selected_machines(virtual_machines)
                self._load_machines(selected_machines)
            else:
                self._load_machines(virtual_machines)
    def _load_machines(self, machines):
        """Build the host_vars dict for each VM and register it in the inventory."""
        for machine in machines:
            id_dict = azure_id_to_dict(machine.id)

            #TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
            #       fixed, we should remove the .lower(). Opened Issue
            #       #574: https://github.com/Azure/azure-sdk-for-python/issues/574
            resource_group = id_dict['resourceGroups'].lower()

            if self.group_by_security_group:
                # populate the per-resource-group NIC id -> NSG mapping cache
                self._get_security_groups(resource_group)

            host_vars = dict(
                ansible_host=None,
                private_ip=None,
                private_ip_alloc_method=None,
                public_ip=None,
                public_ip_name=None,
                public_ip_id=None,
                public_ip_alloc_method=None,
                fqdn=None,
                location=machine.location,
                name=machine.name,
                type=machine.type,
                id=machine.id,
                tags=machine.tags,
                network_interface_id=None,
                network_interface=None,
                resource_group=resource_group,
                mac_address=None,
                plan=(machine.plan.name if machine.plan else None),
                virtual_machine_size=machine.hardware_profile.vm_size,
                computer_name=machine.os_profile.computer_name,
                provisioning_state=machine.provisioning_state,
            )

            host_vars['os_disk'] = dict(
                name=machine.storage_profile.os_disk.name,
                operating_system_type=machine.storage_profile.os_disk.os_type.value
            )

            if self.include_powerstate:
                # extra API round-trip per VM; can be disabled via --no-powerstate
                host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)

            if machine.storage_profile.image_reference:
                host_vars['image'] = dict(
                    offer=machine.storage_profile.image_reference.offer,
                    publisher=machine.storage_profile.image_reference.publisher,
                    sku=machine.storage_profile.image_reference.sku,
                    version=machine.storage_profile.image_reference.version
                )

            # Add windows details
            if machine.os_profile.windows_configuration is not None:
                host_vars['windows_auto_updates_enabled'] = \
                    machine.os_profile.windows_configuration.enable_automatic_updates
                host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
                host_vars['windows_rm'] = None
                if machine.os_profile.windows_configuration.win_rm is not None:
                    host_vars['windows_rm'] = dict(listeners=None)
                    if machine.os_profile.windows_configuration.win_rm.listeners is not None:
                        host_vars['windows_rm']['listeners'] = []
                        for listener in machine.os_profile.windows_configuration.win_rm.listeners:
                            host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol,
                                                                             certificate_url=listener.certificate_url))

            # resolve networking details from the primary NIC only
            for interface in machine.network_profile.network_interfaces:
                interface_reference = self._parse_ref_id(interface.id)
                network_interface = self._network_client.network_interfaces.get(
                    interface_reference['resourceGroups'],
                    interface_reference['networkInterfaces'])
                if network_interface.primary:
                    if self.group_by_security_group and \
                       self._security_groups[resource_group].get(network_interface.id, None):
                        host_vars['security_group'] = \
                            self._security_groups[resource_group][network_interface.id]['name']
                        host_vars['security_group_id'] = \
                            self._security_groups[resource_group][network_interface.id]['id']
                    host_vars['network_interface'] = network_interface.name
                    host_vars['network_interface_id'] = network_interface.id
                    host_vars['mac_address'] = network_interface.mac_address
                    for ip_config in network_interface.ip_configurations:
                        host_vars['private_ip'] = ip_config.private_ip_address
                        host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
                        if ip_config.public_ip_address:
                            public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
                            public_ip_address = self._network_client.public_ip_addresses.get(
                                public_ip_reference['resourceGroups'],
                                public_ip_reference['publicIPAddresses'])
                            # ansible connects via the public IP when one exists
                            host_vars['ansible_host'] = public_ip_address.ip_address
                            host_vars['public_ip'] = public_ip_address.ip_address
                            host_vars['public_ip_name'] = public_ip_address.name
                            host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
                            host_vars['public_ip_id'] = public_ip_address.id
                            if public_ip_address.dns_settings:
                                host_vars['fqdn'] = public_ip_address.dns_settings.fqdn

            self._add_host(host_vars)
def _selected_machines(self, virtual_machines):
selected_machines = []
for machine in virtual_machines:
if self._args.host and self._args.host == machine.name:
selected_machines.append(machine)
if self.tags and self._tags_match(machine.tags, self.tags):
selected_machines.append(machine)
if self.locations and machine.location in self.locations:
selected_machines.append(machine)
return selected_machines
def _get_security_groups(self, resource_group):
''' For a given resource_group build a mapping of network_interface.id to security_group name '''
if not self._security_groups:
self._security_groups = dict()
if not self._security_groups.get(resource_group):
self._security_groups[resource_group] = dict()
for group in self._network_client.network_security_groups.list(resource_group):
if group.network_interfaces:
for interface in group.network_interfaces:
self._security_groups[resource_group][interface.id] = dict(
name=group.name,
id=group.id
)
def _get_powerstate(self, resource_group, name):
try:
vm = self._compute_client.virtual_machines.get(resource_group,
name,
expand='instanceview')
except Exception as exc:
sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))
return next((s.code.replace('PowerState/', '')
for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
def _add_host(self, vars):
host_name = self._to_safe(vars['name'])
resource_group = self._to_safe(vars['resource_group'])
security_group = None
if vars.get('security_group'):
security_group = self._to_safe(vars['security_group'])
if self.group_by_resource_group:
if not self._inventory.get(resource_group):
self._inventory[resource_group] = []
self._inventory[resource_group].append(host_name)
if self.group_by_location:
if not self._inventory.get(vars['location']):
self._inventory[vars['location']] = []
self._inventory[vars['location']].append(host_name)
if self.group_by_security_group and security_group:
if not self._inventory.get(security_group):
self._inventory[security_group] = []
self._inventory[security_group].append(host_name)
self._inventory['_meta']['hostvars'][host_name] = vars
self._inventory['azure'].append(host_name)
if self.group_by_tag and vars.get('tags'):
for key, value in vars['tags'].iteritems():
safe_key = self._to_safe(key)
safe_value = safe_key + '_' + self._to_safe(value)
if not self._inventory.get(safe_key):
self._inventory[safe_key] = []
if not self._inventory.get(safe_value):
self._inventory[safe_value] = []
self._inventory[safe_key].append(host_name)
self._inventory[safe_value].append(host_name)
def _json_format_dict(self, pretty=False):
# convert inventory to json
if pretty:
return json.dumps(self._inventory, sort_keys=True, indent=2)
else:
return json.dumps(self._inventory)
def _get_settings(self):
# Load settings from the .ini, if it exists. Otherwise,
# look for environment values.
file_settings = self._load_settings()
if file_settings:
for key in AZURE_CONFIG_SETTINGS:
if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
values = file_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif file_settings.get(key):
val = self._to_boolean(file_settings[key])
setattr(self, key, val)
else:
env_settings = self._get_env_settings()
for key in AZURE_CONFIG_SETTINGS:
if key in('resource_groups', 'tags', 'locations') and env_settings.get(key):
values = env_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif env_settings.get(key, None) is not None:
val = self._to_boolean(env_settings[key])
setattr(self, key, val)
def _parse_ref_id(self, reference):
response = {}
keys = reference.strip('/').split('/')
for index in range(len(keys)):
if index < len(keys) - 1 and index % 2 == 0:
response[keys[index]] = keys[index + 1]
return response
def _to_boolean(self, value):
if value in ['Yes', 'yes', 1, 'True', 'true', True]:
result = True
elif value in ['No', 'no', 0, 'False', 'false', False]:
result = False
else:
result = True
return result
def _get_env_settings(self):
env_settings = dict()
for attribute, env_variable in AZURE_CONFIG_SETTINGS.iteritems():
env_settings[attribute] = os.environ.get(env_variable, None)
return env_settings
def _load_settings(self):
basename = os.path.splitext(os.path.basename(__file__))[0]
path = basename + '.ini'
config = None
settings = None
try:
config = ConfigParser.ConfigParser()
config.read(path)
except:
pass
if config is not None:
settings = dict()
for key in AZURE_CONFIG_SETTINGS:
try:
settings[key] = config.get('azure', key, raw=True)
except:
pass
return settings
def _tags_match(self, tag_obj, tag_args):
'''
Return True if the tags object from a VM contains the requested tag values.
:param tag_obj: Dictionary of string:string pairs
:param tag_args: List of strings in the form key=value
:return: boolean
'''
if not tag_obj:
return False
matches = 0
for arg in tag_args:
arg_key = arg
arg_value = None
if re.search(r':', arg):
arg_key, arg_value = arg.split(':')
if arg_value and tag_obj.get(arg_key, None) == arg_value:
matches += 1
elif not arg_value and tag_obj.get(arg_key, None) is not None:
matches += 1
if matches == len(tag_args):
return True
return False
def _to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
def main():
    """Script entry point: verify the Azure SDK is installed at the pinned
    version, then instantiate AzureInventory (which is expected to perform the
    whole inventory run — no further calls are made here)."""
    if not HAS_AZURE:
        sys.exit("The Azure python sdk is not installed (try 'pip install azure==2.0.0rc5') - {0}".format(HAS_AZURE_EXC))
    # The script is pinned to one SDK release; any other version may expose
    # incompatible APIs, so bail out early with a clear message.
    if LooseVersion(azure_compute_version) != LooseVersion(AZURE_MIN_VERSION):
        sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
                 "Do you have Azure == 2.0.0rc5 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
    AzureInventory()
# Allow the file to be executed directly as a dynamic inventory script.
if __name__ == '__main__':
    main()
| levenlabs/ansible | contrib/inventory/azure_rm.py | Python | gpl-3.0 | 32,139 | [
"Galaxy"
] | 7752510af303aaae678ebea932f00ae69a0cd68631d4a55b42fc7cae8256af73 |
"""
Class encapsulating the management of repositories installed into Galaxy from the Tool Shed.
"""
import copy
import logging
import os
from galaxy import util
from tool_shed.util import common_util
from tool_shed.util import container_util
from tool_shed.util import shed_util_common as suc
from tool_shed.util import tool_dependency_util
from tool_shed.util import xml_util
from galaxy.model.orm import and_
from tool_shed.galaxy_install.datatypes import custom_datatype_manager
from tool_shed.galaxy_install.metadata.installed_repository_metadata_manager import InstalledRepositoryMetadataManager
from tool_shed.galaxy_install.repository_dependencies import repository_dependency_manager
from tool_shed.galaxy_install.tools import data_manager
from tool_shed.galaxy_install.tools import tool_panel_manager
log = logging.getLogger( __name__ )
class InstalledRepositoryManager( object ):
    def __init__( self, app ):
        """
        Among other things, keep in in-memory sets of tuples defining installed repositories and tool dependencies along with
        the relationships between each of them.  This will allow for quick discovery of those repositories or components that
        can be uninstalled.  The feature allowing a Galaxy administrator to uninstall a repository should not be available to
        repositories or tool dependency packages that are required by other repositories or their contents (packages).  The
        uninstall feature should be available only at the repository hierarchy level where every dependency will be uninstalled.
        The exception for this is if an item (repository or tool dependency package) is not in an INSTALLED state - in these
        cases, the specific item can be uninstalled in order to attempt re-installation.

        :param app: the Galaxy application object; must provide ``install_model``
            and ``config`` (with ``tool_configs``, ``migrated_tools_config`` and
            ``manage_dependency_relationships``).
        """
        self.app = app
        self.install_model = self.app.install_model
        self.context = self.install_model.context
        self.tool_configs = self.app.config.tool_configs
        # Make sure the migrated-tools config is always among the tracked tool configs.
        if self.app.config.migrated_tools_config not in self.tool_configs:
            self.tool_configs.append( self.app.config.migrated_tools_config )
        self.installed_repository_dicts = []
        # Keep an in-memory dictionary whose keys are tuples defining tool_shed_repository objects (whose status is 'Installed')
        # and whose values are a list of tuples defining tool_shed_repository objects (whose status can be anything) required by
        # the key.  The value defines the entire repository dependency tree.
        self.repository_dependencies_of_installed_repositories = {}
        # Keep an in-memory dictionary whose keys are tuples defining tool_shed_repository objects (whose status is 'Installed')
        # and whose values are a list of tuples defining tool_shed_repository objects (whose status is 'Installed') required by
        # the key.  The value defines the entire repository dependency tree.
        self.installed_repository_dependencies_of_installed_repositories = {}
        # Keep an in-memory dictionary whose keys are tuples defining tool_shed_repository objects (whose status is 'Installed')
        # and whose values are a list of tuples defining tool_shed_repository objects (whose status is 'Installed') that require
        # the key.  This is the reverse mapping of the previous dictionary.
        self.installed_dependent_repositories_of_installed_repositories = {}
        # Keep an in-memory dictionary whose keys are tuples defining tool_shed_repository objects (whose status is 'Installed')
        # and whose values are a list of tuples defining its immediate tool_dependency objects (whose status can be anything).
        # The value defines only the immediate tool dependencies of the repository and does not include any dependencies of the
        # tool dependencies.
        self.tool_dependencies_of_installed_repositories = {}
        # Keep an in-memory dictionary whose keys are tuples defining tool_shed_repository objects (whose status is 'Installed')
        # and whose values are a list of tuples defining its immediate tool_dependency objects (whose status is 'Installed').
        # The value defines only the immediate tool dependencies of the repository and does not include any dependencies of the
        # tool dependencies.
        self.installed_tool_dependencies_of_installed_repositories = {}
        # Keep an in-memory dictionary whose keys are tuples defining tool_dependency objects (whose status is 'Installed') and
        # whose values are a list of tuples defining tool_dependency objects (whose status can be anything) required by the
        # installed tool dependency at runtime.  The value defines the entire tool dependency tree.
        self.runtime_tool_dependencies_of_installed_tool_dependencies = {}
        # Keep an in-memory dictionary whose keys are tuples defining tool_dependency objects (whose status is 'Installed') and
        # whose values are a list of tuples defining tool_dependency objects (whose status is 'Installed') that require the key
        # at runtime.  The value defines the entire tool dependency tree.
        self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies = {}
        if app.config.manage_dependency_relationships:
            # Load defined dependency relationships for installed tool shed repositories and their contents.
            self.load_dependency_relationships()
    def activate_repository( self, repository ):
        """
        Activate an installed tool shed repository that has been marked as deactivated.

        Flips the repository back to deleted=False / status=INSTALLED, then
        re-registers its tools in the tool panel, reinstalls its data managers,
        and reactivates its proprietary datatypes, as applicable.

        :param repository: a ToolShedRepository model object.
        """
        repository_clone_url = common_util.generate_clone_url_for_installed_repository( self.app, repository )
        shed_tool_conf, tool_path, relative_install_dir = suc.get_tool_panel_config_tool_path_install_dir( self.app, repository )
        repository.deleted = False
        repository.status = self.install_model.ToolShedRepository.installation_status.INSTALLED
        if repository.includes_tools_for_display_in_tool_panel:
            tpm = tool_panel_manager.ToolPanelManager( self.app )
            irmm = InstalledRepositoryMetadataManager( app=self.app,
                                                       tpm=tpm,
                                                       repository=repository,
                                                       changeset_revision=repository.changeset_revision,
                                                       metadata_dict=repository.metadata )
            repository_tools_tups = irmm.get_repository_tools_tups()
            # Reload tools into the appropriate tool panel section.
            tool_panel_dict = repository.metadata[ 'tool_panel_section' ]
            tpm.add_to_tool_panel( repository.name,
                                   repository_clone_url,
                                   repository.installed_changeset_revision,
                                   repository_tools_tups,
                                   repository.owner,
                                   shed_tool_conf,
                                   tool_panel_dict,
                                   new_install=False )
            if repository.includes_data_managers:
                tp, data_manager_relative_install_dir = repository.get_tool_relative_path( self.app )
                # Hack to add repository.name here, which is actually the root of the installed repository
                data_manager_relative_install_dir = os.path.join( data_manager_relative_install_dir, repository.name )
                dmh = data_manager.DataManagerHandler( self.app )
                new_data_managers = dmh.install_data_managers( self.app.config.shed_data_manager_config_file,
                                                               repository.metadata,
                                                               repository.get_shed_config_dict( self.app ),
                                                               data_manager_relative_install_dir,
                                                               repository,
                                                               repository_tools_tups )
        # Persist the status/deleted changes before touching datatypes.
        self.install_model.context.add( repository )
        self.install_model.context.flush()
        if repository.includes_datatypes:
            if tool_path:
                repository_install_dir = os.path.abspath( os.path.join( tool_path, relative_install_dir ) )
            else:
                repository_install_dir = os.path.abspath( relative_install_dir )
            # Activate proprietary datatypes.
            cdl = custom_datatype_manager.CustomDatatypeLoader( self.app )
            installed_repository_dict = cdl.load_installed_datatypes( repository,
                                                                      repository_install_dir,
                                                                      deactivate=False )
            if installed_repository_dict:
                converter_path = installed_repository_dict.get( 'converter_path' )
                if converter_path is not None:
                    cdl.load_installed_datatype_converters( installed_repository_dict, deactivate=False )
                display_path = installed_repository_dict.get( 'display_path' )
                if display_path is not None:
                    cdl.load_installed_display_applications( installed_repository_dict, deactivate=False )
def add_entry_to_installed_repository_dependencies_of_installed_repositories( self, repository ):
"""
Add an entry to self.installed_repository_dependencies_of_installed_repositories. A side-effect of this method
is the population of self.installed_dependent_repositories_of_installed_repositories. Since this method discovers
all repositories required by the received repository, it can use the list to add entries to the reverse dictionary.
"""
repository_tup = self.get_repository_tuple_for_installed_repository_manager( repository )
tool_shed, name, owner, installed_changeset_revision = repository_tup
# Get the list of repository dependencies for this repository.
status = self.install_model.ToolShedRepository.installation_status.INSTALLED
repository_dependency_tups = self.get_repository_dependency_tups_for_installed_repository( repository, status=status )
# Add an entry to self.installed_repository_dependencies_of_installed_repositories.
if repository_tup not in self.installed_repository_dependencies_of_installed_repositories:
debug_msg = "Adding an entry for revision %s of repository %s owned by %s " % ( installed_changeset_revision, name, owner )
debug_msg += "to installed_repository_dependencies_of_installed_repositories."
log.debug( debug_msg )
self.installed_repository_dependencies_of_installed_repositories[ repository_tup ] = repository_dependency_tups
# Use the repository_dependency_tups to add entries to the reverse dictionary
# self.installed_dependent_repositories_of_installed_repositories.
for required_repository_tup in repository_dependency_tups:
debug_msg = "Appending revision %s of repository %s owned by %s " % ( installed_changeset_revision, name, owner )
debug_msg += "to all dependent repositories in installed_dependent_repositories_of_installed_repositories."
log.debug( debug_msg )
if required_repository_tup in self.installed_dependent_repositories_of_installed_repositories:
self.installed_dependent_repositories_of_installed_repositories[ required_repository_tup ].append( repository_tup )
else:
self.installed_dependent_repositories_of_installed_repositories[ required_repository_tup ] = [ repository_tup ]
def add_entry_to_installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies( self, tool_dependency ):
"""Add an entry to self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies."""
tool_dependency_tup = self.get_tool_dependency_tuple_for_installed_repository_manager( tool_dependency )
if tool_dependency_tup not in self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies:
tool_shed_repository_id, name, version, type = tool_dependency_tup
debug_msg = "Adding an entry for version %s of %s %s " % ( version, type, name )
debug_msg += "to installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies."
log.debug( debug_msg )
status = self.install_model.ToolDependency.installation_status.INSTALLED
installed_runtime_dependent_tool_dependency_tups = self.get_runtime_dependent_tool_dependency_tuples( tool_dependency,
status=status )
self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies[ tool_dependency_tup ] = \
installed_runtime_dependent_tool_dependency_tups
def add_entry_to_installed_tool_dependencies_of_installed_repositories( self, repository ):
"""Add an entry to self.installed_tool_dependencies_of_installed_repositories."""
repository_tup = self.get_repository_tuple_for_installed_repository_manager( repository )
if repository_tup not in self.installed_tool_dependencies_of_installed_repositories:
tool_shed, name, owner, installed_changeset_revision = repository_tup
debug_msg = "Adding an entry for revision %s of repository %s owned by %s " % ( installed_changeset_revision, name, owner )
debug_msg += "to installed_tool_dependencies_of_installed_repositories."
log.debug( debug_msg )
installed_tool_dependency_tups = []
for tool_dependency in repository.tool_dependencies:
if tool_dependency.status == self.app.install_model.ToolDependency.installation_status.INSTALLED:
tool_dependency_tup = self.get_tool_dependency_tuple_for_installed_repository_manager( tool_dependency )
installed_tool_dependency_tups.append( tool_dependency_tup )
self.installed_tool_dependencies_of_installed_repositories[ repository_tup ] = installed_tool_dependency_tups
def add_entry_to_repository_dependencies_of_installed_repositories( self, repository ):
"""Add an entry to self.repository_dependencies_of_installed_repositories."""
repository_tup = self.get_repository_tuple_for_installed_repository_manager( repository )
if repository_tup not in self.repository_dependencies_of_installed_repositories:
tool_shed, name, owner, installed_changeset_revision = repository_tup
debug_msg = "Adding an entry for revision %s of repository %s owned by %s " % ( installed_changeset_revision, name, owner )
debug_msg += "to repository_dependencies_of_installed_repositories."
log.debug( debug_msg )
repository_dependency_tups = self.get_repository_dependency_tups_for_installed_repository( repository, status=None )
self.repository_dependencies_of_installed_repositories[ repository_tup ] = repository_dependency_tups
def add_entry_to_runtime_tool_dependencies_of_installed_tool_dependencies( self, tool_dependency ):
"""Add an entry to self.runtime_tool_dependencies_of_installed_tool_dependencies."""
tool_dependency_tup = self.get_tool_dependency_tuple_for_installed_repository_manager( tool_dependency )
if tool_dependency_tup not in self.runtime_tool_dependencies_of_installed_tool_dependencies:
tool_shed_repository_id, name, version, type = tool_dependency_tup
debug_msg = "Adding an entry for version %s of %s %s " % ( version, type, name )
debug_msg += "to runtime_tool_dependencies_of_installed_tool_dependencies."
log.debug( debug_msg )
runtime_dependent_tool_dependency_tups = self.get_runtime_dependent_tool_dependency_tuples( tool_dependency,
status=None )
self.runtime_tool_dependencies_of_installed_tool_dependencies[ tool_dependency_tup ] = \
runtime_dependent_tool_dependency_tups
def add_entry_to_tool_dependencies_of_installed_repositories( self, repository ):
"""Add an entry to self.tool_dependencies_of_installed_repositories."""
repository_tup = self.get_repository_tuple_for_installed_repository_manager( repository )
if repository_tup not in self.tool_dependencies_of_installed_repositories:
tool_shed, name, owner, installed_changeset_revision = repository_tup
debug_msg = "Adding an entry for revision %s of repository %s owned by %s " % ( installed_changeset_revision, name, owner )
debug_msg += "to tool_dependencies_of_installed_repositories."
log.debug( debug_msg )
tool_dependency_tups = []
for tool_dependency in repository.tool_dependencies:
tool_dependency_tup = self.get_tool_dependency_tuple_for_installed_repository_manager( tool_dependency )
tool_dependency_tups.append( tool_dependency_tup )
self.tool_dependencies_of_installed_repositories[ repository_tup ] = tool_dependency_tups
def get_containing_repository_for_tool_dependency( self, tool_dependency_tup ):
tool_shed_repository_id, name, version, type = tool_dependency_tup
return self.app.install_model.context.query( self.app.install_model.ToolShedRepository ).get( tool_shed_repository_id )
def get_dependencies_for_repository( self, tool_shed_url, repo_info_dict, includes_tool_dependencies, updating=False ):
"""
Return dictionaries containing the sets of installed and missing tool dependencies and repository
dependencies associated with the repository defined by the received repo_info_dict.
"""
rdim = repository_dependency_manager.RepositoryDependencyInstallManager( self.app )
repository = None
installed_rd = {}
installed_td = {}
missing_rd = {}
missing_td = {}
name = repo_info_dict.keys()[ 0 ]
repo_info_tuple = repo_info_dict[ name ]
description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = \
suc.get_repo_info_tuple_contents( repo_info_tuple )
if tool_dependencies:
if not includes_tool_dependencies:
includes_tool_dependencies = True
# Inspect the tool_dependencies dictionary to separate the installed and missing tool dependencies.
# We don't add to installed_td and missing_td here because at this point they are empty.
installed_td, missing_td = self.get_installed_and_missing_tool_dependencies_for_repository( tool_dependencies )
# In cases where a repository dependency is required only for compiling a dependent repository's
# tool dependency, the value of repository_dependencies will be an empty dictionary here.
if repository_dependencies:
# We have a repository with one or more defined repository dependencies.
if not repository:
repository = suc.get_repository_for_dependency_relationship( self.app,
tool_shed_url,
name,
repository_owner,
changeset_revision )
if not updating and repository and repository.metadata:
installed_rd, missing_rd = self.get_installed_and_missing_repository_dependencies( repository )
else:
installed_rd, missing_rd = \
self.get_installed_and_missing_repository_dependencies_for_new_or_updated_install( repo_info_tuple )
# Discover all repository dependencies and retrieve information for installing them.
all_repo_info_dict = rdim.get_required_repo_info_dicts( tool_shed_url, util.listify( repo_info_dict ) )
has_repository_dependencies = all_repo_info_dict.get( 'has_repository_dependencies', False )
has_repository_dependencies_only_if_compiling_contained_td = \
all_repo_info_dict.get( 'has_repository_dependencies_only_if_compiling_contained_td', False )
includes_tools_for_display_in_tool_panel = all_repo_info_dict.get( 'includes_tools_for_display_in_tool_panel', False )
includes_tool_dependencies = all_repo_info_dict.get( 'includes_tool_dependencies', False )
includes_tools = all_repo_info_dict.get( 'includes_tools', False )
required_repo_info_dicts = all_repo_info_dict.get( 'all_repo_info_dicts', [] )
# Display tool dependencies defined for each of the repository dependencies.
if required_repo_info_dicts:
required_tool_dependencies = {}
for rid in required_repo_info_dicts:
for name, repo_info_tuple in rid.items():
description, repository_clone_url, changeset_revision, ctx_rev, \
repository_owner, rid_repository_dependencies, rid_tool_dependencies = \
suc.get_repo_info_tuple_contents( repo_info_tuple )
if rid_tool_dependencies:
for td_key, td_dict in rid_tool_dependencies.items():
if td_key not in required_tool_dependencies:
required_tool_dependencies[ td_key ] = td_dict
if required_tool_dependencies:
# Discover and categorize all tool dependencies defined for this repository's repository dependencies.
required_installed_td, required_missing_td = \
self.get_installed_and_missing_tool_dependencies_for_repository( required_tool_dependencies )
if required_installed_td:
if not includes_tool_dependencies:
includes_tool_dependencies = True
for td_key, td_dict in required_installed_td.items():
if td_key not in installed_td:
installed_td[ td_key ] = td_dict
if required_missing_td:
if not includes_tool_dependencies:
includes_tool_dependencies = True
for td_key, td_dict in required_missing_td.items():
if td_key not in missing_td:
missing_td[ td_key ] = td_dict
else:
# We have a single repository with (possibly) no defined repository dependencies.
all_repo_info_dict = rdim.get_required_repo_info_dicts( tool_shed_url, util.listify( repo_info_dict ) )
has_repository_dependencies = all_repo_info_dict.get( 'has_repository_dependencies', False )
has_repository_dependencies_only_if_compiling_contained_td = \
all_repo_info_dict.get( 'has_repository_dependencies_only_if_compiling_contained_td', False )
includes_tools_for_display_in_tool_panel = all_repo_info_dict.get( 'includes_tools_for_display_in_tool_panel', False )
includes_tool_dependencies = all_repo_info_dict.get( 'includes_tool_dependencies', False )
includes_tools = all_repo_info_dict.get( 'includes_tools', False )
required_repo_info_dicts = all_repo_info_dict.get( 'all_repo_info_dicts', [] )
dependencies_for_repository_dict = \
dict( changeset_revision=changeset_revision,
has_repository_dependencies=has_repository_dependencies,
has_repository_dependencies_only_if_compiling_contained_td=has_repository_dependencies_only_if_compiling_contained_td,
includes_tool_dependencies=includes_tool_dependencies,
includes_tools=includes_tools,
includes_tools_for_display_in_tool_panel=includes_tools_for_display_in_tool_panel,
installed_repository_dependencies=installed_rd,
installed_tool_dependencies=installed_td,
missing_repository_dependencies=missing_rd,
missing_tool_dependencies=missing_td,
name=name,
repository_owner=repository_owner )
return dependencies_for_repository_dict
    def get_installed_and_missing_repository_dependencies( self, repository ):
        """
        Return the installed and missing repository dependencies for a tool shed repository that has a record
        in the Galaxy database, but may or may not be installed. In this case, the repository dependencies are
        associated with the repository in the database. Do not include a repository dependency if it is required
        only to compile a tool dependency defined for the dependent repository since these special kinds of repository
        dependencies are really a dependency of the dependent repository's contained tool dependency, and only
        if that tool dependency requires compilation.

        :param repository: a ToolShedRepository model object.
        :returns: tuple of (installed_repository_dependencies, missing_repository_dependencies)
            dicts; when non-empty each carries 'root_key', 'description' and a
            root_key entry listing the dependency tuples.
        """
        missing_repository_dependencies = {}
        installed_repository_dependencies = {}
        has_repository_dependencies = repository.has_repository_dependencies
        if has_repository_dependencies:
            # The repository dependencies container will include only the immediate repository
            # dependencies of this repository, so the container will be only a single level in depth.
            metadata = repository.metadata
            installed_rd_tups = []
            missing_rd_tups = []
            for tsr in repository.repository_dependencies:
                prior_installation_required = self.set_prior_installation_required( repository, tsr )
                only_if_compiling_contained_td = self.set_only_if_compiling_contained_td( repository, tsr )
                rd_tup = [ tsr.tool_shed,
                           tsr.name,
                           tsr.owner,
                           tsr.changeset_revision,
                           prior_installation_required,
                           only_if_compiling_contained_td,
                           tsr.id,
                           tsr.status ]
                if tsr.status == self.app.install_model.ToolShedRepository.installation_status.INSTALLED:
                    installed_rd_tups.append( rd_tup )
                else:
                    # We'll only add the rd_tup to the missing_rd_tups list if the received repository
                    # has tool dependencies that are not correctly installed. This may prove to be a
                    # weak check since the repository in question may not have anything to do with
                    # compiling the missing tool dependencies. If we discover that this is a problem,
                    # more granular checking will be necessary here.
                    if repository.missing_tool_dependencies:
                        if not self.repository_dependency_needed_only_for_compiling_tool_dependency( repository, tsr ):
                            missing_rd_tups.append( rd_tup )
                    else:
                        missing_rd_tups.append( rd_tup )
            if installed_rd_tups or missing_rd_tups:
                # Get the description from the metadata in case it has a value.
                repository_dependencies = metadata.get( 'repository_dependencies', {} )
                description = repository_dependencies.get( 'description', None )
                # We need to add a root_key entry to one or both of installed_repository_dependencies dictionary and the
                # missing_repository_dependencies dictionaries for proper display parsing.
                # NOTE(review): prior_installation_required and only_if_compiling_contained_td
                # below carry the values from the LAST loop iteration above — confirm intent.
                root_key = container_util.generate_repository_dependencies_key_for_repository( repository.tool_shed,
                                                                                               repository.name,
                                                                                               repository.owner,
                                                                                               repository.installed_changeset_revision,
                                                                                               prior_installation_required,
                                                                                               only_if_compiling_contained_td )
                if installed_rd_tups:
                    installed_repository_dependencies[ 'root_key' ] = root_key
                    installed_repository_dependencies[ root_key ] = installed_rd_tups
                    installed_repository_dependencies[ 'description' ] = description
                if missing_rd_tups:
                    missing_repository_dependencies[ 'root_key' ] = root_key
                    missing_repository_dependencies[ root_key ] = missing_rd_tups
                    missing_repository_dependencies[ 'description' ] = description
        return installed_repository_dependencies, missing_repository_dependencies
    def get_installed_and_missing_repository_dependencies_for_new_or_updated_install( self, repo_info_tuple ):
        """
        Parse the received repository_dependencies dictionary that is associated with a repository being
        installed into Galaxy for the first time and attempt to determine repository dependencies that are
        already installed and those that are not.

        :param repo_info_tuple: tuple of (description, repository_clone_url, changeset_revision,
            ctx_rev, repository_owner, repository_dependencies, tool_dependencies).
        :returns: tuple of (installed_repository_dependencies, missing_repository_dependencies)
            dicts keyed by 'root_key', 'description' and the root_key itself.
        """
        missing_repository_dependencies = {}
        installed_repository_dependencies = {}
        missing_rd_tups = []
        installed_rd_tups = []
        description, \
            repository_clone_url, \
            changeset_revision, \
            ctx_rev, \
            repository_owner, \
            repository_dependencies, \
            tool_dependencies = \
            suc.get_repo_info_tuple_contents( repo_info_tuple )
        if repository_dependencies:
            description = repository_dependencies[ 'description' ]
            root_key = repository_dependencies[ 'root_key' ]
            # The repository dependencies container will include only the immediate repository dependencies of
            # this repository, so the container will be only a single level in depth.
            for key, rd_tups in repository_dependencies.items():
                # Skip the metadata entries; only dependency lists remain.
                if key in [ 'description', 'root_key' ]:
                    continue
                for rd_tup in rd_tups:
                    tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
                        common_util.parse_repository_dependency_tuple( rd_tup )
                    # Updates to installed repository revisions may have occurred, so make sure to locate the
                    # appropriate repository revision if one exists. We need to create a temporary repo_info_tuple
                    # that includes the correct repository owner which we get from the current rd_tup. The current
                    # tuple looks like: ( description, repository_clone_url, changeset_revision, ctx_rev, repository_owner,
                    #                     repository_dependencies, installed_td )
                    tmp_clone_url = common_util.generate_clone_url_from_repo_info_tup( self.app, rd_tup )
                    tmp_repo_info_tuple = ( None, tmp_clone_url, changeset_revision, None, owner, None, None )
                    repository, installed_changeset_revision = suc.repository_was_previously_installed( self.app,
                                                                                                        tool_shed,
                                                                                                        name,
                                                                                                        tmp_repo_info_tuple,
                                                                                                        from_tip=False )
                    if repository:
                        new_rd_tup = [ tool_shed,
                                       name,
                                       owner,
                                       changeset_revision,
                                       prior_installation_required,
                                       only_if_compiling_contained_td,
                                       repository.id,
                                       repository.status ]
                        if repository.status == self.install_model.ToolShedRepository.installation_status.INSTALLED:
                            if new_rd_tup not in installed_rd_tups:
                                installed_rd_tups.append( new_rd_tup )
                        else:
                            # A repository dependency that is not installed will not be considered missing if its value
                            # for only_if_compiling_contained_td is True This is because this type of repository dependency
                            # will only be considered at the time that the specified tool dependency is being installed, and
                            # even then only if the compiled binary of the tool dependency could not be installed due to the
                            # unsupported installation environment.
                            if not util.asbool( only_if_compiling_contained_td ):
                                if new_rd_tup not in missing_rd_tups:
                                    missing_rd_tups.append( new_rd_tup )
                    else:
                        # No database record exists for this dependency at all.
                        new_rd_tup = [ tool_shed,
                                       name,
                                       owner,
                                       changeset_revision,
                                       prior_installation_required,
                                       only_if_compiling_contained_td,
                                       None,
                                       'Never installed' ]
                        if not util.asbool( only_if_compiling_contained_td ):
                            # A repository dependency that is not installed will not be considered missing if its value for
                            # only_if_compiling_contained_td is True - see above...
                            if new_rd_tup not in missing_rd_tups:
                                missing_rd_tups.append( new_rd_tup )
        if installed_rd_tups:
            installed_repository_dependencies[ 'root_key' ] = root_key
            installed_repository_dependencies[ root_key ] = installed_rd_tups
            installed_repository_dependencies[ 'description' ] = description
        if missing_rd_tups:
            missing_repository_dependencies[ 'root_key' ] = root_key
            missing_repository_dependencies[ root_key ] = missing_rd_tups
            missing_repository_dependencies[ 'description' ] = description
        return installed_repository_dependencies, missing_repository_dependencies
    def get_installed_and_missing_tool_dependencies_for_repository( self, tool_dependencies_dict ):
        """
        Return the lists of installed tool dependencies and missing tool dependencies for a set of repositories
        being installed into Galaxy.

        :param tool_dependencies_dict: mapping of tool dependency keys to definition dictionaries as
            generated from tool_dependencies.xml metadata.  The received dictionary is never mutated;
            a deep copy is processed instead.
        :return: a two-tuple ( installed_tool_dependencies, missing_tool_dependencies ), each keyed
            like the received tool_dependencies_dict.
        """
        # FIXME: This implementation breaks when updates to a repository contain dependencies that result in
        # multiple entries for a specific tool dependency. A scenario where this can happen is where 2 repositories
        # define the same dependency internally (not using the complex repository dependency definition to a separate
        # package repository approach). If 2 repositories contain the same tool_dependencies.xml file, one dependency
        # will be lost since the values in these returned dictionaries are not lists. All tool dependency dictionaries
        # should have lists as values. These scenarios are probably extreme corner cases, but still should be handled.
        installed_tool_dependencies = {}
        missing_tool_dependencies = {}
        if tool_dependencies_dict:
            # Make sure not to change anything in the received tool_dependencies_dict as that would be a bad side-effect!
            tmp_tool_dependencies_dict = copy.deepcopy( tool_dependencies_dict )
            for td_key, val in tmp_tool_dependencies_dict.items():
                # Default the status to NEVER_INSTALLED.
                tool_dependency_status = self.install_model.ToolDependency.installation_status.NEVER_INSTALLED
                # Set environment tool dependencies are a list.
                if td_key == 'set_environment':
                    new_val = []
                    for requirement_dict in val:
                        # Each requirement_dict looks something like:
                        # {'repository_name': 'xx',
                        #  'name': 'bwa',
                        #  'version': '0.5.9',
                        #  'repository_owner': 'yy',
                        #  'changeset_revision': 'zz',
                        #  'type': 'package'}
                        tool_dependency = \
                            tool_dependency_util.get_tool_dependency_by_name_version_type( self.app,
                                                                                           requirement_dict.get( 'name', None ),
                                                                                           requirement_dict.get( 'version', None ),
                                                                                           requirement_dict.get( 'type', 'package' ) )
                        if tool_dependency:
                            # A database record exists, so report its actual status.
                            tool_dependency_status = tool_dependency.status
                        requirement_dict[ 'status' ] = tool_dependency_status
                        new_val.append( requirement_dict )
                        # Classify under installed or missing based on the database status.
                        if tool_dependency_status in [ self.install_model.ToolDependency.installation_status.INSTALLED ]:
                            if td_key in installed_tool_dependencies:
                                installed_tool_dependencies[ td_key ].extend( new_val )
                            else:
                                installed_tool_dependencies[ td_key ] = new_val
                        else:
                            if td_key in missing_tool_dependencies:
                                missing_tool_dependencies[ td_key ].extend( new_val )
                            else:
                                missing_tool_dependencies[ td_key ] = new_val
                else:
                    # The val dictionary looks something like this:
                    # {'repository_name': 'xx',
                    #  'name': 'bwa',
                    #  'version': '0.5.9',
                    #  'repository_owner': 'yy',
                    #  'changeset_revision': 'zz',
                    #  'type': 'package'}
                    tool_dependency = tool_dependency_util.get_tool_dependency_by_name_version_type( self.app,
                                                                                                     val.get( 'name', None ),
                                                                                                     val.get( 'version', None ),
                                                                                                     val.get( 'type', 'package' ) )
                    if tool_dependency:
                        tool_dependency_status = tool_dependency.status
                        val[ 'status' ] = tool_dependency_status
                    if tool_dependency_status in [ self.install_model.ToolDependency.installation_status.INSTALLED ]:
                        installed_tool_dependencies[ td_key ] = val
                    else:
                        missing_tool_dependencies[ td_key ] = val
        return installed_tool_dependencies, missing_tool_dependencies
def get_repository_dependency_tups_for_installed_repository( self, repository, dependency_tups=None, status=None ):
"""
Return a list of of tuples defining tool_shed_repository objects (whose status can be anything) required by the
received repository. The returned list defines the entire repository dependency tree. This method is called
only from Galaxy.
"""
if dependency_tups is None:
dependency_tups = []
repository_tup = self.get_repository_tuple_for_installed_repository_manager( repository )
for rrda in repository.required_repositories:
repository_dependency = rrda.repository_dependency
required_repository = repository_dependency.repository
if status is None or required_repository.status == status:
required_repository_tup = self.get_repository_tuple_for_installed_repository_manager( required_repository )
if required_repository_tup == repository_tup:
# We have a circular repository dependency relationship, skip this entry.
continue
if required_repository_tup not in dependency_tups:
dependency_tups.append( required_repository_tup )
return get_repository_dependency_tups_for_installed_repository( required_repository,
dependency_tups=dependency_tups )
return dependency_tups
def get_repository_tuple_for_installed_repository_manager( self, repository ):
return ( str( repository.tool_shed ),
str( repository.name ),
str( repository.owner ),
str( repository.installed_changeset_revision ) )
    def get_repository_install_dir( self, tool_shed_repository ):
        """
        Return the installation directory for the received tool_shed_repository, or None if it
        cannot be located under any tool path defined in self.tool_configs.
        """
        for tool_config in self.tool_configs:
            tree, error_message = xml_util.parse_xml( tool_config )
            if tree is None:
                # NOTE(review): an unparseable tool config aborts the whole search instead of
                # falling through to the remaining configs - confirm this is intentional.
                return None
            root = tree.getroot()
            tool_path = root.get( 'tool_path', None )
            if tool_path:
                # Repositories are laid out as <tool_path>/<shed host>/repos/<owner>/<name>/<revision>.
                ts = common_util.remove_port_from_tool_shed_url( str( tool_shed_repository.tool_shed ) )
                relative_path = os.path.join( tool_path,
                                              ts,
                                              'repos',
                                              str( tool_shed_repository.owner ),
                                              str( tool_shed_repository.name ),
                                              str( tool_shed_repository.installed_changeset_revision ) )
                if os.path.exists( relative_path ):
                    return relative_path
        return None
def get_runtime_dependent_tool_dependency_tuples( self, tool_dependency, status=None ):
"""
Return the list of tool dependency objects that require the received tool dependency at run time. The returned
list will be filtered by the received status if it is not None. This method is called only from Galaxy.
"""
runtime_dependent_tool_dependency_tups = []
required_env_shell_file_path = tool_dependency.get_env_shell_file_path( self.app )
if required_env_shell_file_path:
required_env_shell_file_path = os.path.abspath( required_env_shell_file_path )
if required_env_shell_file_path is not None:
for td in self.app.install_model.context.query( self.app.install_model.ToolDependency ):
if status is None or td.status == status:
env_shell_file_path = td.get_env_shell_file_path( self.app )
if env_shell_file_path is not None:
try:
contents = open( env_shell_file_path, 'r' ).read()
except Exception, e:
contents = None
log.debug( 'Error reading file %s, so cannot determine if package %s requires package %s at run time: %s' % \
( str( env_shell_file_path ), str( td.name ), str( tool_dependency.name ), str( e ) ) )
if contents is not None and contents.find( required_env_shell_file_path ) >= 0:
td_tuple = get_tool_dependency_tuple_for_installed_repository_manager( td )
runtime_dependent_tool_dependency_tups.append( td_tuple )
return runtime_dependent_tool_dependency_tups
def get_tool_dependency_tuple_for_installed_repository_manager( self, tool_dependency ):
if tool_dependency.type is None:
type = None
else:
type = str( tool_dependency.type )
return ( tool_dependency.tool_shed_repository_id, str( tool_dependency.name ), str( tool_dependency.version ), type )
def handle_existing_tool_dependencies_that_changed_in_update( self, repository, original_dependency_dict,
new_dependency_dict ):
"""
This method is called when a Galaxy admin is getting updates for an installed tool shed
repository in order to cover the case where an existing tool dependency was changed (e.g.,
the version of the dependency was changed) but the tool version for which it is a dependency
was not changed. In this case, we only want to determine if any of the dependency information
defined in original_dependency_dict was changed in new_dependency_dict. We don't care if new
dependencies were added in new_dependency_dict since they will just be treated as missing
dependencies for the tool.
"""
updated_tool_dependency_names = []
deleted_tool_dependency_names = []
for original_dependency_key, original_dependency_val_dict in original_dependency_dict.items():
if original_dependency_key not in new_dependency_dict:
updated_tool_dependency = self.update_existing_tool_dependency( repository,
original_dependency_val_dict,
new_dependency_dict )
if updated_tool_dependency:
updated_tool_dependency_names.append( updated_tool_dependency.name )
else:
deleted_tool_dependency_names.append( original_dependency_val_dict[ 'name' ] )
return updated_tool_dependency_names, deleted_tool_dependency_names
def handle_repository_install( self, repository ):
"""Load the dependency relationships for a repository that was just installed or reinstalled."""
# Populate self.repository_dependencies_of_installed_repositories.
self.add_entry_to_repository_dependencies_of_installed_repositories( repository )
# Populate self.installed_repository_dependencies_of_installed_repositories.
self.add_entry_to_installed_repository_dependencies_of_installed_repositories( repository )
# Populate self.tool_dependencies_of_installed_repositories.
self.add_entry_to_tool_dependencies_of_installed_repositories( repository )
# Populate self.installed_tool_dependencies_of_installed_repositories.
self.add_entry_to_installed_tool_dependencies_of_installed_repositories( repository )
for tool_dependency in repository.tool_dependencies:
# Populate self.runtime_tool_dependencies_of_installed_tool_dependencies.
self.add_entry_to_runtime_tool_dependencies_of_installed_tool_dependencies( tool_dependency )
# Populate self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies.
self.add_entry_to_installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies( tool_dependency )
def handle_repository_uninstall( self, repository ):
"""Remove the dependency relationships for a repository that was just uninstalled."""
for tool_dependency in repository.tool_dependencies:
tool_dependency_tup = self.get_tool_dependency_tuple_for_installed_repository_manager( tool_dependency )
# Remove this tool_dependency from all values in
# self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies
altered_installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies = {}
for td_tup, installed_runtime_dependent_tool_dependency_tups in \
self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies.items():
if tool_dependency_tup in installed_runtime_dependent_tool_dependency_tups:
# Remove the tool_dependency from the list.
installed_runtime_dependent_tool_dependency_tups.remove( tool_dependency_tup )
# Add the possibly altered list to the altered dictionary.
altered_installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies[ td_tup ] = \
installed_runtime_dependent_tool_dependency_tups
self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies = \
altered_installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies
# Remove the entry for this tool_dependency from self.runtime_tool_dependencies_of_installed_tool_dependencies.
self.remove_entry_from_runtime_tool_dependencies_of_installed_tool_dependencies( tool_dependency )
# Remove the entry for this tool_dependency from
# self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies.
self.remove_entry_from_installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies( tool_dependency )
# Remove this repository's entry from self.installed_tool_dependencies_of_installed_repositories.
self.remove_entry_from_installed_tool_dependencies_of_installed_repositories( repository )
# Remove this repository's entry from self.tool_dependencies_of_installed_repositories
self.remove_entry_from_tool_dependencies_of_installed_repositories( repository )
# Remove this repository's entry from self.installed_repository_dependencies_of_installed_repositories.
self.remove_entry_from_installed_repository_dependencies_of_installed_repositories( repository )
# Remove this repository's entry from self.repository_dependencies_of_installed_repositories.
self.remove_entry_from_repository_dependencies_of_installed_repositories( repository )
def handle_tool_dependency_install( self, repository, tool_dependency ):
"""Load the dependency relationships for a tool dependency that was just installed independently of its containing repository."""
# The received repository must have a status of 'Installed'. The value of tool_dependency.status will either be
# 'Installed' or 'Error', but we only need to change the in-memory dictionaries if it is 'Installed'.
if tool_dependency.is_installed:
# Populate self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies.
self.add_entry_to_installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies( tool_dependency )
# Populate self.installed_tool_dependencies_of_installed_repositories.
repository_tup = self.get_repository_tuple_for_installed_repository_manager( repository )
tool_dependency_tup = self.get_tool_dependency_tuple_for_installed_repository_manager( tool_dependency )
if repository_tup in self.installed_tool_dependencies_of_installed_repositories:
self.installed_tool_dependencies_of_installed_repositories[ repository_tup ].append( tool_dependency_tup )
else:
self.installed_tool_dependencies_of_installed_repositories[ repository_tup ] = [ tool_dependency_tup ]
    def load_dependency_relationships( self ):
        """Load relationships for all installed repositories and tool dependencies into in-memory dictionaries."""
        # Get the list of installed tool shed repositories.
        for repository in self.context.query( self.app.install_model.ToolShedRepository ) \
                                      .filter( self.app.install_model.ToolShedRepository.table.c.status ==
                                               self.app.install_model.ToolShedRepository.installation_status.INSTALLED ):
            # Populate self.repository_dependencies_of_installed_repositories.
            self.add_entry_to_repository_dependencies_of_installed_repositories( repository )
            # Populate self.installed_repository_dependencies_of_installed_repositories.
            self.add_entry_to_installed_repository_dependencies_of_installed_repositories( repository )
            # Populate self.tool_dependencies_of_installed_repositories.
            self.add_entry_to_tool_dependencies_of_installed_repositories( repository )
            # Populate self.installed_tool_dependencies_of_installed_repositories.
            self.add_entry_to_installed_tool_dependencies_of_installed_repositories( repository )
        # Get the list of installed tool dependencies.
        for tool_dependency in self.context.query( self.app.install_model.ToolDependency ) \
                                           .filter( self.app.install_model.ToolDependency.table.c.status ==
                                                    self.app.install_model.ToolDependency.installation_status.INSTALLED ):
            # Populate self.runtime_tool_dependencies_of_installed_tool_dependencies.
            self.add_entry_to_runtime_tool_dependencies_of_installed_tool_dependencies( tool_dependency )
            # Populate self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies.
            self.add_entry_to_installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies( tool_dependency )
    def load_proprietary_datatypes( self ):
        """
        Load datatypes defined by installed, non-deleted tool shed repositories that include
        datatypes, appending each loaded repository dictionary to self.installed_repository_dicts.
        """
        cdl = custom_datatype_manager.CustomDatatypeLoader( self.app )
        for tool_shed_repository in self.context.query( self.install_model.ToolShedRepository ) \
                                                .filter( and_( self.install_model.ToolShedRepository.table.c.includes_datatypes==True,
                                                               self.install_model.ToolShedRepository.table.c.deleted==False ) ) \
                                                .order_by( self.install_model.ToolShedRepository.table.c.id ):
            # Datatypes can only be loaded when the repository's files are still on disk.
            relative_install_dir = self.get_repository_install_dir( tool_shed_repository )
            if relative_install_dir:
                installed_repository_dict = cdl.load_installed_datatypes( tool_shed_repository, relative_install_dir )
                if installed_repository_dict:
                    self.installed_repository_dicts.append( installed_repository_dict )
def load_proprietary_converters_and_display_applications( self, deactivate=False ):
cdl = custom_datatype_manager.CustomDatatypeLoader( self.app )
for installed_repository_dict in self.installed_repository_dicts:
if installed_repository_dict[ 'converter_path' ]:
cdl.load_installed_datatype_converters( installed_repository_dict, deactivate=deactivate )
if installed_repository_dict[ 'display_path' ]:
cdl.load_installed_display_applications( installed_repository_dict, deactivate=deactivate )
def purge_repository( self, repository ):
"""Purge a repository with status New (a white ghost) from the database."""
sa_session = self.app.model.context.current
status = 'ok'
message = ''
purged_tool_versions = 0
purged_tool_dependencies = 0
purged_required_repositories = 0
purged_orphan_repository_repository_dependency_association_records = 0
purged_orphan_repository_dependency_records = 0
if repository.is_new:
# Purge this repository's associated tool versions.
if repository.tool_versions:
for tool_version in repository.tool_versions:
if tool_version.parent_tool_association:
for tool_version_association in tool_version.parent_tool_association:
try:
sa_session.delete( tool_version_association )
sa_session.flush()
except Exception, e:
status = 'error'
message = 'Error attempting to purge tool_versions for the repository named %s with status %s: %s.' % \
( str( repository.name ), str( repository.status ), str( e ) )
return status, message
if tool_version.child_tool_association:
for tool_version_association in tool_version.child_tool_association:
try:
sa_session.delete( tool_version_association )
sa_session.flush()
except Exception, e:
status = 'error'
message = 'Error attempting to purge tool_versions for the repository named %s with status %s: %s.' % \
( str( repository.name ), str( repository.status ), str( e ) )
return status, message
try:
sa_session.delete( tool_version )
sa_session.flush()
purged_tool_versions += 1
except Exception, e:
status = 'error'
message = 'Error attempting to purge tool_versions for the repository named %s with status %s: %s.' % \
( str( repository.name ), str( repository.status ), str( e ) )
return status, message
# Purge this repository's associated tool dependencies.
if repository.tool_dependencies:
for tool_dependency in repository.tool_dependencies:
try:
sa_session.delete( tool_dependency )
sa_session.flush()
purged_tool_dependencies += 1
except Exception, e:
status = 'error'
message = 'Error attempting to purge tool_dependencies for the repository named %s with status %s: %s.' % \
( str( repository.name ), str( repository.status ), str( e ) )
return status, message
# Purge this repository's associated required repositories.
if repository.required_repositories:
for rrda in repository.required_repositories:
try:
sa_session.delete( rrda )
sa_session.flush()
purged_required_repositories += 1
except Exception, e:
status = 'error'
message = 'Error attempting to purge required_repositories for the repository named %s with status %s: %s.' % \
( str( repository.name ), str( repository.status ), str( e ) )
return status, message
# Purge any "orphan" repository_dependency records associated with the repository, but not with any
# repository_repository_dependency_association records.
for orphan_repository_dependency in \
sa_session.query( self.app.install_model.RepositoryDependency ) \
.filter( self.app.install_model.RepositoryDependency.table.c.tool_shed_repository_id == repository.id ):
# Purge any repository_repository_dependency_association records whose repository_dependency_id is
# the id of the orphan repository_dependency record.
for orphan_rrda in \
sa_session.query( self.app.install_model.RepositoryRepositoryDependencyAssociation ) \
.filter( self.app.install_model.RepositoryRepositoryDependencyAssociation.table.c.repository_dependency_id == orphan_repository_dependency.id ):
try:
sa_session.delete( orphan_rrda )
sa_session.flush()
purged_orphan_repository_repository_dependency_association_records += 1
except Exception, e:
status = 'error'
message = 'Error attempting to purge repository_repository_dependency_association records associated with '
message += 'an orphan repository_dependency record for the repository named %s with status %s: %s.' % \
( str( repository.name ), str( repository.status ), str( e ) )
return status, message
try:
sa_session.delete( orphan_repository_dependency )
sa_session.flush()
purged_orphan_repository_dependency_records += 1
except Exception, e:
status = 'error'
message = 'Error attempting to purge orphan repository_dependency records for the repository named %s with status %s: %s.' % \
( str( repository.name ), str( repository.status ), str( e ) )
return status, message
# Purge the repository.
sa_session.delete( repository )
sa_session.flush()
message = 'The repository named <b>%s</b> with status <b>%s</b> has been purged.<br/>' % \
( str( repository.name ), str( repository.status ) )
message += 'Total associated tool_version records purged: %d<br/>' % purged_tool_versions
message += 'Total associated tool_dependency records purged: %d<br/>' % purged_tool_dependencies
message += 'Total associated repository_repository_dependency_association records purged: %d<br/>' % purged_required_repositories
message += 'Total associated orphan repository_repository_dependency_association records purged: %d<br/>' % \
purged_orphan_repository_repository_dependency_association_records
message += 'Total associated orphan repository_dependency records purged: %d<br/>' % purged_orphan_repository_dependency_records
else:
status = 'error'
message = 'A repository must have the status <b>New</b> in order to be purged. This repository has '
message += ' the status %s.' % str( repository.status )
return status, message
def remove_entry_from_installed_repository_dependencies_of_installed_repositories( self, repository ):
"""
Remove an entry from self.installed_repository_dependencies_of_installed_repositories. A side-effect of this method
is removal of appropriate value items from self.installed_dependent_repositories_of_installed_repositories.
"""
# Remove tuples defining this repository from value lists in self.installed_dependent_repositories_of_installed_repositories.
repository_tup = self.get_repository_tuple_for_installed_repository_manager( repository )
tool_shed, name, owner, installed_changeset_revision = repository_tup
altered_installed_dependent_repositories_of_installed_repositories = {}
for r_tup, v_tups in self.installed_dependent_repositories_of_installed_repositories.items():
if repository_tup in v_tups:
debug_msg = "Removing entry for revision %s of repository %s owned by %s " % \
( installed_changeset_revision, name, owner )
r_tool_shed, r_name, r_owner, r_installed_changeset_revision = r_tup
debug_msg += "from the dependent list for revision %s of repository %s owned by %s " % \
( r_installed_changeset_revision, r_name, r_owner )
debug_msg += "in installed_repository_dependencies_of_installed_repositories."
log.debug( debug_msg )
v_tups.remove( repository_tup )
altered_installed_dependent_repositories_of_installed_repositories[ r_tup ] = v_tups
self.installed_dependent_repositories_of_installed_repositories = \
altered_installed_dependent_repositories_of_installed_repositories
# Remove this repository's entry from self.installed_repository_dependencies_of_installed_repositories.
if repository_tup in self.installed_repository_dependencies_of_installed_repositories:
debug_msg = "Removing entry for revision %s of repository %s owned by %s " % ( installed_changeset_revision, name, owner )
debug_msg += "from installed_repository_dependencies_of_installed_repositories."
log.debug( debug_msg )
del self.installed_repository_dependencies_of_installed_repositories[ repository_tup ]
def remove_entry_from_installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies( self, tool_dependency ):
"""Remove an entry from self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies."""
tool_dependency_tup = self.get_tool_dependency_tuple_for_installed_repository_manager( tool_dependency )
if tool_dependency_tup in self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies:
tool_shed_repository_id, name, version, type = tool_dependency_tup
debug_msg = "Removing entry for version %s of %s %s " % ( version, type, name )
debug_msg += "from installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies."
log.debug( debug_msg )
del self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies[ tool_dependency_tup ]
def remove_entry_from_installed_tool_dependencies_of_installed_repositories( self, repository ):
"""Remove an entry from self.installed_tool_dependencies_of_installed_repositories."""
repository_tup = self.get_repository_tuple_for_installed_repository_manager( repository )
if repository_tup in self.installed_tool_dependencies_of_installed_repositories:
tool_shed, name, owner, installed_changeset_revision = repository_tup
debug_msg = "Removing entry for revision %s of repository %s owned by %s " % ( installed_changeset_revision, name, owner )
debug_msg += "from installed_tool_dependencies_of_installed_repositories."
log.debug( debug_msg )
del self.installed_tool_dependencies_of_installed_repositories[ repository_tup ]
def remove_entry_from_repository_dependencies_of_installed_repositories( self, repository ):
"""Remove an entry from self.repository_dependencies_of_installed_repositories."""
repository_tup = self.get_repository_tuple_for_installed_repository_manager( repository )
if repository_tup in self.repository_dependencies_of_installed_repositories:
tool_shed, name, owner, installed_changeset_revision = repository_tup
debug_msg = "Removing entry for revision %s of repository %s owned by %s " % ( installed_changeset_revision, name, owner )
debug_msg += "from repository_dependencies_of_installed_repositories."
log.debug( debug_msg )
del self.repository_dependencies_of_installed_repositories[ repository_tup ]
def remove_entry_from_runtime_tool_dependencies_of_installed_tool_dependencies( self, tool_dependency ):
"""Remove an entry from self.runtime_tool_dependencies_of_installed_tool_dependencies."""
tool_dependency_tup = self.get_tool_dependency_tuple_for_installed_repository_manager( tool_dependency )
if tool_dependency_tup in self.runtime_tool_dependencies_of_installed_tool_dependencies:
tool_shed_repository_id, name, version, type = tool_dependency_tup
debug_msg = "Removing entry for version %s of %s %s from runtime_tool_dependencies_of_installed_tool_dependencies." % \
( version, type, name )
log.debug( debug_msg )
del self.runtime_tool_dependencies_of_installed_tool_dependencies[ tool_dependency_tup ]
def remove_entry_from_tool_dependencies_of_installed_repositories( self, repository ):
"""Remove an entry from self.tool_dependencies_of_installed_repositories."""
repository_tup = self.get_repository_tuple_for_installed_repository_manager( repository )
if repository_tup in self.tool_dependencies_of_installed_repositories:
tool_shed, name, owner, installed_changeset_revision = repository_tup
debug_msg = "Removing entry for revision %s of repository %s owned by %s from tool_dependencies_of_installed_repositories." % \
( installed_changeset_revision, name, owner )
log.debug( debug_msg )
del self.tool_dependencies_of_installed_repositories[ repository_tup ]
def repository_dependency_needed_only_for_compiling_tool_dependency( self, repository, repository_dependency ):
for rd_tup in repository.tuples_of_repository_dependencies_needed_for_compiling_td:
tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = rd_tup
# TODO: we may discover that we need to check more than just installed_changeset_revision and changeset_revision here, in which
# case we'll need to contact the tool shed to get the list of all possible changeset_revisions.
cleaned_tool_shed = common_util.remove_protocol_and_port_from_tool_shed_url( tool_shed )
cleaned_repository_dependency_tool_shed = \
common_util.remove_protocol_and_port_from_tool_shed_url( str( repository_dependency.tool_shed ) )
if cleaned_repository_dependency_tool_shed == cleaned_tool_shed and \
repository_dependency.name == name and \
repository_dependency.owner == owner and \
( repository_dependency.installed_changeset_revision == changeset_revision or \
repository_dependency.changeset_revision == changeset_revision ):
return True
return False
def set_only_if_compiling_contained_td( self, repository, required_repository ):
"""
Return True if the received required_repository is only needed to compile a tool
dependency defined for the received repository.
"""
# This method is called only from Galaxy when rendering repository dependencies
# for an installed tool shed repository.
# TODO: Do we need to check more than changeset_revision here?
required_repository_tup = [ required_repository.tool_shed, \
required_repository.name, \
required_repository.owner, \
required_repository.changeset_revision ]
for tup in repository.tuples_of_repository_dependencies_needed_for_compiling_td:
partial_tup = tup[ 0:4 ]
if partial_tup == required_repository_tup:
return 'True'
return 'False'
def set_prior_installation_required( self, repository, required_repository ):
"""
Return True if the received required_repository must be installed before the
received repository.
"""
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( self.app,
str( required_repository.tool_shed ) )
required_repository_tup = [ tool_shed_url,
str( required_repository.name ),
str( required_repository.owner ),
str( required_repository.changeset_revision ) ]
# Get the list of repository dependency tuples associated with the received repository
# where prior_installation_required is True.
required_rd_tups_that_must_be_installed = repository.requires_prior_installation_of
for required_rd_tup in required_rd_tups_that_must_be_installed:
# Repository dependency tuples in metadata include a prior_installation_required value,
# so strip it for comparision.
partial_required_rd_tup = required_rd_tup[ 0:4 ]
if partial_required_rd_tup == required_repository_tup:
# Return the string value of prior_installation_required, which defaults to 'False'.
return str( required_rd_tup[ 4 ] )
return 'False'
def update_existing_tool_dependency( self, repository, original_dependency_dict, new_dependencies_dict ):
"""
Update an exsiting tool dependency whose definition was updated in a change set
pulled by a Galaxy administrator when getting updates to an installed tool shed
repository. The original_dependency_dict is a single tool dependency definition,
an example of which is::
{"name": "bwa",
"readme": "\\nCompiling BWA requires zlib and libpthread to be present on your system.\\n ",
"type": "package",
"version": "0.6.2"}
The new_dependencies_dict is the dictionary generated by the metadata_util.generate_tool_dependency_metadata method.
"""
new_tool_dependency = None
original_name = original_dependency_dict[ 'name' ]
original_type = original_dependency_dict[ 'type' ]
original_version = original_dependency_dict[ 'version' ]
# Locate the appropriate tool_dependency associated with the repository.
tool_dependency = None
for tool_dependency in repository.tool_dependencies:
if tool_dependency.name == original_name and \
tool_dependency.type == original_type and \
tool_dependency.version == original_version:
break
if tool_dependency and tool_dependency.can_update:
dependency_install_dir = tool_dependency.installation_directory( self.app )
removed_from_disk, error_message = \
tool_dependency_util.remove_tool_dependency_installation_directory( dependency_install_dir )
if removed_from_disk:
context = self.app.install_model.context
new_dependency_name = None
new_dependency_type = None
new_dependency_version = None
for new_dependency_key, new_dependency_val_dict in new_dependencies_dict.items():
# Match on name only, hopefully this will be enough!
if original_name == new_dependency_val_dict[ 'name' ]:
new_dependency_name = new_dependency_val_dict[ 'name' ]
new_dependency_type = new_dependency_val_dict[ 'type' ]
new_dependency_version = new_dependency_val_dict[ 'version' ]
break
if new_dependency_name and new_dependency_type and new_dependency_version:
# Update all attributes of the tool_dependency record in the database.
log.debug( "Updating version %s of tool dependency %s %s to have new version %s and type %s." % \
( str( tool_dependency.version ),
str( tool_dependency.type ),
str( tool_dependency.name ),
str( new_dependency_version ),
str( new_dependency_type ) ) )
tool_dependency.type = new_dependency_type
tool_dependency.version = new_dependency_version
tool_dependency.status = self.app.install_model.ToolDependency.installation_status.UNINSTALLED
tool_dependency.error_message = None
context.add( tool_dependency )
context.flush()
new_tool_dependency = tool_dependency
else:
# We have no new tool dependency definition based on a matching dependency name, so remove
# the existing tool dependency record from the database.
log.debug( "Deleting version %s of tool dependency %s %s from the database since it is no longer defined." % \
( str( tool_dependency.version ), str( tool_dependency.type ), str( tool_dependency.name ) ) )
context.delete( tool_dependency )
context.flush()
return new_tool_dependency
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/tool_shed/galaxy_install/installed_repository_manager.py | Python | gpl-3.0 | 77,968 | [
"BWA",
"Galaxy"
] | 2e3f8b29e9ccfd225d7cb4d8bbbc22346441faee5bd515e06adfc35e54e69a22 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@upcloud.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""
Visitor doing some postprocessing on the astroid tree.
Try to resolve definitions (namespace) dictionary, relationship...
"""
from __future__ import print_function
import collections
import os
import traceback
import astroid
from astroid import bases
from astroid import exceptions
from astroid import manager
from astroid import modutils
from astroid import node_classes
from pylint.pyreverse import utils
def _iface_hdlr(_):
"""Handler used by interfaces to handle suspicious interface nodes."""
return True
def _astroid_wrapper(func, modname):
print("parsing %s..." % modname)
try:
return func(modname)
except exceptions.AstroidBuildingException as exc:
print(exc)
except Exception as exc: # pylint: disable=broad-except
traceback.print_exc()
def interfaces(node, herited=True, handler_func=_iface_hdlr):
    """Return an iterator on interfaces implemented by the given class node.

    Interfaces are read from the class's ``__implements__`` attribute; with
    ``herited=False`` an inherited ``__implements__`` is ignored.  Each
    candidate is passed through *handler_func* before being yielded, and
    duplicates are suppressed.  Raises InferenceError when any entry could
    not be inferred.
    """
    # FIXME: what if __implements__ = (MyIFace, MyParent.__implements__)...
    try:
        implements = bases.Instance(node).getattr("__implements__")[0]
    except exceptions.NotFoundError:
        return
    if not herited and implements.frame() is not node:
        return
    found = set()
    missing = False
    for iface in node_classes.unpack_infer(implements):
        if iface is astroid.Uninferable:
            # at least one entry was uninferable; remember to signal it
            missing = True
            continue
        if iface not in found and handler_func(iface):
            found.add(iface)
            yield iface
    if missing:
        raise exceptions.InferenceError()
class IdGeneratorMixIn:
    """Mixin providing sequential integer uid generation."""

    def __init__(self, start_value=0):
        self.id_count = start_value

    def init_counter(self, start_value=0):
        """Reset the id counter to *start_value*."""
        self.id_count = start_value

    def generate_id(self):
        """Advance the counter and return the freshly minted identifier."""
        self.id_count = self.id_count + 1
        return self.id_count
class Linker(IdGeneratorMixIn, utils.LocalsVisitor):
    """Walk on the project tree and resolve relationships.

    According to options the following attributes may be
    added to visited nodes:

    * uid,
      a unique identifier for the node (on astroid.Project, astroid.Module,
      astroid.Class and astroid.locals_type). Only if the linker
      has been instantiated with tag=True parameter (False by default).

    * locals_type,
      a mapping from locals names to their bounded value, which may be a
      constant like a string or an integer, or an astroid node
      (on astroid.Module, astroid.Class and astroid.Function).

    * instance_attrs_type
      as locals_type but for klass member attributes (only on astroid.Class)

    * implements,
      list of implemented interface _objects_ (only on astroid.Class nodes)
    """

    def __init__(self, project, inherited_interfaces=0, tag=False):
        IdGeneratorMixIn.__init__(self)
        utils.LocalsVisitor.__init__(self)
        # take inherited interface in consideration or not
        self.inherited_interfaces = inherited_interfaces
        # tag nodes or not
        self.tag = tag
        # visited project
        self.project = project

    def visit_project(self, node):
        """visit a pyreverse.utils.Project node

        * optionally tag the node with a unique id
        """
        if self.tag:
            node.uid = self.generate_id()
        for module in node.modules:
            self.visit(module)

    def visit_package(self, node):
        """visit an astroid.Package node

        * optionally tag the node with a unique id
        """
        if self.tag:
            node.uid = self.generate_id()
        for subelmt in node.values():
            self.visit(subelmt)

    def visit_module(self, node):
        """visit an astroid.Module node

        * set the locals_type mapping
        * set the depends mapping
        * optionally tag the node with a unique id
        """
        # a node carrying locals_type has already been processed
        if hasattr(node, "locals_type"):
            return
        node.locals_type = collections.defaultdict(list)
        node.depends = []
        if self.tag:
            node.uid = self.generate_id()

    def visit_classdef(self, node):
        """visit an astroid.Class node

        * set the locals_type and instance_attrs_type mappings
        * set the implements list and build it
        * optionally tag the node with a unique id
        """
        if hasattr(node, "locals_type"):
            return
        node.locals_type = collections.defaultdict(list)
        if self.tag:
            node.uid = self.generate_id()
        # resolve ancestors: record this class as a specialization of each base
        for baseobj in node.ancestors(recurs=False):
            specializations = getattr(baseobj, "specializations", [])
            specializations.append(node)
            baseobj.specializations = specializations
        # resolve instance attributes
        node.instance_attrs_type = collections.defaultdict(list)
        for assignattrs in node.instance_attrs.values():
            for assignattr in assignattrs:
                self.handle_assignattr_type(assignattr, node)
        # resolve implemented interface
        try:
            node.implements = list(interfaces(node, self.inherited_interfaces))
        except astroid.InferenceError:
            node.implements = ()

    def visit_functiondef(self, node):
        """visit an astroid.Function node

        * set the locals_type mapping
        * optionally tag the node with a unique id
        """
        if hasattr(node, "locals_type"):
            return
        node.locals_type = collections.defaultdict(list)
        if self.tag:
            node.uid = self.generate_id()

    # legacy aliases kept for callers using the link_* names
    link_project = visit_project
    link_module = visit_module
    link_class = visit_classdef
    link_function = visit_functiondef

    def visit_assignname(self, node):
        """visit an astroid.AssignName node

        handle locals_type
        """
        # avoid double parsing done by different Linkers.visit
        # running over the same project:
        if hasattr(node, "_handled"):
            return
        node._handled = True
        if node.name in node.frame():
            frame = node.frame()
        else:
            # the name has been defined as 'global' in the frame and belongs
            # there.
            frame = node.root()
        try:
            if not hasattr(frame, "locals_type"):
                # If the frame doesn't have a locals_type yet,
                # it means it wasn't yet visited. Visit it now
                # to add what's missing from it.
                if isinstance(frame, astroid.ClassDef):
                    self.visit_classdef(frame)
                elif isinstance(frame, astroid.FunctionDef):
                    self.visit_functiondef(frame)
                else:
                    self.visit_module(frame)

            current = frame.locals_type[node.name]
            values = set(node.infer())
            frame.locals_type[node.name] = list(set(current) | values)
        except astroid.InferenceError:
            pass

    @staticmethod
    def handle_assignattr_type(node, parent):
        """handle an astroid.assignattr node

        handle instance_attrs_type
        """
        try:
            values = set(node.infer())
            current = set(parent.instance_attrs_type[node.attrname])
            parent.instance_attrs_type[node.attrname] = list(current | values)
        except astroid.InferenceError:
            pass

    def visit_import(self, node):
        """visit an astroid.Import node

        resolve module dependencies
        """
        context_file = node.root().file
        for name in node.names:
            relative = modutils.is_relative(name[0], context_file)
            self._imported_module(node, name[0], relative)

    def visit_importfrom(self, node):
        """visit an astroid.ImportFrom node

        resolve module dependencies
        """
        basename = node.modname
        context_file = node.root().file
        if context_file is not None:
            relative = modutils.is_relative(basename, context_file)
        else:
            relative = False
        for name in node.names:
            if name[0] == "*":
                continue
            # analyze dependencies
            fullname = "%s.%s" % (basename, name[0])
            if fullname.find(".") > -1:
                try:
                    # TODO: don't use get_module_part,
                    # missing package precedence
                    fullname = modutils.get_module_part(fullname, context_file)
                except ImportError:
                    continue
            if fullname != basename:
                self._imported_module(node, fullname, relative)

    def compute_module(self, context_name, mod_path):
        """return true if the module should be added to dependencies"""
        package_dir = os.path.dirname(self.project.path)
        if context_name == mod_path:
            # self-import: never a dependency
            return 0
        if modutils.is_standard_module(mod_path, (package_dir,)):
            return 1
        return 0

    def _imported_module(self, node, mod_path, relative):
        """Notify an imported module, used to analyze dependencies"""
        module = node.root()
        context_name = module.name
        if relative:
            # prefix the relative path with the importing package's name
            mod_path = "%s.%s" % (".".join(context_name.split(".")[:-1]), mod_path)
        if self.compute_module(context_name, mod_path):
            # handle dependencies
            if not hasattr(module, "depends"):
                module.depends = []
            mod_paths = module.depends
            if mod_path not in mod_paths:
                mod_paths.append(mod_path)
class Project:
    """A project handle: groups a set of modules / packages."""

    def __init__(self, name=""):
        self.name = name
        self.path = None
        self.modules = []
        self.locals = {}
        # expose the locals mapping through dict-like helpers on the instance
        self.__getitem__ = self.locals.__getitem__
        self.__iter__ = self.locals.__iter__
        self.values = self.locals.values
        self.keys = self.locals.keys
        self.items = self.locals.items

    def add_module(self, node):
        """Register *node* under its module name and keep insertion order."""
        self.locals[node.name] = node
        self.modules.append(node)

    def get_module(self, name):
        """Return the module registered under *name* (KeyError if absent)."""
        return self.locals[name]

    def get_children(self):
        """Return all registered modules, in registration order."""
        return self.modules

    def __repr__(self):
        return "<Project %r at %s (%s modules)>" % (
            self.name,
            id(self),
            len(self.modules),
        )
def project_from_files(
    files, func_wrapper=_astroid_wrapper, project_name="no name", black_list=("CVS",)
):
    """return a Project from a list of files or modules

    Each entry of *files* may be a filesystem path, a directory (its
    ``__init__.py`` is used) or a dotted module name.  Parsing goes through
    *func_wrapper* so failures are reported without aborting; packages are
    recursed into unless their ``__init__`` was given explicitly.
    """
    # build the project representation
    astroid_manager = manager.AstroidManager()
    project = Project(project_name)
    for something in files:
        if not os.path.exists(something):
            # not a path: treat it as a dotted module name
            fpath = modutils.file_from_modpath(something.split("."))
        elif os.path.isdir(something):
            fpath = os.path.join(something, "__init__.py")
        else:
            fpath = something
        ast = func_wrapper(astroid_manager.ast_from_file, fpath)
        if ast is None:
            continue
        # XXX why is first file defining the project.path ?
        project.path = project.path or ast.file
        project.add_module(ast)
        base_name = ast.name
        # recurse in package except if __init__ was explicitly given
        if ast.package and something.find("__init__") == -1:
            # recurse on others packages / modules if this is a package
            for fpath in modutils.get_module_files(
                os.path.dirname(ast.file), black_list
            ):
                ast = func_wrapper(astroid_manager.ast_from_file, fpath)
                if ast is None or ast.name == base_name:
                    continue
                project.add_module(ast)
    return project
| kczapla/pylint | pylint/pyreverse/inspector.py | Python | gpl-2.0 | 12,352 | [
"VisIt"
] | 85f09d7c7be7ffa833f652cefb2c09b5b6322836cde5ec37ddcd313a47315934 |
__author__ = 'mnowotka'
from django.conf import settings
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
from rdkit import RDLogger
from pybel import ob, readstring
from indigoWrapper import *
import requests
from django.utils.http import urlquote
from rdkit.Chem import InchiToInchiKey
from base64 import b64encode
import hashlib
from PIL import Image
import StringIO
import os
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
ob.obErrorLog.SetOutputLevel(0)
INCHI_SPECIAL_CHARS = '={}()-/,;+?.'
#-----------------------------------------------------------------------------------------------------------------------
def getHScore():
    """Compute h-index-like scores over ChEMBL molecules and assays.

    Returns (ChIndex, AssIndex): the largest rank i such that the i-th most
    measured molecule (resp. assay), ordered by linked activity count, still
    has at least i activities.  Either value is None when the threshold is
    never crossed.
    """
    from django.db.models import Count
    from chembl_business_model.models import MoleculeDictionary
    from chembl_core_model.models import Assays
    ChIndex = None
    AssIndex = None
    # molecules ordered by number of linked activities, descending
    order = MoleculeDictionary.objects.filter(downgraded=False).annotate(activities_count=Count("activities")).distinct().order_by('-activities_count')
    for idx, mol in enumerate(order):
        if mol.activities_count < idx:
            ChIndex = idx - 1
            break
    # same ranking, but over assays
    order = Assays.objects.annotate(activities_count=Count("activities")).distinct().order_by('-activities_count')
    for idx, ass in enumerate(order):
        if ass.activities_count < idx:
            AssIndex = idx - 1
            break
    return (ChIndex, AssIndex)
#-----------------------------------------------------------------------------------------------------------------------
def check_indigo_correct(size=1000):
    """Try to load every compound molfile with Indigo; log failures to a file.

    Structures are fetched in chunks of *size* via keyset pagination on the
    primary key so the whole table is never materialised.  Each failing pk
    and its error message are written tab-separated to a NamedTemporaryFile
    that is kept on disk (delete=False) and reported at the end.
    """
    from chembl_business_model.models import CompoundStructures
    from clint.textui import progress
    import tempfile
    f = tempfile.NamedTemporaryFile(delete=False)
    print "saving to file %s" % f.name
    errorCount = 0
    structures = CompoundStructures.objects.all()
    count = structures.count()
    pk = CompoundStructures._meta.pk.name
    for i in progress.bar(range(0, count, size), label="Indigo check "):
        # NOTE(review): i comes from range(0, count, size) and is never
        # negative, so this branch is unreachable -- presumably `i == 0` was
        # intended; as written the pk__gt filter below also skips the very
        # first row. Confirm before relying on full coverage.
        if i < 0:
            chunk = CompoundStructures.objects.order_by(pk)[:size]
        else:
            last_pk = CompoundStructures.objects.order_by(pk).only(pk).values_list(pk)[i][0]
            chunk = CompoundStructures.objects.order_by(pk).filter(pk__gt=last_pk)[:size]
        for structure in chunk:
            try:
                indigoObj.loadMolecule(str(structure.molfile))
            except Exception as e:
                f.write('%s\t%s\n' % (structure.pk, str(e)))
                errorCount += 1
    f.close()
    print "%s errors saved to %s" % (str(errorCount), f.name)
#-----------------------------------------------------------------------------------------------------------------------
def check_Activities():
    """Scan the Activities table for rows that Oracle cannot materialise.

    Iterates the table ordered by pk, excluding everything at or below the
    highest known-bad pk; whenever cx_Oracle raises, the pk after the last
    successfully read row is recorded and the scan restarts.  Prints the
    accumulated exclusion list.
    """
    from chembl_business_model.models import Activities
    import cx_Oracle
    run = True
    excludes = [0]
    while run:
        try:
            for obj in Activities.objects.order_by(Activities._meta.pk.name).exclude(pk__lte=max(excludes)).iterator():
                pass
        except cx_Oracle.DatabaseError:
            # heuristically blame the row after the last readable one;
            # bump by one more if we already tried it
            problem = obj.pk+1
            if problem in excludes:
                problem += 1
            excludes.append(problem)
            print str(excludes)
            continue
        run = False
    print excludes
#-----------------------------------------------------------------------------------------------------------------------
def smileFromImage(image, path):
    """Run OSRA at *path* over *image* and canonicalise each recognised SMILES."""
    return fromImage(image, path, smileToCanonicalSmile)
#-----------------------------------------------------------------------------------------------------------------------
def molsFromImage(image, path):
    """Run OSRA at *path* over *image* and convert each recognised SMILES to a molblock."""
    return fromImage(image, path, smileToMol)
#-----------------------------------------------------------------------------------------------------------------------
def fromImage(image, path, fun):
    """Run the OSRA binary at *path* over *image* bytes, mapping *fun* over results.

    The image is written to a temp file (passed as the input path argument)
    and also piped to stdin.  Returns a list with one fun(smiles) result per
    non-empty line of OSRA's SMILES output.
    """
    from subprocess import PIPE, Popen
    import tempfile
    fd, fpath = tempfile.mkstemp()
    os.write(fd, image)
    os.close(fd)
    # -ij: NOTE(review): OSRA flag meaning assumed from usage; -f smi selects
    # SMILES output format
    arguments = [path, '-ij', '-f', 'smi', fpath]
    p = Popen(arguments, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    a, err = p.communicate(input=image)
    os.remove(fpath)
    # filter(bool, ...) already drops empty lines, so the lambda's None branch
    # can never trigger
    return map(lambda x : fun(x) if x else None, filter(bool,a.split('\n')))
#-----------------------------------------------------------------------------------------------------------------------
def smileToMol(smile):
    """Convert a SMILES string to an MDL molblock with computed 2D coordinates.

    Returns None when RDKit cannot parse the SMILES.
    """
    mol = Chem.MolFromSmiles(smile)
    if not mol:
        return None
    AllChem.Compute2DCoords(mol)
    return Chem.MolToMolBlock(mol)
#-----------------------------------------------------------------------------------------------------------------------
def nameToMol(name):
    """Resolve a chemical *name* to an MDL molblock via the OPSIN web service.

    Returns None when OPSIN cannot parse the name (non-200 response) or when
    the returned SMILES cannot be parsed by RDKit.
    """
    res = requests.get(settings.OPSIN_URL + name + '.smi', timeout=60)
    if res.status_code != 200:
        return None
    return smileToMol(str(res.text))
#-----------------------------------------------------------------------------------------------------------------------
def smilesFromMol(mol):
    """Return the SMILES string for an MDL molblock."""
    return Chem.MolToSmiles(Chem.MolFromMolBlock(mol))
#-----------------------------------------------------------------------------------------------------------------------
def smileToCanonicalSmile(smile):
    """Canonicalise a SMILES string (isomeric form).

    Returns the input unchanged when RDKit cannot parse it.
    """
    mol = Chem.MolFromSmiles(smile)
    if mol is None:
        return smile
    return Chem.MolToSmiles(mol, True)
#-----------------------------------------------------------------------------------------------------------------------
def jsonFromSmiles(smile, size):
    """Render *smile* as drawing JSON at the given *size*.

    NOTE(review): Draw.MolToJSON is not part of stock RDKit -- presumably a
    local patch/extension; confirm which RDKit build is in use.
    """
    mol = Chem.MolFromSmiles(smile)
    return Draw.MolToJSON(mol, size)
#-----------------------------------------------------------------------------------------------------------------------
def jsonFromMol(mol, size):
    """Render an MDL molblock as drawing JSON at the given *size*.

    NOTE(review): Draw.MolToJSON is not part of stock RDKit -- presumably a
    local patch/extension; confirm which RDKit build is in use.
    """
    molecule = Chem.MolFromMolBlock(mol)
    return Draw.MolToJSON(molecule, size)
#-----------------------------------------------------------------------------------------------------------------------
def inchiFromPipe(molfile, path):
    """Compute the standard InChI for *molfile* by piping it through the
    official InChI binary at *path* (stdin/stdout mode, no AuxInfo).

    NOTE(review): the magic slice [13:-1] strips an assumed fixed-length
    stdout prefix plus the trailing newline -- confirm against the binary's
    -STDIO output format before changing versions.
    """
    from subprocess import PIPE, Popen
    p = Popen([path, "-STDIO", "-AuxNone"], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    a = p.communicate(input=str(molfile))
    return a[0][13:-1]
#-----------------------------------------------------------------------------------------------------------------------
def iterateModelRecords(modelClass, function, divFactor=300):
    """Apply *function* to every record of *modelClass*, fetched in slices.

    The queryset is split into ``divFactor`` roughly equal slices so a huge
    table is never materialised at once.  The final slice runs to the end of
    the table: the original code set ``high = all - 1`` even though slicing
    already excludes ``high``, silently skipping the last record.
    """
    total = modelClass.objects.all().count()
    # floor division keeps the slice bounds integral under Python 2 and 3
    step = total // divFactor
    for i in range(divFactor):
        low = i * step
        high = (i + 1) * step
        if i == (divFactor - 1):
            # last slice absorbs the remainder and includes the final record
            high = total
        for record in modelClass.objects.all()[low:high]:
            function(record)
#-----------------------------------------------------------------------------------------------------------------------
def iterateNModelRecords(modelClass, function, N):
    """Apply *function* to each of the first *N* records of *modelClass*."""
    first_n = modelClass.objects.all()[0:N]
    for record in first_n:
        function(record)
#-----------------------------------------------------------------------------------------------------------------------
def checkInchiBinary(struct, version):
    """Recompute *struct*'s InChI with the binary for *version*; record mismatches.

    Skips structures missing any of inchi/key/molfile.  A mismatch creates an
    InchiErrors row tagged with the binary version string.
    """
    from chembl_business_model.models import InchiErrors
    if not struct.standard_inchi or not struct.standard_inchi_key or not struct.molfile:
        return
    if struct.standard_inchi != inchiFromPipe(struct.molfile, settings.INCHI_BINARIES_LOCATION[version]):
        error = InchiErrors(error_type=version, structure=struct)
        error.save()
#-----------------------------------------------------------------------------------------------------------------------
def checkOSRA(molecule):
    """Visually compare OSRA's reading of a molecule's PNG with its stored SMILES.

    Displays the 500px depiction and returns (stored_canonical_smiles,
    canonicalised_osra_smiles) for manual comparison.
    """
    img = molecule.compoundimages.png_500
    im = Image.open(StringIO.StringIO(molecule.compoundimages.png_500))
    canonical_smiles = molecule.compoundstructures.canonical_smiles
    # NOTE(review): smileFromImage is defined with two parameters
    # (image, path); this call passes a third argument and would raise
    # TypeError -- confirm the intended helper signature.
    smile = smileFromImage(img, settings.OSRA_BINARIES_LOCATION['2.0.0'], canonical_smiles)
    im.show()
    return canonical_smiles, Chem.MolToSmiles(Chem.MolFromSmiles(smile[0]), True)
#-----------------------------------------------------------------------------------------------------------------------
def checkImage(compoundImage):
    """Validate both PNG renditions of a compound image; record defects.

    For each of the thumbnail ('tomb', expected 128x128) and regular ('reg',
    expected 500x500) images: a corrupt file records an ImageErrors row of
    that type, a wrong size records a '<type> size' row.  Skips records
    missing either image.
    """
    from chembl_business_model.models import ImageErrors
    if not compoundImage.png or not compoundImage.png_500:
        return
    im = Image.open(StringIO.StringIO(compoundImage.png))
    try:
        im.verify()
        if im.size != (128, 128):
            error = ImageErrors(error_type='tomb size', image=compoundImage)
            error.save()
    except Exception as e:
        # verify() raised: the thumbnail is corrupt
        error = ImageErrors(error_type='tomb', image=compoundImage)
        error.save()
    im = Image.open(StringIO.StringIO(compoundImage.png_500))
    try:
        im.verify()
        if im.size != (500, 500):
            error = ImageErrors(error_type='reg size', image=compoundImage)
            error.save()
    except Exception as e:
        error = ImageErrors(error_type='reg', image=compoundImage)
        error.save()
#-----------------------------------------------------------------------------------------------------------------------
def getSynonymTypes():
    """Return the distinct synonym type strings, alphabetically ordered."""
    from chembl_business_model.models import MoleculeSynonyms
    return MoleculeSynonyms.objects.all().values_list('syn_type', flat=True).order_by('syn_type').distinct()
#-----------------------------------------------------------------------------------------------------------------------
def getImage(mol=False, molregno=False):
    """Return the base64-encoded 500px PNG of a molecule.

    Looked up by molfile (*mol*, matched on its InChIKey via structure_key)
    when given, otherwise by *molregno*.  Raises IndexError when no molecule
    matches.
    """
    from chembl_business_model.models import MoleculeDictionary
    filters = dict()
    if mol:
        key = InchiKeyFromMol(mol)
        filters['structure_key'] = key
    else:
        filters['molregno'] = molregno
    return b64encode(MoleculeDictionary.objects.filter(**filters).values_list('compoundimages__png_500')[0][0])
#-----------------------------------------------------------------------------------------------------------------------
def checkPybelInchi(struct):
    """Recompute *struct*'s InChI with Open Babel (pybel); record mismatches.

    Skips structures missing any of inchi/key/molfile.  A disagreement
    records an 'open babel 2.3.2' InchiErrors row; a crash during conversion
    records an 'openbabel 2.3.2 runtime' row.
    """
    from chembl_business_model.models import InchiErrors
    if not struct.standard_inchi or not struct.standard_inchi_key or not struct.molfile:
        return
    try:
        mol = readstring('mol', str(struct.molfile))
        inchi = mol.write('inchi')
        if inchi.strip() != struct.standard_inchi:
            error = InchiErrors(error_type='open babel 2.3.2', structure=struct)
            error.save()
    except Exception:
        error = InchiErrors(error_type='openbabel 2.3.2 runtime', structure=struct)
        error.save()
#-----------------------------------------------------------------------------------------------------------------------
def checkIndigoInchi(struct):
    """Recompute *struct*'s InChI with Indigo; record mismatches.

    Skips structures missing any of inchi/key/molfile.  A disagreement
    records an 'indigo 1.1.5.0 linux32' InchiErrors row; a crash records an
    'indigo 1.1.5.0 runtime' row.
    """
    from chembl_business_model.models import InchiErrors
    if not struct.standard_inchi or not struct.standard_inchi_key or not struct.molfile:
        return
    try:
        mol = indigoObj.loadMolecule(str(struct.molfile))
        inchi = indigo_inchiObj.getInchi(mol)
        if inchi != struct.standard_inchi:
            error = InchiErrors(error_type='indigo 1.1.5.0 linux32', structure=struct)
            error.save()
    except Exception:
        error = InchiErrors(error_type='indigo 1.1.5.0 runtime', structure=struct)
        error.save()
#-----------------------------------------------------------------------------------------------------------------------
def checkRDkitInchi(struct):
    """Recompute *struct*'s InChI with RDKit; record mismatches.

    Skips structures missing any of inchi/key/molfile.  An unparsable molfile
    records a 'mol read 1.4' InchiErrors row; an InChI disagreement records a
    '1.04 RD' row.
    """
    from chembl_business_model.models import InchiErrors
    if not struct.standard_inchi or not struct.standard_inchi_key or not struct.molfile:
        return
    m = Chem.MolFromMolBlock(str(struct.molfile))
    if not m:
        error = InchiErrors(error_type='mol read 1.4', structure=struct)
        error.save()
        return
    inchi = Chem.inchi.MolToInchi(m)
    if struct.standard_inchi != inchi:
        error = InchiErrors(error_type='1.04 RD', structure=struct)
        error.save()
#-----------------------------------------------------------------------------------------------------------------------
def tagsFromText(text):
    """Extract candidate tag words and their counts from free text.

    Runs a bag-of-words count over *text* (up to 200 most frequent English
    terms), keeps terms occurring more than once, and drops non-alphabetic
    tokens.  Returns [words, counts] as parallel numpy arrays.
    """
    from sklearn.feature_extraction.text import CountVectorizer
    import numpy as np
    cv = CountVectorizer(min_df=1, charset_error="ignore",
                         stop_words="english", max_features=200)
    counts = cv.fit_transform([text]).toarray().ravel()
    words = np.array(cv.get_feature_names())
    words = words[counts > 1]
    counts = counts[counts > 1]
    # Build the alphabetic mask ONCE from the surviving words and apply it to
    # BOTH arrays.  The original filtered `words` first and then rebuilt the
    # mask from the already-filtered words, producing a wrong-sized mask that
    # misaligned (or crashed on) `counts`.
    alpha_mask = np.array([w.isalpha() for w in words], dtype=bool)
    words = words[alpha_mask]
    counts = counts[alpha_mask]
    #TODO: stemming, words len > 2, remove verbs
    return [words, counts]
#-----------------------------------------------------------------------------------------------------------------------
def entitiesFromText(text):
    """Run OSCAR chemical named-entity recognition over *text* via an in-process JVM.

    Returns a list of unique (surface_name, entity_type, smiles) tuples for
    every entity OSCAR can resolve to a SMILES structure; entities without a
    structure are skipped.
    """
    import jpype
    ret = set()
    jpype.startJVM(settings.JAVA_VIRTUAL_MACHINE_LOCATION, settings.OSCAR_BINARIES_LOCATION)
    try:
        Oscar = jpype.JClass("uk.ac.cam.ch.wwmm.oscar.Oscar")
        FormatType = jpype.JClass("uk.ac.cam.ch.wwmm.oscar.chemnamedict.entities.FormatType")
        oscar = Oscar()
        named_entities = oscar.findAndResolveNamedEntities(text)
        for ne in named_entities:
            smiles = ne.getFirstChemicalStructure(FormatType.SMILES)
            if not smiles:
                continue
            name = ne.getSurface()
            ne_type = ne.getType().toString()
            ret.add((name, ne_type, smiles.getValue()))
    finally:
        # The original called the nonexistent jpype.utilusM(); shut the JVM
        # down with the real API, and do it even when OSCAR raises so the
        # process is not left with a dangling JVM.
        jpype.shutdownJVM()
    return list(ret)
#-----------------------------------------------------------------------------------------------------------------------
def entitiesFromTextNew(text):
    """POST *text* to the remote OSCAR endpoint and return the parsed JSON entities.

    Raises when the service does not answer 200.
    """
    payload = {'text': text, 'filter': 'true'}
    result = requests.post(settings.OSCAR_ENDPOINT, data=payload, timeout=60)
    if result.status_code != 200:
        raise Exception("URL %s has status %s" % (settings.OSCAR_ENDPOINT, result.status_code))
    return result.json()
#-----------------------------------------------------------------------------------------------------------------------
def journalChoices():
    """Return the distinct journal names from Docs, alphabetically ordered."""
    from chembl_core_model.models import Docs
    return Docs.objects.values_list('journal', flat=True).order_by('journal').distinct()
#-----------------------------------------------------------------------------------------------------------------------
def docTypeChoices():
    """Return the distinct document types from Docs, alphabetically ordered."""
    from chembl_core_model.models import Docs
    return Docs.objects.values_list('doc_type', flat=True).order_by('doc_type').distinct()
#-----------------------------------------------------------------------------------------------------------------------
def metaFromDoi(doi):
    """Fetch bibliographic metadata for a publication given a DOI or PubMed id.

    Queries PubMed (Entrez esearch + efetch), parses the XML with
    BeautifulSoup, then fills gaps from the local Docs/JournalArticles tables
    when a matching document exists.  Finally asks the Pipeline Pilot
    CHEMBLLIKE service whether title/abstract look ChEMBL-relevant.

    Returns a dict with keys: journal (incl. pubDate), authors, pubmed, doi,
    title, abstract, doc_id, chembl_like and - when a local doc matched -
    first_page/last_page/pagination.
    """
    from Bio import Entrez
    from BeautifulSoup import BeautifulSoup
    from chembl_business_model.models import JournalArticles, Docs
    doc_id = None
    meta = {'journal':{'pubDate':{}}, 'authors':[]}
    Entrez.email = settings.ADMINS[0][1]
    # resolve the DOI (or pubmed id) to a PubMed id, then fetch the record
    handle = Entrez.esearch(db="pubmed", term=str(doi))
    record = BeautifulSoup(handle.read())
    id = str(record.id.getText())
    handle = Entrez.efetch(db="pubmed", id=id, rettype="gb")
    result = BeautifulSoup(handle.read())
    meta['journal']['volume'] = result.volume.getText() if result.volume else ''
    meta['journal']['issue'] = result.issue.getText() if result.issue else ''
    meta['pubmed'] = id
    meta['doi'] = result.elocationid.getText() if result.elocationid else ''
    meta['title'] = result.articletitle.getText() if result.articletitle else ''
    meta['abstract'] = result.abstracttext.getText() if result.abstracttext else ''
    journal = result.journal
    if journal:
        meta['journal']['issn'] = journal.issn.getText() if journal.issn else ''
        meta['journal']['title'] = journal.title.getText() if journal.title else ''
        meta['journal']['ISOAbbreviation'] = journal.isoabbreviation.getText() if journal.isoabbreviation else ''
        pubdate = journal.pubdate
        if pubdate:
            meta['journal']['pubDate']['year'] = pubdate.year.getText() if pubdate.year else ''
            meta['journal']['pubDate']['month'] = pubdate.month.getText() if pubdate.month else ''
            meta['journal']['pubDate']['day'] = pubdate.day.getText() if pubdate.day else ''
    if result.authorlist:
        for i in result.authorlist.childGenerator():
            if i and str(i).strip():
                author = BeautifulSoup(str(i))
                auth = {}
                # forename is optional in PubMed author records
                if author.forename:
                    auth['forename'] = author.forename.getText()
                auth['lastname'] = author.lastname.getText()
                auth['initials'] = author.initials.getText()
                meta['authors'].append(auth)
    # locate a matching local document: by pubmed id when the input is
    # numeric, otherwise by DOI; fall back to the resolved PubMed id / DOI
    try:
        pubmedId = int(doi)
        print 'searching doc of pubmed_id = %s' % pubmedId
        q = Docs.objects.filter(pubmed_id = pubmedId)
    except ValueError:
        print 'searching doc of doi = %s' % doi
        q = Docs.objects.filter(doi__exact = doi)
    if len(q):
        doc_id = q[0].pk
    else:
        print 'searchuin'
        q = Docs.objects.filter(pubmed_id = int(id))
        if len(q):
            doc_id = q[0].pk
        elif meta.get('doi'):
            q = Docs.objects.filter(doi__exact = meta['doi'])
            if len(q):
                doc_id = q[0].pk
    if doc_id:
        # a local document matched: use it to fill in anything PubMed lacked
        doc = q[0]
        journal = doc.journal
        arts = JournalArticles.objects.filter(pk=doc_id)
        art = None
        if len(arts):
            art = arts[0]
        if not meta['journal']['title']:
            meta['journal']['title'] = journal.title if journal else None
        if not meta['journal']['ISOAbbreviation']:
            meta['journal']['ISOAbbreviation'] = journal.iso_abbreviation if journal else None
        if not meta['journal']['issn']:
            meta['journal']['issn'] = journal.issn_print if journal else None
        if not meta['journal']['issn']:
            meta['journal']['issn'] = journal.issn_electronic if journal else None
        meta['journal']['volume'] = doc.volume
        meta['journal']['issue'] = doc.issue
        if not meta['journal']['pubDate']['year']:
            meta['journal']['pubDate']['year'] = art.year if art else None
        if not meta['journal']['pubDate']['month']:
            meta['journal']['pubDate']['month'] = art.month if art else None
        if not meta['journal']['pubDate']['day']:
            meta['journal']['pubDate']['day'] = art.day if art else None
        meta['journal']['pagination'] = art.pagination if art else None
        meta['first_page'] = doc.first_page
        meta['last_page'] = doc.last_page
        if not meta['title']:
            meta['title'] = doc.title
        if not meta['abstract']:
            meta['abstract'] = doc.abstract
        if not meta['authors']:
            meta['authors'] = doc.authors
    # NOTE(review): indentation reconstructed; doc_id is recorded
    # unconditionally here (None when no local match) -- confirm against the
    # original layout.
    meta['doc_id'] = doc_id
    # ask Pipeline Pilot whether title+abstract look like a ChEMBL paper;
    # best-effort: any failure leaves "No"
    meta['chembl_like'] = "No"
    title = urlquote(meta['title'])
    abstract = urlquote(meta['abstract'])
    url = '%sCHEMBLLIKE/%s/%s' % (settings.PIPLINE_PILOT_ENDPOINT, title, abstract)
    try:
        result = requests.get(url, timeout=60)
        status = result.status_code
        if status != 200:
            pass
        else:
            if result.json()["Prediction"]:
                meta['chembl_like'] = "Yes"
    except:
        pass
    return meta
#-----------------------------------------------------------------------------------------------------------------------
def getStructure(mol):
    """Standardise an MDL molblock to {'InChI', 'InChIKey', 'Canonical_Smiles'}.

    In open-source mode the values are computed locally (InChI binary 1.02,
    RDKit) and any failure yields a partial or empty dict.  Otherwise the
    Pipeline Pilot curation service is used; a non-200 answer raises.
    """
    data = dict()
    if settings.OPEN_SOURCE:
        try:
            inchi = inchiFromPipe(mol, settings.INCHI_BINARIES_LOCATION['1.02'])
            data['InChI'] = inchi
            inchiKey = InchiToInchiKey(inchi)
            data['InChIKey'] = inchiKey
            smiles = smilesFromMol(mol)
            data['Canonical_Smiles'] = smiles
        except:
            # best-effort: keep whatever was filled in before the failure
            pass
    else:
        url = '%scuration' % settings.PIPLINE_PILOT_ENDPOINT
        result = requests.post(url, data=mol, timeout=60)
        status = result.status_code
        if status != 200:
            raise Exception("URL %s has status %s for mol %s" % (url, status, mol))
        data = result.json()
    return data
#-----------------------------------------------------------------------------------------------------------------------
def InchiKeyFromMol(mol):
    """Return the InChIKey for *mol*, or '' when standardisation yields none."""
    structure = getStructure(mol)
    return structure.get('InChIKey', '')
#-----------------------------------------------------------------------------------------------------------------------
def getStatus():
    """Return the backend availability index.

    6 in open-source mode; otherwise the Pipeline Pilot 'alive' endpoint's
    resourceindex.  Any failure (network error, non-200, service down)
    yields 1.
    """
    try:
        if settings.OPEN_SOURCE:
            return 6
        result = requests.get('%salive' % settings.PIPLINE_PILOT_ENDPOINT, timeout=60)
        if result.status_code != 200:
            return 1
        payload = result.json()
        if not payload['up']:
            return 1
        return int(payload['resourceindex'])
    except:
        return 1
#-----------------------------------------------------------------------------------------------------------------------
def ImageFromMolPP(mol, size):
    """Render an MDL molblock to a base64 PNG via the Pipeline Pilot service.

    Raises when the service does not answer 200.
    """
    url = '%sctab2image/%s' % (settings.PIPLINE_PILOT_ENDPOINT, size)
    result = requests.post(url, data=mol, timeout=60)
    status = result.status_code
    if status != 200:
        raise Exception("URL %s has status %s for mol %s" % (url, status, mol))
    return result.json()['b64PNG']
#-----------------------------------------------------------------------------------------------------------------------
def ImageFromMol(mol, size=100):
    """Render an MDL molblock to a base64-encoded size x size PNG.

    Open-source mode draws locally with RDKit; otherwise delegates to the
    Pipeline Pilot service.
    """
    if settings.OPEN_SOURCE:
        molecule = Chem.MolFromMolBlock(str(mol))
        raw = Draw.MolToImage(molecule, size=(size, size))
        output = StringIO.StringIO()
        raw.save(output, 'PNG')
        ret = output.getvalue()
        output.close()
        return b64encode(ret)
    else:
        return ImageFromMolPP(mol, size)
#-----------------------------------------------------------------------------------------------------------------------
def cleanup(molfile, mode = 'cleanup'):
    """Clean up a molfile via the Pipeline Pilot service; returns parsed JSON.

    *mode* selects the service's cleanup variant.  Raises when the service
    does not answer 200.
    """
    #TODO: open source version of this function using RDKit should be implemented as well
    url = '%scleanup/%s' % (settings.PIPLINE_PILOT_ENDPOINT, mode)
    result = requests.post(url, data=molfile, timeout=60)
    status = result.status_code
    if status != 200:
        raise Exception("URL %s has status %s for mol %s" % (url, status, molfile))
    data = result.json()
    return data
#-----------------------------------------------------------------------------------------------------------------------
def compoundSearch(query, max_results = 5):
    """Autocomplete-style compound search.

    Tries, in order: molregno (numeric query), ChEMBL id (exact then fuzzy),
    InChIKey, preferred name, product trade name, compound record name, then
    record key, synonym, and biotherapeutic description.  Each hit is a dict
    {'label': "pref_name (CHEMBLxxx)", 'value': molregno}; at most
    *max_results* are returned.  Returns [] when nothing matches -- the
    original fell off the end returning None, forcing callers to None-check
    an otherwise list-valued API.
    """
    import re
    from haystack.query import SearchQuerySet
    from chembl_business_model.models import MoleculeDictionary
    from chembl_business_model.models import ChemblIdLookup
    from chembl_business_model.models import CompoundStructures
    from chembl_business_model.models import Products
    from chembl_business_model.models import CompoundRecords
    from chembl_business_model.models import MoleculeSynonyms
    from chembl_business_model.models import Biotherapeutics
    if query.upper() == 'CHEMBL': # good luck with that
        return []
    # 1. numeric query -> molregno primary key
    if query.isdigit():
        try:
            q = MoleculeDictionary.objects.get(pk=int(query))
            return [{'label' : "%s (%s)" % (q.pref_name, q.chembl_id), 'value': int(query)}]
        except:
            pass
    # 2. CHEMBLxxx id: exact lookup, falling back to autocomplete
    if query.upper().startswith('CHEMBL'):
        try:
            q = ChemblIdLookup.objects.get(pk=query.upper())
            if q.entity_type == 'COMPOUND':
                mol = MoleculeDictionary.objects.get(chembl_id=query.upper())
                return [{'label' : "%s (%s)" % (mol.pref_name, query.upper()), 'value': mol.molregno}]
        except:
            q = SearchQuerySet().models(ChemblIdLookup).autocomplete(chembl_id=query)
            r = []
            if len(q):
                for chembl in q:
                    if chembl.object.entity_type == 'COMPOUND':
                        mol = MoleculeDictionary.objects.get(chembl_id=chembl.object.chembl_id)
                        r.append({'label' : "%s (%s)" % (mol.pref_name, query), 'value': mol.molregno})
            if len(r):
                return r[:max_results]
    # 3. InChIKey shape: 27 uppercase chars with dashes at positions 14 and 25
    if len(query) == 27 and query[14] == '-' and query[25] == '-' and re.match('^([0-9A-Z\-]+)$',query):
        try:
            q = CompoundStructures.objects.get(standard_inchi_key=query).molecule
            return [{'label' : "%s (%s)" % (q.pref_name, q.chembl_id), 'value': q.molregno}]
        except:
            pass
    # 4. preferred name
    q = SearchQuerySet().models(MoleculeDictionary).autocomplete(pref_name=query)
    if len(q):
        r = []
        for mol in q:
            r.append({'label' : "%s (%s)" % (mol.object.pref_name, mol.object.chembl_id), 'value': mol.object.molregno})
        return r[:max_results]
    # 5. product trade name (a product may map to several molecules)
    q = SearchQuerySet().models(Products).autocomplete(trade_name=query)
    if len(q):
        r = []
        for prod in q:
            mols = prod.object.moleculedictionary_set
            if mols.count():
                for mol in mols.all():
                    r.append({'label' : "%s (%s)" % (mol.pref_name, mol.chembl_id), 'value': mol.molregno})
        if len(r):
            return r[:max_results]
    # 6. compound record name
    q = SearchQuerySet().models(CompoundRecords).autocomplete(compound_name=query)
    if len(q):
        r = []
        for record in q:
            mol = record.object.molecule
            r.append({'label' : "%s (%s)" % (mol.pref_name, mol.chembl_id), 'value': mol.molregno})
        return r[:max_results]
    # 7. compound record key
    q = SearchQuerySet().models(CompoundRecords).autocomplete(compound_key=query)
    if len(q):
        r = []
        for record in q:
            mol = record.object.molecule
            r.append({'label' : "%s (%s)" % (mol.pref_name, mol.chembl_id), 'value': mol.molregno})
        return r[:max_results]
    # 8. synonyms
    q = SearchQuerySet().models(MoleculeSynonyms).autocomplete(synonyms=query)
    if len(q):
        r = []
        for synonym in q:
            mol = synonym.object.molecule
            r.append({'label' : "%s (%s)" % (mol.pref_name, mol.chembl_id), 'value': mol.molregno})
        return r[:max_results]
    # 9. biotherapeutic description
    q = SearchQuerySet().models(Biotherapeutics).autocomplete(description=query)
    if len(q):
        r = []
        for bio in q:
            mol = bio.object.molecule
            r.append({'label' : "%s (%s)" % (mol.pref_name, mol.chembl_id), 'value': mol.molregno})
        return r[:max_results]
    # explicit empty result instead of an implicit None
    return []
#-----------------------------------------------------------------------------------------------------------------------
def md5Checksum(fh):
    """Return the hex MD5 digest of the entire contents of file handle *fh*.

    The handle is rewound to the beginning first and read in 8 KiB chunks so
    arbitrarily large files can be digested without loading them into memory.
    """
    fh.seek(0, 0)
    digest = hashlib.md5()
    chunk = fh.read(8192)
    while chunk:
        digest.update(chunk)
        chunk = fh.read(8192)
    return digest.hexdigest()
#-----------------------------------------------------------------------------------------------------------------------
| thesgc/chembiohub_ws | chembl_business_model/utils.py | Python | gpl-3.0 | 26,780 | [
"Open Babel",
"Pybel",
"RDKit"
] | 42ae02584e768760f90519549b98eafb8246221f638596e9863e9acad79de4d9 |
import ast
import hashlib
import json
import unittest
from io import BytesIO
from oceannavigator import create_app
from plotting.scriptGenerator import generatePython, generateR
class TestScriptGenerator(unittest.TestCase):
    """Tests for the API script generator.

    Generated Python scripts are validated by parsing them with ``ast``;
    the (currently skipped) R tests compare an MD5 digest of the output.
    """

    @classmethod
    def setUpClass(cls):
        # Single Flask app shared by every test; only its context is needed.
        cls.app = create_app()

    def _assert_python_parses(self, plotQuery, query_type):
        """Generate a Python script for *plotQuery* and check that it is
        syntactically valid Python (ast.parse raises SyntaxError otherwise)."""
        with self.app.app_context():
            script = generatePython(plotQuery, query_type).read()
            ast.parse(script)

    def _assert_r_md5(self, plotQuery, expectedHash):
        """Generate an R script for *plotQuery* and compare its MD5 digest
        against *expectedHash*."""
        with self.app.app_context():
            script = generateR(plotQuery).read()
            digest = hashlib.md5()
            digest.update(script)
            self.assertEqual(digest.hexdigest(), expectedHash)

    def test_generatePython_plot(self):
        plotQuery = '{"area":[{"innerrings":[],"name":"","polygons":[[[50.32977916630952,-54.02923583984376],[49.99194654491231,-41.90032958984374],[43.11512912870705,-41.90032958984374],[43.8801861709303,-54.20501708984374],[50.32977916630952,-54.02923583984376]]]}],"bathymetry":true,"colormap":"default","contour":{"colormap":"default","hatch":false,"legend":true,"levels":"auto","variable":"none"},"dataset":"giops_day","depth":0,"interp":"gaussian","neighbours":10,"projection":"EPSG:3857","quiver":{"colormap":"default","magnitude":"length","variable":"none"},"radius":25,"scale":"-5,30,auto","showarea":true,"time":860,"type":"map","variable":"votemper"}'
        self._assert_python_parses(plotQuery, "PLOT")

    def test_generatePython_csv(self):
        plotQuery = '{"area":[{"innerrings":[],"name":"","polygons":[[[47.59676544537632,-63.322752995466445],[47.48923059927762,-62.7459688212614],[46.71147616396766,-62.92175066482866],[47.07117494555064,-63.848111528746855],[47.59676544537632,-63.322752995466445]]]}],"bathymetry":true,"colormap":"default","contour":{"colormap":"default","hatch":false,"legend":true,"levels":"auto","variable":"none"},"dataset":"giops_day","depth":0,"interp":"gaussian","neighbours":10,"projection":"EPSG:3857","quiver":{"colormap":"default","magnitude":"length","variable":"none"},"radius":25,"scale":"10.672692871093773,21.980279541015648,auto","showarea":true,"time":712,"type":"map","variable":"votemper"}'
        self._assert_python_parses(plotQuery, "CSV")

    def test_generatePython_netcdf(self):
        plotQuery = '{"dataset_name":"giops_day","max_range":"47.59676544537632,-62.7459688212614","min_range":"46.71147616396766,-63.848111528746855","output_format":"NETCDF4","should_zip":0,"time":"712,716","user_grid":0,"variables":"vice,votemper,vozocrtx,vomecrty"}'
        self._assert_python_parses(plotQuery, "SUBSET")

    @unittest.skip(
        "Test is broken: these should not have been comparing hashes, but the entire output."
    )
    def test_generateR_plot(self):
        plotQuery = '{"area":[{"innerrings":[],"name":"","polygons":[[[57.45537472457255,-53.32611083984376],[54.96545403664038,-35.91699909988563],[37.492919230762624,-40.57520222488561],[39.21584183791197,-60.08692097488562],[57.45537472457255,-53.32611083984376]]]}],"bathymetry":true,"colormap":"default","contour":{"colormap":"default","hatch":false,"legend":true,"levels":"auto","variable":"none"},"dataset":"giops_day","depth":0,"interp":"gaussian","neighbours":10,"projection":"EPSG:3857","quiver":{"colormap":"default","magnitude":"length","variable":"none"},"radius":25,"scale":"-5,30,auto","showarea":true,"time":862,"type":"map","variable":"votemper"}'
        self._assert_r_md5(plotQuery, "7442e1b8ac4b92d9a8aafa7edf6a8400")

    @unittest.skip(
        "Test is broken: these should not have been comparing hashes, but the entire output."
    )
    def test_generateR_csv(self):
        plotQuery = '{"area":[{"innerrings":[],"name":"","polygons":[[[57.45537472457255,-53.32611083984376],[54.96545403664038,-35.91699909988563],[37.492919230762624,-40.57520222488561],[39.21584183791197,-60.08692097488562],[57.45537472457255,-53.32611083984376]]]}],"bathymetry":true,"colormap":"default","contour":{"colormap":"default","hatch":false,"legend":true,"levels":"auto","variable":"none"},"dataset":"giops_day","depth":0,"interp":"gaussian","neighbours":10,"projection":"EPSG:3857","quiver":{"colormap":"default","magnitude":"length","variable":"none"},"radius":25,"scale":"-5,30,auto","showarea":true,"time":862,"type":"map","variable":"votemper"}&save&format=csv&size=10x7&dpi=144'
        self._assert_r_md5(plotQuery, "4afa74cd7db4226c78fb7f5e2ae0a22f")

    @unittest.skip(
        "Test is broken: these should not have been comparing hashes, but the entire output."
    )
    def test_generateR_netcdf(self):
        plotQuery = '{"dataset_name":"giops_day","max_range":"57.45537472457255,-35.91699909988563","min_range":"37.492919230762624,-60.08692097488562","output_format":"NETCDF4","should_zip":0,"time":"857,862","user_grid":0,"variables":"vice,votemper,vozocrtx,vomecrty"}'
        self._assert_r_md5(plotQuery, "9c4552b8e34e8856bd8bde64125e7f2d")
| DFO-Ocean-Navigator/Ocean-Data-Map-Project | tests/test_scriptGen.py | Python | gpl-3.0 | 5,458 | [
"Gaussian"
] | d2ee8d7e319849af697ea4539273a31bc8434a20190f17236add2c7dd928101b |
import os, sys, getopt
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
import vtk
from vtk.util.colors import peacock, tomato, red, white, black
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from PeacockActor import PeacockActor
from ClippedActor import ClippedActor
import RendererFactory
class MeshRenderWidget(QtGui.QWidget):
    """Qt widget that renders an Exodus mesh with VTK.

    Provides a 3D view plus controls for per-block visibility, highlighting
    of blocks/sidesets/nodesets, toggling display of mesh edges, and
    clipping the mesh along an axis-aligned plane.
    """

    def __init__(self, tree_widget):
        """Build the VTK view and all control widgets.

        :param tree_widget: input tree whose ``mesh_item_changed`` signal
                            triggers re-rendering of the mesh
        """
        QtGui.QWidget.__init__(self)
        self.tree_widget = tree_widget
        # re-render whenever the mesh item in the input tree changes
        self.tree_widget.mesh_item_changed.connect(self.meshItemChanged)

        self.mesh_file_name = ''

        self.mesh_renderer = None
        # actor maps (id string -> actor) currently displayed; swapped
        # wholesale with their clipped counterparts in _clippingToggled()
        self.current_block_actors = {}
        self.current_sideset_actors = {}
        self.current_nodeset_actors = {}

        self.this_layout = QtGui.QVBoxLayout()
        self.setLayout(self.this_layout)

        self.vtkwidget = QVTKRenderWindowInteractor(self)

        self.renderer = vtk.vtkRenderer()
        # dark-grey to white gradient background
        self.renderer.SetBackground(0.2,0.2,0.2)
        self.renderer.SetBackground2(1,1,1)
        self.renderer.SetGradientBackground(1)
        self.renderer.ResetCamera()

        self.this_layout.addWidget(self.vtkwidget)
        self.this_layout.setStretchFactor(self.vtkwidget, 10)

        self.vtkwidget.setMinimumHeight(300)

        self.vtkwidget.GetRenderWindow().AddRenderer(self.renderer)
        self.interactor = self.vtkwidget.GetRenderWindow().GetInteractor()
        self.interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())

        self.show()
        self.interactor.Initialize()

        self.controls_layout = QtGui.QHBoxLayout()

        self.left_controls_layout = QtGui.QVBoxLayout()

        # checkable list with one entry per mesh block (see _blockViewItemChanged)
        self.block_view_group_box = QtGui.QGroupBox('Show Blocks')
        self.block_view_group_box.setMaximumWidth(150)
        # self.block_view_group_box.setMaximumHeight(200)

        self.block_view_layout = QtGui.QVBoxLayout()
        self.block_view_list = QtGui.QListView()
        self.block_view_model = QtGui.QStandardItemModel()
        self.block_view_model.itemChanged.connect(self._blockViewItemChanged)
        self.block_view_list.setModel(self.block_view_model)
        self.block_view_layout.addWidget(self.block_view_list)

        self.block_view_group_box.setLayout(self.block_view_layout)
        self.left_controls_layout.addWidget(self.block_view_group_box)
        self.controls_layout.addLayout(self.left_controls_layout)

        self.right_controls_layout = QtGui.QVBoxLayout()
        self.controls_layout.addLayout(self.right_controls_layout)

        self.view_mesh_checkbox = QtGui.QCheckBox('View Mesh')
        self.view_mesh_checkbox.setToolTip('Toggle viewing of mesh elements')
        self.view_mesh_checkbox.setCheckState(QtCore.Qt.Checked)
        self.view_mesh_checkbox.stateChanged.connect(self.viewMeshCheckboxChanged)
        self.right_controls_layout.addWidget(self.view_mesh_checkbox)

        # combo boxes to highlight a single block / sideset / nodeset
        self.highlight_group_box = QtGui.QGroupBox('Highlight')
        self.highlight_group_box.setMaximumHeight(70)
        self.highlight_group_box.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
        # self.highlight_group_box.setMaximumWidth(200)
        self.highlight_layout = QtGui.QHBoxLayout()
        self.highlight_group_box.setLayout(self.highlight_layout)
        self.right_controls_layout.addWidget(self.highlight_group_box)

        self.highlight_block_label = QtGui.QLabel('Block:')
        self.highlight_block_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.highlight_block_combo = QtGui.QComboBox()
        # self.highlight_block_combo.setMaximumWidth(50)
        self.highlight_block_combo.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
        self.highlight_block_combo.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
        self.highlight_block_combo.setToolTip('Highlight a block in the mesh')
        self.highlight_block_combo.currentIndexChanged[str].connect(self.showBlockSelected)
        self.highlight_layout.addWidget(self.highlight_block_label)
        self.highlight_layout.addWidget(self.highlight_block_combo)

        self.highlight_sideset_label = QtGui.QLabel('Sideset:')
        self.highlight_sideset_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.highlight_sideset_combo = QtGui.QComboBox()
        # self.highlight_sideset_combo.setMaximumWidth(50)
        self.highlight_sideset_combo.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
        self.highlight_sideset_combo.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
        self.highlight_sideset_combo.setToolTip('Highlight a sideset in the mesh')
        self.highlight_sideset_combo.currentIndexChanged[str].connect(self.showSidesetSelected)
        self.highlight_layout.addWidget(self.highlight_sideset_label)
        self.highlight_layout.addWidget(self.highlight_sideset_combo)

        self.highlight_nodeset_label = QtGui.QLabel('Nodeset:')
        self.highlight_nodeset_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.highlight_nodeset_combo = QtGui.QComboBox()
        # self.highlight_nodeset_combo.setMaximumWidth(50)
        self.highlight_nodeset_combo.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
        self.highlight_nodeset_combo.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
        self.highlight_nodeset_combo.setToolTip('Highlight a nodeset in the mesh')
        self.highlight_nodeset_combo.currentIndexChanged[str].connect(self.showNodesetSelected)
        self.highlight_layout.addWidget(self.highlight_nodeset_label)
        self.highlight_layout.addWidget(self.highlight_nodeset_combo)

        self.highlight_clear = QtGui.QPushButton('Clear')
        self.highlight_clear.setToolTip('Clear highlighting')
        self.highlight_clear.setDisabled(True)
        self.highlight_clear.clicked.connect(self.clearHighlight)
        self.highlight_layout.addWidget(self.highlight_clear)

        # single clip plane shared by all clipped actors
        self.plane = vtk.vtkPlane()
        self.plane.SetOrigin(0, 0, 0)
        self.plane.SetNormal(1, 0, 0)

        self.clip_groupbox = QtGui.QGroupBox("Clip")
        self.clip_groupbox.setToolTip('Toggle clip mode to slice the mesh open along a plane')
        self.clip_groupbox.setCheckable(True)
        self.clip_groupbox.setChecked(False)
        self.clip_groupbox.setMaximumHeight(70)
        self.clip_groupbox.toggled[bool].connect(self._clippingToggled)
        clip_layout = QtGui.QHBoxLayout()

        self.clip_plane_combobox = QtGui.QComboBox()
        self.clip_plane_combobox.setToolTip('Direction of the normal for the clip plane')
        self.clip_plane_combobox.addItem('x')
        self.clip_plane_combobox.addItem('y')
        self.clip_plane_combobox.addItem('z')
        self.clip_plane_combobox.currentIndexChanged[str].connect(self._clipNormalChanged)
        clip_layout.addWidget(self.clip_plane_combobox)

        # slider position 0-100 maps linearly onto the mesh extent along the
        # chosen axis (see _clipSliderMoved)
        self.clip_plane_slider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.clip_plane_slider.setToolTip('Slide to change plane position')
        self.clip_plane_slider.setRange(0, 100)
        self.clip_plane_slider.setSliderPosition(50)
        self.clip_plane_slider.sliderMoved[int].connect(self._clipSliderMoved)
        clip_layout.addWidget(self.clip_plane_slider)
        # vbox->addStretch(1);

        self.clip_groupbox.setLayout(clip_layout)
        self.right_controls_layout.addWidget(self.clip_groupbox)

        self.this_layout.addLayout(self.controls_layout)
        self.this_layout.setStretchFactor(self.controls_layout, 1)

        # overall mesh bounding box per axis, [min, max]; grown in setBounds()
        self.bounds = {}
        self.bounds['x'] = [0.0, 0.0]
        self.bounds['y'] = [0.0, 0.0]
        self.bounds['z'] = [0.0, 0.0]

        # self.draw_edges_checkbox = QtGui.QCheckBox("View Mesh")
        # self.left_controls_layout.addWidget(self.draw_edges_checkbox)

    def clear(self):
        """Empty the highlight combo boxes and hide/forget all current actors."""
        self.highlight_block_combo.clear()
        self.highlight_sideset_combo.clear()
        self.highlight_nodeset_combo.clear()

        for block_actor_name, block_actor in self.current_block_actors.items():
            block_actor.hide()

        for sideset_actor_name, sideset_actor in self.current_sideset_actors.items():
            sideset_actor.hide()

        for nodeset_actor_name, nodeset_actor in self.current_nodeset_actors.items():
            nodeset_actor.hide()

        self.current_block_actors = {}
        self.current_sideset_actors = {}
        self.current_nodeset_actors = {}

    def meshItemChanged(self, item):
        """Rebuild the whole view for a new mesh item from the tree.

        Builds a renderer via RendererFactory from ``item.table_data``,
        repopulates the block list and the highlight combos, recomputes the
        bounds and resets the camera.  Hides the widget when no renderer
        can be built for the item.
        """
        # Disconnect some actions while we fill stuff in
        if self.mesh_renderer:
            self.highlight_block_combo.currentIndexChanged[str].disconnect(self.showBlockSelected)
            self.highlight_sideset_combo.currentIndexChanged[str].disconnect(self.showSidesetSelected)
            self.highlight_nodeset_combo.currentIndexChanged[str].disconnect(self.showNodesetSelected)
        self.clear()

        self.mesh_renderer = RendererFactory.getRenderer(self, item.table_data)

        if self.mesh_renderer:
            self.show()
        else:
            self.hide()
            return

        self.current_block_actors = self.mesh_renderer.block_actors
        self.current_sideset_actors = self.mesh_renderer.sideset_actors
        self.current_nodeset_actors = self.mesh_renderer.nodeset_actors

        # one checkable row per block; label is "id" or "id : name"
        self.block_view_model.clear()
        for block in self.mesh_renderer.blocks:
            block_display_name = str(block)
            if block in self.mesh_renderer.block_id_to_name:
                block_display_name += ' : ' + self.mesh_renderer.block_id_to_name[block]
            item = QtGui.QStandardItem(str(block_display_name))
            item.exodus_block = block
            item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsUserCheckable)
            item.setCheckState(QtCore.Qt.Checked)
            self.block_view_model.appendRow(item)

        for block_actor_name, block_actor in self.current_block_actors.items():
            block_actor.show()
            block_actor.showEdges()

        block_names = []
        for block_actor_id, block_actor in self.current_block_actors.items():
            name = block_actor_id.strip(' ')
            if int(name) in self.mesh_renderer.block_id_to_name:
                name += ' : ' + self.mesh_renderer.block_id_to_name[int(name)]
            block_names.append(name)

        # leading empty combo entry means "no highlight"; rest sorted by numeric id
        self.highlight_block_combo.addItem('')
        for block_actor_name in sorted(block_names, key=lambda name: int(name.split(' ')[0])):
            self.highlight_block_combo.addItem(str(block_actor_name))

        sideset_names = []
        for sideset_actor_id, sideset_actor in self.current_sideset_actors.items():
            sideset_actor.setColor(red)
            name = sideset_actor_id.strip(' ')
            if int(name) in self.mesh_renderer.sideset_id_to_name:
                name += ' : ' + self.mesh_renderer.sideset_id_to_name[int(name)]
            sideset_names.append(name)

        self.highlight_sideset_combo.addItem('')
        for sideset_actor_name in sorted(sideset_names, key=lambda name: int(name.split(' ')[0])):
            self.highlight_sideset_combo.addItem(sideset_actor_name)

        nodeset_names = []
        for nodeset_actor_id, nodeset_actor in self.current_nodeset_actors.items():
            nodeset_actor.setColor(red)
            name = nodeset_actor_id.strip(' ')
            if int(name) in self.mesh_renderer.nodeset_id_to_name:
                name += ' : ' + self.mesh_renderer.nodeset_id_to_name[int(name)]
            nodeset_names.append(name)

        self.highlight_nodeset_combo.addItem('')
        for nodeset_actor_name in sorted(nodeset_names, key=lambda name: int(name.split(' ')[0])):
            self.highlight_nodeset_combo.addItem(nodeset_actor_name)

        self.highlight_block_combo.currentIndexChanged[str].connect(self.showBlockSelected)
        self.highlight_sideset_combo.currentIndexChanged[str].connect(self.showSidesetSelected)
        self.highlight_nodeset_combo.currentIndexChanged[str].connect(self.showNodesetSelected)

        self.setBounds()

        # Avoid z-buffer fighting
        vtk.vtkPolyDataMapper().SetResolveCoincidentTopologyToPolygonOffset()
        self.renderer.ResetCamera()
        self.vtkwidget.repaint()

    def setBounds(self):
        """Grow self.bounds to enclose the bounding boxes of all block actors.

        NOTE(review): the bounds start at [0.0, 0.0] (set in __init__), so a
        mesh lying entirely on one side of an axis keeps 0.0 as one of its
        bounds -- confirm this is intended.
        """
        for actor_name, actor in self.current_block_actors.items():
            current_bounds = actor.getBounds()
            self.bounds['x'][0] = min(self.bounds['x'][0], current_bounds[0])
            self.bounds['x'][1] = max(self.bounds['x'][1], current_bounds[1])
            self.bounds['y'][0] = min(self.bounds['y'][0], current_bounds[2])
            self.bounds['y'][1] = max(self.bounds['y'][1], current_bounds[3])
            self.bounds['z'][0] = min(self.bounds['z'][0], current_bounds[4])
            self.bounds['z'][1] = max(self.bounds['z'][1], current_bounds[5])

    def swapActors(self, current, new):
        """Carry display state from the actors in ``current`` over to the
        matching (same-key) actors in ``new``, hiding the old ones."""
        for old_name, old_actor in current.items():
            new[old_name].sync(old_actor)
            old_actor.hide()

    def _blockViewItemChanged(self, item):
        """Show or hide a block actor when its checkbox in the block list toggles."""
        if item.checkState() == QtCore.Qt.Checked:
            self.current_block_actors[str(item.exodus_block)].show()
        else:
            self.current_block_actors[str(item.exodus_block)].hide()
        self.vtkwidget.repaint()

    def _clippingToggled(self, value):
        """Swap between the normal and clipped actor sets when the Clip
        group box is toggled."""
        if value:
            self.swapActors(self.current_block_actors, self.mesh_renderer.clipped_block_actors)
            self.current_block_actors = self.mesh_renderer.clipped_block_actors
            self.swapActors(self.current_sideset_actors, self.mesh_renderer.clipped_sideset_actors)
            self.current_sideset_actors = self.mesh_renderer.clipped_sideset_actors
            self.swapActors(self.current_nodeset_actors, self.mesh_renderer.clipped_nodeset_actors)
            self.current_nodeset_actors = self.mesh_renderer.clipped_nodeset_actors
            # re-apply the currently selected normal/position to the clip plane
            self._clipNormalChanged(self.clip_plane_combobox.currentText())
        else:
            self.swapActors(self.current_block_actors, self.mesh_renderer.block_actors)
            self.current_block_actors = self.mesh_renderer.block_actors
            self.swapActors(self.current_sideset_actors, self.mesh_renderer.sideset_actors)
            self.current_sideset_actors = self.mesh_renderer.sideset_actors
            self.swapActors(self.current_nodeset_actors, self.mesh_renderer.nodeset_actors)
            self.current_nodeset_actors = self.mesh_renderer.nodeset_actors

        self.vtkwidget.repaint()

    def _clipNormalChanged(self, value):
        """Point the clip plane along axis ``value`` ('x', 'y' or 'z') and
        recenter the position slider."""
        self.plane.SetOrigin(self.bounds['x'][0],
                             self.bounds['y'][0],
                             self.bounds['z'][0])
        if value == 'x':
            self.plane.SetNormal(1, 0, 0)
        elif value == 'y':
            self.plane.SetNormal(0, 1, 0)
        else:
            self.plane.SetNormal(0, 0, 1)

        self.clip_plane_slider.setSliderPosition(50)
        self._clipSliderMoved(50)

    def _clipSliderMoved(self, value):
        """Move the clip plane to slider position ``value`` (0-100) along the
        currently selected axis and update every clipped actor."""
        direction = str(self.clip_plane_combobox.currentText())
        # map the 0-100 slider range linearly onto the mesh extent
        step_size = (self.bounds[direction][1] - self.bounds[direction][0])/100.0
        steps = value
        distance = float(steps)*step_size
        position = self.bounds[direction][0] + distance

        # only move the origin component for the active axis
        old = self.plane.GetOrigin()
        self.plane.SetOrigin(position if direction == 'x' else old[0],
                             position if direction == 'y' else old[1],
                             position if direction == 'z' else old[2])

        for actor_name, actor in self.current_sideset_actors.items():
            actor.movePlane()

        for actor_name, actor in self.current_nodeset_actors.items():
            actor.movePlane()

        for actor_name, actor in self.current_block_actors.items():
            actor.movePlane()

        self.vtkwidget.repaint()

    def viewMeshCheckboxChanged(self, value):
        """Show or hide mesh edges on every actor when the 'View Mesh'
        checkbox changes state."""
        if value == QtCore.Qt.Checked:
            for actor_name, actor in self.current_sideset_actors.items():
                actor.showEdges()
            for actor_name, actor in self.current_nodeset_actors.items():
                actor.showEdges()
            for actor_name, actor in self.current_block_actors.items():
                actor.showEdges()
        else:
            for actor_name, actor in self.current_sideset_actors.items():
                actor.hideEdges()
            for actor_name, actor in self.current_nodeset_actors.items():
                actor.hideEdges()
            for actor_name, actor in self.current_block_actors.items():
                actor.hideEdges()
        self.vtkwidget.repaint()

    def clearBlockComboBox(self):
        """Reset the block combo to the empty entry without firing the
        selection handler (disconnect/reconnect around the change)."""
        self.highlight_block_combo.currentIndexChanged[str].disconnect(self.showBlockSelected)
        self.highlight_block_combo.setCurrentIndex(0)
        self.highlight_block_combo.currentIndexChanged[str].connect(self.showBlockSelected)

    def clearSidesetComboBox(self):
        """Reset the sideset combo to the empty entry without firing the
        selection handler."""
        self.highlight_sideset_combo.currentIndexChanged[str].disconnect(self.showSidesetSelected)
        self.highlight_sideset_combo.setCurrentIndex(0)
        self.highlight_sideset_combo.currentIndexChanged[str].connect(self.showSidesetSelected)

    def clearNodesetComboBox(self):
        """Reset the nodeset combo to the empty entry without firing the
        selection handler."""
        self.highlight_nodeset_combo.currentIndexChanged[str].disconnect(self.showNodesetSelected)
        self.highlight_nodeset_combo.setCurrentIndex(0)
        self.highlight_nodeset_combo.currentIndexChanged[str].connect(self.showNodesetSelected)

    def showBlockSelected(self, block_name):
        """Handle a selection in the block combo; '' clears highlighting."""
        if block_name != '':
            self.clearSidesetComboBox()
            self.clearNodesetComboBox()
            # combo entries look like "<id> : <name>"; the id is the first token
            self.highlightBlock(str(block_name).split(' ')[0])
        else:
            self.clearActors()

    def showSidesetSelected(self, sideset_name):
        """Handle a selection in the sideset combo; '' clears highlighting."""
        if sideset_name != '':
            self.clearBlockComboBox()
            self.clearNodesetComboBox()
            self.highlightBoundary(str(sideset_name).split(' ')[0])
        else:
            self.clearActors()

    def showNodesetSelected(self, nodeset_name):
        """Handle a selection in the nodeset combo; '' clears highlighting."""
        if nodeset_name != '':
            self.clearBlockComboBox()
            self.clearSidesetComboBox()
            self.highlightNodeset(str(nodeset_name).split(' ')[0])
        else:
            self.clearActors()

    def highlightBoundary(self, boundary):
        """Show only the given boundary (sideset or nodeset), looked up by id
        or by name; ``boundary`` may contain several space-separated entries."""
        self.highlight_clear.setDisabled(False)

        # Turn off all sidesets
        for actor_name, actor in self.current_sideset_actors.items():
            actor.hide()

        # Turn off all nodesets
        for actor_name, actor in self.current_nodeset_actors.items():
            actor.hide()

        # Turn solids to only edges... but only if they are visible
        for actor_name, actor in self.current_block_actors.items():
            actor.setColor(black)
            actor.goWireframe()

        boundaries = boundary.strip("'").split(' ')
        for the_boundary in boundaries:
            # try direct id lookup first, then translate a name to an id
            if the_boundary in self.current_sideset_actors:
                self.current_sideset_actors[the_boundary].show()
            elif the_boundary in self.current_nodeset_actors:
                self.current_nodeset_actors[the_boundary].show()
            elif the_boundary in self.mesh_renderer.name_to_sideset_id:
                self.current_sideset_actors[str(self.mesh_renderer.name_to_sideset_id[the_boundary])].show()
            elif the_boundary in self.mesh_renderer.name_to_nodeset_id:
                self.current_nodeset_actors[str(self.mesh_renderer.name_to_nodeset_id[the_boundary])].show()

        self.vtkwidget.repaint()

    def highlightNodeset(self, boundary):
        """Show only the given nodeset(s), looked up by id or by name."""
        self.highlight_clear.setDisabled(False)

        # Turn off all sidesets
        for actor_name, actor in self.current_sideset_actors.items():
            actor.hide()

        # Turn off all nodesets
        for actor_name, actor in self.current_nodeset_actors.items():
            actor.hide()

        # Turn solids to only edges... but only if they are visible
        for actor_name, actor in self.current_block_actors.items():
            actor.setColor(black)
            actor.goWireframe()

        boundaries = boundary.strip("'").split(' ')
        for the_boundary in boundaries:
            if the_boundary in self.current_nodeset_actors:
                self.current_nodeset_actors[the_boundary].show()
            elif the_boundary in self.mesh_renderer.name_to_nodeset_id:
                self.current_nodeset_actors[str(self.mesh_renderer.name_to_nodeset_id[the_boundary])].show()

        self.vtkwidget.repaint()

    def highlightBlock(self, block):
        """Highlight the given block(s) in red (by id or name); all other
        blocks are rendered as black wireframes."""
        self.highlight_clear.setDisabled(False)

        # Turn off all sidesets
        for actor_name, actor in self.current_sideset_actors.items():
            actor.hide()

        # Turn off all nodesets
        for actor_name, actor in self.current_nodeset_actors.items():
            actor.hide()

        # Turn solids to only edges...
        for actor_name, actor in self.current_block_actors.items():
            actor.setColor(black)
            actor.goWireframe()

        blocks = block.strip("'").split(' ')
        for the_block in blocks:
            if the_block in self.current_block_actors:
                self.current_block_actors[the_block].setColor(red)
                self.current_block_actors[the_block].goSolid()
            elif the_block in self.mesh_renderer.name_to_block_id:
                self.current_block_actors[str(self.mesh_renderer.name_to_block_id[the_block])].setColor(red)
                self.current_block_actors[str(self.mesh_renderer.name_to_block_id[the_block])].goSolid()

        self.vtkwidget.repaint()

    def clearActors(self):
        """Remove all highlighting: hide sidesets/nodesets and restore all
        blocks to solid white."""
        # Turn off all sidesets
        for actor_name, actor in self.current_sideset_actors.items():
            actor.hide()

        # Turn off all nodesets
        for actor_name, actor in self.current_nodeset_actors.items():
            actor.hide()

        # Show solids and edges - but only if something is visible
        for actor_name, actor in self.current_block_actors.items():
            actor.setColor(white)
            actor.goSolid()

        self.vtkwidget.repaint()

    def clearHighlight(self):
        """Reset every highlight combo and restore the un-highlighted view."""
        self.highlight_block_combo.setCurrentIndex(0)
        self.highlight_sideset_combo.setCurrentIndex(0)
        self.highlight_nodeset_combo.setCurrentIndex(0)
        self.highlight_clear.setDisabled(True)
        self.clearActors()
| Chuban/moose | gui/vtk/MeshRenderWidget.py | Python | lgpl-2.1 | 22,884 | [
"VTK"
] | 2b75837946ed239401d7fd6adf0215366878b507f1d070aa17dc75428835609e |
##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Unit tests for modules.py.
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
import os
import re
import tempfile
import shutil
from test.framework.utilities import EnhancedTestCase, init_config
from unittest import TestLoader, main
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root, get_software_version, get_software_libdir, modules_tool
# number of modules included for testing purposes;
# test_avail asserts that exactly this many test modules are available
TEST_MODULES_COUNT = 58
class ModulesTest(EnhancedTestCase):
"""Test cases for modules."""
    def setUp(self):
        """Set up everything for a unit test.

        Extends the base-class setup with a modules tool instance that the
        individual tests use.
        """
        super(ModulesTest, self).setUp()
        # modules tool instance shared by the tests in this class
        self.testmods = modules_tool()
def init_testmods(self, test_modules_paths=None):
"""Initialize set of test modules for test."""
if test_modules_paths is None:
test_modules_paths = [os.path.abspath(os.path.join(os.path.dirname(__file__), 'modules'))]
self.reset_modulepath(test_modules_paths)
# for Lmod, this test has to run first, to avoid that it fails;
# no modules are found if another test ran before it, but using a (very) long module path works fine interactively
def test_long_module_path(self):
"""Test dealing with a (very) long module path."""
# create a really long modules install path
tmpdir = tempfile.mkdtemp()
long_mod_path = tmpdir
subdir = 'foo'
# Lmod v5.1.5 doesn't support module paths longer than 256 characters, so stay just under that magic limit
while (len(os.path.abspath(long_mod_path)) + len(subdir)) < 240:
long_mod_path = os.path.join(long_mod_path, subdir)
# copy one of the test modules there
gcc_mod_dir = os.path.join(long_mod_path, 'GCC')
os.makedirs(gcc_mod_dir)
gcc_mod_path = os.path.join(os.path.dirname(__file__), 'modules', 'GCC', '4.6.3')
shutil.copy2(gcc_mod_path, gcc_mod_dir)
# try and use long modules path
self.init_testmods(test_modules_paths=[long_mod_path])
ms = self.testmods.available()
self.assertEqual(ms, ['GCC/4.6.3'])
shutil.rmtree(tmpdir)
def test_avail(self):
"""Test if getting a (restricted) list of available modules works."""
self.init_testmods()
# test modules include 3 GCC modules
ms = self.testmods.available('GCC')
self.assertEqual(ms, ['GCC/4.6.3', 'GCC/4.6.4', 'GCC/4.7.2'])
# test modules include one GCC/4.6.3 module
ms = self.testmods.available(mod_name='GCC/4.6.3')
self.assertEqual(ms, ['GCC/4.6.3'])
# all test modules are accounted for
ms = self.testmods.available()
self.assertEqual(len(ms), TEST_MODULES_COUNT)
def test_exists(self):
"""Test if testing for module existence works."""
self.init_testmods()
self.assertEqual(self.testmods.exist(['OpenMPI/1.6.4-GCC-4.6.4']), [True])
self.assertEqual(self.testmods.exist(['foo/1.2.3']), [False])
# exists should not return True for incomplete module names
self.assertEqual(self.testmods.exist(['GCC']), [False])
# exists works on hidden modules
self.assertEqual(self.testmods.exist(['toy/.0.0-deps']), [True])
# exists also works on lists of module names
# list should be sufficiently long, since for short lists 'show' is always used
mod_names = ['OpenMPI/1.6.4-GCC-4.6.4', 'foo/1.2.3', 'GCC',
'ScaLAPACK/1.8.0-gompi-1.1.0-no-OFED',
'ScaLAPACK/1.8.0-gompi-1.1.0-no-OFED-ATLAS-3.8.4-LAPACK-3.4.0-BLACS-1.1',
'Compiler/GCC/4.7.2/OpenMPI/1.6.4', 'toy/.0.0-deps']
self.assertEqual(self.testmods.exist(mod_names), [True, False, False, False, True, True, True])
def test_load(self):
""" test if we load one module it is in the loaded_modules """
self.init_testmods()
ms = self.testmods.available()
# exclude modules not on the top level of a hierarchy
ms = [m for m in ms if not (m.startswith('Core') or m.startswith('Compiler/') or m.startswith('MPI/') or
m.startswith('CategorizedHMNS'))]
for m in ms:
self.testmods.load([m])
self.assertTrue(m in self.testmods.loaded_modules())
self.testmods.purge()
# trying to load a module not on the top level of a hierarchy should fail
mods = [
'Compiler/GCC/4.7.2/OpenMPI/1.6.4', # module use on non-existent dir (Tcl-based env mods), or missing dep (Lmod)
'MPI/GCC/4.7.2/OpenMPI/1.6.4/ScaLAPACK/2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2', # missing dep
]
for mod in mods:
self.assertErrorRegex(EasyBuildError, '.*', self.testmods.load, [mod])
def test_ld_library_path(self):
"""Make sure LD_LIBRARY_PATH is what it should be when loaded multiple modules."""
self.init_testmods()
testpath = '/this/is/just/a/test'
os.environ['LD_LIBRARY_PATH'] = testpath
# load module and check that previous LD_LIBRARY_PATH is still there, at the end
self.testmods.load(['GCC/4.6.3'])
self.assertTrue(re.search("%s$" % testpath, os.environ['LD_LIBRARY_PATH']))
self.testmods.purge()
# check that previous LD_LIBRARY_PATH is still there, at the end
self.assertTrue(re.search("%s$" % testpath, os.environ['LD_LIBRARY_PATH']))
self.testmods.purge()
def test_purge(self):
"""Test if purging of modules works."""
self.init_testmods()
ms = self.testmods.available()
self.testmods.load([ms[0]])
self.assertTrue(len(self.testmods.loaded_modules()) > 0)
self.testmods.purge()
self.assertTrue(len(self.testmods.loaded_modules()) == 0)
self.testmods.purge()
self.assertTrue(len(self.testmods.loaded_modules()) == 0)
def test_get_software_root_version_libdir(self):
"""Test get_software_X functions."""
tmpdir = tempfile.mkdtemp()
test_cases = [
('GCC', 'GCC'),
('grib_api', 'GRIB_API'),
('netCDF-C++', 'NETCDFMINCPLUSPLUS'),
('Score-P', 'SCOREMINP'),
]
for (name, env_var_name) in test_cases:
# mock stuff that get_software_X functions rely on
root = os.path.join(tmpdir, name)
os.makedirs(os.path.join(root, 'lib'))
os.environ['EBROOT%s' % env_var_name] = root
version = '0.0-%s' % root
os.environ['EBVERSION%s' % env_var_name] = version
self.assertEqual(get_software_root(name), root)
self.assertEqual(get_software_version(name), version)
self.assertEqual(get_software_libdir(name), 'lib')
os.environ.pop('EBROOT%s' % env_var_name)
os.environ.pop('EBVERSION%s' % env_var_name)
# check expected result of get_software_libdir with multiple lib subdirs
root = os.path.join(tmpdir, name)
os.makedirs(os.path.join(root, 'lib64'))
os.environ['EBROOT%s' % env_var_name] = root
self.assertErrorRegex(EasyBuildError, "Multiple library subdirectories found.*", get_software_libdir, name)
self.assertEqual(get_software_libdir(name, only_one=False), ['lib', 'lib64'])
# only directories containing files in specified list should be retained
open(os.path.join(root, 'lib64', 'foo'), 'w').write('foo')
self.assertEqual(get_software_libdir(name, fs=['foo']), 'lib64')
# clean up for previous tests
os.environ.pop('EBROOT%s' % env_var_name)
# if root/version for specified software package can not be found, these functions should return None
self.assertEqual(get_software_root('foo'), None)
self.assertEqual(get_software_version('foo'), None)
self.assertEqual(get_software_libdir('foo'), None)
# if no library subdir is found, get_software_libdir should return None
os.environ['EBROOTFOO'] = tmpdir
self.assertEqual(get_software_libdir('foo'), None)
os.environ.pop('EBROOTFOO')
shutil.rmtree(tmpdir)
def test_wrong_modulepath(self):
    """Test whether modules tool can deal with a broken $MODULEPATH."""
    test_modules_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modules')
    modules_test_installpath = os.path.join(self.test_installpath, 'modules', 'all')
    # two bogus entries plus one valid one
    os.environ['MODULEPATH'] = "/some/non-existing/path:/this/doesnt/exists/anywhere:%s" % test_modules_path
    init_config()
    modtool = modules_tool()
    # the non-existing $MODULEPATH entries must have been filtered out;
    # what remains is the install path plus the valid test modules path
    self.assertEqual(len(modtool.mod_paths), 2)
    self.assertTrue(os.path.samefile(modtool.mod_paths[0], modules_test_installpath))
    self.assertEqual(modtool.mod_paths[1], test_modules_path)
    self.assertTrue(len(modtool.available()) > 0)
def test_path_to_top_of_module_tree(self):
    """Test function to determine path to top of the module tree."""
    modtool = modules_tool()
    # with a flat (non-hierarchical) module setup there is no tree to climb,
    # so the path to the top is always empty
    path = modtool.path_to_top_of_module_tree([], 'gompi/1.3.12', '', ['GCC/4.6.4', 'OpenMPI/1.6.4-GCC-4.6.4'])
    self.assertEqual(path, [])
    path = modtool.path_to_top_of_module_tree([], 'toy/.0.0-deps', '', ['gompi/1.3.12'])
    self.assertEqual(path, [])
    path = modtool.path_to_top_of_module_tree([], 'toy/0.0', '', [])
    self.assertEqual(path, [])
def test_path_to_top_of_module_tree_hierarchical_mns(self):
    """Test function to determine path to top of the module tree for a hierarchical module naming scheme."""
    modtool = modules_tool()
    ecs_dir = os.path.join(os.path.dirname(__file__), 'easyconfigs')
    all_stops = [x[0] for x in EasyBlock.get_steps()]
    build_options = {
        'check_osdeps': False,
        'robot_path': [ecs_dir],
        'valid_stops': all_stops,
        'validate': False,
    }
    os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'HierarchicalMNS'
    init_config(build_options=build_options)
    self.setup_hierarchical_modules()
    modtool = modules_tool()
    mod_prefix = os.path.join(self.test_installpath, 'modules', 'all')
    init_modpaths = [os.path.join(mod_prefix, 'Core')]
    # modules already installed in the Core level: nothing to climb
    deps = ['GCC/4.7.2', 'OpenMPI/1.6.4', 'FFTW/3.3.3', 'OpenBLAS/0.2.6-LAPACK-3.4.2',
            'ScaLAPACK/2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2']
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'goolf/1.4.10', os.path.join(mod_prefix, 'Core'), deps)
    self.assertEqual(path, [])
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'GCC/4.7.2', os.path.join(mod_prefix, 'Core'), [])
    self.assertEqual(path, [])
    # module in the Compiler level: one hop (GCC) up to Core
    full_mod_subdir = os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2')
    deps = ['GCC/4.7.2', 'hwloc/1.6.2']
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'OpenMPI/1.6.4', full_mod_subdir, deps)
    self.assertEqual(path, ['GCC/4.7.2'])
    # module in the MPI level: two hops (OpenMPI, then GCC) up to Core
    full_mod_subdir = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4')
    deps = ['GCC/4.7.2', 'OpenMPI/1.6.4']
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'FFTW/3.3.3', full_mod_subdir, deps)
    self.assertEqual(path, ['OpenMPI/1.6.4', 'GCC/4.7.2'])
def test_path_to_top_of_module_tree_categorized_hmns(self):
    """
    Test function to determine path to top of the module tree for a categorized hierarchical module naming
    scheme.
    """
    ecs_dir = os.path.join(os.path.dirname(__file__), 'easyconfigs')
    all_stops = [x[0] for x in EasyBlock.get_steps()]
    build_options = {
        'check_osdeps': False,
        'robot_path': [ecs_dir],
        'valid_stops': all_stops,
        'validate': False,
    }
    os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'CategorizedHMNS'
    init_config(build_options=build_options)
    self.setup_categorized_hmns_modules()
    modtool = modules_tool()
    mod_prefix = os.path.join(self.test_installpath, 'modules', 'all')
    # in the categorized scheme each hierarchy level is split per category
    # (compiler/toolchain/mpi/numlib/...), hence multiple initial module paths
    init_modpaths = [os.path.join(mod_prefix, 'Core', 'compiler'), os.path.join(mod_prefix, 'Core', 'toolchain')]
    deps = ['GCC/4.7.2', 'OpenMPI/1.6.4', 'FFTW/3.3.3', 'OpenBLAS/0.2.6-LAPACK-3.4.2',
            'ScaLAPACK/2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2']
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'goolf/1.4.10', os.path.join(mod_prefix, 'Core', 'toolchain'), deps)
    self.assertEqual(path, [])
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'GCC/4.7.2', os.path.join(mod_prefix, 'Core', 'compiler'), [])
    self.assertEqual(path, [])
    # Compiler-level module: one hop (GCC) back to Core
    full_mod_subdir = os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2', 'mpi')
    deps = ['GCC/4.7.2', 'hwloc/1.6.2']
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'OpenMPI/1.6.4', full_mod_subdir, deps)
    self.assertEqual(path, ['GCC/4.7.2'])
    # MPI-level module: two hops (OpenMPI, then GCC) back to Core
    full_mod_subdir = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4', 'numlib')
    deps = ['GCC/4.7.2', 'OpenMPI/1.6.4']
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'FFTW/3.3.3', full_mod_subdir, deps)
    self.assertEqual(path, ['OpenMPI/1.6.4', 'GCC/4.7.2'])
def suite():
    """Return a TestSuite with all the test cases in this module."""
    return TestLoader().loadTestsFromTestCase(ModulesTest)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
| pneerincx/easybuild-framework | test/framework/modules.py | Python | gpl-2.0 | 14,748 | [
"NetCDF"
] | 16d275301c49bad4f5726f1e6126018ff569e738fe4069fda81c2633be884258 |
"""
test views
"""
import datetime
import json
import re
import pytz
import ddt
import urlparse
from mock import patch, MagicMock
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.courses import get_course_by_id
from courseware.tests.factories import StudentModuleFactory
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tabs import get_course_tab_list
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.core.urlresolvers import reverse, resolve
from django.utils.timezone import UTC
from django.test.utils import override_settings
from django.test import RequestFactory
from edxmako.shortcuts import render_to_response
from request_cache.middleware import RequestCache
from opaque_keys.edx.keys import CourseKey
from student.roles import CourseCcxCoachRole
from student.models import (
CourseEnrollment,
CourseEnrollmentAllowed,
)
from student.tests.factories import (
AdminFactory,
CourseEnrollmentFactory,
UserFactory,
)
from xmodule.x_module import XModuleMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
SharedModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE)
from xmodule.modulestore.tests.factories import (
CourseFactory,
ItemFactory,
)
from ccx_keys.locator import CCXLocator
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import get_override_for_ccx, override_field_for_ccx
from lms.djangoapps.ccx.tests.factories import CcxFactory
def intercept_renderer(path, context):
    """
    Drop-in replacement for `render_to_response` used in tests.

    Renders the template as usual, but also attaches the template path and the
    context dict to the response object so unit tests can inspect what was
    rendered.
    """
    # I think Django already does this for you in their TestClient, except
    # we're bypassing that by using edxmako. Probably edxmako should be
    # integrated better with Django's rendering and event system.
    rendered = render_to_response(path, context)
    rendered.mako_template = path
    rendered.mako_context = context
    return rendered
def ccx_dummy_request():
    """
    Returns dummy request object for CCX coach tab test
    """
    dummy = RequestFactory().get('ccx_coach_dashboard')
    # the tab code only needs *a* user on the request, not a real one
    dummy.user = MagicMock()
    return dummy
def setup_students_and_grades(context):
    """
    Create students and set their grades.
    :param context: class reference
    """
    if context.course:
        context.student = student = UserFactory.create()
        CourseEnrollmentFactory.create(user=student, course_id=context.course.id)
        context.student2 = student2 = UserFactory.create()
        CourseEnrollmentFactory.create(user=student2, course_id=context.course.id)
        # create grades for self.student as if they'd submitted the ccx
        for chapter in context.course.get_children():
            for i, section in enumerate(chapter.get_children()):
                for j, problem in enumerate(section.get_children()):
                    # if not problem.visible_to_staff_only:
                    # student gets the point when section index < problem index,
                    # student2 the complementary pattern (i > j), so the two
                    # end up with different per-section scores
                    StudentModuleFactory.create(
                        grade=1 if i < j else 0,
                        max_grade=1,
                        student=context.student,
                        course_id=context.course.id,
                        module_state_key=problem.location
                    )
                    StudentModuleFactory.create(
                        grade=1 if i > j else 0,
                        max_grade=1,
                        student=context.student2,
                        course_id=context.course.id,
                        module_state_key=problem.location
                    )
def is_email(identifier):
    """
    Checks if an `identifier` string is a valid email
    """
    try:
        validate_email(identifier)
        return True
    except ValidationError:
        return False
@attr('shard_1')
@ddt.ddt
class TestCoachDashboard(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Tests for Custom Courses views.
    """
    # these tests run against the split modulestore (CCX keys are built with
    # CCXLocator.from_course_locator throughout)
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
    """
    Build the shared course fixture once per TestCase:
    2 chapters x 2 sequentials x 2 graded 'Homework' verticals (each with
    2 child blocks).
    """
    super(TestCoachDashboard, cls).setUpClass()
    cls.course = course = CourseFactory.create()
    # Create a course outline
    cls.mooc_start = start = datetime.datetime(
        2010, 5, 12, 2, 42, tzinfo=pytz.UTC
    )
    cls.mooc_due = due = datetime.datetime(
        2010, 7, 7, 0, 0, tzinfo=pytz.UTC
    )
    cls.chapters = [
        ItemFactory.create(start=start, parent=course) for _ in xrange(2)
    ]
    cls.sequentials = flatten([
        [
            ItemFactory.create(parent=chapter) for _ in xrange(2)
        ] for chapter in cls.chapters
    ])
    cls.verticals = flatten([
        [
            ItemFactory.create(
                start=start, due=due, parent=sequential, graded=True, format='Homework', category=u'vertical'
            ) for _ in xrange(2)
        ] for sequential in cls.sequentials
    ])
    # Trying to wrap the whole thing in a bulk operation fails because it
    # doesn't find the parents. But we can at least wrap this part...
    with cls.store.bulk_operations(course.id, emit_signals=False):
        blocks = flatten([  # pylint: disable=unused-variable
            [
                ItemFactory.create(parent=vertical) for _ in xrange(2)
            ] for vertical in cls.verticals
        ])
def setUp(self):
    """
    Set up tests: log in as a freshly created instructor.
    """
    super(TestCoachDashboard, self).setUp()
    # Create instructor account
    # (password "test" is presumably the factory default — confirm in AdminFactory)
    self.coach = coach = AdminFactory.create()
    self.client.login(username=coach.username, password="test")
    # create an instance of modulestore
    self.mstore = modulestore()
def make_coach(self):
    """
    Grant the CCX coach role on ``self.course`` to ``self.coach``.
    """
    CourseCcxCoachRole(self.course.id).add_users(self.coach)
def make_ccx(self, max_students_allowed=settings.CCX_MAX_STUDENTS_ALLOWED):
    """
    Create a CCX for ``self.course`` coached by ``self.coach``, with its
    enrollment cap overridden to ``max_students_allowed``.
    """
    # NOTE(review): the default value is evaluated once at import time, so a
    # later override of settings.CCX_MAX_STUDENTS_ALLOWED would not be picked
    # up here — confirm that is acceptable for these tests.
    ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
    override_field_for_ccx(ccx, self.course, 'max_student_enrollments_allowed', max_students_allowed)
    return ccx
def get_outbox(self):
    """
    Return the list of messages captured by Django's test email backend.
    """
    from django.core import mail
    return mail.outbox
def assert_elements_in_schedule(self, url, n_chapters=2, n_sequentials=4, n_verticals=8):
    """
    Helper function to count visible elements in the schedule.

    Asserts the expected number of nodes at each level, and returns the
    locations of all nodes (for membership checks by the caller).
    """
    response = self.client.get(url)
    # the schedule contains chapters
    chapters = json.loads(response.mako_context['schedule'])  # pylint: disable=no-member
    sequentials = flatten([chapter.get('children', []) for chapter in chapters])
    verticals = flatten([sequential.get('children', []) for sequential in sequentials])
    # check that the numbers of nodes at different level are the expected ones
    self.assertEqual(n_chapters, len(chapters))
    self.assertEqual(n_sequentials, len(sequentials))
    self.assertEqual(n_verticals, len(verticals))
    # extract the locations of all the nodes
    all_elements = chapters + sequentials + verticals
    return [elem['location'] for elem in all_elements if 'location' in elem]
def hide_node(self, node):
    """
    Helper function to set the node `visible_to_staff_only` property
    to True and save the change
    """
    node.visible_to_staff_only = True
    # persist via the modulestore so the change is visible to later requests
    self.mstore.update_item(node, self.coach.id)
def test_not_a_coach(self):
    """
    User is not a coach, should get Forbidden response.
    """
    # note: make_coach() is deliberately NOT called here
    ccx = self.make_ccx()
    url = reverse(
        'ccx_coach_dashboard',
        kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)})
    response = self.client.get(url)
    self.assertEqual(response.status_code, 403)
def test_no_ccx_created(self):
    """
    No CCX is created, coach should see form to add a CCX.
    """
    self.make_coach()
    # dashboard is addressed by the plain course key since no CCX exists yet
    url = reverse(
        'ccx_coach_dashboard',
        kwargs={'course_id': unicode(self.course.id)})
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(re.search(
        '<form action=".+create_ccx"',
        response.content))
def test_create_ccx(self):
    """
    Create CCX. Follow redirect to coach dashboard, confirm we see
    the coach dashboard for the new CCX.
    """
    self.make_coach()
    url = reverse(
        'create_ccx',
        kwargs={'course_id': unicode(self.course.id)})
    response = self.client.post(url, {'name': 'New CCX'})
    self.assertEqual(response.status_code, 302)
    url = response.get('location')  # pylint: disable=no-member
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    # Get the ccx_key from the URL we were redirected to
    path = urlparse.urlparse(url).path
    resolver = resolve(path)
    ccx_key = resolver.kwargs['course_id']
    course_key = CourseKey.from_string(ccx_key)
    # the coach is auto-enrolled in the new CCX
    self.assertTrue(CourseEnrollment.is_enrolled(self.coach, course_key))
    self.assertTrue(re.search('id="ccx-schedule"', response.content))
    # check if the max amount of student that can be enrolled has been overridden
    ccx = CustomCourseForEdX.objects.get()
    course_enrollments = get_override_for_ccx(ccx, self.course, 'max_student_enrollments_allowed')
    self.assertEqual(course_enrollments, settings.CCX_MAX_STUDENTS_ALLOWED)
@SharedModuleStoreTestCase.modifies_courseware
@patch('ccx.views.render_to_response', intercept_renderer)
@patch('ccx.views.TODAY')
def test_get_ccx_schedule(self, today):
    """
    Gets CCX schedule and checks number of blocks in it.
    Hides nodes at a different depth and checks that these nodes
    are not in the schedule.
    """
    # pin "today" so the schedule view's date logic is deterministic
    today.return_value = datetime.datetime(2014, 11, 25, tzinfo=pytz.UTC)
    self.make_coach()
    ccx = self.make_ccx()
    url = reverse(
        'ccx_coach_dashboard',
        kwargs={
            'course_id': CCXLocator.from_course_locator(
                self.course.id, ccx.id)
        }
    )
    # all the elements are visible
    self.assert_elements_in_schedule(url)
    # hide a vertical: one fewer vertical expected
    vertical = self.verticals[0]
    self.hide_node(vertical)
    locations = self.assert_elements_in_schedule(url, n_verticals=7)
    self.assertNotIn(unicode(vertical.location), locations)
    # hide a sequential: it disappears along with its remaining vertical
    sequential = self.sequentials[0]
    self.hide_node(sequential)
    locations = self.assert_elements_in_schedule(url, n_sequentials=3, n_verticals=6)
    self.assertNotIn(unicode(sequential.location), locations)
    # hide a chapter: its whole subtree disappears
    chapter = self.chapters[0]
    self.hide_node(chapter)
    locations = self.assert_elements_in_schedule(url, n_chapters=1, n_sequentials=2, n_verticals=4)
    self.assertNotIn(unicode(chapter.location), locations)
@patch('ccx.views.render_to_response', intercept_renderer)
@patch('ccx.views.TODAY')
def test_edit_schedule(self, today):
    """
    Get CCX schedule, modify it, save it.

    Also checks the side effects of saving: the CCX course start date follows
    the earliest scheduled chapter, and the grading policy is adjusted to the
    number of scheduled Homework units.
    """
    today.return_value = datetime.datetime(2014, 11, 25, tzinfo=pytz.UTC)
    self.make_coach()
    ccx = self.make_ccx()
    url = reverse(
        'ccx_coach_dashboard',
        kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)})
    response = self.client.get(url)
    schedule = json.loads(response.mako_context['schedule'])  # pylint: disable=no-member
    # freshly created CCX: nothing hidden, no dates set yet
    self.assertEqual(len(schedule), 2)
    self.assertEqual(schedule[0]['hidden'], False)
    self.assertEqual(schedule[0]['start'], None)
    self.assertEqual(schedule[0]['children'][0]['start'], None)
    self.assertEqual(schedule[0]['due'], None)
    self.assertEqual(schedule[0]['children'][0]['due'], None)
    self.assertEqual(
        schedule[0]['children'][0]['children'][0]['due'], None
    )
    url = reverse(
        'save_ccx',
        kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)})

    def unhide(unit):
        """
        Recursively unhide a unit and all of its children in the CCX
        schedule.
        """
        unit['hidden'] = False
        for child in unit.get('children', ()):
            unhide(child)

    unhide(schedule[0])
    schedule[0]['start'] = u'2014-11-20 00:00'
    schedule[0]['children'][0]['due'] = u'2014-12-25 00:00'  # what a jerk!
    schedule[0]['children'][0]['children'][0]['start'] = u'2014-12-20 00:00'
    schedule[0]['children'][0]['children'][0]['due'] = u'2014-12-25 00:00'
    response = self.client.post(
        url, json.dumps(schedule), content_type='application/json'
    )
    # the save view echoes the saved schedule back
    schedule = json.loads(response.content)['schedule']
    self.assertEqual(schedule[0]['hidden'], False)
    self.assertEqual(schedule[0]['start'], u'2014-11-20 00:00')
    self.assertEqual(
        schedule[0]['children'][0]['due'], u'2014-12-25 00:00'
    )
    self.assertEqual(
        schedule[0]['children'][0]['children'][0]['due'], u'2014-12-25 00:00'
    )
    self.assertEqual(
        schedule[0]['children'][0]['children'][0]['start'], u'2014-12-20 00:00'
    )
    # Make sure start date set on course, follows start date of earliest
    # scheduled chapter
    ccx = CustomCourseForEdX.objects.get()
    course_start = get_override_for_ccx(ccx, self.course, 'start')
    # [:-9] strips seconds/timezone from the stringified datetime
    self.assertEqual(str(course_start)[:-9], u'2014-11-20 00:00')
    # Make sure grading policy adjusted
    policy = get_override_for_ccx(ccx, self.course, 'grading_policy',
                                  self.course.grading_policy)
    self.assertEqual(policy['GRADER'][0]['type'], 'Homework')
    self.assertEqual(policy['GRADER'][0]['min_count'], 8)
    self.assertEqual(policy['GRADER'][1]['type'], 'Lab')
    self.assertEqual(policy['GRADER'][1]['min_count'], 0)
    self.assertEqual(policy['GRADER'][2]['type'], 'Midterm Exam')
    self.assertEqual(policy['GRADER'][2]['min_count'], 0)
    self.assertEqual(policy['GRADER'][3]['type'], 'Final Exam')
    self.assertEqual(policy['GRADER'][3]['min_count'], 0)
@patch('ccx.views.render_to_response', intercept_renderer)
def test_save_without_min_count(self):
    """
    POST grading policy without min_count field.

    Verifies the policy is stored as-is and that a subsequent schedule save
    still succeeds (i.e. the missing min_count does not break save_ccx).
    """
    self.make_coach()
    ccx = self.make_ccx()
    course_id = CCXLocator.from_course_locator(self.course.id, ccx.id)
    save_policy_url = reverse(
        'ccx_set_grading_policy', kwargs={'course_id': course_id})
    # This policy doesn't include a min_count field
    policy = {
        "GRADE_CUTOFFS": {
            "Pass": 0.5
        },
        "GRADER": [
            {
                "weight": 0.15,
                "type": "Homework",
                "drop_count": 2,
                "short_label": "HW"
            }
        ]
    }
    response = self.client.post(
        save_policy_url, {"policy": json.dumps(policy)}
    )
    self.assertEqual(response.status_code, 302)
    ccx = CustomCourseForEdX.objects.get()
    # Make sure grading policy adjusted
    policy = get_override_for_ccx(
        ccx, self.course, 'grading_policy', self.course.grading_policy
    )
    self.assertEqual(len(policy['GRADER']), 1)
    self.assertEqual(policy['GRADER'][0]['type'], 'Homework')
    self.assertNotIn('min_count', policy['GRADER'][0])
    save_ccx_url = reverse('save_ccx', kwargs={'course_id': course_id})
    coach_dashboard_url = reverse(
        'ccx_coach_dashboard',
        kwargs={'course_id': course_id}
    )
    response = self.client.get(coach_dashboard_url)
    schedule = json.loads(response.mako_context['schedule'])  # pylint: disable=no-member
    # re-saving the unchanged schedule must not choke on the stored policy
    response = self.client.post(
        save_ccx_url, json.dumps(schedule), content_type='application/json'
    )
    self.assertEqual(response.status_code, 200)
@ddt.data(
    ('ccx_invite', True, 1, 'student-ids', ('enrollment-button', 'Enroll')),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Enroll')),
    ('ccx_manage_student', True, 1, 'student-id', ('student-action', 'add')),
    ('ccx_manage_student', False, 0, 'student-id', ('student-action', 'add')),
)
@ddt.unpack
def test_enroll_member_student(self, view_name, send_email, outbox_count, student_form_input_name, button_tuple):
    """
    Tests the enrollment of a list of students who are members
    of the class.
    It tests 2 different views that use slightly different parameters,
    but that perform the same task.
    """
    self.make_coach()
    ccx = self.make_ccx()
    # the student is already enrolled in the master course
    enrollment = CourseEnrollmentFactory(course_id=self.course.id)
    student = enrollment.user
    outbox = self.get_outbox()
    self.assertEqual(outbox, [])
    url = reverse(
        view_name,
        kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)}
    )
    data = {
        button_tuple[0]: button_tuple[1],
        student_form_input_name: u','.join([student.email, ]),  # pylint: disable=no-member
    }
    if send_email:
        data['email-students'] = 'Notify-students-by-email'
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # we were redirected to our current location
    self.assertEqual(len(response.redirect_chain), 1)
    self.assertIn(302, response.redirect_chain[0])
    # a notification email is sent only when requested
    self.assertEqual(len(outbox), outbox_count)
    if send_email:
        self.assertIn(student.email, outbox[0].recipients())  # pylint: disable=no-member
    # a CcxMembership exists for this student
    self.assertTrue(
        CourseEnrollment.objects.filter(course_id=self.course.id, user=student).exists()
    )
def test_ccx_invite_enroll_up_to_limit(self):
    """
    Enrolls a list of students up to the enrollment limit.
    This test is specific to one of the enrollment views: the reason is because
    the view used in this test can perform bulk enrollments.
    """
    self.make_coach()
    # create ccx and limit the maximum amount of students that can be enrolled to 2
    ccx = self.make_ccx(max_students_allowed=2)
    ccx_course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
    # create some users (one more than the cap)
    students = [
        UserFactory.create(is_staff=False) for _ in range(3)
    ]
    url = reverse(
        'ccx_invite',
        kwargs={'course_id': ccx_course_key}
    )
    data = {
        'enrollment-button': 'Enroll',
        'student-ids': u','.join([student.email for student in students]),
    }
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # a CcxMembership exists for the first two students but not the third
    self.assertTrue(
        CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[0]).exists()
    )
    self.assertTrue(
        CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[1]).exists()
    )
    self.assertFalse(
        CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[2]).exists()
    )
def test_manage_student_enrollment_limit(self):
    """
    Enroll students up to the enrollment limit.
    This test is specific to one of the enrollment views: the reason is because
    the view used in this test cannot perform bulk enrollments.
    """
    students_limit = 1
    self.make_coach()
    ccx = self.make_ccx(max_students_allowed=students_limit)
    ccx_course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
    students = [
        UserFactory.create(is_staff=False) for _ in range(2)
    ]
    url = reverse(
        'ccx_manage_student',
        kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)}
    )
    # enroll the first student
    data = {
        'student-action': 'add',
        'student-id': u','.join([students[0].email, ]),
    }
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # a CcxMembership exists for this student
    self.assertTrue(
        CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[0]).exists()
    )
    # try to enroll the second student without success:
    # the enrollment cap (1) has already been reached
    data = {
        'student-action': 'add',
        'student-id': u','.join([students[1].email, ]),
    }
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # a CcxMembership does not exist for this student
    self.assertFalse(
        CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[1]).exists()
    )
    error_message = 'The course is full: the limit is {students_limit}'.format(
        students_limit=students_limit
    )
    self.assertContains(response, error_message, status_code=200)
@ddt.data(
    ('ccx_invite', True, 1, 'student-ids', ('enrollment-button', 'Unenroll')),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Unenroll')),
    ('ccx_manage_student', True, 1, 'student-id', ('student-action', 'revoke')),
    ('ccx_manage_student', False, 0, 'student-id', ('student-action', 'revoke')),
)
@ddt.unpack
def test_unenroll_member_student(self, view_name, send_email, outbox_count, student_form_input_name, button_tuple):
    """
    Tests the unenrollment of a list of students who are members of the class.
    It tests 2 different views that use slightly different parameters,
    but that perform the same task.
    """
    self.make_coach()
    ccx = self.make_ccx()
    course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
    # this student starts out enrolled in the CCX itself
    enrollment = CourseEnrollmentFactory(course_id=course_key)
    student = enrollment.user
    outbox = self.get_outbox()
    self.assertEqual(outbox, [])
    url = reverse(
        view_name,
        kwargs={'course_id': course_key}
    )
    data = {
        button_tuple[0]: button_tuple[1],
        student_form_input_name: u','.join([student.email, ]),  # pylint: disable=no-member
    }
    if send_email:
        data['email-students'] = 'Notify-students-by-email'
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # we were redirected to our current location
    self.assertEqual(len(response.redirect_chain), 1)
    self.assertIn(302, response.redirect_chain[0])
    self.assertEqual(len(outbox), outbox_count)
    if send_email:
        self.assertIn(student.email, outbox[0].recipients())  # pylint: disable=no-member
    # a CcxMembership does not exists for this student
    self.assertFalse(
        CourseEnrollment.objects.filter(course_id=self.course.id, user=student).exists()
    )
@ddt.data(
    ('ccx_invite', True, 1, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody@nowhere.com'),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody@nowhere.com'),
    ('ccx_invite', True, 0, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody'),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody'),
    ('ccx_manage_student', True, 0, 'student-id', ('student-action', 'add'), 'dummy_student_id'),
    ('ccx_manage_student', False, 0, 'student-id', ('student-action', 'add'), 'dummy_student_id'),
    ('ccx_manage_student', True, 1, 'student-id', ('student-action', 'add'), 'xyz@gmail.com'),
    ('ccx_manage_student', False, 0, 'student-id', ('student-action', 'add'), 'xyz@gmail.com'),
)
@ddt.unpack
def test_enroll_non_user_student(
        self, view_name, send_email, outbox_count, student_form_input_name, button_tuple, identifier):
    """
    Tests the enrollment of a list of students who are not users yet.
    It tests 2 different views that use slightly different parameters,
    but that perform the same task.

    Valid email identifiers produce a CourseEnrollmentAllowed record (and
    optionally an invitation email); invalid identifiers produce neither.
    """
    self.make_coach()
    ccx = self.make_ccx()
    course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
    outbox = self.get_outbox()
    self.assertEqual(outbox, [])
    url = reverse(
        view_name,
        kwargs={'course_id': course_key}
    )
    data = {
        button_tuple[0]: button_tuple[1],
        student_form_input_name: u','.join([identifier, ]),
    }
    if send_email:
        data['email-students'] = 'Notify-students-by-email'
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # we were redirected to our current location
    self.assertEqual(len(response.redirect_chain), 1)
    self.assertIn(302, response.redirect_chain[0])
    self.assertEqual(len(outbox), outbox_count)
    # some error messages are returned for one of the views only
    if view_name == 'ccx_manage_student' and not is_email(identifier):
        error_message = 'Could not find a user with name or email "{identifier}" '.format(
            identifier=identifier
        )
        self.assertContains(response, error_message, status_code=200)
    if is_email(identifier):
        if send_email:
            self.assertIn(identifier, outbox[0].recipients())
        self.assertTrue(
            CourseEnrollmentAllowed.objects.filter(course_id=course_key, email=identifier).exists()
        )
    else:
        self.assertFalse(
            CourseEnrollmentAllowed.objects.filter(course_id=course_key, email=identifier).exists()
        )
@ddt.data(
    ('ccx_invite', True, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody@nowhere.com'),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody@nowhere.com'),
    ('ccx_invite', True, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody'),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody'),
)
@ddt.unpack
def test_unenroll_non_user_student(
        self, view_name, send_email, outbox_count, student_form_input_name, button_tuple, identifier):
    """
    Unenroll a list of students who are not users yet
    """
    self.make_coach()
    course = CourseFactory.create()
    ccx = self.make_ccx()
    course_key = CCXLocator.from_course_locator(course.id, ccx.id)
    outbox = self.get_outbox()
    # NOTE(review): this CourseEnrollmentAllowed instance is never saved
    # (no .save() / objects.create), so no row exists in the database and the
    # final assertFalse below passes vacuously — confirm whether persisting it
    # was intended.
    CourseEnrollmentAllowed(course_id=course_key, email=identifier)
    self.assertEqual(outbox, [])
    url = reverse(
        view_name,
        kwargs={'course_id': course_key}
    )
    data = {
        button_tuple[0]: button_tuple[1],
        student_form_input_name: u','.join([identifier, ]),
    }
    if send_email:
        data['email-students'] = 'Notify-students-by-email'
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # we were redirected to our current location
    self.assertEqual(len(response.redirect_chain), 1)
    self.assertIn(302, response.redirect_chain[0])
    self.assertEqual(len(outbox), outbox_count)
    self.assertFalse(
        CourseEnrollmentAllowed.objects.filter(
            course_id=course_key, email=identifier
        ).exists()
    )
# Keep a reference to the real implementation so the patched version below can
# delegate to it.
GET_CHILDREN = XModuleMixin.get_children


def patched_get_children(self, usage_key_filter=None):
    """Emulate system tools that mask courseware not visible to students"""
    def iter_children():
        """skip children not visible to students"""
        for child in GET_CHILDREN(self, usage_key_filter=usage_key_filter):
            # drop cached field data, presumably so visible_to_staff_only is
            # re-read with field overrides applied — confirm
            child._field_data_cache = {}  # pylint: disable=protected-access
            if not child.visible_to_staff_only:
                yield child
    return list(iter_children())
@attr('shard_1')
# enable the CCX field-override provider and mask staff-only blocks from
# "students" for the duration of these grade tests
@override_settings(FIELD_OVERRIDE_PROVIDERS=(
    'ccx.overrides.CustomCoursesForEdxOverrideProvider',))
@patch('xmodule.x_module.XModuleMixin.get_children', patched_get_children, spec=True)
class TestCCXGrades(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Tests for Custom Courses views.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
    """
    Build the shared gradable course fixture: one chapter containing 4 graded
    'Homework' sections with 4 problems each.
    """
    super(TestCCXGrades, cls).setUpClass()
    cls._course = course = CourseFactory.create(enable_ccx=True)
    # Create a course outline
    cls.mooc_start = start = datetime.datetime(
        2010, 5, 12, 2, 42, tzinfo=pytz.UTC
    )
    # NOTE(review): the chapter is created with category='sequential' — looks
    # unusual for a chapter-level node; confirm this is intentional.
    chapter = ItemFactory.create(
        start=start, parent=course, category='sequential'
    )
    cls.sections = sections = [
        ItemFactory.create(
            parent=chapter,
            category="sequential",
            metadata={'graded': True, 'format': 'Homework'})
        for _ in xrange(4)
    ]
    # making problems available at class level for possible future use in tests
    cls.problems = [
        [
            ItemFactory.create(
                parent=section,
                category="problem",
                data=StringResponseXMLFactory().build_xml(answer='foo'),
                metadata={'rerandomize': 'always'}
            ) for _ in xrange(4)
        ] for section in sections
    ]
def setUp(self):
    """
    Set up tests: create a coached CCX with a single-Homework grading policy,
    hide the last section from students, and seed student grades.
    """
    super(TestCCXGrades, self).setUp()
    # Create instructor account
    self.coach = coach = AdminFactory.create()
    self.client.login(username=coach.username, password="test")
    # Create CCX
    role = CourseCcxCoachRole(self._course.id)
    role.add_users(coach)
    ccx = CcxFactory(course_id=self._course.id, coach=self.coach)
    # override course grading policy and make last section invisible to students
    override_field_for_ccx(ccx, self._course, 'grading_policy', {
        'GRADER': [
            {'drop_count': 0,
             'min_count': 2,
             'short_label': 'HW',
             'type': 'Homework',
             'weight': 1}
        ],
        'GRADE_CUTOFFS': {'Pass': 0.75},
    })
    override_field_for_ccx(
        ccx, self.sections[-1], 'visible_to_staff_only', True
    )
    # create a ccx locator and retrieve the course structure using that key
    # which emulates how a student would get access.
    self.ccx_key = CCXLocator.from_course_locator(self._course.id, ccx.id)
    self.course = get_course_by_id(self.ccx_key, depth=None)
    setup_students_and_grades(self)
    self.client.login(username=coach.username, password="test")
    self.addCleanup(RequestCache.clear_request_cache)
@patch('ccx.views.render_to_response', intercept_renderer)
@patch('instructor.views.gradebook_api.MAX_STUDENTS_PER_PAGE_GRADE_BOOK', 1)
def test_gradebook(self):
    """Check the gradebook view: pagination and the first student's grade summary."""
    self.course.enable_ccx = True
    RequestCache.clear_request_cache()
    url = reverse(
        'ccx_gradebook',
        kwargs={'course_id': self.ccx_key}
    )
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    # Max number of student per page is one. Patched setting MAX_STUDENTS_PER_PAGE_GRADE_BOOK = 1
    self.assertEqual(len(response.mako_context['students']), 1)  # pylint: disable=no-member
    student_info = response.mako_context['students'][0]  # pylint: disable=no-member
    self.assertEqual(student_info['grade_summary']['percent'], 0.5)
    self.assertEqual(
        student_info['grade_summary']['grade_breakdown'][0]['percent'],
        0.5)
    # only 4 sections contribute: the hidden last section is excluded
    self.assertEqual(
        len(student_info['grade_summary']['section_breakdown']), 4)
    def test_grades_csv(self):
        """Grades CSV download returns an attachment with the expected scores."""
        self.course.enable_ccx = True
        RequestCache.clear_request_cache()

        url = reverse(
            'ccx_grades_csv',
            kwargs={'course_id': self.ccx_key}
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # Are the grades downloaded as an attachment?
        self.assertEqual(
            response['content-disposition'],
            'attachment'
        )
        # NOTE(review): rows are split on '\r' here — presumably the CSV writer
        # emits CRLF line endings; confirm against the view implementation.
        rows = response.content.strip().split('\r')
        headers = rows[0]
        # picking first student records
        data = dict(zip(headers.strip().split(','), rows[1].strip().split(',')))
        # HW 04 was made invisible to students in setUp, so it must be absent.
        self.assertNotIn('HW 04', data)
        self.assertEqual(data['HW 01'], '0.75')
        self.assertEqual(data['HW 02'], '0.5')
        self.assertEqual(data['HW 03'], '0.25')
        self.assertEqual(data['HW Avg'], '0.5')
    @patch('courseware.views.render_to_response', intercept_renderer)
    def test_student_progress(self):
        """A student's progress page shows the CCX grade summary."""
        self.course.enable_ccx = True
        patch_context = patch('courseware.views.get_course_with_access')
        get_course = patch_context.start()
        get_course.return_value = self.course
        self.addCleanup(patch_context.stop)
        # Switch from the coach (logged in by setUp) to the student.
        self.client.login(username=self.student.username, password="test")
        url = reverse(
            'progress',
            kwargs={'course_id': self.ccx_key}
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        grades = response.mako_context['grade_summary']  # pylint: disable=no-member
        self.assertEqual(grades['percent'], 0.5)
        self.assertEqual(grades['grade_breakdown'][0]['percent'], 0.5)
        self.assertEqual(len(grades['section_breakdown']), 4)
@ddt.ddt
class CCXCoachTabTestCase(SharedModuleStoreTestCase):
    """
    Test case for CCX coach tab.

    Verifies that the coach tab is shown only when both the CUSTOM_COURSES_EDX
    feature flag and the course's enable_ccx advanced setting are on.
    """
    @classmethod
    def setUpClass(cls):
        super(CCXCoachTabTestCase, cls).setUpClass()
        cls.ccx_enabled_course = CourseFactory.create(enable_ccx=True)
        cls.ccx_disabled_course = CourseFactory.create(enable_ccx=False)

    def setUp(self):
        super(CCXCoachTabTestCase, self).setUp()
        # Enroll one user as coach in both courses.
        self.user = UserFactory.create()
        for course in [self.ccx_enabled_course, self.ccx_disabled_course]:
            CourseEnrollmentFactory.create(user=self.user, course_id=course.id)
            role = CourseCcxCoachRole(course.id)
            role.add_users(self.user)

    def check_ccx_tab(self, course):
        """Helper function for verifying the ccx tab."""
        request = RequestFactory().request()
        request.user = self.user
        all_tabs = get_course_tab_list(request, course)
        return any(tab.type == 'ccx_coach' for tab in all_tabs)

    @ddt.data(
        (True, True, True),
        (True, False, False),
        (False, True, False),
        (False, False, False),
        (True, None, False)
    )
    @ddt.unpack
    def test_coach_tab_for_ccx_advance_settings(self, ccx_feature_flag, enable_ccx, expected_result):
        """
        Test ccx coach tab state (visible or hidden) depending on the value of enable_ccx flag, ccx feature flag.
        """
        # NOTE: this replaces the whole FEATURES dict for the duration of the test.
        with self.settings(FEATURES={'CUSTOM_COURSES_EDX': ccx_feature_flag}):
            course = self.ccx_enabled_course if enable_ccx else self.ccx_disabled_course
            # assertEqual instead of the deprecated assertEquals alias.
            self.assertEqual(
                expected_result,
                self.check_ccx_tab(course)
            )
class TestStudentDashboardWithCCX(ModuleStoreTestCase):
    """
    Test to ensure that the student dashboard works for users enrolled in CCX
    courses.
    """

    def setUp(self):
        """
        Set up courses and enrollments.
        """
        super(TestStudentDashboardWithCCX, self).setUp()

        # Create a Draft Mongo and a Split Mongo course and enroll a student user in them.
        self.student_password = "foobar"
        self.student = UserFactory.create(username="test", password=self.student_password, is_staff=False)
        self.draft_course = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
        self.split_course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
        CourseEnrollment.enroll(self.student, self.draft_course.id)
        CourseEnrollment.enroll(self.student, self.split_course.id)

        # Create a CCX coach.
        self.coach = AdminFactory.create()
        role = CourseCcxCoachRole(self.split_course.id)
        role.add_users(self.coach)

        # Create a CCX course and enroll the user in it.
        self.ccx = CcxFactory(course_id=self.split_course.id, coach=self.coach)
        last_week = datetime.datetime.now(UTC()) - datetime.timedelta(days=7)
        override_field_for_ccx(self.ccx, self.split_course, 'start', last_week)  # Required by self.ccx.has_started().
        course_key = CCXLocator.from_course_locator(self.split_course.id, self.ccx.id)
        CourseEnrollment.enroll(self.student, course_key)

    def test_load_student_dashboard(self):
        """Dashboard renders and lists the CCX course for the enrolled student."""
        self.client.login(username=self.student.username, password=self.student_password)
        response = self.client.get(reverse('dashboard'))
        self.assertEqual(response.status_code, 200)
        # NOTE(review): 'Test CCX' is presumably the display name the CcxFactory
        # assigns — confirm against the factory definition.
        self.assertTrue(re.search('Test CCX', response.content))
def flatten(seq):
    """
    Flatten one level of nesting: [[1, 2], [3, 4]] -> [1, 2, 3, 4].

    Does not recurse into deeper levels.
    """
    flattened = []
    for inner in seq:
        flattened.extend(inner)
    return flattened
def iter_blocks(course):
    """
    Returns an iterator over all of the blocks in a course.

    Blocks are produced in depth-first pre-order: each block is yielded
    before any of its descendants.
    """
    def visit(block):
        """Iterative pre-order walk over *block* and its descendants."""
        stack = [block]
        while stack:
            node = stack.pop()
            yield node
            # Push children in reverse so the first child is visited first.
            stack.extend(list(node.get_children())[::-1])
    return visit(course)
| simbs/edx-platform | lms/djangoapps/ccx/tests/test_views.py | Python | agpl-3.0 | 39,324 | [
"VisIt"
] | b7fa4d397fd7647ce4f01065e3d481e5f03f11c70c3a218eec93649404dbd616 |
import re
import json
import requests
from functools import update_wrapper
from django import forms
from django.conf import settings
from django.views.generic import View
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from sentry.utils.http import absolute_uri
from sentry.models import Organization, Team, User, OrganizationMember, \
GroupAssignee
from .utils import JsonResponse, IS_DEBUG
from .models import Tenant, Context
from . import mentions
from .plugin import enable_plugin_for_tenant, disable_plugin_for_tenant, \
ADDON_HOST_IDENT
from .cards import make_event_notification, make_generic_notification, \
make_subscription_update_notification, ICON, ICON2X
_link_pattern = re.escape(settings.SENTRY_URL_PREFIX) \
.replace('https\\:', 'https?\\:') + '/'
_link_re = re.compile(_link_pattern +
r'(?P<org>[^/]+)/(?P<proj>[^/]+)/group/'
r'(?P<group>[^/]+)(/events/(?P<event>[^/]+)|/?)')
ADDON_KEY = getattr(settings, 'HIPCHAT_SENTRY_AC_KEY', None)
if ADDON_KEY is None:
ADDON_KEY = '.'.join(ADDON_HOST_IDENT.split('.')[::-1]) + '.hipchat-ac'
class DescriptorView(View):
    """Serve the HipChat Connect add-on descriptor.

    HipChat fetches this JSON capabilities document to discover the
    add-on's install callback, webhooks, web panels, message actions,
    dialogs and glances.
    """

    def get(self, request):
        return JsonResponse({
            'key': ADDON_KEY,
            'name': 'Sentry for HipChat',
            'description': 'Sentry integration for HipChat.',
            'links': {
                'self': absolute_uri(reverse('sentry-hipchat-ac-descriptor')),
            },
            'icon': {
                'url': ICON,
            },
            'capabilities': {
                # Room-scoped install only; HipChat POSTs to callbackUrl.
                'installable': {
                    'allowRoom': True,
                    'allowGlobal': False,
                    'callbackUrl': absolute_uri(reverse(
                        'sentry-hipchat-ac-installable')),
                },
                'hipchatApiConsumer': {
                    'scopes': ['send_notification', 'view_room'],
                },
                'configurable': {
                    'url': absolute_uri(reverse('sentry-hipchat-ac-config')),
                },
                # Fired when a room message matches a Sentry issue URL
                # (see _link_pattern above).
                'webhook': [
                    {
                        'event': 'room_message',
                        'url': absolute_uri(reverse(
                            'sentry-hipchat-ac-link-message')),
                        'pattern': _link_pattern,
                        'authentication': 'jwt',
                    },
                ],
                'webPanel': [
                    {
                        'key': 'sentry.sidebar.event-details',
                        'name': {
                            'value': 'Sentry Issue Details',
                        },
                        'location': 'hipchat.sidebar.right',
                        'url': absolute_uri(reverse(
                            'sentry-hipchat-ac-event-details')),
                    },
                    {
                        'key': 'sentry.sidebar.recent-events',
                        'name': {
                            'value': 'Recent Sentry Issues',
                        },
                        'location': 'hipchat.sidebar.right',
                        'url': absolute_uri(reverse(
                            'sentry-hipchat-ac-recent-events')),
                    },
                ],
                # Message actions only show on cards we tagged with
                # sentry_message_type == 'event'.
                'action': [
                    {
                        'key': 'message.sentry.event-details',
                        'name': {
                            'value': 'Show details',
                        },
                        'target': 'sentry-event-details-glance',
                        'location': 'hipchat.message.action',
                        'conditions': [
                            {
                                'condition': 'card_matches',
                                'params': {
                                    'metadata': [
                                        {'attr': 'sentry_message_type',
                                         'eq': 'event'},
                                    ]
                                }
                            }
                        ],
                    },
                    {
                        'key': 'message.sentry.assign-event',
                        'name': {
                            'value': 'Assign',
                        },
                        'target': 'sentry-assign-dialog',
                        'location': 'hipchat.message.action',
                        'conditions': [
                            {
                                'condition': 'card_matches',
                                'params': {
                                    'metadata': [
                                        {'attr': 'sentry_message_type',
                                         'eq': 'event'},
                                    ]
                                }
                            }
                        ],
                    }
                ],
                'dialog': [
                    {
                        'key': 'sentry-assign-dialog',
                        'title': {
                            'value': 'Assign Issue',
                        },
                        'url': absolute_uri(reverse(
                            'sentry-hipchat-assign-event')),
                        'options': {
                            'size': {
                                'height': '400px',
                                'width': '600px',
                            },
                        },
                    }
                ],
                'glance': [
                    # Invisible dummy glance for normal sidebars
                    # (the always-false condition keeps it hidden; it only
                    # exists as a target for the 'Show details' action).
                    {
                        'name': {
                            'value': 'Sentry Issue Details',
                        },
                        'key': 'sentry-event-details-glance',
                        'target': 'sentry.sidebar.event-details',
                        'icon': {
                            'url': ICON,
                            'url@2x': ICON2X,
                        },
                        'conditions': [
                            {
                                'condition': 'glance_matches',
                                "params": {
                                    "metadata": [
                                        {"attr": "this_is_a_dummy",
                                         "eq": True}
                                    ]
                                }
                            }
                        ],
                    },
                    {
                        'name': {
                            'value': 'Sentry',
                        },
                        'queryUrl': absolute_uri(reverse(
                            'sentry-hipchat-ac-recent-events-glance')),
                        'key': 'sentry-recent-events-glance',
                        'target': 'sentry.sidebar.recent-events',
                        'icon': {
                            'url': ICON,
                            'url@2x': ICON2X,
                        },
                        'conditions': [],
                    }
                ],
            },
            'vendor': {
                'url': 'https://www.getsentry.com/',
                'name': 'Sentry',
            }
        })
class InstallableView(View):
    """Install / uninstall callback endpoint called by HipChat."""

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        # HipChat posts here without a CSRF token, so the view is exempt.
        return View.dispatch(self, *args, **kwargs)

    def post(self, request):
        """Handle installation: validate the capabilities URL and create a Tenant."""
        data = json.loads(request.body) or {}
        room_id = data.get('roomId', None)
        if room_id is None:
            return HttpResponse('This add-on can only be installed in '
                                'individual rooms.', status=400)
        # Fetch HipChat's capabilities document and verify it points at itself.
        capdoc = requests.get(data['capabilitiesUrl'], timeout=10).json()
        if capdoc['links'].get('self') != data['capabilitiesUrl']:
            return HttpResponse('Mismatch on capabilities URL',
                                status=400)

        # Make sure we clean up an old existing tenant if we have one.
        try:
            old_tenant = Tenant.objects.get(pk=data['oauthId'])
        except Tenant.DoesNotExist:
            pass
        else:
            old_tenant.delete()

        tenant = Tenant.objects.create(
            id=data['oauthId'],
            room_id=room_id,
            secret=data['oauthSecret'],
            capdoc=capdoc,
        )
        tenant.update_room_info()
        return HttpResponse('', status=201)

    def delete(self, request, oauth_id):
        """Handle uninstallation: remove the tenant if it still exists."""
        try:
            tenant = Tenant.objects.get(pk=oauth_id)
            tenant.delete()
        except Tenant.DoesNotExist:
            pass
        return HttpResponse('', status=201)
class GrantAccessForm(forms.Form):
    """Configuration form that lets a logged-in Sentry user grant the
    HipChat tenant access to one or more of their organizations.
    """
    orgs = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,
                                     label='Organizations',
                                     required=False)

    def __init__(self, tenant, request):
        self.user = request.user
        self.tenant = tenant
        # Only organizations the current user belongs to can be granted.
        self.all_orgs = Organization.objects.get_for_user(request.user)
        org_choices = [(str(x.id), x.name) for x in self.all_orgs]
        if request.method == 'POST':
            forms.Form.__init__(self, request.POST)
        else:
            forms.Form.__init__(self)
        self.fields['orgs'].choices = org_choices

    def clean_orgs(self):
        """Map submitted ids back to Organization objects; require at least one."""
        rv = [org for org in self.all_orgs if str(org.id) in
              self.cleaned_data['orgs']]
        if not rv:
            raise forms.ValidationError('You need to select at least one '
                                        'organization to give access to.')
        return rv

    def save_changes(self):
        """Bind the tenant to the user and selected orgs, then notify the room."""
        self.tenant.auth_user = self.user
        self.tenant.organizations = self.cleaned_data['orgs']
        self.tenant.save()
        notify_tenant_added(self.tenant)
class ProjectSelectForm(forms.Form):
    """Configuration form that selects which Sentry projects report
    into the HipChat room of the given tenant.
    """
    projects = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,
                                         label='Projects', required=False)

    def __init__(self, tenant, request):
        self.tenant = tenant
        # Collect all projects visible to the tenant's auth user, grouped
        # per organization / team, keyed by stringified project id.
        project_choices = []
        self.projects_by_id = {}
        for org in tenant.organizations.all():
            teams = Team.objects.get_for_user(org, tenant.auth_user,
                                              with_projects=True)
            for team, projects in teams:
                for project in projects:
                    project_choices.append((str(project.id), '%s | %s / %s' % (
                        org.name, team.name, project.name)))
                    self.projects_by_id[str(project.id)] = project
        project_choices.sort(key=lambda x: x[1].lower())
        if request.method == 'POST':
            forms.Form.__init__(self, request.POST)
        else:
            # Pre-check the projects already subscribed by this tenant.
            forms.Form.__init__(self, initial={
                'projects': [str(x.id) for x in tenant.projects.all()],
            })
        self.fields['projects'].choices = project_choices

    def clean_projects(self):
        return set(self.cleaned_data['projects'])

    def save_changes(self):
        """Enable/disable the plugin per project and announce the changes."""
        new_projects = []
        removed_projects = []
        # .items() instead of the Python-2-only .iteritems() — equivalent
        # here and forward-compatible with Python 3.
        for project_id, project in self.projects_by_id.items():
            if project_id in self.cleaned_data['projects']:
                if enable_plugin_for_tenant(project, self.tenant):
                    new_projects.append(project)
            else:
                if disable_plugin_for_tenant(project, self.tenant):
                    removed_projects.append(project)
        if new_projects or removed_projects:
            with Context.for_tenant(self.tenant) as ctx:
                ctx.send_notification(**make_subscription_update_notification(
                    new_projects, removed_projects))
                if removed_projects:
                    mentions.clear_project_mentions(
                        self.tenant, removed_projects)
                ctx.push_recent_events_glance()
def webhook(f):
    """Decorator for HipChat webhook views: parse the JSON payload and
    call *f* with (request, context, payload, ...) inside a tenant Context.
    """
    @csrf_exempt
    def wrapper(request, *args, **kwargs):
        payload = json.loads(request.body) or {}
        manager = Context.for_request(request, payload)
        with manager as context:
            return f(request, context, payload, *args, **kwargs)
    return update_wrapper(wrapper, f)
def with_context(f):
    """Decorator that runs the view inside a tenant Context derived from
    the request's signed parameters.
    """
    def wrapper(request, *args, **kwargs):
        manager = Context.for_request(request)
        with manager as context:
            return f(request, context, *args, **kwargs)
    return update_wrapper(wrapper, f)
def allow_frame(f):
    """Decorator that lets the response be embedded in HipChat's iframe.

    Sets a non-empty X-Frame-Options header so Sentry's middleware does
    not overwrite it with 'deny'.
    """
    def wrapper(request, *args, **kwargs):
        response = f(request, *args, **kwargs)
        # put something here so that sentry does not overwrite it with deny.
        response['X-Frame-Options'] = 'allow'
        return response
    return update_wrapper(wrapper, f)
def cors(f):
    """Decorator that attaches permissive CORS headers, reflecting the
    request's Origin back in Access-Control-Allow-Origin.
    """
    def wrapper(request, *args, **kwargs):
        requesting_origin = request.META.get('HTTP_ORIGIN')
        response = f(request, *args, **kwargs)
        response['Access-Control-Allow-Origin'] = requesting_origin
        response['Access-Control-Request-Method'] = 'GET, HEAD, OPTIONS'
        response['Access-Control-Allow-Headers'] = 'X-Requested-With'
        response['Access-Control-Allow-Credentials'] = 'true'
        response['Access-Control-Max-Age'] = '1728000'
        return response
    return update_wrapper(wrapper, f)
@allow_frame
@with_context
def configure(request, context):
    """Configuration page shown inside HipChat.

    Two phases: first a GrantAccessForm to bind a Sentry user and their
    organizations to the tenant, then a ProjectSelectForm to pick which
    projects report into the room.
    """
    # XXX: this is a bit terrible because it means the login url is
    # already set at the time we visit this page.  This can have some
    # stupid consequences when opening up the login page separately in a
    # different tab later.  Ideally we could pass the login url through as
    # a URL parameter instead but this is currently not securely possible.
    request.session['_next'] = request.get_full_path()

    grant_form = None
    project_select_form = None

    # Phase 1: no Sentry user bound yet — offer the grant-access form.
    if context.tenant.auth_user is None and \
       request.user.is_authenticated():
        grant_form = GrantAccessForm(context.tenant, request)
        if request.method == 'POST' and grant_form.is_valid():
            grant_form.save_changes()
            return HttpResponseRedirect(request.get_full_path())
    # Phase 2: user bound — offer the project selection form.
    elif context.tenant.auth_user is not None:
        project_select_form = ProjectSelectForm(context.tenant, request)
        if request.method == 'POST' and project_select_form.is_valid():
            project_select_form.save_changes()
            return HttpResponseRedirect(request.get_full_path())

    return render(request, 'sentry_hipchat_ac/configure.html', {
        'context': context,
        'tenant': context.tenant,
        'current_user': request.user,
        'grant_form': grant_form,
        'project_select_form': project_select_form,
        'available_orgs': list(context.tenant.organizations.all()),
        'hipchat_debug': IS_DEBUG,
    })
@allow_frame
@with_context
def sign_out(request, context):
    """Confirmation page that unbinds the Sentry user from the tenant.

    POST with consent clears the tenant and notifies the room; otherwise
    redirects back to the configuration page.
    """
    tenant = context.tenant
    # Re-attach the signed request so the config page stays authenticated.
    cfg_url = '%s?signed_request=%s' % (
        reverse('sentry-hipchat-ac-config'),
        context.signed_request
    )
    if tenant.auth_user is None or 'no' in request.POST:
        return HttpResponseRedirect(cfg_url)
    elif request.method == 'POST':
        tenant.clear()
        notify_tenant_removal(tenant)
        return HttpResponseRedirect(cfg_url)
    return render(request, 'sentry_hipchat_ac/sign_out.html', {
        'context': context,
        'tenant': tenant,
    })
@cors
@allow_frame
@with_context
def recent_events_glance(request, context):
    """Glance query endpoint: JSON state for the 'Sentry' sidebar glance.

    CORS-enabled because HipChat's client fetches it cross-origin.
    """
    return JsonResponse(context.get_recent_events_glance())
@allow_frame
@with_context
def event_details(request, context):
    """Sidebar panel showing the details of a single Sentry event.

    Looks up the event by the 'event' query parameter; renders an error
    state if it cannot be found.
    """
    event = None
    group = None
    interface_data = {}
    tags = []

    event_id = request.GET.get('event')
    bad_event = False
    if event_id is not None:
        event = context.get_event(event_id)
        if event is None:
            bad_event = True
        else:
            group = event.group
            # Strip the internal 'sentry:' prefix from tag keys for display.
            tags = [(k.split(':', 1)[1] if k.startswith('sentry:') else k,
                     v) for k, v in event.get_tags()]
            interface_data.update(
                http=event.interfaces.get('sentry.interfaces.Http'),
                user=event.interfaces.get('sentry.interfaces.User'),
            )
            exc = event.interfaces.get('sentry.interfaces.Exception')
            if exc is not None:
                interface_data['exc'] = exc
                interface_data['exc_as_string'] = exc.to_string(event)

    return render(request, 'sentry_hipchat_ac/event_details.html', {
        'context': context,
        'event': event,
        'from_recent': request.GET.get('from_recent') == 'yes',
        'group': group,
        'interfaces': interface_data,
        'bad_event': bad_event,
        'tags': tags,
    })
@allow_frame
@with_context
def assign_event(request, context):
    """Dialog view to assign or unassign the issue behind an event.

    GET renders the member list and current assignee; POST with 'assign'
    or 'deassign' mutates the assignment and closes the dialog.
    """
    event = None
    project = None
    member_list = []
    assigned_to = None
    dismiss_dialog = False

    event_id = request.GET.get('event')
    if event_id:
        event = context.get_event(event_id)

    if event is not None:
        project = event.project
        # Members of any active team membership in the project's org,
        # capped at 1000 and sorted by email for the dropdown.
        member_list = sorted(set(User.objects.filter(
            is_active=True,
            sentry_orgmember_set__organization=project.organization,
            sentry_orgmember_set__id__in=OrganizationMember.objects.filter(
                organizationmemberteam__is_active=True,
                organizationmemberteam__team=project.team,
            ).values('id')
        ).distinct()[:1000]), key=lambda x: x.email)
        assigned_to = GroupAssignee.objects.filter(
            group=event.group
        ).first()

        if request.method == 'POST':
            if 'assign' in request.POST:
                # Only accept an assignee from the computed member list.
                assignee = next((
                    x for x in member_list
                    if str(x.id) == request.POST['assigned_to']), None)
                if assignee is not None:
                    GroupAssignee.objects.assign(event.group, assignee)
            elif 'deassign' in request.POST:
                GroupAssignee.objects.deassign(event.group)
            dismiss_dialog = True

    return render(request, 'sentry_hipchat_ac/assign_event.html', {
        'context': context,
        'event': event,
        'project': project,
        'member_list': member_list,
        'assigned_to': assigned_to,
        'dismiss_dialog': dismiss_dialog,
    })
@allow_frame
@with_context
def recent_events(request, context):
    """Sidebar panel listing issues recently mentioned in this room."""
    events = mentions.get_recent_mentions(context.tenant)
    return render(request, 'sentry_hipchat_ac/recent_events.html', {
        'context': context,
        'events': events,
    })
@webhook
def on_link_message(request, context, data):
    """room_message webhook: when a Sentry issue URL is pasted in the room,
    post an event card and record the mention for the recent-events panel.
    """
    match = _link_re.search(data['item']['message']['message'])
    if match is not None:
        params = match.groupdict()
        event = context.get_event_from_url_params(
            group_id=params['group'],
            event_id=params['event'],
            slug_vars={'org_slug': params['org'],
                       'proj_slug': params['proj']}
        )
        if event is not None:
            context.send_notification(**make_event_notification(
                event.group, event, context.tenant, new=False,
                event_target=params['event'] is not None))

            # Only record a specific event if the URL pointed at one.
            mentions.mention_event(
                project=event.project,
                group=event.group,
                tenant=context.tenant,
                event=params['event'] and event or None,
            )
            context.push_recent_events_glance()
    # HipChat only needs an acknowledgement; no response body.
    return HttpResponse('', status=204)
def notify_tenant_added(tenant):
    """Announce in the room that the integration was associated with it."""
    with Context.for_tenant(tenant) as ctx:
        ctx.send_notification(**make_generic_notification(
            'The Sentry Hipchat integration was associated with this room.',
            color='green'))
        ctx.push_recent_events_glance()
def notify_tenant_removal(tenant):
    """Announce in the room that the integration was disassociated from it."""
    with Context.for_tenant(tenant) as ctx:
        ctx.send_notification(**make_generic_notification(
            'The Sentry Hipchat integration was disassociated with this room.',
            color='red'))
        ctx.push_recent_events_glance()
| getsentry/sentry-hipchat-ac | sentry_hipchat_ac/views.py | Python | apache-2.0 | 20,481 | [
"VisIt"
] | 80cd3407a893259f42b875ad521175263c9ada51b8e4d25507ee6209168aed9f |
import numpy as np
import scipy.sparse
from scipy.io.netcdf import NetCDFFile
from aston.trace.Trace import AstonFrame, AstonSeries
from aston.tracefile.TraceFile import TraceFile
class NetCDF(TraceFile):
    """Reader for chromatography data stored in NetCDF (ANDI/AIA *.CDF) files."""
    ext = 'CDF'
    mgc = '4344'  # magic bytes 'CD' identifying a classic NetCDF file
    traces = ['#ms']

    def total_trace(self, twin=None):
        """Return the total ion chromatogram as an AstonSeries.

        NOTE(review): the file handle opened here is never explicitly
        closed; the returned arrays may be mmap-backed by scipy's netcdf
        reader, so closing eagerly could invalidate them — confirm before
        changing.
        """
        f = NetCDFFile(open(self.filename, 'rb'))
        tme = f.variables['scan_acquisition_time'].data / 60.
        tic = f.variables['total_intensity'].data
        return AstonSeries(tic, tme, name='TIC')

    @property
    def data(self):
        """Full scan data as an AstonFrame (rows = scans, cols = m/z values)."""
        f = NetCDFFile(open(self.filename, 'rb'))
        t = f.variables['scan_acquisition_time'].data / 60.

        ## this is half the speed of the following code
        #ions = list(set(f.variables['mass_values'].data))
        #cols = np.array([ions.index(i) for i in \
        #  f.variables['mass_values'].data])

        #TODO: slow; there has to be a way to vectorize this more?
        # Map each raw m/z value to its column index in the ion list.
        ions = np.array(list(set(f.variables['mass_values'].data)))
        rcols = f.variables['mass_values'].data
        cols = np.empty(rcols.shape, dtype=int)
        for i, ion in enumerate(ions):
            cols[rcols == ion] = i

        # Build a CSR matrix directly from the per-scan point counts:
        # row pointers are the cumulative point counts with a leading 0.
        vals = f.variables['intensity_values'].data
        rowst = np.add.accumulate(f.variables['point_count'].data)
        rowst = np.insert(rowst, 0, 0)

        data = scipy.sparse.csr_matrix((vals, cols, rowst), \
          shape=(len(t), len(ions)), dtype=float)
        return AstonFrame(data.todense(), t, ions)
def write_netcdf(filename, df, info=None):
    """Write a chromatogram dataframe to an ANDI/AIA NetCDF file.

    Arguments:
    * filename - path of the CDF file to create.
    * df - frame with scan times as index and one column per m/z value.
    * info - optional dict of metadata (name, operator, date, method, sample).
    """
    #FIXME: still a lot of issues here
    if info is None:
        info = {}
    f = NetCDFFile(filename, 'w')

    # Fixed-width string dimensions required by the ANDI/AIA convention.
    f.createDimension('_2_byte_string', 2)
    f.createDimension('_4_byte_string', 4)
    f.createDimension('_8_byte_string', 8)
    f.createDimension('_16_byte_string', 16)
    f.createDimension('_32_byte_string', 32)
    f.createDimension('_64_byte_string', 64)
    f.createDimension('_128_byte_string', 128)
    f.createDimension('_255_byte_string', 255)
    f.createDimension('error_number', 1)
    f.flush()

    f.dataset_completeness = 'C1'  # TODO: save peaks too? ('C1+C2')
    f.netcdf_revision = '2.3.2'
    f.languages = 'English'
    f.flush()

    f.experiment_title = info.get('name', ' ')
    f.operator_name = info.get('operator', ' ')
    # TODO: wrong format for injection_date_time_stamp
    f.injection_date_time_stamp = info.get('date', ' ')
    f.company_method_id = info.get('method', ' ')
    f.sample_name = info.get('sample', ' ')
    f.flush()

    # Per-scan variables: acquisition time, TIC, and point count per scan.
    f.createDimension('scan_number', len(df.index))
    v = f.createVariable('scan_acquisition_time', '>d', ('scan_number',))
    v[:] = df.index.astype('d')
    v = f.createVariable('total_intensity', '>d', ('scan_number',))
    v[:] = df.values.sum(axis=1).astype('d')
    v = f.createVariable('point_count', '>i', ('scan_number',))
    v[:] = np.sum(df.values != 0, axis=1).astype('i')
    f.flush()

    # Flattened (mass, intensity) pairs for every non-zero data point.
    f.createDimension('point_number', np.sum(df.values != 0))
    # NOTE(review): stretch_t repeats the time index to df's shape; the
    # masked values written as 'mass_values' therefore look like times,
    # not masses — likely part of the FIXME above; confirm before relying
    # on this output.
    stretch_t = np.resize(df.index, df.values.T.shape).T
    v = f.createVariable('mass_values', '>f', ('point_number',))
    v[:] = stretch_t[df.values != 0]
    v = f.createVariable('intensity_values', '>f', ('point_number',))
    v[:] = df.values[df.values != 0]
    f.close()
| molliewebb/aston | aston/tracefile/NetCDF.py | Python | gpl-3.0 | 3,291 | [
"NetCDF"
] | ddf456628d03ede5405e0611e3d1dfc1e8be62729e1536cea11e62c64d37c2d8 |
"""Model input/output tools."""
# (c) Copyright 2016 Andrew Dawson.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function) #noqa
import os.path
from netCDF4 import Dataset
class NetCDFWriter(object):
    """Write model output to a NetCDF file.

    One time record is appended per call to `save`; `flush` syncs to disk
    and `close` finalizes the file.
    """

    def __init__(self, model, filename, overwrite=True):
        """
        Initialize a netCDF output writer object for a given model.

        Arguments:

        * model
            An instance of `heateqn.model.HeatEquationModel` to provide
            output services to.

        * filename
            The name of the NetCDF file to use for output.

        Keyword argument:

        * overwrite
            If `True` the writer will overwrite the specified file if it
            already exists, and if `False` an error will be raised if
            the specified filename already exists. Default is `True`
            (existing file will be overwritten).

        """
        self.model = model
        self.filename = filename
        if not overwrite and os.path.exists(filename):
            msg = ('cannot write output to "{}", the file already '
                   'exists but overwrite=False')
            raise IOError(msg.format(filename))
        # Open a new netCDF dataset:
        self.ds = Dataset(filename, mode='w')
        # Create dimensions for time, latitude and longitude, the time
        # dimension has unlimited size:
        self.ds.createDimension('time')
        self.ds.createDimension('latitude', size=model.nlat)
        self.ds.createDimension('longitude', size=model.nlon)
        # Create coordinate variables for time, latitude and longitude, the
        # values of latitude and longitude can be set immediately:
        self.time = self.ds.createVariable('time', 'f4', dimensions=['time'])
        time_units = 'seconds since {}'.format(
            model.start_time.strftime('%Y-%m-%d %H:%M:%S'))
        self.time.setncatts({'standard_name': 'time', 'units': time_units})
        latitude = self.ds.createVariable('latitude', 'f4',
                                          dimensions=['latitude'])
        longitude = self.ds.createVariable('longitude', 'f4',
                                           dimensions=['longitude'])
        latitude.setncatts({'standard_name': 'latitude',
                            'units': 'degrees_north'})
        longitude.setncatts({'standard_name': 'longitude',
                             'units': 'degrees_east'})
        latitude[:], longitude[:] = self.model.engine.grid_latlon()
        # CF grid-mapping variable describing the spherical earth used:
        lat_lon = self.ds.createVariable('latitude_longitude', 'i4')
        lat_lon.setncatts({'grid_mapping_name': 'latitude_longitude',
                           'longitude_of_prime_meridian': 0.,
                           'semi_major_axis': 6371229.,
                           'semi_minor_axis': 6371229.})
        # Create variables to hold the model state:
        self.u = self.ds.createVariable(
            'uwnd',
            'f4',
            dimensions=['time', 'latitude', 'longitude'],
            zlib=True)
        self.v = self.ds.createVariable(
            'vwnd',
            'f4',
            dimensions=['time', 'latitude', 'longitude'],
            zlib=True)
        self.vrt = self.ds.createVariable(
            'vrt',
            'f4',
            dimensions=['time', 'latitude', 'longitude'],
            zlib=True)
        self.u.setncatts({'standard_name': 'eastward_wind',
                          'units': 'm s-1',
                          'grid_mapping': 'latitude_longitude'})
        self.v.setncatts({'standard_name': 'northward_wind',
                          'units': 'm s-1',
                          'grid_mapping': 'latitude_longitude'})
        self.vrt.setncatts({'standard_name': 'atmosphere_relative_vorticity',
                            'units': 's-1',
                            'grid_mapping': 'latitude_longitude'})

    def save(self):
        """Save the current model state to the output netCDF file."""
        if not self.ds.isopen():
            msg = 'cannot save output: the NetCDF writer is already closed'
            raise IOError(msg)
        # Append one record along the unlimited time dimension.
        index = self.time.size
        self.time[index] = self.model.t
        self.u[index] = self.model.u_grid
        self.v[index] = self.model.v_grid
        self.vrt[index] = self.model.vrt_grid

    def flush(self):
        """
        Write the output file to disk.

        The netCDF file may be buffered. Whilst calling `save` will
        append a record to the output, it may not be written to disk
        immediately.

        """
        if self.ds.isopen():
            self.ds.sync()

    def close(self):
        """Close the netCDF output file."""
        if self.ds.isopen():
            self.ds.close()
| ajdawson/python_for_climate_scientists | course_content/barotropic-model/barotropic/io.py | Python | gpl-3.0 | 5,379 | [
"NetCDF"
] | 4f30d09899f0a5befe46910dfb95fdcb8b6a52cac6dd6d2ad6d85a19dffaef80 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2019 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Generation tools for NNGT using MPI """
import warnings
import numpy as np
import scipy.sparse as ssp
from scipy.spatial.distance import cdist
from numpy.random import randint
from mpi4py import MPI
import nngt
from nngt.lib import InvalidArgument
from nngt.lib.connect_tools import *
from . import connect_algorithms
from .connect_algorithms import *
__all__ = connect_algorithms.__all__
def _from_degree_list(source_ids, target_ids, degrees, degree_type="in",
                      directed=True, multigraph=False, existing_edges=None,
                      **kwargs):
    '''
    Connect nodes from a list of degrees.

    MPI-parallel: each rank draws the edges for its share of the sources
    (rank::size striping) unless ``already_local=True`` is passed in
    `kwargs`, in which case `source_ids` and `degrees` are assumed to be
    already restricted to the local rank.
    Returns the local edge array for the 'nngt' backend, otherwise the
    full edge array on rank 0 (and None on the other ranks).
    '''
    if not directed:
        raise NotImplementedError("This function is not yet implemented for "
                                  "undirected graphs in MPI.")

    # type of degree
    degree_type = _set_degree_type(degree_type)

    b_out = (degree_type == "out")
    b_total = (degree_type == "total")

    if b_total:
        raise NotImplementedError(
            "Total degree is not supported yet with MPI.")

    # mpi-related stuff
    comm, size, rank = _mpi_and_random_init()

    # use only local sources and degrees unless already_local=True
    if not kwargs.get("already_local", False):
        source_ids = np.array(source_ids, dtype=int)[rank::size]

        # degrees are known only to rank 0; broadcast then keep local slice
        if rank != 0:
            degrees = None

        degrees = comm.bcast(degrees, root=0)
        degrees = degrees[rank::size]

    source_ids = np.array(source_ids, dtype=int)
    target_ids = np.array(target_ids, dtype=int)
    num_source = len(source_ids)

    # compute the local number of edges
    edges = np.sum(degrees)

    b_one_pop = _check_num_edges(
        source_ids, target_ids, edges, directed, multigraph)

    num_etotal = 0
    ia_edges = np.zeros((edges, 2), dtype=int)
    idx = 0 if b_out else 1  # differenciate source / target
    max_degree = np.inf if multigraph else len(target_ids)

    for i, v in enumerate(source_ids):
        degree_i = degrees[i]
        ecurrent, variables_i = 0, []
        # pre-seed with targets from already-existing edges of v so they
        # are not drawn again in the simple-graph case
        if existing_edges is not None:
            with_v = np.where(existing_edges[:, idx] == v)
            variables_i.extend(existing_edges[with_v, 1 - idx])
            degree_i += len(variables_i)

        assert degree_i < max_degree, "Required degree is greater that " +\
                                      "maximum possible degree {}.".format(max_degree)

        # exclude self-loops by removing v from the candidate targets
        rm = np.where(target_ids == v)[0]
        rm = rm[0] if len(rm) else -1
        var_tmp = (np.array(target_ids, copy=True) if rm == -1 else
                   np.concatenate((target_ids[:rm], target_ids[rm+1:])))

        ia_edges[num_etotal:num_etotal + degree_i, idx] = v

        # redraw until degree_i distinct targets are found (duplicates are
        # only allowed for multigraphs)
        while len(variables_i) != degree_i:
            variables_i.extend(np.random.choice(var_tmp, degree_i-ecurrent,
                                                replace=multigraph))
            if not multigraph:
                variables_i = list(set(variables_i))
            ecurrent = len(variables_i)

        ia_edges[num_etotal:num_etotal+ecurrent, int(not idx)] = variables_i
        num_etotal += ecurrent

    comm.Barrier()

    _finalize_random(rank)

    # the 'nngt' backend is made to be distributed, but the others are not
    if nngt.get_config("backend") == "nngt":
        return ia_edges
    else:
        # all the data is gather on the root processus
        ia_edges = comm.gather(ia_edges, root=0)

        if rank == 0:
            ia_edges = np.concatenate(ia_edges, axis=0)
            return ia_edges
        else:
            return None
def _fixed_degree(source_ids, target_ids, degree, degree_type="in",
                  reciprocity=-1, directed=True, multigraph=False,
                  existing_edges=None, **kwargs):
    '''
    Connect nodes with a delta (constant-degree) distribution.

    Each rank keeps its rank::size share of the sources and delegates the
    actual edge drawing to `_from_degree_list`.
    '''
    # mpi-related stuff (finalized in _from_degree_list)
    comm, size, rank = _mpi_and_random_init()

    assert degree >= 0, "A positive value is required for `degree`."

    # use only local sources
    local_sources = np.array(source_ids, dtype=int)[rank::size]
    all_targets = np.array(target_ids, dtype=int)
    num_local = len(local_sources)

    # every local source gets exactly `degree` edges
    local_degrees = np.full(num_local, degree, dtype=int)

    # !IMPORTANT! use `already_local` to tell _from_degree_list that only
    # local sources and degrees have been sent
    return _from_degree_list(
        local_sources, all_targets, local_degrees, degree_type=degree_type,
        directed=directed, multigraph=multigraph,
        existing_edges=existing_edges, already_local=True, **kwargs)
def _gaussian_degree(source_ids, target_ids, avg=-1, std=-1, degree_type="in",
                     reciprocity=-1, directed=True, multigraph=False,
                     existing_edges=None, **kwargs):
    '''
    Connect nodes with a Gaussian degree distribution.

    Each rank draws the (non-negative, rounded) degrees of its rank::size
    share of the sources and delegates edge drawing to `_from_degree_list`.
    '''
    # mpi-related stuff (finalized in _from_degree_list)
    comm, size, rank = _mpi_and_random_init()

    # switch values to float
    avg = float(avg)
    std = float(std)

    assert avg >= 0, "A positive value is required for `avg`."
    assert std >= 0, "A positive value is required for `std`."

    # use only local sources
    local_sources = np.array(source_ids, dtype=int)[rank::size]
    all_targets = np.array(target_ids, dtype=int)
    num_local = len(local_sources)

    # draw the local degrees; negative draws are clipped to 0 then rounded
    local_degrees = np.around(
        np.maximum(np.random.normal(avg, std, num_local), 0.)).astype(int)

    # !IMPORTANT! use `already_local` to tell _from_degree_list that only
    # local sources and degrees have been sent
    return _from_degree_list(
        local_sources, all_targets, local_degrees, degree_type=degree_type,
        directed=directed, multigraph=multigraph,
        existing_edges=existing_edges, already_local=True, **kwargs)
def _distance_rule(source_ids, target_ids, density=-1, edges=-1, avg_deg=-1,
                   scale=-1, rule="exp", max_proba=-1., shape=None,
                   positions=None, directed=True, multigraph=False,
                   distance=None, **kwargs):
    '''
    Returns a distance-rule graph: the probability of connecting two nodes
    decays with their distance following `rule` ("lin" or "exp") with
    characteristic length `scale`.  Work is split over MPI processes and the
    final edge array is assembled on the root.  If a list is passed as
    `distance`, the distances of the created edges are appended to it.
    '''
    assert max_proba <= 0, "MPI distance_rule cannot use `max_proba` yet."
    distance = [] if distance is None else distance
    distance_tmp = []
    edges_hash = set()
    # mpi-related stuff
    comm, size, rank = _mpi_and_random_init()
    # compute the required values
    source_ids = np.array(source_ids).astype(int)
    target_ids = np.array(target_ids).astype(int)
    num_source, num_target = len(source_ids), len(target_ids)
    # total number of edges requested, derived from density/edges/avg_deg
    num_edges, _ = _compute_connections(
        num_source, num_target, density, edges, avg_deg, directed,
        reciprocity=-1)
    b_one_pop = _check_num_edges(
        source_ids, target_ids, num_edges, directed, multigraph)
    num_neurons = len(set(np.concatenate((source_ids, target_ids))))
    # for each node, check the neighbours that are in an area where
    # connections can be made: ± scale for lin, ± 10*scale for exp.
    # Get the sources and associated targets for each MPI process
    sources = []
    targets = []
    lim = scale if rule == 'lin' else 10*scale
    for s in source_ids[rank::size]:
        # bounding-box prefilter on x and y distances
        keep = (np.abs(positions[0, target_ids] - positions[0, s]) < lim)
        keep *= (np.abs(positions[1, target_ids] - positions[1, s]) < lim)
        if b_one_pop:
            keep[s] = 0
        sources.append(s)
        targets.append(target_ids[keep])
    # the number of trials should be done depending on total number of
    # neighbours available, so we compute this number
    local_neighbours = 0
    for tgt_list in targets:
        local_neighbours += len(tgt_list)
    tot_neighbours = comm.gather(local_neighbours, root=0)
    if rank == 0:
        final_tot = np.sum(tot_neighbours)
        assert final_tot > num_edges, \
            "Scale is too small: there are not enough close neighbours to " +\
            "create the required number of connections. Increase `scale` " +\
            "or `neuron_density`."
    else:
        final_tot = None
    final_tot = comm.bcast(final_tot, root=0)
    neigh_norm = 1. / final_tot
    # try to create edges until num_edges is attained
    if rank == 0:
        ia_edges = np.zeros((num_edges, 2), dtype=int)
    else:
        ia_edges = None
    num_ecurrent = 0
    while num_ecurrent < num_edges:
        # give each source a number of trials proportional to its share of
        # the admissible neighbours (at least one trial per source)
        trials = []
        for tgt_list in targets:
            trials.append(max(
                int(len(tgt_list)*(num_edges - num_ecurrent)*neigh_norm), 1))
        # try to create edges
        edges_tmp = [[], []]
        dist_local = []
        total_trials = int(np.sum(trials))
        local_sources = np.repeat(sources, trials)
        local_targets = np.zeros(total_trials, dtype=int)
        current_pos = 0
        for tgts, num_try in zip(targets, trials):
            # pick candidate targets uniformly among each source's neighbours
            t = np.random.randint(0, len(tgts), num_try)
            local_targets[current_pos:current_pos + num_try] = tgts[t]
            current_pos += num_try
        # accept each candidate with the distance-rule probability
        test = dist_rule(rule, scale, positions[:, local_sources],
                         positions[:, local_targets], dist=dist_local)
        test = np.greater(test, np.random.uniform(size=total_trials))
        edges_tmp[0].extend(local_sources[test])
        edges_tmp[1].extend(local_targets[test])
        dist_local = np.array(dist_local)[test]
        comm.Barrier()
        # gather the result in root and assess the current number of edges
        edges_tmp = comm.gather(edges_tmp, root=0)
        dist_local = comm.gather(dist_local, root=0)
        if rank == 0:
            edges_tmp = np.concatenate(edges_tmp, axis=1).T
            dist_local = np.concatenate(dist_local)
            # if we're at the end, we'll make too many edges, so we keep
            # only the necessary fraction that we pick randomly
            num_desired = num_edges - num_ecurrent
            num_tmp = len(edges_tmp)
            if num_desired < num_tmp:
                chosen = np.random.choice(num_tmp, num_desired,
                                          replace=multigraph)
                edges_tmp = edges_tmp[chosen]
                dist_local = np.array(dist_local)[chosen]
            # drop invalid edges (duplicates/self-loops) and append the rest
            ia_edges, num_ecurrent = _filter(
                ia_edges, edges_tmp, num_ecurrent, edges_hash, b_one_pop,
                multigraph, distance=distance_tmp, dist_tmp=dist_local)
        num_ecurrent = comm.bcast(num_ecurrent, root=0)
        comm.Barrier()
    _finalize_random(rank)
    # the 'nngt' backend is made to be distributed, but the others are not
    if nngt.get_config("backend") == "nngt":
        # scatter the edges (and their distances) back to every process
        local_edges = None
        if rank == 0:
            local_edges = [ia_edges[i::size, :] for i in range(size)]
            distance_tmp = [distance_tmp[i::size] for i in range(size)]
        local_edges = comm.scatter(local_edges, root=0)
        distance_tmp = comm.scatter(distance_tmp, root=0)
        distance.extend(distance_tmp)
        return local_edges
    else:
        # all the data is gathered on the root process
        if rank == 0:
            distance.extend(distance_tmp)
            return ia_edges
        else:
            return None
# --------------------- #
# Unavailable functions #
# --------------------- #
def _not_yet(*args, **kwargs):
raise NotImplementedError("Not available with MPI yet.")
# Connectors without an MPI implementation: each alias raises
# NotImplementedError when called.
_circular = _not_yet
_erdos_renyi = _not_yet
_newman_watts = _not_yet
_price_scale_free = _not_yet
_random_scale_free = _not_yet
_unique_rows = _not_yet
# ----- #
# Tools #
# ----- #
def _mpi_and_random_init():
    '''
    Init MPI comm and information and seed the RNGs.

    Returns ``(comm, size, rank)`` where `comm` is a private clone of
    ``COMM_WORLD``.  Gives each MPI process its own numpy seed: reuses the
    pre-generated local seeds when they exist and are unused, otherwise
    derives fresh ones from a master seed, and marks the seeds as used in
    the nngt module state.
    '''
    comm = MPI.COMM_WORLD.Clone()
    size = comm.Get_size()
    rank = comm.Get_rank()
    # Random number generation seeding
    seeds = None
    if not nngt._seeded_local:
        # no local seeds were generated, set them from initial msd
        msd = nngt.get_config("msd")
        seeds = [msd + i + 1 for i in range(size)]
        nngt._config['seeds'] = seeds
    elif not nngt._used_local:
        # local seeds were generated but not used, use them
        seeds = nngt.get_config('seeds')
    else:
        # local seeds were generated and used, generate new ones from new msd
        if rank == 0:
            # leave room so that msd + size stays below 2**31
            msd = np.random.randint(0, 2**31 - size - 1)
        else:
            msd = None
        # every rank derives its seed from the same broadcast msd
        msd = comm.bcast(msd, root=0)
        seeds = [msd + i + 1 for i in range(size)]
    seed = seeds[rank]
    np.random.seed(seed)
    nngt._seeded_local = True
    nngt._used_local = True
    return comm, size, rank
def _finalize_random(rank):
    '''
    Resynchronize the RNGs after a distributed generation step: the root
    draws a fresh seed, broadcasts it, and every rank reseeds numpy with
    it so all processes agree again.
    '''
    comm = MPI.COMM_WORLD
    new_seed = np.random.randint(0, 2**31 - 1) if rank == 0 else None
    np.random.seed(comm.bcast(new_seed, root=0))
| Silmathoron/NNGT | nngt/generation/mpi_connect.py | Python | gpl-3.0 | 13,750 | [
"Gaussian"
] | fa96913795309c31f713e3b5db0d2b5284e1e61c4b862d2ad7ff86acbe393fbe |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
Module to hold and distribute the -D dispersion correction parameters.
"""
from __future__ import absolute_import
from __future__ import print_function
try:
from .p4regex import *
except ImportError:
from .exceptions import *
## ==> Dispersion Aliases and Parameters <== ##
# This defines the -D aliases for all of psi4: user-facing suffix -> the
# effective dispersion-correction level implemented below.
dash_alias = {
    '-d': '-d2p4',  # means -D aliases to a -D2 level dispersion correction, as opposed to -D3
    '-d2': '-d2p4',  # means -D2 uses psi4's internal -D2 correction, as opposed to calling dftd3
    '-d3': '-d3zero',  # means -D3 uses the original zero-damping fn, as opposed to bj-damping
    '-d3m': '-d3mzero',  # means -D3 uses the 3-param zero-damping fn, refit for short-range
}
# inverse mapping: effective level -> user-facing alias
dash_alias_reverse = {v : k for k, v in dash_alias.items()}
# The dashcoeff dict below defines the -D parameters for all of psi4. 'd2p4' are
# taken from already defined functionals in psi4. The remainder of the parameters are
# from http://toc.uni-muenster.de/DFTD3/ on September 25, 2012, with the dict keys
# translated from Turbomole to Psi4 functional names.
dashcoeff = {
'd2p4': {
'b97-d' : {'s6': 1.25}, # IN
'blyp' : {'s6': 1.20}, # IN
'b3lyp' : {'s6': 1.05}, # IN
'bp86' : {'s6': 1.05}, # IN
'pbe' : {'s6': 0.75}, # IN
'pbe0' : {'s6': 0.60}, # IN
'dsd-blyp' : {'s6': 0.35}, # IN but different btwn dftd3 and psi4 and variants need to be worked out
'dsd-pbep86' : {'s6': 0.29}, # IN
'dsd-pbepbe' : {'s6': 0.42}, # IN
'b2plyp' : {'s6': 0.55}, # IN
},
'd2gr': {
'blyp' : {'s6': 1.2, 'alpha6': 20.0}, # in psi4 #IN
'bp86' : {'s6': 1.05, 'alpha6': 20.0}, # in psi4 #IN
'b97-d' : {'s6': 1.25, 'alpha6': 20.0}, # in psi4 #IN
'revpbe' : {'s6': 1.25, 'alpha6': 20.0},
'pbe' : {'s6': 0.75, 'alpha6': 20.0}, # in psi4 #IN
'tpss' : {'s6': 1.0, 'alpha6': 20.0},
'b3lyp' : {'s6': 1.05, 'alpha6': 20.0}, # in psi4 #IN
'pbe0' : {'s6': 0.6, 'alpha6': 20.0}, # in psi4 #IN
'pw6b95' : {'s6': 0.5, 'alpha6': 20.0},
'tpss0' : {'s6': 0.85, 'alpha6': 20.0},
'b2plyp' : {'s6': 0.55, 'alpha6': 20.0}, # in psi4 #IN
'b2gp-plyp' : {'s6': 0.4, 'alpha6': 20.0},
'dsd-blyp' : {'s6': 0.41, 'alpha6': 60.0}, # in psi4
},
'd3zero': {
'b1b95' : {'s6': 1.0, 's8': 1.868, 'sr6': 1.613, 'alpha6': 14.0},
'b2gpplyp' : {'s6': 0.56, 's8': 0.760, 'sr6': 1.586, 'alpha6': 14.0},
'b3lyp' : {'s6': 1.0, 's8': 1.703, 'sr6': 1.261, 'alpha6': 14.0}, # in psi4 #IN
'b97-d' : {'s6': 1.0, 's8': 0.909, 'sr6': 0.892, 'alpha6': 14.0}, # in psi4 #IN
'bhlyp' : {'s6': 1.0, 's8': 1.442, 'sr6': 1.370, 'alpha6': 14.0},
'blyp' : {'s6': 1.0, 's8': 1.682, 'sr6': 1.094, 'alpha6': 14.0}, # in psi4 #IN
'bp86' : {'s6': 1.0, 's8': 1.683, 'sr6': 1.139, 'alpha6': 14.0}, # in psi4 #IN
'bpbe' : {'s6': 1.0, 's8': 2.033, 'sr6': 1.087, 'alpha6': 14.0},
'mpwlyp' : {'s6': 1.0, 's8': 1.098, 'sr6': 1.239, 'alpha6': 14.0},
'pbe' : {'s6': 1.0, 's8': 0.722, 'sr6': 1.217, 'alpha6': 14.0}, # in psi4 #IN
'pbe0' : {'s6': 1.0, 's8': 0.928, 'sr6': 1.287, 'alpha6': 14.0}, # in psi4 #IN
'pw6b95' : {'s6': 1.0, 's8': 0.862, 'sr6': 1.532, 'alpha6': 14.0},
'pwb6k' : {'s6': 1.0, 's8': 0.550, 'sr6': 1.660, 'alpha6': 14.0},
'revpbe' : {'s6': 1.0, 's8': 1.010, 'sr6': 0.923, 'alpha6': 14.0},
'tpss' : {'s6': 1.0, 's8': 1.105, 'sr6': 1.166, 'alpha6': 14.0},
'tpss0' : {'s6': 1.0, 's8': 1.242, 'sr6': 1.252, 'alpha6': 14.0},
'tpssh' : {'s6': 1.0, 's8': 1.219, 'sr6': 1.223, 'alpha6': 14.0},
'bop' : {'s6': 1.0, 's8': 1.975, 'sr6': 0.929, 'alpha6': 14.0},
'mpw1b95' : {'s6': 1.0, 's8': 1.118, 'sr6': 1.605, 'alpha6': 14.0},
'mpwb1k' : {'s6': 1.0, 's8': 1.061, 'sr6': 1.671, 'alpha6': 14.0},
'olyp' : {'s6': 1.0, 's8': 1.764, 'sr6': 0.806, 'alpha6': 14.0},
'opbe' : {'s6': 1.0, 's8': 2.055, 'sr6': 0.837, 'alpha6': 14.0},
'otpss' : {'s6': 1.0, 's8': 1.494, 'sr6': 1.128, 'alpha6': 14.0},
'pbe38' : {'s6': 1.0, 's8': 0.998, 'sr6': 1.333, 'alpha6': 14.0},
'pbesol' : {'s6': 1.0, 's8': 0.612, 'sr6': 1.345, 'alpha6': 14.0},
'revssb' : {'s6': 1.0, 's8': 0.560, 'sr6': 1.221, 'alpha6': 14.0},
'ssb' : {'s6': 1.0, 's8': 0.663, 'sr6': 1.215, 'alpha6': 14.0},
'b3pw91' : {'s6': 1.0, 's8': 1.775, 'sr6': 1.176, 'alpha6': 14.0},
'bmk' : {'s6': 1.0, 's8': 2.168, 'sr6': 1.931, 'alpha6': 14.0},
'camb3lyp' : {'s6': 1.0, 's8': 1.217, 'sr6': 1.378, 'alpha6': 14.0},
'lcwpbe' : {'s6': 1.0, 's8': 1.279, 'sr6': 1.355, 'alpha6': 14.0},
'm05-2x' : {'s6': 1.0, 's8': 0.00 , 'sr6': 1.417, 'alpha6': 14.0}, # in psi4 #IN
'm05' : {'s6': 1.0, 's8': 0.595, 'sr6': 1.373, 'alpha6': 14.0}, # in psi4 #IN
'm062x' : {'s6': 1.0, 's8': 0.00 , 'sr6': 1.619, 'alpha6': 14.0},
'm06hf' : {'s6': 1.0, 's8': 0.00 , 'sr6': 1.446, 'alpha6': 14.0},
'm06l' : {'s6': 1.0, 's8': 0.00 , 'sr6': 1.581, 'alpha6': 14.0},
'm06' : {'s6': 1.0, 's8': 0.00 , 'sr6': 1.325, 'alpha6': 14.0},
'hcth120' : {'s6': 1.0, 's8': 1.206, 'sr6': 1.221, 'alpha6': 14.0}, # in psi4 #IN
'b2plyp' : {'s6': 0.64, 's8': 1.022, 'sr6': 1.427, 'alpha6': 14.0}, # in psi4 #IN
'dsd-blyp' : {'s6': 0.50, 's8': 0.705, 'sr6': 1.569, 'alpha6': 14.0}, # in psi4
'ptpss' : {'s6': 0.75, 's8': 0.879, 'sr6': 1.541, 'alpha6': 14.0},
'pwpb95' : {'s6': 0.82, 's8': 0.705, 'sr6': 1.557, 'alpha6': 14.0},
'revpbe0' : {'s6': 1.0, 's8': 0.792, 'sr6': 0.949, 'alpha6': 14.0},
'revpbe38' : {'s6': 1.0, 's8': 0.862, 'sr6': 1.021, 'alpha6': 14.0},
'rpw86pbe' : {'s6': 1.0, 's8': 0.901, 'sr6': 1.224, 'alpha6': 14.0},
},
'd3bj': {
'b1b95' : {'s6': 1.000, 's8': 1.4507, 'a1': 0.2092, 'a2': 5.5545},
'b2gpplyp' : {'s6': 0.560, 's8': 0.2597, 'a1': 0.0000, 'a2': 6.3332},
'b3pw91' : {'s6': 1.000, 's8': 2.8524, 'a1': 0.4312, 'a2': 4.4693},
'bhlyp' : {'s6': 1.000, 's8': 1.0354, 'a1': 0.2793, 'a2': 4.9615},
'bmk' : {'s6': 1.000, 's8': 2.0860, 'a1': 0.1940, 'a2': 5.9197},
'bop' : {'s6': 1.000, 's8': 3.295, 'a1': 0.4870, 'a2': 3.5043},
'bpbe' : {'s6': 1.000, 's8': 4.0728, 'a1': 0.4567, 'a2': 4.3908},
'camb3lyp' : {'s6': 1.000, 's8': 2.0674, 'a1': 0.3708, 'a2': 5.4743},
'lcwpbe' : {'s6': 1.000, 's8': 1.8541, 'a1': 0.3919, 'a2': 5.0897},
'mpw1b95' : {'s6': 1.000, 's8': 1.0508, 'a1': 0.1955, 'a2': 6.4177},
'mpwb1k' : {'s6': 1.000, 's8': 0.9499, 'a1': 0.1474, 'a2': 6.6223},
'mpwlyp' : {'s6': 1.000, 's8': 2.0077, 'a1': 0.4831, 'a2': 4.5323},
'olyp' : {'s6': 1.000, 's8': 2.6205, 'a1': 0.5299, 'a2': 2.8065},
'opbe' : {'s6': 1.000, 's8': 3.3816, 'a1': 0.5512, 'a2': 2.9444},
'otpss' : {'s6': 1.000, 's8': 2.7495, 'a1': 0.4634, 'a2': 4.3153},
'pbe38' : {'s6': 1.000, 's8': 1.4623, 'a1': 0.3995, 'a2': 5.1405},
'pbesol' : {'s6': 1.000, 's8': 2.9491, 'a1': 0.4466, 'a2': 6.1742},
'ptpss' : {'s6': 0.750, 's8': 0.2804, 'a1': 0.000, 'a2': 6.5745},
'pwb6k' : {'s6': 1.000, 's8': 0.9383, 'a1': 0.1805, 'a2': 7.7627},
'revssb' : {'s6': 1.000, 's8': 0.4389, 'a1': 0.4720, 'a2': 4.0986},
'ssb' : {'s6': 1.000, 's8': -0.1744, 'a1': -0.0952, 'a2': 5.2170},
'tpssh' : {'s6': 1.000, 's8': 0.4243, 'a1': 0.0000, 'a2': 5.5253},
'hcth120' : {'s6': 1.000, 's8': 1.0821, 'a1': 0.3563, 'a2': 4.3359}, # in psi4 #IN
'b2plyp' : {'s6': 0.640, 's8': 0.9147, 'a1': 0.3065, 'a2': 5.0570}, # in psi4 #IN
'b3lyp' : {'s6': 1.000, 's8': 1.9889, 'a1': 0.3981, 'a2': 4.4211}, # in psi4 #IN
'b97-d' : {'s6': 1.000, 's8': 2.2609, 'a1': 0.5545, 'a2': 3.2297}, # in psi4 #IN
'blyp' : {'s6': 1.000, 's8': 2.6996, 'a1': 0.4298, 'a2': 4.2359}, # in psi4 #IN
'bp86' : {'s6': 1.000, 's8': 3.2822, 'a1': 0.3946, 'a2': 4.8516}, # in psi4 #IN
'dsd-blyp' : {'s6': 0.500, 's8': 0.2130, 'a1': 0.000, 'a2': 6.0519}, # in psi4
'pbe0' : {'s6': 1.000, 's8': 1.2177, 'a1': 0.4145, 'a2': 4.8593}, # in psi4 #IN
'pbe' : {'s6': 1.000, 's8': 0.7875, 'a1': 0.4289, 'a2': 4.4407}, # in psi4 #IN
'pw6b95' : {'s6': 1.000, 's8': 0.7257, 'a1': 0.2076, 'a2': 6.3750},
'pwpb95' : {'s6': 0.820, 's8': 0.2904, 'a1': 0.0000, 'a2': 7.3141},
'revpbe0' : {'s6': 1.000, 's8': 1.7588, 'a1': 0.4679, 'a2': 3.7619},
'revpbe38' : {'s6': 1.000, 's8': 1.4760, 'a1': 0.4309, 'a2': 3.9446},
'revpbe' : {'s6': 1.000, 's8': 2.3550, 'a1': 0.5238, 'a2': 3.5016},
'rpw86pbe' : {'s6': 1.000, 's8': 1.3845, 'a1': 0.4613, 'a2': 4.5062},
'tpss0' : {'s6': 1.000, 's8': 1.2576, 'a1': 0.3768, 'a2': 4.5865},
'tpss' : {'s6': 1.000, 's8': 1.9435, 'a1': 0.4535, 'a2': 4.4752},
},
'd3mzero': { # alpha6 = 14.0
'b2plyp' : {'s6': 0.640, 's8': 0.717543, 'sr6': 1.313134, 'beta': 0.016035},
'b3lyp' : {'s6': 1.000, 's8': 1.532981, 'sr6': 1.338153, 'beta': 0.013988},
'b97-d' : {'s6': 1.000, 's8': 1.020078, 'sr6': 1.151808, 'beta': 0.035964},
'blyp' : {'s6': 1.000, 's8': 1.841686, 'sr6': 1.279637, 'beta': 0.014370},
'bp86' : {'s6': 1.000, 's8': 1.945174, 'sr6': 1.233460, 'beta': 0.000000},
'pbe' : {'s6': 1.000, 's8': 0.000000, 'sr6': 2.340218, 'beta': 0.129434},
'pbe0' : {'s6': 1.000, 's8': 0.000081, 'sr6': 2.077949, 'beta': 0.116755},
'lcwpbe' : {'s6': 1.000, 's8': 1.280619, 'sr6': 1.366361, 'beta': 0.003160},
},
'd3mbj': {
'b2plyp' : {'s6': 0.640, 's8': 0.672820, 'a1': 0.486434, 'a2': 3.656466},
'b3lyp' : {'s6': 1.000, 's8': 1.466677, 'a1': 0.278672, 'a2': 4.606311},
'b97-d' : {'s6': 1.000, 's8': 1.206988, 'a1': 0.240184, 'a2': 3.864426},
'blyp' : {'s6': 1.000, 's8': 1.875007, 'a1': 0.448486, 'a2': 3.610679},
'bp86' : {'s6': 1.000, 's8': 3.140281, 'a1': 0.821850, 'a2': 2.728151},
'pbe' : {'s6': 1.000, 's8': 0.358940, 'a1': 0.012092, 'a2': 5.938951},
'pbe0' : {'s6': 1.000, 's8': 0.528823, 'a1': 0.007912, 'a2': 6.162326},
'lcwpbe' : {'s6': 1.000, 's8': 0.906564, 'a1': 0.563761, 'a2': 3.593680},
},
}
# Full list of all possible endings: every effective level name plus every
# alias with its leading '-' stripped (e.g. 'd', 'd2', 'd3', 'd3m').
full_dash_keys = list(dashcoeff) + [x.replace('-', '') for x in list(dash_alias)]
def dash_server(func, dashlvl):
    """Return the default empirical -D parameter dict for functional *func*
    at dispersion level *dashlvl* (aliases such as 'd' or 'd3' accepted).

    Raises ValidationError for an unknown level or functional."""
    # Resolve aliases ('-d', '-d2', ...) to the effective dispersion level.
    dashlvl = dashlvl.lower()
    aliased = '-' + dashlvl
    if aliased in dash_alias.keys():
        dashlvleff = dash_alias[aliased][1:]
    else:
        dashlvleff = dashlvl
    if dashlvleff not in dashcoeff.keys():
        raise ValidationError("""-D correction level %s is not available. Choose among %s.""" % (dashlvl, dashcoeff.keys()))
    # Functional names are stored lowercase in the parameter table.
    func = func.lower()
    if func not in dashcoeff[dashlvleff].keys():
        raise ValidationError("""Functional %s is not available for -D level %s.""" % (func, dashlvl))
    return dashcoeff[dashlvleff][func]
def dftd3_coeff_formatter(dashlvl, dashcoeff):
    """Format a parameter line for the dftd3 program parameter file.

    Fields are: s6 rs6 s18 rs8 alpha6 version
      d2p4:    s6  sr6=1.1  s8=0.0  a2=None  alpha6=20.0  version=2
      d2gr:    s6  sr6=1.1  s8=0.0  a2=None  alpha6       version=2
      d3zero:  s6  sr6      s8      a2=None  alpha6       version=3
      d3bj:    s6  a1       s8      a2       alpha6=None  version=4
      d3mzero: s6  sr6      s8      beta     alpha6=14.0  version=5
      d3mbj:   s6  a1       s8      a2       alpha6=None  version=6

    NOTE: the *dashcoeff* argument here is the parameter dict for a single
    functional; it shadows the module-level ``dashcoeff`` table.
    """
    fmt = '%12.6f %12.6f %12.6f %12.6f %12.6f %6d\n'
    # resolve aliases to the effective dispersion level
    aliased = '-' + dashlvl
    dashlvleff = dash_alias[aliased][1:] if aliased in dash_alias.keys() else dashlvl
    level = dashlvleff.lower()
    if level == 'd2p4':
        values = (dashcoeff['s6'], 1.1, 0.0, 0.0, 20.0, 2)
    elif level == 'd2gr':
        values = (dashcoeff['s6'], 1.1, 0.0, 0.0, dashcoeff['alpha6'], 2)
    elif level == 'd3zero':
        values = (dashcoeff['s6'], dashcoeff['sr6'], dashcoeff['s8'], 1.0,
                  dashcoeff['alpha6'], 3)
    elif level == 'd3bj':
        values = (dashcoeff['s6'], dashcoeff['a1'], dashcoeff['s8'],
                  dashcoeff['a2'], 0.0, 4)
    elif level == 'd3mzero':
        values = (dashcoeff['s6'], dashcoeff['sr6'], dashcoeff['s8'],
                  dashcoeff['beta'], 14.0, 5)
    elif level == 'd3mbj':
        values = (dashcoeff['s6'], dashcoeff['a1'], dashcoeff['s8'],
                  dashcoeff['a2'], 0.0, 6)
    else:
        raise ValidationError("""-D correction level %s is not available. Choose among %s.""" % (dashlvl, dashcoeff.keys()))
    return fmt % values
| kannon92/psi4 | psi4/driver/qcdb/dashparam.py | Python | gpl-2.0 | 15,141 | [
"Psi4",
"TURBOMOLE"
] | f730a37090a286a977b263e26b0b2a557be4996e0e3fe1b8307d2557c4e23243 |
# creates: fcc100.png fcc110.png bcc100.png fcc111.png bcc110.png bcc111.png hcp0001.png fcc111o.png bcc110o.png bcc111o.png hcp0001o.png ontop-site.png hollow-site.png fcc-site.png hcp-site.png bridge-site.png
import os
from ase import *
from ase.lattice.surface import *
# Surface builders with orthogonal unit cells.
ortho = 'fcc100 fcc110 bcc100'.split()
# Surface builders with hexagonal unit cells.
# NOTE(review): this shadows the builtin `hex()` for the rest of the script.
hex = 'fcc111 bcc110 bcc111 hcp0001'.split()
# Representative element per lattice type, keyed by the 3-letter prefix.
symbols = {'fcc': 'Cu', 'bcc': 'Fe', 'hcp': 'Ru'}
# Drawing radii per lattice type, used when rendering the PNGs.
radii = {'fcc': 1.1, 'bcc': 1.06, 'hcp': 1.08}
# Adsorbate element to place on each named site ('F' is the fallback).
adsorbates= {'ontop': 'H', 'hollow': 'O', 'fcc': 'N', 'hcp': 'C',
             'bridge': 'F'}
def save(name, slab):
    """Render *slab* to '<name>.png'; drawing radius keyed by lattice prefix."""
    filename = name + '.png'
    # name[:3] is the lattice type ('fcc', 'bcc' or 'hcp')
    write(filename, slab, show_unit_cell=2, radii=radii[name[:3]], scale=10)
# Build every surface, decorate each adsorption site, and render it.
for name in ortho + hex:
    # resolve the builder function (e.g. fcc100) by its name
    f = eval(name)
    slab = f(symbols[name[:3]], (3, 4, 5), vacuum=4)
    for site in slab.adsorbate_info['sites']:
        # bridge sites get a slightly larger adsorption height
        if site.endswith('bridge'):
            h = 1.5
        else:
            h = 1.2
        add_adsorbate(slab, adsorbates.get(site, 'F'), h, site)
    save(name, slab)
# Hexagonal surfaces again, this time with orthogonal cells ('o' suffix).
for name in hex:
    f = eval(name)
    slab = f(symbols[name[:3]], (3, 4, 5), vacuum=4, orthogonal=True)
    for site in slab.adsorbate_info['sites']:
        if site.endswith('bridge'):
            h = 1.5
        else:
            h = 1.2
        add_adsorbate(slab, adsorbates.get(site, 'F'), h, site)
    save(name + 'o', slab)
# One standalone image per adsorbate symbol, used as a site legend.
for site, symbol in adsorbates.items():
    write('%s-site.png' % site, Atoms(symbol), radii=1.08, scale=10)
| freephys/python_ase | doc/ase/surface.py | Python | gpl-3.0 | 1,427 | [
"ASE"
] | a6616bb83721d01b3dc197b4ac9a6f0b55c62312461827d7d5c85f043fa9cc38 |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 9 00:06:24 2014
@author: kristian
"""
from skumleskogen import *
import time
################## OPTIONS ##################
debug_on = True       # print node diagnostics on every loop iteration
write_to_file = True  # mirror console output into output.txt
# hukommelse ("memory"): per-node record of visited directions and locks.
hukommelse = {}
# sti_totalt ("total path"): log of every move made through the maze.
sti_totalt = ["inn"]
# Nodes currently known to carry a lock.
noder_med_lås = set()
# Stack of directions taken, so backtracking knows where we came from.
forrige_retning = []
file = None
# `del print` at module level always raises NameError (print is a builtin,
# not a module global); the broad except keeps that harmless.
try:
    del print
except:
    pass
# Keep a handle on the real builtin print before it may be rebound below.
_print = print
class Print_To_File(object):
    """``print()`` replacement that echoes to the console and, when the
    module-level log ``file`` is open, appends the same line to it."""

    def __init__(self, *text):
        # Forward the arguments unpacked so the console output matches the
        # builtin print; the original passed the tuple itself, printing
        # "('a', 1)" instead of "a 1".
        _print(*text)
        # Mirror print's space-separated formatting in the log file (the
        # original concatenated the arguments with no separator).
        string = " ".join(str(t) for t in text)
        if file:
            file.write("\n" + string)
# When logging is enabled, shadow the builtin print with the logging class
# and open the append-mode log file it writes to.
if write_to_file:
    print = Print_To_File
    file = open("output.txt", mode="a")
class MovementException(Exception):
    """Raised when moving to a child node fails (locked node or dead end)."""

    def __init__(self, error):
        # keep the reason available to callers that print the exception
        self.error = error

    def __str__(self):
        return "{}".format(self.error)
def start_solving():
    """Walk the maze until the exit is found.

    Main loop strategy: flee stench, pick up keys, prefer unexplored or
    unlockable children (left before right), open locks when enough keys
    are held, and backtrack when stuck — remembering which child edge was
    locked so it can be retried once keys are available.
    """
    print("Er inngang:", er_inngang())
    nøkler = 0  # number of keys currently held
    while True:
        debug()
        husk_node()
        # Stench means danger: retreat immediately.
        if er_stank():
            if gaa_tilbake():
                sti_totalt.append("STANK! tilbake til " + str(nummer()))
                kom_fra_retning = forrige_retning.pop(len(forrige_retning) - 1)
            continue
        # Pick up a key when standing on one.
        if er_nokkel():
            if plukk_opp():
                nøkler += 1
                sti_totalt.append("plukket nøkkel " + str(nøkler))
            continue
        # Try the left child if it is unexplored or its lock can be opened.
        if (not hukommelse[nummer()]["venstre"]) \
                or kan_låse_opp(nummer(), nøkler, "venstre"):
            try:
                # optimistically clear the remembered locks before moving
                hukommelse[nummer()]["lås"][0] = False
                hukommelse[nummer()]["superlås"][0] = False
                besøk_node("venstre")
            except MovementException as ex:
                print(ex)
            else:
                forrige_retning.append("venstre")
                sti_totalt.append("venstre " + str(nummer()))
                continue
        # Then the right child, same logic.
        if (not hukommelse[nummer()]["høyre"]) \
                or kan_låse_opp(nummer(), nøkler, "høyre"):
            try:
                hukommelse[nummer()]["lås"][1] = False
                hukommelse[nummer()]["superlås"][1] = False
                besøk_node("høyre")
            except MovementException as ex:
                print(ex)
            else:
                forrige_retning.append("høyre")
                sti_totalt.append("høyre " + str(nummer()))
                continue
        # Current node is locked: try to open it (super locks cost 2 keys).
        if er_laas():
            noder_med_lås.add(nummer())
            if er_superlaas():
                if nøkler >= 2:
                    utfall = laas_opp()
                    if utfall:
                        nøkler -= 2
                        sti_totalt.append("låste opp sl " + str(nøkler))
                        if nummer() in noder_med_lås:
                            noder_med_lås.remove(nummer())
                        continue
                else:
                    noder_med_lås.add(nummer())
            else:
                if nøkler >= 1:
                    utfall = laas_opp()
                    if utfall:
                        nøkler -= 1
                        sti_totalt.append("låste opp s " + str(nøkler))
                        if nummer() in noder_med_lås:
                            noder_med_lås.remove(nummer())
                        continue
        if er_utgang():
            gaa_ut()
            return
        # We are stuck; some nodes must be locked.
        har_lås = er_laas()
        har_superlås = er_superlaas()
        if har_lås and har_superlås:
            # The lock was not an ordinary lock but a super lock.
            har_lås = False
        if barn_har_lås(nummer()):
            har_lås = True
        if barn_har_superlås(nummer()):
            har_superlås = True
        # Backtrack, recording on the parent which child edge was locked.
        if gaa_tilbake():
            sti_totalt.append("tilbake til " + str(nummer()))
            kom_fra_retning = forrige_retning.pop(len(forrige_retning) - 1)
            print("kom fra:", kom_fra_retning)
            if har_lås:
                print("har lås")
                if kom_fra_retning == "venstre":
                    hukommelse[nummer()]["lås"][0] = True
                else:
                    hukommelse[nummer()]["lås"][1] = True
            if har_superlås:
                print("har superlås")
                if kom_fra_retning == "venstre":
                    hukommelse[nummer()]["superlås"][0] = True
                else:
                    hukommelse[nummer()]["superlås"][1] = True
            print(hukommelse[nummer()])
        else:
            print("KLARTE IKKE Å GÅ TILBAKE!!!")
            return
def kan_låse_opp(n, nøkler, retning):
    """Return True when the lock remembered for node *n* in direction
    *retning* ('venstre'/'høyre') can be opened with *nøkler* keys."""
    idx = 1 if retning == "høyre" else 0
    minne = hukommelse[n]
    # an ordinary lock needs one key, a super lock needs two
    return bool((minne["lås"][idx] and nøkler >= 1)
                or (minne["superlås"][idx] and nøkler >= 2))
def barn_har_lås(n):
    """True when either remembered child edge of node *n* has an ordinary lock."""
    return any(hukommelse[n]["lås"])
def barn_har_superlås(n):
    """True when either remembered child edge of node *n* has a super lock."""
    return any(hukommelse[n]["superlås"])
def husk_node():
    """Ensure the current node has a memory record (visited flags + locks)."""
    node = nummer()
    hukommelse.setdefault(node, {
        "venstre": False, "høyre": False,
        "lås": [False, False], "superlås": [False, False]})
def besøk_node(retning):
    """Try to walk to the child in direction *retning*; remember a successful
    edge, raise MovementException when the move is refused."""
    node = nummer()
    moves = {"venstre": gaa_venstre, "høyre": gaa_hoyre}
    if retning not in moves:
        print("Ugyldig retning oppgitt!", node, retning)
        return
    if moves[retning]():
        # the edge is now known to be traversable
        hukommelse[node][retning] = True
    elif er_laas():
        raise MovementException("Er låst")
    else:
        raise MovementException("Er blindvei")
def debug():
    """Dump the current node's flags to the console when ``debug_on`` is set."""
    if not debug_on:
        return
    print("/"*25 + "DEBUG:" + "/"*25)
    print(("Nummer: {n}\n" +
           "Type:\n " +
           "i: {i}, l: {l}, sl: {sl}, st: {st}, nk: {nk}, v: {v}, u: {u}" +
           "\nLabel: {la}")
          .format(n=nummer(), i=er_inngang(), l=er_laas(),
                  sl=er_superlaas(), st=er_stank(), u=er_utgang(),
                  v=er_vanlig(), nk=er_nokkel(), la=label(nummer())))
def main():
    """Entry point: solve the maze, reporting wall-clock times and failures."""
    def get_hours():
        # HH:MM:SS clock time. The previous time.asctime().split(' ')[4]
        # broke on two-digit days: asctime pads single-digit days with an
        # extra space, so on days >= 10 the fields shift and index 4 held
        # the year instead of the time.
        return time.strftime("%H:%M:%S")

    start_time = time.time()
    print("Starting. Time:", get_hours())
    # Start solving the maze.
    try:
        start_solving()
    # In case of failure, e.g. a rabbit ate you.
    except Exception as e:
        # message typos fixed: "occured" -> "occurred", "Exciting" -> "Exiting"
        print("Exception occurred:")
        print(e)
        print("Exiting. Time:", get_hours())
    # Done, do final actions.
    finally:
        print("\nRan for {0} seconds.".format(
            abs(round(start_time - time.time(), 4))))
        print("Maze completed.")
        print(sti_totalt)
if __name__ == "__main__":
main()
if file:
file.close()
| krissrex/python_projects | Projects/Oving10-itgk/main.py | Python | mit | 6,889 | [
"exciting"
] | 52c367741ae1c1f9abca2b74c37a3a252e437b35b5d1e837dce936f2fecb0374 |
"""
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-dimensional convolution.
correlate -- N-dimensional correlation.
fftconvolve -- N-dimensional convolution using the FFT.
convolve2d -- 2-dimensional convolution (more options).
correlate2d -- 2-dimensional correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
choose_conv_method -- Chooses faster of FFT and direct convolution methods.
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
cubic -- B-spline basis function of order 3.
quadratic -- B-spline basis function of order 2.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
cspline1d_eval -- Evaluate a cubic spline at the given points.
qspline1d_eval -- Evaluate a quadratic spline at the given points.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-dimensional order filter.
medfilt -- N-dimensional median filter.
medfilt2d -- 2-dimensional median filter (faster).
wiener -- N-dimensional wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-dimensional FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
deconvolve -- 1-d deconvolution using lfilter.
sosfilt -- 1-dimensional IIR digital linear filtering using
-- a second-order sections filter representation.
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
-- corresponds to the steady state of the step response.
sosfiltfilt -- A forward-backward filter for second-order sections.
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
resample_poly -- Resample using polyphase filtering method.
upfirdn -- Upsample, apply FIR filter, downsample.
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
bilinear_zpk -- Digital filter from an analog filter using
-- the bilinear transform.
findfreqs -- Find array of frequencies for computing filter response.
firls -- FIR filter design using least-squares error minimization.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response from TF coefficients.
freqs_zpk -- Analog filter frequency response from ZPK coefficients.
freqz -- Digital filter frequency response from TF coefficients.
freqz_zpk -- Digital filter frequency response from ZPK coefficients.
sosfreqz -- Digital filter frequency response for SOS format filter.
group_delay -- Digital filter group delay.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
minimum_phase -- Convert a linear phase FIR filter to minimum phase.
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
-- filter.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion for analog filter.
invresz -- Inverse partial fraction expansion for digital filter.
BadCoefficients -- Warning on badly conditioned filter coefficients
Lower-level filter design functions:
.. autosummary::
:toctree: generated/
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
band_stop_obj -- Band Stop Objective Function for order minimization.
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
cmplx_sort -- Sort roots based on magnitude.
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
lp2bp_zpk -- Transform a lowpass filter prototype to a bandpass filter.
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
lp2bs_zpk -- Transform a lowpass filter prototype to a bandstop filter.
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
lp2hp_zpk -- Transform a lowpass filter prototype to a highpass filter.
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
lp2lp_zpk -- Transform a lowpass filter prototype to a lowpass filter.
normalize -- Normalize polynomial representation of a transfer function.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
   bessel -- Bessel (no order selection available -- try buttord)
iirnotch -- Design second-order IIR notch digital filter.
iirpeak -- Design second-order IIR peak (resonant) digital filter.
Continuous-Time Linear Systems
==============================
.. autosummary::
:toctree: generated/
lti -- Continuous-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
lsim -- continuous-time simulation of output to linear system.
lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
impulse -- impulse response of linear, time-invariant (LTI) system.
impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
   step -- step response of continuous-time LTI system.
step2 -- like step, but `scipy.integrate.odeint` is used.
freqresp -- frequency response of a continuous-time LTI system.
bode -- Bode magnitude and phase data (continuous-time LTI).
Discrete-Time Linear Systems
============================
.. autosummary::
:toctree: generated/
dlti -- Discrete-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
dlsim -- simulation of output to a discrete-time linear system.
dimpulse -- impulse response of a discrete-time LTI system.
dstep -- step response of a discrete-time LTI system.
dfreqresp -- frequency response of a discrete-time LTI system.
dbode -- Bode magnitude and phase data (discrete-time LTI).
LTI Representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- transfer function to zero-pole-gain.
tf2sos -- transfer function to second-order sections.
tf2ss -- transfer function to state-space.
zpk2tf -- zero-pole-gain to transfer function.
zpk2sos -- zero-pole-gain to second-order sections.
zpk2ss -- zero-pole-gain to state-space.
   ss2tf -- state-space to transfer function.
ss2zpk -- state-space to pole-zero-gain.
sos2zpk -- second-order sections to zero-pole-gain.
sos2tf -- second-order sections to transfer function.
cont2discrete -- continuous-time to discrete-time LTI conversion.
place_poles -- pole placement.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid
max_len_seq -- Maximum length sequence
sawtooth -- Periodic sawtooth
square -- Square wave
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
unit_impulse -- Discrete unit impulse
Window functions
================
For window functions, see the `scipy.signal.windows` namespace.
In the `scipy.signal` namespace, there is a convenience function to
obtain these windows by name:
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- compute scaling function and wavelet from coefficients
daub -- return low-pass
morlet -- Complex Morlet wavelet.
qmf -- return quadrature mirror filter from low-pass
ricker -- return ricker wavelet
cwt -- perform continuous wavelet transform
Peak finding
============
.. autosummary::
:toctree: generated/
argrelmin -- Calculate the relative minima of data
argrelmax -- Calculate the relative maxima of data
argrelextrema -- Calculate the relative extrema of data
find_peaks -- Find a subset of peaks inside a signal.
find_peaks_cwt -- Find peaks in a 1-D array with wavelet transformation.
peak_prominences -- Calculate the prominence of each peak in a signal.
peak_widths -- Calculate the width of each peak in a signal.
Spectral Analysis
=================
.. autosummary::
:toctree: generated/
periodogram -- Compute a (modified) periodogram
welch -- Compute a periodogram using Welch's method
csd -- Compute the cross spectral density, using Welch's method
coherence -- Compute the magnitude squared coherence, using Welch's method
spectrogram -- Compute the spectrogram
lombscargle -- Computes the Lomb-Scargle periodogram
vectorstrength -- Computes the vector strength
stft -- Compute the Short Time Fourier Transform
istft -- Compute the Inverse Short Time Fourier Transform
check_COLA -- Check the COLA constraint for iSTFT reconstruction
check_NOLA -- Check the NOLA constraint for iSTFT reconstruction
"""
from __future__ import division, print_function, absolute_import
from . import sigtools, windows
from .waveforms import *
from ._max_len_seq import max_len_seq
from ._upfirdn import upfirdn
# The spline module (a C extension) provides:
# cspline2d, qspline2d, sepfir2d, symiirord1, symiirord2
from .spline import *
from .bsplines import *
from .filter_design import *
from .fir_filter_design import *
from .ltisys import *
from .lti_conversion import *
from .signaltools import *
from ._savitzky_golay import savgol_coeffs, savgol_filter
from .spectral import *
from .wavelets import *
from ._peak_finding import *
from .windows import get_window # keep this one in signal namespace
# deal with * -> windows.* doc-only soft-deprecation
# Window names soft-deprecated in the scipy.signal namespace: each still
# works here, but its canonical home is scipy.signal.windows.
deprecated_windows = ('boxcar', 'triang', 'parzen', 'bohman', 'blackman',
                      'nuttall', 'blackmanharris', 'flattop', 'bartlett',
                      'barthann', 'hamming', 'kaiser', 'gaussian',
                      'general_gaussian', 'chebwin', 'slepian', 'cosine',
                      'hann', 'exponential', 'tukey')

# backward compatibility imports for actually deprecated windows not
# in the above list
from .windows import hanning
def deco(name):
    """Wrap ``windows.<name>`` so its docstring carries a deprecation warning.

    Returns a thin pass-through function whose metadata (``__name__``,
    ``__module__``, ``__qualname__``) mimics the wrapped window function so it
    looks like a native ``scipy.signal`` member, and whose docstring gains a
    ``.. warning::`` directive pointing users at ``scipy.signal.windows``.
    """
    f = getattr(windows, name)

    # Add deprecation to docstring
    def wrapped(*args, **kwargs):
        return f(*args, **kwargs)
    wrapped.__name__ = name
    wrapped.__module__ = 'scipy.signal'
    if hasattr(f, '__qualname__'):
        wrapped.__qualname__ = f.__qualname__

    if f.__doc__ is not None:
        lines = f.__doc__.splitlines()
        # Locate the numpydoc "Parameters" header so the warning can be
        # inserted just above it with matching indentation.
        for li, line in enumerate(lines):
            if line.strip() == 'Parameters':
                break
        else:
            # Every window docstring is expected to have a Parameters section.
            raise RuntimeError('dev error: badly formatted doc')
        spacing = ' ' * line.find('P')
        lines.insert(li, ('{0}.. warning:: scipy.signal.{1} is deprecated,\n'
                          '{0} use scipy.signal.windows.{1} '
                          'instead.\n'.format(spacing, name)))
        wrapped.__doc__ = '\n'.join(lines)

    return wrapped
# Re-export each soft-deprecated window under scipy.signal with the warning
# added to its docstring, then drop the helpers from the module namespace.
for name in deprecated_windows:
    locals()[name] = deco(name)

del deprecated_windows, name, deco

# Public API: everything defined so far that is not private.
__all__ = [s for s in dir() if not s.startswith('_')]

from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| Eric89GXL/scipy | scipy/signal/__init__.py | Python | bsd-3-clause | 14,598 | [
"Gaussian"
] | 0088797b201f8093a3f9a4a3b4c450568f7ddc515da508266dbf9f75ef6f1cb6 |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import copy
import warnings
import numpy as np
from ..io.pick import pick_channels_cov
from ..forward import apply_forward
from ..utils import check_random_state, verbose, _time_mask
@verbose
def simulate_evoked(fwd, stc, info, cov, snr=3., tmin=None, tmax=None,
                    iir_filter=None, random_state=None, verbose=None):
    """Generate noisy evoked data

    Parameters
    ----------
    fwd : dict
        a forward solution.
    stc : SourceEstimate object
        The source time courses.
    info : dict
        Measurement info to generate the evoked.
    cov : Covariance object
        The noise covariance.
    snr : float
        signal to noise ratio in dB. It corresponds to
        10 * log10( var(signal) / var(noise) ).
    tmin : float | None
        start of time interval to estimate SNR. If None first time point
        is used.
    tmax : float | None
        end of time interval to estimate SNR. If None last time point
        is used.
    iir_filter : None | array
        IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
    random_state : None | int | np.random.RandomState
        To specify the random generator state.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    evoked : Evoked object
        The simulated evoked data

    Notes
    -----
    .. versionadded:: 0.10.0
    """
    # Project the source estimate through the forward model to sensor space.
    evoked = apply_forward(fwd, stc, info)
    if snr < np.inf:
        # Draw spatially colored noise and rescale it to hit the target SNR.
        noise = simulate_noise_evoked(evoked, cov, iir_filter, random_state)
        evoked_noise = add_noise_evoked(evoked, noise, snr,
                                        tmin=tmin, tmax=tmax)
    else:
        # Infinite SNR requested: return the clean forward projection.
        evoked_noise = evoked
    return evoked_noise
def simulate_noise_evoked(evoked, cov, iir_filter=None, random_state=None):
    """Create noise as a multivariate Gaussian

    The spatial covariance of the noise is given from the cov matrix.

    Parameters
    ----------
    evoked : evoked object
        an instance of evoked used as template
    cov : Covariance object
        The noise covariance
    iir_filter : None | array
        IIR filter coefficients (denominator)
    random_state : None | int | np.random.RandomState
        To specify the random generator state.

    Returns
    -------
    noise : evoked object
        an instance of evoked

    Notes
    -----
    .. versionadded:: 0.10.0
    """
    # Use the template's sample count so the noise matches it in shape.
    n_samples = evoked.data.shape[1]
    out = evoked.copy()
    noise_data, _ = _generate_noise(evoked.info, cov, iir_filter,
                                    random_state, n_samples)
    out.data = noise_data
    return out
def _generate_noise(info, cov, iir_filter, random_state, n_samples, zi=None):
    """Helper to create spatially colored and temporally IIR-filtered noise"""
    from scipy.signal import lfilter
    # Restrict the covariance to the channels present in ``info``.
    noise_cov = pick_channels_cov(cov, include=info['ch_names'], exclude=[])
    rng = check_random_state(random_state)
    # Dense covariance matrix; expand it if stored as a diagonal.
    c = np.diag(noise_cov.data) if noise_cov['diag'] else noise_cov.data
    mu_channels = np.zeros(len(c))
    # we almost always get a positive semidefinite warning here, so squash it
    with warnings.catch_warnings(record=True):
        # Drawn as (n_samples, n_channels), transposed to (n_channels, n_samples).
        noise = rng.multivariate_normal(mu_channels, c, n_samples).T
    if iir_filter is not None:
        # All-pole IIR filtering colors the noise in time; ``zi``/``zf`` carry
        # the filter state so successive calls can be stitched together.
        if zi is None:
            zi = np.zeros((len(c), len(iir_filter) - 1))
        noise, zf = lfilter([1], iir_filter, noise, axis=-1, zi=zi)
    else:
        zf = None
    return noise, zf
def add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):
    """Adds noise to evoked object with specified SNR.

    SNR is computed in the interval from tmin to tmax.

    Parameters
    ----------
    evoked : Evoked object
        An instance of evoked with signal
    noise : Evoked object
        An instance of evoked with noise. Note that ``noise.data`` is
        rescaled in place.
    snr : float
        signal to noise ratio in dB. It corresponds to
        10 * log10( var(signal) / var(noise) )
    tmin : float
        start time before event
    tmax : float
        end time after event

    Returns
    -------
    evoked_noise : Evoked object
        An instance of evoked corrupted by noise
    """
    evoked_noise = copy.deepcopy(evoked)
    keep = _time_mask(evoked_noise.times, tmin, tmax,
                      sfreq=evoked_noise.info['sfreq'])
    # Current SNR (dB) between the signal inside the window and the noise.
    signal_power = np.mean((evoked_noise.data[:, keep] ** 2).ravel())
    noise_power = np.mean((noise.data ** 2).ravel())
    snr_actual = 10 * np.log10(signal_power / noise_power)
    # Rescale the noise amplitude so the resulting SNR matches the request
    # (dB power difference -> amplitude factor, hence the /20).
    noise.data = 10 ** ((snr_actual - float(snr)) / 20) * noise.data
    evoked_noise.data += noise.data
    return evoked_noise
| wronk/mne-python | mne/simulation/evoked.py | Python | bsd-3-clause | 4,699 | [
"Gaussian"
] | a371cb55eb292728f15aec4ecd2931d14cbf405a08f7b533b0a8726c4bc26635 |
import collections
import dataclasses
import random
import itertools
from . import machines
from . import syntax
from . import operations
from . import runs
from . import trees
__all__ = ['Grammar', 'from_grammar', 'to_grammar', 'any_parse', 'only_parse', 'all_parses']
class Grammar:
    """A string-rewriting grammar."""

    def __init__(self):
        # nonterminals: the nonterminal alphabet (a set of Symbols)
        # start_nonterminal: the start symbol, or None until one is set
        # rules: list of (lhs, rhs) pairs, both syntax.Strings
        self.nonterminals = set()
        self.start_nonterminal = None
        self.rules = []

    @property
    def terminals(self):
        """The terminal alphabet: every symbol used in a rule that is not
        a nonterminal."""
        all = set()
        for lhs, rhs in self.rules:
            all.update(lhs)
            all.update(rhs)
        return all - self.nonterminals

    @classmethod
    def from_file(cls, filename):
        """Read a grammar from a file.

        Arguments:
            filename (str): name of file to read from

        Returns:
            Grammar: a CFG

        The file should contain one rule per line, for example::

            S -> a S b
            S -> &

        Currently the grammar must be a context-free grammar. The
        nonterminal symbols are exactly those that appear on a
        left-hand side. The left-hand side of the first rule is the
        start symbol.
        """
        with open(filename) as f:
            return cls.from_lines(f)

    @classmethod
    def from_lines(cls, lines):
        """Read a grammar from a list of strings (see `from_file`).

        Arguments:
            lines: a list of strings

        Returns:
            Grammar: a CFG
        """
        g = cls()
        # NOTE(review): parsed_rules is assigned but never used here.
        parsed_rules = []
        first = True
        for line in lines:
            tokens = syntax.lexer(line)
            lhs = syntax.parse_symbol(tokens)
            g.nonterminals.add(lhs)
            # The lhs of the first rule becomes the start symbol.
            if first:
                g.set_start_nonterminal(lhs)
                first = False
            syntax.parse_character(tokens, syntax.ARROW)
            rhs = []
            # An epsilon token denotes an empty right-hand side.
            if tokens.cur == syntax.EPSILON:
                syntax.parse_character(tokens, syntax.EPSILON)
                syntax.parse_end(tokens)
            else:
                while tokens.pos < len(tokens):
                    rhs.append(syntax.parse_symbol(tokens))
            g.add_rule(lhs, rhs)
        return g

    def set_start_nonterminal(self, x):
        """Set the start symbol to `x`. If `x` is not already a nonterminal,
        it is added to the nonterminal alphabet."""
        x = syntax.Symbol(x)
        self.add_nonterminal(x)
        self.start_nonterminal = x

    def add_nonterminal(self, x):
        """Add `x` to the nonterminal alphabet."""
        x = syntax.Symbol(x)
        self.nonterminals.add(x)

    def add_rule(self, lhs, rhs):
        """Add rule with left-hand side `lhs` and right-hand side `rhs`,
        where `lhs` and `rhs` are both Strings.
        """
        self.rules.append((syntax.String(lhs), syntax.String(rhs)))

    def __str__(self):
        result = []
        result.append('nonterminals: {{{}}}'.format(','.join(map(str, sorted(self.nonterminals)))))
        result.append('start: {}'.format(self.start_nonterminal))
        for lhs, rhs in self.rules:
            result.append('{} → {}'.format(lhs, rhs))
        return '\n'.join(result)

    def _repr_html_(self):
        # HTML rendering for Jupyter notebooks.
        result = []
        result.append("nonterminals: {{{}}}".format(','.join(x._repr_html_() for x in sorted(self.nonterminals))))
        result.append('start: {}'.format(self.start_nonterminal._repr_html_()))
        for lhs, rhs in self.rules:
            result.append('{} → {}'.format(lhs._repr_html_(), rhs._repr_html_()))
        return '<br>\n'.join(result)

    def is_unrestricted(self):
        """Returns True iff the grammar is unrestricted (always true)."""
        return True

    def has_strict_start(self):
        """Returns True iff the start nonterminal does not appear in the rhs
        of any rule. I don't know what the correct terminology for
        this is.
        """
        for _, rhs in self.rules:
            if self.start_nonterminal in rhs:
                return False
        return True

    def is_noncontracting(self):
        """Returns True iff the grammar is *essentially* noncontracting, that
        is, each rule is of the form α → β where one of the following is true:

        - len(β) ≥ len(α)
        - α = S, β = ε, and S does not occur on the rhs of any rule
        """
        for lhs, rhs in self.rules:
            # The S → ε exception, allowed only when S never occurs in a rhs.
            if (len(lhs) == 1 and lhs[0] == self.start_nonterminal and
                len(rhs) == 0 and self.has_strict_start()):
                continue
            if len(rhs) < len(lhs):
                return False
        return True

    def is_contextsensitive(self):
        """Returns True iff the grammar is context-sensitive, that is, each
        rule is of the form α A β → α B β where one of the following is true:

        - A is a nonterminal and len(B) > 0
        - A = S, α = β = B = ε, and S does not occur on the rhs of any rule
        """
        if not self.is_noncontracting():
            return False
        for lhs, rhs in self.rules:
            # The S → ε exception, as in is_noncontracting.
            if (len(lhs) == 1 and lhs[0] == self.start_nonterminal and
                len(rhs) == 0 and self.has_strict_start()):
                continue
            # Look for a nonterminal position in lhs whose left/right context
            # is preserved verbatim in rhs.
            for li, lx in enumerate(lhs):
                suffix = len(lhs)-li-1
                if (lx in self.nonterminals and lhs[:li] == rhs[:li] and
                    (suffix == 0 or lhs[-suffix:] == rhs[-suffix:])):
                    break
            else:
                return False
        return True

    def is_contextfree(self):
        """Returns True iff the grammar is context-free."""
        for lhs, rhs in self.rules:
            if len(lhs) != 1:
                return False
            if lhs[0] not in self.nonterminals:
                return False
        return True

    def is_leftlinear(self):
        """Returns True iff the grammar is left-linear, that is, it is context-free and
        every rule is of the form A → B w or A → w where w contains only terminals.
        """
        if not self.is_contextfree():
            return False
        for _, rhs in self.rules:
            # At most the first rhs symbol may be a nonterminal.
            if any(x in self.nonterminals for x in rhs[1:]):
                return False
        return True

    def is_rightlinear(self):
        """Returns True iff the grammar is right-linear, that is, it is context-free and
        every rule is of the form A → w B or A → w where w contains only terminals.
        """
        if not self.is_contextfree():
            return False
        for _, rhs in self.rules:
            # At most the last rhs symbol may be a nonterminal.
            if any(x in self.nonterminals for x in rhs[:-1]):
                return False
        return True

    def remove_useless(self):
        """Returns a new grammar containing just useful rules."""
        if not self.is_contextfree():
            raise ValueError("grammar must be context-free")
        # Index rules by their lhs nonterminal and by each nonterminal
        # occurring in their rhs.
        by_lhs = collections.defaultdict(list)
        by_rhs = collections.defaultdict(list)
        for [lhs], rhs in self.rules:
            by_lhs[lhs].append((lhs, rhs))
            for y in rhs:
                if y in self.nonterminals:
                    by_rhs[y].append((lhs, rhs))
        # Breadth-first search for nonterminals reachable from the start symbol.
        agenda = collections.deque([self.start_nonterminal])
        reachable = set()
        while len(agenda) > 0:
            x = agenda.popleft()
            if x in reachable: continue
            reachable.add(x)
            for _, rhs in by_lhs[x]:
                for y in rhs:
                    if y in by_lhs:
                        agenda.append(y)
        # Fixpoint computation of productive nonterminals (those that can
        # derive a string of terminals).
        agenda = collections.deque()
        productive = set()
        for [lhs], rhs in self.rules:
            if all(y not in self.nonterminals for y in rhs):
                agenda.append(lhs)
        while len(agenda) > 0:
            y = agenda.popleft()
            if y in productive: continue
            productive.add(y)
            for lhs, rhs in by_rhs[y]:
                if all(y not in self.nonterminals or y in productive for y in rhs):
                    agenda.append(lhs)
        # Keep only rules whose symbols are all both reachable and productive.
        g = Grammar()
        g.set_start_nonterminal(self.start_nonterminal)
        for [lhs], rhs in self.rules:
            if (lhs in reachable & productive and
                all(y not in self.nonterminals or y in reachable & productive for y in rhs)):
                g.add_nonterminal(lhs)
                g.add_rule([lhs], rhs)
        return g

    def compute_nullable(self):
        """Compute, for every nonterminal and rhs suffix α,
        whether α ⇒* ε.
        """
        if not self.is_contextfree():
            raise ValueError("grammar must be context-free")
        # Iterate to a fixpoint; membership of a String in ``nullable`` means
        # that String derives the empty string.
        nullable = {syntax.String()}
        prev_size = None
        while len(nullable) != prev_size:
            prev_size = len(nullable)
            for lhs, rhs in self.rules:
                # A suffix is nullable if its head symbol and its tail are.
                for i in reversed(range(len(rhs))):
                    if rhs[i+1:] in nullable and rhs[i:i+1] in nullable:
                        nullable.add(rhs[i:])
                if rhs in nullable:
                    nullable.add(lhs)
        return nullable

    def compute_first(self, nullable=None):
        """Compute, for every terminal, nonterminal, and rhs suffix α, the set of
        terminals b where α ⇒* b γ for some γ.
        """
        if not self.is_contextfree():
            raise ValueError("grammar must be context-free")
        if nullable is None:
            nullable = self.compute_nullable()
        # Seed FIRST sets: empty suffix has none; a lone terminal is its own.
        first = {syntax.String(): set()}
        for lhs, rhs in self.rules:
            first.setdefault(lhs, set())
            for i in range(len(rhs)):
                if rhs[i] not in self.nonterminals:
                    first.setdefault(rhs[i:i+1], {rhs[i]})
                first.setdefault(rhs[i:], set())
        changed = True
        def update(s, x):
            # Merge x into s, flagging whether s grew (for fixpoint detection).
            nonlocal changed
            n = len(s)
            s.update(x)
            if n != len(s):
                changed = True
        while changed:
            changed = False
            for lhs, rhs in self.rules:
                # FIRST of a suffix includes FIRST of its head, plus FIRST of
                # its tail when the head is nullable.
                for i in reversed(range(len(rhs))):
                    update(first[rhs[i:]], first[rhs[i:i+1]])
                    if rhs[i:i+1] in nullable:
                        update(first[rhs[i:]], first[rhs[i+1:]])
                update(first[lhs], first[rhs])
        return first

    def compute_follow(self, nullable=None, first=None):
        """Compute, for every nonterminal A, the set of terminals b where
        S →* γ A b δ for some γ, δ."""
        if not self.is_contextfree():
            raise ValueError("grammar must be context-free")
        if nullable is None:
            nullable = self.compute_nullable()
        if first is None:
            first = self.compute_first(nullable)
        follow = {x: set() for x in self.nonterminals}
        # The endmarker follows the start symbol.
        follow[self.start_nonterminal].add('⊣')
        changed = True
        def update(s, x):
            # Merge x into s, flagging whether s grew (for fixpoint detection).
            nonlocal changed
            n = len(s)
            s.update(x)
            if n != len(s):
                changed = True
        while changed:
            changed = False
            for [lhs], rhs in self.rules:
                for i in range(len(rhs)):
                    if rhs[i] in self.nonterminals:
                        # What can start the rest of the rule follows rhs[i];
                        # if the rest is nullable, so does whatever follows lhs.
                        update(follow[rhs[i]], first[rhs[i+1:]])
                        if rhs[i+1:] in nullable:
                            update(follow[rhs[i]], follow[lhs])
        return follow
def zero_pad(n, i):
    """Render ``i`` as a string zero-padded to the digit width of ``n``."""
    width = len(str(n))
    return str(i).zfill(width)
def fresh(s, alphabet):
    """Return ``s``, primed with apostrophes as needed, so that the result
    does not collide with any name in ``alphabet``."""
    candidate = s
    while candidate in alphabet:
        candidate = candidate + "'"
    return candidate
def from_grammar(g, mode="topdown"):
    """Convert a CFG to a PDA.

    Arguments:
        g (Grammar): the grammar to convert, which must be a CFG.
        mode (str): selects which algorithm to use. Possible values are:

        - ``"topdown"``: nondeterministic top-down, as in Sipser (3e) Lemma 2.21.
        - ``"bottomup"``: nondeterministic bottom-up.
        - ``"ll1"``: LL(1) deterministic top-down.
        - ``"lr0"``: LR(0) deterministic bottom-up.
        - ``"lr1"``: LR(1) deterministic bottom-up.

    Returns:
        Machine: a PDA equivalent to `g`.
    """
    if not g.is_contextfree():
        # Only context-free grammars are supported so far.
        raise NotImplementedError()
    # Dispatch table mapping mode names to converter functions.
    converters = {
        "topdown": from_cfg_topdown,
        "ll1": from_cfg_ll1,
        "bottomup": from_cfg_bottomup,
        "lr0": from_cfg_lr0,
        "lr1": from_cfg_lr1,
    }
    try:
        convert = converters[mode]
    except KeyError:
        raise ValueError("unknown mode '{}'".format(mode))
    return convert(g)
def from_cfg_topdown(g):
    """Build a nondeterministic top-down PDA for the CFG ``g``
    (Sipser (3e) Lemma 2.21)."""
    pda = machines.PushdownAutomaton()
    pda.set_start_state('start')
    # Initialize the stack with the start symbol above a bottom marker.
    pda.add_transition(('start', [], []), ('loop', [g.start_nonterminal, '$']))
    # Expand: replace a nonterminal on top of the stack by a rule's rhs.
    for [lhs], rhs in g.rules:
        pda.add_transition(('loop', [], lhs), ('loop', rhs))
    # Match: pop a terminal that agrees with the next input symbol.
    for t in g.terminals:
        pda.add_transition(('loop', t, t), ('loop', []))
    # Accept once the bottom marker is exposed.
    pda.add_transition(('loop', [], '$'), ('accept', []))
    pda.add_accept_state('accept')
    return pda
# End-of-input marker symbol used by the deterministic (LL/LR) constructions.
END = syntax.Symbol('-|')
def from_cfg_ll1(g):
    """Convert a CFG to a PDA. If the CFG is LL(1), the resulting PDA will
    be deterministic.
    """
    nullable = g.compute_nullable()
    first = g.compute_first(nullable)
    follow = g.compute_follow(nullable, first)
    terminals = g.terminals
    m = machines.PushdownAutomaton()
    m.set_start_state('start')
    m.add_transition(('start', [], []), ('loop', [g.start_nonterminal, '$']))
    # Predict: with lookahead c stored in the state, expand nonterminal lhs
    # using rhs only when c can begin rhs, or rhs is nullable and c may
    # follow lhs — the LL(1) table condition.
    for [[lhs], rhs] in g.rules:
        for c in terminals | {END}:
            if c in first[rhs] or (rhs in nullable and c in follow[lhs]):
                m.add_transition((c, [], lhs), (c, rhs))
    # Shift the next terminal into the state, then match it against the stack.
    for a in terminals:
        m.add_transition(('loop', a, []), (a, []))
        m.add_transition((a, [], a), ('loop', []))
    m.add_transition(('loop', syntax.BLANK, []), (END, []))  # treat blank as endmarker
    m.add_transition((END, [], '$'), ('accept', []))
    m.add_accept_state("accept")
    return m
def from_cfg_bottomup(g):
    """Build a nondeterministic bottom-up (shift-reduce) PDA for the CFG ``g``."""
    pda = machines.PushdownAutomaton()
    pda.set_start_state('start')
    # Push a bottom-of-stack marker.
    pda.add_transition(('start', [], []), ('loop', ['$']))
    # Reduce: pop a rule's rhs (reversed, since the stack top is its last
    # symbol) and push its lhs.
    for [lhs], rhs in g.rules:
        pda.add_transition(('loop', [], reversed(rhs)), ('loop', lhs))
    # Shift: push the next input terminal onto the stack.
    for t in g.terminals:
        pda.add_transition(('loop', t, []), ('loop', t))
    # Accept when only the start symbol sits on the bottom marker.
    pda.add_transition(('loop', [], [g.start_nonterminal, '$']), ('accept', []))
    pda.add_accept_state('accept')
    return pda
@dataclasses.dataclass(frozen=True, order=True)
class DottedRule:
    """An LR item: a rule with a dot marking how much of the rhs has been
    recognized. ``rhs`` may additionally carry lookahead symbols after
    position ``end``; a ``lhs`` of None denotes the top pseudo-rule."""
    top: bool  # True for the top pseudo-rule (lhs is None)
    lhs: syntax.Symbol
    rhs: tuple # includes lookahead symbols
    dot: int   # position of dot
    end: int   # length of true rhs
    # change to DottedRule(lhs, rhs+lookahead, dotpos, lookpos)?
    # or: write lookahead like A b -> α b

    def __init__(self, lhs, rhs, dot, end=None):
        # The dataclass is frozen, so fields are set via object.__setattr__.
        if lhs is None:
            object.__setattr__(self, 'top', True)
            object.__setattr__(self, 'lhs', None)
        else:
            object.__setattr__(self, 'top', False)
            object.__setattr__(self, 'lhs', lhs)
        object.__setattr__(self, 'rhs', tuple(rhs))
        object.__setattr__(self, 'dot', dot)
        if end is None:
            object.__setattr__(self, 'end', len(rhs))
        else:
            object.__setattr__(self, 'end', end)

    def move(self, dot):
        """Return a copy of this item with the dot at position ``dot``."""
        return DottedRule(self.lhs, self.rhs, dot, self.end)

    def __str__(self):
        toks = list(self.rhs)
        # Parenthesize the lookahead portion, if any.
        if self.end < len(self.rhs):
            toks[self.end] = '(' + toks[self.end]
            toks[-1] = toks[-1] + ')'
        toks[self.dot:self.dot] = ['.']
        if not self.top:
            toks[0:0] = [self.lhs, '→']
        return ' '.join(map(str, toks))

    def _repr_html_(self):
        return str(self)
def intersect_stack(p, m):
    """Given a PDA `p` and DFA `m`, construct a new PDA whose stack
    language is the intersection of the stack language of `p`
    (bottom-to-top) and the language of `m`.

    This construction is the same as Hopcroft and Ullman (1e), page 254.

    `p` can push and pop multiple symbols, and the resulting PDA does
    push and pop multiple symbols.

    Arguments:
        p (Machine): a pushdown automaton.
        m (Machine): a deterministic finite automaton over p's stack alphabet.

    Returns:
        Machine: a PDA accepting the same language as `p` but whose stack
        contents always spell a string accepted by `m`.
    """
    # Bug fix: this message previously said 'm must be a pushdown automaton'
    # even though the check is on p.
    if not p.is_pushdown():
        raise ValueError('p must be a pushdown automaton')
    if not m.is_finite():
        raise ValueError('m must be a finite automaton')
    if not m.is_deterministic():
        raise NotImplementedError('m must be deterministic')
    # Index m's (deterministic) transition function: (state, symbol) -> state.
    m_bystate = {}
    for t in m.get_transitions():
        [[q], [a]], [[r]] = t.lhs, t.rhs
        m_bystate[q, a] = r
    mf = m.get_accept_states()
    pm = machines.PushdownAutomaton()
    # pm has the same states as p, plus a new start state.
    # pm's stack contains paths of m, alternating between states of m
    # and input symbols of m (= stack symbols of p).
    # For LR parsing, it's not necessary to store the latter (e.g.,
    # Sipser (3e) Lemma 2.58), but in general
    # m might have more than one transition between a pair of states.
    pq1 = p.get_start_state()
    pq0 = fresh(pq1, p.states)
    pm.set_start_state(pq0)
    pm.add_transition([[pq0], [], []], [[pq1], [m.get_start_state()]])
    for pt in p.get_transitions():
        [[pq], pa, px], [[pr], py] = pt.lhs, pt.rhs
        for mq in m.states:
            # Run m over the popped symbols px and pushed symbols py.
            # Stacks are written top-first, so the symbols are reversed to
            # simulate m bottom-to-top, recording the state after each step.
            # NOTE(review): m_bystate lookups assume m is complete over the
            # stack alphabet — confirm callers guarantee this.
            pmx = [mq]
            for x in reversed(px):
                pmx += [x, m_bystate[pmx[-1], x]]
            pmy = [mq]
            for y in reversed(py):
                pmy += [y, m_bystate[pmy[-1], y]]
            # Keep the transition only when both the old and new stack tops
            # leave m in an accepting state.
            if pmx[-1] in mf and pmy[-1] in mf:
                pm.add_transition([[pq], pa, reversed(pmx)], [[pr], reversed(pmy)])
    pm.add_accept_states(p.get_accept_states())
    return pm
def renumber_states(m, verbose=False):
    """Return a copy of finite automaton ``m`` whose states are renamed to
    consecutive integers, numbered in sorted order of the original names.

    With ``verbose=True``, print each new number alongside its old state.
    """
    if not m.is_finite():
        raise ValueError()
    numbering = {q: i for i, q in enumerate(sorted(m.states))}
    if verbose:
        for q, i in numbering.items():
            print(f"{i}\t{q}")
    out = machines.FiniteAutomaton()
    out.set_start_state(numbering[m.get_start_state()])
    out.add_accept_states([numbering[q] for q in m.get_accept_states()])
    for t in m.get_transitions():
        [[q], [a]], [[r]] = t.lhs, t.rhs
        out.add_transition([[numbering[q]], [a]], [[numbering[r]]])
    return out
def lr_automaton(g, k=0):
    """Construct the nondeterministic LR(k) automaton for CFG g."""
    # Lookahead machinery is only needed (and only supported) for k == 1.
    if k == 1:
        nullable = g.compute_nullable()
        first = g.compute_first(nullable)
        follow = g.compute_follow(nullable, first)
    elif k > 1:
        raise NotImplementedError()
    g_bylhs = collections.defaultdict(list)
    for [[lhs], rhs] in g.rules:
        g_bylhs[lhs].append(rhs)
    # Add top pseudo-rule
    g_bylhs[None] = [syntax.String([g.start_nonterminal])]
    if k == 1: follow[None] = [END]
    m = machines.FiniteAutomaton()
    m.set_start_state('start')
    # Nonstandardly read a $ because from_cfg_bottomup pushes a $ at
    # the bottom of its stack.
    m.add_transition(['start', '$'],
                     [[DottedRule(None, [g.start_nonterminal] + [END]*k, 0, 1)]])
    for lhs in g_bylhs:
        for rhs in g_bylhs[lhs]:
            # One item family per (rule, lookahead) combination.
            if k == 0:
                looks = [[]]
            elif k == 1:
                looks = [[x] for x in follow[lhs]]
            for look in looks:
                dr = DottedRule(lhs, list(rhs)+look, 0, len(rhs))
                for i, x in enumerate(dr.rhs):
                    # Shift
                    m.add_transition([[dr.move(i)], [x]], [[dr.move(i+1)]])
                    # Predict
                    if x not in g.nonterminals:
                        continue
                    # NOTE(review): the predicted lookaheads use
                    # first[rhs[i:]] (which includes x itself); the standard
                    # LR(1) closure uses FIRST of rhs[i+1:] plus the item's
                    # lookahead — verify intended behavior.
                    if k == 0:
                        looks1 = [[]]
                    elif k == 1:
                        looks1 = [[x] for x in first[rhs[i:]]]
                        if rhs[i:] in nullable:
                            looks1 += looks
                    for rhs1 in g_bylhs[x]:
                        for look1 in looks1:
                            m.add_transition([[dr.move(i)], []],
                                             [[DottedRule(x, list(rhs1)+look1, 0, len(rhs1))]])
                # A completed item (dot past rhs and lookahead) is accepting.
                m.add_accept_state(dr.move(len(dr.rhs)))
    return m
def from_cfg_lr0(g):
    """Convert a CFG to a PDA. If the CFG is LR(0), the resulting PDA
    will be deterministic.
    """
    base = from_cfg_bottomup(g)
    # Determinize the LR(0) item automaton and close it under prefixes so it
    # recognizes every viable stack, then renumber for compact state names.
    control = operations.determinize(lr_automaton(g, 0))
    control = operations.prefix(control)
    control = renumber_states(control)
    # Restrict the bottom-up PDA's stack language to viable LR(0) stacks.
    return intersect_stack(base, control)
def from_cfg_lr1(g):
    """Convert a CFG to a PDA. If the CFG is LR(1), the resulting PDA
    will be deterministic.
    """
    lookaheads = g.terminals | {END}
    pda = machines.PushdownAutomaton()
    pda.set_start_state('start')
    pda.add_transition(('start', [], []), ('loop', '$'))
    # reduce: with lookahead a on top of the stack, replace the rule's rhs
    # beneath it by its lhs, keeping the lookahead on top
    for [lhs], rhs in g.rules:
        for a in lookaheads:
            pda.add_transition(('loop', [], [a] + list(reversed(rhs))),
                               ('loop', [a, lhs]))
    # shift the next input symbol; a blank shifts the endmarker
    for a in g.terminals:
        pda.add_transition(('loop', a, []), ('loop', a))
    pda.add_transition(('loop', syntax.BLANK, []), ('loop', END))
    # accept: endmarker over start symbol over bottom marker
    pda.add_transition(('loop', [], [END, g.start_nonterminal, '$']),
                       ('accept', []))
    pda.add_accept_state('accept')
    # Restrict the stack language to viable LR(1) stacks.
    control = operations.determinize(lr_automaton(g, 1))
    control = operations.prefix(control)
    control = renumber_states(control)
    return intersect_stack(pda, control)
def pda_to_cfg(m):
    """Convert a PDA to a CFG, using the construction of Sipser (3e) Lemma 2.27.

    Arguments:
        m (Machine): automaton to convert, which must be a PDA.

    Returns:
        Grammar: A CFG equivalent to `m`.
    """
    Tuple = syntax.Tuple
    if not m.is_pushdown():
        raise TypeError("only pushdown automata can be converted to (context-free) grammars")
    # Classify every transition by the single stack symbol it pushes or pops.
    push = collections.defaultdict(list)
    pop = collections.defaultdict(list)
    stack_alphabet = set()
    for t in m.get_transitions():
        ([q], a, x) = t.lhs
        ([r], y) = t.rhs
        stack_alphabet.update(x)
        stack_alphabet.update(y)
        if len(x) > 1 or len(y) > 1:
            raise NotImplementedError("multiple pushes/pops not supported")
        if len(x) == 0 and len(y) == 1:
            push[y[0]].append((q, a, x, r, y))
        elif len(x) == 1 and len(y) == 0:
            pop[x[0]].append((q, a, x, r, y))
        else:
            raise NotImplementedError("transitions must either push or pop but not both or neither")
    # Add bottom symbol to stack
    start = fresh('start', m.states)
    bottom = fresh('$', stack_alphabet)
    stack_alphabet.add(bottom)
    push[bottom].append((start, [], [], m.get_start_state(), [bottom]))
    # Make automaton empty its stack before accepting
    accept = fresh('accept', m.states)
    empty = fresh('empty', m.states)
    for x in stack_alphabet:
        for q in m.get_accept_states():
            pop[x].append((q, [], [x], accept if x == bottom else empty, []))
        pop[x].append((empty, [], [x], accept if x == bottom else empty, []))
    # Nonterminal A_{pq} (written Tuple((p,q))) derives the strings that can
    # take the PDA from state p to q on an empty stack.
    g = Grammar()
    g.set_start_nonterminal(Tuple((start, accept)))
    # For each p, q, r, s \in Q, u \in \Gamma, and a, b \in \Sigma_\epsilon,
    # if \delta(p, a, \epsilon) contains (r, u) and \delta(s, b, u) contains
    # (q, \epsilon), put the rule A_{pq} -> a A_{rs} b in G.
    for u in stack_alphabet:
        for p, a, _, r, _ in push[u]:
            for s, b, _, q, _ in pop[u]:
                g.add_nonterminal(Tuple((p,q)))
                g.add_rule([Tuple((p,q))], list(a) + [Tuple((r,s))] + list(b))
    # For each p, q, r \in Q, put the rule A_{pq} -> A_{pr} A_{rq} in G.
    for p in m.states:
        for q in m.states:
            for r in m.states:
                g.add_nonterminal(Tuple((p,q)))
                g.add_rule([Tuple((p,q))], [Tuple((p,r)), Tuple((r,q))])
    # For each p \in Q, put the rule A_{pp} -> \epsilon in G
    for p in m.states:
        g.add_nonterminal(Tuple((p,p)))
        g.add_rule([Tuple((p,p))], [])
    return g
def to_grammar(m):
    """Convert a machine to an equivalent grammar.

    Currently only pushdown automata are supported (via `pda_to_cfg`).

    Arguments:
        m (Machine): the machine to convert.

    Returns:
        Grammar: a grammar equivalent to `m`.

    Raises:
        NotImplementedError: if `m` is not a pushdown automaton.
    """
    if m.is_pushdown():
        return pda_to_cfg(m)
    # Previously this fell through and returned None, which callers would
    # only discover later as an AttributeError; fail loudly instead.
    raise NotImplementedError('only pushdown automata can be converted to grammars')
def pda_paths(r):
    """Yield every accepting transition sequence in the run graph ``r``.

    Each yielded path is a list of transitions, reconstructed by walking
    the run graph's edges backwards from the accepting nodes.
    """
    # To do: move to runs.py
    # Index the edges of r in reverse
    ants = collections.defaultdict(list)
    for u in r.edges:
        for v in r.edges[u]:
            for e in r.edges[u][v]:
                ants[v].append((e, u))
    def visit(v):
        # Generate all transition sequences leading from a start node to v.
        axiom = False
        if 'start' in r.nodes[v]:
            axiom = True
        else:
            assert len(ants[v]) > 0
        for e, u in ants[v]:
            if 'prev' in e:
                # Binary edge: concatenate a path for each antecedent pair.
                for p1 in visit(e['prev']):
                    for p2 in visit(u):
                        yield p1 + p2
            elif 'transition' in e:
                # Unary edge: extend each incoming path by this transition.
                for p in visit(u):
                    yield p + [e['transition']]
            else:
                axiom = True
        if axiom:
            # Base case: the empty path ends (starts) here.
            yield []
    for v in r.nodes:
        if 'accept' in r.nodes[v]:
            yield from visit(v)
def pda_path_to_tree(path, mode='bottomup'):
    """Reconstruct a parse tree from a transition sequence of a bottom-up PDA
    (as built by `from_cfg_bottomup`): shifts become leaves and reductions
    build internal nodes on a tree stack."""
    if mode != 'bottomup':
        raise NotImplementedError
    stack = []
    for trans in path:
        [q], a, x = trans.lhs
        [r], _, y = trans.rhs
        # Only loop-to-loop transitions carry parse actions; the start and
        # accept transitions are skipped.
        if q == r == 'loop':
            if len(a) == 1:
                # Shift: push a leaf for the terminal just read.
                stack.append(trees.Tree(a[0]))
            elif len(x) == 0:
                # Reduction by an epsilon rule.
                stack.append(trees.Tree(y[0], [trees.Tree('ε')]))
            else:
                # Reduction: the popped subtrees become the children.
                stack[-len(x):] = [trees.Tree(y[0], stack[-len(x):])]
    assert len(stack) == 1
    return stack[0]
def only_parse(g, w):
    """Parse string ``w`` with CFG ``g``, requiring the parse to be unique.

    Raises ValueError if ``w`` has no parse, or more than one.
    """
    pda = from_grammar(g, mode='bottomup')
    run = runs.run_pda(pda, w, show_stack=0, keep_nodes=True)
    # Only two paths are ever needed to decide uniqueness.
    found = list(itertools.islice(pda_paths(run), 2))
    if not found:
        raise ValueError('no parse')
    if len(found) > 1:
        raise ValueError('more than one possible parse')
    return pda_path_to_tree(found[0])
def any_parse(g, w):
    """Parse string ``w`` with CFG ``g`` and return one (arbitrary) parse tree.

    Raises ValueError if there is no parse.
    """
    pda = from_grammar(g, mode='bottomup')
    run = runs.run_pda(pda, w, show_stack=0, keep_nodes=True)
    path_iter = pda_paths(run)
    try:
        first_path = next(path_iter)
    except StopIteration:
        raise ValueError('no parse')
    return pda_path_to_tree(first_path)
def all_parses(g, w):
    """Yield every parse tree of string ``w`` under CFG ``g``."""
    pda = from_grammar(g, mode='bottomup')
    run = runs.run_pda(pda, w, show_stack=0, keep_nodes=True)
    yield from map(pda_path_to_tree, pda_paths(run))
| ND-CSE-30151/tock | tock/grammars.py | Python | mit | 26,741 | [
"VisIt"
] | 90427fe0cea0937f23b3b1011060f4a29a5e453e3430a4a997afc15f628b9736 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions that interface with Grimme's GCP code."""
from __future__ import absolute_import, print_function
import os
import re
import uuid
import socket
import subprocess
try:
from psi4.driver.p4util.exceptions import *
from psi4 import core
isP4regime = True
except ImportError:
from .exceptions import *
isP4regime = False
from .p4regex import *
from .molecule import Molecule
def run_gcp(self, func=None, dertype=None, verbose=False): # dashlvl=None, dashparam=None
"""Function to call Grimme's dftd3 program (http://toc.uni-muenster.de/DFTD3/)
to compute the -D correction of level *dashlvl* using parameters for
the functional *func*. The dictionary *dashparam* can be used to supply
a full set of dispersion parameters in the absense of *func* or to supply
individual overrides in the presence of *func*. Returns energy if *dertype* is 0,
gradient if *dertype* is 1, else tuple of energy and gradient if *dertype*
unspecified. The dftd3 executable must be independently compiled and found in
:envvar:`PATH` or :envvar:`PSIPATH`.
*self* may be either a qcdb.Molecule (sensibly) or a psi4.Molecule
(works b/c psi4.Molecule has been extended by this method py-side and
only public interface fns used) or a string that can be instantiated
into a qcdb.Molecule.
"""
# Create (if necessary) and update qcdb.Molecule
if isinstance(self, Molecule):
# called on a qcdb.Molecule
pass
elif isinstance(self, core.Molecule):
# called on a python export of a psi4.core.Molecule (py-side through Psi4's driver)
self.create_psi4_string_from_molecule()
elif isinstance(self, basestring):
# called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion)
self = Molecule(self)
else:
raise ValidationError("""Argument mol must be psi4string or qcdb.Molecule""")
self.update_geometry()
# # Validate arguments
# dashlvl = dashlvl.lower()
# dashlvl = dash_alias['-' + dashlvl][1:] if ('-' + dashlvl) in dash_alias.keys() else dashlvl
# if dashlvl not in dashcoeff.keys():
# raise ValidationError("""-D correction level %s is not available. Choose among %s.""" % (dashlvl, dashcoeff.keys()))
if dertype is None:
dertype = -1
elif der0th.match(str(dertype)):
dertype = 0
elif der1st.match(str(dertype)):
dertype = 1
# elif der2nd.match(str(dertype)):
# raise ValidationError('Requested derivative level \'dertype\' %s not valid for run_dftd3.' % (dertype))
else:
raise ValidationError('Requested derivative level \'dertype\' %s not valid for run_dftd3.' % (dertype))
# if func is None:
# if dashparam is None:
# # defunct case
# raise ValidationError("""Parameters for -D correction missing. Provide a func or a dashparam kwarg.""")
# else:
# # case where all param read from dashparam dict (which must have all correct keys)
# func = 'custom'
# dashcoeff[dashlvl][func] = {}
# dashparam = dict((k.lower(), v) for k, v in dashparam.iteritems())
# for key in dashcoeff[dashlvl]['b3lyp'].keys():
# if key in dashparam.keys():
# dashcoeff[dashlvl][func][key] = dashparam[key]
# else:
# raise ValidationError("""Parameter %s is missing from dashparam dict %s.""" % (key, dashparam))
# else:
# func = func.lower()
# if func not in dashcoeff[dashlvl].keys():
# raise ValidationError("""Functional %s is not available for -D level %s.""" % (func, dashlvl))
# if dashparam is None:
# # (normal) case where all param taken from dashcoeff above
# pass
# else:
# # case where items in dashparam dict can override param taken from dashcoeff above
# dashparam = dict((k.lower(), v) for k, v in dashparam.iteritems())
# for key in dashcoeff[dashlvl]['b3lyp'].keys():
# if key in dashparam.keys():
# dashcoeff[dashlvl][func][key] = dashparam[key]
# TODO temp until figure out paramfile
allowed_funcs = ['HF/MINIS', 'DFT/MINIS', 'HF/MINIX', 'DFT/MINIX',
'HF/SV', 'DFT/SV', 'HF/def2-SV(P)', 'DFT/def2-SV(P)', 'HF/def2-SVP',
'DFT/def2-SVP', 'HF/DZP', 'DFT/DZP', 'HF/def-TZVP', 'DFT/def-TZVP',
'HF/def2-TZVP', 'DFT/def2-TZVP', 'HF/631Gd', 'DFT/631Gd',
'HF/def2-TZVP', 'DFT/def2-TZVP', 'HF/cc-pVDZ', 'DFT/cc-pVDZ',
'HF/aug-cc-pVDZ', 'DFT/aug-cc-pVDZ', 'DFT/SV(P/h,c)', 'DFT/LANL',
'DFT/pobTZVP', 'TPSS/def2-SVP', 'PW6B95/def2-SVP',
# specials
'hf3c', 'pbeh3c']
allowed_funcs = [f.lower() for f in allowed_funcs]
if func.lower() not in allowed_funcs:
raise Dftd3Error("""bad gCP func: %s. need one of: %r""" % (func, allowed_funcs))
# Move ~/.dftd3par.<hostname> out of the way so it won't interfere
defaultfile = os.path.expanduser('~') + '/.dftd3par.' + socket.gethostname()
defmoved = False
if os.path.isfile(defaultfile):
os.rename(defaultfile, defaultfile + '_hide')
defmoved = True
# Find environment by merging PSIPATH and PATH environment variables
lenv = {
'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
':' + os.environ.get('PATH'),
'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
}
# Filter out None values as subprocess will fault on them
lenv = {k: v for k, v in lenv.items() if v is not None}
# Find out if running from Psi4 for scratch details and such
try:
import psi4
except ImportError as err:
isP4regime = False
else:
isP4regime = True
# Setup unique scratch directory and move in
current_directory = os.getcwd()
if isP4regime:
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
os.chdir(psioh.get_default_path())
gcp_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
'.gcp.' + str(uuid.uuid4())[:8]
else:
gcp_tmpdir = os.environ['HOME'] + os.sep + 'gcp_' + str(uuid.uuid4())[:8]
if os.path.exists(gcp_tmpdir) is False:
os.mkdir(gcp_tmpdir)
os.chdir(gcp_tmpdir)
# Write gcp_parameters file that governs cp correction
# paramcontents = gcp_server(func, dashlvl, 'dftd3')
# paramfile1 = 'dftd3_parameters' # older patched name
# with open(paramfile1, 'w') as handle:
# handle.write(paramcontents)
# paramfile2 = '.gcppar'
# with open(paramfile2, 'w') as handle:
# handle.write(paramcontents)
###Two kinds of parameter files can be read in: A short and an extended version. Both are read from
###$HOME/.gcppar.$HOSTNAME by default. If the option -local is specified the file is read in from
###the current working directory: .gcppar
###The short version reads in: basis-keywo
# Write dftd3_geometry file that supplies geometry to dispersion calc
numAtoms = self.natom()
geom = self.save_string_xyz()
reals = []
for line in geom.splitlines():
lline = line.split()
if len(lline) != 4:
continue
if lline[0] == 'Gh':
numAtoms -= 1
else:
reals.append(line)
geomtext = str(numAtoms) + '\n\n'
for line in reals:
geomtext += line.strip() + '\n'
geomfile = './gcp_geometry.xyz'
with open(geomfile, 'w') as handle:
handle.write(geomtext)
# TODO somehow the variations on save_string_xyz and
# whether natom and chgmult does or doesn't get written
# have gotten all tangled. I fear this doesn't work
# the same btwn libmints and qcdb or for ghosts
# Call gcp program
command = ['gcp', geomfile]
command.extend(['-level', func])
if dertype != 0:
command.append('-grad')
try:
#print('command', command)
dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv)
except OSError as e:
raise ValidationError('Program gcp not found in path. %s' % e)
out, err = dashout.communicate()
# Parse output
success = False
for line in out.splitlines():
line = line.decode('utf-8')
if re.match(' Egcp:', line):
sline = line.split()
dashd = float(sline[1])
if re.match(' normal termination of gCP', line):
success = True
if not success:
os.chdir(current_directory)
raise Dftd3Error("""Unsuccessful gCP run.""")
# Parse grad output
if dertype != 0:
derivfile = './gcp_gradient'
dfile = open(derivfile, 'r')
dashdderiv = []
for line in geom.splitlines():
lline = line.split()
if len(lline) != 4:
continue
if lline[0] == 'Gh':
dashdderiv.append([0.0, 0.0, 0.0])
else:
dashdderiv.append([float(x.replace('D', 'E')) for x in dfile.readline().split()])
dfile.close()
if len(dashdderiv) != self.natom():
raise ValidationError('Program gcp gradient file has %d atoms- %d expected.' % \
(len(dashdderiv), self.natom()))
# Prepare results for Psi4
if isP4regime and dertype != 0:
core.set_variable('GCP CORRECTION ENERGY', dashd)
psi_dashdderiv = core.Matrix(self.natom(), 3)
psi_dashdderiv.set(dashdderiv)
# Print program output to file if verbose
if not verbose and isP4regime:
verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False
if verbose:
text = '\n ==> GCP Output <==\n'
text += out.decode('utf-8')
if dertype != 0:
with open(derivfile, 'r') as handle:
text += handle.read().replace('D', 'E')
text += '\n'
if isP4regime:
core.print_out(text)
else:
print(text)
# # Clean up files and remove scratch directory
# os.unlink(paramfile1)
# os.unlink(paramfile2)
# os.unlink(geomfile)
# if dertype != 0:
# os.unlink(derivfile)
# if defmoved is True:
# os.rename(defaultfile + '_hide', defaultfile)
os.chdir('..')
# try:
# shutil.rmtree(dftd3_tmpdir)
# except OSError as e:
# ValidationError('Unable to remove dftd3 temporary directory %s' % e)
os.chdir(current_directory)
# return -D & d(-D)/dx
if dertype == -1:
return dashd, dashdderiv
elif dertype == 0:
return dashd
elif dertype == 1:
return psi_dashdderiv
# Monkey-patch run_gcp onto the libmints Molecule binding when running
# inside Psi4 proper; outside Psi4 the patch silently fails and the
# function is attached to the qcdb.Molecule class instead.
try:
    # Attach method to libmints psi4.Molecule class
    core.Molecule.run_gcp = run_gcp
except (NameError, AttributeError):
    # But don't worry if that doesn't work b/c
    # it'll get attached to qcdb.Molecule class
    # (NameError: `core` not imported; AttributeError: binding read-only.)
    pass
| kratman/psi4public | psi4/driver/qcdb/interface_gcp.py | Python | gpl-2.0 | 11,924 | [
"Psi4"
] | 7fa7e70ebf3837b5b11c87b5882c33d5f343a15af15db6c8c1bd04de82461f08 |
from __future__ import annotations
from math import sqrt
import pytest
from scitbx import matrix
class Model:
    """Test fixture: a regular still-shot experimental geometry (beam,
    goniometer, detector, cubic P1 crystal) plus a table of reflections
    near the Ewald sphere, for stills prediction tests."""

    def __init__(self, test_nave_model=False):
        # Set up experimental models with regular geometry
        from dxtbx.model import BeamFactory, DetectorFactory, GoniometerFactory

        # Beam along the Z axis
        self.beam = BeamFactory.make_beam(unit_s0=matrix.col((0, 0, 1)), wavelength=1.0)
        # Goniometer (used only for index generation) along X axis
        self.goniometer = GoniometerFactory.known_axis(matrix.col((1, 0, 0)))
        # Detector fast, slow along X, -Y; beam in the centre, 200 mm distance
        dir1 = matrix.col((1, 0, 0))
        dir2 = matrix.col((0, -1, 0))
        centre = matrix.col((0, 0, 200))
        npx_fast = npx_slow = 1000
        pix_size = 0.2
        origin = centre - (
            0.5 * npx_fast * pix_size * dir1 + 0.5 * npx_slow * pix_size * dir2
        )
        self.detector = DetectorFactory.make_detector(
            "PAD",
            dir1,
            dir2,
            origin,
            (pix_size, pix_size),
            (npx_fast, npx_slow),
            (0, 1.0e6),
        )
        # Cubic 100 A^3 crystal
        a = matrix.col((100, 0, 0))
        b = matrix.col((0, 100, 0))
        c = matrix.col((0, 0, 100))
        if test_nave_model:
            from dxtbx.model import MosaicCrystalSauter2014

            self.crystal = MosaicCrystalSauter2014(a, b, c, space_group_symbol="P 1")
            # Deliberately extreme mosaicity / tiny domain size for testing.
            self.crystal.set_half_mosaicity_deg(500)
            self.crystal.set_domain_size_ang(0.2)
        else:
            from dxtbx.model import Crystal

            self.crystal = Crystal(a, b, c, space_group_symbol="P 1")
        # Collect these models in an Experiment (ignoring the goniometer)
        from dxtbx.model.experiment_list import Experiment

        self.experiment = Experiment(
            beam=self.beam,
            detector=self.detector,
            goniometer=None,
            scan=None,
            crystal=self.crystal,
            imageset=None,
        )
        # Generate some reflections
        self.reflections = self.generate_reflections()

    def generate_reflections(self):
        """Use reeke_model to generate indices of reflections near to the Ewald
        sphere that might be observed on a still image. Build a reflection_table
        of these."""
        from cctbx.sgtbx import space_group_info

        space_group_type = space_group_info("P 1").group().type()
        # create a ReekeIndexGenerator
        UB = self.crystal.get_A()
        axis = self.goniometer.get_rotation_axis()
        s0 = self.beam.get_s0()
        # use the same UB at the beginning and end - the margin parameter ensures
        # we still have indices close to the Ewald sphere generated
        from dials.algorithms.spot_prediction import ReekeIndexGenerator

        r = ReekeIndexGenerator(UB, UB, space_group_type, axis, s0, dmin=1.5, margin=1)
        # generate indices
        hkl = r.to_array()
        nref = len(hkl)
        # create a reflection table with the columns the predictor expects
        from dials.array_family import flex

        table = flex.reflection_table()
        table["flags"] = flex.size_t(nref, 0)
        table["id"] = flex.int(nref, 0)
        table["panel"] = flex.size_t(nref, 0)
        table["miller_index"] = flex.miller_index(hkl)
        table["entering"] = flex.bool(nref, True)
        table["s1"] = flex.vec3_double(nref)
        table["xyzcal.mm"] = flex.vec3_double(nref)
        table["xyzcal.px"] = flex.vec3_double(nref)
        return table
@pytest.mark.parametrize("nave_model", [True, False], ids=["nave", "native"])
def test(nave_model):
    """Check delpsical.rad against an independently reconstructed angle."""
    from dials.algorithms.spot_prediction import StillsReflectionPredictor
    from libtbx.test_utils import approx_equal

    model = Model(test_nave_model=nave_model)

    # Cache quantities derived from the model.
    ub_matrix = matrix.sqr(model.crystal.get_A())
    beam_s0 = matrix.col(model.beam.get_s0())
    ewald_radius = beam_s0.length()

    # Create the predictor and fill in the reflection table.
    predictor = StillsReflectionPredictor(model.experiment)
    predictor.for_reflection_table(model.reflections, ub_matrix)

    # For every reflection, reconstruct the relp rotated onto the Ewald
    # sphere (rotated) and the unrotated relp (unrotated); the signed angle
    # between them must match the stored delpsical.rad.
    for refl in model.reflections.rows():
        rotated = matrix.col(refl["s1"]) - beam_s0
        unrotated = ub_matrix * matrix.col(refl["miller_index"])
        outside_sphere = (beam_s0 + unrotated).length() > ewald_radius
        sign = -1 if outside_sphere else 1
        expected = sign * rotated.accute_angle(unrotated)
        assert approx_equal(expected, refl["delpsical.rad"])
def test_spherical_relps():
    """Check predicted s1 against the spherical-relp formula
    (stills_prediction_nave3.pdf)."""
    from dials.algorithms.spot_prediction import StillsReflectionPredictor
    from libtbx.test_utils import approx_equal

    model = Model()

    # Cache quantities derived from the model.
    ub_matrix = matrix.sqr(model.crystal.get_A())
    beam_s0 = matrix.col(model.beam.get_s0())
    ewald_radius = beam_s0.length()

    # Create the predictor (spherical relp model) and predict.
    predictor = StillsReflectionPredictor(model.experiment, spherical_relp=True)
    predictor.for_reflection_table(model.reflections, ub_matrix)

    # For every reflection, reconstruct the relp centre q and recompute s1.
    for refl in model.reflections.rows():
        q = ub_matrix * matrix.col(refl["miller_index"])
        radicand = q.length_sq() + 2.0 * q.dot(beam_s0) + beam_s0.length_sq()
        assert radicand > 0.0
        expected_s1 = ewald_radius * (q + beam_s0) / sqrt(radicand)
        assert approx_equal(expected_s1, refl["s1"])
| dials/dials | tests/algorithms/spot_prediction/test_stills_reflection_predictor.py | Python | bsd-3-clause | 5,664 | [
"CRYSTAL"
] | 1ba384583c6572eab4c61cf3cf41ded650d9daab4b792c35bc6fee9747ae5f3e |
import numpy as np
import scipy.signal
import torch
from math import ceil
from torch import nn as nn
# from .module import Module
from torch.nn import Parameter
from torch.nn import functional as F
from torch.nn.init import xavier_normal
from torch.nn.modules.utils import _pair
from torch.utils.data import DataLoader
from tqdm import tqdm
from .constraints import positive
from .utils.hermite import hermite_2d, rotate_weights_hermite
def elu1(x):
    """ELU shifted up by one, so the output is strictly positive.

    NOTE(review): F.elu is applied with ``inplace=True``, so the input
    tensor itself is overwritten with ELU(x) before the shift is added.
    """
    shifted = F.elu(x, inplace=True)
    return shifted + 1.
class Elu1(nn.Module):
    """
    Elu activation function shifted by 1 to ensure that the
    output stays positive. That is:
    Elu1(x) = Elu(x) + 1
    """

    def forward(self, x):
        # Delegates to the module-level helper (which applies ELU in place).
        return elu1(x)
def log1exp(x):
    """Numerically stable elementwise log(1 + exp(x)) (softplus).

    The previous direct evaluation ``torch.log(1. + torch.exp(x))``
    overflows to ``inf`` once ``exp(x)`` exceeds the floating-point range
    (x >~ 88 for float32). ``F.softplus`` computes the same function but
    switches to the linear asymptote ``x`` for large inputs, so the
    result stays finite for all x.
    """
    return F.softplus(x)
class Log1Exp(nn.Module):
    """Module wrapper around :func:`log1exp`, i.e. elementwise log(1 + exp(x))."""

    def forward(self, x):
        return log1exp(x)
class AdjustedElu(nn.Module):
    """
    Elu activation function that's adjusted to:
    1) ensure that all outputs are positive and
    2) f(x) = x for x >= 1
    """

    def forward(self, x):
        # Shift the input down by one, apply ELU, then shift back up:
        # for x >= 1 the ELU argument is non-negative so f(x) = x exactly;
        # for x < 1 the output decays smoothly toward zero (stays positive).
        shifted = x - 1.
        return F.elu(shifted) + 1.
class WidthXHeightXFeatureLinear(nn.Module):
    """
    Factorized fully connected layer. Weights are a sum of outer products between three vectors over width,
    height and spatial.
    """

    def __init__(self, in_shape, outdims, components=1, bias=True, normalize=True, positive=False, width=None,
                 height=None, eps=1e-6):
        # in_shape: (channels, width, height) of the input.
        # outdims: number of output units.
        # components: rank of the width x height factorization.
        # width/height: optional externally supplied (shared) Parameters.
        super().__init__()
        self.in_shape = in_shape
        self.eps = eps  # numerical guard for the normalization denominators
        c, w, h = self.in_shape
        self.outdims = outdims
        self.normalize = normalize
        self.positive = positive
        self.components = components
        # Rank-`components` factors over width and height plus a per-channel
        # feature vector.
        self.width = Parameter(torch.Tensor(self.outdims, 1, w, 1, components)) if width is None else width
        self.height = Parameter(torch.Tensor(self.outdims, 1, 1, h, components)) if height is None else height
        self.features = Parameter(torch.Tensor(self.outdims, c, 1, 1))
        assert self.width.size(4) == self.height.size(4), 'The number of components in width and height do not agree'
        # If width/height were supplied, adopt their component count.
        self.components = self.width.size(4)
        if bias:
            bias = Parameter(torch.Tensor(self.outdims))
            self.register_parameter('bias', bias)
        else:
            self.register_parameter('bias', None)
        self.initialize()

    def initialize(self, init_noise=1e-3):
        # Small Gaussian init for all factors; zero bias.
        self.width.data.normal_(0, init_noise)
        self.height.data.normal_(0, init_noise)
        self.features.data.normal_(0, init_noise)
        if self.bias is not None:
            self.bias.data.fill_(0)

    @property
    def normalized_width(self):
        # Optionally clamp non-negative (in place) and l2-normalize along
        # the width axis.
        if self.positive:
            positive(self.width)
        if self.normalize:
            return self.width / (self.width.pow(2).sum(2, keepdim=True) + self.eps).sqrt().expand_as(self.width)
        else:
            return self.width

    @property
    def normalized_height(self):
        # Same as normalized_width, but along the height axis.
        c, w, h = self.in_shape
        if self.positive:
            positive(self.height)
        if self.normalize:
            return self.height / (self.height.pow(2).sum(3, keepdim=True) + self.eps).sqrt().expand_as(self.height)
        else:
            return self.height

    @property
    def spatial(self):
        # Per-unit spatial mask: sum over components of the width x height
        # outer products -> (outdims, 1, w, h).
        c, w, h = self.in_shape
        n, comp = self.outdims, self.components
        weight = self.normalized_width.expand(n, 1, w, h, comp) \
                 * self.normalized_height.expand(n, 1, w, h, comp)
        weight = weight.sum(4, keepdim=True).view(n, 1, w, h)
        return weight

    @property
    def weight(self):
        # Full weight: spatial mask times per-channel features, flattened
        # to (outdims, c * w * h).
        c, w, h = self.in_shape
        n, comp = self.outdims, self.components
        weight = self.spatial.expand(n, c, w, h) * self.features.expand(n, c, w, h)
        weight = weight.view(self.outdims, -1)
        return weight

    @property
    def basis(self):
        # Weights reshaped to image-like filters, as a numpy array.
        c, w, h = self.in_shape
        return self.weight.view(-1, c, w, h).data.cpu().numpy()

    def forward(self, x):
        # Flatten the input and apply the factorized linear map.
        N = x.size(0)
        y = x.view(N, -1) @ self.weight.t()
        if self.bias is not None:
            y = y + self.bias.expand_as(y)
        return y

    def __repr__(self):
        return ('spatial positive ' if self.positive else '') + \
               ('normalized ' if self.normalize else '') + \
               self.__class__.__name__ + \
               ' (' + '{} x {} x {}'.format(*self.in_shape) + ' -> ' + str(self.outdims) + ') spatial rank {}'.format(
            self.components)
class SpatialXFeatureLinear(nn.Module):
    """
    Factorized fully connected layer. Weights are a sum of outer products between a spatial filter and a feature vector.
    """

    def __init__(self, in_shape, outdims, bias=True, normalize=True, positive=True, spatial=None):
        # in_shape: (channels, width, height); outdims: number of output units.
        # spatial: optional externally supplied (shared) spatial Parameter.
        super().__init__()
        self.in_shape = in_shape
        self.outdims = outdims
        self.normalize = normalize
        self.positive = positive
        c, w, h = in_shape
        # Per-unit spatial mask (outdims, 1, w, h) and per-channel feature
        # vector (outdims, c, 1, 1).
        self.spatial = Parameter(torch.Tensor(self.outdims, 1, w, h)) if spatial is None else spatial
        self.features = Parameter(torch.Tensor(self.outdims, c, 1, 1))
        if bias:
            bias = Parameter(torch.Tensor(self.outdims))
            self.register_parameter('bias', bias)
        else:
            self.register_parameter('bias', None)
        self.initialize()

    @property
    def normalized_spatial(self):
        # NOTE: always projects the spatial mask to non-negative values
        # (in place), regardless of self.positive.
        positive(self.spatial)
        if self.normalize:
            weight = self.spatial / (
                self.spatial.pow(2).sum(2, keepdim=True).sum(3, keepdim=True).sqrt().expand_as(self.spatial) + 1e-6)
        else:
            weight = self.spatial
        return weight

    @property
    def weight(self):
        # Full (outdims, c*w*h) weight: spatial mask times feature vector.
        if self.positive:
            positive(self.features)
        n = self.outdims
        c, w, h = self.in_shape
        weight = self.normalized_spatial.expand(n, c, w, h) * self.features.expand(n, c, w, h)
        weight = weight.view(self.outdims, -1)
        return weight

    def l1(self, average=True):
        # L1 penalty computed as the product of factor norms (cheaper than
        # materializing the full weight tensor).
        n = self.outdims
        c, w, h = self.in_shape
        ret = (self.normalized_spatial.view(self.outdims, -1).abs().sum(1, keepdim=True)
               * self.features.view(self.outdims, -1).abs().sum(1)).sum()
        if average:
            ret = ret / (n * c * w * h)
        return ret

    def initialize(self, init_noise=1e-3):
        self.spatial.data.normal_(0, init_noise)
        self.features.data.normal_(0, init_noise)
        if self.bias is not None:
            self.bias.data.fill_(0)

    def forward(self, x, shift=None):
        # `shift` is accepted for interface compatibility but ignored here.
        N = x.size(0)
        y = x.view(N, -1) @ self.weight.t()
        if self.bias is not None:
            y = y + self.bias.expand_as(y)
        return y

    def __repr__(self):
        return ('spatial positive ' if self.positive else '') + \
               ('normalized ' if self.normalize else '') + \
               self.__class__.__name__ + \
               ' (' + '{} x {} x {}'.format(*self.in_shape) + ' -> ' + str(
            self.outdims) + ')'
class SpatialTransformerPyramid2d(nn.Module):
    """Readout that grid-samples a Gaussian scale pyramid of the input
    feature map at one learned (x, y) location per output unit and combines
    the sampled (scale, channel) responses with learned feature weights."""

    def __init__(self, in_shape, outdims, scale_n=4, positive=False, bias=True,
                 init_range=.1, downsample=True, _skip_upsampling=False, type=None):
        # in_shape: (channels, width, height); outdims: number of units.
        # scale_n: number of extra pyramid levels (scale_n + 1 in total).
        super().__init__()
        self.in_shape = in_shape
        c, w, h = in_shape
        self.outdims = outdims
        self.positive = positive
        self.gauss_pyramid = Pyramid(scale_n=scale_n, downsample=downsample, _skip_upsampling=_skip_upsampling, type=type)
        # One (x, y) sample location per unit, in grid_sample's [-1, 1] coords.
        self.grid = Parameter(torch.Tensor(1, outdims, 1, 2))
        # One weight per (scale, channel) pair and unit.
        self.features = Parameter(torch.Tensor(1, c * (scale_n + 1), 1, outdims))
        if bias:
            bias = Parameter(torch.Tensor(outdims))
            self.register_parameter('bias', bias)
        else:
            self.register_parameter('bias', None)
        self.init_range = init_range
        self.initialize()

    def initialize(self):
        self.grid.data.uniform_(-self.init_range, self.init_range)
        self.features.data.fill_(1 / self.in_shape[0])
        if self.bias is not None:
            self.bias.data.fill_(0)

    def group_sparsity(self, group_size):
        # Group-lasso penalty over consecutive chunks of feature channels.
        f = self.features.size(1)
        n = f // group_size
        ret = 0
        for chunk in range(0, f, group_size):
            ret = ret + (self.features[:, chunk:chunk + group_size, ...].pow(2).mean(1) + 1e-12).sqrt().mean() / n
        return ret

    def feature_l1(self, average=True):
        if average:
            return self.features.abs().mean()
        else:
            return self.features.abs().sum()

    def neuron_layer_power(self, x, neuron_id):
        # Mean squared feature-weighted pyramid response of a single unit,
        # averaged over all spatial positions (no grid sampling).
        if self.positive:
            positive(self.features)
        self.grid.data = torch.clamp(self.grid.data, -1, 1)
        N, c, w, h = x.size()
        m = self.gauss_pyramid.scale_n + 1
        feat = self.features.view(1, m * c, self.outdims)
        y = torch.cat(self.gauss_pyramid(x), dim=1)
        y = (y * feat[:, :, neuron_id, None, None]).sum(1)
        return y.pow(2).mean()

    def forward(self, x, shift=None):
        if self.positive:
            positive(self.features)
        # Keep sample locations inside grid_sample's valid range.
        self.grid.data = torch.clamp(self.grid.data, -1, 1)
        N, c, w, h = x.size()
        m = self.gauss_pyramid.scale_n + 1
        feat = self.features.view(1, m * c, self.outdims)
        if shift is None:
            grid = self.grid.expand(N, self.outdims, 1, 2)
        else:
            # Per-sample shift applied to all unit locations.
            grid = self.grid.expand(N, self.outdims, 1, 2) + shift[:, None, None, :]
        pools = [F.grid_sample(xx, grid) for xx in self.gauss_pyramid(x)]
        y = torch.cat(pools, dim=1).squeeze(-1)
        y = (y * feat).sum(1).view(N, self.outdims)
        if self.bias is not None:
            y = y + self.bias
        return y

    def __repr__(self):
        c, w, h = self.in_shape
        r = self.__class__.__name__ + \
            ' (' + '{} x {} x {}'.format(c, w, h) + ' -> ' + str(self.outdims) + ')'
        if self.bias is not None:
            r += ' with bias'
        for ch in self.children():
            r += ' -> ' + ch.__repr__() + '\n'
        return r
class FactorizedSpatialTransformerPyramid2d(SpatialTransformerPyramid2d):
    """SpatialTransformerPyramid2d with the (scale x channel) feature weights
    factorized into a per-scale and a per-channel vector."""

    def __init__(self, in_shape, outdims, scale_n=4, positive=False, bias=True,
                 init_range=.1, downsample=True, type=None):
        # NOTE: deliberately skips SpatialTransformerPyramid2d.__init__
        # (calls nn.Module.__init__ directly) and re-creates its state with
        # factorized feature parameters instead of a dense `features`.
        super(SpatialTransformerPyramid2d, self).__init__()
        self.in_shape = in_shape
        c, w, h = in_shape
        self.outdims = outdims
        self.positive = positive
        self.gauss_pyramid = Pyramid(scale_n=scale_n, downsample=downsample, type=type)
        self.grid = Parameter(torch.Tensor(1, outdims, 1, 2))
        self.feature_scales = Parameter(torch.Tensor(1, scale_n + 1, 1, outdims))
        self.feature_channels = Parameter(torch.Tensor(1, 1, c, outdims))
        if bias:
            bias = Parameter(torch.Tensor(outdims))
            self.register_parameter('bias', bias)
        else:
            self.register_parameter('bias', None)
        self.init_range = init_range
        self.initialize()

    @property
    def features(self):
        # Outer product of scale and channel factors, flattened to the dense
        # (1, (scale_n+1)*c, 1, outdims) layout the parent class expects.
        return (self.feature_scales * self.feature_channels).view(1, -1, 1, self.outdims)

    def scale_l1(self, average=True):
        if average:
            return self.feature_scales.abs().mean()
        else:
            return self.feature_scales.abs().sum()

    def channel_l1(self, average=True):
        if average:
            return self.feature_channels.abs().mean()
        else:
            return self.feature_channels.abs().sum()

    def initialize(self):
        self.grid.data.uniform_(-self.init_range, self.init_range)
        # sqrt so that the product of the two factors is ~1/c, matching the
        # dense parent initialization.
        self.feature_scales.data.fill_(1 / np.sqrt(self.in_shape[0]))
        self.feature_channels.data.fill_(1 / np.sqrt(self.in_shape[0]))
        if self.bias is not None:
            self.bias.data.fill_(0)
class SpatialTransformerPooled2d(nn.Module):
    """Readout that grid-samples the input and a stack of successively
    average-pooled versions of it at one learned location per unit, then
    combines the sampled (level, channel) responses with feature weights."""

    def __init__(self, in_shape, outdims, pool_steps=1, positive=False, bias=True,
                 pool_kern=2, init_range=.1):
        # in_shape: (channels, width, height); outdims: number of units.
        super().__init__()
        self._pool_steps = pool_steps
        self.in_shape = in_shape
        c, w, h = in_shape
        self.outdims = outdims
        self.positive = positive
        # One (x, y) sample location per unit, in grid_sample's [-1, 1] coords.
        self.grid = Parameter(torch.Tensor(1, outdims, 1, 2))
        # One weight per (pool level, channel) pair and unit.
        self.features = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, outdims))
        if bias:
            bias = Parameter(torch.Tensor(outdims))
            self.register_parameter('bias', bias)
        else:
            self.register_parameter('bias', None)
        self.pool_kern = pool_kern
        self.avg = nn.AvgPool2d((pool_kern, pool_kern), stride=pool_kern, count_include_pad=False)
        self.init_range = init_range
        self.initialize()

    @property
    def pool_steps(self):
        return self._pool_steps

    @pool_steps.setter
    def pool_steps(self, value):
        # The feature channel count depends on the pool depth, so the
        # parameter must be re-created (and re-initialized) on change.
        assert value >= 0 and int(value) - value == 0, 'new pool steps must be a non-negative integer'
        if value != self._pool_steps:
            print('Resizing readout features')
            c, w, h = self.in_shape
            self._pool_steps = int(value)
            self.features = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, self.outdims))
            self.features.data.fill_(1 / self.in_shape[0])

    def initialize(self):
        self.grid.data.uniform_(-self.init_range, self.init_range)
        self.features.data.fill_(1 / self.in_shape[0])
        if self.bias is not None:
            self.bias.data.fill_(0)

    def feature_l1(self, average=True):
        if average:
            return self.features.abs().mean()
        else:
            return self.features.abs().sum()

    def group_sparsity(self, group_size):
        # Group-lasso penalty over consecutive chunks of feature channels.
        f = self.features.size(1)
        n = f // group_size
        ret = 0
        for chunk in range(0, f, group_size):
            ret = ret + (self.features[:, chunk:chunk + group_size, ...].pow(2).mean(1) + 1e-12).sqrt().mean() / n
        return ret

    def forward(self, x, shift=None, out_idx=None):
        # out_idx optionally restricts computation to a subset of units;
        # shift optionally offsets all unit locations per sample.
        if self.positive:
            positive(self.features)
        self.grid.data = torch.clamp(self.grid.data, -1, 1)
        N, c, w, h = x.size()
        m = self.pool_steps + 1
        feat = self.features.view(1, m * c, self.outdims)
        if out_idx is None:
            grid = self.grid
            bias = self.bias
            outdims = self.outdims
        else:
            feat = feat[:, :, out_idx]
            grid = self.grid[:, out_idx]
            if self.bias is not None:
                bias = self.bias[out_idx]
            outdims = len(out_idx)
        if shift is None:
            grid = grid.expand(N, outdims, 1, 2)
        else:
            grid = grid.expand(N, outdims, 1, 2) + shift[:, None, None, :]
        # Sample the original map plus each pooled level at unit locations.
        pools = [F.grid_sample(x, grid)]
        for _ in range(self.pool_steps):
            x = self.avg(x)
            pools.append(F.grid_sample(x, grid))
        y = torch.cat(pools, dim=1)
        y = (y.squeeze(-1) * feat).sum(1).view(N, outdims)
        if self.bias is not None:
            y = y + bias
        return y

    def __repr__(self):
        c, w, h = self.in_shape
        r = self.__class__.__name__ + \
            ' (' + '{} x {} x {}'.format(c, w, h) + ' -> ' + str(self.outdims) + ')'
        if self.bias is not None:
            r += ' with bias'
        r += ' and pooling for {} steps\n'.format(self.pool_steps)
        for ch in self.children():
            r += ' -> ' + ch.__repr__() + '\n'
        return r
class SpatialXFeatureLinear3d(nn.Module):
    """Factorized readout for 3d (video) inputs.

    Each output unit's weight is the outer product of a spatial mask
    (w x h) and a per-channel feature vector (c), applied independently at
    every time step.

    Args:
        in_shape: input shape (channels, time, width, height).
        outdims: number of output units.
        bias: add a learnable per-unit bias.
        normalize: l2-normalize each unit's spatial mask.
        positive: constrain spatial mask and features to be non-negative.
        spatial: optional externally supplied (shared) spatial Parameter.
    """

    def __init__(self, in_shape, outdims, bias=True, normalize=False, positive=True, spatial=None):
        super().__init__()
        self.in_shape = in_shape
        self.outdims = outdims
        self.normalize = normalize
        self.positive = positive
        c, t, w, h = in_shape
        self.spatial = Parameter(torch.Tensor(self.outdims, 1, 1, w, h)) if spatial is None else spatial
        self.features = Parameter(torch.Tensor(self.outdims, c, 1, 1, 1))
        if bias:
            bias = Parameter(torch.Tensor(self.outdims))
            self.register_parameter('bias', bias)
        else:
            self.register_parameter('bias', None)
        self.initialize()

    def l1(self, average=True):
        # L1 penalty computed as the product of factor norms (cheaper than
        # materializing the full weight tensor).
        n = self.outdims
        c, _, w, h = self.in_shape
        ret = (self.spatial.view(self.outdims, -1).abs().sum(1, keepdim=True)
               * self.features.view(self.outdims, -1).abs().sum(1, keepdim=True)).sum()
        if average:
            ret = ret / (n * c * w * h)
        return ret

    @property
    def normalized_spatial(self):
        # Optionally clamp non-negative (in place) and l2-normalize each
        # unit's spatial mask.
        if self.positive:
            positive(self.spatial)
        if self.normalize:
            # BUGFIX: was `.expand(self.spatial)`, which passes a tensor to
            # Tensor.expand (expects sizes) and raises a TypeError at
            # runtime; expand_as matches the 2d readout above.
            weight = self.spatial / (
                self.spatial.pow(2).sum(2, keepdim=True).sum(3, keepdim=True).sum(4, keepdim=True).sqrt().expand_as(
                    self.spatial) + 1e-6)
        else:
            weight = self.spatial
        return weight

    @property
    def constrained_features(self):
        if self.positive:
            positive(self.features)
        return self.features

    @property
    def weight(self):
        # Outer product of spatial mask and feature vector per output unit,
        # shape (outdims, c, 1, w, h).
        n = self.outdims
        c, _, w, h = self.in_shape
        weight = self.normalized_spatial.expand(n, c, 1, w, h) * self.constrained_features.expand(n, c, 1, w, h)
        return weight

    def initialize(self, init_noise=1e-3):
        self.spatial.data.normal_(0, init_noise)
        self.features.data.normal_(0, init_noise)
        if self.bias is not None:
            self.bias.data.fill_(0)

    def forward(self, x):
        # Collapse (N, t) into the batch dimension and apply the factorized
        # linear map; returns (N, t, outdims).
        N, c, t, w, h = x.size()
        tmp = x.transpose(2, 1).contiguous().view(-1, c * w * h) @ self.weight.view(self.outdims, -1).t()
        if self.bias is not None:
            tmp = tmp + self.bias.expand_as(tmp)
        return tmp.view(N, t, self.outdims)

    def __repr__(self):
        c, t, w, h = self.in_shape
        return ('positive ' if self.positive else '') + \
               ('spatially normalized ' if self.normalize else '') + \
               self.__class__.__name__ + \
               ' (' + '{} x {} x {}'.format(c, w, h) + ' -> ' + str(self.outdims) + ')'
class SpatialTransformerPyramid3d(nn.Module):
    """Readout for 3d (video) inputs: grid-samples a Gaussian scale pyramid
    at one learned (x, y) location per output unit (shared across time) and
    combines the sampled (scale, channel) responses with feature weights."""

    def __init__(self, in_shape, outdims, scale_n=4, positive=True, bias=True, init_range=.05, downsample=True,
                 type=None):
        super().__init__()
        self.in_shape = in_shape
        c, _, w, h = in_shape
        self.outdims = outdims
        self.positive = positive
        self.gauss = Pyramid(scale_n=scale_n, downsample=downsample, type=type)
        # One (x, y) sample location per unit, in grid_sample's [-1, 1] coords.
        self.grid = Parameter(torch.Tensor(1, outdims, 1, 2))
        self.features = Parameter(torch.Tensor(1, c * (scale_n + 1), 1, outdims))
        if bias:
            bias = Parameter(torch.Tensor(outdims))
            self.register_parameter('bias', bias)
        else:
            self.register_parameter('bias', None)
        self.init_range = init_range
        self.initialize()

    def initialize(self):
        self.grid.data.uniform_(-self.init_range, self.init_range)
        self.features.data.fill_(1 / self.in_shape[0])
        if self.bias is not None:
            self.bias.data.fill_(0)

    def feature_l1(self, average=True, subs_idx=None):
        """L1 penalty on the feature weights.

        Raises:
            NotImplementedError: if `subs_idx` is given (not supported).
        """
        # BUGFIX: was `raise NotImplemented(...)` — NotImplemented is a
        # sentinel value, not an exception class; raising it produced a
        # TypeError instead of the intended error.
        if subs_idx is not None:
            raise NotImplementedError('Subsample is not implemented.')
        if average:
            return self.features.abs().mean()
        else:
            return self.features.abs().sum()

    def forward(self, x, shift=None, subs_idx=None):
        if subs_idx is not None:
            raise NotImplementedError('Subsample is not implemented.')
        if self.positive:
            positive(self.features)
        self.grid.data = torch.clamp(self.grid.data, -1, 1)
        N, c, t, w, h = x.size()
        m = self.gauss.scale_n + 1
        feat = self.features.view(1, m * c, self.outdims)
        if shift is None:
            grid = self.grid.expand(N * t, self.outdims, 1, 2)
        else:
            # One shift per time step: tile the grid over time accordingly.
            grid = self.grid.expand(N, self.outdims, 1, 2)
            grid = torch.stack([grid + shift[:, i, :][:, None, None, :] for i in range(t)], 1)
            grid = grid.contiguous().view(-1, self.outdims, 1, 2)
        # Fold time into the batch dimension for 2d pyramid sampling.
        z = x.contiguous().transpose(2, 1).contiguous().view(-1, c, w, h)
        pools = [F.grid_sample(x, grid) for x in self.gauss(z)]
        y = torch.cat(pools, dim=1).squeeze(-1)
        y = (y * feat).sum(1).view(N, t, self.outdims)
        if self.bias is not None:
            y = y + self.bias
        return y

    def __repr__(self):
        c, t, w, h = self.in_shape
        r = self.__class__.__name__ + \
            ' (' + '{} x {} x {}'.format(c, w, h) + ' -> ' + str(self.outdims) + ')'
        if self.bias is not None:
            r += ' with bias'
        for ch in self.children():
            r += '\n -> ' + ch.__repr__()
        return r
class SpatialTransformerPooled3d(nn.Module):
    """Readout for 3d (video) inputs that grid-samples the feature map and a
    stack of successively average-pooled versions of it at one learned
    location per unit (shared across time), combining the sampled
    (level, channel) responses with feature weights. Supports pruning of
    individual feature entries via a persistent mask."""

    def __init__(self, in_shape, outdims, pool_steps=1, positive=False, bias=True,
                 init_range=.05, kernel_size=2, stride=2, grid=None, stop_grad=False):
        # in_shape: (channels, time, width, height); outdims: unit count.
        # grid: optional externally supplied (shared) grid Parameter.
        # stop_grad: detach the input so no gradient flows into the core.
        super().__init__()
        self._pool_steps = pool_steps
        self.in_shape = in_shape
        c, t, w, h = in_shape
        self.outdims = outdims
        self.positive = positive
        if grid is None:
            self.grid = Parameter(torch.Tensor(1, outdims, 1, 2))
        else:
            self.grid = grid
        self.features = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, outdims))
        # Binary mask used by prune() to permanently zero feature entries.
        self.register_buffer('mask', torch.ones_like(self.features))
        if bias:
            bias = Parameter(torch.Tensor(outdims))
            self.register_parameter('bias', bias)
        else:
            self.register_parameter('bias', None)
        self.avg = nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=False)
        self.init_range = init_range
        self.initialize()
        self.stop_grad = stop_grad

    @property
    def pool_steps(self):
        return self._pool_steps

    @pool_steps.setter
    def pool_steps(self, value):
        # The feature channel count depends on the pool depth, so both the
        # feature parameter and its prune mask must be re-created on change.
        assert value >= 0 and int(value) - value == 0, 'new pool steps must be a non-negative integer'
        if value != self._pool_steps:
            print('Resizing readout features')
            c, t, w, h = self.in_shape
            outdims = self.outdims
            self._pool_steps = int(value)
            self.features = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, outdims))
            self.mask = torch.ones_like(self.features)
            self.features.data.fill_(1 / self.in_shape[0])

    def initialize(self, init_noise=1e-3, grid=True):
        # randomly pick centers within the spatial map
        # NOTE(review): init_noise is currently unused; features start uniform.
        self.features.data.fill_(1 / self.in_shape[0])
        if self.bias is not None:
            self.bias.data.fill_(0)
        if grid:
            self.grid.data.uniform_(-self.init_range, self.init_range)

    def feature_l1(self, average=True, subs_idx=None):
        # L1 penalty over (optionally a subset of) output units.
        subs_idx = subs_idx if subs_idx is not None else slice(None)
        if average:
            return self.features[..., subs_idx].abs().mean()
        else:
            return self.features[..., subs_idx].abs().sum()

    def reset_fisher_prune_scores(self):
        # Accumulators for Fisher-information-based pruning scores.
        self._prune_n = 0
        self._prune_scores = self.features.detach() * 0

    def update_fisher_prune_scores(self):
        # Accumulate 0.5 * grad^2 * weight^2 (diagonal Fisher approximation);
        # requires a preceding backward pass.
        self._prune_n += 1
        if self.features.grad is None:
            raise ValueError('You need to run backward first')
        self._prune_scores += (0.5 * self.features.grad.pow(2) * self.features.pow(2)).detach()

    @property
    def fisher_prune_scores(self):
        return self._prune_scores / self._prune_n

    def prune(self):
        # For every output unit, zero out the still-unmasked feature entry
        # with the lowest prune score (masked entries are pushed out of the
        # argmin by the +1e6 offset).
        idx = (self.fisher_prune_scores + 1e6 * (1 - self.mask)).squeeze().argmin(dim=0)
        nt = idx.new
        seq = nt(np.arange(len(idx)))
        self.mask[:, idx, :, seq] = 0
        self.features.data[:, idx, :, seq] = 0

    def forward(self, x, shift=None, subs_idx=None):
        if self.stop_grad:
            x = x.detach()

        # Enforce the prune mask and parameter constraints before sampling.
        self.features.data *= self.mask

        if self.positive:
            positive(self.features)
        self.grid.data = torch.clamp(self.grid.data, -1, 1)

        N, c, t, w, h = x.size()
        m = self._pool_steps + 1
        if subs_idx is not None:
            feat = self.features[..., subs_idx].contiguous()
            outdims = feat.size(-1)
            feat = feat.view(1, m * c, outdims)
            grid = self.grid[:, subs_idx, ...]
        else:
            grid = self.grid
            feat = self.features.view(1, m * c, self.outdims)
            outdims = self.outdims

        if shift is None:
            grid = grid.expand(N * t, outdims, 1, 2)
        else:
            # One shift per time step: tile the grid over time accordingly.
            grid = grid.expand(N, outdims, 1, 2)
            grid = torch.stack([grid + shift[:, i, :][:, None, None, :] for i in range(t)], 1)
            grid = grid.contiguous().view(-1, outdims, 1, 2)
        # Fold time into the batch dimension for 2d sampling and pooling.
        z = x.contiguous().transpose(2, 1).contiguous().view(-1, c, w, h)
        pools = [F.grid_sample(z, grid)]
        for i in range(self._pool_steps):
            z = self.avg(z)
            pools.append(F.grid_sample(z, grid))
        y = torch.cat(pools, dim=1)
        y = (y.squeeze(-1) * feat).sum(1).view(N, t, outdims)
        if self.bias is not None:
            if subs_idx is None:
                y = y + self.bias
            else:
                y = y + self.bias[subs_idx]
        return y

    def __repr__(self):
        c, _, w, h = self.in_shape
        r = self.__class__.__name__ + \
            ' (' + '{} x {} x {}'.format(c, w, h) + ' -> ' + str(self.outdims) + ')'
        if self.bias is not None:
            r += ' with bias'
        if self.stop_grad:
            r += ', stop_grad=True'
        r += '\n'
        for ch in self.children():
            r += ' -> ' + ch.__repr__() + '\n'
        return r
class FactorizedSpatialTransformerPooled3d(SpatialTransformerPooled3d):
    """SpatialTransformerPooled3d whose feature weights are a rank-`components`
    factorization: features = sum_k scales[..., k] * channels[..., k]."""

    def __init__(self, *args, components=25, **kwargs):
        super().__init__(*args, **kwargs)
        self.components = components
        c, t, w, h = self.in_shape
        outdims = self.outdims
        # Per-output scales (broadcast over channels) and per-channel weights
        # (broadcast over outputs); their product summed over the component
        # axis yields the dense feature tensor of the parent class.
        self.feature_scales = Parameter(torch.Tensor(1, 1, 1, outdims, components))
        self.feature_channels = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, 1, components))
        self.init_features()

    def init_features(self):
        self.feature_channels.data.fill_(1 / self.in_shape[0])
        self.feature_scales.data.fill_(1 / self.in_shape[0])

    @property
    def features(self):
        # (1, 1, 1, outdims, k) * (1, m*c, 1, 1, k) -> broadcast, sum over k
        # -> dense (1, m*c, 1, outdims).
        return (self.feature_scales * self.feature_channels).sum(-1)

    @property
    def pool_steps(self):
        return self._pool_steps

    @pool_steps.setter
    def pool_steps(self, value):
        assert value >= 0 and int(value) - value == 0, 'new pool steps must be a non-negative integer'
        if value != self._pool_steps:
            print('Resizing readout features')
            c, t, w, h = self.in_shape
            self._pool_steps = int(value)
            # BUGFIX: re-create the channel factor with the same shape as in
            # __init__ (singleton outdims axis). It was previously created as
            # (1, m*c, 1, outdims, components), which silently broke the
            # low-rank factorization across outputs and inflated the
            # parameter count by a factor of outdims.
            self.feature_channels = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, 1, self.components))
            self.mask = torch.ones_like(self.features)
            self.init_features()
class SpatialTransformerXPooled3d(nn.Module):
    """Pooled spatial-transformer readout that samples each output unit at a
    short chain of ``grid_points`` locations rather than a single point.

    Responses are read with ``F.grid_sample`` from the input feature map and
    from ``pool_steps`` successively average-pooled versions of it, averaged
    over the chain of sample points, and combined with a learned feature
    vector per unit.

    NOTE(review): ``in_shape`` appears to be (channels, time, width, height)
    and the ``forward`` input (N, c, t, w, h) -- confirm against callers.
    """

    def __init__(self, in_shape, outdims, pool_steps=1, positive=False, bias=True,
                 init_range=.2, grid_points=10, kernel_size=4, stride=4, grid=None,
                 stop_grad=False):
        super().__init__()
        self._pool_steps = pool_steps
        self.in_shape = in_shape
        c, t, w, h = in_shape
        self.outdims = outdims
        self.positive = positive
        self._grid_points = grid_points
        if grid is None:
            # one chain of `grid_points` 2D sample locations per output unit
            self.grid = Parameter(torch.Tensor(1, outdims, grid_points, 2))
        else:
            # externally supplied (possibly shared) grid parameter
            self.grid = grid
        # one weight per (pooling level x channel) pair and output unit
        self.features = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, outdims))
        if bias:
            bias = Parameter(torch.Tensor(outdims))
            self.register_parameter('bias', bias)
        else:
            self.register_parameter('bias', None)
        self.avg = nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=False)
        self.init_range = init_range
        self.initialize()
        self.stop_grad = stop_grad

    @property
    def pool_steps(self):
        return self._pool_steps

    @pool_steps.setter
    def pool_steps(self, value):
        # Changing the number of pooling steps changes the channel dimension
        # of `features`, so the parameter has to be re-allocated.
        assert value >= 0 and int(value) - value == 0, 'new pool steps must be a non-negative integer'
        if value != self._pool_steps:
            print('Resizing readout features')
            c, t, w, h = self.in_shape
            outdims = self.outdims
            self._pool_steps = int(value)
            self.features = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, outdims))
            self.features.data.fill_(1 / self.in_shape[0])

    def initialize(self, init_noise=1e-3, grid=True):
        # randomly pick centers within the spatial map
        # NOTE(review): `init_noise` is accepted but never used here.
        self.features.data.fill_(1 / self.in_shape[0])
        if self.bias is not None:
            self.bias.data.fill_(0)
        if grid:
            self.grid.data.uniform_(-self.init_range, self.init_range)

    def feature_l1(self, average=True, subs_idx=None):
        """L1 penalty on the feature weights (mean or sum over `subs_idx` units)."""
        subs_idx = subs_idx if subs_idx is not None else slice(None)
        if average:
            return self.features[..., subs_idx].abs().mean()
        else:
            return self.features[..., subs_idx].abs().sum()

    def dgrid_l2(self, average=True, subs_idx=None):
        """L2 penalty on distances between consecutive grid points; keeps each
        unit's chain of sample points compact."""
        subs_idx = subs_idx if subs_idx is not None else slice(None)
        if average:
            return (self.grid[:, subs_idx, :-1, :] - self.grid[:, subs_idx, 1:, :]).pow(2).mean()
        else:
            return (self.grid[:, subs_idx, :-1, :] - self.grid[:, subs_idx, 1:, :]).pow(2).sum()

    def forward(self, x, shift=None, subs_idx=None):
        """Read out unit responses from a (N, c, t, w, h) input.

        shift: optional per-sample, per-frame 2D shift added to all grids.
        subs_idx: optional subset of output units to evaluate.
        Returns a (N, t, outdims) tensor.
        """
        if self.stop_grad:
            # treat the input as constant; gradients stop at the readout
            x = x.detach()

        if self.positive:
            # `positive` is a helper defined elsewhere in this module;
            # presumably projects the weights to be non-negative in place --
            # confirm its semantics there.
            positive(self.features)
        # keep sample locations inside grid_sample's valid [-1, 1] range
        self.grid.data = torch.clamp(self.grid.data, -1, 1)

        N, c, t, w, h = x.size()
        m = self._pool_steps + 1
        if subs_idx is not None:
            feat = self.features[..., subs_idx].contiguous()
            outdims = feat.size(-1)
            feat = feat.view(1, m * c, outdims)
            grid = self.grid[:, subs_idx, ...]
        else:
            grid = self.grid
            feat = self.features.view(1, m * c, self.outdims)
            outdims = self.outdims

        if shift is None:
            # one (identical) grid per (sample, frame) pair
            grid = grid.expand(N * t, outdims, self._grid_points, 2)
        else:
            # add the per-frame shift before flattening the time axis
            grid = grid.expand(N, outdims, self._grid_points, 2)
            grid = torch.stack([grid + shift[:, i, :][:, None, None, :] for i in range(t)], 1)
            grid = grid.contiguous().view(-1, outdims, self._grid_points, 2)
        # fold time into the batch axis so grid_sample sees a 4D input
        z = x.contiguous().transpose(2, 1).contiguous().view(-1, c, w, h)
        # sample at every grid point and average over the chain (dim 3)
        pools = [F.grid_sample(z, grid).mean(dim=3, keepdim=True)]
        for i in range(self._pool_steps):
            z = self.avg(z)
            pools.append(F.grid_sample(z, grid).mean(dim=3, keepdim=True))
        y = torch.cat(pools, dim=1)
        y = (y.squeeze(-1) * feat).sum(1).view(N, t, outdims)

        if self.bias is not None:
            if subs_idx is None:
                y = y + self.bias
            else:
                y = y + self.bias[subs_idx]
        return y

    def __repr__(self):
        # "<ClassName> (c x w x h -> outdims)" plus flags and child modules
        c, _, w, h = self.in_shape
        r = self.__class__.__name__ + \
            ' (' + '{} x {} x {}'.format(c, w, h) + ' -> ' + str(self.outdims) + ')'
        if self.bias is not None:
            r += ' with bias'
        if self.stop_grad:
            r += ', stop_grad=True'
        r += '\n'
        for ch in self.children():
            r += ' -> ' + ch.__repr__() + '\n'
        return r
class ExtendedConv2d(nn.Conv2d):
    """2D convolution that additionally understands padding='SAME'.

    For stride 1 the 'SAME' padding is derived from the (odd) kernel size
    alone; for larger strides the input spatial shape must be supplied via
    `in_shape` so the required padding can be computed.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, in_shape=None, groups=1, bias=True):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        if padding == 'SAME':
            kh, kw = kernel_size
            sh, sw = stride
            assert kh % 2 == 1 and kw % 2 == 1, "kernel must be odd sized"
            if sh == 1 and sw == 1:
                # symmetric padding keeps the spatial size unchanged
                padding = (kh - 1) // 2, (kw - 1) // 2
            else:
                assert in_shape is not None, 'Input shape must be provided for stride that is not 1'
                h, w = in_shape[-2], in_shape[-1]
                padding = (ceil((h * (sh - 1) + kh - 1) / 2),
                           ceil((w * (sw - 1) + kw - 1) / 2))
        super().__init__(in_channels, out_channels, kernel_size, stride=stride,
                         padding=padding, groups=groups, bias=bias)
class DepthSeparableConv2d(nn.Sequential):
    """Depth-separable convolution: pointwise 1x1, then depthwise kxk
    (groups == channels), then another pointwise 1x1.

    NOTE(review): the `stride` argument is accepted but not forwarded -- the
    spatial convolution is hard-wired to stride=1; confirm this is intended.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True):
        super().__init__()
        pointwise_in = nn.Conv2d(in_channels, out_channels, 1, bias=bias)
        depthwise = nn.Conv2d(out_channels, out_channels, kernel_size, stride=1, padding=padding,
                              dilation=dilation, bias=bias, groups=out_channels)
        pointwise_out = nn.Conv2d(out_channels, out_channels, 1, bias=bias)
        self.add_module('in_depth_conv', pointwise_in)
        self.add_module('spatial_conv', depthwise)
        self.add_module('out_depth_conv', pointwise_out)
class ConstrainedConv2d(ExtendedConv2d):
    """ExtendedConv2d that runs an optional weight-constraint function
    (signature: constrain(weight, cache=...) -> cache) before every forward.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, in_shape=None, groups=1, bias=True, constrain=None):
        super().__init__(in_channels, out_channels, kernel_size, stride=stride,
                         padding=padding, in_shape=in_shape, groups=groups, bias=bias)
        self.constrain_fn = constrain
        self.constrain_cache = None

    def constrain(self):
        # no-op unless a constraint function was supplied
        if self.constrain_fn is None:
            return
        self.constrain_cache = self.constrain_fn(self.weight, cache=self.constrain_cache)

    def forward(self, *args, **kwargs):
        self.constrain()
        return super().forward(*args, **kwargs)
class ConstrainedConvTranspose2d(nn.ConvTranspose2d):
    """Transposed convolution that runs an optional weight-constraint function
    (signature: constrain(weight, cache=...) -> cache) before every forward.
    """

    def __init__(self, *args, constrain=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.constrain_fn = constrain
        self.constrain_cache = None

    def constrain(self):
        # no-op unless a constraint function was supplied
        if self.constrain_fn is None:
            return
        self.constrain_cache = self.constrain_fn(self.weight, cache=self.constrain_cache)

    def forward(self, *args, **kwargs):
        self.constrain()
        return super().forward(*args, **kwargs)
def conv2d_config(in_shape, out_shape, kernel_size, stride=None):
    """
    Given desired input and output tensor shapes and convolution kernel size,
    returns configurations that can be used to construct an appropriate 2D
    convolution operation satisfying the desired properties.

    Args:
        in_shape: shape of the input tensor. May be either [batch, channel, height, width]
            or [channel, height, width]
        out_shape: shape of the output tensor. May be either [batch, channel, height, width]
            or [channel, height, width]
        kernel_size: shape of the kernel. May be an integer or a pair tuple
        stride: (OPTIONAL) desired stride to be used. If not provided, optimal stride size
            will be computed and returned to minimize the necessary amount of padding
            or stripping.

    Returns:
        A tuple (stride, padding, output_padding, padded_shape, conv_type, padding_type).
        stride: optimal stride size to be used. If stride was passed in, no change is made.
        padding: padding to be applied to each edge
        output_padding: if operation is transpose convolution, supplies output_padding that's
            necessary. Otherwise, this is None.
        conv_type: the required type of convolution. It is either "NORMAL" or "TRANSPOSE"
        padding_type: string to indicate the type of padding. Either "VALID" or "SAME".
    """
    in_shape = np.array(in_shape[-3:])
    out_shape = np.array(out_shape[-3:])
    kern_shape = np.array(kernel_size)

    # determine the kind of convolution to use
    if np.all(in_shape[-2:] >= out_shape[-2:]):
        conv_type = "NORMAL"
    elif np.all(in_shape[-2:] <= out_shape[-2:]):
        conv_type = "TRANSPOSE"
        # a transpose convolution is configured as the mirror image of the
        # corresponding normal convolution, so swap input and output roles
        in_shape, out_shape = out_shape, in_shape
    else:
        raise ValueError('Input shape dimensions must be both >= OR <= the output shape dimensions')

    if stride is None:
        # smallest stride that still covers the output with this kernel.
        # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # use the builtin `int` (the behavior is identical).
        stride = np.ceil((in_shape[-2:] - kern_shape + 1) / (out_shape[-2:] - 1)).astype(int)
    else:
        stride = np.array(_pair(stride))
    stride[stride <= 0] = 1

    # total padding needed on each spatial axis
    padding = (out_shape[-2:] - 1) * stride + kern_shape - in_shape[-2:]
    if np.all(np.ceil(in_shape[-2:] / stride) == out_shape[-2:]):
        padding_type = 'SAME'
    else:
        padding_type = 'VALID'

    # get padded input shape
    in_shape[-2:] = in_shape[-2:] + padding.astype(int)
    padded_shape = tuple(in_shape.tolist())

    if conv_type == "TRANSPOSE":
        # odd total padding needs one extra output cell on that axis
        output_padding = tuple((padding % 2 != 0).astype(int).tolist())
    else:
        output_padding = None

    padding = tuple(np.ceil(padding / 2).astype(int).tolist())
    stride = tuple(stride.tolist())
    return stride, padding, output_padding, \
        padded_shape, conv_type, padding_type
def get_conv(in_shape, out_shape, kernel_size, stride=None, constrain=None, **kwargs):
    """
    Build a convolution module that maps `in_shape` to `out_shape`.

    Args:
        in_shape: input tensor shape, [batch, channel, height, width] or
            [channel, height, width]
        out_shape: output tensor shape, in the same format as `in_shape`
        kernel_size: kernel shape; an integer or a pair tuple
        stride: (OPTIONAL) stride to use. When omitted, an optimal stride is
            computed by `conv2d_config`
        constrain: (OPTIONAL) constraint function applied to the filter weights
        **kwargs: forwarded to the underlying convolution module

    Returns:
        A `ConstrainedConv2d` when the spatial size shrinks or stays equal,
        otherwise a `ConstrainedConvTranspose2d`.
    """
    in_channels = in_shape[-3]
    out_channels = out_shape[-3]
    stride, padding, output_padding, _, conv_type, _ = conv2d_config(
        in_shape, out_shape, kernel_size, stride)
    if conv_type == "TRANSPOSE":
        return ConstrainedConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                          constrain=constrain, output_padding=output_padding, **kwargs)
    return ConstrainedConv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                             constrain=constrain, **kwargs)
class Pyramid(nn.Module):
    """Laplace image pyramid built from grouped (depthwise) convolutions."""

    _filter_dict = {
        'gauss5x5': np.float32([
            [0.003765, 0.015019, 0.023792, 0.015019, 0.003765],
            [0.015019, 0.059912, 0.094907, 0.059912, 0.015019],
            [0.023792, 0.094907, 0.150342, 0.094907, 0.023792],
            [0.015019, 0.059912, 0.094907, 0.059912, 0.015019],
            [0.003765, 0.015019, 0.023792, 0.015019, 0.003765]]),
        'gauss3x3': np.float32([
            [1 / 16, 1 / 8, 1 / 16],
            [1 / 8, 1 / 4, 1 / 8],
            [1 / 16, 1 / 8, 1 / 16]]),
        'laplace5x5': np.outer(np.float32([1, 4, 6, 4, 1]), np.float32([1, 4, 6, 4, 1])) / 256,
    }

    def __init__(self, scale_n=4, type='gauss5x5', downsample=True, _skip_upsampling=False):
        """
        Setup Laplace image pyramid
        Args:
            scale_n: number of Laplace pyramid layers to construct
            type: type of Gaussian filter used in pyramid construction. Valid options are: 'gauss5x5', 'gauss3x3', and 'laplace5x5'
            downsample: whether to downsample the image in each layer. Defaults to True
        """
        super().__init__()
        self.type = type
        self.downsample = downsample
        self._skip_upsampling = _skip_upsampling
        smoothing_kernel = self._filter_dict[type]
        self.register_buffer('filter', torch.from_numpy(smoothing_kernel))
        self.scale_n = scale_n
        self._kern = smoothing_kernel.shape[0]
        self._pad = self._kern // 2

    def lap_split(self, img):
        """Split `img` into (low-pass component, high-pass residual)."""
        N, c, h, w = img.size()
        # one copy of the smoothing kernel per channel (groups=c -> depthwise)
        kern = self.filter.expand(c, 1, self._kern, self._kern).contiguous()
        # the necessary output padding depends on even/odd of the dimension
        out_pad = (h + 1) % 2, (w + 1) % 2
        smooth = F.conv2d(img, kern, padding=self._pad, groups=c)
        if not self.downsample:
            lo = lo2 = smooth
        else:
            lo = smooth[:, :, ::2, ::2]
            if self._skip_upsampling:
                # Technically incorrect implementation of the Laplace pyramid
                lo2 = smooth
            else:
                lo2 = 4 * F.conv_transpose2d(lo, kern, stride=2, padding=self._pad,
                                             output_padding=out_pad, groups=c)
        return lo, img - lo2

    def forward(self, img):
        # repeatedly split off the high-pass residual; the remaining
        # low-pass image is appended as the final level
        levels = []
        for _ in range(self.scale_n):
            img, residual = self.lap_split(img)
            levels.append(residual)
        levels.append(img)
        return levels

    def __repr__(self):
        return "Pyramid(scale_n={scale_n}, padding={_pad}, downsample={downsample}, type={type})".format(
            scale_n=self.scale_n, _pad=self._pad, downsample=self.downsample, type=self.type)
class RotEquiConv2d(nn.Module):
    # Rotation-equivariant 2D convolution. Filters are parameterized by
    # coefficients over a 2D Hermite-function basis (`hermite_2d`, defined
    # elsewhere in this module), so each learned filter can be rendered at
    # `num_rotations` orientations via `rotate_weights_hermite`.
    # NOTE(review): the basis is presumably sampled on a grid `upsampling`x
    # finer than kernel_size and pooled back down in the pooled_* properties
    # -- confirm against hermite_2d's definition.

    def __init__(self, in_features, out_features, num_rotations, kernel_size,
                 padding=0, bias=True, momentum=0.1, upsampling=2, first_layer=False):
        super().__init__()
        if not first_layer:
            # deeper layers receive one input copy per rotation
            in_features *= num_rotations
        # normalization over (out_features, num_rotations, H, W) as 3D features
        self._batch_norm = nn.BatchNorm3d(out_features, momentum=momentum, affine=False)
        H, self.desc, self.mu = hermite_2d(kernel_size, kernel_size * upsampling, 2 * np.sqrt(kernel_size))
        self.register_buffer('hermite_basis', torch.FloatTensor(H))
        # number of Hermite coefficients used per filter
        n_coeffs = kernel_size * (kernel_size + 1) // 2
        self.coeffs = Parameter(torch.FloatTensor(size=(n_coeffs, in_features, out_features)).normal_(std=0.1))
        # bias is broadcast over (N, out_features, rotations, H, W)
        self.bias = Parameter(torch.FloatTensor(size=(1, out_features, 1, 1, 1)).zero_()) if bias else None
        self.num_rotations = num_rotations
        self.out_features = out_features
        self.first_layer = first_layer
        self.padding = padding

    @property
    def raw_weights(self):
        # un-rotated filters assembled from basis x coefficients
        return torch.einsum('ijk,ilm->mljk', (self.hermite_basis, self.coeffs))

    @property
    def pooled_raw_weights(self):
        # un-rotated filters pooled down (2x2 average) to working resolution
        return F.avg_pool2d(self.raw_weights, stride=2, kernel_size=2)

    @property
    def weights(self):
        # filters replicated at all `num_rotations` orientations
        return rotate_weights_hermite(self.hermite_basis, self.desc, self.mu, self.coeffs, self.num_rotations,
                                      first_layer=self.first_layer)

    @property
    def pooled_weights(self):
        return F.avg_pool2d(self.weights, stride=2, kernel_size=2)

    def forward(self, input):
        # convolve with all rotated filter copies at once
        x = F.conv2d(input, self.pooled_weights, padding=self.padding)
        N, c, *spatial = x.shape
        # split channels into (out_features, num_rotations) for batch norm
        x = x.view(N, self.out_features, self.num_rotations, *spatial)
        x = self._batch_norm(x)
        if self.bias is not None:
            # bias is applied after the affine-free normalization
            x = x + self.bias
        return x.view(N, c, *spatial)
class MaxMin(nn.Module):
    """Pairwise max-min activation: splits channels into groups of two and
    concatenates the per-group maxima with the per-group minima along dim 1.

    NOTE(review): presumably the MaxMin activation used in Lipschitz-constrained
    networks -- confirm against the helpers `maxout_by_group` /
    `minout_by_group` defined in this module.
    """

    def __init__(self, axis=-1):
        # BUG FIX: the original called super(MaxMinGroup, self).__init__(),
        # but no class named MaxMinGroup exists, so instantiating MaxMin
        # raised NameError. Use the zero-argument super() form instead.
        super().__init__()
        self.axis = axis

    def forward(self, x):
        maxes = maxout_by_group(x, 2, self.axis)
        mins = minout_by_group(x, 2, self.axis)
        return torch.cat((maxes, mins), dim=1)

    def extra_repr(self):
        return 'group_size: {}'.format(2)
class GroupSort(nn.Module):
    """Activation that sorts the input within consecutive channel groups of
    size `group_size` along `axis` (see `group_sort` in this module)."""

    def __init__(self, group_size, axis=-1):
        super(GroupSort, self).__init__()
        self.group_size = group_size
        self.axis = axis

    def forward(self, x):
        group_sorted = group_sort(x, self.group_size, self.axis)
        # assert check_group_sorted(group_sorted, self.group_size, axis=self.axis) == 1, "GroupSort failed. "
        return group_sorted

    def extra_repr(self):
        # BUG FIX: the original formatted `self.num_units`, an attribute that
        # is never set, so extra_repr() raised AttributeError (breaking
        # repr() of any model containing this module). Report the stored
        # group size, matching MaxMin.extra_repr's style.
        return 'group_size: {}'.format(self.group_size)
def process_group_size(x, group_size, axis=-1):
    """Return the view-shape that splits `axis` of `x` into groups of
    `group_size`: the grouped axis becomes -1 (inferred) and a new axis of
    length `group_size` is inserted immediately after it (or appended last,
    for the default axis=-1).

    Raises:
        ValueError: if the length of `axis` is not a multiple of `group_size`.
    """
    size = list(x.size())
    num_channels = size[axis]
    if num_channels % group_size:
        # BUG FIX: the original interpolated the undefined name `num_units`
        # here, so the intended ValueError surfaced as a NameError instead.
        raise ValueError('number of features({}) is not a '
                         'multiple of group_size({})'.format(num_channels, group_size))
    size[axis] = -1
    if axis == -1:
        size += [group_size]
    else:
        size.insert(axis + 1, group_size)
    return size
def group_sort(x, group_size, axis=-1):
    """Sort `x` within each consecutive group of `group_size` entries along
    `axis`, returning a tensor of the original shape."""
    grouped_shape = process_group_size(x, group_size, axis)
    # the group dimension sits after `axis` (or last, for axis=-1)
    sort_dim = axis if axis == -1 else axis + 1
    ordered, _ = x.view(*grouped_shape).sort(dim=sort_dim)
    return ordered.view(*list(x.shape))
def maxout_by_group(x, group_size, axis=-1):
    """Max over each consecutive group of `group_size` entries along `axis`."""
    grouped_shape = process_group_size(x, group_size, axis)
    reduce_dim = axis if axis == -1 else axis + 1
    return x.view(*grouped_shape).max(reduce_dim)[0]
def minout_by_group(x, group_size, axis=-1):
    """Min over each consecutive group of `group_size` entries along `axis`."""
    grouped_shape = process_group_size(x, group_size, axis)
    reduce_dim = axis if axis == -1 else axis + 1
    return x.view(*grouped_shape).min(reduce_dim)[0]
def check_group_sorted(x, group_size, axis=-1):
    # Verify that `x` is non-decreasing within each group of `group_size`
    # along `axis`; returns 1 if sorted, 0 otherwise (int flags, not bools).
    # NOTE(review): for axis != -1 the group dimension inserted by
    # process_group_size sits at axis + 1, yet np.diff below runs along
    # `axis` itself -- confirm this is intended for non-default axes.
    size = process_group_size(x, group_size, axis)
    x_np = x.cpu().data.numpy()
    x_np = x_np.reshape(*size)
    x_np_diff = np.diff(x_np, axis=axis)
    # Return 1 iff all elements are increasing.
    if np.sum(x_np_diff < 0) > 0:
        return 0
    else:
        return 1
| eywalker/attorch | attorch/layers.py | Python | mit | 46,852 | [
"Gaussian"
] | 6f24e064918f5bc9867c55beac5212258b538449d4bd091ab7cb90e246e20840 |
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import itertools
import cartopy.crs as ccrs
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import shapely.geometry
import iris
import iris.analysis.cartography
import iris.analysis.geometry
import iris.analysis.maths
import iris.coord_systems
import iris.coords
import iris.cube
import iris.tests.stock
class TestAnalysisCubeCoordComparison(tests.IrisTest):
    # Tests for iris.analysis.coord_comparison, which groups the coordinates
    # of several cubes by how they correspond across those cubes.
    # NOTE(review): this module is Python 2 (`dict.iteritems` below); a
    # Python 3 port would need `.items()` instead.

    def assertComparisonDict(self, comarison_dict, reference_filename):
        # Render the comparison dict as stable, sorted text and compare it
        # against a stored reference file.
        string = ''
        for key, coord_groups in comarison_dict.iteritems():
            string += ('%40s ' % key)
            names = [[coord.name() if coord is not None else 'None' for coord in coords] for coords in coord_groups]
            string += str(sorted(names))
            string += '\n'
        self.assertString(string, reference_filename)

    def test_coord_comparison(self):
        # cube1: 41x41 lon/lat grid starting at -180, plus scalar aux coords.
        cube1 = iris.cube.Cube(np.zeros((41, 41)))
        lonlat_cs = iris.coord_systems.GeogCS(6371229)
        lon_points1 = -180 + 4.5 * np.arange(41, dtype=np.float32)
        lat_points = -90 + 4.5 * np.arange(41, dtype=np.float32)
        cube1.add_dim_coord(iris.coords.DimCoord(lon_points1, 'longitude', units='degrees', coord_system=lonlat_cs), 0)
        cube1.add_dim_coord(iris.coords.DimCoord(lat_points, 'latitude', units='degrees', coord_system=lonlat_cs), 1)
        cube1.add_aux_coord(iris.coords.AuxCoord(0, long_name='z'))
        cube1.add_aux_coord(iris.coords.AuxCoord(['foobar'], long_name='f', units='no_unit'))

        # cube2: same latitudes, shifted longitudes, plus an extra 'z' dim.
        cube2 = iris.cube.Cube(np.zeros((41, 41, 5)))
        lonlat_cs = iris.coord_systems.GeogCS(6371229)
        lon_points2 = -160 + 4.5 * np.arange(41, dtype=np.float32)
        cube2.add_dim_coord(iris.coords.DimCoord(lon_points2, 'longitude', units='degrees', coord_system=lonlat_cs), 0)
        cube2.add_dim_coord(iris.coords.DimCoord(lat_points, 'latitude', units='degrees', coord_system=lonlat_cs), 1)
        cube2.add_dim_coord(iris.coords.DimCoord([5, 7, 9, 11, 13], long_name='z'), 2)

        # cube3: cube1 with lat/lon dimensions transposed and 'z' changed.
        cube3 = cube1.copy()
        lon = cube3.coord("longitude")
        lat = cube3.coord("latitude")
        cube3.remove_coord(lon)
        cube3.remove_coord(lat)
        cube3.add_dim_coord(lon, 1)
        cube3.add_dim_coord(lat, 0)
        cube3.coord('z').points = [20]

        # cube4: cube2 with lat/lon dimensions transposed.
        cube4 = cube2.copy()
        lon = cube4.coord("longitude")
        lat = cube4.coord("latitude")
        cube4.remove_coord(lon)
        cube4.remove_coord(lat)
        cube4.add_dim_coord(lon, 1)
        cube4.add_dim_coord(lat, 0)

        coord_comparison = iris.analysis.coord_comparison

        # every pairing (and some triples) against stored reference text
        self.assertComparisonDict(coord_comparison(cube1, cube1), ('analysis', 'coord_comparison', 'cube1_cube1.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube2), ('analysis', 'coord_comparison', 'cube1_cube2.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube3), ('analysis', 'coord_comparison', 'cube1_cube3.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube4), ('analysis', 'coord_comparison', 'cube1_cube4.txt'))
        self.assertComparisonDict(coord_comparison(cube2, cube3), ('analysis', 'coord_comparison', 'cube2_cube3.txt'))
        self.assertComparisonDict(coord_comparison(cube2, cube4), ('analysis', 'coord_comparison', 'cube2_cube4.txt'))
        self.assertComparisonDict(coord_comparison(cube3, cube4), ('analysis', 'coord_comparison', 'cube3_cube4.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube1, cube1), ('analysis', 'coord_comparison', 'cube1_cube1_cube1.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube2, cube1), ('analysis', 'coord_comparison', 'cube1_cube2_cube1.txt'))

        # get a coord comparison result and check that we are getting back what was expected
        coord_group = coord_comparison(cube1, cube2)['grouped_coords'][0]
        self.assertIsInstance(coord_group, iris.analysis._CoordGroup)
        self.assertIsInstance(list(coord_group)[0], iris.coords.Coord)
class TestAnalysisWeights(tests.IrisTest):
    # Tests for weighted collapsing (iris.analysis.MEAN with `weights=`).

    def test_weighted_mean_little(self):
        # 3x3 cube with hand-made weights; collapse each axis in turn and
        # compare against stored CML reference results.
        data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
        weights = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=np.float32)

        cube = iris.cube.Cube(data, long_name="test_data", units="1")
        hcs = iris.coord_systems.GeogCS(6371229)
        lat_coord = iris.coords.DimCoord(np.array([1, 2, 3], dtype=np.float32), long_name="lat", units="1", coord_system=hcs)
        lon_coord = iris.coords.DimCoord(np.array([1, 2, 3], dtype=np.float32), long_name="lon", units="1", coord_system=hcs)
        cube.add_dim_coord(lat_coord, 0)
        cube.add_dim_coord(lon_coord, 1)
        cube.add_aux_coord(iris.coords.AuxCoord(np.arange(3, dtype=np.float32), long_name="dummy", units=1), 1)
        self.assertCML(cube, ('analysis', 'weighted_mean_source.cml'))

        # collapse by coordinate name
        a = cube.collapsed('lat', iris.analysis.MEAN, weights=weights)
        self.assertCMLApproxData(a, ('analysis', 'weighted_mean_lat.cml'))

        # collapse by coordinate instance
        b = cube.collapsed(lon_coord, iris.analysis.MEAN, weights=weights)
        b.data = np.asarray(b.data)
        self.assertCMLApproxData(b, ('analysis', 'weighted_mean_lon.cml'))
        self.assertEquals(b.coord("dummy").shape, (1,))

        # test collapsing multiple coordinates (and the fact that one of the
        # coordinates isn't the same coordinate instance as on the cube)
        c = cube.collapsed([lat_coord[:], lon_coord], iris.analysis.MEAN, weights=weights)
        self.assertCMLApproxData(c, ('analysis', 'weighted_mean_latlon.cml'))
        self.assertEquals(c.coord("dummy").shape, (1,))

        # Check new coord bounds - made from points
        self.assertArrayEqual(c.coord('lat').bounds, [[1, 3]])

        # Check new coord bounds - made from bounds
        cube.coord('lat').bounds = [[0.5, 1.5], [1.5, 2.5], [2.5, 3.5]]
        c = cube.collapsed(['lat', 'lon'], iris.analysis.MEAN, weights=weights)
        self.assertArrayEqual(c.coord('lat').bounds, [[0.5, 3.5]])
        cube.coord('lat').bounds = None

        # Check there was no residual change
        self.assertCML(cube, ('analysis', 'weighted_mean_source.cml'))

    @iris.tests.skip_data
    def test_weighted_mean(self):
        ### compare with pp_area_avg - which collapses both lat and lon
        #
        #     pp = ppa('/data/local/dataZoo/PP/simple_pp/global.pp', 0)
        #     print, pp_area(pp, /box)
        #     print, pp_area_avg(pp, /box)  # 287.927
        #     ; gives an answer of 287.927
        #
        ###
        e = iris.tests.stock.simple_pp()
        self.assertCML(e, ('analysis', 'weighted_mean_original.cml'))
        # area weights need bounds; guess them, then drop them again so the
        # cube itself is left unchanged
        e.coord('latitude').guess_bounds()
        e.coord('longitude').guess_bounds()
        area_weights = iris.analysis.cartography.area_weights(e)
        e.coord('latitude').bounds = None
        e.coord('longitude').bounds = None
        f, collapsed_area_weights = e.collapsed('latitude', iris.analysis.MEAN, weights=area_weights, returned=True)
        g = f.collapsed('longitude', iris.analysis.MEAN, weights=collapsed_area_weights)
        # check it's a 1D, scalar cube (actually, it should really be 0D)!
        self.assertEquals(g.shape, (1,))
        # check the value - pp_area_avg's result of 287.927 differs by factor of 1.00002959
        np.testing.assert_approx_equal(g.data[0], 287.935, significant=5)

        # check we get summed weights even if we don't give any
        h, summed_weights = e.collapsed('latitude', iris.analysis.MEAN, returned=True)
        assert(summed_weights is not None)

        # Check there was no residual change
        e.coord('latitude').bounds = None
        e.coord('longitude').bounds = None
        self.assertCML(e, ('analysis', 'weighted_mean_original.cml'))

        # Test collapsing of missing coord
        self.assertRaises(iris.exceptions.CoordinateNotFoundError, e.collapsed, 'platitude', iris.analysis.MEAN)

        # Test collapsing of non data coord
        self.assertRaises(iris.exceptions.CoordinateCollapseError, e.collapsed, 'pressure', iris.analysis.MEAN)
@iris.tests.skip_data
class TestAnalysisBasic(tests.IrisTest):
    # Collapses a real rotated-pole cube with each basic aggregator and
    # compares the results against stored CML reference files.

    def setUp(self):
        file = tests.get_data_path(('PP', 'aPProt1', 'rotatedMHtimecube.pp'))
        cubes = iris.load(file)
        self.cube = cubes[0]
        self.assertCML(self.cube, ('analysis', 'original.cml'))

    def _common(self, name, aggregate, original_name='original_common.cml', *args, **kwargs):
        # Collapse latitude, then longitude, then both in one call, checking
        # each result against '<name>_*.cml'; finally verify the source cube
        # is left unchanged.
        self.cube.data = self.cube.data.astype(np.float64)
        self.assertCML(self.cube, ('analysis', original_name))

        a = self.cube.collapsed('grid_latitude', aggregate)
        self.assertCMLApproxData(a, ('analysis', '%s_latitude.cml' % name), *args, **kwargs)

        b = a.collapsed('grid_longitude', aggregate)
        self.assertCMLApproxData(b, ('analysis', '%s_latitude_longitude.cml' % name), *args, **kwargs)

        c = self.cube.collapsed(['grid_latitude', 'grid_longitude'], aggregate)
        self.assertCMLApproxData(c, ('analysis', '%s_latitude_longitude_1call.cml' % name), *args, **kwargs)

        # Check there was no residual change
        self.assertCML(self.cube, ('analysis', original_name))

    def test_mean(self):
        self._common('mean', iris.analysis.MEAN, decimal=1)

    def test_std_dev(self):
        # as the numbers are so high, trim off some trailing digits & compare to 0dp
        self._common('std_dev', iris.analysis.STD_DEV, decimal=1)

    def test_hmean(self):
        # harmonic mean requires data > 0
        self.cube.data *= self.cube.data
        self._common('hmean', iris.analysis.HMEAN, 'original_hmean.cml', decimal=1)

    def test_gmean(self):
        self._common('gmean', iris.analysis.GMEAN, decimal=1)

    def test_variance(self):
        # as the numbers are so high, trim off some trailing digits & compare to 0dp
        self._common('variance', iris.analysis.VARIANCE, decimal=1)

    def test_median(self):
        self._common('median', iris.analysis.MEDIAN)

    def test_sum(self):
        # as the numbers are so high, trim off some trailing digits & compare to 0dp
        self._common('sum', iris.analysis.SUM, decimal=1)

    def test_max(self):
        self._common('max', iris.analysis.MAX)

    def test_min(self):
        self._common('min', iris.analysis.MIN)

    def test_rms(self):
        self._common('rms', iris.analysis.RMS)

    def test_duplicate_coords(self):
        self.assertRaises(ValueError, tests.stock.track_1d, duplicate_x=True)

    def test_xy_range(self):
        # non-circular longitude -> the computed range is the data extent
        result_non_circ = iris.analysis.cartography._xy_range(self.cube)
        self.assertEqual(self.cube.coord('grid_longitude').circular, False)
        np.testing.assert_array_almost_equal(
            result_non_circ, ((313.02, 392.11), (-22.49, 24.92)), decimal=0)

    def test_xy_range_geog_cs(self):
        # circular longitude -> the range spans the full globe
        cube = iris.tests.stock.global_pp()
        self.assertTrue(cube.coord('longitude').circular)
        result = iris.analysis.cartography._xy_range(cube)
        np.testing.assert_array_almost_equal(
            result, ((0, 360), (-90, 90)), decimal=0)

    def test_xy_range_geog_cs_regional(self):
        # slicing the global cube makes the longitude non-circular again
        cube = iris.tests.stock.global_pp()
        cube = cube[10:20, 20:30]
        self.assertFalse(cube.coord('longitude').circular)
        result = iris.analysis.cartography._xy_range(cube)
        np.testing.assert_array_almost_equal(
            result, ((75, 108.75), (42.5, 65)), decimal=0)
class TestMissingData(tests.IrisTest):
    # Collapsing behaviour in the presence of missing data: NaNs propagate
    # into the collapsed result, whereas masked points are skipped.

    def setUp(self):
        # identical cubes; one with NaNs in the data, one with the same
        # points masked instead
        self.cube_with_nan = tests.stock.simple_2d()

        data = self.cube_with_nan.data.astype(np.float32)
        self.cube_with_nan.data = data.copy()
        self.cube_with_nan.data[1, 0] = np.nan
        self.cube_with_nan.data[2, 2] = np.nan
        self.cube_with_nan.data[2, 3] = np.nan

        self.cube_with_mask = tests.stock.simple_2d()
        self.cube_with_mask.data = ma.array(self.cube_with_nan.data,
                                            mask=np.isnan(self.cube_with_nan.data))

    def test_max(self):
        cube = self.cube_with_nan.collapsed('foo', iris.analysis.MAX)
        np.testing.assert_array_equal(cube.data, np.array([3, np.nan, np.nan]))

        cube = self.cube_with_mask.collapsed('foo', iris.analysis.MAX)
        np.testing.assert_array_equal(cube.data, np.array([3, 7, 9]))

    def test_min(self):
        cube = self.cube_with_nan.collapsed('foo', iris.analysis.MIN)
        np.testing.assert_array_equal(cube.data, np.array([0, np.nan, np.nan]))

        cube = self.cube_with_mask.collapsed('foo', iris.analysis.MIN)
        np.testing.assert_array_equal(cube.data, np.array([0, 5, 8]))

    def test_sum(self):
        cube = self.cube_with_nan.collapsed('foo', iris.analysis.SUM)
        np.testing.assert_array_equal(cube.data, np.array([6, np.nan, np.nan]))

        cube = self.cube_with_mask.collapsed('foo', iris.analysis.SUM)
        np.testing.assert_array_equal(cube.data, np.array([6, 18, 17]))
class TestAggregator_mdtol_keyword(tests.IrisTest):
    # The `mdtol` keyword sets the tolerated fraction of missing data per
    # collapsed slice; above that fraction the result point becomes masked.

    def setUp(self):
        # 2x2 cube with the second column masked
        data = ma.array([[1, 2], [4, 5]], dtype=np.float32,
                        mask=[[False, True], [False, True]])
        cube = iris.cube.Cube(data, long_name="test_data", units="1")
        lat_coord = iris.coords.DimCoord(np.array([1, 2], dtype=np.float32),
                                         long_name="lat", units="1")
        lon_coord = iris.coords.DimCoord(np.array([3, 4], dtype=np.float32),
                                         long_name="lon", units="1")
        cube.add_dim_coord(lat_coord, 0)
        cube.add_dim_coord(lon_coord, 1)
        self.cube = cube

    def test_single_coord_no_mdtol(self):
        # default: any missing data masks the collapsed point
        collapsed = self.cube.collapsed(
            self.cube.coord('lat'), iris.analysis.MEAN)
        t = ma.array([2.5, 5.], mask=[False, True])
        self.assertMaskedArrayEqual(collapsed.data, t)

    def test_single_coord_mdtol(self):
        # 50% missing is exactly tolerated by mdtol=0.5
        self.cube.data.mask = np.array([[False, True], [False, False]])
        collapsed = self.cube.collapsed(
            self.cube.coord('lat'), iris.analysis.MEAN, mdtol=0.5)
        t = ma.array([2.5, 5], mask=[False, False])
        self.assertMaskedArrayEqual(collapsed.data, t)

    def test_single_coord_mdtol_alt(self):
        # 50% missing exceeds mdtol=0.4, so the result point is masked
        self.cube.data.mask = np.array([[False, True], [False, False]])
        collapsed = self.cube.collapsed(
            self.cube.coord('lat'), iris.analysis.MEAN, mdtol=0.4)
        t = ma.array([2.5, 5], mask=[False, True])
        self.assertMaskedArrayEqual(collapsed.data, t)

    def test_multi_coord_no_mdtol(self):
        collapsed = self.cube.collapsed(
            [self.cube.coord('lat'), self.cube.coord('lon')],
            iris.analysis.MEAN)
        t = np.array([2.5])
        self.assertArrayEqual(collapsed.data, t)

    def test_multi_coord_mdtol(self):
        collapsed = self.cube.collapsed(
            [self.cube.coord('lat'), self.cube.coord('lon')],
            iris.analysis.MEAN, mdtol=0.4)
        t = ma.array([2.5], mask=[True])
        self.assertMaskedArrayEqual(collapsed.data, t)
class TestAggregators(tests.IrisTest):
def test_percentile_1d(self):
cube = tests.stock.simple_1d()
first_quartile = cube.collapsed('foo', iris.analysis.PERCENTILE,
percent=25)
np.testing.assert_array_almost_equal(first_quartile.data,
np.array([2.5], dtype=np.float32))
self.assertCML(first_quartile, ('analysis',
'first_quartile_foo_1d.cml'),
checksum=False)
third_quartile = cube.collapsed('foo', iris.analysis.PERCENTILE,
percent=75)
np.testing.assert_array_almost_equal(third_quartile.data,
np.array([7.5],
dtype=np.float32))
self.assertCML(third_quartile,
('analysis', 'third_quartile_foo_1d.cml'),
checksum=False)
def test_percentile_2d(self):
cube = tests.stock.simple_2d()
first_quartile = cube.collapsed('foo', iris.analysis.PERCENTILE,
percent=25)
np.testing.assert_array_almost_equal(first_quartile.data,
np.array([0.75, 4.75, 8.75],
dtype=np.float32))
self.assertCML(first_quartile, ('analysis',
'first_quartile_foo_2d.cml'),
checksum=False)
first_quartile = cube.collapsed(('foo', 'bar'),
iris.analysis.PERCENTILE, percent=25)
np.testing.assert_array_almost_equal(first_quartile.data,
np.array([2.75],
dtype=np.float32))
self.assertCML(first_quartile, ('analysis',
'first_quartile_foo_bar_2d.cml'),
checksum=False)
def test_percentile_3d(self):
array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
last_quartile = iris.analysis._percentile(array_3d, 0, 50)
np.testing.assert_array_almost_equal(last_quartile,
np.array([[6., 7., 8., 9.],
[10., 11., 12., 13.],
[14., 15., 16., 17.]],
dtype=np.float32))
def test_percentile_3d_axis_one(self):
array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
last_quartile = iris.analysis._percentile(array_3d, 1, 50)
np.testing.assert_array_almost_equal(last_quartile,
np.array([[4., 5., 6., 7.],
[16., 17., 18., 19.]],
dtype=np.float32))
def test_percentile_3d_axis_two(self):
array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
last_quartile = iris.analysis._percentile(array_3d, 2, 50)
np.testing.assert_array_almost_equal(last_quartile,
np.array([[1.5, 5.5, 9.5],
[13.5, 17.5, 21.5]],
dtype=np.float32))
def test_percentile_3d_masked(self):
cube = tests.stock.simple_3d_mask()
last_quartile = cube.collapsed('wibble',
iris.analysis.PERCENTILE, percent=75)
np.testing.assert_array_almost_equal(last_quartile.data,
np.array([[12., 13., 14., 15.],
[16., 17., 18., 19.],
[20., 18., 19., 20.]],
dtype=np.float32))
self.assertCML(last_quartile, ('analysis',
'last_quartile_foo_3d_masked.cml'),
checksum=False)
    def test_percentile_3d_notmasked(self):
        """PERCENTILE (75%) collapse of the unmasked counterpart cube."""
        cube = tests.stock.simple_3d()
        last_quartile = cube.collapsed('wibble',
                                       iris.analysis.PERCENTILE, percent=75)
        np.testing.assert_array_almost_equal(last_quartile.data,
                                             np.array([[9., 10., 11., 12.],
                                                       [13., 14., 15., 16.],
                                                       [17., 18., 19., 20.]],
                                                      dtype=np.float32))
        self.assertCML(last_quartile, ('analysis',
                                       'last_quartile_foo_3d_notmasked.cml'),
                       checksum=False)
def test_proportion(self):
cube = tests.stock.simple_1d()
r = cube.data >= 5
gt5 = cube.collapsed('foo', iris.analysis.PROPORTION, function=lambda val: val >= 5)
np.testing.assert_array_almost_equal(gt5.data, np.array([6 / 11.]))
self.assertCML(gt5, ('analysis', 'proportion_foo_1d.cml'), checksum=False)
    def test_proportion_2d(self):
        """PROPORTION of points >= 6 over each dimension of a 2d cube,
        including a masked-data case where masked points are excluded."""
        cube = tests.stock.simple_2d()
        gt6 = cube.collapsed('foo', iris.analysis.PROPORTION, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([0, 0.5, 1], dtype=np.float32))
        self.assertCML(gt6, ('analysis', 'proportion_foo_2d.cml'), checksum=False)
        gt6 = cube.collapsed('bar', iris.analysis.PROPORTION, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([1 / 3, 1 / 3, 2 / 3, 2 / 3], dtype=np.float32))
        self.assertCML(gt6, ('analysis', 'proportion_bar_2d.cml'), checksum=False)
        gt6 = cube.collapsed(('foo', 'bar'), iris.analysis.PROPORTION, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([0.5], dtype=np.float32))
        self.assertCML(gt6, ('analysis', 'proportion_foo_bar_2d.cml'), checksum=False)
        # mask the data
        cube.data = ma.array(cube.data, mask=cube.data % 2)
        cube.data.mask[1, 2] = True
        gt6_masked = cube.collapsed('bar', iris.analysis.PROPORTION, function=lambda val: val >= 6)
        # Fully-masked columns come back masked in the result.
        np.testing.assert_array_almost_equal(gt6_masked.data, ma.array([1 / 3, None, 1 / 2, None],
                                                                       mask=[False, True, False, True],
                                                                       dtype=np.float32))
        self.assertCML(gt6_masked, ('analysis', 'proportion_foo_2d_masked.cml'), checksum=False)
def test_count(self):
cube = tests.stock.simple_1d()
gt5 = cube.collapsed('foo', iris.analysis.COUNT, function=lambda val: val >= 5)
np.testing.assert_array_almost_equal(gt5.data, np.array([6]))
gt5.data = gt5.data.astype('i8')
self.assertCML(gt5, ('analysis', 'count_foo_1d.cml'), checksum=False)
    def test_count_2d(self):
        """COUNT of points >= 6 over each dimension of a 2d cube."""
        cube = tests.stock.simple_2d()
        gt6 = cube.collapsed('foo', iris.analysis.COUNT, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([0, 2, 4], dtype=np.float32))
        # Cast to a fixed integer width for the reference comparison.
        gt6.data = gt6.data.astype('i8')
        self.assertCML(gt6, ('analysis', 'count_foo_2d.cml'), checksum=False)
        gt6 = cube.collapsed('bar', iris.analysis.COUNT, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([1, 1, 2, 2], dtype=np.float32))
        gt6.data = gt6.data.astype('i8')
        self.assertCML(gt6, ('analysis', 'count_bar_2d.cml'), checksum=False)
        gt6 = cube.collapsed(('foo', 'bar'), iris.analysis.COUNT, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([6], dtype=np.float32))
        gt6.data = gt6.data.astype('i8')
        self.assertCML(gt6, ('analysis', 'count_foo_bar_2d.cml'), checksum=False)
def test_weighted_sum_consistency(self):
# weighted sum with unit weights should be the same as a sum
cube = tests.stock.simple_1d()
normal_sum = cube.collapsed('foo', iris.analysis.SUM)
weights = np.ones_like(cube.data)
weighted_sum = cube.collapsed('foo', iris.analysis.SUM, weights=weights)
self.assertArrayAlmostEqual(normal_sum.data, weighted_sum.data)
def test_weighted_sum_1d(self):
# verify 1d weighted sum is correct
cube = tests.stock.simple_1d()
weights = np.array([.05, .05, .1, .1, .2, .3, .2, .1, .1, .05, .05])
result = cube.collapsed('foo', iris.analysis.SUM, weights=weights)
self.assertAlmostEqual(result.data, 6.5)
self.assertCML(result, ('analysis', 'sum_weighted_1d.cml'),
checksum=False)
def test_weighted_sum_2d(self):
# verify 2d weighted sum is correct
cube = tests.stock.simple_2d()
weights = np.array([.3, .4, .3])
weights = iris.util.broadcast_to_shape(weights, cube.shape, [0])
result = cube.collapsed('bar', iris.analysis.SUM, weights=weights)
self.assertArrayAlmostEqual(result.data, np.array([4., 5., 6., 7.]))
self.assertCML(result, ('analysis', 'sum_weighted_2d.cml'),
checksum=False)
    def test_weighted_rms(self):
        """Weighted RMS over 'foo' of a 2d cube, with data and weights
        chosen by hand so the expected results are exact."""
        cube = tests.stock.simple_2d()
        # modify cube data so that the results are nice numbers
        cube.data = np.array([[4, 7, 10, 8],
                              [21, 30, 12, 24],
                              [14, 16, 20, 8]],
                             dtype=np.float64)
        weights = np.array([[1, 4, 3, 2],
                            [6, 4.5, 1.5, 3],
                            [2, 1, 1.5, 0.5]],
                           dtype=np.float64)
        expected_result = np.array([8.0, 24.0, 16.0])
        result = cube.collapsed('foo', iris.analysis.RMS, weights=weights)
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertCML(result, ('analysis', 'rms_weighted_2d.cml'),
                       checksum=False)
@iris.tests.skip_data
class TestRotatedPole(tests.IrisTest):
    """Tests for rotated-pole conversions: rotate_pole/unrotate_pole on
    1d and nd coordinate arrays, checked against proj4-derived values."""
    def _check_both_conversions(self, cube):
        # Unrotate the cube's grid, then plot both the true and the rotated
        # coordinates, comparing each scatter against a reference graphic.
        rlons, rlats = iris.analysis.cartography.get_xy_grids(cube)
        rcs = cube.coord_system('RotatedGeogCS')
        x, y = iris.analysis.cartography.unrotate_pole(
            rlons, rlats, rcs.grid_north_pole_longitude,
            rcs.grid_north_pole_latitude)
        plt.scatter(x, y)
        self.check_graphic()
        plt.scatter(rlons, rlats)
        self.check_graphic()
    def test_all(self):
        """Exercise the conversions on a real rotated-pole PP cube."""
        path = tests.get_data_path(('PP', 'ukVorog', 'ukv_orog_refonly.pp'))
        master_cube = iris.load_cube(path)
        # Check overall behaviour.
        cube = master_cube[::10, ::10]
        self._check_both_conversions(cube)
        # Check numerical stability.
        cube = master_cube[210:238, 424:450]
        self._check_both_conversions(cube)
    def test_unrotate_nd(self):
        """unrotate_pole with 2d input arrays."""
        rlons = np.array([[350., 352.],[350., 352.]])
        rlats = np.array([[-5., -0.],[-4., -1.]])
        resx, resy = iris.analysis.cartography.unrotate_pole(rlons, rlats,
                                                             178.0, 38.0)
        # Solutions derived by proj4 direct.
        solx = np.array([[-16.42176094, -14.85892262],
                         [-16.71055023, -14.58434624]])
        soly = np.array([[ 46.00724251, 51.29188893],
                         [ 46.98728486, 50.30706042]])
        self.assertArrayAlmostEqual(resx, solx)
        self.assertArrayAlmostEqual(resy, soly)
    def test_unrotate_1d(self):
        """unrotate_pole with 1d input arrays."""
        rlons = np.array([350., 352., 354., 356.])
        rlats = np.array([-5., -0., 5., 10.])
        resx, resy = iris.analysis.cartography.unrotate_pole(
            rlons.flatten(), rlats.flatten(), 178.0, 38.0)
        # Solutions derived by proj4 direct.
        solx = np.array([-16.42176094, -14.85892262,
                         -12.88946157, -10.35078336])
        soly = np.array([46.00724251, 51.29188893,
                         56.55031485, 61.77015703])
        self.assertArrayAlmostEqual(resx, solx)
        self.assertArrayAlmostEqual(resy, soly)
    def test_rotate_nd(self):
        """rotate_pole with 2d input arrays."""
        rlons = np.array([[350., 351.],[352., 353.]])
        rlats = np.array([[10., 15.],[20., 25.]])
        resx, resy = iris.analysis.cartography.rotate_pole(rlons, rlats,
                                                           20., 80.)
        # Solutions derived by proj4 direct.
        solx = np.array([[148.69672569, 149.24727087],
                         [149.79067025, 150.31754368]])
        soly = np.array([[18.60905789, 23.67749384],
                         [28.74419024, 33.8087963 ]])
        self.assertArrayAlmostEqual(resx, solx)
        self.assertArrayAlmostEqual(resy, soly)
    def test_rotate_1d(self):
        """rotate_pole with 1d input arrays."""
        rlons = np.array([350., 351., 352., 353.])
        rlats = np.array([10., 15., 20., 25.])
        resx, resy = iris.analysis.cartography.rotate_pole(rlons.flatten(),
                                                           rlats.flatten(), 20., 80.)
        # Solutions derived by proj4 direct.
        solx = np.array([148.69672569, 149.24727087,
                         149.79067025, 150.31754368])
        soly = np.array([18.60905789, 23.67749384,
                         28.74419024, 33.8087963 ])
        self.assertArrayAlmostEqual(resx, solx)
        self.assertArrayAlmostEqual(resy, soly)
@iris.tests.skip_data
class TestAreaWeights(tests.IrisTest):
    """Tests for iris.analysis.cartography area-weight calculations.

    Fixes: replaced the deprecated ``assertAlmostEquals`` alias with
    ``assertAlmostEqual`` (same behaviour) and hoisted the repeatedly
    spelled-out ``_quadrant_area(..., DEFAULT_SPHERICAL_EARTH_RADIUS)``
    call into locals for readability.
    """

    def test_area_weights(self):
        """Check area_weights values on a small sub-sampled PP cube, and
        verify guessing bounds leaves no residual change on the cube."""
        small_cube = iris.tests.stock.simple_pp()
        # Get offset, subsampled region: small enough to test against literals
        small_cube = small_cube[10:, 35:]
        small_cube = small_cube[::8, ::8]
        small_cube = small_cube[:5, :4]
        # pre-check non-data properties
        self.assertCML(small_cube, ('analysis', 'areaweights_original.cml'),
                       checksum=False)
        # check area-weights values
        small_cube.coord('latitude').guess_bounds()
        small_cube.coord('longitude').guess_bounds()
        area_weights = iris.analysis.cartography.area_weights(small_cube)
        expected_results = np.array(
            [[3.11955916e+12, 3.11956058e+12, 3.11955916e+12, 3.11956058e+12],
             [5.21950793e+12, 5.21951031e+12, 5.21950793e+12, 5.21951031e+12],
             [6.68991432e+12, 6.68991737e+12, 6.68991432e+12, 6.68991737e+12],
             [7.35341320e+12, 7.35341655e+12, 7.35341320e+12, 7.35341655e+12],
             [7.12998265e+12, 7.12998589e+12, 7.12998265e+12, 7.12998589e+12]],
            dtype=np.float64)
        self.assertArrayAllClose(area_weights, expected_results, rtol=1e-8)
        # Check there was no residual change
        small_cube.coord('latitude').bounds = None
        small_cube.coord('longitude').bounds = None
        self.assertCML(small_cube, ('analysis', 'areaweights_original.cml'),
                       checksum=False)

    def test_quadrant_area(self):
        """Check _quadrant_area against reference areas for several
        latitude bands, including a multi-band call."""
        degrees = iris.unit.Unit("degrees")
        radians = iris.unit.Unit("radians")

        def lon2radlon(lons):
            # Longitudes in degrees -> radians.
            return [degrees.convert(lon, radians) for lon in lons]

        def lat2radcolat(lats):
            # Latitudes in degrees -> co-latitudes in radians.
            return [degrees.convert(lat + 90, radians) for lat in lats]

        # Hoist the repeated call target and the constant earth radius.
        quadrant_area = iris.analysis.cartography._quadrant_area
        radius = iris.analysis.cartography.DEFAULT_SPHERICAL_EARTH_RADIUS

        lats = np.array([lat2radcolat([-80, -70])])
        lons = np.array([lon2radlon([0, 10])])
        area = quadrant_area(lats, lons, radius)
        self.assertAlmostEqual(area, [[319251845980.763671875]])

        lats = np.array([lat2radcolat([0, 10])])
        lons = np.array([lon2radlon([0, 10])])
        area = quadrant_area(lats, lons, radius)
        self.assertAlmostEqual(area, [[1228800593851.443115234375]])

        # Reversed band bounds give the same (absolute) area.
        lats = np.array([lat2radcolat([10, 0])])
        lons = np.array([lon2radlon([0, 10])])
        area = quadrant_area(lats, lons, radius)
        self.assertAlmostEqual(area, [[1228800593851.443115234375]])

        lats = np.array([lat2radcolat([70, 80])])
        lons = np.array([lon2radlon([0, 10])])
        area = quadrant_area(lats, lons, radius)
        self.assertAlmostEqual(area, [[319251845980.7646484375]])

        # Several latitude bands in a single call.
        lats = np.array([lat2radcolat([-80, -70]), lat2radcolat([0, 10]),
                         lat2radcolat([70, 80])])
        lons = np.array([lon2radlon([0, 10])])
        area = quadrant_area(lats, lons, radius)
        self.assertAlmostEqual(area[0], [319251845980.763671875])
        self.assertAlmostEqual(area[1], [1228800593851.443115234375])
        self.assertAlmostEqual(area[2], [319251845980.7646484375])
class TestAreaWeightGeneration(tests.IrisTest):
    """Shape/behaviour checks for iris.analysis.cartography.area_weights
    on a 4d cube with assorted dimension orders and degenerate dims."""
    def setUp(self):
        # Stock 4d cube; per the comments below its dims are
        # (time, level, lat, lon) before any reordering.
        self.cube = iris.tests.stock.realistic_4d()
    def test_area_weights_std(self):
        # weights for stock 4d data
        weights = iris.analysis.cartography.area_weights(self.cube)
        self.assertEqual(weights.shape, self.cube.shape)
    def test_area_weights_order(self):
        # weights for data with dimensions in a different order
        order = [3, 2, 1, 0] # (lon, lat, level, time)
        self.cube.transpose(order)
        weights = iris.analysis.cartography.area_weights(self.cube)
        self.assertEqual(weights.shape, self.cube.shape)
    def test_area_weights_non_adjacent(self):
        # weights for cube with non-adjacent latitude/longitude dimensions
        order = [0, 3, 1, 2] # (time, lon, level, lat)
        self.cube.transpose(order)
        weights = iris.analysis.cartography.area_weights(self.cube)
        self.assertEqual(weights.shape, self.cube.shape)
    def test_area_weights_scalar_latitude(self):
        # weights for cube with a scalar latitude dimension
        cube = self.cube[:, :, 0, :]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_scalar_longitude(self):
        # weights for cube with a scalar longitude dimension
        cube = self.cube[:, :, :, 0]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_scalar(self):
        # weights for cube with scalar latitude and longitude dimensions
        cube = self.cube[:, :, 0, 0]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_singleton_latitude(self):
        # singleton (1-point) latitude dimension
        cube = self.cube[:, :, 0:1, :]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_singleton_longitude(self):
        # singleton (1-point) longitude dimension
        cube = self.cube[:, :, :, 0:1]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_singletons(self):
        # singleton (1-point) latitude and longitude dimensions
        cube = self.cube[:, :, 0:1, 0:1]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_normalized(self):
        # normalized area weights must sum to one over lat/lon dimensions.
        weights = iris.analysis.cartography.area_weights(self.cube,
                                                         normalize=True)
        sumweights = weights.sum(axis=3).sum(axis=2) # sum over lon and lat
        self.assertArrayAlmostEqual(sumweights, 1)
    def test_area_weights_non_contiguous(self):
        # Slice the cube so that we have non-contiguous longitude
        # bounds.
        ind = (0, 1, 2, -3, -2, -1)
        cube = self.cube[..., ind]
        weights = iris.analysis.cartography.area_weights(cube)
        expected = iris.analysis.cartography.area_weights(self.cube)[..., ind]
        self.assertArrayEqual(weights, expected)
    def test_area_weights_no_lon_bounds(self):
        # Missing longitude bounds must raise ValueError.
        self.cube.coord('grid_longitude').bounds = None
        with self.assertRaises(ValueError):
            iris.analysis.cartography.area_weights(self.cube)
    def test_area_weights_no_lat_bounds(self):
        # Missing latitude bounds must raise ValueError.
        self.cube.coord('grid_latitude').bounds = None
        with self.assertRaises(ValueError):
            iris.analysis.cartography.area_weights(self.cube)
@iris.tests.skip_data
class TestLatitudeWeightGeneration(tests.IrisTest):
    """Tests for iris.analysis.cartography.cosine_latitude_weights with
    0d/1d/2d latitude coordinates in assorted dimension orders."""
    def setUp(self):
        path = iris.tests.get_data_path(['NetCDF', 'rotated', 'xyt',
                                         'small_rotPole_precipitation.nc'])
        self.cube = iris.load_cube(path)
        # Variant keeping only the 1d grid_latitude/grid_longitude coords.
        self.cube_dim_lat = self.cube.copy()
        self.cube_dim_lat.remove_coord('latitude')
        self.cube_dim_lat.remove_coord('longitude')
        # The 2d cubes are unrealistic, you would not want to weight by
        # anything other than grid latitude in real-world scenarios. However,
        # the technical details are suitable for testing purposes, providing
        # a nice analog for a 2d latitude coordinate from a curvilinear grid.
        self.cube_aux_lat = self.cube.copy()
        self.cube_aux_lat.remove_coord('grid_latitude')
        self.cube_aux_lat.remove_coord('grid_longitude')
        self.lat1d = self.cube.coord('grid_latitude').points
        self.lat2d = self.cube.coord('latitude').points
    def test_cosine_latitude_weights_range(self):
        # check the range of returned values, needs a cube that spans the full
        # latitude range
        lat_coord = iris.coords.DimCoord(np.linspace(-90, 90, 73),
                                         standard_name='latitude',
                                         units=iris.unit.Unit('degrees_north'))
        cube = iris.cube.Cube(np.ones([73], dtype=np.float64),
                              long_name='test_cube', units='1')
        cube.add_dim_coord(lat_coord, 0)
        weights = iris.analysis.cartography.cosine_latitude_weights(cube)
        self.assertTrue(weights.max() <= 1)
        self.assertTrue(weights.min() >= 0)
    def test_cosine_latitude_weights_0d(self):
        # 0d latitude dimension (scalar coordinate)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_dim_lat[:, 0, :])
        self.assertEqual(weights.shape, self.cube_dim_lat[:, 0, :].shape)
        self.assertAlmostEqual(weights[0, 0],
                               np.cos(np.deg2rad(self.lat1d[0])))
    def test_cosine_latitude_weights_1d_singleton(self):
        # singleton (1-point) 1d latitude coordinate (time, lat, lon)
        cube = self.cube_dim_lat[:, 0:1, :]
        weights = iris.analysis.cartography.cosine_latitude_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
        self.assertAlmostEqual(weights[0, 0, 0],
                               np.cos(np.deg2rad(self.lat1d[0])))
    def test_cosine_latitude_weights_1d(self):
        # 1d latitude coordinate (time, lat, lon)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_dim_lat)
        self.assertEqual(weights.shape, self.cube.shape)
        self.assertArrayAlmostEqual(weights[0, :, 0],
                                    np.cos(np.deg2rad(self.lat1d)))
    def test_cosine_latitude_weights_1d_latitude_first(self):
        # 1d latitude coordinate with latitude first (lat, time, lon)
        order = [1, 0, 2] # (lat, time, lon)
        self.cube_dim_lat.transpose(order)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_dim_lat)
        self.assertEqual(weights.shape, self.cube_dim_lat.shape)
        self.assertArrayAlmostEqual(weights[:, 0, 0],
                                    np.cos(np.deg2rad(self.lat1d)))
    def test_cosine_latitude_weights_1d_latitude_last(self):
        # 1d latitude coordinate with latitude last (time, lon, lat)
        order = [0, 2, 1] # (time, lon, lat)
        self.cube_dim_lat.transpose(order)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_dim_lat)
        self.assertEqual(weights.shape, self.cube_dim_lat.shape)
        self.assertArrayAlmostEqual(weights[0, 0, :],
                                    np.cos(np.deg2rad(self.lat1d)))
    def test_cosine_latitude_weights_2d_singleton1(self):
        # 2d latitude coordinate with first dimension singleton
        cube = self.cube_aux_lat[:, 0:1, :]
        weights = iris.analysis.cartography.cosine_latitude_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
        self.assertArrayAlmostEqual(weights[0, :, :],
                                    np.cos(np.deg2rad(self.lat2d[0:1, :])))
    def test_cosine_latitude_weights_2d_singleton2(self):
        # 2d latitude coordinate with second dimension singleton
        cube = self.cube_aux_lat[:, :, 0:1]
        weights = iris.analysis.cartography.cosine_latitude_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
        self.assertArrayAlmostEqual(weights[0, :, :],
                                    np.cos(np.deg2rad(self.lat2d[:, 0:1])))
    def test_cosine_latitude_weights_2d_singleton3(self):
        # 2d latitude coordinate with both dimensions singleton
        cube = self.cube_aux_lat[:, 0:1, 0:1]
        weights = iris.analysis.cartography.cosine_latitude_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
        self.assertArrayAlmostEqual(weights[0, :, :],
                                    np.cos(np.deg2rad(self.lat2d[0:1, 0:1])))
    def test_cosine_latitude_weights_2d(self):
        # 2d latitude coordinate (time, lat, lon)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_aux_lat)
        self.assertEqual(weights.shape, self.cube_aux_lat.shape)
        self.assertArrayAlmostEqual(weights[0, :, :],
                                    np.cos(np.deg2rad(self.lat2d)))
    def test_cosine_latitude_weights_2d_latitude_first(self):
        # 2d latitude coordinate with latitude first (lat, time, lon)
        order = [1, 0, 2] # (lat, time, lon)
        self.cube_aux_lat.transpose(order)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_aux_lat)
        self.assertEqual(weights.shape, self.cube_aux_lat.shape)
        self.assertArrayAlmostEqual(weights[:, 0, :],
                                    np.cos(np.deg2rad(self.lat2d)))
    def test_cosine_latitude_weights_2d_latitude_last(self):
        # 2d latitude coordinate with latitude last (time, lon, lat)
        order = [0, 2, 1] # (time, lon, lat)
        self.cube_aux_lat.transpose(order)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_aux_lat)
        self.assertEqual(weights.shape, self.cube_aux_lat.shape)
        self.assertArrayAlmostEqual(weights[0, :, :],
                                    np.cos(np.deg2rad(self.lat2d.T)))
    def test_cosine_latitude_weights_no_latitude(self):
        # no coordinate identified as latitude
        self.cube_dim_lat.remove_coord('grid_latitude')
        with self.assertRaises(ValueError):
            weights = iris.analysis.cartography.cosine_latitude_weights(
                self.cube_dim_lat)
    def test_cosine_latitude_weights_multiple_latitude(self):
        # two coordinates identified as latitude
        with self.assertRaises(ValueError):
            weights = iris.analysis.cartography.cosine_latitude_weights(
                self.cube)
class TestRollingWindow(tests.IrisTest):
    """Tests for Cube.rolling_window over each dimension, with masked
    data, weights and error conditions."""
    def setUp(self):
        # XXX Comes from test_aggregated_by
        cube = iris.cube.Cube(np.array([[6, 10, 12, 18], [8, 12, 14, 20], [18, 12, 10, 6]]), long_name='temperature', units='kelvin')
        cube.add_dim_coord(iris.coords.DimCoord(np.array([0, 5, 10], dtype=np.float64), 'latitude', units='degrees'), 0)
        cube.add_dim_coord(iris.coords.DimCoord(np.array([0, 2, 4, 6], dtype=np.float64), 'longitude', units='degrees'), 1)
        self.cube = cube
    def test_non_mean_operator(self):
        # rolling MAX over longitude with a length-2 window
        res_cube = self.cube.rolling_window('longitude', iris.analysis.MAX, window=2)
        expected_result = np.array([[10, 12, 18],
                                    [12, 14, 20],
                                    [18, 12, 10]], dtype=np.float64)
        self.assertArrayEqual(expected_result, res_cube.data)
    def test_longitude_simple(self):
        res_cube = self.cube.rolling_window('longitude', iris.analysis.MEAN, window=2)
        expected_result = np.array([[ 8., 11., 15.],
                                    [ 10., 13., 17.],
                                    [ 15., 11., 8.]], dtype=np.float64)
        self.assertArrayEqual(expected_result, res_cube.data)
        self.assertCML(res_cube, ('analysis', 'rolling_window', 'simple_longitude.cml'))
        # a zero-length window is rejected
        self.assertRaises(ValueError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=0)
    def test_longitude_masked(self):
        # rolling mean with partially masked data
        self.cube.data = ma.array(self.cube.data,
                                  mask=[[True, True, True, True],
                                        [True, False, True, True],
                                        [False, False, False, False]])
        res_cube = self.cube.rolling_window('longitude',
                                            iris.analysis.MEAN,
                                            window=2)
        expected_result = np.ma.array([[-99., -99., -99.],
                                       [12., 12., -99.],
                                       [15., 11., 8.]],
                                      mask=[[True, True, True],
                                            [False, False, True],
                                            [False, False, False]],
                                      dtype=np.float64)
        self.assertMaskedArrayEqual(expected_result, res_cube.data)
    def test_longitude_circular(self):
        # rolling windows over a circular coordinate are not yet supported
        cube = self.cube
        cube.coord('longitude').circular = True
        self.assertRaises(iris.exceptions.NotYetImplementedError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=0)
    def test_different_length_windows(self):
        res_cube = self.cube.rolling_window('longitude', iris.analysis.MEAN, window=4)
        expected_result = np.array([[ 11.5],
                                    [ 13.5],
                                    [ 11.5]], dtype=np.float64)
        self.assertArrayEqual(expected_result, res_cube.data)
        self.assertCML(res_cube, ('analysis', 'rolling_window', 'size_4_longitude.cml'))
        # Window too long:
        self.assertRaises(ValueError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=6)
        # Window too small:
        self.assertRaises(ValueError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=0)
    def test_bad_coordinate(self):
        # unknown coordinate names raise KeyError
        self.assertRaises(KeyError, self.cube.rolling_window, 'wibble', iris.analysis.MEAN, window=0)
    def test_latitude_simple(self):
        res_cube = self.cube.rolling_window('latitude', iris.analysis.MEAN, window=2)
        expected_result = np.array([[ 7., 11., 13., 19.],
                                    [ 13., 12., 12., 13.]], dtype=np.float64)
        self.assertArrayEqual(expected_result, res_cube.data)
        self.assertCML(res_cube, ('analysis', 'rolling_window', 'simple_latitude.cml'))
    def test_mean_with_weights_consistency(self):
        # equal weights should be the same as the mean with no weights
        wts = np.array([0.5, 0.5], dtype=np.float64)
        res_cube = self.cube.rolling_window('longitude',
                                            iris.analysis.MEAN,
                                            window=2,
                                            weights=wts)
        expected_result = self.cube.rolling_window('longitude',
                                                   iris.analysis.MEAN,
                                                   window=2)
        self.assertArrayEqual(expected_result.data, res_cube.data)
    def test_mean_with_weights(self):
        # rolling window mean with weights
        wts = np.array([0.1, 0.6, 0.3], dtype=np.float64)
        res_cube = self.cube.rolling_window('longitude',
                                            iris.analysis.MEAN,
                                            window=3,
                                            weights=wts)
        expected_result = np.array([[10.2, 13.6],
                                    [12.2, 15.6],
                                    [12.0, 9.0]], dtype=np.float64)
        # use almost equal to compare floats
        self.assertArrayAlmostEqual(expected_result, res_cube.data)
class TestGeometry(tests.IrisTest):
    """Tests for iris.analysis.geometry.geometry_area_weights."""
    @iris.tests.skip_data
    def test_distinct_xy(self):
        """Overlap weights when the cube has distinct x and y coords."""
        cube = iris.tests.stock.simple_pp()
        cube = cube[:4, :4]
        lon = cube.coord('longitude')
        lat = cube.coord('latitude')
        lon.guess_bounds()
        lat.guess_bounds()
        from iris.fileformats.rules import regular_step
        # quarter/half are the expected overlap areas of a grid cell with
        # the box below (a quarter / half of one cell's area).
        quarter = abs(regular_step(lon) * regular_step(lat) * 0.25)
        half = abs(regular_step(lon) * regular_step(lat) * 0.5)
        minx = 3.7499990463256836
        maxx = 7.499998092651367
        miny = 84.99998474121094
        maxy = 89.99998474121094
        geometry = shapely.geometry.box(minx, miny, maxx, maxy)
        weights = iris.analysis.geometry.geometry_area_weights(cube, geometry)
        target = np.array([
            [0, quarter, quarter, 0],
            [0, half, half, 0],
            [0, quarter, quarter, 0],
            [0, 0, 0, 0]])
        self.assertTrue(np.allclose(weights, target))
    def test_shared_xy(self):
        """Overlap weights when x and y share a single (track) dimension."""
        cube = tests.stock.track_1d()
        geometry = shapely.geometry.box(1, 4, 3.5, 7)
        weights = iris.analysis.geometry.geometry_area_weights(cube, geometry)
        target = np.array([0, 0, 2, 0.5, 0, 0, 0, 0, 0, 0, 0])
        self.assertTrue(np.allclose(weights, target))
class TestProject(tests.GraphicsTest):
    """Tests for iris.analysis.cartography.project.

    Fix: ``test_cartopy_projection`` used the Python-2-only
    ``itertools.izip`` and ``dict.iteritems``; replaced with ``zip`` and
    ``dict.items()``, which behave identically here and also work on
    Python 3.
    """

    def setUp(self):
        cube = iris.tests.stock.realistic_4d_no_derived()
        # Remove some slices to speed testing.
        self.cube = cube[0:2, 0:3]
        self.target_proj = ccrs.Robinson()

    def test_bad_resolution(self):
        """Invalid nx/ny values must raise ValueError."""
        with self.assertRaises(ValueError):
            iris.analysis.cartography.project(self.cube,
                                              self.target_proj,
                                              nx=-200, ny=200)
        with self.assertRaises(ValueError):
            iris.analysis.cartography.project(self.cube,
                                              self.target_proj,
                                              nx=200, ny='abc')

    def test_missing_latlon(self):
        """Both horizontal coordinates must be present to project."""
        cube = self.cube.copy()
        cube.remove_coord('grid_latitude')
        with self.assertRaises(ValueError):
            iris.analysis.cartography.project(cube, self.target_proj)
        cube = self.cube.copy()
        cube.remove_coord('grid_longitude')
        with self.assertRaises(ValueError):
            iris.analysis.cartography.project(cube, self.target_proj)
        self.cube.remove_coord('grid_longitude')
        self.cube.remove_coord('grid_latitude')
        with self.assertRaises(ValueError):
            iris.analysis.cartography.project(self.cube, self.target_proj)

    def test_default_resolution(self):
        """With default resolution the projected cube keeps its shape."""
        new_cube, extent = iris.analysis.cartography.project(self.cube,
                                                             self.target_proj)
        self.assertEqual(new_cube.shape, self.cube.shape)

    @iris.tests.skip_data
    def test_cartopy_projection(self):
        """Project a PP cube into several cartopy projections and compare
        the resulting plot grid against a reference graphic."""
        cube = iris.load_cube(tests.get_data_path(('PP', 'aPPglob1',
                                                   'global.pp')))
        projections = {}
        projections['RotatedPole'] = ccrs.RotatedPole(pole_longitude=177.5,
                                                      pole_latitude=37.5)
        projections['Robinson'] = ccrs.Robinson()
        projections['PlateCarree'] = ccrs.PlateCarree()
        projections['NorthPolarStereo'] = ccrs.NorthPolarStereo()
        projections['Orthographic'] = ccrs.Orthographic(central_longitude=-90,
                                                        central_latitude=45)
        projections['InterruptedGoodeHomolosine'] = ccrs.InterruptedGoodeHomolosine()
        projections['LambertCylindrical'] = ccrs.LambertCylindrical()
        # Set up figure
        fig = plt.figure(figsize=(10, 10))
        gs = matplotlib.gridspec.GridSpec(nrows=3, ncols=3, hspace=1.5, wspace=0.5)
        # zip()/items() replace py2-only itertools.izip()/iteritems().
        for subplot_spec, (name, target_proj) in zip(gs, projections.items()):
            # Set up axes and title
            ax = plt.subplot(subplot_spec, frameon=False, projection=target_proj)
            ax.set_title(name)
            # Transform cube to target projection
            new_cube, extent = iris.analysis.cartography.project(cube, target_proj,
                                                                 nx=150, ny=150)
            # Plot
            plt.pcolor(new_cube.coord('projection_x_coordinate').points,
                       new_cube.coord('projection_y_coordinate').points,
                       new_cube.data)
            # Add coastlines
            ax.coastlines()
        # Tighten up layout
        gs.tight_layout(plt.gcf())
        # Verify resulting plot
        self.check_graphic(tol=1.0)

    @iris.tests.skip_data
    def test_no_coord_system(self):
        """Cubes without a coord system get a default source CS."""
        cube = iris.load_cube(tests.get_data_path(('PP', 'aPPglob1', 'global.pp')))
        cube.coord('longitude').coord_system = None
        cube.coord('latitude').coord_system = None
        new_cube, extent = iris.analysis.cartography.project(cube,
                                                             self.target_proj)
        self.assertCML(new_cube,
                       ('analysis', 'project', 'default_source_cs.cml'))
# Standard iris test entry point: run this module's tests when executed
# directly.
if __name__ == "__main__":
    tests.main()
| bblay/iris | lib/iris/tests/test_analysis.py | Python | gpl-3.0 | 54,747 | [
"NetCDF"
] | 95cd97cb8f55451f7cb81cd5ae76d6aaaaabb6d3ebf8f74d1135084c1090a7a8 |
#********************************************************************************
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#********************************************************************************
#
# Created by Brian Keene on 8 September 2016
#
# Revision history:
#
#
#********************************************************************************
# note - on OSX, requires framework build of python/2.7 to run, as this
# application requires access to the screen (this might only apply to systems
# running Mavericks or later)
# This script is intended to serve as a template for graphical user
# interfaces: primarily a simplified front-end to wxWidgets (via wxPython),
# it allows easy setup of dynamic hiding that may involve cross-communication
# between objects placed anywhere in the parent-child hierarchy. It supports
# an object-oriented construction of a GUI, with emphasis on easy
# modification in the accompanying script.
# import the needed modules
import wx, os
# Module-level dictionary in which GUI objects store shared data.
myDict = {}
class wxFrame(wx.Frame):
    # note to others: we pass another class (an instance of Frame) to this wx.Frame derived class;
    # the ambiguity of parent in the class __init__ vs the wx.Frame.__init__ is due to parent in the
    # wx.Frame.__init__ function being a /keyword/ argument, rather than a python convention, as is used
    # in the class __init__ function. The wx.Frame.__init__ parent argument /must/ be a wx.Window object,
    # or simply value "None", which is what we usually use
    # __init__ takes the implicit self argument as usual
    # and 'sibling' here is an instance of our 'Frame' class defined immediately below this class.
    # 'sibling' holds all the necessary data needed to define a wx.Frame object.
    def __init__(self,sibling):
        """Build the real wx.Frame from a Frame description object."""
        wx.Frame.__init__(self,parent=sibling._parent._obj,title=sibling._title)
        self.SetInitialSize(sibling._size)
# Frame collects a wx.Frame's configuration without instantiating it yet.
class Frame:
    """Lightweight description of a frame; realises the wx.Frame lazily."""

    # Class-level list of every Frame instance that has been realised;
    # iterate Frame._register to find instances with specific data.
    _register = []
    _typeName = "Frame"

    def __init__(self, parent, title, size, **kwargs):
        """Record the data needed to build a wx.Frame later.

        parent: typically None; may be a relevant object when a frame is
                spawned dynamically.
        title:  string displayed at the top of the frame.
        size:   (width, height) tuple in pixels.
        """
        self._parent = parent
        self._title = title
        self._size = size
        # Instances (panels, notebooks, ...) owned by this frame.
        self._children = []

    def initObj(self):
        """Instantiate the underlying wx.Frame, then all child objects."""
        self._obj = wxFrame(self)
        Frame._register.append(self)
        for child in self._children:
            child.initObj()
        # Every object on this frame now exists; display the frame.
        self._obj.Show()
# a wxNotebook class
class wxNotebook(wx.Notebook):
    """wx.Notebook subclass built from a Notebook description object.

    Fix: the original __init__ called ``wx.Notebook(sibling._parent._obj)``,
    which constructed a *detached* notebook and never initialised ``self``;
    the subsequent ``self.AddPage``/sizer calls would then fail. We now
    initialise this instance properly via ``wx.Notebook.__init__``.
    """

    def __init__(self, sibling):
        # Initialise *this* notebook widget on the sibling's parent panel.
        wx.Notebook.__init__(self, sibling._parent._obj)
        self._pages = []
        # Realise each child page and add it to the notebook.
        for index, item in enumerate(sibling._children):
            item.initObj()
            self._pages.append(item._obj)
            self.AddPage(self._pages[index], item._name)
        # Let the notebook expand to fill its parent.
        self.NBSizer = wx.BoxSizer()
        self.NBSizer.Add(self, 1, wx.EXPAND)
        sibling._parent._obj.SetSizer(self.NBSizer)
# our notebook class that collates information before making a wx.Notebook
class Notebook:
    """Collects notebook configuration before creating a wx.Notebook.

    Fixes: ``customBehavior`` was defined without ``self`` and invoked as a
    bare name inside ``OnPageChanging`` (a NameError at runtime); it is now
    a proper overridable method called via ``self``.
    """

    _register = []
    _typeName = "Notebook"

    def __init__(self, parent, **kwargs):
        """Record this notebook's parent and register with it.

        parent: the panel-like object this notebook will be placed on.
        """
        self._parent = parent
        # Instances that are children (pages) of this notebook.
        self._children = []
        self._pages = []
        # Tell the parent about this child so it gets initialised with it.
        parent._children.append(self)

    def initObj(self):
        """Create the wx.Notebook and add one page per child."""
        self._obj = wx.Notebook(self._parent._obj)
        for index, item in enumerate(self._children):
            item.initObj()
            self._pages.append(item._obj)
            self._obj.AddPage(self._pages[index], item._name)
        self.NBSizer = wx.BoxSizer()
        self.NBSizer.Add(self._obj, 1, wx.EXPAND)
        self._parent._obj.SetSizer(self.NBSizer)
        Notebook._register.append(self)

    def customBehavior(self):
        """Hook for subclasses: runs whenever the notebook page changes."""
        pass

    def OnPageChanging(self, event):
        """Page-change event handler; delegates to customBehavior()."""
        self.customBehavior()
class wxPanel(wx.Panel):
    """Concrete wx.Panel built from a Panel instance ('sibling').

    Realizes every Widget child on a GridBagSizer, binds their events, and
    applies the initial hide state.  A panel that hosts a Notebook child is
    a dummy container and gets no sizer.
    """

    def __init__(self,sibling):
        wx.Panel.__init__(self,parent=sibling._parent._obj);
        # A panel that contains a Notebook child must not own a grid sizer.
        self._needsSizer = True;
        for obj in sibling._children:
            if obj._typeName == "Notebook":
                self._needsSizer = False;
                break
        if self._needsSizer:
            self.grid = wx.GridBagSizer(hgap=5,vgap=5);
            self.SetSizer(self.grid);
        # call the init methods of the objects, which then places wxWidget objects in the self._widgets variable for
        # each Widget class instance
        # a panel holding a notebook will never have a widget - its a dummy panel
        # if it does, self.grid does not exist and an AttributeError is raised here (intended as a usage error)
        for child in sibling._children:
            if child._typeName == "Widget":
                # Realize the wx control, then place it on the grid.
                child.initObj(self);
                self.grid.Add(child._obj, pos=child._pos, span=child._span, flag=child._gridFlags)
                # if the base child widget object is a label, it won't have a function
                if ((child._function is not None) and (child._wxEvt is not None)):
                    self.Bind(child._wxEvt,child._function,child._obj)
                if child._label is not None:
                    # we know that this will be a label;
                    child._labelObj = wx.StaticText(self,label=child._label)
                    self.grid.Add(child._labelObj,child._labelPos, child._labelSpan)
                # Master widgets drive show/hide of their slaves via masterFunction.
                if (child._hasSlave):
                    self.Bind(child._wxEvt, child.masterFunction, child._obj)
                # some objects are initially hidden; here, we hide them.
                if (child._initHide):
                    child._obj.Hide()
                    if (child._label is not None):
                        child._labelObj.Hide()
        self.Layout()
# in this class, we collate all the information we'll need to make a well-defined wx.Panel object
# in this class, we collate all the information we'll need to make a well-defined wx.Panel object
class Panel:
    """Collects everything needed to build a wx.Panel: parent, widgets, name.

    Widget instances register themselves in self._children via their own
    constructors; initObj() builds the wxPanel, which in turn realizes the
    Widget children.
    """

    # Iterable registry of Panel instances; only access this after the main
    # frame has added all objects (i.e. at the end of the user's GUI script).
    _register = []
    # All instances of this class carry the type tag "Panel".
    _typeName = "Panel"

    def __init__(self, parent, **kwargs):
        # Widget objects that identify this panel as their parent panel.
        # Holding the instances themselves lets us call their methods directly.
        self._widgets = []
        # The object on which this panel is displayed (Frame or Notebook).
        self._parent = parent
        # Instances that have this Panel instance as their parent.
        self._children = []
        parent._children.append(self)
        # Used when this panel is a Notebook page: the name is displayed as
        # the tab label atop the notebook.
        self._name = kwargs.get("name", None)

    def initObj(self):
        """Build the wxPanel (which realizes the widgets) and register self."""
        self._obj = wxPanel(self)
        Panel._register.append(self)
        # Children that are *not* Widgets (e.g. nested notebooks) are
        # initialized here; Widget children are handled inside wxPanel.
        for obj in self._children:
            if obj._typeName != "Widget":
                obj.initObj()

    def deleteWidget(self):
        # BUG FIX: was defined without 'self'; calling it on an instance
        # raised TypeError.  Still an unimplemented stub.
        pass

    def bindToFunction(self):
        # BUG FIX: was defined without 'self'.  Binding may already be
        # covered by the Widget class; kept as a stub pending a decision.
        pass
#class wxWidget:
# def __init__(self,sibling):
# self._widget = None;
# if sibling._
class Widget:
    """Declarative description of one GUI control (text box, choice, button,
    or static text), realized on its parent Panel by initObj()."""

    # Registry of every Widget instance ever constructed.
    _register = []
    # Type tag used by parents to dispatch on child kind.
    _typeName = "Widget"

    # for all Widget objects, we need the parent object, widgetType, name, and position
    def __init__(self,parent,widgetType,name,pos,**kwargs):
        # note that we use **kwargs to pass in information that may be specific to certain type
        # of widget; e.g., text widget vs button vs ... etc.
        # note that, by default, there is no label (and no label position <(int,int)> provided)
        #####################
        # Required arguments, for all widget types
        #####################
        self._parent = parent; # parent object, typically an instance of Panel
        self._widgetType = widgetType; # button, textwidget, label, etc.
        self._name = name; #string
        self._pos = pos; #tuple of coords: "(integer, integer)"
        #####################
        # Required arguments, for some widget types
        #####################
        # required for choice widgets
        self._choices = kwargs.get('choices',None)
        ############################
        # optional arguments
        # we can specify a label (if so, must specify a position)
        # the spans of the label and widget default to (1,1)
        # if a widget can use an initial value (e.g., a text control), it defaults to an empty string
        # if a widget is to be bound to a function, must specify this explicitly or bind to it later
        ############################
        self._label = kwargs.get('label',None)
        self._labelPos = kwargs.get('labelPos',None)
        # default behavior of span is (1,1) if not specified
        self._span = kwargs.get('span',(1,1))
        self._labelSpan = kwargs.get('labelSpan',(1,1))
        self._initValue = kwargs.get('value',"")
        self._function = kwargs.get('function',None)
        # wx event type to bind; set by initObj() according to widget type.
        self._wxEvt = None
        self._hasMaster = False; # default this to false; changed if the setMaster() function is called on self
        self._hasSlave = False;
        # these will be instantiated during the creation of the parent object
        self._labelObj = None;
        self._obj = None;
        # Hide most objects at first; that way, they only show if they are told to show,
        # and otherwise will hide when told to hide
        # implement this /after/ we have connected all the show/hide functionality
        self._initHide = False;
        # TODO: have the Panel's grid.Add() method use these flags when instantiating the widget
        self._gridFlags = (wx.RESERVE_SPACE_EVEN_IF_HIDDEN | wx.EXPAND | wx.ALIGN_CENTER)
        # append the object to the list of children in the parent instance
        parent._children.append(self)
        # the master widgets - these are /Widget/ instances
        self._masters = []
        # denotes messages from master that instruct self to Hide()
        # these should be strings
        self._hideWhen = []
        # widgets to which self is master; note that this is set implicitly via setMaster, when
        # other widgets denote self as master
        # these are /Widget/ instances (not wx objects)
        self._slaves = []
        Widget._register.append(self); # append this instance to the class register

    def masterFunction(self,event):
        """Event handler bound when self has slaves: broadcast this widget's
        current value to every slaved widget."""
        # pass the value of this widget to slaved widgets
        message = str(event.GetString())
        for slave in self._slaves:
            slave.evaluateMessage(message);

    def evaluateMessage(self,message):
        """Show or hide this widget (and its label) depending on whether the
        master's message appears in self._hideWhen, then relayout the panel."""
        if message in self._hideWhen:
            self._obj.Hide()
            if (self._labelObj is not None):
                self._labelObj.Hide()
            self._parent._obj.Layout()
        else:
            self._obj.Show()
            if (self._labelObj is not None):
                self._labelObj.Show()
            self._parent._obj.Layout()

    def setMaster(self, master, hideWhen):
        """Declare 'master' (a Widget) as a master of self.

        hideWhen: iterable of string messages from the master that should
        cause self to hide.  Also registers self in master._slaves.
        """
        self._masters.append(master)
        # assume hideWhen is in the form of an array
        for instruction in hideWhen:
            self._hideWhen.append(instruction)
        # append self to master._slaves[]
        master._slaves.append(self);
        self._hasMaster = True;
        if master._hasSlave == False:
            master._hasSlave = True;

    def setFunction(self,function):
        """Bind (or rebind) the callback function after construction."""
        self._function = function;

    def setGridFlags(self,flags):
        """Override the wx sizer flags used when this widget is added to the grid."""
        self._gridFlags = flags;

    def setInitHide(self,boolean):
        """Set whether the widget starts out hidden."""
        self._initHide = boolean;

    # maybe the user wants to attach labels later; allow them to do so here
    def setLabel(self,label,labelPos,**kwargs):
        """Attach a StaticText label to this widget after construction.

        labelSpan may be passed via kwargs; defaults to (1,1).
        """
        self._label = label;
        self._labelPos = labelPos;
        self._labelSpan = kwargs.get('labelSpan',(1,1))

    # this is a bottom level object; it requires a parentInstance on initialization
    def initObj(self,parentInstance):
        """Create the concrete wx control in self._obj and record the wx event
        type to bind in self._wxEvt, dispatching on self._widgetType.

        Raises ValueError for a choice widget with no choices, or a button
        with no name.
        """
        if (self._widgetType == "text"):
            self._obj = wx.TextCtrl(parentInstance,value=self._initValue,name=self._name)
            self._wxEvt = wx.EVT_TEXT
        # need to add all types of widgets here; remember to overload necessary parameters for each via kwargs.get()
        elif (self._widgetType == "choice"):
            if (self._choices is None):
                raise ValueError('%s has no choices! Please specify choices for the choice widget.' %(self._name))
            self._obj = wx.Choice(parentInstance,-1,choices=self._choices,name=self._name)
            self._wxEvt = wx.EVT_CHOICE
        # more types of widgets to be implemented
        elif (self._widgetType == "button"):
            if (self._name is None):
                raise ValueError('%s has no name! The name of the button is displayed on the button, and \n\
is required!' %(self._name))
            self._obj = wx.Button(parentInstance,label=self._name, name=self._name)
            self._wxEvt = wx.EVT_BUTTON
        elif (self._widgetType == "static"):
            self._obj = wx.StaticText(parentInstance,label=self._name, name=self._name)
            self._wxEvt = None
| bpkeene/pythonPlayground | GUI_Template.py | Python | gpl-3.0 | 16,981 | [
"Brian"
] | 9a04f4af3fd76831a9c9d59223cd21bacc66d88968a40faba7b4c8f0e6965e21 |
#!/usr/bin/env python3
"""The influence of windowing of lin. sweep signals when using a
Kaiser Window by fixing beta (=7) and fade_in (=0).
fstart = 100 Hz
fstop = 5000 Hz
Deconvolution: Unwindowed
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter
import numpy as np
# Parameters of the measuring system
fs = 44100  # sampling rate in Hz
fstart = 100  # sweep start frequency in Hz
fstop = 5000  # sweep stop frequency in Hz
duration = 1  # sweep length in seconds
pad = 4  # zero-padding length in seconds
# Generate excitation signal (linear sweep)
excitation = generation.lin_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
noise_level_db = -30.
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter-System: a unit impulse, i.e. the system under test is ideal
dirac_system = measurement_chain.convolution([1.0])
# Combine system elements: ideal system followed by additive noise
system = measurement_chain.chained(dirac_system, noise)
# Kaiser window parameters: beta and fade-in are fixed, fade-out is swept
beta = 7
fade_in = 0
fade_out_list = np.arange(0, 1001, 1)  # fade-out lengths in samples
# Spectrum of dirac for reference (ideal impulse response of length pad*fs)
dirac = np.zeros(pad * fs)
dirac[0] = 1
dirac_f = np.fft.rfft(dirac)
def get_results(fade_out):
    """Measure one impulse response for the given fade-out length (samples).

    The excitation is Kaiser-windowed (fixed module-level beta and fade_in)
    before passing through the noisy system, while the deconvolution uses
    the *unwindowed* sweep, as stated in the module docstring.
    """
    kaiser = windows.window_kaiser(N, fade_in, fade_out, fs, beta)
    windowed_padded = generation.zero_padding(excitation * kaiser, pad, fs)
    reference_padded = generation.zero_padding(excitation, pad, fs)
    response = system(windowed_padded)
    return calculation.deconv_process(reference_padded, response, fs)
# Sweep over all fade-out lengths and record, per line:
# fade_out, peak-to-noise ratio (dB), and spectral distance to the ideal dirac.
with open("lin_sweep_kaiser_window_bandlimited_script6_1.txt", "w") as f:
    for fade_out in fade_out_list:
        ir = get_results(fade_out)
        # PNR: peak sample vs. the tail of the impulse response.
        pnr = calculation.pnr_db(ir[0], ir[1:4 * fs])
        # Distance between the measured spectrum and the ideal dirac spectrum.
        spectrum_distance = calculation.vector_distance(
            dirac_f, np.fft.rfft(ir[:pad * fs]))
        f.write(
            str(fade_out) + " " + str(pnr) +
            " " + str(spectrum_distance) + " \n")
| spatialaudio/sweep | lin_sweep_kaiser_window_bandlimited_script6/lin_sweep_kaiser_window_bandlimited_script6_1.py | Python | mit | 2,208 | [
"DIRAC"
] | f70520af4434b7c59d8e628fbaa8c4be1dc30a3472d0a92a2ff748de63603937 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides so-called "strategies" to determine the coordination environments of an atom in a structure.
Some strategies can favour larger or smaller environments. Some strategies uniquely identifies the environments while
some others can identify the environment as a "mix" of several environments, each of which is assigned with a given
fraction. The choice of the strategy depends on the purpose of the user.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import abc
import os
from collections import OrderedDict
from typing import Dict, List, Optional
import numpy as np
from monty.json import MSONable
from scipy.stats import gmean
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import (
AllCoordinationGeometries,
)
from pymatgen.analysis.chemenv.coordination_environments.voronoi import (
DetailedVoronoiContainer,
)
from pymatgen.analysis.chemenv.utils.chemenv_errors import EquivalentSiteSearchError
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import (
get_lower_and_upper_f,
)
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
from pymatgen.analysis.chemenv.utils.func_utils import (
CSMFiniteRatioFunction,
CSMInfiniteRatioFunction,
DeltaCSMRatioFunction,
RatioFunction,
)
from pymatgen.core.operations import SymmOp
from pymatgen.core.sites import PeriodicSite
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
module_dir = os.path.dirname(os.path.abspath(__file__))
MPSYMBOL_TO_CN = AllCoordinationGeometries().get_symbol_cn_mapping()
ALLCG = AllCoordinationGeometries()
class StrategyOption(MSONable, metaclass=abc.ABCMeta):
    """Abstract class for the options of the chemenv strategies."""

    # Human-readable description of the accepted values; subclasses override.
    allowed_values = None  # type: Optional[str]

    @abc.abstractmethod
    def as_dict(self):
        """
        A JSON serializable dict representation of this strategy option.
        """
        pass
class DistanceCutoffFloat(float, StrategyOption):
    """Distance cutoff in a strategy."""

    allowed_values = "Real number between 1.0 and +infinity"

    def __new__(cls, myfloat):
        """Special float that should be between 1.0 and infinity.

        :param myfloat: Distance cutoff.
        """
        value = float.__new__(cls, myfloat)
        if value < 1.0:
            raise ValueError("Distance cutoff should be between 1.0 and +infinity")
        return value

    def as_dict(self):
        """MSONAble dict"""
        d = {"@module": self.__class__.__module__}
        d["@class"] = self.__class__.__name__
        d["value"] = self
        return d

    @classmethod
    def from_dict(cls, d):
        """Initialize distance cutoff from dict.

        :param d: Dict representation of the distance cutoff.
        """
        return cls(d["value"])
class AngleCutoffFloat(float, StrategyOption):
    """Angle cutoff in a strategy"""

    allowed_values = "Real number between 0.0 and 1.0"

    def __new__(cls, myfloat):
        """Special float that should be between 0.0 and 1.0.

        :param myfloat: Angle cutoff.
        """
        value = float.__new__(cls, myfloat)
        if value < 0.0 or value > 1.0:
            raise ValueError("Angle cutoff should be between 0.0 and 1.0")
        return value

    def as_dict(self):
        """MSONAble dict"""
        d = {"@module": self.__class__.__module__}
        d["@class"] = self.__class__.__name__
        d["value"] = self
        return d

    @classmethod
    def from_dict(cls, d):
        """Initialize angle cutoff from dict.

        :param d: Dict representation of the angle cutoff.
        """
        return cls(d["value"])
class CSMFloat(float, StrategyOption):
    """Real number representing a Continuous Symmetry Measure"""

    allowed_values = "Real number between 0.0 and 100.0"

    def __new__(cls, myfloat):
        """Special float that should be between 0.0 and 100.0.

        :param myfloat: CSM.
        """
        value = float.__new__(cls, myfloat)
        if value < 0.0 or value > 100.0:
            raise ValueError("Continuous symmetry measure limits should be between 0.0 and 100.0")
        return value

    def as_dict(self):
        """MSONable dict"""
        d = {"@module": self.__class__.__module__}
        d["@class"] = self.__class__.__name__
        d["value"] = self
        return d

    @classmethod
    def from_dict(cls, d):
        """Initialize CSM from dict.

        :param d: Dict representation of the CSM.
        """
        return cls(d["value"])
class AdditionalConditionInt(int, StrategyOption):
    """Integer representing an additional condition in a strategy."""

    allowed_values = "Integer amongst :\n"
    for integer, description in AdditionalConditions.CONDITION_DESCRIPTION.items():
        allowed_values += f' - {integer:d} for "{description}"\n'

    def __new__(cls, integer):
        """Special int representing additional conditions."""
        # Reject anything that is not exactly an integer (e.g. 1.5).
        if str(int(integer)) != str(integer):
            raise ValueError(f"Additional condition {integer} is not an integer")
        value = int.__new__(cls, integer)
        if value not in AdditionalConditions.ALL:
            raise ValueError(f"Additional condition {integer:d} is not allowed")
        return value

    def as_dict(self):
        """MSONable dict"""
        d = {"@module": self.__class__.__module__}
        d["@class"] = self.__class__.__name__
        d["value"] = self
        return d

    @classmethod
    def from_dict(cls, d):
        """Initialize additional condition from dict.

        :param d: Dict representation of the additional condition.
        """
        return cls(d["value"])
class AbstractChemenvStrategy(MSONable, metaclass=abc.ABCMeta):
    """
    Class used to define a Chemenv strategy for the neighbors and coordination environment to be applied to a
    StructureEnvironments object
    """

    AC = AdditionalConditions()
    # Mapping option_name -> {"type": ..., "internal": ..., "default": ...}; filled by subclasses.
    STRATEGY_OPTIONS = OrderedDict()  # type: Dict[str, Dict]
    STRATEGY_DESCRIPTION = None  # type: str
    STRATEGY_INFO_FIELDS = []  # type: List
    DEFAULT_SYMMETRY_MEASURE_TYPE = "csm_wcs_ctwcc"

    def __init__(
        self,
        structure_environments=None,
        symmetry_measure_type=DEFAULT_SYMMETRY_MEASURE_TYPE,
    ):
        """
        Abstract constructor for the all chemenv strategies.

        :param structure_environments: StructureEnvironments object containing all the information on the
            coordination of the sites in a structure.
        :param symmetry_measure_type: Type of continuous symmetry measure to use.
        """
        self.structure_environments = None
        if structure_environments is not None:
            self.set_structure_environments(structure_environments)
        self._symmetry_measure_type = symmetry_measure_type

    @property
    def symmetry_measure_type(self):
        """Type of symmetry measure."""
        return self._symmetry_measure_type

    def set_structure_environments(self, structure_environments):
        """Set the structure environments to this strategy.

        :param structure_environments: StructureEnvironments object.
        :return: None
        :raises ValueError: if the voronoi attribute is not a DetailedVoronoiContainer.
        """
        self.structure_environments = structure_environments
        if not isinstance(self.structure_environments.voronoi, DetailedVoronoiContainer):
            raise ValueError('Voronoi Container not of type "DetailedVoronoiContainer"')
        self.prepare_symmetries()

    def prepare_symmetries(self):
        """Prepare the symmetries for the structure contained in the structure environments."""
        try:
            self.spg_analyzer = SpacegroupAnalyzer(self.structure_environments.structure)
            self.symops = self.spg_analyzer.get_symmetry_operations()
        except Exception:
            # Symmetry analysis can fail; fall back to an empty list of operations.
            self.symops = []

    def equivalent_site_index_and_transform(self, psite):
        """Get the equivalent site and corresponding symmetry+translation transformations.

        :param psite: Periodic site.
        :return: Equivalent site in the unit cell, translations and symmetry transformation.
        :raises EquivalentSiteSearchError: if no symmetry+translation mapping is found.
        """
        # Get the index of the site in the unit cell of which the PeriodicSite psite is a replica.
        try:
            isite = self.structure_environments.structure.index(psite)
        except ValueError:
            try:
                uc_psite = psite.to_unit_cell()
                isite = self.structure_environments.structure.index(uc_psite)
            except ValueError:
                # NOTE(review): if no periodic image is found, 'isite' stays
                # unbound and the line below raises NameError — confirm that
                # callers guarantee psite belongs to this structure.
                for isite2, site2 in enumerate(self.structure_environments.structure):
                    if psite.is_periodic_image(site2):
                        isite = isite2
                        break
        # Get the translation between psite and its corresponding site in the unit cell (Translation I)
        thissite = self.structure_environments.structure[isite]
        dthissite = psite.frac_coords - thissite.frac_coords
        # Get the translation between the equivalent site for which the neighbors have been computed and the site in
        # the unit cell that corresponds to psite (Translation II)
        equivsite = self.structure_environments.structure[self.structure_environments.sites_map[isite]].to_unit_cell()
        # equivsite = self.structure_environments.structure[self.structure_environments.sites_map[isite]]
        dequivsite = (
            self.structure_environments.structure[self.structure_environments.sites_map[isite]].frac_coords
            - equivsite.frac_coords
        )
        found = False
        # Find the symmetry that applies the site in the unit cell to the equivalent site, as well as the translation
        # that gets back the site to the unit cell (Translation III)
        # TODO: check that these tolerances are needed, now that the structures are refined before analyzing envs
        tolerances = [1e-8, 1e-7, 1e-6, 1e-5, 1e-4]
        for tolerance in tolerances:
            for symop in self.symops:
                newsite = PeriodicSite(
                    equivsite._species,
                    symop.operate(equivsite.frac_coords),
                    equivsite._lattice,
                )
                if newsite.is_periodic_image(thissite, tolerance=tolerance):
                    mysym = symop
                    dthissite2 = thissite.frac_coords - newsite.frac_coords
                    found = True
                    break
            if not found:
                # Fall back to the identity operation when no spacegroup symop matches.
                symops = [SymmOp.from_rotation_and_translation()]
                for symop in symops:
                    newsite = PeriodicSite(
                        equivsite._species,
                        symop.operate(equivsite.frac_coords),
                        equivsite._lattice,
                    )
                    # if newsite.is_periodic_image(thissite):
                    if newsite.is_periodic_image(thissite, tolerance=tolerance):
                        mysym = symop
                        dthissite2 = thissite.frac_coords - newsite.frac_coords
                        found = True
                        break
            if found:
                break
        if not found:
            raise EquivalentSiteSearchError(psite)
        return [
            self.structure_environments.sites_map[isite],
            dequivsite,
            dthissite + dthissite2,
            mysym,
        ]

    @abc.abstractmethod
    def get_site_neighbors(self, site):
        """
        Applies the strategy to the structure_environments object in order to get the neighbors of a given site.
        :param site: Site for which the neighbors are looked for
        :param structure_environments: StructureEnvironments object containing all the information needed to get the
            neighbors of the site
        :return: The list of neighbors of the site. For complex strategies, where one allows multiple solutions, this
            can return a list of list of neighbors
        """
        raise NotImplementedError()

    @property
    def uniquely_determines_coordination_environments(self):
        """
        Returns True if the strategy leads to a unique coordination environment, False otherwise.
        :return: True if the strategy leads to a unique coordination environment, False otherwise.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_site_coordination_environment(self, site):
        """
        Applies the strategy to the structure_environments object in order to define the coordination environment of
        a given site.
        :param site: Site for which the coordination environment is looked for
        :return: The coordination environment of the site. For complex strategies, where one allows multiple
            solutions, this can return a list of coordination environments for the site
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_site_coordination_environments(self, site):
        """
        Applies the strategy to the structure_environments object in order to define the coordination environment of
        a given site.
        :param site: Site for which the coordination environment is looked for
        :return: The coordination environment of the site. For complex strategies, where one allows multiple
            solutions, this can return a list of coordination environments for the site
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_site_coordination_environments_fractions(
        self,
        site,
        isite=None,
        dequivsite=None,
        dthissite=None,
        mysym=None,
        ordered=True,
        min_fraction=0.0,
        return_maps=True,
        return_strategy_dict_info=False,
    ):
        """
        Applies the strategy to the structure_environments object in order to define the coordination environment of
        a given site.
        :param site: Site for which the coordination environment is looked for
        :return: The coordination environment of the site. For complex strategies, where one allows multiple
            solutions, this can return a list of coordination environments for the site
        """
        raise NotImplementedError()

    def get_site_ce_fractions_and_neighbors(self, site, full_ce_info=False, strategy_info=False):
        """
        Applies the strategy to the structure_environments object in order to get coordination environments, their
        fraction, csm, geometry_info, and neighbors.

        :param site: Site for which the above information is sought
        :return: The list of neighbors of the site. For complex strategies, where one allows multiple solutions, this
            can return a list of list of neighbors
        """
        [
            isite,
            dequivsite,
            dthissite,
            mysym,
        ] = self.equivalent_site_index_and_transform(site)
        geoms_and_maps_list = self.get_site_coordination_environments_fractions(
            site=site,
            isite=isite,
            dequivsite=dequivsite,
            dthissite=dthissite,
            mysym=mysym,
            return_maps=True,
            return_strategy_dict_info=True,
        )
        if geoms_and_maps_list is None:
            return None
        site_nbs_sets = self.structure_environments.neighbors_sets[isite]
        ce_and_neighbors = []
        for fractions_dict in geoms_and_maps_list:
            # Each fractions_dict carries a 'ce_map' identifying the NeighborsSet used.
            ce_map = fractions_dict["ce_map"]
            ce_nb_set = site_nbs_sets[ce_map[0]][ce_map[1]]
            neighbors = [
                {"site": nb_site_and_index["site"], "index": nb_site_and_index["index"]}
                for nb_site_and_index in ce_nb_set.neighb_sites_and_indices
            ]
            fractions_dict["neighbors"] = neighbors
            ce_and_neighbors.append(fractions_dict)
        return ce_and_neighbors

    def set_option(self, option_name, option_value):
        """Set up a given option for this strategy.

        :param option_name: Name of the option.
        :param option_value: Value for this option.
        :return: None
        """
        self.__setattr__(option_name, option_value)

    def setup_options(self, all_options_dict):
        """Set up options for this strategy based on a dict.

        :param all_options_dict: Dict of option_name->option_value.
        :return: None
        """
        for option_name, option_value in all_options_dict.items():
            self.set_option(option_name, option_value)

    @abc.abstractmethod
    def __eq__(self, other):
        """
        Equality method that should be implemented for any strategy
        :param other: strategy to be compared with the current one
        :return:
        """
        raise NotImplementedError()

    def __str__(self):
        out = f' Chemenv Strategy "{self.__class__.__name__}"\n'
        out += " {}\n\n".format("=" * (19 + len(self.__class__.__name__)))
        out += " Description :\n {}\n".format("-" * 13)
        out += self.STRATEGY_DESCRIPTION
        out += "\n\n"
        out += " Options :\n {}\n".format("-" * 9)
        for option_name, option_dict in self.STRATEGY_OPTIONS.items():
            out += f" - {option_name} : {str(getattr(self, option_name))}\n"
        return out

    @abc.abstractmethod
    def as_dict(self):
        """
        Bson-serializable dict representation of the SimplestChemenvStrategy object.
        :return: Bson-serializable dict representation of the SimplestChemenvStrategy object.
        """
        raise NotImplementedError()

    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the SimpleAbundanceChemenvStrategy object from a dict representation of the
        SimpleAbundanceChemenvStrategy object created using the as_dict method.
        :param d: dict representation of the SimpleAbundanceChemenvStrategy object
        :return: StructureEnvironments object
        """
        raise NotImplementedError()
class SimplestChemenvStrategy(AbstractChemenvStrategy):
"""
Simplest ChemenvStrategy using fixed angle and distance parameters for the definition of neighbors in the
Voronoi approach. The coordination environment is then given as the one with the lowest continuous symmetry measure
"""
# Default values for the distance and angle cutoffs
DEFAULT_DISTANCE_CUTOFF = 1.4
DEFAULT_ANGLE_CUTOFF = 0.3
DEFAULT_CONTINUOUS_SYMMETRY_MEASURE_CUTOFF = 10.0
DEFAULT_ADDITIONAL_CONDITION = AbstractChemenvStrategy.AC.ONLY_ACB
STRATEGY_OPTIONS = OrderedDict() # type: Dict[str, Dict]
STRATEGY_OPTIONS["distance_cutoff"] = {
"type": DistanceCutoffFloat,
"internal": "_distance_cutoff",
"default": DEFAULT_DISTANCE_CUTOFF,
}
STRATEGY_OPTIONS["angle_cutoff"] = {
"type": AngleCutoffFloat,
"internal": "_angle_cutoff",
"default": DEFAULT_ANGLE_CUTOFF,
}
STRATEGY_OPTIONS["additional_condition"] = {
"type": AdditionalConditionInt,
"internal": "_additional_condition",
"default": DEFAULT_ADDITIONAL_CONDITION,
}
STRATEGY_OPTIONS["continuous_symmetry_measure_cutoff"] = {
"type": CSMFloat,
"internal": "_continuous_symmetry_measure_cutoff",
"default": DEFAULT_CONTINUOUS_SYMMETRY_MEASURE_CUTOFF,
}
STRATEGY_DESCRIPTION = (
" Simplest ChemenvStrategy using fixed angle and distance parameters \n"
" for the definition of neighbors in the Voronoi approach. \n"
" The coordination environment is then given as the one with the \n"
" lowest continuous symmetry measure."
)
def __init__(
    self,
    structure_environments=None,
    distance_cutoff=DEFAULT_DISTANCE_CUTOFF,
    angle_cutoff=DEFAULT_ANGLE_CUTOFF,
    additional_condition=DEFAULT_ADDITIONAL_CONDITION,
    continuous_symmetry_measure_cutoff=DEFAULT_CONTINUOUS_SYMMETRY_MEASURE_CUTOFF,
    symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE,
):
    """
    Constructor for this SimplestChemenvStrategy.

    :param structure_environments: StructureEnvironments object containing the coordination information.
    :param distance_cutoff: Distance cutoff used (validated, must be >= 1.0).
    :param angle_cutoff: Angle cutoff used (validated, must be in [0.0, 1.0]).
    :param additional_condition: Additional condition identifier (see AdditionalConditions).
    :param continuous_symmetry_measure_cutoff: CSM cutoff (validated, must be in [0.0, 100.0]).
    :param symmetry_measure_type: Type of continuous symmetry measure to use.
    """
    AbstractChemenvStrategy.__init__(self, structure_environments, symmetry_measure_type=symmetry_measure_type)
    # Each assignment below goes through the corresponding validating property setter.
    self.distance_cutoff = distance_cutoff
    self.angle_cutoff = angle_cutoff
    self.additional_condition = additional_condition
    self.continuous_symmetry_measure_cutoff = continuous_symmetry_measure_cutoff
@property
def uniquely_determines_coordination_environments(self):
    """Whether this strategy uniquely determines coordination environments (always True here)."""
    return True
@property
def distance_cutoff(self):
    """Distance cutoff used."""
    return self._distance_cutoff

@distance_cutoff.setter
def distance_cutoff(self, distance_cutoff):
    """Set the distance cutoff for this strategy.

    :param distance_cutoff: Distance cutoff; validated by DistanceCutoffFloat (must be >= 1.0).
    :return: None
    """
    self._distance_cutoff = DistanceCutoffFloat(distance_cutoff)
@property
def angle_cutoff(self):
    """Angle cutoff used."""
    return self._angle_cutoff

@angle_cutoff.setter
def angle_cutoff(self, angle_cutoff):
    """Set the angle cutoff for this strategy.

    :param angle_cutoff: Angle cutoff; validated by AngleCutoffFloat (must be in [0.0, 1.0]).
    :return: None
    """
    self._angle_cutoff = AngleCutoffFloat(angle_cutoff)
@property
def additional_condition(self):
    """Additional condition for this strategy."""
    return self._additional_condition

@additional_condition.setter
def additional_condition(self, additional_condition):
    """Set the additional condition for this strategy.

    :param additional_condition: Additional condition; validated by AdditionalConditionInt.
    :return: None
    """
    self._additional_condition = AdditionalConditionInt(additional_condition)
@property
def continuous_symmetry_measure_cutoff(self):
    """CSM cutoff used"""
    return self._continuous_symmetry_measure_cutoff

@continuous_symmetry_measure_cutoff.setter
def continuous_symmetry_measure_cutoff(self, continuous_symmetry_measure_cutoff):
    """Set the CSM cutoff for this strategy.

    :param continuous_symmetry_measure_cutoff: CSM cutoff; validated by CSMFloat (must be in [0.0, 100.0]).
    :return: None
    """
    self._continuous_symmetry_measure_cutoff = CSMFloat(continuous_symmetry_measure_cutoff)
def get_site_neighbors(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None):
    """Get the neighbors of a given site.

    :param site: Site for which neighbors are needed.
    :param isite: Index of the site.
    :param dequivsite: Translation of the equivalent site.
    :param dthissite: Translation of this site.
    :param mysym: Symmetry to be applied.
    :return: List of coordinated neighbors of site.
    """
    # Resolve the equivalent unit-cell site and transformations when the caller did not supply them.
    if isite is None:
        [
            isite,
            dequivsite,
            dthissite,
            mysym,
        ] = self.equivalent_site_index_and_transform(site)
    ce, cn_map = self.get_site_coordination_environment(
        site=site,
        isite=isite,
        dequivsite=dequivsite,
        dthissite=dthissite,
        mysym=mysym,
        return_map=True,
    )
    # cn_map identifies the NeighborsSet (coordination number, index) used for this site.
    nb_set = self.structure_environments.neighbors_sets[isite][cn_map[0]][cn_map[1]]
    eqsite_ps = nb_set.neighb_sites
    coordinated_neighbors = []
    for ips, ps in enumerate(eqsite_ps):
        # Map the neighbor back to the frame of the requested site: apply the
        # symmetry operation, then the two translations found above.
        coords = mysym.operate(ps.frac_coords + dequivsite) + dthissite
        ps_site = PeriodicSite(ps._species, coords, ps._lattice)
        coordinated_neighbors.append(ps_site)
    return coordinated_neighbors
def get_site_coordination_environment(
    self,
    site,
    isite=None,
    dequivsite=None,
    dthissite=None,
    mysym=None,
    return_map=False,
):
    """Get the coordination environment of a given site.

    The strategy's (distance cutoff, angle cutoff) pair is mapped onto the
    Voronoi grid of normalized distances/angles, and the neighbors set that
    was built from exactly that (distance index, angle index, additional
    condition) source is looked up.

    :param site: Site for which coordination environment is needed.
    :param isite: Index of the site.
    :param dequivsite: Translation of the equivalent site.
    :param dthissite: Translation of this site.
    :param mysym: Symmetry to be applied.
    :param return_map: Whether to return cn_map (identifies the NeighborsSet used).
    :return: Coordination environment of site.
    """
    if isite is None:
        [
            isite,
            dequivsite,
            dthissite,
            mysym,
        ] = self.equivalent_site_index_and_transform(site)
    neighbors_normalized_distances = self.structure_environments.voronoi.neighbors_normalized_distances[isite]
    neighbors_normalized_angles = self.structure_environments.voronoi.neighbors_normalized_angles[isite]
    # Index of the last distance bin whose lower bound is still within the
    # distance cutoff (bins are scanned in order, so we keep the last hit).
    idist = None
    for iwd, wd in enumerate(neighbors_normalized_distances):
        if self.distance_cutoff >= wd["min"]:
            idist = iwd
        else:
            break
    # Index of the last angle bin whose upper bound still exceeds the cutoff.
    iang = None
    for iwa, wa in enumerate(neighbors_normalized_angles):
        if self.angle_cutoff <= wa["max"]:
            iang = iwa
        else:
            break
    if idist is None or iang is None:
        raise ValueError("Distance or angle parameter not found ...")
    # Search all neighbors sets of the site for one whose recorded source
    # matches the distance/angle indices and the additional condition.
    my_cn = None
    my_inb_set = None
    found = False
    for cn, nb_sets in self.structure_environments.neighbors_sets[isite].items():
        for inb_set, nb_set in enumerate(nb_sets):
            sources = [
                src
                for src in nb_set.sources
                if src["origin"] == "dist_ang_ac_voronoi" and src["ac"] == self.additional_condition
            ]
            for src in sources:
                if src["idp"] == idist and src["iap"] == iang:
                    my_cn = cn
                    my_inb_set = inb_set
                    found = True
                    break
            if found:
                break
        if found:
            break
    if not found:
        return None
    cn_map = (my_cn, my_inb_set)
    ce = self.structure_environments.ce_list[self.structure_environments.sites_map[isite]][cn_map[0]][cn_map[1]]
    if ce is None:
        return None
    coord_geoms = ce.coord_geoms
    if return_map:
        # Without computed geometries only the coordination number is known.
        if coord_geoms is None:
            return cn_map[0], cn_map
        return (
            ce.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type),
            cn_map,
        )
    if coord_geoms is None:
        return cn_map[0]
    return ce.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type)
def get_site_coordination_environments_fractions(
    self,
    site,
    isite=None,
    dequivsite=None,
    dthissite=None,
    mysym=None,
    ordered=True,
    min_fraction=0.0,
    return_maps=True,
    return_strategy_dict_info=False,
):
    """Get the coordination environments of a given site and additional information.

    This strategy determines a single environment per site, so the returned
    list always contains exactly one entry with a fraction of 1.0 (the
    ``ordered`` and ``min_fraction`` arguments therefore have no effect here).

    :param site: Site for which coordination environment is needed.
    :param isite: Index of the site.
    :param dequivsite: Translation of the equivalent site.
    :param dthissite: Translation of this site.
    :param mysym: Symmetry to be applied.
    :param ordered: Whether to order the list by fractions.
    :param min_fraction: Minimum fraction to include in the list
    :param return_maps: Whether to return cn_maps (identifies all the NeighborsSet used).
    :param return_strategy_dict_info: Whether to add the info about the strategy used.
    :return: List of Dict with coordination environment, fraction and additional info.
    """
    if isite is None or dequivsite is None or dthissite is None or mysym is None:
        [
            isite,
            dequivsite,
            dthissite,
            mysym,
        ] = self.equivalent_site_index_and_transform(site)
    site_nb_sets = self.structure_environments.neighbors_sets[isite]
    if site_nb_sets is None:
        return None
    ce_and_map = self.get_site_coordination_environment(
        site=site,
        isite=isite,
        dequivsite=dequivsite,
        dthissite=dthissite,
        mysym=mysym,
        return_map=True,
    )
    if ce_and_map is None:
        return None
    ce, ce_map = ce_and_map
    if ce is None:
        # No geometry could be computed: report the bare coordination number.
        ce_dict = {
            "ce_symbol": f"UNKNOWN:{ce_map[0]:d}",
            "ce_dict": None,
            "ce_fraction": 1.0,
        }
    else:
        ce_dict = {"ce_symbol": ce[0], "ce_dict": ce[1], "ce_fraction": 1.0}
    if return_maps:
        ce_dict["ce_map"] = ce_map
    if return_strategy_dict_info:
        ce_dict["strategy_info"] = {}
    fractions_info_list = [ce_dict]
    return fractions_info_list
def get_site_coordination_environments(
    self,
    site,
    isite=None,
    dequivsite=None,
    dthissite=None,
    mysym=None,
    return_maps=False,
):
    """Get the coordination environments of a given site.

    This strategy yields a single environment per site, so the result is a
    one-element list wrapping get_site_coordination_environment.

    :param site: Site for which coordination environment is needed.
    :param isite: Index of the site.
    :param dequivsite: Translation of the equivalent site.
    :param dthissite: Translation of this site.
    :param mysym: Symmetry to be applied.
    :param return_maps: Whether to return cn_maps (identifies all the NeighborsSet used).
    :return: List of coordination environment.
    """
    environment = self.get_site_coordination_environment(
        site=site,
        isite=isite,
        dequivsite=dequivsite,
        dthissite=dthissite,
        mysym=mysym,
        return_map=return_maps,
    )
    return [environment]
def add_strategy_visualization_to_subplot(self, subplot, visualization_options=None, plot_type=None):
    """Add a visual of the strategy on a distance-angle plot.

    Marks the strategy's single (distance cutoff, angle cutoff) point, first
    as an open white circle and then as a cross drawn on top of it.

    :param subplot: Axes object onto the visual should be added.
    :param visualization_options: Options for the visual.
    :param plot_type: Type of distance-angle plot.
    :return: None
    """
    dist = self._distance_cutoff
    ang = self._angle_cutoff
    # Open circle marking the cutoff point.
    subplot.plot(dist, ang, "o", mec=None, mfc="w", markersize=12)
    # Cross overlaid on the circle.
    subplot.plot(dist, ang, "x", linewidth=2, markersize=12)
def __eq__(self, other):
    """Equality check against another strategy.

    The class-name comparison comes first so that, for objects of a
    different class, we return False before touching the other object's
    private attributes (preserving the original short-circuit behavior).
    """
    if self.__class__.__name__ != other.__class__.__name__:
        return False
    return (
        self._distance_cutoff == other._distance_cutoff
        and self._angle_cutoff == other._angle_cutoff
        and self._additional_condition == other._additional_condition
        and self._continuous_symmetry_measure_cutoff == other._continuous_symmetry_measure_cutoff
        and self.symmetry_measure_type == other.symmetry_measure_type
    )
def as_dict(self):
    """
    Bson-serializable dict representation of the SimplestChemenvStrategy object.

    :return: Bson-serializable dict representation of the SimplestChemenvStrategy object.
    """
    dd = {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
    }
    # Cast the validated wrapper types back to plain floats/ints so that the
    # dict is BSON-serializable.
    dd["distance_cutoff"] = float(self._distance_cutoff)
    dd["angle_cutoff"] = float(self._angle_cutoff)
    dd["additional_condition"] = int(self._additional_condition)
    dd["continuous_symmetry_measure_cutoff"] = float(self._continuous_symmetry_measure_cutoff)
    dd["symmetry_measure_type"] = self._symmetry_measure_type
    return dd
@classmethod
def from_dict(cls, d):
    """
    Reconstructs the SimplestChemenvStrategy object from a dict representation of the SimplestChemenvStrategy object
    created using the as_dict method.

    :param d: dict representation of the SimplestChemenvStrategy object
    :return: SimplestChemenvStrategy object
    """
    return cls(
        distance_cutoff=d["distance_cutoff"],
        angle_cutoff=d["angle_cutoff"],
        additional_condition=d["additional_condition"],
        continuous_symmetry_measure_cutoff=d["continuous_symmetry_measure_cutoff"],
        symmetry_measure_type=d["symmetry_measure_type"],
    )
class SimpleAbundanceChemenvStrategy(AbstractChemenvStrategy):
    """
    Simple ChemenvStrategy using the neighbors that are the most "abundant" in the grid of angle and distance
    parameters for the definition of neighbors in the Voronoi approach.
    The coordination environment is then given as the one with the lowest continuous symmetry measure
    """

    # Maximum normalized distance used when computing the surfaces of the
    # parameter regions (see _get_maps_surfaces).
    DEFAULT_MAX_DIST = 2.0
    DEFAULT_ADDITIONAL_CONDITION = AbstractChemenvStrategy.AC.ONLY_ACB
    STRATEGY_OPTIONS = OrderedDict()  # type: Dict[str, Dict]
    STRATEGY_OPTIONS["additional_condition"] = {
        "type": AdditionalConditionInt,
        "internal": "_additional_condition",
        "default": DEFAULT_ADDITIONAL_CONDITION,
    }
    STRATEGY_OPTIONS["surface_calculation_type"] = {}
    STRATEGY_DESCRIPTION = (
        ' Simple Abundance ChemenvStrategy using the most "abundant" neighbors map \n'
        " for the definition of neighbors in the Voronoi approach. \n"
        " The coordination environment is then given as the one with the \n"
        " lowest continuous symmetry measure."
    )

    def __init__(
        self,
        structure_environments=None,
        additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
        symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE,
    ):
        """
        Constructor for the SimpleAbundanceChemenvStrategy.

        :param structure_environments: StructureEnvironments object containing all the information on the
            coordination of the sites in a structure
        :raises NotImplementedError: always — this strategy is not yet implemented.
        """
        raise NotImplementedError("SimpleAbundanceChemenvStrategy not yet implemented")
        # NOTE(review): the lines below are unreachable while the strategy is
        # not implemented; they are kept as the intended initialization.
        AbstractChemenvStrategy.__init__(self, structure_environments, symmetry_measure_type=symmetry_measure_type)
        self._additional_condition = additional_condition

    @property
    def uniquely_determines_coordination_environments(self):
        """Whether this strategy uniquely determines coordination environments."""
        return True

    def get_site_neighbors(self, site):
        """Get the neighbors of a given site with this strategy.

        :param site: Periodic site.
        :return: List of neighbors of site.
        """
        [
            isite,
            dequivsite,
            dthissite,
            mysym,
        ] = self.equivalent_site_index_and_transform(site)
        cn_map = self._get_map(isite)
        eqsite_ps = self.structure_environments.unique_coordinated_neighbors(isite, cn_map=cn_map)
        # Map the equivalent site's neighbors back onto this site through the
        # symmetry operation and the two translations.
        coordinated_neighbors = []
        for ips, ps in enumerate(eqsite_ps):
            coords = mysym.operate(ps.frac_coords + dequivsite) + dthissite
            ps_site = PeriodicSite(ps._species, coords, ps._lattice)
            coordinated_neighbors.append(ps_site)
        return coordinated_neighbors

    def get_site_coordination_environment(
        self,
        site,
        isite=None,
        dequivsite=None,
        dthissite=None,
        mysym=None,
        return_map=False,
    ):
        """Get the coordination environment of a given site.

        :param site: Site for which coordination environment is needed.
        :param isite: Index of the site.
        :param dequivsite: Translation of the equivalent site.
        :param dthissite: Translation of this site.
        :param mysym: Symmetry to be applied.
        :param return_map: Whether to return cn_map (identifies the NeighborsSet used).
        :return: Coordination environment of site.
        """
        if isite is None:
            [
                isite,
                dequivsite,
                dthissite,
                mysym,
            ] = self.equivalent_site_index_and_transform(site)
        cn_map = self._get_map(isite)
        if cn_map is None:
            return None
        coord_geoms = self.structure_environments.ce_list[self.structure_environments.sites_map[isite]][cn_map[0]][
            cn_map[1]
        ]
        if return_map:
            # Without computed geometries only the coordination number is known.
            if coord_geoms is None:
                return cn_map[0], cn_map
            return (
                coord_geoms.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type),
                cn_map,
            )
        if coord_geoms is None:
            return cn_map[0]
        return coord_geoms.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type)

    def get_site_coordination_environments(
        self,
        site,
        isite=None,
        dequivsite=None,
        dthissite=None,
        mysym=None,
        return_maps=False,
    ):
        """Get the coordination environments of a given site.

        This strategy determines a single environment per site, hence the
        one-element list.

        :param site: Site for which coordination environment is needed.
        :param isite: Index of the site.
        :param dequivsite: Translation of the equivalent site.
        :param dthissite: Translation of this site.
        :param mysym: Symmetry to be applied.
        :param return_maps: Whether to return cn_maps (identifies all the NeighborsSet used).
        :return: List of coordination environment.
        """
        return [
            self.get_site_coordination_environment(
                site=site,
                isite=isite,
                dequivsite=dequivsite,
                dthissite=dthissite,
                mysym=mysym,
                return_map=return_maps,
            )
        ]

    def _get_map(self, isite):
        # Pick, among the maps satisfying the additional condition, the one
        # with the largest surface in the distance-angle parameter space.
        maps_and_surfaces = self._get_maps_surfaces(isite)
        if maps_and_surfaces is None:
            return None
        surface_max = 0.0
        imax = -1
        for ii, map_and_surface in enumerate(maps_and_surfaces):
            all_additional_conditions = [ac[2] for ac in map_and_surface["parameters_indices"]]
            if self._additional_condition in all_additional_conditions and map_and_surface["surface"] > surface_max:
                surface_max = map_and_surface["surface"]
                imax = ii
        # NOTE(review): if no map satisfies the additional condition, imax
        # stays -1 and the LAST map is returned — looks unintended; confirm.
        return maps_and_surfaces[imax]["map"]

    def _get_maps_surfaces(self, isite, surface_calculation_type=None):
        # Default: surfaces computed on the initial normalized distance/angle
        # grids, with no bounds.
        if surface_calculation_type is None:
            surface_calculation_type = {
                "distance_parameter": ("initial_normalized", None),
                "angle_parameter": ("initial_normalized", None),
            }
        return self.structure_environments.voronoi.maps_and_surfaces(
            isite=isite,
            surface_calculation_type=surface_calculation_type,
            max_dist=self.DEFAULT_MAX_DIST,
        )

    def __eq__(self, other):
        # Class name is compared first so the attribute access below is only
        # attempted on objects of the same class.
        return (
            self.__class__.__name__ == other.__class__.__name__
            and self._additional_condition == other.additional_condition
        )

    def as_dict(self):
        """
        Bson-serializable dict representation of the SimpleAbundanceChemenvStrategy object.

        :return: Bson-serializable dict representation of the SimpleAbundanceChemenvStrategy object.
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "additional_condition": self._additional_condition,
        }

    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the SimpleAbundanceChemenvStrategy object from a dict representation of the
        SimpleAbundanceChemenvStrategy object created using the as_dict method.

        :param d: dict representation of the SimpleAbundanceChemenvStrategy object
        :return: SimpleAbundanceChemenvStrategy object
        """
        return cls(additional_condition=d["additional_condition"])
class TargettedPenaltiedAbundanceChemenvStrategy(SimpleAbundanceChemenvStrategy):
    """
    Simple ChemenvStrategy using the neighbors that are the most "abundant" in the grid of angle and distance
    parameters for the definition of neighbors in the Voronoi approach, with a bias for a given list of target
    environments. This can be useful in the case of, e.g. connectivity search of some given environment.
    The coordination environment is then given as the one with the lowest continuous symmetry measure
    """

    # Environments favored by the bias when none are specified explicitly.
    DEFAULT_TARGET_ENVIRONMENTS = ["O:6"]

    def __init__(
        self,
        structure_environments=None,
        truncate_dist_ang=True,
        additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
        max_nabundant=5,
        target_environments=DEFAULT_TARGET_ENVIRONMENTS,
        target_penalty_type="max_csm",
        max_csm=5.0,
        symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE,
    ):
        """Initializes strategy.

        Not yet implemented.

        :param structure_environments: StructureEnvironments object.
        :param truncate_dist_ang: Whether to truncate the distance/angle grid.
        :param additional_condition: Additional condition for the neighbors.
        :param max_nabundant: Number of most-abundant maps to inspect.
        :param target_environments: List of target environment symbols (e.g. "O:6").
        :param target_penalty_type: Type of penalty for non-target environments.
        :param max_csm: Maximum CSM accepted for a target environment.
        :param symmetry_measure_type: Type of symmetry measure to be used.
        :raises NotImplementedError: always — this strategy is not yet implemented.
        """
        raise NotImplementedError("TargettedPenaltiedAbundanceChemenvStrategy not yet implemented")
        # NOTE(review): the lines below are unreachable while the strategy is
        # not implemented; they are kept as the intended initialization.
        SimpleAbundanceChemenvStrategy.__init__(
            self,
            structure_environments,
            additional_condition=additional_condition,
            symmetry_measure_type=symmetry_measure_type,
        )
        self.max_nabundant = max_nabundant
        self.target_environments = target_environments
        self.target_penalty_type = target_penalty_type
        self.max_csm = max_csm

    def get_site_coordination_environment(
        self,
        site,
        isite=None,
        dequivsite=None,
        dthissite=None,
        mysym=None,
        return_map=False,
    ):
        """Get the coordination environment of a given site.

        :param site: Site for which coordination environment is needed.
        :param isite: Index of the site.
        :param dequivsite: Translation of the equivalent site.
        :param dthissite: Translation of this site.
        :param mysym: Symmetry to be applied.
        :param return_map: Whether to return cn_map (identifies the NeighborsSet used).
        :return: Coordination environment of site.
        """
        if isite is None:
            [
                isite,
                dequivsite,
                dthissite,
                mysym,
            ] = self.equivalent_site_index_and_transform(site)
        cn_map = self._get_map(isite)
        if cn_map is None:
            return None
        chemical_environments = self.structure_environments.ce_list[self.structure_environments.sites_map[isite]][
            cn_map[0]
        ][cn_map[1]]
        if return_map:
            # Without computed geometries only the coordination number is known.
            if chemical_environments.coord_geoms is None or len(chemical_environments) == 0:
                return cn_map[0], cn_map
            return (
                chemical_environments.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type),
                cn_map,
            )
        if chemical_environments.coord_geoms is None:
            return cn_map[0]
        return chemical_environments.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type)

    def _get_map(self, isite):
        # Scan the most abundant maps (largest surfaces first) for one whose
        # best geometry is a target environment within the CSM threshold;
        # fall back to the plain most-abundant map otherwise.
        maps_and_surfaces = SimpleAbundanceChemenvStrategy._get_maps_surfaces(self, isite)
        if maps_and_surfaces is None:
            return SimpleAbundanceChemenvStrategy._get_map(self, isite)
        current_map = None
        current_target_env_csm = 100.0
        surfaces = [map_and_surface["surface"] for map_and_surface in maps_and_surfaces]
        order = np.argsort(surfaces)[::-1]
        target_cgs = [
            AllCoordinationGeometries().get_geometry_from_mp_symbol(mp_symbol) for mp_symbol in self.target_environments
        ]
        target_cns = [cg.coordination_number for cg in target_cgs]
        for ii in range(min([len(maps_and_surfaces), self.max_nabundant])):
            my_map_and_surface = maps_and_surfaces[order[ii]]
            mymap = my_map_and_surface["map"]
            cn = mymap[0]
            # Skip coordination numbers that cannot match a target environment.
            if cn not in target_cns or cn > 12 or cn == 0:
                continue
            all_conditions = [params[2] for params in my_map_and_surface["parameters_indices"]]
            if self._additional_condition not in all_conditions:
                continue
            cg, cgdict = self.structure_environments.ce_list[self.structure_environments.sites_map[isite]][mymap[0]][
                mymap[1]
            ].minimum_geometry(symmetry_measure_type=self._symmetry_measure_type)
            # Keep the target environment with the lowest CSM seen so far.
            if (
                cg in self.target_environments
                and cgdict["symmetry_measure"] <= self.max_csm
                and cgdict["symmetry_measure"] < current_target_env_csm
            ):
                current_map = mymap
                current_target_env_csm = cgdict["symmetry_measure"]
        if current_map is not None:
            return current_map
        return SimpleAbundanceChemenvStrategy._get_map(self, isite)

    @property
    def uniquely_determines_coordination_environments(self):
        """Whether this strategy uniquely determines coordination environments."""
        return True

    def as_dict(self):
        """
        Bson-serializable dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object.

        :return: Bson-serializable dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object.
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "additional_condition": self._additional_condition,
            "max_nabundant": self.max_nabundant,
            "target_environments": self.target_environments,
            "target_penalty_type": self.target_penalty_type,
            "max_csm": self.max_csm,
        }

    def __eq__(self, other):
        # Class name is compared first so the attribute accesses below are
        # only attempted on objects of the same class.
        return (
            self.__class__.__name__ == other.__class__.__name__
            and self._additional_condition == other.additional_condition
            and self.max_nabundant == other.max_nabundant
            and self.target_environments == other.target_environments
            and self.target_penalty_type == other.target_penalty_type
            and self.max_csm == other.max_csm
        )

    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the TargettedPenaltiedAbundanceChemenvStrategy object from a dict representation of the
        TargettedPenaltiedAbundanceChemenvStrategy object created using the as_dict method.

        :param d: dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object
        :return: TargettedPenaltiedAbundanceChemenvStrategy object
        """
        return cls(
            additional_condition=d["additional_condition"],
            max_nabundant=d["max_nabundant"],
            target_environments=d["target_environments"],
            target_penalty_type=d["target_penalty_type"],
            max_csm=d["max_csm"],
        )
class NbSetWeight(MSONable, metaclass=abc.ABCMeta):
    """Abstract object for neighbors sets weights estimations."""

    @abc.abstractmethod
    def as_dict(self):
        """Return a JSON serializable dict representation of this neighbors set weight."""

    @abc.abstractmethod
    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments used to estimate weight.
        :param cn_map: Mapping index for this neighbors set.
        :param additional_info: Additional information.
        :return: Weight of the neighbors set.
        """
class AngleNbSetWeight(NbSetWeight):
    """Weight of neighbors set based on the angle."""

    SHORT_NAME = "AngleWeight"

    def __init__(self, aa=1.0):
        """Initialize AngleNbSetWeight estimator.

        :param aa: Exponent of the angle for the estimator.
        """
        self.aa = aa
        # An exponent of 1 needs no power evaluation: use the plain sum.
        self.aw = self.angle_sum if self.aa == 1.0 else self.angle_sumn

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments used to estimate weight.
        :param cn_map: Mapping index for this neighbors set.
        :param additional_info: Additional information.
        :return: Weight of the neighbors set.
        """
        return self.aw(nb_set=nb_set)

    @staticmethod
    def angle_sum(nb_set):
        """Sum of all angles in a neighbors set.

        :param nb_set: Neighbors set.
        :return: Sum of solid angles for the neighbors set.
        """
        # Normalize by the full solid angle of the sphere.
        return np.sum(nb_set.angles) / (4.0 * np.pi)

    def angle_sumn(self, nb_set):
        """Sum of all angles to a given power in a neighbors set.

        :param nb_set: Neighbors set.
        :return: Sum of solid angles to the power aa for the neighbors set.
        """
        return np.power(self.angle_sum(nb_set=nb_set), self.aa)

    def __eq__(self, other):
        """Two angle weights are equal when their exponents match."""
        return self.aa == other.aa

    def __ne__(self, other):
        """Inequality as the negation of equality."""
        return not self == other

    def as_dict(self):
        """MSONAble dict"""
        dd = {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
        }
        dd["aa"] = self.aa
        return dd

    @classmethod
    def from_dict(cls, dd):
        """Initialize from dict.

        :param dd: Dict representation of AngleNbSetWeight.
        :return: AngleNbSetWeight.
        """
        return cls(aa=dd["aa"])
class NormalizedAngleDistanceNbSetWeight(NbSetWeight):
    """Weight of neighbors set based on the normalized angle/distance."""

    SHORT_NAME = "NormAngleDistWeight"

    def __init__(self, average_type, aa, bb):
        """Initialize NormalizedAngleDistanceNbSetWeight.

        :param average_type: Average function.
        :param aa: Exponent for the angle values.
        :param bb: Exponent for the distance values.
        """
        self.average_type = average_type
        if self.average_type == "geometric":
            self.eval = self.gweight
        elif self.average_type == "arithmetic":
            self.eval = self.aweight
        else:
            raise ValueError(
                'Average type is "{}" while it should be ' '"geometric" or "arithmetic"'.format(average_type)
            )
        self.aa = aa
        self.bb = bb
        # Select the per-neighbor estimator once, according to the exponents:
        # angle^aa / distance^bb, with the special cases (exponent 0 drops the
        # factor, exponent 1 skips the power) resolved at construction time.
        # The branch order matters: aa == 0 is handled first.
        if self.aa == 0:
            if self.bb == 1:
                self.fda = self.invdist
            elif self.bb == 0:
                raise ValueError("Both exponents are 0.")
            else:
                self.fda = self.invndist
        elif self.bb == 0:
            if self.aa == 1:
                self.fda = self.ang
            else:
                self.fda = self.angn
        else:
            if self.aa == 1:
                if self.bb == 1:
                    self.fda = self.anginvdist
                else:
                    self.fda = self.anginvndist
            else:
                if self.bb == 1:
                    self.fda = self.angninvdist
                else:
                    self.fda = self.angninvndist

    def __eq__(self, other):
        """Equality: same average type and same exponents."""
        return self.average_type == other.average_type and self.aa == other.aa and self.bb == other.bb

    def __ne__(self, other):
        """Inequality as the negation of equality."""
        return not self == other

    def as_dict(self):
        """MSONable dict"""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "average_type": self.average_type,
            "aa": self.aa,
            "bb": self.bb,
        }

    @classmethod
    def from_dict(cls, dd):
        """Initialize from dict.

        :param dd: Dict representation of NormalizedAngleDistanceNbSetWeight.
        :return: NormalizedAngleDistanceNbSetWeight.
        """
        return cls(average_type=dd["average_type"], aa=dd["aa"], bb=dd["bb"])

    @staticmethod
    def invdist(nb_set):
        """Inverse distance weight.

        :param nb_set: Neighbors set.
        :return: List of inverse distances.
        """
        return [1.0 / dist for dist in nb_set.normalized_distances]

    def invndist(self, nb_set):
        """Inverse power distance weight.

        :param nb_set: Neighbors set.
        :return: List of inverse power distances.
        """
        return [1.0 / dist ** self.bb for dist in nb_set.normalized_distances]

    @staticmethod
    def ang(nb_set):
        """Angle weight.

        :param nb_set: Neighbors set.
        :return: List of angle weights.
        """
        return nb_set.normalized_angles

    def angn(self, nb_set):
        """Power angle weight.

        :param nb_set: Neighbors set.
        :return: List of power angle weights.
        """
        return [ang ** self.aa for ang in nb_set.normalized_angles]

    @staticmethod
    def anginvdist(nb_set):
        """Angle/distance weight.

        :param nb_set: Neighbors set.
        :return: List of angle/distance weights.
        """
        nangles = nb_set.normalized_angles
        return [nangles[ii] / dist for ii, dist in enumerate(nb_set.normalized_distances)]

    def anginvndist(self, nb_set):
        """Angle/power distance weight.

        :param nb_set: Neighbors set.
        :return: List of angle/power distance weights.
        """
        nangles = nb_set.normalized_angles
        return [nangles[ii] / dist ** self.bb for ii, dist in enumerate(nb_set.normalized_distances)]

    def angninvdist(self, nb_set):
        """Power angle/distance weight.

        :param nb_set: Neighbors set.
        :return: List of power angle/distance weights.
        """
        nangles = nb_set.normalized_angles
        return [nangles[ii] ** self.aa / dist for ii, dist in enumerate(nb_set.normalized_distances)]

    def angninvndist(self, nb_set):
        """Power angle/power distance weight.

        :param nb_set: Neighbors set.
        :return: List of power angle/power distance weights.
        """
        nangles = nb_set.normalized_angles
        return [nangles[ii] ** self.aa / dist ** self.bb for ii, dist in enumerate(nb_set.normalized_distances)]

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments used to estimate weight.
        :param cn_map: Mapping index for this neighbors set.
        :param additional_info: Additional information.
        :return: Weight of the neighbors set.
        """
        # Per-neighbor estimators are averaged with the configured mean.
        fda_list = self.fda(nb_set=nb_set)
        return self.eval(fda_list=fda_list)

    @staticmethod
    def gweight(fda_list):
        """Geometric mean of the weights.

        :param fda_list: List of estimator weights for each neighbor.
        :return: Geometric mean of the weights.
        """
        return gmean(fda_list)

    @staticmethod
    def aweight(fda_list):
        """Standard mean of the weights.

        :param fda_list: List of estimator weights for each neighbor.
        :return: Standard mean of the weights.
        """
        return np.mean(fda_list)
def get_effective_csm(
    nb_set,
    cn_map,
    structure_environments,
    additional_info,
    symmetry_measure_type,
    max_effective_csm,
    effective_csm_estimator_ratio_function,
):
    """Get the effective continuous symmetry measure of a given neighbors set.

    The result is memoized in additional_info["effective_csms"][isite][cn_map];
    a cache miss is detected through the KeyError of the lookup.

    :param nb_set: Neighbors set.
    :param cn_map: Mapping index of this neighbors set.
    :param structure_environments: Structure environments.
    :param additional_info: Additional information for the neighbors set.
    :param symmetry_measure_type: Type of symmetry measure to be used in the effective CSM.
    :param max_effective_csm: Max CSM to use for the effective CSM calculation.
    :param effective_csm_estimator_ratio_function: Ratio function to use to compute effective CSM.
    :return: Effective CSM of a given Neighbors set.
    """
    try:
        effective_csm = additional_info["effective_csms"][nb_set.isite][cn_map]
    except KeyError:
        site_ce_list = structure_environments.ce_list[nb_set.isite]
        site_chemenv = site_ce_list[cn_map[0]][cn_map[1]]
        if site_chemenv is None:
            # No environment computed: treat as maximally dissimilar.
            effective_csm = 100.0
        else:
            mingeoms = site_chemenv.minimum_geometries(
                symmetry_measure_type=symmetry_measure_type, max_csm=max_effective_csm
            )
            if len(mingeoms) == 0:
                effective_csm = 100.0
            else:
                # Combine all CSMs below the threshold through the ratio function.
                csms = [
                    ce_dict["other_symmetry_measures"][symmetry_measure_type]
                    for mp_symbol, ce_dict in mingeoms
                    if ce_dict["other_symmetry_measures"][symmetry_measure_type] <= max_effective_csm
                ]
                effective_csm = effective_csm_estimator_ratio_function.mean_estimator(csms)
        # Store the freshly computed value in the cache.
        set_info(
            additional_info=additional_info,
            field="effective_csms",
            isite=nb_set.isite,
            cn_map=cn_map,
            value=effective_csm,
        )
    return effective_csm
def set_info(additional_info, field, isite, cn_map, value):
    """Set additional information for the weights.

    Creates the nested ``field -> isite -> cn_map`` levels on demand before
    storing the value.

    :param additional_info: Additional information.
    :param field: Type of additional information.
    :param isite: Index of site to add info.
    :param cn_map: Mapping index of the neighbors set.
    :param value: Value of this additional information.
    :return: None
    """
    additional_info.setdefault(field, {}).setdefault(isite, {})[cn_map] = value
class SelfCSMNbSetWeight(NbSetWeight):
    """Weight of neighbors set based on the Self CSM."""

    SHORT_NAME = "SelfCSMWeight"

    DEFAULT_EFFECTIVE_CSM_ESTIMATOR = {
        "function": "power2_inverse_decreasing",
        "options": {"max_csm": 8.0},
    }
    DEFAULT_WEIGHT_ESTIMATOR = {
        "function": "power2_decreasing_exp",
        "options": {"max_csm": 8.0, "alpha": 1.0},
    }
    DEFAULT_SYMMETRY_MEASURE_TYPE = "csm_wcs_ctwcc"

    def __init__(
        self,
        # NOTE(review): the defaults below are shared class-level dicts used
        # as default arguments; they are only read here, but callers mutating
        # them would affect all instances — confirm this is intended.
        effective_csm_estimator=DEFAULT_EFFECTIVE_CSM_ESTIMATOR,
        weight_estimator=DEFAULT_WEIGHT_ESTIMATOR,
        symmetry_measure_type=DEFAULT_SYMMETRY_MEASURE_TYPE,
    ):
        """Initialize SelfCSMNbSetWeight.

        :param effective_csm_estimator: Ratio function used for the effective CSM (comparison between neighbors sets).
        :param weight_estimator: Weight estimator within a given neighbors set.
        :param symmetry_measure_type: Type of symmetry measure to be used.
        """
        self.effective_csm_estimator = effective_csm_estimator
        self.effective_csm_estimator_rf = CSMInfiniteRatioFunction.from_dict(effective_csm_estimator)
        self.weight_estimator = weight_estimator
        self.weight_estimator_rf = CSMFiniteRatioFunction.from_dict(weight_estimator)
        self.symmetry_measure_type = symmetry_measure_type
        # CSMs above this bound are ignored in the effective CSM calculation.
        self.max_effective_csm = self.effective_csm_estimator["options"]["max_csm"]

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments used to estimate weight.
        :param cn_map: Mapping index for this neighbors set.
        :param additional_info: Additional information.
        :return: Weight of the neighbors set.
        """
        effective_csm = get_effective_csm(
            nb_set=nb_set,
            cn_map=cn_map,
            structure_environments=structure_environments,
            additional_info=additional_info,
            symmetry_measure_type=self.symmetry_measure_type,
            max_effective_csm=self.max_effective_csm,
            effective_csm_estimator_ratio_function=self.effective_csm_estimator_rf,
        )
        weight = self.weight_estimator_rf.evaluate(effective_csm)
        # Record the computed weight for later inspection.
        set_info(
            additional_info=additional_info,
            field="self_csms_weights",
            isite=nb_set.isite,
            cn_map=cn_map,
            value=weight,
        )
        return weight

    def __eq__(self, other):
        """Equality: same estimators and same symmetry measure type."""
        return (
            self.effective_csm_estimator == other.effective_csm_estimator
            and self.weight_estimator == other.weight_estimator
            and self.symmetry_measure_type == other.symmetry_measure_type
        )

    def __ne__(self, other):
        """Inequality as the negation of equality."""
        return not self == other

    def as_dict(self):
        """MSONable dict"""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "effective_csm_estimator": self.effective_csm_estimator,
            "weight_estimator": self.weight_estimator,
            "symmetry_measure_type": self.symmetry_measure_type,
        }

    @classmethod
    def from_dict(cls, dd):
        """Initialize from dict.

        :param dd: Dict representation of SelfCSMNbSetWeight.
        :return: SelfCSMNbSetWeight.
        """
        return cls(
            effective_csm_estimator=dd["effective_csm_estimator"],
            weight_estimator=dd["weight_estimator"],
            symmetry_measure_type=dd["symmetry_measure_type"],
        )
class DeltaCSMNbSetWeight(NbSetWeight):
"""Weight of neighbors set based on the differences of CSM."""
SHORT_NAME = "DeltaCSMWeight"
DEFAULT_EFFECTIVE_CSM_ESTIMATOR = {
"function": "power2_inverse_decreasing",
"options": {"max_csm": 8.0},
}
DEFAULT_SYMMETRY_MEASURE_TYPE = "csm_wcs_ctwcc"
DEFAULT_WEIGHT_ESTIMATOR = {
"function": "smootherstep",
"options": {"delta_csm_min": 0.5, "delta_csm_max": 3.0},
}
def __init__(
    self,
    effective_csm_estimator=DEFAULT_EFFECTIVE_CSM_ESTIMATOR,
    weight_estimator=DEFAULT_WEIGHT_ESTIMATOR,
    delta_cn_weight_estimators=None,
    symmetry_measure_type=DEFAULT_SYMMETRY_MEASURE_TYPE,
):
    """Initialize DeltaCSMNbSetWeight.

    :param effective_csm_estimator: Ratio function used for the effective CSM (comparison between neighbors sets).
    :param weight_estimator: Weight estimator within a given neighbors set.
    :param delta_cn_weight_estimators: Specific weight estimators for specific cn
    :param symmetry_measure_type: Type of symmetry measure to be used.
    """
    self.effective_csm_estimator = effective_csm_estimator
    self.effective_csm_estimator_rf = CSMInfiniteRatioFunction.from_dict(effective_csm_estimator)
    self.weight_estimator = weight_estimator
    # The generic estimator may be omitted when only per-delta-cn estimators
    # are provided.
    if self.weight_estimator is not None:
        self.weight_estimator_rf = DeltaCSMRatioFunction.from_dict(weight_estimator)
    self.delta_cn_weight_estimators = delta_cn_weight_estimators
    self.delta_cn_weight_estimators_rfs = {}
    if delta_cn_weight_estimators is not None:
        # One ratio function per coordination-number difference.
        for delta_cn, dcn_w_estimator in delta_cn_weight_estimators.items():
            self.delta_cn_weight_estimators_rfs[delta_cn] = DeltaCSMRatioFunction.from_dict(dcn_w_estimator)
    self.symmetry_measure_type = symmetry_measure_type
    # CSMs above this bound are ignored in the effective CSM calculation.
    self.max_effective_csm = self.effective_csm_estimator["options"]["max_csm"]
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
"""Get the weight of a given neighbors set.
:param nb_set: Neighbors set.
:param structure_environments: Structure environments used to estimate weight.
:param cn_map: Mapping index for this neighbors set.
:param additional_info: Additional information.
:return: Weight of the neighbors set.
"""
effcsm = get_effective_csm(
nb_set=nb_set,
cn_map=cn_map,
structure_environments=structure_environments,
additional_info=additional_info,
symmetry_measure_type=self.symmetry_measure_type,
max_effective_csm=self.max_effective_csm,
effective_csm_estimator_ratio_function=self.effective_csm_estimator_rf,
)
cn = cn_map[0]
isite = nb_set.isite
delta_csm = None
delta_csm_cn_map2 = None
nb_set_weight = 1.0
for cn2, nb_sets in structure_environments.neighbors_sets[isite].items():
if cn2 < cn:
continue
for inb_set2, nb_set2 in enumerate(nb_sets):
if cn == cn2:
continue
effcsm2 = get_effective_csm(
nb_set=nb_set2,
cn_map=(cn2, inb_set2),
structure_environments=structure_environments,
additional_info=additional_info,
symmetry_measure_type=self.symmetry_measure_type,
max_effective_csm=self.max_effective_csm,
effective_csm_estimator_ratio_function=self.effective_csm_estimator_rf,
)
this_delta_csm = effcsm2 - effcsm
if cn2 == cn:
if this_delta_csm < 0.0:
set_info(
additional_info=additional_info,
field="delta_csms",
isite=isite,
cn_map=cn_map,
value=this_delta_csm,
)
set_info(
additional_info=additional_info,
field="delta_csms_weights",
isite=isite,
cn_map=cn_map,
value=0.0,
)
set_info(
additional_info=additional_info,
field="delta_csms_cn_map2",
isite=isite,
cn_map=cn_map,
value=(cn2, inb_set2),
)
return 0.0
else:
dcn = cn2 - cn
if dcn in self.delta_cn_weight_estimators_rfs:
this_delta_csm_weight = self.delta_cn_weight_estimators_rfs[dcn].evaluate(this_delta_csm)
else:
this_delta_csm_weight = self.weight_estimator_rf.evaluate(this_delta_csm)
if this_delta_csm_weight < nb_set_weight:
delta_csm = this_delta_csm
delta_csm_cn_map2 = (cn2, inb_set2)
nb_set_weight = this_delta_csm_weight
set_info(
additional_info=additional_info,
field="delta_csms",
isite=isite,
cn_map=cn_map,
value=delta_csm,
)
set_info(
additional_info=additional_info,
field="delta_csms_weights",
isite=isite,
cn_map=cn_map,
value=nb_set_weight,
)
set_info(
additional_info=additional_info,
field="delta_csms_cn_map2",
isite=isite,
cn_map=cn_map,
value=delta_csm_cn_map2,
)
return nb_set_weight
def __eq__(self, other):
return (
self.effective_csm_estimator == other.effective_csm_estimator
and self.weight_estimator == other.weight_estimator
and self.delta_cn_weight_estimators == other.delta_cn_weight_estimators
and self.symmetry_measure_type == other.symmetry_measure_type
)
def __ne__(self, other):
return not self == other
@classmethod
def delta_cn_specifics(
cls,
delta_csm_mins=None,
delta_csm_maxs=None,
function="smootherstep",
symmetry_measure_type="csm_wcs_ctwcc",
effective_csm_estimator=DEFAULT_EFFECTIVE_CSM_ESTIMATOR,
):
"""Initializes DeltaCSMNbSetWeight from specific coordination number differences.
:param delta_csm_mins: Minimums for each coordination number.
:param delta_csm_maxs: Maximums for each coordination number.
:param function: Ratio function used.
:param symmetry_measure_type: Type of symmetry measure to be used.
:param effective_csm_estimator: Ratio function used for the effective CSM (comparison between neighbors sets).
:return: DeltaCSMNbSetWeight.
"""
if delta_csm_mins is None or delta_csm_maxs is None:
delta_cn_weight_estimators = {
dcn: {
"function": function,
"options": {
"delta_csm_min": 0.25 + dcn * 0.25,
"delta_csm_max": 5.0 + dcn * 0.25,
},
}
for dcn in range(1, 13)
}
else:
delta_cn_weight_estimators = {
dcn: {
"function": function,
"options": {
"delta_csm_min": delta_csm_mins[dcn - 1],
"delta_csm_max": delta_csm_maxs[dcn - 1],
},
}
for dcn in range(1, 13)
}
return cls(
effective_csm_estimator=effective_csm_estimator,
weight_estimator={
"function": function,
"options": {
"delta_csm_min": delta_cn_weight_estimators[12]["options"]["delta_csm_min"],
"delta_csm_max": delta_cn_weight_estimators[12]["options"]["delta_csm_max"],
},
},
delta_cn_weight_estimators=delta_cn_weight_estimators,
symmetry_measure_type=symmetry_measure_type,
)
def as_dict(self):
"""
MSONable dict.
:return:
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"effective_csm_estimator": self.effective_csm_estimator,
"weight_estimator": self.weight_estimator,
"delta_cn_weight_estimators": self.delta_cn_weight_estimators,
"symmetry_measure_type": self.symmetry_measure_type,
}
@classmethod
def from_dict(cls, dd):
"""Initialize from dict.
:param dd: Dict representation of DeltaCSMNbSetWeight.
:return: DeltaCSMNbSetWeight.
"""
return cls(
effective_csm_estimator=dd["effective_csm_estimator"],
weight_estimator=dd["weight_estimator"],
delta_cn_weight_estimators={
int(dcn): dcn_estimator for dcn, dcn_estimator in dd["delta_cn_weight_estimators"].items()
}
if ("delta_cn_weight_estimators" in dd and dd["delta_cn_weight_estimators"] is not None)
else None,
symmetry_measure_type=dd["symmetry_measure_type"],
)
class CNBiasNbSetWeight(NbSetWeight):
    """Weight of neighbors set based on specific biases towards specific coordination numbers."""

    SHORT_NAME = "CNBiasWeight"

    def __init__(self, cn_weights, initialization_options):
        """Initialize CNBiasNbSetWeight.

        :param cn_weights: Weights for each coordination.
        :param initialization_options: Options for initialization (kept for serialization).
        """
        self.cn_weights = cn_weights
        self.initialization_options = initialization_options

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set.

        Only the size (coordination number) of the neighbors set is used here.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments used to estimate weight.
        :param cn_map: Mapping index for this neighbors set.
        :param additional_info: Additional information.
        :return: Weight of the neighbors set.
        """
        return self.cn_weights[len(nb_set)]

    def __eq__(self, other):
        same_weights = self.cn_weights == other.cn_weights
        same_options = self.initialization_options == other.initialization_options
        return same_weights and same_options

    def __ne__(self, other):
        return not self == other

    def as_dict(self):
        """MSONable dict"""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "cn_weights": {str(cn): cnw for cn, cnw in self.cn_weights.items()},
            "initialization_options": self.initialization_options,
        }

    @classmethod
    def from_dict(cls, dd):
        """Initialize from dict.

        :param dd: Dict representation of CNBiasNbSetWeight.
        :return: CNBiasNbSetWeight.
        """
        # Serialized keys are strings; restore the integer coordination numbers.
        weights_by_cn = {int(cn): cnw for cn, cnw in dd["cn_weights"].items()}
        return cls(cn_weights=weights_by_cn, initialization_options=dd["initialization_options"])

    @classmethod
    def linearly_equidistant(cls, weight_cn1, weight_cn13):
        """Initializes linearly equidistant weights for each coordination.

        :param weight_cn1: Weight of coordination 1.
        :param weight_cn13: Weight of coordination 13.
        :return: CNBiasNbSetWeight.
        """
        options = {
            "type": "linearly_equidistant",
            "weight_cn1": weight_cn1,
            "weight_cn13": weight_cn13,
        }
        step = (weight_cn13 - weight_cn1) / 12.0
        weights = {cn: weight_cn1 + (cn - 1) * step for cn in range(1, 14)}
        return cls(cn_weights=weights, initialization_options=options)

    @classmethod
    def geometrically_equidistant(cls, weight_cn1, weight_cn13):
        """Initializes geometrically equidistant weights for each coordination.

        :param weight_cn1: Weight of coordination 1.
        :param weight_cn13: Weight of coordination 13.
        :return: CNBiasNbSetWeight.
        """
        options = {
            "type": "geometrically_equidistant",
            "weight_cn1": weight_cn1,
            "weight_cn13": weight_cn13,
        }
        ratio = np.power(float(weight_cn13) / weight_cn1, 1.0 / 12.0)
        weights = {cn: weight_cn1 * np.power(ratio, cn - 1) for cn in range(1, 14)}
        return cls(cn_weights=weights, initialization_options=options)

    @classmethod
    def explicit(cls, cn_weights):
        """Initializes weights explicitly for each coordination.

        :param cn_weights: Weights for each coordination.
        :return: CNBiasNbSetWeight.
        """
        if set(cn_weights.keys()) != set(range(1, 14)):
            raise ValueError("Weights should be provided for CN 1 to 13")
        return cls(cn_weights=cn_weights, initialization_options={"type": "explicit"})

    @classmethod
    def from_description(cls, dd):
        """Initializes weights from description.

        :param dd: Dictionary description.
        :return: CNBiasNbSetWeight, or None for an unknown description type.
        """
        builders = {
            "linearly_equidistant": lambda: cls.linearly_equidistant(
                weight_cn1=dd["weight_cn1"], weight_cn13=dd["weight_cn13"]
            ),
            "geometrically_equidistant": lambda: cls.geometrically_equidistant(
                weight_cn1=dd["weight_cn1"], weight_cn13=dd["weight_cn13"]
            ),
            "explicit": lambda: cls.explicit(cn_weights=dd["cn_weights"]),
        }
        builder = builders.get(dd["type"])
        return builder() if builder is not None else None
class DistanceAngleAreaNbSetWeight(NbSetWeight):
    """Weight of neighbors set based on the area in the distance-angle space."""

    SHORT_NAME = "DistAngleAreaWeight"

    AC = AdditionalConditions()
    # Default surface in the (normalized distance, normalized angle) plane.
    DEFAULT_SURFACE_DEFINITION = {
        "type": "standard_elliptic",
        "distance_bounds": {"lower": 1.2, "upper": 1.8},
        "angle_bounds": {"lower": 0.1, "upper": 0.8},
    }

    def __init__(
        self,
        weight_type="has_intersection",
        surface_definition=DEFAULT_SURFACE_DEFINITION,
        nb_sets_from_hints="fallback_to_source",
        other_nb_sets="0_weight",
        additional_condition=AC.ONLY_ACB,
        smoothstep_distance=None,
        smoothstep_angle=None,
    ):
        """Initialize DistanceAngleAreaNbSetWeight.

        :param weight_type: Type of weight (only "has_intersection" is implemented).
        :param surface_definition: Definition of the surface.
        :param nb_sets_from_hints: How to deal with neighbors sets obtained from "hints".
        :param other_nb_sets: What to do with other neighbors sets.
        :param additional_condition: Additional condition to be used.
        :param smoothstep_distance: Smoothstep distance.
        :param smoothstep_angle: Smoothstep angle.
        :raises NotImplementedError: For the "has_intersection_smoothstep" weight type.
        :raises ValueError: For any other unsupported option combination.
        """
        self.weight_type = weight_type
        if weight_type == "has_intersection":
            self.area_weight = self.w_area_has_intersection
        elif weight_type == "has_intersection_smoothstep":
            # NOTE(review): accepted as a named option but not implemented yet —
            # see the commented assignment below and the no-op statements in
            # w_area_has_intersection_smoothstep.
            raise NotImplementedError()
            # self.area_weight = self.w_area_has_intersection_smoothstep
        else:
            raise ValueError(f'Weight type is "{weight_type}" while it should be "has_intersection"')
        self.surface_definition = surface_definition
        self.nb_sets_from_hints = nb_sets_from_hints
        self.other_nb_sets = other_nb_sets
        self.additional_condition = additional_condition
        self.smoothstep_distance = smoothstep_distance
        self.smoothstep_angle = smoothstep_angle
        if self.nb_sets_from_hints == "fallback_to_source":
            if self.other_nb_sets == "0_weight":
                self.w_area_intersection_specific = self.w_area_intersection_nbsfh_fbs_onb0
            else:
                raise ValueError('Other nb_sets should be "0_weight"')
        else:
            raise ValueError("Nb_sets from hints should fallback to source")
        # Cache the surface bounds and its lower/upper boundary functions for
        # use in rectangle_crosses_area.
        lower_and_upper_functions = get_lower_and_upper_f(surface_calculation_options=surface_definition)
        self.dmin = surface_definition["distance_bounds"]["lower"]
        self.dmax = surface_definition["distance_bounds"]["upper"]
        self.amin = surface_definition["angle_bounds"]["lower"]
        self.amax = surface_definition["angle_bounds"]["upper"]
        self.f_lower = lower_and_upper_functions["lower"]
        self.f_upper = lower_and_upper_functions["upper"]

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments used to estimate weight.
        :param cn_map: Mapping index for this neighbors set.
        :param additional_info: Additional information.
        :return: Weight of the neighbors set.
        """
        # Dispatch to the area-weight callable selected in __init__.
        return self.area_weight(
            nb_set=nb_set,
            structure_environments=structure_environments,
            cn_map=cn_map,
            additional_info=additional_info,
        )

    def w_area_has_intersection_smoothstep(self, nb_set, structure_environments, cn_map, additional_info):
        """Get intersection of the neighbors set area with the surface.

        NOTE(review): the smoothstep modulation is not implemented — the two
        assignments below are no-ops, so this currently behaves exactly like
        w_area_has_intersection. The corresponding weight type raises
        NotImplementedError in __init__.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments.
        :param cn_map: Mapping index of the neighbors set.
        :param additional_info: Additional information.
        :return: Area intersection between neighbors set and surface.
        """
        w_area = self.w_area_intersection_specific(
            nb_set=nb_set,
            structure_environments=structure_environments,
            cn_map=cn_map,
            additional_info=additional_info,
        )
        if w_area > 0.0:
            if self.smoothstep_distance is not None:
                w_area = w_area  # no-op: distance smoothstep not implemented
            if self.smoothstep_angle is not None:
                w_area = w_area  # no-op: angle smoothstep not implemented
        return w_area

    def w_area_has_intersection(self, nb_set, structure_environments, cn_map, additional_info):
        """Get intersection of the neighbors set area with the surface.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments.
        :param cn_map: Mapping index of the neighbors set.
        :param additional_info: Additional information.
        :return: Area intersection between neighbors set and surface.
        """
        return self.w_area_intersection_specific(
            nb_set=nb_set,
            structure_environments=structure_environments,
            cn_map=cn_map,
            additional_info=additional_info,
        )

    def w_area_intersection_nbsfh_fbs_onb0(self, nb_set, structure_environments, cn_map, additional_info):
        """Get intersection of the neighbors set area with the surface.

        Neighbors sets coming from hints fall back to their source neighbors
        set; any other neighbors set gets a zero weight (hence the
        "nbsfh_fbs_onb0" suffix).

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments.
        :param cn_map: Mapping index of the neighbors set.
        :param additional_info: Additional information.
        :return: 1.0 if any distance-angle rectangle of the (source) neighbors
            set crosses the surface, 0.0 otherwise.
        """
        # First look for direct distance-angle sources matching the
        # additional condition.
        dist_ang_sources = [
            src
            for src in nb_set.sources
            if src["origin"] == "dist_ang_ac_voronoi" and src["ac"] == self.additional_condition
        ]
        if len(dist_ang_sources) > 0:
            for src in dist_ang_sources:
                d1 = src["dp_dict"]["min"]
                d2 = src["dp_dict"]["next"]
                a1 = src["ap_dict"]["next"]
                a2 = src["ap_dict"]["max"]
                if self.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2):
                    return 1.0
            return 0.0
        # Otherwise fall back to the source neighbors set of a hint.
        from_hints_sources = [src for src in nb_set.sources if src["origin"] == "nb_set_hints"]
        if len(from_hints_sources) == 0:
            return 0.0
        if len(from_hints_sources) != 1:
            raise ValueError("Found multiple hints sources for nb_set")
        cn_map_src = from_hints_sources[0]["cn_map_source"]
        nb_set_src = structure_environments.neighbors_sets[nb_set.isite][cn_map_src[0]][cn_map_src[1]]
        dist_ang_sources = [
            src
            for src in nb_set_src.sources
            if src["origin"] == "dist_ang_ac_voronoi" and src["ac"] == self.additional_condition
        ]
        if len(dist_ang_sources) == 0:
            return 0.0
        for src in dist_ang_sources:
            d1 = src["dp_dict"]["min"]
            d2 = src["dp_dict"]["next"]
            a1 = src["ap_dict"]["next"]
            a2 = src["ap_dict"]["max"]
            if self.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2):
                return 1.0
        return 0.0

    def rectangle_crosses_area(self, d1, d2, a1, a2):
        """Whether a given rectangle crosses the area defined by the upper and lower curves.

        The rectangle [d1, d2] x [a1, a2] is classified against the surface
        bounded by self.f_lower/self.f_upper on [self.dmin, self.dmax].

        :param d1: lower d.
        :param d2: upper d.
        :param a1: lower a.
        :param a2: upper a.
        :return: True if the rectangle crosses the area, False otherwise.
        """
        # Case 1: rectangle entirely to the left of the surface.
        if d1 <= self.dmin and d2 <= self.dmin:
            return False
        # Case 6: rectangle entirely to the right of the surface.
        if d1 >= self.dmax and d2 >= self.dmax:
            return False
        # Case 2: left edge outside, right edge within the distance bounds.
        if d1 <= self.dmin and d2 <= self.dmax:
            ld2 = self.f_lower(d2)
            if a2 <= ld2 or a1 >= self.amax:
                return False
            return True
        # Case 3: rectangle spans the whole distance range of the surface.
        if d1 <= self.dmin and d2 >= self.dmax:
            if a2 <= self.amin or a1 >= self.amax:
                return False
            return True
        # Case 4: both edges within the distance bounds.
        if self.dmin <= d1 <= self.dmax and self.dmin <= d2 <= self.dmax:
            ld1 = self.f_lower(d1)
            ld2 = self.f_lower(d2)
            if a2 <= ld1 and a2 <= ld2:
                return False
            ud1 = self.f_upper(d1)
            ud2 = self.f_upper(d2)
            if a1 >= ud1 and a1 >= ud2:
                return False
            return True
        # Case 5: left edge within the bounds, right edge beyond dmax.
        if self.dmin <= d1 <= self.dmax and d2 >= self.dmax:
            ud1 = self.f_upper(d1)
            if a1 >= ud1 or a2 <= self.amin:
                return False
            return True
        raise ValueError("Should not reach this point!")

    def __eq__(self, other):
        return (
            self.weight_type == other.weight_type
            and self.surface_definition == other.surface_definition
            and self.nb_sets_from_hints == other.nb_sets_from_hints
            and self.other_nb_sets == other.other_nb_sets
            and self.additional_condition == other.additional_condition
        )

    def __ne__(self, other):
        return not self == other

    def as_dict(self):
        """MSONable dict"""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "weight_type": self.weight_type,
            "surface_definition": self.surface_definition,
            "nb_sets_from_hints": self.nb_sets_from_hints,
            "other_nb_sets": self.other_nb_sets,
            "additional_condition": self.additional_condition,
        }

    @classmethod
    def from_dict(cls, dd):
        """Initialize from dict.

        :param dd: Dict representation of DistanceAngleAreaNbSetWeight.
        :return: DistanceAngleAreaNbSetWeight.
        """
        return cls(
            weight_type=dd["weight_type"],
            surface_definition=dd["surface_definition"],
            nb_sets_from_hints=dd["nb_sets_from_hints"],
            other_nb_sets=dd["other_nb_sets"],
            additional_condition=dd["additional_condition"],
        )
class DistancePlateauNbSetWeight(NbSetWeight):
    """Weight of neighbors set based on the distance plateau."""

    SHORT_NAME = "DistancePlateauWeight"

    def __init__(self, distance_function=None, weight_function=None):
        """Initialize DistancePlateauNbSetWeight.

        :param distance_function: Distance function to use (default: normalized distance).
        :param weight_function: Ratio function to use (default: inverse smootherstep between 0.2 and 0.4).
        """
        if distance_function is None:
            self.distance_function = {"type": "normalized_distance"}
        else:
            self.distance_function = distance_function
        if weight_function is None:
            self.weight_function = {
                "function": "inverse_smootherstep",
                "options": {"lower": 0.2, "upper": 0.4},
            }
        else:
            self.weight_function = weight_function
        self.weight_rf = RatioFunction.from_dict(self.weight_function)

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments used to estimate weight.
        :param cn_map: Mapping index for this neighbors set.
        :param additional_info: Additional information.
        :return: Weight of the neighbors set.
        """
        return self.weight_rf.eval(nb_set.distance_plateau())

    def __eq__(self, other):
        # BUGFIX: compare the actual parameters, not only the classes, so two
        # instances built with different distance/weight functions are not
        # spuriously equal (consistent with DeltaCSMNbSetWeight.__eq__).
        return (
            self.__class__ == other.__class__
            and self.distance_function == other.distance_function
            and self.weight_function == other.weight_function
        )

    def __ne__(self, other):
        return not self == other

    def as_dict(self):
        """MSONable dict"""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "distance_function": self.distance_function,
            "weight_function": self.weight_function,
        }

    @classmethod
    def from_dict(cls, dd):
        """Initialize from dict.

        :param dd: Dict representation of DistancePlateauNbSetWeight.
        :return: DistancePlateauNbSetWeight.
        """
        return cls(
            distance_function=dd["distance_function"],
            weight_function=dd["weight_function"],
        )
class AnglePlateauNbSetWeight(NbSetWeight):
    """Weight of neighbors set based on the angle plateau."""

    SHORT_NAME = "AnglePlateauWeight"

    def __init__(self, angle_function=None, weight_function=None):
        """Initialize AnglePlateauNbSetWeight.

        :param angle_function: Angle function to use (default: normalized angle).
        :param weight_function: Ratio function to use (default: inverse smootherstep between 0.05 and 0.15).
        """
        if angle_function is None:
            self.angle_function = {"type": "normalized_angle"}
        else:
            self.angle_function = angle_function
        if weight_function is None:
            self.weight_function = {
                "function": "inverse_smootherstep",
                "options": {"lower": 0.05, "upper": 0.15},
            }
        else:
            self.weight_function = weight_function
        self.weight_rf = RatioFunction.from_dict(self.weight_function)

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments used to estimate weight.
        :param cn_map: Mapping index for this neighbors set.
        :param additional_info: Additional information.
        :return: Weight of the neighbors set.
        """
        return self.weight_rf.eval(nb_set.angle_plateau())

    def __eq__(self, other):
        # BUGFIX: compare the actual parameters, not only the classes, so two
        # instances built with different angle/weight functions are not
        # spuriously equal (consistent with DeltaCSMNbSetWeight.__eq__).
        return (
            self.__class__ == other.__class__
            and self.angle_function == other.angle_function
            and self.weight_function == other.weight_function
        )

    def __ne__(self, other):
        return not self == other

    def as_dict(self):
        """MSONable dict"""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "angle_function": self.angle_function,
            "weight_function": self.weight_function,
        }

    @classmethod
    def from_dict(cls, dd):
        """Initialize from dict.

        :param dd: Dict representation of AnglePlateauNbSetWeight.
        :return: AnglePlateauNbSetWeight.
        """
        return cls(angle_function=dd["angle_function"], weight_function=dd["weight_function"])
class DistanceNbSetWeight(NbSetWeight):
    """Weight of neighbors set based on the distance to the closest neighbor outside the set."""

    SHORT_NAME = "DistanceNbSetWeight"

    def __init__(self, weight_function=None, nbs_source="voronoi"):
        """Initialize DistanceNbSetWeight.

        :param weight_function: Ratio function to use (default: smootherstep between 1.2 and 1.3).
        :param nbs_source: Source of the neighbors ("nb_sets" or "voronoi").
        :raises ValueError: If nbs_source is not one of "nb_sets" or "voronoi".
        """
        if weight_function is None:
            self.weight_function = {
                "function": "smootherstep",
                "options": {"lower": 1.2, "upper": 1.3},
            }
        else:
            self.weight_function = weight_function
        self.weight_rf = RatioFunction.from_dict(self.weight_function)
        if nbs_source not in ["nb_sets", "voronoi"]:
            raise ValueError('"nbs_source" should be one of ["nb_sets", "voronoi"]')
        self.nbs_source = nbs_source

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments used to estimate weight.
        :param cn_map: Mapping index for this neighbors set.
        :param additional_info: Additional information.
        :return: Weight of the neighbors set.
        """
        cn = cn_map[0]
        isite = nb_set.isite
        voronoi = structure_environments.voronoi.voronoi_list2[isite]
        if self.nbs_source == "nb_sets":
            # Candidate neighbors are those belonging to the other neighbors
            # sets of this site.
            # NOTE(review): this skips every neighbors set with the same cn,
            # not just the one being weighted (cn_map[1] is unused) — confirm
            # this is intended.
            all_nbs_voro_indices = set()
            for cn2, nb_sets in structure_environments.neighbors_sets[isite].items():
                for inb_set2, nb_set2 in enumerate(nb_sets):
                    if cn == cn2:
                        continue
                    all_nbs_voro_indices.update(nb_set2.site_voronoi_indices)
        elif self.nbs_source == "voronoi":
            # Candidate neighbors are all the Voronoi neighbors of this site.
            all_nbs_voro_indices = set(range(len(voronoi)))
        else:
            raise ValueError('"nbs_source" should be one of ["nb_sets", "voronoi"]')
        # Only neighbors outside this neighbors set are considered.
        all_nbs_indices_except_nb_set = all_nbs_voro_indices.difference(nb_set.site_voronoi_indices)
        normalized_distances = [voronoi[inb]["normalized_distance"] for inb in all_nbs_indices_except_nb_set]
        if len(normalized_distances) == 0:
            # No neighbor outside the set: the set is "isolated", full weight.
            return 1.0
        return self.weight_rf.eval(min(normalized_distances))

    def __eq__(self, other):
        # BUGFIX: compare the actual parameters, not only the classes, so two
        # instances built with different weight functions or neighbor sources
        # are not spuriously equal (consistent with DeltaCSMNbSetWeight.__eq__).
        return (
            self.__class__ == other.__class__
            and self.weight_function == other.weight_function
            and self.nbs_source == other.nbs_source
        )

    def __ne__(self, other):
        return not self == other

    def as_dict(self):
        """MSONable dict"""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "weight_function": self.weight_function,
            "nbs_source": self.nbs_source,
        }

    @classmethod
    def from_dict(cls, dd):
        """Initialize from dict.

        :param dd: Dict representation of DistanceNbSetWeight.
        :return: DistanceNbSetWeight.
        """
        return cls(weight_function=dd["weight_function"], nbs_source=dd["nbs_source"])
class DeltaDistanceNbSetWeight(NbSetWeight):
    """Weight of neighbors set based on the difference of distances."""

    SHORT_NAME = "DeltaDistanceNbSetWeight"

    def __init__(self, weight_function=None, nbs_source="voronoi"):
        """Initialize DeltaDistanceNbSetWeight.

        :param weight_function: Ratio function to use (default: smootherstep between 0.1 and 0.2).
        :param nbs_source: Source of the neighbors ("nb_sets" or "voronoi").
        :raises ValueError: If nbs_source is not one of "nb_sets" or "voronoi".
        """
        if weight_function is None:
            self.weight_function = {
                "function": "smootherstep",
                "options": {"lower": 0.1, "upper": 0.2},
            }
        else:
            self.weight_function = weight_function
        self.weight_rf = RatioFunction.from_dict(self.weight_function)
        if nbs_source not in ["nb_sets", "voronoi"]:
            raise ValueError('"nbs_source" should be one of ["nb_sets", "voronoi"]')
        self.nbs_source = nbs_source

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set.

        :param nb_set: Neighbors set.
        :param structure_environments: Structure environments used to estimate weight.
        :param cn_map: Mapping index for this neighbors set.
        :param additional_info: Additional information.
        :return: Weight of the neighbors set.
        """
        cn = cn_map[0]
        isite = nb_set.isite
        voronoi = structure_environments.voronoi.voronoi_list2[isite]
        if self.nbs_source == "nb_sets":
            # Candidate neighbors are those belonging to the other neighbors
            # sets of this site.
            # NOTE(review): this skips every neighbors set with the same cn,
            # not just the one being weighted (cn_map[1] is unused) — confirm
            # this is intended.
            all_nbs_voro_indices = set()
            for cn2, nb_sets in structure_environments.neighbors_sets[isite].items():
                for inb_set2, nb_set2 in enumerate(nb_sets):
                    if cn == cn2:
                        continue
                    all_nbs_voro_indices.update(nb_set2.site_voronoi_indices)
        elif self.nbs_source == "voronoi":
            # Candidate neighbors are all the Voronoi neighbors of this site.
            all_nbs_voro_indices = set(range(len(voronoi)))
        else:
            raise ValueError('"nbs_source" should be one of ["nb_sets", "voronoi"]')
        all_nbs_indices_except_nb_set = all_nbs_voro_indices.difference(nb_set.site_voronoi_indices)
        normalized_distances = [voronoi[inb]["normalized_distance"] for inb in all_nbs_indices_except_nb_set]
        if len(normalized_distances) == 0:
            # No neighbor outside the set: full weight.
            return 1.0
        if len(nb_set) == 0:
            # Empty neighbors set: no distance gap can be defined.
            return 0.0
        # Weight from the gap between the closest outside neighbor and the
        # farthest neighbor inside the set.
        nb_set_max_normalized_distance = max(nb_set.normalized_distances)
        return self.weight_rf.eval(min(normalized_distances) - nb_set_max_normalized_distance)

    def __eq__(self, other):
        # BUGFIX: compare the actual parameters, not only the classes, so two
        # instances built with different weight functions or neighbor sources
        # are not spuriously equal (consistent with DeltaCSMNbSetWeight.__eq__).
        return (
            self.__class__ == other.__class__
            and self.weight_function == other.weight_function
            and self.nbs_source == other.nbs_source
        )

    def __ne__(self, other):
        return not self == other

    def as_dict(self):
        """MSONable dict"""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "weight_function": self.weight_function,
            "nbs_source": self.nbs_source,
        }

    @classmethod
    def from_dict(cls, dd):
        """Initialize from dict.

        :param dd: Dict representation of DeltaDistanceNbSetWeight.
        :return: DeltaDistanceNbSetWeight.
        """
        return cls(weight_function=dd["weight_function"], nbs_source=dd["nbs_source"])
class WeightedNbSetChemenvStrategy(AbstractChemenvStrategy):
    """
    WeightedNbSetChemenvStrategy.

    Strategy combining a list of NbSetWeight objects: each neighbors set gets
    the product of the weights of all the schemes, and coordination
    environment fractions are derived from these products.
    """

    STRATEGY_DESCRIPTION = " WeightedNbSetChemenvStrategy"
    # Default estimator of the coordination environment fractions from the
    # continuous symmetry measures.
    DEFAULT_CE_ESTIMATOR = {
        "function": "power2_inverse_power2_decreasing",
        "options": {"max_csm": 8.0},
    }
def __init__(
self,
structure_environments=None,
additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE,
nb_set_weights=None,
ce_estimator=DEFAULT_CE_ESTIMATOR,
):
"""
Constructor for the WeightedNbSetChemenvStrategy.
:param structure_environments: StructureEnvironments object containing all the information on the
coordination of the sites in a structure
"""
AbstractChemenvStrategy.__init__(self, structure_environments, symmetry_measure_type=symmetry_measure_type)
self._additional_condition = additional_condition
if nb_set_weights is None:
raise ValueError()
self.nb_set_weights = nb_set_weights
self.ordered_weights = []
for nb_set_weight in self.nb_set_weights:
self.ordered_weights.append({"weight": nb_set_weight, "name": nb_set_weight.SHORT_NAME})
self.ce_estimator = ce_estimator
self.ce_estimator_ratio_function = CSMInfiniteRatioFunction.from_dict(self.ce_estimator)
self.ce_estimator_fractions = self.ce_estimator_ratio_function.fractions
    @property
    def uniquely_determines_coordination_environments(self):
        """Whether this strategy uniquely determines coordination environments.

        Always False: this strategy returns fractions over several possible
        environments rather than a single one.
        """
        return False
    def get_site_coordination_environments_fractions(
        self,
        site,
        isite=None,
        dequivsite=None,
        dthissite=None,
        mysym=None,
        ordered=True,
        min_fraction=0.0,
        return_maps=True,
        return_strategy_dict_info=False,
        return_all=False,
    ):
        """Get the coordination environments of a given site and additional information.

        :param site: Site for which coordination environment is needed.
        :param isite: Index of the site.
        :param dequivsite: Translation of the equivalent site.
        :param dthissite: Translation of this site.
        :param mysym: Symmetry to be applied.
        :param ordered: Whether to order the list by fractions.
        :param min_fraction: Minimum fraction to include in the list.
        :param return_maps: Whether to return cn_maps (identifies all the NeighborsSet used).
        :param return_strategy_dict_info: Whether to add the info about the strategy used.
        :param return_all: Whether to keep zero-weight neighbors sets and report
            unclear environments (as "UNCLEAR:<cn>") instead of dropping them.
        :return: List of Dict with coordination environment, fraction and additional info,
            or None if the site has no neighbors sets.
        """
        # Resolve the equivalent-site information if not given by the caller.
        if isite is None or dequivsite is None or dthissite is None or mysym is None:
            [
                isite,
                dequivsite,
                dthissite,
                mysym,
            ] = self.equivalent_site_index_and_transform(site)
        site_nb_sets = self.structure_environments.neighbors_sets[isite]
        if site_nb_sets is None:
            return None
        # Collect all the (cn, index) maps of the neighbors sets of this site.
        cn_maps = []
        for cn, nb_sets in site_nb_sets.items():
            for inb_set, nb_set in enumerate(nb_sets):
                # CHECK THE ADDITIONAL CONDITION HERE ?
                cn_maps.append((cn, inb_set))
        # Apply each weight scheme in order; unless return_all is set,
        # neighbors sets whose weight drops to zero are eliminated from the
        # next rounds.
        weights_additional_info = {"weights": {isite: {}}}
        for wdict in self.ordered_weights:
            cn_maps_new = []
            weight = wdict["weight"]
            weight_name = wdict["name"]
            for cn_map in cn_maps:
                nb_set = site_nb_sets[cn_map[0]][cn_map[1]]
                w_nb_set = weight.weight(
                    nb_set=nb_set,
                    structure_environments=self.structure_environments,
                    cn_map=cn_map,
                    additional_info=weights_additional_info,
                )
                if cn_map not in weights_additional_info["weights"][isite]:
                    weights_additional_info["weights"][isite][cn_map] = {}
                weights_additional_info["weights"][isite][cn_map][weight_name] = w_nb_set
                if return_all or w_nb_set > 0.0:
                    cn_maps_new.append(cn_map)
            cn_maps = cn_maps_new
        # The weight of a neighbors set is the product of all its scheme
        # weights; fractions are the normalized products.
        # NOTE(review): np.product is a deprecated alias of np.prod.
        for cn_map, weights in weights_additional_info["weights"][isite].items():
            weights_additional_info["weights"][isite][cn_map]["Product"] = np.product(list(weights.values()))
        w_nb_sets = {
            cn_map: weights["Product"] for cn_map, weights in weights_additional_info["weights"][isite].items()
        }
        w_nb_sets_total = np.sum(list(w_nb_sets.values()))
        nb_sets_fractions = {cn_map: w_nb_set / w_nb_sets_total for cn_map, w_nb_set in w_nb_sets.items()}
        for cn_map in weights_additional_info["weights"][isite]:
            weights_additional_info["weights"][isite][cn_map]["NbSetFraction"] = nb_sets_fractions[cn_map]
        ce_symbols = []
        ce_dicts = []
        ce_fractions = []
        ce_dict_fractions = []
        ce_maps = []
        site_ce_list = self.structure_environments.ce_list[isite]
        if return_all:
            # Keep every neighbors set; unresolved environments are reported
            # with an "UNCLEAR:<cn>" symbol and a None ce_dict.
            for cn_map, nb_set_fraction in nb_sets_fractions.items():
                cn = cn_map[0]
                inb_set = cn_map[1]
                site_ce_nb_set = site_ce_list[cn][inb_set]
                if site_ce_nb_set is None:
                    continue
                mingeoms = site_ce_nb_set.minimum_geometries(symmetry_measure_type=self.symmetry_measure_type)
                if len(mingeoms) > 0:
                    csms = [
                        ce_dict["other_symmetry_measures"][self.symmetry_measure_type]
                        for ce_symbol, ce_dict in mingeoms
                    ]
                    fractions = self.ce_estimator_fractions(csms)
                    if fractions is None:
                        # The estimator could not assign fractions.
                        ce_symbols.append(f"UNCLEAR:{cn:d}")
                        ce_dicts.append(None)
                        ce_fractions.append(nb_set_fraction)
                        all_weights = weights_additional_info["weights"][isite][cn_map]
                        dict_fractions = dict(all_weights.items())
                        dict_fractions["CEFraction"] = None
                        dict_fractions["Fraction"] = nb_set_fraction
                        ce_dict_fractions.append(dict_fractions)
                        ce_maps.append(cn_map)
                    else:
                        for ifraction, fraction in enumerate(fractions):
                            ce_symbols.append(mingeoms[ifraction][0])
                            ce_dicts.append(mingeoms[ifraction][1])
                            ce_fractions.append(nb_set_fraction * fraction)
                            all_weights = weights_additional_info["weights"][isite][cn_map]
                            dict_fractions = dict(all_weights.items())
                            dict_fractions["CEFraction"] = fraction
                            dict_fractions["Fraction"] = nb_set_fraction * fraction
                            ce_dict_fractions.append(dict_fractions)
                            ce_maps.append(cn_map)
                else:
                    # No geometry at all for this neighbors set.
                    ce_symbols.append(f"UNCLEAR:{cn:d}")
                    ce_dicts.append(None)
                    ce_fractions.append(nb_set_fraction)
                    all_weights = weights_additional_info["weights"][isite][cn_map]
                    dict_fractions = dict(all_weights.items())
                    dict_fractions["CEFraction"] = None
                    dict_fractions["Fraction"] = nb_set_fraction
                    ce_dict_fractions.append(dict_fractions)
                    ce_maps.append(cn_map)
        else:
            # Only strictly positive fractions are kept.
            # NOTE(review): this branch uses self._symmetry_measure_type while
            # the return_all branch uses self.symmetry_measure_type — confirm
            # both resolve to the same value on AbstractChemenvStrategy.
            for cn_map, nb_set_fraction in nb_sets_fractions.items():
                if nb_set_fraction > 0.0:
                    cn = cn_map[0]
                    inb_set = cn_map[1]
                    site_ce_nb_set = site_ce_list[cn][inb_set]
                    mingeoms = site_ce_nb_set.minimum_geometries(symmetry_measure_type=self._symmetry_measure_type)
                    csms = [
                        ce_dict["other_symmetry_measures"][self._symmetry_measure_type]
                        for ce_symbol, ce_dict in mingeoms
                    ]
                    fractions = self.ce_estimator_fractions(csms)
                    for ifraction, fraction in enumerate(fractions):
                        if fraction > 0.0:
                            ce_symbols.append(mingeoms[ifraction][0])
                            ce_dicts.append(mingeoms[ifraction][1])
                            ce_fractions.append(nb_set_fraction * fraction)
                            all_weights = weights_additional_info["weights"][isite][cn_map]
                            dict_fractions = dict(all_weights.items())
                            dict_fractions["CEFraction"] = fraction
                            dict_fractions["Fraction"] = nb_set_fraction * fraction
                            ce_dict_fractions.append(dict_fractions)
                            ce_maps.append(cn_map)
        # Sort by decreasing fraction if requested.
        if ordered:
            indices = np.argsort(ce_fractions)[::-1]
        else:
            indices = list(range(len(ce_fractions)))
        fractions_info_list = [
            {
                "ce_symbol": ce_symbols[ii],
                "ce_dict": ce_dicts[ii],
                "ce_fraction": ce_fractions[ii],
            }
            for ii in indices
            if ce_fractions[ii] >= min_fraction
        ]
        if return_maps:
            for ifinfo, ii in enumerate(indices):
                if ce_fractions[ii] >= min_fraction:
                    fractions_info_list[ifinfo]["ce_map"] = ce_maps[ii]
        if return_strategy_dict_info:
            for ifinfo, ii in enumerate(indices):
                if ce_fractions[ii] >= min_fraction:
                    fractions_info_list[ifinfo]["strategy_info"] = ce_dict_fractions[ii]
        return fractions_info_list
def get_site_coordination_environment(self, site):
    """Get the coordination environment of a given site.

    Not implemented for this strategy (it yields fractional environments
    rather than a single one); the body is a stub, so the call always
    returns None.

    :param site: Site for which the coordination environment is requested.
    :return: None.
    """
    pass
def get_site_neighbors(self, site):
    """Get the neighbors of a given site.

    Not implemented for this strategy; the body is a stub, so the call
    always returns None.

    :param site: Site for which the neighbors are requested.
    :return: None.
    """
    pass
def get_site_coordination_environments(
    self,
    site,
    isite=None,
    dequivsite=None,
    dthissite=None,
    mysym=None,
    return_maps=False,
):
    """Get the coordination environments of a given site.

    :param site: Site for which coordination environment is needed.
    :param isite: Index of the site.
    :param dequivsite: Translation of the equivalent site.
    :param dthissite: Translation of this site.
    :param mysym: Symmetry to be applied.
    :param return_maps: Whether to return cn_maps (identifies all the NeighborsSet used).
    :return: List of coordination environment.
    """
    # Resolve the equivalent-site information when any piece is missing.
    if isite is None or dequivsite is None or dthissite is None or mysym is None:
        [
            isite,
            dequivsite,
            dthissite,
            mysym,
        ] = self.equivalent_site_index_and_transform(site)
    # NOTE(review): in this class get_site_coordination_environment is a
    # stub that takes only `site` and returns None, so these keyword
    # arguments are not accepted by it (hence the E1123 suppression) --
    # presumably subclasses override it with a richer signature; confirm.
    return [
        self.get_site_coordination_environment(  # pylint: disable=E1123
            site=site,
            isite=isite,
            dequivsite=dequivsite,
            dthissite=dthissite,
            mysym=mysym,
            return_map=return_maps,
        )
    ]
def __eq__(self, other):
    """Equality: same class name and identical additional condition,
    symmetry measure type, neighbor-set weights and CE estimator."""
    if self.__class__.__name__ != other.__class__.__name__:
        return False
    compared_attributes = (
        "_additional_condition",
        "symmetry_measure_type",
        "nb_set_weights",
        "ce_estimator",
    )
    return all(
        getattr(self, name) == getattr(other, name) for name in compared_attributes
    )
def __ne__(self, other):
    """Inequality: the logical negation of equality."""
    is_equal = self == other
    return not is_equal
def as_dict(self):
    """
    Bson-serializable dict representation of the WeightedNbSetChemenvStrategy object.
    :return: Bson-serializable dict representation of the WeightedNbSetChemenvStrategy object.
    """
    serialized = {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
    }
    serialized["additional_condition"] = self._additional_condition
    serialized["symmetry_measure_type"] = self.symmetry_measure_type
    # Each neighbor-set weight serializes itself.
    serialized["nb_set_weights"] = [weight.as_dict() for weight in self.nb_set_weights]
    serialized["ce_estimator"] = self.ce_estimator
    return serialized
@classmethod
def from_dict(cls, d):
    """
    Reconstructs the WeightedNbSetChemenvStrategy object from a dict
    representation created using the as_dict method.
    :param d: dict representation of the WeightedNbSetChemenvStrategy object
    :return: WeightedNbSetChemenvStrategy object
    """
    # Pull exactly the keys the constructor expects; a missing key raises
    # KeyError just like direct indexing would.
    kwargs = {
        key: d[key]
        for key in (
            "additional_condition",
            "symmetry_measure_type",
            "nb_set_weights",
            "ce_estimator",
        )
    }
    return cls(**kwargs)
class MultiWeightsChemenvStrategy(WeightedNbSetChemenvStrategy):
    """
    MultiWeightsChemenvStrategy

    Strategy combining several (optional) neighbors-set weights -- distance/
    angle area, self-CSM, delta-CSM, CN bias, angle and normalized angle/
    distance -- which are forwarded, in that fixed order, to the parent
    WeightedNbSetChemenvStrategy.
    """

    STRATEGY_DESCRIPTION = " Multi Weights ChemenvStrategy"
    # STRATEGY_INFO_FIELDS = ['cn_map_surface_fraction', 'cn_map_surface_weight',
    #                         'cn_map_mean_csm', 'cn_map_csm_weight',
    #                         'cn_map_delta_csm', 'cn_map_delta_csms_cn_map2', 'cn_map_delta_csm_weight',
    #                         'cn_map_cn_weight',
    #                         'cn_map_fraction', 'cn_map_ce_fraction', 'ce_fraction']
    # NOTE(review): this default dict is shared across instances (mutable
    # class-level default) -- callers must not mutate it.
    DEFAULT_CE_ESTIMATOR = {
        "function": "power2_inverse_power2_decreasing",
        "options": {"max_csm": 8.0},
    }
    DEFAULT_DIST_ANG_AREA_WEIGHT = {}  # type: Dict

    def __init__(
        self,
        structure_environments=None,
        additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
        symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE,
        dist_ang_area_weight=None,
        self_csm_weight=None,
        delta_csm_weight=None,
        cn_bias_weight=None,
        angle_weight=None,
        normalized_angle_distance_weight=None,
        ce_estimator=DEFAULT_CE_ESTIMATOR,
    ):
        """
        Constructor for the MultiWeightsChemenvStrategy.
        :param structure_environments: StructureEnvironments object containing all the information on the
            coordination of the sites in a structure
        :param additional_condition: Additional condition used to filter neighbor sets.
        :param symmetry_measure_type: Type of symmetry measure to be used.
        :param dist_ang_area_weight: Distance/angle area weight (or None to skip it).
        :param self_csm_weight: Self-CSM weight (or None to skip it).
        :param delta_csm_weight: Delta-CSM weight (or None to skip it).
        :param cn_bias_weight: Coordination-number bias weight (or None to skip it).
        :param angle_weight: Angle weight (or None to skip it).
        :param normalized_angle_distance_weight: Normalized angle/distance weight (or None to skip it).
        :param ce_estimator: Estimator mapping CSMs to coordination-environment fractions.
        """
        self._additional_condition = additional_condition
        self.dist_ang_area_weight = dist_ang_area_weight
        self.angle_weight = angle_weight
        self.normalized_angle_distance_weight = normalized_angle_distance_weight
        self.self_csm_weight = self_csm_weight
        self.delta_csm_weight = delta_csm_weight
        self.cn_bias_weight = cn_bias_weight
        # Collect the provided weights; the append order below is part of the
        # strategy's behavior (it fixes the order of nb_set_weights).
        self.ordered_weights = []
        nb_sets_weights = []
        if dist_ang_area_weight is not None:
            self.ordered_weights.append({"weight": dist_ang_area_weight, "name": "DistAngArea"})
            nb_sets_weights.append(dist_ang_area_weight)
        if self_csm_weight is not None:
            self.ordered_weights.append({"weight": self_csm_weight, "name": "SelfCSM"})
            nb_sets_weights.append(self_csm_weight)
        if delta_csm_weight is not None:
            self.ordered_weights.append({"weight": delta_csm_weight, "name": "DeltaCSM"})
            nb_sets_weights.append(delta_csm_weight)
        if cn_bias_weight is not None:
            self.ordered_weights.append({"weight": cn_bias_weight, "name": "CNBias"})
            nb_sets_weights.append(cn_bias_weight)
        if angle_weight is not None:
            self.ordered_weights.append({"weight": angle_weight, "name": "Angle"})
            nb_sets_weights.append(angle_weight)
        if normalized_angle_distance_weight is not None:
            self.ordered_weights.append(
                {
                    "weight": normalized_angle_distance_weight,
                    "name": "NormalizedAngDist",
                }
            )
            nb_sets_weights.append(normalized_angle_distance_weight)
        # Build the CSM -> fraction estimator from its dict description.
        self.ce_estimator = ce_estimator
        self.ce_estimator_ratio_function = CSMInfiniteRatioFunction.from_dict(self.ce_estimator)
        self.ce_estimator_fractions = self.ce_estimator_ratio_function.fractions
        WeightedNbSetChemenvStrategy.__init__(
            self,
            structure_environments,
            additional_condition=additional_condition,
            symmetry_measure_type=symmetry_measure_type,
            nb_set_weights=nb_sets_weights,
            ce_estimator=ce_estimator,
        )

    @classmethod
    def stats_article_weights_parameters(cls):
        """Initialize strategy used in the statistics article."""
        self_csm_weight = SelfCSMNbSetWeight(
            weight_estimator={
                "function": "power2_decreasing_exp",
                "options": {"max_csm": 8.0, "alpha": 1.0},
            }
        )
        surface_definition = {
            "type": "standard_elliptic",
            "distance_bounds": {"lower": 1.15, "upper": 2.0},
            "angle_bounds": {"lower": 0.05, "upper": 0.75},
        }
        da_area_weight = DistanceAngleAreaNbSetWeight(
            weight_type="has_intersection",
            surface_definition=surface_definition,
            nb_sets_from_hints="fallback_to_source",
            other_nb_sets="0_weight",
            additional_condition=DistanceAngleAreaNbSetWeight.AC.ONLY_ACB,
        )
        symmetry_measure_type = "csm_wcs_ctwcc"
        delta_weight = DeltaCSMNbSetWeight.delta_cn_specifics()
        # Bias/angle/normalized-angle-distance weights are disabled in the
        # statistics-article setup.
        bias_weight = None
        angle_weight = None
        nad_weight = None
        return cls(
            dist_ang_area_weight=da_area_weight,
            self_csm_weight=self_csm_weight,
            delta_csm_weight=delta_weight,
            cn_bias_weight=bias_weight,
            angle_weight=angle_weight,
            normalized_angle_distance_weight=nad_weight,
            symmetry_measure_type=symmetry_measure_type,
        )

    @property
    def uniquely_determines_coordination_environments(self):
        """Whether this strategy uniquely determines coordination environments."""
        return False

    def __eq__(self, other):
        """Equality: same class name and identical configuration (condition,
        symmetry measure type, every individual weight and the CE estimator)."""
        return (
            self.__class__.__name__ == other.__class__.__name__
            and self._additional_condition == other._additional_condition
            and self.symmetry_measure_type == other.symmetry_measure_type
            and self.dist_ang_area_weight == other.dist_ang_area_weight
            and self.self_csm_weight == other.self_csm_weight
            and self.delta_csm_weight == other.delta_csm_weight
            and self.cn_bias_weight == other.cn_bias_weight
            and self.angle_weight == other.angle_weight
            and self.normalized_angle_distance_weight == other.normalized_angle_distance_weight
            and self.ce_estimator == other.ce_estimator
        )

    def __ne__(self, other):
        """Inequality: the logical negation of equality."""
        return not self == other

    def as_dict(self):
        """
        Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
        :return: Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
        """
        # Optional weights serialize to None when absent so that from_dict
        # can round-trip them.
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "additional_condition": self._additional_condition,
            "symmetry_measure_type": self.symmetry_measure_type,
            "dist_ang_area_weight": self.dist_ang_area_weight.as_dict()
            if self.dist_ang_area_weight is not None
            else None,
            "self_csm_weight": self.self_csm_weight.as_dict() if self.self_csm_weight is not None else None,
            "delta_csm_weight": self.delta_csm_weight.as_dict() if self.delta_csm_weight is not None else None,
            "cn_bias_weight": self.cn_bias_weight.as_dict() if self.cn_bias_weight is not None else None,
            "angle_weight": self.angle_weight.as_dict() if self.angle_weight is not None else None,
            "normalized_angle_distance_weight": self.normalized_angle_distance_weight.as_dict()
            if self.normalized_angle_distance_weight is not None
            else None,
            "ce_estimator": self.ce_estimator,
        }

    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the MultiWeightsChemenvStrategy object from a dict representation of the
        MultiWeightsChemenvStrategy object created using the as_dict method.
        :param d: dict representation of the MultiWeightsChemenvStrategy object
        :return: MultiWeightsChemenvStrategy object
        """
        if d["normalized_angle_distance_weight"] is not None:
            nad_w = NormalizedAngleDistanceNbSetWeight.from_dict(d["normalized_angle_distance_weight"])
        else:
            nad_w = None
        return cls(
            additional_condition=d["additional_condition"],
            symmetry_measure_type=d["symmetry_measure_type"],
            dist_ang_area_weight=DistanceAngleAreaNbSetWeight.from_dict(d["dist_ang_area_weight"])
            if d["dist_ang_area_weight"] is not None
            else None,
            self_csm_weight=SelfCSMNbSetWeight.from_dict(d["self_csm_weight"])
            if d["self_csm_weight"] is not None
            else None,
            delta_csm_weight=DeltaCSMNbSetWeight.from_dict(d["delta_csm_weight"])
            if d["delta_csm_weight"] is not None
            else None,
            cn_bias_weight=CNBiasNbSetWeight.from_dict(d["cn_bias_weight"])
            if d["cn_bias_weight"] is not None
            else None,
            angle_weight=AngleNbSetWeight.from_dict(d["angle_weight"]) if d["angle_weight"] is not None else None,
            normalized_angle_distance_weight=nad_w,
            ce_estimator=d["ce_estimator"],
        )
| vorwerkc/pymatgen | pymatgen/analysis/chemenv/coordination_environments/chemenv_strategies.py | Python | mit | 119,637 | [
"pymatgen"
] | 0ef3597b80699185706673541b720684b89d317f41844c36139be617a0632349 |
# Copyright (C) 2017 Zhixian MA <zxma_sjtu@qqq.com>
"""
A script to preprocess the raw data fetched from Chandra Data Archive (CDA)
The processing steps are as follows
1. Get field of view (FOV) from the fov1.fits file, the CCD_ID is set as 7 as the default
2. Get evt2_sub.fits by dmcopying from the evt2.fits according to the fov_sub.fits
3. Detect point sources by wavdetect, and save the region files
4. Manually filter the point sources
5. Fill the point sources with dmfilth
6. Locate center of the galaxy or cluster, output as a region file
7. Get the 400x400 cut image from the evt2_sub.fits
References
==========
1. An image of diffuse emission
http://cxc.cfa.harvard.edu/ciao/threads/diffuse_emission/
Methods
=======
get_sub(obspath,ccd_id=7)
get the single ccd image
get_ps(obspath,pspath='wavd')
detect point sources with wavdetect
fill_ps(obspath,pspath='wavd')
fill the ps by dmfilth
get_cnt(obspath)
locate the center point
get_img(obspath)
get the cut image
# Caution
1. CIAO software package should be pre-installed in your system
2. Python3 packages like astropy, numpy, scipy should be installed.
"""
import os
import argparse
from astropy.io import fits
import numpy as np
def get_sub(obspath, ccd_id='7'):
    """
    Extract the single-CCD event file of one observation.

    The FOV of the requested CCD is copied out of fov1.fits, then the
    0.5-7 keV events inside that FOV are copied out of evt2.fits into
    evt2_sub.fits, both via the CIAO dmcopy tool.

    Variables
    =========
    obspath: str
        path of the observation
    ccd_id: str
        ID of the ccd, default as 7
    """
    # Input/output paths
    evt_path = os.path.join(obspath, 'evt2.fits')
    fov_path = os.path.join(obspath, 'fov1.fits')
    fov_sub_path = os.path.join(obspath, 'fov1_sub.fits')
    evt_sub_path = os.path.join(obspath, 'evt2_sub.fits')
    print("Processing on sample %s" % obspath)
    # Extract the FOV of the requested CCD.
    fov_cmd = "dmcopy '%s[ccd_id=%s]' %s clobber=yes" % (fov_path, ccd_id, fov_sub_path)
    print(fov_cmd)
    os.system(fov_cmd)
    # Extract the events of that CCD in the 500-7000 eV band, binned at sky=1.
    evt_cmd = "dmcopy '%s[energy=500:7000,ccd_id=%s,sky=region(%s)][bin sky=1]' %s clobber=yes" % (
        evt_path, ccd_id, fov_sub_path, evt_sub_path)
    print(evt_cmd)
    os.system(evt_cmd)
def get_ps(obspath, pspath="wavd"):
    """
    Detect point sources with the CIAO wavdetect tool.

    Runs mkpsfmap on evt2_sub.fits and then wavdetect, writing all products
    into the ``pspath`` subdirectory of the observation (created on demand).

    Variables
    =========
    obspath: str
        path of the observation
    pspath: str
        subdirectory receiving the point-source products
    """
    # Init
    evt_path = os.path.join(obspath, 'evt2_sub.fits')
    pspre = os.path.join(obspath, pspath)
    if not os.path.exists(pspre):
        os.mkdir(pspre)
    # paths of the wavdetect products
    psffile = os.path.join(pspre, 'psf.psfmap')
    regfile = os.path.join(pspre, 'wavd.reg')
    outfile = os.path.join(pspre, 'wavd.fits')
    scellfile = os.path.join(pspre, 'wavd.scell')
    imagefile = os.path.join(pspre, 'wavd.image')
    defnbkgfile = os.path.join(pspre, 'wavd.nbkg')
    # Get psfmap. Each command is built once so the echoed text always
    # matches what is executed (the original echoed the mkpsfmap command
    # without the 'clobber=yes' that was actually run).
    print("punlearn mkpsfmap")
    os.system("punlearn mkpsfmap")
    psf_cmd = "mkpsfmap %s %s energy=1.4967 ecf=0.393 clobber=yes" % (evt_path, psffile)
    print(psf_cmd)
    os.system(psf_cmd)
    # Detect point sources.
    print("punlearn wavdetect")
    os.system("punlearn wavdetect")
    wavd_cmd = (
        "wavdetect infile=%s regfile=%s outfile=%s scellfile=%s imagefile=%s "
        "defnbkgfile=%s psffile=%s scales='2.0 4.0' clobber=yes"
        % (evt_path, regfile, outfile, scellfile, imagefile, defnbkgfile, psffile)
    )
    print(wavd_cmd)
    os.system(wavd_cmd)
def fill_ps(obspath, pspath='wavd'):
    """
    Fill regions of point sources in the fits image.

    Chains the CIAO tools dmmakereg -> roi -> splitroi -> dmfilth to replace
    the manually-vetted point-source regions (wavd_man.reg) with Poisson
    background noise, writing the result to img_fill.fits.

    Variables
    =========
    obspath: str
        path of the observation
    pspath: str
        path of the point sources
    """
    # Init
    evt_path = os.path.join(obspath, 'evt2_sub.fits')
    pspre = os.path.join(obspath, pspath)
    regfile = os.path.join(pspre, 'wavd_man.reg')
    reg_mod = os.path.join(pspre, 'wavd_mod.fits')
    roi_path = os.path.join(pspre, 'sources')
    if not os.path.exists(roi_path):
        os.mkdir(roi_path)
    # parameters (%d is filled in by the roi tool itself, one file per source)
    roi_outsrcfile = os.path.join(roi_path, "src%d.fits")
    # parameters of dmfilth
    # exclude = os.path.join(roi_path, "exclude")
    outfile = os.path.join(obspath, 'img_fill.fits')
    # get region
    print("punlearn dmmakereg")
    os.system("punlearn dmmakereg")
    print("dmmakereg 'region(%s)' %s clobber=yes" % (regfile, reg_mod))
    os.system("dmmakereg 'region(%s)' %s clobber=yes" % (regfile, reg_mod))
    # get roi
    # NOTE(review): the adjacent "" pairs below are Python string
    # concatenation, so the executed command contains bare 'fovregion='
    # and 'streakregion=' (empty values) rather than quoted "" arguments
    # -- presumably intended; confirm against the CIAO roi tool.
    print("punlearn roi")
    os.system("punlearn roi")
    print("roi infile=%s outsrcfile=%s bkgfactor=0.5 fovregion="" streakregion="" radiusmode=mul bkgradius=3 clobber=yes" % (reg_mod, roi_outsrcfile))
    os.system("roi infile=%s outsrcfile=%s bkgfactor=0.5 fovregion="" streakregion="" radiusmode=mul bkgradius=3 clobber=yes" % (reg_mod, roi_outsrcfile))
    # split bkg and ps regions (writes exclude.src.reg / exclude.bg.reg in the
    # current working directory, which dmfilth then reads via @-files)
    print("splitroi '%s' exclude" % (os.path.join(roi_path, "src*.fits")))
    os.system("splitroi '%s' exclude" % (os.path.join(roi_path, "src*.fits")))
    # fill
    print("punlearn dmfilth")
    os.system("punlearn dmfilth")
    print("dmfilth infile=%s outfile=%s method=POISSON srclist=@exclude.src.reg bkglist=@exclude.bg.reg randseed=0 clobber=yes" % (evt_path, outfile))
    os.system("dmfilth infile=%s outfile=%s method=POISSON srclist=@exclude.src.reg bkglist=@exclude.bg.reg randseed=0 clobber=yes" % (evt_path, outfile))
def get_cnt(obspath, box_size=400):
    """Locate the brightest pixel of the filled image and write a box region.

    The peak pixel of img_fill.fits is taken as the center of the galaxy or
    cluster; a square box region (at most 2*box_size pixels on a side, shrunk
    when the peak lies close to an image edge) is written to cnt.reg.

    Variable
    ========
    obspath: str
        path of the observation
    box_size: integer
        half size of the box, in pixels
    """
    # Init
    img_path = os.path.join(obspath, 'img_fill.fits')
    cnt_path = os.path.join(obspath, 'cnt.reg')
    # Load the image; the context manager closes the FITS file afterwards
    # (the original left both the HDU list and the region file open).
    with fits.open(img_path) as hdulist:
        img = hdulist[0]
        rows, cols = img.shape
        # Sky coordinate of the image origin (physical WCS keywords).
        sky_x = img.header['CRVAL1P']
        sky_y = img.header['CRVAL2P']
        # Find the peak; average the indices in case several pixels tie.
        peak = np.max(img.data)
        peak_row, peak_col = np.where(img.data == peak)
        peak_row = np.mean(peak_row)
        peak_col = np.mean(peak_col)
    # Judge overflow: clip the requested box at the image borders.
    row_up = max((peak_row - box_size, 0))
    row_down = min((peak_row + box_size, rows))
    col_left = max((peak_col - box_size, 0))
    col_right = min((peak_col + box_size, cols))
    # region parameters
    # NOTE(review): the +1 presumably converts the 0-based array index to a
    # 1-based FITS pixel before adding the sky offset -- confirm.
    cnt_x = peak_col + 1 + sky_x
    cnt_y = peak_row + 1 + sky_y
    # Shrink the box so it stays symmetric about the peak and inside the image.
    box_row = min((peak_row - row_up, row_down - peak_row))
    box_col = min((peak_col - col_left, col_right - peak_col))
    box_size = np.floor(min((box_row, box_col))) * 2
    # write region (overwriting any previous one); the with-statement
    # guarantees the file is flushed and closed.
    if os.path.exists(cnt_path):
        os.remove(cnt_path)
    with open(cnt_path, 'a') as fp:
        fp.write("box(%f,%f,%d,%d,0)" % (cnt_x, cnt_y, box_size, box_size))
def get_img(obspath, cntpath='cnt.reg'):
    """
    Cut the central region out of the filled image with dmcopy.

    Variables
    =========
    obspath: str
        path of the observation
    cntpath: str
        path of the center region
    """
    # Input/output paths
    evt_path = os.path.join(obspath, 'img_fill.fits')
    cnt_path = os.path.join(obspath, cntpath)
    img_path = os.path.join(obspath, 'img_cut.fits')
    # Reset dmcopy parameters, then cut along the center box region.
    for cmd in (
        "punlearn dmcopy",
        "dmcopy '%s[sky=region(%s)]' %s clobber=yes" % (evt_path, cnt_path, img_path),
    ):
        print(cmd)
        os.system(cmd)
def cmp_cav(obspath):
    """
    Draw circles on the image according to the cavity parameters in Shin2016.

    Reads the box center from cnt.reg and one cavity per line (radius in the
    last tab-separated field) from cavities_chg.reg, then writes one
    ``circle(x,y,r)`` region per cavity to cav_cir.reg.

    Variable
    ========
    obspath: str
        path of the observation
    """
    # Init
    cir_path = os.path.join(obspath, 'cav_cir.reg')
    ref_path = os.path.join(obspath, 'cavities_chg.reg')
    cnt_path = os.path.join(obspath, 'cnt.reg')
    # get center coordinate: cnt.reg holds a single "box(x,y,w,h,angle)"
    # region; strip "box(" and the trailing ")" then split the fields.
    with open(cnt_path, 'r') as fp_cnt:
        cnt = fp_cnt.readline()
    cnt = cnt[4:-1]
    cnt = cnt.split(",")
    # Read the reference cavity list.
    with open(ref_path, 'r') as fp_ref:
        cavs = fp_ref.readlines()
    # Write circles, overwriting any previous region file. The with-statements
    # guarantee every handle is closed and the output flushed (the original
    # never closed its three file objects).
    if os.path.exists(cir_path):
        os.remove(cir_path)
    with open(cir_path, 'a') as fp_cir:
        for l in cavs:
            l1 = l.replace("\n", '')
            params = l1.split('\t')
            fp_cir.write("circle(%s,%s,%s)\n" % (cnt[0], cnt[1], params[-1]))
| myinxd/cavdet | cnn/utils_pre.py | Python | mit | 8,416 | [
"Galaxy"
] | 652272d8f778f0c04b832a8f6ed2a5c9cabffcc6e213406ffc1a6d0f68f1fa91 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.region_health_checks import (
RegionHealthChecksClient,
)
from google.cloud.compute_v1.services.region_health_checks import pagers
from google.cloud.compute_v1.services.region_health_checks import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
    """Dummy client-certificate callback returning static cert/key bytes."""
    cert_bytes, key_bytes = b"cert bytes", b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a distinct endpoint for clients whose default is localhost.

    If default endpoint is localhost, then default mtls endpoint would be the
    same; returning "foo.googleapis.com" instead lets the tests distinguish
    the two. Otherwise the client's own DEFAULT_ENDPOINT is returned.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps googleapis hosts to their mtls variant,
    passes None through, and leaves mtls / non-googleapi hosts untouched."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    # None passes through unchanged.
    assert RegionHealthChecksClient._get_default_mtls_endpoint(None) is None

    # (input, expected) pairs covering plain, already-mtls and foreign hosts.
    cases = [
        (api_endpoint, api_mtls_endpoint),
        (api_mtls_endpoint, api_mtls_endpoint),
        (sandbox_endpoint, sandbox_mtls_endpoint),
        (sandbox_mtls_endpoint, sandbox_mtls_endpoint),
        (non_googleapi, non_googleapi),
    ]
    for given, expected in cases:
        assert RegionHealthChecksClient._get_default_mtls_endpoint(given) == expected
@pytest.mark.parametrize(
    "client_class,transport_name", [(RegionHealthChecksClient, "rest"),]
)
def test_region_health_checks_client_from_service_account_info(
    client_class, transport_name
):
    """from_service_account_info builds a client carrying the factory's
    credentials and targeting the default compute endpoint; the factory is
    mocked so no real key material is needed."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info, transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

    # gRPC hosts carry the port; REST hosts carry the https scheme.
    assert client.transport._host == (
        "compute.googleapis.com{}".format(":443")
        if transport_name in ["grpc", "grpc_asyncio"]
        else "https://{}".format("compute.googleapis.com")
    )
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [(transports.RegionHealthChecksRestTransport, "rest"),],
)
def test_region_health_checks_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """The transport must call with_always_use_jwt_access(True) on the
    credentials when always_use_jwt_access=True and must not call it when
    the flag is False."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class,transport_name", [(RegionHealthChecksClient, "rest"),]
)
def test_region_health_checks_client_from_service_account_file(
    client_class, transport_name
):
    """Both from_service_account_file and its from_service_account_json alias
    build clients carrying the (mocked) factory's credentials and targeting
    the default compute endpoint."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

    # gRPC hosts carry the port; REST hosts carry the https scheme.
    assert client.transport._host == (
        "compute.googleapis.com{}".format(":443")
        if transport_name in ["grpc", "grpc_asyncio"]
        else "https://{}".format("compute.googleapis.com")
    )
def test_region_health_checks_client_get_transport_class():
    """get_transport_class returns the REST transport both by default and
    when asked for "rest" explicitly."""
    available_transports = [
        transports.RegionHealthChecksRestTransport,
    ]
    default_transport = RegionHealthChecksClient.get_transport_class()
    assert default_transport in available_transports

    rest_transport = RegionHealthChecksClient.get_transport_class("rest")
    assert rest_transport == transports.RegionHealthChecksRestTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [(RegionHealthChecksClient, transports.RegionHealthChecksRestTransport, "rest"),],
)
@mock.patch.object(
    RegionHealthChecksClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(RegionHealthChecksClient),
)
def test_region_health_checks_client_client_options(
    client_class, transport_class, transport_name
):
    """Exercise client_options handling in the client constructor.

    Covers: an explicit transport instance vs. a transport name, an explicit
    api_endpoint, GOOGLE_API_USE_MTLS_ENDPOINT set to "never"/"always"/an
    unsupported value, an unsupported GOOGLE_API_USE_CLIENT_CERTIFICATE
    value, and quota_project_id forwarding.
    """
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(RegionHealthChecksClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(RegionHealthChecksClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            RegionHealthChecksClient,
            transports.RegionHealthChecksRestTransport,
            "rest",
            "true",
        ),
        (
            RegionHealthChecksClient,
            transports.RegionHealthChecksRestTransport,
            "rest",
            "false",
        ),
    ],
)
@mock.patch.object(
    RegionHealthChecksClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(RegionHealthChecksClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_region_health_checks_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """Endpoint auto-switch behavior with GOOGLE_API_USE_MTLS_ENDPOINT=auto.

    The endpoint is autoswitched to the default mtls endpoint only when
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a client certificate
    (explicit or ADC-provided) exists; otherwise the regular endpoint and no
    client cert are used.
    """
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [RegionHealthChecksClient])
@mock.patch.object(
    RegionHealthChecksClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(RegionHealthChecksClient),
)
def test_region_health_checks_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source must honor both environment
    variables: the cert source is only returned when
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true", and the endpoint follows
    GOOGLE_API_USE_MTLS_ENDPOINT ("never"/"always"/"auto" with or without a
    default client cert)."""
    mock_client_cert_source = mock.Mock()

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [(RegionHealthChecksClient, transports.RegionHealthChecksRestTransport, "rest"),],
)
def test_region_health_checks_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded verbatim to the transport."""
    scoped_options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=scoped_options, transport=transport_name)
        # The transport must receive the caller's scopes; everything else stays default.
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            RegionHealthChecksClient,
            transports.RegionHealthChecksRestTransport,
            "rest",
            None,
        ),
    ],
)
def test_region_health_checks_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file in client_options is passed straight through to the transport."""
    file_options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=file_options, transport=transport_name)
        # Only credentials_file should be populated; scopes and credentials stay None.
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "request_type", [compute.DeleteRegionHealthCheckRequest, dict,]
)
def test_delete_unary_rest(request_type):
    """delete_unary issues a REST call and maps every Operation field from the
    mocked HTTP response onto the returned proto object.

    Parametrized with both the request class and a plain dict to prove the
    client accepts either form.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # send a request that will satisfy transcoding
    request_init = {
        "project": "sample1",
        "region": "sample2",
        "health_check": "sample3",
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id=205,
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_group_id="operation_group_id_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id=947,
            target_link="target_link_value",
            user="user_value",
            zone="zone_value",
        )
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.delete_unary(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_group_id == "operation_group_id_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == 947
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.zone == "zone_value"
def test_delete_unary_rest_required_fields(
    request_type=compute.DeleteRegionHealthCheckRequest,
):
    """Exercise the required-fields machinery of the delete method.

    Verifies that default-valued required fields are dropped and re-added by
    _get_unset_required_fields, that explicitly-set fields survive, and that a
    request built from defaults produces an empty query-params list.
    """
    transport_class = transports.RegionHealthChecksRestTransport
    request_init = {}
    request_init["health_check"] = ""
    request_init["project"] = ""
    request_init["region"] = ""
    request = request_type(request_init)
    jsonified_request = json.loads(
        request_type.to_json(
            request, including_default_value_fields=False, use_integers_for_enums=False
        )
    )
    # verify fields with default values are dropped
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).delete._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with default values are now present
    jsonified_request["healthCheck"] = "health_check_value"
    jsonified_request["project"] = "project_value"
    jsonified_request["region"] = "region_value"
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).delete._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixing in.
    assert not set(unset_fields) - set(("request_id",))
    jsonified_request.update(unset_fields)
    # verify required fields with non-default values are left alone
    assert "healthCheck" in jsonified_request
    assert jsonified_request["healthCheck"] == "health_check_value"
    assert "project" in jsonified_request
    assert jsonified_request["project"] == "project_value"
    assert "region" in jsonified_request
    assert jsonified_request["region"] == "region_value"
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    request = request_type(request_init)
    # Designate an appropriate value for the returned response.
    return_value = compute.Operation()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields will fail the real version if the http_options
        # expect actual values for those fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A uri without fields and an empty body will force all the
            # request fields to show up in the query_params.
            transcode_result = {
                "uri": "v1/sample_method",
                "method": "delete",
                "query_params": request_init,
            }
            transcode.return_value = transcode_result
            response_value = Response()
            response_value.status_code = 200
            json_return_value = compute.Operation.to_json(return_value)
            response_value._content = json_return_value.encode("UTF-8")
            req.return_value = response_value
            response = client.delete_unary(request)
            expected_params = []
            actual_params = req.call_args.kwargs["params"]
            assert expected_params == actual_params
def test_delete_unary_rest_unset_required_fields():
    """With an empty request, delete reports no unset required fields.

    The expected set is the intersection of the method's optional query
    parameters ("requestId") and its required fields, i.e. the empty set.
    """
    transport = transports.RegionHealthChecksRestTransport(
        # Instantiate the credentials: the bare class was passed previously,
        # which is inconsistent with every other call site in this file.
        credentials=ga_credentials.AnonymousCredentials()
    )
    unset_fields = transport.delete._get_unset_required_fields({})
    assert set(unset_fields) == (
        set(("requestId",)) & set(("healthCheck", "project", "region",))
    )
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_delete_unary_rest_interceptors(null_interceptor):
    """pre_delete and post_delete interceptor hooks each fire exactly once per
    delete_unary call, both with and without a configured interceptor.
    """
    transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.RegionHealthChecksRestInterceptor(),
    )
    client = RegionHealthChecksClient(transport=transport)
    with mock.patch.object(
        type(client.transport._session), "request"
    ) as req, mock.patch.object(
        path_template, "transcode"
    ) as transcode, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "post_delete"
    ) as post, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "pre_delete"
    ) as pre:
        pre.assert_not_called()
        post.assert_not_called()
        transcode.return_value = {
            "method": "post",
            "uri": "my_uri",
            "body": None,
            "query_params": {},
        }
        req.return_value = Response()
        req.return_value.status_code = 200
        req.return_value.request = PreparedRequest()
        req.return_value._content = compute.Operation.to_json(compute.Operation())
        request = compute.DeleteRegionHealthCheckRequest()
        metadata = [
            ("key", "val"),
            ("cephalopod", "squid"),
        ]
        pre.return_value = request, metadata
        post.return_value = compute.Operation
        client.delete_unary(
            request, metadata=[("key", "val"), ("cephalopod", "squid"),]
        )
        pre.assert_called_once()
        post.assert_called_once()
def test_delete_unary_rest_bad_request(
    transport: str = "rest", request_type=compute.DeleteRegionHealthCheckRequest
):
    """An HTTP 400 from the session surfaces as core_exceptions.BadRequest."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Build a request that satisfies transcoding.
    request = request_type(
        {"project": "sample1", "region": "sample2", "health_check": "sample3"}
    )
    # Mock the HTTP layer to return a 400 and expect BadRequest to propagate.
    with mock.patch.object(Session, "request") as req, pytest.raises(
        core_exceptions.BadRequest
    ):
        bad_response = Response()
        bad_response.status_code = 400
        bad_response.request = Request()
        req.return_value = bad_response
        client.delete_unary(request)
def test_delete_unary_rest_flattened():
    """Flattened keyword arguments to delete_unary are transcoded into the
    expected REST URI (project/region/health_check path parameters).
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()
        # get arguments that satisfy an http rule for this method
        sample_request = {
            "project": "sample1",
            "region": "sample2",
            "health_check": "sample3",
        }
        # get truthy value for each flattened field
        mock_args = dict(
            project="project_value",
            region="region_value",
            health_check="health_check_value",
        )
        mock_args.update(sample_request)
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        client.delete_unary(**mock_args)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate(
            "%s/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}"
            % client.transport._host,
            args[1],
        )
def test_delete_unary_rest_flattened_error(transport: str = "rest"):
    """Mixing a request object with flattened fields must raise ValueError."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    flattened_kwargs = dict(
        project="project_value",
        region="region_value",
        health_check="health_check_value",
    )
    # Supplying both forms at once is an error by contract.
    with pytest.raises(ValueError):
        client.delete_unary(
            compute.DeleteRegionHealthCheckRequest(), **flattened_kwargs
        )
def test_delete_unary_rest_error():
    """Constructing the client with the REST transport succeeds without error."""
    RegionHealthChecksClient(
        transport="rest", credentials=ga_credentials.AnonymousCredentials()
    )
@pytest.mark.parametrize("request_type", [compute.GetRegionHealthCheckRequest, dict,])
def test_get_rest(request_type):
    """get issues a REST call and maps every HealthCheck field from the mocked
    HTTP response onto the returned proto object.

    Parametrized with both the request class and a plain dict.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # send a request that will satisfy transcoding
    request_init = {
        "project": "sample1",
        "region": "sample2",
        "health_check": "sample3",
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.HealthCheck(
            check_interval_sec=1884,
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            healthy_threshold=1819,
            id=205,
            kind="kind_value",
            name="name_value",
            region="region_value",
            self_link="self_link_value",
            timeout_sec=1185,
            type_="type__value",
            unhealthy_threshold=2046,
        )
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.HealthCheck.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.get(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.HealthCheck)
    assert response.check_interval_sec == 1884
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.healthy_threshold == 1819
    assert response.id == 205
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.timeout_sec == 1185
    assert response.type_ == "type__value"
    assert response.unhealthy_threshold == 2046
def test_get_rest_required_fields(request_type=compute.GetRegionHealthCheckRequest):
    """Exercise the required-fields machinery of the get method.

    Verifies that default-valued required fields are dropped and re-added by
    _get_unset_required_fields, that explicitly-set fields survive, and that a
    request built from defaults produces an empty query-params list.
    """
    transport_class = transports.RegionHealthChecksRestTransport
    request_init = {}
    request_init["health_check"] = ""
    request_init["project"] = ""
    request_init["region"] = ""
    request = request_type(request_init)
    jsonified_request = json.loads(
        request_type.to_json(
            request, including_default_value_fields=False, use_integers_for_enums=False
        )
    )
    # verify fields with default values are dropped
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).get._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with default values are now present
    jsonified_request["healthCheck"] = "health_check_value"
    jsonified_request["project"] = "project_value"
    jsonified_request["region"] = "region_value"
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).get._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with non-default values are left alone
    assert "healthCheck" in jsonified_request
    assert jsonified_request["healthCheck"] == "health_check_value"
    assert "project" in jsonified_request
    assert jsonified_request["project"] == "project_value"
    assert "region" in jsonified_request
    assert jsonified_request["region"] == "region_value"
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    request = request_type(request_init)
    # Designate an appropriate value for the returned response.
    return_value = compute.HealthCheck()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields will fail the real version if the http_options
        # expect actual values for those fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A uri without fields and an empty body will force all the
            # request fields to show up in the query_params.
            transcode_result = {
                "uri": "v1/sample_method",
                "method": "get",
                "query_params": request_init,
            }
            transcode.return_value = transcode_result
            response_value = Response()
            response_value.status_code = 200
            json_return_value = compute.HealthCheck.to_json(return_value)
            response_value._content = json_return_value.encode("UTF-8")
            req.return_value = response_value
            response = client.get(request)
            expected_params = []
            actual_params = req.call_args.kwargs["params"]
            assert expected_params == actual_params
def test_get_rest_unset_required_fields():
    """With an empty request, get reports no unset required fields.

    get has no optional query parameters, so the intersection with its
    required fields is the empty set.
    """
    transport = transports.RegionHealthChecksRestTransport(
        # Instantiate the credentials: the bare class was passed previously,
        # which is inconsistent with every other call site in this file.
        credentials=ga_credentials.AnonymousCredentials()
    )
    unset_fields = transport.get._get_unset_required_fields({})
    assert set(unset_fields) == (set(()) & set(("healthCheck", "project", "region",)))
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_rest_interceptors(null_interceptor):
    """pre_get and post_get interceptor hooks each fire exactly once per get
    call, both with and without a configured interceptor.
    """
    transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.RegionHealthChecksRestInterceptor(),
    )
    client = RegionHealthChecksClient(transport=transport)
    with mock.patch.object(
        type(client.transport._session), "request"
    ) as req, mock.patch.object(
        path_template, "transcode"
    ) as transcode, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "post_get"
    ) as post, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "pre_get"
    ) as pre:
        pre.assert_not_called()
        post.assert_not_called()
        transcode.return_value = {
            "method": "post",
            "uri": "my_uri",
            "body": None,
            "query_params": {},
        }
        req.return_value = Response()
        req.return_value.status_code = 200
        req.return_value.request = PreparedRequest()
        req.return_value._content = compute.HealthCheck.to_json(compute.HealthCheck())
        request = compute.GetRegionHealthCheckRequest()
        metadata = [
            ("key", "val"),
            ("cephalopod", "squid"),
        ]
        pre.return_value = request, metadata
        post.return_value = compute.HealthCheck
        client.get(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
        pre.assert_called_once()
        post.assert_called_once()
def test_get_rest_bad_request(
    transport: str = "rest", request_type=compute.GetRegionHealthCheckRequest
):
    """An HTTP 400 from the session surfaces as core_exceptions.BadRequest."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Build a request that satisfies transcoding.
    request = request_type(
        {"project": "sample1", "region": "sample2", "health_check": "sample3"}
    )
    # Mock the HTTP layer to return a 400 and expect BadRequest to propagate.
    with mock.patch.object(Session, "request") as req, pytest.raises(
        core_exceptions.BadRequest
    ):
        bad_response = Response()
        bad_response.status_code = 400
        bad_response.request = Request()
        req.return_value = bad_response
        client.get(request)
def test_get_rest_flattened():
    """Flattened keyword arguments to get are transcoded into the expected
    REST URI (project/region/health_check path parameters).
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.HealthCheck()
        # get arguments that satisfy an http rule for this method
        sample_request = {
            "project": "sample1",
            "region": "sample2",
            "health_check": "sample3",
        }
        # get truthy value for each flattened field
        mock_args = dict(
            project="project_value",
            region="region_value",
            health_check="health_check_value",
        )
        mock_args.update(sample_request)
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.HealthCheck.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        client.get(**mock_args)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate(
            "%s/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}"
            % client.transport._host,
            args[1],
        )
def test_get_rest_flattened_error(transport: str = "rest"):
    """Mixing a request object with flattened fields must raise ValueError."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    flattened_kwargs = dict(
        project="project_value",
        region="region_value",
        health_check="health_check_value",
    )
    # Supplying both forms at once is an error by contract.
    with pytest.raises(ValueError):
        client.get(compute.GetRegionHealthCheckRequest(), **flattened_kwargs)
def test_get_rest_error():
    """Constructing the client with the REST transport succeeds without error."""
    RegionHealthChecksClient(
        transport="rest", credentials=ga_credentials.AnonymousCredentials()
    )
@pytest.mark.parametrize(
    "request_type", [compute.InsertRegionHealthCheckRequest, dict,]
)
def test_insert_unary_rest(request_type):
    """insert_unary sends a fully-populated HealthCheck body over REST and maps
    every Operation field from the mocked HTTP response onto the return value.

    Parametrized with both the request class and a plain dict.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    # Exhaustive HealthCheck resource payload covering every sub-message.
    request_init["health_check_resource"] = {
        "check_interval_sec": 1884,
        "creation_timestamp": "creation_timestamp_value",
        "description": "description_value",
        "grpc_health_check": {
            "grpc_service_name": "grpc_service_name_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
        },
        "healthy_threshold": 1819,
        "http2_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "http_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "https_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "id": 205,
        "kind": "kind_value",
        "log_config": {"enable": True},
        "name": "name_value",
        "region": "region_value",
        "self_link": "self_link_value",
        "ssl_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "tcp_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "timeout_sec": 1185,
        "type_": "type__value",
        "unhealthy_threshold": 2046,
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id=205,
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_group_id="operation_group_id_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id=947,
            target_link="target_link_value",
            user="user_value",
            zone="zone_value",
        )
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.insert_unary(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_group_id == "operation_group_id_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == 947
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.zone == "zone_value"
def test_insert_unary_rest_required_fields(
    request_type=compute.InsertRegionHealthCheckRequest,
):
    """Exercise the required-fields machinery of the insert method.

    Verifies that default-valued required fields are dropped and re-added by
    _get_unset_required_fields, that explicitly-set fields survive, and that a
    request built from defaults produces an empty query-params list.
    """
    transport_class = transports.RegionHealthChecksRestTransport
    request_init = {}
    request_init["project"] = ""
    request_init["region"] = ""
    request = request_type(request_init)
    jsonified_request = json.loads(
        request_type.to_json(
            request, including_default_value_fields=False, use_integers_for_enums=False
        )
    )
    # verify fields with default values are dropped
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).insert._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with default values are now present
    jsonified_request["project"] = "project_value"
    jsonified_request["region"] = "region_value"
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).insert._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixing in.
    assert not set(unset_fields) - set(("request_id",))
    jsonified_request.update(unset_fields)
    # verify required fields with non-default values are left alone
    assert "project" in jsonified_request
    assert jsonified_request["project"] == "project_value"
    assert "region" in jsonified_request
    assert jsonified_request["region"] == "region_value"
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    request = request_type(request_init)
    # Designate an appropriate value for the returned response.
    return_value = compute.Operation()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields will fail the real version if the http_options
        # expect actual values for those fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A uri without fields and an empty body will force all the
            # request fields to show up in the query_params.
            transcode_result = {
                "uri": "v1/sample_method",
                "method": "post",
                "query_params": request_init,
            }
            transcode_result["body"] = {}
            transcode.return_value = transcode_result
            response_value = Response()
            response_value.status_code = 200
            json_return_value = compute.Operation.to_json(return_value)
            response_value._content = json_return_value.encode("UTF-8")
            req.return_value = response_value
            response = client.insert_unary(request)
            expected_params = []
            actual_params = req.call_args.kwargs["params"]
            assert expected_params == actual_params
def test_insert_unary_rest_unset_required_fields():
    """With an empty request, insert reports no unset required fields.

    The expected set is the intersection of the method's optional query
    parameters ("requestId") and its required fields, i.e. the empty set.
    """
    transport = transports.RegionHealthChecksRestTransport(
        # Instantiate the credentials: the bare class was passed previously,
        # which is inconsistent with every other call site in this file.
        credentials=ga_credentials.AnonymousCredentials()
    )
    unset_fields = transport.insert._get_unset_required_fields({})
    assert set(unset_fields) == (
        set(("requestId",)) & set(("healthCheckResource", "project", "region",))
    )
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_insert_unary_rest_interceptors(null_interceptor):
    """pre_insert and post_insert interceptor hooks each fire exactly once per
    insert_unary call, both with and without a configured interceptor.
    """
    transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.RegionHealthChecksRestInterceptor(),
    )
    client = RegionHealthChecksClient(transport=transport)
    with mock.patch.object(
        type(client.transport._session), "request"
    ) as req, mock.patch.object(
        path_template, "transcode"
    ) as transcode, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "post_insert"
    ) as post, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "pre_insert"
    ) as pre:
        pre.assert_not_called()
        post.assert_not_called()
        transcode.return_value = {
            "method": "post",
            "uri": "my_uri",
            "body": None,
            "query_params": {},
        }
        req.return_value = Response()
        req.return_value.status_code = 200
        req.return_value.request = PreparedRequest()
        req.return_value._content = compute.Operation.to_json(compute.Operation())
        request = compute.InsertRegionHealthCheckRequest()
        metadata = [
            ("key", "val"),
            ("cephalopod", "squid"),
        ]
        pre.return_value = request, metadata
        post.return_value = compute.Operation
        client.insert_unary(
            request, metadata=[("key", "val"), ("cephalopod", "squid"),]
        )
        pre.assert_called_once()
        post.assert_called_once()
def test_insert_unary_rest_bad_request(
    transport: str = "rest", request_type=compute.InsertRegionHealthCheckRequest
):
    """An HTTP 400 from the session surfaces as core_exceptions.BadRequest,
    even with a fully-populated HealthCheck request body.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    # Exhaustive HealthCheck resource payload covering every sub-message.
    request_init["health_check_resource"] = {
        "check_interval_sec": 1884,
        "creation_timestamp": "creation_timestamp_value",
        "description": "description_value",
        "grpc_health_check": {
            "grpc_service_name": "grpc_service_name_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
        },
        "healthy_threshold": 1819,
        "http2_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "http_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "https_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "id": 205,
        "kind": "kind_value",
        "log_config": {"enable": True},
        "name": "name_value",
        "region": "region_value",
        "self_link": "self_link_value",
        "ssl_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "tcp_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "timeout_sec": 1185,
        "type_": "type__value",
        "unhealthy_threshold": 2046,
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, "request") as req, pytest.raises(
        core_exceptions.BadRequest
    ):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.insert_unary(request)
def test_insert_unary_rest_flattened():
    """Verify insert_unary accepts flattened keyword args over REST.

    Mocks the transport session, calls insert_unary with flattened fields
    (no request object), and checks the resulting HTTP call targets the
    regional healthChecks collection URL.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()
        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2"}
        # get truthy value for each flattened field
        mock_args = dict(
            project="project_value",
            region="region_value",
            health_check_resource=compute.HealthCheck(check_interval_sec=1884),
        )
        mock_args.update(sample_request)
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        client.insert_unary(**mock_args)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        # args[1] is the URI passed to the session; validate it against the
        # expected compute v1 path template.
        assert path_template.validate(
            "%s/compute/v1/projects/{project}/regions/{region}/healthChecks"
            % client.transport._host,
            args[1],
        )
def test_insert_unary_rest_flattened_error(transport: str = "rest"):
    """insert_unary must reject a request object combined with flattened fields."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    request = compute.InsertRegionHealthCheckRequest()
    flattened_kwargs = dict(
        project="project_value",
        region="region_value",
        health_check_resource=compute.HealthCheck(check_interval_sec=1884),
    )
    # Supplying both a request object and flattened fields is an error.
    with pytest.raises(ValueError):
        client.insert_unary(request, **flattened_kwargs)
def test_insert_unary_rest_error():
    """Smoke test: a client with the REST transport can be constructed."""
    transport_name = "rest"
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
@pytest.mark.parametrize("request_type", [compute.ListRegionHealthChecksRequest, dict,])
def test_list_rest(request_type):
    """list() over REST returns a ListPager mirroring the mocked response.

    Parametrized over the proto request type and a plain dict to confirm
    both forms are accepted by the client.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    request = request_type(request_init)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.HealthCheckList(
            id="id_value",
            kind="kind_value",
            next_page_token="next_page_token_value",
            self_link="self_link_value",
        )
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.HealthCheckList.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.list(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPager)
    assert response.id == "id_value"
    assert response.kind == "kind_value"
    assert response.next_page_token == "next_page_token_value"
    assert response.self_link == "self_link_value"
def test_list_rest_required_fields(request_type=compute.ListRegionHealthChecksRequest):
    """Exercise required-field handling for list over REST.

    Verifies that unset required fields are detected, that populated
    required fields are left alone, and that a mocked call sends no
    unexpected query params.
    """
    transport_class = transports.RegionHealthChecksRestTransport
    request_init = {}
    request_init["project"] = ""
    request_init["region"] = ""
    request = request_type(request_init)
    jsonified_request = json.loads(
        request_type.to_json(
            request, including_default_value_fields=False, use_integers_for_enums=False
        )
    )
    # verify fields with default values are dropped
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).list._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with default values are now present
    jsonified_request["project"] = "project_value"
    jsonified_request["region"] = "region_value"
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).list._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixing in.
    assert not set(unset_fields) - set(
        ("filter", "max_results", "order_by", "page_token", "return_partial_success",)
    )
    jsonified_request.update(unset_fields)
    # verify required fields with non-default values are left alone
    assert "project" in jsonified_request
    assert jsonified_request["project"] == "project_value"
    assert "region" in jsonified_request
    assert jsonified_request["region"] == "region_value"
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    request = request_type(request_init)
    # Designate an appropriate value for the returned response.
    return_value = compute.HealthCheckList()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields will fail the real version if the http_options
        # expect actual values for those fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A uri without fields and an empty body will force all the
            # request fields to show up in the query_params.
            transcode_result = {
                "uri": "v1/sample_method",
                "method": "get",
                "query_params": request_init,
            }
            transcode.return_value = transcode_result
            response_value = Response()
            response_value.status_code = 200
            json_return_value = compute.HealthCheckList.to_json(return_value)
            response_value._content = json_return_value.encode("UTF-8")
            req.return_value = response_value
            response = client.list(request)
            # Empty-string required fields should not be forwarded as params.
            expected_params = []
            actual_params = req.call_args.kwargs["params"]
            assert expected_params == actual_params
def test_list_rest_unset_required_fields():
    """list's unset required fields for an empty request: optional ∩ required."""
    rest_transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials
    )
    unset_fields = rest_transport.list._get_unset_required_fields({})
    optional_params = {
        "filter",
        "maxResults",
        "orderBy",
        "pageToken",
        "returnPartialSuccess",
    }
    required_params = {"project", "region"}
    assert set(unset_fields) == optional_params & required_params
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_list_rest_interceptors(null_interceptor):
    """pre_list/post_list interceptor hooks fire exactly once around list().

    Runs with and without a configured interceptor instance; the hooks are
    patched at the interceptor class level, so they are observed either way.
    """
    transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.RegionHealthChecksRestInterceptor(),
    )
    client = RegionHealthChecksClient(transport=transport)
    with mock.patch.object(
        type(client.transport._session), "request"
    ) as req, mock.patch.object(
        path_template, "transcode"
    ) as transcode, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "post_list"
    ) as post, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "pre_list"
    ) as pre:
        pre.assert_not_called()
        post.assert_not_called()
        # Bypass real transcoding with a fixed result.
        transcode.return_value = {
            "method": "post",
            "uri": "my_uri",
            "body": None,
            "query_params": {},
        }
        req.return_value = Response()
        req.return_value.status_code = 200
        req.return_value.request = PreparedRequest()
        req.return_value._content = compute.HealthCheckList.to_json(
            compute.HealthCheckList()
        )
        request = compute.ListRegionHealthChecksRequest()
        metadata = [
            ("key", "val"),
            ("cephalopod", "squid"),
        ]
        pre.return_value = request, metadata
        post.return_value = compute.HealthCheckList
        client.list(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
        pre.assert_called_once()
        post.assert_called_once()
def test_list_rest_bad_request(
    transport: str = "rest", request_type=compute.ListRegionHealthChecksRequest
):
    """An HTTP 400 from the session surfaces as core_exceptions.BadRequest."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # A request that satisfies transcoding.
    request = request_type({"project": "sample1", "region": "sample2"})
    # Build a fake 400 response for the underlying session to return.
    bad_response = Response()
    bad_response.status_code = 400
    bad_response.request = Request()
    with mock.patch.object(Session, "request") as req:
        req.return_value = bad_response
        with pytest.raises(core_exceptions.BadRequest):
            client.list(request)
def test_list_rest_flattened():
    """Verify list accepts flattened keyword args over REST.

    Mocks the transport session, calls list with flattened fields (no
    request object), and checks the resulting HTTP call targets the
    regional healthChecks collection URL.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.HealthCheckList()
        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2"}
        # get truthy value for each flattened field
        mock_args = dict(project="project_value", region="region_value",)
        mock_args.update(sample_request)
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.HealthCheckList.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        client.list(**mock_args)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        # args[1] is the URI passed to the session; validate it against the
        # expected compute v1 path template.
        assert path_template.validate(
            "%s/compute/v1/projects/{project}/regions/{region}/healthChecks"
            % client.transport._host,
            args[1],
        )
def test_list_rest_flattened_error(transport: str = "rest"):
    """list must reject a request object combined with flattened fields."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    request = compute.ListRegionHealthChecksRequest()
    flattened_kwargs = dict(project="project_value", region="region_value")
    # Supplying both a request object and flattened fields is an error.
    with pytest.raises(ValueError):
        client.list(request, **flattened_kwargs)
def test_list_rest_pager(transport: str = "rest"):
    """Pagination for list(): the pager walks all pages and items.

    Feeds four fake pages (6 items total, tokens abc/def/ghi/"") twice —
    once consumed via item iteration, once via page iteration.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # TODO(kbandes): remove this mock unless there's a good reason for it.
        # with mock.patch.object(path_template, 'transcode') as transcode:
        # Set the response as a series of pages
        response = (
            compute.HealthCheckList(
                items=[
                    compute.HealthCheck(),
                    compute.HealthCheck(),
                    compute.HealthCheck(),
                ],
                next_page_token="abc",
            ),
            compute.HealthCheckList(items=[], next_page_token="def",),
            compute.HealthCheckList(
                items=[compute.HealthCheck(),], next_page_token="ghi",
            ),
            compute.HealthCheckList(
                items=[compute.HealthCheck(), compute.HealthCheck(),],
            ),
        )
        # Two responses for two calls
        response = response + response
        # Wrap the values into proper Response objs
        response = tuple(compute.HealthCheckList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode("UTF-8")
            return_val.status_code = 200
        req.side_effect = return_values
        sample_request = {"project": "sample1", "region": "sample2"}
        pager = client.list(request=sample_request)
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, compute.HealthCheck) for i in results)
        pages = list(client.list(request=sample_request).pages)
        # Final page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [compute.PatchRegionHealthCheckRequest, dict,])
def test_patch_unary_rest(request_type):
    """patch_unary over REST returns an Operation mirroring the mocked response.

    Parametrized over the proto request type and a plain dict; the fully
    populated health_check_resource body exercises transcoding of every field.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # send a request that will satisfy transcoding
    request_init = {
        "project": "sample1",
        "region": "sample2",
        "health_check": "sample3",
    }
    request_init["health_check_resource"] = {
        "check_interval_sec": 1884,
        "creation_timestamp": "creation_timestamp_value",
        "description": "description_value",
        "grpc_health_check": {
            "grpc_service_name": "grpc_service_name_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
        },
        "healthy_threshold": 1819,
        "http2_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "http_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "https_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "id": 205,
        "kind": "kind_value",
        "log_config": {"enable": True},
        "name": "name_value",
        "region": "region_value",
        "self_link": "self_link_value",
        "ssl_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "tcp_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "timeout_sec": 1185,
        "type_": "type__value",
        "unhealthy_threshold": 2046,
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id=205,
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_group_id="operation_group_id_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id=947,
            target_link="target_link_value",
            user="user_value",
            zone="zone_value",
        )
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.patch_unary(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_group_id == "operation_group_id_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == 947
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.zone == "zone_value"
def test_patch_unary_rest_required_fields(
    request_type=compute.PatchRegionHealthCheckRequest,
):
    """Exercise required-field handling for patch_unary over REST.

    Verifies that unset required fields are detected, that populated
    required fields are left alone, and that a mocked call sends no
    unexpected query params.
    """
    transport_class = transports.RegionHealthChecksRestTransport
    request_init = {}
    request_init["health_check"] = ""
    request_init["project"] = ""
    request_init["region"] = ""
    request = request_type(request_init)
    jsonified_request = json.loads(
        request_type.to_json(
            request, including_default_value_fields=False, use_integers_for_enums=False
        )
    )
    # verify fields with default values are dropped
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).patch._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with default values are now present
    jsonified_request["healthCheck"] = "health_check_value"
    jsonified_request["project"] = "project_value"
    jsonified_request["region"] = "region_value"
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).patch._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixing in.
    assert not set(unset_fields) - set(("request_id",))
    jsonified_request.update(unset_fields)
    # verify required fields with non-default values are left alone
    assert "healthCheck" in jsonified_request
    assert jsonified_request["healthCheck"] == "health_check_value"
    assert "project" in jsonified_request
    assert jsonified_request["project"] == "project_value"
    assert "region" in jsonified_request
    assert jsonified_request["region"] == "region_value"
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    request = request_type(request_init)
    # Designate an appropriate value for the returned response.
    return_value = compute.Operation()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields will fail the real version if the http_options
        # expect actual values for those fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A uri without fields and an empty body will force all the
            # request fields to show up in the query_params.
            transcode_result = {
                "uri": "v1/sample_method",
                "method": "patch",
                "query_params": request_init,
            }
            transcode_result["body"] = {}
            transcode.return_value = transcode_result
            response_value = Response()
            response_value.status_code = 200
            json_return_value = compute.Operation.to_json(return_value)
            response_value._content = json_return_value.encode("UTF-8")
            req.return_value = response_value
            response = client.patch_unary(request)
            # Empty-string required fields should not be forwarded as params.
            expected_params = []
            actual_params = req.call_args.kwargs["params"]
            assert expected_params == actual_params
def test_patch_unary_rest_unset_required_fields():
    """patch's unset required fields for an empty request: optional ∩ required."""
    rest_transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials
    )
    unset_fields = rest_transport.patch._get_unset_required_fields({})
    optional_params = {"requestId"}
    required_params = {"healthCheck", "healthCheckResource", "project", "region"}
    assert set(unset_fields) == optional_params & required_params
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_patch_unary_rest_interceptors(null_interceptor):
    """pre_patch/post_patch interceptor hooks fire exactly once around patch_unary().

    Runs with and without a configured interceptor instance; the hooks are
    patched at the interceptor class level, so they are observed either way.
    """
    transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.RegionHealthChecksRestInterceptor(),
    )
    client = RegionHealthChecksClient(transport=transport)
    with mock.patch.object(
        type(client.transport._session), "request"
    ) as req, mock.patch.object(
        path_template, "transcode"
    ) as transcode, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "post_patch"
    ) as post, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "pre_patch"
    ) as pre:
        pre.assert_not_called()
        post.assert_not_called()
        # Bypass real transcoding with a fixed result.
        transcode.return_value = {
            "method": "post",
            "uri": "my_uri",
            "body": None,
            "query_params": {},
        }
        req.return_value = Response()
        req.return_value.status_code = 200
        req.return_value.request = PreparedRequest()
        req.return_value._content = compute.Operation.to_json(compute.Operation())
        request = compute.PatchRegionHealthCheckRequest()
        metadata = [
            ("key", "val"),
            ("cephalopod", "squid"),
        ]
        pre.return_value = request, metadata
        post.return_value = compute.Operation
        client.patch_unary(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
        pre.assert_called_once()
        post.assert_called_once()
def test_patch_unary_rest_bad_request(
    transport: str = "rest", request_type=compute.PatchRegionHealthCheckRequest
):
    """An HTTP 400 during patch_unary surfaces as core_exceptions.BadRequest.

    The fully populated resource body matches the success-path test so the
    request transcodes; only the mocked status code differs.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # send a request that will satisfy transcoding
    request_init = {
        "project": "sample1",
        "region": "sample2",
        "health_check": "sample3",
    }
    request_init["health_check_resource"] = {
        "check_interval_sec": 1884,
        "creation_timestamp": "creation_timestamp_value",
        "description": "description_value",
        "grpc_health_check": {
            "grpc_service_name": "grpc_service_name_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
        },
        "healthy_threshold": 1819,
        "http2_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "http_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "https_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "id": 205,
        "kind": "kind_value",
        "log_config": {"enable": True},
        "name": "name_value",
        "region": "region_value",
        "self_link": "self_link_value",
        "ssl_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "tcp_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "timeout_sec": 1185,
        "type_": "type__value",
        "unhealthy_threshold": 2046,
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, "request") as req, pytest.raises(
        core_exceptions.BadRequest
    ):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.patch_unary(request)
def test_patch_unary_rest_flattened():
    """Verify patch_unary accepts flattened keyword args over REST.

    Mocks the transport session, calls patch_unary with flattened fields
    (no request object), and checks the resulting HTTP call targets the
    per-resource healthChecks URL.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()
        # get arguments that satisfy an http rule for this method
        sample_request = {
            "project": "sample1",
            "region": "sample2",
            "health_check": "sample3",
        }
        # get truthy value for each flattened field
        mock_args = dict(
            project="project_value",
            region="region_value",
            health_check="health_check_value",
            health_check_resource=compute.HealthCheck(check_interval_sec=1884),
        )
        mock_args.update(sample_request)
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        client.patch_unary(**mock_args)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        # args[1] is the URI passed to the session; validate it against the
        # expected compute v1 path template.
        assert path_template.validate(
            "%s/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}"
            % client.transport._host,
            args[1],
        )
def test_patch_unary_rest_flattened_error(transport: str = "rest"):
    """patch_unary must reject a request object combined with flattened fields."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    request = compute.PatchRegionHealthCheckRequest()
    flattened_kwargs = dict(
        project="project_value",
        region="region_value",
        health_check="health_check_value",
        health_check_resource=compute.HealthCheck(check_interval_sec=1884),
    )
    # Supplying both a request object and flattened fields is an error.
    with pytest.raises(ValueError):
        client.patch_unary(request, **flattened_kwargs)
def test_patch_unary_rest_error():
    """Smoke test: a client with the REST transport can be constructed."""
    transport_name = "rest"
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
@pytest.mark.parametrize(
    "request_type", [compute.UpdateRegionHealthCheckRequest, dict,]
)
def test_update_unary_rest(request_type):
    """update_unary over REST returns an Operation mirroring the mocked response.

    Parametrized over the proto request type and a plain dict; the fully
    populated health_check_resource body exercises transcoding of every field.
    """
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # send a request that will satisfy transcoding
    request_init = {
        "project": "sample1",
        "region": "sample2",
        "health_check": "sample3",
    }
    request_init["health_check_resource"] = {
        "check_interval_sec": 1884,
        "creation_timestamp": "creation_timestamp_value",
        "description": "description_value",
        "grpc_health_check": {
            "grpc_service_name": "grpc_service_name_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
        },
        "healthy_threshold": 1819,
        "http2_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "http_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "https_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "id": 205,
        "kind": "kind_value",
        "log_config": {"enable": True},
        "name": "name_value",
        "region": "region_value",
        "self_link": "self_link_value",
        "ssl_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "tcp_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "timeout_sec": 1185,
        "type_": "type__value",
        "unhealthy_threshold": 2046,
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id=205,
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_group_id="operation_group_id_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id=947,
            target_link="target_link_value",
            user="user_value",
            zone="zone_value",
        )
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.update_unary(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_group_id == "operation_group_id_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == 947
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.zone == "zone_value"
def test_update_unary_rest_required_fields(
    request_type=compute.UpdateRegionHealthCheckRequest,
):
    """Exercise required-field handling for update_unary over REST.

    Verifies that unset required fields are detected, that populated
    required fields are left alone, and that a mocked call sends no
    unexpected query params.
    """
    transport_class = transports.RegionHealthChecksRestTransport
    request_init = {}
    request_init["health_check"] = ""
    request_init["project"] = ""
    request_init["region"] = ""
    request = request_type(request_init)
    jsonified_request = json.loads(
        request_type.to_json(
            request, including_default_value_fields=False, use_integers_for_enums=False
        )
    )
    # verify fields with default values are dropped
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).update._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with default values are now present
    jsonified_request["healthCheck"] = "health_check_value"
    jsonified_request["project"] = "project_value"
    jsonified_request["region"] = "region_value"
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).update._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixing in.
    assert not set(unset_fields) - set(("request_id",))
    jsonified_request.update(unset_fields)
    # verify required fields with non-default values are left alone
    assert "healthCheck" in jsonified_request
    assert jsonified_request["healthCheck"] == "health_check_value"
    assert "project" in jsonified_request
    assert jsonified_request["project"] == "project_value"
    assert "region" in jsonified_request
    assert jsonified_request["region"] == "region_value"
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    request = request_type(request_init)
    # Designate an appropriate value for the returned response.
    return_value = compute.Operation()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields will fail the real version if the http_options
        # expect actual values for those fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A uri without fields and an empty body will force all the
            # request fields to show up in the query_params.
            transcode_result = {
                "uri": "v1/sample_method",
                "method": "put",
                "query_params": request_init,
            }
            transcode_result["body"] = {}
            transcode.return_value = transcode_result
            response_value = Response()
            response_value.status_code = 200
            json_return_value = compute.Operation.to_json(return_value)
            response_value._content = json_return_value.encode("UTF-8")
            req.return_value = response_value
            response = client.update_unary(request)
            # Empty-string required fields should not be forwarded as params.
            expected_params = []
            actual_params = req.call_args.kwargs["params"]
            assert expected_params == actual_params
def test_update_unary_rest_unset_required_fields():
    """Verify which of update()'s required fields may be left unset.

    The unset required fields are the intersection of the method's optional
    runtime parameters (requestId) with all of its required request fields.
    """
    transport = transports.RegionHealthChecksRestTransport(
        # Fix: instantiate the credentials; previously the class object itself
        # was passed as `credentials`, which only worked by accident because
        # this code path never exercises them.
        credentials=ga_credentials.AnonymousCredentials()
    )

    unset_fields = transport.update._get_unset_required_fields({})
    assert set(unset_fields) == (
        set(("requestId",))
        & set(("healthCheck", "healthCheckResource", "project", "region",))
    )
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_update_unary_rest_interceptors(null_interceptor):
    """update() must call the transport's pre_update/post_update hooks once each.

    Runs once with no interceptor and once with the default interceptor; the
    HTTP session and URL transcoding are mocked so no real request is made.
    """
    transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.RegionHealthChecksRestInterceptor(),
    )
    client = RegionHealthChecksClient(transport=transport)
    # Patch the session request, transcode(), and both interceptor hooks so
    # the call sequence can be observed without network traffic.
    with mock.patch.object(
        type(client.transport._session), "request"
    ) as req, mock.patch.object(
        path_template, "transcode"
    ) as transcode, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "post_update"
    ) as post, mock.patch.object(
        transports.RegionHealthChecksRestInterceptor, "pre_update"
    ) as pre:
        pre.assert_not_called()
        post.assert_not_called()
        transcode.return_value = {
            "method": "post",
            "uri": "my_uri",
            "body": None,
            "query_params": {},
        }
        # Fake a successful HTTP response carrying a serialized Operation.
        req.return_value = Response()
        req.return_value.status_code = 200
        req.return_value.request = PreparedRequest()
        req.return_value._content = compute.Operation.to_json(compute.Operation())
        request = compute.UpdateRegionHealthCheckRequest()
        metadata = [
            ("key", "val"),
            ("cephalopod", "squid"),
        ]
        # pre_update returns (request, metadata); post_update returns the response type.
        pre.return_value = request, metadata
        post.return_value = compute.Operation
        client.update_unary(
            request, metadata=[("key", "val"), ("cephalopod", "squid"),]
        )
        pre.assert_called_once()
        post.assert_called_once()
def test_update_unary_rest_bad_request(
    transport: str = "rest", request_type=compute.UpdateRegionHealthCheckRequest
):
    """An HTTP 400 from the server must surface as core_exceptions.BadRequest."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # send a request that will satisfy transcoding
    request_init = {
        "project": "sample1",
        "region": "sample2",
        "health_check": "sample3",
    }
    # Fully populated HealthCheck body so required-field validation passes.
    request_init["health_check_resource"] = {
        "check_interval_sec": 1884,
        "creation_timestamp": "creation_timestamp_value",
        "description": "description_value",
        "grpc_health_check": {
            "grpc_service_name": "grpc_service_name_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
        },
        "healthy_threshold": 1819,
        "http2_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "http_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "https_health_check": {
            "host": "host_value",
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request_path": "request_path_value",
            "response": "response_value",
        },
        "id": 205,
        "kind": "kind_value",
        "log_config": {"enable": True},
        "name": "name_value",
        "region": "region_value",
        "self_link": "self_link_value",
        "ssl_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "tcp_health_check": {
            "port": 453,
            "port_name": "port_name_value",
            "port_specification": "port_specification_value",
            "proxy_header": "proxy_header_value",
            "request": "request_value",
            "response": "response_value",
        },
        "timeout_sec": 1185,
        "type_": "type__value",
        "unhealthy_threshold": 2046,
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, "request") as req, pytest.raises(
        core_exceptions.BadRequest
    ):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.update_unary(request)
def test_update_unary_rest_flattened():
    """update() accepts flattened arguments and hits the expected URI."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()
        # get arguments that satisfy an http rule for this method
        sample_request = {
            "project": "sample1",
            "region": "sample2",
            "health_check": "sample3",
        }
        # get truthy value for each flattened field
        mock_args = dict(
            project="project_value",
            region="region_value",
            health_check="health_check_value",
            health_check_resource=compute.HealthCheck(check_interval_sec=1884),
        )
        mock_args.update(sample_request)
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        client.update_unary(**mock_args)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        # args[1] is the URL actually requested; validate it against the
        # method's REST path template.
        assert path_template.validate(
            "%s/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}"
            % client.transport._host,
            args[1],
        )
def test_update_unary_rest_flattened_error(transport: str = "rest"):
    """A populated request object plus flattened fields must raise ValueError."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Supplying both a request object and flattened arguments is ambiguous,
    # so the client refuses the call.
    flattened_kwargs = dict(
        project="project_value",
        region="region_value",
        health_check="health_check_value",
        health_check_resource=compute.HealthCheck(check_interval_sec=1884),
    )
    with pytest.raises(ValueError):
        client.update_unary(
            compute.UpdateRegionHealthCheckRequest(), **flattened_kwargs
        )
def test_update_unary_rest_error():
    """Smoke test: constructing a REST client for update must not raise."""
    RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
    )
def test_credentials_transport_error():
    """Mutually exclusive client-construction options must raise ValueError.

    A transport instance already carries its own credentials, so combining it
    with credentials, a credentials file, an api_key, or scopes is rejected;
    api_key together with explicit credentials is likewise rejected.
    """
    # It is an error to provide credentials and a transport instance.
    transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = RegionHealthChecksClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = RegionHealthChecksClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = RegionHealthChecksClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = RegionHealthChecksClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = RegionHealthChecksClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A pre-built transport instance is adopted verbatim by the client."""
    custom_transport = transports.RegionHealthChecksRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = RegionHealthChecksClient(transport=custom_transport)
    # The client must use exactly the object it was given, not a copy.
    assert client.transport is custom_transport
@pytest.mark.parametrize(
    "transport_class", [transports.RegionHealthChecksRestTransport,]
)
def test_transport_adc(transport_class):
    """A transport built without credentials falls back to ADC."""
    with mock.patch.object(google.auth, "default") as default_creds:
        default_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
    # google.auth.default() must have been consulted exactly once.
    default_creds.assert_called_once()
def test_region_health_checks_base_transport_error():
    """credentials plus credentials_file must raise DuplicateCredentialArgs."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.RegionHealthChecksTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_region_health_checks_base_transport():
    """Every RPC method on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.compute_v1.services.region_health_checks.transports.RegionHealthChecksTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.RegionHealthChecksTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "delete",
        "get",
        "insert",
        "list",
        "patch",
        "update",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    # close() is abstract on the base transport as well.
    with pytest.raises(NotImplementedError):
        transport.close()
def test_region_health_checks_base_transport_with_credentials_file():
    """A credentials_file is loaded with the compute default scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.compute_v1.services.region_health_checks.transports.RegionHealthChecksTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.RegionHealthChecksTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        # The loader must receive the file, no explicit scopes, the service's
        # default scopes, and the quota project pass-through.
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/cloud-platform",
            ),
            quota_project_id="octopus",
        )
def test_region_health_checks_base_transport_with_adc():
    """With neither credentials nor a file, the base transport uses ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.compute_v1.services.region_health_checks.transports.RegionHealthChecksTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.RegionHealthChecksTransport()
        adc.assert_called_once()
def test_region_health_checks_auth_adc():
    """A client built without credentials consults ADC with the default scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        RegionHealthChecksClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/cloud-platform",
            ),
            quota_project_id=None,
        )
def test_region_health_checks_http_transport_client_cert_source_for_mtls():
    """client_cert_source_for_mtls is forwarded to the session's mTLS setup."""
    cred = ga_credentials.AnonymousCredentials()
    with mock.patch(
        "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
    ) as mock_configure_mtls_channel:
        transports.RegionHealthChecksRestTransport(
            credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
        )
        # The callback itself, not its result, is handed to the session.
        mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_health_checks_host_no_port(transport_name):
    """An endpoint without a port resolves to the https REST host."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="compute.googleapis.com"
        ),
        transport=transport_name,
    )
    # gRPC transports get an explicit :443; REST gets the https scheme.
    if transport_name in ["grpc", "grpc_asyncio"]:
        expected_host = "compute.googleapis.com:443"
    else:
        expected_host = "https://compute.googleapis.com"
    assert client.transport._host == expected_host
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_health_checks_host_with_port(transport_name):
    """An endpoint with an explicit port keeps that port in the resolved host."""
    client = RegionHealthChecksClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="compute.googleapis.com:8000"
        ),
        transport=transport_name,
    )
    # REST transports prepend the https scheme; gRPC keeps the bare host:port.
    if transport_name in ["grpc", "grpc_asyncio"]:
        expected_host = "compute.googleapis.com:8000"
    else:
        expected_host = "https://compute.googleapis.com:8000"
    assert client.transport._host == expected_host
def test_common_billing_account_path():
    """common_billing_account_path builds billingAccounts/{billing_account}."""
    billing_account = "squid"
    actual = RegionHealthChecksClient.common_billing_account_path(billing_account)
    assert actual == f"billingAccounts/{billing_account}"
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts common_billing_account_path."""
    expected = {"billing_account": "clam"}
    path = RegionHealthChecksClient.common_billing_account_path(**expected)
    # Round-trip: building then parsing must recover the original fields.
    assert RegionHealthChecksClient.parse_common_billing_account_path(path) == expected
def test_common_folder_path():
    """common_folder_path builds the canonical folders/{folder} name."""
    folder = "whelk"
    actual = RegionHealthChecksClient.common_folder_path(folder)
    assert actual == f"folders/{folder}"
def test_parse_common_folder_path():
    """parse_common_folder_path inverts common_folder_path."""
    expected = {"folder": "octopus"}
    path = RegionHealthChecksClient.common_folder_path(**expected)
    # Round-trip: building then parsing must recover the original fields.
    assert RegionHealthChecksClient.parse_common_folder_path(path) == expected
def test_common_organization_path():
    """common_organization_path builds organizations/{organization}."""
    organization = "oyster"
    actual = RegionHealthChecksClient.common_organization_path(organization)
    assert actual == f"organizations/{organization}"
def test_parse_common_organization_path():
    """parse_common_organization_path inverts common_organization_path."""
    expected = {"organization": "nudibranch"}
    path = RegionHealthChecksClient.common_organization_path(**expected)
    # Round-trip: building then parsing must recover the original fields.
    assert RegionHealthChecksClient.parse_common_organization_path(path) == expected
def test_common_project_path():
    """common_project_path builds the canonical projects/{project} name."""
    project = "cuttlefish"
    actual = RegionHealthChecksClient.common_project_path(project)
    assert actual == f"projects/{project}"
def test_parse_common_project_path():
    """parse_common_project_path inverts common_project_path."""
    expected = {"project": "mussel"}
    path = RegionHealthChecksClient.common_project_path(**expected)
    # Round-trip: building then parsing must recover the original fields.
    assert RegionHealthChecksClient.parse_common_project_path(path) == expected
def test_common_location_path():
    """common_location_path builds projects/{project}/locations/{location}."""
    project = "winkle"
    location = "nautilus"
    actual = RegionHealthChecksClient.common_location_path(project, location)
    assert actual == f"projects/{project}/locations/{location}"
def test_parse_common_location_path():
    """parse_common_location_path inverts common_location_path."""
    expected = {"project": "scallop", "location": "abalone"}
    path = RegionHealthChecksClient.common_location_path(**expected)
    # Round-trip: building then parsing must recover the original fields.
    assert RegionHealthChecksClient.parse_common_location_path(path) == expected
def test_client_with_default_client_info():
    """client_info is forwarded to _prep_wrapped_messages.

    Verified both when the client constructs the transport and when the
    transport class is instantiated directly.
    """
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.RegionHealthChecksTransport, "_prep_wrapped_messages"
    ) as prep:
        client = RegionHealthChecksClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.RegionHealthChecksTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = RegionHealthChecksClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
def test_transport_close():
    """Closing the client via its context manager closes the REST session."""
    # Map each transport name to the attribute whose close() should fire.
    for transport_name, close_attr in {"rest": "_session"}.items():
        client = RegionHealthChecksClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_attr)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager closes its transport on exit."""
    for transport_name in ["rest"]:
        client = RegionHealthChecksClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [(RegionHealthChecksClient, transports.RegionHealthChecksRestTransport),],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key option is exchanged for API-key credentials on the transport."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The transport must be constructed with the credentials derived
            # from the api_key, not with the key string itself.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| googleapis/python-compute | tests/unit/gapic/compute_v1/test_region_health_checks.py | Python | apache-2.0 | 117,533 | [
"Octopus"
] | 8d3d42e96b0000f758784efdf96aeddf5f8055c264573c4646ef21a30f2be81c |
#!/usr/bin/env python
########################################################################
# File : dirac-wms-cpu-normalization
# Author : Andrew McNab
########################################################################
"""
Determine Normalization for current CPU.
The main users of this script are the pilot jobs.
Pilots invoke dirac-wms-cpu-normalization which
- tries to find MACHINEFEATURES/ + JOBFEATURES,
and if found populates the local cfg file with e.g. '/LocalSite/JOBFEATURES/'
- runs 1 iteration of singleDiracBenchmark(1) (for single processors only)
- stores in local cfg the following: (the example below is from the case of when MJF is not available)::
LocalSite
{
CPUScalingFactor = 23.7 # corrected value (by JobScheduling/CPUNormalizationCorrection)
CPUNormalizationFactor = 23.7 # corrected value (by JobScheduling/CPUNormalizationCorrection)
DB12measured = 15.4
DB12 = 15.4
}
The last 2 (DB12 and DB12measured) are up to now wrote down but never used.
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
# Command-line switches: -U/--Update writes the result into the local
# dirac.cfg; -R/--Reconfig=<file> writes it into the given config file.
Script.registerSwitch("U", "Update", "Update dirac.cfg with the resulting value")
Script.registerSwitch("R:", "Reconfig=", "Update given configuration file with the resulting value")
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
                                  'Usage:',
                                  '  %s [option|cfgfile] ' % Script.scriptName]))
Script.parseCommandLine(ignoreErrors=True)
# Defaults: do not touch any configuration unless a switch says so.
update = False
configFile = None
for unprocSw in Script.getUnprocessedSwitches():
    if unprocSw[0] in ("U", "Update"):
        update = True
    elif unprocSw[0] in ("R", "Reconfig"):
        configFile = unprocSw[1]
if __name__ == "__main__":
    from DIRAC import gLogger, gConfig
    from DIRAC.WorkloadManagementSystem.Client.DIRACbenchmark import singleDiracBenchmark
    from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
    from DIRAC.Core.Utilities import MJF
    # Publish any MACHINEFEATURES/JOBFEATURES values into the local cfg
    # (e.g. /LocalSite/JOBFEATURES/...), then read the db12/hs06 job features.
    mjf = MJF.MJF()
    mjf.updateConfig()
    db12JobFeature = mjf.getJobFeature('db12')
    hs06JobFeature = mjf.getJobFeature('hs06')
    # One iteration of the DB12 benchmark (single-processor case).
    result = singleDiracBenchmark(1)
    if result is None:
        gLogger.error('Cannot make benchmark measurements')
        DIRAC.exit(1)
    db12Measured = round(result['NORM'], 1)
    # Apply the operations-configured correction factor to the raw score.
    corr = Operations().getValue('JobScheduling/CPUNormalizationCorrection', 1.)
    norm = round(result['NORM'] / corr, 1)
    gLogger.notice('Estimated CPU power is %.1f HS06' % norm)
    if update:
        # Prefer the MJF-published hs06 value for the scaling factor when
        # available; fall back to the corrected measurement otherwise.
        gConfig.setOptionValue('/LocalSite/CPUScalingFactor', hs06JobFeature if hs06JobFeature else norm)  # deprecate?
        gConfig.setOptionValue('/LocalSite/CPUNormalizationFactor', norm)  # deprecate?
        gConfig.setOptionValue('/LocalSite/DB12measured', db12Measured)
        # Set DB12 to use by default. Remember db12JobFeature is still in /LocalSite/JOBFEATURES/db12
        if db12JobFeature is not None:
            gConfig.setOptionValue('/LocalSite/DB12', db12JobFeature)
        else:
            gConfig.setOptionValue('/LocalSite/DB12', db12Measured)
        if configFile:
            gConfig.dumpLocalCFGToFile(configFile)
        else:
            gConfig.dumpLocalCFGToFile(gConfig.diracConfigFilePath)
    DIRAC.exit()
| arrabito/DIRAC | WorkloadManagementSystem/scripts/dirac-wms-cpu-normalization.py | Python | gpl-3.0 | 3,218 | [
"DIRAC"
] | 22ea9017512fe70da8c96d8d51cec56de0066bb29e5e9afe6d02a00a6a21e579 |
# Standard Library
from builtins import str
from builtins import range
import random
import string
# Third Party Stuff
from django.conf import settings
from django.contrib import messages, auth
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.mail import EmailMultiAlternatives
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.template import RequestContext
from hashids import Hashids
from PIL import Image
# Spoken Tutorial Stuff
from cms.forms import *
from cms.models import *
from cms.services import *
from events.models import Student
from mdldjango.models import MdlUser
from mdldjango.urls import *
from django.template.context_processors import csrf
from donate.models import Payee
def dispatcher(request, permalink=''):
    """Render the CMS Page identified by *permalink*.

    The 'project_documents' permalink is access-controlled: any active
    impersonation session is ended and the user must belong to the
    'page_admin' group, otherwise they are redirected to login.
    """
    if permalink == '':
        return HttpResponseRedirect('/')
    if permalink == 'project_documents':
        # End a django-impersonate session (firing its session_end signal)
        # and force re-authentication as the real user.
        impersonating = request.session.pop('_impersonate', None)
        if impersonating is not None:
            from impersonate.signals import session_end
            request.session.modified = True
            session_end.send(
                sender=None,
                impersonator=request.impersonator,
                impersonating=impersonating,
                request=request
            )
            return HttpResponseRedirect('/accounts/login/?next=/project_documents/')
        if not request.user.groups.filter(name ='page_admin').exists():
            messages.error(request, "You are not authorized to access this page. Please login as an authorized user.")
            return HttpResponseRedirect('/accounts/login/?next=/project_documents/')
    page_content = get_object_or_404(Page, permalink=permalink, visible=True)
    # Centre the page content: cols is the bootstrap column width out of 12.
    col_offset = int((12 - page_content.cols) / 2)
    col_remainder = int((12 - page_content.cols) % 2)
    if col_remainder:
        # Odd remainder: append the half-column CSS suffix ('p5' = .5).
        col_offset = str(col_offset) + 'p5'
    context = {
        'page': page_content,
        'col_offset': col_offset,
    }
    return render(request, 'cms/templates/page.html', context)
def create_profile(user, phone=None):
    """Create and save a Profile for *user* with a random confirmation code.

    Args:
        user: the Django User the profile belongs to.
        phone: optional phone number. Defaults to None because several
            callers in this module (account_profile, account_view_profile)
            invoke ``create_profile(user)`` with no phone — the previously
            required parameter made those calls raise TypeError.

    Returns:
        The saved Profile instance.
    """
    # 7-character alphanumeric code used in the account-activation link/OTP.
    confirmation_code = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for x in range(7))
    profile = Profile(user=user, confirmation_code=confirmation_code, phone=phone)
    profile.save()
    return profile
def account_register(request):
    """Register a new, initially inactive user account.

    GET renders the registration form; POST validates it, creates an
    inactive User plus Profile, and emails an activation link.
    NOTE(review): the reCAPTCHA check is present but commented out.
    """
    # import recaptcha validate function
    from cms.recaptcha import recaptcha_valdation, get_recaptcha_context
    # reCAPTCHA Site key
    context = get_recaptcha_context()
    if request.method == 'POST':
        # verify recaptcha
        #recaptcha_result = recaptcha_valdation(request)
        form = RegisterFormHome(request.POST)
        #if recaptcha_result and form.is_valid():
        if form.is_valid():
            username = request.POST['username'].strip()
            password = request.POST['password'].strip()
            email = request.POST['email'].strip()
            first_name = str(request.POST['first_name'].strip())
            last_name = str(request.POST['last_name'].strip())
            phone = request.POST['phone']
            # Account stays inactive until the emailed link is clicked.
            user = User.objects.create_user(username, email, password)
            user.first_name = first_name
            user.last_name = last_name
            user.is_active = False
            user.save()
            create_profile(user, phone)
            send_registration_confirmation(user)
            messages.success(request,
                "Thank you for registering.\
                Please confirm your registration by clicking on the activation link which has been sent to your registered email %s.<br>\
                In case if you do not receive any activation mail kindly verify and activate your account from below link :<br>\
                <a href='https://spoken-tutorial.org/accounts/verify/'>https://spoken-tutorial.org/accounts/verify/</a>"
                % (email))
            return HttpResponseRedirect('/')
        # Invalid form: re-render with errors.
        context['form'] = form
        return render(request, 'cms/templates/register.html', context)
    else:
        form = RegisterFormHome()
        context['form'] = form
        context.update(csrf(request))
        return render(request, 'cms/templates/register.html', context)
def send_registration_confirmation(user):
    """Email the account-activation link to a newly registered user.

    Builds a plain-text message containing the confirmation URL
    (/accounts/confirm/<code>/<username>) and sends it from the no-reply
    address. Sending failures are deliberately swallowed so registration
    still succeeds when the mail backend is down.
    """
    p = Profile.objects.get(user=user)
    subject = 'Account Active Notification'
    message = """Dear {0},
Thank you for registering at {1}. You may activate your account by clicking on this link or copying and pasting it in your browser
{2}
Regards,
Admin
Spoken Tutorials
IIT Bombay.
""".format(
        user.username,
        "https://spoken-tutorial.org",
        "https://spoken-tutorial.org/accounts/confirm/" + str(p.confirmation_code) + "/" + user.username
    )
    email = EmailMultiAlternatives(
        subject, message, 'no-reply@spoken-tutorial.org',
        to = [user.email], bcc = [], cc = [],
        headers={'Reply-To': 'no-reply@spoken-tutorial.org', "Content-type":"text/html;charset=iso-8859-1"}
    )
    try:
        email.send(fail_silently=False)
    except Exception:
        # Fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Mail failure is still intentionally non-fatal.
        pass
def email_otp(user):
    """Email the user's one-time confirmation code (OTP) for account activation.

    The OTP is the Profile.confirmation_code already stored for the user.
    Sending failures are deliberately swallowed so the calling flow is not
    interrupted by a mail-backend outage.
    """
    p = Profile.objects.get(user=user)
    subject = 'Spoken Tutorial'
    message = """Hello {0},
Your OTP is
{1}
If you enter the OTP correctly, this email address will get activated on Spoken Tutorial({2}).
Regards,
Admin
Spoken Tutorials
IIT Bombay.
""".format(
        user.username,
        str(p.confirmation_code),
        "https://spoken-tutorial.org",
    )
    email = EmailMultiAlternatives(
        subject, message, 'no-reply@spoken-tutorial.org',
        to = [user.email], bcc = [], cc = [],
        headers={'Reply-To': 'no-reply@spoken-tutorial.org', "Content-type":"text/html;charset=iso-8859-1"}
    )
    try:
        email.send(fail_silently=False)
    except Exception:
        # Fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Mail failure is still intentionally non-fatal.
        pass
def confirm(request, confirmation_code, username):
    """Activate an account from the emailed confirmation link.

    Looks up the user and their profile; if the code in the URL matches the
    stored confirmation code, the account is activated and the user logged
    in. Always redirects to the home page.
    """
    try:
        user = User.objects.get(username=username)
        profile = Profile.objects.get(user=user)
        if profile.confirmation_code == confirmation_code:
            user.is_active = True
            user.save()
            # Backend must be set explicitly because login() is called
            # without a preceding authenticate().
            user.backend = 'django.contrib.auth.backends.ModelBackend'
            login(request, user)
            messages.success(request, "Your account has been activated!. Please update your profile.")
            return HttpResponseRedirect('/')
        else:
            # Fix: failure feedback was tagged with messages.success,
            # which styles an error as a success banner.
            messages.error(request, "Something went wrong!. Please try again!")
            return HttpResponseRedirect('/')
    except Exception:
        messages.error(request, "Your account not activated!. Please try again!")
        return HttpResponseRedirect('/')
def account_login(request):
    """Authenticate a user and start a session.

    Anonymous users get the login form (GET) or are authenticated (POST).
    On success, the session lifetime honours the 'remember' checkbox, an
    incomplete profile forces a redirect to the profile editor, and the
    optional ?next= parameter is respected. Authenticated users are simply
    redirected home.
    """
    user = request.user
    error_msg = ''
    if request.user.is_anonymous():
        form = LoginForm()
        context = {
            'form' : form
        }
        if request.method == 'POST':
            form = LoginForm(request.POST)
            if form.is_valid():
                username = request.POST.get('username', None)
                password = request.POST.get('password', None)
                remember = request.POST.get('remember', None)
                if username and password:
                    user = auth.authenticate(username=username, password=password)
                    if user is not None:
                        if user.is_active:
                            login(request, user)
                            # 'remember me': long-lived session, otherwise
                            # expire at browser close (set_expiry(0)).
                            if remember:
                                request.session.set_expiry(settings.KEEP_LOGGED_DURATION)
                            else:
                                request.session.set_expiry(0)
                            # Incomplete profiles are sent to the editor first.
                            try:
                                p = Profile.objects.get(user_id = user.id)
                                if not user.first_name or not user.last_name or not p.state or not p.district or not p.city or not p.address or not p.phone:# or not p.pincode or not p.picture:
                                    messages.success(request, "<ul><li>Please update your profile.</li><li>Please make sure you enter your First name, Last name both and with correct spelling.</li><li>It is recommended that you do upload the photo.</li></ul>")
                                    return HttpResponseRedirect('/accounts/profile/'+user.username)
                            except:
                                pass
                            if request.GET and request.GET['next']:
                                return HttpResponseRedirect(request.GET['next'])
                            return HttpResponseRedirect('/')
                        else:
                            error_msg = "Your account is disabled.<br>\
                            Kindly activate your account by clicking on the activation link which has been sent to your registered email %s.<br>\
                            In case if you do not receive any activation mail kindly verify and activate your account from below link :<br>\
                            <a href='https://spoken-tutorial.org/accounts/verify/'>https://spoken-tutorial.org/accounts/verify/</a>"% (user.email)
                    else:
                        error_msg = 'Invalid username / password'
                else:
                    error_msg = 'Please enter username and Password'
            # Failed POST: re-render the form with the collected error.
            context['form'] = form
            messages.error(request, error_msg)
            return render(request, 'cms/templates/login.html', context)
        else:
            context['form'] = form
            context.update(csrf(request))
            if error_msg:
                messages.error(request, error_msg)
            return render(request, 'cms/templates/login.html', context)
    return HttpResponseRedirect('/')
@login_required
def account_logout(request):
    """Log out the current session and redirect to the home page."""
    logout(request)
    return HttpResponseRedirect('/')
@login_required
def account_profile(request, username):
    """Edit the logged-in user's profile (names, details, picture).

    On POST, saves the form, deletes a cleared picture file from disk, and
    generates a 128x128 thumbnail for newly uploaded images. On GET, renders
    the form pre-filled from the existing Profile.
    NOTE(review): operates on request.user, not on the *username* URL arg.
    """
    user = request.user
    try:
        profile = Profile.objects.get(user_id=user.id)
    except:
        # No profile yet (e.g. legacy account) — create one on the fly.
        profile = create_profile(user)
    old_file_path = settings.MEDIA_ROOT + str(profile.picture)
    new_file_path = None
    if request.method == 'POST':
        form = ProfileForm(user, request.POST, request.FILES, instance = profile)
        if form.is_valid():
            user.first_name = request.POST['first_name']
            user.last_name = request.POST['last_name']
            user.save()
            form_data = form.save(commit=False)
            form_data.user_id = user.id
            # 'Clear picture' checkbox: remove the old image file from disk.
            if 'picture-clear' in request.POST and request.POST['picture-clear']:
                #if not old_file == new_file:
                if os.path.isfile(old_file_path):
                    os.remove(old_file_path)
            if 'picture' in request.FILES:
                form_data.picture = request.FILES['picture']
            form_data.save()
            if 'picture' in request.FILES:
                # Build a 128x128 thumbnail next to the uploaded image.
                size = 128, 128
                filename = str(request.FILES['picture'])
                ext = os.path.splitext(filename)[1]
                if ext != '.pdf' and ext != '':
                    im = Image.open(settings.MEDIA_ROOT + str(form_data.picture))
                    im.thumbnail(size, Image.ANTIALIAS)
                    ext = ext[1:]
                    mimeType = ext.upper()
                    # PIL expects 'JPEG', not the 'JPG' file extension.
                    if mimeType == 'JPG':
                        mimeType = 'JPEG'
                    thumbName = 'user/' + str(user.id)+ '/' + str(user.id) + '-thumb.' + ext
                    im.save(settings.MEDIA_ROOT + thumbName, mimeType)
                    form_data.thumb = thumbName
                    form_data.save()
            messages.success(request, "Your profile has been updated!")
            return HttpResponseRedirect("/accounts/view-profile/" + user.username)
        # Invalid form: redisplay with errors.
        context = {'form':form}
        return render(request, 'cms/templates/profile.html', context)
    else:
        context = {}
        context.update(csrf(request))
        instance = Profile.objects.get(user_id=user.id)
        context['form'] = ProfileForm(user, instance = instance)
        return render(request, 'cms/templates/profile.html', context)
@login_required
def account_view_profile(request, username):
    """Display a user's own profile page, including any donation records.

    Only the owner may view the page; other usernames raise PermissionDenied.
    """
    if username != request.user.username:
        raise PermissionDenied('You are not allowed to view this page!')
    user = User.objects.get(username = username)
    profile = None
    try:
        profile = Profile.objects.get(user = user)
    except:
        # No profile yet — create one on the fly.
        profile = create_profile(user)
    context = {
        'profile' : profile,
        'media_url' : settings.MEDIA_URL,
    }
    if request.user.is_authenticated():
        # Donation/payment history with its FOSS/language rows prefetched.
        payee_list = Payee.objects.prefetch_related('cdfosslanguages_set__foss','cdfosslanguages_set__lang').filter(user=request.user)
        context['payee_list'] = payee_list
    return render(request, 'cms/templates/view-profile.html', context)
def password_reset(request):
    """Issue a temporary random password and email it to the user.

    On a valid POST the user's password (and the matching Moodle account
    password) is reset to a random 8-character string, which is mailed
    to the account's email address. Supports an optional ``?next=``
    redirect used by the forum password-reset flow.
    """
    context = {}
    form = PasswordResetForm()
    if request.method == "POST":
        form = PasswordResetForm(request.POST)
        if form.is_valid():
            user = User.objects.filter(email=request.POST['email']).first()
            if user is None:
                # FIX: .first() returns None for an unknown address; the
                # original code crashed with AttributeError on set_password.
                messages.error(request, "No account found for that email address.")
                return HttpResponseRedirect(request.get_full_path())
            password_string = ''.join(
                random.choice(string.ascii_uppercase + string.digits)
                for _ in range(8))
            user.set_password(password_string)
            user.save()
            # Keep the Moodle account's password in sync.
            from mdldjango.views import changeMdlUserPass
            changeMdlUserPass(request.POST['email'], password_string)
            # SECURITY FIX: do not print the new plaintext password to
            # stdout/logs; log only the affected username.
            print(('Password reset for username => ', user.username))
            if not user.profile_set.first():
                profile = create_profile(user, None)
            changePassUrl = "http://www.spoken-tutorial.org/accounts/change-password"
            # FIX: .get() avoids a KeyError when GET has params but no 'next'.
            nextParam = request.GET.get('next')
            if nextParam:
                changePassUrl = changePassUrl + "?auto=%s&username=%s&next=%s" % (
                    user.profile_set.first().confirmation_code,
                    user.username, nextParam)
            # Send email
            subject = "Spoken Tutorial password reset"
            to = [user.email]
            message = '''Hi {0},
Your account password at 'Spoken Tutorials' has been reset
and you have been issued with a new temporary password.
Your current login information is now:
username: {1}
password: {2}
With respect to change your password kindly follow the steps written below :
Step 1. Visit below link to change the password. Provide temporary password given above in the place of Old Password field.
{3}
Step 2.Use this changed password for spoken forum login and in moodle login also.
In most mail programs, this should appear as a blue link
which you can just click on. If that doesn't work,
then cut and paste the address into the address
line at the top of your web browser window.
Best Wishes,
Admin
Spoken Tutorials
IIT Bombay.
'''.format(user.username, user.username, password_string, changePassUrl)
            # send email
            email = EmailMultiAlternatives(
                subject, message, 'no-reply@spoken-tutorial.org',
                to=to, bcc=[], cc=[],
                headers={'Reply-To': 'no-reply@spoken-tutorial.org',
                         "Content-type": "text/html;charset=iso-8859-1"}
            )
            result = email.send(fail_silently=False)
            # redirect to next url if there or redirect to login page
            # use for forum password reset form
            redirectNext = request.GET.get('next', False)
            if redirectNext:
                return HttpResponseRedirect(redirectNext)
            messages.success(request, "New password sent to your email " + user.email)
            return HttpResponseRedirect('/accounts/change-password/')
    context = {
        'form': form
    }
    context.update(csrf(request))
    return render(request, 'cms/templates/password_reset.html', context)
#@login_required
def change_password(request):
    """Let a user set a new password, optionally auto-logging them in.

    When called with ``?auto=<code>&username=<u>&next=<url>`` (the link
    mailed out by password_reset) the profile's confirmation code is
    checked and, if it matches, the user is logged in without a password
    so they can immediately choose a new one. Anonymous visitors without
    a valid code are bounced to the login page.
    """
    # Auto-login parameters carried by the password-reset email link.
    pcode = request.GET.get('auto', False)
    username = request.GET.get('username', False)
    nextUrl = request.GET.get('next', False)
    # check pcode against the stored profile confirmation code
    if pcode and username and nextUrl:
        user = User.objects.get(username=username)
        profile = Profile.objects.get(user=user)
        if profile.confirmation_code == pcode:
            # Bypass normal authentication: possession of the emailed
            # code proves ownership of the account.
            user.backend = 'django.contrib.auth.backends.ModelBackend'
            login(request, user)
    if request.user.is_anonymous():
        return HttpResponseRedirect('/accounts/login/?next=/accounts/change-password')
    context = {}
    form = ChangePasswordForm()
    if request.method == "POST":
        form = ChangePasswordForm(request.POST)
        if form.is_valid():
            # The form carries the user id and confirmation code as
            # hidden fields; look the profile up by both together.
            profile = Profile.objects.get(user_id=form.cleaned_data['userid'], confirmation_code=form.cleaned_data['code'])
            user = profile.user
            user.set_password(form.cleaned_data['new_password'])
            user.save()
            # keep any matching Moodle account password in sync too
            from mdldjango.views import changeMdlUserPass
            changeMdlUserPass(user.email, form.cleaned_data['new_password'])
            if nextUrl:
                # Strip any query string before redirecting back.
                return HttpResponseRedirect(nextUrl.split("?", 1)[0])
            messages.success(request, "Your account password has been updated successfully!")
            return HttpResponseRedirect("/accounts/view-profile/" + user.username)
    context['form'] = form
    context.update(csrf(request))
    return render(request, 'cms/templates/change_password.html', context)
def confirm_student(request, token):
    """Activate a student account from an emailed confirmation token.

    The token is a Hashids-encoded Moodle user id. On success the
    matching Django user is activated, the Student record is marked
    verified, and the visitor is sent to the participant login page;
    on any failure the visitor is redirected home with an error flash.
    """
    try:
        # FIX: decode() can return an empty tuple for a bad token; the
        # original [0] indexing raised an uncaught IndexError.
        decoded = Hashids(salt=settings.SPOKEN_HASH_SALT).decode(token)
        mdluser = MdlUser.objects.filter(pk=decoded[0]).first() if decoded else None
        if mdluser:
            # FIX: the original dereferenced mdluser.email BEFORE the
            # `if mdluser:` check, so a missing record crashed with
            # AttributeError instead of reaching the error branch.
            user = User.objects.get(email=mdluser.email)
            student = Student.objects.get(user_id=user.id)
            user.is_active = True
            user.save()
            student.verified = True
            student.error = False
            student.save()
            messages.success(request, "Your account has been activated!. Please login to continue.")
            return HttpResponseRedirect('https://spoken-tutorial.org/participant/login/')
        else:
            print('can not match record')
            messages.error(request, "Your account not activated!. Please try again!")
            return HttpResponseRedirect('/')
    except (ObjectDoesNotExist, IndexError):
        messages.error(request, "Your account not activated!. Please try again!")
        return HttpResponseRedirect('/')
def verify_email(request):
    """Handle the resend-verification-email form.

    On a valid POST, attempt to send a verification mail to the given
    address and flash the outcome message; on an invalid form, flash a
    generic error. Always re-renders the verification page.
    """
    if request.method == 'POST':
        submitted = VerifyForm(request.POST)
        if not submitted.is_valid():
            messages.error(request, 'Invalid Email ID', extra_tags='error')
        else:
            # send verification mail as per the criteria check
            ok, note = send_verify_email(request, submitted.cleaned_data['email'])
            if ok:
                messages.success(request, note, extra_tags='success')
            else:
                messages.error(request, note, extra_tags='error')
    return render(request, "cms/templates/verify_email.html", {})
| Spoken-tutorial/spoken-website | cms/views.py | Python | gpl-3.0 | 19,636 | [
"VisIt"
] | 4b0e6b93e66a79ac773bbca13224b057c7967cba0d2742ccc679085f47c1d395 |
"""Factor Analysis.
A latent linear variable model.
FactorAnalysis is similar to probabilistic PCA implemented by PCA.score
While PCA assumes Gaussian noise with the same variance for each
feature, the FactorAnalysis model assumes different variances for
each of them.
This implementation is based on David Barber's Book,
Bayesian Reasoning and Machine Learning,
http://www.cs.ucl.ac.uk/staff/d.barber/brml,
Algorithm 21.1
"""
# Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# License: BSD 3 clause
import warnings
from math import sqrt, log
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array, check_random_state
from ..utils.extmath import fast_logdet, fast_dot, randomized_svd, squared_norm
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
class FactorAnalysis(BaseEstimator, TransformerMixin):
    """Factor Analysis (FA)

    A simple linear generative model with Gaussian latent variables.

    The observations are assumed to be caused by a linear transformation of
    lower dimensional latent factors and added Gaussian noise.
    Without loss of generality the factors are distributed according to a
    Gaussian with zero mean and unit covariance. The noise is also zero mean
    and has an arbitrary diagonal covariance matrix.

    If we would restrict the model further, by assuming that the Gaussian
    noise is even isotropic (all diagonal entries are the same) we would obtain
    :class:`PPCA`.

    FactorAnalysis performs a maximum likelihood estimate of the so-called
    `loading` matrix, the transformation of the latent variables to the
    observed ones, using expectation-maximization (EM).

    Read more in the :ref:`User Guide <FA>`.

    Parameters
    ----------
    n_components : int | None
        Dimensionality of latent space, the number of components
        of ``X`` that are obtained after ``transform``.
        If None, n_components is set to the number of features.

    tol : float
        Stopping tolerance for EM algorithm.

    copy : bool
        Whether to make a copy of X. If ``False``, the input X gets overwritten
        during fitting.

    max_iter : int
        Maximum number of iterations.

    noise_variance_init : None | array, shape=(n_features,)
        The initial guess of the noise variance for each feature.
        If None, it defaults to np.ones(n_features)

    svd_method : {'lapack', 'randomized'}
        Which SVD method to use. If 'lapack' use standard SVD from
        scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
        Defaults to 'randomized'. For most applications 'randomized' will
        be sufficiently precise while providing significant speed gains.
        Accuracy can also be improved by setting higher values for
        `iterated_power`. If this is not sufficient, for maximum precision
        you should choose 'lapack'.

    iterated_power : int, optional
        Number of iterations for the power method. 3 by default. Only used
        if ``svd_method`` equals 'randomized'

    random_state : int or RandomState
        Pseudo number generator state used for random sampling. Only used
        if ``svd_method`` equals 'randomized'

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Components with maximum variance.

    loglike_ : list, [n_iterations]
        The log likelihood at each iteration.

    noise_variance_ : array, shape=(n_features,)
        The estimated noise variance for each feature.

    n_iter_ : int
        Number of iterations run.

    References
    ----------
    .. David Barber, Bayesian Reasoning and Machine Learning,
        Algorithm 21.1

    .. Christopher M. Bishop: Pattern Recognition and Machine Learning,
        Chapter 12.2.4

    See also
    --------
    PCA: Principal component analysis is also a latent linear variable model
        which however assumes equal noise variance for each feature.
        This extra assumption makes probabilistic PCA faster as it can be
        computed in closed form.
    FastICA: Independent component analysis, a latent variable model with
        non-Gaussian latent variables.
    """
    def __init__(self, n_components=None, tol=1e-2, copy=True, max_iter=1000,
                 noise_variance_init=None, svd_method='randomized',
                 iterated_power=3, random_state=0):
        self.n_components = n_components
        self.copy = copy
        self.tol = tol
        self.max_iter = max_iter
        # Validate eagerly so a bad svd_method fails at construction time
        # rather than deep inside fit().
        if svd_method not in ['lapack', 'randomized']:
            raise ValueError('SVD method %s is not supported. Please consider'
                             ' the documentation' % svd_method)
        self.svd_method = svd_method

        self.noise_variance_init = noise_variance_init
        self.iterated_power = iterated_power
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the FactorAnalysis model to X using EM

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self
        """
        # FIX: np.float was a deprecated alias of the builtin `float`
        # (removed in NumPy 1.24); np.float64 is the concrete dtype it
        # always meant, so behavior is unchanged.
        X = check_array(X, copy=self.copy, dtype=np.float64)

        n_samples, n_features = X.shape
        n_components = self.n_components
        if n_components is None:
            n_components = n_features
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_

        # some constant terms of the log-likelihood
        nsqrt = sqrt(n_samples)
        llconst = n_features * log(2. * np.pi) + n_components
        var = np.var(X, axis=0)

        if self.noise_variance_init is None:
            psi = np.ones(n_features, dtype=X.dtype)
        else:
            if len(self.noise_variance_init) != n_features:
                # FIX: original message read "dimension does not with
                # number of features" (missing verb).
                raise ValueError("noise_variance_init dimension does not "
                                 "match number of features : %d != %d" %
                                 (len(self.noise_variance_init), n_features))
            psi = np.array(self.noise_variance_init)

        loglike = []
        old_ll = -np.inf
        SMALL = 1e-12

        # we'll modify svd outputs to return unexplained variance
        # to allow for unified computation of loglikelihood
        if self.svd_method == 'lapack':
            def my_svd(X):
                _, s, V = linalg.svd(X, full_matrices=False)
                return (s[:n_components], V[:n_components],
                        squared_norm(s[n_components:]))
        elif self.svd_method == 'randomized':
            random_state = check_random_state(self.random_state)

            def my_svd(X):
                _, s, V = randomized_svd(X, n_components,
                                         random_state=random_state,
                                         n_iter=self.iterated_power)
                return s, V, squared_norm(X) - squared_norm(s)
        else:
            # Unreachable when constructed through __init__, but kept as
            # a guard against attribute tampering.
            raise ValueError('SVD method %s is not supported. Please consider'
                             ' the documentation' % self.svd_method)

        for i in xrange(self.max_iter):
            # SMALL helps numerics (avoids division by an exact zero)
            sqrt_psi = np.sqrt(psi) + SMALL
            s, V, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
            s **= 2
            # Use 'maximum' here to avoid sqrt problems.
            W = np.sqrt(np.maximum(s - 1., 0.))[:, np.newaxis] * V
            del V
            W *= sqrt_psi

            # loglikelihood of the current parameters
            ll = llconst + np.sum(np.log(s))
            ll += unexp_var + np.sum(np.log(psi))
            ll *= -n_samples / 2.
            loglike.append(ll)
            if (ll - old_ll) < self.tol:
                break
            old_ll = ll

            psi = np.maximum(var - np.sum(W ** 2, axis=0), SMALL)
        else:
            warnings.warn('FactorAnalysis did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations.',
                          ConvergenceWarning)

        self.components_ = W
        self.noise_variance_ = psi
        self.loglike_ = loglike
        self.n_iter_ = i + 1
        return self

    def transform(self, X):
        """Apply dimensionality reduction to X using the model.

        Compute the expected mean of the latent variables.
        See Barber, 21.2.33 (or Bishop, 12.66).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
            The latent variables of X.
        """
        check_is_fitted(self, 'components_')

        X = check_array(X)
        Ih = np.eye(len(self.components_))

        X_transformed = X - self.mean_

        # Posterior covariance of z given x (Bishop 12.66)
        Wpsi = self.components_ / self.noise_variance_
        cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
        tmp = fast_dot(X_transformed, Wpsi.T)
        X_transformed = fast_dot(tmp, cov_z)

        return X_transformed

    def get_covariance(self):
        """Compute data covariance with the FactorAnalysis model.

        ``cov = components_.T * components_ + diag(noise_variance)``

        Returns
        -------
        cov : array, shape (n_features, n_features)
            Estimated covariance of data.
        """
        check_is_fitted(self, 'components_')

        cov = np.dot(self.components_.T, self.components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov

    def get_precision(self):
        """Compute data precision matrix with the FactorAnalysis model.

        Returns
        -------
        precision : array, shape (n_features, n_features)
            Estimated precision of data.
        """
        check_is_fitted(self, 'components_')

        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components == 0:
            return np.diag(1. / self.noise_variance_)
        if self.n_components == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma (Woodbury identity),
        # which avoids inverting the full n_features x n_features matrix.
        components_ = self.components_
        precision = np.dot(components_ / self.noise_variance_, components_.T)
        precision.flat[::len(precision) + 1] += 1.
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= self.noise_variance_[:, np.newaxis]
        precision /= -self.noise_variance_[np.newaxis, :]
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision

    def score_samples(self, X):
        """Compute the log-likelihood of each sample

        Parameters
        ----------
        X: array, shape (n_samples, n_features)
            The data

        Returns
        -------
        ll: array, shape (n_samples,)
            Log-likelihood of each sample under the current model
        """
        check_is_fitted(self, 'components_')

        Xr = X - self.mean_
        precision = self.get_precision()
        n_features = X.shape[1]
        log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
        log_like -= .5 * (n_features * log(2. * np.pi)
                          - fast_logdet(precision))
        return log_like

    def score(self, X, y=None):
        """Compute the average log-likelihood of the samples

        Parameters
        ----------
        X: array, shape (n_samples, n_features)
            The data

        Returns
        -------
        ll: float
            Average log-likelihood of the samples under the current model
        """
        return np.mean(self.score_samples(X))
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/decomposition/factor_analysis.py | Python | gpl-2.0 | 11,946 | [
"Gaussian"
] | fec248df62649015a64123ee1b5c54d2ef3deb9d742f250ba2661629a5d39fdc |
#!/bin/env python
# -*- coding: utf-8 -*-
from Sire.IO import *
from Sire.System import *
from Sire.Mol import *
from Sire.MM import *
from Sire.FF import *
from Sire.CAS import *
from Sire.Maths import *
from Sire.Analysis import *
from Sire.System import *
from Sire.Base import *
from Sire.Units import *
import Sire.Config
import Sire.Stream
from Sire.Tools.AmberLoader import *
from Sire.Tools import Parameter, resolveParameters
import copy
import os
import shutil
import sys
# We will use the waterbox held in the WSRC tools directory
wsrc_tools_dir = "%s/Tools/WSRC" % Sire.Config.share_directory
####################################################
# ALL OF THE GLOBAL USER-AVAILABLE LSRC PARAMETERS #
####################################################
mcs_timeout = Parameter("match timeout", 5*second,
"""The maximum amount of time to give the maximum common substructure
algorithm to find a match between the two ligands.""")
mcs_prematch = Parameter("match atoms", None,
"""The names of atoms that must match when aligning the two ligands.
The format is a comma-separated list of atom pairs, saying that
the atom called 'A' in ligand0 matches the atom called 'B' in
ligand1. This is needed when the maximum common substructure
algorithm fails to find a good match. For example, the string
'A1:B1,A2:B2,A3:B3' would match atom A1 in ligand0 to atom
B1 in ligand1, A2 to B2 and A3 to B3.""")
cutoff_method = Parameter("cutoff method", "shift electrostatics",
"""Method used to apply the non-bonded electrostatic cutoff.""")
rf_dielectric = Parameter("reaction field dielectric", 78.3,
"""Dielectric constant to use if the reaction field cutoff method is used.""")
coul_cutoff = Parameter("coulomb cutoff", 15*angstrom,
"""Coulomb cutoff length""")
lj_cutoff = Parameter("LJ cutoff", 15*angstrom,
"""Lennard Jones cutoff length""")
grid_spacing = Parameter("grid spacing", 1.0*angstrom,
"""Grid spacing used for the grid-based forcefields""")
grid_buffer = Parameter("grid buffer", 2*angstrom,
"""Buffer around the grid used to prevent recalculation
in the grid-based forcefields.""")
disable_grid = Parameter("disable grid", False, """Whether or not to disable use of the grid""")
use_oldff = Parameter("use old forcefields", False, """For debugging, use the old forcefields rather than the
new forcefields""")
temperature = Parameter("temperature", 25*celsius, """Simulation temperature""")
random_seed = Parameter("random seed", None, """Random number seed. Set this if you
want to have reproducible simulations.""")
vacuum_calc = Parameter("vacuum calculation", False,
"""Whether or not to swap the ligand into vacuum. This is useful if you
want to calculate relative hydration free energies.""")
use_fixed_ligand = Parameter("fixed ligand", False,
"""Whether or not to completely fix the ligand during the simulation.""")
use_rot_trans_ligand = Parameter("ligand rot-trans", True,
"""Whether or not the ligand is free to translate and rotate.""")
topology = Parameter("topology", "dual",
"""Whether to use 'single' or 'dual' topology to morph between the two ligands.""")
alpha_scale = Parameter("alpha_scale", 1.0,
"""Amount by which to scale the alpha parameter. The lower the value,
the less softening with lambda, while the higher the value, the
more softening""")
delta_lambda = Parameter("delta_lambda", 0.001,
"""Value of delta lambda used in the finite difference thermodynamic
integration algorithm used to calculate the free energy""")
water_monitor_distance = Parameter("water monitor distance", 5.0*angstrom,
"""The distance up to which the free energy of water molecules
interacting with the ligand should be recorded.""")
nrgmon_frequency = Parameter("energy monitor frequency", 1000,
"""The number of steps between each evaluation of the energy monitors.""")
lambda_values = Parameter("lambda values", [ 0.005, 0.071, 0.137, 0.203, 0.269, 0.335, 0.401, 0.467, 0.533, 0.599, 0.665, 0.731, 0.797, 0.863, 0.929, 0.995 ],
"""The values of lambda to use in the RETI free energy simulation.""")
nsubmoves = Parameter("nsubmoves", 50000,
"""The number of moves to perform between each RETI move.""")
ligand_name0 = Parameter("ligand0", None,
"""The name of ligand 0. This should be the name of one of the residues
in the ligand, so that this program can find the correct molecule. If it is not set, then
the first non-protein, non solvent molecule is used.""")
ligand_name1 = Parameter("ligand1", None,
"""The name of ligand 1. This should be the name of one of the residues
in the ligand, so that this program can find the correct molecule. If it is not set, then
the first non-protein, non solvent molecule is used.""")
reflection_radius = Parameter("reflection radius", 15*angstrom,
"""The radius of the reflection sphere""")
ligand_reflection_radius = Parameter("ligand reflection radius", 2*angstrom,
"""The reflection radius of the ligand. This is used to constrain the ligand
to remain in the active site. This is needed to define the accessible volume
of the bound state.""")
topfile0 = Parameter("topfile0", "complex0.top",
"""Name of the topology file containing the solvated protein-ligand0 complex.""")
crdfile0 = Parameter("crdfile0", "complex0.crd",
"""Name of the coordinate file containing the coordinates of the
solvated protein-ligand0 complex.""")
topfile1 = Parameter("topfile1", "complex1.top",
"""Name of the topology file containing the solvated protein-ligand1 complex.""")
crdfile1 = Parameter("crdfile1", "complex1.crd",
"""Name of the coordinate file containing the coordinates of the
solvated protein-ligand1 complex.""")
s3file0 = Parameter("s3file0", "complex0.s3",
"""Name to use for the intermediate s3 file that will contain the
solvated protein-ligand0 complex after it has been loaded from the top/crd files.""")
s3file1 = Parameter("s3file1", "complex1.s3",
"""Name to use for the intermediate s3 file that will contain the
solvated protein-ligand1 complex after it has been loaded from the top/crd files.""")
water_topfile = Parameter("water topfile", "%s/waterbox.top" % wsrc_tools_dir,
"""Name of the topology file containing the water box.""")
water_crdfile = Parameter("water crdfile", "%s/waterbox.crd" % wsrc_tools_dir,
"""Name of the coordinate file containing the coordinates of the water box.""")
water_s3file = Parameter("water s3file", "waterbox.s3",
"""Name to use for the intermediate s3 file that will contain the
water box after it has been loaded from the top/crd files.""")
outdir = Parameter("output directory", "output",
"""Name of the directory in which to place all of the output files.""")
restart_file = Parameter("restart file", "lsrc_restart.s3",
"""Name of the restart file to use to save progress during the calculation of
the relative binding free energy of ligand 0 to ligand 1.""")
nmoves = Parameter("nmoves", 1000, """Number of RETI moves to perform during the simulation.""")
nequilmoves = Parameter("nequilmoves", 50000,
"""Number of equilibration moves to perform before setting up the free energy simulation.""")
coul_power = Parameter("coulomb power", 0,
"""The soft-core coulomb power parameter (integer)""")
shift_delta = Parameter("shift delta", 1.2,
"""The soft-core shift delta parameter""")
use_single_topology = Parameter("single topology", False,
"""Whether or not to use single topology to morph from ligand 0 to ligand 1.""")
use_dual_topology = Parameter("dual topology", True,
"""Whether or not to use dual topology to morph from ligand 0 to ligand 1.""")
save_pdb = Parameter("save pdb", True,
"""Whether or not to write a PDB of the system after each iteration.""")
save_all_pdbs = Parameter("save all pdbs", False,
"""Whether or not to write all of the PDBs. If not, only PDBs at the two
end points of the simulation will be written.""")
pdb_frequency = Parameter("pdb frequency", 50,
"""The frequency (number of iterations between) saving PDBs""")
binwidth = Parameter("free energy bin width", 1 * kcal_per_mol,
"""The size of the bins used in the histogram of energies collected
as part of creating the free energy average, in multiples of delta lambda""")
restart_frequency = Parameter("restart frequency", 10,
"""The frequency (number of iterations between) saving the restart file for the simulation.""")
####################################################
def renumberMolecules(molgroup):
    """Return a copy of *molgroup* in which every molecule has been
    given a fresh (unique) molecule number, keeping the same name and
    the same atom selections for each view."""
    renumbered = MoleculeGroup(molgroup.name().value())

    for num in molgroup.molNums():
        views = molgroup[num]
        fresh_mol = views.molecule().edit().renumber().commit()
        renumbered.add(ViewsOfMol(fresh_mol, views.selections()))

    return renumbered
def getMinimumDistance(mol0, mol1):
    """Return the minimum distance between any pair of atoms of the two
    molecules, measured in an infinite (non-periodic) cartesian space."""
    coords0 = CoordGroup(mol0.molecule().property("coordinates").array())
    coords1 = CoordGroup(mol1.molecule().property("coordinates").array())
    return Cartesian().minimumDistance(coords0, coords1)
def setCLJProperties(forcefield):
    """Configure *forcefield* with the user-selected electrostatics
    cutoff scheme, an infinite cartesian space, and harmonic switching
    at the coulomb/LJ cutoff distances. Returns the forcefield."""
    method = cutoff_method.val

    if method.find("shift electrostatics") != -1:
        forcefield.setShiftElectrostatics(True)
    elif method.find("reaction field") != -1:
        forcefield.setUseReactionField(True)
        forcefield.setReactionFieldDielectric(rf_dielectric.val)
    else:
        # Unrecognised method: warn and leave the default cutoff scheme.
        print("Cannot interpret the cutoff method from \"%s\"" % method, file=sys.stderr)

    forcefield.setSpace(Cartesian())

    switchfunc = HarmonicSwitchingFunction(coul_cutoff.val, coul_cutoff.val,
                                           lj_cutoff.val, lj_cutoff.val)
    forcefield.setSwitchingFunction(switchfunc)

    return forcefield
def setCLJFuncProperties(cljfunc):
    """Apply the settings shared by every CLJ function: an infinite
    cartesian space, the user-defined coulomb and LJ cutoffs, and
    arithmetic combining rules. Returns the configured function."""
    cljfunc.setSpace(Cartesian())
    cljfunc.setArithmeticCombiningRules(True)
    cljfunc.setCoulombCutoff(coul_cutoff.val)
    cljfunc.setLJCutoff(lj_cutoff.val)
    return cljfunc
def getInterCLJFunction():
    """Return the intermolecular CLJ function implementing the chosen
    cutoff scheme, fully configured via setCLJFuncProperties().

    Raises
    ------
    ValueError
        If the 'cutoff method' parameter cannot be interpreted. (The
        original code only printed a warning and then crashed with a
        NameError on the unbound local `cljfunc`.)
    """
    if cutoff_method.val.find("shift electrostatics") != -1:
        cljfunc = CLJShiftFunction()
    elif cutoff_method.val.find("reaction field") != -1:
        cljfunc = CLJRFFunction()
        cljfunc.setDielectric(rf_dielectric.val)
    else:
        raise ValueError("Cannot interpret the cutoff method from \"%s\"" % cutoff_method.val)

    return setCLJFuncProperties(cljfunc)
def getSoftInterCLJFunction():
    """Return the soft-core intermolecular CLJ function for the chosen
    cutoff scheme, initialised with alpha = 0 and the user-supplied
    soft-core shift-delta and coulomb-power parameters.

    Raises
    ------
    ValueError
        If the 'cutoff method' parameter cannot be interpreted. (The
        original code only printed a warning and then crashed with a
        NameError on the unbound local `cljfunc`.)
    """
    if cutoff_method.val.find("shift electrostatics") != -1:
        cljfunc = CLJSoftShiftFunction()
    elif cutoff_method.val.find("reaction field") != -1:
        cljfunc = CLJSoftRFFunction()
        cljfunc.setDielectric(rf_dielectric.val)
    else:
        raise ValueError("Cannot interpret the cutoff method from \"%s\"" % cutoff_method.val)

    # alpha starts at 0 (fully hard); the simulation scales it later.
    cljfunc.setAlpha(0.0)
    cljfunc.setShiftDelta(shift_delta.val)
    cljfunc.setCoulombPower(coul_power.val)

    return setCLJFuncProperties(cljfunc)
def getIntraCLJFunction():
    """Return the intramolecular CLJ function implementing the chosen
    cutoff scheme, fully configured via setCLJFuncProperties().

    Raises
    ------
    ValueError
        If the 'cutoff method' parameter cannot be interpreted. (The
        original code only printed a warning and then crashed with a
        NameError on the unbound local `cljfunc`.)
    """
    if cutoff_method.val.find("shift electrostatics") != -1:
        cljfunc = CLJIntraShiftFunction()
    elif cutoff_method.val.find("reaction field") != -1:
        cljfunc = CLJIntraRFFunction()
        cljfunc.setDielectric(rf_dielectric.val)
    else:
        raise ValueError("Cannot interpret the cutoff method from \"%s\"" % cutoff_method.val)

    return setCLJFuncProperties(cljfunc)
def getSoftIntraCLJFunction():
    """Return the soft-core intramolecular CLJ function for the chosen
    cutoff scheme, initialised with alpha = 0 and the user-supplied
    soft-core shift-delta and coulomb-power parameters.

    Raises
    ------
    ValueError
        If the 'cutoff method' parameter cannot be interpreted. (The
        original code only printed a warning and then crashed with a
        NameError on the unbound local `cljfunc`.)
    """
    if cutoff_method.val.find("shift electrostatics") != -1:
        cljfunc = CLJSoftIntraShiftFunction()
    elif cutoff_method.val.find("reaction field") != -1:
        cljfunc = CLJSoftIntraRFFunction()
        cljfunc.setDielectric(rf_dielectric.val)
    else:
        raise ValueError("Cannot interpret the cutoff method from \"%s\"" % cutoff_method.val)

    # alpha starts at 0 (fully hard); the simulation scales it later.
    cljfunc.setAlpha(0.0)
    cljfunc.setShiftDelta(shift_delta.val)
    cljfunc.setCoulombPower(coul_power.val)

    return setCLJFuncProperties(cljfunc)
def setNewGridProperties(forcefield, extra_buffer=0*angstrom):
    """Enable the energy grid on a new-style forcefield (or disable it
    when the 'disable grid' parameter is set) and apply the configured
    spacing and buffer. *extra_buffer* is added on top of the standard
    grid buffer. Returns the forcefield."""
    if disable_grid.val:
        forcefield.disableGrid()
        return forcefield

    forcefield.enableGrid()
    forcefield.setGridSpacing(grid_spacing.val)
    forcefield.setGridBuffer(grid_buffer.val + extra_buffer)
    return forcefield
def setFakeGridProperties(forcefield):
    """Configure a non-gridded forcefield so that it mimics the gridded
    setup: infinite cartesian space plus harmonic switching at the
    coulomb/LJ cutoff distances. Returns the forcefield."""
    forcefield.setSpace(Cartesian())
    forcefield.setSwitchingFunction(
        HarmonicSwitchingFunction(coul_cutoff.val, coul_cutoff.val,
                                  lj_cutoff.val, lj_cutoff.val))
    return forcefield
def setGridProperties(forcefield, extra_buffer=0*angstrom):
    """Set the grid spacing, buffer and the coulomb/LJ cutoffs on an
    old-style grid forcefield. *extra_buffer* widens the configured
    buffer. Returns the forcefield."""
    forcefield.setGridSpacing(grid_spacing.val)
    forcefield.setBuffer(grid_buffer.val + extra_buffer)
    forcefield.setCoulombCutoff(coul_cutoff.val)
    forcefield.setLJCutoff(lj_cutoff.val)
    return forcefield
def setSoftCoreProperties(forcefield):
    """Apply the user-supplied soft-core coulomb-power and shift-delta
    parameters to *forcefield* and return it."""
    forcefield.setShiftDelta(shift_delta.val)
    forcefield.setCoulombPower(coul_power.val)
    return forcefield
def createLSRCMoves(system):
    """Create the weighted set of Monte Carlo moves used to sample the
    LSRC system.

    Builds zmatrix moves for protein sidechains, small rigid-body moves
    for protein backbones, synchronised rigid-body plus internal moves
    for the dual-topology ligand pair, and rigid-body/internal moves for
    mobile solutes and solvent — all at the configured temperature and
    with a reproducible (or freshly generated) random number seed.
    """
    # pull out all of the molecule groups for the mobile parts of the system
    mobile_solvent = system[MGName("mobile_solvent")]
    mobile_sidechains = system[MGName("mobile_sidechains")]
    mobile_backbones = system[MGName("mobile_backbones")]
    mobile_solutes = system[MGName("mobile_solutes")]
    mobile_ligand = system[MGName("mobile_ligand")]

    print("Creating the Monte Carlo moves to sample the LSRC system...")

    # create the global set of moves that will be applied to the system
    moves = WeightedMoves()

    # create zmatrix moves to move the protein sidechains
    if mobile_sidechains.nViews() > 0:
        sc_moves = ZMatMove(mobile_sidechains)
        moves.add(sc_moves, mobile_sidechains.nViews())

    if mobile_backbones.nViews() > 0:
        bb_moves = RigidBodyMC(mobile_backbones)
        bb_moves.setCenterOfRotation(GetCOGPoint(AtomName("CA", CaseInsensitive),
                                                 AtomName("N", CaseInsensitive)))
        bb_moves.setMaximumTranslation(0.030*angstrom)
        bb_moves.setMaximumRotation(1.0*degrees)
        moves.add(bb_moves, mobile_backbones.nViews())

    if not use_fixed_ligand.val:
        if mobile_ligand.nViews() > 0:
            scale_moves = 10

            # get the amount to translate and rotate from the ligand's
            # flexibility object
            flex = mobile_ligand.moleculeAt(0)[0].molecule().property("flexibility")

            if use_rot_trans_ligand.val:
                if (flex.translation().value() != 0 or flex.rotation().value() != 0):
                    rb_moves = RigidBodyMC(mobile_ligand)
                    rb_moves.setMaximumTranslation(flex.translation())
                    rb_moves.setMaximumRotation(flex.rotation())
                    # both topologies must move together so that the
                    # dual-topology ligands stay superimposed
                    rb_moves.setSynchronisedTranslation(True)
                    rb_moves.setSynchronisedRotation(True)
                    rb_moves.setSharedRotationCenter(True)
                    scale_moves = scale_moves / 2
                    moves.add(rb_moves, scale_moves * mobile_ligand.nViews())

            intra_moves = InternalMove(mobile_ligand)
            moves.add(intra_moves, scale_moves * mobile_ligand.nViews())

    if mobile_solutes.nViews() > 0:
        rb_moves = RigidBodyMC(mobile_solutes)

        if system.containsProperty("average solute translation delta"):
            translation_delta = float(str(system.property("average solute translation delta")))
        else:
            translation_delta = 0

        if system.containsProperty("average solute rotation delta"):
            rotation_delta = float(str(system.property("average solute rotation delta")))
        else:
            rotation_delta = 0

        if translation_delta > 0 and rotation_delta > 0:
            rb_moves.setMaximumTranslation(translation_delta * angstroms)
            rb_moves.setMaximumRotation(rotation_delta * degrees)

            if system.containsProperty("reflection sphere radius"):
                reflection_radius = float(str(system.property("reflection sphere radius"))) * angstroms
                reflection_center = system.property("reflection center").toVector()[0]
                rb_moves.setReflectionSphere(reflection_center, reflection_radius)

            moves.add(rb_moves, 4 * mobile_solutes.nViews())

        # FIX: the original built InternalMove(solute_group), but
        # `solute_group` is not defined anywhere in this function, so
        # reaching this line raised a NameError. The intended group is
        # the mobile solutes.
        intra_moves = InternalMove(mobile_solutes)
        moves.add(intra_moves, 4 * mobile_solutes.nViews())

    max_water_translation = 0.15 * angstroms
    max_water_rotation = 15 * degrees

    if mobile_solvent.nViews() > 0:
        rb_moves = RigidBodyMC(mobile_solvent)
        rb_moves.setMaximumTranslation(max_water_translation)
        rb_moves.setMaximumRotation(max_water_rotation)

        if system.containsProperty("reflection sphere radius"):
            reflection_radius = float(str(system.property("reflection sphere radius"))) * angstroms
            reflection_center = system.property("reflection center").toVector()[0]
            rb_moves.setReflectionSphere(reflection_center, reflection_radius)

        moves.add(rb_moves, 4 * mobile_solvent.nViews())

    moves.setTemperature(temperature.val)

    seed = random_seed.val

    if seed is None:
        seed = RanGenerator().randInt(100000, 1000000)
        print("Using generated random number seed %d" % seed)
    else:
        print("Using supplied random number seed %d" % seed)

    moves.setGenerator(RanGenerator(seed))

    return moves
def createStage(system, protein_system, ligand_mol0, ligand_mol1, water_system, mapper, stage):
# align ligand1 against ligand0
ligand_mol1 = ligand_mol1.move().align(ligand_mol0, AtomMatchInverter(mapper))
# write out the aligned ligands to a PDB file
mols = Molecules()
mols.add(ligand_mol0)
mols.add(ligand_mol1)
print("\nPLEASE CHECK: Writing alignment of ligands to the file aligned_ligands.pdb.")
print("PLEASE CHECK: View this file in a PDB viewer to check that the ligands are aligned.\n")
PDB().write(mols, "aligned_ligands.pdb")
# create a molecule group for the ligand
ligand_group0 = MoleculeGroup("ligand0")
ligand_group0.add(ligand_mol0)
ligand_group1 = MoleculeGroup("ligand1")
ligand_group1.add(ligand_mol1)
system.add(ligand_group0)
system.add(ligand_group1)
bound_leg = MoleculeGroup("bound_leg")
free_leg = MoleculeGroup("free_leg")
bound_leg.add(ligand_mol0)
bound_leg.add(ligand_mol1)
free_leg.add(ligand_mol0)
free_leg.add(ligand_mol1)
# pull out the groups that we want from the two systems
# create a group to hold all of the mobile water molecules in the free leg
mobile_free_water_group = MoleculeGroup("mobile_free")
if water_system:
water_mol = None
if MGName("mobile_solvents") in water_system.mgNames():
mols = water_system[MGName("mobile_solvents")].molecules()
for molnum in mols.molNums():
# only add this water if it doesn't overlap with ligand1
water_mol = mols[molnum][0].molecule().edit().renumber().commit()
if getMinimumDistance(ligand_mol1,water_mol) > 1.5:
for j in range(0,water_mol.nResidues()):
water_mol = water_mol.residue( ResIdx(j) ).edit() \
.setProperty( PDB.parameters().pdbResidueName(), "FWT" ) \
.commit().molecule()
mobile_free_water_group.add(water_mol)
# create a group to hold all of the fixed water molecules in the free leg
fixed_free_water_group = MoleculeGroup("fixed_free")
if water_system:
if MGName("fixed_molecules") in water_system.mgNames():
mols = water_system[MGName("fixed_molecules")].molecules()
for molnum in mols.molNums():
fixed_free_water_group.add( mols[molnum][0].molecule().edit().renumber().commit() )
# create a group to hold all of the fixed molecules in the bound leg
fixed_bound_group = MoleculeGroup("fixed_bound")
if MGName("fixed_molecules") in protein_system.mgNames():
fixed_bound_group.add( protein_system[ MGName("fixed_molecules") ] )
# create a group to hold all of the mobile solute molecules in the bound leg
mobile_bound_solutes_group = MoleculeGroup("mobile_bound_solutes")
if MGName("mobile_solutes") in protein_system.mgNames():
mobile_bound_solutes_group.add( protein_system[MGName("mobile_solutes")] )
mobile_bound_solutes_group.remove(ligand_mol0)
mobile_bound_solutes_group.remove(ligand_mol1)
if mobile_bound_solutes_group.nMolecules() > 0:
bound_leg.add(mobile_bound_solutes_group)
# create a group to hold all of the mobile solvent molecules in the bound leg
mobile_bound_solvents_group = MoleculeGroup("mobile_bound_solvents")
mobile_bound_water_group = MoleculeGroup("mobile_bound_water")
if MGName("mobile_solvents") in protein_system.mgNames():
mols = protein_system[MGName("mobile_solvents")]
for molnum in mols.molNums():
solvent_mol = mols[molnum][0].molecule()
try:
# this is a water molecule if we can swap the coordinates with the
# water molecule from the water box
water_mol.edit().setProperty("coordinates", \
solvent_mol.property("coordinates"))
for j in range(0,solvent_mol.nResidues()):
solvent_mol = solvent_mol.residue( ResIdx(j) ).edit() \
.setProperty( PDB.parameters().pdbResidueName(), "BWT" ) \
.commit().molecule()
mobile_bound_solvents_group.add(solvent_mol)
mobile_bound_water_group.add(solvent_mol)
except:
# the test molecule is not compatible, so it is not
# compatible with the water in the water box
mobile_bound_solvents_group.add(solvent_mol)
print("The number of bound leg mobile solvent molecules is %d." % mobile_bound_solvents_group.nMolecules())
print("The number of these which are compatible water molecules is %d." % mobile_bound_water_group.nMolecules())
# create the groups to hold all of the protein molecules. We will use "extract" to
# pull out only those protein atoms that are in the mobile region
bound_protein_intra_group = MoleculeGroup("bound_protein_intra_group")
mobile_bound_proteins_group = MoleculeGroup("mobile_bound_proteins")
mobile_bound_protein_sidechains_group = MoleculeGroup("mobile_bound_protein_sidechains")
mobile_bound_protein_backbones_group = MoleculeGroup("mobile_bound_protein_backbones")
if MGName("protein_sidechains") in protein_system.mgNames() or \
MGName("protein_backbones") in protein_system.mgNames():
all_proteins = Molecules()
try:
protein_sidechains = protein_system[MGName("protein_sidechains")]
all_proteins.add(protein_sidechains.molecules())
except:
protein_sidechains = MoleculeGroup()
try:
protein_backbones = protein_system[MGName("protein_backbones")]
all_proteins.add(protein_backbones.molecules())
except:
protein_backbones = MoleculeGroup()
try:
boundary_molecules = protein_system[MGName("boundary_molecules")]
all_proteins.add(boundary_molecules.molecules())
except:
boundary_molecules = MoleculeGroup()
for molnum in all_proteins.molNums():
protein_mol = Molecule.join(all_proteins[molnum])
if protein_mol.selectedAll():
bound_protein_intra_group.add(protein_mol)
bound_leg.add(protein_mol)
mobile_protein = []
if protein_sidechains.contains(molnum):
sidechains = protein_sidechains[molnum]
for sidechain in sidechains:
mobile_bound_protein_sidechains_group.add( sidechain )
mobile_protein += sidechains
if protein_backbones.contains(molnum):
backbones = protein_backbones[molnum]
for backbone in backbones:
mobile_bound_protein_backbones_group.add( backbone )
mobile_protein += backbones
if len(mobile_protein) > 0:
mobile_bound_proteins_group.add( Molecule.join(mobile_protein) )
else:
# only some of the atoms have been selected. We will extract
# the mobile atoms and will then update all of the other selections
print("Extracting the mobile atoms of protein %s" % protein_mol.molecule())
new_protein_mol = protein_mol.extract()
print("Extracted %d mobile atoms from %d total atoms..." % \
(new_protein_mol.nAtoms(), protein_mol.molecule().nAtoms()))
bound_protein_intra_group.add(new_protein_mol)
bound_leg.add( new_protein_mol )
mobile_protein_view = new_protein_mol.selection()
mobile_protein_view = mobile_protein_view.selectNone()
if protein_sidechains.contains(molnum):
sidechains = protein_sidechains[molnum]
for sidechain in sidechains:
view = new_protein_mol.selection()
view = view.selectNone()
for atomid in sidechain.selection().selectedAtoms():
atom = protein_mol.atom(atomid)
resatomid = ResAtomID( atom.residue().number(), atom.name() )
view = view.select( resatomid )
mobile_protein_view = mobile_protein_view.select( resatomid )
if view.nSelected() > 0:
mobile_bound_protein_sidechains_group.add( PartialMolecule(new_protein_mol, view) )
if protein_backbones.contains(molnum):
backbones = protein_backbones[molnum]
for backbone in backbones:
view = new_protein_mol.selection()
view = view.selectNone()
for atomid in backbone.selection().selectedAtoms():
atom = protein_mol.atom(atomid)
resatomid = ResAtomID( atom.residue().number(), atom.name() )
view = view.select( resatomid )
mobile_protein_view = mobile_protein_view.select( resatomid )
if view.nSelected() > 0:
mobile_bound_protein_backbones_group.add( PartialMolecule(new_protein_mol, view) )
print("Number of moved protein sidechain residues = %s" % mobile_bound_protein_sidechains_group.nViews())
print("Number of moved protein backbone residues = %s" % mobile_bound_protein_backbones_group.nViews())
if mobile_protein_view.nSelected() > 0:
mobile_bound_proteins_group.add( PartialMolecule(new_protein_mol, mobile_protein_view) )
# finished adding in all of the protein groups
bound_leg.add(mobile_bound_solvents_group)
free_leg.add(mobile_free_water_group)
system.add(bound_leg)
system.add(free_leg)
# now add in the forcefields for the system...
print("Creating the forcefields for the WSRC system...")
# first, group together the molecules grouped above into convenient
# groups for the forcefields
# group holding all of the mobile atoms in the bound leg
mobile_bound_mols = mobile_bound_solvents_group.molecules()
mobile_bound_mols.add( mobile_bound_solutes_group.molecules() )
mobile_bound_mols.add( bound_protein_intra_group.molecules() )
# group holding all of the mobile atoms in the bound leg, excluding the
# buffer atoms that are fixed, but bonded to mobile atoms
mobile_buffered_bound_mols = mobile_bound_solvents_group.molecules()
mobile_buffered_bound_mols.add( mobile_bound_solutes_group.molecules() )
mobile_buffered_bound_mols.add( mobile_bound_proteins_group.molecules() )
# group holding all of the mobile water molecules in the free leg
mobile_free_mols = mobile_free_water_group.molecules()
# group holding all of the fixed water molecules in the free leg
fixed_free_group = fixed_free_water_group
# group holding all of the protein molecules that need intramolecular terms calculated
bound_protein_intra_mols = bound_protein_intra_group.molecules()
# group holding all of the solute molecules that need intramolecular terms calculated
bound_solute_intra_mols = mobile_bound_solutes_group.molecules()
###
### INTRA-ENERGY OF THE LIGAND AND CLUSTER
###
if use_oldff.val:
# intramolecular energy of the ligands
ligand_intraclj = IntraCLJFF("ligand:intraclj")
ligand_intraclj = setCLJProperties(ligand_intraclj)
ligand_intraclj.add(ligand_mol0)
ligand_intraclj.add(ligand_mol1)
ligand_intraff = InternalFF("ligand:intra")
ligand_intraff.setUse14Calculation(False)
ligand_intraff.add(ligand_mol0)
ligand_intraff.add(ligand_mol1)
else:
print("Using the NEW PARALLEL FORCEFIELDS :-)")
# intramolecular energy of the ligands
ligand_intraclj = IntraFF("ligand:intraclj")
ligand_intraclj.setCLJFunction( getIntraCLJFunction() )
ligand_intraclj.add(ligand_mol0)
ligand_intraclj.add(ligand_mol1)
ligand_intraff = InternalFF("ligand:intra")
ligand_intraff.setUse14Calculation(True)
ligand_intraff.add(ligand_mol0)
ligand_intraff.add(ligand_mol1)
###
### FORCEFIELDS INVOLVING THE LIGAND/CLUSTER BOUND LEG
###
# forcefield holding the energy between the ligand and the mobile atoms in the
# bound leg
if use_oldff.val:
bound_ligand0_mobile = InterGroupSoftCLJFF("bound:ligand0-mobile")
bound_ligand0_mobile = setCLJProperties(bound_ligand0_mobile)
bound_ligand0_mobile = setSoftCoreProperties(bound_ligand0_mobile)
bound_ligand0_mobile.add(ligand_mol0, MGIdx(0))
bound_ligand0_mobile.add(mobile_bound_mols, MGIdx(1))
bound_ligand1_mobile = InterGroupSoftCLJFF("bound:ligand1-mobile")
bound_ligand1_mobile = setCLJProperties(bound_ligand1_mobile)
bound_ligand1_mobile = setSoftCoreProperties(bound_ligand1_mobile)
bound_ligand1_mobile.add(ligand_mol1, MGIdx(0))
bound_ligand1_mobile.add(mobile_bound_mols, MGIdx(1))
# Whether or not to disable the grid and calculate all energies atomistically
if disable_grid.val:
# we need to renumber all of the fixed molecules so that they don't clash
# with the mobile molecules
print("Renumbering fixed molecules...")
fixed_bound_group = renumberMolecules(fixed_bound_group)
fixed_free_group = renumberMolecules(fixed_free_group)
# forcefield holding the energy between the ligand and the fixed atoms in the bound leg
if disable_grid.val:
bound_ligand0_fixed = InterGroupCLJFF("bound:ligand0-fixed")
bound_ligand0_fixed = setCLJProperties(bound_ligand0_fixed)
bound_ligand0_fixed = setFakeGridProperties(bound_ligand0_fixed)
bound_ligand0_fixed.add(ligand_mol0, MGIdx(0))
bound_ligand0_fixed.add(fixed_bound_group, MGIdx(1))
bound_ligand1_fixed = InterGroupCLJFF("bound:ligand1-fixed")
bound_ligand1_fixed = setCLJProperties(bound_ligand1_fixed)
bound_ligand1_fixed = setFakeGridProperties(bound_ligand1_fixed)
bound_ligand1_fixed.add(ligand_mol1, MGIdx(0))
bound_ligand1_fixed.add(fixed_bound_group, MGIdx(1))
else:
bound_ligand0_fixed = GridFF2("bound:ligand0-fixed")
bound_ligand0_fixed = setCLJProperties(bound_ligand0_fixed)
bound_ligand0_fixed = setGridProperties(bound_ligand0_fixed)
bound_ligand0_fixed.add(ligand_mol0, MGIdx(0))
bound_ligand0_fixed.addFixedAtoms( fixed_bound_group )
bound_ligand1_fixed = GridFF2("bound:ligand1-fixed")
bound_ligand1_fixed = setCLJProperties(bound_ligand1_fixed)
bound_ligand1_fixed = setGridProperties(bound_ligand1_fixed)
bound_ligand1_fixed.add(ligand_mol1, MGIdx(0))
bound_ligand1_fixed.addFixedAtoms( fixed_bound_group )
else:
# Using the new forcefields, that are much easier to work with :-)
bound_ligand0_mobile = InterGroupFF("bound:ligand0-mobile")
bound_ligand0_mobile.setCLJFunction( getSoftInterCLJFunction() )
bound_ligand0_mobile.setCLJFunction( "f", getSoftInterCLJFunction() )
bound_ligand0_mobile.setCLJFunction( "b", getSoftInterCLJFunction() )
bound_ligand0_mobile.setCLJFunction( "next", getSoftInterCLJFunction() )
bound_ligand0_mobile.setCLJFunction( "prev", getSoftInterCLJFunction() )
bound_ligand0_mobile.add(ligand_mol0, MGIdx(0))
bound_ligand0_mobile.add(mobile_bound_mols, MGIdx(1))
bound_ligand0_fixed = InterGroupFF("bound:ligand0-fixed")
bound_ligand0_fixed.setCLJFunction( getInterCLJFunction() )
bound_ligand0_fixed = setNewGridProperties(bound_ligand0_fixed)
bound_ligand0_fixed.add(ligand_mol0, MGIdx(0))
bound_ligand0_fixed.addFixedAtoms(fixed_bound_group.molecules())
bound_ligand1_mobile = InterGroupFF("bound:ligand1-mobile")
bound_ligand1_mobile.setCLJFunction( getSoftInterCLJFunction() )
bound_ligand1_mobile.setCLJFunction( "f", getSoftInterCLJFunction() )
bound_ligand1_mobile.setCLJFunction( "b", getSoftInterCLJFunction() )
bound_ligand1_mobile.setCLJFunction( "next", getSoftInterCLJFunction() )
bound_ligand1_mobile.setCLJFunction( "prev", getSoftInterCLJFunction() )
bound_ligand1_mobile.add(ligand_mol1, MGIdx(0))
bound_ligand1_mobile.add(mobile_bound_mols, MGIdx(1))
bound_ligand1_fixed = InterGroupFF("bound:ligand1-fixed")
bound_ligand1_fixed.setCLJFunction( getInterCLJFunction() )
bound_ligand1_fixed = setNewGridProperties(bound_ligand1_fixed)
bound_ligand1_fixed.add(ligand_mol1, MGIdx(0))
bound_ligand1_fixed.addFixedAtoms(fixed_bound_group.molecules())
###
### FORCEFIELDS INVOLVING THE LIGAND/CLUSTER FREE LEG
###
# forcefield holding the energy between the ligand and the mobile atoms
# in the free leg
if use_oldff.val:
free_ligand0_mobile = InterGroupSoftCLJFF("free:ligand0-mobile")
free_ligand0_mobile = setCLJProperties(free_ligand0_mobile)
free_ligand0_mobile = setSoftCoreProperties(free_ligand0_mobile)
free_ligand0_mobile.add(ligand_mol0, MGIdx(0))
free_ligand0_mobile.add(mobile_free_mols, MGIdx(1))
free_ligand1_mobile = InterGroupSoftCLJFF("free:ligand1-mobile")
free_ligand1_mobile = setCLJProperties(free_ligand1_mobile)
free_ligand1_mobile = setSoftCoreProperties(free_ligand1_mobile)
free_ligand1_mobile.add(ligand_mol1, MGIdx(0))
free_ligand1_mobile.add(mobile_free_mols, MGIdx(1))
# forcefield holding the energy between the ligand and the fixed atoms
# in the free leg
if disable_grid.val:
free_ligand0_fixed = InterGroupCLJFF("free:ligand0_fixed")
free_ligand0_fixed = setCLJProperties(free_ligand0_fixed)
free_ligand0_fixed = setFakeGridProperties(free_ligand0_fixed)
free_ligand0_fixed.add(ligand_mol0, MGIdx(0))
free_ligand0_fixed.add(fixed_free_group, MGIdx(1))
free_ligand1_fixed = InterGroupCLJFF("free:ligand1_fixed")
free_ligand1_fixed = setCLJProperties(free_ligand1_fixed)
free_ligand1_fixed = setFakeGridProperties(free_ligand1_fixed)
free_ligand1_fixed.add(ligand_mol1, MGIdx(0))
free_ligand1_fixed.add(fixed_free_group, MGIdx(1))
else:
free_ligand0_fixed = GridFF2("free:ligand0-fixed")
free_ligand0_fixed = setCLJProperties(free_ligand0_fixed)
free_ligand0_fixed = setGridProperties(free_ligand0_fixed)
free_ligand0_fixed.add(ligand_mol0, MGIdx(0))
free_ligand0_fixed.addFixedAtoms(fixed_free_group)
free_ligand1_fixed = GridFF2("free:ligand1-fixed")
free_ligand1_fixed = setCLJProperties(free_ligand1_fixed)
free_ligand1_fixed = setGridProperties(free_ligand1_fixed)
free_ligand1_fixed.add(ligand_mol1, MGIdx(0))
free_ligand1_fixed.addFixedAtoms(fixed_free_group)
else:
free_ligand0_mobile = InterGroupFF("free:ligand0-mobile")
free_ligand0_mobile.setCLJFunction( getSoftInterCLJFunction() )
free_ligand0_mobile.setCLJFunction( "f", getSoftInterCLJFunction() )
free_ligand0_mobile.setCLJFunction( "b", getSoftInterCLJFunction() )
free_ligand0_mobile.setCLJFunction( "next", getSoftInterCLJFunction() )
free_ligand0_mobile.setCLJFunction( "prev", getSoftInterCLJFunction() )
free_ligand0_mobile.add(ligand_mol0, MGIdx(0))
free_ligand0_mobile.add(mobile_free_mols, MGIdx(1))
free_ligand0_fixed = InterGroupFF("free:ligand0-fixed")
free_ligand0_fixed.setCLJFunction( getInterCLJFunction() )
free_ligand0_fixed = setNewGridProperties(free_ligand0_fixed)
free_ligand0_fixed.add(ligand_mol0, MGIdx(0))
free_ligand0_fixed.addFixedAtoms(fixed_free_group.molecules())
free_ligand1_mobile = InterGroupFF("free:ligand1-mobile")
free_ligand1_mobile.setCLJFunction( getSoftInterCLJFunction() )
free_ligand1_mobile.setCLJFunction( "f", getSoftInterCLJFunction() )
free_ligand1_mobile.setCLJFunction( "b", getSoftInterCLJFunction() )
free_ligand1_mobile.setCLJFunction( "next", getSoftInterCLJFunction() )
free_ligand1_mobile.setCLJFunction( "prev", getSoftInterCLJFunction() )
free_ligand1_mobile.add(ligand_mol1, MGIdx(0))
free_ligand1_mobile.add(mobile_free_mols, MGIdx(1))
free_ligand1_fixed = InterGroupFF("free:ligand1-fixed")
free_ligand1_fixed.setCLJFunction( getInterCLJFunction() )
free_ligand1_fixed = setNewGridProperties(free_ligand1_fixed)
free_ligand1_fixed.add(ligand_mol1, MGIdx(0))
free_ligand1_fixed.addFixedAtoms(fixed_free_group.molecules())
###
### FORCEFIELDS LOCAL ONLY TO THE BOUND LEG
###
bound_forcefields = []
if use_oldff.val:
# forcefield holding the energy between the bound leg mobile atoms and
# the bound leg fixed atoms
if disable_grid.val:
bound_mobile_fixed = InterGroupCLJFF("bound:mobile-fixed")
bound_mobile_fixed = setCLJProperties(bound_mobile_fixed)
bound_mobile_fixed = setFakeGridProperties(bound_mobile_fixed)
bound_mobile_fixed.add(mobile_buffered_bound_mols, MGIdx(0))
bound_mobile_fixed.add(fixed_bound_group, MGIdx(1))
bound_forcefields.append(bound_mobile_fixed)
else:
bound_mobile_fixed = GridFF2("bound:mobile-fixed")
bound_mobile_fixed = setCLJProperties(bound_mobile_fixed)
bound_mobile_fixed = setGridProperties(bound_mobile_fixed)
# we use mobile_buffered_bound_group as this group misses out atoms that are bonded
# to fixed atoms (thus preventing large energies caused by incorrect non-bonded calculations)
bound_mobile_fixed.add(mobile_buffered_bound_mols, MGIdx(0))
bound_mobile_fixed.addFixedAtoms(fixed_bound_group)
bound_forcefields.append(bound_mobile_fixed)
# forcefield holding the intermolecular energy between all bound molecules
bound_mobile_mobile = InterCLJFF("bound:mobile-mobile")
bound_mobile_mobile = setCLJProperties(bound_mobile_mobile)
bound_mobile_mobile.add(mobile_bound_mols)
bound_forcefields.append(bound_mobile_mobile)
else:
# forcefield holding the energy between
# the bound molecules and bound fixed atoms
bound_mobile_fixed = InterGroupFF("bound:mobile-fixed")
bound_mobile_fixed.setCLJFunction( getInterCLJFunction() )
bound_mobile_fixed = setNewGridProperties(bound_mobile_fixed)
# we use mobile_buffered_bound_group as this group misses out atoms that are bonded
# to fixed atoms (thus preventing large energies caused by incorrect non-bonded calculations)
bound_mobile_fixed.add(mobile_buffered_bound_mols, MGIdx(0))
bound_mobile_fixed.addFixedAtoms(fixed_bound_group.molecules())
bound_forcefields.append(bound_mobile_fixed)
# forcefield holding the energy between all bound mobile molecules
bound_mobile_mobile = InterFF("bound:mobile-mobile")
bound_mobile_mobile.setCLJFunction( getInterCLJFunction() )
bound_mobile_mobile.add(mobile_bound_mols)
bound_forcefields.append(bound_mobile_mobile)
# intramolecular energy of the protein
if bound_protein_intra_mols.nMolecules() > 0:
if use_oldff.val:
protein_intraclj = IntraCLJFF("bound:protein_intraclj")
protein_intraclj = setCLJProperties(protein_intraclj)
protein_intraff = InternalFF("bound:protein_intra")
for molnum in bound_protein_intra_mols.molNums():
protein_mol = Molecule.join(bound_protein_intra_mols[molnum])
protein_intraclj.add(protein_mol)
protein_intraff.add(protein_mol)
bound_forcefields.append(protein_intraclj)
bound_forcefields.append(protein_intraff)
else:
protein_intraclj = IntraFF("bound:protein_intraclj")
protein_intraclj.setCLJFunction( getIntraCLJFunction() )
protein_intraff = InternalFF("bound:protein_intra")
protein_intraff.setUse14Calculation(True)
for molnum in bound_protein_intra_mols.molNums():
protein_mol = Molecule.join(bound_protein_intra_mols[molnum])
protein_intraclj.add(protein_mol)
protein_intraff.add(protein_mol)
bound_forcefields.append(protein_intraclj)
bound_forcefields.append(protein_intraff)
# intramolecular energy of any other solutes
if bound_solute_intra_mols.nMolecules() > 0:
if use_oldff.val:
solute_intraclj = IntraCLJFF("bound:solute_intraclj")
solute_intraclj = setCLJProperties(solute_intraclj)
solute_intraff = InternalFF("bound:solute_intra")
for molnum in bound_solute_intra_mols.molNums():
solute_mol = Molecule.join(bound_solute_intra_mols[molnum])
solute_intraclj.add(solute_mol)
solute_intraff.add(solute_mol)
bound_forcefields.append(solute_intraclj)
bound_forcefields.append(solute_intraff)
else:
solute_intraclj = IntraFF("bound:solute_intraclj")
solute_intraclj.setCLJFunction( getIntraCLJFunction() )
solute_intraff = InternalFF("bound:solute_intra")
solute_intraff.setUse14Calculation(True)
for molnum in bound_solute_intra_mols.molNums():
solute_mol = Molecule.join(bound_solute_intra_mols[molnum])
solute_intraclj.add(solute_mol)
solute_intraff.add(solute_mol)
bound_forcefields.append(solute_intraclj)
bound_forcefields.append(solute_intraff)
###
### FORCEFIELDS LOCAL ONLY TO THE FREE LEG
###
free_forcefields = []
if use_oldff.val:
# forcefield holding the energy between the mobile free molecules and the
# fixed free molecules
if disable_grid:
free_mobile_fixed = InterGroupCLJFF("free:mobile-fixed")
free_mobile_fixed = setCLJProperties(free_mobile_fixed)
free_mobile_fixed = setFakeGridProperties(free_mobile_fixed)
free_mobile_fixed.add(mobile_free_mols, MGIdx(0))
free_mobile_fixed.add(fixed_free_group, MGIdx(1))
free_forcefields.append(free_mobile_fixed)
else:
free_mobile_fixed = GridFF2("free:mobile-fixed")
free_mobile_fixed = setCLJProperties(free_mobile_fixed)
free_mobile_fixed = setGridProperties(free_mobile_fixed)
free_mobile_fixed.add(mobile_free_mols, MGIdx(0))
free_mobile_fixed.addFixedAtoms(fixed_free_group)
free_forcefields.append(free_mobile_fixed)
# forcefield holding the intermolecular energy between the mobile free molecules
free_mobile_mobile = InterCLJFF("free:mobile-mobile")
free_mobile_mobile = setCLJProperties(free_mobile_mobile)
free_mobile_mobile.add(mobile_free_mols)
free_forcefields.append(free_mobile_mobile)
else:
# forcefield holding the energy between the mobile free molecules, and their
# interaction with the fixed free molecules
free_mobile = InterFF("free:mobile")
free_mobile.setCLJFunction( getInterCLJFunction() )
free_mobile = setNewGridProperties( free_mobile )
free_mobile.add(mobile_free_mols)
free_mobile.addFixedAtoms(fixed_free_group.molecules())
free_forcefields.append(free_mobile)
###
### NOW ADD THE FORCEFIELDS TO THE SYSTEM
###
###
### SETTING THE FORCEFIELD EXPRESSIONS
###
ligand_int_nrg_sym = Symbol("E_{ligand:internal}")
ligand0_bound_nrg_sym = Symbol("E_{ligand0:bound}")
ligand0_bound_nrg_f_sym = Symbol("E_{ligand0:bound_{f}}")
ligand0_bound_nrg_b_sym = Symbol("E_{ligand0:bound_{b}}")
ligand0_bound_nrg_next_sym = Symbol("E_{ligand0:bound_{next}}")
ligand0_bound_nrg_prev_sym = Symbol("E_{ligand0:bound_{prev}}")
ligand1_bound_nrg_sym = Symbol("E_{ligand1:bound}")
ligand1_bound_nrg_f_sym = Symbol("E_{ligand1:bound_{f}}")
ligand1_bound_nrg_b_sym = Symbol("E_{ligand1:bound_{b}}")
ligand1_bound_nrg_next_sym = Symbol("E_{ligand1:bound_{next}}")
ligand1_bound_nrg_prev_sym = Symbol("E_{ligand1:bound_{prev}}")
ligand0_free_nrg_sym = Symbol("E_{ligand0:free}")
ligand0_free_nrg_f_sym = Symbol("E_{ligand0:free_{f}}")
ligand0_free_nrg_b_sym = Symbol("E_{ligand0:free_{b}}")
ligand0_free_nrg_next_sym = Symbol("E_{ligand0:free_{next}}")
ligand0_free_nrg_prev_sym = Symbol("E_{ligand0:free_{prev}}")
ligand1_free_nrg_sym = Symbol("E_{ligand1:free}")
ligand1_free_nrg_f_sym = Symbol("E_{ligand1:free_{f}}")
ligand1_free_nrg_b_sym = Symbol("E_{ligand1:free_{b}}")
ligand1_free_nrg_next_sym = Symbol("E_{ligand1:free_{next}}")
ligand1_free_nrg_prev_sym = Symbol("E_{ligand1:free_{prev}}")
ligand_int_nrg = ligand_intraclj.components().total() + \
ligand_intraff.components().total()
bound_ligand0_fixed_nrg = bound_ligand0_fixed.components().total()
free_ligand0_fixed_nrg = free_ligand0_fixed.components().total()
bound_ligand1_fixed_nrg = bound_ligand1_fixed.components().total()
free_ligand1_fixed_nrg = free_ligand1_fixed.components().total()
if use_oldff.val:
ligand0_bound_nrg = bound_ligand0_mobile.components().total(0) + \
bound_ligand0_fixed_nrg
ligand0_bound_nrg_f = bound_ligand0_mobile.components().total(1) + \
bound_ligand0_fixed_nrg
ligand0_bound_nrg_b = bound_ligand0_mobile.components().total(2) + \
bound_ligand0_fixed_nrg
ligand0_bound_nrg_next = bound_ligand0_mobile.components().total(3) + \
bound_ligand0_fixed_nrg
ligand0_bound_nrg_prev = bound_ligand0_mobile.components().total(4) + \
bound_ligand0_fixed_nrg
ligand1_bound_nrg = bound_ligand1_mobile.components().total(0) + \
bound_ligand1_fixed_nrg
ligand1_bound_nrg_f = bound_ligand1_mobile.components().total(1) + \
bound_ligand1_fixed_nrg
ligand1_bound_nrg_b = bound_ligand1_mobile.components().total(2) + \
bound_ligand1_fixed_nrg
ligand1_bound_nrg_next = bound_ligand1_mobile.components().total(3) + \
bound_ligand1_fixed_nrg
ligand1_bound_nrg_prev = bound_ligand1_mobile.components().total(4) + \
bound_ligand1_fixed_nrg
ligand0_free_nrg = free_ligand0_mobile.components().total(0) + \
free_ligand0_fixed_nrg
ligand0_free_nrg_f = free_ligand0_mobile.components().total(1) + \
free_ligand0_fixed_nrg
ligand0_free_nrg_b = free_ligand0_mobile.components().total(2) + \
free_ligand0_fixed_nrg
ligand0_free_nrg_next = free_ligand0_mobile.components().total(3) + \
free_ligand0_fixed_nrg
ligand0_free_nrg_prev = free_ligand0_mobile.components().total(4) + \
free_ligand0_fixed_nrg
ligand1_free_nrg = free_ligand1_mobile.components().total(0) + \
free_ligand1_fixed_nrg
ligand1_free_nrg_f = free_ligand1_mobile.components().total(1) + \
free_ligand1_fixed_nrg
ligand1_free_nrg_b = free_ligand1_mobile.components().total(2) + \
free_ligand1_fixed_nrg
ligand1_free_nrg_next = free_ligand1_mobile.components().total(3) + \
free_ligand1_fixed_nrg
ligand1_free_nrg_prev = free_ligand1_mobile.components().total(4) + \
free_ligand1_fixed_nrg
else:
ligand0_bound_nrg = bound_ligand0_mobile.components().total() + \
bound_ligand0_fixed_nrg
ligand0_bound_nrg_f = bound_ligand0_mobile.components().total("f") + \
bound_ligand0_fixed_nrg
ligand0_bound_nrg_b = bound_ligand0_mobile.components().total("b") + \
bound_ligand0_fixed_nrg
ligand0_bound_nrg_next = bound_ligand0_mobile.components().total("next") + \
bound_ligand0_fixed_nrg
ligand0_bound_nrg_prev = bound_ligand0_mobile.components().total("prev") + \
bound_ligand0_fixed_nrg
ligand1_bound_nrg = bound_ligand1_mobile.components().total() + \
bound_ligand1_fixed_nrg
ligand1_bound_nrg_f = bound_ligand1_mobile.components().total("f") + \
bound_ligand1_fixed_nrg
ligand1_bound_nrg_b = bound_ligand1_mobile.components().total("b") + \
bound_ligand1_fixed_nrg
ligand1_bound_nrg_next = bound_ligand1_mobile.components().total("next") + \
bound_ligand1_fixed_nrg
ligand1_bound_nrg_prev = bound_ligand1_mobile.components().total("prev") + \
bound_ligand1_fixed_nrg
ligand0_free_nrg = free_ligand0_mobile.components().total() + \
free_ligand0_fixed_nrg
ligand0_free_nrg_f = free_ligand0_mobile.components().total("f") + \
free_ligand0_fixed_nrg
ligand0_free_nrg_b = free_ligand0_mobile.components().total("b") + \
free_ligand0_fixed_nrg
ligand0_free_nrg_next = free_ligand0_mobile.components().total("next") + \
free_ligand0_fixed_nrg
ligand0_free_nrg_prev = free_ligand0_mobile.components().total("prev") + \
free_ligand0_fixed_nrg
ligand1_free_nrg = free_ligand1_mobile.components().total() + \
free_ligand1_fixed_nrg
ligand1_free_nrg_f = free_ligand1_mobile.components().total("f") + \
free_ligand1_fixed_nrg
ligand1_free_nrg_b = free_ligand1_mobile.components().total("b") + \
free_ligand1_fixed_nrg
ligand1_free_nrg_next = free_ligand1_mobile.components().total("next") + \
free_ligand1_fixed_nrg
ligand1_free_nrg_prev = free_ligand1_mobile.components().total("prev") + \
free_ligand1_fixed_nrg
lam = Symbol("lambda")
lam_f = Symbol("lambda_{f}")
lam_b = Symbol("lambda_{b}")
lam_next = Symbol("lambda_{next}")
lam_prev = Symbol("lambda_{prev}")
system.add(ligand_intraclj)
system.add(ligand_intraff)
system.add(bound_ligand0_mobile)
system.add(free_ligand0_mobile)
system.add(bound_ligand1_mobile)
system.add(free_ligand1_mobile)
system.add(bound_ligand0_fixed)
system.add(free_ligand0_fixed)
system.add(bound_ligand1_fixed)
system.add(free_ligand1_fixed)
system.setConstant(lam, 0.0)
system.setConstant(lam_f, 0.0)
system.setConstant(lam_b, 0.0)
system.setConstant(lam_next, 0.0)
system.setConstant(lam_prev, 0.0)
system.setComponent(ligand_int_nrg_sym, ligand_int_nrg)
system.setComponent(ligand0_bound_nrg_sym, ligand0_bound_nrg)
system.setComponent(ligand0_bound_nrg_f_sym, ligand0_bound_nrg_f)
system.setComponent(ligand0_bound_nrg_b_sym, ligand0_bound_nrg_b)
system.setComponent(ligand0_bound_nrg_next_sym, ligand0_bound_nrg_next)
system.setComponent(ligand0_bound_nrg_prev_sym, ligand0_bound_nrg_prev)
system.setComponent(ligand1_bound_nrg_sym, ligand1_bound_nrg)
system.setComponent(ligand1_bound_nrg_f_sym, ligand1_bound_nrg_f)
system.setComponent(ligand1_bound_nrg_b_sym, ligand1_bound_nrg_b)
system.setComponent(ligand1_bound_nrg_next_sym, ligand1_bound_nrg_next)
system.setComponent(ligand1_bound_nrg_prev_sym, ligand1_bound_nrg_prev)
system.setComponent(ligand0_free_nrg_sym, ligand0_free_nrg)
system.setComponent(ligand0_free_nrg_f_sym, ligand0_free_nrg_f)
system.setComponent(ligand0_free_nrg_b_sym, ligand0_free_nrg_b)
system.setComponent(ligand0_free_nrg_next_sym, ligand0_free_nrg_next)
system.setComponent(ligand0_free_nrg_prev_sym, ligand0_free_nrg_prev)
system.setComponent(ligand1_free_nrg_sym, ligand1_free_nrg)
system.setComponent(ligand1_free_nrg_f_sym, ligand1_free_nrg_f)
system.setComponent(ligand1_free_nrg_b_sym, ligand1_free_nrg_b)
system.setComponent(ligand1_free_nrg_next_sym, ligand1_free_nrg_next)
system.setComponent(ligand1_free_nrg_prev_sym, ligand1_free_nrg_prev)
bound_bound_nrg_sym = Symbol("E_{bound-bound}")
bound_bound_nrg = None
for bound_forcefield in bound_forcefields:
if bound_bound_nrg is None:
bound_bound_nrg = bound_forcefield.components().total()
else:
bound_bound_nrg = bound_bound_nrg + bound_forcefield.components().total()
system.add(bound_forcefield)
system.setComponent(bound_bound_nrg_sym, bound_bound_nrg)
free_free_nrg_sym = Symbol("E_{free-free}")
free_free_nrg = None
for free_forcefield in free_forcefields:
if free_free_nrg is None:
free_free_nrg = free_forcefield.components().total()
else:
free_free_nrg = free_free_nrg + free_forcefield.components().total()
system.add(free_forcefield)
system.setComponent(free_free_nrg_sym, free_free_nrg)
bound_nrg_sym = Symbol("E_{bound}")
bound_nrg = ((1-lam) * ligand0_bound_nrg_sym) + (lam * ligand1_bound_nrg_sym)
bound_nrg_f_sym = Symbol("E_{bound_{f}}")
bound_nrg_f = ((1-lam_f) * ligand0_bound_nrg_f_sym) + (lam_f * ligand1_bound_nrg_f_sym)
bound_nrg_b_sym = Symbol("E_{bound_{b}}")
bound_nrg_b = ((1-lam_b) * ligand0_bound_nrg_b_sym) + (lam_b * ligand1_bound_nrg_b_sym)
bound_nrg_next_sym = Symbol("E_{bound_{next}}")
bound_nrg_next = ((1-lam_next) * ligand0_bound_nrg_next_sym) + (lam_next * ligand1_bound_nrg_next_sym)
bound_nrg_prev_sym = Symbol("E_{bound_{prev}}")
bound_nrg_prev = ((1-lam_prev) * ligand0_bound_nrg_prev_sym) + (lam_prev * ligand1_bound_nrg_prev_sym)
free_nrg_sym = Symbol("E_{free}")
free_nrg = (lam * ligand0_free_nrg_sym) + ((1-lam) * ligand1_free_nrg_sym)
free_nrg_f_sym = Symbol("E_{free_{f}}")
free_nrg_f = (lam_f * ligand0_free_nrg_f_sym) + ((1-lam_f) * ligand1_free_nrg_f_sym)
free_nrg_b_sym = Symbol("E_{free_{b}}")
free_nrg_b = (lam_b * ligand0_free_nrg_b_sym) + ((1-lam_b) * ligand1_free_nrg_b_sym)
free_nrg_next_sym = Symbol("E_{free_{next}}")
free_nrg_next = (lam_next * ligand0_free_nrg_next_sym) + ((1-lam_next) * ligand1_free_nrg_next_sym)
free_nrg_prev_sym = Symbol("E_{free_{prev}}")
free_nrg_prev = (lam_prev * ligand0_free_nrg_prev_sym) + ((1-lam_prev) * ligand1_free_nrg_prev_sym)
box_nrg_sym = Symbol("E_{box}")
box_nrg = bound_bound_nrg_sym + free_free_nrg_sym + ligand_int_nrg_sym
total_nrg_sym = system.totalComponent()
total_nrg = bound_nrg_sym + free_nrg_sym + box_nrg_sym
total_nrg_f_sym = Symbol("E_{total_{f}}")
total_nrg_f = bound_nrg_f_sym + free_nrg_f_sym + box_nrg_sym
total_nrg_b_sym = Symbol("E_{total_{b}}")
total_nrg_b = bound_nrg_b_sym + free_nrg_b_sym + box_nrg_sym
total_nrg_next_sym = Symbol("E_{total_{next}}")
total_nrg_next = bound_nrg_next_sym + free_nrg_next_sym + box_nrg_sym
total_nrg_prev_sym = Symbol("E_{total_{prev}}")
total_nrg_prev = bound_nrg_prev_sym + free_nrg_prev_sym + box_nrg_sym
system.setComponent(bound_nrg_sym, bound_nrg)
system.setComponent(bound_nrg_f_sym, bound_nrg_f)
system.setComponent(bound_nrg_b_sym, bound_nrg_b)
system.setComponent(bound_nrg_next_sym, bound_nrg_next)
system.setComponent(bound_nrg_prev_sym, bound_nrg_prev)
system.setComponent(free_nrg_sym, free_nrg)
system.setComponent(free_nrg_f_sym, free_nrg_f)
system.setComponent(free_nrg_b_sym, free_nrg_b)
system.setComponent(free_nrg_next_sym, free_nrg_next)
system.setComponent(free_nrg_prev_sym, free_nrg_prev)
system.setComponent(box_nrg_sym, box_nrg)
system.setComponent(total_nrg_sym, total_nrg)
system.setComponent(total_nrg_f_sym, total_nrg_f)
system.setComponent(total_nrg_b_sym, total_nrg_b)
system.setComponent(total_nrg_next_sym, total_nrg_next)
system.setComponent(total_nrg_prev_sym, total_nrg_prev)
system.setComponent( Symbol("delta_nrg^{F}"), (total_nrg_f_sym - total_nrg_sym) )
system.setComponent( Symbol("delta_nrg^{B}"), (total_nrg_b_sym - total_nrg_sym) )
system.setComponent( Symbol("delta_nrg^{next}"), (total_nrg_next_sym - total_nrg_sym) )
system.setComponent( Symbol("delta_nrg^{prev}"), (total_nrg_prev_sym - total_nrg_sym) )
system.setComponent( Symbol("delta_bound_nrg^{F}"), (bound_nrg_f_sym - bound_nrg_sym) )
system.setComponent( Symbol("delta_bound_nrg^{B}"), (bound_nrg_b_sym - bound_nrg_sym) )
system.setComponent( Symbol("delta_free_nrg^{F}"), (free_nrg_f_sym - free_nrg_sym) )
system.setComponent( Symbol("delta_free_nrg^{B}"), (free_nrg_b_sym - free_nrg_sym) )
# Now add constraints. These are used to keep
# all lambda values between 0 and 1, and to
# map the alpha values of the softcore forcefields to lambda
print("\nCreating LSRC system constraints...\n")
# Add the constraint that lambda_f is lambda + delta_lambda and
# lambda_b is lambda - delta_lambda (kept to between 0 and 1)
dlam = delta_lambda.val
if dlam > 1 or dlam < 0.0000001:
print("WARNING: Weird value of delta_lambda (%s). Setting it to 0.001" % dlam)
dlam = 0.001
# Constrain lam_f and lam_b to lie with delta_lambda of lambda
dlam_sym = Symbol("delta_lambda")
system.setConstant( dlam_sym, dlam )
system.add( ComponentConstraint( lam_f, Min( lam + dlam_sym, 1 ) ) )
system.add( ComponentConstraint( lam_b, Max( lam - dlam_sym, 0 ) ) )
# Constrain lam_next and lam_prev to be equal to the next and previous
# windows lambda values
lamvals = copy.deepcopy( lambda_values.val )
if lamvals[-1] != 1:
lamvals.append(1)
if lamvals[0] != 0:
lamvals.insert(0,0)
system.add( WindowedComponent( lam_next, lam, lamvals, 1 ) )
system.add( WindowedComponent( lam_prev, lam, lamvals, -1 ) )
system.setConstant( lam, lambda_values.val[0] )
# now add alpha variables that can be used by the EnergyMonitors
alpha_on = Symbol("alpha_on")
alpha_off = Symbol("alpha_off")
system.setConstant(alpha_on, 0)
system.setConstant(alpha_off, 1)
system.setConstant( Symbol("alpha_scale"), alpha_scale.val )
system.add( ComponentConstraint( alpha_on, alpha_scale.val * lam ) )
system.add( ComponentConstraint( alpha_off, alpha_scale.val * (1-lam) ) )
# Now constrain alpha to follow lambda
if use_oldff.val:
# First, the reference state (alpha0)
system.add( PropertyConstraint( "alpha0", FFName("free:ligand1-mobile"), alpha_scale.val * lam ) )
system.add( PropertyConstraint( "alpha0", FFName("bound:ligand1-mobile"), alpha_scale.val * (1 - lam) ) )
system.add( PropertyConstraint( "alpha0", FFName("bound:ligand0-mobile"), alpha_scale.val * lam ) )
system.add( PropertyConstraint( "alpha0", FFName("free:ligand0-mobile"), alpha_scale.val * (1 - lam) ) )
# Now the forwards perturbed state (alpha1)
system.add( PropertyConstraint( "alpha1", FFName("free:ligand1-mobile"), alpha_scale.val * lam_f ) )
system.add( PropertyConstraint( "alpha1", FFName("bound:ligand1-mobile"), alpha_scale.val * (1 - lam_f) ) )
system.add( PropertyConstraint( "alpha1", FFName("bound:ligand0-mobile"), alpha_scale.val * lam_f ) )
system.add( PropertyConstraint( "alpha1", FFName("free:ligand0-mobile"), alpha_scale.val * (1 - lam_f) ) )
# Now the backwards perturbed state (alpha2)
system.add( PropertyConstraint( "alpha2", FFName("free:ligand1-mobile"), alpha_scale.val * lam_b ) )
system.add( PropertyConstraint( "alpha2", FFName("bound:ligand1-mobile"), alpha_scale.val * (1 - lam_b) ) )
system.add( PropertyConstraint( "alpha2", FFName("bound:ligand0-mobile"), alpha_scale.val * lam_b ) )
system.add( PropertyConstraint( "alpha2", FFName("free:ligand0-mobile"), alpha_scale.val * (1 - lam_b) ) )
# Now the next window perturbed state (alpha3)
system.add( PropertyConstraint( "alpha3", FFName("free:ligand1-mobile"), alpha_scale.val * lam_next ) )
system.add( PropertyConstraint( "alpha3", FFName("bound:ligand1-mobile"), alpha_scale.val * (1 - lam_next) ) )
system.add( PropertyConstraint( "alpha3", FFName("bound:ligand0-mobile"), alpha_scale.val * lam_next ) )
system.add( PropertyConstraint( "alpha3", FFName("free:ligand0-mobile"), alpha_scale.val * (1 - lam_next) ) )
# Now the previous window perturbed state (alpha4)
system.add( PropertyConstraint( "alpha4", FFName("free:ligand1-mobile"), alpha_scale.val * lam_prev ) )
system.add( PropertyConstraint( "alpha4", FFName("bound:ligand1-mobile"), alpha_scale.val * (1 - lam_prev) ) )
system.add( PropertyConstraint( "alpha4", FFName("bound:ligand0-mobile"), alpha_scale.val * lam_prev ) )
system.add( PropertyConstraint( "alpha4", FFName("free:ligand0-mobile"), alpha_scale.val * (1 - lam_prev) ) )
else:
# First, the reference state (alpha)
system.add( PropertyConstraint( "alpha", FFName("free:ligand1-mobile"), alpha_scale.val * lam ) )
system.add( PropertyConstraint( "alpha", FFName("bound:ligand1-mobile"), alpha_scale.val * (1 - lam) ) )
system.add( PropertyConstraint( "alpha", FFName("bound:ligand0-mobile"), alpha_scale.val * lam ) )
system.add( PropertyConstraint( "alpha", FFName("free:ligand0-mobile"), alpha_scale.val * (1 - lam) ) )
# Now the forwards perturbed state (alpha[f])
system.add( PropertyConstraint( "alpha[f]", FFName("free:ligand1-mobile"), alpha_scale.val * lam_f ) )
system.add( PropertyConstraint( "alpha[f]", FFName("bound:ligand1-mobile"), alpha_scale.val * (1 - lam_f) ) )
system.add( PropertyConstraint( "alpha[f]", FFName("bound:ligand0-mobile"), alpha_scale.val * lam_f ) )
system.add( PropertyConstraint( "alpha[f]", FFName("free:ligand0-mobile"), alpha_scale.val * (1 - lam_f) ) )
# Now the backwards perturbed state (alpha[b])
system.add( PropertyConstraint( "alpha[b]", FFName("free:ligand1-mobile"), alpha_scale.val * lam_b ) )
system.add( PropertyConstraint( "alpha[b]", FFName("bound:ligand1-mobile"), alpha_scale.val * (1 - lam_b) ) )
system.add( PropertyConstraint( "alpha[b]", FFName("bound:ligand0-mobile"), alpha_scale.val * lam_b ) )
system.add( PropertyConstraint( "alpha[b]", FFName("free:ligand0-mobile"), alpha_scale.val * (1 - lam_b) ) )
# Now the next window perturbed state (alpha[next])
system.add( PropertyConstraint( "alpha[next]", FFName("free:ligand1-mobile"), alpha_scale.val * lam_next ) )
system.add( PropertyConstraint( "alpha[next]", FFName("bound:ligand1-mobile"), alpha_scale.val * (1 - lam_next) ) )
system.add( PropertyConstraint( "alpha[next]", FFName("bound:ligand0-mobile"), alpha_scale.val * lam_next ) )
system.add( PropertyConstraint( "alpha[next]", FFName("free:ligand0-mobile"), alpha_scale.val * (1 - lam_next) ) )
# Now the previous window perturbed state (alpha[prev])
system.add( PropertyConstraint( "alpha[prev]", FFName("free:ligand1-mobile"), alpha_scale.val * lam_prev ) )
system.add( PropertyConstraint( "alpha[prev]", FFName("bound:ligand1-mobile"), alpha_scale.val * (1 - lam_prev) ) )
system.add( PropertyConstraint( "alpha[prev]", FFName("bound:ligand0-mobile"), alpha_scale.val * lam_prev ) )
system.add( PropertyConstraint( "alpha[prev]", FFName("free:ligand0-mobile"), alpha_scale.val * (1 - lam_prev) ) )
# Now lets create all of the groups for moves based on the above
# All solvent molecules in the bound and free legs are moved together
mobile_solvent = MoleculeGroup("mobile_solvent")
mobile_solvent.add( mobile_bound_solvents_group.molecules() )
mobile_solvent.add( mobile_free_water_group.molecules() )
system.add( mobile_solvent )
# All protein sidechains are moved together
mobile_sidechains = MoleculeGroup("mobile_sidechains")
mobile_sidechains.add(mobile_bound_protein_sidechains_group.molecules())
system.add( mobile_sidechains )
# All protein backbones are moved together
mobile_backbones = MoleculeGroup("mobile_backbones")
mobile_backbones.add(mobile_bound_protein_backbones_group.molecules())
system.add( mobile_backbones )
# All solute molecules are moved together
mobile_solutes = MoleculeGroup("mobile_solutes")
mobile_solutes.add(mobile_bound_solutes_group.molecules())
system.add( mobile_solutes )
# The ligand is moved in its own group
mobile_ligand = MoleculeGroup("mobile_ligand")
mobile_ligand.add(ligand_mol0)
mobile_ligand.add(ligand_mol1)
system.add( mobile_ligand )
# Apply all of the constraints
system.applyConstraints()
###
### ADD THE SYSTEM MONITORS
###
# Now we need to add the monitors...
print("\nAdding system monitors...")
system.add( "delta_g^{F}", MonitorComponent( Symbol("delta_nrg^{F}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val) ) )
system.add( "delta_g^{B}", MonitorComponent( Symbol("delta_nrg^{B}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val, False) ) )
system.add( "delta_g^{next}", MonitorComponent( Symbol("delta_nrg^{next}"),
BennettsFreeEnergyAverage(0 * kcal_per_mol,
temperature.val,
0.1 * binwidth.val) ) )
system.add( "delta_g^{prev}", MonitorComponent( Symbol("delta_nrg^{prev}"),
BennettsFreeEnergyAverage(0 * kcal_per_mol,
temperature.val,
0.1 * binwidth.val, False) ) )
system.add( "delta_bound_g^{F}", MonitorComponent( Symbol("delta_bound_nrg^{F}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val) ) )
system.add( "delta_bound_g^{B}", MonitorComponent( Symbol("delta_bound_nrg^{B}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val, False) ) )
system.add( "delta_free_g^{F}", MonitorComponent( Symbol("delta_free_nrg^{F}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val) ) )
system.add( "delta_free_g^{B}", MonitorComponent( Symbol("delta_free_nrg^{B}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val, False) ) )
# we will monitor the average energy between the two ligands and each
# residue with mobile sidechain, and each mobile solute
monitor_prosol = None
if mobile_solutes.isEmpty():
monitor_prosol = mobile_sidechains
elif mobile_sidechains.isEmpty():
monitor_prosol = mobile_solutes
else:
monitor_prosol = MoleculeGroup("monitored_protein_solute")
monitor_prosol.add(mobile_sidechains)
monitor_prosol.add(mobile_solutes)
system.add(monitor_prosol)
residue_nrgmon = FreeEnergyMonitor(monitor_prosol, ligand_group0, ligand_group1)
nrgmons = {}
nrgmons["residue_nrgmon"] = residue_nrgmon
# because the water molecules can diffuse, we find all waters within
# a certain distance of the ligand, and then identify them using identity
# points (placed at the center of the initial positions of the waters),
# and then monitor those...
boundwater_points = []
freewater_points = []
if water_monitor_distance.val:
dist = water_monitor_distance.val.to(angstrom)
for molnum in mobile_bound_water_group.molNums():
water_mol = mobile_bound_water_group[molnum][0].molecule()
if getMinimumDistance(ligand_mol0,water_mol) < dist:
# we should monitor this water
boundwater_points.append( VectorPoint(water_mol.evaluate().center()) )
for molnum in mobile_free_water_group.molNums():
#this is a mobile water, so a candidate for monitoring
water_mol = mobile_free_water_group[molnum][0].molecule()
if getMinimumDistance(ligand_mol0,water_mol) < dist:
# we should monitor this water
freewater_points.append( VectorPoint(water_mol.evaluate().center()) )
system.add(mobile_bound_water_group)
system.add(mobile_free_water_group)
boundwater_assigner = IDAssigner(boundwater_points, mobile_bound_water_group,
{"space" : Cartesian()})
boundwater_assigner.update(system)
freewater_assigner = IDAssigner(freewater_points, mobile_free_water_group,
{"space" : Cartesian()})
freewater_assigner.update(system)
boundwater_nrgmon = FreeEnergyMonitor(boundwater_assigner, ligand_group0, ligand_group1)
freewater_nrgmon = FreeEnergyMonitor(freewater_assigner, ligand_group1, ligand_group0)
nrgmons["boundwater_nrgmon"] = boundwater_nrgmon
nrgmons["freewater_nrgmon"] = freewater_nrgmon
for key in list(nrgmons.keys()):
nrgmons[key].setCoulombPower(coul_power.val)
nrgmons[key].setShiftDelta(shift_delta.val)
nrgmons[key].setTemperature(temperature.val)
system.add(key, nrgmons[key], nrgmon_frequency.val)
moves = createLSRCMoves(system)
# now calculate the total energy of the system - this initialises grids etc.
# ensuring that, when we make the replicas, the maximum amount of sharing between
# replicas occurs
print("\nEnergies of this system at lambda == 0...")
system.setConstant(lam, 0.0)
printEnergies(system.energies(), sys.stdout)
print("\nEnergies of this system at lambda == 1...")
system.setConstant(lam, 1.0)
printEnergies(system.energies(), sys.stdout)
system.setConstant(lam, 0.0)
# Now equlibrate the system
if nequilmoves.val:
print("\nPerforming %s moves of equilibration..." % nequilmoves.val)
system = moves.move(system, nequilmoves.val, False)
print("\nEnergies after equilibration...")
printEnergies(system.energies(), sys.stdout)
moves.clearStatistics()
# validate that we haven't leaked any energy
oldnrgs = system.energies()
system2 = System(system)
system2.mustNowRecalculateFromScratch()
newnrgs = system2.energies()
broken_nrgs = {}
for key in oldnrgs.keys():
if abs( oldnrgs[key] - newnrgs[key] ) > 0.05:
broken_nrgs[key] = (oldnrgs[key], newnrgs[key])
broken_keys = list(broken_nrgs.keys())
broken_keys.sort()
if len(broken_keys) > 0:
print("ERROR - PROGRAM BUG : SOME OF THE FORCEFIELDS ARE LEAKING ENERGY")
for key in broken_keys:
print("%s : %s versus %s" % (key, broken_nrgs[key][0], broken_nrgs[key][1]))
print("ERROR: Please send 'broken_system.s3' in for debugging, along with everything printed above!")
Sire.Stream.save( (system,moves), "broken_system.s3")
sys.exit(-1)
return (system, moves)
def mergeLSRC(sys0, ligand0_mol, sys1, ligand1_mol, watersys):
    """Merge the two ligand complexes (and, optionally, the free-leg water
    box) into a single LSRC ligandswap system.

    The atoms of ligand0 are mapped onto ligand1 using a maximum common
    substructure (MCS) search, optionally seeded by a user-supplied
    prematch. Reflection-sphere and solute-move properties are copied
    from sys0 after checking that all boxes agree on them.

    Returns a (system, moves) tuple produced by createStage().
    Exits the process if the reflection spheres are inconsistent.
    """
    if watersys:
        print("Merging the two ligand complexes with the water system to create the ligandswap system...")
    else:
        print("Merging the two ligand complexes with a vacuum box to create the ligandswap system...")

    print("\nFirst, mapping the atoms from the first ligand to the atoms of the second...")

    # Build the MCS matcher, seeding it with the prematch if one was given.
    if mcs_prematch.val:
        matcher = AtomMCSMatcher(AtomIDMatcher(mcs_prematch.val), mcs_timeout.val, False)
    else:
        matcher = AtomMCSMatcher(mcs_timeout.val, False)

    mapping = matcher.match(ligand0_mol, PropertyMap(), ligand1_mol, PropertyMap())

    # Report the atom-to-atom mapping in sorted order.
    mapped_atoms = sorted("%s <=> %s" % (ligand0_mol.atom(atomid).name().value(),
                                         ligand1_mol.atom(mapping[atomid]).name().value())
                          for atomid in mapping.keys())
    print("Mapping:\n%s\n" % "\n".join(mapped_atoms))

    lsrc_sys = System("LSRC stage 1 ( A => B )")

    if sys0.containsProperty("reflection center"):
        # All three boxes must share the same reflection sphere.
        if not sys1.containsProperty("reflection center"):
            print("Lack of reflection sphere in sys1 when it exists in sys0!")
            sys.exit(-1)

        if watersys:
            if not watersys.containsProperty("reflection center"):
                print("Lack of reflection sphere in the water system when it exists in sys0 and sys1!")
                sys.exit(-1)

        reflection_center0 = sys0.property("reflection center").toVector()[0]
        reflection_radius0 = float(str(sys0.property("reflection sphere radius")))
        reflection_center1 = sys1.property("reflection center").toVector()[0]
        reflection_radius1 = float(str(sys1.property("reflection sphere radius")))

        if watersys:
            reflection_center_wat = watersys.property("reflection center").toVector()[0]
            reflection_radius_wat = float(str(watersys.property("reflection sphere radius")))
        else:
            # no water box: treat it as agreeing with sys0 by construction
            reflection_center_wat = reflection_center0
            reflection_radius_wat = reflection_radius0

        if reflection_center0 != reflection_center1 or \
           reflection_center0 != reflection_center_wat or \
           reflection_radius0 != reflection_radius1 or \
           reflection_radius0 != reflection_radius_wat:
            print("Disagreement of the reflection sphere in the boxes!")
            print("sys0: %s and %s sys1: %s and %s water: %s and %s" % \
                  (reflection_center0,reflection_radius0,
                   reflection_center1,reflection_radius1,
                   reflection_center_wat,reflection_radius_wat))
            sys.exit(-1)

        lsrc_sys.setProperty("reflection center", AtomCoords(CoordGroup(1,reflection_center0)))
        lsrc_sys.setProperty("reflection sphere radius", VariantProperty(reflection_radius0))
    elif sys1.containsProperty("reflection center"):
        print("Lack of reflection sphere in sys0 when it exists in sys1!")
        sys.exit(-1)

    # Copy the solute move-size properties from sys0, when present.
    for prop_name in ("average solute translation delta",
                      "average solute rotation delta"):
        if sys0.containsProperty(prop_name):
            lsrc_sys.setProperty(prop_name, sys0.property(prop_name))

    # create the merged system
    (lsrc_sys, lsrc_moves) = createStage(lsrc_sys, sys0, ligand0_mol, ligand1_mol,
                                         watersys, AtomResultMatcher(mapping), "lsrc")

    return (lsrc_sys, lsrc_moves)
def loadWater():
    """Load the water box used for the free leg.

    Returns None when a vacuum calculation was requested (a relative
    hydration free energy), otherwise returns the flexibilised water
    system, restored from the .s3 cache when available or built from
    the Amber topology/coordinate files (and then cached) otherwise.
    """
    if vacuum_calc.val:
        print("Using a vacuum box, so calculating a relative hydration free energy.")
        return None

    if os.path.exists(water_s3file.val):
        # fast path: restore the previously-prepared water system
        print("Restoring from Sire Streamed Save file %s..." % water_s3file.val)
        watersys = Sire.Stream.load(water_s3file.val)
    else:
        print("Loading from Amber files %s / %s..." % (water_topfile.val, water_crdfile.val))
        watersys = createSystem(water_topfile.val, water_crdfile.val)
        watersys = addFlexibility(watersys, Vector(0,0,0), reflection_radius.val)
        # cache the prepared system so the next run restores instantly
        Sire.Stream.save(watersys, water_s3file.val)

    return watersys
def loadSystem(topfile, crdfile, s3file, ligand_name):
    """Load the solvated protein-ligand system from topfile/crdfile, saving into the s3 file 's3file'
    and locating the ligand molecule called 'ligand_name'.

    Returns a (system, ligand_mol) tuple. Exits the process if the
    named ligand cannot be found among the loaded molecules.
    """
    def _require_ligand(mols):
        # Locate the ligand, exiting with an error if it is missing.
        found = findMolecule(mols, ligand_name)
        if found is None:
            print("Cannot find the ligand (%s) in the set of loaded molecules!" % ligand_name)
            sys.exit(-1)
        return found

    if os.path.exists(s3file):
        print("Restoring from Sire Streamed Save file %s..." % s3file)
        system = Sire.Stream.load(s3file)
    else:
        print("Loading from Amber files %s / %s..." % (topfile,crdfile))
        # Add the name of the ligand to the list of solute molecules
        scheme = NamingScheme()

        if ligand_name:
            scheme.addSoluteResidueName(ligand_name)

        # Load up the system. This will automatically find the protein, solute, water, solvent
        # and ion molecules and assign them to different groups
        system = createSystem(topfile, crdfile, scheme)
        ligand_mol = _require_ligand(system)

        # Center the system with the ligand at (0,0,0)
        system = centerSystem(system, ligand_mol)
        ligand_mol = system[ligand_mol.number()][0].molecule()

        system = addFlexibility(system, Vector(0,0,0), reflection_radius.val, scheme )
        # cache the prepared system for future runs
        Sire.Stream.save(system, s3file)

    ligand_mol = _require_ligand(system)

    return (system, ligand_mol)
def makeRETI(system, moves):
    """This function replicates 'system' over each of the supplied lambda values
    and uses 'moves' to sample each of the replicated systems. This uses RETI
    to perform replica exchange moves across lambda.

    Returns a (replicas, replica_moves) tuple: the Replicas object (one
    sub-system per lambda window) and the RepExMove2 replica-exchange
    moves, each seeded from the (supplied or generated) random seed.
    """
    lam = Symbol("lambda")

    replicas = Replicas( len(lambda_values.val) )

    replicas.setSubSystem(system)
    replicas.setSubMoves(moves)
    replicas.setNSubMoves(nsubmoves.val)
    replicas.setLambdaComponent(lam)
    replicas.setRecordAllStatistics(True)

    seed = random_seed.val

    if seed is None:
        seed = RanGenerator().randInt(100000,1000000)
        print("RETI system using generated random number seed %d" % seed)
    else:
        print("RETI system using supplied random number seed %d" % seed)

    # offset the seed so the replicas and the replica-exchange moves
    # below use different random streams
    replicas.setGenerator( RanGenerator(seed+5) )

    for i in range(0, len(lambda_values.val)):
        # set the initial lambda value for this replica
        print("Setting replica %s to lambda %s" % (i, lambda_values.val[i]))
        replicas.setLambdaValue(i, lambda_values.val[i])

    for i in range(0, len(lambda_values.val)):
        print(lambda_values.val[i])
        print(replicas[i].subSystem().constants())

    # Now add monitors for each replica that will copy back
    nrgmons = [ "delta_g^{F}", "delta_g^{B}", "delta_g^{next}", "delta_g^{prev}",
                "delta_bound_g^{F}", "delta_bound_g^{B}",
                "delta_free_g^{F}", "delta_free_g^{B}",
                "residue_nrgmon", "boundwater_nrgmon", "freewater_nrgmon" ]

    for nrgmon in nrgmons:
        replicas.add( nrgmon, MonitorMonitor(MonitorName(nrgmon), True) )

    # now create the replica exchange moves for the replicas
    replica_moves = RepExMove2()
    #replica_moves.setDisableSwaps(True)
    replica_moves.setGenerator( RanGenerator(seed+7) )

    # BUGFIX: this message previously said "WSRC" - a leftover from the
    # WSRC script this file was derived from; this is the LSRC simulation.
    print("\nReturning the LSRC RETI replicas and moves...")

    return (replicas, replica_moves)
def loadInput():
    """This is a high level function that loads the LSRC system that calculates the
    relative binding free energy of swapping bound ligand 0 with free ligand 1.

    Restores the (system, moves) pair from the restart file when one
    exists; otherwise builds it from the two complexes plus the water
    box and writes a fresh restart file.
    """
    if os.path.exists(restart_file.val):
        # resume from an existing restart file
        (lsrc_system, lsrc_moves) = Sire.Stream.load(restart_file.val)
    else:
        # need to load the system from scratch
        (sys0, ligand0_mol) = loadSystem(topfile0.val, crdfile0.val, s3file0.val, ligand_name0.val)
        (sys1, ligand1_mol) = loadSystem(topfile1.val, crdfile1.val, s3file1.val, ligand_name1.val)
        watersys = loadWater()

        (lsrc_system, lsrc_moves) = mergeLSRC(sys0,ligand0_mol, sys1,ligand1_mol, watersys)
        (lsrc_system, lsrc_moves) = makeRETI(lsrc_system, lsrc_moves)

        # save so the (expensive) setup is skipped on the next run
        Sire.Stream.save( (lsrc_system,lsrc_moves), restart_file.val )

    return (lsrc_system, lsrc_moves)
def printEnergies(nrgs, FILE):
    """Write every energy in 'nrgs' to 'FILE', one per line, in
    alphabetical order of component name."""
    for component in sorted(nrgs.keys()):
        FILE.write("%s == %s kcal mol-1\n" % (component, nrgs[component]))
def printComponents(comps, FILE):
    """Write a table of all free energy components in 'comps' to 'FILE':
    one row per residue with the total, coulomb and LJ contributions
    (the final value of each integrated curve)."""
    print("RESIDUE TOTAL COULOMB LJ", file=FILE)
    for idx in range(comps.nComponents()):
        residue = comps.viewAt(idx).residue()
        total = comps.integrate(idx).values()[-1].y()
        coulomb = comps.integrateCoulomb(idx).values()[-1].y()
        lj = comps.integrateLJ(idx).values()[-1].y()
        print("%s %s %s %s" % (residue, total, coulomb, lj), file=FILE)
def printFreeEnergy(key1, key2, key3, total, bound, free, FILE):
    """Write a header row of the three keys followed by the final
    integrated total, bound and free free-energy values to 'FILE'."""
    print("%s %s %s" % (key1, key2, key3), file=FILE)
    finals = [fe.integrate().values()[-1].y() for fe in (total, bound, free)]
    print("%s %s %s" % tuple(finals), file=FILE)
def analyseLSRC(dirname, replicas, iteration, bennetts_freenrgs, fep_freenrgs, ti_freenrgs, bound_freenrgs, free_freenrgs,
                res_freenrgs, bound_water_freenrgs, free_water_freenrgs):
    """This function is used to perform all analysis of iteration 'it' of the passed LSRC system.

    Pulls this iteration's free-energy monitors out of every replica,
    feeds them into the passed accumulator objects (Bennetts / FEP / TI
    plus the bound/free and per-residue / per-water TI components), and
    writes a human-readable summary to '<dirname>/results_NNNN.log'.
    NOTE(review): 'replicas' is assumed to be the Replicas object built
    by makeRETI - confirm against the caller in run().
    """
    # read the value of delta_lambda from the first system
    system = replicas[0].subSystem()
    delta_lambda = system.constant(Symbol("delta_lambda"))

    # open a fresh per-iteration log file, e.g. <dirname>/results_0001.log
    logfile = "%s/results_%0004d.log" % (dirname, iteration)

    FILE = open(logfile, "w")

    print("===========================", file=FILE)
    print(" Results for iteration %d" % iteration, file=FILE)
    print("===========================", file=FILE)

    print("\ndelta_lambda == %f" % delta_lambda, file=FILE)
    print("temperature == %f K\n" % replicas[0].subMoves().temperature().to(kelvin), file=FILE)

    nreplicas = replicas.nReplicas()

    # extract all of the monitors from the replicas
    lambda_values = []

    # per-lambda accumulators collected from the replica monitors
    dg_f = {}
    dg_b = {}
    dg_next = {}
    dg_prev = {}
    dg_bound_f = {}
    dg_bound_b = {}
    dg_free_f = {}
    dg_free_b = {}
    dg_residue = {}
    dg_boundwater = {}
    dg_freewater = {}

    # only dump PDBs when enabled, every 'pdb_frequency' iterations
    write_pdbs = (save_pdb.val) and (iteration % pdb_frequency.val == 0)

    if write_pdbs:
        print("Saving PDBs of the system at iteration %d..." % iteration)

    for i in range(0, nreplicas):
        replica = replicas[i]
        monitors = replica.monitors()
        lamval = replica.lambdaValue()
        lambda_values.append(lamval)

        if write_pdbs:
            # unless save_all_pdbs is set, only the two end-point replicas are written
            if save_all_pdbs.val or (i == 0) or (i == nreplicas-1):
                # Save a PDB of the final configuration for the bound and free legs for each lambda value
                system = replica.subSystem()
                bound_leg = system[MGName("bound_leg")]
                free_leg = system[MGName("free_leg")]
                PDB().write(bound_leg, "%s/bound_mobile_%000006d_%.5f.pdb" % (dirname, iteration, lamval))
                PDB().write(free_leg, "%s/free_mobile_%000006d_%.5f.pdb" % (dirname, iteration, lamval))

        # [-1] selects the most recent (this iteration's) monitor entry
        dg_f[lamval] = monitors[MonitorName("delta_g^{F}")][-1].accumulator()
        dg_b[lamval] = monitors[MonitorName("delta_g^{B}")][-1].accumulator()
        dg_next[lamval] = monitors[MonitorName("delta_g^{next}")][-1].accumulator()
        dg_prev[lamval] = monitors[MonitorName("delta_g^{prev}")][-1].accumulator()
        dg_bound_f[lamval] = monitors[MonitorName("delta_bound_g^{F}")][-1].accumulator()
        dg_bound_b[lamval] = monitors[MonitorName("delta_bound_g^{B}")][-1].accumulator()
        dg_free_f[lamval] = monitors[MonitorName("delta_free_g^{F}")][-1].accumulator()
        dg_free_b[lamval] = monitors[MonitorName("delta_free_g^{B}")][-1].accumulator()
        dg_residue[lamval] = monitors[MonitorName("residue_nrgmon")][-1]
        dg_boundwater[lamval] = monitors[MonitorName("boundwater_nrgmon")][-1]
        dg_freewater[lamval] = monitors[MonitorName("freewater_nrgmon")][-1]

    # sorted list of lambda windows, padded to include both end points
    windows = copy.deepcopy(lambda_values)
    windows.sort()

    if windows[-1] != 1:
        windows.append(1)

    if windows[0] != 0:
        windows.insert(0,0)

    bennetts_freenrgs.set( iteration, windows, dg_next, dg_prev )
    fep_freenrgs.set( iteration, windows, dg_next, dg_prev )
    ti_freenrgs.set( iteration, dg_f, dg_b, delta_lambda )

    bound_freenrgs.set( iteration, dg_bound_f, dg_bound_b, delta_lambda )
    free_freenrgs.set( iteration, dg_free_f, dg_free_b, delta_lambda )

    print("\nRELATIVE FREE ENERGY\n", file=FILE)
    printFreeEnergy("TOTAL", "BOUND", "FREE",
                    ti_freenrgs[iteration], bound_freenrgs[iteration], free_freenrgs[iteration], FILE)

    res_freenrgs.set( iteration, dg_residue )
    bound_water_freenrgs.set( iteration, dg_boundwater )
    free_water_freenrgs.set( iteration, dg_freewater )

    print("\nRESIDUE FREE ENERGY COMPONENTS\n", file=FILE)
    printComponents(res_freenrgs[iteration], FILE)

    print("\nPROTEIN BOX WATER FREE ENERGY COMPONENTS\n", file=FILE)
    printComponents(bound_water_freenrgs[iteration], FILE)

    print("\nWATER BOX WATER FREE ENERGY COMPONENTS\n", file=FILE)
    printComponents(free_water_freenrgs[iteration], FILE)

    print("\n=============", file=FILE)
    print("Relative free energy for iteration %d equals %s" % (iteration, \
          ti_freenrgs[iteration].integrate().values()[-1].y()), file=FILE)
    print("==============", file=FILE)

    FILE.close()
def tryBackup(filename):
    """Best-effort backup: copy 'filename' to 'filename.bak'.

    Failures (e.g. the file does not exist yet on the first iteration)
    are deliberately ignored - a missing backup must never abort the
    simulation.
    """
    # save the old file to a backup
    try:
        shutil.copy(filename, "%s.bak" % filename)
    except OSError:
        # Narrowed from a bare 'except:' so that only filesystem errors
        # (including shutil.Error subclasses) are swallowed - not
        # programming errors or KeyboardInterrupt.
        pass
def mustMakeDir(dirname):
    """Ensure that directory 'dirname' (and any missing parents) exists.

    Uses exist_ok=True instead of a check-then-create pair, so a
    concurrent creation of the same directory (e.g. by another process)
    cannot raise FileExistsError.
    """
    os.makedirs(dirname, exist_ok=True)
@resolveParameters
def run():
    """This is a very high level function that does everything to run a LSRC simulation.

    Loads (or restores) the RETI system and moves, then repeats until
    'nmoves' iterations are done: one block of RETI moves, analysis of
    the iteration, periodic saving of the free-energy accumulators, and
    a restart file write every iteration for crash recovery.
    """
    (lsrc_system,lsrc_moves) = loadInput()

    # number of iterations already completed (nonzero when restarting)
    nmax = lsrc_moves.nMoves()

    print("Number of iterations to perform: %d. Number of iterations completed: %d." % (nmoves.val, nmax))

    if nmax >= nmoves.val:
        print("All iterations complete. Simulation complete.")
        sys.exit(0)

    # make sure all of the output directories exist
    mustMakeDir(outdir.val)

    # See if we have any existing free energy statistics files...
    t = QTime()
    t.start()

    freenrgs_file = "%s/freenrgs.s3" % outdir.val

    # restore the total free-energy accumulators, or start fresh ones
    if not os.path.exists(freenrgs_file):
        bennetts_freenrgs = Bennetts()
        fep_freenrgs = FEP()
        ti_freenrgs = TI()
    else:
        [bennetts_freenrgs, fep_freenrgs, ti_freenrgs] = Sire.Stream.load(freenrgs_file)

    freenrg_parts_file = "%s/freenrg_parts.s3" % outdir.val

    # bound/free leg decomposition accumulators
    if not os.path.exists(freenrg_parts_file):
        bound_freenrgs = TI()
        free_freenrgs = TI()
    else:
        [bound_freenrgs, free_freenrgs] = Sire.Stream.load(freenrg_parts_file)

    freenrg_components_file = "%s/freenrg_components.s3" % outdir.val

    # per-residue and per-water component accumulators
    if not os.path.exists(freenrg_components_file):
        res_freenrgs = TIComponents()
        bound_water_freenrgs = TIComponents()
        free_water_freenrgs = TIComponents()
    else:
        [res_freenrgs, bound_water_freenrgs, free_water_freenrgs] = Sire.Stream.load(freenrg_components_file)

    print("Initialising / loading the free energy files took %d ms" % t.elapsed())

    while nmax < nmoves.val:
        t.start()
        print("Performing iteration %d..." % (nmax+1))
        lsrc_moves.move(lsrc_system, 1, True)

        ms = t.elapsed()
        print("...iteration complete. Took %d ms" % ms)

        nmax = lsrc_moves.nMoves()

        # we have successfully completed one iteration of each system
        iteration = nmax

        # perform analysis
        t.start()
        print("Analysing iteration %d..." % iteration)

        analyseLSRC(outdir.val,
                    lsrc_system, iteration, bennetts_freenrgs, fep_freenrgs, ti_freenrgs, bound_freenrgs, free_freenrgs,
                    res_freenrgs, bound_water_freenrgs, free_water_freenrgs)

        # statistics are cleared so each iteration's monitors start fresh
        lsrc_system.clearAllStatistics()

        print("...analysis complete (took %d ms)" % t.elapsed())

        # write a restart file for all of the free energies and component - this simplifies post-run analysis
        if iteration % restart_frequency.val == 0 or iteration == nmoves.val:
            t.start()
            print("Saving the free energy analysis files from iteration %d..." % iteration)

            # save the old files to backups before overwriting
            tryBackup(freenrgs_file)
            tryBackup(freenrg_components_file)
            tryBackup(freenrg_parts_file)

            Sire.Stream.save( [bennetts_freenrgs, fep_freenrgs, ti_freenrgs], freenrgs_file )
            Sire.Stream.save( [bound_freenrgs, free_freenrgs], freenrg_parts_file )
            Sire.Stream.save( [res_freenrgs, bound_water_freenrgs, free_water_freenrgs], freenrg_components_file )

            print("...save complete (took %d ms)" % t.elapsed())

        # write a restart file every N moves in case of crash or run out of time
        t.start()
        print("Saving the restart file from iteration %d..." % iteration)

        # save the old file to a backup
        tryBackup(restart_file.val)

        Sire.Stream.save( (lsrc_system, lsrc_moves), restart_file.val )

        print("...save complete (took %d ms)" % t.elapsed())

    print("All iterations complete.")

    return
| michellab/Sire | wrapper/Tools/LSRC.py | Python | gpl-2.0 | 95,590 | [
"Amber"
] | 89bf70949025944ed05df8982742e0246e68a65f9ed7a4468c8bb590e68c3192 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
# Python 2/3 compatibility aliases: on Python 3 the 2.x builtins `xrange`
# and `basestring` no longer exist, so alias them to their 3.x equivalents.
# Use sys.version_info rather than a lexicographic comparison of the
# version *string*, which is fragile (string compare, not numeric).
if sys.version_info[0] >= 3:
    xrange = range
    basestring = str
from pyspark import SparkContext, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc
from pyspark.mllib.linalg import Vectors, SparseVector, _convert_to_vector
from pyspark.sql import DataFrame
class MLUtils(object):
    """
    Helper methods to load, save and pre-process data used in MLlib.
    .. versionadded:: 1.0.0
    """
    @staticmethod
    def _parse_libsvm_line(line):
        """
        Parses a line in LIBSVM format into (label, indices, values).
        """
        # Whitespace-delimited: the label first, then "index:value" pairs.
        items = line.split(None)
        label = float(items[0])
        nnz = len(items) - 1
        indices = np.zeros(nnz, dtype=np.int32)
        values = np.zeros(nnz)
        for i in xrange(nnz):
            index, value = items[1 + i].split(":")
            # LIBSVM indices are one-based; store them zero-based.
            indices[i] = int(index) - 1
            values[i] = float(value)
        return label, indices, values
    @staticmethod
    def _convert_labeled_point_to_libsvm(p):
        """Converts a LabeledPoint to a string in LIBSVM format."""
        from pyspark.mllib.regression import LabeledPoint
        assert isinstance(p, LabeledPoint)
        items = [str(p.label)]
        v = _convert_to_vector(p.features)
        if isinstance(v, SparseVector):
            nnz = len(v.indices)
            for i in xrange(nnz):
                # Convert back to LIBSVM's one-based indexing.
                items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
        else:
            for i in xrange(len(v)):
                items.append(str(i + 1) + ":" + str(v[i]))
        return " ".join(items)
    @staticmethod
    @since("1.0.0")
    def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None):
        """
        Loads labeled data in the LIBSVM format into an RDD of
        LabeledPoint. The LIBSVM format is a text-based format used by
        LIBSVM and LIBLINEAR. Each line represents a labeled sparse
        feature vector using the following format:
        label index1:value1 index2:value2 ...
        where the indices are one-based and in ascending order. This
        method parses each line into a LabeledPoint, where the feature
        indices are converted to zero-based.
        :param sc: Spark context
        :param path: file or directory path in any Hadoop-supported file
                     system URI
        :param numFeatures: number of features, which will be determined
                            from the input data if a nonpositive value
                            is given. This is useful when the dataset is
                            already split into multiple files and you
                            want to load them separately, because some
                            features may not present in certain files,
                            which leads to inconsistent feature
                            dimensions.
        :param minPartitions: min number of partitions
        :return: labeled data stored as an RDD of LabeledPoint
        >>> from tempfile import NamedTemporaryFile
        >>> from pyspark.mllib.util import MLUtils
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
        >>> tempFile.flush()
        >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
        >>> tempFile.close()
        >>> examples[0]
        LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
        >>> examples[1]
        LabeledPoint(-1.0, (6,[],[]))
        >>> examples[2]
        LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
        """
        from pyspark.mllib.regression import LabeledPoint
        lines = sc.textFile(path, minPartitions)
        parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
        if numFeatures <= 0:
            # Infer the dimension from the largest zero-based index seen;
            # cache first because `parsed` is evaluated twice.
            parsed.cache()
            numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
        return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
    @staticmethod
    @since("1.0.0")
    def saveAsLibSVMFile(data, dir):
        """
        Save labeled data in LIBSVM format.
        :param data: an RDD of LabeledPoint to be saved
        :param dir: directory to save the data
        >>> from tempfile import NamedTemporaryFile
        >>> from fileinput import input
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from glob import glob
        >>> from pyspark.mllib.util import MLUtils
        >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
        ...             LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
        >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
        '0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
        """
        lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
        lines.saveAsTextFile(dir)
    @staticmethod
    @since("1.1.0")
    def loadLabeledPoints(sc, path, minPartitions=None):
        """
        Load labeled points saved using RDD.saveAsTextFile.
        :param sc: Spark context
        :param path: file or directory path in any Hadoop-supported file
                     system URI
        :param minPartitions: min number of partitions
        :return: labeled data stored as an RDD of LabeledPoint
        >>> from tempfile import NamedTemporaryFile
        >>> from pyspark.mllib.util import MLUtils
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
        ...             LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
        >>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
        [LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
        """
        minPartitions = minPartitions or min(sc.defaultParallelism, 2)
        return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
    @staticmethod
    @since("1.5.0")
    def appendBias(data):
        """
        Returns a new vector with `1.0` (bias) appended to
        the end of the input vector.
        """
        vec = _convert_to_vector(data)
        if isinstance(vec, SparseVector):
            # Sparse path: add one explicit entry for the new last position.
            newIndices = np.append(vec.indices, len(vec))
            newValues = np.append(vec.values, 1.0)
            return SparseVector(len(vec) + 1, newIndices, newValues)
        else:
            return _convert_to_vector(np.append(vec.toArray(), 1.0))
    @staticmethod
    @since("1.5.0")
    def loadVectors(sc, path):
        """
        Loads vectors saved using `RDD[Vector].saveAsTextFile`
        with the default number of partitions.
        """
        return callMLlibFunc("loadVectors", sc, path)
    @staticmethod
    @since("2.0.0")
    def convertVectorColumnsToML(dataset, *cols):
        """
        Converts vector columns in an input DataFrame from the
        :py:class:`pyspark.mllib.linalg.Vector` type to the new
        :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of vector columns to be converted.
          New vector columns will be ignored. If unspecified, all old
          vector columns will be converted excepted nested ones.
        :return:
          the input dataset with old vector columns converted to the
          new vector type
        >>> import pyspark
        >>> from pyspark.mllib.linalg import Vectors
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
        ...     ["id", "x", "y"])
        >>> r1 = MLUtils.convertVectorColumnsToML(df).first()
        >>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
        True
        >>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
        True
        >>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
        >>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
        True
        >>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols))
    @staticmethod
    @since("2.0.0")
    def convertVectorColumnsFromML(dataset, *cols):
        """
        Converts vector columns in an input DataFrame to the
        :py:class:`pyspark.mllib.linalg.Vector` type from the new
        :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of vector columns to be converted.
          Old vector columns will be ignored. If unspecified, all new
          vector columns will be converted except nested ones.
        :return:
          the input dataset with new vector columns converted to the
          old vector type
        >>> import pyspark
        >>> from pyspark.ml.linalg import Vectors
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
        ...     ["id", "x", "y"])
        >>> r1 = MLUtils.convertVectorColumnsFromML(df).first()
        >>> isinstance(r1.x, pyspark.mllib.linalg.SparseVector)
        True
        >>> isinstance(r1.y, pyspark.mllib.linalg.DenseVector)
        True
        >>> r2 = MLUtils.convertVectorColumnsFromML(df, "x").first()
        >>> isinstance(r2.x, pyspark.mllib.linalg.SparseVector)
        True
        >>> isinstance(r2.y, pyspark.ml.linalg.DenseVector)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertVectorColumnsFromML", dataset, list(cols))
    @staticmethod
    @since("2.0.0")
    def convertMatrixColumnsToML(dataset, *cols):
        """
        Converts matrix columns in an input DataFrame from the
        :py:class:`pyspark.mllib.linalg.Matrix` type to the new
        :py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of matrix columns to be converted.
          New matrix columns will be ignored. If unspecified, all old
          matrix columns will be converted excepted nested ones.
        :return:
          the input dataset with old matrix columns converted to the
          new matrix type
        >>> import pyspark
        >>> from pyspark.mllib.linalg import Matrices
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
        ...     Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
        >>> r1 = MLUtils.convertMatrixColumnsToML(df).first()
        >>> isinstance(r1.x, pyspark.ml.linalg.SparseMatrix)
        True
        >>> isinstance(r1.y, pyspark.ml.linalg.DenseMatrix)
        True
        >>> r2 = MLUtils.convertMatrixColumnsToML(df, "x").first()
        >>> isinstance(r2.x, pyspark.ml.linalg.SparseMatrix)
        True
        >>> isinstance(r2.y, pyspark.mllib.linalg.DenseMatrix)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertMatrixColumnsToML", dataset, list(cols))
    @staticmethod
    @since("2.0.0")
    def convertMatrixColumnsFromML(dataset, *cols):
        """
        Converts matrix columns in an input DataFrame to the
        :py:class:`pyspark.mllib.linalg.Matrix` type from the new
        :py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of matrix columns to be converted.
          Old matrix columns will be ignored. If unspecified, all new
          matrix columns will be converted except nested ones.
        :return:
          the input dataset with new matrix columns converted to the
          old matrix type
        >>> import pyspark
        >>> from pyspark.ml.linalg import Matrices
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
        ...     Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
        >>> r1 = MLUtils.convertMatrixColumnsFromML(df).first()
        >>> isinstance(r1.x, pyspark.mllib.linalg.SparseMatrix)
        True
        >>> isinstance(r1.y, pyspark.mllib.linalg.DenseMatrix)
        True
        >>> r2 = MLUtils.convertMatrixColumnsFromML(df, "x").first()
        >>> isinstance(r2.x, pyspark.mllib.linalg.SparseMatrix)
        True
        >>> isinstance(r2.y, pyspark.ml.linalg.DenseMatrix)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertMatrixColumnsFromML", dataset, list(cols))
class Saveable(object):
    """
    Mixin for models and transformers which may be saved as files.
    .. versionadded:: 1.3.0
    """
    def save(self, sc, path):
        """
        Persist this model under the directory ``path``.
        Implementations write human-readable (JSON) metadata to
        ``path/metadata/`` and Parquet-formatted model data to
        ``path/data/``; such a model can later be restored with
        py:meth:`Loader.load`.
        :param sc: Spark context used to save model data.
        :param path: directory in which to save this model; if the
                     directory already exists, an exception is thrown.
        """
        # Abstract: concrete models must provide their own persistence.
        raise NotImplementedError
@inherit_doc
class JavaSaveable(Saveable):
    """
    Mixin for models that provide save() through their Scala
    implementation.
    .. versionadded:: 1.3.0
    """
    @since("1.3.0")
    def save(self, sc, path):
        """Save this model to the given path."""
        # Validate argument types up front so the failure mode is a clear
        # TypeError rather than an opaque error from the JVM gateway.
        type_checks = (
            (sc, SparkContext, "sc should be a SparkContext, got type %s"),
            (path, basestring, "path should be a basestring, got type %s"),
        )
        for value, expected, message in type_checks:
            if not isinstance(value, expected):
                raise TypeError(message % type(value))
        self._java_model.save(sc._jsc.sc(), path)
class Loader(object):
    """
    Mixin for classes which can load saved models from files.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def load(cls, sc, path):
        """
        Restore a model previously written with py:meth:`Saveable.save`.
        :param sc: Spark context used for loading model files.
        :param path: directory from which the model was saved.
        :return: model instance
        """
        # Abstract: concrete loaders must provide their own implementation.
        raise NotImplementedError
@inherit_doc
class JavaLoader(Loader):
    """
    Mixin for classes which can load saved models using its Scala
    implementation.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def _java_loader_class(cls):
        """
        Fully qualified name of the Scala loader class.  By convention
        the JVM counterpart lives at the same dotted path with "pyspark"
        replaced by "org.apache.spark".
        """
        java_package = cls.__module__.replace("pyspark", "org.apache.spark")
        return "%s.%s" % (java_package, cls.__name__)
    @classmethod
    def _load_java(cls, sc, path):
        """
        Load a Java model from the given path.
        """
        # Walk the dotted class name attribute-by-attribute through the
        # JVM gateway to resolve the loader object.
        java_obj = sc._jvm
        for part in cls._java_loader_class().split("."):
            java_obj = getattr(java_obj, part)
        return java_obj.load(sc._jsc.sc(), path)
    @classmethod
    @since("1.3.0")
    def load(cls, sc, path):
        """Load a model from the given path."""
        return cls(cls._load_java(sc, path))
class LinearDataGenerator(object):
    """Utils for generating linear data.
    .. versionadded:: 1.5.0
    """
    @staticmethod
    @since("1.5.0")
    def generateLinearInput(intercept, weights, xMean, xVariance,
                            nPoints, seed, eps):
        """
        :param: intercept bias factor, the term c in X'w + c
        :param: weights feature vector, the term w in X'w + c
        :param: xMean Point around which the data X is centered.
        :param: xVariance Variance of the given data
        :param: nPoints Number of points to be generated
        :param: seed Random Seed
        :param: eps noise scale; larger eps means more gaussian noise.
        Returns a list of LabeledPoints of length nPoints
        """
        # Coerce all numeric sequences to plain Python floats before
        # handing them to the JVM wrapper.
        weights = list(map(float, weights))
        xMean = list(map(float, xMean))
        xVariance = list(map(float, xVariance))
        generated = callMLlibFunc(
            "generateLinearInputWrapper", float(intercept), weights, xMean,
            xVariance, int(nPoints), int(seed), float(eps))
        return list(generated)
    @staticmethod
    @since("1.5.0")
    def generateLinearRDD(sc, nexamples, nfeatures, eps,
                          nParts=2, intercept=0.0):
        """
        Generate an RDD of LabeledPoints.
        """
        wrapper_args = (sc, int(nexamples), int(nfeatures),
                        float(eps), int(nParts), float(intercept))
        return callMLlibFunc("generateLinearRDDWrapper", *wrapper_args)
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("mllib.util tests")\
.getOrCreate()
globs['spark'] = spark
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| pgandhi999/spark | python/pyspark/mllib/util.py | Python | apache-2.0 | 19,611 | [
"Gaussian"
] | e385176d62fc65e56e7bd61bdb93e36b7afab6826cf4fe2c2d19312287bd52e8 |
from osp.conf.settings import *
# Unique key used for salting passwords
SECRET_KEY = 'Chac-8#haCa_Ra-e?-e+ucrur=gEFRasejayasaC?meMe!AC-a'
# DEBUG should be False in production, True in development
DEBUG = True
# Expire session cookie when user quits the browser session if set
# to True. (This will also log the user out of the application).
# Note, the user must actually quit the browser and not just close the browser.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# List of administrators who should receive error reports
ADMINS = (
    ('John Smith', 'john.smith@example.edu'),
    ('Francis Drake', 'francis.drake@example.edu'),
)
MANAGERS = ADMINS
# List of developers who receive email messages in debug mode
DEBUG_USERS = ADMINS
# MySQL database configuration settings
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'osp',
        'USER': 'osp',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
# Server time zone
TIME_ZONE = 'America/New_York'
# Used if you are hosting OSP off the top level (e.g. http://example.edu/osp/)
URL_PREFIX = ''
# The URL path at which media is being served
MEDIA_URL = URL_PREFIX + '/media/'
# The URL path at which admin media is being served
ADMIN_MEDIA_PREFIX = URL_PREFIX + '/media/admin/'
# Uncomment the following lines if you are using the LDAP backend
#
# import ldap
# from django_auth_ldap.config import LDAPSearch
#
# AUTHENTICATION_BACKENDS = [
#     'django_auth_ldap.backend.LDAPBackend',
#     'django.contrib.auth.backends.ModelBackend',
# ]
# AUTH_LDAP_SERVER_URI = 'ldap://ldap.example.edu'
# AUTH_LDAP_BIND_DN = 'service_user'
# AUTH_LDAP_BIND_PASSWORD = 'service_password'
# AUTH_LDAP_USER_SEARCH = LDAPSearch('ou=Users,dc=example,dc=edu',
#                                    ldap.SCOPE_SUBTREE,
#                                    '(uid=%(user)s)')
# AUTH_LDAP_USER_ATTR_MAP = {
#     'first_name': 'givenName',
#     'last_name': 'sn',
#     'email': 'mail'
# }
# LOGIN_REDIRECT_URL = URL_PREFIX + '/'
# Uncomment the following lines if you are using the CAS backend
#
# AUTHENTICATION_BACKENDS = [
#     'django.contrib.auth.backends.ModelBackend',
#     'django_cas.backends.CASBackend',
# ]
# MIDDLEWARE_CLASSES.append('django_cas.middleware.CASMiddleware')
# CAS_VERSION = '1'
# CAS_SERVER_URL = 'https://cas.example.edu'
# CAS_IGNORE_REFERER = True
# CAS_REDIRECT_URL = URL_PREFIX + '/'
# The URL paths for login and logout pages
LOGIN_URL = URL_PREFIX + '/login/'
LOGOUT_URL = URL_PREFIX + '/logout/'
# SMTP mail server configuration settings
EMAIL_HOST = 'smtp.example.edu'
EMAIL_PORT = 25
EMAIL_HOST_USER = 'service_user'
EMAIL_HOST_PASSWORD = 'service_password'
# List of IP addresses for hosts allowed to push data to the API
API_ALLOWED_HOSTS = []
# Authorization key for pushing data to the API
API_KEY = ''
# Email address that intervention requests are sent to
INTERVENTIONS_EMAIL = 'interventions@example.edu'
# "From" email address for the application
SERVER_EMAIL = 'osp@example.edu'
DEFAULT_FROM_EMAIL = SERVER_EMAIL
# Regular expression to match your ERP ID number pattern
# (raw string so the backslash escape reaches the regex engine intact)
ID_NUMBER_PATTERN = r'\d{7}'
# Create a student index to improve search speed
INDEX_STUDENTS = False
# Number of characters in the note field of a visit to be displayed in a report.
# WARNING: Do not Exceed 60000 characters.
NOTE_MAX_CHARS = 5000
#Number of rows displayed in a report.
#WARNING: Do not exceed 65000 rows.
REPORT_MAX_ROWS = 65000
# All potential term choices that could be received by the API
TERM_CHOICES = [
    ('fa', 'Fall'),
    ('sp', 'Spring'),
    ('su', 'Summer'),
]
# Current year and term
CURRENT_TERM = 'su'
CURRENT_YEAR = 2011
# All potential enrollment status choices that could be received by the API
ENROLLMENT_STATUS_CHOICES = [
    ('A', 'Active'),
    ('D', 'Dropped'),
    ('W', 'Withdrawn'),
    ('X', 'Deleted'),
    ('C', 'Cancelled'),
    ('NP', 'Non-payment'),
]
# Enrollment statuses which are considered "active"
ACTIVE_ENROLLMENT_STATUSES = ['A',]
# List of campuses for your school
CAMPUS_CHOICES = [
    'Main',
    'Uptown',
]
# List of contact types for visits
VISIT_CONTACT_TYPE_CHOICES = [
    'In Person',
    'Email',
    'Telephone',
    'Online',
    'Group Session',
]
# List of reasons for visits
VISIT_REASON_CHOICES = [
    'New Student Admission',
    'Academic Advising',
    'Counseling',
    'Personal Counseling',
    'Early Alert Referral',
    'Graduation Assessment Review',
    'Career Counseling',
    'Workshops, Class Presentations',
    'Early Alert Counseling',
    'Disability Counseling',
    'Faculty Advising',
    'Academic Warning',
    'Academic Probation',
    'First Academic Suspension',
    'Final Academic Suspension',
]
# List of departments for visits
VISIT_DEPARTMENT_CHOICES = [
    'Advising',
    'Counseling',
]
# List of Career Services outcomes for visits
VISIT_CAREER_SERVICES_OUTCOME_CHOICES = [
    'No Contact',
    'Email',
    'Phone',
    'Scheduled Appointment with Career Services',
    'No Show for Appointment',
    'Took Career Assessment(s)',
    'Met with Career Counselor',
    'Career Decision in Process',
    'Career and Program Decision Completed',
    'Referred for Program Update',
    'Program Updated',
]
# List of intervention reasons
INTERVENTION_REASONS = [
    'Excessive Tardiness/Absenteeism',
    'Failing Test/Quiz Scores',
    'Missing Assignments',
    'Needs Personal or Social Counseling',
    'Needs Career Exploration',
    'Needs Tutoring',
]
# Re-structure the choices lists for Django's sake
CAMPUS_CHOICES = [(x, x) for x in CAMPUS_CHOICES]
VISIT_CONTACT_TYPE_CHOICES = [(x, x) for x in VISIT_CONTACT_TYPE_CHOICES]
VISIT_REASON_CHOICES = [(x, x) for x in VISIT_REASON_CHOICES]
VISIT_DEPARTMENT_CHOICES = [(x, x) for x in VISIT_DEPARTMENT_CHOICES]
VISIT_CAREER_SERVICES_OUTCOME_CHOICES = [(x, x) for x in
                                         VISIT_CAREER_SERVICES_OUTCOME_CHOICES]
INTERVENTION_REASONS = [(x, x) for x in INTERVENTION_REASONS]
# Settings for including custom assessment applications.
# A custom assessment must be wrapped inside a django application.
CUSTOM_ASSESSMENTS = [
    {'application_name': 'onlinereadiness',
     'menu_href': '/assessment/onlinereadiness/show/',
     'menu_label': 'Online Readiness',
     'results_href': '/assessment/onlinereadiness/results/',
     'results_label': 'Online Readiness Results',
     'responses_href': '/assessment/onlinereadiness/responses',
     'responses_label': 'Online Readiness Responses'
     },
    {'application_name': 'studyskills',
     'menu_href': '/assessment/studyskills/show',
     'menu_label': 'Study Skills',
     'results_href': '/assessment/studyskills/results/',
     'results_label': 'Study Skills Results',
     'responses_href': '/assessment/studyskills/responses',
     'responses_label': 'Study Skills Responses'
     }
]
try:
    # Add custom assessments to list of installed apps.
    custom_assessments_apps = [item['application_name'] for item in CUSTOM_ASSESSMENTS]
    INSTALLED_APPS.extend(custom_assessments_apps)
except Exception:
    # INSTALLED_APPS comes from the base settings star-import; keep start-up
    # resilient if it is absent or the assessment entries are malformed.
    # (Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt pass.)
    pass
| mariajosefrancolugo/osp | deploy/osp_settings.py | Python | lgpl-3.0 | 7,115 | [
"VisIt"
] | 0235d897dece5c4b93511fad6cbee43b0b5c714d99ec1e69c4923d800051c38b |
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# (C) 2007 Jeremy Laine
# licensed under MIT, see COPYING.MIT
import os
import sys
import time
# Python version the generated manifest targets (VERSION[:3] is used as
# the "2.6" directory component of the install prefix below).
VERSION = "2.6.4"
__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
# Date-stamped generator version, embedded in the generated file header.
__version__ = "20100908"
class MakefileMaker:
    """Collects package definitions and emits an OpenEmbedded packaging
    manifest (PROVIDES / PACKAGES / DESCRIPTION_* / RDEPENDS_* / FILES_*).
    NOTE(review): this generator is Python 2 code (dict.iteritems(),
    ``file()`` in __main__); run it with a Python 2 interpreter.
    """
    def __init__( self, outfile ):
        """initialize"""
        # name -> (description, dependencies, full filenames)
        self.packages = {}
        self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
        self.output = outfile
        self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )
    #
    # helper functions
    #
    def out( self, data ):
        """print a line to the output file"""
        self.output.write( "%s\n" % data )
    def setPrefix( self, targetPrefix ):
        """set a file prefix for addPackage files"""
        self.targetPrefix = targetPrefix
    def doProlog( self ):
        # Currently emits only a placeholder line before the body.
        self.out( """ """ )
        self.out( "" )
    def addPackage( self, name, description, dependencies, filenames ):
        """add a package to the Makefile"""
        # Accept either a whitespace-separated string or a list of names.
        if type( filenames ) == type( "" ):
            filenames = filenames.split()
        fullFilenames = []
        for filename in filenames:
            # Paths that already start with a Makefile variable ("$...")
            # pass through unchanged; others get the install prefix.
            if filename[0] != "$":
                fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
            else:
                fullFilenames.append( filename )
        self.packages[name] = description, dependencies, fullFilenames
    def doBody( self ):
        """generate body of Makefile"""
        global VERSION
        #
        # generate provides line
        #
        provideLine = 'PROVIDES+="'
        for name in self.packages:
            provideLine += "%s " % name
        provideLine += '"'
        self.out( provideLine )
        self.out( "" )
        #
        # generate package line
        #
        # python-core-dbg is forced first; python-modules is appended last.
        packageLine = 'PACKAGES="python-core-dbg '
        for name in self.packages:
            if name != 'python-core-dbg':
                packageLine += "%s " % name
        packageLine += 'python-modules"'
        self.out( packageLine )
        self.out( "" )
        #
        # generate package variables
        #
        for name, data in self.packages.iteritems():
            desc, deps, files = data
            #
            # write out the description, revision and dependencies
            #
            self.out( 'DESCRIPTION_%s="%s"' % ( name, desc ) )
            self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
            line = 'FILES_%s="' % name
            #
            # check which directories to make in the temporary directory
            #
            dirset = {} # if python had a set-datatype this would be sufficient. for now, we're using a dict instead.
            for target in files:
                dirset[os.path.dirname( target )] = True
            #
            # generate which files to copy for the target (-dfR because whole directories are also allowed)
            #
            for target in files:
                line += "%s " % target
            line += '"'
            self.out( line )
            self.out( "" )
        # python-modules is a meta-package depending on everything except
        # the debug and dev packages.
        self.out( 'DESCRIPTION_python-modules="All Python modules"' )
        line = 'RDEPENDS_python-modules="'
        for name, data in self.packages.iteritems():
            if name not in ['python-core-dbg', 'python-dev']:
                line += "%s " % name
        self.out( "%s \"" % line )
        self.out( 'ALLOW_EMPTY_python-modules = "1"' )
    def doEpilog( self ):
        self.out( """""" )
        self.out( "" )
    def make( self ):
        # Full manifest = prolog + body + epilog.
        self.doProlog()
        self.doBody()
        self.doEpilog()
if __name__ == "__main__":
    if len( sys.argv ) > 1:
        # Start from a clean slate; a missing previous manifest is fine.
        # (Replaces the former `os.popen("rm -f ...")` shell-out, which was
        # non-portable and unsafe with unusual filenames, and the Python 2
        # only `file()` builtin with `open()`.)
        try:
            os.remove( sys.argv[1] )
        except OSError:
            pass
        outfile = open( sys.argv[1], "w" )
    else:
        outfile = sys.stdout
    m = MakefileMaker( outfile )
    # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
    # Parameters: revision, name, description, dependencies, filenames
    #
    m.addPackage( "python-core", "Python Interpreter and core modules (needed!)", "",
                  "__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
                  "genericpath.* getopt.* linecache.* new.* " +
                  "os.* posixpath.* struct.* " +
                  "warnings.* site.* stat.* " +
                  "UserDict.* UserList.* UserString.* " +
                  "lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
                  "lib-dynload/xreadlines.so types.* platform.* ${bindir}/python*" )
    m.addPackage( "python-core-dbg", "Python core module debug information", "python-core",
                  "config/.debug lib-dynload/.debug ${bindir}/.debug ${libdir}/.debug" )
    m.addPackage( "python-dev", "Python Development Package", "python-core",
                  "${includedir} ${libdir}/libpython2.6.so config" ) # package
    m.addPackage( "python-idle", "Python Integrated Development Environment", "python-core python-tkinter",
                  "${bindir}/idle idlelib" ) # package
    m.addPackage( "python-pydoc", "Python Interactive Help Support", "python-core python-lang python-stringold python-re",
                  "${bindir}/pydoc pydoc.*" )
    m.addPackage( "python-smtpd", "Python Simple Mail Transport Daemon", "python-core python-netserver python-email python-mime",
                  "${bindir}/smtpd.*" )
    m.addPackage( "python-audio", "Python Audio Handling", "python-core",
                  "wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so" )
    m.addPackage( "python-bsddb", "Python Berkeley Database Bindings", "python-core",
                  "bsddb lib-dynload/_bsddb.so" ) # package
    m.addPackage( "python-codecs", "Python Codecs, Encodings & i18n Support", "python-core python-lang",
                  "codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )
    m.addPackage( "python-compile", "Python Bytecode Compilation Support", "python-core",
                  "py_compile.* compileall.*" )
    m.addPackage( "python-compiler", "Python Compiler Support", "python-core",
                  "compiler" ) # package
    m.addPackage( "python-compression", "Python High Level Compression Support", "python-core python-zlib",
                  "gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )
    m.addPackage( "python-crypt", "Python Basic Cryptographic and Hashing Support", "python-core",
                  "hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )
    m.addPackage( "python-textutils", "Python Option Parsing, Text Wrapping and Comma-Separated-Value Support", "python-core python-io python-re python-stringold",
                  "lib-dynload/_csv.so csv.* optparse.* textwrap.*" )
    m.addPackage( "python-curses", "Python Curses Support", "python-core",
                  "curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module
    m.addPackage( "python-ctypes", "Python C Types Support", "python-core",
                  "ctypes lib-dynload/_ctypes.so" ) # directory + low level module
    m.addPackage( "python-datetime", "Python Calendar and Time support", "python-core python-codecs",
                  "_strptime.* calendar.* lib-dynload/datetime.so" )
    m.addPackage( "python-db", "Python File-Based Database Support", "python-core",
                  "anydbm.* dumbdbm.* whichdb.* " )
    m.addPackage( "python-debugger", "Python Debugger", "python-core python-io python-lang python-re python-stringold python-shell python-pprint",
                  "bdb.* pdb.*" )
    m.addPackage( "python-difflib", "Python helpers for computing deltas between objects.", "python-lang python-re",
                  "difflib.*" )
    m.addPackage( "python-distutils", "Python Distribution Utilities", "python-core",
                  "config distutils" ) # package
    m.addPackage( "python-doctest", "Python framework for running examples in docstrings.", "python-core python-lang python-io python-re python-unittest python-debugger python-difflib",
                  "doctest.*" )
    # FIXME consider adding to some higher level package
    m.addPackage( "python-elementtree", "Python elementree", "python-core",
                  "lib-dynload/_elementtree.so" )
    m.addPackage( "python-email", "Python Email Support", "python-core python-io python-re python-mime python-audio python-image python-netclient",
                  "imaplib.* email" ) # package
    m.addPackage( "python-fcntl", "Python's fcntl Interface", "python-core",
                  "lib-dynload/fcntl.so" )
    m.addPackage( "python-hotshot", "Python Hotshot Profiler", "python-core",
                  "hotshot lib-dynload/_hotshot.so" )
    m.addPackage( "python-html", "Python HTML Processing", "python-core",
                  "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* " )
    m.addPackage( "python-gdbm", "Python GNU Database Support", "python-core",
                  "lib-dynload/gdbm.so" )
    m.addPackage( "python-image", "Python Graphical Image Handling", "python-core",
                  "colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )
    m.addPackage( "python-io", "Python Low-Level I/O", "python-core python-math",
                  "lib-dynload/_socket.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
                  "pipes.* socket.* ssl.* tempfile.* StringIO.* " )
    m.addPackage( "python-json", "Python JSON Support", "python-core python-math python-re",
                  "json" ) # package
    m.addPackage( "python-lang", "Python Low-Level Language Support", "python-core",
                  "lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
                  "lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
                  "atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
                  "tokenize.* traceback.* linecache.* weakref.*" )
    m.addPackage( "python-logging", "Python Logging Support", "python-core python-io python-lang python-pickle python-stringold",
                  "logging" ) # package
    m.addPackage( "python-mailbox", "Python Mailbox Format Support", "python-core python-mime",
                  "mailbox.*" )
    m.addPackage( "python-math", "Python Math Support", "python-core",
                  "lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )
    m.addPackage( "python-mime", "Python MIME Handling APIs", "python-core python-io",
                  "mimetools.* uu.* quopri.* rfc822.*" )
    m.addPackage( "python-mmap", "Python Memory-Mapped-File Support", "python-core python-io",
                  "lib-dynload/mmap.so " )
    m.addPackage( "python-multiprocessing", "Python Multiprocessing Support", "python-core python-io python-lang",
                  "lib-dynload/_multiprocessing.so multiprocessing" ) # package
    m.addPackage( "python-netclient", "Python Internet Protocol Clients", "python-core python-crypt python-datetime python-io python-lang python-logging python-mime",
                  "*Cookie*.* " +
                  "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )
    m.addPackage( "python-netserver", "Python Internet Protocol Servers", "python-core python-netclient",
                  "cgi.* *HTTPServer.* SocketServer.*" )
    m.addPackage( "python-numbers", "Python Number APIs", "python-core python-lang python-re",
                  "decimal.* numbers.*" )
    m.addPackage( "python-pickle", "Python Persistence Support", "python-core python-codecs python-io python-re",
                  "pickle.* shelve.* lib-dynload/cPickle.so" )
    m.addPackage( "python-pkgutil", "Python Package Extension Utility Support", "python-core",
                  "pkgutil.*")
    m.addPackage( "python-pprint", "Python Pretty-Print Support", "python-core",
                  "pprint.*" )
    m.addPackage( "python-profile", "Python Basic Profiling Support", "python-core python-textutils",
                  "profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )
    m.addPackage( "python-re", "Python Regular Expression APIs", "python-core",
                  "re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
    m.addPackage( "python-readline", "Python Readline Support", "python-core",
                  "lib-dynload/readline.so rlcompleter.*" )
    m.addPackage( "python-resource", "Python Resource Control Interface", "python-core",
                  "lib-dynload/resource.so" )
    m.addPackage( "python-shell", "Python Shell-Like Functionality", "python-core python-re",
                  "cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
    m.addPackage( "python-robotparser", "Python robots.txt parser", "python-core python-netclient",
                  "robotparser.*")
    m.addPackage( "python-subprocess", "Python Subprocess Support", "python-core python-io python-re python-fcntl python-pickle",
                  "subprocess.*" )
    m.addPackage( "python-sqlite3", "Python Sqlite3 Database Support", "python-core python-datetime python-lang python-crypt python-io python-threading python-zlib",
                  "lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
    m.addPackage( "python-sqlite3-tests", "Python Sqlite3 Database Support Tests", "python-core python-sqlite3",
                  "sqlite3/test" )
    m.addPackage( "python-stringold", "Python String APIs [deprecated]", "python-core python-re",
                  "lib-dynload/strop.so string.*" )
    m.addPackage( "python-syslog", "Python Syslog Interface", "python-core",
                  "lib-dynload/syslog.so" )
    m.addPackage( "python-terminal", "Python Terminal Controlling Support", "python-core python-io",
                  "pty.* tty.*" )
    m.addPackage( "python-tests", "Python Tests", "python-core",
                  "test" ) # package
    m.addPackage( "python-threading", "Python Threading & Synchronization Support", "python-core python-lang",
                  "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
    m.addPackage( "python-tkinter", "Python Tcl/Tk Bindings", "python-core",
                  "lib-dynload/_tkinter.so lib-tk" ) # package
    m.addPackage( "python-unittest", "Python Unit Testing Framework", "python-core python-stringold python-lang",
                  "unittest.*" )
    m.addPackage( "python-unixadmin", "Python Unix Administration Support", "python-core",
                  "lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )
    m.addPackage( "python-xml", "Python basic XML support.", "python-core python-re",
                  "lib-dynload/pyexpat.so xml xmllib.*" ) # package
    m.addPackage( "python-xmlrpc", "Python XMLRPC Support", "python-core python-xml python-netserver python-lang",
                  "xmlrpclib.* SimpleXMLRPCServer.*" )
    m.addPackage( "python-zlib", "Python zlib Support.", "python-core",
                  "lib-dynload/zlib.so" )
    m.addPackage( "python-mailbox", "Python Mailbox Format Support", "python-core python-mime",
                  "mailbox.*" )
    m.make()
| Martix/Eonos | contrib/python/generate-manifest-2.6.py | Python | mit | 14,988 | [
"VisIt"
] | a73697739c6238d76dc3b5af6e9edf9d451bee58e631dec53e0cc7a3406e511a |
""" Test class for JobWrapper
"""
# pylint: disable=protected-access, invalid-name
# imports
from __future__ import print_function
import unittest
import importlib
import os
import shutil
from mock import MagicMock, patch
from DIRAC import gLogger
from DIRAC.DataManagementSystem.Client.test.mock_DM import dm_mock
from DIRAC.Resources.Catalog.test.mock_FC import fc_mock
from DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper import JobWrapper
from DIRAC.WorkloadManagementSystem.JobWrapper.WatchdogLinux import WatchdogLinux
# Shared mock used to stand in for the CS getSystemSection/getSystemInstance
# helpers in the patched tests below; always answers 'aValue'.
getSystemSectionMock = MagicMock()
getSystemSectionMock.return_value = 'aValue'
class JobWrapperTestCase(unittest.TestCase):
  """Common fixture for the JobWrapper test cases.

  Raises the logging verbosity before each test and removes any output
  files the wrapper may have left behind afterwards.
  """

  def setUp(self):
    # Verbose logging makes failing runs easier to diagnose.
    gLogger.setLevel('DEBUG')

  def tearDown(self):
    # Best-effort cleanup: ignore files that were never created.
    for leftover in ('std.out',):
      try:
        os.remove(leftover)
      except OSError:
        pass
class JobWrapperTestCaseSuccess(JobWrapperTestCase):
  """Tests of the JobWrapper main flows that are expected to succeed:
  input-data resolution, a watchdog check cycle, and payload execution."""

  def test_InputData(self):
    # Replace the CS lookup and module factory inside the JobWrapper module
    # so no real configuration service is contacted.
    myJW = importlib.import_module('DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper')
    myJW.getSystemSection = MagicMock()
    myJW.ModuleFactory = MagicMock()

    # An empty InputData specification must be rejected.
    jw = JobWrapper()
    jw.jobArgs['InputData'] = ''
    res = jw.resolveInputData()
    self.assertFalse(res['OK'])

    # A non-empty LFN with mocked DataManager/FileCatalog resolves fine.
    jw = JobWrapper()
    jw.jobArgs['InputData'] = 'pippo'
    jw.dm = dm_mock
    jw.fc = fc_mock
    res = jw.resolveInputData()
    self.assertTrue(res['OK'])

    # Same, with an explicit local SE and a custom input-data module.
    jw = JobWrapper()
    jw.jobArgs['InputData'] = 'pippo'
    jw.jobArgs['LocalSE'] = 'mySE'
    jw.jobArgs['InputDataModule'] = 'aa.bb'
    jw.dm = dm_mock
    jw.fc = fc_mock
    res = jw.resolveInputData()
    self.assertTrue(res['OK'])

  def test__performChecks(self):
    # A watchdog built around this test process should complete one check
    # cycle successfully.
    wd = WatchdogLinux(pid=os.getpid(),
                       exeThread=MagicMock(),
                       spObject=MagicMock(),
                       jobCPUTime=1000,
                       memoryLimit=1024 * 1024,
                       jobArgs={'StopSigNumber': 10})
    res = wd._performChecks()
    self.assertTrue(res['OK'])

  @patch("DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper.getSystemSection", side_effect=getSystemSectionMock)
  @patch("DIRAC.WorkloadManagementSystem.JobWrapper.Watchdog.getSystemInstance", side_effect=getSystemSectionMock)
  def test_execute(self, _patch1, _patch2):
    # Plain executable from the system path.
    jw = JobWrapper()
    jw.jobArgs = {'Executable': '/bin/ls'}
    res = jw.execute('')
    print('jw.execute() returns', str(res))
    self.assertTrue(res['OK'])

    # Script that exits cleanly; copied into the cwd because the wrapper
    # resolves the executable relative to its working directory.
    shutil.copy('WorkloadManagementSystem/JobWrapper/test/script-OK.sh', 'script-OK.sh')
    jw = JobWrapper()
    jw.jobArgs = {'Executable': 'script-OK.sh'}
    res = jw.execute('')
    self.assertTrue(res['OK'])
    os.remove('script-OK.sh')

    # Script that exits with an application error code.
    shutil.copy('WorkloadManagementSystem/JobWrapper/test/script.sh', 'script.sh')
    jw = JobWrapper()
    jw.jobArgs = {'Executable': 'script.sh', 'Arguments': '111'}
    res = jw.execute('')
    self.assertTrue(res['OK'])  # In this case the application finished with errors,
    #                             but the JobWrapper executed successfully
    os.remove('script.sh')

    # Script whose exit code asks for the job to be rescheduled.
    shutil.copy('WorkloadManagementSystem/JobWrapper/test/script-RESC.sh', 'script-RESC.sh')  # this will reschedule
    jw = JobWrapper()
    jw.jobArgs = {'Executable': 'script-RESC.sh'}
    res = jw.execute('')
    if res['OK']:  # FIXME: This may happen depending on the shell - not the best test admittedly!
      print("We should not be here, unless the 'Execution thread status' is equal to 1")
      self.assertTrue(res['OK'])
    else:
      self.assertFalse(res['OK'])  # In this case the application finished with an error code
      #                              that the JobWrapper interpreted as "to reschedule"
      #                              so in this case the "execute" is considered an error
    os.remove('script-RESC.sh')
#############################################################################
# Test Suite run
#############################################################################
if __name__ == '__main__':
  # Build the suite explicitly so both test classes run in order.
  loader = unittest.defaultTestLoader
  suite = loader.loadTestsFromTestCase(JobWrapperTestCase)
  suite.addTest(loader.loadTestsFromTestCase(JobWrapperTestCaseSuccess))
  testResult = unittest.TextTestRunner(verbosity=2).run(suite)
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| chaen/DIRAC | WorkloadManagementSystem/JobWrapper/test/Test_JobWrapper.py | Python | gpl-3.0 | 4,284 | [
"DIRAC"
] | a54beaa6efd39dc712cf79428fd75f14986508106cf20aa2646b8472a984f374 |
# $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" unit testing code for the C++ BitVects
"""
from __future__ import print_function
import os
import unittest
from rdkit.DataStructs import cDataStructs
from rdkit.six.moves import cPickle # @UnresolvedImport
klass = cDataStructs.SparseBitVect
def feq(n1, n2, tol=1e-4):
  """Approximate float equality: True when n1 and n2 differ by at most tol."""
  delta = n1 - n2
  if delta < 0:
    delta = -delta
  return delta <= tol
def ieq(n1, n2):
  """Exact (integer) equality, expressed as a zero difference."""
  return (n1 - n2) == 0
class VectTests(object):
  """Mixin exercising the cDataStructs bit-vector API.

  Concrete subclasses supply the vector type through the ``klass`` class
  attribute (SparseBitVect or ExplicitBitVect) and also inherit from
  unittest.TestCase so these tests are collected and run for each type.
  """

  def testSparseIdx(self):
    """ test indexing into SparseBitVects
    """
    v = self.klass(10)
    v[0] = 1
    v[2] = 1
    v[9] = 1
    # out-of-range writes and reads both raise IndexError
    with self.assertRaisesRegexp(IndexError, ""):
      v[10] = 1
    assert v[0] == 1, 'bad bit'
    assert v[1] == 0, 'bad bit'
    assert v[2] == 1, 'bad bit'
    assert v[9] == 1, 'bad bit'
    # negative indices count from the end, as for Python sequences
    assert v[-1] == 1, 'bad bit'
    assert v[-2] == 0, 'bad bit'
    with self.assertRaisesRegexp(IndexError, ""):
      _ = v[10]

  def testSparseBitGet(self):
    """ test operations to get sparse bits
    """
    v = self.klass(10)
    v[0] = 1
    v[2] = 1
    v[6] = 1
    assert len(v) == 10, 'len(SparseBitVect) failed'
    assert v.GetNumOnBits() == 3, 'NumOnBits failed'
    assert tuple(v.GetOnBits()) == (0, 2, 6), 'GetOnBits failed'

  def testSparseBitOps(self):
    """ test bit operations on SparseBitVects
    """
    v1 = self.klass(10)
    v1[0] = 1
    v1[2] = 1
    v1[6] = 1
    v2 = self.klass(10)
    v2[0] = 1
    v2[3] = 1
    v2[6] = 1
    # NOTE(review): the '&' assertion is duplicated in the original; kept as-is.
    assert tuple((v1 & v2).GetOnBits()) == (0, 6), 'binary & failed'
    assert tuple((v1 & v2).GetOnBits()) == (0, 6), 'binary & failed'
    assert tuple((v1 | v2).GetOnBits()) == (0, 2, 3, 6), 'binary | failed'
    assert tuple((v1 ^ v2).GetOnBits()) == (2, 3), 'binary ^ failed'

  def testTanimotoSim(self):
    """ test Tanimoto Similarity measure
    """
    v1 = self.klass(10)
    v1[0] = 1
    v1[2] = 1
    v1[6] = 1
    v2 = self.klass(10)
    v2[0] = 1
    v2[3] = 1
    v2[6] = 1
    v3 = self.klass(10)
    v3[1] = 1
    v3[4] = 1
    v3[8] = 1
    assert feq(cDataStructs.TanimotoSimilarity(v1, v1), 1.0), 'bad v1,v1 TanimotoSimilarity'
    assert feq(cDataStructs.TanimotoSimilarity(v2, v2), 1.0), 'bad v2,v2 TanimotoSimilarity'
    assert feq(cDataStructs.TanimotoSimilarity(v1, v2), 0.5), 'bad v1,v2 TanimotoSimilarity'
    assert feq(cDataStructs.TanimotoSimilarity(v2, v1), 0.5), 'bad v2,v1 TanimotoSimilarity'
    assert feq(cDataStructs.TanimotoSimilarity(v1, v3), 0.0), 'bad v1,v3 TanimotoSimilarity'
    assert feq(cDataStructs.TanimotoSimilarity(v2, v3), 0.0), 'bad v2,v3 TanimotoSimilarity'

  def testOnBitSim(self):
    """ test On Bit Similarity measure
    """
    v1 = self.klass(10)
    v1[0] = 1
    v1[2] = 1
    v1[6] = 1
    v2 = self.klass(10)
    v2[0] = 1
    v2[3] = 1
    v2[6] = 1
    v3 = self.klass(10)
    v3[1] = 1
    v3[4] = 1
    v3[8] = 1
    assert feq(cDataStructs.OnBitSimilarity(v1, v1), 1.0), 'bad v1,v1 OnBitSimilarity'
    assert feq(cDataStructs.OnBitSimilarity(v2, v2), 1.0), 'bad v2,v2 OnBitSimilarity'
    assert feq(cDataStructs.OnBitSimilarity(v1, v2), 0.5), 'bad v1,v2 OnBitSimilarity'
    assert feq(cDataStructs.OnBitSimilarity(v2, v1), 0.5), 'bad v2,v1 OnBitSimilarity'
    assert feq(cDataStructs.OnBitSimilarity(v1, v3), 0.0), 'bad v1,v3 OnBitSimilarity'
    assert feq(cDataStructs.OnBitSimilarity(v2, v3), 0.0), 'bad v2,v3 OnBitSimilarity'

  def testNumBitsInCommon(self):
    """ test calculation of Number of Bits in Common
    """
    v1 = self.klass(10)
    v1[0] = 1
    v1[2] = 1
    v1[6] = 1
    v2 = self.klass(10)
    v2[0] = 1
    v2[3] = 1
    v2[6] = 1
    v3 = self.klass(10)
    v3[1] = 1
    v3[4] = 1
    v3[8] = 1
    # counts agreement on both on- and off-bits, hence 10 for identity
    assert ieq(cDataStructs.NumBitsInCommon(v1, v1), 10), 'bad v1,v1 NumBitsInCommon'
    assert ieq(cDataStructs.NumBitsInCommon(v2, v2), 10), 'bad v2,v2 NumBitsInCommon'
    assert ieq(cDataStructs.NumBitsInCommon(v1, v2), 8), 'bad v1,v2 NumBitsInCommon'
    assert ieq(cDataStructs.NumBitsInCommon(v2, v1), 8), 'bad v2,v1 NumBitsInCommon'
    assert ieq(cDataStructs.NumBitsInCommon(v1, v3), 4), 'bad v1,v3 NumBitsInCommon'
    assert ieq(cDataStructs.NumBitsInCommon(v2, v3), 4), 'bad v2,v3 NumBitsInCommon'

  def testAllBitSim(self):
    """ test All Bit Similarity measure
    """
    v1 = self.klass(10)
    v1[0] = 1
    v1[2] = 1
    v1[6] = 1
    v2 = self.klass(10)
    v2[0] = 1
    v2[3] = 1
    v2[6] = 1
    v3 = self.klass(10)
    v3[1] = 1
    v3[4] = 1
    v3[8] = 1
    assert feq(cDataStructs.AllBitSimilarity(v1, v1), 1.0), 'bad v1,v1 AllBitSimilarity'
    assert feq(cDataStructs.AllBitSimilarity(v2, v2), 1.0), 'bad v2,v2 AllBitSimilarity'
    assert feq(cDataStructs.AllBitSimilarity(v1, v2), 0.8), 'bad v1,v2 AllBitSimilarity'
    assert feq(cDataStructs.AllBitSimilarity(v2, v1), 0.8), 'bad v2,v1 AllBitSimilarity'
    assert feq(cDataStructs.AllBitSimilarity(v1, v3), 0.4), 'bad v1,v3 AllBitSimilarity'
    assert feq(cDataStructs.AllBitSimilarity(v2, v3), 0.4), 'bad v2,v3 AllBitSimilarity'

  def testStringOps(self):
    """ test serialization operations
    """
    v1 = self.klass(10)
    v1[0] = 1
    v1[2] = 1
    v1[6] = 1
    # round-trip through the binary representation must preserve the bits
    s = v1.ToBinary()
    v2 = self.klass(s)
    assert tuple(v2.GetOnBits()) == tuple(v1.GetOnBits()), 'To/From string failed'

  def testOnBitsInCommon(self):
    """ test OnBitsInCommon
    """
    v1 = self.klass(10)
    v1[0] = 1
    v1[2] = 1
    v1[6] = 1
    v2 = self.klass(10)
    v2[0] = 1
    v2[3] = 1
    v2[6] = 1
    v3 = cDataStructs.OnBitsInCommon(v1, v2)
    assert tuple(v3) == (0, 6), 'bad on bits in common'

  def testOffBitsInCommon(self):
    """ test OffBitsInCommon
    """
    v1 = self.klass(10)
    v1[0] = 1
    v1[2] = 1
    v1[6] = 1
    v2 = self.klass(10)
    v2[0] = 1
    v2[3] = 1
    v2[6] = 1
    v3 = cDataStructs.OffBitsInCommon(v1, v2)
    assert tuple(v3) == (1, 4, 5, 7, 8, 9), 'bad off bits in common'

  def testOnBitProjSimilarity(self):
    """ test OnBitProjSimilarity
    """
    v1 = self.klass(10)
    v1[1] = 1
    v1[2] = 1
    v1[3] = 1
    v2 = self.klass(10)
    v2[2] = 1
    v2[3] = 1
    # returns a pair of directed similarities; swapping the arguments
    # swaps the pair
    res = cDataStructs.OnBitProjSimilarity(v1, v2)
    assert feq(res[0], 0.666667), 'bad 1st OnBitsProjSimilarity'
    assert feq(res[1], 1.0), 'bad 2nd OnBitsProjSimilarity'
    res = cDataStructs.OnBitProjSimilarity(v2, v1)
    assert feq(res[1], 0.666667), 'bad 1st OnBitsProjSimilarity'
    assert feq(res[0], 1.0), 'bad 2nd OnBitsProjSimilarity'

  def testOffBitProjSimilarity(self):
    """ test OffBitProjSimilarity
    """
    v1 = self.klass(10)
    v1[1] = 1
    v1[2] = 1
    v1[3] = 1
    v2 = self.klass(10)
    v2[2] = 1
    v2[3] = 1
    res = cDataStructs.OffBitProjSimilarity(v1, v2)
    assert feq(res[0], 1.0), 'bad 1st OffBitsProjSimilarity'
    assert feq(res[1], 0.875), 'bad 2nd OffBitsProjSimilarity'
    res = cDataStructs.OffBitProjSimilarity(v2, v1)
    assert feq(res[1], 1.0), 'bad 1st OffBitsProjSimilarity'
    assert feq(res[0], 0.875), 'bad 2nd OffBitsProjSimilarity'

  def testPkl(self):
    # Test pickling: vectors must survive a pickle round trip via a file.
    v1 = self.klass(10)
    v1[1] = 1
    v1[2] = 1
    v1[3] = 1
    pklName = 'foo.pkl'
    outF = open(pklName, 'wb+')
    cPickle.dump(v1, outF)
    outF.close()
    inF = open(pklName, 'rb')
    v2 = cPickle.load(inF)
    inF.close()
    os.unlink(pklName)
    assert tuple(v1.GetOnBits()) == tuple(v2.GetOnBits()), 'pkl failed'

  def testFingerprints(self):
    # Test parsing Daylight fingerprints
    # actual daylight output:
    # (each row: index, SMILES, ascii-encoded 256-bit fingerprint)
    rawD = """
0,Cc1n[nH]c(=O)nc1N,.b+HHa.EgU6+ibEIr89.CpX0g8FZiXH+R0+Ps.mr6tg.2
1,Cc1n[nH]c(=O)[nH]c1=O,.b7HEa..ccc+gWEIr89.8lV8gOF3aXFFR.+Ps.mZ6lg.2
2,Cc1nnc(NN)nc1O,.H+nHq2EcY09y5EIr9e.8p50h0NgiWGNx4+Hm+Gbslw.2
3,Cc1nnc(N)nc1C,.1.HHa..cUI6i5E2rO8.Op10d0NoiWGVx.+Hm.Gb6lo.2
"""
    # reference Tanimoto similarities (i, j, expected) from Daylight
    dists = """0,0,1.000000
0,1,0.788991
0,2,0.677165
0,3,0.686957
1,1,1.000000
1,2,0.578125
1,3,0.591304
2,2,1.000000
2,3,0.732759
3,3,1.000000
"""
    fps = []
    for line in rawD.split('\n'):
      if line:
        sbv = self.klass(256)
        _, _, fp = line.split(',')
        cDataStructs.InitFromDaylightString(sbv, fp)
        fps.append(sbv)
    ds = dists.split('\n')
    whichd = 0
    for i in range(len(fps)):
      for j in range(i, len(fps)):
        idx1, idx2, tgt = ds[whichd].split(',')
        whichd += 1
        tgt = float(tgt)
        dist = cDataStructs.TanimotoSimilarity(fps[i], fps[j])
        assert feq(tgt, dist), 'tanimoto between fps %d and %d failed' % (int(idx1), int(idx2))

  def testFold(self):
    """ test folding fingerprints
    """
    v1 = self.klass(16)
    v1[1] = 1
    v1[12] = 1
    v1[9] = 1
    v2 = cDataStructs.FoldFingerprint(v1)  # check fold with no args
    assert v1.GetNumBits() / 2 == v2.GetNumBits(), 'bad num bits post folding'
    v2 = cDataStructs.FoldFingerprint(v1, 2)  # check fold with arg
    assert v1.GetNumBits() / 2 == v2.GetNumBits(), 'bad num bits post folding'
    v2 = cDataStructs.FoldFingerprint(v1, 4)
    assert v1.GetNumBits() / 4 == v2.GetNumBits(), 'bad num bits post folding'

  def testOtherSims(self):
    """ test other similarity measures
    """
    v1 = self.klass(10)
    v1[0] = 1
    v1[2] = 1
    v1[6] = 1
    v2 = self.klass(10)
    v2[0] = 1
    v2[3] = 1
    v2[6] = 1
    assert feq(cDataStructs.CosineSimilarity(v1, v2), .6667)
    assert feq(cDataStructs.KulczynskiSimilarity(v1, v2), .6667)
    assert feq(cDataStructs.DiceSimilarity(v1, v2), .6667)
    assert feq(cDataStructs.SokalSimilarity(v1, v2), .3333)
    assert feq(cDataStructs.McConnaugheySimilarity(v1, v2), .3333)
    assert feq(cDataStructs.AsymmetricSimilarity(v1, v2), .6667)
    assert feq(cDataStructs.BraunBlanquetSimilarity(v1, v2), .6667)
    assert feq(cDataStructs.RusselSimilarity(v1, v2), .2000)
    assert feq(cDataStructs.RogotGoldbergSimilarity(v1, v2), .7619)

  def testQuickSims(self):
    """ the asymmetric similarity stuff (bv,pkl)
    """
    v1 = self.klass(10)
    v1[0] = 1
    v1[2] = 1
    v1[6] = 1
    v2 = self.klass(10)
    v2[0] = 1
    v2[3] = 1
    v2[6] = 1
    # the similarity functions also accept the binary (pickled) form of a
    # vector directly in place of a vector object
    pkl = v2.ToBinary()
    v2 = pkl
    assert feq(cDataStructs.CosineSimilarity(v1, v2), .6667)
    assert feq(cDataStructs.KulczynskiSimilarity(v1, v2), .6667)
    assert feq(cDataStructs.DiceSimilarity(v1, v2), .6667)
    assert feq(cDataStructs.SokalSimilarity(v1, v2), .3333)
    assert feq(cDataStructs.McConnaugheySimilarity(v1, v2), .3333)
    assert feq(cDataStructs.AsymmetricSimilarity(v1, v2), .6667)
    assert feq(cDataStructs.BraunBlanquetSimilarity(v1, v2), .6667)
    assert feq(cDataStructs.RusselSimilarity(v1, v2), .2000)
    assert feq(cDataStructs.RogotGoldbergSimilarity(v1, v2), .7619)
# Run the shared VectTests suite against the sparse implementation.
class SparseBitVectTests(VectTests, unittest.TestCase):
  klass = cDataStructs.SparseBitVect
# Run the shared VectTests suite against the explicit (dense) implementation.
class ExplicitTestCase(VectTests, unittest.TestCase):
  klass = cDataStructs.ExplicitBitVect
# Allow running this test module directly.
if __name__ == '__main__':  # pragma: nocover
  unittest.main()
| jandom/rdkit | rdkit/DataStructs/UnitTestcBitVect.py | Python | bsd-3-clause | 11,080 | [
"RDKit"
] | b565ec4c394087218c14fd7e530673e187ae99f9383cea4e70821e8dc8d4ec9a |
#! /usr/bin/python
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner (modified by Florian Wilhelm)
* License: Public Domain
* Version: 0.8+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
modify your __init__.py to define __version__ (by calling a function
from _version.py)
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
import os, sys, re
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
# Per-project configuration; setup.py must assign these before calling
# get_version()/get_cmdclass() (see the module docstring for details).
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
# Only git is supported by this copy of versioneer.
VCS = "git"
# False here in versioneer.py; the generated _version.py (built from
# LONG_VERSION_PY below) sets it to True to adjust path handling.
IN_LONG_VERSION_PY = False
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.8+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False, hide_stderr=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_source,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
toplevel = run_command([GIT, "rev-parse", "--show-toplevel"],
hide_stderr=True)
root = (toplevel.strip() if toplevel else os.path.dirname(here))
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return rep_by_pep440(ver)
def git2pep440(ver_str):
try:
tag, commits, _ = ver_str.split('-', 2)
return ".post".join([tag, commits])
except ValueError:
return ver_str
def rep_by_pep440(ver):
ver["version"] = git2pep440(ver["version"])
return ver
'''
import subprocess
import sys
def run_command(args, cwd=None, verbose=False, hide_stderr=False):
    """Run *args* (an argv list, shell=False) and return its stripped stdout.

    Returns None when the executable cannot be launched or exits with a
    non-zero status; *verbose* prints a diagnostic in either case.
    """
    stderr_target = subprocess.PIPE if hide_stderr else None
    try:
        # shell=False on purpose: on windows callers pass git.cmd, not git
        proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=stderr_target)
    except EnvironmentError:
        if verbose:
            print("unable to run %s" % args[0])
            print(sys.exc_info()[1])
        return None
    output = proc.communicate()[0].strip()
    if sys.version >= '3':
        output = output.decode()
    if proc.returncode == 0:
        return output
    if verbose:
        print("unable to run %s (error)" % args[0])
    return None
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
    """Scrape the git_refnames/git_full string values out of _version.py.

    The file is scanned textually (never imported) so this is safe to call
    from setup.py.  Returns a dict with "refnames"/"full" keys for the
    assignments found; a missing or unreadable file yields {}.
    """
    variables = {}
    extract = re.compile(r'=\s*"(.*)"')
    try:
        with open(versionfile_source, "r") as fh:
            for line in fh:
                stripped = line.strip()
                if stripped.startswith("git_refnames ="):
                    mo = extract.search(line)
                    if mo:
                        variables["refnames"] = mo.group(1)
                elif stripped.startswith("git_full ="):
                    mo = extract.search(line)
                    if mo:
                        variables["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    """Derive a version dict from git-archive-expanded keyword variables.

    *variables* holds the "refnames"/"full" strings scraped from
    _version.py.  Returns {"version": ..., "full": ...}, or {} when the
    keywords were never expanded (i.e. not an unpacked git-archive tarball).
    """
    refnames = variables["refnames"].strip()
    full = variables["full"].strip()
    if refnames.startswith("$Format"):
        # keywords were never expanded: not an unpacked git-archive tarball
        if verbose:
            print("variables are unexpanded, not using")
        return {}
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # git >= 1.8.3 labels tags as "tag: foo-1.0"; prefer those when present.
    TAG = "tag: "
    tags = set(r[len(TAG):] for r in refs if r.startswith(TAG))
    if not tags:
        # Older git strips the refs/tags/ prefix, so fall back to a
        # heuristic: version tags contain a digit, while branch names like
        # "master", "release" or "HEAD" usually do not.
        tags = set(r for r in refs if re.search(r'\d', r))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    # sorted() prefers e.g. "2.0" over "2.0rc1"
    for ref in sorted(tags):
        if not ref.startswith(tag_prefix):
            continue
        picked = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % picked)
        return {"version": picked, "full": full}
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": full, "full": full}
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    """Ask git (via 'git describe') for the current version.

    Returns {"version": ..., "full": ...} or {} when git/the work tree is
    unavailable or the described tag lacks *tag_prefix*.
    """
    # this runs 'git' from the root of the source tree. That either means
    # someone ran a setup.py command (and this code is in versioneer.py, so
    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    # the source tree), or someone ran a project-specific entry point (and
    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    # containing directory is somewhere deeper in the source tree). This only
    # gets called if the git-archive 'subst' variables were *not* expanded,
    # and _version.py hasn't already been rewritten with a short version
    # string, meaning we're inside a checked out source tree.
    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {}  # not always correct
    GIT = "git"
    if sys.platform == "win32":
        GIT = "git.cmd"
    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    root = here
    if IN_LONG_VERSION_PY:
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # ask git itself for the work-tree root, falling back to our own dir
        toplevel = run_command([GIT, "rev-parse", "--show-toplevel"],
                               hide_stderr=True)
        root = (toplevel.strip() if toplevel else os.path.dirname(here))
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    # propagate the dirty marker from 'describe' onto the full revision id
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    """Infer the version from the name of the unpacked source directory.

    Tarballs conventionally unpack into '<parentdir_prefix><version>/'; the
    suffix after the prefix is taken as the version.  Returns None when the
    directory name does not carry the expected prefix, {} when the root
    cannot be determined at all.
    """
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), climb from this file up to the tree root, one
        # level per path component of versionfile_source.  In an installed
        # application there's no usable parent directory.
        try:
            here = os.path.abspath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {}
        root = here
        for _ in versionfile_source.split("/"):
            root = os.path.dirname(root)
    else:
        # We're running from versioneer.py, i.e. from the setup.py in a
        # source tree; sys.argv[0] is that setup.py sitting at the root.
        root = os.path.dirname(os.path.abspath(sys.argv[0]))
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}
import os.path
import sys
# os.path.relpath only appeared in Python-2.6 . Define it here for 2.5.
def os_path_relpath(path, start=os.path.curdir):
    """Return a relative version of *path* with respect to *start*.

    Backport of ``os.path.relpath`` (new in Python 2.6) for Python 2.5.
    Raises ValueError when *path* is empty.
    """
    if not path:
        raise ValueError("no path specified")

    def _components(p):
        # Split an absolute path into its non-empty pieces.
        return [piece for piece in os.path.abspath(p).split(os.path.sep) if piece]

    start_parts = _components(start)
    path_parts = _components(path)
    # How many leading components do the two paths share?
    shared = len(os.path.commonprefix([start_parts, path_parts]))
    climb = [os.path.pardir] * (len(start_parts) - shared)
    remainder = path_parts[shared:]
    pieces = climb + remainder
    if not pieces:
        return os.path.curdir
    return os.path.join(*pieces)
def do_vcs_install(versionfile_source, ipy):
    """Register versioneer's files with git.

    Ensures .gitattributes marks the version file 'export-subst' (so
    'git archive' expands keywords), then stages the version file, the
    package __init__.py and versioneer.py itself via 'git add'.
    """
    GIT = "git"
    if sys.platform == "win32":
        # On Windows the git launcher is a .cmd batch file.
        GIT = "git.cmd"
    files = [versionfile_source, ipy]
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os_path_relpath(me)
    except NameError:
        # Frozen environments (py2exe/bbfreeze) have no __file__.
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # FIX: use 'with' so the handle is closed even if a line is
        # malformed; the original leaked the descriptor on exceptions.
        with open(".gitattributes", "r") as f:
            for line in f.readlines():
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        # Missing .gitattributes simply means the entry is not present.
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command([GIT, "add", "--"] + files)
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.8+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
    """Parse version_version/version_full assignments out of a previously
    generated _version.py.

    Returns a dict with up to two keys, 'version' and 'full'; the dict is
    empty when the file cannot be opened, and keys are simply absent when
    the corresponding line is missing.
    """
    versions = {}
    try:
        f = open(filename)
    except EnvironmentError:
        return versions
    try:
        # Iterate the handle directly instead of readlines(): same lines,
        # no full-file buffer.
        for line in f:
            mo = re.match("version_version = '([^']+)'", line)
            if mo:
                versions["version"] = mo.group(1)
            mo = re.match("version_full = '([^']+)'", line)
            if mo:
                versions["full"] = mo.group(1)
    finally:
        # FIX: close on every path; the original leaked the handle when
        # reading raised.
        f.close()
    return versions
def write_to_version_file(filename, versions):
    """Overwrite *filename* with a SHORT_VERSION_PY rendering of *versions*.

    *versions* must supply 'version' and 'full' keys.
    """
    # FIX: 'with' closes (and flushes) the file even if the write fails;
    # the original left the handle open on error.
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % versions)
    print("set %s to '%s'" % (filename, versions["version"]))
def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
                      default=DEFAULT, verbose=False):
    """Return a dict with two keys, 'version' and 'full'.

    Extracts the version from the first source that answers: expanded
    keyword variables, _version.py, 'git describe', then the parent
    directory name. This is meant to work for developers using a source
    checkout, for users of a tarball created by 'setup.py sdist', and for
    users of a tarball/zipball created by 'git archive' or github's
    download-from-tag feature. Falls back to *default* when every source
    fails.
    """
    variables = get_expanded_variables(versionfile_source)
    if variables:
        ver = versions_from_expanded_variables(variables, tag_prefix)
        if ver:
            if verbose: print("got version from expanded variable %s" % ver)
            return rep_by_pep440(ver)
    ver = versions_from_file(versionfile)
    if ver:
        if verbose: print("got version from file %s %s" % (versionfile, ver))
        return rep_by_pep440(ver)
    ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if ver:
        if verbose: print("got version from git %s" % ver)
        return rep_by_pep440(ver)
    ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
    if ver:
        if verbose: print("got version from parentdir %s" % ver)
        return rep_by_pep440(ver)
    # FIX: the original printed 'ver' here, but at this point 'ver' is the
    # falsy result of the failed parentdir lookup — print the value that is
    # actually being returned.
    if verbose: print("got version from default %s" % default)
    return default
def get_versions(default=DEFAULT, verbose=False):
    """Return {'version': ..., 'full': ...} for the current tree.

    The module-level knobs versionfile_source, tag_prefix and
    parentdir_prefix must be configured (by setup.py) before calling.
    """
    assert versionfile_source is not None, "please set versioneer.versionfile_source"
    assert tag_prefix is not None, "please set versioneer.tag_prefix"
    assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
    return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
                             default=default, verbose=verbose)
def get_version(verbose=False):
    """Return just the version string (see get_versions for the sources)."""
    versions = get_versions(verbose=verbose)
    return versions["version"]
class cmd_version(Command):
    # Distutils command that computes and prints the generated version.
    description = "report generated version string"
    user_options = []
    boolean_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        # verbose=True so the chosen version source is reported too.
        ver = get_version(verbose=True)
        print("Version is currently: %s" % ver)
class cmd_build(_build):
    """'build' command that rewrites _version.py inside build_lib with the
    version computed at build time."""
    def run(self):
        versions = get_versions(verbose=True)
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        target_versionfile = os.path.join(self.build_lib, versionfile_build)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        # FIX: 'with' ensures the file is closed/flushed even if the
        # rendering fails; the original leaked the handle on error.
        with open(target_versionfile, "w") as f:
            f.write(SHORT_VERSION_PY % versions)
class cmd_sdist(_sdist):
    """'sdist' command that freezes the computed version into the release
    tree so tarball users get a real version string."""
    def run(self):
        versions = get_versions(verbose=True)
        self._versioneer_generated_versions = versions
        # unless we update this, the command will keep using the old version
        self.distribution.metadata.version = versions["version"]
        return _sdist.run(self)
    def make_release_tree(self, base_dir, files):
        _sdist.make_release_tree(self, base_dir, files)
        # now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        # FIX: 'with' closes the handle on all paths (original leaked it
        # if the write raised).
        with open(target_versionfile, "w") as f:
            f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
# Snippet appended (at most once) to the package __init__.py so that
# 'package.__version__' reflects the generated _version.py.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
    """Distutils command that writes _version.py, splices the import
    snippet into the package __init__.py (once), and registers both files
    with git."""
    description = "modify __init__.py and create _version.py"
    user_options = []
    boolean_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
        print(" creating %s" % versionfile_source)
        # FIX: all file access below uses 'with' so handles are closed on
        # every path; the original read of __init__.py leaked its handle.
        with open(versionfile_source, "w") as f:
            f.write(LONG_VERSION_PY % {"DOLLAR": "$",
                                       "TAG_PREFIX": tag_prefix,
                                       "PARENTDIR_PREFIX": parentdir_prefix,
                                       "VERSIONFILE_SOURCE": versionfile_source,
                                       })
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            # No __init__.py yet: treat as empty so the snippet is added.
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
        do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
    """Return the distutils command overrides provided by versioneer."""
    commands = {}
    commands['version'] = cmd_version
    commands['update_files'] = cmd_update_files
    commands['build'] = cmd_build
    commands['sdist'] = cmd_sdist
    return commands
def git2pep440(ver_str):
    """Convert a 'git describe' string ("TAG-N-gHASH") into a PEP 440
    post-release string ("TAG.postN").

    Strings without at least two dashes (plain tags, or a lone "-dirty"
    suffix) are returned unchanged.
    """
    pieces = ver_str.split('-', 2)
    if len(pieces) != 3:
        # No commit count present; nothing to rewrite.
        return ver_str
    tag, commits = pieces[0], pieces[1]
    return "%s.post%s" % (tag, commits)
def rep_by_pep440(ver):
    """Rewrite the 'version' entry of *ver* in PEP 440 form, in place,
    and return the same dict for convenience."""
    pep440_version = git2pep440(ver["version"])
    ver["version"] = pep440_version
    return ver
| uwekamper/django-url-counter | versioneer.py | Python | bsd-3-clause | 28,459 | [
"Brian"
] | 21069e867ca5c260e43b39956455598f653a6c9df87264e68d85dc4cad7cafb0 |
# Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import csv
import json
import pickle
import os
import tempfile
import vcr
from urllib.request import urlopen
from .common import BaseTest, ACCOUNT_ID, Bag
from .test_s3 import destroyBucket
from c7n.config import Config
from c7n.resolver import ValuesFrom, URIResolver
class FakeCache:
    """In-memory cache double mimicking c7n's cache interface.

    Keys are pickled (mirroring the real cache) and get/save calls are
    counted so tests can assert on cache usage.
    """

    def __init__(self):
        self.state = {}
        self.gets = 0
        self.saves = 0

    @staticmethod
    def _encode(key):
        # The real cache keys on the pickled representation of the key.
        return pickle.dumps(key)

    def get(self, key):
        self.gets = self.gets + 1
        return self.state.get(self._encode(key))

    def save(self, key, data):
        self.saves = self.saves + 1
        self.state[self._encode(key)] = data
class FakeResolver:
    """Resolver double that returns canned text for any URI."""

    def __init__(self, contents):
        # The real resolver always yields text, so decode bytes up front.
        if isinstance(contents, bytes):
            self.contents = contents.decode("utf8")
        else:
            self.contents = contents

    def resolve(self, uri):
        # The URI is irrelevant; always hand back the canned payload.
        return self.contents
class ResolverTest(BaseTest):
    # Exercises URIResolver against s3://, http:// and file: URIs.
    def test_resolve_s3(self):
        # Round-trip an object through S3 (recorded flight data) and check
        # the resolved body lands in the cache under the pickled
        # ("uri-resolver", uri) key.
        session_factory = self.replay_flight_data("test_s3_resolver")
        session = session_factory()
        client = session.client("s3")
        resource = session.resource("s3")
        bname = "custodian-byebye"
        client.create_bucket(Bucket=bname)
        self.addCleanup(destroyBucket, client, bname)
        key = resource.Object(bname, "resource.json")
        content = json.dumps({"moose": {"soup": "duck"}})
        key.put(
            Body=content, ContentLength=len(content), ContentType="application/json"
        )
        cache = FakeCache()
        resolver = URIResolver(session_factory, cache)
        uri = "s3://%s/resource.json?RequestPayer=requestor" % bname
        data = resolver.resolve(uri)
        self.assertEqual(content, data)
        self.assertEqual(list(cache.state.keys()), [pickle.dumps(("uri-resolver", uri))])
    def test_handle_content_encoding(self):
        # gzip-encoded HTTP responses must be transparently decompressed;
        # the request is replayed from a vcr cassette, not the network.
        session_factory = self.replay_flight_data("test_s3_resolver")
        cache = FakeCache()
        resolver = URIResolver(session_factory, cache)
        uri = "http://httpbin.org/gzip"
        with vcr.use_cassette('tests/data/vcr_cassettes/test_resolver.yaml'):
            response = urlopen(uri)
            content = resolver.handle_response_encoding(response)
            data = json.loads(content)
            self.assertEqual(data['gzipped'], True)
            self.assertEqual(response.headers['Content-Encoding'], 'gzip')
    def test_resolve_file(self):
        # file: URIs should read the local file verbatim.
        content = json.dumps({"universe": {"galaxy": {"system": "sun"}}})
        cache = FakeCache()
        resolver = URIResolver(None, cache)
        with tempfile.NamedTemporaryFile(mode="w+", dir=os.getcwd(), delete=False) as fh:
            self.addCleanup(os.unlink, fh.name)
            fh.write(content)
            fh.flush()
            self.assertEqual(resolver.resolve("file:%s" % fh.name), content)
class UrlValueTest(BaseTest):
    # Exercises ValuesFrom parsing of json/txt/csv/csv2dict payloads; the
    # network fetch is stubbed out with FakeResolver so only parsing and
    # expression evaluation are under test.
    def setUp(self):
        # Work in the temp dir so scratch files don't pollute the repo.
        self.old_dir = os.getcwd()
        os.chdir(tempfile.gettempdir())
    def tearDown(self):
        os.chdir(self.old_dir)
    def get_values_from(self, data, content, cache=None):
        # Build a ValuesFrom whose resolver returns the canned *content*
        # regardless of the configured url.
        config = Config.empty(account_id=ACCOUNT_ID)
        mgr = Bag({"session_factory": None, "_cache": cache, "config": config})
        values = ValuesFrom(data, mgr)
        values.resolver = FakeResolver(content)
        return values
    def test_json_expr(self):
        values = self.get_values_from(
            {"url": "moon", "expr": "[].bean", "format": "json"},
            json.dumps([{"bean": "magic"}]),
        )
        self.assertEqual(values.get_values(), ["magic"])
    def test_invalid_format(self):
        # A url with no recognizable extension and no explicit format
        # should raise.
        values = self.get_values_from({"url": "mars"}, "")
        self.assertRaises(ValueError, values.get_values)
    def test_txt(self):
        # Plain-text payloads are split into one value per line.
        with open("resolver_test.txt", "w") as out:
            for i in ["a", "b", "c", "d"]:
                out.write("%s\n" % i)
        with open("resolver_test.txt", "rb") as out:
            values = self.get_values_from({"url": "letters.txt"}, out.read())
        os.remove("resolver_test.txt")
        self.assertEqual(values.get_values(), ["a", "b", "c", "d"])
    def test_csv_expr(self):
        # jmespath expression selecting the third column of every row.
        with open("test_expr.csv", "w") as out:
            writer = csv.writer(out)
            writer.writerows([range(5) for r in range(5)])
        with open("test_expr.csv", "rb") as out:
            values = self.get_values_from(
                {"url": "sun.csv", "expr": "[*][2]"}, out.read()
            )
        os.remove("test_expr.csv")
        self.assertEqual(values.get_values(), ["2", "2", "2", "2", "2"])
    def test_csv_expr_using_dict(self):
        # csv2dict keys values by header column instead of position.
        with open("test_dict.csv", "w") as out:
            writer = csv.writer(out)
            writer.writerow(["aa", "bb", "cc", "dd", "ee"]) # header row
            writer.writerows([range(5) for r in range(5)])
        with open("test_dict.csv", "rb") as out:
            values = self.get_values_from(
                {"url": "sun.csv", "expr": "bb[1]", "format": "csv2dict"}, out.read()
            )
        os.remove("test_dict.csv")
        self.assertEqual(values.get_values(), "1")
    def test_csv_column(self):
        # An integer expr selects a column by index.
        with open("test_column.csv", "w") as out:
            writer = csv.writer(out)
            writer.writerows([range(5) for r in range(5)])
        with open("test_column.csv", "rb") as out:
            values = self.get_values_from({"url": "sun.csv", "expr": 1}, out.read())
        os.remove("test_column.csv")
        self.assertEqual(values.get_values(), ["1", "1", "1", "1", "1"])
    def test_csv_raw(self):
        # Without an expr the full row structure is returned.
        with open("test_raw.csv", "w") as out:
            writer = csv.writer(out)
            writer.writerows([range(3, 4) for r in range(5)])
        with open("test_raw.csv", "rb") as out:
            values = self.get_values_from({"url": "sun.csv"}, out.read())
        os.remove("test_raw.csv")
        self.assertEqual(values.get_values(), [["3"], ["3"], ["3"], ["3"], ["3"]])
    def test_value_from_vars(self):
        # {account_id}/{region} placeholders in url/expr must be expanded
        # from the policy config.
        values = self.get_values_from(
            {"url": "{account_id}", "expr": '["{region}"][]', "format": "json"},
            json.dumps({"us-east-1": "east-resource"}),
        )
        self.assertEqual(values.get_values(), ["east-resource"])
        self.assertEqual(values.data.get("url", ""), ACCOUNT_ID)
    def test_value_from_caching(self):
        # Repeated get_values() calls should hit the cache: one save,
        # one get per call.
        cache = FakeCache()
        values = self.get_values_from(
            {"url": "", "expr": '["{region}"][]', "format": "json"},
            json.dumps({"us-east-1": "east-resource"}),
            cache=cache,
        )
        self.assertEqual(values.get_values(), ["east-resource"])
        self.assertEqual(values.get_values(), ["east-resource"])
        self.assertEqual(values.get_values(), ["east-resource"])
        self.assertEqual(cache.saves, 1)
        self.assertEqual(cache.gets, 3)
| capitalone/cloud-custodian | tests/test_resolver.py | Python | apache-2.0 | 6,910 | [
"Galaxy",
"MOOSE"
] | 22ab0e6ae9e8967490b68cbd16ebcb50be4cbca22c0c1d689d0fc50ce21eabf2 |
"""This source manages a VTK dataset given to it. When this source is
pickled or persisted, it saves the data given to it in the form of a
gzipped string.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2015, Enthought, Inc.
# License: BSD Style.
import sys
import os
import tempfile
# Enthought library imports.
from traits.api import Instance, List, Str, Bool, Int
from traitsui.api import View, Group, Item
from apptools.persistence.state_pickler \
import gzip_string, gunzip_string, set_state
from tvtk.api import tvtk
from tvtk import messenger
# Local imports.
from tvtk.common import is_old_pipeline, configure_input_data
from mayavi.core.source import Source
from mayavi.core.common import handle_children_state
from mayavi.core.trait_defs import DEnum
from mayavi.core.pipeline_info import (PipelineInfo,
get_tvtk_dataset_name)
from .vtk_xml_file_reader import get_all_attributes
######################################################################
# Utility functions.
######################################################################
def write_dataset_to_string(data):
    """Given a dataset, convert the dataset to an ASCII string that can
    be stored for persistence.
    """
    w = tvtk.DataSetWriter(write_to_output_string=1)
    # Remember the writer's warning setting so it can be restored below.
    warn = w.global_warning_display
    configure_input_data(w, data)
    # Silence VTK warnings for the duration of the write.
    w.global_warning_display = 0
    w.update()
    if w.output_string_length == 0:
        # Some VTK versions (5.2) have a bug when writing structured
        # grid datasets and produce empty output. We work around this
        # by writing to a file and then reading that output.
        w.write_to_output_string = 0
        fh, fname = tempfile.mkstemp('.vtk')
        # Only the path is needed; close and remove so the writer can
        # create the file itself.
        os.close(fh); os.remove(fname)
        w.file_name = fname
        w.write()
        # Read the data and delete the file.
        sdata = open(fname).read()
        os.remove(fname)
    else:
        sdata = w.output_string
    # Restore the original warning setting before returning.
    w.global_warning_display = warn
    return sdata
def has_attributes(dataset):
    """Return `True` when the given TVTK `dataset` carries any attribute
    arrays in its point or cell data, `False` otherwise.
    """
    # Point and cell data are checked identically, so loop over both.
    for attribute_data in (dataset.point_data, dataset.cell_data):
        if attribute_data is not None and attribute_data.number_of_arrays > 0:
            return True
    return False
######################################################################
# `VTKDataSource` class
######################################################################
class VTKDataSource(Source):
    """This source manages a VTK dataset given to it. When this
    source is pickled or persisted, it saves the data given to it in
    the form of a gzipped string.

    Note that if the VTK dataset has changed internally and you need
    to notify the mayavi pipeline to flush the data just call the
    `modified` method of the VTK dataset and the mayavi pipeline will
    update automatically.
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The VTK dataset to manage.
    data = Instance(tvtk.DataSet, allow_none=False)
    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['any'],
                               attribute_types=['any'],
                               attributes=['any'])
    ########################################
    # Dynamic traits: These traits are dynamic and are updated on the
    # _update_data method.
    # The active point scalar name.
    point_scalars_name = DEnum(values_name='_point_scalars_list',
                               desc='scalar point data attribute to use')
    # The active point vector name.
    point_vectors_name = DEnum(values_name='_point_vectors_list',
                               desc='vectors point data attribute to use')
    # The active point tensor name.
    point_tensors_name = DEnum(values_name='_point_tensors_list',
                               desc='tensor point data attribute to use')
    # The active cell scalar name.
    cell_scalars_name = DEnum(values_name='_cell_scalars_list',
                              desc='scalar cell data attribute to use')
    # The active cell vector name.
    cell_vectors_name = DEnum(values_name='_cell_vectors_list',
                              desc='vectors cell data attribute to use')
    # The active cell tensor name.
    cell_tensors_name = DEnum(values_name='_cell_tensors_list',
                              desc='tensor cell data attribute to use')
    ########################################
    # Our view.
    view = View(Group(Item(name='point_scalars_name'),
                      Item(name='point_vectors_name'),
                      Item(name='point_tensors_name'),
                      Item(name='cell_scalars_name'),
                      Item(name='cell_vectors_name'),
                      Item(name='cell_tensors_name'),
                      Item(name='data'),
                      ))
    ########################################
    # Private traits.
    # These private traits store the list of available data
    # attributes. The non-private traits use these lists internally.
    _point_scalars_list = List(Str)
    _point_vectors_list = List(Str)
    _point_tensors_list = List(Str)
    _cell_scalars_list = List(Str)
    _cell_vectors_list = List(Str)
    _cell_tensors_list = List(Str)
    # This filter allows us to change the attributes of the data
    # object and will ensure that the pipeline is properly taken care
    # of. Directly setting the array in the VTK object will not do
    # this.
    _assign_attribute = Instance(tvtk.AssignAttribute, args=(),
                                 allow_none=False)
    # Toggles if this is the first time this object has been used.
    _first = Bool(True)
    # The ID of the observer for the data.
    _observer_id = Int(-1)
    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        """Return the picklable state, with the dataset serialized to a
        gzipped VTK-writer string and transient traits dropped."""
        d = super(VTKDataSource, self).__get_pure_state__()
        for name in ('_assign_attribute', '_first', '_observer'):
            d.pop(name, None)
        for name in ('point_scalars', 'point_vectors',
                     'point_tensors', 'cell_scalars',
                     'cell_vectors', 'cell_tensors'):
            d.pop('_' + name + '_list', None)
            d.pop('_' + name + '_name', None)
        data = self.data
        if data is not None:
            sdata = write_dataset_to_string(data)
            if sys.version_info[0] > 2:
                z = gzip_string(sdata.encode('ascii'))
            else:
                z = gzip_string(sdata)
            d['data'] = z
        return d
    def __set_pure_state__(self, state):
        """Restore state: gunzip and re-read the serialized dataset, then
        restore the remaining traits and the children."""
        z = state.data
        if z is not None:
            if sys.version_info[0] > 2:
                d = gunzip_string(z).decode('ascii')
            else:
                d = gunzip_string(z)
            r = tvtk.DataSetReader(read_from_input_string=1,
                                   input_string=d)
            warn = r.global_warning_display
            # Silence reader warnings during the restore.
            r.global_warning_display = 0
            r.update()
            r.global_warning_display = warn
            self.data = r.output
        # Now set the remaining state without touching the children.
        set_state(self, state, ignore=['children', 'data'])
        # Setup the children.
        handle_children_state(self.children, state.children)
        # Setup the children's state.
        set_state(self, state, first=['children'], ignore=['*'])
    ######################################################################
    # `Base` interface
    ######################################################################
    def start(self):
        """This is invoked when this object is added to the mayavi
        pipeline.
        """
        # Do nothing if we are already running.
        if self.running:
            return
        # Update the data just in case.
        self._update_data()
        # Call the parent method to do its thing. This will typically
        # start all our children.
        super(VTKDataSource, self).start()
    def update(self):
        """Invoke this to flush data changes downstream. This is
        typically used when you change the data object and want the
        mayavi pipeline to refresh.
        """
        # This tells the VTK pipeline that the data has changed. This
        # will fire the data_changed event automatically.
        self.data.modified()
        self._assign_attribute.update()
    ######################################################################
    # Non-public interface
    ######################################################################
    def _data_changed(self, old, new):
        """Static trait handler: wire the new dataset into the pipeline
        (via AssignAttribute when it has attributes), swap the VTK
        ModifiedEvent observer from old to new, and refresh the label."""
        if has_attributes(self.data):
            aa = self._assign_attribute
            self.configure_input_data(aa, new)
            self._update_data()
            aa.update()
            self.outputs = [aa.output]
        else:
            # No attribute arrays: expose the raw dataset directly.
            self.outputs = [self.data]
        self.data_changed = True
        self.output_info.datasets = \
                [get_tvtk_dataset_name(self.outputs[0])]
        # Add an observer to the VTK dataset after removing the one
        # for the old dataset. We use the messenger to avoid an
        # uncollectable reference cycle. See the
        # tvtk.messenger module documentation for details.
        if old is not None:
            old.remove_observer(self._observer_id)
        self._observer_id = new.add_observer('ModifiedEvent',
                                             messenger.send)
        new_vtk = tvtk.to_vtk(new)
        messenger.connect(new_vtk, 'ModifiedEvent',
                          self._fire_data_changed)
        # Change our name so that our label on the tree is updated.
        self.name = self._get_name()
    def _fire_data_changed(self, *args):
        """Simply fire the `data_changed` event."""
        self.data_changed = True
    def _set_data_name(self, data_type, attr_type, value):
        """Activate the named attribute array (e.g. data_type='scalars',
        attr_type='point') on the dataset and on the AssignAttribute
        filter; an empty name deactivates the attribute."""
        if value is None:
            return
        dataset = self.data
        if len(value) == 0:
            # If the value is empty then we deactivate that attribute.
            d = getattr(dataset, attr_type + '_data')
            method = getattr(d, 'set_active_%s'%data_type)
            method(None)
            self.data_changed = True
            return
        aa = self._assign_attribute
        data = None
        if attr_type == 'point':
            data = dataset.point_data
        elif attr_type == 'cell':
            data = dataset.cell_data
        method = getattr(data, 'set_active_%s'%data_type)
        method(value)
        aa.assign(value, data_type.upper(), attr_type.upper() +'_DATA')
        if data_type == 'scalars' and dataset.is_a('vtkImageData'):
            # Set the scalar_type for image data, if not you can either
            # get garbage rendered or worse.
            s = getattr(dataset, attr_type + '_data').scalars
            r = s.range
            if is_old_pipeline():
                dataset.scalar_type = s.data_type
                aa.output.scalar_type = s.data_type
        aa.update()
        # Fire an event, so the changes propagate.
        self.data_changed = True
    # Static trait handlers delegating to _set_data_name for each of the
    # six (point/cell x scalars/vectors/tensors) attribute names.
    def _point_scalars_name_changed(self, value):
        self._set_data_name('scalars', 'point', value)
    def _point_vectors_name_changed(self, value):
        self._set_data_name('vectors', 'point', value)
    def _point_tensors_name_changed(self, value):
        self._set_data_name('tensors', 'point', value)
    def _cell_scalars_name_changed(self, value):
        self._set_data_name('scalars', 'cell', value)
    def _cell_vectors_name_changed(self, value):
        self._set_data_name('vectors', 'cell', value)
    def _cell_tensors_name_changed(self, value):
        self._set_data_name('tensors', 'cell', value)
    def _update_data(self):
        """Refresh the dynamic attribute-name lists from the dataset and
        (re)activate default arrays on first use."""
        if self.data is None:
            return
        pnt_attr, cell_attr = get_all_attributes(self.data)
        pd = self.data.point_data
        scalars = pd.scalars
        if self.data.is_a('vtkImageData') and scalars is not None:
            # For some reason getting the range of the scalars flushes
            # the data through to prevent some really strange errors
            # when using an ImagePlaneWidget.
            r = scalars.range
            if is_old_pipeline():
                self._assign_attribute.output.scalar_type = scalars.data_type
                self.data.scalar_type = scalars.data_type
        def _setup_data_traits(obj, attributes, d_type):
            """Given the object, the dict of the attributes from the
            `get_all_attributes` function and the data type
            (point/cell) data this will setup the object and the data.
            """
            attrs = ['scalars', 'vectors', 'tensors']
            aa = obj._assign_attribute
            data = getattr(obj.data, '%s_data'%d_type)
            for attr in attrs:
                values = attributes[attr]
                # The trailing empty entry lets the user deactivate the
                # attribute from the UI.
                values.append('')
                setattr(obj, '_%s_%s_list'%(d_type, attr), values)
                if len(values) > 1:
                    default = getattr(obj, '%s_%s_name'%(d_type, attr))
                    if obj._first and len(default) == 0:
                        default = values[0]
                    getattr(data, 'set_active_%s'%attr)(default)
                    aa.assign(default, attr.upper(),
                              d_type.upper() +'_DATA')
                    aa.update()
                    # Set the trait quietly to avoid re-triggering the
                    # *_name_changed handlers.
                    kw = {'%s_%s_name'%(d_type, attr): default,
                          'trait_change_notify': False}
                    obj.set(**kw)
        _setup_data_traits(self, pnt_attr, 'point')
        _setup_data_traits(self, cell_attr, 'cell')
        if self._first:
            self._first = False
        # Propagate the data changed event.
        self.data_changed = True
    def _get_name(self):
        """ Gets the name to display on the tree.
        """
        ret = "VTK Data (uninitialized)"
        if self.data is not None:
            typ = self.data.__class__.__name__
            ret = "VTK Data (%s)"%typ
        if '[Hidden]' in self.name:
            ret += ' [Hidden]'
        return ret
"Mayavi",
"VTK"
] | e05016dee60b3560d774b9902875ee4f48ed1aeec48e86a91a4692289e11249a |
#!/usr/bin/env python
"""
Start DIRAC component using runsvctrl utility
"""
__RCSID__ = "$Id$"
#
from DIRAC.Core.Base import Script
# The configuration service is not needed just to start local components.
Script.disableCS()
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     '  %s [option|cfgfile] ... [system [service|agent]]' % Script.scriptName,
                                     'Arguments:',
                                     '  system: Name of the system for the component (default *: all)',
                                     '  service|agent: Name of the particular component (default *: all)' ] ) )
Script.parseCommandLine()
args = Script.getPositionalArgs()
# At most two positional arguments are accepted: system and component.
if len( args ) > 2:
  Script.showHelp()
  exit( -1 )
# '*' means "all systems" / "all components".
system = '*'
component = '*'
if len( args ) > 0:
  system = args[0]
if system != '*':
  if len( args ) > 1:
    component = args[1]
#
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
#
gComponentInstaller.exitOnError = True
#
# 'u' is the runsvctrl "up" action: start the selected component(s).
result = gComponentInstaller.runsvctrlComponent( system, component, 'u' )
if not result['OK']:
  print 'ERROR:', result['Message']
  exit( -1 )
gComponentInstaller.printStartupStatus( result['Value'] )
| andresailer/DIRAC | FrameworkSystem/scripts/dirac-start-component.py | Python | gpl-3.0 | 1,220 | [
"DIRAC"
] | e3a599d82b9973e57f4c31d1f3db782341df51209b776492c2d0bf205e0c2844 |
#
# Restriction Analysis Libraries.
# Copyright (C) 2004. Frederic Sohm.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
###############################################################################
# Configuration of the console.
#
# Mainly used by PrintFormat.PrintFormat
#
# ConsoleWidth : width of the console used default to 80.
# should never be less than 60.
# NameWidth : space attributed to the name in PrintList method.
# Indent : Indent of the second line.
# MaxSize : Maximal size of the sequence (default=6:
# -> 99 999 bp + 1 trailing ','
# people are unlikely to ask for restriction map of sequences
# bigger than 100.000 bp. This is needed to determine the
# space to be reserved for sites location.
#
# MaxSize = 5 => 9.999 bp
# MaxSize = 6 => 99.999 bp
# MaxSize = 7 => 999.999 bp
# example:
#
# <------------ ConsoleWidth --------------->
# <- NameWidth ->
# EcoRI : 1, 45, 50, 300, 400, 650,
# 700, 1200, 2500.
# <-->
# Indent
#
ConsoleWidth = 80  # total width of the printed output; keep >= 60
NameWidth = 10  # columns reserved for the enzyme name in PrintList
Indent = 4  # indentation of continuation lines
MaxSize = 6  # digits reserved for site positions (6 -> up to 99,999 bp)
###############################################################################
# Proxies
#
# Enter here the address of your proxy if any.
# If you don't use proxy use an empty string
# i.e.
# ftp_proxy = ''
# -> no proxy
#
# ftp_proxy = 'http://www.somewhere.something:one_number'
# -> www.somewhere.something is the address of the proxy.
# one_number is the port number.
#
ftp_proxy = ''  # empty string means no proxy; see the examples above for the URL form
###############################################################################
# Rebase ftp location
#
# Do not modify the addresses.
#
ftp_Rebase = 'ftp://ftp.neb.com/'  # REBASE ftp server root (do not modify)
# EMBOSS-format REBASE data files; the '###' suffix is presumably replaced
# with a release number by the update code -- confirm against the updater.
ftp_emb_e = ftp_Rebase+'pub/rebase/emboss_e.###'
ftp_emb_s = ftp_Rebase+'pub/rebase/emboss_s.###'
ftp_emb_r = ftp_Rebase+'pub/rebase/emboss_r.###'
###############################################################################
# ftp rebase account.
#
# In order to update the rebase files, Rana needs to connect to the
# corresponding ftp server.
#
# the general procedure for accessing a ftp server is generally to
# connect as anonymous user (rebase_name) and providing your e-mail address
# as password.
#
# Therefore, you need to enter your e-mail address in rebase_password.
# The address will not be send to anyone but is necessary to login the
# ftp server of rebase when connecting as anonymous user.
#
# Do not forget to enclose the address between "'".
#
Rebase_name = 'anonymous'  # standard anonymous ftp login
Rebase_password = ''  # fill in your e-mail address (used as the anonymous ftp password)
# Rebase_password = 'your_address@somewhere.something'
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/Restriction/RanaConfig.py | Python | gpl-2.0 | 3,025 | [
"Biopython"
] | 620a4bd957b0ccbe534ee3009e8bd5a81e88933f5d64b64ade9114fa2411fc1f |
#!/usr/bin/env python
import os
import shutil
import subprocess
import sys
from time import sleep
import json
from Bio import SeqIO
from store import update_job
def run_cmd(cmd, wdir=None, ignore_error=False):
    """
    Run a command line command
    Returns True or False based on the exit code

    stdout is logged to log.txt; on an unignored failure the command and
    stderr go to log.err. Note both logs are written in the *current*
    directory, not in wdir (which only sets the subprocess cwd).
    """
    if wdir is None:
        wdir = os.getcwd()
    proc = subprocess.Popen(cmd, shell=(sys.platform != "win32"),
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            cwd=wdir)
    out = proc.communicate()
    return_code = proc.returncode
    # FIX: use context managers so both log files are flushed and closed;
    # the original never closed either handle.
    with open('log.txt', 'w') as t:
        t.write('%s\n' % str(out[0]))
    with open('log.err', 'w') as e:
        if return_code != 0 and not ignore_error:
            e.write('Command (%s) failed w/ error %d\n'
                    % (cmd, return_code))
            e.write('%s\n' % str(out[1]))
            e.write('\n')
    return bool(not return_code)
def run_contiguator(req_id, wdir, contigs, refs, evalue=1e-20, contiglength=1000,
                    contigcoverage=20, hitlength=1100, multitreshold=1.5,
                    non=False, numN=100, pcr=False, inner=False, blastn=False,
                    threads=1,
                    optsize=20,
                    minsize=18,
                    maxsize=27,
                    opttemp=60,
                    mintemp=57,
                    maxtemp=63,
                    flanksize=1000,
                    minprod=1000,
                    maxprod=7000,
                    optgc=50,
                    mingc=20,
                    maxgc=80,
                    gcclamp=1,
                    exclude=100,
                    jobname=''):
    """Run a complete CONTIGuator job inside *wdir* and archive the results.

    Copies the bundled CONTIGuator script into *wdir*, builds its command
    line from the given thresholds/primer parameters, runs it, converts the
    produced PDF maps to PNG, removes the inputs and finally creates a
    CONTIGuator_results.tar.gz archive.  Job progress is reported through
    update_job(req_id, 'status', ...).

    :param req_id: job identifier used for status updates
    :param wdir: working directory where inputs live and outputs are written
    :param contigs: path to the contigs FASTA file
    :param refs: iterable of reference FASTA file paths
    :returns: True on success; raises Exception if CONTIGuator fails
    """
    # Remember where we started: we chdir into wdir and must come back.
    sdir = os.getcwd()
    update_job(req_id, 'status', 'Copying CONTIGuator files')
    # Move all the contiguator files
    shutil.copy(os.path.join(sdir, 'contiguator-app', 'CONTIGuator.py'), wdir)
    shutil.copy(os.path.join(sdir, 'contiguator-app', 'abacas'), wdir)
    # Move to working directory
    os.chdir(wdir)
    update_job(req_id, 'status', 'Getting CONTIGuator version')
    # Program version
    try:
        vcmd = 'python2 CONTIGuator.py --version'
        proc = subprocess.Popen(vcmd,shell=(sys.platform!="win32"),
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,stderr=subprocess.PIPE,
                                cwd=wdir)
        out = proc.communicate()
        sleep(1)
        # NOTE(review): on Python 3 this concatenates bytes; the value is
        # later written to genparams.tsv with %s — confirm encoding is ok.
        ver = out[0].strip() + out[1].strip()
    except:
        ver = 'Unknown'
    update_job(req_id, 'status', 'Preparing CONTIGuator command line')
    #Run CONTIGuator
    cmd = 'python2 CONTIGuator.py -c "%s"'%(os.path.basename(contigs))
    for ref in refs:
        cmd += ' -r "%s"'%(os.path.basename(ref))
    cmd += ' -e %s'%str(evalue)
    cmd += ' -L %d'%int(contiglength)
    cmd += ' -C %d'%int(contigcoverage)
    cmd += ' -B %d'%int(hitlength)
    cmd += ' -R %s'%str(multitreshold)
    cmd += ' -n %d'%int(numN)
    # Check threads
    # Cap the thread count to avoid overloading the web-app host.
    if int(threads) > 3:
        threads = 3
    cmd += ' -t %d'%int(threads)
    if bool(blastn):
        cmd += ' -b'
    if bool(non):
        cmd += ' -N'
    if bool(pcr):
        cmd += ' -P -A'
    if bool(inner) == True:
        cmd += ' -I'
    # Other PCR commands written to file
    # The 'newlines' file feeds the interactive primer-design prompts
    # (sizes, melting temps, GC bounds) — presumably consumed by
    # CONTIGuator's -M/-V/-D/-G options; verify against CONTIGuator docs.
    f=open(os.path.join(wdir,'newlines'),'w')
    f.write('%d\n%d\n%d\n%f\n%f\n%f\n%d\n%d\n%d\n%f\n%f\n%f\n%d\n%d\n'%(
            int(optsize),
            int(minsize),
            int(maxsize),
            float(opttemp),
            float(mintemp),
            float(maxtemp),
            int(flanksize),
            int(minprod),
            int(maxprod),
            float(optgc),
            float(mingc),
            float(maxgc),
            int(gcclamp),
            int(exclude)))
    f.close()
    cmd += ' -M -V -D -G'
    # Job details files
    # Write three TSV files describing the job so the UI can show them.
    #try:
    lgenparams = ['Job name', 'CONTIGuator command', 'Version']
    genparams = {'Job name':jobname,
                 'CONTIGuator command':cmd,
                 'Version':ver}
    fout = open(os.path.join(wdir,'genparams.tsv'), 'w')
    for k in lgenparams:
        fout.write('%s\t%s\n'%(k,genparams[k]))
    fout.close()
    lrunparams = ['Blast e-value', 'Use blastn', 'Blast threads',
                  'Contig length threshold',
                  'Contig coverage threshold (%)',
                  'Hit length threshold',
                  'Multiple replicon threshold',
                  'Gaps size on overlapping contigs',
                  'Do not use N to separate the contigs']
    runparams = {'Blast e-value':evalue,
                 'Use blastn':blastn,
                 'Contig length threshold':contiglength,
                 'Contig coverage threshold (%)':contigcoverage,
                 'Hit length threshold':hitlength,
                 'Multiple replicon threshold':multitreshold,
                 'Do not use N to separate the contigs':non,
                 'Gaps size on overlapping contigs':numN,
                 'Blast threads':threads}
    fout = open(os.path.join(wdir,'runparams.tsv'), 'w')
    for k in lrunparams:
        fout.write('%s\t%s\n'%(k,runparams[k]))
    fout.close()
    if pcr:
        lpcrparams = ['Compute also the inner primers',
                      'Optimum primer size',
                      'Minimum primer size',
                      'Maximum primer size',
                      'Optimum melting temperature',
                      'Minimum melting temperature',
                      'Maximum melting temperature',
                      'Flanking region size',
                      'Minimum product size',
                      'Maximum product size',
                      'Optimum primer GC content (%)',
                      'Minimum primer GC content (%)',
                      'Maximum primer GC content (%)',
                      'GC clamp',
                      'Bases excluded from the end of the contig']
        pcrparams = {'Compute also the inner primers':inner,
                     'Optimum primer size':optsize,
                     'Minimum primer size':minsize,
                     'Maximum primer size':maxsize,
                     'Optimum melting temperature':opttemp,
                     'Minimum melting temperature':mintemp,
                     'Maximum melting temperature':maxtemp,
                     'Flanking region size':flanksize,
                     'Minimum product size':minprod,
                     'Maximum product size':maxprod,
                     'Optimum primer GC content (%)':optgc,
                     'Minimum primer GC content (%)':mingc,
                     'Maximum primer GC content (%)':maxgc,
                     'GC clamp':gcclamp,
                     'Bases excluded from the end of the contig':exclude}
        fout = open(os.path.join(wdir,'pcrparams.tsv'), 'w')
        for k in lpcrparams:
            fout.write('%s\t%s\n'%(k,pcrparams[k]))
        fout.close()
    #except:pass
    update_job(req_id, 'status', 'Running CONTIGuator')
    if not run_cmd(cmd, wdir):
        raise Exception('CONTIGuator execution halted!')
    update_job(req_id, 'status', 'Preparing output')
    # Prepare some data (the long ones to save time on results requests)
    # Convert the pdf files in png
    # Cycle through the directories
    # Each reference map lives in its own subdirectory; render two PNGs
    # per PDF (full size and thumbnail) using ImageMagick's `convert`.
    for mapdir in os.listdir(wdir):
        mdir = os.path.join(wdir,mapdir)
        if os.path.isdir(mdir):
            for pdf in os.listdir(mdir):
                if pdf.endswith('.pdf'):
                    # Convert
                    convcmd = 'convert -density 300x300 "%s" -resize x1000 -density 150x150 -trim "%s.png"'
                    convcmd = convcmd%(pdf, pdf)
                    proc = subprocess.Popen(convcmd,
                                            shell=(sys.platform!="win32"),
                                            stdin=subprocess.PIPE,stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE, cwd=mdir)
                    out = proc.communicate()
                    convcmd1 = 'convert -density 300x300 "%s" -resize x400 -density 150x150 -trim "%s_small.png"'
                    convcmd1 = convcmd1%(pdf, pdf)
                    proc = subprocess.Popen(convcmd1,
                                            shell=(sys.platform!="win32"),
                                            stdin=subprocess.PIPE,stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE, cwd=mdir)
                    out = proc.communicate()
    update_job(req_id, 'status', 'Cleaning up')
    try:
        # Be kind, remove the original files...
        os.remove(contigs)
        for ref in refs:
            os.remove(ref)
        # ...and the contiguator bundle
        os.remove('CONTIGuator.py')
        os.remove('abacas')
    except:pass
    update_job(req_id, 'status', 'Preparing the output archive')
    # Prepare an archive
    # Archive everything except the helper/bookkeeping files; directories
    # are flattened one level so the tarball keeps mapdir/file paths.
    import tarfile
    tarname = os.path.join(wdir,'CONTIGuator_results.tar.gz')
    tar = tarfile.open(tarname,'w:gz')
    for fname in os.listdir(wdir):
        if fname in ['CONTIGuator.py', 'CONTIGuator_results.tar.gz',
                     'abacas', 'newlines',
                     'summary.tsv', 'genparams.tsv', 'runparams.tsv',
                     'pcrparams.tsv']:
            continue
        if os.path.isdir(os.path.join(wdir, fname)):
            idir = os.path.join(wdir, fname)
            for ifname in os.listdir(idir):
                tar.add(os.path.join(idir, ifname),
                        arcname=os.path.join(fname, ifname))
        else:
            tar.add(os.path.join(wdir,fname),
                    arcname=fname)
    tar.close()
    # Return back to the original directory
    os.chdir(sdir)
    return True
if __name__ == "__main__":
    # All parameters arrive as positional strings on the command line and
    # are cast to their real types below before calling run_contiguator.
    req_id, wdir, dname = sys.argv[1:4]
    evalue, contiglength, contigcoverage, hitlength = sys.argv[4:8]
    multitreshold, non, numN, pcr, inner, blastn = sys.argv[8:14]
    threads, optsize, minsize, maxsize, opttemp, mintemp = sys.argv[14:20]
    maxtemp, flanksize, minprod, maxprod, optgc, mingc, maxgc = sys.argv[20:27]
    gcclamp, exclude, jobname = sys.argv[27:30]
    # Any remaining arguments are reference genome files.
    genomes = sys.argv[30:]
    evalue = float(evalue)
    contiglength = int(contiglength)
    contigcoverage = float(contigcoverage)
    hitlength = int(hitlength)
    multitreshold = float(multitreshold)
    # Boolean flags are passed as the literal strings 'True'/'False'.
    non = non == 'True'
    numN = int(numN)
    pcr = pcr == 'True'
    inner = inner == 'True'
    blastn = blastn == 'True'
    threads = int(threads)
    optsize = int(optsize)
    minsize = int(minsize)
    maxsize = int(maxsize)
    opttemp = float(opttemp)
    mintemp = float(mintemp)
    maxtemp = float(maxtemp)
    flanksize = int(flanksize)
    minprod = int(minprod)
    maxprod = int(maxprod)
    optgc = float(optgc)
    mingc = float(mingc)
    maxgc = float(maxgc)
    gcclamp = float(gcclamp)
    exclude = int(exclude)
    update_job(req_id, 'status', 'Job starting')
    print(req_id, wdir, dname, genomes,
          evalue, contiglength, contigcoverage, hitlength,
          multitreshold, non, numN, pcr, inner, blastn,
          threads, optsize, minsize, maxsize, opttemp, mintemp,
          maxtemp, flanksize, minprod, maxprod, optgc, mingc, maxgc,
          gcclamp, exclude, jobname)
    try:
        result = run_contiguator(req_id, wdir, dname, genomes,
                                 evalue, contiglength, contigcoverage, hitlength,
                                 multitreshold, non, numN, pcr, inner, blastn,
                                 threads, optsize, minsize, maxsize, opttemp, mintemp,
                                 maxtemp, flanksize, minprod, maxprod, optgc, mingc, maxgc,
                                 gcclamp, exclude, jobname)
        # Use a context manager so the result file is flushed and closed
        # (the previous code leaked the file handle opened inline).
        with open(os.path.join(wdir, 'result.json'), 'w') as fout:
            json.dump(result, fout)
        update_job(req_id, 'status', 'Job done')
    except Exception as e:
        # Report the failure through the job status channel.
        update_job(req_id, 'status', 'Job failed')
        update_job(req_id, 'error', str(e))
| combogenomics/contiguator-webapp | tasks.py | Python | mit | 11,579 | [
"BLAST"
] | af173c49af3250239bc2f62b95b80b32d1c765e632862c9d3573bee66daca018 |
"""
An object to register callbacks and dispatch event wiring mouse clicks
on a scene to picking.
"""
# ETS imports
from traits.api import HasTraits, Dict, Instance, \
Enum, Int, Callable, on_trait_change, List, Tuple, WeakRef
from mayavi.core.scene import Scene
from tvtk.api import tvtk
VTK_VERSION = tvtk.Version().vtk_major_version \
+ .1*tvtk.Version().vtk_minor_version
################################################################################
# class `MousePickDispatcher`
################################################################################
class MousePickDispatcher(HasTraits):
    """ An event dispatcher to send pick events on mouse clicks.

    This object wires VTK observers so that picking callbacks can be
    bound to a mouse click without movement.  It deals with adding and
    removing the VTK-level callbacks as entries are added to, or removed
    from, the ``callbacks`` list.
    """

    # The scene events are wired to.
    scene = WeakRef(Scene)

    # The list of callbacks, with the picker type they should be using,
    # and the mouse button that triggers them.
    callbacks = List(Tuple(
        Callable,
        Enum('cell', 'point', 'world'),
        Enum('Left', 'Middle', 'Right'),
    ),
        help="The list of callbacks, with the picker type they "
             "should be using, and the mouse button that "
             "triggers them. The callback is passed "
             "as an argument the tvtk picker."
    )

    #--------------------------------------------------------------------------
    # Private traits
    #--------------------------------------------------------------------------

    # Whether the mouse has moved after the button press
    _mouse_no_mvt = Int

    # The button that has been pressed
    _current_button = Enum('Left', 'Middle', 'Right')

    # The various pickers that are used when the mouse is pressed
    _active_pickers = Dict

    # The VTK callback numbers corresponding to our callbacks
    _picker_callback_nbs = Dict(value_trait=Int)

    # The VTK callback numbers corresponding to mouse movement
    _mouse_mvt_callback_nb = Int

    # The VTK callback numbers corresponding to mouse press
    _mouse_press_callback_nbs = Dict

    # The VTK callback numbers corresponding to mouse release
    _mouse_release_callback_nbs = Dict

    #--------------------------------------------------------------------------
    # Callbacks management
    #--------------------------------------------------------------------------

    @on_trait_change('callbacks_items')
    def dispatch_callbacks_change(self, name, trait_list_event):
        """ Keep the VTK observers in sync with the callbacks list. """
        for item in trait_list_event.added:
            self.callback_added(item)
        for item in trait_list_event.removed:
            self.callback_removed(item)

    def callback_added(self, item):
        """ Wire up the different VTK callbacks.
        """
        callback, type, button = item
        picker = getattr(self.scene.scene.picker, '%spicker' % type)
        self._active_pickers[type] = picker

        # Register the pick callback
        if not type in self._picker_callback_nbs:
            self._picker_callback_nbs[type] = \
                picker.add_observer("EndPickEvent",
                                    self.on_pick)

        # Register the callbacks on the scene interactor
        # VTK 6+ no longer delivers MouseMoveEvent/ButtonReleaseEvent the
        # same way, hence the version-dependent event names.
        if VTK_VERSION > 5:
            move_event = "RenderEvent"
        else:
            move_event = 'MouseMoveEvent'
        if not self._mouse_mvt_callback_nb:
            self._mouse_mvt_callback_nb = \
                self.scene.scene.interactor.add_observer(move_event,
                                                         self.on_mouse_move)
        if not button in self._mouse_press_callback_nbs:
            self._mouse_press_callback_nbs[button] = \
                self.scene.scene.interactor.add_observer(
                    '%sButtonPressEvent' % button,
                    self.on_button_press)
        if VTK_VERSION > 5:
            release_event = "EndInteractionEvent"
        else:
            release_event = '%sButtonReleaseEvent' % button
        if not button in self._mouse_release_callback_nbs:
            self._mouse_release_callback_nbs[button] = \
                self.scene.scene.interactor.add_observer(
                    release_event,
                    self.on_button_release)

    def callback_removed(self, item):
        """ Clean up the unnecessary VTK callbacks.
        """
        callback, type, button = item

        # If the picker is no longer needed, clean up its observers.
        if not [t for c, t, b in self.callbacks if t == type]:
            picker = self._active_pickers[type]
            picker.remove_observer(self._picker_callback_nbs[type])
            del self._active_pickers[type]

        # If there are no longer callbacks on the button, clean up
        # the corresponding observers.
        if not [b for c, t, b in self.callbacks if b == button]:
            self.scene.scene.interactor.remove_observer(
                self._mouse_press_callback_nbs[button])
            self.scene.scene.interactor.remove_observer(
                self._mouse_release_callback_nbs[button])
        if len(self.callbacks) == 0 and self._mouse_mvt_callback_nb:
            self.scene.scene.interactor.remove_observer(
                self._mouse_mvt_callback_nb)
            self._mouse_mvt_callback_nb = 0

    def clear_callbacks(self):
        """ Remove all callbacks, releasing their VTK observers. """
        while self.callbacks:
            self.callbacks.pop()

    #--------------------------------------------------------------------------
    # Mouse movement dispatch mechanism
    #--------------------------------------------------------------------------

    def on_button_press(self, vtk_picker, event):
        # Remember which button and arm the "no movement" counter; the
        # counter tolerates one move event generated by the press itself.
        self._current_button = event[:-len('ButtonPressEvent')]
        self._mouse_no_mvt = 2

    def on_mouse_move(self, vtk_picker, event):
        if self._mouse_no_mvt:
            self._mouse_no_mvt -= 1

    def on_button_release(self, vtk_picker, event):
        """ If the mouse has not moved, pick with our pickers.
        """
        if self._mouse_no_mvt:
            x, y = vtk_picker.GetEventPosition()
            for picker in self._active_pickers.values():
                try:
                    picker.pick((x, y, 0), self.scene.scene.renderer)
                except TypeError:
                    # Some VTK versions expect unpacked coordinates.
                    picker.pick(x, y, 0, self.scene.scene.renderer)
        self._mouse_no_mvt = 0

    def on_pick(self, vtk_picker, event):
        """ Dispatch the pick to the callback associated with the
        corresponding mouse button.
        """
        picker = tvtk.to_tvtk(vtk_picker)
        # BUGFIX: use items() rather than the Python-2-only iteritems(),
        # which raises AttributeError on Python 3.
        for event_type, event_picker in self._active_pickers.items():
            if picker is event_picker:
                for callback, type, button in self.callbacks:
                    if (type == event_type
                            and button == self._current_button):
                        callback(picker)
                break

    #--------------------------------------------------------------------------
    # Private methods
    #--------------------------------------------------------------------------

    def __del__(self):
        self.clear_callbacks()
| liulion/mayavi | mayavi/core/mouse_pick_dispatcher.py | Python | bsd-3-clause | 7,481 | [
"Mayavi",
"VTK"
] | 70b965c49f8f837e497a8af215cea30274ac943f9f6aaded4a05ed432f297867 |
import sys
import os
import pysam
import difflib
import gzip
import inspect
import tempfile
# True when running under Python 3; selects the right stdlib names and
# the right str/bytes coercion helpers below.
IS_PYTHON3 = sys.version_info[0] >= 3

if IS_PYTHON3:
    from itertools import zip_longest
    from urllib.request import urlopen

    def force_str(s):
        """Decode an ASCII bytes-like object to str; pass str through."""
        if hasattr(s, 'decode'):
            return s.decode('ascii')
        return s

    def force_bytes(s):
        """Encode an ASCII str to bytes; pass bytes through."""
        if hasattr(s, 'encode'):
            return s.encode('ascii')
        return s
else:
    from itertools import izip as zip_longest
    from urllib2 import urlopen

    def force_str(s):
        # str and bytes are the same type on Python 2: nothing to do.
        return s

    def force_bytes(s):
        # str and bytes are the same type on Python 2: nothing to do.
        return s
def openfile(fn):
    """Open *fn* for reading as text, transparently decompressing .gz files."""
    if not fn.endswith(".gz"):
        return open(fn)
    try:
        return gzip.open(fn, "rt", encoding="utf-8")
    except TypeError:
        # Python 2's gzip.open() has no encoding argument.
        return gzip.open(fn, "r")
def checkBinaryEqual(filename1, filename2):
    '''return true if the two files are binary equal.

    Compares sizes first, then contents in 64 KiB chunks.  The previous
    implementation read one byte at a time (very slow on large files) and
    left the file handles to be closed by garbage collection; context
    managers now guarantee closure.
    '''
    if os.path.getsize(filename1) != os.path.getsize(filename2):
        return False

    with open(filename1, "rb") as infile1, open(filename2, "rb") as infile2:
        while True:
            chunk1 = infile1.read(65536)
            chunk2 = infile2.read(65536)
            if chunk1 != chunk2:
                # Sizes are equal, so chunks stay aligned; any mismatch
                # here is a real content difference.
                return False
            if not chunk1:
                # Both files exhausted without a difference.
                return True
def check_samtools_view_equal(
        filename1, filename2,
        without_header=False):
    '''return true if the two files are equal in their
    content through samtools view.

    :param filename1: first BAM/CRAM/SAM file
    :param filename2: second BAM/CRAM/SAM file
    :param without_header: when True, the @-header lines are excluded
        from the comparison (no -h flag passed to samtools view).
    '''
    # strip MD and NM tags, as not preserved in CRAM files
    args = ["-x", "MD", "-x", "NM"]
    if not without_header:
        args.append("-h")
    # NOTE(review): assumes pysam.samtools.view returns a sequence of
    # alignment lines here — confirm against the pysam version in use.
    lines1 = pysam.samtools.view(*(args + [filename1]))
    lines2 = pysam.samtools.view(*(args + [filename2]))
    if len(lines1) != len(lines2):
        return False
    if lines1 != lines2:
        # line by line comparison
        # sort each line, as tags get rearranged between
        # BAM/CRAM
        for n, pair in enumerate(zip(lines1, lines2)):
            l1, l2 = pair
            # Drop the trailing newline, then compare fields order-free.
            l1 = sorted(l1[:-1].split("\t"))
            l2 = sorted(l2[:-1].split("\t"))
            if l1 != l2:
                print ("mismatch in line %i" % n)
                print (l1)
                print (l2)
                return False
        else:
            return False
    return True
def checkURL(url):
    '''return True if URL is available.

    A URL might not be available if it is the wrong URL
    or there is no connection to the URL.
    '''
    try:
        # Close the response explicitly instead of leaking the socket.
        response = urlopen(url, timeout=1)
        response.close()
        return True
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        return False
def checkFieldEqual(cls, read1, read2, exclude=[]):
    '''check if two reads are equal by comparing each field.

    *cls* is the unittest.TestCase whose assertEqual is used; fields
    listed in *exclude* are skipped.
    '''
    # The leading "." is kept for refactoring purposes.
    dotted_fields = (".query_name",
                     ".query_sequence",
                     ".flag",
                     ".reference_id",
                     ".reference_start",
                     ".mapping_quality",
                     ".cigartuples",
                     ".next_reference_id",
                     ".next_reference_start",
                     ".template_length",
                     ".query_length",
                     ".query_qualities",
                     ".bin",
                     ".is_paired", ".is_proper_pair",
                     ".is_unmapped", ".mate_is_unmapped",
                     ".is_reverse", ".mate_is_reverse",
                     ".is_read1", ".is_read2",
                     ".is_secondary", ".is_qcfail",
                     ".is_duplicate")
    for dotted in dotted_fields:
        attr = dotted[1:]
        if attr in exclude:
            continue
        value1 = getattr(read1, attr)
        value2 = getattr(read2, attr)
        cls.assertEqual(value1, value2,
                        "attribute mismatch for %s: %s != %s" %
                        (attr, value1, value2))
def check_lines_equal(cls, a, b, sort=False, filter_f=None, msg=None):
    """check if contents of two files are equal comparing line-wise.

    sort: bool
        sort contents of both files before comparing.
    filter_f:
        drop lines from both a and b for which filter_f(line) is True.
    """
    lines_a = openfile(a).readlines()
    lines_b = openfile(b).readlines()

    if filter_f is not None:
        lines_a = [line for line in lines_a if not filter_f(line)]
        lines_b = [line for line in lines_b if not filter_f(line)]

    if sort:
        lines_a = sorted(lines_a)
        lines_b = sorted(lines_b)

    cls.assertEqual(lines_a, lines_b, msg)
def get_temp_filename(suffix=""):
    """Create a closed, named temporary file in the current directory.

    The calling function's name is embedded in the file name to make
    leftover files easy to attribute; the file is NOT auto-deleted.
    """
    # Frame [1] is our direct caller; field [3] is its function name.
    caller_name = inspect.getouterframes(inspect.currentframe(), 2)[1][3]
    handle = tempfile.NamedTemporaryFile(
        dir=".",
        prefix="tmp_{}_".format(caller_name),
        suffix=suffix,
        delete=False)
    handle.close()
    return handle.name
| bioinformed/pysam | tests/TestUtils.py | Python | mit | 4,775 | [
"pysam"
] | cb510ec94896daa01e939c5062f7dff0b570abb30a4319705e88843e1775c9cf |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Train ds2-style speech model on Librispeech
"""
import os
import numpy as np
from neon.backends import gen_backend
from neon.callbacks.callbacks import Callbacks
from neon.initializers import GlorotUniform, Constant, Gaussian
from neon.layers import Conv, GeneralizedCost, Affine, DeepBiRNN
from neon.models import Model
from neon.transforms import Rectlin, Identity, Rectlinclip
from neon.optimizers import GradientDescentMomentum
from neon.util.argparser import NeonArgparser, extract_valid_args
from ctc import CTC
from decoder import ArgMaxDecoder
from sample_proposals_callback import WordErrorRateCallback
from data.dataloader import make_loader
# Parse the command line arguments
arg_defaults = {'batch_size': 32}
parser = NeonArgparser(__doc__, default_overrides=arg_defaults)
parser.add_argument('--nfilters', type=int,
                    help='no. of conv filters', default=1152)
parser.add_argument('--filter_width', type=int,
                    help='width of conv filter', default=11)
parser.add_argument('--str_w', type=int, help='stride in time', default=3)
parser.add_argument('--depth', type=int, help='rnn depth', default=9)
parser.add_argument('--hidden_size', type=int,
                    help='affine/rnn hidden units', default=1152)
parser.add_argument('--lr', type=float,
                    help='learning rate', default=2e-5)
parser.add_argument('--momentum', type=float,
                    help='momentum', default=0.99)
args = parser.parse_args()
# Setup model hyperparameters
# Convolution layer hyperparameters
nfilters = args.nfilters # Number of convolutional filters
filter_width = args.filter_width # Width of convolutional filters
str_w = args.str_w # Convolutional filter stride
# RNN hyperparameters
depth = args.depth # Number of BiRNN layers
hidden_size = args.hidden_size # Number of units in each BiRNN layer
# Optimization hyperparameters
learning_rate = args.lr
momentum = args.momentum
gradient_clip_norm = 400
# Setup parameters for argmax decoder
# '_' is the CTC blank symbol; the decoder maps network outputs back to
# characters of this alphabet.
alphabet = "_'ABCDEFGHIJKLMNOPQRSTUVWXYZ "
nout = len(alphabet)
argmax_decoder = ArgMaxDecoder(alphabet, space_index=alphabet.index(" "))
# Initialize our backend
be = gen_backend(**extract_valid_args(args, gen_backend))
# Setup dataloader
# NOTE(review): nbands presumably matches the 13 MFCC features produced by
# the preprocessing pipeline — confirm against data/dataloader.py.
nbands = 13
max_tscrpt_len = 1300
max_utt_len = 30
train_manifest = args.manifest['train']
if not os.path.exists(train_manifest):
    raise RuntimeError(
        "training manifest file {} not found".format(train_manifest))
dev_manifest = args.manifest['val']
if not os.path.exists(dev_manifest):
    raise RuntimeError(
        "validation manifest file {} not found".format(dev_manifest))
train = make_loader(train_manifest, alphabet, nbands, max_tscrpt_len, max_utt_len, backend_obj=be)
dev = make_loader(dev_manifest, alphabet, nbands, max_tscrpt_len, max_utt_len, backend_obj=be)
# Setup the layers of the DNN
# Softmax is performed in warp-ctc, so we use an Identity activation in the
# final layer.
gauss = Gaussian(scale=0.01)
glorot = GlorotUniform()
layers = [
    Conv(
        (nbands,
         filter_width,
         nfilters),
        init=gauss,
        bias=Constant(0),
        activation=Rectlin(),
        padding=dict(
            pad_h=0,
            pad_w=5),
        strides=dict(
            str_h=1,
            str_w=str_w)),
    DeepBiRNN(
        hidden_size,
        init=glorot,
        activation=Rectlinclip(),
        batch_norm=True,
        reset_cells=True,
        depth=depth),
    Affine(
        hidden_size,
        init=glorot,
        activation=Rectlinclip()),
    Affine(
        nout=nout,
        init=glorot,
        activation=Identity())]
model = Model(layers=layers)
opt = GradientDescentMomentum(learning_rate, momentum,
                              gradient_clip_norm=gradient_clip_norm,
                              stochastic_round=False,
                              nesterov=True)
callbacks = Callbacks(model, eval_set=dev, **args.callback_args)
# Print validation set word error rate at the end of every epoch
pcb = WordErrorRateCallback(dev, argmax_decoder, max_tscrpt_len, epoch_freq=1)
callbacks.add_callback(pcb)
cost = GeneralizedCost(costfunc=CTC(max_tscrpt_len, nout=nout))
# Fit the model
model.fit(train, optimizer=opt, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)
| NervanaSystems/deepspeech | speech/train.py | Python | apache-2.0 | 5,087 | [
"Gaussian"
] | 04a05bd509c83dcfc66c271e8c7bd923f0c102d325e3cba6507cd5374aabade6 |
# Copyright (C) 2013-2019 2ndQuadrant Ltd
#
# Client Utilities for Barman, Backup and Recovery Manager for PostgreSQL
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import imp
import random
import re
import subprocess
import sys
import tarfile
from contextlib import closing
from io import BytesIO
import mock
import pytest
# Avoid writing .pyc files for the extensionless script loaded below.
sys.dont_write_bytecode = True
# The object under test is a script without a .py extension, so it is
# loaded explicitly as a module named 'bwa'.
try:
    # Execution from the project root
    bwa = imp.load_source('bwa', 'barman-wal-archive')
except IOError:
    # Execution from the 'tests' directory
    bwa = imp.load_source('bwa', '../barman-wal-archive')
def pipe_helper():
    """
    Create two BytesIO objects (input_mock, output_mock) to simulate a pipe.
    When the input_mock is closed, the content is copied in output_mock,
    ready to be used.

    :rtype: tuple[BytesIO, BytesIO]
    """
    writer = BytesIO()
    reader = BytesIO()

    real_close = writer.close

    def close_and_transfer():
        # Mirror everything written so far into the reader side, rewind
        # it, then perform the real close.
        reader.write(writer.getvalue())
        reader.seek(0)
        real_close()

    writer.close = close_and_transfer
    return writer, reader
# noinspection PyMethodMayBeStatic
class TestMain(object):
    """Tests for the bwa.main() entry point (mocked ssh transport)."""
    @mock.patch('bwa.subprocess.Popen')
    def test_ok(self, popen_mock, tmpdir):
        """Happy path: the WAL file and its MD5SUMS are tarred to ssh stdin."""
        # Prepare some content
        source = tmpdir.join('wal_dir/000000080000ABFF000000C1')
        source.write('something', ensure=True)
        source_hash = source.computehash()
        # Prepare the fake Pipe
        input_mock, output_mock = pipe_helper()
        popen_mock.return_value.stdin = input_mock
        popen_mock.return_value.returncode = 0
        bwa.main(['-c', '/etc/bwa.conf', '-U', 'user', 'a.host', 'a-server',
                  source.strpath])
        popen_mock.assert_called_once_with(
            ['ssh', 'user@a.host',
             'barman', "--config='/etc/bwa.conf'", 'put-wal', 'a-server'],
            stdin=subprocess.PIPE)
        # Verify the tar content
        tar = tarfile.open(mode='r|', fileobj=output_mock)
        first = tar.next()
        with closing(tar.extractfile(first)) as fp:
            first_content = fp.read().decode()
        assert first.name == '000000080000ABFF000000C1'
        assert first_content == 'something'
        second = tar.next()
        with closing(tar.extractfile(second)) as fp:
            second_content = fp.read().decode()
        assert second.name == 'MD5SUMS'
        assert second_content == \
            '%s *000000080000ABFF000000C1\n' % source_hash
        # No further members expected in the archive.
        assert tar.next() is None
    @mock.patch('bwa.RemotePutWal')
    def test_error_dir(self, rpw_mock, tmpdir, capsys):
        """A directory WAL_PATH must be rejected before any ssh attempt."""
        with pytest.raises(SystemExit) as exc:
            bwa.main(['a.host', 'a-server', tmpdir.strpath])
        assert exc.value.code == 2
        assert not rpw_mock.called
        out, err = capsys.readouterr()
        assert not out
        assert 'WAL_PATH cannot be a directory' in err
    @mock.patch('bwa.RemotePutWal')
    def test_error_io(self, rpw_mock, tmpdir, capsys):
        """An EnvironmentError while spawning ssh exits with code 2."""
        # Prepare some content
        source = tmpdir.join('wal_dir/000000080000ABFF000000C1')
        source.write('something', ensure=True)
        rpw_mock.side_effect = EnvironmentError
        with pytest.raises(SystemExit) as exc:
            bwa.main(['a.host', 'a-server', source.strpath])
        assert exc.value.code == 2
        out, err = capsys.readouterr()
        assert not out
        assert 'Error executing ssh' in err
    @mock.patch('bwa.RemotePutWal')
    def test_error_ssh(self, rpw_mock, tmpdir, capsys):
        """ssh exit code 255 (connection problem) maps to exit code 3."""
        # Prepare some content
        source = tmpdir.join('wal_dir/000000080000ABFF000000C1')
        source.write('something', ensure=True)
        rpw_mock.return_value.returncode = 255
        with pytest.raises(SystemExit) as exc:
            bwa.main(['a.host', 'a-server', source.strpath])
        assert exc.value.code == 3
        out, err = capsys.readouterr()
        assert not out
        assert 'Connection problem with ssh' in err
    @mock.patch('bwa.RemotePutWal')
    def test_error_barman(self, rpw_mock, tmpdir, capsys):
        """A remote barman failure propagates its non-zero exit code."""
        # Prepare some content
        source = tmpdir.join('wal_dir/000000080000ABFF000000C1')
        source.write('something', ensure=True)
        rpw_mock.return_value.returncode = 1
        with pytest.raises(SystemExit) as exc:
            bwa.main(['a.host', 'a-server', source.strpath])
        assert exc.value.code == 1
        out, err = capsys.readouterr()
        assert not out
        assert "Remote 'barman put-wal' command has failed" in err
# noinspection PyMethodMayBeStatic
class TestRemotePutWal(object):
    """Tests for the bwa.RemotePutWal helper (ssh invocation and tar stream)."""
    @mock.patch('bwa.subprocess.Popen')
    def test_str_source_file(self, popen_mock, tmpdir):
        """A (possibly unicode) path is streamed as WAL + MD5SUMS tar members."""
        input_mock, output_mock = pipe_helper()
        popen_mock.return_value.stdin = input_mock
        popen_mock.return_value.returncode = 0
        config = mock.Mock(
            user='barman',
            barman_host='remote.barman.host',
            config=None,
            server_name='this-server')
        source_file = tmpdir.join('test-source/000000010000000000000001')
        source_file.write("test-content", ensure=True)
        source_path = source_file.strpath
        # In python2 the source_path can be an unicode object
        if hasattr(source_path, 'decode'):
            source_path = source_path.decode()
        rpw = bwa.RemotePutWal(config, source_path)
        popen_mock.assert_called_once_with(
            ['ssh', 'barman@remote.barman.host',
             'barman', 'put-wal', 'this-server'], stdin=subprocess.PIPE)
        assert rpw.returncode == 0
        # Inspect the tar stream that was written to ssh's stdin.
        tar = tarfile.open(mode='r|', fileobj=output_mock)
        first = tar.next()
        with closing(tar.extractfile(first)) as fp:
            first_content = fp.read().decode()
        assert first.name == '000000010000000000000001'
        assert first_content == 'test-content'
        second = tar.next()
        with closing(tar.extractfile(second)) as fp:
            second_content = fp.read().decode()
        assert second.name == 'MD5SUMS'
        assert second_content == \
            '%s *000000010000000000000001\n' % source_file.computehash('md5')
        assert tar.next() is None
    @mock.patch('bwa.subprocess.Popen')
    def test_error(self, popen_mock, tmpdir):
        """A non-zero ssh exit status is exposed through .returncode."""
        input_mock = BytesIO()
        popen_mock.return_value.stdin = input_mock
        config = mock.Mock(
            user='barman',
            barman_host='remote.barman.host',
            config=None,
            server_name='this-server')
        source_file = tmpdir.join('test-source/000000010000000000000001')
        source_file.write("test-content", ensure=True)
        source_path = source_file.strpath
        # Simulate a remote failure
        popen_mock.return_value.returncode = 5
        # In python2 the source_path can be an unicode object
        if hasattr(source_path, 'decode'):
            source_path = source_path.decode()
        rwa = bwa.RemotePutWal(config, source_path)
        popen_mock.assert_called_once_with(
            ['ssh', 'barman@remote.barman.host',
             'barman', 'put-wal', 'this-server'], stdin=subprocess.PIPE)
        assert rwa.returncode == 5
# noinspection PyMethodMayBeStatic
class TestChecksumTarFile(object):
    """Tests for bwa.ChecksumTarFile and the md5copyfileobj helper."""
    def test_tar(self, tmpdir):
        """Round-trip: members keep their checksum, MD5SUMS and mtime."""
        # Prepare some content
        source = tmpdir.join('source.file')
        source.write('something', ensure=True)
        source.setmtime(source.mtime() - 100) # Set mtime to 100 seconds ago
        source_hash = source.computehash()
        # Write the content in a tar file
        storage = tmpdir.join('storage.tar')
        with closing(bwa.ChecksumTarFile.open(
                storage.strpath, mode='w:')) as tar:
            tar.add(source.strpath, source.basename)
            checksum = tar.members[0].data_checksum
            assert checksum == source_hash
            # Double close should not give any issue
            tar.close()
        lab = tmpdir.join('lab').ensure(dir=True)
        tar = tarfile.open(storage.strpath, mode='r:')
        tar.extractall(lab.strpath)
        tar.close()
        dest_file = lab.join(source.basename)
        sum_file = lab.join('MD5SUMS')
        # Parse the MD5SUMS file (md5sum format: "<hash> *<name>").
        sums = {}
        for line in sum_file.readlines():
            checksum, name = re.split(r' [* ]', line.rstrip(), 1)
            sums[name] = checksum
        assert list(sums.keys()) == [source.basename]
        assert sums[source.basename] == source_hash
        assert dest_file.computehash() == source_hash
        # Verify file mtime
        # Use a round(2) comparison because float is not precise in Python 2.x
        assert round(dest_file.mtime(), 2) == round(source.mtime(), 2)
    @pytest.mark.parametrize(['size', 'mode'],
                             [
                                 [0, 0],
                                 [10, None],
                                 [10, 0],
                                 [10, 1],
                                 [10, -5],
                                 [16 * 1024, 0],
                                 [32 * 1024 - 1, -1],
                                 [32 * 1024 - 1, 0],
                                 [32 * 1024 - 1, 1],
                             ])
    def test_md5copyfileobj(self, size, mode):
        """
        Test md5copyfileobj different size.

        If mode is None, copy the whole data.
        If mode is <= 0, copy the data passing the exact length.
        If mode is > 0, require more bytes than available, raising an error

        :param int size: The size of random data to use for the test
        :param int|None mode: the mode of operation, see above description
        """
        src = BytesIO()
        dst = BytesIO()
        # Generate `size` random bytes
        src_string = bytearray(random.getrandbits(8) for _ in range(size))
        src.write(src_string)
        src.seek(0)
        if mode and mode > 0:
            # Require more bytes thant available. Make sure to get an exception
            with pytest.raises(IOError):
                bwa.md5copyfileobj(src, dst, size + mode)
        else:
            if mode is None:
                # Copy the whole file until the end
                md5 = bwa.md5copyfileobj(src, dst)
            else:
                # Copy only a portion of the file
                md5 = bwa.md5copyfileobj(src, dst, size + mode)
                src_string = src_string[0:size + mode]
            # Validate the content and the checksum
            assert dst.getvalue() == src_string
            assert md5 == hashlib.md5(bytes(src_string)).hexdigest()
| 2ndquadrant-it/barman-cli | tests/test_barman_wal_archive.py | Python | gpl-3.0 | 11,295 | [
"BWA"
] | c138a2758c9144713097792b8c86426ece30840b221770f96a2ed7d7c62cdd8f |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hassourcecountbase import HasSourceCountBase
#-------------------------------------------------------------------------
# "Families having sources"
#-------------------------------------------------------------------------
class HasSourceCount(HasSourceCountBase):
    """Rule matching families with a given number of attached sources.

    The counting/matching logic lives in HasSourceCountBase; this
    subclass only provides the family-specific display strings.
    """
    name = _('Families with <count> sources')
    description = _("Matches families with a certain number of sources connected to it")
| pmghalvorsen/gramps_branch | gramps/gen/filters/rules/family/_hassourcecount.py | Python | gpl-2.0 | 1,808 | [
"Brian"
] | c2537af08d11fac59c433f7a6a7dc3e5b8ab0786a325aa0cfd484a46f5d08ae8 |
# Look at sdf_example.py in the examples/ directory for more detail
from sdfpy import load_sdf
from thingking import loadtxt
import vtk
import numpy as np
def dist(r1, r2):
    """Euclidean distance between position vectors ``r1`` and ``r2``."""
    displacement = r2 - r1
    return np.sqrt(np.dot(displacement, displacement))
def glyphMapper(pts, sclrs):
    """Build a VTK mapper rendering one vertex glyph per point.

    Parameters
    ----------
    pts : sequence of 3-element coordinates
        Point positions, inserted into a vtkPoints container.
    sclrs : sequence of float
        One scalar per point, used to colour the glyphs.

    Returns
    -------
    vtk.vtkDataSetMapper
        Mapper whose scalar range is set to the range of ``sclrs``.

    NOTE(review): uses the pre-VTK-6 ``SetInput`` API; VTK 6+ replaced it
    with ``SetInputData``/``SetInputConnection`` -- confirm the installed
    VTK version before upgrading.
    """
    points = vtk.vtkPoints()
    for pt in pts:
        points.InsertNextPoint(pt)
    scalars = vtk.vtkFloatArray()
    for s in sclrs:
        scalars.InsertNextTuple1(s)
    # Attach positions and per-point scalars to a single polydata object.
    poly_data = vtk.vtkPolyData()
    poly_data.SetPoints(points)
    poly_data.GetPointData().SetScalars(scalars)
    # Render each point as a vertex glyph.
    glyph_filter = vtk.vtkVertexGlyphFilter()
    glyph_filter.SetInput(poly_data)
    mapper = vtk.vtkDataSetMapper()
    mapper.SetInput(glyph_filter.GetOutput())
    mapper.SetScalarRange(scalars.GetRange())
    return mapper
class vtkTimerCallback:
    """Timer observer that cycles an actor through a list of mappers.

    Each TimerEvent advances to the next mapper (wrapping around) and
    forces a re-render, producing a frame-by-frame animation.
    """

    def __init__(self, actor, mappers):
        self.t = 0
        self.actor = actor
        self.mappers = mappers

    def execute(self, obj, event):
        # Report the frame we are leaving, then advance with wrap-around.
        print("Executing callback timer: %d" % self.t)
        next_index = (self.t + 1) % len(self.mappers)
        self.t = next_index
        self.actor.SetMapper(self.mappers[next_index])
        obj.GetRenderWindow().Render()
def visualizeDatasets(particle_datasets,
                      target_particle_ids,
                      halo_center,
                      halo_velocity):
    """Animate the selected particles across several snapshots.

    For each dataset a point-glyph mapper is built, coloured by the
    magnitude of each particle's angular momentum about the halo centre;
    a repeating VTK timer then cycles the actor through the mappers
    (one frame per second).

    Parameters
    ----------
    particle_datasets : sequence of SDF datasets with 'x','y','z','vx','vy','vz'
    target_particle_ids : indices of the particles to display
    halo_center, halo_velocity : numpy arrays defining the halo rest frame
    """
    positions = []
    momenta = []
    mappers = []
    for pds in particle_datasets:
        print("Starting a new dataset")
        px, py, pz = pds['x'], pds['y'], pds['z']
        pvx, pvy, pvz = pds['vx'], pds['vy'], pds['vz']
        print("Computing the displacements")
        poss = [ np.array([px[i], py[i], pz[i]]) - halo_center for i in target_particle_ids ]
        print("Computing the angular momenta")
        # |r x v| per unit mass, in the halo's rest frame.
        moms = [ np.linalg.norm(np.cross(
                    np.array([px[i], py[i], pz[i]]) - halo_center,
                    np.array([pvx[i], pvy[i], pvz[i]]) - halo_velocity
                )) for i in target_particle_ids ]
        print("Done!")
        positions.append(poss)
        momenta.append(moms)
        mappers.append(glyphMapper(poss, moms))
    # Standard VTK pipeline: actor -> renderer -> render window -> interactor.
    actor = vtk.vtkActor()
    actor.SetMapper(mappers[0])
    ren = vtk.vtkRenderer()
    ren.AddActor(actor)
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    renWin.Render()
    iren.Initialize()
    # Swap mappers once per 1000 ms to step through the snapshots.
    cb = vtkTimerCallback(actor, mappers)
    iren.AddObserver("TimerEvent", cb.execute)
    timerId = iren.CreateRepeatingTimer(1000)
    iren.Start()
def main():
    # Snapshots ordered from the final epoch (a = 1.0) backwards in time.
    particle_dataset_filenames = [ "../data/ds14_scivis_0128_e4_dt04_1.0000"
                                 , "../data/ds14_scivis_0128_e4_dt04_0.9900"
                                 , "../data/ds14_scivis_0128_e4_dt04_0.9800"
                                 , "../data/ds14_scivis_0128_e4_dt04_0.9700"
                                 ]
    particle_datasets = [ load_sdf(filename) for filename in particle_dataset_filenames ]
    # Unpack the Rockstar halo-catalog columns; only hx/hy/hz, hvx/hvy/hvz,
    # rvir and rs are used below.
    scale, id, desc_scale, desc_id, num_prog, pid, upid, desc_pid, phantom, \
    sam_mvir, mvir, rvir, rs, vrms, mmp, scale_of_last_MM, vmax, hx, hy, hz, \
    hvx, hvy, hvz, Jx, Jy, Jz, Spin, Breadth_first_ID, Depth_first_ID, \
    Tree_root_ID, Orig_halo_ID, Snap_num, Next_coprogenitor_depthfirst_ID, \
    Last_progenitor_depthfirst_ID, Rs_Klypin, M_all, M200b, M200c, M500c, \
    M2500c, Xoff, Voff, Spin_Bullock, b_to_a, c_to_a, A_x, A_y, A_z, \
    b_to_a_500c, c_to_a_500c, A_x_500c, A_y_500c, A_z_500c, T_over_U, \
    M_pe_Behroozi, M_pe_Diemer, Macc, Mpeak, Vacc, Vpeak, Halfmass_Scale, \
    Acc_Rate_Inst, Acc_Rate_100Myr, Acc_Rate_Tdyn = \
        loadtxt("../data/rockstar/hlists/hlist_1.00000.list", unpack=True)
    px, py, pz = particle_datasets[0]['x'], particle_datasets[0]['y'], particle_datasets[0]['z']
    pvx, pvy, pvz = particle_datasets[0]['vx'], particle_datasets[0]['vy'], particle_datasets[0]['vz']
    n_particles = len(px)
    particle_pts = [ np.array([px[i], py[i], pz[i]]) for i in range(n_particles) ]
    # Now we want to convert the proper kpc of the particle position to comoving
    # Mpc/h, a common unit used in computational cosmology in general, but
    # specifically is used as the output unit in the merger tree halo list loaded
    # in above. First we get the Hubble parameter, here stored as 'h_100' in the
    # SDF parameters. Then we load the simulation width, L0, which is also in
    # proper kpc. Finally we load the scale factor, a, which for this particular
    # snapshot is equal to 1 since we are loading the final snapshot from the
    # simulation.
    h_100 = particle_datasets[0].parameters['h_100']
    width = particle_datasets[0].parameters['L0']
    cosmo_a = particle_datasets[0].parameters['a']
    kpc_to_Mpc = 1./1000
    # x' = (x + 0.5*width) * h_100 * kpc_to_Mpc / cosmo_a
    # x = x' * cosmo_a / (h_100*kpc_to_Mpc) - 0.5*width
    rockstar_scale = cosmo_a / (h_100 * kpc_to_Mpc)
    rockstar_to_particle = lambda r: r*rockstar_scale - 0.5*width
    n_halos = len(hx)
    halo_pts = [ rockstar_to_particle(np.array([hx[i], hy[i], hz[i]])) for i in range(n_halos) ]
    # NOTE(review): rvir/rs is a concentration ratio, not a radius; the
    # commented-out alternative below uses rvir directly. Confirm which
    # radius definition is intended here.
    halo_radii = [ rvir[i]/rs[i] * rockstar_scale for i in range(n_halos) ]
    #halo_radii = [ rvir[i] * rockstar_scale for i in range(n_halos) ]
    myhalo_center = halo_pts[0]
    myhalo_radius = halo_radii[0]
    myhalo_velocity = np.array([hvx[0], hvy[0], hvz[0]])
    print("Starting the particle scan...")
    # Select every particle inside the first halo's radius (O(n) scan).
    myhalo_particles = [ i for i in range(n_particles) if dist(particle_pts[i], myhalo_center) < myhalo_radius ]
    print("Done!")
    visualizeDatasets(particle_datasets, myhalo_particles, myhalo_center, myhalo_velocity)
if __name__ == "__main__":
    main()
| ryanpbrewster/SciVis-2015 | src/animateHalo.py | Python | mit | 5,758 | [
"VTK"
] | d70f716febf0aee4f630334b1008015bb1de0d2e65dc4b8e5a00d25fc6ff1ceb |
import shutil
import os
import mdtraj as md
from openmoltools.utils import import_, enter_temp_directory, run_antechamber, create_ffxml_file
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format="LOG: %(message)s")
# Note: We recommend having every function return *copies* of input, to avoid headaches associated with in-place changes
def get_charges(molecule, max_confs=800, strictStereo=True, keep_confs=None):
    """Generate charges for an OpenEye OEMol molecule.
    Parameters
    ----------
    molecule : OEMol
        Molecule for which to generate conformers.
        Omega will be used to generate max_confs conformations.
    max_confs : int, optional, default=800
        Max number of conformers to generate
    strictStereo : bool, optional, default=True
        If False, permits smiles strings with unspecified stereochemistry.
        See https://docs.eyesopen.com/omega/usage.html
    keep_confs : int, optional, default=None
        If not None, only keep this many conformations in the final
        charged OEMol. Multiple conformations are still used to *determine*
        the charges. For example, keep_confs=1 will return an OEMol with
        just a single conformation, while keep_confs=None returns all the
        conformations generated by Omega.
    Returns
    -------
    charged_copy : OEMol
        A molecule with OpenEye's recommended AM1BCC charge selection scheme.
    Notes
    -----
    Roughly follows
    http://docs.eyesopen.com/toolkits/cookbook/python/modeling/am1-bcc.html
    """
    oechem = import_("openeye.oechem")
    if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for OEChem!"))
    oequacpac = import_("openeye.oequacpac")
    if not oequacpac.OEQuacPacIsLicensed(): raise(ImportError("Need License for oequacpac!"))
    # Work on a normalized copy; multiple conformers improve the AM1BCC fit.
    molecule = normalize_molecule(molecule)
    charged_copy = generate_conformers(molecule, max_confs=max_confs, strictStereo=strictStereo) # Generate up to max_confs conformers
    status = oequacpac.OEAssignPartialCharges(charged_copy, oequacpac.OECharges_AM1BCCSym) # AM1BCCSym recommended by Chris Bayly to KAB+JDC, Oct. 20 2014.
    if not status:
        raise(RuntimeError("OEAssignPartialCharges returned error code %d" % status))
    # Optionally trim the result down to the first keep_confs conformations.
    # NOTE(review): conformers are deleted while iterating GetConfs();
    # OpenEye's iterator appears to tolerate this, but confirm against the
    # installed toolkit version -- deleting during iteration is fragile.
    for k, conf in enumerate(charged_copy.GetConfs()):
        if keep_confs is not None and k > keep_confs - 1:
            charged_copy.DeleteConf(conf)
    return charged_copy
def normalize_molecule(molecule):
    """Normalize a copy of the molecule by checking aromaticity, adding explicit hydrogens, and renaming by IUPAC name.
    Parameters
    ----------
    molecule : OEMol
        the molecule to be normalized.
    Returns
    -------
    molcopy : OEMol
        A (copied) version of the normalized molecule
    """
    oechem = import_("openeye.oechem")
    if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for OEChem!"))
    oeiupac = import_("openeye.oeiupac")
    # Fixed error message: this license check is for OEIupac, not OEOmega.
    if not oeiupac.OEIUPACIsLicensed(): raise(ImportError("Need License for OEIupac!"))
    molcopy = oechem.OEMol(molecule)
    # Assign aromaticity.
    oechem.OEAssignAromaticFlags(molcopy, oechem.OEAroModelOpenEye)
    # Add hydrogens.
    oechem.OEAddExplicitHydrogens(molcopy)
    # Set title to IUPAC name.
    name = oeiupac.OECreateIUPACName(molcopy)
    molcopy.SetTitle(name)
    # Check for any missing atom names, if found reassign all of them.
    if any([atom.GetName() == '' for atom in molcopy.GetAtoms()]):
        oechem.OETriposAtomNames(molcopy)
    return molcopy
def iupac_to_oemol(iupac_name):
    """Create a OEMolBuilder from a iupac name.
    Parameters
    ----------
    iupac_name : str
        IUPAC name of desired molecule.
    Returns
    -------
    molecule : OEMol
        A normalized molecule with desired iupac name
    """
    oechem = import_("openeye.oechem")
    if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for OEChem!"))
    oeiupac = import_("openeye.oeiupac")
    # Fixed error message: this license check is for OEIupac, not OEOmega.
    if not oeiupac.OEIUPACIsLicensed(): raise(ImportError("Need License for OEIupac!"))
    # Create an OEMol molecule from IUPAC name.
    molecule = oechem.OEMol() # create a molecule
    # Populate the MoleCule from the IUPAC name
    if not oeiupac.OEParseIUPACName(molecule, iupac_name):
        raise ValueError("The supplied IUPAC name '%s' could not be parsed." % iupac_name)
    molecule = normalize_molecule(molecule)
    return molecule
def smiles_to_oemol(smiles):
    """Create a OEMolBuilder from a smiles string.
    Parameters
    ----------
    smiles : str
        SMILES representation of desired molecule.
    Returns
    -------
    molecule : OEMol
        A normalized molecule with desired smiles string.
    """
    oechem = import_("openeye.oechem")
    if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for OEChem!"))
    molecule = oechem.OEMol()
    if not oechem.OEParseSmiles(molecule, smiles):
        raise ValueError("The supplied SMILES '%s' could not be parsed." % smiles)
    # Bug fix: normalize_molecule() returns a normalized *copy*; the old code
    # discarded the return value, so callers received an un-normalized
    # molecule (cf. iupac_to_oemol, which correctly assigns the result).
    molecule = normalize_molecule(molecule)
    return molecule
def generate_conformers(molecule, max_confs=800, strictStereo=True, ewindow=15.0, rms_threshold=1.0, strictTypes = True):
    """Generate conformations for the supplied molecule
    Parameters
    ----------
    molecule : OEMol
        Molecule for which to generate conformers
    max_confs : int, optional, default=800
        Max number of conformers to generate. If None, use default OE Value.
    strictStereo : bool, optional, default=True
        If False, permits smiles strings with unspecified stereochemistry.
    strictTypes : bool, optional, default=True
        If True, requires that Omega have exact MMFF types for atoms in molecule; otherwise, allows the closest atom type of the same element to be used.
    Returns
    -------
    molcopy : OEMol
        A multi-conformer molecule with up to max_confs conformers.
    Notes
    -----
    Roughly follows
    http://docs.eyesopen.com/toolkits/cookbook/python/modeling/am1-bcc.html
    """
    oechem = import_("openeye.oechem")
    if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for OEChem!"))
    oeomega = import_("openeye.oeomega")
    if not oeomega.OEOmegaIsLicensed(): raise(ImportError("Need License for OEOmega!"))
    molcopy = oechem.OEMol(molecule)
    omega = oeomega.OEOmega()
    # These parameters were chosen to match http://docs.eyesopen.com/toolkits/cookbook/python/modeling/am1-bcc.html
    omega.SetCanonOrder(False)
    omega.SetSampleHydrogens(True)  # Word to the wise: skipping this step can lead to significantly different charges!
    omega.SetEnergyWindow(ewindow)
    omega.SetRMSThreshold(rms_threshold)  # Word to the wise: skipping this step can lead to significantly different charges!
    omega.SetStrictStereo(strictStereo)
    omega.SetStrictAtomTypes(strictTypes)
    omega.SetIncludeInput(False)  # don't include input
    # Bug fix: SetMaxConfs was previously also called unconditionally above,
    # passing None when max_confs is None (contradicting the docstring);
    # only set it when an explicit limit was requested.
    if max_confs is not None:
        omega.SetMaxConfs(max_confs)
    status = omega(molcopy)  # generate conformation
    if not status:
        raise(RuntimeError("omega returned error code %d" % status))
    return molcopy
def get_names_to_charges(molecule):
    """Return a dictionary of atom names and partial charges, as well as a string representation.
    Parameters
    ----------
    molecule : OEMol
        Molecule for which to grab charges
    Returns
    -------
    data : dictionary
        A dictionary whose (key, val) pairs are the atom names and partial
        charges, respectively.
    molrepr : str
        A string representation of data
    """
    oechem = import_("openeye.oechem")
    if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for oechem!"))
    # Work on a copy so the caller's molecule is untouched.
    molcopy = oechem.OEMol(molecule)
    data = {}
    lines = []
    for atom in molcopy.GetAtoms():
        atom_name = atom.GetName()
        partial_charge = atom.GetPartialCharge()
        data[atom_name] = partial_charge
        lines.append("%s %f \n" % (atom_name, partial_charge))
    return data, "".join(lines)
def molecule_to_mol2(molecule, tripos_mol2_filename=None, conformer=0, residue_name="MOL"):
    """Convert OE molecule to tripos mol2 file.
    Parameters
    ----------
    molecule : openeye.oechem.OEGraphMol
        The molecule to be converted.
    tripos_mol2_filename : str, optional, default=None
        Output filename.  If None, will create a filename similar to
        name.tripos.mol2, where name is the name of the OE molecule.
    conformer : int, optional, default=0
        Save this frame
    residue_name : str, optional, default="MOL"
        OpenEye writes mol2 files with <0> as the residue / ligand name.
        This chokes many mol2 parsers, so we replace it with a string of
        your choosing.
    Returns
    -------
    molecule_name : str
        Title of the OE molecule.
    tripos_mol2_filename : str
        Filename of output tripos mol2 file
    """
    oechem = import_("openeye.oechem")
    if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for oechem!"))
    # Get molecule name.
    molecule_name = molecule.GetTitle()
    logger.debug(molecule_name)
    # Write molecule as Tripos mol2.
    if tripos_mol2_filename is None:
        tripos_mol2_filename = molecule_name + '.tripos.mol2'
    ofs = oechem.oemolostream(tripos_mol2_filename)
    ofs.SetFormat(oechem.OEFormat_MOL2H)
    # Write only the requested conformer.
    for k, mol in enumerate(molecule.GetConfs()):
        if k == conformer:
            oechem.OEWriteMolecule(ofs, mol)
    ofs.close()
    # Replace <0> substructure names with valid text.  Use context managers
    # so the file handles are closed even if an I/O error occurs (the old
    # open/close pairs leaked the handle on exception).
    with open(tripos_mol2_filename, 'r') as infile:
        lines = infile.readlines()
    newlines = [line.replace('<0>', residue_name) for line in lines]
    with open(tripos_mol2_filename, 'w') as outfile:
        outfile.writelines(newlines)
    return molecule_name, tripos_mol2_filename
def oemols_to_ffxml(molecules, base_molecule_name="lig"):
    """Generate an OpenMM ffxml object and MDTraj trajectories from multiple OEMols
    Parameters
    ----------
    molecules : list(OEMole)
        Molecules WITH CHARGES. Each can have multiple conformations.
        WILL GIVE UNDEFINED RESULTS IF NOT CHARGED.
    base_molecule_name : str, optional, default='lig'
        Base name of molecule to use inside parameter files.
    Returns
    -------
    trajectories : list(mdtraj.Trajectory)
        List of MDTraj Trajectories for molecule. May contain multiple frames
    ffxml : StringIO
        StringIO representation of ffxml file.
    Notes
    -----
    We allow multiple different molecules at once so that they can all be
    included in a single ffxml file, which is currently the only recommended
    way to simulate multiple GAFF molecules in a single simulation. For most
    applications, you will have just a single molecule:
    e.g. molecules = [my_oemol]
    The resulting ffxml StringIO object can be directly input to OpenMM e.g.
    `forcefield = app.ForceField(ffxml)`
    This will generate a lot of temporary files, so you may want to use
    utils.enter_temp_directory() to avoid clutter.
    """
    all_trajectories = []
    gaff_mol2_filenames = []
    frcmod_filenames = []
    print(os.getcwd())
    for i, molecule in enumerate(molecules):
        trajectories = []
        for j in range(molecule.NumConfs()):
            # One mol2 + antechamber run per conformer; filenames encode
            # (molecule index, conformer index) to keep them unique.
            molecule_name = "%s-%d-%d" % (base_molecule_name, i, j)
            mol2_filename = "./%s.mol2" % molecule_name
            _unused = molecule_to_mol2(molecule, mol2_filename, conformer=j)
            gaff_mol2_filename, frcmod_filename = run_antechamber(molecule_name, mol2_filename, charge_method=None) # It's redundant to run antechamber on each frame, fix me later.
            traj = md.load(gaff_mol2_filename)
            trajectories.append(traj)
            if j == 0: # Only need 1 frame of forcefield files
                gaff_mol2_filenames.append(gaff_mol2_filename)
                frcmod_filenames.append(frcmod_filename)
        # Create a trajectory with all frames of the current molecule
        traj = trajectories[0].join(trajectories[1:])
        all_trajectories.append(traj)
    ffxml = create_ffxml_file(gaff_mol2_filenames, frcmod_filenames, override_mol2_residue_name=base_molecule_name)
    return all_trajectories, ffxml
def smiles_to_antechamber(smiles_string, gaff_mol2_filename, frcmod_filename, residue_name="MOL", strictStereo=False):
    """Build a molecule from a smiles string and run antechamber,
    generating GAFF mol2 and frcmod files from a smiles string. Charges
    will be generated using the OpenEye QuacPac AM1-BCC implementation.
    Parameters
    ----------
    smiles_string : str
        Smiles string of molecule to construct and charge
    gaff_mol2_filename : str
        Filename of mol2 file output of antechamber, with charges
        created from openeye
    frcmod_filename : str
        Filename of frcmod file output of antechamber. Most likely
        this file will be almost empty, at least for typical molecules.
    residue_name : str, optional, default="MOL"
        OpenEye writes mol2 files with <0> as the residue / ligand name.
        This chokes many mol2 parsers, so we replace it with a string of
        your choosing. This might be useful for downstream applications
        if the residue names are required to be unique.
    strictStereo : bool, optional, default=False
        If False, permits smiles strings with unspecified stereochemistry.
        See https://docs.eyesopen.com/omega/usage.html
    """
    oechem = import_("openeye.oechem")
    if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for oechem!"))
    # Get the absolute path so we can find these filenames from inside a temporary directory.
    gaff_mol2_filename = os.path.abspath(gaff_mol2_filename)
    frcmod_filename = os.path.abspath(frcmod_filename)
    # keep_confs=1: charges are fit over many conformers, but only one
    # conformation is written out for antechamber.
    m = smiles_to_oemol(smiles_string)
    m = get_charges(m, strictStereo=strictStereo, keep_confs=1)
    with enter_temp_directory():  # Avoid dumping 50 antechamber files in local directory.
        _unused = molecule_to_mol2(m, "./tmp.mol2", residue_name=residue_name)
        net_charge = oechem.OENetCharge(m)
        tmp_gaff_mol2_filename, tmp_frcmod_filename = run_antechamber("tmp", "./tmp.mol2", charge_method=None, net_charge=net_charge) # USE OE AM1BCC charges!
        # Copy the results out of the temp directory before it is deleted.
        shutil.copy(tmp_gaff_mol2_filename, gaff_mol2_filename)
        shutil.copy(tmp_frcmod_filename, frcmod_filename)
| kyleabeauchamp/openmoltools | openmoltools/openeye.py | Python | gpl-2.0 | 14,737 | [
"MDTraj",
"OpenMM"
] | e0f20de4dc7b27a8fae71253500ada70c7f2f40c0cd93efffc9a76190c12d68f |
#
# ANNarchy - SimpleSTDP
#
# A simple model showing the STDP learning on a single neuron.
#
# Model adapted from Song, Miller and Abbott (2000) and Song and Abbott (2001)
#
# Code adapted from the Brian example: http://brian.readthedocs.org/en/1.4.1/examples-plasticity_STDP1.html
#
# authors: Helge Uelo Dinkelbach, Julien Vitay
#
from ANNarchy import *
# Parameters
F = 15.0 # Poisson distribution at 15 Hz
N = 1000 # 1000 Poisson inputs
gmax = 0.01 # Maximum weight
duration = 100000.0 # Simulation for 100 seconds
# Definition of the neuron
IF = Neuron(
parameters = """
tau_m = 10.0
tau_e = 5.0
vt = -54.0
vr = -60.0
El = -74.0
Ee = 0.0
""",
equations = """
tau_m * dv/dt = El - v + g_exc * (Ee - vr) : init = -60.0
tau_e * dg_exc/dt = - g_exc
""",
spike = """
v > vt
""",
reset = """
v = vr
"""
)
# Input population
Input = PoissonPopulation(name = 'Input', geometry=N, rates=F)
# Output neuron
Output = Population(name = 'Output', geometry=1, neuron=IF)
# Projection learned using STDP
proj = Projection(
pre = Input,
post = Output,
target = 'exc',
synapse = STDP(tau_plus=20.0, tau_minus=20.0, A_plus=0.01, A_minus=0.0105, w_max=0.01)
)
proj.connect_all_to_all(weights=Uniform(0.0, gmax))
# Compile the network
compile()
# Start recording
Mi = Monitor(Input, 'spike')
Mo = Monitor(Output, 'spike')
# Start the simulation
print('Start the simulation')
simulate(duration, measure_time=True)
# Retrieve the recordings
input_spikes = Mi.get('spike')
output_spikes = Mo.get('spike')
# Compute the mean firing rates during the simulation
print('Mean firing rate in the input population: ' + str(Mi.mean_fr(input_spikes)) )
print('Mean firing rate of the output neuron: ' + str(Mo.mean_fr(output_spikes)) )
# Compute the instantaneous firing rate of the output neuron
output_rate = Mo.smoothed_rate(output_spikes, 100.0)
# Receptive field after simulation
weights = proj.w[0]
import matplotlib.pyplot as plt
plt.subplot(3,1,1)
plt.plot(output_rate[0, :])
plt.subplot(3,1,2)
plt.plot(weights, '.')
plt.subplot(3,1,3)
plt.hist(weights, bins=20)
plt.show()
| vitay/ANNarchy | examples/simple_stdp/SimpleSTDPModel.py | Python | gpl-2.0 | 2,208 | [
"Brian",
"NEURON"
] | 1150762d1f6f440455fcb27c2afe528146c8141ad2e3188d93784bbaec7bc660 |
import mimetypes
import re
from optparse import make_option
from urllib.parse import urlparse
from . import InventoryCommand
from inventory.models import Dataset, Distribution
mimetypes.init()
class Command(InventoryCommand):
help = 'Normalizes property values'
option_list = InventoryCommand.option_list + (
make_option('--licenses', action='store_true', dest='licenses',
default=False,
help='Normalize licenses.'),
make_option('--media-types', action='store_true', dest='media_types',
default=False,
help='Normalize media types.'),
)
def handle(self, *args, **options):
self.setup(*args, **options)
if args:
criteria = {'division_id__in': [catalog.division_id for catalog in self.catalogs]}
else:
criteria = {}
if options['licenses']:
self.licenses(criteria)
if options['media_types']:
self.media_types(criteria)
def media_types(self, criteria):
self.info('Normalizing media types...')
kwargs = criteria.copy()
kwargs['mediaType'] = ''
qs = Distribution.objects.filter(**kwargs)
for media_type, extension in types:
if not mimetypes.types_map.get(extension):
mimetypes.add_type(media_type, extension)
zippable = frozenset([
'application/gml+xml',
'application/json',
'application/pdf',
'application/vnd.google-earth.kml+xml',
'application/x-ascii-grid',
'application/x-filegdb',
'application/xml',
'image/tiff',
'text/csv',
])
self.warnings = {}
self.warnings_count = 0
for distribution in qs.iterator():
self.save = True
guesses = {
'mimetype': '',
'format': '',
'accessURL': '',
}
# Make the guesses.
if distribution.mimetype:
guesses['mimetype'] = self.guess_type(distribution.mimetype)
if distribution.format:
guesses['format'] = self.guess_type(distribution.format)
if distribution.accessURL:
parsed = urlparse(distribution.accessURL)
# Avoid spurious, e.g. "application/vnd.lotus-organizer" from .org
# domains and and "audio/basic" from .au domains.
if parsed.path:
guess = mimetypes.guess_type(distribution.accessURL)
if guess[1] == 'gzip' and (guesses['mimetype'] and guesses['mimetype'] != guess[0] or guesses['format'] and guesses['format'] != guess[0]):
guesses['accessURL'] = 'application/gzip'
else:
guesses['accessURL'] = guess[0] or ''
else:
guesses['accessURL'] = ''
if parsed.netloc.endswith('.org') and distribution.format == 'application/vnd.lotus-organizer':
guesses['format'] = ''
# Allow commonly zipped media types to pass. (>150 distributions globally)
if guesses['accessURL'] == 'application/zip' and guesses['format'] in zippable and not guesses['mimetype']:
guesses['accessURL'] = ''
# Eliminate non-media types.
for field, media_type in guesses.items():
if not media_type or self.ignore_type(media_type):
guesses[field] = ''
# Eliminate empty media types.
media_types = list(filter(None, guesses.values()))
# Media types guessed from extensions, e.g. "application/xml",
# can be less specific than e.g. "application/rdf+xml".
specific_media_type = None
for ambiguous_media_type, specific_media_types in ambiguous_media_types.items():
# If the media types include an ambiguous media type and a
# specific media type, eliminate the ambigious media type.
if ambiguous_media_type in media_types:
specific_media_type = next((media_type for media_type in media_types if media_type in specific_media_types), None)
if specific_media_type:
media_types = [media_type for media_type in media_types if media_type != ambiguous_media_type]
break
# Test if the guesses agree with each other.
if specific_media_type:
if len(set(media_types)) > 1:
self.bad_guess('Multiple specific media types', guesses, distribution)
elif guesses['accessURL']:
# The file extension is misleading, e.g. .xml redirects to .html
# in GB. Note that even when the metadata is consistent, the
# guessed media type could be incorrect.
if guesses['mimetype'] and guesses['accessURL'] != guesses['mimetype']:
self.bad_guess('Conflict', guesses, distribution)
elif guesses['format'] and guesses['accessURL'] != guesses['format']:
self.bad_guess('Conflict', guesses, distribution)
elif guesses['mimetype'] and guesses['format'] and guesses['mimetype'] != guesses['format']:
self.bad_guess('Conflict', guesses, distribution)
# Test that we recognize the media types.
for media_type in media_types:
if media_type and not self.valid_type(media_type):
self.bad_guess('Unrecognized', guesses, distribution)
# If a unique media type is recognized.
if self.save and len(set(media_types)) == 1:
distribution.mediaType = media_types[0]
distribution.save()
for key, (sample, count) in sorted(self.warnings.items(), key=lambda warning: warning[1][1]):
message, guesses = list(key)
guesses = dict(guesses)
print('{:5} {}'.format(count, message))
for field in ('mimetype', 'format', 'accessURL'):
value = getattr(sample, field)
if value:
if field == 'accessURL' and len(value) > 120:
value = '{}..{}'.format(value[:59], value[-59:])
# e.g. "application/vnd.openxmlformats-officedocument.presentationml.presentation"
print(' {:9} {:73} {}'.format(field, guesses[field], value))
print()
print('Warnings: {}/{} ({} unique)'.format(self.warnings_count, qs.count(), len(self.warnings)))
def licenses(self, criteria):
self.info('Normalizing licenses...')
kwargs = criteria.copy()
kwargs['license'] = ''
qs = Dataset.objects.filter(**kwargs)
# MD http://data.gov.md/ro/termeni-si-conditii
# MD doesn't take advantage of CKAN's per-dataset licensing.
# qs = Dataset.objects.filter(division_id='ocd-division/country:md'); qs.filter(license_id='notspecified').count() / qs.count()
qs.filter(division_id='ocd-division/country:md', license_id='notspecified').update(license='http://data.gov.md/en/terms-and-conditions')
# MX displays "Libro Uso MX" for all datasets.
# cc-by http://catalogo.datos.gob.mx/dataset/mexico-prospero-estadisticas-nacionales
# notspecified http://catalogo.datos.gob.mx/dataset/niveles-actuales-de-rios
qs.filter(division_id='ocd-division/country:mx', license_id__in=('cc-by', 'notspecified', '')).update(license='http://datos.gob.mx/libreusomx/')
# ID http://data.id/lisensi-dan-atribusi.html
qs.filter(division_id='ocd-division/country:id', license_id='cc-by').update(license='http://creativecommons.org/licenses/by/4.0/')
# IT http://www.dati.gov.it/content/note-legali
qs.filter(division_id='ocd-division/country:it', license_id='cc-by').update(license='http://creativecommons.org/licenses/by/3.0/it/')
# KE "When a dataset publication permissions is marked as 'public', CC-0 is what we mean."
# Datasets without a license in data.json are listed as "Public Domain" on the web.
qs.filter(division_id='ocd-division/country:ke', license_id='').update(license='http://creativecommons.org/publicdomain/zero/1.0/')
# UK and RO use the same license ID for different licenses.
qs.filter(division_id='ocd-division/country:gb', license_id='uk-ogl').update(license='http://www.nationalarchives.gov.uk/doc/open-government-licence/')
qs.filter(division_id='ocd-division/country:ro', license_id='uk-ogl').update(license='http://data.gov.ro/base/images/logoinst/OGL-ROU-1.0.pdf')
for license_id, license in license_ids.items():
qs.filter(license_id=license_id).update(license=license)
for license_url, license in license_urls.items():
qs.filter(license_url=license_url).update(license=license)
for license_title, license in license_titles.items():
qs.filter(license_title=license_title).update(license=license)
def guess_type(self, value):
value = re.sub('\A\.', '', ' '.join(value.lower().split()).replace('; charset=binary', '')) # Normalize case and spaces and remove period from extension.
value = format_corrections.get(value, value)
if not self.valid_type(value):
value = mimetypes.types_map.get('.{}'.format(value), value)
return value
def valid_type(self, value):
return value in mimetypes.types_map.values() or value in valid_media_types
def ignore_type(self, value):
return value in ignore_media_types or ',' in value
def bad_guess(self, message, guesses, sample):
    """Record a failed media-type guess and suppress saving.

    Warnings are keyed by ``(message, guesses)`` so repeated occurrences
    of the same bad guess only increment a counter; the sample from the
    first occurrence is kept for reporting.
    """
    self.save = False
    key = (message, tuple(guesses.items()))
    if self.warnings.get(key):
        # Seen before: keep the original sample, bump the occurrence count.
        previous = self.warnings[key]
        self.warnings[key] = [previous[0], previous[1] + 1]
    else:
        # First occurrence: remember the sample that triggered the warning.
        self.warnings[key] = [sample, 1]
        self.warnings_count += 1
# Maps known license_id values (mostly CKAN identifiers) to canonical
# license URLs. Entries mapping to example.com mark ids with no real URL.
license_ids = {
    # Against DRM
    'against-drm': 'http://www.freecreations.org/Against_DRM2.html',
    # Creative Commons
    # CC-BY-4.0
    'cc-by-4': 'http://creativecommons.org/licenses/by/4.0/',
    'cc-by-4-fi': 'http://creativecommons.org/licenses/by/4.0/',
    'cc-by-4.0': 'http://creativecommons.org/licenses/by/4.0/',
    # CC0
    'cc-zero': 'http://creativecommons.org/publicdomain/zero/1.0/',
    'cc-zero-1.0': 'http://creativecommons.org/publicdomain/zero/1.0/',
    'cc0': 'http://creativecommons.org/publicdomain/zero/1.0/',
    'Creative Commons 1.0 Universal (http://creativecommons.org/publicdomain/zero/1.0/legalcode)': 'http://creativecommons.org/publicdomain/zero/1.0/',
    'http://creativecommons.org/publicdomain/zero/1.0/legalcode': 'http://creativecommons.org/publicdomain/zero/1.0/',
    'OKD Compliant::Creative Commons CCZero': 'http://creativecommons.org/publicdomain/zero/1.0/',
    # CC-**-3.0
    'http://creativecommons.org/licenses/by/3.0/legalcode': 'http://creativecommons.org/licenses/by/3.0/',
    'http://creativecommons.org/licenses/by-nd/3.0/legalcode': 'http://creativecommons.org/licenses/by-nd/3.0/',
    'http://creativecommons.org/licenses/by-sa/3.0/legalcode': 'http://creativecommons.org/licenses/by-sa/3.0/',
    # Creative Commons
    # FI
    'cc-by-sa-1-fi': 'http://creativecommons.org/licenses/by-sa/1.0/fi/',
    # GR http://data.gov.gr/terms/
    'OKD Compliant::Creative Commons Attribution': 'http://creativecommons.org/licenses/by/3.0/gr/',
    # NL https://data.overheid.nl/data/
    'creative-commons-attribution-cc-by-': 'http://creativecommons.org/licenses/by/3.0/nl/',
    'naamsvermelding---gelijkdelen-cc-by-sa-': 'http://creativecommons.org/licenses/by-sa/3.0/nl/',
    'publiek-domein': 'http://creativecommons.org/publicdomain/mark/1.0/',
    # Open Data Commons
    'odc-by': 'http://opendatacommons.org/licenses/by/1.0/',
    'odc-odbl': 'http://opendatacommons.org/licenses/odbl/1.0/',
    'odc-pddl': 'http://opendatacommons.org/licenses/pddl/1.0/',
    'http://opendatacommons.org/licenses/pddl/1.0/': 'http://opendatacommons.org/licenses/pddl/1.0/',
    # CA
    'ca-ogl-lgo': 'http://data.gc.ca/eng/open-government-licence-canada',
    'ca-odla-aldg': 'http://ocl-cal.gc.ca/eic/site/012.nsf/eng/00873.html',
    # CA-BC
    'OGL-BC': 'http://www.data.gov.bc.ca/local/dbc/docs/license/OGL-vbc2.0.pdf',
    'OGL-Surrey': 'http://data.surrey.ca/pages/open-government-licence-surrey',
    # CA-ON
    'ottawa': 'http://ottawa.ca/en/open-data-terms-use',
    # CA-QC
    'vdm': 'http://creativecommons.org/licenses/by/4.0/',
    # GB
    'CEH Open Government Licence': 'http://eidchub.ceh.ac.uk/administration-folder/tools/ceh-standard-licence-texts/ceh-open-government-licence',
    'Natural England-OS Open Government Licence': 'http://webarchive.nationalarchives.gov.uk/20140605090108/http://www.naturalengland.org.uk/copyright/default.aspx',
    'OS OpenData Licence': 'http://www.ordnancesurvey.co.uk/docs/licences/os-opendata-licence.pdf',
    'uk-ogl-2': 'http://www.nationalarchives.gov.uk/doc/open-government-licence/version/2/',
    'uk-citation-required': 'http://example.com/uk-citation-required',  # No URL
    # IE
    'gov-copyright': 'http://www.irishstatutebook.ie/2000/en/act/pub/0028/sec0191.html#sec191',
    'marine': 'http://www.marine.ie/NR/rdonlyres/6F56279C-631D-42AC-B495-74C76CE93A8B/0/MIDataLicenseMar2013.pdf',
    'psi': 'http://psi.gov.ie/files/2010/03/PSI-Licence.pdf',
    # IT
    'iodl1': 'http://www.formez.it/iodl/',
    'iodl2': 'http://www.dati.gov.it/iodl/2.0/',
    # FI
    'helsinkikanava-opendata-tos': 'http://open.helsinkikanava.fi/tos.html',
    'hri-tietoaineistot-lisenssi-nimea': 'http://www.hri.fi/lisenssit/hri-nimea/',
    'http://kartta.kokkola.fi/TeklaOGCWeb/wms.ashx': 'http://kartta.kokkola.fi/TeklaOGCWeb/wms.ashx',
    'http://kartta.metla.fi/MVMI-Lisenssi.pdf': 'http://kartta.metla.fi/MVMI-Lisenssi.pdf',
    'http://teto.tampere.fi/lisenssit/tre_avoimen_datan_lisenssi.pdf': 'http://teto.tampere.fi/lisenssit/tre_avoimen_datan_lisenssi.pdf',
    'http://tilastokeskus.fi/org/lainsaadanto/yleiset_kayttoehdot.html': 'http://tilastokeskus.fi/org/lainsaadanto/yleiset_kayttoehdot.html',
    'http://tilastokeskus.fi/org/lainsaadanto/yleiset_kayttoehdot.html ]': 'http://tilastokeskus.fi/org/lainsaadanto/yleiset_kayttoehdot.html ]',
    'http://tilastokeskus.fi/org/lainsaadanto/copyright_sv.html': 'http://tilastokeskus.fi/org/lainsaadanto/copyright.html',
    'http://tilastokeskus.fi/org/lainsaadanto/copyright.html': 'http://tilastokeskus.fi/org/lainsaadanto/copyright.html',
    'http://www.maanmittauslaitos.fi/aineistot-palvelut/rajapintapalvelut/karttakuvapalvelu-wms': 'http://www.maanmittauslaitos.fi/aineistot-palvelut/rajapintapalvelut/karttakuvapalvelu-wms',
    'http://www.maanmittauslaitos.fi/aineistot-palvelut/rajapintapalvelut/karttakuvapalvelu-wms,': 'http://www.maanmittauslaitos.fi/aineistot-palvelut/rajapintapalvelut/karttakuvapalvelu-wms',
    'http://www.maanmittauslaitos.fi/aineistot-palvelut/verkkopalvelut/kiinteistotietopalvelu': 'http://www.maanmittauslaitos.fi/aineistot-palvelut/verkkopalvelut/kiinteistotietopalvelu',
    'http://www.maanmittauslaitos.fi/avoindata_lisenssi_versio1_20120501': 'http://www.maanmittauslaitos.fi/avoindata_lisenssi_versio1_20120501',
    'http://www.maanmittauslaitos.fi/kartat/ilmakuvat/ilma-ortokuvien-indeksikartat': 'http://www.maanmittauslaitos.fi/kartat/ilmakuvat/ilma-ortokuvien-indeksikartat',
    'http://www.maanmittauslaitos.fi/maanmittauslaitoksen-avoimen-tietoaineiston-cc-40-lisenssi': 'http://www.maanmittauslaitos.fi/maanmittauslaitoksen-avoimen-tietoaineiston-cc-40-lisenssi',
    'http://www.maanmittauslaitos.fi/node/300': 'http://www.maanmittauslaitos.fi/laserkeilausindeksit',
    'https://www.jyu.fi/sport/laitokset/liikunta/liikuntapaikat/rajapinnat': 'https://www.jyu.fi/sport/laitokset/liikunta/liikuntapaikat/rajapinnat',
    'ilmoitettumuualla': 'http://www.hri.fi/lisenssit',
    'kmo-aluejakorajat': 'http://www.hri.fi/lisenssit/kmo-aluejakorajat/',
    # UY https://catalogodatos.gub.uy/
    'odc-uy': 'http://datos.gub.uy/wps/wcm/connect/856cc1804db0463baa8bea01b72d8394/terminos-catalogodatos.pdf?MOD=AJPERES&ContentCache=NONE&CACHEID=856cc1804db0463baa8bea01b72d8394',
    # Generic
    'other': 'http://example.com/other',
    # https://github.com/ckan/ckan/blob/master/ckan/model/license.py
    'notspecified': 'http://example.com/notspecified',
    'other-at': 'http://example.com/other-at',
    'other-closed': 'http://example.com/other-closed',
    'other-nc': 'http://example.com/other-nc',
    'other-open': 'http://example.com/other-open',
    'other-pd': 'http://example.com/other-pd',
}
# Maps license_url values to canonical license URLs (identity entries mark
# URLs already considered canonical).
license_urls = {
    # Identical
    'http://creativecommons.org/licenses/by/3.0/au/': 'http://creativecommons.org/licenses/by/3.0/au/',
    'http://creativecommons.org/licenses/by-nc/2.0/': 'http://creativecommons.org/licenses/by-nc/2.0/',
    'http://creativecommons.org/licenses/by-nc-nd/': 'http://creativecommons.org/licenses/by-nc-nd/',
    'http://creativecommons.org/licenses/by-nc-sa': 'http://creativecommons.org/licenses/by-nc-sa/',
    'http://creativecommons.org/licenses/by-nd/': 'http://creativecommons.org/licenses/by-nd/',
    # Correction
    'http://www.opendefinition.org/licenses/cc-by': 'http://creativecommons.org/licenses/by/',
    'http://www.opendefinition.org/licenses/cc-by-sa': 'http://creativecommons.org/licenses/by-sa/',
}
# Maps human-readable license_title values to canonical license URLs.
license_titles = {
    # Creative Commons
    'cc-nc': 'http://creativecommons.org/licenses/nc/1.0/',
    # Creative Commons
    # AU
    'cc-by-sa': 'http://creativecommons.org/licenses/by-sa/',
    # EE
    'Creative Commons BY 3.0': 'http://creativecommons.org/licenses/by/3.0/ee/',
    # NL
    'cc-by': 'http://creativecommons.org/licenses/by/',
    # Generic
    'License Not Specified': 'http://example.com/notspecified',
}
# Additional media types.
# (media type, extension) pairs — presumably registered with the mimetypes
# module elsewhere in this file; TODO confirm against the registration code.
types = (
    # http://reference.wolfram.com/language/ref/format/DBF.html
    ('application/dbf', '.dbf'),
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc6713#section-3
    ('application/gzip', '.gz'),
    # http://tools.ietf.org/html/draft-hallambaker-jsonl-01
    ('application/json-l', '.jsonl'),
    # https://tools.ietf.org/html/rfc2220
    ('application/marc', 'marc'),
    # http://blogs.msdn.com/b/jaimer/archive/2008/01/04/mime-types.aspx
    ('application/msaccess', '.accdb'),
    # http://www.w3.org/TR/n-triples/#n-triples-mediatype
    ('application/n-triples', '.nt'),
    # http://www.w3.org/TR/owl2-xml-serialization/#Appendix:_Internet_Media_Type.2C_File_Extension.2C_and_Macintosh_File_Type
    ('application/owl+xml', '.owl'),
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc3870
    ('application/rdf+xml', '.rdf'),
    # http://www.w3.org/TR/sparql11-results-json/#content-type
    ('application/sparql-results+json', '.srj'),
    # http://www.w3.org/TR/2013/REC-rdf-sparql-XMLres-20130321/#mime
    ('application/sparql-results+xml', '.srx'),
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    ('application/vnd.geo+json', '.geojson'),
    # http://www.w3.org/TR/wsdl20/#ietf-draft
    ('application/wsdl+xml', '.wsdl'),
    # http://blogs.msdn.com/b/jaimer/archive/2008/01/04/mime-types.aspx
    ('application/x-msaccess', '.mdb'),
    # http://en.wikipedia.org/wiki/Torrent_file
    ('application/x-bittorrent', '.torrent'),
    # http://en.wikipedia.org/wiki/Proxy_auto-config
    ('application/x-ns-proxy-autoconfig', '.pac'),
    # http://fileformats.archiveteam.org/wiki/SAV
    ('application/x-spss-sav', '.sav'),
    # http://support.sas.com/resources/papers/proceedings13/115-2013.pdf
    # http://help.dottoro.com/lapuadlp.php
    ('application/x-sas', '.sas'),
    # http://www.sgmjournals.org/site/misc/suppfiletypes.xhtml
    # http://help.dottoro.com/lapuadlp.php
    ('application/x-stata', '.dta'),
    # http://www-01.ibm.com/support/knowledgecenter/SSLTBW_1.12.0/com.ibm.zos.r12.dgwa400/imwziu181183.htm
    # http://help.dottoro.com/lapuadlp.php
    ('application/x-troff-man', '.man'),
    # http://inspire.ec.europa.eu/media-types/
    ('application/x-ascii-grid', '.grd'),
    ('application/x-filegdb', '.gdb'),
    ('application/x-worldfile', '.tfw'),
    # https://github.com/qgis/QGIS/blob/master/debian/mime/application
    ('application/x-esri-crs', '.prj'),
    ('application/x-esri-shape', '.shx'),
    ('application/x-mapinfo-mif', '.mif'),
    ('application/x-qgis-project', '.qgs'),
    ('application/x-raster-ecw', '.ecw'),
    # http://www.zamzar.com/convert/cdr-to-eps/
    ('image/x-cdr', '.cdr'),
    # http://reference.wolfram.com/language/ref/format/LWO.html
    ('image/x-lwo', '.lwo'),
    # http://communities.bentley.com/products/projectwise/content_management/w/wiki/5617
    ('image/vnd.dgn', '.dgn'),
    # No media type found, so minting:
    # http://www.gdal.org/drv_s57.html
    ('application/x-s57', '.000'),
    # http://mcmcweb.er.usgs.gov/sdts/
    ('application/x-sdts', '.ddt'),
    # http://webhelp.esri.com/arcgisdesktop/9.3/index.cfm?TopicName=gridfloat
    ('application/x-gridfloat', '.flt'),
    # http://en.wikipedia.org/wiki/GRIB
    ('application/x-greb', '.grb'),
    # http://webhelp.esri.com/arcgisexplorer/900/en/add_arcgis_layers.htm
    ('application/x-lyr', '.lpk'),
    ('application/x-lyr', '.lyr'),
    # http://wiki.openstreetmap.org/wiki/PBF_Format
    ('application/x-pbf', '.pbf'),
    # http://en.wikipedia.org/wiki/SEG_Y
    ('application/x-segy', '.sgy'),
    # http://pxr.r-forge.r-project.org/
    ('application/x-pc-axis', '.px'),
)
# Maps a generic media type to the more specific types it may stand for.
ambiguous_media_types = {
    'application/json': [
        'application/vnd.geo+json',
    ],
    'application/xml': [
        'application/atom+xml',
        'application/gml+xml',
        'application/rdf+xml',
        'application/rss+xml',
        'application/soap+xml',
        'application/vnd.ogc.se_xml',
        'application/vnd.ogc.wms_xml',
        'application/wsdl+xml',
        'application/xslt+xml',
    ],
    'application/zip': [
        'application/x-shapefile',
        'application/x-tab',
    ],
}
# Media types without an extension or with an extension for which there is already a media type.
# Flattens all specific alternatives from ambiguous_media_types into one set.
valid_media_types = frozenset(
    media_type
    for alternatives in ambiguous_media_types.values()
    for media_type in alternatives
)
# Maps normalized (lowercased, whitespace-collapsed) raw format strings to
# corrected media types; looked up by guess_type() after normalization.
format_corrections = {
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc4287#section-7
    'atom 1.0': 'application/atom+xml',
    'atom+xml': 'application/atom+xml',
    'xml (atom)': 'application/atom+xml',
    # http://reference.wolfram.com/language/ref/format/DBF.html
    'dbase': 'application/dbf',
    'ms dbase file': 'application/dbf',
    'ms dbase table': 'application/dbf',
    # http://portal.opengeospatial.org/files/?artifact_id=37743
    # http://en.wikipedia.org/wiki/Web_Feature_Service
    # GML is default, but SHP is supported.
    'text/wfs': 'application/gml+xml',
    'wfs': 'application/gml+xml',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc7158#section-11
    'applicaton/json': 'application/json',
    'rest json': 'application/json',
    'text/javascript': 'application/json',
    'text/json': 'application/json',
    # http://resources.arcgis.com/en/help/rest/apiref/formattypes.html
    # HTML is default, but JSON and XML are available.
    'arcgis map service': 'application/json',
    'arcgis server': 'application/json',
    'arcgis server rest': 'application/json',
    'esri rest': 'application/json',
    # http://www.odata.org/documentation/odata-version-4-0/
    'odata webservice': 'application/json',
    # http://blogs.msdn.com/b/jaimer/archive/2008/01/04/mime-types.aspx
    'application/accdb': 'application/msaccess',
    'application/vnd.ms-access': 'application/msaccess',
    'msaccess': 'application/msaccess',
    # http://blogs.msdn.com/b/vsofficedeveloper/archive/2008/05/08/office-2007-open-xml-mime-types.aspx
    'application/vnd.ms-word': 'application/msword',
    'word': 'application/msword',
    # http://www.w3.org/TR/n-triples/#n-triples-mediatype
    'n-triple': 'application/n-triples',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc2046#section-4.5.1
    'application/octet-string': 'application/octet-stream',
    'application/octet_stream': 'application/octet-stream',
    'application/x-octet-stream': 'application/octet-stream',
    'binary/octet-stream': 'application/octet-stream',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc6713#section-3
    'application/x-gzip': 'application/gzip',
    'gzip': 'application/gzip',
    'tgz': 'application/gzip',
    'txt / gz': 'application/gzip',
    # http://pxr.r-forge.r-project.org/
    'pc-axis': 'application/x-pc-axis',
    'text/pc-axis': 'application/x-pc-axis',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc3778
    '0_v2 / pdf': 'application/pdf',
    'aplication/pdf': 'application/pdf',
    'geopdf': 'application/pdf',
    'pdf / pdf': 'application/pdf',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc3870
    'application/xml+rdf': 'application/rdf+xml',
    'image/rdf': 'application/rdf+xml',
    'rdf-xml': 'application/rdf+xml',
    'skos rdf': 'application/rdf+xml',
    'skos webservice': 'application/rdf+xml',
    'text/rdf': 'application/rdf+xml',
    # http://www.rssboard.org/rss-mime-type-application.txt
    'ensearch api': 'application/rss+xml',
    'feed': 'application/rss+xml',
    'rss 1.0': 'application/rss+xml',
    'rss 2.0': 'application/rss+xml',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # https://tools.ietf.org/html/rfc3902
    'soap': 'application/soap+xml',
    'soap+xml': 'application/soap+xml',
    # http://www.w3.org/TR/sparql11-results-json/#content-type
    'sparql-json': 'application/sparql-results+json',
    # http://www.w3.org/TR/2013/REC-rdf-sparql-XMLres-20130321/#mime
    'api/sparql': 'application/sparql-results+xml',
    'sparql': 'application/sparql-results+xml',
    'sparql-xml': 'application/sparql-results+xml',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    'kml/google maps': 'application/vnd.google-earth.kml+xml',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    'kml / kmz': 'application/vnd.google-earth.kmz',
    'kml/kmz': 'application/vnd.google-earth.kmz',
    'xml/kml/kmz': 'application/vnd.google-earth.kmz',
    # http://blogs.msdn.com/b/vsofficedeveloper/archive/2008/05/08/office-2007-open-xml-mime-types.aspx
    'application/excel': 'application/vnd.ms-excel',
    'application/ms-excel': 'application/vnd.ms-excel',
    'application/msexcel': 'application/vnd.ms-excel',
    'application/vnd.excel': 'application/vnd.ms-excel',
    'application/vnd.msexcel': 'application/vnd.ms-excel',
    'application/x-msexcel': 'application/vnd.ms-excel',
    'application/xls': 'application/vnd.ms-excel',
    'doc_xls': 'application/vnd.ms-excel',
    'excel file': 'application/vnd.ms-excel',
    'excel': 'application/vnd.ms-excel',
    'xls via website': 'application/vnd.ms-excel',
    'xl': 'application/vnd.ms-excel',
    # http://blogs.msdn.com/b/vsofficedeveloper/archive/2008/05/08/office-2007-open-xml-mime-types.aspx
    'application/vnd.ms-excel.macroenabled.12': 'application/vnd.ms-excel.sheet.macroEnabled.12',
    'application/xlsm': 'application/vnd.ms-excel.sheet.macroEnabled.12',
    # http://blogs.msdn.com/b/jaimer/archive/2008/01/04/mime-types.aspx
    'application/vnd.ms-pkistl': 'application/vnd.ms-pki.stl',
    # http://blogs.msdn.com/b/vsofficedeveloper/archive/2008/05/08/office-2007-open-xml-mime-types.aspx
    'application/x-mspowerpoint': 'application/vnd.ms-powerpoint',
    # http://blogs.msdn.com/b/vsofficedeveloper/archive/2008/05/08/office-2007-open-xml-mime-types.aspx
    'application/docm': 'application/vnd.ms-word.document.macroEnabled.12',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    'application/x-vnd.oasis.opendocument.presentation': 'application/vnd.oasis.opendocument.presentation',
    # http://docs.geoserver.org/stable/en/user/services/wms/reference.html
    'text/wms': 'application/vnd.ogc.wms_xml',
    'wms': 'application/vnd.ogc.wms_xml',
    'wms_xml': 'application/vnd.ogc.wms_xml',
    # http://blogs.msdn.com/b/vsofficedeveloper/archive/2008/05/08/office-2007-open-xml-mime-types.aspx
    'application/pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
    # http://blogs.msdn.com/b/vsofficedeveloper/archive/2008/05/08/office-2007-open-xml-mime-types.aspx
    'application/vnd.ms-excel.12': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    'application/xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    'excel (.xlsx)': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    'excel (xlsx)': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    'openxml': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    # http://blogs.msdn.com/b/vsofficedeveloper/archive/2008/05/08/office-2007-open-xml-mime-types.aspx
    'application/doc': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
    'application/docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
    'ms word': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
    # http://en.wikipedia.org/wiki/7z
    'lzma': 'application/x-7z-compressed',
    'parco_veicoli_aprile_2014.7z': 'application/x-7z-compressed',
    'parco_veicoli_gennaio_2013.7z': 'application/x-7z-compressed',
    # http://inspire.ec.europa.eu/media-types/
    'arcgis grid format': 'application/x-ascii-grid',
    'arcgrid': 'application/x-ascii-grid',
    'arcinfo grid': 'application/x-ascii-grid',
    'arcinfo workstation grid': 'application/x-ascii-grid',
    'ascii grid': 'application/x-ascii-grid',
    'ascii-grid (arcinfo)': 'application/x-ascii-grid',
    'esri arc ascii': 'application/x-ascii-grid',
    'esri grid': 'application/x-ascii-grid',
    'grid esri': 'application/x-ascii-grid',
    'grid': 'application/x-ascii-grid',
    'raster data set (.grd)': 'application/x-ascii-grid',
    # https://github.com/qgis/QGIS/blob/master/debian/mime/application
    'mif / mid': 'application/x-mapinfo-mif',
    'mif−mid': 'application/x-mapinfo-mif',
    # http://resources.arcgis.com/en/help/main/10.1/index.html#//018s0000000n000000
    'access': 'application/x-msaccess',
    'access.mdb': 'application/x-msaccess',
    'application/mdb': 'application/x-msaccess',
    'arcgis personal geodatabase': 'application/x-msaccess',
    'personal geodatabase feature class': 'application/x-msaccess',
    'personal geodatabase': 'application/x-msaccess',
    # http://en.wikipedia.org/wiki/NetCDF
    'nc(netcdf)': 'application/x-netcdf',
    'netcdf': 'application/x-netcdf',
    'netcdf3': 'application/x-netcdf',
    # http://inspire.ec.europa.eu/media-types/
    'arcgis file geodatabase': 'application/x-filegdb',
    'arcgis geodatabase': 'application/x-filegdb',
    'esri geodatabase feature class': 'application/x-filegdb',
    'fgdb': 'application/x-filegdb',
    'fgdb / gdb': 'application/x-filegdb',
    'file geo-database (.gdb)': 'application/x-filegdb',
    'file geodatabase': 'application/x-filegdb',
    'ftp site with zipped esri file geodabases': 'application/x-filegdb',
    'gdb (esri)': 'application/x-filegdb',
    'geodatabase': 'application/x-filegdb',
    'zip:esri_fgdb': 'application/x-filegdb',
    # http://en.wikipedia.org/wiki/RAR
    'application/rar': 'application/x-rar-compressed',
    'rar+sas': 'application/x-rar-compressed',
    # https://github.com/qgis/QGIS/blob/master/debian/mime/application
    'application/ecw': 'application/x-raster-ecw',
    'segy': 'application/x-segy',
    # http://inspire.ec.europa.eu/media-types/
    'application/x-zipped-shp': 'application/x-shapefile',
    'arcgis shapefile': 'application/x-shapefile',
    'esri shape file': 'application/x-shapefile',
    'esri shapefile': 'application/x-shapefile',
    'qgis': 'application/x-shapefile',
    'shape file': 'application/x-shapefile',
    'shape': 'application/x-shapefile',
    'shapefile': 'application/x-shapefile',
    'shapefiler': 'application/x-shapefile',
    'shp': 'application/x-shapefile',
    'shp (cc47)': 'application/x-shapefile',
    'shp (cc48)': 'application/x-shapefile',
    'shp (l93)': 'application/x-shapefile',
    'shp (wgs84)': 'application/x-shapefile',
    'shp / zip': 'application/x-shapefile',
    'tgrshp (compressed)': 'application/x-shapefile',
    'winzipped shapefile': 'application/x-shapefile',
    'zip (shp)': 'application/x-shapefile',
    'zip:shape': 'application/x-shapefile',
    # http://fileformats.archiveteam.org/wiki/SAV
    'application/x-spss': 'application/x-spss-sav',
    # http://inspire.ec.europa.eu/media-types/
    'tab': 'application/x-tab',
    # http://inspire.ec.europa.eu/media-types/
    'tiff world file': 'application/x-worldfile',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc7303
    # http://tools.ietf.org/html/rfc3023#section-3
    'appication/xml': 'application/xml',
    'application;xml': 'application/xml',
    'text/xml': 'application/xml',
    # http://www.openarchives.org/pmh/
    'oai-pmh (xml repons)': 'application/xml',
    'oai-pmh (xml respons)': 'application/xml',
    'oai-pmh en sru (respons in xml)': 'application/xml',
    'oai-pmh': 'application/xml',
    'xml: oai-pmh dublin core': 'application/xml',
    # http://wiki.openstreetmap.org/wiki/OSM_XML
    'osm': 'application/xml',
    'xml osm': 'application/xml',
    # http://en.wikipedia.org/wiki/Web_Coverage_Service
    # The protocol uses XML, but data is available in multiple formats.
    'wcs': 'application/xml',
    # http://en.wikipedia.org/wiki/XML_Schema_%28W3C%29
    'xsd': 'application/xml',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    'application/x-zip-compressed': 'application/zip',
    'uso_suolo_dusaf4_2012_polygon.zip': 'application/zip',
    'zip (gpx)': 'application/zip',
    'zip (pdf)': 'application/zip',
    'zip (sql + jpeg)': 'application/zip',
    'zip (sql)': 'application/zip',
    'zip / xml': 'application/zip',
    'zip | kml en json': 'application/zip',
    'zip | shape-files + excel': 'application/zip',
    'zip(pdf)': 'application/zip',
    'zip+pdf': 'application/zip',
    'zip+sas': 'application/zip',
    'zip+sav': 'application/zip',
    'zip+shp': 'application/zip',
    'zip+txt': 'application/zip',
    'zip+xls': 'application/zip',
    'zip+xml': 'application/zip',
    'zip: spss': 'application/zip',
    'zip: xls': 'application/zip',
    'zip:gml': 'application/zip',
    'zip:json': 'application/zip',
    'zip:mdb': 'application/zip',
    'zip:xml en csv': 'application/zip',
    'zip:xml': 'application/zip',
    'zipped esri file geodatabase': 'application/zip',
    # CSV
    'application/zip+text/csv': 'application/zip',
    'csv (inside zip)': 'application/zip',
    'csv (zip)': 'application/zip',
    'csv / zip': 'application/zip',
    'csv.zip': 'application/zip',
    'zip (csv utf8)': 'application/zip',
    'zip (csv)': 'application/zip',
    'zip file containing csv files': 'application/zip',
    'zip file containing multiple csv files.': 'application/zip',
    'zip+csv': 'application/zip',
    # http://www.geobase.ca/geobase/en/data/cded/description.html
    'cdec ascii': 'application/zip',
    # https://developers.google.com/transit/gtfs/reference
    'gtfs': 'application/zip',
    # http://reference.wolfram.com/language/ref/format/BMP.html
    'application/bmp': 'image/bmp',
    # http://communities.bentley.com/products/projectwise/content_management/w/wiki/5617
    'dgn': 'image/vnd.dgn',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    'application/dxf': 'image/vnd.dxf',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc2046#section-4.2
    'application/jpg': 'image/jpeg',
    'image/jpg': 'image/jpeg',
    'jpeg (cc48)': 'image/jpeg',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    'jpeg 2000': 'image/jp2',
    # http://en.wikipedia.org/wiki/MrSID
    'image/x-mrsid-image': 'image/x-mrsid-image',
    'mrsid': 'image/x-mrsid-image',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    'application/tif': 'image/tiff',
    'image/tif': 'image/tiff',
    'multi-page tiff': 'image/tiff',
    'single-page tiff': 'image/tiff',
    'tiff (cc48)': 'image/tiff',
    # http://inspire.ec.europa.eu/media-types/
    'image/geotiff': 'image/tiff',
    'geotif': 'image/tiff',
    'geotiff': 'image/tiff',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc5545#section-8.1
    'calendar': 'text/calendar',
    'icalendar': 'text/calendar',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc4180#section-3
    'application/csv': 'text/csv',
    'application/cvs': 'text/csv',
    'aug 2014 / csv': 'text/csv',
    'csv"': 'text/csv',
    'csv(txt)': 'text/csv',
    'csv-semicolon delimited': 'text/csv',
    'csv-tab delimited': 'text/csv',
    'csv/api': 'text/csv',
    'csv/txt': 'text/csv',
    'csv/utf8': 'text/csv',
    'csv/webservice/api': 'text/csv',
    'jul 2014 / csv': 'text/csv',
    'link_csv': 'text/csv',
    'text (csv)': 'text/csv',
    'text(csv)': 'text/csv',
    'text/comma-separated-values': 'text/csv',
    'text/cvs': 'text/csv',
    'text/x-comma-separated-values': 'text/csv',
    'text;csv': 'text/csv',
    'txt/cvs': 'text/csv',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    'application/htm': 'text/html',
    'application/html': 'text/html',
    'arcgis map preview': 'text/html',
    'arcgis online map': 'text/html',
    'hmtl': 'text/html',
    'home page': 'text/html',
    'html+rdfa': 'text/html',
    'html5': 'text/html',
    'link': 'text/html',
    'link_html': 'text/html',
    'map portal': 'text/html',
    'portal': 'text/html',
    'query tool': 'text/html',
    'search, view & download data': 'text/html',
    'sparql web form': 'text/html',
    'texl/html': 'text/html',
    'text/htm': 'text/html',
    'web': 'text/html',
    'web browser display': 'text/html',
    'web page': 'text/html',
    'web site': 'text/html',
    'web tool': 'text/html',
    'web-interface': 'text/html',
    'webinterface': 'text/html',
    'webpage': 'text/html',
    'website': 'text/html',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://tools.ietf.org/html/rfc2046#section-4.1.3
    'ascii': 'text/plain',
    'dat': 'text/plain',
    'fixed-length ascii text': 'text/plain',
    'plain': 'text/plain',
    'text/ascii': 'text/plain',
    'text/txt': 'text/plain',
    'texte': 'text/plain',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://www.w3.org/TeamSubmission/n3/
    'rdf-n3': 'text/n3',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    'application/rtf': 'text/rtf',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    'text/tab': 'text/tab-separated-values',
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    # http://www.w3.org/TeamSubmission/turtle/#sec-mime
    'rdf-turtle': 'text/turtle',
    'rdf/turtle': 'text/turtle',
    'turtle': 'text/turtle',
    # http://en.wikipedia.org/wiki/Flash_Video
    'flash': 'video/x-flv',
}
# Media types that are as good as nil.
# Values here carry no usable format information and are discarded by
# ignore_type().
ignore_media_types = frozenset([
    '""',
    'all',
    'api',
    'app',
    'application/api',
    'application/octet-stream',
    'application/unknown',
    'application/x-unknown-content-type',
    'binary',
    'cd-rom',
    'data file',
    'geospatial',
    'edi',  # http://en.wikipedia.org/wiki/Electronic_data_interchange
    'export',
    'image',
    'img',
    'map',
    'meta/void',
    'n/a',
    'no-type',
    'octet stream',
    'rest',
    'service',
    'tool',
    'unknown/unknown',
    'upon request',
    'url',
    'variable',
    'varies',
    'varies upon user output',
    'various',
    'various formats',
    'viewservice',
    'webservice',
    'widget',
    'wmf & wfs',
    'wms & wfs',
    # Download
    'application/force-download',
    'application/save',
    'application/x-download',
    'force-download',
    # Error
    '15 kb',
    # Multiple (semi-colon)
    'csv/txt; pdf',
    'csv/txt; sgml; xml',
    'csv/txt; sgml; xml',
    'csv/txt; xml; tiff',
    'sgml; xml; tiff',
    'web service (xml); web service (ansi z39.50), images, documents',
    'xml; tiff',
    'xml; tiff',
    # Multiple (slash)
    'pdf/webpages',
    'xml/tiff/jpeg',
    'xml/tiff/jpg',
    # Multiple (conjunction)
    'api: xml en jpeg',
    'xls en csv',
    # Other
    'altro',  # IT
    'autre',  # FR
    'other',  # EN
    # Programming language
    'ashx',  # ASP.NET
    'asp',  # Active Server Pages
    'aspx',  # ASP.NET
    'axd',  # ASP.NET
    'do',  # Java Struts
    'jsp',  # JavaServer Pages
    'php',  # PHP
    'shtml',  # Server Side Includes
    # Software
    'mobile application',
    'cross-platform java-based desktop software',
    # No standard media type
    # @see http://en.wikipedia.org/wiki/Shapefile
    'sbn',
    'sbx',
])
| opennorth/inventory | inventory/management/commands/normalize.py | Python | mit | 42,922 | [
"NetCDF"
] | f47f70b3ffec72a4daa6559df4abdb9b6c278c1af384f844cc162e4047f59d24 |
"""A setuptools based setup module"""
#Always prefer setuptools over distutils
from setuptools import setup, find_packages
#To use a consistent encoding
from codecs import open
from os import path,system
# Directory containing this setup script; used to resolve README.md.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# The version is defined once in netpyne/__init__.py and imported here so
# packaging and runtime code agree on it.
import netpyne
version = netpyne.__version__
import sys
# Pseudo-commands: "python setup.py upload_via_twine[...]" shells out to
# twine to upload a previously built wheel instead of running setuptools.
if 'upload_via_twine' in sys.argv:
    # NOTE(review): os.system with a composed string — acceptable for a
    # trusted local release script; subprocess.run would be more robust.
    system('twine upload dist/netpyne-'+version+'-py2.py3-none-any.whl')
elif 'upload_via_twine_testpypi' in sys.argv:
    system('twine upload --repository pypitest dist/netpyne_py3-'+version+'-py2.py3-none-any.whl')
else:
    setup(
        name = 'netpyne',
        version = version, # update this in netpyne/__init__.py; makes it accessible to python scripts too...
        description = 'A Python package to develop, simulate and analyse biological neuronal networks in NEURON.',
        long_description = long_description,
        # python_requires='>=2.7, >=3.6', # removed since makes py2 install fail with universal wheel
        # The project's main homepage.
        url = 'https://github.com/Neurosim-lab/netpyne',
        # Author details
        author = 'Salvador Dura-Bernal (Neurosim lab)',
        author_email = 'salvadordura@gmail.com',
        # Choose license
        license = 'MIT',
        # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
        classifiers=[
            # How mature is this project? Common values are
            #   3 - Alpha
            #   4 - Beta
            #   5 - Production/Stable
            'Development Status :: 4 - Beta',
            # Indicate who your project is intended for
            'Intended Audience :: Science/Research',
            'Topic :: Scientific/Engineering :: Visualization',
            # Pick your license as you wish (should match "license" above)
            'License :: OSI Approved :: MIT License',
            # Specify the Python versions you support here. In particular, ensure
            # that you indicate whether you support Python 2, Python 3 or both.
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
        ],
        # What does project relate to?
        keywords = ['neuron','network','developing','framework','biological', 'simulation'],
        # You can just specify the packages manually here if your project is
        # simple. Or you can use find_packages().
        packages = find_packages(exclude=['saveLoadV1']),
        # List run-time dependencies here. These will be installed by pip when
        # your project is installed. For an analysis of "install_requires" vs pip's
        # requirements files see:
        # https://packaging.python.org/en/latest/requirements.html
        install_requires=['numpy', 'scipy', 'matplotlib>2.2', 'matplotlib-scalebar', 'future', 'pandas'],
        # List additional groups of dependencies here (e.g. development
        # dependencies). You can install these using the following syntax,
        # for example:
        # $ pip install -e .[dev,test]
        extras_require={},
        # If there are data files included in your packages that need to be
        # installed, specify them here. If using Python 2.6 or less, then these
        # have to be included in MANIFEST.in as well.
        package_data={},
        # Although 'package_data' is the preferred approach, in some case you may
        # need to place data files outside of your packages. See:
        # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
        # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
        data_files=[],
        # To provide executable scripts, use entry points in preference to the
        # "scripts" keyword. Entry points provide cross-platform support and allow
        # pip to create the appropriate form of executable for the target platform.
        entry_points={},
    )
| thekerrlab/netpyne | setup.py | Python | mit | 4,303 | [
"NEURON"
] | 496b3399fb75238ce5f82a7c5b323a0e1e7a95f7bf86b9ff9dd1791e1299c8ce |
'''
@author: Victor Barrera
Description: This scripts makes use of the siq module to obtain the methylation value for each CpG island region.
'''
import sys
import pysam
import re
from siq import *
# Obtain the CpG island sequence file,
# the sam-file and the minimum read filter
# Usage: CpGMethStatus.py <cpgi_sequence_file> <bam_file> <min_read_filter>
# NOTE(review): `filter` below shadows the Python builtin of the same name,
# and `id`/`chr` in the loop shadow builtins too; kept as-is.
cpgi_sec_path=sys.argv[1]
sam_path=sys.argv[2]
filter=int(sys.argv[3])
cpgi_sec_file=open(cpgi_sec_path,'r')
samfile = pysam.Samfile(sam_path, "rb" )
# Each input line describes one CpG island:
# <id> <chrom> <start> <end> <sequence>  (whitespace separated)
for cpgi in cpgi_sec_file:
    # For each CpG island structural data is obtained and the relative
    # positions for the CG dinucleotide are obtained.
    id=cpgi.split()[0]
    chr=str(cpgi.split()[1])
    startPosition=int(cpgi.split()[2])
    endPosition=int(cpgi.split()[3])
    cpgisec=(cpgi.split()[4])
    cpgisec=cpgisec.upper();
    # Relative offsets of every CG dinucleotide within the island sequence.
    starts = [match.start() for match in re.finditer('CG',cpgisec)]
    CG_coord=[]
    nCG=0
    for i in starts:
        # The absolute positions for the C and the G of each dinucleotide,
        # formatted as "chrom\tposition" strings.
        c_pos=str(chr)+"\t"+str(int(i)+startPosition-1)
        g_pos=str(chr)+"\t"+str(int(i)+startPosition)
        CG_coord.append(c_pos)
        CG_coord.append(g_pos)
        nCG+=1
    # An object of the class Meth_region (from siq) is generated.
    cgi=Meth_region(CG_coord,samfile)
    # Emit: id, CG count, chrom, start, end, then the methylation stats
    # (trailing comma keeps both prints on one output line; Python 2 syntax).
    print str(id)+"\t"+str(nCG)+"\t"+str(chr)+"\t"+str(startPosition)+"\t"+str(endPosition)+"\t",
    # A call to obtain the mean and the standard deviation is done.
    print "%i\t%.2f\t%.2f\t%i" %(cgi.methcoef_sd(filter))
| vbarrera/thesis | Genomic_Evaluation_of_individual_CpG_Methylation/Python/CpGMethStatus.py | Python | gpl-2.0 | 1,440 | [
"pysam"
] | 1ffe6fb97d08294bd08ff7e4a458dd0585d0aef2e42fe2ca6bc8264c9af88049 |
__author__ = 'mjsul_000'
import pysam
import sys
import subprocess
class variation:
    """One VCF variant record: location, alleles and call quality.

    Extra per-record attributes (depth, altcount, refcount, altrat) are
    attached later by the caller while parsing the VCF INFO field.
    """

    def __init__(self, chrom, pos, ref, alt, qual):
        self.chrom = chrom      # reference sequence name
        self.pos = int(pos)     # variant position (VCF is 1-based)
        self.ref = ref          # reference allele string
        self.alt = alt          # comma-separated alternate allele(s)
        # Bug fix: `qual` arrives as a string from line.split(); storing it
        # verbatim made the later numeric filter `qual >= minsnpqual`
        # compare a str against an int (always True in Python 2, TypeError
        # in Python 3), so the quality cutoff never actually applied.
        self.qual = float(qual)
def groupflow(samfile, vcffile, outfile, minreadsvar=2, minreadsflow=2, minsnpqual=0):
    """Separate reads of a mixed two-strain alignment into 'dominant' and
    'secondary' strain BAM files by following read 'flows' (reads shared
    between alleles at neighbouring variant sites).

    samfile      -- sorted, indexed BAM alignment
    vcffile      -- freebayes VCF of candidate variants
    outfile      -- prefix for all output files (<prefix>.txt, .dom.bam, ...)
    minreadsvar  -- minimum reads supporting an allele for it to count
    minreadsflow -- minimum shared reads linking alleles at adjacent sites
    minsnpqual   -- minimum VCF QUAL for a variant to be considered

    Returns 0 when pysam cannot be imported; otherwise writes the output
    files and returns None.  Sets module globals halfmed/onehalfmed and
    recombblocklist for the later callStrains() pass.
    """
    try:
        import pysam
    except:
        return 0
    snps = []
    vcf = open(vcffile)
    sys.stdout.write('Reading vcf file...')
    sys.stdout.flush()
    # get variants from VCF file
    for line in vcf:
        if not line.startswith('#'):
            chrom, pos, id, ref, alt, qual, filt, info, form, unknown = line.split()
            aninstance = variation(chrom, pos, ref, alt, qual)
            # Pull depth/allele counts/allele balance out of the INFO field.
            for i in info.split(';'):
                if i.startswith('DP='):
                    aninstance.depth = int(i.split('=')[1])
                if i.startswith('AO='):
                    if ',' in i:
                        aninstance.altcount = int(i.split('=')[1].split(',')[0])
                    else:
                        aninstance.altcount = int(i.split('=')[1])
                if i.startswith('RO='):
                    aninstance.refcount = int(i.split('=')[1])
                if i.startswith('AB='):
                    if ',' in i:
                        aninstance.altrat = float(i.split('=')[1].split(',')[0])
                    else:
                        aninstance.altrat = float(i.split('=')[1])
            if aninstance.qual >= minsnpqual:
                snps.append(aninstance)
    sys.stdout.write(str(len(snps)) + ' snps found.\n')
    sam = pysam.Samfile(samfile, 'rb')
    variantmax = 0
    lastfirreads = set()
    lastsecreads = set()
    firblock = None
    secblock = None
    refblock = None
    printcount = 0
    blocks = []
    breaks = []
    novar, threevar, recombinants, forked, channelled, properflow, singflow = 0, 0, 0, 0, 0, 0, 0
    recombpos = []
    thecovlist = []
    sys.stdout.write('Finding flows...')
    sys.stdout.flush()
    lastthree = False
    depths = []
    # First pass over the pileup to compute the median coverage; the
    # half / one-and-a-half median values are used as coverage cutoffs.
    # NOTE: Python 2 integer division is relied on for the median index.
    for pileupcolumn in sam.pileup():
        depths.append(pileupcolumn.n)
    mediancov = depths[len(depths)/2]
    global halfmed, onehalfmed
    halfmed = mediancov / 2
    onehalfmed = mediancov * 3 / 2
    if halfmed < 5:
        halfmed = 5
    # Main pass: at each SNP, group reads by allele and link alleles to the
    # previous site through shared read names ('flows'), building haplotype
    # blocks (firblock/secblock) that are broken on gaps or recombination.
    for snp in snps:
        covcount = 0
        for pileupcolumn in sam.pileup(snp.chrom, snp.pos, snp.pos + 1): # Find all pileup columns assosciated with position
            if pileupcolumn.pos == snp.pos - 1: # find the specific pileup column at the position of interest
                varlength = len(snp.ref) # find the amount of pilup columns assosciated with the variation called by freebayes
                variants = snp.alt.split(',') + [snp.ref] # list all potential variants at this site
                variantreads = [set() for i in range(len(variants))] # sets for putting reads assosciated with each variant in
                for pileupread in pileupcolumn.pileups: # iterate over all reads aligning to that base
                    covcount += 1
                    count = 0
                    rvar = pileupread.alignment.seq[pileupread.qpos:pileupread.qpos +
                        pileupread.alignment.overlap(pileupcolumn.pos, pileupcolumn.pos + varlength)] # read sequence at variant position
                    for i in variants:
                        if rvar == i:
                            variantreads[count].add(pileupread.alignment.qname)
                        count += 1
                flowa = []
                flowb = []
                count = 0
                variations = 0
                max1c = 0
                max1 = None
                max2c = 0
                max2 = None
                maxflow = 0
                # Track the two best-supported alleles (max1/max2) and which
                # alleles are connected to the previous site's read sets.
                for i in variantreads:
                    if len(i) >= minreadsvar:
                        if len(i) > max1c:
                            max2c = max1c
                            max2 = max1
                            max1c = len(i)
                            max1 = count
                        elif len(i) > max2c:
                            max2c = len(i)
                            max2 = count
                        variations += 1
                        firflow = len(lastfirreads.intersection(i))
                        if firflow >= minreadsflow:
                            flowa.append('fir')
                            flowb.append(count)
                        secflow = len(lastsecreads.intersection(i))
                        if firflow > maxflow:
                            maxflow = firflow
                        if secflow >= maxflow:
                            maxflow = secflow
                        if secflow >= minreadsflow:
                            flowa.append('sec')
                            flowb.append(count)
                    count += 1
                breakit = False
                if variations >= variantmax:
                    variantmax = variations
                if variations == 0: # if there is anything other than two variants at this site skip to next site
                    novar += 1
                    break
                elif variations == 1:
                    break
                elif variations == 2:
                    if lastthree:
                        breakit = True
                    lastthree = False
                elif variations >= 3:
                    threevar += 1
                    lastthree = True
                    break
                # No usable flow: close the current block and start a new one.
                if len(flowa) == 0 or (len(flowa) == 1 and maxflow <= 5):
                    if not firblock is None:
                        blocks.append((firblock, secblock, refblock))
                        breaks.append('gap')
                    base = variants[max1]
                    firblock = [(snp.pos, base, len(variantreads[max1]))]
                    base = variants[max2]
                    secblock = [(snp.pos, base, len(variantreads[max2]))]
                    refblock = [snp.ref]
                    lastfirreads = variantreads[max1]
                    lastsecreads = variantreads[max2]
                elif len(flowa) == 1 and not breakit:
                    # Only one strain could be linked; infer the other from
                    # the remaining best-supported allele.
                    singflow += 1
                    if flowa[0] == 'fir':
                        base = variants[flowb[0]]
                        firblock.append((snp.pos, base, len(variantreads[flowb[0]])))
                        if flowb[0] == max1:
                            base = variants[max2]
                            secblock.append((snp.pos, base, len(variantreads[max2])))
                            refblock.append(snp.ref)
                            lastfirreads = variantreads[max1]
                            lastsecreads = variantreads[max2]
                        else:
                            base = variants[max1]
                            secblock.append((snp.pos, base, len(variantreads[max1])))
                            refblock.append(snp.ref)
                            lastfirreads = variantreads[max2]
                            lastsecreads = variantreads[max1]
                    else:
                        base = variants[flowb[0]]
                        secblock.append((snp.pos, base, len(variantreads[flowb[0]])))
                        refblock.append(snp.ref)
                        if flowb[0] == max1:
                            base = variants[max2]
                            firblock.append((snp.pos, base, len(variantreads[max2])))
                            lastfirreads = variantreads[max2]
                            lastsecreads = variantreads[max1]
                        else:
                            base = variants[max1]
                            firblock.append((snp.pos, base, len(variantreads[max1])))
                            lastfirreads = variantreads[max1]
                            lastsecreads = variantreads[max2]
                elif len(flowa) == 2 and not len(set(flowa)) == 1 and not len(set(flowb)) == 1 and not breakit:
                    # Clean two-way flow: both strains link unambiguously.
                    properflow += 1
                    if flowa[0] == 'fir':
                        base = variants[flowb[0]]
                        firblock.append((snp.pos, base, len(variantreads[flowb[0]])))
                        base = variants[flowb[1]]
                        secblock.append((snp.pos, base, len(variantreads[flowb[1]])))
                        refblock.append(snp.ref)
                        lastfirreads = variantreads[flowb[0]]
                        lastsecreads = variantreads[flowb[1]]
                    else:
                        base = variants[flowb[0]]
                        secblock.append((snp.pos, base, len(variantreads[flowb[0]])))
                        refblock.append(snp.ref)
                        base = variants[flowb[1]]
                        firblock.append((snp.pos, base, len(variantreads[flowb[1]])))
                        lastfirreads = variantreads[flowb[1]]
                        lastsecreads = variantreads[flowb[0]]
                else: # more than 3 flows means recombination is happening at significant levels at this site
                    recombpos.append(snp.pos)
                    recombinants += 1
                    blocks.append((firblock, secblock, refblock))
                    breaks.append('recomb')
                    base = variants[max1]
                    firblock = [(snp.pos, base, len(variantreads[max1]))]
                    base = variants[max2]
                    secblock = [(snp.pos, base, len(variantreads[max2]))]
                    refblock = [snp.ref]
                    lastfirreads = variantreads[max1]
                    lastsecreads = variantreads[max2]
        thecovlist.append(covcount)
    sys.stdout.write(str(singflow + properflow) + ' flows found.\n')
    sys.stdout.write(str(recombinants) + ' recombinants found.\n')
    sys.stdout.write(str(threevar) + ' sites with three variants found.\n')
    sys.stdout.write(str(variantmax) + ' maximum variants at a single site.\n')
    thecovlist.sort()
    sys.stdout.write(str(mediancov) + ' median coverage, ' + str(halfmed) + '-' + str(onehalfmed)\
                     + ' coverage cutoffs being used.\nFlows outside of these coverages will not be used.\n')
    covcut = thecovlist[len(thecovlist) / 2] /2
    dominantreads = set()
    secondaryreads = set()
    out = open(outfile + '.txt', 'w')
    global recombblocklist
    recombblocklist = []
    mids, lowcovblock, goodblocks = 0, 0, 0
    ratios = []
    coveragelist = []
    coverageblocklist = []
    sys.stdout.write('Assigning blocks to strains...')
    sys.stdout.flush()
    # Second pass over the assembled blocks: decide which side of each
    # block is the dominant strain (higher coverage) and collect read names.
    for i in range(len(blocks)):
        firstreads, secreads = set(), set()
        blockcov = []
        blockcova = []
        blockcovb = []
        for j in range(len(blocks[i][0])):
            firvar = blocks[i][0][j][1]
            secvar = blocks[i][1][j][1]
            blockcov.append(blocks[i][0][j][2] + blocks[i][1][j][2])
            blockcova.append(blocks[i][0][j][2])
            blockcovb.append(blocks[i][1][j][2])
            coveragelist.append(blocks[i][0][j][2] + blocks[i][1][j][2])
            # NOTE(review): `snp.chrom` here is the last SNP of the main loop,
            # i.e. this assumes a single-chromosome reference — confirm.
            for pileupcolumn in sam.pileup(snp.chrom, blocks[i][0][j][0], blocks[i][0][j][0] + 1):
                if pileupcolumn.pos == blocks[i][0][j][0] - 1:
                    varlength = len(blocks[i][2][j])
                    for pileupread in pileupcolumn.pileups:
                        rvar = pileupread.alignment.seq[pileupread.qpos:pileupread.qpos +
                            pileupread.alignment.overlap(pileupcolumn.pos, pileupcolumn.pos + varlength)]
                        if rvar == firvar:
                            firstreads.add(pileupread.alignment.qname)
                        elif rvar == secvar:
                            secreads.add(pileupread.alignment.qname)
            outlist = blocks[i][0][j] + blocks[i][1][j][1:] + (blocks[i][2][j],)
            out.write('\t'.join(map(str, outlist)) + '\n')
        recombblock = False
        coverageblocklist.append(max(blockcov))
        if breaks[i] == 'recomb' and i != 0 and breaks[i-1] == 'recomb':
            if blocks[i][0][-1][0] - blocks[i][0][0][0] <= 100: # if the block spans 100 bp or less
                recombblock = True
                recombblocklist.append((blocks[i][0][0][0], blocks[i][0][-1][0]))
        if not recombblock and max(blockcov) >= covcut:
            highcovs = set()
            for j in range(len(blockcov)):
                if onehalfmed >= blockcov[j] and (blockcova[j] >= halfmed or blockcovb[j] >= halfmed):
                    if blockcova[j] > blockcovb[j]:
                        highcovs.add('a')
                        ratios.append(blockcova[j] * 1.0 / blockcov[j])
                    else:
                        highcovs.add('b')
                        ratios.append(blockcovb[j] * 1.0 / blockcov[j])
            if len(highcovs) > 1:
                # Both sides dominate at different sites: inconsistent block.
                print 'whatthe?'
                print blocks[i]
                sys.exit()
            else:
                if 'a' in highcovs:
                    dominantreads.update(firstreads)
                    secondaryreads.update(secreads)
                    goodblocks += 1
                elif 'b' in highcovs:
                    dominantreads.update(secreads)
                    secondaryreads.update(firstreads)
                    goodblocks += 1
                else:
                    mids += 1
        else:
            if not recombblock:
                lowcovblock += 1
        out.write(breaks[i] + '\n')
    sys.stdout.write(str(mids) + ' blocks removed due to indistinguishable coverage between strains.\n')
    sys.stdout.write(str(len(recombblocklist)) + ' blocks under active recombination removed.\n')
    sys.stdout.write(str(lowcovblock) + ' low coverage blocks removed.\n')
    sys.stdout.write(str(goodblocks) + ' blocks used to assign reads.\n')
    # Flush the final (still-open) block to the text report.
    for i in range(len(firblock)):
        outlist = firblock[i] + secblock[i][1:] + (refblock[i],)
        out.write('\t'.join(map(str, outlist)) + '\n')
    out.write('end\n')
    out.close()
    totalreads = 0
    domsam = pysam.Samfile(outfile + '.dom.bam', 'wb', template=sam)
    secsam = pysam.Samfile(outfile + '.sec.bam', 'wb', template=sam)
    ratios.sort()
    try:
        # Five-number summary of the dominant-allele ratios (debug output).
        print ratios[0], ratios[len(ratios)/4], ratios[len(ratios)/2], ratios[len(ratios) * 3 / 4], ratios[-1]
    except:
        pass
    sys.stdout.write('Writing dominant and secondary strain bam files....\n')
    sys.stdout.flush()
    for read in sam.fetch():
        if read.qname in dominantreads:
            domsam.write(read)
        elif read.qname in secondaryreads:
            secsam.write(read)
        totalreads += 1
    domsam.close()
    secsam.close()
    sys.stdout.write(str(len(dominantreads)) + ' assigned to the dominant strain.\n')
    sys.stdout.write(str(len(secondaryreads)) + ' assigned to the secondary strain.\n')
    sys.stdout.write('Out of a possible ' + str(totalreads) + ' reads.\n')
    sys.stdout.write('Sorting and indexing BAM files.\n')
    pysam.sort(outfile + '.dom.bam', outfile + '.dom.sorted')
    pysam.index(outfile + '.dom.sorted.bam')
    pysam.sort(outfile + '.sec.bam', outfile + '.sec.sorted')
    pysam.index(outfile + '.sec.sorted.bam')
def callStrains(reference, outfile, sam):
    """Call consensus sequences for the dominant and secondary strains.

    Runs samtools/bcftools on the strain-split BAMs produced by
    groupflow(), applies the variant calls on top of a high-confidence
    reference consensus, masks recombination blocks, then writes
    <outfile>.dom.fa and <outfile>.sec.fa (contigs >= 100 bp, split on
    runs of >= 10 'n' characters).

    reference -- reference FASTA used for the pileup
    outfile   -- same prefix used by groupflow()
    sam       -- original (unsplit) BAM file path

    Depends on module globals halfmed/onehalfmed/recombblocklist set by
    groupflow(); requires samtools and bcftools on PATH.
    """
    global halfmed, recombblocklist, onehalfmed
    subprocess.Popen('samtools mpileup -uf ' + reference + ' ' + outfile + '.dom.sorted.bam | bcftools view -cg - > ' + outfile + '.dom.pu', shell=True).wait()
    subprocess.Popen('samtools mpileup -uf ' + reference + ' ' + outfile + '.sec.sorted.bam | bcftools view -cg - > ' + outfile + '.sec.pu', shell=True).wait()
    samfile = pysam.Samfile(sam)
    # Build a baseline consensus from the full alignment: only positions
    # with adequate coverage and a near-unanimous (>= 98%) base are kept.
    # NOTE(review): only the first reference sequence's length is used —
    # assumes a single-sequence reference; confirm.
    domseq = ['n' for i in range(samfile.lengths[0])]
    for pileupcolumn in samfile.pileup():
        varcounts = {}
        coverage = 0
        for pileupread in pileupcolumn.pileups:
            var = pileupread.alignment.seq[pileupread.qpos:pileupread.qpos +
                pileupread.alignment.overlap(pileupcolumn.pos, pileupcolumn.pos + 1)]
            coverage += 1
            if var in varcounts:
                varcounts[var] += 1
            else:
                varcounts[var] = 1
        if coverage >= halfmed:
            for i in varcounts:
                if varcounts[i] * 1.0 / coverage >= 0.98:
                    domseq[pileupcolumn.pos] = i
    print len(domseq)
    # Overlay the dominant-strain calls; FQ is negated so that larger
    # values mean higher confidence, and duplicate positions are masked.
    domfile = open(outfile + '.dom.pu')
    lastpos = 0
    count1, count2, count3, count4 = 0, 0, 0, 0
    for line in domfile:
        if not line.startswith('#'):
            chrom, pos, eyed, ref, alt, qual, zefilter, info = line.split()[:8]
            pos = int(pos)
            if alt == '.':
                alt = ref
            for i in info.split(';'):
                if i.startswith('FQ='):
                    quality = - float(i.split('=')[1])
            if pos == lastpos:
                domseq[pos-1] = 'n'
                count1 += 1
            else:
                if quality >= 30:
                    domseq[pos-1] = alt
            lastpos = pos
    print len(domseq)
    domfile.close()
    # Same overlay for the secondary strain, starting from all-'n'.
    secseq = ['n' for i in range(len(domseq))]
    secfile = open(outfile + '.sec.pu')
    lastpos = 0
    for line in secfile:
        if not line.startswith('#'):
            chrom, pos, eyed, ref, alt, qual, zefilter, info = line.split()[:8]
            pos = int(pos)
            if alt == '.':
                alt = ref
            for i in info.split(';'):
                if i.startswith('FQ='):
                    quality = - float(i.split('=')[1])
            if pos == lastpos:
                secseq[pos-1] = 'n'
                count2 += 1
            else:
                if quality >= 30:
                    secseq[pos-1] = alt
                    count3 += 1
                else:
                    count4 += 1
            lastpos = pos
    print len(domseq)
    print count1, count2, count3, count4
    # Mask every region groupflow() flagged as actively recombining.
    for i in recombblocklist:
        for j in range(i[0] - 1, i[1]):
            domseq[j] = 'n'
            secseq[j] = 'n'
    domseq = ''.join(domseq)
    secseq = ''.join(secseq)
    domseqs = []
    secseqs = []
    domslen = []
    secslen = []
    # Split on long 'n' runs and keep only contigs of at least 100 bp.
    for i in domseq.split('nnnnnnnnnn'):
        seq = i.strip('n')
        if len(seq) >= 100:
            domseqs.append(seq)
            domslen.append(len(seq))
    for i in secseq.split('nnnnnnnnnn'):
        seq = i.strip('n')
        if len(seq) >= 100:
            secseqs.append(seq)
            secslen.append(len(seq))
    sys.stdout.write('\tMax\tnumber\ttotal\n')
    sys.stdout.write('DOM: ' + str(max(domslen)) + ' ' + str(len(domslen)) + ' ' + str(sum(domslen)) + '\n')
    try:
        sys.stdout.write('SEC: ' + str(max(secslen)) + ' ' + str(len(secslen)) + ' ' + str(sum(secslen)) + '\n')
    except:
        sys.stdout.write('Not enough coverage to create sequence for secondary strain\n ')
    # Write the surviving contigs as FASTA, wrapped at 60 columns.
    out = open(outfile + '.dom.fa', 'w')
    count = 1
    for i in domseqs:
        out.write('>dom_' + str(count) + '\n')
        for j in range(0, len(i), 60):
            out.write(i[j:j+60] + '\n')
        count += 1
    out.close()
    out = open(outfile + '.sec.fa', 'w')
    count = 1
    for i in secseqs:
        out.write('>sec_' + str(count) + '\n')
        for j in range(0, len(i), 60):
            out.write(i[j:j+60] + '\n')
        count += 1
    out.close()
# Script entry: print usage or run the two-stage pipeline.
# NOTE(review): mincov/minratio correspond to the -m/-r flags advertised in
# the usage text below, but they are never actually parsed from sys.argv and
# are unused — the defaults here have no effect; confirm intended behaviour.
mincov = 5
minratio = 0.9
if len(sys.argv) < 4 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
    sys.stdout.write('''
groupFlows.py
Written by Mitchell Sullivan (mjsull@gmail.com)
LICENCE: GPLv3
USAGE: python groupFlows.py <args> bam_file vcf_file ref_file outfile_prefix
Bam file must be ordered and indexed, vcf file should be made using freebayes
Arguments
-m      minimum coverage        minimum coverage for base to be used in outfile
-r      minimum ratio for base to be used in outfile
OUTPUT:
<prefix>.dom.bam Alignement of reads assosciated with the dominant strain
<prefix>.dom.sorted.bam sorted alignment
<prefix>.dom.sorted.bam.bai index of sorted alignment
<prefix>.dom.fa fasta of aligned regions of reference
<prefix>.sec.bam Identical files but for the secondary strain
<prefix>.sec.sorted.bam
<prefix>.sec.sorted.bam.bai
<prefix>.sec.fa
<prefix>.txt list of flows seperated into blocks
''')
    sys.exit()
# Positional args (from the end): bam, vcf, ref, prefix.
returned = groupflow(sys.argv[-4], sys.argv[-3], sys.argv[-1])
if returned == 0:
    sys.stderr.write('Pysam not found, please install.')
    sys.exit()
callStrains(sys.argv[-2], sys.argv[-1], sys.argv[-4]) | mjsull/HapFlow | groupFlows.py | Python | gpl-3.0 | 20,669 | [
"pysam"
] | 57588c3efe89ec2969cf4b146d51f2ab5e797b0094ecdd4eb37ab40ebabe8581 |
#!/usr/bin/env python
#
# Copyright (C) 2014
# Brian Caswell <bmc@lungetech.com>
# Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import string
import sys
import os
from generator.actions import Actions
from random import choice,randint
from struct import *
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from payloads import *
def random_alpha(a, b):
    """Return a random string of ASCII letters with length in [a, b].

    Fix: uses string.ascii_letters instead of the Python-2-only (and
    locale-dependent) string.letters, so the helper also runs under
    Python 3 with identical output alphabet.
    """
    return ''.join(choice(string.ascii_letters) for _ in range(randint(a, b)))
def build_seg(l, n, d, c):
    """Pack one segment record for the service.

    Layout (little-endian): u32 length, 16-byte name, 112-byte
    description, 4096-byte content (fixed-width fields are zero-padded).
    """
    layout = "<I16s112s4096s"
    return pack(layout, l, n, d, c)
# Canned reply strings the service is expected to echo back; one is chosen
# at random per poll in SIGSEGV.overflow().
responses = [
    "It's all just electrons.",
    "TOO MANY SECRETS",
    "It's a living.",
    "Mess with the best...",
    "You can't stop the signal, Mal.",
]
class SIGSEGV(Actions):
    """Poller state machine: sends one oversized segment to the service
    and verifies it echoes the name, description and a canned response."""

    def start(self):
        # No setup/handshake is needed before polling.
        pass

    def overflow(self):
        # Pick one canned response and build random identity fields of the
        # exact widths the segment format expects.
        idx = randint(0, len(responses) - 1)
        seg_name = random_alpha(16, 16)
        seg_desc = random_alpha(111, 111)
        # Announce a single segment, then send it with the chosen payload.
        self.write(pack("<I", 1))
        self.write(build_seg(4096, seg_name, seg_desc, payloads[idx]))
        # The service must echo each field back on its own line, in order.
        for expected in (seg_name, seg_desc, responses[idx], "fin."):
            self.read(delim="\n", expect=expected)
| f0rki/cb-multios | original-challenges/SIGSEGV/support/old/overflow/overflow.py | Python | mit | 2,207 | [
"Brian"
] | 26621007550e2eb821be519e90be2f6069f74c6fba7efb6a75242bedad8d5c33 |
#!/usr/bin/env python
from __future__ import print_function
import json
import optparse
import os.path
import sys
from load_db import create_table
from query_db import describe_tables, get_connection, run_query
"""
JSON config:
{ tables : [
{ file_path : '/home/galaxy/dataset_101.dat',
table_name : 't1',
column_names : ['c1','c2','c3'],
pkey_autoincr : 'id'
comment_lines : 1
unique: ['c1'],
index: ['c2', 'c3']
},
{ file_path : '/home/galaxy/dataset_102.dat',
table_name : 'gff',
column_names : ['seqname',,'date','start','end']
comment_lines : 1
load_named_columns : True
filters : [{'filter': 'regex', 'pattern': '#peptide',
'action': 'exclude_match'},
{'filter': 'replace', 'column': 3,
'replace': 'gi[|]', 'pattern': ''}]
},
{ file_path : '/home/galaxy/dataset_103.dat',
table_name : 'test',
column_names : ['c1', 'c2', 'c3']
}
]
}
"""
def __main__():
    """Command-line driver: build SQLite tables from tabular files (per a
    JSON table-specification file), optionally run modification statements
    and saved queries from that file, then either run a final SQL query
    (-q/-Q) or, when no query is given, describe the database tables.

    Exits via exit('Error: ...') on any failure.
    """
    # Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option('-s', '--sqlitedb', dest='sqlitedb', default=None,
                      help='The SQLite Database')
    parser.add_option('-j', '--jsonfile', dest='jsonfile', default=None,
                      help='JSON dict of table specifications')
    parser.add_option('-q', '--query', dest='query', default=None,
                      help='SQL query')
    parser.add_option('-Q', '--query_file', dest='query_file', default=None,
                      help='SQL query file')
    parser.add_option('-n', '--no_header', dest='no_header', default=False,
                      action='store_true',
                      help='Include a column headers line')
    parser.add_option('-c', '--comment_char', dest='comment_char', default='',
                      help='comment character to prefix column header line')
    parser.add_option('-o', '--output', dest='output', default=None,
                      help='Output file for query results')
    parser.add_option('-d', '--debug', dest='debug', default=False,
                      action='store_true',
                      help='Output info to stderr')
    (options, args) = parser.parse_args()

    # Determine output destination: a named file when -o is given, else stdout.
    if options.output is not None:
        try:
            outputPath = os.path.abspath(options.output)
            outputFile = open(outputPath, 'w')
        except Exception as e:
            exit('Error: %s' % (e))
    else:
        outputFile = sys.stdout

    def _create_table(ti, table):
        # Turn one JSON table spec into a SQLite table.  Optional keys fall
        # back to defaults via dict.get() (replaces the repetitive
        # `table['k'] if 'k' in table else default` pattern).
        path = table['file_path']
        table_name = table.get('table_name', 't%d' % (ti + 1))
        comment_lines = table.get('comment_lines', 0)
        comment_char = table.get('comment_char', None)
        column_names = table.get('column_names', None)
        firstlinenames = table.get('firstlinenames', False)
        # Named-column loading only makes sense when column names are given.
        if column_names:
            load_named_columns = table.get('load_named_columns', False)
        else:
            load_named_columns = False
        unique_indexes = table.get('unique', [])
        indexes = table.get('index', [])
        filters = table.get('filters', None)
        pkey_autoincr = table.get('pkey_autoincr', None)
        create_table(get_connection(options.sqlitedb), path, table_name,
                     pkey_autoincr=pkey_autoincr,
                     firstlinenames=firstlinenames,
                     column_names=column_names,
                     skip=comment_lines,
                     comment_char=comment_char,
                     load_named_columns=load_named_columns,
                     filters=filters,
                     unique_indexes=unique_indexes,
                     indexes=indexes)

    if options.jsonfile:
        try:
            with open(options.jsonfile) as fh:
                tdef = json.load(fh)
            if options.debug:
                print('JSON: %s' % tdef, file=sys.stderr)
            if 'tables' in tdef:
                for ti, table in enumerate(tdef['tables']):
                    _create_table(ti, table)
            if 'sql_stmts' in tdef:
                for si, stmt in enumerate(tdef['sql_stmts']):
                    rowcount = run_query(get_connection(options.sqlitedb), stmt, None)
                    if options.debug:
                        print('\nDB modification: %s \nrowcount: %s' %
                              (stmt, rowcount), file=sys.stderr)
            if 'queries' in tdef:
                for qi, qstmt in enumerate(tdef['queries']):
                    # A 'header' entry doubles as the comment prefix for the
                    # header line; its absence suppresses the header.
                    if 'header' in qstmt:
                        no_header = False
                        comment_char = qstmt['header']
                    else:
                        no_header = True
                        comment_char = None
                    with open(qstmt['result_file'], 'w') as fh:
                        query = qstmt['query']
                        rowcount = run_query(get_connection(options.sqlitedb),
                                             query,
                                             fh,
                                             no_header=no_header,
                                             comment_char=comment_char)
                    if options.debug:
                        print('\nSQL: %s \nrowcount: %s' %
                              (query, rowcount), file=sys.stderr)
        except Exception as e:
            exit('Error: %s' % (e))

    # The final query may come from a file (-Q) or the command line (-q).
    query = None
    if options.query_file is not None:
        # Read the whole file at once instead of the original
        # line-by-line string concatenation loop (same result).
        with open(options.query_file, 'r') as fh:
            query = fh.read()
    elif options.query is not None:
        query = options.query

    if query is None:
        # No query given: just describe the tables in the database.
        try:
            describe_tables(get_connection(options.sqlitedb), outputFile)
        except Exception as e:
            exit('Error: %s' % (e))
    else:
        try:
            rowcount = run_query(get_connection(options.sqlitedb),
                                 query, outputFile,
                                 no_header=options.no_header,
                                 comment_char=options.comment_char)
            if options.debug:
                print('\nSQL: %s \nrowcount: %s' %
                      (query, rowcount), file=sys.stderr)
        except Exception as e:
            exit('Error: %s' % (e))


if __name__ == "__main__":
    __main__()
| mvdbeek/tools-iuc | tools/query_tabular/query_tabular.py | Python | mit | 6,993 | [
"Galaxy"
] | c24193d131f5fe947cd1abe7931b00502ae01339a92f0821946968f53ee4e936 |
"""
Note Snatcher: a simple notes organizer
Copyright (C) 2015 Brian Ratliff
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import wx
# wx widget/event IDs for the tag-editing dialog controls.
ID_BTN_ADDTAG = wx.NewId()
ID_BTN_ADDUSE = wx.NewId()
ID_BTN_USETAG = wx.NewId()
ID_BTN_REMTAG = wx.NewId()
ID_BTN_DELTAG = wx.NewId()
ID_BTN_SAVETAGS = wx.NewId()
ID_TE_ADDTAG = wx.NewId()
ID_LST_ALL = wx.NewId()
ID_LST_CURRENT = wx.NewId()
class EditTagsFrame(wx.Dialog):
    """Modal dialog for editing tags.

    Two list boxes ("All Tags" / "Current Tags") with move buttons between
    them.  `mode` selects what "current" means:
      - 'note'     -- tags of db.currentNote
      - 'notelist' -- the notebook-level tag filter (db.tags)
    `db` is the application's notes database object.
    """

    def __init__(self, parent, id, title, mode, db):
        # Dialog title shows which mode the editor was opened in.
        wx.Dialog.__init__(self, parent, id, title + " " + mode + " Mode",
            style=wx.DEFAULT_DIALOG_STYLE)
        self.parent = parent
        self.mode = mode
        self.sdb = db
        panel = wx.Panel(self, -1, size=(500, 500))
        # sizers
        vbox = wx.BoxSizer(wx.VERTICAL)
        newtagbox = wx.BoxSizer(wx.HORIZONTAL)
        tagcopybox = wx.BoxSizer(wx.HORIZONTAL)
        alltagsbox = wx.BoxSizer(wx.VERTICAL)
        currenttagsbox = wx.BoxSizer(wx.VERTICAL)
        tagbtnsbox = wx.BoxSizer(wx.VERTICAL)
        bottombtns = wx.BoxSizer(wx.HORIZONTAL)
        # widgets
        lblNewTag = wx.StaticText(panel, -1, "New Tag:")
        self.txtNewTag = wx.TextCtrl(panel, ID_TE_ADDTAG, size=(100, -1),
            style=wx.TE_PROCESS_ENTER)
        btnAddTag = wx.Button(panel, ID_BTN_ADDTAG, "Add Tag")
        btnAddTag.SetToolTip(wx.ToolTip("Add a new tag."))
        lblAllTags = wx.StaticText(panel, -1, "All Tags:")
        self.lstAllTags = wx.ListBox(panel, ID_LST_ALL, size=(130, 200),
            style=wx.LB_SINGLE | wx.LB_SORT | wx.LB_HSCROLL)
        btnUseTag = wx.Button(panel, ID_BTN_USETAG, "Use ->")
        btnUseTag.SetToolTip(wx.ToolTip("Use the currently selected tag."))
        btnRemoveTag = wx.Button(panel, ID_BTN_REMTAG, "<- Remove")
        btnRemoveTag.SetToolTip(wx.ToolTip("Don't use the currently"
            "selected tag."))
        btnDelTag = wx.Button(panel, ID_BTN_DELTAG, "Delete")
        btnDelTag.SetToolTip(wx.ToolTip("Delete the selected tag from"
            "the notebook."))
        # Stock OK/Cancel buttons close the dialog with the usual codes.
        btnSaveTags = wx.Button(panel, wx.ID_OK)
        btnCancel = wx.Button(panel, wx.ID_CANCEL)
        lblCurrentTags = wx.StaticText(panel, -1, "Current Tags:")
        self.lstCurrentTags = wx.ListBox(panel, ID_LST_CURRENT,
            size=(130, 200), style=wx.LB_SINGLE | wx.LB_SORT | wx.LB_HSCROLL)
        # Layout: entry row on top, the two lists with the button column
        # between them, OK/Cancel centred at the bottom.
        newtagbox.Add(self.txtNewTag, 0, wx.LEFT | wx.EXPAND, 10)
        newtagbox.Add(btnAddTag, 0, wx.LEFT | wx.EXPAND, 10)
        alltagsbox.Add(lblAllTags, 0, wx.LEFT, 10)
        alltagsbox.Add(self.lstAllTags, 0, wx.LEFT | wx.RIGHT | wx.BOTTOM, 10)
        tagbtnsbox.Add((-1, 10))
        tagbtnsbox.Add(btnUseTag, 0, wx.ALIGN_LEFT)
        tagbtnsbox.Add(btnRemoveTag, 0, wx.ALIGN_LEFT)
        tagbtnsbox.Add((-1, 10))
        tagbtnsbox.Add(btnDelTag, 0, wx.ALIGN_LEFT)
        currenttagsbox.Add(lblCurrentTags, 0, wx.LEFT, 10)
        currenttagsbox.Add(self.lstCurrentTags, 0,
            wx.LEFT | wx.RIGHT | wx.BOTTOM, 10)
        tagcopybox.Add(alltagsbox, 0)
        tagcopybox.Add(tagbtnsbox, 0,
            wx.ALIGN_TOP | wx.ALIGN_CENTER_HORIZONTAL)
        tagcopybox.Add(currenttagsbox, 0)
        bottombtns.Add(btnSaveTags, 0)
        bottombtns.Add(btnCancel, 0)
        vbox.Add(lblNewTag, 0, wx.LEFT | wx.TOP, 10)
        vbox.Add(newtagbox, 0)
        vbox.Add((-1, 10), 0)
        vbox.Add(tagcopybox, 1, wx.EXPAND)
        vbox.Add(bottombtns, 0, wx.ALIGN_CENTER)
        #self.SetClientSize(panel.GetBestSize())
        size = wx.Size(400, 300)
        self.SetSize(size)
        panel.SetSizer(vbox)
        vbox.Fit(self)
        self.Layout()
        # events
        # buttons
        self.Bind(wx.EVT_TEXT_ENTER, self.OnAddTag, self.txtNewTag)
        self.Bind(wx.EVT_BUTTON, self.OnAddTag, id=ID_BTN_ADDTAG)
        self.Bind(wx.EVT_BUTTON, self.OnUseTag, id=ID_BTN_USETAG)
        self.Bind(wx.EVT_BUTTON, self.OnRemTag, id=ID_BTN_REMTAG)
        self.Bind(wx.EVT_BUTTON, self.OnDelTag, id=ID_BTN_DELTAG)
        # list box events (double-click acts like Use/Remove)
        self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnUseTag, id=ID_LST_ALL)
        self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnRemTag, id=ID_LST_CURRENT)
        # show the tags
        self.ShowTags()

    def OnAddTag(self, e):
        """Add the text typed into the entry field as a new tag (to the
        current note or the notebook filter, depending on mode)."""
        # NOTE(review): the leading `pass` and the print() calls below look
        # like leftover debugging scaffolding; they are harmless no-ops.
        pass
        print("OnAddTag")
        newtag = self.txtNewTag.GetValue()
        if newtag == "":
            return
        print("New tag: %s" % newtag)
        if self.mode == "note":
            # if tag is not in current note's tags
            if newtag not in self.sdb.currentNote.tags:
                # add tag text to current note's tags
                self.sdb.currentNote.tags.add(newtag)
                # add that tag to current tag list box
                self.lstCurrentTags.Append(newtag)
            else:
                print("Current tags: %s" %
                    (self.sdb.currentNote.get_tags_string()))
        elif self.mode == "notelist":
            if newtag not in self.sdb.tags:
                self.sdb.tags.add(newtag)
                self.lstCurrentTags.Append(newtag)
                print("tag added to self.sdb.tags: %s" % newtag)
            else:
                print("Current tags: %s" % (self.sdb.tags.get_tags_string()))
        # Reset the entry field for the next tag.
        self.txtNewTag.Clear()
        self.txtNewTag.SetFocus()

    def OnUseTag(self, e):
        """Copy the tag selected in 'All Tags' into the current set."""
        pass
        print("OnUseTag")
        sel = self.lstAllTags.GetSelection()
        if sel == -1:
            return
        # get selected tag item from list box
        seltag = self.lstAllTags.GetString(sel)
        if self.mode == "notelist":
            # add that tag to db tags
            # update main form's listing
            if seltag not in self.sdb.tags:
                self.lstCurrentTags.Append(seltag)
                self.sdb.tags.add(seltag)
                #print("tag added to self.sdb.tags: %s" % seltag)
        elif self.mode == "note":
            pass
            if seltag not in self.sdb.currentNote.tags:
                self.lstCurrentTags.Append(seltag)
                self.sdb.currentNote.tags.add(seltag)

    def OnRemTag(self, e):
        """Remove the tag selected in 'Current Tags' from the current set
        (the tag itself stays in the notebook's master tag list)."""
        print("OnRemTag")
        # get selected tag item from list box
        sel = self.lstCurrentTags.GetSelection()
        if sel == -1:
            return
        if self.mode == "notelist":
            pass
            # get selected item from list box
            # remove that tag from db tags
            # remove that tag from list box
            # update main form's listing
            seltag = self.lstCurrentTags.GetString(sel)
            # remove that tag from list box
            self.lstCurrentTags.Delete(sel)
            # remove that tag from the notebook filter's tags
            self.sdb.tags.remove(seltag)
        elif self.mode == "note":
            # get tag
            seltag = self.lstCurrentTags.GetString(sel)
            # remove that tag from list box
            self.lstCurrentTags.Delete(sel)
            # remove that tag from current note's tags
            self.sdb.currentNote.tags.remove(seltag)

    def OnDelTag(self, e):
        """Delete a tag from the notebook entirely.

        NOTE(review): not implemented yet — the handler only prints the
        intended steps; no state is modified.
        """
        print("OnDelTag")
        # get selected tag
        if self.mode == "notelist":
            pass
            # remove it from both list boxes
        elif self.mode == "note":
            pass
            # remove it from both list boxes
            # remove every relationship that involves that tag

    def ShowTags(self):
        """Populate both list boxes: every known tag goes into 'All Tags';
        tags already in use (per mode) also go into 'Current Tags'."""
        # get all tags from tags table (rows of 1-tuples -> plain strings)
        tags = [t[0] for t in self.sdb.get_all_tags()]
        # all tags from current note
        # loop through them
        for t in tags:
            # add it to available tags
            self.lstAllTags.Append(t)
            if self.mode == "notelist":
                if t in self.sdb.tags:
                    self.lstCurrentTags.Append(t)
            elif self.mode == "note":
                # if tag is part of current note's tags
                if t in self.sdb.currentNote.tags:
                    # add it to current tags
                    self.lstCurrentTags.Append(t)
| brianratliff/notesnatcher | edittags.py | Python | gpl-3.0 | 8,588 | [
"Brian"
] | ca20105984e9fe509aa94cea8129044d6afdf78dde65dfe76dab2f3eb0fe9eb9 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
cc_plugin_ncei/ncei_point.py
'''
from compliance_checker.base import BaseCheck
from cc_plugin_ncei.ncei_base import TestCtx, NCEI1_1Check, NCEI2_0Check
from cc_plugin_ncei import util
class NCEIPointBase(BaseCheck):
    """Shared feature-type checks for the NCEI Point templates."""
    _cc_spec = 'ncei-point'
    valid_feature_types = [
        'station',
        'point'
    ]

    def check_dimensions(self, dataset):
        '''
        Checks that the feature types of this dataset are consistent with a point dataset

        :param dataset: netCDF4-like dataset under test
        :return: compliance_checker Result for the feature-type check
        '''
        required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are point feature types')
        t = util.get_time_variable(dataset)

        # A point feature type is meaningless without a time coordinate; exit prematurely.
        if not t:
            required_ctx.assert_true(False, 'A dimension representing time is required for point feature types')
            return required_ctx.to_result()
        t_dims = dataset.variables[t].dimensions
        # The single dimension of the time variable (or () when time is scalar).
        # Simplified from `None or (t_dims and t_dims[0])`: the `None or` was a no-op.
        o = t_dims and t_dims[0]

        # BUG FIX: the message previously said "timeseries"; this checker validates
        # the *point* feature type (cf. util.is_point below).
        message = '{} must be a valid point feature type. It must have dimensions of ({}), and all coordinates must have dimensions of ({})'
        for variable in util.get_geophysical_variables(dataset):
            is_valid = util.is_point(dataset, variable)
            required_ctx.assert_true(
                is_valid,
                message.format(variable, o, o)
            )
        return required_ctx.to_result()
class NCEIPoint1_1(NCEI1_1Check, NCEIPointBase):
    """Checker for the NCEI Point template, version 1.1."""
    register_checker = True
    _cc_spec_version = '1.1'
    _cc_description = (
        'This test checks the selected file against the NCEI netCDF Point template version 1.1 '
        '(found at https://www.nodc.noaa.gov/data/formats/netcdf/v1.1/point.cdl). The NCEI '
        'version 1.1 templates are based on “feature types”, as identified by Unidata and CF, '
        'and conform to ACDD version 1.0 and CF version 1.6. You can find more information about '
        'the version 1.1 templates at https://www.nodc.noaa.gov/data/formats/netcdf/v1.1/. This '
        'test is specifically for the Point feature type which is typically used for a single '
        'data point with one or more recorded observations that have no temporal or spatial '
        'relationship (where each observation equals one point in time and space).')
    _cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v1.1/point.cdl'
    _cc_authors = 'Luke Campbell, Dan Maher'
    _cc_checker_version = '2.1.0'

    valid_templates = [
        "NODC_NetCDF_Point_Template_v1.1"
    ]

    @classmethod
    def beliefs(cls):
        '''
        Not applicable for gliders
        '''
        return {}

    def check_required_attributes(self, dataset):
        '''
        Verifies that the dataset contains the NCEI required and highly recommended global attributes

        :param dataset: netCDF4-like dataset under test
        :return: list of compliance_checker Results
        '''
        results = []
        # BUG FIX: the context label said "Timeseries" (copy-paste from the
        # timeseries checker); this class checks the Point template.
        required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Point')
        required_ctx.assert_true(
            getattr(dataset, 'nodc_template_version', '').lower() == self.valid_templates[0].lower(),
            'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
        )
        required_ctx.assert_true(
            getattr(dataset, 'cdm_data_type', '') == 'Point',
            'cdm_data_type attribute must be set to Point'
        )
        required_ctx.assert_true(
            getattr(dataset, 'featureType', '') == 'point',
            'featureType attribute must be set to point'
        )
        results.append(required_ctx.to_result())
        return results
class NCEIPoint2_0(NCEI2_0Check, NCEIPointBase):
    """Checker for the NCEI Point template, version 2.0."""
    register_checker = True
    _cc_spec_version = '2.0'
    _cc_description = (
        'This test checks the selected file against the NCEI netCDF Point template'
        'version 2.0 (found at https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/point.cdl). The NCEI '
        'version 2.0 templates are based on “feature types”, as identified by Unidata and CF, and '
        'conform to ACDD version 1.3 and CF version 1.6. You can find more information about the '
        'version 2.0 templates at https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/. This test is '
        'specifically for the Point feature type which is typically used for a single data point with '
        'one or more recorded observations that have no temporal or spatial relationship (where each '
        'observation equals one point in time and space).')
    _cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/point.cdl'
    _cc_authors = 'Luke Campbell, Dan Maher'
    _cc_checker_version = '2.3.0'

    valid_templates = [
        "NCEI_NetCDF_Point_Template_v2.0"
    ]

    def check_required_attributes(self, dataset):
        '''
        Verifies that the dataset contains the NCEI required and highly recommended global attributes

        :param dataset: netCDF4-like dataset under test
        :return: list of compliance_checker Results
        '''
        results = []
        # BUG FIX: the context label said "Timeseries" (copy-paste from the
        # timeseries checker); this class checks the Point template.
        required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Point')
        required_ctx.assert_true(
            getattr(dataset, 'ncei_template_version', '').lower() == self.valid_templates[0].lower(),
            'ncei_template_version attribute must be {}'.format(self.valid_templates[0])
        )
        required_ctx.assert_true(
            getattr(dataset, 'cdm_data_type', '') == 'Point',
            'cdm_data_type attribute must be set to Point'
        )
        required_ctx.assert_true(
            getattr(dataset, 'featureType', '') == 'point',
            'featureType attribute must be set to point'
        )
        results.append(required_ctx.to_result())
        return results
| ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_point.py | Python | apache-2.0 | 5,599 | [
"NetCDF"
] | 2230c5358120be66eeca7c2d8ba28a1e32990b522624cbe85ad6cd8b9029cc3e |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Brian Coca <briancoca+ansible@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata and documentation constants for the `debconf` Ansible module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}

# BUG FIX in `vtype` choices below: 'text' was listed twice.
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
    - Configure a .deb package using debconf-set-selections. Or just query existing selections.
version_added: "1.6"
notes:
    - This module requires the command line debconf tools.
    - A number of questions have to be answered (depending on the package).
      Use 'debconf-show <package>' on any Debian or derivative with the package
      installed to see questions/settings available.
    - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
  name:
    description:
      - Name of package to configure.
    required: true
    aliases: [ pkg ]
  question:
    description:
      - A debconf configuration setting.
    aliases: [ selection, setting ]
  vtype:
    description:
      - The type of the value supplied.
      - C(seen) was added in 2.2.
    choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ]
  value:
    description:
      - Value to set the configuration to.
    aliases: [ answer ]
  unseen:
    description:
      - Do not set 'seen' flag when pre-seeding.
    type: bool
    default: False
author:
- Brian Coca (@bcoca)
'''

EXAMPLES = '''
- name: Set default locale to fr_FR.UTF-8
  debconf:
    name: locales
    question: locales/default_environment_locale
    value: fr_FR.UTF-8
    vtype: select

- name: set to generate locales
  debconf:
    name: locales
    question: locales/locales_to_be_generated
    value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
    vtype: multiselect

- name: Accept oracle license
  debconf:
    name: oracle-java7-installer
    question: shared/accepted-oracle-license-v1-1
    value: 'true'
    vtype: select

- name: Specifying package you can register/return the list of questions and current values
  debconf:
    name: tzdata
'''
from ansible.module_utils.basic import AnsibleModule
def get_selections(module, pkg):
    """Return the debconf selections for *pkg* as a {question: value} dict.

    Runs ``debconf-show <pkg>`` and parses its output; each line looks like
    ``* question: value`` where a leading ``*`` marks a "seen" question.
    Fails the module (fail_json) when the command exits non-zero.
    """
    cmd = [module.get_bin_path('debconf-show', True), pkg]
    # BUG FIX: pass the argv list directly instead of ' '.join(cmd). Joining
    # breaks on whitespace in the binary path or package name; a list is also
    # never interpreted by a shell.
    rc, out, err = module.run_command(cmd)
    if rc != 0:
        module.fail_json(msg=err)
    selections = {}
    for line in out.splitlines():
        (key, value) = line.split(':', 1)
        # strip('*') removes the "seen" marker, strip() the surrounding spaces
        selections[key.strip('*').strip()] = value.strip()
    return selections
def set_selection(module, pkg, question, vtype, value, unseen):
    """Pre-seed one debconf answer for *pkg* via ``debconf-set-selections``.

    The selection is fed to the tool on stdin as "pkg question vtype value".
    With *unseen*, the question's "seen" flag is left untouched (-u).
    Returns the (rc, stdout, stderr) triple from run_command.
    """
    cmd = [module.get_bin_path('debconf-set-selections', True)]
    if unseen:
        cmd.append('-u')
    if vtype == 'boolean':
        # debconf expects lowercase booleans; map the Python-style strings.
        value = {'True': 'true', 'False': 'false'}.get(value, value)
    data = ' '.join([pkg, question, vtype, value])
    return module.run_command(cmd, data=data)
def main():
    """Module entry point: query or pre-seed debconf selections for a package."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['pkg']),
            question=dict(type='str', aliases=['selection', 'setting']),
            vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']),
            value=dict(type='str', aliases=['answer']),
            unseen=dict(type='bool'),
        ),
        required_together=(['question', 'vtype', 'value'],),
        supports_check_mode=True,
    )

    # TODO: enable passing array of options and/or debconf file from get-selections dump
    pkg = module.params["name"]
    question = module.params["question"]
    vtype = module.params["vtype"]
    value = module.params["value"]
    unseen = module.params["unseen"]

    # Current selections for the package: used for change detection, and as
    # the module result when only querying (no question supplied).
    prev = get_selections(module, pkg)

    changed = False
    msg = ""

    if question is not None:
        if vtype is None or value is None:
            module.fail_json(msg="when supplying a question you must supply a valid vtype and value")

        # A change is needed when the question is unknown or holds a different value.
        if question not in prev or prev[question] != value:
            changed = True

        if changed:
            if not module.check_mode:
                rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
                if rc:
                    module.fail_json(msg=e)

            curr = {question: value}
            if question in prev:
                # Restrict `prev` to the touched question for a focused result/diff.
                prev = {question: prev[question]}
            else:
                prev[question] = ''
            if module._diff:
                after = prev.copy()
                after.update(curr)
                diff_dict = {'before': prev, 'after': after}
            else:
                diff_dict = {}

            # exit_json raises AnsibleExitJson / SystemExit, so the final
            # exit_json below is not reached on this path.
            module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)

    # Query-only path (or no change needed): report current selections.
    module.exit_json(changed=changed, msg=msg, current=prev)


if __name__ == '__main__':
    main()
| hryamzik/ansible | lib/ansible/modules/system/debconf.py | Python | gpl-3.0 | 5,176 | [
"Brian"
] | dd1562be5e78105b1c5e7607acb58aed674947f055fd2d4f6958ea9d161c6fa5 |
import utils
import numpy as np
from scipy import ndimage
from matplotlib import pylab as plt
from PIL import Image
import cv2
#TODO: Load image into an array
# Read the raw 8-bit grayscale image and reshape it to 256x256
# (LENA256.RAW is assumed to be headerless uint8 data -- TODO confirm).
raw_img = np.fromfile('LENA256.RAW', dtype=np.uint8, sep="")
img_array = raw_img.reshape((256, 256))
Image.fromarray(img_array).save('Lena.jpg')
# raw_img = np.fromfile('Ecoli exp2.png',dtype=np.uint8, sep="")
# img_array = raw_img.reshape((640, 387))
'''
Add Gaussian noise
'''
# Noise experiments: Gaussian (variance 30) and salt & pepper (1% of pixels).
try:
    # gaussian_image1 = utils.func_add_noisy(img_array)
    # NOTE(review): gaussian_image2 is computed but never saved/used below -- confirm intent.
    gaussian_image2 = utils.func_add_noisy(img_array, var=30)
    pepper_image1 = utils.func_add_noisy(img_array, 's&p',amount=0.01)
except Exception as Argument:
    print('Adding Gaussian noise exception occurred: {0}'.format(Argument))
    input()
else:
    # Image.fromarray(gaussian_image1).save('Gaussian_img_1.jpg')
    Image.fromarray(pepper_image1).save('Salt&pepper_img_2.jpg')
    print('Successfully Added Gaussian Noise')

# Rotation experiments using the manual interpolation routine from utils
# (presumably nearest-neighbour by default -- TODO verify in utils).
try:
    rotate_image1 = utils.func_manual_rotate_image_interpolation(img_array, 45)
    rotate_image2 = utils.func_manual_rotate_image_interpolation(img_array, 10)
    rotate_image3 = utils.func_manual_rotate_image_interpolation(img_array, 90)
except Exception as Argument:
    print('Rotating image exception occurred: {0}'.format(Argument))
    input()
else:
    Image.fromarray(utils.func_verify_image(rotate_image1)).save('rotate_img_1.jpg')
    Image.fromarray(utils.func_verify_image(rotate_image2)).save('rotate_img_2.jpg')
    Image.fromarray(utils.func_verify_image(rotate_image3)).save('rotate_img_3.jpg')

#Use build in function
# rotate_image12 = Image.fromarray(img_array).rotate(45, expand=True, resample=Image.NEAREST).save('rotate_img_12.jpg')
# rotate_image13 = Image.fromarray(img_array).rotate(45, expand=True, resample=Image.BILINEAR).save('rotate_img_13.jpg')
# print('Successfully Rotated Image')
# rotate_image12 = Image.fromarray(img_array).rotate(45, expand=True, resample=None).save('rotate_img_12.jpg')
# Comparison output: PIL's built-in rotation with bilinear resampling.
rotate_image13 = Image.fromarray(img_array).rotate(45, expand=True, resample=Image.BILINEAR).save('rotate_img_13.jpg')
rotate_image13 = Image.fromarray(img_array).rotate(10, expand=True, resample=Image.BILINEAR).save('rotate_img_14.jpg')

# Second interpolation mode (flag value 1 -- semantics defined in utils; TODO confirm).
try:
    rotate_image4 = utils.func_manual_rotate_image_interpolation(img_array, 45, 1)
except Exception as Argument:
    print('Adding Laplacian noise exception occurred: {0}'.format(Argument))
    input()
else:
    Image.fromarray(utils.func_verify_image(rotate_image4)).save('rotate_img_4.jpg')
    print('Successfully Rotated Image')
"Gaussian"
] | 6acaa1dad3cd91020ebd7c75f5e6ed9cfb2eb3b3fbef81649ef7f248e6e553b5 |
import binascii
import copy
import json
import re
from base64 import b64encode
import django.core.exceptions
from captcha.audio import AudioCaptcha
from captcha.image import ImageCaptcha
from django.contrib.auth.password_validation import validate_password
from django.core.validators import MinValueValidator
from django.db.models import Model, Q
from django.utils import timezone
from netfields import rest_framework as netfields_rf
from rest_framework import fields, serializers
from rest_framework.settings import api_settings
from rest_framework.validators import UniqueTogetherValidator, UniqueValidator, qs_filter
from api import settings
from desecapi import crypto, metrics, models, validators
class CaptchaSerializer(serializers.ModelSerializer):
    """Serializes a Captcha; the challenge payload is rendered on the fly."""
    challenge = serializers.SerializerMethodField()

    class Meta:
        model = models.Captcha
        # Expose the plaintext captcha content only in DEBUG mode.
        fields = ('id', 'challenge', 'kind') if not settings.DEBUG else ('id', 'challenge', 'kind', 'content')

    def get_challenge(self, obj: models.Captcha):
        """Render the captcha content as a base64-encoded image or audio blob."""
        # TODO Does this need to be stored in the object instance, in case this method gets called twice?
        if obj.kind == models.Captcha.Kind.IMAGE:
            challenge = ImageCaptcha().generate(obj.content).getvalue()
        elif obj.kind == models.Captcha.Kind.AUDIO:
            challenge = AudioCaptcha().generate(obj.content)
        else:
            raise ValueError(f'Unknown captcha type {obj.kind}')
        return b64encode(challenge)
class CaptchaSolutionSerializer(serializers.Serializer):
    """Validates a submitted captcha solution against the referenced Captcha."""
    id = serializers.PrimaryKeyRelatedField(
        queryset=models.Captcha.objects.all(),
        error_messages={'does_not_exist': 'CAPTCHA does not exist.'}
    )
    solution = serializers.CharField(write_only=True, required=True)

    def validate(self, attrs):
        captcha = attrs['id']  # Note that this already is the Captcha object
        # NOTE(review): verify() presumably invalidates the captcha on use -- confirm in models.
        if not captcha.verify(attrs['solution']):
            raise serializers.ValidationError('CAPTCHA could not be validated. Please obtain a new one and try again.')
        return attrs
class TokenSerializer(serializers.ModelSerializer):
    """Serializes API tokens; the secret value is exposed only on demand."""
    # CIDR subnets from which the token may be used.
    allowed_subnets = serializers.ListField(child=netfields_rf.CidrAddressField(), required=False)
    # The secret token value (model attribute `plain`); see get_fields().
    token = serializers.ReadOnlyField(source='plain')
    is_valid = serializers.ReadOnlyField()

    class Meta:
        model = models.Token
        fields = ('id', 'created', 'last_used', 'max_age', 'max_unused_period', 'name', 'perm_manage_tokens',
                  'allowed_subnets', 'is_valid', 'token',)
        read_only_fields = ('id', 'created', 'last_used', 'token')

    def __init__(self, *args, include_plain=False, **kwargs):
        # include_plain: expose the secret token value (e.g. right after creation).
        self.include_plain = include_plain
        return super().__init__(*args, **kwargs)

    def get_fields(self):
        """Drop the plain token field unless explicitly requested."""
        fields = super().get_fields()
        if not self.include_plain:
            fields.pop('token')
        return fields
class DomainSlugRelatedField(serializers.SlugRelatedField):
    """Slug field whose candidate set is the requesting user's own domains."""

    def get_queryset(self):
        request = self.context['request']
        return request.user.domains
class TokenDomainPolicySerializer(serializers.ModelSerializer):
    """Serializes a token's per-domain permission policy."""
    # `domain` may be null (default policy); choices are limited to the user's domains.
    domain = DomainSlugRelatedField(allow_null=True, slug_field='name')

    class Meta:
        model = models.TokenDomainPolicy
        fields = ('domain', 'perm_dyndns', 'perm_rrsets',)

    def to_internal_value(self, data):
        # Attach the token identified by the view's `token_id` URL kwarg,
        # restricted to tokens owned by the requesting user.
        return {**super().to_internal_value(data),
                'token': self.context['request'].user.token_set.get(id=self.context['view'].kwargs['token_id'])}

    def save(self, **kwargs):
        try:
            return super().save(**kwargs)
        except django.core.exceptions.ValidationError as exc:
            # Translate model-level validation errors (e.g. policy precedence
            # conflicts) into DRF validation errors.
            raise serializers.ValidationError(exc.message_dict, code='precedence')
class RequiredOnPartialUpdateCharField(serializers.CharField):
    """
    This field is always required, even for partial updates (e.g. using PATCH).
    """
    def validate_empty_values(self, data):
        # `serializers.empty` indicates the key was absent from the input,
        # which DRF would normally tolerate for partial updates.
        if data is serializers.empty:
            self.fail('required')
        return super().validate_empty_values(data)
class Validator:
    """Minimal validator base class; subclasses implement __call__."""

    message = 'This field did not pass validation.'

    def __init__(self, message=None):
        # State that serializer machinery may fill in later.
        self.field_name = None
        self.instance = None
        # A truthy custom message overrides the class-level default.
        self.message = message or self.message

    def __call__(self, value):
        # Concrete validation logic lives in subclasses.
        raise NotImplementedError

    def __repr__(self):
        return '<%s>' % self.__class__.__name__
class ReadOnlyOnUpdateValidator(Validator):
    """Rejects changes to a field's value once a model instance exists."""
    message = 'Can only be written on create.'
    requires_context = True

    def __call__(self, value, serializer_field):
        # Only reject when an existing instance is present (i.e. update, not
        # create) and the submitted value differs from the stored one.
        field_name = serializer_field.source_attrs[-1]
        instance = getattr(serializer_field.parent, 'instance', None)
        if isinstance(instance, Model) and value != getattr(instance, field_name):
            raise serializers.ValidationError(self.message, code='read-only-on-update')
class ConditionalExistenceModelSerializer(serializers.ModelSerializer):
    """
    Only considers data with certain condition as existing data.
    If the existence condition does not hold, given instances are deleted, and no new instances are created,
    respectively. Also, to_representation and data will return None.
    Contrary, if the existence condition holds, the behavior is the same as DRF's ModelSerializer.
    """

    def exists(self, arg):
        """
        Determine if arg is to be considered existing.
        :param arg: Either a model instance or (possibly invalid!) data object.
        :return: Whether we treat this as non-existing instance.
        """
        raise NotImplementedError

    def to_representation(self, instance):
        # Non-existing instances serialize to None.
        return None if not self.exists(instance) else super().to_representation(instance)

    @property
    def data(self):
        try:
            return super().data
        except TypeError:
            # Raised when to_representation returned None; map to None.
            return None

    def save(self, **kwargs):
        """Create, update, or delete depending on instance and data existence."""
        validated_data = {}
        validated_data.update(self.validated_data)
        validated_data.update(kwargs)

        known_instance = self.instance is not None
        data_exists = self.exists(validated_data)

        # Four disjoint cases: known/unknown instance x existing/non-existing data.
        if known_instance and data_exists:
            self.instance = self.update(self.instance, validated_data)
        elif known_instance and not data_exists:
            self.delete()
        elif not known_instance and data_exists:
            self.instance = self.create(validated_data)
        elif not known_instance and not data_exists:
            pass  # nothing to do

        return self.instance

    def delete(self):
        # Remove the backing model instance.
        self.instance.delete()
class NonBulkOnlyDefault:
    """
    Provides a default value for non-bulk operations only; for bulk
    operations the field is skipped entirely (no value is produced).

    Implementation inspired by CreateOnlyDefault.
    """
    requires_context = True

    def __init__(self, default):
        self.default = default

    def __call__(self, serializer_field):
        # Bulk requests are recognized via the root serializer's `many` flag.
        if getattr(serializer_field.root, 'many', False):
            raise serializers.SkipField()
        default = self.default
        if not callable(default):
            return default
        # Context-aware callables receive the field; plain ones are called bare.
        if getattr(default, 'requires_context', False):
            return default(serializer_field)
        return default()

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, repr(self.default))
class RRSerializer(serializers.ModelSerializer):
    """(De)serializes a single resource record as its bare content string."""
    class Meta:
        model = models.RR
        fields = ('content',)

    def to_internal_value(self, data):
        # Accept only plain strings; wrap them into the model's field dict.
        if not isinstance(data, str):
            raise serializers.ValidationError('Must be a string.', code='must-be-a-string')
        return super().to_internal_value({'content': data})

    def to_representation(self, instance):
        # Flatten the RR object to its content string.
        return instance.content
class RRsetListSerializer(serializers.ListSerializer):
    """Bulk serializer for RRsets: validates (subname, type) uniqueness and
    CNAME exclusivity across the request, and applies create/update/delete
    semantics based on record emptiness."""
    default_error_messages = {
        **serializers.Serializer.default_error_messages,
        **serializers.ListSerializer.default_error_messages,
        **{'not_a_list': 'Expected a list of items but got {input_type}.'},
    }

    @staticmethod
    def _key(data_item):
        # Identity of an RRset within a request: (subname, type).
        return data_item.get('subname'), data_item.get('type')

    @staticmethod
    def _types_by_position_string(conflicting_indices_by_type):
        # Build a human-readable summary like "0 (A, MX), database (TXT)"
        # describing where conflicting RRset types were found.
        types_by_position = {}
        for type_, conflict_positions in conflicting_indices_by_type.items():
            for position in conflict_positions:
                types_by_position.setdefault(position, []).append(type_)
        # Sort by position, None at the end
        types_by_position = dict(sorted(types_by_position.items(), key=lambda x: (x[0] is None, x)))
        # `None` stands for conflicts with RRsets already in the database.
        db_conflicts = types_by_position.pop(None, None)
        if db_conflicts: types_by_position['database'] = db_conflicts
        for position, types in types_by_position.items():
            types_by_position[position] = ', '.join(sorted(types))
        types_by_position = [f'{position} ({types})' for position, types in types_by_position.items()]
        return ', '.join(types_by_position)

    def to_internal_value(self, data):
        """Validate the list as a whole: per-item validation plus cross-item
        uniqueness and CNAME exclusivity checks."""
        if not isinstance(data, list):
            message = self.error_messages['not_a_list'].format(input_type=type(data).__name__)
            raise serializers.ValidationError({api_settings.NON_FIELD_ERRORS_KEY: [message]}, code='not_a_list')

        if not self.allow_empty and len(data) == 0:
            if self.parent and self.partial:
                raise serializers.SkipField()
            else:
                self.fail('empty')

        ret = []
        errors = []
        partial = self.partial

        # build look-up objects for instances and data, so we can look them up with their keys
        try:
            known_instances = {(x.subname, x.type): x for x in self.instance}
        except TypeError:  # in case self.instance is None (as during POST)
            known_instances = {}

        indices = {}
        for idx, item in enumerate(data):
            # Validate item type before using anything from it
            if not isinstance(item, dict):
                self.fail('invalid', datatype=type(item).__name__)
            s, t = self._key(item)  # subname, type

            # Construct an index of the RRsets in `data` by `s` and `t`. As (subname, type) may be given multiple times
            # (although invalid), we make indices[s][t] a set to properly keep track. We also check and record RRsets
            # which are known in the database (once per subname), using index `None` (for checking CNAME exclusivity).
            if s not in indices:
                types = self.child.domain.rrset_set.filter(subname=s).values_list('type', flat=True)
                indices[s] = {type_: {None} for type_ in types}

            items = indices[s].setdefault(t, set())
            items.add(idx)

        # A copy of the index with "to-be-deleted" RRsets (empty records) removed,
        # used for CNAME exclusivity checks below.
        collapsed_indices = copy.deepcopy(indices)
        for idx, item in enumerate(data):
            if item.get('records') == []:
                s, t = self._key(item)
                collapsed_indices[s][t] -= {idx, None}

        # Iterate over all rows in the data given
        for idx, item in enumerate(data):
            try:
                # see if other rows have the same key
                s, t = self._key(item)
                data_indices = indices[s][t] - {None}
                if len(data_indices) > 1:
                    raise serializers.ValidationError({
                        'non_field_errors': [
                            'Same subname and type as in position(s) %s, but must be unique.' %
                            ', '.join(map(str, data_indices - {idx}))
                        ]
                    })

                # see if other rows violate CNAME exclusivity
                if item.get('records') != []:
                    conflicting_indices_by_type = {k: v for k, v in collapsed_indices[s].items()
                                                   if (k == 'CNAME') != (t == 'CNAME')}
                    if any(conflicting_indices_by_type.values()):
                        types_by_position = self._types_by_position_string(conflicting_indices_by_type)
                        raise serializers.ValidationError({
                            'non_field_errors': [
                                f'RRset with conflicting type present: {types_by_position}.'
                                ' (No other RRsets are allowed alongside CNAME.)'
                            ]
                        })

                # determine if this is a partial update (i.e. PATCH):
                # we allow partial update if a partial update method (i.e. PATCH) is used, as indicated by self.partial,
                # and if this is not actually a create request because it is unknown and nonempty
                unknown = self._key(item) not in known_instances.keys()
                nonempty = item.get('records', None) != []
                self.partial = partial and not (unknown and nonempty)
                self.child.instance = known_instances.get(self._key(item), None)

                # with partial value and instance in place, let the validation begin!
                validated = self.child.run_validation(item)
            except serializers.ValidationError as exc:
                errors.append(exc.detail)
            else:
                ret.append(validated)
                errors.append({})

        self.partial = partial

        if any(errors):
            raise serializers.ValidationError(errors)

        return ret

    def update(self, instance, validated_data):
        """
        Creates, updates and deletes RRsets according to the validated_data given. Relevant instances must be passed as
        a queryset in the `instance` argument.

        RRsets that appear in `instance` are considered "known", other RRsets are considered "unknown". RRsets that
        appear in `validated_data` with records == [] are considered empty, otherwise non-empty.

        The update proceeds as follows:
        1. All unknown, non-empty RRsets are created.
        2. All known, non-empty RRsets are updated.
        3. All known, empty RRsets are deleted.
        4. Unknown, empty RRsets will not cause any action.

        Rationale:
        As both "known"/"unknown" and "empty"/"non-empty" are binary partitions on `everything`, the combination of
        both partitions `everything` in four disjoint subsets. Hence, every RRset in `everything` is taken care of.

                   empty   |  non-empty
        ------- | -------- | -----------
        known   |  delete  |   update
        unknown |  no-op   |   create

        :param instance: QuerySet of relevant RRset objects, i.e. the Django.Model subclass instances. Relevant are all
        instances that are referenced in `validated_data`. If a referenced RRset is missing from instances, it will be
        considered unknown and hence be created. This may cause a database integrity error. If an RRset is given, but
        not relevant (i.e. not referred to by `validated_data`), a ValueError will be raised.
        :param validated_data: List of RRset data objects, i.e. dictionaries.
        :return: List of RRset objects (Django.Model subclass) that have been created or updated.
        """
        def is_empty(data_item):
            return data_item.get('records', None) == []

        query = Q(pk__in=[])  # start out with an always empty query, see https://stackoverflow.com/q/35893867/6867099
        for item in validated_data:
            query |= Q(type=item['type'], subname=item['subname'])  # validation has ensured these fields exist
        instance = instance.filter(query)

        instance_index = {(rrset.subname, rrset.type): rrset for rrset in instance}
        data_index = {self._key(data): data for data in validated_data}

        if data_index.keys() | instance_index.keys() != data_index.keys():
            raise ValueError('Given set of known RRsets (`instance`) is not a subset of RRsets referred to in'
                             ' `validated_data`. While this would produce a correct result, this is illegal due to its'
                             ' inefficiency.')

        everything = instance_index.keys() | data_index.keys()
        known = instance_index.keys()
        unknown = everything - known
        # noinspection PyShadowingNames
        empty = {self._key(data) for data in validated_data if is_empty(data)}
        nonempty = everything - empty

        # noinspection PyUnusedLocal
        noop = unknown & empty
        created = unknown & nonempty
        updated = known & nonempty
        deleted = known & empty

        ret = []

        # The above algorithm makes sure that created, updated, and deleted are disjoint. Thus, no "override cases"
        # (such as: an RRset should be updated and delete, what should be applied last?) need to be considered.
        # We apply deletion first to get any possible CNAME exclusivity collisions out of the way.
        for subname, type_ in deleted:
            instance_index[(subname, type_)].delete()

        for subname, type_ in created:
            ret.append(self.child.create(
                validated_data=data_index[(subname, type_)]
            ))

        for subname, type_ in updated:
            ret.append(self.child.update(
                instance=instance_index[(subname, type_)],
                validated_data=data_index[(subname, type_)]
            ))

        return ret

    def save(self, **kwargs):
        # Default the target domain to the child serializer's domain.
        kwargs.setdefault('domain', self.child.domain)
        return super().save(**kwargs)
class RRsetSerializer(ConditionalExistenceModelSerializer):
    """Serializes a single RRset; an RRset with records == [] is treated as
    non-existing (triggering deletion, cf. ConditionalExistenceModelSerializer)."""
    domain = serializers.SlugRelatedField(read_only=True, slug_field='name')
    records = RRSerializer(many=True)
    ttl = serializers.IntegerField(max_value=86400)

    class Meta:
        model = models.RRset
        fields = ('created', 'domain', 'subname', 'name', 'records', 'ttl', 'type', 'touched',)
        extra_kwargs = {
            'subname': {'required': False, 'default': NonBulkOnlyDefault('')}
        }
        list_serializer_class = RRsetListSerializer

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        try:
            self.domain = self.context['domain']
        except KeyError:
            raise ValueError('RRsetSerializer() must be given a domain object (to validate uniqueness constraints).')
        self.minimum_ttl = self.context.get('minimum_ttl', self.domain.minimum_ttl)

    def get_fields(self):
        # subname/type are immutable after creation; TTL must respect the
        # domain's (or context-provided) minimum.
        fields = super().get_fields()
        fields['subname'].validators.append(ReadOnlyOnUpdateValidator())
        fields['type'].validators.append(ReadOnlyOnUpdateValidator())
        fields['ttl'].validators.append(MinValueValidator(limit_value=self.minimum_ttl))
        return fields

    def get_validators(self):
        # Enforce per-domain (subname, type) uniqueness and CNAME exclusivity
        # against the database contents.
        return [
            UniqueTogetherValidator(
                self.domain.rrset_set,
                ('subname', 'type'),
                message='Another RRset with the same subdomain and type exists for this domain.',
            ),
            validators.ExclusionConstraintValidator(
                self.domain.rrset_set,
                ('subname',),
                exclusion_condition=('type', 'CNAME',),
                message='RRset with conflicting type present: database ({types}).'
                        ' (No other RRsets are allowed alongside CNAME.)',
            ),
        ]

    @staticmethod
    def validate_type(value):
        if value not in models.RR_SET_TYPES_MANAGEABLE:
            # user cannot manage this type, let's try to tell her the reason
            if value in models.RR_SET_TYPES_AUTOMATIC:
                raise serializers.ValidationError(f'You cannot tinker with the {value} RR set. It is managed '
                                                  f'automatically.')
            elif value.startswith('TYPE'):
                raise serializers.ValidationError('Generic type format is not supported.')
            else:
                raise serializers.ValidationError(f'The {value} RR set type is currently unsupported.')
        return value

    def validate_records(self, value):
        # `records` is usually allowed to be empty (for idempotent delete), except for POST requests which are intended
        # for RRset creation only. We use the fact that DRF generic views pass the request in the serializer context.
        request = self.context.get('request')
        if request and request.method == 'POST' and not value:
            raise serializers.ValidationError('This field must not be empty when using POST.')
        return value

    def validate(self, attrs):
        """Canonicalize record contents and enforce a conservative size limit."""
        if 'records' in attrs:
            try:
                type_ = attrs['type']
            except KeyError:  # on the RRsetDetail endpoint, the type is not in attrs
                type_ = self.instance.type

            try:
                attrs['records'] = [{'content': models.RR.canonical_presentation_format(rr['content'], type_)}
                                    for rr in attrs['records']]
            except ValueError as ex:
                raise serializers.ValidationError(str(ex))

            # There is a 12 byte baseline requirement per record, c.f.
            # https://lists.isc.org/pipermail/bind-users/2008-April/070137.html
            # There also seems to be a 32 byte (?) baseline requirement per RRset, plus the qname length, see
            # https://lists.isc.org/pipermail/bind-users/2008-April/070148.html
            # The binary length of the record depends actually on the type, but it's never longer than vanilla len()
            qname = models.RRset.construct_name(attrs.get('subname', ''), self.domain.name)
            conservative_total_length = 32 + len(qname) + sum(12 + len(rr['content']) for rr in attrs['records'])

            # Add some leeway for RRSIG record (really ~110 bytes) and other data we have not thought of
            conservative_total_length += 256

            excess_length = conservative_total_length - 65535  # max response size
            if excess_length > 0:
                raise serializers.ValidationError(f'Total length of RRset exceeds limit by {excess_length} bytes.',
                                                  code='max_length')

        return attrs

    def exists(self, arg):
        # An RRset "exists" iff it has records (see ConditionalExistenceModelSerializer).
        if isinstance(arg, models.RRset):
            return arg.records.exists()
        else:
            return bool(arg.get('records')) if 'records' in arg.keys() else True

    def create(self, validated_data):
        rrs_data = validated_data.pop('records')
        rrset = models.RRset.objects.create(**validated_data)
        self._set_all_record_contents(rrset, rrs_data)
        return rrset

    def update(self, instance: models.RRset, validated_data):
        rrs_data = validated_data.pop('records', None)
        if rrs_data is not None:
            self._set_all_record_contents(instance, rrs_data)

        ttl = validated_data.pop('ttl', None)
        if ttl and instance.ttl != ttl:
            instance.ttl = ttl
            instance.save()  # also updates instance.touched
        else:
            # Update instance.touched without triggering post-save signal (no pdns action required)
            models.RRset.objects.filter(pk=instance.pk).update(touched=timezone.now())

        return instance

    def save(self, **kwargs):
        # Default the target domain to the serializer's domain context.
        kwargs.setdefault('domain', self.domain)
        return super().save(**kwargs)

    @staticmethod
    def _set_all_record_contents(rrset: models.RRset, rrs):
        """
        Updates this RR set's resource records, discarding any old values.

        :param rrset: the RRset at which we overwrite all RRs
        :param rrs: list of RR representations
        """
        record_contents = [rr['content'] for rr in rrs]
        try:
            rrset.save_records(record_contents)
        except django.core.exceptions.ValidationError as e:
            raise serializers.ValidationError(e.messages, code='record-content')
class DomainSerializer(serializers.ModelSerializer):
    """Serializes a Domain; DNSSEC keys are exposed only on demand."""
    default_error_messages = {
        **serializers.Serializer.default_error_messages,
        'name_unavailable': 'This domain name conflicts with an existing zone, or is disallowed by policy.',
    }

    class Meta:
        model = models.Domain
        fields = ('created', 'published', 'name', 'keys', 'minimum_ttl', 'touched',)
        read_only_fields = ('published', 'minimum_ttl',)
        extra_kwargs = {
            'name': {'trim_whitespace': False},
        }

    def __init__(self, *args, include_keys=False, **kwargs):
        # include_keys: also serialize the domain's `keys` attribute.
        self.include_keys = include_keys
        return super().__init__(*args, **kwargs)

    def get_fields(self):
        fields = super().get_fields()
        if not self.include_keys:
            fields.pop('keys')
        # The domain name cannot be changed after creation.
        fields['name'].validators.append(ReadOnlyOnUpdateValidator())
        return fields

    def validate_name(self, value):
        # Reject names that conflict with existing zones or registration policy.
        if not models.Domain(name=value, owner=self.context['request'].user).is_registrable():
            raise serializers.ValidationError(self.default_error_messages['name_unavailable'], code='name_unavailable')
        return value

    def create(self, validated_data):
        # Locally registrable domains (e.g. dynDNS subdomains) get a lower
        # default minimum TTL unless one was provided.
        if 'minimum_ttl' not in validated_data and models.Domain(name=validated_data['name']).is_locally_registrable:
            validated_data.update(minimum_ttl=60)
        return super().create(validated_data)
class DonationSerializer(serializers.ModelSerializer):
    """Serializes donation (direct debit) data; whitespace is stripped from IBAN/BIC."""
    class Meta:
        model = models.Donation
        fields = ('name', 'iban', 'bic', 'amount', 'message', 'email', 'mref', 'interval')
        read_only_fields = ('mref',)

    @staticmethod
    def validate_bic(value):
        # remove all whitespace characters
        return re.sub(r'[\s]', '', value)

    @staticmethod
    def validate_iban(value):
        # remove all whitespace characters
        return re.sub(r'[\s]', '', value)

    def create(self, validated_data):
        # NOTE(review): the instance is deliberately NOT saved here — presumably
        # the caller persists or otherwise processes it; confirm against the view.
        return self.Meta.model(**validated_data)
class UserSerializer(serializers.ModelSerializer):
    """Serializes User accounts; the password is write-only and Django-validated."""
    class Meta:
        model = models.User
        fields = ('created', 'email', 'id', 'limit_domains', 'password',)
        extra_kwargs = {
            'password': {
                'write_only': True,  # Do not expose password field
                'allow_null': True,
            }
        }

    def validate_password(self, value):
        """Run Django's password validators; None (no password set) is allowed."""
        if value is not None:
            validate_password(value)
        return value

    def create(self, validated_data):
        # create_user takes care of password hashing
        return models.User.objects.create_user(**validated_data)
class RegisterAccountSerializer(UserSerializer):
    """Registration payload: credentials plus an optional domain and captcha solution."""
    # NOTE(review): `validators=` receives models.validate_domain_name directly;
    # presumably that name is a list of validators — verify against models.
    domain = serializers.CharField(required=False, validators=models.validate_domain_name)
    captcha = CaptchaSolutionSerializer(required=False)

    class Meta:
        model = UserSerializer.Meta.model
        fields = ('email', 'password', 'domain', 'captcha')
        extra_kwargs = UserSerializer.Meta.extra_kwargs

    def validate_domain(self, value):
        """Check availability via DomainSerializer, mapping any failure to 'name_unavailable'."""
        serializer = DomainSerializer(data=dict(name=value), context=self.context)
        try:
            serializer.is_valid(raise_exception=True)
        except serializers.ValidationError:
            raise serializers.ValidationError(serializer.default_error_messages['name_unavailable'],
                                              code='name_unavailable')
        return value

    def create(self, validated_data):
        """Create the user; 'domain' is not a model field and is handled elsewhere."""
        validated_data.pop('domain', None)
        # If validated_data['captcha'] exists, the captcha was also validated, so we can set the user to verified
        if 'captcha' in validated_data:
            validated_data.pop('captcha')
            validated_data['needs_captcha'] = False
        return super().create(validated_data)
class EmailSerializer(serializers.Serializer):
    """Accepts a single email address."""
    email = serializers.EmailField()
class EmailPasswordSerializer(EmailSerializer):
    """Accepts an email address together with a password (login-style payload)."""
    password = serializers.CharField()
class ChangeEmailSerializer(serializers.Serializer):
    """Accepts a new email address for the authenticated user."""
    new_email = serializers.EmailField()

    def validate_new_email(self, value):
        """Reject the request if the new address equals the current one."""
        if value == self.context['request'].user.email:
            raise serializers.ValidationError('Email address unchanged.')
        return value
class ResetPasswordSerializer(EmailSerializer):
    """Password reset request; always requires a solved captcha."""
    captcha = CaptchaSolutionSerializer(required=True)
class CustomFieldNameUniqueValidator(UniqueValidator):
    """
    Behaves like rest_framework's UniqueValidator, but additionally lets the caller
    choose which model field the value is checked against.

    (UniqueValidator always queries against the serializer field's own name;
    only the lookup type can be customized there.)
    """
    def __init__(self, queryset, message=None, lookup='exact', lookup_field=None):
        self.lookup_field = lookup_field
        super().__init__(queryset, message, lookup)

    def filter_queryset(self, value, queryset, field_name):
        """
        Restrict the queryset to all instances matching the given value on the lookup field.
        """
        query_field = self.lookup_field or field_name
        filter_kwargs = {f'{query_field}__{self.lookup}': value}
        return qs_filter(queryset, **filter_kwargs)
class AuthenticatedActionSerializer(serializers.ModelSerializer):
    """(De)serializes authenticated actions as opaque, encrypted one-time codes."""
    state = serializers.CharField()  # serializer read-write, but model read-only field
    # how long a code stays valid; subclasses may override (None = no expiry)
    validity_period = settings.VALIDITY_PERIOD_VERIFICATION_SIGNATURE

    class Meta:
        model = models.AuthenticatedAction
        fields = ('state',)

    @classmethod
    def _pack_code(cls, data):
        """Encrypt a dict into a compact code string (base64 padding stripped)."""
        payload = json.dumps(data).encode()
        code = crypto.encrypt(payload, context='desecapi.serializers.AuthenticatedActionSerializer').decode()
        return code.rstrip('=')

    @classmethod
    def _unpack_code(cls, code, *, ttl):
        """Decrypt a code produced by _pack_code.

        :param ttl: maximum code age in seconds, or None for no expiry
        :raises ValueError: if the code cannot be decrypted or decoded
        """
        code += -len(code) % 4 * '='  # restore the base64 padding stripped by _pack_code
        try:
            payload = crypto.decrypt(code.encode(), context='desecapi.serializers.AuthenticatedActionSerializer',
                                     ttl=ttl)
            return json.loads(payload.decode())
        except (TypeError, UnicodeDecodeError, UnicodeEncodeError, json.JSONDecodeError, binascii.Error):
            raise ValueError

    def to_representation(self, instance: models.AuthenticatedUserAction):
        # do the regular business
        data = super().to_representation(instance)
        # encode into single string
        return {'code': self._pack_code(data)}

    def to_internal_value(self, data):
        """Decode the code from the serializer context, then merge in extra fields from `data`."""
        data = data.copy()  # avoid side effect from .pop
        # calculate code TTL
        validity_period = self.context.get('validity_period', self.validity_period)
        try:
            ttl = validity_period.total_seconds()
        except AttributeError:
            ttl = None  # infinite
        # decode from single string
        try:
            unpacked_data = self._unpack_code(self.context['code'], ttl=ttl)
        except KeyError:
            raise serializers.ValidationError({'code': ['This field is required.']})
        except ValueError:
            if ttl is None:
                msg = 'This code is invalid.'
            else:
                msg = f'This code is invalid, possibly because it expired (validity: {validity_period}).'
            raise serializers.ValidationError({api_settings.NON_FIELD_ERRORS_KEY: msg})
        # add extra fields added by the user
        unpacked_data.update(**data)
        # do the regular business
        return super().to_internal_value(unpacked_data)

    def act(self):
        """Execute the deserialized action and return it."""
        self.instance.act()
        return self.instance

    def save(self, **kwargs):
        # authenticated actions are never persisted
        raise ValueError
class AuthenticatedBasicUserActionSerializer(AuthenticatedActionSerializer):
    """Authenticated action that targets a specific user (referenced by UUID)."""
    user = serializers.PrimaryKeyRelatedField(
        queryset=models.User.objects.all(),
        error_messages={'does_not_exist': 'This user does not exist.'},
        pk_field=serializers.UUIDField()
    )

    class Meta:
        model = models.AuthenticatedBasicUserAction
        fields = AuthenticatedActionSerializer.Meta.fields + ('user',)
class AuthenticatedActivateUserActionSerializer(AuthenticatedBasicUserActionSerializer):
    """Account activation action; optionally carries a domain and a captcha solution."""
    captcha = CaptchaSolutionSerializer(required=False)

    class Meta(AuthenticatedBasicUserActionSerializer.Meta):
        model = models.AuthenticatedActivateUserAction
        fields = AuthenticatedBasicUserActionSerializer.Meta.fields + ('captcha', 'domain',)
        extra_kwargs = {
            'domain': {'default': None, 'allow_null': True}
        }

    def validate(self, attrs):
        """Require a captcha solution whenever the target user still needs one."""
        try:
            attrs.pop('captcha')  # remove captcha from internal value to avoid passing to Meta.model(**kwargs)
        except KeyError:
            # no captcha given: acceptable only if the user does not need one
            if attrs['user'].needs_captcha:
                raise serializers.ValidationError({'captcha': fields.Field.default_error_messages['required']})
        return attrs
class AuthenticatedChangeEmailUserActionSerializer(AuthenticatedBasicUserActionSerializer):
    """Email change action; the new address must not belong to another account."""
    new_email = serializers.EmailField(
        validators=[
            CustomFieldNameUniqueValidator(
                queryset=models.User.objects.all(),
                lookup_field='email',
                message='You already have another account with this email address.',
            )
        ],
        required=True,
    )

    class Meta(AuthenticatedBasicUserActionSerializer.Meta):
        model = models.AuthenticatedChangeEmailUserAction
        fields = AuthenticatedBasicUserActionSerializer.Meta.fields + ('new_email',)
class AuthenticatedResetPasswordUserActionSerializer(AuthenticatedBasicUserActionSerializer):
    """Password reset action; carries the new password (never exposed in responses)."""
    new_password = serializers.CharField(write_only=True)

    class Meta(AuthenticatedBasicUserActionSerializer.Meta):
        model = models.AuthenticatedResetPasswordUserAction
        fields = AuthenticatedBasicUserActionSerializer.Meta.fields + ('new_password',)
class AuthenticatedDeleteUserActionSerializer(AuthenticatedBasicUserActionSerializer):
    """Account deletion action; needs no fields beyond the target user."""
    class Meta(AuthenticatedBasicUserActionSerializer.Meta):
        model = models.AuthenticatedDeleteUserAction
class AuthenticatedDomainBasicUserActionSerializer(AuthenticatedBasicUserActionSerializer):
    """Authenticated user action that additionally targets a specific domain."""
    domain = serializers.PrimaryKeyRelatedField(
        queryset=models.Domain.objects.all(),
        error_messages={'does_not_exist': 'This domain does not exist.'},
    )

    class Meta:
        model = models.AuthenticatedDomainBasicUserAction
        fields = AuthenticatedBasicUserActionSerializer.Meta.fields + ('domain',)
class AuthenticatedRenewDomainBasicUserActionSerializer(AuthenticatedDomainBasicUserActionSerializer):
    """Domain renewal action; codes for it never expire (validity_period = None)."""
    validity_period = None

    class Meta(AuthenticatedDomainBasicUserActionSerializer.Meta):
        model = models.AuthenticatedRenewDomainBasicUserAction
| desec-io/desec-stack | api/desecapi/serializers.py | Python | mit | 34,993 | [
"TINKER"
] | e8278ba3d175da5dc3afcf18e1756e9275a558689635f8bc2aaf532b1a1f249c |
""" Threaded implementation of services
"""
import os
import time
import re
import threading
import zipfile
import zlib
import DIRAC
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData, ConfigurationData
from DIRAC.ConfigurationSystem.private.Refresher import gRefresher
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Core.DISET.RPCClient import RPCClient
__RCSID__ = "$Id$"
class ServiceInterface( threading.Thread ):
  """Threaded side of the DIRAC Configuration Service.

  On the master it owns the reference configuration and merges/commits remote
  updates; on slaves it auto-refreshes from the master. The thread body
  periodically purges slave servers that stopped pinging.
  """

  def __init__( self, sURL ):
    threading.Thread.__init__( self )
    self.sURL = sURL
    gLogger.info( "Initializing Configuration Service", "URL is %s" % sURL )
    # NOTE(review): this mask is set but not read in this class — presumably
    # consumed by merge helpers elsewhere; confirm before removing.
    self.__modificationsIgnoreMask = [ '/DIRAC/Configuration/Servers', '/DIRAC/Configuration/Version' ]
    gConfigurationData.setAsService()
    if not gConfigurationData.isMaster():
      gLogger.info( "Starting configuration service as slave" )
      gRefresher.autoRefreshAndPublish( self.sURL )
    else:
      gLogger.info( "Starting configuration service as master" )
      gRefresher.disable()
      self.__loadConfigurationData()
    # slave URL -> timestamp of last successful ping
    self.dAliveSlaveServers = {}
    self.__launchCheckSlaves()

  def isMaster( self ):
    """Return whether this server is the configuration master."""
    return gConfigurationData.isMaster()

  def __launchCheckSlaves( self ):
    """Start the daemon thread that purges dead slave servers."""
    gLogger.info( "Starting purge slaves thread" )
    self.setDaemon( 1 )
    self.start()

  def __loadConfigurationData( self ):
    """Load the configuration from disk (master only), creating name/version/server
    entries if they are missing, and persist any newly generated data."""
    mkDir( os.path.join( DIRAC.rootPath, "etc", "csbackup" ) )
    gConfigurationData.loadConfigurationData()
    if gConfigurationData.isMaster():
      bBuiltNewConfiguration = False
      if not gConfigurationData.getName():
        DIRAC.abort( 10, "Missing name for the configuration to be exported!" )
      gConfigurationData.exportName()
      sVersion = gConfigurationData.getVersion()
      if sVersion == "0":
        gLogger.info( "There's no version. Generating a new one" )
        gConfigurationData.generateNewVersion()
        bBuiltNewConfiguration = True
      if self.sURL not in gConfigurationData.getServers():
        gConfigurationData.setServers( self.sURL )
        bBuiltNewConfiguration = True
      gConfigurationData.setMasterServer( self.sURL )
      if bBuiltNewConfiguration:
        gConfigurationData.writeRemoteConfigurationToDisk()

  def __generateNewVersion( self ):
    """Bump the configuration version and persist it (master only)."""
    if gConfigurationData.isMaster():
      gConfigurationData.generateNewVersion()
      gConfigurationData.writeRemoteConfigurationToDisk()

  def publishSlaveServer( self, sSlaveURL ):
    """Register (or refresh the liveness of) a slave server after verifying that
    it answers as a Configuration/Server service."""
    if not gConfigurationData.isMaster():
      return S_ERROR( "Configuration modification is not allowed in this server" )
    gLogger.info( "Pinging slave %s" % sSlaveURL )
    rpcClient = RPCClient( sSlaveURL, timeout = 10, useCertificates = True )
    retVal = rpcClient.ping()
    if not retVal[ 'OK' ]:
      gLogger.info( "Slave %s didn't reply" % sSlaveURL )
      return
    if retVal[ 'Value' ][ 'name' ] != 'Configuration/Server':
      gLogger.info( "Slave %s is not a CS serveR" % sSlaveURL )
      return
    bNewSlave = False
    if sSlaveURL not in self.dAliveSlaveServers:
      bNewSlave = True
      gLogger.info( "New slave registered", sSlaveURL )
    self.dAliveSlaveServers[ sSlaveURL ] = time.time()
    if bNewSlave:
      # publish the updated server list with a new configuration version
      gConfigurationData.setServers( "%s, %s" % ( self.sURL,
                                                  ", ".join( self.dAliveSlaveServers.keys() ) ) )
      self.__generateNewVersion()

  def __checkSlavesStatus( self, forceWriteConfiguration = False ):
    """Drop slaves that have not pinged within the grace time; republish the
    server list if it changed (or if forced)."""
    gLogger.info( "Checking status of slave servers" )
    iGraceTime = gConfigurationData.getSlavesGraceTime()
    # iterate over a copy: entries may be deleted while looping (required on Python 3)
    lSlaveURLs = list( self.dAliveSlaveServers )
    bModifiedSlaveServers = False
    for sSlaveURL in lSlaveURLs:
      if time.time() - self.dAliveSlaveServers[ sSlaveURL ] > iGraceTime:
        gLogger.info( "Found dead slave", sSlaveURL )
        del self.dAliveSlaveServers[ sSlaveURL ]
        bModifiedSlaveServers = True
    if bModifiedSlaveServers or forceWriteConfiguration:
      gConfigurationData.setServers( "%s, %s" % ( self.sURL,
                                                  ", ".join( self.dAliveSlaveServers.keys() ) ) )
      self.__generateNewVersion()

  def getCompressedConfiguration( self ):
    """Return the current configuration in compressed form.

    Fix: the original computed the data but never returned it (always None).
    """
    return gConfigurationData.getCompressedData()

  def updateConfiguration( self, sBuffer, commiter = "", updateVersionOption = False ):
    """Commit a new configuration (master only).

    :param sBuffer: compressed configuration data sent by the client
    :param commiter: identifier recorded together with the new version
    :param updateVersionOption: if True, overwrite the remote version with the
                                local one (disables auto-merging)
    :return: S_OK / S_ERROR
    """
    if not gConfigurationData.isMaster():
      return S_ERROR( "Configuration modification is not allowed in this server" )
    # Load the data in a ConfigurationData object
    oRemoteConfData = ConfigurationData( False )
    oRemoteConfData.loadRemoteCFGFromCompressedMem( sBuffer )
    if updateVersionOption:
      oRemoteConfData.setVersion( gConfigurationData.getVersion() )
    # Test that remote and new versions are the same
    sRemoteVersion = oRemoteConfData.getVersion()
    sLocalVersion = gConfigurationData.getVersion()
    gLogger.info( "Checking versions\nremote: %s\nlocal: %s" % ( sRemoteVersion, sLocalVersion ) )
    if sRemoteVersion != sLocalVersion:
      if not gConfigurationData.mergingEnabled():
        return S_ERROR( "Local and remote versions differ (%s vs %s). Cannot commit." % ( sLocalVersion, sRemoteVersion ) )
      else:
        gLogger.info( "AutoMerging new data!" )
        if updateVersionOption:
          return S_ERROR( "Cannot AutoMerge! version was overwritten" )
        result = self.__mergeIndependentUpdates( oRemoteConfData )
        if not result[ 'OK' ]:
          gLogger.warn( "Could not AutoMerge!", result[ 'Message' ] )
          return S_ERROR( "AutoMerge failed: %s" % result[ 'Message' ] )
        requestedRemoteCFG = result[ 'Value' ]
        gLogger.info( "AutoMerge successful!" )
        oRemoteConfData.setRemoteCFG( requestedRemoteCFG )
    # Test that configuration names are the same
    sRemoteName = oRemoteConfData.getName()
    sLocalName = gConfigurationData.getName()
    if sRemoteName != sLocalName:
      return S_ERROR( "Names differ: Server is %s and remote is %s" % ( sLocalName, sRemoteName ) )
    # Update and generate a new version
    gLogger.info( "Committing new data..." )
    gConfigurationData.lock()
    gLogger.info( "Setting the new CFG" )
    gConfigurationData.setRemoteCFG( oRemoteConfData.getRemoteCFG() )
    gConfigurationData.unlock()
    gLogger.info( "Generating new version" )
    gConfigurationData.generateNewVersion()
    # self.__checkSlavesStatus( forceWriteConfiguration = True )
    gLogger.info( "Writing new version to disk!" )
    retVal = gConfigurationData.writeRemoteConfigurationToDisk( "%s@%s" % ( commiter, gConfigurationData.getVersion() ) )
    gLogger.info( "New version it is!" )
    return retVal

  def getCompressedConfigurationData( self ):
    """Return the current configuration in compressed form."""
    return gConfigurationData.getCompressedData()

  def getVersion( self ):
    """Return the current configuration version string."""
    return gConfigurationData.getVersion()

  def getCommitHistory( self ):
    """Return the list of [commiter, version] pairs found in the backup directory."""
    files = self.__getCfgBackups( gConfigurationData.getBackupDir() )
    backups = [ ".".join( fileName.split( "." )[1:-1] ).split( "@" ) for fileName in files ]
    return backups

  def run( self ):
    """Thread body: periodically purge slave servers that missed the grace time."""
    while True:
      iWaitTime = gConfigurationData.getSlavesGraceTime()
      time.sleep( iWaitTime )
      self.__checkSlavesStatus()

  def getVersionContents( self, date ):
    """Return the compressed contents of the backup matching the given version/date."""
    backupDir = gConfigurationData.getBackupDir()
    files = self.__getCfgBackups( backupDir, date )
    for fileName in files:
      with zipfile.ZipFile( "%s/%s" % ( backupDir, fileName ), "r" ) as zFile:
        cfgName = zFile.namelist()[0]
        retVal = S_OK( zlib.compress( zFile.read( cfgName ), 9 ) )
      return retVal
    return S_ERROR( "Version %s does not exist" % date )

  def __getCfgBackups( self, basePath, date = "", subPath = "" ):
    """Recursively list backup zip files (newest first) whose name matches the
    configuration name and, optionally, the given date/version string."""
    rs = re.compile( r"^%s\..*%s.*\.zip$" % ( gConfigurationData.getName(), date ) )
    fsEntries = os.listdir( "%s/%s" % ( basePath, subPath ) )
    fsEntries.sort( reverse = True )
    backupsList = []
    for entry in fsEntries:
      entryPath = "%s/%s/%s" % ( basePath, subPath, entry )
      if os.path.isdir( entryPath ):
        backupsList.extend( self.__getCfgBackups( basePath, date, "%s/%s" % ( subPath, entry ) ) )
      elif os.path.isfile( entryPath ):
        if rs.search( entry ):
          backupsList.append( "%s/%s" % ( subPath, entry ) )
    return backupsList

  def __getPreviousCFG( self, oRemoteConfData ):
    """Retrieve, from the backups, the CFG corresponding to the version the remote
    commiter started from. Needed to compute their set of modifications."""
    remoteExpectedVersion = oRemoteConfData.getVersion()
    backupsList = self.__getCfgBackups( gConfigurationData.getBackupDir(), date = oRemoteConfData.getVersion() )
    if not backupsList:
      return S_ERROR( "Could not AutoMerge. Could not retrieve original commiter's version" )
    prevRemoteConfData = ConfigurationData()
    backFile = backupsList[0]
    if backFile[0] == "/":
      backFile = os.path.join( gConfigurationData.getBackupDir(), backFile[1:] )
    try:
      prevRemoteConfData.loadConfigurationData( backFile )
    except Exception as e:
      return S_ERROR( "Could not load original commiter's version: %s" % str( e ) )
    gLogger.info( "Loaded client original version %s" % prevRemoteConfData.getVersion() )
    return S_OK( prevRemoteConfData.getRemoteCFG() )

  def _checkConflictsInModifications( self, realModList, reqModList, parentSection = "" ):
    """Check whether the requested modifications (reqModList) touch anything that
    was modified on the server in the meantime (realModList). Returns S_ERROR on
    the first conflict found, S_OK otherwise."""
    # section-level modifications already applied on the server, by section name
    realModifiedSections = dict( [ ( modAc[1], modAc[3] ) for modAc in realModList if modAc[0].find( 'Sec' ) == len( modAc[0] ) - 3 ] )
    reqOptionsModificationList = dict( [ ( modAc[1], modAc[3] ) for modAc in reqModList if modAc[0].find( 'Opt' ) == len( modAc[0] ) - 3 ] )
    optionModRequests = 0
    for modAc in reqModList:
      action = modAc[0]
      objectName = modAc[1]
      if action == "addSec":
        if objectName in realModifiedSections:
          return S_ERROR( "Section %s/%s already exists" % ( parentSection, objectName ) )
      elif action == "delSec":
        if objectName in realModifiedSections:
          return S_ERROR( "Section %s/%s cannot be deleted. It has been modified." % ( parentSection, objectName ) )
      elif action == "modSec":
        if objectName in realModifiedSections:
          result = self._checkConflictsInModifications( realModifiedSections[ objectName ],
                                                        modAc[3], "%s/%s" % ( parentSection, objectName ) )
          if not result[ 'OK' ]:
            return result
    for modAc in realModList:
      action = modAc[0]
      objectName = modAc[1]
      if action.find( "Opt" ) == len( action ) - 3:
        return S_ERROR( "Section %s cannot be merged. Option %s/%s has been modified" % ( parentSection, parentSection, objectName ) )
    return S_OK()

  def __mergeIndependentUpdates( self, oRemoteConfData ):
    """Try to merge the client's changes into the current server configuration by
    comparing both against the client's original base version."""
    # Get all the CFGs
    curSrvCFG = gConfigurationData.getRemoteCFG().clone()
    curCliCFG = oRemoteConfData.getRemoteCFG().clone()
    result = self.__getPreviousCFG( oRemoteConfData )
    if not result[ 'OK' ]:
      return result
    prevCliCFG = result[ 'Value' ]
    # Try to merge curCli with curSrv. To do so we check the updates from
    # prevCli -> curSrv VS prevCli -> curCli
    prevCliToCurCliModList = prevCliCFG.getModifications( curCliCFG )
    prevCliToCurSrvModList = prevCliCFG.getModifications( curSrvCFG )
    result = self._checkConflictsInModifications( prevCliToCurSrvModList,
                                                  prevCliToCurCliModList )
    if not result[ 'OK' ]:
      return S_ERROR( "Cannot AutoMerge: %s" % result[ 'Message' ] )
    # Merge!
    result = curSrvCFG.applyModifications( prevCliToCurCliModList )
    if not result[ 'OK' ]:
      return result
    return S_OK( curSrvCFG )
| Andrew-McNab-UK/DIRAC | ConfigurationSystem/private/ServiceInterface.py | Python | gpl-3.0 | 11,836 | [
"DIRAC"
] | 36ebe85400fc2d618b00452344bf7eb45b7ab1b53f09d387840d40aa4dcf4e00 |
#!/usr/bin/env python
# Test vtkImageChangeInformation: center an image before reslicing so that the
# rotation happens about the image center, then restore the original meta data.
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# In this example, an image is centered at (0,0,0) before a
# rotation is applied to ensure that the rotation occurs about
# the center of the image.
reader = vtk.vtkPNGReader()
reader.SetDataSpacing(0.8,0.8,1.5)
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fullhead15.png")
# first center the image at (0,0,0)
information = vtk.vtkImageChangeInformation()
information.SetInputConnection(reader.GetOutputPort())
information.CenterImageOn()
# reslice with a rotation about z (direction cosines are cos/sin of 30 degrees)
reslice = vtk.vtkImageReslice()
reslice.SetInputConnection(information.GetOutputPort())
reslice.SetResliceAxesDirectionCosines([0.866025,-0.5,0,0.5,0.866025,0,0,0,1])
reslice.SetInterpolationModeToCubic()
# reset the image back to the way it was (you don't have
# to do this, it is just put in as an example)
information2 = vtk.vtkImageChangeInformation()
information2.SetInputConnection(reslice.GetOutputPort())
reader.Update()
information2.SetInformationInputData(reader.GetOutput())
# display the result with a fixed grayscale window/level
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(information2.GetOutputPort())
viewer.SetColorWindow(2000)
viewer.SetColorLevel(1000)
viewer.Render()
# --- end of script --
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/TestChangeInformation.py | Python | gpl-3.0 | 1,250 | [
"VTK"
] | 5413b2c04a2c14f3114a023653def1f15dbf8f6a7c800fd1511b946325d482bf |
#!/usr/bin/python
#----------------------------------------------------------------------------#
# #
# ozz-animation is hosted at http://github.com/guillaumeblanc/ozz-animation #
# and distributed under the MIT License (MIT). #
# #
# Copyright (c) 2015 Guillaume Blanc #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL #
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#----------------------------------------------------------------------------#
# CMake python helper script.
import subprocess
import multiprocessing
import shutil
import sys
import os
import re
from functools import partial
# Build global path variables.
root = os.path.abspath(os.path.join(os.getcwd(), '.'))  # project root = current working directory
build_dir = os.path.join(root, 'build')  # native out-of-source build directory
build_dir_cc = os.path.join(root, 'build-cc')  # Emscripten cross-compilation build directory
cmake_cache_file = os.path.join(build_dir, 'CMakeCache.txt')
config = 'Release'  # currently selected CMake build configuration
generators = {0: 'default'}  # menu of available CMake generators, filled by FindGenerators()
generator = generators[0]  # currently selected generator
emscripten_path = os.environ.get('EMSCRIPTEN')  # None when Emscripten is not set up
def ValidateCMake():
    """Check that the 'cmake' executable can be launched; return True on success."""
    try:
        # Test that cmake can be executed, silently...
        pipe = subprocess.Popen(['cmake'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = pipe.communicate()
    except OSError as e:
        print("CMake is not installed or properly setup. Please visit www.cmake.org.")
        return False
    print("CMake is installed and setup properly.")
    return True
def CheckEmscripten():
    """Check that Emscripten is configured (EMSCRIPTEN env var set, 'emcc' runnable)."""
    if(emscripten_path == None):
        return False
    try:
        # Test that emcc can be executed, silently...
        pipe = subprocess.Popen(['emcc'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = pipe.communicate()
    except OSError as e:
        print("Emscripten is not installed or properly setup.")
        return False
    print("Emscripten is installed and setup properly.")
    return True
def MakeBuildDir(_build_dir = build_dir):
    """Ensure the given out-of-source build directory exists; always returns True."""
    print("Creating out-of-source build directory: \"" + _build_dir + "\".")
    missing = not os.path.exists(_build_dir)
    if missing:
        os.makedirs(_build_dir)
    return True
def CleanBuildDir():
    """Delete both the native and the cross-compilation build directories."""
    print("Deleting out-of-source build directory: \"" + build_dir + "\".")
    if os.path.exists(build_dir):
        shutil.rmtree(build_dir)
    print("Deleting out-of-source cross compilation build directory: \"" + build_dir_cc + "\".")
    if os.path.exists(build_dir_cc):
        shutil.rmtree(build_dir_cc)
    return True
def Configure():
    """Run the CMake generation step for the native build; return True on success.

    On success, the global `generator` is refreshed from the CMake cache.
    """
    # Configure build process.
    print("Configuring build project.")
    options = ['cmake']
    options += ['-D', 'CMAKE_BUILD_TYPE=' + config]
    global generator
    if(generator != 'default'):
        options += ['-G', generator]
    options += [root]
    config_process = subprocess.Popen(options, cwd=build_dir)
    config_process.wait()
    if(config_process.returncode != 0):
        print("Configuration failed.")
        return False
    print("Configuration succeeded.")
    # Updates generator once configuration is complete
    generator = DetectGenerator()
    return True
def ConfigureCC():
    """Run the CMake generation step for the Emscripten cross-compilation build.

    Returns True on success. On success the module-level `generator` is refreshed
    from the CMake cache.
    Fix: the original assigned `generator` without a `global` declaration, so the
    refresh only created a dead local variable.
    """
    # Configure build process.
    print("Configuring cross compilation build project.")
    options = ['cmake']
    options += ['-D', 'CMAKE_BUILD_TYPE=' + config]
    options += ['-D', 'CMAKE_TOOLCHAIN_FILE=' + emscripten_path + '/cmake/Modules/Platform/Emscripten.cmake']
    options += ['-G', 'MinGW Makefiles']
    options += [root]
    config_process = subprocess.Popen(options, cwd=build_dir_cc)
    config_process.wait()
    if(config_process.returncode != 0):
        print("Configuration failed.")
        return False
    print("Configuration succeeded.")
    # Updates generator once configuration is complete
    global generator
    generator = DetectGenerator()
    return True
def Build(_build_dir = build_dir):
    """Build the project in _build_dir via 'cmake --build'; return True on success."""
    # Configure build process.
    print("Building project.")
    options = ['cmake', '--build', _build_dir, '--config', config, '--use-stderr'];
    # Appends parallel build option if supported by the generator.
    if "Unix Makefiles" in generator:
        options += ['--', '-j' + str(multiprocessing.cpu_count())]
    config_process = subprocess.Popen(options, cwd=_build_dir)
    config_process.wait()
    if(config_process.returncode != 0):
        print("Build failed.")
        return False
    print("Build succeeded.")
    return True
def Test():
    """Run the unit test suite through CTest; return True when all tests pass."""
    # Configure Test process.
    print("Running unit tests.")
    options = ['ctest' ,'--output-on-failure', '-j' + str(multiprocessing.cpu_count()), '--build-config', config]
    config_process = subprocess.Popen(options, cwd=build_dir)
    config_process.wait()
    if(config_process.returncode != 0):
        print("Testing failed.")
        return False
    print("Testing succeeded.")
    return True
def PackSources(_type):
    """Create a source package of the given CPack generator type (e.g. ZIP, TBZ2)."""
    print("Packing sources.")
    options = ['cpack', '-G', _type, '--config', 'CPackSourceConfig.cmake']
    config_process = subprocess.Popen(options, cwd=build_dir)
    config_process.wait()
    if(config_process.returncode != 0):
        print("Packing sources of type " + _type + " failed.")
        return False
    print("Packing sources of type " + _type + " succeeded.")
    return True
def PackBinaries(_type, _build_dir = build_dir):
    """Create a binary package of the given CPack generator type from _build_dir."""
    print("Packing binaries.")
    options = ['cpack', '-G', _type, '-C', config]
    config_process = subprocess.Popen(options, cwd=_build_dir)
    config_process.wait()
    if(config_process.returncode != 0):
        print("Packing binaries of type " + _type + " failed.")
        return False
    print("Packing binaries of type " + _type + " succeeded.")
    return True
def SelecConfig():
    """Interactively select the CMake build configuration (sets global `config`)."""
    configs = {
        1: 'Debug',
        2: 'Release',
        3: 'RelWithDebInfo',
        4: 'MinSizeRel'}
    while True:
        print("Select build configuration:")
        for num, message in sorted(configs.iteritems()):  # Python 2 dict API
            print("%d: %s") % (num, message)
        # Get input and check validity
        try:
            answer = int(raw_input("Enter a value: "))
        except:
            continue
        if not answer in configs:
            continue
        # Affect global configuration variable
        global config
        config = configs[answer]
        return True
def FindGenerators():
    """Populate global `generators` from the generator list in 'cmake --help' output."""
    # Finds all generators outputted from cmake usage
    process = subprocess.Popen(['cmake', '--help'], stdout=subprocess.PIPE)
    stdout = process.communicate()[0]
    sub_stdout = stdout[stdout.rfind('Generators'):]
    matches = re.findall(r"\s*(.+)\s*=.+", sub_stdout, re.MULTILINE)
    # Fills generators list
    global generators
    for match in matches:
        generator_name = match.strip()
        # Appends also Win64/ARM option if generator is VS
        if " [arch]" in generator_name:
            gen_name = generator_name[0:len(generator_name) - 7]  # strip ' [arch]' suffix
            generators[len(generators)] = gen_name
            generators[len(generators)] = gen_name + " Win64"
            generators[len(generators)] = gen_name + " ARM"
        else:
            generators[len(generators)] = generator_name
def FindInCache(_regex):
    """Search the CMake cache file for _regex; return the match object, or None
    when the cache file cannot be opened.

    Fix: the file handle is now closed (the original leaked it), and only I/O
    errors are swallowed instead of a bare except.
    """
    try:
        cache_file = open(cmake_cache_file)
    except IOError:
        return None
    try:
        return re.search(_regex, cache_file.read())
    finally:
        cache_file.close()
def DetectGenerator():
    """Return the generator recorded in the CMake cache, or 'default' if unknown."""
    match = FindInCache(r"CMAKE_GENERATOR:INTERNAL=(.*)")
    if match:
        global generators
        global generator
        for num, message in sorted(generators.iteritems()):  # Python 2 dict API
            if match.group(1) == message:
                return message
    return 'default'
def SelecGenerator():
    """Interactively select the CMake generator (sets global `generator`).

    If the chosen generator differs from the one recorded in the CMake cache,
    offers to clean the build directory so that the change can take effect.
    """
    global generators
    while True:
        print("Select generator:")
        for num, message in sorted(generators.iteritems()):  # Python 2 dict API
            print("%d: %s") % (num, message)
        # Get input and check validity
        try:
            answer = int(raw_input("Enter a value: "))
        except:
            continue
        if not answer in generators:
            continue
        # Check if this is the current generator
        current_generator = DetectGenerator()
        if current_generator == 'default':
            # no cache yet: just record the selection
            global generator
            generator = generators[answer]
            return True
        if current_generator != generators[answer]:
            print("Selected generator '%s' is different from the current one '%s'.") % (generators[answer], current_generator)
            clean = raw_input("Do you want to clean build directory to apply the change? (y/n): ") == "y"
            if clean:
                generator = generators[answer]
                return CleanBuildDir()
        return True
def ClearScreen():
    """Clear the terminal: 'cls' on Windows, 'clear' elsewhere."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def Exit():
    """Terminate the helper with a success exit code."""
    sys.exit(0)
    return True  # unreachable; kept so the signature matches the other menu actions
def main():
    """Interactive menu loop driving configure/build/test/pack actions (Python 2)."""
    # Checks CMake installation is correct.
    if not ValidateCMake():
        return
    # Emscripten is optional
    CheckEmscripten()
    # Detects available generators
    FindGenerators()
    # Update current generator
    print("DetectGenerator")
    global generator
    generator = DetectGenerator()
    # menu key -> [label, list of callables executed in order]
    options = {
        '1': ["Build", [MakeBuildDir, Configure, Build]],
        '2': ["Run unit tests", [MakeBuildDir, Configure, Build, Test]],
        '3': ["Execute CMake generation step (don't build)", [MakeBuildDir, Configure]],
        '4': ["Clean out-of-source build directory\n ------------------", [CleanBuildDir]],
        '5': ["Pack binaries", [MakeBuildDir, Configure, Build, partial(PackBinaries, "ZIP"), partial(PackBinaries, "TBZ2")]],
        '6': ["Pack sources\n ------------------", [MakeBuildDir, Configure, partial(PackSources, "ZIP"), partial(PackSources, "TBZ2")]],
        '7': ["Select build configuration", [SelecConfig]],
        '8': ["Select cmake generator\n ------------------", [SelecGenerator]],
        '9': ["Exit\n------------------", [Exit]]}
    # Adds emscripten
    global emscripten_path
    if emscripten_path != None:
        options['1a'] = ["Build emscripten", [MakeBuildDir, Configure, Build, partial(MakeBuildDir, build_dir_cc), ConfigureCC, partial(Build, build_dir_cc)]]
        options['5a'] = ["Pack emscripten binaries", [MakeBuildDir, Configure, Build, partial(MakeBuildDir, build_dir_cc), ConfigureCC, partial(Build, build_dir_cc), partial(PackBinaries, "ZIP", build_dir_cc)]]
    while True:
        # Displays options
        ClearScreen()
        print("ozz CMake build helper tool")
        print("")
        print("Selected build configuration: %s") % config
        print("Selected generator: %s") % generator
        print("")
        print("Choose an option:")
        print("------------------")
        for key, message in sorted(options.iteritems()):  # Python 2 dict API
            print(" %s: %s") % (key, message[0])
        # Get input and check validity
        answer = raw_input("Enter a value: ")
        if not answer in options:
            continue
        # Execute command in a try catch to avoid crashes and allow retries.
        ClearScreen()
        try:
            # run the action chain, stopping at the first failing step
            for command in options[answer][1]:
                if command():
                    print("\nExecution success.\n")
                else:
                    print("\nExecution failed.\n")
                    break
        except Exception, e:
            print("\nAn error occured during script execution: %s\n") % e
        raw_input("Press enter to continue...")
    return 0
# Script entry point.
if __name__ == '__main__':
    main()
| mgdesign-fr/ozz-animation | build-helper.py | Python | mit | 12,126 | [
"VisIt"
] | bf09c97f5e68794a4babfd08ad46af27442652c4ba80c976ee93ab42f7e42a7a |
# coding=utf-8
# main codes, call functions at stokes_flow.py
# Zhang Ji, 20160410
import sys
import petsc4py
petsc4py.init(sys.argv)
# import warnings
# from memory_profiler import profile
import numpy as np
from src import stokes_flow as sf
# import stokes_flow as sf
from src.stokes_flow import problem_dic, obj_dic
from petsc4py import PETSc
from src.geo import *
from time import time
import pickle
from scipy.io import savemat, loadmat
from src.ref_solution import *
# @profile
def view_matrix(m, **kwargs):
    """
    Display matrix m as a color image using matplotlib's matshow.

    Recognized keyword arguments (all optional):
        vmin, vmax: float, color scale limits
        title: str, figure title (default ' ')
        cmap: str, name of a matplotlib colormap
    Unknown keyword arguments are silently ignored.
    """
    # Merge recognized options with their defaults.
    defaults = {
        'vmin': None,
        'vmax': None,
        'title': ' ',
        'cmap': None
    }
    opts = {key: kwargs.get(key, default) for key, default in defaults.items()}
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    image = ax.matshow(m,
                       origin='lower',
                       vmin=opts['vmin'],
                       vmax=opts['vmax'],
                       cmap=plt.get_cmap(opts['cmap']))
    fig.colorbar(image)
    plt.title(opts['title'])
    plt.show()
def save_vtk(problem: sf.StokesFlowProblem):
    """
    Compare the numerical solution of `problem` against the analytical
    sphere solution, once per radius scaling factor in
    problem_kwargs['r_factor'].

    Despite the name, the actual vtk export calls are commented out below;
    currently only `problem.vtk_check` is invoked, which both writes the
    check files and returns an error measure.

    Returns:
        sphere_err: array, one error value (first entry of vtk_check's
            result) per radius factor.
    """
    t0 = time()
    # Analytical reference solution for flow around a sphere
    ref_slt = sphere_slt(problem)
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()  # NOTE(review): unused in this function
    problem_kwargs = problem.get_kwargs()
    fileHandle = problem_kwargs['fileHandle']
    radius = problem_kwargs['radius']
    u = problem_kwargs['u']
    sphere_err = 0
    # problem.vtk_obj(fileHandle)
    # problem.vtk_velocity('%s_Velocity' % fileHandle)
    # problem.vtk_self(fileHandle)
    theta = np.pi / 2
    sphere_check = sf.StokesFlowObj()
    sphere_geo_check = sphere_geo()  # force geo
    # Default to a single, unscaled radius if no factors were supplied
    if not 'r_factor' in problem_kwargs:
        r_factor = np.ones(1)
    else:
        r_factor = problem_kwargs['r_factor']
    # One error slot per radius factor
    sphere_err = r_factor.copy()
    for i0, d0 in enumerate(r_factor):
        # Check sphere: 2000 nodes at scaled radius, rotated a quarter turn
        # about the y axis, moving with rigid velocity u along x.
        sphere_geo_check.create_n(2000, radius * d0)
        sphere_geo_check.set_rigid_velocity([u, 0, 0, 0, 0, 0])
        sphere_geo_check.node_rotation(norm=np.array([0, 1, 0]), theta=theta)
        sphere_check.set_data(sphere_geo_check, sphere_geo_check)
        sphere_err[i0] = problem.vtk_check('%s_Check_%f' % (fileHandle, (radius * d0)), sphere_check, ref_slt)[0]
    t1 = time()
    PETSc.Sys.Print('%s: write vtk files use: %fs' % (str(problem), (t1 - t0)))
    return sphere_err
def get_problem_kwargs(**main_kwargs):
    """
    Assemble the problem parameter dictionary from the PETSc options
    database, then apply any keyword-argument overrides.

    Command line options (with defaults) include:
        -r    sphere radius (1)
        -d    surface mesh spacing deltaLength (0.3)
        -e    regularization factor epsilon (0.3)
        -u    rigid-body velocity magnitude (1)
        -f    output file handle ('sphere')
        -s/-g KSP solver / preconditioner ('gmres' / 'none')
        -sm   matrix construction method ('rs')
        -n, -nx, -ny, -dist, -distx, -disty: layout of the sphere grid

    Inputs:
        main_kwargs: dict, entries override/extend the assembled dictionary.

    Returns:
        problem_kwargs: dict of all problem parameters.
    """
    OptDB = PETSc.Options()
    radius = OptDB.getReal('r', 1)
    deltaLength = OptDB.getReal('d', 0.3)
    epsilon = OptDB.getReal('e', 0.3)
    u = OptDB.getReal('u', 1)
    fileHandle = OptDB.getString('f', 'sphere')
    solve_method = OptDB.getString('s', 'gmres')
    precondition_method = OptDB.getString('g', 'none')
    plot_geo = OptDB.getBool('plot_geo', False)
    debug_mode = OptDB.getBool('debug', False)
    matrix_method = OptDB.getString('sm', 'rs')
    restart = OptDB.getBool('restart', False)
    twoPara_n = OptDB.getInt('tp_n', 1)
    legendre_m = OptDB.getInt('legendre_m', 3)
    legendre_k = OptDB.getInt('legendre_k', 2)
    n_sphere_check = OptDB.getInt('n_sphere_check', 2000)
    n_node_threshold = OptDB.getInt('n_threshold', 10000)
    random_velocity = OptDB.getBool('random_velocity', False)
    getConvergenceHistory = OptDB.getBool('getConvergenceHistory', False)
    pickProblem = OptDB.getBool('pickProblem', False)
    prb_index = OptDB.getInt('prb_index', -1)
    # Sphere-grid layout: n_obj_x * n_obj_y spheres spaced by (distx, disty)
    n_obj = OptDB.getInt('n', 1)
    n_obj_x = OptDB.getInt('nx', n_obj)
    n_obj_y = OptDB.getInt('ny', n_obj)
    distance = OptDB.getReal('dist', 3)
    distance_x = OptDB.getReal('distx', distance)
    distance_y = OptDB.getReal('disty', distance)
    move_delta = np.array([distance_x, distance_y, 1])
    # field_range: describe a sector area.
    field_range = np.array([[-3, -3, -3], [n_obj_x - 1, n_obj_y - 1, 0] * move_delta + [3, 3, 3]])
    n_grid = np.array([n_obj_x, n_obj_y, 1]) * 20
    problem_kwargs = {
        'name':                  'spherePrb',
        'matrix_method':         matrix_method,
        'deltaLength':           deltaLength,
        'epsilon':               epsilon,
        'delta':                 deltaLength * epsilon,  # for rs method
        'd_radia':               deltaLength / 2,  # for sf method
        'solve_method':          solve_method,
        'precondition_method':   precondition_method,
        'field_range':           field_range,
        'n_grid':                n_grid,
        'plot_geo':              plot_geo,
        'debug_mode':            debug_mode,
        'fileHandle':            fileHandle,
        'region_type':           'rectangle',
        'twoPara_n':             twoPara_n,
        'legendre_m':            legendre_m,
        'legendre_k':            legendre_k,
        'radius':                radius,
        'u':                     u,
        'random_velocity':       random_velocity,
        'n_obj_x':               n_obj_x,
        'n_obj_y':               n_obj_y,
        'move_delta':            move_delta,
        'restart':               restart,
        'n_sphere_check':        n_sphere_check,
        'n_node_threshold':      n_node_threshold,
        'getConvergenceHistory': getConvergenceHistory,
        'pickProblem':           pickProblem,
        'prb_index':             prb_index,
    }
    # Keyword arguments take precedence over the options database
    for key in main_kwargs:
        problem_kwargs[key] = main_kwargs[key]
    return problem_kwargs
def print_case_info(**problem_kwargs):
    """
    Print a summary of the current case (geometry, matrix method, solver)
    via PETSc's collective printer.

    Raises:
        AssertionError: if problem_kwargs['matrix_method'] is not one of the
            supported method names.
    """
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()  # NOTE(review): unused; PETSc.Sys.Print is collective
    size = comm.Get_size()
    fileHandle = problem_kwargs['fileHandle']
    radius = problem_kwargs['radius']
    deltaLength = problem_kwargs['deltaLength']
    matrix_method = problem_kwargs['matrix_method']
    u = problem_kwargs['u']
    PETSc.Sys.Print('sphere radius: %f, delta length: %f, velocity: %f' % (radius, deltaLength, u))
    # Fixed: the old message omitted the *_plane and *_precondition variants
    # that the assert below actually accepts.
    err_msg = "Accepted matrix methods: 'rs', 'rs_plane', 'tp_rs', 'lg_rs', " \
              "'rs_precondition', 'tp_rs_precondition', 'lg_rs_precondition', 'pf'. "
    assert matrix_method in (
        'rs', 'rs_plane', 'tp_rs', 'lg_rs', 'rs_precondition', 'tp_rs_precondition', 'lg_rs_precondition',
        'pf'), err_msg
    epsilon = problem_kwargs['epsilon']
    if matrix_method in ('rs', 'rs_plane', 'rs_precondition', 'pf'):
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f'
                        % (matrix_method, epsilon))
    elif matrix_method in ('tp_rs', 'tp_rs_precondition'):
        twoPara_n = problem_kwargs['twoPara_n']
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f, order: %d'
                        % (matrix_method, epsilon, twoPara_n))
    elif matrix_method in ('lg_rs', 'lg_rs_precondition'):
        legendre_m = problem_kwargs['legendre_m']
        legendre_k = problem_kwargs['legendre_k']
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f, m: %d, k: %d, p: %d'
                        % (matrix_method, epsilon, legendre_m, legendre_k, (legendre_m + 2 * legendre_k + 1)))
    solve_method = problem_kwargs['solve_method']
    precondition_method = problem_kwargs['precondition_method']
    PETSc.Sys.Print('solve method: %s, precondition method: %s'
                    % (solve_method, precondition_method))
    # Typo fix: 'headle' -> 'handle'
    PETSc.Sys.Print('output file handle: ' + fileHandle)
    PETSc.Sys.Print('MPI size: %d' % size)
# @profile
def main_fun(**main_kwargs):
    """
    Solve the Stokes flow around a grid of n_obj_x * n_obj_y rigid spheres.

    Either builds the problem from scratch (restart=False) or unpickles a
    previously saved problem from '<fileHandle>_pick.bin' (restart=True),
    then reports the resultant force normalized by 6*pi*radius*u.

    Returns:
        problem: the solved (or unpickled) StokesFlowProblem
        sphere_err: currently always 0 (save_vtk call is commented out)
    """
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    restart = problem_kwargs['restart']
    fileHandle = problem_kwargs['fileHandle']
    radius = problem_kwargs['radius']
    deltaLength = problem_kwargs['deltaLength']
    epsilon = problem_kwargs['epsilon']
    u = problem_kwargs['u']
    matrix_method = problem_kwargs['matrix_method']
    n_obj_x = problem_kwargs['n_obj_x']
    n_obj_y = problem_kwargs['n_obj_y']
    move_delta = problem_kwargs['move_delta']
    random_velocity = problem_kwargs['random_velocity']
    getConvergenceHistory = problem_kwargs['getConvergenceHistory']
    pickProblem = problem_kwargs['pickProblem']
    if not restart:
        #
        # Build the template sphere geometry and velocity
        #
        print_case_info(**problem_kwargs)
        sphere_geo0 = sphere_geo()  # force geo
        sphere_geo0.create_delta(deltaLength, radius)
        # # DBG
        # nodes = ((0.17389, 0.2938, 0.37454),
        #          (0.76774, 0.87325, 0.50809),
        #          (0.17557, 0.82348, 0.7485),
        #          (0.50734, 0.99882, 0.39992))
        # sphere_geo0.set_nodes(nodes=nodes, deltalength=deltaLength)
        if random_velocity:
            sphere_velocity = np.random.sample(6) * u
        else:
            sphere_velocity = np.array([0, u, 0, 0, 0, 0])
        sphere_geo0.set_rigid_velocity(sphere_velocity)
        problem = problem_dic[matrix_method](**problem_kwargs)
        if pickProblem:
            problem.pickmyself(fileHandle,
                               ifcheck=True)  # not save anything really, just check if the path is correct, to avoid this error after long time calculation.
        obj_sphere = obj_dic[matrix_method]()
        obj_sphere_kwargs = {'name': 'sphereObj_0_0'}
        sphere_geo1 = sphere_geo0.copy()
        if matrix_method in ('pf',):
            # Point-force method: force geometry lies slightly outside the
            # velocity geometry.
            sphere_geo1.node_zoom((radius + deltaLength * epsilon) / radius)
        obj_sphere.set_data(sphere_geo1, sphere_geo0, **obj_sphere_kwargs)
        obj_sphere.move((0, 0, 0))
        # Replicate the template sphere over the (n_obj_x, n_obj_y) grid
        for i in range(n_obj_x * n_obj_y):
            ix = i // n_obj_x
            iy = i % n_obj_x
            obj2 = obj_sphere.copy()
            obj2.set_name('sphereObj_%d_%d' % (ix, iy))
            move_dist = np.array([ix, iy, 0]) * move_delta
            obj2.move(move_dist)
            if random_velocity:
                # Each sphere gets its own random rigid velocity
                sphere_velocity = np.random.sample(6) * u
                obj2.get_u_geo().set_rigid_velocity(sphere_velocity)
            problem.add_obj(obj2)
        problem.print_info()
        problem.create_matrix()
        residualNorm = problem.solve()
        fp = problem.get_force_petsc()
        if getConvergenceHistory:
            convergenceHistory = problem.get_convergenceHistory()
        if pickProblem:
            problem.pickmyself(fileHandle)
    else:
        #
        # Restart: restore a previously pickled problem
        #
        with open(fileHandle + '_pick.bin', 'rb') as input:
            unpick = pickle.Unpickler(input)
            problem = unpick.load()
            problem.unpick_myself()
            residualNorm = problem.get_residualNorm()
            obj_sphere = problem.get_obj_list()[0]
            PETSc.Sys.Print('---->>>unpick the problem from file %s.pickle' % (fileHandle))
    sphere_err = 0
    # sphere_err = save_vtk(problem, **main_kwargs)
    # NOTE(review): `obj2` is only defined on the non-restart branch above;
    # on the restart path this raises NameError — presumably `obj_sphere`
    # (assigned on both branches) was intended. TODO confirm.
    force_sphere = obj2.get_total_force()
    PETSc.Sys.Print('---->>>Resultant is', force_sphere / 6 / np.pi / radius / u)
    # NOTE(review): returns two values, but tp_rs_wrapper/lg_rs_wrapper
    # unpack three (including residualNorm) — confirm the intended signature.
    return problem, sphere_err
# @profile
def two_step_main_fun(**main_kwargs):
    """
    Two-step procedure: solve the flow around a single sphere, then re-solve
    on a zoomed copy of the geometry and compare both against an analytic
    check sphere.

    Returns:
        problem: the first-step StokesFlowProblem
        sphere_err: error value (set to 0 here; see NOTE below)
        residualNorm: residual of the first-step solve

    NOTE(review): this function references the names `d0` and `epsilon`
    (see below) that are never defined in its scope — it raises NameError
    when executed as written.
    """
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    restart = problem_kwargs['restart']
    fileHandle = problem_kwargs['fileHandle']
    radius = problem_kwargs['radius']
    deltaLength = problem_kwargs['deltaLength']
    u = problem_kwargs['u']
    matrix_method = problem_kwargs['matrix_method']
    if not restart:
        # Number of surface nodes chosen so spacing ~ deltaLength
        n = int(16 * radius * radius / deltaLength / deltaLength)
        sphere_geo0 = sphere_geo()  # force geo
        sphere_geo0.create_n(n, radius)
        sphere_geo0.set_rigid_velocity([u, 0, 0, 0, 0, 0])
        print_case_info(**problem_kwargs)
        problem = problem_dic[matrix_method](**problem_kwargs)
        problem.pickmyself(
                fileHandle)  # not save anything really, just check if the path is correct, to avoid this error after long time calculation.
        obj_sphere = obj_dic[matrix_method]()
        obj_sphere_kwargs = {'name': 'sphereObj'}
        obj_sphere.set_data(sphere_geo0, sphere_geo0, **obj_sphere_kwargs)
        problem.add_obj(obj_sphere)
        problem.print_info()
        problem.create_matrix()
        residualNorm = problem.solve()
        # problem.pickmyself(fileHandle)
    else:
        # Restore a previously pickled problem
        with open(fileHandle + '_pick.bin', 'rb') as input:
            unpick = pickle.Unpickler(input)
            problem = unpick.load()
            problem.unpick_myself()
            residualNorm = problem.get_residualNorm()
            obj_sphere = problem.get_obj_list()[-1]
            PETSc.Sys.Print('---->>>unpick the problem from file %s.pickle' % (fileHandle))
    sphere_err = 0
    # sphere_err = save_vtk(problem, **main_kwargs)
    factor = 10
    obj_sphere1 = obj_sphere.copy()
    obj_sphere1.zoom(factor)
    ref_slt = sphere_slt(problem)
    # NOTE(review): `d0` is undefined in this function (NameError at runtime);
    # it presumably corresponds to a radius factor such as `factor`. TODO confirm.
    problem.vtk_check('%s_Check_%f' % (fileHandle, (radius * d0)), obj_sphere1)
    # Build a 2000-node check sphere, rotated a quarter turn about y
    sphere_geo_check = sphere_geo()
    sphere_geo_check.create_n(2000, radius)
    sphere_geo_check.set_rigid_velocity([u, 0, 0, 0, 0, 0])
    theta = np.pi / 2
    sphere_geo_check.node_rotation(norm=np.array([0, 1, 0]), theta=theta)
    sphere_check = sf.StokesFlowObj()
    sphere_check.set_data(sphere_geo_check, sphere_geo_check)
    sphere_err0 = problem.vtk_check('%s_Check_%f' % (fileHandle, (radius)), sphere_check)[0]
    t0 = time()
    # NOTE(review): `epsilon` and `d0` are undefined here — epsilon is never
    # read from problem_kwargs in this function. TODO confirm intent.
    problem_kwargs['delta'] = deltaLength * epsilon * d0
    problem_kwargs['name'] = 'spherePrb1'
    problem1 = problem_dic[matrix_method](**problem_kwargs)
    problem1.add_obj(obj_sphere1)
    problem1.create_matrix()
    t1 = time()
    PETSc.Sys.Print('%s: create problem use: %fs' % (str(problem), (t1 - t0)))
    residualNorm1 = problem1.solve()
    sphere_err1 = problem1.vtk_check('%s_Check_%f' % (fileHandle, (radius * d0)), sphere_check)
    force_sphere = obj_sphere.get_force_x()
    PETSc.Sys.Print('sphere_err0=%f, sphere_err1=%f' % (sphere_err0, sphere_err1))
    PETSc.Sys.Print('---->>>Resultant at x axis is %f' % (np.sum(force_sphere)))
    return problem, sphere_err, residualNorm
def tp_rs_wrapper():
    """
    Parameter sweep for the two-parameter regularized-Stokeslet ('tp_rs')
    method over (deltaLength, epsilon, N); results saved to 'sphere_err.mat'
    on rank 0.
    """
    # r_factor = np.array((1, 1))
    # deltaLength = (0.5, 0.4)
    # epsilon = (0.1, 0.2)
    # N = np.array((1, 2))
    r_factor = 3 ** (np.arange(0, 1.2, 0.2) ** 2)
    deltaLength = 0.05 ** np.arange(0.25, 1.05, 0.1)
    epsilon = 0.1 ** np.arange(-1, 1.2, 0.2)
    N = np.array((1, 2, 10, 20))
    # Flattened full tensor grid of parameter combinations
    deltaLength, epsilon, N = np.meshgrid(deltaLength, epsilon, N)
    deltaLength = deltaLength.flatten()
    epsilon = epsilon.flatten()
    N = N.flatten()
    sphere_err = np.zeros((epsilon.size, r_factor.size))
    residualNorm = epsilon.copy()
    main_kwargs = {'r_factor': r_factor}
    OptDB = PETSc.Options()
    OptDB.setValue('sm', 'tp_rs')
    for i0 in range(epsilon.size):
        d = deltaLength[i0]
        e = epsilon[i0]
        n = N[i0]
        fileHandle = 'sphere_%05d_%6.4f_%4.2f_%d' % (i0, d, e, n)
        OptDB.setValue('d', d)
        OptDB.setValue('e', e)
        OptDB.setValue('tp_n', int(n))
        OptDB.setValue('f', fileHandle)
        # NOTE(review): main_fun (as defined in this file) returns two values;
        # unpacking three here raises ValueError. Confirm the intended
        # main_fun signature.
        _, sphere_err[i0, :], residualNorm[i0] = main_fun(**main_kwargs)
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    if rank == 0:
        savemat('sphere_err.mat',
                {'deltaLength':  deltaLength,
                 'epsilon':      epsilon,
                 'N':            N,
                 'sphere_err':   sphere_err,
                 'residualNorm': residualNorm,
                 'r_factor':     r_factor},
                oned_as='column')
def lg_rs_wrapper():
    """
    Parameter sweep for the Legendre regularized-Stokeslet ('lg_rs') method,
    to determine the best combination of m and k. Sweeps (deltaLength,
    epsilon, (m, k)); results saved to 'sphere_err.mat' on rank 0.

    The (m, k) pairs are selected by the command line option -mk_case
    (default 0), indexing the `mk_banks` dictionary below.
    """
    # r_factor = np.array((1, 1))
    # deltaLength = (0.5, 0.4)
    # epsilon = (0.1, 0.2)
    # mk_bank = np.array(((2, 0), (2, 1), (2, 2), (2, 3), (2, 4),
    #                     (3, 0), (3, 1), (3, 2), (3, 3),
    #                     (4, 0), (4, 1), (4, 2), (4, 3),
    #                     (5, 0), (5, 1), (5, 2)))
    OptDB = PETSc.Options()
    r_factor = 3 ** (np.arange(0, 1.2, 0.2) ** 2)
    deltaLength = 0.05 ** np.arange(0.25, 1.05, 0.1)
    epsilon = 0.1 ** np.arange(-1, 1.2, 0.2)
    mk_case = OptDB.getInt('mk_case', 0)
    # Candidate (legendre_m, legendre_k) pairs per case
    mk_banks = {
        0:  np.array((2, 1)),
        1:  np.array(((2, 0), (2, 1), (2, 2), (2, 3), (2, 4))),
        2:  np.array(((2, 1), (3, 1), (4, 1), (5, 1))),
        3:  np.array(((2, 2), (3, 2), (4, 2), (5, 2))),
        10: np.array(((2, 0), (2, 1), (2, 2), (2, 3), (2, 4),
                      (3, 0), (3, 1), (3, 2), (3, 3),
                      (4, 0), (4, 1), (4, 2), (4, 3),
                      (5, 0), (5, 1), (5, 2)))
    }
    mk_bank = mk_banks[mk_case].reshape((-1, 2))
    # Flattened full tensor grid of parameter combinations
    deltaLength, epsilon, mk_index = np.meshgrid(deltaLength, epsilon, range(mk_bank.shape[0]))
    deltaLength = deltaLength.flatten()
    epsilon = epsilon.flatten()
    mk_index = mk_index.flatten()
    sphere_err = np.zeros((epsilon.size, r_factor.size))
    residualNorm = epsilon.copy()
    main_kwargs = {'r_factor': r_factor}
    OptDB.setValue('sm', 'lg_rs')
    for i0 in range(epsilon.size):
        d = deltaLength[i0]
        e = epsilon[i0]
        m = mk_bank[mk_index[i0], 0]
        k = mk_bank[mk_index[i0], 1]
        fileHandle = 'sphere_%05d_%6.4f_%4.2f_m=%d,k=%d' % (i0, d, e, m, k)
        OptDB.setValue('d', d)
        OptDB.setValue('e', e)
        OptDB.setValue('legendre_m', int(m))
        OptDB.setValue('legendre_k', int(k))
        OptDB.setValue('f', fileHandle)
        # NOTE(review): main_fun (as defined in this file) returns two values;
        # unpacking three here raises ValueError. Confirm the intended
        # main_fun signature.
        _, sphere_err[i0, :], residualNorm[i0] = main_fun(**main_kwargs)
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    if rank == 0:
        savemat('sphere_err.mat',
                {'deltaLength':  deltaLength,
                 'epsilon':      epsilon,
                 'mk_bank':      mk_bank,
                 'mk_index':     mk_index,
                 'sphere_err':   sphere_err,
                 'residualNorm': residualNorm,
                 'r_factor':     r_factor},
                oned_as='column')
def percondition_wrapper():
    """
    Solve for multiple spheres with random velocities, to determine whether
    the preconditioning method works.

    For n = 1..n_max (option -n_max, default 2), runs main_fun() and records
    the per-case sphere errors and KSP convergence histories; rank 0 saves
    them to 'sphere_err.mat'.
    """
    OptDB = PETSc.Options()
    OptDB.setValue('r', 1)
    OptDB.setValue('d', 0.2)
    OptDB.setValue('e', 0.25)
    OptDB.setValue('f', 'sphere')
    OptDB.setValue('sm', 'lg_rs')
    OptDB.setValue('random_velocity', True)
    OptDB.setValue('getConvergenceHistory', True)
    OptDB.setValue('ksp_rtol', 1e-8)
    n_max = OptDB.getInt('n_max', 2)
    # Entries hold arrays of differing lengths, so object arrays are needed.
    # BUGFIX: use the builtin `object` — the `np.object` alias was deprecated
    # in NumPy 1.20 and removed in NumPy 1.24.
    sphere_err = np.zeros((n_max,), dtype=object)
    convergenceHistory = np.zeros((n_max,), dtype=object)
    for n in range(0, n_max):
        OptDB.setValue('n', n + 1)
        problem, sphere_err[n] = main_fun()
        convergenceHistory[n] = problem.get_convergenceHistory()
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    if rank == 0:
        savemat('sphere_err.mat',
                {'n':                  np.arange(n_max),
                 'convergenceHistory': convergenceHistory,
                 'sphere_err':         sphere_err},
                oned_as='column')
def two_step_wrapper():
    """
    Run the two-step solution procedure (two_step_main_fun) for a single,
    hard-coded parameter set using the 'lg_rs' method.
    """
    OptDB = PETSc.Options()
    # r_factor = 3 ** (np.arange(0, 1.2, 0.2) ** 2)
    r_factor = np.ones(1)
    deltaLength = 0.10573713
    epsilon = 3
    mk_bank = np.array((3, 2))  # (legendre_m, legendre_k)
    sphere_err = np.zeros((r_factor.size))
    main_kwargs = {'r_factor': r_factor}
    OptDB.setValue('sm', 'lg_rs')
    fileHandle = 'sphere_%6.4f_%4.2f_m=%d,k=%d' % \
                 (deltaLength, epsilon, mk_bank[0], mk_bank[1])
    OptDB.setValue('d', deltaLength)
    OptDB.setValue('e', epsilon)
    OptDB.setValue('legendre_m', int(mk_bank[0]))
    OptDB.setValue('legendre_k', int(mk_bank[1]))
    OptDB.setValue('f', fileHandle)
    problem, sphere_err[:], residualNorm = two_step_main_fun(**main_kwargs)
if __name__ == '__main__':
    # Entry point: run the default single-problem driver. The commented
    # calls select the alternative parameter-sweep wrappers instead.
    # lg_rs_wrapper()
    # tp_rs_wrapper()
    # percondition_wrapper()
    main_fun()
    pass
# OptDB.setValue('sm', 'sf')
# m_sf = main_fun()
# delta_m = np.abs(m_rs - m_sf)
# # view_matrix(np.log10(delta_m), 'rs_m - sf_m')
# percentage = delta_m / (np.maximum(np.abs(m_rs), np.abs(m_sf)) + 1e-100)
#
# view_args = {'vmin': -10,
# 'vmax': 0,
# 'title': 'log10_abs_rs',
# 'cmap': 'gray'}
# view_matrix(np.log10(np.abs(m_rs) + 1e-100), **view_args)
#
# view_args = {'vmin': -10,
# 'vmax': 0,
# 'title': 'log10_abs_sf',
# 'cmap': 'gray'}
# view_matrix(np.log10(np.abs(m_sf) + 1e-100), **view_args)
#
# view_args = {'vmin': 0,
# 'vmax': 1,
# 'title': 'percentage',
# 'cmap': 'gray'}
# view_matrix(percentage, **view_args)
#
# view_args = {'vmin': 0,
# 'vmax': -10,
# 'title': 'log10_percentage',
# 'cmap': 'gray'}
# view_matrix(np.log10(percentage + 1e-100), **view_args)
| pcmagic/stokes_flow | sphere/sphere_rs.py | Python | mit | 20,947 | [
"VTK"
] | c716593bae7d3dcbce5cd7f319815d73575934d88edb9fe7a9fb0669bb275902 |
import numpy as np
import numbers
from scipy import sparse
from scipy import linalg
import scipy.sparse.linalg as spla
from mesh import Vertex, Interval, HalfEdge, QuadCell, convert_to_array
from function import Map, Nodal, Constant
from fem import parse_derivative_info, Basis
from inspect import signature
import time
class GaussRule(object):
    """
    Gauss quadrature nodes and weights on a reference cell.

    Supported reference cells and orders:

        'interval'      (1D, [0,1]):   order in {1,2,3,4,5,6}
        'quadrilateral' (2D, [0,1]^2): order in {1,4,9,16,25,36}
                                       (tensor products of 1D rules)
        'triangle'      (2D):          order in {1,3,7,13}
                                       (symmetric rules, cf. Dunavant)
    """
    def __init__(self, order, element=None, shape=None):
        """
        Constructor

        Inputs:

            order: int, order of quadrature rule
                1D rule: order in {1,2,3,4,5,6}
                2D rule: order in {1,4,9,16,25,36} for quadrilaterals
                        {1,3,7,13} for triangles

            element: Element object

                OR

            shape: str, 'interval', 'triangle', or 'quadrilateral'.
        """
        #
        # Determine shape of cells
        #
        if element is None:
            # Shape specified directly
            assert shape is not None, 'Must specify either element or cell shape.'
        else:
            # Element given
            shape = element.cell_type()

        # Check if shape is supported
        assert shape in ['interval','triangle','quadrilateral'], \
            "Use 'interval', 'triangle', or 'quadrilateral'."

        # Get dimension
        dim = 1 if shape=='interval' else 2

        #
        # Tensorize 1D rules if cell is quadrilateral
        #
        use_tensor_product_rules = \
            ( dim == 1 or shape == 'quadrilateral' )
        if use_tensor_product_rules:
            #
            # Determine the order of constituent 1D rules
            #
            if dim == 1:
                assert order in [1,2,3,4,5,6], 'Gauss rules in 1D: 1,2,3,4,5,6.'
                order_1d = order
            elif dim == 2:
                assert order in [1,4,9,16,25,36], 'Gauss rules over quads in 2D: 1,4,16,25'
                order_1d = int(np.sqrt(order))

            r = [0]*order_1d  # initialize as list of zeros
            w = [0]*order_1d
            #
            # One Dimensional Rules (on [-1,1], mapped to [0,1] below)
            #
            if order_1d == 1:
                r[0] = 0.0
                w[0] = 2.0
            elif order_1d == 2:
                # Nodes
                r[0] = -1.0 /np.sqrt(3.0)
                r[1] = -r[0]
                # Weights
                w[0] = 1.0
                w[1] = 1.0
            elif order_1d == 3:
                # Nodes
                r[0] =-np.sqrt(3.0/5.0)
                r[1] = 0.0
                r[2] =-r[0]
                # weights
                w[0] = 5.0/9.0
                w[1] = 8.0/9.0
                w[2] = w[0]
            elif order_1d == 4:
                # Nodes
                r[0] =-np.sqrt((3.0+2.0*np.sqrt(6.0/5.0))/7.0)
                r[1] =-np.sqrt((3.0-2.0*np.sqrt(6.0/5.0))/7.0)
                r[2] =-r[1]
                r[3] =-r[0]
                # Weights
                w[0] = 0.5 - 1.0 / ( 6.0 * np.sqrt(6.0/5.0) )
                w[1] = 0.5 + 1.0 / ( 6.0 * np.sqrt(6.0/5.0) )
                w[2] = w[1]
                w[3] = w[0]
            elif order_1d == 5:
                # Nodes
                r[0] =-np.sqrt(5.0+4.0*np.sqrt(5.0/14.0)) / 3.0
                r[1] =-np.sqrt(5.0-4.0*np.sqrt(5.0/14.0)) / 3.0
                r[2] = 0.0
                r[3] =-r[1]
                r[4] =-r[0]
                # Weights
                w[0] = 161.0/450.0-13.0/(180.0*np.sqrt(5.0/14.0))
                w[1] = 161.0/450.0+13.0/(180.0*np.sqrt(5.0/14.0))
                w[2] = 128.0/225.0
                w[3] = w[1]
                w[4] = w[0]
            elif order_1d == 6:
                # Nodes
                r[0] = -0.2386191861
                r[1] = -0.6612093865
                r[2] = -0.9324695142
                r[3] = - r[0]
                r[4] = - r[1]
                r[5] = - r[2]
                # Weights
                w[0] = .4679139346
                w[1] = .3607615730
                w[2] = .1713244924
                w[3] = w[0]
                w[4] = w[1]
                w[5] = w[2]

            #
            # Transform from [-1,1] to [0,1]
            #
            r = [0.5+0.5*ri for ri in r]
            w = [0.5*wi for wi in w]

            if dim == 1:
                self.__nodes = np.array(r)
                self.__weights = np.array(w)
            elif dim == 2:
                #
                # Combine 1d rules into tensor product rules
                #
                nodes = []
                weights = []
                for i in range(len(r)):
                    for j in range(len(r)):
                        nodes.append((r[i],r[j]))
                        weights.append(w[i]*w[j])
                self.__nodes = np.array(nodes)
                self.__weights = np.array(weights)

        elif shape == 'triangle':
            # BUGFIX: this branch previously read
            # `elif element.cell_type == 'triangle'`, comparing the *bound
            # method* to a string (always False) and raising AttributeError
            # when only `shape` was supplied — triangle rules were never
            # reachable. Compare the already-resolved `shape` string instead.
            #
            # Two dimensional rules over triangles
            #
            assert order in [1,3,7,13], 'Gauss rules on triangles in 2D: 1, 3, 7 or 13.'
            if order == 1:
                #
                # One point rule
                #
                # NOTE(review): a 1-point Gauss rule exact for linears would
                # place the node at the centroid (1/3, 1/3); confirm whether
                # (2/3, 1/3) is intentional for this reference triangle.
                r = [(2.0/3.0,1.0/3.0)]
                w = [0.5]
            elif order == 3:
                #
                # 3 point rule
                #
                r = [0]*order
                r[0] = (2.0/3.0, 1.0/6.0)
                r[1] = (1.0/6.0, 2.0/3.0)
                r[2] = (1.0/6.0, 1.0/6.0)

                w = [0]*order
                w[0] = 1.0/6.0
                w[1] = w[0]
                w[2] = w[0]
            elif order == 7:
                # The following points correspond to a 7 point rule,
                # see Dunavant, IJNME, v. 21, pp. 1129-1148, 1995.
                # or Braess, p. 95.
                #
                # Nodes
                #
                t1 = 1.0/3.0
                t2 = (6.0 + np.sqrt(15.0))/21.0
                t3 = 4.0/7.0 - t2

                r = [0]*order
                r[0] = (t1,t1)
                r[1] = (t2,t2)
                r[2] = (1.0-2.0*t2, t2)
                r[3] = (t2,1.0-2.0*t2)
                r[4] = (t3,t3)
                r[5] = (1.0-2.0*t3,t3)
                r[6] = (t3,1.0-2.0*t3);
                #
                # Weights
                #
                t1 = 9.0/80.0
                t2 = ( 155.0 + np.sqrt(15.0))/2400.0
                t3 = 31.0/240.0 - t2

                w = [0]*order
                w[0] = t1
                w[1] = t2
                w[2] = t2
                w[3] = t2
                w[4] = t3
                w[5] = t3
                w[6] = t3
            elif order == 13:
                r = [0]*order
                r1 = 0.0651301029022
                r2 = 0.8697397941956
                r4 = 0.3128654960049
                r5 = 0.6384441885698
                r6 = 0.0486903154253
                r10 = 0.2603459660790
                r11 = 0.4793080678419
                r13 = 0.3333333333333
                r[0] = (r1,r1)
                r[1] = (r2,r1)
                r[2] = (r1,r2)
                r[3] = (r4,r6)
                r[4] = (r5,r4)
                r[5] = (r6,r5)
                r[6] = (r5,r6)
                r[7] = (r4,r5)
                r[8] = (r6,r4)
                r[9] = (r10,r10)
                r[10] = (r11,r10)
                r[11] = (r10,r11)
                r[12] = (r13,r13)

                w = [0]*order
                w1 = 0.0533472356088
                w4 = 0.0771137608903
                w10 = 0.1756152574332
                w13 = -0.1495700444677
                w[0] = w1
                w[1] = w1
                w[2] = w1
                w[3] = w4
                w[4] = w4
                w[5] = w4
                w[6] = w4
                w[7] = w4
                w[8] = w4
                w[9] = w10
                w[10] = w10
                w[11] = w10
                w[12] = w13

                # Scale weights to the reference triangle (area 1/2)
                w = [0.5*wi for wi in w]

            self.__nodes = np.array(r)
            self.__weights = np.array(w)

        # Cache rule metadata
        self.__cell_type = shape
        self.__dim = dim

    def nodes(self):
        """
        Return quadrature nodes
        """
        return self.__nodes

    def weights(self):
        """
        Return quadrature weights
        """
        return self.__weights

    def n_nodes(self):
        """
        Return the size of the rule
        """
        return len(self.__weights)

    def dim(self):
        """
        Return the dimension of the rule
        """
        return self.__dim

    def mapped_rule(self, region, jac_p2r=False, hess_p2r=False):
        """
        Return the rule associated with a specific Cell, Interval, or HalfEdge
        as well as the inverse jacobians and hessians associated with the
        transformation.

        Inputs:

            region: object, region (Interval, HalfEdge, or Cell) to which rule
                is mapped.

            jac_p2r, hess_p2r: bool, indicate whether the jacobian and hessian
                of the inverse mapping should be returned. These are useful
                when evaluating the gradients and second derivatives of shape
                functions.

        TODO: Move assembler.shape_eval part to here.
        """
        #
        # Map quadrature rule to entity (cell/halfedge)
        #
        if isinstance(region, Interval):
            #
            # Interval
            #
            # Check compatiblity
            assert self.dim()==1, 'Interval requires a 1D rule.'

            # Get reference nodes and weights
            x_ref = self.nodes()
            w_ref = self.weights()

            # Map reference quadrature nodes to cell
            xg, mg = region.reference_map(x_ref, jac_r2p=True,
                                          jac_p2r=jac_p2r,
                                          hess_p2r=hess_p2r)

            # Get jacobian of forward mapping
            jac = mg['jac_r2p']

            # Modify the quadrature weights
            wg = w_ref*np.array(jac)

        elif isinstance(region, HalfEdge):
            #
            # Edge
            #
            # Check compatibility
            assert self.dim()==1, 'Half Edge requires a 1D rule.'

            # Get reference quadrature nodes and weights
            x_ref = self.nodes()
            w_ref = self.weights()

            # Map reference nodes to halfedge
            xg, mg = region.reference_map(x_ref, jac_r2p=True,
                                          jac_p2r=jac_p2r,
                                          hess_p2r=hess_p2r)

            # Get jaobian of forward mapping
            jac = mg['jac_r2p']

            # Modify the quadrature weights (1D rule along an edge: scale by
            # the edge length factor, i.e. the norm of the jacobian)
            wg = w_ref*np.array(np.linalg.norm(jac[0]))

        elif isinstance(region, QuadCell):
            #
            # Quadrilateral
            #
            # Check compatibility
            assert self.dim()==2, 'QuadCell requires 2D rule.'

            x_ref = self.nodes()
            w_ref = self.weights()

            # Map reference quaddrature nodes to quadcell
            xg, mg = region.reference_map(x_ref, jac_r2p=True,
                                          jac_p2r=jac_p2r,
                                          hess_p2r=hess_p2r)

            # Get Jacobian of forward mapping
            jac = mg['jac_r2p']

            # Modify quadrature weights by the jacobian determinant
            wg = w_ref*np.array([np.linalg.det(j) for j in jac])

        else:
            raise Exception('Only Intervals, HalfEdges, & QuadCells supported')

        #
        # Return Gauss nodes and weights, and Jacobian/Hessian of inverse map
        #
        if any([jac_p2r,hess_p2r]):
            return xg, wg, mg
        else:
            return xg, wg
class Kernel(object):
    """
    Kernel (combination of Functions) to be used in Forms
    """
    def __init__(self, f, derivatives=None, F=None, subsample=None):
        """
        Constructor

        Inputs:

            f: single Function (Map), or list of Functions

            derivatives: derivative specifier(s) for the f's; either a list
                (one entry per function) or a single value applied to all.

            *F: function, lambda function describing how the f's are combined
                and modified to form the kernel; may optionally accept the
                keyword arguments 'cell', 'region', 'phi', and/or 'dofs'.

            *subsample: int, numpy array of subsample indices
        """
        #
        # Store input function(s)
        #
        if type(f) is not list:
            #
            # Single function
            #
            assert isinstance(f, Map), 'Input "f" should be a "Map" object.'
            f = [f]
        self.__f = f
        n_functions = len(self.__f)

        #
        # Parse function derivatives
        #
        dfdx = []
        if derivatives is None:
            #
            # No derivatives specified
            #
            dfdx = [None for dummy in self.__f]
        elif type(derivatives) is list:
            #
            # derivatives specified in list
            #
            assert len(derivatives)==n_functions, \
                'The size of input "derivatives" incompatible with '+\
                'that of input "f".'
            dfdx = derivatives
        else:
            #
            # Single derivative applies to all functions
            #
            dfdx = parse_derivative_info(derivatives)
            dfdx = [dfdx for dummy in self.__f]
        self.__dfdx = dfdx

        #
        # Store meta function F
        #
        # Check that F takes the right number of inputs
        if F is None:
            # Store metafunction F
            assert n_functions == 1, \
                'If input "F" not specified, only one function allowed.'
            F = lambda f: f
        self.__F = F

        # Store function signature of F
        sigF = signature(F)

        # Figure out which of the optional region-related keyword arguments
        # ('cell', 'region', 'phi', 'dofs') F accepts; pre-bind placeholders
        # so that eval() can fill them in cheaply.
        cell_args = {}
        for arg in ['cell', 'region', 'phi', 'dofs']:
            if arg in sigF.parameters:
                cell_args[arg] = None
        bound = sigF.bind_partial(**cell_args)
        self.__bound = bound
        self.__signature = sigF

        # Store subsample
        self.set_subsample(subsample)

    def basis(self):
        """
        Determine the basis functions used in the Kernel (one per Nodal
        constituent function; non-Nodal functions contribute none).
        """
        basis = []
        for f in self.__f:
            if isinstance(f, Nodal):
                basis.append(f.basis())
        return basis

    def set_subsample(self, subsample):
        """
        Set kernel's subsample

        Input:

            subsample: int, numpy array specifying subsample indices

        Note: For stochastic functions, the default subsample is the entire
            range. For deterministic functions, the subsample can only be None.
        """
        #
        # Parse subsample
        #
        if subsample is None:
            #
            # Check whether there is a stochastic function in the list;
            # if so, adopt its (full-range) subsample as the default.
            #
            for f in self.__f:
                if f.n_samples()>1:
                    f.set_subsample(subsample)
                    subsample = f.subsample()
                    break
        #
        # Set same subsample for all functions
        #
        for f in self.__f:
            f.set_subsample(subsample)
            if subsample is not None:
                assert np.allclose(f.subsample(),subsample), \
                    'Incompatible subsample.'
        self.__subsample = subsample

    def n_subsample(self):
        """
        Returns the size of the subsample used (1 when no subsample is set).
        """
        if self.__subsample is not None:
            return len(self.__subsample)
        else:
            return 1

    def f(self):
        """
        Returns the list of functions
        """
        return self.__f

    def F(self):
        """
        Returns the metafunction
        """
        return self.__F

    def is_symmetric(self):
        """
        Returns True if all functions in the kernel are symmetric.
        """
        return all([f.is_symmetric() for f in self.f()])

    def eval(self, x, phi=None, cell=None, region=None, dofs=None):
        """
        Evaluate the kernel at the points stored in x

        Inputs:

            x: (n_points, dim) array of points at which to evaluate the kernel

            phi: basis-indexed dictionary of shape functions

            region: Geometric region (Cell, Interval, HalfEdge, Vertex)
                Included for modified kernels

            cell: Interval or QuadCell on which kernel is to be evaluated

            phi: (basis-indexed) shape functions over region

        Output:

            Kernel function evaluated at point x.

        TODO: FIX KERNEL! Interaction with assembler
            - Different mesh sizes
            - Derivatives vs. Basis functions.
        """
        #
        # Evaluate constituent functions
        #
        f_vals = []
        for f, dfdx in zip(self.__f, self.__dfdx):
            if isinstance(f, Nodal):
                # Look up this function's shape functions / dofs if supplied
                # (phi_f is None whenever phi is None — both lookups degrade
                # gracefully to a plain evaluation below).
                phi_f = phi if phi is None else phi[f.basis()]
                dof_f = None if dofs is None else dofs[f.basis()]
                if dof_f is None or phi_f is None:
                    fv = f.eval(x=x, derivative=dfdx, cell=cell)
                else:
                    fv = f.eval(x=x, derivative=dfdx, cell=cell, phi=phi_f, dofs=dof_f)
            else:
                fv = f.eval(x=x)
            f_vals.append(fv)
        #
        # Combine functions using meta-function F
        #
        # Fill in whichever of the region-related keyword parameters F accepts
        signature = self.__signature
        bound = self.__bound
        cell_args = {'phi': phi, 'cell': cell, 'region':region, 'dofs':dofs}
        for arg, val in cell_args.items():
            if arg in signature.parameters:
                bound.arguments[arg] = val

        # Evaluate F
        return self.__F(*f_vals, **bound.kwargs)
class Form(object):
"""
Constant, Linear, or Bilinear forms (integrals)
"""
def __init__(self, kernel=None, trial=None, test=None,\
dmu='dx', flag=None, dim=None):
"""
Constructor
Inputs:
*kernel: Kernel, specifying the form's kernel
*trial: Basis, basis function representing the trial space
*test: Basis, basis function representing the test space
*dmu: str, area of integration
'dx' - integrate over a cell
'ds' - integrate over a half-edge
'dv' - integrate over a vertex
*flag: str/int/tuple cell/half_edge/vertex marker
*dim: int, dimension of the domain.
"""
#
# Parse test function
#
if test is not None:
dim = test.dofhandler().element.dim()
assert isinstance(test, Basis), \
'Input "test" must be of type "Basis".'
self.test = test
#
# Parse trial function
#
if trial is not None:
# Check that trial is a Basis
assert isinstance(trial, Basis), \
'Input "trial" must be of type "Basis".'
# Check that dimensions are compatible
assert dim==trial.dofhandler().element.dim(), \
'Test and trial functions should be defined over the same '+\
' dimensional domain.'
self.trial = trial
#
# Parse measure
#
assert dmu in ['dx', 'ds', 'dv'], \
'Input "dmu" should be "dx", "ds", or "dv".'
#
# Check: ds can only be used in 2D
#
if dmu=='ds' and test is not None:
assert dim==2, 'Measure "ds" can only be defined over 2D region.'
self.dmu = dmu
#
# Parse kernel
#
if kernel is not None:
#
# Check that kernel is the right type
#
if isinstance(kernel, Map):
#
# Kernel entered as Map
#
kernel = Kernel(kernel)
elif isinstance(kernel, numbers.Real):
#
# Kernel entered as real number
#
kernel = Kernel(Constant(kernel))
else:
#
# Otherwise, kernel must be of type Kernel
#
assert isinstance(kernel, Kernel), \
'Input "kernel" must be of class "Kernel".'
else:
#
# Default Kernel
#
kernel = Kernel(Constant(1))
self.kernel = kernel
self.flag = flag
#
# Determine Form type
#
if self.test is None:
#
# Constant form
#
form_type = 'constant'
elif self.trial is None:
#
# Linear form
#
form_type = 'linear'
else:
#
# Bilinear form
#
form_type = 'bilinear'
self.type = form_type
def basis(self, mesh=None, subforest_flag=None):
"""
Returns a list of the form's basis functions (trial, test, and Kernel)
Inputs:
mesh: Mesh, reference mesh
subforest_flag: reference submesh flag
Output:
basis: Basis, list of basis functions.
"""
basis = []
if self.test is not None:
#
# Add test basis
#
basis.append(self.test)
if self.trial is not None:
#
# Add trial basis
#
basis.append(self.trial)
#
# Add basis functions from the kernel
#
basis.extend(self.kernel.basis())
#
# Return basis list
#
return basis
"""
This is from when it was important for the basis functions to be
defined on the same mesh.
#
# Add basis functions from kernel
#
kernel_basis = self.kernel.basis()
if mesh is None:
#
# No reference mesh: return all kernel basis functions
#
basis.extend(kernel_basis)
else:
#
# Return only functions with compatible mesh/subforest_flag
#
for phi in kernel_basis:
if phi.same_mesh(mesh=mesh, subforest_flag=subforest_flag):
#
# Check that basis defined over same mesh
#
basis.append(phi)
"""
def dim(self):
    """
    Return the dimension of the form:

        0 = constant, 1 = linear, 2 = bilinear
    """
    # No test function: nothing to integrate against -> constant form.
    if self.test is None:
        return 0
    # Test function but no trial function -> linear functional.
    if self.trial is None:
        return 1
    # Both test and trial functions -> bilinear form.
    return 2
def regions(self, cell):
    """
    Return the list of regions of "cell" over which the form is
    integrated, determined by the measure and the marker flag:

        'dx' -> the cell itself
        'ds' -> the cell's half-edges
        'dv' -> the cell's vertices

    A flag of None matches every region; otherwise only regions carrying
    the form's marker are included.
    """
    flag = self.flag

    def include(entity):
        # A region is integrated over when no marker is set, or when the
        # entity carries the form's marker.
        return flag is None or entity.is_marked(flag)

    dmu = self.dmu
    if dmu == 'dx':
        # Integration region is the cell itself
        return [cell] if include(cell) else []
    elif dmu == 'ds':
        # Integration regions are the cell's half-edges
        return [he for he in cell.get_half_edges() if include(he)]
    elif dmu == 'dv':
        # Integration regions are the cell's vertices
        return [v for v in cell.get_vertices() if include(v)]
    return []
def eval(self, cell, xg, wg, phi, dofs):
    """
    Evaluates the local kernel, test, (and trial) functions of a (bi)linear
    form on a given entity.

    Inputs:

        cell: Cell containing subregions over which Form is defined

        xg: dict, Gaussian quadrature points, indexed by regions.

        wg: dict, Gaussian quadrature weights, indexed by regions.

        phi: dict, shape functions, indexed by regions -> basis

        dofs: dict, global degrees of freedom associated with region,
            indexed by region -> basis

    Outputs:

        f_loc: ndarray, local form whose shape depends on the form type:

            'constant': (1, n_samples) (or a scalar/array fallback when
                there are no valid regions)
            'linear':   (n_dofs_test,) or (n_dofs_test, n_samples)
            'bilinear': (n_dofs_test, n_dofs_trial) or
                        (n_dofs_test, n_dofs_trial, n_samples)

    Note: This method should be run in conjunction with the Assembler class
    """
    # Determine regions over which form is defined
    regions = self.regions(cell)

    # Number of samples
    # NOTE(review): assumes kernel.n_subsample() returns None for a
    # deterministic kernel and an int for a sampled one -- confirm.
    n_samples = self.kernel.n_subsample()

    f_loc = None
    for region in regions:
        # Get Gauss points in region
        x = xg[region]

        #
        # Compute kernel, weight by quadrature weights
        #
        kernel = self.kernel
        Ker = kernel.eval(x=x, region=region, cell=cell,
                          phi=phi[region], dofs=dofs[region])

        # Weight kernel using quadrature weights (transpose trick keeps
        # broadcasting correct for both 1D and sampled 2D kernels)
        wKer = (wg[region]*Ker.T).T

        if self.type=='constant':
            #
            # Constant form
            #

            # Initialize form if necessary
            if f_loc is None:
                f_loc = np.zeros((1,n_samples))
            #
            # Update form
            #
            f_loc += np.sum(wKer, axis=0)

        elif self.type=='linear':
            #
            # Linear form
            #

            # Test functions evaluated at Gauss nodes
            n_dofs_test = self.test.dofhandler().element.n_dofs()
            test = phi[region][self.test]

            # Initialize forms if necessary
            if f_loc is None:
                if n_samples is None:
                    f_loc = np.zeros(n_dofs_test)
                else:
                    f_loc = np.zeros((n_dofs_test,n_samples))

            # Update form
            f_loc += np.dot(test.T, wKer)

        elif self.type=='bilinear':
            #
            # Bilinear form
            #

            # Test functions evaluated at Gauss nodes
            n_dofs_test = self.test.dofhandler().element.n_dofs()
            test = phi[region][self.test]

            # Trial functions evaluated at Gauss nodes
            n_dofs_trial = self.trial.dofhandler().element.n_dofs()
            trial = phi[region][self.trial]

            #
            # Initialize local matrix if necessary
            #
            if f_loc is None:
                #
                # Initialize form
                #
                if n_samples is None:
                    f_loc = np.zeros((n_dofs_test,n_dofs_trial))
                else:
                    f_loc = np.zeros((n_dofs_test,n_dofs_trial,n_samples))

            #
            # Update form
            #
            if n_samples is None:
                #
                # Deterministic kernel
                #
                f_loc += np.dot(test.T, np.dot(np.diag(wg[region]*Ker),trial))
            else:
                #
                # Sampled kernel: accumulate one trial column at a time
                #
                for i in range(n_dofs_trial):
                    f_loc[:,i,:] += np.dot(test.T, (trial[:,i]*wKer.T).T)

    #
    # Initialize zero local matrix if necessary (no valid regions found)
    #
    if f_loc is None:
        if self.type == 'constant':
            #
            # Constant form
            #
            if n_samples is None:
                #
                # Deterministic form
                #
                f_loc = 0
            else:
                #
                # Sampled form
                #
                f_loc = np.zeros(n_samples)
        elif self.type=='linear':
            #
            # Linear form
            #
            n_dofs_test = self.test.dofhandler().element.n_dofs()
            if n_samples is None:
                #
                # Deterministic form
                #
                f_loc = np.zeros(n_dofs_test)
            else:
                #
                # Sampled form
                #
                f_loc = np.zeros((n_dofs_test, n_samples))
        elif self.type=='bilinear':
            #
            # Bilinear form
            #
            n_dofs_test = self.test.dofhandler().element.n_dofs()
            n_dofs_trial = self.trial.dofhandler().element.n_dofs()
            if n_samples is None:
                #
                # Deterministic form
                #
                f_loc = np.zeros((n_dofs_test, n_dofs_trial))
            else:
                #
                # Sampled form
                #
                f_loc = np.zeros((n_dofs_test, n_dofs_trial, n_samples))

    #
    # Return f_loc
    #
    return f_loc
class IIForm(Form):
    """
    Bilinear form arising from the interpolatory approximation of an integral
    operator.

        Cu(x) = I_D k(x,y) u(y) dy

        Ku(x)_i = I_D k(xi,y) u(y) dy, i=1,...,n_dofs
    """
    def __init__(self, kernel=None, trial=None, test=None, dmu='dx', flag=None):
        """
        Constructor

        Inputs:

            *kernel: Kernel, specifying the form's kernel

            *trial: Basis, basis function representing the trial space

            *test: Basis, basis function representing the test space

            *dmu: str, area of integration

                'dx' - integrate over a cell
                'ds' - integrate over an edge
                'dv' - integrate over a vertex

            *flag: str/int/tuple cell/half_edge/vertex marker
        """
        #
        # Initialize form
        #
        Form.__init__(self, kernel=kernel, trial=trial, test=test,
                      dmu=dmu, flag=flag)

        #
        # Checks: interpolatory integral forms need both spaces
        #
        assert trial is not None and test is not None,\
        'Both trial and test functions should be specified.'

    def eval(self, cell, xg, wg, phi, dofs):
        """
        Evaluate the local bilinear form

        I_{Ej} k(xi, y) phij(y)dy phii(x) for all dof-vertices xi

        where Ej is a mesh cell

        Inputs:

            cell: Cell, containing subregions over which Form is defined

            xg: Gaussian quadrature points, indexed by region

            wg: Gaussian quadrature weights, indexed by region

            phi: shape functions evaluated at quadrature points

            dofs: degrees of freedom, indexed by region

        Output:

            f_loc: (n, n_dofs) array, where n is the number of
                dof-vertices of the test space.
        """
        # =====================================================================
        # Interpolate in the test function component
        # =====================================================================
        test = self.test
        x = test.dofhandler().get_dof_vertices(test.subforest_flag())
        n = x.shape[0]

        # =====================================================================
        # Specify trial function
        # =====================================================================
        trial = self.trial

        # Number of dofs
        n_dofs = trial.dofhandler().element.n_dofs()

        f_loc = None
        for reg in self.regions(cell):
            # Get trial functions evaluated at Gauss nodes
            phi_g = phi[reg][trial]
            x_g = xg[reg]
            w_g = wg[reg]

            #
            # Initialize local matrix if necessary
            #
            if f_loc is None:
                #
                # Initialize form
                #
                f_loc = np.zeros((n,n_dofs))

            #
            # Evaluate covariance function at the local Gauss points
            #
            n_gauss = x_g.shape[0]
            ii,jj = np.meshgrid(np.arange(n),np.arange(n_gauss), indexing='ij')
            # BUGFIX: bind the point pairs to a fresh name. The original
            # code reassigned "x" here, so on any second region iteration
            # the dof-vertex array had been replaced by a tuple and the
            # indexing "x[ii.ravel(),:]" failed.
            x_pairs = (x[ii.ravel(),:], x_g[jj.ravel(),:])

            C_loc = self.kernel.eval(x_pairs, region=reg, cell=cell,
                                     phi=phi[reg], dofs=dofs[reg])
            C_loc = C_loc.reshape(n,n_gauss)

            #
            # Compute local integral
            #
            # Weight shape functions
            Wphi = np.diag(w_g).dot(phi_g)

            # Combine
            f_loc += C_loc.dot(Wphi)
        return f_loc
class IPForm(Form):
    """
    Bilinear form arising from the projection based approximation of an integral
    operator.

        Cu(x) = I_D k(x,y) u(y) dy

        Kij = I_D I_D k(x,y) phij(y)dy phii(x) dx,

    Note: The approximation of Cu(x) is given by

        Cu(x) ~= M^{-1} K u
    """
    def __init__(self, kernel=None, trial=None, test=None, dmu='dx', flag=None):
        """
        Constructor

        Inputs:

            *kernel: Kernel, specifying the form's kernel

            *trial: Basis, basis function representing the trial space

            *test: Basis, basis function representing the test space

            *dmu: str, area of integration

                'dx' - integrate over a cell
                'ds' - integrate over an edge
                'dv' - integrate over a vertex

            *flag: str/int/tuple cell/half_edge/vertex marker
        """
        #
        # Initialize form
        #
        Form.__init__(self, kernel=kernel, trial=trial, test=test, dmu=dmu, flag=flag)

        #
        # Checks
        #
        assert trial is not None and test is not None,\
        'Integral forms have both test and trial functions'

        # Integral kernels act on point pairs (x, y), so every kernel
        # function must be bivariate.
        for f in kernel.f():
            assert f.n_variables()==2, 'Integral kernel must be bivariate.'

    def eval(self, cells, xg, wg, phi, dofs):
        """
        Evaluates the local bilinear form

        I_{Ei} I_{Ej} k(x,y) phij(y) dy phii(x) dx,

        where Ei, Ej are mesh cells

        Inputs:

            cells: Cells (2,) pair, containing subregions over which Form is defined

            xg: dict, (2,) pair of Gaussian quadrature points

            wg: dict, (2,) pair of Gaussian quadrature weights

            phi: (2,) pair of shape functions evaluated at quadrature points

            dofs: (2,) pair of degrees of freedom, indexed by region
        """
        # Cells
        ci, cj = cells

        # Determine integration regions
        # NOTE(review): regi/regj computed here are immediately shadowed
        # by the loop variables below; these two calls appear redundant.
        regi = self.regions(ci)
        regj = self.regions(cj)

        # =====================================================================
        # Specify the test and trial functions
        # =====================================================================
        test = self.test
        trial = self.trial

        # Degrees of freedom
        n_dofsi = self.test.dofhandler().element.n_dofs()
        n_dofsj = self.trial.dofhandler().element.n_dofs()

        # Sample size
        # NOTE(review): this branch tests n_samples==1, unlike Form.eval
        # which tests "is None" -- presumably n_subsample() returns an
        # int here; confirm against the Kernel class.
        n_samples = self.kernel.n_subsample()

        f_loc = None
        for regi in self.regions(ci):
            for regj in self.regions(cj):
                # Access test(i) and trial(j) functions
                phii = phi[0][regi][test]
                phij = phi[1][regj][trial]

                # Get quadrature nodes
                xi_g = xg[0][regi]
                xj_g = xg[1][regj]

                # Get quadrature weights
                wi_g = wg[0][regi]
                wj_g = wg[1][regj]

                # Get dofs
                dofi = dofs[0][regi]
                dofj = dofs[1][regj]

                #
                # Initialize local matrix if necessary
                #
                if f_loc is None:
                    #
                    # Initialize form
                    #
                    if n_samples==1:
                        f_loc = np.zeros((n_dofsi,n_dofsj))
                    else:
                        f_loc = np.zeros((n_dofsi,n_dofsj,n_samples))

                #
                # Evaluate kernel function at the local Gauss points
                #
                n_gauss = xi_g.shape[0]
                ig = np.arange(n_gauss)
                ii,jj = np.meshgrid(ig,ig,indexing='ij')
                # All pairwise combinations of quadrature points in the
                # two cells, passed to the bivariate kernel as a tuple.
                x = (xi_g[ii.ravel(),:],xj_g[jj.ravel(),:])

                """
                if self.dim() == 1:
                    x1, x2 = xi_g[ii.ravel()], xj_g[jj.ravel()]
                elif self.dim() == 2:
                    x1, x2 = xi_g[ii.ravel(),:],xj_g[jj.ravel(),:]
                """
                #x, phi=None, cell=None, region=None, dofs=None)
                C_loc = self.kernel.eval(x, cell=(ci,cj), region=(regi, regj),
                                         phi=(phi[0][regi],phi[1][regj]),
                                         dofs=(dofi,dofj))
                C_loc = C_loc.reshape(n_gauss,n_gauss)

                #
                # Compute local integral
                #
                # Weight shape functions
                Wphii = np.diag(wi_g).dot(phii)
                Wphij = np.diag(wj_g).dot(phij)

                # Combine
                f_loc += np.dot(Wphii.T, C_loc.dot(Wphij))

        # Return local form
        return f_loc
'''
class IForm(Form):
"""
Bilinear form for an integral operator
Cu(x) = I_D k(x,y) u(y) dy
TODO: Replace with IIForm and IPForm
"""
def __init__(self, kernel, trial=None, test=None, dmu='dx', flag=None,
form_type='projection'):
"""
Constructor
Inputs:
*kernel: Kernel, specifying the form's kernel
*trial: Basis, basis function representing the trial space
*test: Basis, basis function representing the test space
*dmu: str, area of integration
'dx' - integrate over a cell
'ds' - integrate over an edge
*flag: str/int/tuple cell/half_edge/vertex marker
*approximation_type: str ('projection' or 'interpolation').
"""
self.type = 'bilinear'
self.flag = flag
#
# Trial space
#
assert isinstance(trial, Basis),\
'Input "trial" should be of type "Basis".'
self.trial = trial
# Dimension
self.__dim = self.trial.element.dim()
#
# Test space
#
assert isinstance(test, Basis),\
'Input "test" should be of type "Basis".'
self.test = test
#
# Check kernel
#
self.kernel = kernel
#
# Check measure
#
self.dmu = dmu
#
# Record form type
#
assert form_type in ['projection', 'interpolation'], \
'Input "approximation_type" is either "projection" or "interpolation".'
self.__approximation_type = form_type
def dim(self):
"""
Returns the dimension of the underlying domain
"""
return self.__dim
def assembly_type(self):
"""
Specify whether the operator is approximated via projection or
interpolation.
"""
return self.__approximation_type
def eval(self, cells, xg, wg, phi):
"""
Evaluate the integral form between two cells
"""
if self.assembly_type()=='projection':
#
# Projection mode
#
return self.eval_projection(cells, xg, wg, phi)
elif self.assembly_type()=='interpolation':
#
# Interpolation
#
return self.eval_interpolation(cells, xg, wg, phi)
def eval_interpolation(self, cell, x, xg, wg, phi):
"""
Evaluate the local bilinear form
I_{Ej} k(xi, y) phij(y)dy phii(x) for xi in Ei
where Ei, Ej are mesh cells
Inputs:
cell: Cell, containing subregions over which Form is defined
x: (n, dim) array of interpolation points over mesh
xg: Gaussian quadrature points
wg: Gaussian quadrature weights
phi: shape functions evaluated at quadrature points
"""
n = x.shape[0]
# Determine the integration regions
reg = self.integration_regions(cell)
# =====================================================================
# Specify the test and trial functions
# =====================================================================
# Derivatives of trial functions
der = self.trial.derivative
# Number of dofs
n_dofs = self.trial.element.n_dofs()
# Element types
etype = self.trial.element.element_type()
f_loc = None
for reg in self.integration_regions(cell):
# Get trial functions evaluated at Gauss nodes
trial = phi[reg][etype][der]
x_g = xg[reg]
w_g = wg[reg]
#
# Initialize local matrix if necessary
#
if f_loc is None:
#
# Initialize form
#
f_loc = np.zeros((n,n_dofs))
#
# Evaluate covariance function at the local Gauss points
#
n_gauss = x_g.shape[0]
ii,jj = np.meshgrid(np.arange(n),np.arange(n_gauss), indexing='ij')
if self.dim() == 1:
x1, x2 = x[ii.ravel()], x_g[jj.ravel()]
elif self.dim() == 2:
x1, x2 = x[ii.ravel(),:],x_g[jj.ravel(),:]
C_loc = self.kernel.eval(x1,x2)
C_loc = C_loc.reshape(n,n_gauss)
#
# Compute local integral
#
# Weight shape functions
Wphi = np.diag(w_g).dot(trial)
# Combine
f_loc += C_loc.dot(Wphi)
return f_loc
def eval_projection(self, cells, xg, wg, phi):
"""
Evaluates the local bilinear form
I_{Ei} I_{Ej} k(x,y) phij(y) dy phii(x) dx,
where Ei, Ej are mesh cells
Inputs:
cells: Cells (2,) pair, containing subregions over which Form is defined
xg: dict, (2,) pair of Gaussian quadrature points
wg: dict, (2,) pair of Gaussian quadrature weights
phi: (2,) pair of shape functions evaluated at quadrature points
"""
# Cells
ci, cj = cells
# Determine integration regions
regi = self.integration_regions(ci)
regj = self.integration_regions(cj)
# =====================================================================
# Specify the test and trial functions
# =====================================================================
# Derivatives of test functions
deri, derj = self.test.derivative, self.trial.derivative
# Element types
etypei = self.test.element.element_type()
etypej = self.trial.element.element_type()
# Degrees of freedom
n_dofsi = self.test.element.n_dofs()
n_dofsj = self.trial.element.n_dofs()
# Sample size
n_samples = self.kernel.n_samples
f_loc = None
for regi in self.integration_regions(ci):
for regj in self.integration_regions(cj):
# Access test(i) and trial(j) functions
phii = phi[0][regi][etypei][deri]
phij = phi[1][regj][etypej][derj]
# Get quadrature nodes
xi_g = xg[0][regi]
xj_g = xg[1][regj]
# Get quadrature weights
wi_g = wg[0][regi]
wj_g = wg[1][regj]
#
# Initialize local matrix if necessary
#
if f_loc is None:
#
# Initialize form
#
if n_samples is None:
f_loc = np.zeros((n_dofsi,n_dofsj))
else:
f_loc = np.zeros((n_dofsi,n_dofsj,n_samples))
#
# Evaluate kernel function at the local Gauss points
#
n_gauss = xi_g.shape[0]
ig = np.arange(n_gauss)
ii,jj = np.meshgrid(ig,ig,indexing='ij')
if self.dim() == 1:
x1, x2 = xi_g[ii.ravel()], xj_g[jj.ravel()]
elif self.dim() == 2:
x1, x2 = xi_g[ii.ravel(),:],xj_g[jj.ravel(),:]
C_loc = self.kernel.eval(x1,x2)
C_loc = C_loc.reshape(n_gauss,n_gauss)
#
# Compute local integral
#
# Weight shape functions
Wphii = np.diag(wi_g).dot(phii)
Wphij = np.diag(wj_g).dot(phij)
# Combine
f_loc += np.dot(Wphii.T, C_loc.dot(Wphij))
# Return local form
return f_loc
'''
class Assembler(object):
"""
Representation of sums of bilinear/linear/constant forms as
matrices/vectors/numbers.
"""
def __init__(self, problems, mesh=None, subforest_flag=None, n_gauss=(4,16)):
    """
    Constructor

    - Define the quadrature rules that will be used for assembly

    - Collect information from all forms to construct the dofhandlers
      necessary for evaluating kernels and shape functions and for
      storing assembled forms in arrays.

    - Initialize AssembledForm's, objects for storing the assembled
      matrices, vectors, or constants.

    Inputs:

        problems: list of bilinear, linear, or constant Forms

        mesh: Mesh, finite element mesh

        subforest_flag: submesh marker over which to assemble forms

        n_gauss: int tuple, number of quadrature nodes in 1d and 2d respectively
    """
    #
    # Parse "problems" Input: normalize to a list of lists of Forms.
    #
    problem_error = 'Input "problems" should be (i) a Form, (ii) a list '+\
    'of Forms, or (iii) a list of a list of Forms.'
    if type(problems) is list:
        #
        # Multiple forms (and/or problems)
        #
        if all([isinstance(problem, Form) for problem in problems]):
            #
            # Single problem consisting of multiple forms
            #
            problems = [problems]
        else:
            #
            # Multiple problems
            #
            for problem in problems:
                if type(problem) is not list:
                    #
                    # Found problem not in list form
                    #
                    assert isinstance(problem, Form), problem_error
                    #
                    # Convert form to problem
                    # NOTE(review): this rewrites entries of the caller's
                    # "problems" list in place.
                    #
                    problems[problems.index(problem)] = [problem]
    else:
        #
        # Single form
        #
        assert isinstance(problems, Form), problem_error
        problems = [[problems]]

    # Store info
    self.problems = problems

    """
    #
    # Get mesh from problems (check consistency)
    #
    ref_basis = None
    for problem in problems:
        for form in problem:
            for basis in [form.test, form.trial]:
                if basis is not None:
                    if ref_basis is None:
                        ref_basis = basis

                    if mesh is None:
                        mesh = basis.dofhandler().mesh

                    # TODO: Get rid of this
                    if subforest_flag is None:
                        subforest_flag = basis.subforest_flag()

                    assert basis.same_mesh(ref_basis)
    """
    assert mesh is not None, 'No mesh specified.'

    # Store mesh
    self.__mesh = mesh
    self.__subforest_flag = subforest_flag

    #
    # Initialize Gauss Quadrature Rule
    #
    self.n_gauss_2d = n_gauss[1]
    self.n_gauss_1d = n_gauss[0]
    dim = self.mesh().dim()
    if dim==1:
        #
        # 1D rule over intervals
        #
        self.cell_rule = GaussRule(self.n_gauss_1d,shape='interval')
    elif dim==2:
        #
        # 2D rule over rectangles
        #
        self.edge_rule = GaussRule(self.n_gauss_1d,shape='interval')
        self.cell_rule = GaussRule(self.n_gauss_2d,shape='quadrilateral')

    #
    # Initialize list for storing assembled forms [iproblem][dim]
    #
    af = []
    for problem in self.problems:
        p_af = [None]*3
        for form in problem:
            dim = form.dim()
            if p_af[dim] is None:
                # Initialize new assembled form
                p_af[dim] = AssembledForm(dim)

            # Incorporate form
            p_af[dim].add_form(form)
        af.append(p_af)
    self.__af = af

    #
    # Initialize dictionaries to store Dirichlet boundary conditions and
    # hanging node conditions (one dict per problem).
    #
    dirichlet_bc = []
    hanging_nodes = []
    for dummy in problems:
        dirichlet_bc.append({})
        hanging_nodes.append({})

    # Store result
    self.__dirichlet_bc = dirichlet_bc
    self.__hanging_nodes = hanging_nodes
def mesh(self):
    """
    Return the finite element Mesh over which forms are assembled.
    """
    return self.__mesh
def subforest_flag(self):
    """
    Return the submesh marker over which forms are assembled (or None).
    """
    return self.__subforest_flag
def assembled_forms(self, i_problem=0):
    """
    Return the list of AssembledForm's (indexed by form dimension
    0/1/2) for the i_problem'th problem.
    """
    return self.__af[i_problem]
def assemble(self, keep_cellwise_data=False):
    """
    Assembles constant, linear, and bilinear forms over computational mesh,

    Input:

        keep_cellwise_data: bool, if True, the per-cell contributions of
            each assembled form are retained after aggregation.

        problems: A list of finite element problems. Each problem is a list
            of constant, linear, and bilinear forms.

    Output:

        assembled_forms: list of dictionaries (one for each problem), each of
            which contains:

        A: double coo_matrix, system matrix determined by bilinear forms and
            boundary conditions.

        b: double, right hand side vector determined by linear forms and
            boundary conditions.

    Note: If problems contain one integral form (IPFORM), then the assembly
    uses a double loop of cells. This is inefficient if problems are mixed.
    """
    # Timing accumulators (diagnostics only)
    t_shape_info = 0
    #t_gauss_rules = 0
    t_shape_eval = 0
    t_form_eval = 0
    #t_get_node_address = 0
    #t_af_update = 0
    #t_af_consolidate = 0
    #t_reference_map = 0

    #
    # Assemble forms over mesh cells
    #
    sf = self.subforest_flag()
    cells = self.mesh().cells.get_leaves(subforest_flag=sf)
    for ci in cells:
        #
        # Determine what shape functions and Gauss rules to
        # compute on current cells
        #
        tic = time.time()
        ci_shape_info = self.shape_info(ci)
        t_shape_info += time.time()-tic

        #
        # Compute shape functions on cell
        #
        tic = time.time()
        xi_g, wi_g, phii, dofsi = self.shape_eval(ci_shape_info, ci)
        t_shape_eval += time.time()-tic

        #
        # Assemble local forms and assign to global dofs
        #
        for problem, i_problem in zip(self.problems, range(self.n_problems())):
            #
            # Loop over problems
            #
            for form in problem:
                #
                # Loop over forms
                #

                # Get form dimension
                dim = form.dim()

                # Get assembled form
                aform = self.assembled_forms(i_problem)[dim]

                #
                # Evaluate form
                #
                if not isinstance(form, IPForm):
                    #
                    # Not an integral form
                    #

                    # Evaluate local form
                    tic = time.time()
                    form_loc = form.eval(ci, xi_g, wi_g, phii, dofsi)
                    t_form_eval += time.time()-tic

                    # Update assembled form cellwise
                    if dim == 0:
                        #
                        # Constant form
                        #
                        aform.update_cellwise(ci, form_loc)

                    elif dim == 1:
                        #
                        # Linear form
                        #
                        dofs = [form.test.dofs(ci)]
                        aform.update_cellwise(ci, form_loc, dofs=dofs)

                    elif dim == 2:
                        #
                        # Bilinear form
                        #

                        # Trial dofs
                        dofs_trl = form.trial.dofs(ci)

                        # Test dofs
                        if isinstance(form, IIForm):
                            # Interpolatory Integral forms use all dofs
                            dofs_tst = form.test.dofs(None)
                        else:
                            dofs_tst = form.test.dofs(ci)

                        # Update assembled form
                        dofs = [dofs_tst, dofs_trl]
                        aform.update_cellwise(ci, form_loc, dofs=dofs)

                if isinstance(form, IPForm):
                    #
                    # Form is Double Integral
                    #
                    for cj in cells:
                        #
                        # Shape function info on ocell
                        #
                        cj_sinfo = self.shape_info(cj)

                        #
                        # Compute shape function on cell
                        #
                        xj_g, wj_g, phij, dofsj = self.shape_eval(cj_sinfo, cj)

                        #
                        # Evaluate integral form
                        #
                        form_loc = form.eval((ci,cj), (xi_g,xj_g), \
                                             (wi_g,wj_g), (phii,phij),\
                                             (dofsi,dofsj))

                        # Test and trial dofs
                        dofs_tst = form.test.dofs(ci)
                        dofs_trl = form.trial.dofs(cj)

                        #
                        # Update Assembled Form
                        #
                        aform.update_cellwise(ci, form_loc,
                                              dofs = [dofs_tst, dofs_trl])

                        #
                        # Special efficiency when kernel is symmetric
                        #
                        if form.kernel.is_symmetric():
                            if ci!=cj:
                                #
                                # Symmetric kernel, store the transpose
                                #
                                aform.update_cellwise(ci, form_loc.T,
                                                      dofs = [dofs_trl, dofs_tst])
                            else:
                                #
                                # Symmetric forms assembled over subtriangular block
                                #
                                break
        #
        # Aggregate cellwise information
        #
        for i_problem in range(self.n_problems()):
            # Get Dirichlet BC's
            dir_bc = self.get_dirichlet(i_problem)

            # Get hanging nodes
            hng = self.get_hanging_nodes(i_problem)

            for dim in range(3):
                aform = self.assembled_forms(i_problem)[dim]
                if aform is not None:
                    #
                    # Update aggregate
                    #
                    aform.distribute(ci, dir_bc=dir_bc, hng=hng)

                    #
                    # Delete cellwise information
                    #
                    if not keep_cellwise_data:
                        aform.clear_cellwise_data(ci)

    #
    # Consolidate arrays
    #
    for i_problem in range(self.n_problems()):
        for dim in range(3):
            aform = self.assembled_forms(i_problem)[dim]
            if aform is not None:
                aform.consolidate()
    """
    for i_problem in range(len(self.problems)):
        for form_type in self.af()[i_problem].keys():
            #
            # Iterate over assembled forms
            #
            af = self.af[i_problem][form_type]

            #
            # Consolidate assembly
            #
            tic = time.time()
            af.consolidate(clear_cell_data=clear_cell_data)
            t_af_consolidate += time.time()-tic
            print('t_consolidate', t_af_consolidate)

    print('Timings')
    print('Shape infor',t_shape_info)
    print('Shape Eval', t_shape_eval)
    print('Form Eval', t_form_eval)
    print('Get node address', t_get_node_address)
    print('AF update', t_af_update)
    print('AF consolidate', t_af_consolidate)
    """
'''
#
# Assemble forms over boundary edges
#
if isinstance(self.mesh, Mesh2D):
#
# Determine flags used to mark boundary edges
#
boundary_segments = \
self.mesh.get_boundary_segments(subforest_flag=subforest_flag)
for problem in problems:
for nc in problem['bc']['neumann']:
bnd_segs = self.mesh.get_boundary_segments(subforest_flag=subforest_flag, flag=nc['marker'])
'''
'''
if boundary_conditions is not None:
#
# Unpack boundary data
#
if 'dirichlet' in boundary_conditions:
bc_dirichlet = boundary_conditions['dirichlet']
else:
bc_dirichlet = None
if 'neumann' in boundary_conditions:
bc_neumann = boundary_conditions['neumann']
else:
bc_neumann = None
if 'robin' in boundary_conditions:
bc_robin = boundary_conditions['robin']
else:
bc_robin = None
rows = []
cols = []
dir_dofs_encountered = set()
for node in self.mesh.root_node().get_leaves():
node_dofs = self.dofhandler.get_global_dofs(node)
cell = node.cell()
#
# Assemble local system matrices/vectors
#
if bilinear_forms is not None:
bf_loc = np.zeros((n_dofs,n_dofs))
for bf in bilinear_forms:
bf_loc += self.form_eval(bf, node)
if linear_forms is not None:
lf_loc = np.zeros((n_dofs,))
for lf in linear_forms:
lf_loc += self.form_eval(lf, node)
if boundary_conditions:
#
# Boundary conditions
#
for direction in ['W','E','S','N']:
edge = cell.get_edges(direction)
#
# Check for Neumann conditions
#
neumann_edge = False
if bc_neumann is not None:
for bc_neu in bc_neumann:
m_neu,g_neu = bc_neu
if m_neu(edge):
# ---------------------------------------------
# Neumann edge
# ---------------------------------------------
neumann_edge = True
#
# Update local linear form
#
lf_loc += self.form_eval((g_neu,'v'),node, \
edge_loc=direction)
break
#
# Else Check Robin Edge
#
if not neumann_edge and bc_robin is not None:
for bc_rob in bc_robin:
m_rob, data_rob = bc_rob
if m_rob(edge):
# ---------------------------------------------
# Robin edge
# ---------------------------------------------
gamma_rob, g_rob = data_rob
#
# Update local bilinear form
#
bf_loc += \
gamma_rob*self.form_eval((1,'u','v'),\
node,\
edge_loc=direction)
#
# Update local linear form
#
lf_loc += \
gamma_rob*self.form_eval((g_rob,'v'),\
node,\
edge_loc=direction)
break
#
# Check for Dirichlet Nodes
#
x_ref = self.element.reference_nodes()
x_cell = self.rule_2d.map(cell,x=x_ref)
cell_dofs = np.arange(n_dofs)
if bc_dirichlet is not None:
list_dir_dofs_loc = []
for bc_dir in bc_dirichlet:
m_dir,g_dir = bc_dir
is_dirichlet = m_dir(x_cell[:,0],x_cell[:,1])
if is_dirichlet.any():
dir_nodes_loc = x_cell[is_dirichlet,:]
dir_dofs_loc = cell_dofs[is_dirichlet]
list_dir_dofs_loc.extend(dir_dofs_loc)
for j,x_dir in zip(dir_dofs_loc,dir_nodes_loc):
#
# Modify jth row
#
notj = np.arange(n_dofs)!=j
uj = g_dir(x_dir[0],x_dir[1])
if node_dofs[j] not in dir_dofs_encountered:
bf_loc[j,j] = 1.0
bf_loc[j,notj]=0.0
lf_loc[j] = uj
else:
bf_loc[j,:] = 0.0 # make entire row 0
lf_loc[j] = 0.0
#
# Modify jth column and right hand side
#
lf_loc[notj] -= bf_loc[notj,j]*uj
bf_loc[notj,j] = 0.0
for dof in list_dir_dofs_loc:
dir_dofs_encountered.add(dof)
#
# Local to global mapping
#
for i in range(n_dofs):
#
# Update right hand side
#
if linear_forms is not None:
linvec[node_dofs[i]] += lf_loc[i]
#
# Update system matrix
#
if bilinear_forms is not None:
for j in range(n_dofs):
rows.append(node_dofs[i])
cols.append(node_dofs[j])
bivals.append(bf_loc[i,j])
#
# Save results as a sparse matrix
#
out = []
if bilinear_forms is not None:
A = sparse.coo_matrix((bivals,(rows,cols)))
out.append(A)
if linear_forms is not None:
out.append(linvec)
if len(out) == 1:
return out[0]
elif len(out) == 2:
return tuple(out)
'''
'''
def map_to_global(self, form_loc, form, cell):
"""
Maps local form on a cell (in terms of local shape functions) onto the
global form (in terms of global basis functions). Global basis functions
are the same as shape functions, except in cells adjoining hanging nodes.
There, global basis fns are extended to ensure continuity over hanging n.
Input:
loc_form: double, np.array representing the local form returned
by method 'Form.eval'
form: Form, class used to extract element types
cell: Cell, mesh cell over which assembly is occurring.
Output:
form_glb: double, array evaluated form in terms of global basis
functions.
'constant': (1,) or (n_smpl, ) array
'linear': (n_tst_glb, 1) or (n_tst_glb, n_smpl) array
'bilinear: (n_tst_glb, n_trl_glb, n_smpl) array
TODO: Not necessary.
"""
subforest_flag = self.subforest_flag
if form.type=='constant':
#
# Constant form
#
return form_loc
elif form.type=='linear':
#
# Linear form
#
# Get element types for test functions
etype_tst = form.test.element.element_type()
# Extract dofhandler
dh_tst = self.dofhandlers[etype_tst]
# Retrieve local to global mapping
l2g_tst = dh_tst.get_l2g_map(cell, subforest_flag=subforest_flag)
# Get global dofs
dofs_tst = list(l2g_tst.keys())
# Convert l2g map to matrix
l2g_tst = np.array(list(l2g_tst.values()))
# Compute linear form in terms of global basis
L = l2g_tst.dot(form_loc)
# Return global linear form and global test dofs
return L, dofs_tst
elif form.type=='bilinear':
#
# Bilinear form
#
# Get element types for test and trial functions
etype_tst = form.test.element.element_type()
etype_trl = form.trial.element.element_type()
# Extract dofhandlers for both element types
dh_tst = self.dofhandlers[etype_tst]
dh_trl = self.dofhandlers[etype_trl]
# Retrieve the local to global mapping for each dh over the cell
l2g_tst = dh_tst.get_l2g_map(cell, subforest_flag=subforest_flag)
l2g_trl = dh_trl.get_l2g_map(cell, subforest_flag=subforest_flag)
# Get global dofs
dofs_tst = list(l2g_tst.keys())
dofs_trl = list(l2g_trl.keys())
# Convert l2g maps to matrix form
l2g_tst = np.array(list(l2g_tst.values()))
l2g_trl = np.array(list(l2g_trl.values()))
# Compute bilinear form in terms of global basis
dim = len(form_loc.shape)
if dim==3:
#
# Sampled bilinear form (n_tst, n_trl, n_smpl)
#
# Change to (n_smpl, n_tst, n_trl)
form_loc = form_loc.transpose([2,0,1])
# Multiply each slice by Test*(..)*Trial^T
B = l2g_tst.dot(form_loc.dot(l2g_trl.T))
# Change dimensions to (n_glb_tst, n_glb_trl, n_smpl)
B = B.transpose([0,2,1])
elif dim==2:
#
# Deterministic bilinear form (n_tst, n_trl)
#
B = l2g_tst.dot(form_loc).dot(l2g_trl.T)
return B, dofs_tst, dofs_trl
'''
'''
def consolidate_assembly(self):
"""
Postprocess assembled forms to make them amenable to linear algebra
operations. This includes renumbering equations that involve only a
subset of the degreees of freedom.
Bilinear Form:
row_dofs: (n_row_dofs, ) ordered numpy array of mesh dofs
corresponding to test functions.
col_dofs: (n_col_dofs, ) ordered numpy array of unique mesh dofs
corresponding to trial space
rows: (n_nonzero,) row indices (renumbered)
cols: (n_nonzero,) column indices (renumbered).
vals: (n_nonzero, n_samples) numpy array of matrix values
corresponding to each row-column pair
Linear Form:
row_dofs: (n_row_dofs,) order array of mesh dofs corresponding
to row dofs
vals: (n_row_dofs, n_samples) array of vector values for each dof.
Constant form:
vals: (n_samples, ) array of integral values.
TODO: Delete!
"""
for i_problem in range(len(self.problems)):
for form_type in self.af[i_problem].keys():
form = self.af[i_problem][form_type]
n_samples = self.n_samples(i_problem, form_type)
if form_type=='bilinear':
# =========================================================
# Bilinear Form
# =========================================================
#
# Parse row and column dofs
#
# Flatten
rows = []
cols = []
vals = []
rcv = (form['row_dofs'], form['col_dofs'], form['vals'])
for rdof, cdof, val in zip(*rcv):
#
# Store global dofs in vectors
#
R,C = np.meshgrid(rdof,cdof)
rows.append(R.ravel())
cols.append(C.ravel())
#
# Store values
#
n_entries = len(rdof)*len(cdof)
if n_samples is None:
#
# Deterministic form
#
vals.append(val.reshape(n_entries, order='F'))
else:
#
# Sampled form
#
v = val.reshape((n_entries,n_samples), order='F')
vals.append(v)
#
rows = np.concatenate(rows, axis=0)
cols = np.concatenate(cols, axis=0)
vals = np.concatenate(vals, axis=0)
#
# Renumber dofs from 0 ... n_dofs
#
# Extract sorted list of unique dofs for rows and columns
unique_rdofs = list(set(list(rows)))
unique_cdofs = list(set(list(cols)))
# Dof to index mapping for rows
map_rows = np.zeros(unique_rdofs[-1]+1, dtype=np.int)
map_rows[unique_rdofs] = np.arange(len(unique_rdofs))
# Dof-to-index mapping for cols
map_cols = np.zeros(unique_cdofs[-1]+1, dtype=np.int)
map_cols[unique_cdofs] = np.arange(len(unique_cdofs))
# Transform from dofs to indices
rows = map_rows[rows]
cols = map_cols[cols]
# Store row and column information
form['row_dofs'] = np.array(unique_rdofs)
form['col_dofs'] = np.array(unique_cdofs)
form['rows'] = rows
form['cols'] = cols
form['vals'] = vals
elif form_type=='linear':
# =========================================================
# Linear Form
# =========================================================
#
# Parse row dofs
#
# Flatten list of lists
rows = [item for sublist in form['row_dofs'] for item in sublist]
# Extract sorted list of unique dofs for rows and columns
unique_rdofs = list(set(rows))
n_dofs = len(unique_rdofs)
# Convert rows into numpy array
rows = np.array(rows)
# Dof-to-index mapping for rows
map_rows = np.zeros(unique_rdofs[-1]+1, dtype=np.int)
map_rows[unique_rdofs] = np.arange(n_dofs)
# Transform from dofs to indices
rows = map_rows[rows]
# Concatenate all function values in a vector
vals = np.concatenate(form['vals'])
if n_samples is None:
#
# Deterministic problem
#
b = np.zeros(n_dofs)
for i in range(n_dofs):
b[i] = vals[rows==unique_rdofs[i]].sum()
else:
#
# Sampled linear form
#
b = np.zeros((n_dofs,n_samples))
for i in range(n_dofs):
b[i,:] = vals[rows==unique_rdofs[i],:].sum(axis=0)
# Store arrays
form['row_dofs'] = np.array(unique_rdofs)
form['vals'] = b
elif form_type=='constant':
#
# Constant form
#
pass
'''
def get_matrix(self, i_problem=0, i_sample=0):
    """
    Return the sparse matrix representation of the bilinear form of the
    specified sample of the specified problem.

    Inputs:

        i_problem: int [0], problem index

        i_sample: int, [0], sample index

    Output:

        A: double, sparse array representing bilinear form
    """
    # Index 2 of the assembled-forms triple holds the bilinear form
    bilinear_form = self.__af[i_problem][2]
    matrices = bilinear_form.aggregate_data()['array']
    return matrices[i_sample]
def get_vector(self, i_problem=0, i_sample=0):
    """
    Return the vector representation of the linear form of the specified
    sample of specified problem.

    Inputs:

        i_problem: int [0], problem index

        i_sample: int [0] or list of int, sample index/indices

    Output:

        b: double, vector representing linear form (or an array of vectors
            when a list of sample indices is given)
    """
    # Index 1 of the assembled-forms triple holds the linear form
    linear_form = self.__af[i_problem][1]
    vectors = linear_form.aggregate_data()['array']
    if type(i_sample) is int:
        # Single sample requested
        return vectors[i_sample]
    # Multiple samples: i_sample must be a list of indices
    assert type(i_sample) is list, \
        'Input "i_sample" should be a (list of) integer(s).'
    return np.array([vectors[i] for i in i_sample])
def get_scalar(self, i_problem=0, i_sample=0):
    """
    Return the scalar representation of the constant form of the specified
    sample of the specified problem.

    Inputs:

        i_problem: int [0], problem index

        i_sample: int [0], sample index

    Output:

        c: double, scalar representing constant form
    """
    # Index 0 of the assembled-forms triple holds the constant form
    constant_form = self.__af[i_problem][0]
    scalars = constant_form.aggregate_data()['array']
    return scalars[i_sample]
def get_dofs(self, dof_type, i_problem=0):
    """
    Get dofs for problem, divided into 'interior', 'dirichlet', and
    'hanging_nodes'.

    Inputs:

        dof_type: str, one of 'interior', 'dirichlet', 'hanging_nodes'

        i_problem: int, problem index

    Output:

        list of dofs of the requested type

    Raises:

        ValueError, for an unrecognized dof_type (the original silently
        returned None, which hides typos at the call site).
    """
    if dof_type == 'dirichlet':
        #
        # DOFs of Dirichlet Boundaries
        #
        dir_bc = self.get_dirichlet(i_problem=i_problem)
        dir_dofs = list(dir_bc.keys())
        return dir_dofs
    elif dof_type == 'hanging_nodes':
        #
        # DOFs of hanging nodes
        #
        hng = self.get_hanging_nodes(i_problem=i_problem)
        hng_dofs = list(hng.keys())
        return hng_dofs
    elif dof_type == 'interior':
        #
        # Interior dofs: unique row dofs of the assembled bilinear form
        #
        bform = self.assembled_forms(i_problem=i_problem)[2]
        int_dofs = bform.aggregate_data()['udofs'][0]
        return int_dofs
    else:
        raise ValueError(
            'Input "dof_type" should be "interior", "dirichlet", '
            'or "hanging_nodes".')
def assembled_bnd(self, i_problem=0, i_sample=0):
    """
    Return the assembled Dirichlet correction vector of the given sample of
    the given problem, or None when no bilinear form has been assembled.
    """
    bilinear_form = self.__af[i_problem][2]
    if bilinear_form is None:
        return None
    return bilinear_form.dirichlet_correction()['array'][i_sample]
def n_samples(self, i_problem, form_type):
    """
    Returns the number of realizations of problem i_problem

    Inputs:

        i_problem: int, 0<i_problem<len(self.problems) problem index

        form_type: str 'constant', 'linear', or 'bilinear'.

    Output:

        n_samples: int (or None), number of samples associated with the given
            form type of the given problem. None indicates that no kernel of
            the given form type reports a subsample size.
    """
    n_samples = None
    for form in self.problems[i_problem]:
        if form.type==form_type:
            n_kernel_sample = form.kernel.n_subsample()
            #
            # Consider only forms of given type
            #
            if n_kernel_sample is not None:
                #
                # Sampling in effect
                #
                if n_samples is None:
                    #
                    # New trivial sample size
                    #
                    # NOTE: the first kernel encountered sets n_samples,
                    # even when its subsample size is 1; a later kernel
                    # with more samples then trips the assertion below.
                    n_samples = n_kernel_sample
                else:
                    #
                    # There exists a nontrivial sample size
                    #
                    if n_kernel_sample > 1:
                        #
                        # Kernel contains more than one sample.
                        # Check for consistency
                        #
                        assert n_samples == n_kernel_sample,\
                            ' Inconsistent sample sizes in kernels'
    return n_samples
def n_problems(self):
    """
    Return the number of variational problems registered with the assembler.
    """
    problem_count = len(self.problems)
    return problem_count
def shape_info(self, cell):
    """
    Determine what shape functions must be computed and over what region
    within a particular cell.

    Inputs:

        cell: cell over which to assemble

    Output:

        info: dictionary, mapping each integration region of the cell to the
            set of Basis functions that must be evaluated there.
    """
    info = {}
    for problem in self.problems:
        for form in problem:
            # Basis functions defined on the assembler's mesh/submesh
            form_basis = form.basis(self.mesh(), self.subforest_flag())
            for region in form.regions(cell):
                # Accumulate the basis functions needed on this region
                info.setdefault(region, set()).update(form_basis)
    return info
def gauss_rules(self, shape_info):
    """
    Compute the Gauss nodes and weights over all regions specified by the
    shape_info dictionary.

    Inputs:

        shape_info: dict, generated for each cell by means of
            self.shape_info(cell).

    Outputs:

        xg: dict, of Gauss nodes on cell, indexed by cell's subregions

        wg: dict, of Gauss weights on cell, indexed by cell's subregionss

        mg: dict, of mapped gradients, indexed recursively by the cell's
            subregions and 'jac_p2r' and/or 'hess_p2r'

    TODO: Delete
    """
    xg, wg, mg = {}, {}, {}
    for region in shape_info.keys():
        #
        # Determine whether shape derivatives will be needed
        #
        if any([basis.derivative()[0]==1 for basis in shape_info[region]]):
            #
            # Need Jacobian of Inverse Mapping
            #
            jac_p2r = True
        else:
            jac_p2r = False
        if any([basis.derivative()[0]==2 for basis in shape_info[region]]):
            #
            # Need Hessian of inverse mapping
            #
            hess_p2r = True
        else:
            hess_p2r = False
        #
        # Map quadrature rule to entity (cell/halfedge)
        #
        if isinstance(region, Interval):
            #
            # Interval
            #
            xg[region], wg[region], mg[region] = \
                self.cell_rule.mapped_rule(region, jac_p2r=jac_p2r,
                                           hess_p2r=hess_p2r)
        elif isinstance(region, HalfEdge):
            #
            # HalfEdge
            #
            xg[region], wg[region], mg[region] = \
                self.edge_rule.mapped_rule(region, jac_p2r=jac_p2r,
                                           hess_p2r=hess_p2r)
        elif isinstance(region, QuadCell):
            #
            # Quadrilateral
            #
            xg[region], wg[region], mg[region] = \
                self.cell_rule.mapped_rule(region, jac_p2r=jac_p2r,
                                           hess_p2r=hess_p2r)
        elif isinstance(region, Vertex):
            #
            # Vertex
            #
            # NOTE(review): no mg[region] entry is stored for vertices,
            # so mg is incomplete when a Vertex region appears — confirm
            # callers never look up mapped gradients at vertices.
            xg[region], wg[region] = convert_to_array(region.coordinates()), 1
        else:
            raise Exception('Only Intervals, HalfEdges, Vertices, & '+\
                            'QuadCells supported.')
    #
    # Return results
    #
    # NOTE(review): jac_p2r/hess_p2r here retain the values computed for
    # the *last* region iterated, so whether mg is returned depends on
    # dict iteration order — verify callers expect this behavior.
    if any([hess_p2r,jac_p2r]):
        return xg, wg, mg
    else:
        return xg, wg
def shape_eval(self, shape_info, cell):
    """
    (i) Map reference quadrature rule and (ii) evaluate the element shape
    functions (and their derivatives) at the mapped quadrature points in
    each region specified by "shape_info".

    Inputs:

        shape_info: dictionary, whose keys are the integration regions
            (QuadCell, Interval, or HalfEdge) over which to integrate and
            whose values are the basis functions to be integrated.

        cell: cell over which to integrate

    Output:

        xg: dictionary (indexed by regions), of mapped quadrature nodes.

        wg: dictionary (indexed by regions), of mapped quadrature weights.

        phi: dictionary phi[region][basis] of shape functions evaluated at
            the quadrature nodes.

        dofs: dictionary dofs[region][basis] of global dof numbers.

    TODO: A big chunk can be moved to GaussRule, map_rule
    """
    # Initialize
    xg, wg, phi, dofs = {}, {}, {}, {}
    # Iterate over integration regions
    for region in shape_info.keys():
        #
        # Get global dof numbers for the region
        #
        # Initialize degrees of freedom
        dofs[region] = {}
        for basis in shape_info[region]:
            #
            # Get region dofs for each basis
            #
            #dh = basis.dofhandler()
            #rdofs = dh.get_cell_dofs(cell,entity=region,
            #                         subforest_flag=basis.subforest_flag())
            rdofs = basis.dofs(cell)
            #rdofs = dh.get_cell_dofs(cell, subforest_flag=basis.subforest_flag())
            dofs[region][basis] = rdofs
        #
        # Determine whether shape derivatives will be needed for region
        #
        if any([basis.derivative()[0] in [1,2] for basis in shape_info[region]]):
            #
            # Need Jacobian of inverse mapping
            #
            jac_p2r = True
        else:
            jac_p2r = False
        if any([basis.derivative()[0]==2 for basis in shape_info[region]]):
            #
            # Need Hessian of inverse mapping
            #
            hess_p2r = True
        else:
            hess_p2r = False
        #
        # Map reference quadrature nodes to physical ones
        #
        if isinstance(region, Interval):
            #
            # Interval
            #
            # Check compatiblity
            assert self.mesh().dim()==1, 'Interval requires a 1D rule.'
            # Get reference nodes and weights
            x_ref = self.cell_rule.nodes()
            w_ref = self.cell_rule.weights()
            # Map to physical region
            xg[region], mg = \
                region.reference_map(x_ref, jac_r2p=True,
                                     jac_p2r=jac_p2r,
                                     hess_p2r=hess_p2r)
            # Get jacobian of forward mapping
            jac = mg['jac_r2p']
            # Modify the quadrature weights
            wg[region] = w_ref*np.array(jac)
        elif isinstance(region, HalfEdge):
            #
            # Edge
            #
            # Reference nodes and weights
            r = self.edge_rule.nodes()
            w_ref = self.edge_rule.weights()
            # Map from interval to physical region
            xg[region], mg = region.reference_map(r, jac_r2p=True,
                                                  jac_p2r=jac_p2r, hess_p2r=hess_p2r)
            # Get jaobian of forward mapping
            jac = mg['jac_r2p']
            # Modify the quadrature weights
            # NOTE(review): uses the norm of the first Jacobian only —
            # presumably half-edges are straight (constant Jacobian);
            # confirm for curved geometries.
            wg[region] = w_ref*np.array(np.linalg.norm(jac[0]))
            # To evaluate phi (and derivatives), map 1D reference nodes
            # to 2D ones and record jacobians/hessians
            if len(shape_info[region])>0:
                # There are shape functions associated with region
                # Get reference cell from single basis function
                basis = list(shape_info[region])[0]
                ref_cell = basis.dofhandler().element.reference_cell()
                # Determine equivalent Half-edge on reference element
                i_he = cell.get_half_edges().index(region)
                ref_he = ref_cell.get_half_edge(i_he)
                # Get 2D reference nodes
                b,h = convert_to_array(ref_he.get_vertices())
                x_ref = np.array([b[i]+r*(h[i]-b[i]) for i in range(2)]).T
                # Map 2D reference point to physical cell
                # (overwrites xg[region] and mg computed above)
                xg[region], mg = cell.reference_map(x_ref, jac_r2p=False,
                                                    jac_p2r=True,
                                                    hess_p2r=True)
        elif isinstance(region, QuadCell):
            #
            # Quadrilateral
            #
            # Check compatibility
            assert self.mesh().dim()==2, 'QuadCell requires 2D rule.'
            # Get reference nodes and weights
            x_ref = self.cell_rule.nodes()
            w_ref = self.cell_rule.weights()
            # Map to physical region
            xg[region], mg = \
                region.reference_map(x_ref, jac_r2p=True,
                                     jac_p2r=jac_p2r,
                                     hess_p2r=hess_p2r)
            # Get Jacobian of forward mapping
            jac = mg['jac_r2p']
            # Modify quadrature weights
            wg[region] = w_ref*np.array([np.linalg.det(j) for j in jac])
        elif isinstance(region, Vertex):
            #
            # Vertex (special case)
            #
            xg[region], wg[region] = convert_to_array(region.coordinates()), 1
            #
            # Determine reference vertex corresponding to Vertex region
            #
            basis = list(shape_info[region])[0]
            ref_cell = basis.dofhandler().element.reference_cell()
            # Determine equivalent Vertex on reference element
            i_v = cell.get_vertices().index(region)
            v = ref_cell.get_vertex(i_v)
            x_ref = convert_to_array(v, dim=v.dim())
            # Map to physical
            xg[region], mg = cell.reference_map(x_ref, jac_r2p=True,
                                                jac_p2r=jac_p2r,
                                                hess_p2r=hess_p2r)
            wg[region] = 1
        else:
            raise Exception('Only Intervals, HalfEdges, Vertices & '+\
                            'QuadCells supported')
        #
        # Evaluate (derivatives of) basis functions at the quadrature nodes
        #
        # NOTE(review): x_ref and mg below are whatever the branch above
        # last assigned; in the HalfEdge branch with an empty basis set,
        # x_ref would be stale from a previous iteration — confirm this
        # case cannot occur.
        phi[region] = {}
        for basis in shape_info[region]:
            #
            # Iterate over basis functions
            #
            if basis not in phi[region]:
                #
                # Evaluate basis functions over regions
                #
                element = basis.dofhandler().element
                D = basis.derivative()
                # Inverse-map derivatives only when required by D
                jac_p2r = mg['jac_p2r'] if D[0] in [1,2] else None
                hess_p2r = mg['hess_p2r'] if D[0]==2 else None
                p = element.shape(x_ref=x_ref, derivatives=D, cell=cell,
                                  jac_p2r=jac_p2r, hess_p2r=hess_p2r)
                phi[region][basis] = p
    # Return mapped quadrature nodes, weights, and shape functions
    return xg, wg, phi, dofs
def add_dirichlet(self, dir_marker, dir_fn=0, on_bnd=True, i_problem=0):
    """
    Add a Dirichlet condition to a problem, i.e. a set of dofs and vals
    corresponding to dirichlet conditions.

    Inputs:

        dir_marker: str/int flag to identify dirichlet halfedges

        i_problem: int, problem index

        dir_fn: Map/scalar, defining the Dirichlet boundary conditions.

        on_bnd: bool, True if function values are prescribed on boundary.

    Outputs:

        None

    Notes:

    To maintain the dimensions of the matrix, the trial and test function
    spaces must be the same, i.e. it must be a Galerkin approximation.

    Specifying the Dirichlet conditions this way is necessary if there
    are hanging nodes, since a Dirichlet node may be a supporting node for
    one of the hanging nodes.

    Modified Attributes:

        __dirichlet_bc: (i_problem indexed) list of dictionaries, containing
            dofs and values of corresponding to dirichlet nodes.
    """
    #
    # Extract dofhandler information from trial function
    #
    bilinear = self.assembled_forms(i_problem)[2]
    trial = bilinear.basis()[1]
    dh = trial.dofhandler()
    sf = trial.subforest_flag()
    #
    # Get Dofs Associated with Dirichlet boundary
    #
    # NOTE(review): only 1D and 2D meshes are handled; for any other
    # dimension dir_dofs is never bound and the code below raises
    # NameError — confirm 3D is out of scope.
    if dh.mesh.dim()==1:
        #
        # One dimensional mesh
        #
        dir_dofs = dh.get_region_dofs(entity_type='vertex', \
                                      entity_flag=dir_marker,\
                                      interior=False, \
                                      on_boundary=on_bnd,\
                                      subforest_flag=sf)
    elif dh.mesh.dim()==2:
        #
        # Two dimensional mesh
        #
        dir_dofs = dh.get_region_dofs(entity_type='half_edge',
                                      entity_flag=dir_marker,
                                      interior=False,
                                      on_boundary=on_bnd, \
                                      subforest_flag=sf)
    # Number of dirichlet dofs
    n_dirichlet = len(dir_dofs)
    #
    # Evaluate dirichlet function at vertices associated with dirichlet dofs
    #
    if isinstance(dir_fn, numbers.Number):
        #
        # Dirichlet function is constant
        #
        dir_vals = dir_fn*np.ones((n_dirichlet,1))
    elif isinstance(dir_fn, Nodal) and dir_fn.basis().same_dofs(trial):
        #
        # Nodal function whose dofs coincide with problem dofs
        #
        idx = dir_fn.dof2idx(dir_dofs)
        dir_vals = dir_fn.data()[idx,:]
    else:
        #
        # Evaluate the function explicitly at the dirichlet vertices
        #
        # assumes dir_fn exposes an eval(x) method — TODO confirm all
        # Map subclasses used here do.
        dir_verts = dh.get_dof_vertices(dir_dofs)
        x_dir = convert_to_array(dir_verts)
        dir_vals = dir_fn.eval(x_dir)
    #
    # Store Dirichlet Dofs and Values
    #
    dir_bc = self.__dirichlet_bc[i_problem]
    for dof,vals in zip(dir_dofs,dir_vals):
        dir_bc[dof] = vals
    # Store result
    self.__dirichlet_bc[i_problem] = dir_bc
def get_dirichlet(self, i_problem=0, asdict=True):
    """
    Return the Dirichlet boundary conditions of the given problem.

    When asdict is True, returns the {dof: vals} dictionary itself;
    otherwise returns the pair (list of dofs, array of values).
    """
    boundary_conditions = self.__dirichlet_bc[i_problem]
    if asdict:
        return boundary_conditions
    dof_list = list(boundary_conditions)
    value_array = np.array(list(boundary_conditions.values()))
    return dof_list, value_array
def add_hanging_nodes(self, i_problem=0):
    """
    Record the problem's hanging nodes, as computed by the dofhandler of
    the bilinear form's trial basis.
    """
    # The problem must have an assembled bilinear form
    bilinear = self.assembled_forms(i_problem)[2]
    assert bilinear is not None, "Problem has no bilinear form."
    # Galerkin assumption: trial and test bases share dofs
    trial = bilinear.basis()[1]
    assert trial.same_dofs(bilinear.basis()[0]), \
        "Trial and test functions should have the same dofs."
    # Query the dofhandler for the hanging nodes on the current submesh
    hanging = trial.dofhandler().get_hanging_nodes(
        subforest_flag=self.subforest_flag())
    self.__hanging_nodes[i_problem] = hanging
def get_hanging_nodes(self, i_problem=0):
    """
    Return the hanging node data recorded for the given problem.
    """
    hanging = self.__hanging_nodes[i_problem]
    return hanging
def hanging_node_matrix(self, i_problem=0):
    """
    Return the sparse matrix C used to reconstruct hanging node values
    from their supporting dofs.
    """
    # Test basis of the bilinear form (shares dofs with the trial basis)
    bilinear = self.assembled_forms(i_problem)[2]
    basis = bilinear.basis()[0]
    size = basis.n_dofs()
    # Hanging node data: {hanging_dof: (support_dofs, support_coefs)}
    hanging = self.__hanging_nodes[i_problem]
    # Build (row, col, val) triplets: one row per hanging node, one
    # column per supporting dof, weighted by the supporting coefficient.
    entries = []
    for h_dof, supp in hanging.items():
        h_idx = basis.d2i(h_dof)
        for s_dof, s_coef in zip(*supp):
            entries.append((h_idx, basis.d2i(s_dof), s_coef))
    if entries:
        rows, cols, vals = (list(t) for t in zip(*entries))
    else:
        rows, cols, vals = [], [], []
    return sparse.coo_matrix((vals, (rows, cols)), shape=(size, size))
def solve(self, i_problem=0, i_matrix=0, i_vector=0):
    """
    Solve the assembled problem

    Inputs:

        i_problem: int, problem index

        i_matrix: int, matrix (sample) index

        i_vector: int, vector (sample) index (can be a list)

    Output:

        u: double, (n_dofs, n_samples) solution array.

    TODO: Sort out the sampling - multiple right hand sides
    """
    #
    # Determine problem's number of dofs
    #
    # From linear form
    phi_lin = self.assembled_forms(i_problem)[1].basis()[0]
    assert phi_lin is not None, 'Missing assembled linear form.'
    n_dofs = phi_lin.n_dofs()
    # Galerkin check: bilinear bases must match the linear form's basis
    for phi_bil in self.assembled_forms(i_problem)[2].basis():
        assert phi_bil.same_dofs(phi_lin), \
            'Linear and bilinear forms should have the same basis.'
    # Get system matrix
    A = self.get_matrix(i_problem=i_problem, i_sample=i_matrix).tocsc()
    # System vectors
    b = self.get_vector(i_problem=i_problem, i_sample=i_vector)
    # Assembled Dirichlet BC
    # NOTE(review): assembled_bnd returns None when no bilinear form is
    # stored; here the asserts above guarantee one exists, so x0 is
    # presumably a valid array — confirm.
    x0 = self.assembled_bnd(i_problem=i_problem, i_sample=i_matrix)
    #
    # Initialize solution array
    #
    u = np.zeros(n_dofs)
    #
    # Solve linear system on interior dofs
    #
    int_dofs = self.get_dofs('interior', i_problem=i_problem)
    u[int_dofs] = spla.spsolve(A,b-x0)
    #
    # Resolve Dirichlet conditions
    #
    # NOTE(review): only the first sample column of the Dirichlet values
    # is used — see the TODO above about multiple right hand sides.
    dir_dofs, dir_vals = self.get_dirichlet(i_problem=i_problem, asdict=False)
    u[dir_dofs] = dir_vals[:,0]
    # Resolve hanging nodes: hanging entries of u are zero up to here,
    # so C.dot(u) fills them in from their supporting dofs
    C = self.hanging_node_matrix(i_problem=i_problem)
    u += C.dot(u)
    return u
def interpolate(self, marker_coarse, marker_fine, u_coarse=None):
    """
    Interpolate a coarse grid function at fine grid points.

    Inputs:

        marker_coarse: str/int, tree node marker denoting the cells of the
            coarse grid.

        marker_fine: str/int, tree node marker labeling the cells of the
            fine grid.

        u_coarse: double, nodal vector defined on the coarse grid.

    Outputs:

        if u_coarse is not None:

            u_interp: double, nodal vector of interpolant

        elif u_coarse is None:

            I: double, sparse interplation matrix, u_fine = I*u_coarse

    NOTE(review): this method appears to target a legacy API — it mixes
    `self.dofhandler` (attribute) with `self.dofhandler()` (call), uses
    `self.mesh.root_node()` where other methods call `self.mesh()`, and
    invokes `self.shape_eval(cell=..., x=...)` with a signature the
    current shape_eval does not accept. Verify before use.
    """
    #
    # Initialize
    #
    n_coarse = self.dofhandler.n_dofs(marker_coarse)
    n_fine = self.dofhandler().n_dofs(marker_fine)
    if u_coarse is None:
        #
        # Initialize sparse matrix
        #
        rows = []
        cols = []
        vals = []
    else:
        #
        # Interpolated nodes
        #
        u_interp = np.empty(n_fine)
    #
    # Construct
    #
    for node in self.mesh.root_node().get_leaves(marker_fine):
        if node.has_parent(marker_coarse):
            parent = node.get_parent(marker_coarse)
            node_dofs = self.dofhandler.get_global_dofs(node)
            parent_dofs = self.dofhandler.get_global_dofs(parent)
            x = self.dofhandler.dof_vertices(node)
            # Evaluate coarse shape functions at the fine dof vertices
            phi = self.shape_eval(cell=parent.cell(), x=x)
            if u_coarse is not None:
                #
                # Update nodal vector
                #
                u_interp[node_dofs] = \
                    np.dot(phi,u_coarse[parent_dofs])
            else:
                #
                # Update interpolation matrix
                #
                for i in range(len(node_dofs)):
                    fine_dof = node_dofs[i]
                    if fine_dof not in rows:
                        #
                        # New fine dof
                        #
                        for j in range(len(parent_dofs)):
                            coarse_dof = parent_dofs[j]
                            phi_val = phi[i,j]
                            # Skip numerically-zero entries
                            if abs(phi_val) > 1e-9:
                                rows.append(fine_dof)
                                cols.append(coarse_dof)
                                vals.append(phi_val)
    #
    # Return
    #
    if u_coarse is not None:
        return u_interp
    else:
        I = sparse.coo_matrix((vals,(rows,cols)),\
                              shape=(n_fine,n_coarse))
        return I
def restrict(self, marker_coarse, marker_fine, u_fine=None):
    """
    Restrict a fine grid function to a coarse mesh.

    Inputs:

        marker_coarse: str/int, tree node marker denoting the cells of the
            coarse grid.

        marker_fine: str/int, tree node marker labeling the cells of the
            fine grid.

        u_fine: nodal vector defined on the fine grid.

    Outputs:

        if u_fine is not None:

            u_restrict: double, nodal vector defined on coarse grid

        if u_fine is None:

            R: double, sparse restriction matrix, u_restrict = R*u_fine

    TODO: The "correct" restriction operator is the transpose of the
        interpolation operator.
    """
    # Restriction = pseudo-inverse of the interpolation operator,
    # computed via an economic QR factorization
    interp = self.interpolate(marker_coarse, marker_fine).toarray()
    orth, upper = linalg.qr(interp, mode='economic')
    restriction = linalg.solve(upper, orth.T)
    return restriction if u_fine is None else restriction.dot(u_fine)
class AssembledForm(object):
    """
    Aggregated storage for an assembled bilinear, linear, or constant form.

    Cell-level contributions are accumulated via update_cellwise(), merged
    into aggregate data (with Dirichlet/hanging-node constraints applied)
    via distribute(), and finally converted into arrays amenable to linear
    algebra via consolidate().
    """
    def __init__(self, dim):
        """
        Constructor

        Inputs:

            dim: int, dimension of the forms to be included:
                0 (constant), 1 (linear), or 2 (bilinear).
        """
        #
        # Initialize dimensions, basis, and sample size
        #
        self.__dim = dim
        self.__basis = None
        self.__n_samples = 1
        #
        # Initialize cellwise- and aggregate data
        #
        self.__cellwise_data = {}
        self.__aggregate_data = {'dofs': [[] for dummy in range(dim)],
                                 'udofs': [],
                                 'vals': [],
                                 'array': None}
        if dim == 2:
            #
            # Bilinear form: additionally track the Dirichlet correction
            # term (__bnd) and hanging node data (__hng)
            #
            self.__bnd = {'dofs': [], 'vals': []}
            self.__hng = {'dofs': [[],[]], 'vals': []}

    def dim(self):
        """
        Return the dimension of the form (0, 1, or 2)
        """
        return self.__dim

    def basis(self):
        """
        Return the list of basis vectors (None until a form is added)
        """
        return self.__basis

    def n_dofs(self):
        """
        Return the number of dofs associated with each basis
        """
        return [basis.n_dofs() for basis in self.basis()]

    def n_samples(self):
        """
        Return sample size
        """
        return self.__n_samples

    def cellwise_data(self):
        """
        Return cellwise data
        """
        return self.__cellwise_data

    def aggregate_data(self):
        """
        Return aggregate data
        """
        return self.__aggregate_data

    def dirichlet_correction(self):
        """
        Return the dirichlet correction term (bilinear forms only)
        """
        return self.__bnd

    def add_form(self, form):
        """
        Add form to aggregate form.

        Inputs:

            form: Form, to be added
        """
        # Check that input is a Form
        assert isinstance(form, Form), 'Input "form" should be a "Form" object'
        # Check dimension
        assert form.dim()==self.dim(), 'Input "form" has incompatible dim.'
        #
        # Get basis from form
        #
        if form.type == 'bilinear':
            form_basis = [form.test, form.trial]
        elif form.type == 'linear':
            form_basis = [form.test]
        elif form.type == 'constant':
            form_basis = None
        #
        # Update/compare aggregate's basis
        #
        if self.basis() is None:
            #
            # Store basis
            #
            self.__basis = form_basis
        else:
            #
            # Basis functions should have the same dofs
            #
            for basis, fbasis in zip(self.basis(), form_basis):
                assert basis.same_dofs(fbasis), \
                    'Basis functions have incompatible dofs'
        #
        # Update/compare sample size
        #
        n_smpl = self.n_samples()
        n_smpl_form = form.kernel.n_subsample()  # TODO: Change to n_samples()
        # n_subsample() may return None for deterministic kernels; treat
        # None as a single sample instead of comparing None > 1 (TypeError).
        if n_smpl_form is not None and n_smpl_form > 1:
            if n_smpl==1:
                #
                # Update sample size of aggregate form
                #
                self.__n_samples = n_smpl_form
            else:
                #
                # Check that sample size is the same
                #
                assert n_smpl==n_smpl_form, 'Sample sizes incompatible.'

    def update_cellwise(self, cell, vals, dofs=None):
        """
        Update cellwise assembled form

        Inputs:

            cell: cell over which the local form was computed

            vals: local form values (for bilinear forms, an array that is
                flattened column-major to align with the dof grid below)

            dofs: list of dof lists, one per form dimension
        """
        n_samples = self.n_samples()
        dim = self.dim()
        data = self.__cellwise_data
        if cell not in data:
            #
            # Initialize cellwise data
            #
            data[cell] = {'dofs': [[] for i in range(dim)],
                          'vals': []}
        #
        # Postprocess dofs and vals for multilinear forms
        #
        if dim > 1:
            #
            # Bilinear: expand the (test, trial) dof lists into a grid so
            # that each (row, col) pair lines up with one entry of the
            # column-major flattened vals. Rebind to a fresh list so the
            # caller's "dofs" argument is not mutated in place.
            #
            R, C = np.meshgrid(*dofs)
            dofs = [list(R.ravel()), list(C.ravel())]
            n_entries = len(dofs[0])
            vals = vals.reshape((n_entries,n_samples), order='F')
        # Update dofs
        for i in range(dim):
            # Update ith set of dofs
            data[cell]['dofs'][i].extend(dofs[i])
        # Update vals
        data[cell]['vals'].extend(vals)
        self.__cellwise_data = data

    def distribute(self, cell, dir_bc=None, hng=None, clear_cellwise_data=False):
        """
        Update the aggregate assembled form data, incorporating constraints
        arising from hanging nodes and Dirichlet boundary conditions.

        1. Dirichlet conditions: rows associated with Dirichlet dofs are
           dropped; Dirichlet columns are moved to the right hand side as
           the correction term __bnd (so that A_int*u_int = b - x0).

        2. Hanging nodes: rows/columns associated with a hanging node are
           redistributed to its supporting dofs, weighted by the support
           coefficients (continuity across refinement boundaries).

        Inputs:

            cell: Cell, on which constraints are incorporated

            dir_bc: dict or None, dirichlet-dof-indexed dictionary whose
                entries are the function values at the dirichlet nodes.

            hng: dict or None, hanging-node-dof-indexed dictionary with
                entries consisting of supporting dofs and coefficients

            clear_cellwise_data: bool, whether or not to delete dof-value
                data stored separately for current cell.
        """
        # Avoid mutable default arguments: None means "no constraints"
        dir_bc = {} if dir_bc is None else dir_bc
        hng = {} if hng is None else hng
        #
        # Get cellwise data
        #
        if clear_cellwise_data:
            cell_data = self.__cellwise_data[cell]
        else:
            # NOTE: shallow copy — the nested dof/val lists are still
            # consumed by the pops below, matching historical behavior.
            cell_data = self.__cellwise_data[cell].copy()
        # Aggregate data
        data = self.__aggregate_data
        vals = cell_data['vals']
        dim = self.dim()
        if dim == 0:
            #
            # Constant form: no dofs, just accumulate values
            #
            data['vals'].extend(vals)
        elif dim == 1:
            #
            # Linear form
            #
            rows, = cell_data['dofs']
            while len(rows)>0:
                r,v = rows.pop(), vals.pop()
                if r in hng:
                    #
                    # Hanging Node -> distribute to supporting rows
                    #
                    for supp, coef in zip(*hng[r]):
                        #
                        # Add supporting dofs and modified vals to list
                        #
                        rows.append(supp)
                        vals.append(coef*v)
                elif r not in dir_bc:
                    #
                    # Interior Node
                    #
                    data['dofs'][0].append(r)
                    data['vals'].append(v)
        elif dim == 2:
            #
            # Bilinear form
            #
            rows, cols = cell_data['dofs']
            while len(rows)>0:
                r, c, v = rows.pop(), cols.pop(), vals.pop()
                if r in hng:
                    #
                    # Hanging node -> distribute to supporting rows
                    #
                    for supp, coef in zip(*hng[r]):
                        #
                        # Add supporting dofs and modified vals to list
                        #
                        rows.append(supp)
                        cols.append(c)
                        vals.append(v*coef)
                elif r not in dir_bc:
                    #
                    # Interior row
                    #
                    if c in hng:
                        #
                        # Column hanging node -> distribute to supporting cols
                        #
                        for supp, coef in zip(*hng[c]):
                            #
                            # Add supporting dofs and modified vals to todo list
                            #
                            rows.append(r)
                            cols.append(supp)
                            vals.append(v*coef)
                    elif c in dir_bc:
                        #
                        # Dirichlet column -> update bnd function
                        #
                        self.__bnd['dofs'].append(r)
                        self.__bnd['vals'].append(v*dir_bc[c])
                    else:
                        #
                        # Interior column
                        #
                        data['dofs'][0].append(r)
                        data['dofs'][1].append(c)
                        data['vals'].append(v)
        if clear_cellwise_data:
            self.clear_cellwise_data(cell)

    def clear_cellwise_data(self, cell):
        """
        Remove dof-val data stored at cellwise level
        """
        if cell in self.__cellwise_data:
            self.__cellwise_data.pop(cell)

    def consolidate(self):
        """
        Postprocess assembled form to make it amenable to linear algebra
        operations. This includes renumbering equations that involve only a
        subset of the degreees of freedom.

        Bilinear Form:

            udofs: per-dimension sorted lists of unique mesh dofs

            array: list (one entry per sample) of sparse coo matrices

        Linear Form:

            udofs: sorted list of unique mesh dofs

            array: list (one entry per sample) of dense vectors

        Constant form:

            array: list of n_samples integral values.
        """
        dim = self.dim()
        dofs = self.__aggregate_data['dofs']
        #
        # Determine unique list of dofs associated with asmbld_form
        #
        udofs = []    # unique dofs
        n_idx = []    # number
        dof2idx = []  # dof-to-index mapping
        for i in range(dim):
            #
            # Get unique dofs from data. Sorting (rather than raw set
            # order) both honors the documented "ordered" contract and
            # guarantees the last element is the maximum, so the lookup
            # table below is large enough for every dof.
            #
            udofs.append(sorted(set(dofs[i])))
            #
            # Number of index vectors
            #
            n_idx.append(len(udofs[i]))
            #
            # Dof-to-index mapping for interior dofs
            # (dtype=int: np.int was removed in NumPy 1.24)
            #
            dof2idx.append(np.zeros(udofs[i][-1]+1, dtype=int))
            dof2idx[i][udofs[i]] = np.arange(n_idx[i])
        #
        # Store values in (n, n_samples) array
        #
        vals = self.__aggregate_data['vals']
        vals = np.array(vals)
        n_samples = self.n_samples()
        if dim == 0:
            #
            # Constant (scalar)
            #
            # Sum up all entries
            c = np.sum(vals, axis=0)
            # Store result
            self.__aggregate_data['array'] = list(c)
        elif dim == 1:
            #
            # Linear (vector)
            #
            rows = dof2idx[0][dofs[0]]
            n_rows = n_idx[0]
            b = []
            for i in range(n_samples):
                # Accumulate repeated-dof contributions per sample
                b.append(np.bincount(rows,vals[:,i],n_rows))
            self.__aggregate_data['array'] = b
            self.__aggregate_data['udofs'] = udofs
        elif dim == 2:
            #
            # Bilinear (matrix)
            #
            # Dirichlet term
            x0_dofs = dof2idx[0][self.__bnd['dofs']]
            x0_vals = np.array(self.__bnd['vals'])
            # Matrix rows & cols
            rows = dof2idx[0][dofs[0]]
            cols = dof2idx[1][dofs[1]]
            # Dimensions
            n_rows, n_cols = n_idx
            A = []
            x0 = []
            for i in range(n_samples):
                # Form sparse assembled matrix (coo sums duplicates)
                Ai = sparse.coo_matrix((vals[:,i],(rows, cols)),
                                       shape=(n_rows,n_cols))
                A.append(Ai)
                # Form the Dirichlet correction vector
                if len(x0_dofs)>0:
                    x0.append(np.bincount(x0_dofs, x0_vals[:,i],n_rows))
            # Store result
            self.__aggregate_data['array'] = A
            self.__aggregate_data['udofs'] = udofs
            self.__bnd['array'] = x0
"Gaussian"
] | dd6099fa0c8e9c528d1c5c186d8fee1493d41d66328888a36e9234f4ce8f62ee |
"""
Acceptance tests for Studio related to the split_test module.
"""
import math
from unittest import skip
from nose.plugins.attrib import attr
from selenium.webdriver.support.ui import Select
from xmodule.partitions.partitions import Group
from bok_choy.promise import Promise, EmptyPromise
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.studio.component_editor import ComponentEditorView
from common.test.acceptance.pages.studio.overview import CourseOutlinePage, CourseOutlineUnit
from common.test.acceptance.pages.studio.container import ContainerPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.utils import add_advanced_component
from common.test.acceptance.pages.xblock.utils import wait_for_xblock_initialization
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.tests.helpers import create_user_partition_json
from base_studio_test import StudioCourseTest
from test_studio_container import ContainerBase
class SplitTestMixin(object):
    """
    Mixin that contains useful methods for split_test module testing.
    """
    def verify_groups(self, container, active_groups, inactive_groups, verify_missing_groups_not_present=True):
        """
        Check that the groups appear and are correctly categorized as to active and inactive.
        Also checks that the "add missing groups" button/link is not present unless a value of False is passed
        for verify_missing_groups_not_present.
        """
        expected_total = len(active_groups) + len(inactive_groups)
        def wait_for_xblocks_to_render():
            # The first xblock on the page is the container itself, so it is
            # discounted when comparing against the expected group count.
            rendered_count = len(container.xblocks) - 1
            return (rendered_count == expected_total, len(active_groups))
        Promise(wait_for_xblocks_to_render, "Number of xblocks on the page are incorrect").fulfill()
        def check_xblock_names(expected_groups, actual_blocks):
            # Names must match pairwise, in order.
            self.assertEqual(len(expected_groups), len(actual_blocks))
            for expected_name, block in zip(expected_groups, actual_blocks):
                self.assertEqual(expected_name, block.name)
        check_xblock_names(active_groups, container.active_xblocks)
        check_xblock_names(inactive_groups, container.inactive_xblocks)
        # All inactive xblocks must be rendered after the active ones.
        check_xblock_names(active_groups + inactive_groups, container.xblocks[1:])
        if verify_missing_groups_not_present:
            self.verify_add_missing_groups_button_not_present(container)
    def verify_add_missing_groups_button_not_present(self, container):
        """
        Checks that the "add missing groups" button/link is not present.
        """
        def missing_groups_button_not_present():
            is_absent = not container.missing_groups_button_present()
            return (is_absent, is_absent)
        Promise(missing_groups_button_not_present, "Add missing groups button should not be showing.").fulfill()
@attr(shard=2)
class SplitTest(ContainerBase, SplitTestMixin):
    """
    Tests for creating and editing split test instances in Studio.
    """
    __test__ = True
    def setUp(self):
        super(SplitTest, self).setUp()
        # This line should be called once courseFixture is installed
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Configuration alpha,beta',
                        'first',
                        [Group("0", 'alpha'), Group("1", 'beta')]
                    ),
                    create_user_partition_json(
                        1,
                        'Configuration 0,1,2',
                        'second',
                        [Group("0", 'Group 0'), Group("1", 'Group 1'), Group("2", 'Group 2')]
                    ),
                ],
            },
        })
    def populate_course_fixture(self, course_fixture):
        """
        Build a minimal course (section/subsection/unit) with the
        split_test advanced module enabled.
        """
        course_fixture.add_advanced_settings(
            {u"advanced_modules": {"value": ["split_test"]}}
        )
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            )
        )
    # NOTE: verify_add_missing_groups_button_not_present was a byte-identical
    # duplicate of the implementation inherited from SplitTestMixin; the
    # duplicate has been removed in favor of the mixin's version.
    def create_poorly_configured_split_instance(self):
        """
        Creates a split test instance with a missing group and an inactive group.
        Returns the container page.
        """
        unit = self.go_to_unit_page()
        add_advanced_component(unit, 0, 'split_test')
        container = self.go_to_nested_container_page()
        container.edit()
        component_editor = ComponentEditorView(self.browser, container.locator)
        component_editor.set_select_value_and_save('Group Configuration', 'Configuration alpha,beta')
        # Rewrite the partition so that 'beta' becomes inactive and
        # 'gamma' (group id 2) is missing from the experiment.
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Configuration alpha,beta',
                        'first',
                        [Group("0", 'alpha'), Group("2", 'gamma')]
                    )
                ],
            },
        })
        return self.go_to_nested_container_page()
    def test_create_and_select_group_configuration(self):
        """
        Tests creating a split test instance on the unit page, and then
        assigning the group configuration.
        """
        unit = self.go_to_unit_page()
        add_advanced_component(unit, 0, 'split_test')
        container = self.go_to_nested_container_page()
        container.edit()
        component_editor = ComponentEditorView(self.browser, container.locator)
        component_editor.set_select_value_and_save('Group Configuration', 'Configuration alpha,beta')
        self.verify_groups(container, ['alpha', 'beta'], [])
        # Switch to the other group configuration. Must navigate again to the container page so
        # that there is only a single "editor" on the page.
        container = self.go_to_nested_container_page()
        container.edit()
        component_editor = ComponentEditorView(self.browser, container.locator)
        component_editor.set_select_value_and_save('Group Configuration', 'Configuration 0,1,2')
        self.verify_groups(container, ['Group 0', 'Group 1', 'Group 2'], ['Group ID 0', 'Group ID 1'])
        # Reload the page to make sure the groups were persisted.
        container = self.go_to_nested_container_page()
        self.verify_groups(container, ['Group 0', 'Group 1', 'Group 2'], ['Group ID 0', 'Group ID 1'])
    @skip("This fails periodically where it fails to trigger the add missing groups action.")
    def test_missing_group(self):
        """
        The case of a split test with invalid configuration (missing group).
        """
        container = self.create_poorly_configured_split_instance()
        # Wait for the xblock to be fully initialized so that the add button is rendered
        wait_for_xblock_initialization(self, '.xblock[data-block-type="split_test"]')
        # Click the add button and verify that the groups were added on the page
        container.add_missing_groups()
        self.verify_groups(container, ['alpha', 'gamma'], ['beta'])
        # Reload the page to make sure the groups were persisted.
        container = self.go_to_nested_container_page()
        self.verify_groups(container, ['alpha', 'gamma'], ['beta'])
    def test_delete_inactive_group(self):
        """
        Test deleting an inactive group.
        """
        container = self.create_poorly_configured_split_instance()
        # The inactive group is the 2nd group, but it is the first one
        # with a visible delete button, so use index 0
        container.delete(0)
        self.verify_groups(container, ['alpha'], [], verify_missing_groups_not_present=False)
@attr(shard=2)
class GroupConfigurationsNoSplitTest(StudioCourseTest):
    """
    Tests how the Group Configuration page should look when the split_test module is not enabled.
    """
    def setUp(self):
        super(GroupConfigurationsNoSplitTest, self).setUp()
        course = self.course_info
        self.group_configurations_page = GroupConfigurationsPage(
            self.browser,
            course['org'],
            course['number'],
            course['run']
        )
    def test_no_content_experiment_sections(self):
        """
        Scenario: if split_test module is not present in Advanced Settings, content experiment
        parts of the Group Configurations page are not shown.
        Given I have a course with split_test module not enabled
        Then when I go to the Group Configurations page there are no content experiment sections
        """
        page = self.group_configurations_page
        page.visit()
        self.assertFalse(page.experiment_group_sections_present)
@attr(shard=2)
class GroupConfigurationsTest(ContainerBase, SplitTestMixin):
"""
Tests that Group Configurations page works correctly with previously
added configurations in Studio
"""
__test__ = True
def setUp(self):
super(GroupConfigurationsTest, self).setUp()
self.page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def _assert_fields(self, config, cid=None, name='', description='', groups=None):
self.assertEqual(config.mode, 'details')
if name:
self.assertIn(name, config.name)
if cid:
self.assertEqual(cid, config.id)
else:
# To make sure that id is present on the page and it is not an empty.
# We do not check the value of the id, because it's generated randomly and we cannot
# predict this value
self.assertTrue(config.id)
# Expand the configuration
config.toggle()
if description:
self.assertIn(description, config.description)
if groups:
allocation = int(math.floor(100 / len(groups)))
self.assertEqual(groups, [group.name for group in config.groups])
for group in config.groups:
self.assertEqual(str(allocation) + "%", group.allocation)
# Collapse the configuration
config.toggle()
def _add_split_test_to_vertical(self, number, group_configuration_metadata=None):
"""
Add split test to vertical #`number`.
If `group_configuration_metadata` is not None, use it to assign group configuration to split test.
"""
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[number]
if group_configuration_metadata:
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata=group_configuration_metadata)
else:
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment')
self.course_fixture.create_xblock(vertical.locator, split_test)
return split_test
def populate_course_fixture(self, course_fixture):
course_fixture.add_advanced_settings({
u"advanced_modules": {"value": ["split_test"]},
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def create_group_configuration_experiment(self, groups, associate_experiment):
"""
Creates a Group Configuration containing a list of groups.
Optionally creates a Content Experiment and associates it with previous Group Configuration.
Returns group configuration or (group configuration, experiment xblock)
"""
# Create a new group configurations
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(0, "Name", "Description.", groups),
],
},
})
if associate_experiment:
# Assign newly created group configuration to experiment
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
self.course_fixture.create_xblock(vertical.locator, split_test)
# Go to the Group Configuration Page
self.page.visit()
config = self.page.experiment_group_configurations[0]
if associate_experiment:
return config, split_test
return config
    def publish_unit_in_lms_and_view(self, courseware_page, publish=True):
        """
        Open the first unit from the course outline and view it in the LMS.

        When ``publish`` is True the unit is published first; otherwise the
        currently published version is simply viewed.
        """
        self.outline_page.visit()
        self.outline_page.expand_all_subsections()
        section = self.outline_page.section_at(0)
        unit = section.subsection_at(0).unit_at(0).go_to()
        # I publish and view in LMS and it is rendered correctly
        if publish:
            unit.publish_action.click()
        unit.view_published_version()
        # Viewing the published version opens the LMS in a second window/tab.
        self.assertEqual(len(self.browser.window_handles), 2)
        courseware_page.wait_for_page()
def get_select_options(self, page, selector):
"""
Get list of options of dropdown that is specified by selector on a given page.
"""
select_element = page.q(css=selector)
self.assertTrue(select_element.is_present())
return [option.text for option in Select(select_element[0]).options]
    def test_no_group_configurations_added(self):
        """
        Scenario: Ensure that message telling me to create a new group configuration is
        shown when group configurations were not added.
        Given I have a course without group configurations
        When I go to the Group Configuration page in Studio
        Then I see "You have not created any group configurations yet." message
        """
        self.page.visit()
        # The experiment sections render even with no configurations,
        # but only the placeholder message should be shown inside them.
        self.assertTrue(self.page.experiment_group_sections_present)
        self.assertTrue(self.page.no_experiment_groups_message_is_present)
        self.assertIn(
            "You have not created any group configurations yet.",
            self.page.no_experiment_groups_message_text
        )
    def test_group_configurations_have_correct_data(self):
        """
        Scenario: Ensure that the group configuration is rendered correctly in expanded/collapsed mode.
        Given I have a course with 2 group configurations
        And I go to the Group Configuration page in Studio
        And I work with the first group configuration
        And I see `name`, `id` are visible and have correct values
        When I expand the first group configuration
        Then I see `description` and `groups` appear and also have correct values
        And I do the same checks for the second group configuration
        """
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Name of the Group Configuration',
                        'Description of the group configuration.',
                        [Group("0", 'Group 0'), Group("1", 'Group 1')]
                    ),
                    create_user_partition_json(
                        1,
                        'Name of second Group Configuration',
                        'Second group configuration.',
                        [Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
                    ),
                ],
            },
        })
        self.page.visit()
        config = self.page.experiment_group_configurations[0]
        # no groups when the configuration is collapsed
        self.assertEqual(len(config.groups), 0)
        self._assert_fields(
            config,
            cid="0", name="Name of the Group Configuration",
            description="Description of the group configuration.",
            groups=["Group 0", "Group 1"]
        )
        # The second configuration is checked without `cid`, so
        # _assert_fields only verifies that some id is present.
        config = self.page.experiment_group_configurations[1]
        self._assert_fields(
            config,
            name="Name of second Group Configuration",
            description="Second group configuration.",
            groups=["Alpha", "Beta", "Gamma"]
        )
    def test_can_create_and_edit_group_configuration(self):
        """
        Scenario: Ensure that the group configuration can be created and edited correctly.
        Given I have a course without group configurations
        When I click button 'Create new Group Configuration'
        And I set new name and description, change name for the 2nd default group, add one new group
        And I click button 'Create'
        Then I see the new group configuration is added and has correct data
        When I edit the group configuration
        And I change the name and description, add new group, remove old one and change name for the Group A
        And I click button 'Save'
        Then I see the group configuration is saved successfully and has the new data
        """
        self.page.visit()
        self.assertEqual(len(self.page.experiment_group_configurations), 0)
        # Create new group configuration
        self.page.create_experiment_group_configuration()
        config = self.page.experiment_group_configurations[0]
        config.name = "New Group Configuration Name"
        config.description = "New Description of the group configuration."
        config.groups[1].name = "New Group Name"
        # Add new group
        config.add_group() # Group C
        # While unsaved, the primary action reads "Create" and no delete
        # button is offered yet.
        self.assertEqual(config.get_text('.action-primary'), "Create")
        self.assertFalse(config.delete_button_is_present)
        # Save the configuration
        config.save()
        self._assert_fields(
            config,
            name="New Group Configuration Name",
            description="New Description of the group configuration.",
            groups=["Group A", "New Group Name", "Group C"]
        )
        # Edit the group configuration
        config.edit()
        # Update fields
        self.assertTrue(config.id)
        config.name = "Second Group Configuration Name"
        config.description = "Second Description of the group configuration."
        # When editing an existing configuration the primary action is "Save".
        self.assertEqual(config.get_text('.action-primary'), "Save")
        # Add new group
        config.add_group() # Group D
        # Remove group with name "New Group Name"
        config.groups[1].remove()
        # Rename Group A
        config.groups[0].name = "First Group"
        # Save the configuration
        config.save()
        self._assert_fields(
            config,
            name="Second Group Configuration Name",
            description="Second Description of the group configuration.",
            groups=["First Group", "Group C", "Group D"]
        )
    def test_focus_management_in_experiment_group_inputs(self):
        """
        Scenario: Ensure that selecting the focus inputs in the groups list
        sets the .is-focused class on the fieldset
        Given I have a course with experiment group configurations
        When I click the name of the first group
        Then the fieldset wrapping the group names should get class .is-focused
        When I click away from the first group
        Then the fieldset should not have class .is-focused anymore
        """
        self.page.visit()
        self.page.create_experiment_group_configuration()
        config = self.page.experiment_group_configurations[0]
        group_a = config.groups[0]
        # Assert the fieldset doesn't have .is-focused class
        self.assertFalse(self.page.q(css="fieldset.groups-fields.is-focused").visible)
        # Click on the Group A input field
        self.page.q(css=group_a.prefix).click()
        # Assert the fieldset has .is-focused class applied
        self.assertTrue(self.page.q(css="fieldset.groups-fields.is-focused").visible)
        # Click away (the page header is a safe, always-present target)
        self.page.q(css=".page-header").click()
        # Assert the fieldset doesn't have .is-focused class
        self.assertFalse(self.page.q(css="fieldset.groups-fields.is-focused").visible)
    def test_use_group_configuration(self):
        """
        Scenario: Ensure that the group configuration can be used by split_module correctly
        Given I have a course without group configurations
        When I create new group configuration
        And I set new name and add a new group, save the group configuration
        And I go to the unit page in Studio
        And I add new advanced module "Content Experiment"
        When I assign created group configuration to the module
        Then I see the module has correct groups
        """
        self.page.visit()
        # Create new group configuration
        self.page.create_experiment_group_configuration()
        config = self.page.experiment_group_configurations[0]
        config.name = "New Group Configuration Name"
        # Add new group (the third one, after the two defaults A and B)
        config.add_group()
        config.groups[2].name = "New group"
        # Save the configuration
        config.save()
        # Attach a content experiment to the first unit and bind it to the
        # configuration through the component editor dropdown.
        split_test = self._add_split_test_to_vertical(number=0)
        container = ContainerPage(self.browser, split_test.locator)
        container.visit()
        container.edit()
        component_editor = ComponentEditorView(self.browser, container.locator)
        component_editor.set_select_value_and_save('Group Configuration', 'New Group Configuration Name')
        self.verify_groups(container, ['Group A', 'Group B', 'New group'], [])
    def test_container_page_active_verticals_names_are_synced(self):
        """
        Scenario: Ensure that the Content Experiment display synced vertical names and correct groups.
        Given I have a course with group configuration
        And I go to the Group Configuration page in Studio
        And I edit the name of the group configuration, add new group and remove old one
        And I change the name for the group "New group" to "Second Group"
        And I go to the Container page in Studio
        And I edit the Content Experiment
        Then I see the group configuration name is changed in `Group Configuration` dropdown
        And the group configuration name is changed on container page
        And I see the module has 2 active groups and one inactive
        And I see "Add missing groups" link exists
        When I click on "Add missing groups" link
        Then I see the module has 3 active groups and one inactive
        """
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Name of the Group Configuration',
                        'Description of the group configuration.',
                        [Group("0", 'Group A'), Group("1", 'Group B'), Group("2", 'Group C')]
                    ),
                ],
            },
        })
        # Add split test to vertical and assign newly created group configuration to it
        split_test = self._add_split_test_to_vertical(number=0, group_configuration_metadata={'user_partition_id': 0})
        self.page.visit()
        config = self.page.experiment_group_configurations[0]
        config.edit()
        config.name = "Second Group Configuration Name"
        # `Group C` -> `Second Group`
        config.groups[2].name = "Second Group"
        # Add new group
        config.add_group() # Group D
        # Remove Group A (its child vertical becomes inactive in the experiment)
        config.groups[0].remove()
        # Save the configuration
        config.save()
        container = ContainerPage(self.browser, split_test.locator)
        container.visit()
        container.edit()
        component_editor = ComponentEditorView(self.browser, container.locator)
        # The renamed configuration must show up in the editor dropdown ...
        self.assertEqual(
            "Second Group Configuration Name",
            component_editor.get_selected_option_text('Group Configuration')
        )
        component_editor.cancel()
        # ... and in the informational message on the container page.
        self.assertIn(
            "Second Group Configuration Name",
            container.get_xblock_information_message()
        )
        self.verify_groups(
            container, ['Group B', 'Second Group'], ['Group ID 0'],
            verify_missing_groups_not_present=False
        )
        # Click the add button and verify that the groups were added on the page
        container.add_missing_groups()
        self.verify_groups(container, ['Group B', 'Second Group', 'Group D'], ['Group ID 0'])
    def test_can_cancel_creation_of_group_configuration(self):
        """
        Scenario: Ensure that creation of the group configuration can be canceled correctly.
        Given I have a course without group configurations
        When I click button 'Create new Group Configuration'
        And I set new name and description, add 1 additional group
        And I click button 'Cancel'
        Then I see that there is no new group configurations in the course
        """
        self.page.visit()
        self.assertEqual(len(self.page.experiment_group_configurations), 0)
        # Create new group configuration
        self.page.create_experiment_group_configuration()
        config = self.page.experiment_group_configurations[0]
        config.name = "Name of the Group Configuration"
        config.description = "Description of the group configuration."
        # Add new group
        config.add_group() # Group C
        # Cancel the configuration; nothing should have been persisted.
        config.cancel()
        self.assertEqual(len(self.page.experiment_group_configurations), 0)
    def test_can_cancel_editing_of_group_configuration(self):
        """
        Scenario: Ensure that editing of the group configuration can be canceled correctly.
        Given I have a course with group configuration
        When I go to the edit mode of the group configuration
        And I set new name and description, add 2 additional groups
        And I click button 'Cancel'
        Then I see that new changes were discarded
        """
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Name of the Group Configuration',
                        'Description of the group configuration.',
                        [Group("0", 'Group 0'), Group("1", 'Group 1')]
                    ),
                    create_user_partition_json(
                        1,
                        'Name of second Group Configuration',
                        'Second group configuration.',
                        [Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
                    ),
                ],
            },
        })
        self.page.visit()
        config = self.page.experiment_group_configurations[0]
        # NOTE(review): no explicit config.edit() call here — presumably
        # assigning name/description switches the card into edit mode via
        # the page object; confirm against GroupConfigurationsPage.
        config.name = "New Group Configuration Name"
        config.description = "New Description of the group configuration."
        # Add 2 new groups
        config.add_group() # Group C
        config.add_group() # Group D
        # Cancel the configuration; the original values must survive.
        config.cancel()
        self._assert_fields(
            config,
            name="Name of the Group Configuration",
            description="Description of the group configuration.",
            groups=["Group 0", "Group 1"]
        )
    def test_group_configuration_validation(self):
        """
        Scenario: Ensure that validation of the group configuration works correctly.
        Given I have a course without group configurations
        And I create new group configuration with 2 default groups
        When I set only description and try to save
        Then I see error message "Group Configuration name is required."
        When I set a name
        And I delete the name of one of the groups and try to save
        Then I see error message "All groups must have a name."
        When I delete all the groups and try to save
        Then I see error message "There must be at least one group."
        When I add a group and try to save
        Then I see the group configuration is saved successfully
        """
        def try_to_save_and_verify_error_message(message):
            # Try to save
            config.save()
            # Verify that configuration is still in editing mode
            self.assertEqual(config.mode, 'edit')
            # Verify error message
            self.assertEqual(message, config.validation_message)
        self.page.visit()
        # Create new group configuration
        self.page.create_experiment_group_configuration()
        # Leave empty required field
        config = self.page.experiment_group_configurations[0]
        config.description = "Description of the group configuration."
        try_to_save_and_verify_error_message("Group Configuration name is required.")
        # Set required field
        config.name = "Name of the Group Configuration"
        config.groups[1].name = ''
        try_to_save_and_verify_error_message("All groups must have a name.")
        # Remove both remaining groups (index 0 twice, since the list
        # shifts after each removal).
        config.groups[0].remove()
        config.groups[0].remove()
        try_to_save_and_verify_error_message("There must be at least one group.")
        config.add_group()
        # Save the configuration
        config.save()
        self._assert_fields(
            config,
            name="Name of the Group Configuration",
            description="Description of the group configuration.",
            groups=["Group A"]
        )
    def test_group_configuration_empty_usage(self):
        """
        Scenario: When group configuration is not used, ensure that the link to outline page works correctly.
        Given I have a course without group configurations
        And I create new group configuration with 2 default groups
        Then I see a link to the outline page
        When I click on the outline link
        Then I see the outline page
        """
        # Create a new group configurations
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        "Name",
                        "Description.",
                        [Group("0", "Group A"), Group("1", "Group B")]
                    ),
                ],
            },
        })
        # Go to the Group Configuration Page and click on outline anchor
        self.page.visit()
        config = self.page.experiment_group_configurations[0]
        # Expand the configuration so the outline link is visible.
        config.toggle()
        config.click_outline_anchor()
        # Waiting for the page load and verify that we've landed on course outline page
        self.outline_page.wait_for_page()
def test_group_configuration_non_empty_usage(self):
"""
Scenario: When group configuration is used, ensure that the links to units using a group configuration work correctly.
Given I have a course without group configurations
And I create new group configuration with 2 default groups
And I create a unit and assign the newly created group configuration
And open the Group Configuration page
Then I see a link to the newly created unit
When I click on the unit link
Then I see correct unit page
"""
# Create a new group configurations
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
),
],
},
})
# Assign newly created group configuration to unit
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
)
unit = CourseOutlineUnit(self.browser, vertical.locator)
# Go to the Group Configuration Page and click unit anchor
self.page.visit()
config = self.page.experiment_group_configurations[0]
config.toggle()
usage = config.usages[0]
config.click_unit_anchor()
unit = ContainerPage(self.browser, vertical.locator)
# Waiting for the page load and verify that we've landed on the unit page
unit.wait_for_page()
self.assertIn(unit.name, usage)
    def test_can_delete_unused_group_configuration(self):
        """
        Scenario: Ensure that the user can delete unused group configuration.
        Given I have a course with 2 group configurations
        And I go to the Group Configuration page
        When I delete the Group Configuration with name "Configuration 2" from its details view
        Then I see that there is one Group Configuration
        When I edit the Group Configuration with name "Configuration 1"
        And I delete the Group Configuration with name "Configuration 1"
        Then I see that there are no Group Configurations
        """
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Configuration 1',
                        'Description of the group configuration.',
                        [Group("0", 'Group 0'), Group("1", 'Group 1')]
                    ),
                    create_user_partition_json(
                        1,
                        'Configuration 2',
                        'Second group configuration.',
                        [Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
                    )
                ],
            },
        })
        self.page.visit()
        self.assertEqual(len(self.page.experiment_group_configurations), 2)
        config = self.page.experiment_group_configurations[1]
        # Delete the second group configuration ('Configuration 2') via detail view
        config.delete()
        self.assertEqual(len(self.page.experiment_group_configurations), 1)
        config = self.page.experiment_group_configurations[0]
        config.edit()
        self.assertFalse(config.delete_button_is_disabled)
        # Delete the remaining group configuration ('Configuration 1') via edit view
        config.delete()
        self.assertEqual(len(self.page.experiment_group_configurations), 0)
    def test_cannot_delete_used_group_configuration(self):
        """
        Scenario: Ensure that the user cannot delete a used group configuration.
        Given I have a course with group configuration that is used in the Content Experiment
        When I go to the Group Configuration page
        Then I do not see delete button and I see a note about that
        When I edit the Group Configuration
        Then I do not see delete button and I see the note about that
        """
        # Create a new group configurations
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        "Name",
                        "Description.",
                        [Group("0", "Group A"), Group("1", "Group B")]
                    )
                ],
            },
        })
        # Bind a content experiment to the configuration so it counts as "in use".
        vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        self.course_fixture.create_xblock(
            vertical.locator,
            XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
        )
        # Go to the Group Configuration Page
        self.page.visit()
        config = self.page.experiment_group_configurations[0]
        # Delete must be disabled in both the details view and the edit view.
        self.assertTrue(config.delete_button_is_disabled)
        self.assertIn('Cannot delete when in use by an experiment', config.delete_note)
        config.edit()
        self.assertTrue(config.delete_button_is_disabled)
        self.assertIn('Cannot delete when in use by an experiment', config.delete_note)
    def test_easy_access_from_experiment(self):
        """
        Scenario: When a Content Experiment uses a Group Configuration,
        ensure that the link to that Group Configuration works correctly.
        Given I have a course with two Group Configurations
        And Content Experiment is assigned to one Group Configuration
        Then I see a link to Group Configuration
        When I click on the Group Configuration link
        Then I see the Group Configurations page
        And I see that appropriate Group Configuration is expanded.
        """
        # Create a new group configurations
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        "Name",
                        "Description.",
                        [Group("0", "Group A"), Group("1", "Group B")]
                    ),
                    create_user_partition_json(
                        1,
                        'Name of second Group Configuration',
                        'Second group configuration.',
                        [Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
                    ),
                ],
            },
        })
        # Assign newly created group configuration to unit
        # (the experiment uses partition id 1, i.e. the SECOND configuration)
        vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        self.course_fixture.create_xblock(
            vertical.locator,
            XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 1})
        )
        unit = ContainerPage(self.browser, vertical.locator)
        unit.visit()
        experiment = unit.xblocks[0]
        # Capture the link text before navigating away from the unit page.
        group_configuration_link_name = experiment.group_configuration_link_name
        experiment.go_to_group_configuration_page()
        self.page.wait_for_page()
        # Appropriate Group Configuration is expanded.
        self.assertFalse(self.page.experiment_group_configurations[0].is_expanded)
        self.assertTrue(self.page.experiment_group_configurations[1].is_expanded)
        self.assertEqual(
            group_configuration_link_name,
            self.page.experiment_group_configurations[1].name
        )
def test_details_error_validation_message(self):
    """
    Scenario: When a Content Experiment uses a Group Configuration, ensure
    that an error validation message appears if necessary.
    Given I have a course with a Group Configuration containing two Groups
    And a Content Experiment is assigned to that Group Configuration
    When I go to the Group Configuration Page
    Then I do not see a error icon and message in the Group Configuration details view.
    When I add a Group
    Then I see an error icon and message in the Group Configuration details view
    """
    # Create group configuration and associated experiment
    config, _ = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B")], True)
    # Display details view
    config.toggle()
    # Check that error icon and message are not present
    self.assertFalse(config.details_error_icon_is_present)
    self.assertFalse(config.details_message_is_present)
    # Add a group
    # NOTE(review): presumably the extra group has no matching child in the
    # existing experiment, which is what triggers error severity -- confirm.
    config.toggle()
    config.edit()
    config.add_group()
    config.save()
    # Display details view
    config.toggle()
    # Check that error icon and message are present
    self.assertTrue(config.details_error_icon_is_present)
    self.assertTrue(config.details_message_is_present)
    self.assertIn(
        "This content experiment has issues that affect content visibility.",
        config.details_message_text
    )
def test_details_warning_validation_message(self):
    """
    Scenario: When a Content Experiment uses a Group Configuration, ensure
    that a warning validation message appears if necessary.
    Given I have a course with a Group Configuration containing three Groups
    And a Content Experiment is assigned to that Group Configuration
    When I go to the Group Configuration Page
    Then I do not see a warning icon and message in the Group Configuration details view.
    When I remove a Group
    Then I see a warning icon and message in the Group Configuration details view
    """
    # Create group configuration and associated experiment
    config, _ = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B"), Group("2", "Group C")], True)
    # Display details view
    config.toggle()
    # Check that warning icon and message are not present
    self.assertFalse(config.details_warning_icon_is_present)
    self.assertFalse(config.details_message_is_present)
    # Remove a group -- the experiment now references a group that no
    # longer exists, which surfaces as warning (not error) severity.
    config.toggle()
    config.edit()
    config.groups[2].remove()
    config.save()
    # Display details view
    config.toggle()
    # Check that warning icon and message are present
    self.assertTrue(config.details_warning_icon_is_present)
    self.assertTrue(config.details_message_is_present)
    self.assertIn(
        "This content experiment has issues that affect content visibility.",
        config.details_message_text
    )
def test_edit_warning_message_empty_usage(self):
    """
    Scenario: When a Group Configuration is not used, ensure that there are no warning icon and message.
    Given I have a course with a Group Configuration containing two Groups
    When I edit the Group Configuration
    Then I do not see a warning icon and message
    """
    # Build an unused configuration (no experiment attached), open its
    # edit view, and verify the in-use warning is absent.
    two_groups = [Group("0", "Group A"), Group("1", "Group B")]
    config = self.create_group_configuration_experiment(two_groups, False)
    config.edit()
    for warning_visible in (config.edit_warning_icon_is_present,
                            config.edit_warning_message_is_present):
        self.assertFalse(warning_visible)
def test_edit_warning_message_non_empty_usage(self):
    """
    Scenario: When a Group Configuration is used, ensure that there are a warning icon and message.
    Given I have a course with a Group Configuration containing two Groups
    When I edit the Group Configuration
    Then I see a warning icon and message
    """
    # Build a configuration that IS referenced by an experiment, open its
    # edit view, and verify the in-use warning is shown.
    two_groups = [Group("0", "Group A"), Group("1", "Group B")]
    config, _ = self.create_group_configuration_experiment(two_groups, True)
    config.edit()
    for warning_visible in (config.edit_warning_icon_is_present,
                            config.edit_warning_message_is_present):
        self.assertTrue(warning_visible)
    self.assertIn(
        "This configuration is currently used in content experiments. If you make changes to the groups, you may need to edit those experiments.",
        config.edit_warning_message_text
    )
def publish_unit_and_verify_groups_in_lms(self, courseware_page, group_names, publish=True):
    """
    Publish first unit in LMS and verify that Courseware page has given Groups

    The published block must render as a split_test with a staff-only
    group selector listing exactly *group_names*, in order.
    """
    self.publish_unit_in_lms_and_view(courseware_page, publish)
    self.assertEqual(u'split_test', courseware_page.xblock_component_type())
    self.assertTrue(courseware_page.q(css=".split-test-select").is_present())
    rendered_group_names = self.get_select_options(page=courseware_page, selector=".split-test-select")
    # Order matters: assertListEqual checks both membership and ordering.
    self.assertListEqual(group_names, rendered_group_names)
def test_split_test_LMS_staff_view(self):
    """
    Scenario: Ensure that split test is correctly rendered in LMS staff mode as it is
    and after inactive group removal.
    Given I have a course with group configurations and split test that assigned to first group configuration
    Then I publish split test and view it in LMS in staff view
    And it is rendered correctly
    Then I go to group configuration and delete group
    Then I publish split test and view it in LMS in staff view
    And it is rendered correctly
    Then I go to split test and delete inactive vertical
    Then I publish unit and view unit in LMS in staff view
    And it is rendered correctly
    """
    config, split_test = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B"), Group("2", "Group C")], True)
    container = ContainerPage(self.browser, split_test.locator)
    # render in LMS correctly
    courseware_page = CoursewarePage(self.browser, self.course_id)
    self.publish_unit_and_verify_groups_in_lms(courseware_page, [u'Group A', u'Group B', u'Group C'])
    # I go to group configuration and delete group
    self.page.visit()
    self.page.q(css='.group-toggle').first.click()
    config.edit()
    config.groups[2].remove()
    config.save()
    self.page.q(css='.group-toggle').first.click()
    self._assert_fields(config, name="Name", description="Description", groups=["Group A", "Group B"])
    # NOTE(review): viewing in the LMS appears to open a new browser
    # window; close it and switch back to the original -- confirm.
    self.browser.close()
    self.browser.switch_to_window(self.browser.window_handles[0])
    # render in LMS to see how inactive vertical is rendered
    self.publish_unit_and_verify_groups_in_lms(
        courseware_page,
        [u'Group A', u'Group B', u'Group ID 2 (inactive)'],
        publish=False
    )
    self.browser.close()
    self.browser.switch_to_window(self.browser.window_handles[0])
    # I go to split test and delete inactive vertical
    container.visit()
    container.delete(0)
    # render in LMS again
    self.publish_unit_and_verify_groups_in_lms(courseware_page, [u'Group A', u'Group B'])
| naresh21/synergetics-edx-platform | common/test/acceptance/tests/studio/test_studio_split_test.py | Python | agpl-3.0 | 48,100 | [
"VisIt"
] | ff07317907d6e5d73cdff00b97bae6b23dd5042539e0594cc3be2becbdd5e603 |
# encoding: utf-8
"""
Global exception classes for IPython.core.
Authors:
* Brian Granger
* Fernando Perez
* Min Ragan-Kelley
Notes
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Exception classes
#-----------------------------------------------------------------------------
class IPythonCoreError(Exception):
    """Common base class for exceptions raised by IPython.core."""
class TryNext(IPythonCoreError):
    """Signal that the current hook declines to handle the operation.

    Raise this inside a hook function so that the dispatcher falls
    through to the next registered hook handler.
    """
class UsageError(IPythonCoreError):
    """Bad arguments to a magic function, or a similar user-level mistake.

    Not serious enough to warrant a full traceback, but it should still
    interrupt a running macro / batch file.
    """
class StdinNotImplementedError(IPythonCoreError, NotImplementedError):
    """raw_input was requested in a context where it is not supported.

    Intended for IPython kernels, where only some frontends are able to
    service stdin requests.
    """
| noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/core/error.py | Python | apache-2.0 | 1,534 | [
"Brian"
] | f31631b6fb92fc48903c15cab018edec5901f6447567ec5ddadcfe7ac13e9c8d |
#!/usr/bin/env python
#
# Copyright 2013 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
""" Tests for general functionality of the KGML parser and pathway model
"""
# Builtins
from __future__ import with_statement
import os
import unittest
import tempfile
# Biopython Bio.KEGG.KGML (?)
from Bio.KEGG.KGML.KGML_parser import read
class PathwayData(object):
    """Bundle of per-pathway fixture data used by the KGML test cases.

    Attributes:
      infilename / outfilename -- where KGML is read from / written to
      element_counts -- expected (entry, ortholog, compound, map) totals
      pathway_image -- path to the pathway's .png map
      show_pathway_image -- whether a test should display the image
    """
    def __init__(self, infilename, outfilename, element_counts,
                 pathway_image, show_pathway_image=False):
        for attr, value in (("infilename", infilename),
                            ("outfilename", outfilename),
                            ("element_counts", element_counts),
                            ("pathway_image", pathway_image),
                            ("show_pathway_image", show_pathway_image)):
            setattr(self, attr, value)
class KGMLPathwayTest(unittest.TestCase):
    """ Import the ko01100 metabolic map from a local .xml KGML file, and from
    the KEGG site, and write valid KGML output for each
    """
    def setUp(self):
        # Does our output director exist? If not, create it
        if not os.path.isdir('KEGG'):
            os.mkdir('KEGG')
        # Define some data to work with as a list of tuples:
        # (infilename, outfilename, (entry_count, ortholog_count,
        # compound_count, map_counts), pathway_image,
        # show_image_map)
        self.data = [PathwayData(os.path.join("KEGG", "ko01100.xml"),
                                 tempfile.gettempprefix() + ".ko01100.kgml",
                                 (3628, 1726, 1746, 149),
                                 os.path.join("KEGG", "map01100.png")),
                     PathwayData(os.path.join("KEGG", "ko03070.xml"),
                                 tempfile.gettempprefix() + ".ko03070.kgml",
                                 (81, 72, 8, 1),
                                 os.path.join("KEGG", "map03070.png"),
                                 True)]
        # A list of KO IDs that we're going to use to modify pathway
        # appearance. These are KO IDs for reactions that take part in ko00020,
        # the TCA cycle
        self.ko_ids = \
            set(['ko:K00239','ko:K00240','ko:K00241','ko:K00242','ko:K00244',
                 'ko:K00245','ko:K00246','ko:K00247','ko:K00174','ko:K00175',
                 'ko:K00177','ko:K00176','ko:K00382','ko:K00164','ko:K00164',
                 'ko:K00658','ko:K01902','ko:K01903','ko:K01899','ko:K01900',
                 'ko:K01899','ko:K01900','ko:K00031','ko:K00030','ko:K00031',
                 'ko:K01648','ko:K00234','ko:K00235','ko:K00236','ko:K00237',
                 'ko:K01676','ko:K01677','ko:K01678','ko:K01679','ko:K01681',
                 'ko:K01682','ko:K01681','ko:K01682','ko:K01647','ko:K00025',
                 'ko:K00026','ko:K00024','ko:K01958','ko:K01959','ko:K01960',
                 'ko:K00163','ko:K00161','ko:K00162','ko:K00163','ko:K00161',
                 'ko:K00162','ko:K00382','ko:K00627','ko:K00169','ko:K00170',
                 'ko:K00172','ko:K00171','ko:K01643','ko:K01644','ko:K01646',
                 'ko:K01610','ko:K01596'])
    def tearDown(self):
        # Remove any KGML files written by the tests.
        for p in self.data:
            if os.path.isfile(p.outfilename):
                os.remove(p.outfilename)
    def test_read_and_write_KGML_files(self):
        """ Read KGML from, and write KGML to, local files.
        Check we read/write the correct number of elements.
        """
        for p in self.data:
            # Test opening file
            # NOTE(review): 'rU' mode was removed in Python 3.11 -- confirm
            # the targeted Python version if this is still in use.
            with open(p.infilename, 'rU') as f:
                pathway = read(f)
            # Do we have the correct number of elements of each type
            self.assertEqual((len(pathway.entries),
                              len(pathway.orthologs),
                              len(pathway.compounds),
                              len(pathway.maps)),
                             p.element_counts)
            # Test writing file
            with open(p.outfilename, 'w') as f:
                f.write(pathway.get_KGML())
            # Can we read the file we wrote?
            with open(p.outfilename, 'rU') as f:
                pathway = read(f)
            # Do we have the correct number of elements of each type
            self.assertEqual((len(pathway.entries),
                              len(pathway.orthologs),
                              len(pathway.compounds),
                              len(pathway.maps)),
                             p.element_counts)
if __name__ == '__main__':
    # Run the suite with a verbose runner when invoked directly.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_KGML_nographics.py | Python | gpl-2.0 | 4,772 | [
"Biopython"
] | 4a6974415ec35d44f61fa5c96ca343c551956e566187f7ef856fc9884496d969 |
#!/usr/bin/env python
"""
@package ion.processes.event
@file ion/processes/event/notification_sent_scanner.py
@author Brian McKenna <bmckenna@asascience.com>
@brief NotificationSentScanner plugin. An EventPersister plugin scanning for, and keeping state(count) of, NotificationEvent's
"""
import time
from datetime import date, datetime, timedelta
from collections import Counter
from pyon.core import bootstrap
from pyon.core.exception import NotFound
from pyon.event.event import EventPublisher
from pyon.public import log, OT
# Event types the scanner counts; all other events are skipped.
NOTIFICATION_EVENTS = {OT.NotificationSentEvent}
class NotificationSentScanner(object):
    """EventPersister plugin that tallies NotificationSentEvent's.

    Keeps per-user Counter()s in memory, periodically persists them to the
    ObjectStore doc 'notification_counts', and disables any notification
    whose per-notification max has been reached.
    """
    def __init__(self, container=None):
        self.container = container or bootstrap.container_instance
        self.object_store = self.container.object_store
        self.resource_registry = self.container.resource_registry
        self.event_publisher = EventPublisher()
        # next_midnight is used to flush the counts (see NOTE in method)
        self.next_midnight = self._midnight(days=1)
        self.persist_interval = 300  # interval in seconds to persist/reload counts TODO: use CFG
        self.time_last_persist = 0
        # BUGFIX: define the dirty flag unconditionally. Previously it was
        # only set inside _persist_counts(), so when the counts doc loaded
        # successfully from the ObjectStore the first process_events() call
        # raised AttributeError on this attribute.
        self.counts_updated_since_persist = False
        # initalize volatile counts (memory only, should be routinely persisted)
        self._initialize_counts()

    def process_events(self, event_list):
        """Count NotificationEvents in *event_list*; disable notifications
        that reached their max; persist/reset counts when intervals elapse."""
        # set() of notifications to disable, _disable_notifications can happen >1 depending on len(event_list)
        notifications = set()
        for e in event_list:
            # skip if not a NotificationEvent
            if e.type_ not in NOTIFICATION_EVENTS:
                continue
            user_id = e.user_id
            notification_id = e.notification_id
            notification_max = e.notification_max  # default value is zero indicating no max
            # initialize user_id if necessary
            if user_id not in self.counts:
                self.counts[user_id] = Counter()
            # increment counts (user_id key to allow ALL to be counted)
            self.counts[user_id]['all'] += 1  # tracks total notifications by user
            self.counts[user_id][notification_id] += 1
            self.counts_updated_since_persist = True
            # disable notification if notification_max reached
            if notification_max:
                if self.counts[user_id][notification_id] >= notification_max:
                    # TODO this could be dict so key could be checked for insertion
                    notifications.add(self._disable_notification(notification_id))
        # update notifications that have been disabled
        if notifications:
            self._update_notifications(notifications)
        # only attempt to persist counts if there was an update
        if self.counts_updated_since_persist:
            if time.time() > (self.time_last_persist + self.persist_interval):
                self._persist_counts()
        # reset counts if reset_interval has elapsed
        if time.time() > self.next_midnight:
            self._reset_counts()

    # NOTE: ObjectStore 'objects' contain '_id' and '_rev'
    def _initialize_counts(self):
        """ initialize the volatile (memory only) counts from ObjectStore if available """
        try:
            self.counts_obj = self.object_store.read_doc('notification_counts')
            # persisted as standard dicts, convert to Counter objects ignoring the ObjectStore '_id' and '_rev'
            self.counts = {k: Counter(v) for k, v in self.counts_obj.items() if not (k == '_id' or k == '_rev')}
        except NotFound:
            self.counts = {}
            self._persist_counts()

    def _persist_counts(self):
        """ persist the counts to ObjectStore """
        try:
            self.counts_obj = self.object_store.read_doc('notification_counts')
        except NotFound:
            self.object_store.create_doc({}, 'notification_counts')
            self.counts_obj = self.object_store.read_doc('notification_counts')
        # Counter objects cannot be persisted, convert to standard dicts (leaves '_id', '_rev' untouched)
        self.counts_obj.update({k: dict(v) for k, v in self.counts.items()})
        self.object_store.update_doc(self.counts_obj)
        self.time_last_persist = time.time()
        self.counts_updated_since_persist = False  # boolean to check if counts should be persisted

    def _reset_counts(self):
        """ clears the persisted counts """
        self.object_store.delete_doc('notification_counts')
        self._initialize_counts()  # NOTE: NotificationRequest boolean disabled_by_system reset by UNS
        self.next_midnight = self._midnight(days=1)

    def _disable_notification(self, notification_id):
        """ set the disabled_by_system boolean to True; returns the updated
        (not yet persisted) resource -- _update_notifications saves it """
        notification = self.resource_registry.read(notification_id)
        notification.disabled_by_system = True
        return notification

    def _update_notifications(self, notifications):
        """ updates notifications then publishes ReloadUserInfoEvent """
        for n in notifications:
            self.resource_registry.update(n)
        self.event_publisher.publish_event(event_type=OT.ReloadUserInfoEvent)

    def _midnight(self, days=0):
        """ Seconds since the epoch for midnight Pacific, *days* days ahead.
        NOTE: this is midnight PDT (+0700) """
        dt = datetime.combine(date.today(), datetime.min.time()) + timedelta(days=days, hours=7)
        return (dt - datetime.utcfromtimestamp(0)).total_seconds()
| ooici/coi-services | ion/processes/event/notification_sent_scanner.py | Python | bsd-2-clause | 5,398 | [
"Brian"
] | 19f31ec03366bdc1cf0179c071a17efe12dcb73b55a872b2140bd98eb6e4ca3a |
"""
Numerical python functions written for compatability with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
Cross spectral density uing Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
Power spectral density uing Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
record array helper functions
-------------------------------
A collection of helper methods for numpyrecord arrays
.. _htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions(e rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
Make a 2D grid from 2 1 arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
trapeziodal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
"""
from __future__ import division
import csv, warnings, copy, os
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
set
except NameError:
from sets import Set as set
def linspace(*args, **kw):
    """Deprecated shim: forwards all arguments to numpy.linspace."""
    warnings.warn("use numpy.linspace", DeprecationWarning)
    return np.linspace(*args, **kw)
def meshgrid(x, y):
    """Deprecated shim: forwards to numpy.meshgrid."""
    warnings.warn("use numpy.meshgrid", DeprecationWarning)
    return np.meshgrid(x, y)
def mean(x, dim=None):
    """Deprecated shim around numpy.mean; unlike numpy, empty input
    yields None instead of nan."""
    warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
    return np.mean(x, axis=dim) if len(x) else None
def logspace(xmin, xmax, N):
    """Return N values spaced evenly on a log scale between xmin and xmax
    inclusive.  Note: unlike numpy.logspace, the endpoints here are the
    actual values, not base-10 exponents."""
    lo, hi = np.log(xmin), np.log(xmax)
    return np.exp(np.linspace(lo, hi, N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def window_none(x):
    """Identity window: return *x* completely unmodified."""
    return x
#from numpy import convolve as conv
def conv(x, y, mode=2):
    """Deprecated shim: convolve *x* with *y* via numpy.convolve
    (mode=2 corresponds to numpy's 'full')."""
    warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
    return np.convolve(x, y, mode)
def detrend(x, key=None):
    """Dispatch on *key*: None/'constant' removes the mean, 'linear'
    removes the best-fit line; any other key returns None."""
    if key in (None, 'constant'):
        return detrend_mean(x)
    if key == 'linear':
        return detrend_linear(x)
def demean(x, axis=0):
    """Return *x* minus its mean along the specified *axis*.

    For a nonzero axis the reduced dimension is re-inserted (as length 1)
    so the mean broadcasts correctly against *x*.
    """
    x = np.asarray(x)
    if axis:
        ind = [slice(None)] * axis
        ind.append(np.newaxis)
        # BUGFIX: index with a tuple -- indexing with a list of slices
        # is an error in modern NumPy (was long deprecated).
        return x - x.mean(axis)[tuple(ind)]
    return x - x.mean(axis)
def detrend_mean(x):
    """'Constant' detrending: subtract the overall mean from *x*."""
    return x - x.mean()
def detrend_none(x):
    """No-op detrending: hand *x* straight back."""
    return x
def detrend_linear(y):
    """'Linear' detrending: return *y* minus its least-squares best-fit line.

    Uses the covariance of (index, y) to get the slope -- faster than an
    equivalent np.linalg.lstsq solve.
    """
    # BUGFIX: np.float_ was removed in NumPy 2.0; plain `float` maps to
    # the same float64 dtype.
    x = np.arange(len(y), dtype=float)
    C = np.cov(x, y, bias=1)
    b = C[0, 1] / C[0, 0]          # slope
    a = y.mean() - b * x.mean()    # intercept
    return y - (b * x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
        window=window_hanning, noverlap=0, pad_to=None, sides='default',
        scale_by_freq=None):
    """
    Compute the per-segment FFT cross products shared by psd(), csd()
    and specgram().  Returns (Pxy, freqs, t) where Pxy has one column
    per segment; callers average or keep the columns as appropriate.
    """
    #The checks for if y is x are so that we can use the same function to
    #implement the core of psd(), csd(), and spectrogram() without doing
    #extra calculations. We return the unaveraged Pxy, freqs, and t.
    same_data = y is x
    #Make sure we're dealing with a numpy array. If y and x were the same
    #object to start with, keep them that way
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x) < NFFT:
        n = len(x)
        x = np.resize(x, (NFFT,))
        x[n:] = 0
    if not same_data and len(y) < NFFT:
        n = len(y)
        y = np.resize(y, (NFFT,))
        y[n:] = 0
    if pad_to is None:
        pad_to = NFFT
    if scale_by_freq is None:
        warnings.warn("psd, csd, and specgram have changed to scale their "
            "densities by the sampling frequency for better MatLab "
            "compatibility. You can pass scale_by_freq=False to disable "
            "this behavior. Also, one-sided densities are scaled by a "
            "factor of 2.")
        scale_by_freq = True
    # For real x, ignore the negative frequencies unless told otherwise
    if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
        numFreqs = pad_to
        scaling_factor = 1.
    elif sides in ('default', 'onesided'):
        numFreqs = pad_to//2 + 1
        scaling_factor = 2.
    else:
        raise ValueError("sides must be one of: 'default', 'onesided', or "
            "'twosided'")
    # Matlab divides by the sampling frequency so that density function
    # has units of dB/Hz and can be integrated by the plotted frequency
    # values. Perform the same scaling here.
    if scale_by_freq:
        scaling_factor /= Fs
    if cbook.iterable(window):
        assert(len(window) == NFFT)
        windowVals = window
    else:
        windowVals = window(np.ones((NFFT,), x.dtype))
    step = NFFT - noverlap
    ind = np.arange(0, len(x) - NFFT + 1, step)
    n = len(ind)
    # BUGFIX: np.complex_ was removed in NumPy 2.0; complex128 is the
    # identical dtype.
    Pxy = np.zeros((numFreqs, n), np.complex128)
    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i]+NFFT]
        thisX = windowVals * detrend(thisX)
        fx = np.fft.fft(thisX, n=pad_to)
        if same_data:
            fy = fx
        else:
            thisY = y[ind[i]:ind[i]+NFFT]
            thisY = windowVals * detrend(thisY)
            fy = np.fft.fft(thisY, n=pad_to)
        Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
    # Scale the spectrum by the norm of the window to compensate for
    # windowing loss; see Bendat & Piersol Sec 11.5.2. Also include
    # scaling factors for one-sided densities and dividing by the sampling
    # frequency, if desired.
    Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
    t = 1./Fs * (ind + NFFT / 2.)
    freqs = float(Fs) / pad_to * np.arange(numFreqs)
    return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] ="""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
Must be even; a power 2 is most efficient. The default value is 256.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
matlab, where the *detrend* parameter is a vector, in
matplotlib is it a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MatLab compatibility.
"""
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The power spectral density by Welch's average periodogram method.
    The vector *x* is divided into *NFFT* length blocks. Each block
    is detrended by the function *detrend* and windowed by the function
    *window*. *noverlap* gives the length of the overlap between blocks.
    The absolute(fft(block))**2 of each segment are averaged to compute
    *Pxx*, with a scaling to correct for power loss due to windowing.
    If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
    *x*
        Array or sequence containing the data
    %(PSD)s
    Returns the tuple (*Pxx*, *freqs*).
    Refs:
        Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)
    """
    # The PSD is just the real part of the cross spectral density of x
    # with itself (csd also does the segment averaging).
    Pxx, freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    return Pxx.real, freqs
# Interpolate the shared keyword documentation into the docstring.
psd.__doc__ = psd.__doc__ % kwdocd
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The cross power spectral density by Welch's average periodogram
    method. The vectors *x* and *y* are divided into *NFFT* length
    blocks. Each block is detrended by the function *detrend* and
    windowed by the function *window*. *noverlap* gives the length
    of the overlap between blocks. The product of the direct FFTs
    of *x* and *y* are averaged over each segment to compute *Pxy*,
    with a scaling to correct for power loss due to windowing.
    If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
    padded to *NFFT*.
    *x*, *y*
        Array or sequence containing the data
    %(PSD)s
    Returns the tuple (*Pxy*, *freqs*).
    Refs:
        Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)
    """
    Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
        noverlap, pad_to, sides, scale_by_freq)
    # Average over the segment periodograms (columns) to get the final
    # density estimate.
    if len(Pxy.shape) == 2 and Pxy.shape[1] > 1:
        Pxy = Pxy.mean(axis=1)
    return Pxy, freqs
# Interpolate the shared keyword documentation into the docstring.
csd.__doc__ = csd.__doc__ % kwdocd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
    """
    Compute a spectrogram of data in *x*. Data are split into *NFFT*
    length segements and the PSD of each section is computed. The
    windowing function *window* is applied to each segment, and the
    amount of overlap of each segment is specified with *noverlap*.
    If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencie is returned. If *x* is complex then the complete
    spectrum is returned.
    %(PSD)s
    Returns a tuple (*Pxx*, *freqs*, *t*):
         - *Pxx*: 2-D array, columns are the periodograms of
           successive segments
         - *freqs*: 1-D array of frequencies corresponding to the rows
           in Pxx
         - *t*: 1-D array of times corresponding to midpoints of
           segments.
    .. seealso::
        :func:`psd`:
            :func:`psd` differs in the default overlap; in returning
            the mean of the segment periodograms; and in not returning
            times.
    """
    assert(NFFT > noverlap)
    Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
        noverlap, pad_to, sides, scale_by_freq)
    Pxx = Pxx.real #Needed since helper implements generically
    if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
        # center the frequency range at zero
        # NOTE(review): NFFT/2 is used as an index -- integer division
        # under Python 2 only; would need NFFT//2 on Python 3. Confirm.
        freqs = np.concatenate((freqs[NFFT/2:]-Fs, freqs[:NFFT/2]))
        Pxx = np.concatenate((Pxx[NFFT/2:,:], Pxx[:NFFT/2,:]), 0)
    return Pxx, freqs, t
# Interpolate the shared keyword documentation into the docstring.
specgram.__doc__ = specgram.__doc__ % kwdocd
# Error text raised by cohere() when the signal is shorter than 2*NFFT.
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The coherence between *x* and *y*. Coherence is the normalized
    cross spectral density:
    .. math::
        C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
    *x*, *y*
        Array or sequence containing the data
    %(PSD)s
    The return value is the tuple (*Cxy*, *f*), where *f* are the
    frequencies of the coherence vector. For cohere, scaling the
    individual densities by the sampling frequency has no effect, since
    the factors cancel out.
    .. seealso::
        :func:`psd` and :func:`csd`:
            For information about the methods used to compute
            :math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
    """
    # Need at least two full segments for a meaningful average.
    if len(x) < 2 * NFFT:
        raise ValueError(_coh_error)
    Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
    Cxy.shape = (len(f),)
    return Cxy, f
# Interpolate the shared keyword documentation into the docstring.
cohere.__doc__ = cohere.__doc__ % kwdocd
def corrcoef(*args):
    """
    corrcoef(*X*) where *X* is a matrix returns a matrix of correlation
    coefficients for the columns of *X*.

    corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix
    of correlation coefficients for *x* and *y*.

    Numpy arrays can be real or complex.

    The correlation matrix is defined from the covariance matrix *C* as

    .. math::

        r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}}
    """
    warnings.warn("Use numpy.corrcoef", DeprecationWarning)
    # rowvar=False: observations are rows, variables are columns.
    return np.corrcoef(*args, rowvar=False)
def polyfit(*args, **kwargs):
u"""
polyfit(*x*, *y*, *N*)
Do a best fit polynomial of order *N* of *y* to *x*. Return value
is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for
*N*=2::
p2*x0^2 + p1*x0 + p0 = y1
p2*x1^2 + p1*x1 + p0 = y1
p2*x2^2 + p1*x2 + p0 = y2
.....
p2*xk^2 + p1*xk + p0 = yk
Method: if *X* is a the Vandermonde Matrix computed from *x* (see
`vandermonds
<http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the
polynomial least squares solution is given by the '*p*' in
X*p = y
where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix,
*p* is a *N*+1 length vector, and *y* is a (len(*x*)
\N{MULTIPLICATION SIGN} 1) vector.
This equation can be solved as
.. math::
p = (X_t X)^-1 X_t y
where :math:`X_t` is the transpose of *X* and -1 denotes the
inverse. Numerically, however, this is not a good method, so we
use :func:`numpy.linalg.lstsq`.
For more info, see `least squares fitting
<http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_,
but note that the *k*'s and *n*'s in the superscripts and
subscripts on that page. The linear algebra is correct, however.
.. seealso::
:func:`polyval`
"""
warnings.warn("use numpy.poyfit", DeprecationWarning)
return np.polyfit(*args, **kwargs)
def polyval(*args, **kwargs):
    """
    *y* = polyval(*p*, *x*)

    *p* is a vector of polynomial coefficients and *y* is the
    polynomial evaluated at *x*.

    Example code to remove a polynomial (quadratic) trend from y::

        p = polyfit(x, y, 2)
        trend = polyval(p, x)
        resid = y - trend

    .. seealso::
        :func:`polyfit`
    """
    warnings.warn("use numpy.polyval", DeprecationWarning)
    result = np.polyval(*args, **kwargs)
    return result
def vander(*args, **kwargs):
    """
    *X* = vander(*x*, *N* = *None*)

    The Vandermonde matrix of vector *x*.  The *i*-th column of *X* is
    the *i*-th power of *x*.  *N* is the maximum power to compute; if
    *N* is *None* it defaults to len(*x*).
    """
    warnings.warn("Use numpy.vander()", DeprecationWarning)
    result = np.vander(*args, **kwargs)
    return result
def donothing_callback(*args):
    """Default no-op progress callback: accepts anything, does nothing."""
    return None
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
                  window=window_hanning, noverlap=0,
                  preferSpeedOverMemory=True,
                  progressCallback=donothing_callback,
                  returnPxx=False):
    """
    Cxy, Phase, freqs = cohere_pairs(X, ij, ...)

    Compute the coherence for all pairs in *ij*.  *X* is a
    (*numSamples*, *numCols*) numpy array.  *ij* is a list of tuples
    (*i*, *j*); each tuple is a pair of column indexes into *X* for
    which to compute coherence.  For example, for all non-redundant
    pairs of 64 columns::

        ij = []
        for i in range(64):
            for j in range(i+1, 64):
                ij.append((i, j))

    The other arguments, except *preferSpeedOverMemory*, are explained
    in the help string of :func:`psd`.

    Return value is the tuple (*Cxy*, *Phase*, *freqs*):

      - *Cxy*: dict mapping (*i*, *j*) -> coherence vector for that pair
      - *Phase*: dict mapping (*i*, *j*) -> phase of the cross spectral
        density at each frequency
      - *freqs*: frequency vector, equal in length to the coherence
        and phase vectors

    If *returnPxx* is True a fourth element, the dict of per-column
    power spectral densities, is appended to the return tuple.

    For a large number of pairs this is much more efficient than
    calling :func:`cohere` per pair because the windowed FFT of every
    column is computed only once.  With *preferSpeedOverMemory* True
    the conjugate FFTs are cached as well (one extra complex array of
    roughly the size of *X*); with False only one cache array is kept.

    .. seealso::
        :file:`test/cohere_pairs_test.py` in the src tree:
            Shows that :func:`cohere_pairs` and :func:`cohere` agree
            for a given pair.
    """
    numRows, numCols = X.shape
    # zero pad if X is too short for a single NFFT segment
    if numRows < NFFT:
        tmp = X
        X = np.zeros( (NFFT, numCols), X.dtype)
        X[:numRows,:] = tmp
        del tmp
    numRows, numCols = X.shape

    # the set of columns actually referenced by ij
    seen = {}
    for i,j in ij:
        seen[i]=1; seen[j] = 1
    allColumns = list(seen.keys())
    Ncols = len(allColumns)
    del seen

    # for real X, ignore the negative frequencies
    if np.iscomplexobj(X): numFreqs = NFFT
    else: numFreqs = NFFT//2+1

    # cache the FFT of every windowed, detrended NFFT length segment
    # of every channel.  If preferSpeedOverMemory, cache the conjugate
    # as well.
    if cbook.iterable(window):
        assert(len(window) == NFFT)
        windowVals = window
    else:
        # Bug fix: the original called the undefined Numeric-era
        # helper typecode(X); X.dtype is the numpy equivalent.
        windowVals = window(np.ones(NFFT, X.dtype))
    ind = list(range(0, numRows-NFFT+1, NFFT-noverlap))
    numSlices = len(ind)
    FFTSlices = {}
    FFTConjSlices = {}
    Pxx = {}
    normVal = np.linalg.norm(windowVals)**2
    for colNum, iCol in enumerate(allColumns):
        # Bug fix: the original reported i/Ncols, where i was the
        # stale loop variable left over from building `seen`.
        progressCallback(colNum/Ncols, 'Cacheing FFTs')
        # np.complex_ was removed in NumPy 2.0; complex128 is the same dtype.
        Slices = np.zeros( (numSlices, numFreqs), dtype=np.complex128)
        for iSlice in range(numSlices):
            thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
            thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
        FFTSlices[iCol] = Slices
        if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conjugate(Slices)
        # Bug fix: average over the slices (axis=0), per frequency; the
        # Numeric-era np.mean default flattened the whole array.
        Pxx[iCol] = np.divide(np.mean(np.absolute(Slices)**2, axis=0), normVal)
    del Slices, ind, windowVals

    # compute the coherences and phases for all pairs using the
    # cached FFTs
    Cxy = {}
    Phase = {}
    count = 0
    N = len(ij)
    for i,j in ij:
        count += 1
        if count%10 == 0:
            progressCallback(count/N, 'Computing coherences')
        if preferSpeedOverMemory:
            Pxy = FFTSlices[i] * FFTConjSlices[j]
        else:
            Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
        # Bug fix: average over the slice axis only (see Pxx above).
        if numSlices > 1: Pxy = np.mean(Pxy, axis=0)
        Pxy = np.divide(Pxy, normVal)
        Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
        Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)

    freqs = Fs/NFFT*np.arange(numFreqs)
    if returnPxx:
        return Cxy, Phase, freqs, Pxx
    else:
        return Cxy, Phase, freqs
def entropy(y, bins):
    r"""
    Return the differential entropy of the data in *y*:

    .. math::

        S = -\sum p_i \ln(p_i) + \ln(\Delta)

    where :math:`p_i` is the probability of observing *y* in the
    :math:`i^{th}` bin of *bins*, and :math:`\Delta` is the bin width.
    *bins* can be a number of bins or a range of bins; see
    :func:`numpy.histogram`.

    Compare *S* with analytic calculation for a Gaussian::

        x = mu + sigma * randn(200000)
        Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
    """
    counts, edges = np.histogram(y, bins)
    # np.float_ was removed in NumPy 2.0; builtin float is float64.
    counts = counts.astype(float)
    counts = counts[counts > 0]          # drop empty bins: p*ln(p) -> 0
    p = counts / len(y)
    delta = edges[1] - edges[0]
    # Natural log; the + ln(delta) term converts the discrete-bin
    # entropy into a differential entropy estimate.
    # (The original used an unqualified `log`, relying on a star import.)
    S = -np.sum(p * np.log(p)) + np.log(delta)
    return S
def hist(y, bins=10, normed=0):
    """
    Return the histogram of *y* with *bins* equally sized bins.  If
    *bins* is an array, use those bins.  Return value is (*n*, *x*)
    where *n* is the count for each bin in *x*.

    If *normed* is *False*, return the counts in the first element of
    the returned tuple.  If *normed* is *True*, return the probability
    density :math:`\\frac{n}{len(y)\\,\\mathrm{dbin}}`.

    If *y* has rank > 1, it will be raveled.  If *y* is masked, only
    the unmasked values will be used.

    Credits: the Numeric 22 documentation
    """
    warnings.warn("Use numpy.histogram()", DeprecationWarning)
    # Bug fix: numpy removed the `normed` keyword; `density` implements
    # the documented n/(len(y)*dbin) behavior.
    return np.histogram(y, bins=bins, range=None, density=bool(normed))
def normpdf(x, *args):
    "Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
    mu, sigma = args
    z = (x - mu) / sigma
    return np.exp(-0.5 * z ** 2) / (np.sqrt(2 * np.pi) * sigma)
def levypdf(x, gamma, alpha):
    """
    Return the Levy pdf evaluated at *x* for params *gamma*, *alpha*.

    The pdf is obtained as the FFT of the Levy characteristic function
    ``exp(-gamma*|2*pi*f|**alpha)``; *x* must have an even length.
    """
    N = len(x)
    # Bug fix: the original used Python 2 `raise ValueError, msg`
    # syntax (a SyntaxError in Python 3) and misspelled "even".
    if N % 2 != 0:
        raise ValueError('x must be an even length array; try\n' +
                         'x = np.linspace(minx, maxx, N), where N is even')
    dx = x[1] - x[0]
    # Bug fix: the original passed the dtype as the third *positional*
    # argument of np.arange, i.e. as the step; it must be dtype=.
    f = 1.0 / (N * dx) * np.arange(-N // 2, N // 2, dtype=float)
    ind = np.concatenate([np.arange(N // 2, N, dtype=int),
                          np.arange(0, N // 2, dtype=int)])
    df = f[1] - f[0]
    # characteristic function (originally used unqualified exp/pi)
    cfl = np.exp(-gamma * np.absolute(2 * np.pi * f) ** alpha)
    px = np.fft.fft(np.take(cfl, ind) * df).real
    return np.take(px, ind)
def find(condition):
    "Return the indices where ravel(condition) is true"
    return np.nonzero(np.ravel(condition))[0]
def trapz(x, y):
    """
    Trapezoidal integral of *y*(*x*).

    Note the argument order is (x, y), the reverse of numpy's.
    """
    warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning)
    # np.trapz was removed in NumPy 2.0 in favor of np.trapezoid; fall
    # back for older numpy versions.
    integrate = getattr(np, 'trapezoid', None)
    if integrate is None:
        integrate = np.trapz
    return integrate(y, x)
def longest_contiguous_ones(x):
    """
    Return the indices of the longest stretch of contiguous ones in *x*,
    assuming *x* is a vector of zeros and ones.  If there are two
    equally long stretches, pick the first.
    """
    x = np.ravel(x)
    if len(x) == 0:
        return np.array([])
    zero_pos = (x == 0).nonzero()[0]
    if len(zero_pos) == 0:
        # no zeros at all: the whole vector is a single run of ones
        return np.arange(len(x))
    if len(zero_pos) == len(x):
        # no ones at all
        return np.array([])
    # pad with a zero on each side so every run of ones has both a
    # rising and a falling edge in the diff
    padded = np.zeros(len(x) + 2, x.dtype)
    padded[1:-1] = x
    edges = np.diff(padded)
    run_starts = (edges == 1).nonzero()[0]
    run_ends = (edges == -1).nonzero()[0]
    lengths = run_ends - run_starts
    best = (lengths == lengths.max()).nonzero()[0][0]
    return np.arange(run_starts[best], run_ends[best])
def longest_ones(x):
    """Alias for :func:`longest_contiguous_ones`."""
    return longest_contiguous_ones(x)
def prepca(P, frac=0):
    """
    Compute the principal components of *P*, a (*numVars*, *numObs*)
    array.  *frac* is the minimum fraction of variance that a
    component must contain to be included.

    Return value is the tuple (*Pcomponents*, *Trans*, *fracVar*):

      - *Pcomponents*: a (numVars, numObs) array of transformed data
      - *Trans*: the weights matrix, ie ``Pcomponents = Trans * P``
      - *fracVar*: the fraction of the variance accounted for by each
        returned component

    A similar function of the same name was in the Matlab (TM) R13
    Neural Network Toolbox but is not found in later versions; its
    successor seems to be called "processpcs".
    """
    U, s, v = np.linalg.svd(P)
    varEach = s ** 2 / P.shape[1]
    fracVar = varEach / varEach.sum()
    # keep the leading components whose variance fraction >= frac
    keep = slice((fracVar >= frac).sum())
    Trans = U[:, keep].transpose()
    Pcomponents = np.dot(Trans, P)
    return Pcomponents, Trans, fracVar[keep]
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
    """
    Return the percentiles of *x*.  *p* can either be a sequence of
    percentile values or a scalar.  If *p* is a sequence, the ith
    element of the return sequence is the *p*(i)-th percentile of *x*.
    If *p* is a scalar, the largest value of *x* less than or equal to
    the *p* percentage point in the sequence is returned.
    """
    x = np.array(x).ravel()  # we need a copy
    x.sort()
    Nx = len(x)
    # np.iterable replaces the project-local cbook.iterable helper.
    if not np.iterable(p):
        # Bug fix: clamp the index so p == 100 (index Nx) does not
        # raise IndexError; the sequence branch below already clamps.
        return x[min(int(p * Nx / 100.0), Nx - 1)]
    p = np.asarray(p) * Nx / 100.0
    ind = p.astype(int)
    ind = np.where(ind >= Nx, Nx - 1, ind)
    return x.take(ind)
def prctile_rank(x, p):
    """
    Return the percentile-bin rank, 0..len(*p*), of each element of
    *x*.  Eg if *p* = (25, 50, 75), the return value is a len(*x*)
    array with values in [0,1,2,3]: 0 means the value is below the
    25th percentile, 1 means >= 25th and < 50th, ... and 3 means above
    the 75th percentile cutoff.

    *p* is either an array of percentiles in [0..100] or a scalar
    giving the number of quantiles to rank into.
    """
    if cbook.iterable(p):
        p = np.asarray(p)
    else:
        p = np.arange(100.0 / p, 100.0, 100.0 / p)
    if p.max() <= 1 or p.min() < 0 or p.max() > 100:
        raise ValueError('percentiles should be in range 0..100, not 0..1')
    cutoffs = prctile(x, p)
    return np.searchsorted(cutoffs, x)
def center_matrix(M, dim=0):
    """
    Return the matrix *M* with each row having zero mean and unit std.

    If *dim* = 1 operate on columns instead of rows.  (*dim* is
    opposite to the numpy axis kwarg.)
    """
    # np.float_ was removed in NumPy 2.0; builtin float is float64.
    M = np.asarray(M, float)
    if dim:
        M = (M - M.mean(axis=0)) / M.std(axis=0)
    else:
        M = M - M.mean(axis=1)[:, np.newaxis]
        M = M / M.std(axis=1)[:, np.newaxis]
    return M
def rk4(derivs, y0, t):
    """
    Integrate a 1D or ND system of ODEs using 4th-order Runge-Kutta.

    This is a toy implementation which may be useful if you find
    yourself stranded on a system w/o scipy.  Otherwise use
    :func:`scipy.integrate`.

    *y0*
        initial state vector (or scalar)

    *t*
        sample times

    *derivs*
        returns the derivative of the system and has the
        signature ``dy = derivs(yi, ti)``

    Example (1D system)::

        t = arange(0.0, 2.0, 0.0005)
        yout = rk4(lambda y, t: -y, 1.0, t)

    Example (2D system)::

        def derivs6(x, t):
            d1 =  x[0] + 2*x[1]
            d2 = -3*x[0] + 4*x[1]
            return (d1, d2)
        yout = rk4(derivs6, (1, 2), t)

    Returns an array of states, one row per element of *t*.
    """
    # np.float_ was removed in NumPy 2.0; builtin float is float64.
    try:
        Ny = len(y0)
    except TypeError:
        # scalar initial state
        yout = np.zeros((len(t),), float)
    else:
        yout = np.zeros((len(t), Ny), float)
    yout[0] = y0
    # (the original had a dead `i = 0` before the loop; removed)
    for i in np.arange(len(t) - 1):
        thist = t[i]
        dt = t[i + 1] - thist
        dt2 = dt / 2.0
        y0 = yout[i]
        k1 = np.asarray(derivs(y0, thist))
        k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2))
        k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2))
        k4 = np.asarray(derivs(y0 + dt * k3, thist + dt))
        yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
    return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
                     mux=0.0, muy=0.0, sigmaxy=0.0):
    """
    Bivariate Gaussian distribution for equal shape *X*, *Y*.

    See `bivariate normal
    <http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
    at mathworld.
    """
    dx = X - mux
    dy = Y - muy
    rho = sigmaxy / (sigmax * sigmay)
    one_minus_rho2 = 1 - rho ** 2
    z = (dx ** 2 / sigmax ** 2 + dy ** 2 / sigmay ** 2
         - 2 * rho * dx * dy / (sigmax * sigmay))
    denom = 2 * np.pi * sigmax * sigmay * np.sqrt(one_minus_rho2)
    return np.exp(-z / (2 * one_minus_rho2)) / denom
def get_xyz_where(Z, Cond):
    """
    *Z* and *Cond* are *M* x *N* matrices.  *Z* are data and *Cond* is
    a boolean matrix where some condition is satisfied.  Return value
    is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
    *z* are the values of *Z* at those indices.  *x*, *y*, and *z* are
    1D arrays.
    """
    rows, cols = np.indices(Z.shape)
    return rows[Cond], cols[Cond], Z[Cond]
def get_sparse_matrix(M, N, frac=0.1):
    """
    Return an *M* x *N* dense array with approximately *frac* of its
    elements filled with random values (the rest zero).

    Positions are drawn with replacement, so collisions can make the
    actual fill fraction slightly less than *frac*.
    """
    data = np.zeros((M, N))
    for _ in range(int(M * N * frac)):
        # Bug fix: np.random.randint's high bound is exclusive, so the
        # original randint(0, M-1) could never fill the last row/column.
        i = np.random.randint(0, M)
        j = np.random.randint(0, N)
        data[i, j] = np.random.rand()
    return data
def dist(x, y):
    """Return the Euclidean distance between two points *x* and *y*."""
    diff = x - y
    return np.sqrt(np.dot(diff, diff))
def dist_point_to_segment(p, s0, s1):
    """
    Return the distance from point *p* to the segment [*s0*, *s1*].

    *p*, *s0*, *s1* are *xy* sequences.

    This algorithm from
    http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
    """
    def _dist(a, b):
        # Euclidean distance (inlined so the function is self-contained)
        d = a - b
        return np.sqrt(np.dot(d, d))
    # np.float_ was removed in NumPy 2.0; builtin float is float64.
    p = np.asarray(p, float)
    s0 = np.asarray(s0, float)
    s1 = np.asarray(s1, float)
    v = s1 - s0
    w = p - s0
    c1 = np.dot(w, v)
    if c1 <= 0:
        # projection falls before s0: closest to the s0 endpoint
        return _dist(p, s0)
    c2 = np.dot(v, v)
    if c2 <= c1:
        # projection falls after s1: closest to the s1 endpoint
        return _dist(p, s1)
    # perpendicular projection falls inside the segment
    b = c1 / c2
    pb = s0 + b * v
    return _dist(p, pb)
def segments_intersect(s1, s2):
    """
    Return *True* if *s1* and *s2* intersect.

    Each segment is given as a pair of endpoints::

        s1: (x1, y1), (x2, y2)
        s2: (x3, y3), (x4, y4)
    """
    (x1, y1), (x2, y2) = s1
    (x3, y3), (x4, y4) = s2
    den = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
    if den == 0:
        # lines are parallel (or a segment is degenerate)
        return False
    u1 = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / den
    u2 = ((x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)) / den
    # intersection lies within both segments' parameter ranges
    return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
def fftsurr(x, detrend=detrend_none, window=window_none):
    """
    Compute an FFT phase randomized surrogate of *x*.

    The detrended, windowed signal is transformed, every Fourier
    coefficient is rotated by an independent uniform random phase, and
    the real part of the inverse transform is returned.
    """
    if cbook.iterable(window):
        x = window * detrend(x)
    else:
        x = window(detrend(x))
    spectrum = np.fft.fft(x)
    phase = 2.0 * np.pi * 1j * np.random.rand(len(x))
    return np.fft.ifft(spectrum * np.exp(phase)).real
def liaupunov(x, fprime):
    """
    Estimate the Lyapunov exponent of the trajectory *x* of a map
    whose derivative is returned by *fprime*:

    .. math::

        \\lambda = \\frac{1}{n}\\sum \\ln|f'(x_i)|

    .. seealso::
        Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
        `Wikipedia article on Lyapunov Exponent
        <http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.

    .. note::
        What the function here calculates may not be what you really
        want; *caveat emptor*.  It also seems that this function's
        name is badly misspelled.
    """
    derivs = np.absolute(fprime(x))
    return np.log(derivs).mean()
class FIFOBuffer:
    """
    A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
    using numpy arrays under the hood.  It is assumed that you will
    call asarrays much less frequently than you add data to the queue
    -- otherwise another data structure will be faster.

    This can be used to support plots where data is added from a real
    time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming.

    If you set the *dataLim* attr to
    :class:`~matplotlib.transforms.BBox` (eg
    :attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
    new data come in.

    TODO: add a grow method that will extend nmax

    .. note::

      mlab seems like the wrong place for this class.
    """
    def __init__(self, nmax):
        """
        Buffer up to *nmax* points.
        """
        # np.float_ was removed in NumPy 2.0; builtin float is float64.
        # _xs/_ys are the rotating stores; _xa/_ya are scratch arrays
        # used by asarrays() to return the data in insertion order.
        self._xa = np.zeros((nmax,), float)
        self._ya = np.zeros((nmax,), float)
        self._xs = np.zeros((nmax,), float)
        self._ys = np.zeros((nmax,), float)
        self._ind = 0            # total number of points ever added
        self._nmax = nmax
        self.dataLim = None      # optional bbox updated on every add()
        self.callbackd = {}      # maps N -> list of funcs fired every N adds

    def register(self, func, N):
        """
        Call *func* every time *N* events are passed; *func* signature
        is ``func(fifo)``.
        """
        self.callbackd.setdefault(N, []).append(func)

    def add(self, x, y):
        """
        Add scalar *x* and *y* to the queue.
        """
        if self.dataLim is not None:
            xys = ((x, y),)
            self.dataLim.update(xys, -1)  # -1 means use the default ignore setting
        ind = self._ind % self._nmax
        self._xs[ind] = x
        self._ys[ind] = y

        for N, funcs in self.callbackd.items():
            if (self._ind % N) == 0:
                for func in funcs:
                    func(self)

        self._ind += 1

    def last(self):
        """
        Get the last *x*, *y* or *None*.  *None* if no data set.
        """
        if self._ind == 0:
            return None, None
        ind = (self._ind - 1) % self._nmax
        return self._xs[ind], self._ys[ind]

    def asarrays(self):
        """
        Return *x* and *y* as arrays; their length will be the len of
        data added or *nmax*.
        """
        if self._ind < self._nmax:
            return self._xs[:self._ind], self._ys[:self._ind]
        # buffer has wrapped: unroll so the oldest point comes first
        ind = self._ind % self._nmax
        self._xa[:self._nmax - ind] = self._xs[ind:]
        self._xa[self._nmax - ind:] = self._xs[:ind]
        self._ya[:self._nmax - ind] = self._ys[ind:]
        self._ya[self._nmax - ind:] = self._ys[:ind]
        return self._xa, self._ya

    def update_datalim_to_current(self):
        """
        Update the *datalim* in the current data in the fifo.
        """
        if self.dataLim is None:
            raise ValueError('You must first set the dataLim attr')
        x, y = self.asarrays()
        self.dataLim.update_numerix(x, y, True)
def movavg(x, n):
    """
    Compute the length-*n* moving average of *x*.

    Returns an array of length ``len(x) - n + 1`` (numpy 'valid'
    convolution mode).
    """
    # np.float_ was removed in NumPy 2.0; builtin float is float64.
    w = np.empty((n,), dtype=float)
    w.fill(1.0 / n)
    return np.convolve(x, w, mode='valid')
def save(fname, X, fmt='%.18e', delimiter=' '):
    """
    Save the data in *X* to file *fname* using *fmt* string to convert the
    data to strings.

    *fname* can be a filename or a file handle.  If the filename ends
    in '.gz', the file is automatically saved in compressed gzip
    format.  The :func:`load` function understands gzipped files
    transparently.

    Example usage::

        save('test.out', X)         # X is an array
        save('test1.out', (x,y,z))  # x,y,z equal sized 1D arrays
        save('test2.out', x)        # x is 1D
        save('test3.out', x, fmt='%1.4e')  # use exponential notation

    *delimiter* is used to separate the fields, eg. *delimiter* ','
    for comma-separated values.
    """
    own_fh = False
    if isinstance(fname, str):
        # Bug fixes for Python 3: the `file` builtin no longer exists
        # (use open), and gzip must be opened in *text* mode since we
        # write str data.
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, 'wt')
        else:
            fh = open(fname, 'w')
        own_fh = True
    elif hasattr(fname, 'seek'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)
        origShape = None
        if X.ndim == 1:
            # write a 1-D array as a single column
            origShape = X.shape
            X.shape = len(X), 1
        for row in X:
            fh.write(delimiter.join([fmt % val for val in row]) + '\n')
        if origShape is not None:
            X.shape = origShape
    finally:
        # close (and flush) only handles we opened ourselves
        if own_fh:
            fh.close()
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
         usecols=None, unpack=False, dtype=np.float_):
    """
    Load ASCII data from *fname* into an array and return the array.

    The data must be regular, same number of values in every row.

    *fname* can be a filename or a file handle.  Support for gzipped
    files is automatic, if the filename ends in '.gz'.

    matfile data is not supported; for that, use :mod:`scipy.io.mio`
    module.

    Example usage::

        X = load('test.dat')  # data in two columns
        t = X[:,0]
        y = X[:,1]

    Alternatively, you can do the same with "unpack"; see below::

        X = load('test.dat')    # a matrix of data
        x = load('test.dat')    # a single column of data

    - *comments*: the character used to indicate the start of a comment
      in the file

    - *delimiter* is a string-like character used to seperate values
      in the file. If *delimiter* is unspecified or *None*, any
      whitespace string is a separator.

    - *converters*, if not *None*, is a dictionary mapping column number to
      a function that will convert that column to a float (or the optional
      *dtype* if specified).  Eg, if column 0 is a date string::

        converters = {0:datestr2num}

    - *skiprows* is the number of rows from the top to skip.

    - *usecols*, if not *None*, is a sequence of integer column indexes to
      extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
      just the 2nd, 5th and 6th columns

    - *unpack*, if *True*, will transpose the matrix allowing you to unpack
      into named arguments on the left hand side::

        t,y = load('test.dat', unpack=True) # for two column data
        x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)

    - *dtype*: the array will have this dtype.  default: ``numpy.float_``

    .. seealso::
        See :file:`examples/pylab_examples/load_converter.py` in the source tree:
            Exercises many of these options.
    """
    if converters is None: converters = {}
    # accepts a path (transparently handling .gz) or an open file handle
    fh = cbook.to_filehandle(fname)
    X = []
    if delimiter==' ':
        # space splitting is a special case since x.split() is what
        # you want, not x.split(' ')
        def splitfunc(x):
            return x.split()
    else:
        def splitfunc(x):
            return x.split(delimiter)
    # converterseq is built lazily from the first data row so the
    # number of columns need not be known in advance
    converterseq = None
    for i,line in enumerate(fh):
        if i<skiprows: continue
        # strip any trailing comment and surrounding whitespace
        line = line.split(comments, 1)[0].strip()
        if not len(line): continue   # skip blank lines
        if converterseq is None:
            # default converter is float; user converters override per column
            converterseq = [converters.get(j,float)
                               for j,val in enumerate(splitfunc(line))]
        if usecols is not None:
            vals = splitfunc(line)
            row = [converterseq[j](vals[j]) for j in usecols]
        else:
            row = [converterseq[j](val)
                      for j,val in enumerate(splitfunc(line))]
        # NOTE(review): thisLen is never used; presumably a leftover from
        # a removed row-length consistency check
        thisLen = len(row)
        X.append(row)

    X = np.array(X, dtype)
    r,c = X.shape
    if r==1 or c==1:
        # collapse a single row or single column result to a 1-D array
        X.shape = max(r,c),
    if unpack: return X.transpose()
    else: return X
def slopes(x, y):
    """
    SLOPES calculate the slope y'(x).  Given data vectors X and Y,
    SLOPES calculates Y'(X), i.e the slope of a curve Y(X).  The slope
    is estimated using the slope obtained from that of a parabola
    through any three consecutive points.

    This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
    W. Stineman (Creative Computing July 1980) in at least one aspect:
    Circles for interpolation demand a known aspect ratio between x-
    and y-values.  For many functions, however, the abscissa are given
    in different dimensions, so an aspect ratio is completely
    arbitrary.  The parabola method gives very similar results to the
    circle method for most regular cases but behaves much better in
    special cases.

    Norbert Nemec, Institute of Theoretical Physics, University or
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by a original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
    """
    # np.float_ was removed in NumPy 2.0; builtin float is float64.
    x = np.asarray(x, float)
    y = np.asarray(y, float)
    yp = np.zeros(y.shape, float)

    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    dydx = dy / dx

    # interior points: slope of the parabola through each point triple
    # (a dx-weighted average of the adjacent secant slopes)
    yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1]) / (dx[1:] + dx[:-1])
    # end points: extrapolate the parabola through the outermost triple
    yp[0] = 2.0 * dy[0] / dx[0] - yp[1]
    yp[-1] = 2.0 * dy[-1] / dx[-1] - yp[-2]
    return yp
def stineman_interp(xi, x, y, yp=None):
    """
    STINEMAN_INTERP Well behaved data interpolation.  Given data
    vectors X and Y, the slope vector YP and a new abscissa vector XI,
    the function stineman_interp(xi,x,y,yp) uses Stineman
    interpolation to calculate a vector YI corresponding to XI.

    Here's an example that generates a coarse sine curve, then
    interpolates over a finer abscissa::

        x = linspace(0,2*pi,20);  y = sin(x); yp = cos(x)
        xi = linspace(0,2*pi,40);
        yi = stineman_interp(xi,x,y,yp);
        plot(x,y,'o',xi,yi)

    The interpolation method is described in the article A
    CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
    W. Stineman (Creative Computing, July 1980), "apparently a real
    solution" to a well known problem.

    For yp=None, the routine automatically determines the slopes using
    the "slopes" routine.

    X is assumed to be sorted in increasing order.

    For values xi[j] < x[0] or xi[j] > x[-1], the routine tries an
    extrapolation.  The relevance of the data obtained from this, of
    course, is questionable...

    Original implementation by Halldor Bjornsson, Icelandic
    Meteorolocial Office, March 2006 halldor at vedur.is; completely
    reworked and optimized for Python by Norbert Nemec, Institute of
    Theoretical Physics, University or Regensburg, April 2006
    Norbert.Nemec at physik.uni-regensburg.de
    """
    # np.float_ was removed in NumPy 2.0; builtin float is float64.
    x = np.asarray(x, float)
    y = np.asarray(y, float)
    assert x.shape == y.shape
    N = len(y)

    if yp is None:
        yp = slopes(x, y)
    else:
        yp = np.asarray(yp, float)

    xi = np.asarray(xi, float)
    yi = np.zeros(xi.shape, float)

    # calculate linear slopes
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    s = dy / dx  # note length of s is N-1 so last element is N-2

    # find the segment each xi is in; this line actually is the key to
    # the efficiency of this implementation
    idx = np.searchsorted(x[1:-1], xi)

    # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1],
    # except at the boundaries, where it may be that
    # xi[j] < x[0] or xi[j] > x[-1]

    # the y-values that would come out from a linear interpolation:
    sidx = s.take(idx)
    xidx = x.take(idx)
    yidx = y.take(idx)
    xidxp1 = x.take(idx + 1)
    yo = yidx + sidx * (xi - xidx)

    # the difference that comes when using the slopes given in yp
    dy1 = (yp.take(idx) - sidx) * (xi - xidx)       # using the yp slope of the left point
    dy2 = (yp.take(idx + 1) - sidx) * (xi - xidxp1)  # using the yp slope of the right point

    dy1dy2 = dy1 * dy2
    # The following is optimized for Python.  The solution actually
    # does more calculations than necessary but exploiting the power
    # of numpy, this is far more efficient than coding a loop by hand
    # in Python.  The three choose() branches correspond to
    # sign(dy1*dy2) being -1, 0, +1.
    yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32) + 1,
                                 ((2 * xi - xidx - xidxp1) / ((dy1 - dy2) * (xidxp1 - xidx)),
                                  0.0,
                                  1 / (dy1 + dy2),))
    return yi
def inside_poly(points, verts):
    """
    Return the indices of *points* (a sequence of x, y pairs) that lie
    inside the polygon defined by *verts* (a sequence of x, y
    vertices).
    """
    mask = nxutils.points_inside_poly(points, verts)
    return np.nonzero(mask)[0]
def poly_below(ymin, xs, ys):
    """
    Given arrays *xs* and *ys*, return the vertices of a polygon that
    has a scalar lower bound *ymin* and an upper bound at the *ys*.

    Intended for use with Axes.fill, eg::

        xv, yv = poly_below(0, x, y)
        ax.fill(xv, yv)
    """
    # Bug fix: the original returned poly_between(xs, ys, xmin), which
    # raised NameError (undefined `xmin`) and passed *ys* as the lower
    # bound.  The scalar *ymin* is the lower bound, *ys* the upper.
    return poly_between(xs, ymin, ys)
def poly_between(x, ylower, yupper):
    """
    Given a sequence *x* and bounds *ylower* and *yupper* (each a
    scalar or an iterable of len(x)), return the x, y vertex arrays of
    the polygon that fills the region between the bounds, for use with
    Axes.fill.
    """
    npts = len(x)
    # broadcast scalar bounds to full-length vectors
    if not cbook.iterable(ylower):
        ylower = np.ones(npts) * ylower
    if not cbook.iterable(yupper):
        yupper = np.ones(npts) * yupper
    # walk along the upper bound, then back along the lower bound
    xs = np.concatenate((x, x[::-1]))
    ys = np.concatenate((yupper, ylower[::-1]))
    return xs, ys
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions

# Bounds beyond which exp() over/underflows for IEEE-754 doubles:
# the log of the smallest positive normal double, and the largest
# representable double.
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308

def exp_safe(x):
    """
    Compute exponentials which safely underflow to zero.

    Array arguments are clipped to [exp_safe_MIN, exp_safe_MAX] before
    exponentiation; scalars go straight to math.exp.  Slow, but
    convenient to use.  Note that numpy provides proper floating point
    exception handling with access to the underlying hardware.
    """
    # Bug fixes: isinstance() instead of an exact `type(x) is` check
    # (so ndarray subclasses take the clipped path too), and np.exp
    # instead of an unqualified `exp` that is not defined in this
    # module's namespace.
    if isinstance(x, np.ndarray):
        return np.exp(np.clip(x, exp_safe_MIN, exp_safe_MAX))
    else:
        return math.exp(x)
def amap(fn, *args):
    """
    amap(function, sequence[, sequence, ...]) -> array.

    Works like :func:`map`, but it returns an array.  This is just a
    convenient shorthand for ``numpy.array(list(map(...)))``.
    """
    # Bug fix: on Python 3, map() returns an iterator and
    # np.array(<iterator>) yields a useless 0-d object array; the
    # iterator must be materialized into a list first.
    return np.array(list(map(fn, *args)))
#from numpy import zeros_like
def zeros_like(a):
    """
    Return an array of zeros of the shape and typecode of *a*.
    """
    warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning)
    result = np.zeros_like(a)
    return result
#from numpy import sum as sum_flat
def sum_flat(a):
    """
    Return the sum of all the elements of *a*, flattened out.

    It uses ``a.flat``, and if *a* is not contiguous, a call to
    ``ravel(a)`` is made.
    """
    warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning)
    total = np.sum(a)
    return total
#from numpy import mean as mean_flat
def mean_flat(a):
    """Deprecated: return the mean of all elements of *a*, flattened out."""
    warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning)
    avg = np.mean(a)
    return avg
def rms_flat(a):
    """
    Return the root mean square of all the elements of *a*, flattened out.
    """
    mean_square = np.mean(np.absolute(a) ** 2)
    return np.sqrt(mean_square)
def l1norm(a):
    """
    Return the *l1* norm of *a*, flattened out.

    Implemented as a separate function (not a call to :func:`norm`) for speed.
    """
    return np.absolute(a).sum()
def l2norm(a):
    """
    Return the *l2* norm of *a*, flattened out.

    Implemented as a separate function (not a call to :func:`norm`) for speed.
    """
    return np.sqrt((np.absolute(a) ** 2).sum())
def norm_flat(a, p=2):
    """
    norm(a,p=2) -> l-p norm of a.flat

    Return the l-p norm of *a*, considered as a flat array. This is NOT a true
    matrix norm, since arrays of arbitrary rank are always flattened.

    *p* can be a number or the string 'Infinity' to get the L-infinity norm.
    """
    # This function was being masked by a more general norm later in
    # the file. We may want to simply delete it.
    magnitudes = np.absolute(a)
    if p == 'Infinity':
        return np.amax(magnitudes)
    return (np.sum(magnitudes ** p)) ** (1.0 / p)
def frange(xini, xfin=None, delta=None, **kw):
    """
    frange([start,] stop[, step, keywords]) -> array of floats

    Return a numpy ndarray containing a progression of floats. Similar to
    :func:`numpy.arange`, but defaults to a closed interval: the endpoint
    *is included* and *start* defaults to 0. Pass the keyword *closed* = 0
    to get the usual half-open :func:`range`-like behavior.

    When *step* is given it specifies the increment (or decrement); all
    arguments may be floats. The keyword *npts* instead fixes the number
    of points (overriding any *step*).

    Examples::

        >>> frange(3)
        array([ 0.,  1.,  2.,  3.])

        >>> frange(3,closed=0)
        array([ 0.,  1.,  2.])

        >>> frange(1,6,2)
        array([1, 3, 5])   or 1,3,5,7, depending on floating point vagueries

        >>> frange(1,6.5,npts=5)
        array([ 1.   ,  2.375,  3.75 ,  5.125,  6.5  ])
    """
    kw.setdefault('closed', 1)
    endpoint = kw['closed'] != 0

    # Mimic range(): a single positional argument is the stop value.
    if xfin is None:
        xini, xfin = 0.0, xini + 0.0
    if delta is None:
        delta = 1.0

    # Either honor an explicit point count, or derive it from the step.
    if 'npts' in kw:
        npts = kw['npts']
        delta = (xfin - xini) / float(npts - endpoint)
    else:
        # round() finds the nearest, so the endpoint can be up to
        # delta/2 larger than xfin.
        npts = int(round((xfin - xini) / delta)) + endpoint

    return np.arange(npts) * delta + xini
#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
    """Deprecated alias for :func:`numpy.diag`."""
    warnings.warn("Use numpy.diag(d)", DeprecationWarning)
    result = np.diag(diag)
    return result
def identity(n, rank=2, dtype='l', typecode=None):
    """
    Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).

    For ranks higher than 2, this object is simply a multi-index Kronecker
    delta::

                            /  1  if i0=i1=...=iR,
        id[i0,i1,...,iR] = -|
                            \\  0  otherwise.

    Optionally a *dtype* (or typecode) may be given (it defaults to 'l').

    Since rank defaults to 2, this function behaves in the default case (when
    only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
    much faster.
    """
    if typecode is not None:
        warnings.warn("Use dtype kwarg instead of typecode",
                      DeprecationWarning)
        dtype = typecode
    iden = np.zeros((n,) * rank, dtype)
    # set every multi-index of the form (i, i, ..., i) to one
    for i in range(n):
        iden[(i,) * rank] = 1
    return iden
def base_repr(number, base=2, padding=0):
    """
    Return the representation of a *number* in any given *base*.

    *padding* left-pads the result with '0' characters (one level of
    padding is consumed per recursion step).
    """
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars[0] + chars[int(number)]
    max_exponent = int(math.log(number) / math.log(base))
    # Python 3 fix: ``long`` no longer exists; plain ints are arbitrary
    # precision on both Python 2 and 3, so the cast is simply dropped.
    max_power = base ** max_exponent
    # floor division keeps the lead digit exact for large integers
    lead_digit = int(number // max_power)
    return chars[lead_digit] + \
        base_repr(number - max_power * lead_digit, base,
                  max(padding - 1, max_exponent))
def binary_repr(number, max_length=1025):
    """
    Return the binary representation of the input *number* as a
    string.

    This is more efficient than using :func:`base_repr` with base 2.

    Increase the value of max_length for very large numbers. Note that
    on 32-bit machines, 2**1023 is the largest integer power of 2
    which can be converted to a Python float.

    Returns the int 0 (not a string) when *number* is zero, matching the
    historical behavior of this function.
    """
    #assert number < 2L << max_length
    # Materialize the shift/mod pipelines as lists so this also works on
    # Python 3, where map() returns an iterator without count()/index().
    shifts = [number >> shift for shift in range(max_length - 1, -1, -1)]
    digits = [value % 2 for value in shifts]
    if 1 not in digits:
        return 0
    # drop the leading zeros
    digits = digits[digits.index(1):]
    return ''.join(str(d) for d in digits)
def log2(x, ln2=math.log(2.0)):
    """
    Return the log(*x*) in base 2.

    This is a _slow_ function but which is guaranteed to return the correct
    integer value if the input is an integer exact power of 2.
    """
    try:
        tail = binary_repr(x)[1:]
    except (AssertionError, TypeError):
        return math.log(x) / ln2
    # An exact power of two is a single leading 1 followed only by zeros,
    # in which case the exponent is simply the number of trailing digits.
    if '1' in tail:
        return math.log(x) / ln2
    return len(tail)
def ispower2(n):
    """
    Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.

    Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
    """
    tail = binary_repr(n)[1:]
    return 0 if '1' in tail else len(tail)
def isvector(X):
    """
    Like the Matlab (TM) function with the same name, returns *True*
    if the supplied numpy array or matrix *X* looks like a vector,
    meaning it has a one non-singleton axis (i.e., it can have
    multiple axes, but all must have length 1, except for one of
    them).

    If you just want to see if the array has 1 axis, use X.ndim == 1.
    """
    # A vector's total size equals its longest axis length: every other
    # axis must then have length 1.
    return np.prod(X.shape) == np.max(X.shape)
#from numpy import fromfunction as fromfunction_kw
def fromfunction_kw(function, dimensions, **kwargs):
    """
    Deprecated drop-in replacement for :func:`numpy.fromfunction` that
    forwards keyword arguments to the supplied *function*.

    Call it as (keywords are optional)::

        fromfunction_kw(MyFunction, dimensions, keywords)

    The function ``MyFunction`` is responsible for handling the
    dictionary of keywords it will receive.
    """
    warnings.warn("Use numpy.fromfunction()", DeprecationWarning)
    result = np.fromfunction(function, dimensions, **kwargs)
    return result
### end fperez numutils code
# Deprecated MATLAB-compatibility stubs.  Each of these used to carry an
# implementation; they now unconditionally raise NotImplementedError and
# point callers at the numpy/scipy replacement.
def rem(x, y):
    """
    Deprecated - see :func:`numpy.remainder`
    """
    raise NotImplementedError('Deprecated - see numpy.remainder')


def norm(x, y=2):
    """
    Deprecated - see :func:`numpy.linalg.norm`
    """
    raise NotImplementedError('Deprecated - see numpy.linalg.norm')


def orth(A):
    """
    Deprecated - needs clean room implementation
    """
    raise NotImplementedError('Deprecated - needs clean room implementation')


def rank(x):
    """
    Deprecated - see :func:`numpy.rank`
    """
    raise NotImplementedError('Deprecated - see numpy.rank')


def sqrtm(x):
    """
    Deprecated - needs clean room implementation
    """
    raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm')


def mfuncC(f, x):
    """
    Deprecated
    """
    raise NotImplementedError('Deprecated - needs clean room implementation')


def approx_real(x):
    """
    Deprecated - needs clean room implementation
    """
    raise NotImplementedError('Deprecated - needs clean room implementation')
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
    ':func:`numpy.isnan` for arbitrary types'
    if cbook.is_string_like(x):
        return False
    try:
        return np.isnan(x)
    except (NotImplementedError, TypeError):
        # types np.isnan cannot handle are simply "not NaN"
        return False
def safe_isinf(x):
    ':func:`numpy.isinf` for arbitrary types'
    if cbook.is_string_like(x):
        return False
    try:
        return np.isinf(x)
    except (NotImplementedError, TypeError):
        # types np.isinf cannot handle are simply "not inf"
        return False
def rec_view(rec):
    """
    Return a view of an ndarray as a recarray

    .. seealso::

       http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html
    """
    # A view (no copy): shares the buffer but gains attribute-style access.
    return rec.view(np.recarray)
    #return rec.view(dtype=(np.record, rec.dtype), type=np.recarray)
def rec_append_field(rec, name, arr, dtype=None):
    """
    Deprecated: use :func:`rec_append_fields` instead.

    Return a new record array with field *name* populated with data from
    array *arr*.
    """
    warnings.warn("use rec_append_fields", DeprecationWarning)
    return rec_append_fields(rec, name, arr, dtype)
def rec_append_fields(rec, names, arrs, dtypes=None):
    """
    Return a new record array with field names populated with data
    from arrays in *arrs*. If appending a single field, then *names*,
    *arrs* and *dtypes* do not have to be lists. They can just be the
    values themselves.
    """
    if (not cbook.is_string_like(names) and cbook.iterable(names)
            and len(names) and cbook.is_string_like(names[0])):
        if len(names) != len(arrs):
            # raise-as-call works on Python 2 and 3; the original used the
            # Python-2-only ``raise E, msg`` statement form.
            raise ValueError("number of arrays do not match number of names")
    else:  # we have only 1 name and 1 array
        names = [names]
        arrs = [arrs]
    # list() so the result supports len() on Python 3 (map is lazy there)
    arrs = list(map(np.asarray, arrs))
    if dtypes is None:
        dtypes = [a.dtype for a in arrs]
    elif not cbook.iterable(dtypes):
        dtypes = [dtypes]
    if len(arrs) != len(dtypes):
        if len(dtypes) == 1:
            dtypes = dtypes * len(arrs)
        else:
            raise ValueError("dtypes must be None, a single dtype or a list")

    # list() around zip for Python 3 (np.dtype needs a concrete sequence)
    newdtype = np.dtype(rec.dtype.descr + list(zip(names, dtypes)))
    newrec = np.empty(rec.shape, dtype=newdtype)
    # copy the existing fields, then fill in the new ones
    for field in rec.dtype.fields:
        newrec[field] = rec[field]
    for name, arr in zip(names, arrs):
        newrec[name] = arr
    return rec_view(newrec)
def rec_drop_fields(rec, names):
    """
    Return a new numpy record array with fields in *names* dropped.
    """
    drop = set(names)
    # keep the surviving fields in their original order
    kept = [(name, rec.dtype[name]) for name in rec.dtype.names
            if name not in drop]
    newrec = np.empty(len(rec), dtype=np.dtype(kept))
    for name, _ in kept:
        newrec[name] = rec[name]
    return rec_view(newrec)
def rec_groupby(r, groupby, stats):
    """
    *r* is a numpy record array

    *groupby* is a sequence of record array attribute names that
    together form the grouping key. eg ('date', 'productcode')

    *stats* is a sequence of (*attr*, *func*, *outname*) tuples which
    will call ``x = func(attr)`` and assign *x* to the record array
    output with attribute *outname*.  For example::

      stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )

    Return record array has *dtype* names for each attribute name in
    the the *groupby* argument, with the associated group values, and
    for each outname name in the *stats* argument, with the associated
    stat summary output.
    """
    # build a dictionary from groupby keys-> list of indices into r with
    # those keys
    rowd = dict()
    for i, row in enumerate(r):
        key = tuple([row[attr] for attr in groupby])
        rowd.setdefault(key, []).append(i)

    # sort the output by groupby keys.  sorted() replaces the Python-2-only
    # ``keys = rowd.keys(); keys.sort()`` (dict views have no .sort()).
    keys = sorted(rowd)

    rows = []
    for key in keys:
        row = list(key)
        # get the indices for this groupby key
        ind = rowd[key]
        thisr = r[ind]
        # call each stat function for this groupby slice
        row.extend([func(thisr[attr]) for attr, func, outname in stats])
        rows.append(row)

    # build the output record array with groupby and outname attributes
    attrs, funcs, outnames = zip(*stats)
    names = list(groupby)
    names.extend(outnames)
    return np.rec.fromrecords(rows, names=names)
def rec_summarize(r, summaryfuncs):
    """
    *r* is a numpy record array

    *summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the the array *r*[attr] and assign the
    output to a new attribute name *outname*.  The returned record
    array is identical to *r*, with extra arrays for each element in
    *summaryfuncs*.
    """
    names = list(r.dtype.names)
    arrays = [r[name] for name in names]
    # compute the derived columns, then append them after the originals
    extra_names = [outname for attr, func, outname in summaryfuncs]
    extra_arrays = [np.asarray(func(r[attr]))
                    for attr, func, outname in summaryfuncs]
    return np.rec.fromarrays(arrays + extra_arrays,
                             names=names + extra_names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
    """
    Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
    field names -- if *key* is a string it is assumed to be a single
    attribute name. If *r1* and *r2* have equal values on all the keys
    in the *key* tuple, then their fields will be merged into a new
    record array containing the intersection of the fields of *r1* and
    *r2*.

    *r1* (also *r2*) must not have any duplicate keys.

    The *jointype* keyword can be 'inner', 'outer', 'leftouter'.  To
    do a rightouter join just reverse *r1* and *r2*.

    The *defaults* keyword is a dictionary filled with
    ``{column_name:default_value}`` pairs.

    The keywords *r1postfix* and *r2postfix* are postfixed to column names
    (other than keys) that are both in *r1* and *r2*.
    """
    if cbook.is_string_like(key):
        key = (key, )

    for name in key:
        if name not in r1.dtype.names:
            raise ValueError('r1 does not have key field {0!s}'.format(name))
        if name not in r2.dtype.names:
            raise ValueError('r2 does not have key field {0!s}'.format(name))

    def makekey(row):
        return tuple([row[name] for name in key])

    # map each key tuple to its row index; duplicate keys are not supported
    r1d = {makekey(row): i for i, row in enumerate(r1)}
    r2d = {makekey(row): i for i, row in enumerate(r2)}

    r1keys = set(r1d.keys())
    r2keys = set(r2d.keys())

    common_keys = r1keys & r2keys

    r1ind = np.array([r1d[k] for k in common_keys])
    r2ind = np.array([r2d[k] for k in common_keys])

    common_len = len(common_keys)
    left_len = right_len = 0
    if jointype == "outer" or jointype == "leftouter":
        left_keys = r1keys.difference(r2keys)
        left_ind = np.array([r1d[k] for k in left_keys])
        left_len = len(left_ind)

    if jointype == "outer":
        right_keys = r2keys.difference(r1keys)
        right_ind = np.array([r2d[k] for k in right_keys])
        right_len = len(right_ind)

    def key_desc(name):
        'if name is a string key, use the larger size of r1 or r2 before merging'
        dt1 = r1.dtype[name]
        if dt1.type != np.string_:
            return (name, dt1.descr[0][1])

        # BUGFIX: read the dtype from *r2*; the original read r1.dtype[name]
        # twice (and asserted dt2==dt1, trivially true), so r2's string
        # width was silently ignored when picking the merged field width.
        dt2 = r2.dtype[name]
        if dt1.num > dt2.num:
            return (name, dt1.descr[0][1])
        else:
            return (name, dt2.descr[0][1])

    keydesc = [key_desc(name) for name in key]

    def mapped_r1field(name):
        """
        The column name in *newrec* that corresponds to the column in *r1*.
        """
        if name in key or name not in r2.dtype.names:
            return name
        else:
            return name + r1postfix

    def mapped_r2field(name):
        """
        The column name in *newrec* that corresponds to the column in *r2*.
        """
        if name in key or name not in r1.dtype.names:
            return name
        else:
            return name + r2postfix

    r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
    r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
    newdtype = np.dtype(keydesc + r1desc + r2desc)

    newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)

    if jointype != 'inner' and defaults is not None:  # fill in the defaults enmasse
        newrec_fields = newrec.dtype.fields.keys()
        for k, v in defaults.items():
            if k in newrec_fields:
                newrec[k] = v

    # layout: [common rows | r1-only rows | r2-only rows]
    for field in r1.dtype.names:
        newfield = mapped_r1field(field)
        if common_len:
            newrec[newfield][:common_len] = r1[field][r1ind]
        if (jointype == "outer" or jointype == "leftouter") and left_len:
            newrec[newfield][common_len:(common_len + left_len)] = r1[field][left_ind]

    for field in r2.dtype.names:
        newfield = mapped_r2field(field)
        if field not in key and common_len:
            newrec[newfield][:common_len] = r2[field][r2ind]
        if jointype == "outer" and right_len:
            newrec[newfield][-right_len:] = r2[field][right_ind]

    newrec.sort(order=key)

    return rec_view(newrec)
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
            converterd=None, names=None, missing='', missingd=None,
            use_mrecords=True):
    """
    Load data from comma/space/tab delimited file in *fname* into a
    numpy record array and return the record array.

    If *names* is *None*, a header row is required to automatically
    assign the recarray names. The headers will be lower cased,
    spaces will be converted to underscores, and illegal attribute
    name characters removed. If *names* is not *None*, it is a
    sequence of names to use for the column names. In this case, it
    is assumed there is no header row.

    - *fname*: can be a filename or a file handle. Support for gzipped
      files is automatic, if the filename ends in '.gz'
    - *comments*: the character used to indicate the start of a comment
      in the file
    - *skiprows*: is the number of rows from the top to skip
    - *checkrows*: is the number of rows to check to validate the column
      data type. When set to zero all rows are validated.
    - *converted*: if not *None*, is a dictionary mapping column number or
      munged column name to a converter function.
    - *names*: if not None, is a list of header names. In this case, no
      header will be read from the file
    - *missingd* is a dictionary mapping munged column names to field values
      which signify that the field does not contain actual data and should
      be masked, e.g. '0000-00-00' or 'unused'
    - *missing*: a string whose value signals a missing field regardless of
      the column it appears in
    - *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing

    If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
    """
    if converterd is None:
        converterd = dict()
    if missingd is None:
        missingd = {}
    import dateutil.parser
    import datetime
    parsedate = dateutil.parser.parse
    fh = cbook.to_filehandle(fname)

    class FH:
        """
        For space-delimited files, we want different behavior than
        comma or tab. Generally, we want multiple spaces to be
        treated as a single separator, whereas with comma and tab we
        want multiple commas to return multiple (empty) fields. The
        join/strip trick below effects this.
        """
        def __init__(self, fh):
            self.fh = fh

        def close(self):
            self.fh.close()

        def seek(self, arg):
            self.fh.seek(arg)

        def fix(self, s):
            # collapse runs of whitespace into a single separator
            return ' '.join(s.split())

        def next(self):
            # NOTE(review): Python-2-only iterator protocol (fh.next());
            # under Python 3 this would need __next__/next(self.fh).
            return self.fix(self.fh.next())

        def __iter__(self):
            for line in self.fh:
                yield self.fix(line)

    if delimiter == ' ':
        fh = FH(fh)

    reader = csv.reader(fh, delimiter=delimiter)

    def process_skiprows(reader):
        # consume the first *skiprows* rows from the reader in place
        if skiprows:
            for i, row in enumerate(reader):
                if i >= (skiprows - 1):
                    break
        return fh, reader

    process_skiprows(reader)

    def ismissing(name, val):
        "Should the value val in column name be masked?"
        if val == missing or val == missingd.get(name) or val == '':
            return True
        else:
            return False

    def with_default_value(func, default):
        # wrap a converter so that missing cells yield *default*
        def newfunc(name, val):
            if ismissing(name, val):
                return default
            else:
                return func(val)
        return newfunc

    def mybool(x):
        if x == 'True':
            return True
        elif x == 'False':
            return False
        else:
            raise ValueError('invalid bool')

    dateparser = dateutil.parser.parse
    mydateparser = with_default_value(dateparser, datetime.date(1, 1, 1))
    myfloat = with_default_value(float, np.nan)
    myint = with_default_value(int, -1)
    mystr = with_default_value(str, '')
    mybool = with_default_value(mybool, None)

    def mydate(x):
        # try and return a date object
        d = dateparser(x)

        if d.hour > 0 or d.minute > 0 or d.second > 0:
            raise ValueError('not a date')
        return d.date()
    mydate = with_default_value(mydate, datetime.date(1, 1, 1))

    def get_func(name, item, func):
        # promote functions in this order
        funcmap = {mybool: myint, myint: myfloat, myfloat: mydate, mydate: mydateparser, mydateparser: mystr}
        try:
            func(name, item)
        except:
            if func == mystr:
                raise ValueError('Could not find a working conversion function')
            else:
                return get_func(name, item, funcmap[func])    # recurse
        else:
            return func

    # map column names that clash with builtins -- TODO - extend this list
    itemd = {
        'return' : 'return_',
        'file' : 'file_',
        'print' : 'print_',
        }

    def get_converters(reader):
        # infer a converter per column by trial-converting up to
        # *checkrows* data rows (0 means validate every row)
        converters = None
        for i, row in enumerate(reader):
            if i == 0:
                converters = [mybool] * len(row)
            if checkrows and i > checkrows:
                break
            #print i, len(names), len(row)
            #print 'converters', zip(converters, row)
            for j, (name, item) in enumerate(zip(names, row)):
                func = converterd.get(j)
                if func is None:
                    func = converterd.get(name)
                if func is None:
                    #if not item.strip(): continue
                    func = converters[j]
                    if len(item.strip()):
                        func = get_func(name, item, func)
                else:
                    # how should we handle custom converters and defaults?
                    func = with_default_value(func, None)
                converters[j] = func
        return converters

    # Get header and remove invalid characters
    needheader = names is None

    if needheader:
        for row in reader:
            #print 'csv2rec', row
            if len(row) and row[0].startswith(comments):
                continue
            headers = row
            break

        # remove these chars
        delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
        delete.add('"')

        names = []
        seen = dict()
        for i, item in enumerate(headers):
            item = item.strip().lower().replace(' ', '_')
            item = ''.join([c for c in item if c not in delete])
            if not len(item):
                item = 'column{0:d}'.format(i)

            item = itemd.get(item, item)
            cnt = seen.get(item, 0)
            # disambiguate duplicate headers with a numeric suffix
            if cnt > 0:
                names.append(item + '_{0:d}'.format(cnt))
            else:
                names.append(item)
            seen[item] = cnt + 1

    else:
        if cbook.is_string_like(names):
            names = [n.strip() for n in names.split(',')]

    # get the converter functions by inspecting checkrows
    converters = get_converters(reader)
    if converters is None:
        raise ValueError('Could not find any valid data in CSV file')

    # reset the reader and start over
    fh.seek(0)
    reader = csv.reader(fh, delimiter=delimiter)
    process_skiprows(reader)

    if needheader:
        # NOTE(review): Python-2-only; use next(reader) on Python 3
        skipheader = reader.next()

    # iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as approriate
    rows = []
    rowmasks = []
    for i, row in enumerate(reader):
        if not len(row):
            continue
        if row[0].startswith(comments):
            continue
        rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
        rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
    fh.close()

    if not len(rows):
        return None

    if use_mrecords and np.any(rowmasks):
        try:
            from numpy.ma import mrecords
        except ImportError:
            raise RuntimeError('numpy 1.05 or later is required for masked array support')
        else:
            r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
    else:
        r = np.rec.fromrecords(rows, names=names)
    return r
# a series of classes for describing the format intentions of various rec views
class FormatObj:
    """Base formatter: converts values to/from their string representation."""

    def tostr(self, x):
        """Return the display string for *x* (delegates to toval)."""
        return self.toval(x)

    def toval(self, x):
        """Return the storage value for *x* (str by default)."""
        return str(x)

    def fromstr(self, s):
        """Parse *s* back into a value (identity by default)."""
        return s
class FormatString(FormatObj):
    """String formatter: strips the quotes repr() puts around the value."""

    def tostr(self, x):
        quoted = repr(x)
        return quoted[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
    """Formatter driven by a %-style format string."""

    def __init__(self, fmt):
        self.fmt = fmt

    def tostr(self, x):
        return 'None' if x is None else self.fmt % self.toval(x)
class FormatFloat(FormatFormatStr):
    """Fixed-precision float formatter with an optional scale factor."""

    def __init__(self, precision=4, scale=1.):
        FormatFormatStr.__init__(self, '%1.{0:d}f'.format(precision))
        self.precision = precision
        self.scale = scale

    def toval(self, x):
        # None passes through untouched; everything else is scaled
        return None if x is None else x * self.scale

    def fromstr(self, s):
        return float(s) / self.scale
class FormatInt(FormatObj):
    """Integer formatter: truncates to int in both directions."""

    def fromstr(self, s):
        return int(s)

    def toval(self, x):
        return int(x)

    def tostr(self, x):
        return '{0:d}'.format(int(x))
class FormatBool(FormatObj):
    """Boolean formatter: renders via str(); parses via bool()."""

    def fromstr(self, s):
        return bool(s)

    def toval(self, x):
        return str(x)
class FormatPercent(FormatFloat):
    # renders fractions as percentages (values scaled by 100)
    def __init__(self, precision=4):
        FormatFloat.__init__(self, precision, scale=100.)


class FormatThousands(FormatFloat):
    # renders values in units of thousands (scaled down by 1e3)
    def __init__(self, precision=4):
        FormatFloat.__init__(self, precision, scale=1e-3)


class FormatMillions(FormatFloat):
    # renders values in units of millions (scaled down by 1e6)
    def __init__(self, precision=4):
        FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
    """Formatter for date objects using a strftime format string."""

    def __init__(self, fmt):
        self.fmt = fmt

    def toval(self, x):
        return 'None' if x is None else x.strftime(self.fmt)

    def fromstr(self, x):
        import dateutil.parser
        return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
    """Formatter for datetime objects (date plus time of day)."""

    def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
        FormatDate.__init__(self, fmt)

    def fromstr(self, x):
        import dateutil.parser
        return dateutil.parser.parse(x)
# Default formatter for each numpy scalar type; consulted by get_formatd()
# when the caller supplies no formatter for a column.
defaultformatd = {
    np.bool_: FormatBool(),
    np.int16: FormatInt(),
    np.int32: FormatInt(),
    np.int64: FormatInt(),
    np.float32: FormatFloat(),
    np.float64: FormatFloat(),
    np.object_: FormatObj(),
    np.string_: FormatString(),
    }
def get_formatd(r, formatd=None):
    'build a formatd guaranteed to have a key for every dtype name'
    if formatd is None:
        formatd = dict()

    for name in r.dtype.names:
        # fill in a default formatter for any column lacking one
        if formatd.get(name) is None:
            formatd[name] = defaultformatd.get(r.dtype[name].type,
                                               FormatObj())
    return formatd
def csvformat_factory(format):
    'return a copy of *format* tweaked for lossless CSV storage'
    fmt_copy = copy.deepcopy(format)
    if isinstance(fmt_copy, FormatFloat):
        # store the raw value: disable scaling and use repr formatting
        fmt_copy.scale = 1.
        fmt_copy.fmt = '%r'
    return fmt_copy
def rec2txt(r, header=None, padding=3, precision=3):
    """
    Returns a textual representation of a record array.

    *r*: numpy recarray

    *header*: list of column headers

    *padding*: space between each column

    *precision*: number of decimal places to use for floats.
        Set to an integer to apply to all floats.  Set to a
        list of integers to apply precision individually.
        Precision for non-floats is simply ignored.

    Example::

        precision=[0,2,3]

        Output::

            ID    Price   Return
            ABC   12.54    0.234
            XYZ    6.32   -0.076
    """
    if cbook.is_numlike(precision):
        precision = [precision] * len(r.dtype)

    def get_type(item, atype=int):
        # widen int -> float -> str until str(item) parses with atype
        tdict = {None: int, int: float, float: str}
        try:
            atype(str(item))
        except:
            return get_type(item, tdict[atype])
        return atype

    def get_justify(colname, column, precision):
        # return (justification, column width, format string); 0 = left, 1 = right
        ntype = type(column[0])

        if ntype == np.str or ntype == np.str_ or ntype == np.string0 or ntype == np.string_:
            length = max(len(colname), column.itemsize)
            return 0, length + padding, "%s"  # left justify

        # NOTE(review): the np.max(map(...)) calls below are Python-2-only;
        # under Python 3, map() returns an iterator that np.max can't reduce.
        if ntype == np.int or ntype == np.int16 or ntype == np.int32 or ntype == np.int64 or ntype == np.int8 or ntype == np.int_:
            length = max(len(colname), np.max(map(len, map(str, column))))
            return 1, length + padding, "%d"  # right justify

        # JDH: my powerbook does not have np.float96 using np 1.3.0
        """
        In [2]: np.__version__
        Out[2]: '1.3.0.dev5948'

        In [3]: !uname -a
        Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun  9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386

        In [4]: np.float96
        ---------------------------------------------------------------------------
        AttributeError                            Traceback (most recent call la
        """
        if ntype == np.float or ntype == np.float32 or ntype == np.float64 or (hasattr(np, 'float96') and (ntype == np.float96)) or ntype == np.float_:
            fmt = "%." + str(precision) + "f"
            length = max(len(colname), np.max(map(len, map(lambda x: fmt % x, column))))
            return 1, length + padding, fmt  # right justify

        return 0, max(len(colname), np.max(map(len, map(str, column)))) + padding, "%s"

    if header is None:
        header = r.dtype.names

    justify_pad_prec = [get_justify(header[i], r.__getitem__(colname), precision[i]) for i, colname in enumerate(r.dtype.names)]

    # fold per-column spacing adjustments in, based on the justification of
    # each column relative to its left neighbor
    justify_pad_prec_spacer = []
    for i in range(len(justify_pad_prec)):
        just, pad, prec = justify_pad_prec[i]
        if i == 0:
            justify_pad_prec_spacer.append((just, pad, prec, 0))
        else:
            pjust, ppad, pprec = justify_pad_prec[i - 1]
            if pjust == 0 and just == 1:
                justify_pad_prec_spacer.append((just, pad - padding, prec, 0))
            elif pjust == 1 and just == 0:
                justify_pad_prec_spacer.append((just, pad, prec, padding))
            else:
                justify_pad_prec_spacer.append((just, pad, prec, 0))

    def format(item, just_pad_prec_spacer):
        # render one cell with its justification, width and precision
        just, pad, prec, spacer = just_pad_prec_spacer
        if just == 0:
            return spacer * ' ' + str(item).ljust(pad)
        else:
            if get_type(item) == float:
                item = (prec % float(item))
            elif get_type(item) == int:
                item = (prec % int(item))
            return item.rjust(pad)

    textl = []
    textl.append(''.join([format(colitem, justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
    for i, row in enumerate(r):
        textl.append(''.join([format(colitem, justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
        if i == 0:
            textl[0] = textl[0].rstrip()

    text = os.linesep.join(textl)
    return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
            missingd=None):
    """
    Save the data from numpy recarray *r* into a
    comma-/space-/tab-delimited file.  The record array dtype names
    will be used for column headers.

    *fname*: can be a filename or a file handle.  Support for gzipped
      files is automatic, if the filename ends in '.gz'

    .. seealso::

        :func:`csv2rec`:
            For information about *missing* and *missingd*, which can
            be used to fill in masked values into your CSV file.
    """
    if missingd is None:
        missingd = dict()

    def with_mask(func):
        # wrap *func* so masked cells emit their missing-value marker
        def newfunc(val, mask, mval):
            if mask:
                return mval
            else:
                return func(val)
        return newfunc

    formatd = get_formatd(r, formatd)
    funcs = []
    for i, name in enumerate(r.dtype.names):
        funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))

    fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True)
    writer = csv.writer(fh, delimiter=delimiter)
    header = r.dtype.names
    writer.writerow(header)

    # Our list of specials for missing values
    mvals = []
    for name in header:
        mvals.append(missingd.get(name, missing))

    ismasked = False
    if len(r):
        row = r[0]
        # masked record arrays expose a per-field mask via _fieldmask
        ismasked = hasattr(row, '_fieldmask')

    for row in r:
        if ismasked:
            row, rowmask = row.item(), row._fieldmask.item()
        else:
            rowmask = [False] * len(row)
        writer.writerow([func(val, mask, mval) for func, val, mask, mval
                         in zip(funcs, row, rowmask, mvals)])
    if opened:
        fh.close()
def griddata(x, y, z, xi, yi):
    """
    ``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
    *f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
    vectors (*x*, *y*, *z*).  :func:`griddata` interpolates this
    surface at the points specified by (*xi*, *yi*) to produce
    *zi*. *xi* and *yi* must describe a regular grid, can be either 1D
    or 2D, but must be monotonically increasing.

    A masked array is returned if any grid points are outside convex
    hull defined by input data (no extrapolation is done).

    Uses natural neighbor interpolation based on Delaunay
    triangulation.  By default, this algorithm is provided by the
    :mod:`matplotlib.delaunay` package, written by Robert Kern.  The
    triangulation algorithm in this package is known to fail on some
    nearly pathological cases. For this reason, a separate toolkit
    (:mod:`mpl_tookits.natgrid`) has been created that provides a more
    robust algorithm fof triangulation and interpolation.  This
    toolkit is based on the NCAR natgrid library, which contains code
    that is not redistributable under a BSD-compatible license.  When
    installed, this function will use the :mod:`mpl_toolkits.natgrid`
    algorithm, otherwise it will use the built-in
    :mod:`matplotlib.delaunay` package.

    The natgrid matplotlib toolkit can be downloaded from
    http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
    """
    # prefer the optional natgrid toolkit; fall back to bundled delaunay
    try:
        from mpl_toolkits.natgrid import _natgrid, __version__
        _use_natgrid = True
    except ImportError:
        import matplotlib.delaunay as delaunay
        from matplotlib.delaunay import __version__
        _use_natgrid = False
    # report which backend is in use, once per process
    if not griddata._reported:
        if _use_natgrid:
            verbose.report('using natgrid version {0!s}'.format(__version__))
        else:
            verbose.report('using delaunay version {0!s}'.format(__version__))
        griddata._reported = True
    if xi.ndim != yi.ndim:
        raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
    if xi.ndim != 1 and xi.ndim != 2:
        raise TypeError("inputs xi and yi must be 1D or 2D.")
    if not len(x) == len(y) == len(z):
        raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
    # remove masked points.
    if hasattr(z, 'mask'):
        x = x.compress(z.mask == False)
        y = y.compress(z.mask == False)
        z = z.compressed()
    if _use_natgrid:  # use natgrid toolkit if available.
        if xi.ndim == 2:
            xi = xi[0, :]
            yi = yi[:, 0]
        # override default natgrid internal parameters.
        _natgrid.seti('ext', 0)
        _natgrid.setr('nul', np.nan)
        # cast input arrays to doubles (this makes a copy)
        x = x.astype(np.float)
        y = y.astype(np.float)
        z = z.astype(np.float)
        xo = xi.astype(np.float)
        yo = yi.astype(np.float)
        if min(xo[1:] - xo[0:-1]) < 0 or min(yo[1:] - yo[0:-1]) < 0:
            # NOTE(review): Python-2-only raise statement syntax below
            raise ValueError, 'output grid defined by xi,yi must be monotone increasing'
        # allocate array for output (buffer will be overwritten by nagridd)
        zo = np.empty((yo.shape[0], xo.shape[0]), np.float)
        _natgrid.natgridd(x, y, z, xo, yo, zo)
    else:  # use Robert Kern's delaunay package from scikits (default)
        if xi.ndim != yi.ndim:
            raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
        if xi.ndim != 1 and xi.ndim != 2:
            raise TypeError("inputs xi and yi must be 1D or 2D.")
        if xi.ndim == 1:
            xi, yi = np.meshgrid(xi, yi)
        # triangulate data
        tri = delaunay.Triangulation(x, y)
        # interpolate data
        interp = tri.nn_interpolator(z)
        zo = interp(xi, yi)
    # mask points on grid outside convex hull of input data.
    if np.any(np.isnan(zo)):
        zo = np.ma.masked_where(np.isnan(zo), zo)
    return zo
# one-time backend report flag used inside griddata()
griddata._reported = False
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
    """
    This function provides simple (but somewhat less so than
    :func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of point
    between a start and an end, while this does true linear
    interpolation at an arbitrary set of points.

    This is very inefficient linear interpolation meant to be used
    only for a small number of points in relatively non-intensive use
    cases.  For real linear interpolation, use scipy.

    *x*, *y*   : 1D data arrays of equal length (*x* assumed increasing)
    *xi*       : scalar or 1D array of abscissae to interpolate at
    *extrap*   : if True, out-of-range points are clamped to the end
                 values; otherwise they are returned as NaN
    """
    # Accept a bare scalar for xi.  np.iterable replaces the removed
    # matplotlib cbook.is_scalar helper with equivalent semantics here.
    if not np.iterable(xi):
        xi = [xi]
    x = np.asarray(x)
    y = np.asarray(y)
    xi = np.asarray(xi)

    # One output row per requested abscissa; start as all-NaN so that
    # out-of-range entries with extrap=False remain NaN.
    s = list(y.shape)
    s[0] = len(xi)
    yi = np.tile( np.nan, s )

    for ii, xx in enumerate(xi):
        bb = x == xx
        if np.any(bb):
            # Exact hit on a data point: return it directly.
            jj, = np.nonzero(bb)
            yi[ii] = y[jj[0]]
        elif xx < x[0]:
            if extrap:
                yi[ii] = y[0]
        elif xx > x[-1]:
            if extrap:
                yi[ii] = y[-1]
        else:
            # Interpolate linearly between the bracketing points jj, jj+1.
            jj, = np.nonzero(x < xx)
            jj = max(jj)
            yi[ii] = y[jj] + (xx - x[jj]) / (x[jj+1] - x[jj]) * (y[jj+1] - y[jj])

    return yi
def slopes(x,y):
    """
    :func:`slopes` calculates the slope *y*'(*x*)

    The slope is estimated using the slope obtained from that of a
    parabola through any three consecutive points.

    This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
    W. Stineman (Creative Computing July 1980) in at least one aspect:

    Circles for interpolation demand a known aspect ratio between
    *x*- and *y*-values.  For many functions, however, the abscissa
    are given in different dimensions, so an aspect ratio is
    completely arbitrary.

    The parabola method gives very similar results to the circle
    method for most regular cases but behaves much better in special
    cases.

    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de

    (inspired by a original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
    """
    # Cast key variables as float64.  (np.float_ was an alias for float64
    # and was removed in NumPy 2.0.)
    x = np.asarray(x, np.float64)
    y = np.asarray(y, np.float64)
    yp = np.zeros(y.shape, np.float64)

    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    dydx = dy/dx

    # Interior points: weighted average of the adjacent secant slopes --
    # the slope of the parabola through each triple of consecutive points.
    yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
    # End points: one-sided parabolic extrapolation of the slope.
    yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
    yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
    return yp
def stineman_interp(xi,x,y,yp=None):
    """
    Given data vectors *x* and *y*, the slope vector *yp* and a new
    abscissa vector *xi*, the function :func:`stineman_interp` uses
    Stineman interpolation to calculate a vector *yi* corresponding to
    *xi*.

    Here's an example that generates a coarse sine curve, then
    interpolates over a finer abscissa::

        x = linspace(0,2*pi,20);  y = sin(x); yp = cos(x)
        xi = linspace(0,2*pi,40);
        yi = stineman_interp(xi,x,y,yp);
        plot(x,y,'o',xi,yi)

    The interpolation method is described in the article A
    CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
    W. Stineman (Creative Computing, July 1980), described by the
    editor as "apparently a real solution" to a well known problem.

    For *yp* = *None*, the routine automatically determines the slopes
    using the :func:`slopes` routine.

    *x* is assumed to be sorted in increasing order.

    For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
    tries an extrapolation.  The relevance of the data obtained from
    this, of course, is questionable...

    Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is

    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
    """
    # Cast key variables as float64.  (np.float_ was removed in NumPy 2.0.)
    x = np.asarray(x, np.float64)
    y = np.asarray(y, np.float64)
    assert x.shape == y.shape
    if yp is None:
        yp = slopes(x, y)
    else:
        yp = np.asarray(yp, np.float64)

    xi = np.asarray(xi, np.float64)

    # calculate linear slopes
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    s = dy/dx  # note length of s is N-1 so last element is #N-2

    # find the segment each xi is in; this line actually is the key to
    # the efficiency of this implementation
    idx = np.searchsorted(x[1:-1], xi)

    # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
    # except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]

    # the y-values that would come out from a linear interpolation:
    sidx = s.take(idx)
    xidx = x.take(idx)
    yidx = y.take(idx)
    xidxp1 = x.take(idx+1)
    yo = yidx + sidx * (xi - xidx)

    # the difference that comes when using the slopes given in yp
    dy1 = (yp.take(idx) - sidx) * (xi - xidx)      # using the yp slope of the left point
    dy2 = (yp.take(idx+1) - sidx) * (xi - xidxp1)  # using the yp slope of the right point

    dy1dy2 = dy1*dy2
    # The following is optimized for Python.  The solution actually
    # does more calculations than necessary but exploiting the power
    # of numpy, this is far more efficient than coding a loop by hand
    # in Python.  (All three choices are evaluated; np.choose picks one
    # per element based on the sign of dy1*dy2.)
    yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
                                 ((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
                                  0.0,
                                  1/(dy1+dy2),))
    return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
    """
    *points* is a sequence of *x*, *y* points.
    *verts* is a sequence of *x*, *y* vertices of a polygon.

    Return value is a sequence of indices into points for the points
    that are inside the polygon.
    """
    # Boolean mask of which points fall inside the polygon, then the
    # indices of the True entries.
    inside_mask = nxutils.points_inside_poly(points, verts)
    idx, = np.nonzero(inside_mask)
    return idx
def poly_below(xmin, xs, ys):
    """
    Given a sequence of *xs* and *ys*, return the vertices of a
    polygon that has a horizontal base at *xmin* and an upper bound at
    the *ys*.  *xmin* is a scalar.

    Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::

      xv, yv = poly_below(0, x, y)
      ax.fill(xv, yv)
    """
    # Stay in the masked-array world if either input is masked.
    use_masked = ma.isMaskedArray(xs) or ma.isMaskedArray(ys)
    nx = ma if use_masked else np

    xs = nx.asarray(xs)
    ys = nx.asarray(ys)
    npts = len(xs)
    assert(npts == len(ys))

    # First half traces the data; second half returns along the base:
    # x stays at xmin while y retraces the ys in reverse.
    x = xmin * nx.ones(2 * npts)
    y = nx.ones(2 * npts)
    x[:npts] = xs
    y[:npts] = ys
    y[npts:] = ys[::-1]
    return x, y
def poly_between(x, ylower, yupper):
    """
    Given a sequence of *x*, *ylower* and *yupper*, return the polygon
    that fills the regions between them.  *ylower* or *yupper* can be
    scalar or iterable.  If they are iterable, they must be equal in
    length to *x*.

    Return value is *x*, *y* arrays for use with
    :meth:`matplotlib.axes.Axes.fill`.
    """
    # Stay in the masked-array world if any input is masked.
    if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
        nx = ma
    else:
        nx = np

    Nx = len(x)
    # Broadcast scalar bounds to full-length arrays.  np.iterable replaces
    # the removed matplotlib cbook.iterable helper (same semantics).
    if not np.iterable(ylower):
        ylower = ylower*nx.ones(Nx)
    if not np.iterable(yupper):
        yupper = yupper*nx.ones(Nx)

    # Trace forward along the upper bound, then back along the lower bound.
    x = nx.concatenate( (x, x[::-1]) )
    y = nx.concatenate( (yupper, ylower[::-1]) )
    return x,y
def is_closed_polygon(X):
    """
    Tests whether first and last object in a sequence are the same.  These
    are presumably coordinates on a polygonal curve, in which case this
    function tests if that curve is closed.
    """
    endpoints_equal = X[0] == X[-1]
    return np.all(endpoints_equal)
def contiguous_regions(mask):
    """
    return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
    True and we cover all such regions

    TODO: this is a pure python implementation which probably has a much
    faster numpy impl
    """
    regions = []
    start = None  # index where the current True-run began, or None
    idx = -1
    for idx, flag in enumerate(mask):
        if flag and start is None:
            start = idx
        elif start is not None and not flag:
            regions.append((start, idx))
            start = None
    # Close a run that extends to the end of the mask.
    if start is not None:
        regions.append((start, idx + 1))
    return regions
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
    """
    Finds the length of a set of vectors in *n* dimensions.  This is
    like the :func:`numpy.norm` function for vectors, but has the ability
    to work over a particular axis of the supplied array or matrix.

    Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
    elements of *X* along the given axis.  If *axis* is *None*,
    compute over all elements of *X*.
    """
    X = np.asarray(X)
    power_sum = np.sum(X**(P), axis=axis)
    return power_sum**(1./P)
def distances_along_curve( X ):
    """
    Computes the distance between a set of successive points in *N*
    dimensions.

    Where *X* is an *M* x *N* array or matrix.  The distances between
    successive rows is computed.  Distance is the standard Euclidean
    distance.
    """
    deltas = np.diff( X, axis=0 )
    return vector_lengths(deltas, axis=1)
def path_length(X):
    """
    Computes the distance travelled along a polygonal curve in *N*
    dimensions.

    Where *X* is an *M* x *N* array or matrix.  Returns an array of
    length *M* consisting of the distance along the curve at each point
    (i.e., the rows of *X*).
    """
    # Cumulative sum of segment lengths, with 0 prepended for the start.
    segment_lengths = distances_along_curve(X)
    return np.concatenate( (np.zeros(1), np.cumsum(segment_lengths)) )
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
    """
    Converts a quadratic Bezier curve to a cubic approximation.

    The inputs are the *x* and *y* coordinates of the three control
    points of a quadratic curve, and the output is a tuple of *x* and
    *y* coordinates of the four control points of the cubic curve.
    """
    # End points of the cubic coincide with the quadratic's end points;
    # the two interior control points lie 2/3 of the way toward the
    # quadratic's middle control point.
    c1x = q0x + 2./3. * (q1x - q0x)
    c1y = q0y + 2./3. * (q1y - q0y)
    c2x = c1x + 1./3. * (q2x - q0x)
    c2y = c1y + 1./3. * (q2y - q0y)
    return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
| runt18/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py | Python | agpl-3.0 | 104,342 | [
"Gaussian"
] | 3c42a5b83fec48b1027ac64484c9244ad5fa5c19afdf2ed424141ccd25064bf8 |
# -*- coding: utf-8 -*-
"""
This file is a part of python_translate package
(c) Adam Zieliński <adam@symfony2.guru>
For the full copyright and license information, please view the LICENSE and LICENSE_SYMFONY_TRANSLATION
files that were distributed with this source code.
"""
import ast
import codegen
from python_translate.extractors.base import Translation, TransVar, ExtensionBasedExtractor
class PythonExtractor(ExtensionBasedExtractor):
    """Translation extractor that walks Python source with the ast module,
    collecting calls to the configured tranz/tranzchoice functions.
    """

    def __init__(self, file_extensions=None, tranz_functions=None,
                 tranzchoice_functions=None):
        if file_extensions is None:
            file_extensions = ("*.py", )
        self.tranz_functions = ('_', 'tranz') if tranz_functions is None \
            else tranz_functions
        self.tranzchoice_functions = ('tranzchoice', ) \
            if tranzchoice_functions is None else tranzchoice_functions
        super(PythonExtractor, self).__init__(file_extensions=file_extensions)

    def extract_translations(self, string):
        """Extract messages from Python string."""
        syntax_tree = ast.parse(string)
        collector = TransVisitor(self.tranz_functions,
                                 self.tranzchoice_functions)
        collector.visit(syntax_tree)
        return collector.translations
class TransVisitor(ast.NodeVisitor):
    """AST visitor that records every call to one of the configured
    translation functions as a ``Translation`` entry in ``self.translations``.
    """

    def __init__(self, tranz_functions, tranzchoice_functions):
        self.tranz_functions = tranz_functions
        self.tranzchoice_functions = tranzchoice_functions
        self.translations = []
        super(TransVisitor, self).__init__()

    def visit(self, node):
        # Inspect every Call node, then keep walking the tree.
        if isinstance(node, ast.Call):
            self.process_node(node)
        return self.generic_visit(node)

    def process_node(self, node):
        """Record *node* (an ast.Call) if it calls a translation function."""
        func_name = self.get_func_name(node.func)
        if func_name not in self.tranz_functions + self.tranzchoice_functions:
            return

        kwargs = {}

        # Positional arguments
        kwargs['id'] = self.prepare_arg(
            node.args[0]) if len(node.args) > 0 else None

        idx = 1
        if func_name in self.tranzchoice_functions:
            # Only tranzchoice-style calls carry a 'number' argument.
            kwargs['number'] = self.prepare_arg(
                node.args[1]) if len(node.args) > 1 else None
            idx += 1

        kwargs['parameters'] = self.parse_kwargs(
            node.args[idx]) if len(node.args) > idx else None
        kwargs['domain'] = self.prepare_arg(
            node.args[idx + 1]) if len(node.args) > idx + 1 else None
        kwargs['locale'] = self.prepare_arg(
            node.args[idx + 2]) if len(node.args) > idx + 2 else None

        # Keyword arguments.  'number' is looked up with .get() and guarded
        # by the tranzchoice check: plain tranz calls never create that key,
        # and direct indexing used to raise KeyError here.
        if node.keywords:
            for keyword in node.keywords:
                if keyword.arg == "id" and not kwargs['id']:
                    kwargs['id'] = self.prepare_arg(keyword.value)
                if keyword.arg == "number" \
                        and func_name in self.tranzchoice_functions \
                        and not kwargs.get('number'):
                    kwargs['number'] = self.prepare_arg(keyword.value)
                if keyword.arg == "domain" and not kwargs['domain']:
                    kwargs['domain'] = self.prepare_arg(keyword.value)
                if keyword.arg == 'parameters':
                    kwargs['parameters'] = self.parse_kwargs(keyword.value)
                if keyword.arg == 'locale':
                    kwargs['locale'] = self.parse_kwargs(keyword.value)

        # *args / **kwargs splats.  Only Python < 3.5 ASTs expose the
        # starargs/kwargs attributes on Call nodes, so use getattr to avoid
        # AttributeError on newer interpreters (where splats appear as
        # Starred args / None-keyed keywords instead and are simply skipped).
        star_node = getattr(node, 'starargs', None)
        kwsplat_node = getattr(node, 'kwargs', None)
        if star_node or kwsplat_node:
            _id = "*" + star_node.id if star_node else "**" + kwsplat_node.id
            if func_name in self.tranzchoice_functions \
                    and not kwargs.get('number'):
                kwargs['number'] = TransVar(_id, TransVar.VARNAME)
            if not kwargs['id']:
                kwargs['id'] = TransVar(_id, TransVar.VARNAME)
            if not kwargs['domain']:
                kwargs['domain'] = TransVar(_id, TransVar.VARNAME)
            if not kwargs['locale']:
                kwargs['locale'] = TransVar(_id, TransVar.VARNAME)
            if not kwargs['parameters']:
                # NOTE(review): mirrors the original code, which re-parses the
                # (empty) current value and records it as unknown; the splat
                # contents are not inspected.  TODO confirm intended behavior.
                kwargs['parameters'] = self.parse_kwargs(kwargs['parameters'])

        fixed = ast.fix_missing_locations(node)
        kwargs.update({
            "is_transchoice": func_name in self.tranzchoice_functions,
            "lineno": fixed.lineno,
            "column": fixed.col_offset,
        })

        self.translations.append(Translation(**kwargs))

    def parse_kwargs(self, Dict):
        """Return a TransVar for a parameters dict literal (keys only);
        fall back to source text for anything that is not a plain dict
        with string keys.
        """
        if not isinstance(Dict, ast.Dict):
            return self.expr_to_source(Dict)
        parameters = []
        for k in Dict.keys:
            if isinstance(k, ast.Str):
                parameters.append(k.s)
            else:
                return self.expr_to_source(Dict)
        return TransVar(parameters, TransVar.LITERAL)

    def expr_to_source(self, expr):
        """Best-effort round-trip of an AST expression back to source."""
        try:
            src = codegen.to_source(expr)
        except Exception:
            src = "-unknown-"
        return TransVar(src, TransVar.UNKNOWN)

    def prepare_arg(self, value):
        """Convert an argument node into a TransVar (literal, variable
        name, or unknown)."""
        if value is None:
            return None
        if isinstance(value, ast.Str):
            return TransVar(value.s, TransVar.LITERAL)
        if isinstance(value, ast.Num):
            return TransVar(value.n, TransVar.LITERAL)
        if isinstance(value, ast.Attribute):
            return TransVar(
                value.attr if isinstance(value.attr, str) else value.attr.id,
                TransVar.VARNAME)
        if isinstance(value, ast.Call):
            return TransVar(self.get_func_name(value), TransVar.VARNAME)
        return TransVar(None, TransVar.UNKNOWN)

    def get_func_name(self, func):
        """Return the static name of a call target, or None for things
        like lambdas that have no name."""
        if isinstance(func, ast.Attribute):
            return self.get_attr_name(func.attr)
        elif isinstance(func, ast.Name):
            return func.id
        return None

    def get_attr_name(self, attr):
        return attr if isinstance(attr, str) else attr.id
| adamziel/python_translate | python_translate/extractors/python.py | Python | mit | 6,128 | [
"VisIt"
] | dbef96b9142ab48ec0602f32a13df96c10a2e9ca9d665c114d10e025ad72f6b6 |
# -*- coding: UTF-8 -*-
"""
``blast_old`` :sup:`*` - Old module. Use ``blast`` instead
-----------------------------------------------------------------
:Authors: Menachem Sklarz
:Affiliation: Bioinformatics core facility
:Organization: National Institute of Biotechnology in the Negev, Ben Gurion University.
A class that defines a module for executing blast on a nucleotide or protein fasta file.
The search can be either on a sample fasta or on a project-wide fasta.
It can use the fasta as a database or as a query.
If used as a database, you must call the makeblastdb module prior to this step.
Requires:
~~~~~~~~~~~~~
* fasta files in one of the following slots for sample-wise blast:
* ``sample_data[<sample>]["fasta.nucl"]``
* ``sample_data[<sample>]["fasta.prot"]``
* or fasta files in one of the following slots for project-wise blast:
* ``sample_data["fasta.nucl"]``
* ``sample_data["fasta.prot"]``
* or a ``makeblastdb`` index in one of the following slots:
* When 'scope' is set to 'project'
* ``sample_data["blastdb.nucl"|"blastdb.prot"]``
* ``sample_data["blastdb.log.nucl"|"blastdb.log.prot"]``
* When 'scope' is set to 'sample'
* ``sample_data[<sample>]["blastdb.nucl"|"blastdb.prot"]``
* ``sample_data[<sample>]["blastdb.log.nucl"|"blastdb.log.prot"]``
Output:
~~~~~~~~~~~~~
* puts BLAST output files in the following slots for sample-wise blast:
* ``sample_data[<sample>]["blast.nucl"|"blast.prot"]``
* ``sample_data[<sample>]["blast"]``
* puts fasta output files in the following slots for project-wise blast:
* ``sample_data["blast.nucl"|"blast.prot"]``
* ``sample_data["blast"]``
Parameters that can be set
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. csv-table::
:header: "Parameter", "Values", "Comments"
:widths: 5,10,10
"scope", "sample|project", "Set if project-wide fasta slot should be used"
"dbscope", "sample|project", "If scope=sample and you want to use internal fasta and database, this will tell "
"fasta2use", "nucl|prot", "Helps the module decide which fasta file to use."
"db2use", "nucl|prot", "Helps the module decide which database to use."
"-query | -db", "Path to fasta or BLAST index", "The sequences to use as query, using the internal database as database, or a BLAST database index, using the internal fasta as query."
Lines for parameter file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
External query, project-wise fasta database (must be preceded by ``makeblastdb`` module)::
blastOnAssembl:
module: blast_old
base: mkblst1
script_path: /path/to/bin/blastn
fasta2use: nucl
scope: project
redirects:
-evalue: '0.0001'
-num_descriptions: '20'
-num_threads: '40'
-outfmt: '"6 qseqid sallseqid qlen slen qstart qend sstart send length
evalue bitscore score pident qframe"'
-query: /path/to/query.fasta
Sample specific fasta, external database::
sprot:
module: blast_old
base: sample_assembl
script_path: /path/to/blastx
redirects:
-db: /path/to/database/index
-evalue: '0.0001'
-max_target_seqs: '5'
-num_of_proc: '20'
-num_threads: '20'
-outfmt: '"6 qseqid sallseqid qlen slen qstart qend sstart send length
evalue bitscore score pident qframe"'
scope: sample
References
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Altschul, S.F., Madden, T.L., Schäffer, A.A., Zhang, J., Zhang, Z., Miller, W. and Lipman, D.J., 1997. **Gapped BLAST and PSI-BLAST: a new generation of protein database search programs**. *Nucleic acids research*, 25(17), pp.3389-3402.
"""
import os
import sys
import re
from neatseq_flow.PLC_step import Step,AssertionExcept
__author__ = "Menachem Sklarz"
__version__ = "1.6.0"
class Step_blast_old(Step):
    """NeatSeq-Flow step that runs BLAST on a nucleotide or protein fasta,
    either per sample or project-wide, using the internal fasta either as
    the query or as the database (see the module docstring for details).
    """

    def step_specific_init(self):
        """ Called on initiation.
            Good place for parameter testing.
            Wrong place for sample data testing.
        """
        self.shell = "bash"      # Can be set to "bash" by inheriting instances
        self.file_tag = ".blast.out"

        # Check that either -db or -query (not both) are set in redir_params:
        if "-db" not in self.params["redir_params"] and "-query" not in self.params["redir_params"]:
            raise AssertionExcept("You must supply either '-db' or '-query'\n")
        if "-db" in self.params["redir_params"] and "-query" in self.params["redir_params"]:
            raise AssertionExcept("You can't supply both '-db' and '-query'\n")
        if "db2use" in self.params or "dbscope" in self.params:
            raise AssertionExcept("'db2use' and 'dbscope' are not implemented yet. Try using the alternative blast module.")

    def step_sample_initiation(self):
        """ A place to do initiation stages following setting of sample_data.
            Dispatches on 'scope': project-wide or per-sample BLAST.
        """
        if "scope" in self.params:
            if self.params["scope"]=="project":
                self.step_sample_initiation_byproject()
            elif self.params["scope"]=="sample":
                self.step_sample_initiation_bysample()
            else:
                raise AssertionExcept("'scope' must be either 'sample' or 'project'")
        else:
            raise AssertionExcept("No 'scope' specified.")

    def step_sample_initiation_bysample(self):
        """ A place to do initiation stages following setting of sample_data.
            This set of tests is performed for sample-level BLAST.
        """
        for sample in self.sample_data["samples"]:      # Getting list of samples out of samples_hash
            # if not "blast" in self.sample_data[sample].keys():
            #     self.sample_data[sample]["blast"] = dict()

            # Decide on locations of -db and -query.
            # If the user supplied -query, the sample is used as the DB,
            # which requires a prior makeblastdb step:
            if "-query" in list(self.params["redir_params"].keys()):
                if not "blastdb" in self.sample_data[sample]:
                    raise AssertionExcept("For sample-as-DB BLAST, you need to first run makeblastdb.\n\tIf the query is a project fasta, set parameter 'scope' to 'project'\n", sample)

            # Decide which fasta to use in blast:
            # This has holes. If some of the samples have only nucl and some only prot, it will not fail...
            if "fasta2use" not in list(self.params.keys()):
                # # "fasta" is not defined for the sample:
                # if not "fasta" in self.sample_data[sample].keys():
                #     raise AssertionExcept("No 'fasta' defined.\nIf the query is a project fasta, set parameter 'scope' to 'project'\n",sample)
                # Both nucl and prot exist:
                if "fasta.nucl" in self.sample_data[sample] and "fasta.prot" in self.sample_data[sample]:
                    raise AssertionExcept("There are both nucl and prot fasta files. You must supply a fasta2use param\n",sample)
                # Neither nucl nor prot exist:
                if "fasta.nucl" not in self.sample_data[sample] and "fasta.prot" not in self.sample_data[sample]:
                    raise AssertionExcept("There are neither nucl nor prot fasta files.\nIf the query is a project fasta, set parameter 'scope' to 'project'\n",sample)
                if "fasta.nucl" in list(self.sample_data[sample].keys()):
                    self.params["fasta2use"] = "nucl"
                elif "fasta.prot" in list(self.sample_data[sample].keys()):
                    self.params["fasta2use"] = "prot"
                else:
                    ""
            # if "fasta2use" in self.params.keys():
            self.fasta2use = self.params["fasta2use"]
            if not "fasta." + self.fasta2use in self.sample_data[sample]:
                raise AssertionExcept("The type you passed in fasta2use ('%s') does not exist.\nIf the query is a project fasta, set parameter 'scope' to 'project'\n" % self.params["fasta2use"], sample)
        pass

    def step_sample_initiation_byproject(self):
        """ A place to do initiation stages following setting of sample_data.
            This set of tests is performed for project-level BLAST.
        """
        # if not "blast" in self.sample_data.keys():
        #     self.sample_data["project_data"]["blast"] = dict()
        # if not "fasta" in self.sample_data.keys():
        #     raise AssertionExcept("You need a 'fasta' file defined to run BLAST.\n\tIf the 'fasta' files are per sample, remove the 'projectBLAST' parameter.\n")

        # Decide on locations of -db and -query.
        # If the user supplied -query, the project fasta is used as the DB,
        # which requires a prior makeblastdb step:
        if "-query" in list(self.params["redir_params"].keys()):
            if not "blastdb" in self.sample_data:
                raise AssertionExcept("For project-as-DB BLAST, you need to first run makeblastdb.\n")

        # Decide which fasta to use in blast:
        # This has holes. If some of the samples have only nucl and some only prot, it will not fail...
        if "fasta2use" not in list(self.params.keys()):
            # If both nucl and prot exist:
            if "fasta.nucl" in self.sample_data and "fasta.prot" in self.sample_data:
                raise AssertionExcept("There are both 'nucl' and 'prot' fasta files. You must supply a fasta2use param\n")
            # If neither nucl or prot exist:
            if "fasta.nucl" not in self.sample_data and "fasta.prot" not in self.sample_data:
                raise AssertionExcept("There are neither 'nucl' and 'prot' fasta files defined\n")
            if "fasta.nucl" in list(self.sample_data.keys()):
                self.params["fasta2use"] = "nucl"
            elif "fasta.prot" in list(self.sample_data.keys()):
                self.params["fasta2use"] = "prot"
            else:
                pass  # Shouldn't get here because of assertions above. Included elif "prot" for clarity
        # if "fasta2use" in self.params.keys():
        self.fasta2use = self.params["fasta2use"]
        if not ("fasta." + self.fasta2use) in self.sample_data:
            raise AssertionExcept("The type you passed in 'fasta2use' ('%s') does not exist for the project.\n\tIf the 'fasta' files are per sample, remove the 'projectBLAST' parameter.\n" % self.params["fasta2use"])
        # pass

    def create_spec_wrapping_up_script(self):
        """ Add stuff to check and agglomerate the output data.
        """
        if self.params["scope"]=="project":
            pass
        elif self.params["scope"]=="sample":
            self.make_sample_file_index()   # see definition below

    def build_scripts(self):
        """ This is the actual script building function.
            Dispatches to the project- or sample-level builder.
        """
        if self.params["scope"]=="project":
            self.build_scripts_byproject()
        elif self.params["scope"]=="sample":
            self.build_scripts_bysample()
        else:
            raise AssertionExcept("'scope' must be either 'sample' or 'project'")

    def build_scripts_bysample(self):
        """ Script building function for sample-level BLAST.
        """
        # Each iteration must define the following class variables:
        #    spec_script_name
        #    script
        for sample in self.sample_data["samples"]:      # Getting list of samples out of samples_hash
            # Name of specific script:
            self.spec_script_name = self.set_spec_script_name(sample)
            self.script = ""

            # Make a dir for the current sample:
            sample_dir = self.make_folder_for_sample(sample)

            # This line should be left before every new script. It sees to local issues.
            # Use the dir it returns as the base_dir for this step.
            use_dir = self.local_start(sample_dir)

            # Define output filename
            output_filename = "".join([use_dir , sample , self.file_tag])

            self.script += self.get_script_const()

            # Define query and db files:
            # If db is defined by user, set the query to the correct 'fasta2use'
            if "-db" in list(self.params["redir_params"].keys()):
                self.script += "-query %s \\\n\t" % self.sample_data[sample]["fasta." + self.fasta2use]
            # If -db is not defined by user, set the -db to the correct blastdb, with 'fasta2use'
            # -query must be set by user. assertion is made in step_specific_init()
            else:
                self.script += "-db %s \\\n\t" % self.sample_data[sample]["blastdb." + self.fasta2use]
            self.script += "-out %s\n\n" % output_filename

            # Store BLAST result file:
            self.sample_data[sample]["blast." + self.fasta2use] = (sample_dir + os.path.basename(output_filename))
            self.stamp_file(self.sample_data[sample]["blast." + self.fasta2use])
            self.sample_data[sample]["blast"] = self.sample_data[sample]["blast." + self.fasta2use]

            # Wrapping up function. Leave these lines at the end of every iteration:
            self.local_finish(use_dir,sample_dir)       # Sees to copying local files to final destination (and other stuff)
            self.create_low_level_script()

    def build_scripts_byproject(self):
        """ Script building function for project-level BLAST.
        """
        # Each iteration must define the following class variables:
        #    spec_script_name
        #    script

        # Name of specific script:
        self.spec_script_name = self.set_spec_script_name()
        self.script = ""

        # This line should be left before every new script. It sees to local issues.
        # Use the dir it returns as the base_dir for this step.
        use_dir = self.local_start(self.base_dir)

        # Define output filename
        output_filename = "".join([use_dir , self.sample_data["Title"] , self.file_tag])

        self.script += self.get_script_const()

        # Define query and db files:
        # If db is defined by user, set the query to the correct 'fasta2use'
        if "-db" in list(self.params["redir_params"].keys()):
            self.script += "-query %s \\\n\t" % self.sample_data["project_data"]["fasta." + self.fasta2use]
        # If -db is not defined by user, set the -db to the correct blastdb, with 'fasta2use'
        # -query must be set by user. assertion is made in step_specific_init()
        else:
            self.script += "-db %s \\\n\t" % self.sample_data["project_data"]["blastdb." + self.fasta2use]
        self.script += "-out %s\n\n" % output_filename

        # Store BLAST result file:
        self.sample_data["project_data"]["blast." + self.fasta2use] = (self.base_dir + os.path.basename(output_filename))
        self.stamp_file(self.sample_data["project_data"]["blast." + self.fasta2use])
        self.sample_data["project_data"]["blast"] = self.sample_data["project_data"]["blast." + self.fasta2use]

        # Wrapping up function. Leave these lines at the end of every iteration:
        self.local_finish(use_dir,self.base_dir)        # Sees to copying local files to final destination (and other stuff)
        self.create_low_level_script()

    def make_sample_file_index(self):
        """ Make file containing samples and target file names.
            This can be used by scripts called by create_spec_wrapping_up_script() to summarize the BLAST outputs.
        """
        with open(self.base_dir + "BLAST_files_index.txt", "w") as index_fh:
            index_fh.write("Sample\tBLAST_report\n")
            for sample in self.sample_data["samples"]:      # Getting list of samples out of samples_hash
                index_fh.write("%s\t%s\n" % (sample,self.sample_data[sample]["blast." + self.fasta2use]))

        self.sample_data["project_data"]["BLAST_files_index"] = self.base_dir + "BLAST_files_index.txt"
| bioinfo-core-BGU/neatseq-flow_modules | neatseq_flow_modules/main_NSF_classes/searching/blast_old.py | Python | gpl-3.0 | 16,504 | [
"BLAST"
] | 982d113465149c664a62c965ab2c2cace54147a7d31d25debea65b8fad6b122f |
"""
Routines for handling 'Projects' in Python.
"""
import ast
import imp
import os
import sys
import tarfile
import token
import traceback
from ConfigParser import SafeConfigParser
from cStringIO import StringIO
from threading import RLock
from tokenize import generate_tokens
# from pkg_resources import get_distribution, DistributionNotFound
from openmdao.main.assembly import set_as_top
from openmdao.main.component import SimulationRoot
from openmdao.main.variable import namecheck_rgx
from openmdao.main.factorymanager import create as factory_create
from openmdao.main.publisher import publish
from openmdao.util.fileutil import get_module_path, expand_path, file_md5, \
find_in_path
from openmdao.util.fileutil import find_module as util_findmodule
from openmdao.util.log import logger
from openmdao.util.astutil import parse_ast, text_to_node
# extension for project files and directories
PROJ_FILE_EXT = '.proj'
PROJ_DIR_EXT = '.projdir'
PROJ_HOME = os.path.expanduser('~/.openmdao/gui/01-codes')

# use this to keep track of project classes that have been instantiated
# so we can determine if we need to force a Project save & reload. This
# is the reason for the existence of the custom import hook classes ProjFinder
# and ProjLoader, as well as the _CtorInstrumenter ast node transformer.
#
# FIXME: This doesn't keep track of when instances are deleted, so
# it's possible that the _instantiated_classes set will contain names
# of classes that no longer have any active instances.
_instantiated_classes = set()
_instclass_lock = RLock()

_macro_lock = RLock()


def _clear_insts():
    """Forget all recorded class instantiations (thread-safe)."""
    with _instclass_lock:
        _instantiated_classes.clear()


def _register_inst(typname):
    """Record that the class with dotted name *typname* has been
    instantiated (thread-safe).  Called from instrumented __init__ code.
    """
    with _instclass_lock:
        _instantiated_classes.add(typname)


def _match_insts(classes):
    """Return the subset of *classes* that have recorded instantiations."""
    return _instantiated_classes.intersection(classes)
class _CtorInstrumenter(ast.NodeTransformer):
    """All __init__ calls will be replaced with a call to a wrapper function
    that records the call by calling _register_inst(typename) before creating
    the instance.
    """

    def __init__(self):
        super(_CtorInstrumenter, self).__init__()

    def visit_ClassDef(self, node):
        """Instrument a class definition so every construction of the class
        is recorded via _register_inst().
        """
        text = None
        # Source of the registration statement inserted into __init__.
        reg = "_register_inst('.'.join([self.__class__.__module__,self.__class__.__name__]))"
        for stmt in node.body:
            if isinstance(stmt, ast.FunctionDef) and stmt.name == '__init__':
                # Existing __init__: prepend the registration call.
                stmt.body = [text_to_node(reg, stmt.lineno)] + stmt.body
                break
        else:  # no __init__ found, make one that registers then delegates
            text = """
def __init__(self, *args, **kwargs):
    _register_inst('.'.join([self.__class__.__module__,self.__class__.__name__]))
    super(%s, self).__init__(*args, **kwargs)
""" % node.name
            node.body = [text_to_node(text, node.lineno)] + node.body

        return node
def _add_init_monitors(node):
    """Take the specified AST and translate it into the instrumented
    version, which will record all instances.
    """
    node = _CtorInstrumenter().visit(node)
    # Make _register_inst importable in the instrumented module.
    prologue = ast.copy_location(
        text_to_node('from openmdao.main.project import _register_inst'),
        node)
    node.body.insert(0, prologue)
    return node
class ProjFinder(object):
    """A finder class for custom imports from an OpenMDAO project.  For
    this to work, an entry must be added to sys.path of the form
    ``top_dir+PROJ_DIR_EXT``, where `top_dir` is the top directory of the
    project where Python files are kept.
    """

    def __init__(self, path_entry):
        """Accept only path entries of the form ``top_dir+PROJ_DIR_EXT``
        whose corresponding project directory exists; otherwise raise
        ImportError so the import machinery tries the next finder.
        """
        projdir = os.path.splitext(path_entry)[0]
        if path_entry.endswith(PROJ_DIR_EXT) and os.path.isdir(projdir):
            self.path_entry = path_entry
            self.projdir = projdir
            if os.path.isdir(self.projdir):
                return
        raise ImportError("can't import from %s" % path_entry)

    def find_module(self, modpath, path=None):
        """This looks within the project for the specified module, returning
        a loader if the module is found and None if it isn't.
        """
        if path is None:
            path = self.path_entry
        found = util_findmodule(modpath, path=[self.projdir])
        if found:
            return ProjLoader(path)
        return None
class ProjLoader(object):
    """This is the import loader for files within an OpenMDAO project.
    We use it to instrument the imported files so we can keep track of what
    classes have been instantiated so we know when a project must be reloaded.
    """

    def __init__(self, path_entry):
        self.path_entry = path_entry
        self.projdir = os.path.splitext(path_entry)[0]

    def _get_filename(self, modpath):
        """Map a dotted module path to its source file inside the project
        directory (package dirs map to their __init__.py)."""
        parts = [self.projdir] + modpath.split('.')
        path = os.path.join(*parts)
        if os.path.isdir(path):
            return os.path.join(path, '__init__.py')
        else:
            return path + '.py'

    def is_package(self, modpath):
        """Return True if *modpath* names a package (a directory with an
        existing __init__.py)."""
        fpath = self._get_filename(modpath)
        return os.path.basename(fpath) == '__init__.py' and os.path.isfile(fpath)

    def get_source(self, modpath):
        """Return the source text of the module."""
        with open(self._get_filename(modpath), 'r') as f:
            return f.read()

    def get_code(self, modpath):
        """Opens the file, compiles it into an AST, and then translates it
        into the instrumented version before compiling that into bytecode.
        """
        contents = self.get_source(modpath)
        fname = self._get_filename(modpath)
        root = parse_ast(contents, fname, mode='exec')
        return compile(_add_init_monitors(root), fname, 'exec')

    def load_module(self, modpath):
        """Creates a new module if one doesn't exist already and then updates
        the dict of that module based on the contents of the instrumented
        module file.
        """
        if modpath in sys.modules:
            mod = sys.modules[modpath]
        else:
            # Register the module before exec so recursive imports resolve.
            mod = sys.modules.setdefault(modpath, imp.new_module(modpath))
        mod.__file__ = self._get_filename(modpath)
        mod.__name__ = modpath
        mod.__loader__ = self
        mod.__package__ = '.'.join(modpath.split('.')[:-1])
        if self.is_package(modpath):
            mod.__path__ = [self.path_entry]
        else:
            mod.__path__ = self.path_entry
        try:
            code = self.get_code(modpath)
            exec (code, mod.__dict__)
        except Exception as err:
            del sys.modules[modpath]  # remove bad module
            raise type(err)("Error in file %s: %s"
                            % (os.path.basename(mod.__file__), err))
        return mod
def parse_archive_name(pathname):
    """Return the name of the project given the pathname of a project
    archive file.
    """
    base = os.path.basename(pathname)
    name, _ext = os.path.splitext(base)
    return name
def project_from_archive(archive_name, proj_name=None, dest_dir=None,
create=True, overwrite=False):
"""Expand the given project archive file in the specified destination
directory and return a Project object that points to the newly
expanded project.
archive_name: str
Path to the project archive to be expanded.
proj_name: str (optional)
Name of the new project. Defaults to the name of the project contained
in the name of the archive.
dest_dir: str (optional)
Directory where the project directory for the expanded archive will
reside. Defaults to the directory where the archive is located.
create: bool (optional)
If True, create and return a Project object. Otherwise, just unpack the
project directory.
"""
archive_name = expand_path(archive_name)
if dest_dir is None:
dest_dir = os.path.dirname(archive_name)
else:
dest_dir = expand_path(dest_dir)
if proj_name is None:
proj_name = parse_archive_name(archive_name)
projpath = os.path.join(dest_dir, proj_name)
if not overwrite and os.path.exists(projpath):
raise RuntimeError("Directory '%s' already exists" % projpath)
if not os.path.exists(projpath):
os.mkdir(projpath)
if os.path.getsize(archive_name) > 0:
try:
f = open(archive_name, 'rb')
tf = tarfile.open(fileobj=f, mode='r')
tf.extractall(projpath)
except Exception as err:
logger.error(str(err))
print "Error expanding project archive:", err
finally:
tf.close()
if create:
return Project(projpath)
# def find_distrib_for_obj(obj):
# """Return the name of the distribution containing the module that
# contains the given object, or None if it's not part of a distribution.
# """
# try:
# fname = getfile(obj)
# except TypeError:
# return None
# modpath = get_module_path(fname)
# parts = modpath.split('.')
# l = len(parts)
# for i in range(l):
# try:
# dist = get_distribution('.'.join(parts[:l-i]))
# except DistributionNotFound:
# continue
# return dist
# return None
# Method calls always stripped from a recorded macro by _filter_macro:
# re-running a macro should rebuild the model, not re-run it.
_excluded_calls = set(['run', 'execute'])

# def _check_hierarchy(pathname, objs):
#     # any operation we apply to a given object will be cancelled
#     # out if that object or any of its parents are overwritten
#     # later. Returns True if the command applying to the object
#     # indicated by the given pathname should be filtered out.
#     if pathname in objs:
#         return True
#     for name in objs:
#         if pathname.startswith(name + '.'):
#             return True
#     return False


def _filter_macro(lines):
    """Removes commands from a macro that are overridden by later commands.

    lines: list of str
        Macro commands, oldest first.

    Returns the filtered commands, still oldest first.
    """
    # FIXME: this needs a lot of work. Things get a little messy when you have
    # rename and move calls mixed in and I didn't have time to sort out those issues yet,
    # so right now I'm just filtering out multiple execfile() calls and all calls to
    # run() and execute().
    filt_lines = []
    # assigns = set()
    execs = set()
    # objs = set()

    # Walk newest-first so only the *latest* execfile() of each file survives;
    # the accumulated result is reversed before returning.
    for line in lines[::-1]:
        stripped = line.strip()
        if stripped.startswith('execfile'):
            # Tokenize so we only match an actual execfile('<name>') call.
            lst = list(generate_tokens(StringIO(stripped).readline))
            if lst[0][1] == 'execfile':
                if lst[2][0] == token.STRING:
                    fname = lst[2][1].strip("'").strip('"')
                    if fname in execs:
                        continue
                    else:
                        execs.add(fname)
        else:
            # namecheck_rgx (module-level regex) matches a dotted name at the
            # start of the command, e.g. "top.comp.run".
            match = namecheck_rgx.match(stripped)
            if match:
                full = match.group()
                # rest = stripped[len(full):].strip()
                parts = full.rsplit('.', 1)
                if len(parts) > 1:
                    # remove calls to run, execute, ...
                    if parts[1] in _excluded_calls:
                        continue
                    # elif parts[1] in ['add', 'remove']:
                    #     lst = list(generate_tokens(StringIO(rest).readline))
                    #     if lst[1][0] == token.STRING:
                    #         pathname = '.'.join([parts[0],
                    #                              lst[1][1].strip("'").strip('"')])
                    #         if _check_hierarchy(pathname, objs):
                    #             continue
                    #         objs.add(pathname)
                    #         if parts[1] == 'remove':  # don't include the remove command
                    #             continue              # since there won't be anything to remove
                    ## only keep the most recent assignment to any variable, and throw away
                    ## assigns to variables in objects that have been overridden by newer ones with
                    ## the same name.
                    # if rest.startswith('='):
                    #     if full in assigns or _check_hierarchy(full, objs):
                    #         continue
                    #     else:
                    #         assigns.add(full)
        filt_lines.append(line)

    return filt_lines[::-1]  # reverse the result
class Project(object):
def __init__(self, projpath, gui=True, globals_dict=None):
"""Initializes a Project containing the project found in the
specified directory or creates a new project if 'create' is True
and one doesn't exist.
projpath: str
Path to the project's directory.
gui: bool (optional) (default=True)
GUI is running. This determines how the project is loaded.
globals_dict: dict (optional)
If this is not None, it will be populated with the objects
from the project model. Otherwise the Project will use its
own internal dict.
"""
self._recorded_cmds = []
self._cmds_to_save = []
self.path = expand_path(projpath)
self._project_globals = globals_dict if globals_dict is not None else {}
self._gui = gui
self.macrodir = os.path.join(self.path, '_macros')
self.macro = 'default'
if gui and not os.path.isdir(self.macrodir):
os.makedirs(self.macrodir)
settings = os.path.join(self.path, '_settings.cfg')
if gui and not os.path.isfile(settings):
self._create_config()
self.config = SafeConfigParser()
self.config.optionxform = str # Preserve case.
files = self.config.read(settings)
if not files:
self._error("Failed to read project config file")
def _create_config(self):
"""Create the initial _settings.cfg file for the project."""
settings = """
[preferences]
export_repo = false
[info]
version = 0
description =
"""
with open(os.path.join(self.path, '_settings.cfg'), 'wb') as f:
f.write(settings)
def get_info(self):
""" Return settings 'info' section as a dictionary. """
return dict(self.config.items('info'))
def set_info(self, info):
""" Set settings 'info' section from `info` dictionary. """
for key, value in info.items():
self.config.set('info', key, value)
with open(os.path.join(self.path, '_settings.cfg'), 'wb') as f:
self.config.write(f)
def create(self, typname, version=None, server=None, res_desc=None, **ctor_args):
if server is None and res_desc is None and typname in self._project_globals:
return getattr(self._project_globals, typname)(**ctor_args)
return factory_create(typname, version, server, res_desc, **ctor_args)
@property
def name(self):
return os.path.basename(self.path)
def __contains__(self, pathname):
parts = pathname.split('.')
try:
obj = self._project_globals[parts[0]]
for name in parts[1:]:
obj = getattr(obj, name)
except Exception:
return False
return True
def items(self):
return self._project_globals.items()
def _error(self, msg, errclass=RuntimeError):
if self._gui:
logger.error(msg)
else:
raise errclass(msg)
def execfile(self, fname, digest=None):
# first, make sure file has been imported
__import__(get_module_path(fname))
newdigest = file_md5(fname)
if digest and digest != newdigest:
logger.warning("file '%s' has been modified since the last time"
" it was exec'd" % fname)
with open(fname) as f:
contents = f.read()
node = _add_init_monitors(parse_ast(contents, fname, mode='exec'))
exec compile(node, fname, 'exec') in self._project_globals
# make the recorded execfile command use the current md5 hash
self._cmds_to_save.append("execfile('%s', '%s')" % (fname, newdigest))
def get(self, pathname):
parts = pathname.split('.')
try:
obj = self._project_globals[parts[0]]
for name in parts[1:]:
obj = getattr(obj, name)
except (KeyError, AttributeError) as err:
raise AttributeError("'%s' not found: %s" % (pathname, str(err)))
return obj
def load_macro(self, macro_name):
fpath = os.path.join(self.macrodir, macro_name)
self._recorded_cmds = []
with open(fpath, 'rU') as f:
content = f.read()
# fix missing newline at end of file to avoid issues later when
# we append to it
if not content.endswith('\n'):
with open(fpath, 'a') as f:
f.write('\n')
lines = content.split('\n')
for i, line in enumerate(lines):
logger.debug(line)
try:
self.command(line, save=False)
except Exception as err:
logger.error(''.join(traceback.format_tb(sys.exc_info()[2])))
msg = str(err)
if self._gui:
try:
publish('console_errors', msg)
except:
logger.error("publishing of error failed")
else:
raise RuntimeError(msg)
def _save_command(self, save):
"""Save the current command(s) to the macro file."""
self._recorded_cmds.extend(self._cmds_to_save)
if save:
with open(os.path.join(self.macrodir, self.macro), 'a') as f:
for cmd in self._cmds_to_save:
f.write(cmd + '\n')
self._cmds_to_save = []
def command(self, cmd, save=True):
err = None
result = None
self._cmds_to_save = []
try:
code = compile(cmd, '<string>', 'eval')
except SyntaxError:
try:
exec (cmd) in self._project_globals
except Exception as err:
pass
else:
try:
result = eval(code, self._project_globals)
except Exception as err:
pass
if err:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.error("command '%s' generated an exception:\n %s",
cmd, ''.join(lines))
raise
else:
if not self._cmds_to_save:
self._cmds_to_save.append(cmd)
self._save_command(save)
return result
def _initialize(self):
if os.path.isfile(os.path.join(self.macrodir, self.macro)):
logger.info('Reconstructing project using %s macro' % self.macro)
self.load_macro(self.macro)
else:
self.command("# Auto-generated file - MODIFY AT YOUR OWN RISK")
def _init_globals(self):
self._project_globals['create'] = self.create # add create funct here so macros can call it
self._project_globals['execfile'] = self.execfile
self._project_globals['set_as_top'] = set_as_top
if self._gui:
self._project_globals[
'__name__'] = '__main__' # set name to __main__ to allow execfile to work the way we want
def activate(self):
"""Make this project active by putting its directory on sys.path and
executing its macro.
"""
if self._gui:
# set SimulationRoot and put our path on sys.path
SimulationRoot.chroot(self.path)
self.add_to_path()
# set up the project
self._init_globals()
self._initialize()
def add_to_path(self):
"""Puts this project's directory on sys.path so that imports from it
will be processed by our special loader.
"""
if self._gui:
# this weird extension is needed in order for our import hook
# to fire
projectdir = self.path + PROJ_DIR_EXT
if projectdir not in sys.path:
sys.path = [projectdir] + sys.path
else:
if self.path not in sys.path:
sys.path = [self.path] + sys.path
def deactivate(self):
"""Removes this project's directory from sys.path."""
projectdir = self.path
try:
if self._gui:
sys.path.remove(projectdir + PROJ_DIR_EXT)
else:
sys.path.remove(self.path)
except:
pass
def export(self, projname=None, destdir='.'):
"""Creates an archive of the current project for export.
projname: str (optional)
The name that the project in the archive will have. Defaults to
the current project name.
destdir: str (optional)
The directory where the project archive will be placed. Defaults to
the current directory.
"""
export_repo = self.config.getboolean("preferences", "export_repo")
excludes = ['.git', '.bzr', '.hg', '.projrepo']
ddir = expand_path(destdir)
if projname is None:
projname = self.name
if os.path.basename(ddir) not in excludes and ddir.startswith(self.path):
# the project contains the dest directory... bad
raise RuntimeError("Destination directory for export (%s) is within"
" project directory (%s)" % (ddir, self.path))
startdir = os.getcwd()
os.chdir(self.path)
try:
try:
fname = os.path.join(ddir, projname + PROJ_FILE_EXT)
f = open(fname, 'wb')
tf = tarfile.open(fileobj=f, mode='w:gz')
for entry in os.listdir(self.path):
if export_repo or entry not in excludes:
tf.add(entry)
except Exception as err:
print "Error creating project archive:", err
fname = None
finally:
tf.close()
finally:
os.chdir(startdir)
return fname
def load_project(pname, globals_dict=None):
    """Load the model from the named project into the current
    globals dict so that the model can be used as part of a
    python script outside of the GUI. pname can either be an
    absolute or relative path to a project directory, or just
    a project name. If it's a project name, the project directory
    will be searched for in PATH, and if not found there will be
    searched for in $HOME/.openmdao/gui/01-codes.
    """
    # First interpretation: pname is a path (relative or absolute).
    candidate = os.path.abspath(pname)
    if not os.path.isdir(candidate):
        # Second interpretation: a bare project name found on PATH...
        candidate = find_in_path(pname)
        if candidate is None:
            # ...or finally, a project under the user's project home.
            home_candidate = os.path.join(PROJ_HOME, pname)
            if os.path.isdir(home_candidate):
                candidate = home_candidate
    if candidate is None:
        raise RuntimeError("can't locate project '%s'" % pname)

    project = Project(candidate, gui=False, globals_dict=globals_dict)
    project.activate()
    return project
def list_projects():
    """Return a sorted list of the project directories under PROJ_HOME."""
    names = os.listdir(PROJ_HOME)
    names.sort()
    return names
| DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.main/src/openmdao/main/project.py | Python | mit | 23,432 | [
"VisIt"
] | 4598782f5dd96e2632cbe272283bc0a28d3becbfcc5399998c8463a2419ed85d |
# coding: utf-8
# # Using Python to Access NEXRAD Level 2 Data from Unidata THREDDS Server
# This is a modified version of Ryan May's notebook here:
# http://nbviewer.jupyter.org/gist/dopplershift/356f2e14832e9b676207
#
# The TDS provides a mechanism to query for available data files, as well as provides access to the data as native volume files, through OPeNDAP, and using its own CDMRemote protocol. Since we're using Python, we can take advantage of Unidata's Siphon package, which provides an easy API for talking to THREDDS servers.
#
# Bookmark these resources for when you want to use Siphon later!
# + [latest Siphon documentation](http://siphon.readthedocs.org/en/latest/)
# + [Siphon github repo](https://github.com/Unidata/siphon)
# + [TDS documentation](http://www.unidata.ucar.edu/software/thredds/current/tds/TDS.html)
# ## Downloading the single latest volume
#
# Just a bit of initial set-up to use inline figures and quiet some warnings.
# In[1]:
import matplotlib
import warnings

# Quiet matplotlib deprecation chatter in the rendered notebook output.
warnings.filterwarnings("ignore", category=matplotlib.cbook.MatplotlibDeprecationWarning)
get_ipython().magic(u'matplotlib inline')

# First we'll create an instance of RadarServer to point to the appropriate radar server access URL.

# In[2]:

# The archive of data on S3 URL did not work for me, despite .edu domain
#url = 'http://thredds-aws.unidata.ucar.edu/thredds/radarServer/nexrad/level2/S3/'

#Trying motherlode URL
url = 'http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/'
from siphon.radarserver import RadarServer
rs = RadarServer(url)

# Next, we'll create a new query object to help request the data. Using the chaining methods, let's ask for the latest data at the radar KLVX (Louisville, KY). We see that when the query is represented as a string, it shows the encoded URL.

# In[3]:

from datetime import datetime, timedelta
query = rs.query()
query.stations('KLVX').time(datetime.utcnow())

# We can use the RadarServer instance to check our query, to make sure we have required parameters and that we have chosen valid station(s) and variable(s)
#

# In[4]:

rs.validate_query(query)

# Make the request, which returns an instance of TDSCatalog; this handles parsing the returned XML information.

# In[5]:

catalog = rs.get_catalog(query)

# We can look at the datasets on the catalog to see what data we found by the query. We find one volume in the return, since we asked for the volume nearest to a single time.

# In[6]:

catalog.datasets

# We can pull that dataset out of the dictionary and look at the available access URLs. We see URLs for OPeNDAP, CDMRemote, and HTTPServer (direct download).

# In[7]:

ds = list(catalog.datasets.values())[0]
ds.access_urls

# We'll use the CDMRemote reader in Siphon and pass it the appropriate access URL.

# In[8]:

from siphon.cdmr import Dataset
data = Dataset(ds.access_urls['CdmRemote'])

# We define some helper functions to make working with the data easier. One takes the raw data and converts it to floating point values with the missing data points appropriately marked. The other helps with converting the polar coordinates (azimuth and range) to Cartesian (x and y).

# In[9]:

import numpy as np
def raw_to_masked_float(var, data):
    """Turn raw counts from *var* into scaled, masked floating-point values.

    Values come back signed; when the variable carries the ``_Unsigned``
    attribute they are reinterpreted into the [0, 255] range. Zero counts
    mark missing data and are masked before applying scale/offset.
    """
    counts = data
    if var._Unsigned:
        counts = counts & 255

    # Zero is the sentinel for "no data" -- mask those points out.
    masked = np.ma.array(counts, mask=(counts == 0))

    # Apply the linear unpacking transform stored on the variable.
    return masked * var.scale_factor + var.add_offset
def polar_to_cartesian(az, rng):
    """Convert radar polar coordinates to Cartesian offsets from the radar.

    az: 1-D array of azimuths in degrees (0 = north, clockwise).
    rng: 1-D array of ranges.

    Returns (x, y) 2-D arrays of shape (len(az), len(rng)).
    """
    theta = np.deg2rad(az)[:, None]  # column vector -> broadcast over rng
    east = rng * np.sin(theta)
    north = rng * np.cos(theta)
    return east, north
# The CDMRemote reader provides an interface that is almost identical to the usual python NetCDF interface. We pull out the variables we need for azimuth and range, as well as the data itself.

# In[10]:

# Sweep 0 is the first (lowest-elevation, high-resolution) scan in the volume.
sweep = 0
ref_var = data.variables['Reflectivity_HI']
ref_data = ref_var[sweep]
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]

# Then convert the raw data to floating point values and the polar coordinates to Cartesian.

# In[11]:

ref = raw_to_masked_float(ref_var, ref_data)
x, y = polar_to_cartesian(az, rng)

# MetPy is a Python package for meteorology (Documentation: http://metpy.readthedocs.org and GitHub: http://github.com/MetPy/MetPy). We import MetPy and use it to get the colortable and value mapping information for the NWS Reflectivity data.

# In[12]:

from metpy.plots import ctables  # For NWS colortable
ref_norm, ref_cmap = ctables.registry.get_with_steps('NWSReflectivity', 5, 5)

# Finally, we plot them up using matplotlib and cartopy. We create a helper function for making a map to keep things simpler later.

# In[13]:

import matplotlib.pyplot as plt
import cartopy
def new_map(fig, lon, lat):
    """Add a LambertConformal map axes centered on (lon, lat) to *fig*.

    Returns the new axes with coastlines and US state borders drawn, so
    radar-relative x/y data can be plotted on it directly.
    """
    projection = cartopy.crs.LambertConformal(central_longitude=lon,
                                              central_latitude=lat)
    axes = fig.add_subplot(1, 1, 1, projection=projection)
    axes.coastlines('50m', 'black', linewidth=2, zorder=2)
    borders = cartopy.feature.NaturalEarthFeature(
        category='cultural', name='admin_1_states_provinces_lines',
        scale='50m', facecolor='none')
    axes.add_feature(borders, edgecolor='black', linewidth=1, zorder=3)
    return axes
# ## Download a collection of historical data

# This time we'll make a query based on a longitude, latitude point and using a time range.

# In[14]:

# Our specified time
#dt = datetime(2012, 10, 29, 15) # Superstorm Sandy
#dt = datetime(2016, 6, 18, 1)
dt = datetime(2016, 6, 8, 18)
query = rs.query()
query.lonlat_point(-73.687, 41.175).time_range(dt, dt + timedelta(hours=1))

# The specified longitude, latitude are in NY and the TDS helpfully finds the closest station to that point. We can see that for this time range we obtained multiple datasets.

# In[15]:

cat = rs.get_catalog(query)
cat.datasets

# Grab the first dataset so that we can get the longitude and latitude of the station and make a map for plotting. We'll go ahead and specify some longitude and latitude bounds for the map.

# In[16]:

ds = list(cat.datasets.values())[0]
data = Dataset(ds.access_urls['CdmRemote'])

# Pull out the data of interest
sweep = 0
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]
ref_var = data.variables['Reflectivity_HI']

# Convert data to float and coordinates to Cartesian
ref = raw_to_masked_float(ref_var, ref_var[sweep])
x, y = polar_to_cartesian(az, rng)

# Use the function to make a new map and plot a colormapped view of the data

# In[17]:

fig = plt.figure(figsize=(10, 10))
ax = new_map(fig, data.StationLongitude, data.StationLatitude)

# Set limits in lat/lon space
ax.set_extent([-77, -70, 38, 43])

# Add ocean and land background
ocean = cartopy.feature.NaturalEarthFeature('physical', 'ocean', scale='50m',
                                            edgecolor='face',
                                            facecolor=cartopy.feature.COLORS['water'])
land = cartopy.feature.NaturalEarthFeature('physical', 'land', scale='50m',
                                           edgecolor='face',
                                           facecolor=cartopy.feature.COLORS['land'])
ax.add_feature(ocean, zorder=-1)
ax.add_feature(land, zorder=-1)

ax.pcolormesh(x, y, ref, cmap=ref_cmap, norm=ref_norm, zorder=0);

# In[ ]:
"NetCDF"
] | 054bdae1152bffe46aa135246a5de2c1e4652994519ec75b7fcf373a5acba153 |
"""
Utilities to create replication transformations
"""
from DIRAC.TransformationSystem.Client.Transformation import Transformation
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC import gLogger, S_OK, S_ERROR
def createDataTransformation(flavour, targetSE, sourceSE,
                             metaKey, metaValue,
                             extraData=None, extraname='',
                             groupSize=1,
                             plugin='Broadcast',
                             tGroup=None,
                             tBody=None,
                             enable=False,
                             ):
    """Creates the replication transformation based on the given parameters.

    :param str flavour: Flavour of replication to create: Replication or Moving
    :param targetSE: Destination for files
    :type targetSE: python:list or str
    :param str sourceSE: Origin of files.
    :param metaKey: Meta key to identify input files
    :param metaValue: Meta value to identify input files
    :param dict extraData: Additional meta data to use to identify input files
    :param str extraname: addition to the transformation name, only needed if the same transformation was already created
    :param int groupSize: number of files per transformation task
    :param str plugin: plugin to use
    :param str tGroup: transformation group to set
    :param tBody: transformation body to set
    :param bool enable: if true submit the transformation, otherwise dry run
    :returns: S_OK, S_ERROR
    """
    metadata = {metaKey: metaValue}
    if isinstance(extraData, dict):
        metadata.update(extraData)
    gLogger.debug("Using %r for metadata search" % metadata)

    # A single SE name is accepted and normalized to a one-element list.
    if isinstance(targetSE, basestring):
        targetSE = [targetSE]

    if flavour not in ('Replication', 'Moving'):
        return S_ERROR('Unsupported flavour %s' % flavour)

    transVerb = {'Replication': 'Replicate', 'Moving': 'Move'}[flavour]
    transGroup = {'Replication': 'Replication', 'Moving': 'Moving'}[flavour] if not tGroup else tGroup

    trans = Transformation()
    transName = '%s_%s_%s' % (transVerb, str(metaValue), ",".join(targetSE))
    if extraname:
        transName += "_%s" % extraname

    trans.setTransformationName(transName)
    description = '%s files for %s %s to %s' % (transVerb, metaKey, str(metaValue), ",".join(targetSE))
    trans.setDescription(description)
    trans.setLongDescription(description)
    trans.setType('Replication')
    trans.setTransformationGroup(transGroup)
    trans.setGroupSize(groupSize)
    trans.setPlugin(plugin)

    # Moving = replicate to the targets, then drop the source replica;
    # plain Replication uses an empty body (default behaviour).
    transBody = {'Moving': [("ReplicateAndRegister", {"SourceSE": sourceSE, "TargetSE": targetSE}),
                            ("RemoveReplica", {"TargetSE": sourceSE})],
                 'Replication': '',  # empty body
                 }[flavour] if tBody is None else tBody

    trans.setBody(transBody)

    if sourceSE:
        res = trans.setSourceSE(sourceSE)
        if not res['OK']:
            return S_ERROR("SourceSE not valid: %s" % res['Message'])

    res = trans.setTargetSE(targetSE)
    if not res['OK']:
        return S_ERROR("TargetSE not valid: %s" % res['Message'])

    if not enable:
        gLogger.always("Dry run, not creating transformation")
        return S_OK()

    res = trans.addTransformation()
    if not res['OK']:
        return res
    gLogger.verbose(res)
    trans.setStatus('Active')
    trans.setAgentType('Automatic')
    currtrans = trans.getTransformationID()['Value']
    client = TransformationClient()
    # Attach the metadata query so matching files feed the transformation.
    res = client.createTransformationInputDataQuery(currtrans, metadata)
    if not res['OK']:
        return res
    gLogger.always("Successfully created replication transformation")
    return S_OK()
| arrabito/DIRAC | TransformationSystem/Utilities/ReplicationTransformation.py | Python | gpl-3.0 | 3,648 | [
"DIRAC"
] | c4021ccac66445a3c797034750b4e2b069fbfca6358656e23212292ba66f5b2b |
"""
select
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from PySide import QtGui
from mcedit2.editortools import EditorTool
from mcedit2.util.bresenham import bresenham
from mcedit2.util.load_ui import load_ui
log = logging.getLogger(__name__)
class SelectEntityCommand(QtGui.QUndoCommand):
    """Undoable command that changes the active entity-selection ray of a
    SelectEntityTool.
    """

    def __init__(self, tool, ray, *args, **kwargs):
        QtGui.QUndoCommand.__init__(self, *args, **kwargs)
        self.setText("Select Entity")
        self.ray = ray
        self.tool = tool
        # Captured by redo(); the ray that was active before this command.
        self.previousRay = None

    def undo(self):
        # Bug fix: the original re-applied self.ray here, so undoing the
        # command left the new selection in place instead of restoring the
        # ray captured in redo().
        if self.previousRay is not None:
            self.tool.setSelectionRay(self.previousRay)

    def redo(self):
        self.previousRay = self.tool.selectionRay
        self.tool.setSelectionRay(self.ray)
class SelectEntityTool(EditorTool):
    """Editor tool that lists the entities hit by a mouse-ray and lets the
    user inspect one by clicking its row in the table.
    """
    name = "Select Entity"
    iconName = "edit_entity"
    selectionRay = None
    currentEntity = None

    def __init__(self, editorSession, *args, **kwargs):
        """
        :type editorSession: EditorSession
        """
        super(SelectEntityTool, self).__init__(editorSession, *args, **kwargs)
        self.toolWidget = load_ui("editortools/select_entity.ui")
        self.toolWidget.tableWidget.cellClicked.connect(self.cellWasClicked)
        self.toolWidget.tableWidget.setColumnCount(2)
        self.toolWidget.tableWidget.setHorizontalHeaderLabels(["ID", "Position"])
        self.selectedEntities = []

    def mousePress(self, event):
        # Route the change through the undo stack so selection is undoable.
        command = SelectEntityCommand(self, event.ray)
        self.editorSession.pushCommand(command)

    def setSelectionRay(self, ray):
        """Find the entities along `ray` and display them in the table."""
        self.selectionRay = ray
        editorSession = self.editorSession
        entities = entitiesOnRay(editorSession.currentDimension, ray)

        tableWidget = self.toolWidget.tableWidget
        # Bug fix: QTableWidget.clear() also removes the header labels that
        # were set once in __init__, so "ID"/"Position" vanished after the
        # first selection. clearContents() keeps the headers.
        tableWidget.clearContents()
        self.selectedEntities = list(entities)
        # Also resets the row count, so stale rows don't linger when the
        # new selection is empty.
        tableWidget.setRowCount(len(self.selectedEntities))
        if len(self.selectedEntities):
            for row, e in enumerate(self.selectedEntities):
                pos = e.Position
                tableWidget.setItem(row, 0, QtGui.QTableWidgetItem(e.id))
                tableWidget.setItem(row, 1, QtGui.QTableWidgetItem("%0.2f, %0.2f, %0.2f" % (pos[0], pos[1], pos[2])))
        # Always refresh the inspector; cellWasClicked handles the
        # empty-selection case by inspecting None.
        self.cellWasClicked(0, 0)

    def cellWasClicked(self, row, column):
        """Inspect the entity on the clicked row (or nothing if empty)."""
        if len(self.selectedEntities):
            self.currentEntity = self.selectedEntities[row]
            self.editorSession.inspectEntity(self.currentEntity)
        else:
            self.editorSession.inspectEntity(None)
def entitiesOnRay(dimension, ray, rayWidth=0.75, maxDistance=1000):
    """Return the entities in `dimension` lying within `rayWidth` of `ray`.

    ray: (pos, vec)
        Origin point and direction vector (project vector types with
        normalize/cross/length -- assumed, confirm against callers).
    rayWidth: float
        Maximum perpendicular distance from the ray's axis.
    maxDistance: int
        How far along the ray to search.
    """
    pos, vec = ray
    endpos = pos + vec.normalize() * maxDistance
    ray_dir = vec.normalize()

    # Visit each chunk along the ray
    def chunks(pos, endpos):
        last_cpos = None
        for x, y, z in bresenham(pos, endpos):
            # Chunk coords are block coords >> 4 (16-block chunks).
            cpos = int(x) >> 4, int(z) >> 4
            if cpos != last_cpos:
                yield cpos
                last_cpos = cpos

    # Duck-typed selection object handed to dimension.getEntities(): it
    # exposes the candidate chunk positions plus a per-entity membership
    # test based on perpendicular distance from the ray.
    class RaySelection(object):
        positions = list(chunks(pos, endpos))

        def chunkPositions(self):
            return self.positions

        def __contains__(self, position):
            # Distance from the ray axis to the entity's block center:
            # |ray_dir x evec| (ray_dir is unit length).
            evec = (position + (0.5, 0.5, 0.5)) - pos
            dist = ray_dir.cross(evec).length()
            return dist < rayWidth

    sr = RaySelection()
    return dimension.getEntities(sr)
| Rubisk/mcedit2 | src/mcedit2/editortools/select_entity.py | Python | bsd-3-clause | 3,391 | [
"VisIt"
] | 4e4d3544924c5be4eb86232d5455e00b0c6d084bf51101d1ac01b620f136f107 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.