max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
cinderclient/tests/v1/test_volume_encryption_types.py | citrix-openstack-build/python-cinderclient | 0 | 6624351 | <gh_stars>0
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.v1.volume_encryption_types import VolumeEncryptionType
from cinderclient.tests import utils
from cinderclient.tests.v1 import fakes
cs = fakes.FakeClient()
class VolumeEncryptionTypesTest(utils.TestCase):
    """
    Test suite for the Volume Encryption Types Resource and Manager.

    All requests go through the module-level ``cs`` fake client, which
    records every HTTP call so each test can assert exactly which
    requests the manager issued.
    """
    def test_list(self):
        """
        Unit test for VolumeEncryptionTypesManager.list
        Verify that a series of GET requests are made:
        - one GET request for the list of volume types
        - one GET request per volume type for encryption type information
        Verify that all returned information is :class: VolumeEncryptionType
        """
        encryption_types = cs.volume_encryption_types.list()
        cs.assert_called_anytime('GET', '/types')
        cs.assert_called_anytime('GET', '/types/2/encryption')
        cs.assert_called_anytime('GET', '/types/1/encryption')
        for encryption_type in encryption_types:
            self.assertIsInstance(encryption_type, VolumeEncryptionType)
    def test_get(self):
        """
        Unit test for VolumeEncryptionTypesManager.get
        Verify that one GET request is made for the volume type encryption
        type information. Verify that returned information is :class:
        VolumeEncryptionType
        """
        encryption_type = cs.volume_encryption_types.get(1)
        cs.assert_called('GET', '/types/1/encryption')
        self.assertIsInstance(encryption_type, VolumeEncryptionType)
    def test_get_no_encryption(self):
        """
        Unit test for VolumeEncryptionTypesManager.get
        Verify that a request on a volume type with no associated encryption
        type information returns a VolumeEncryptionType with no attributes.
        """
        encryption_type = cs.volume_encryption_types.get(2)
        self.assertIsInstance(encryption_type, VolumeEncryptionType)
        # Volume type 2 carries no encryption data, so the returned
        # resource should be attribute-less (in particular, no 'id').
        self.assertFalse(hasattr(encryption_type, 'id'),
                         'encryption type has an id')
    def test_create(self):
        """
        Unit test for VolumeEncryptionTypesManager.create
        Verify that one POST request is made for the encryption type creation.
        Verify that encryption type creation returns a VolumeEncryptionType.
        """
        result = cs.volume_encryption_types.create(2, {'encryption':
                                                       {'provider': 'Test',
                                                        'key_size': None,
                                                        'cipher': None,
                                                        'control_location':
                                                            None}})
        cs.assert_called('POST', '/types/2/encryption')
        self.assertIsInstance(result, VolumeEncryptionType)
    def test_update(self):
        """
        Unit test for VolumeEncryptionTypesManager.update
        """
        self.skipTest("Not implemented")
    def test_delete(self):
        """
        Unit test for VolumeEncryptionTypesManager.delete
        """
        self.skipTest("Not implemented")
| # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.v1.volume_encryption_types import VolumeEncryptionType
from cinderclient.tests import utils
from cinderclient.tests.v1 import fakes
cs = fakes.FakeClient()
class VolumeEncryptionTypesTest(utils.TestCase):
"""
Test suite for the Volume Encryption Types Resource and Manager.
"""
def test_list(self):
"""
Unit test for VolumeEncryptionTypesManager.list
Verify that a series of GET requests are made:
- one GET request for the list of volume types
- one GET request per volume type for encryption type information
Verify that all returned information is :class: VolumeEncryptionType
"""
encryption_types = cs.volume_encryption_types.list()
cs.assert_called_anytime('GET', '/types')
cs.assert_called_anytime('GET', '/types/2/encryption')
cs.assert_called_anytime('GET', '/types/1/encryption')
for encryption_type in encryption_types:
self.assertIsInstance(encryption_type, VolumeEncryptionType)
def test_get(self):
"""
Unit test for VolumeEncryptionTypesManager.get
Verify that one GET request is made for the volume type encryption
type information. Verify that returned information is :class:
VolumeEncryptionType
"""
encryption_type = cs.volume_encryption_types.get(1)
cs.assert_called('GET', '/types/1/encryption')
self.assertIsInstance(encryption_type, VolumeEncryptionType)
def test_get_no_encryption(self):
"""
Unit test for VolumeEncryptionTypesManager.get
Verify that a request on a volume type with no associated encryption
type information returns a VolumeEncryptionType with no attributes.
"""
encryption_type = cs.volume_encryption_types.get(2)
self.assertIsInstance(encryption_type, VolumeEncryptionType)
self.assertFalse(hasattr(encryption_type, 'id'),
'encryption type has an id')
def test_create(self):
"""
Unit test for VolumeEncryptionTypesManager.create
Verify that one POST request is made for the encryption type creation.
Verify that encryption type creation returns a VolumeEncryptionType.
"""
result = cs.volume_encryption_types.create(2, {'encryption':
{'provider': 'Test',
'key_size': None,
'cipher': None,
'control_location':
None}})
cs.assert_called('POST', '/types/2/encryption')
self.assertIsInstance(result, VolumeEncryptionType)
def test_update(self):
"""
Unit test for VolumeEncryptionTypesManager.update
"""
self.skipTest("Not implemented")
def test_delete(self):
"""
Unit test for VolumeEncryptionTypesManager.delete
"""
self.skipTest("Not implemented") | en | 0.813483 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Test suite for the Volume Encryption Types Resource and Manager. Unit test for VolumeEncryptionTypesManager.list Verify that a series of GET requests are made: - one GET request for the list of volume types - one GET request per volume type for encryption type information Verify that all returned information is :class: VolumeEncryptionType Unit test for VolumeEncryptionTypesManager.get Verify that one GET request is made for the volume type encryption type information. Verify that returned information is :class: VolumeEncryptionType Unit test for VolumeEncryptionTypesManager.get Verify that a request on a volume type with no associated encryption type information returns a VolumeEncryptionType with no attributes. Unit test for VolumeEncryptionTypesManager.create Verify that one POST request is made for the encryption type creation. Verify that encryption type creation returns a VolumeEncryptionType. Unit test for VolumeEncryptionTypesManager.update Unit test for VolumeEncryptionTypesManager.delete | 2.123585 | 2 |
api/qymatix/etl_clarus.py | manisharmagarg/qymatix | 0 | 6624352 | import datetime
from api.qymatix import uploader
# from . import file_uploader
class EtlBase:
    """ETL helper for Clarus Excel sales exports.

    On construction the export is loaded via ``uploader.load_data``, the
    German column headers are renamed to the canonical names expected by
    the uploader, rows dated on or before ``since`` are dropped, and the
    derived ``cost`` and ``Account id`` columns are added.  Each
    ``upload_*`` method then pushes one slice of the prepared data into
    the target database.
    """
    def __init__(self, dbname, file_name=None, since=None):
        """Load and prepare the export.

        :param dbname: name of the target database.
        :param file_name: path of the Excel export to load.
        :param since: cutoff ``datetime.date``; rows with
            ``Date <= since`` are discarded.  Defaults to 1999-01-01
            (i.e. keep everything).
        """
        self.dbname = dbname
        self.file_name = file_name
        self.since = since
        if since is None:
            self.since = datetime.date(1999, 1, 1)
        self.data = uploader.load_data(filename=file_name, nrows=None)
        self.transform_and_filter()
    def transform_and_filter(self, data=None):
        """Normalize columns and derive helper fields on ``self.data``.

        NOTE(review): the ``data`` parameter is accepted for backward
        compatibility but is ignored -- this method always operates on
        ``self.data`` (matches the original behavior).
        """
        from pandas import to_datetime
        # Map the German export headers onto the canonical column names.
        cols = {
            'Auftragsnr.': 'invoice',
            'Re-/Gu-Datum': 'Date',
            'Artikelnr.': 'product',
            'Artikelgruppennr.': 'product type',
            'Umsatz VK': 'price',
            'Marge': 'margin',
            'Warengruppennr.': 'product line',
            'Menge': 'quantity',
            'MC Kunde': 'Account Name',
            'Branchennr.': 'product class',
        }
        self.data.rename(columns=cols, inplace=True)
        # German exports use day-first date notation.
        self.data['Date'] = to_datetime(self.data['Date'], dayfirst=True)
        if self.since is not None:
            self.data = self.data[self.data['Date'] > self.since]
        self.data['cost'] = self.data['price'] - self.data['margin']
        data = self.data.copy()
        if 'Account id' not in self.data.columns:
            # Synthesize numeric account ids (offset by 1000) from the
            # de-duplicated account names, then map each id back onto
            # every row carrying that account name.
            data_accounts = self.data.drop_duplicates('Account Name').copy()
            data_accounts['Account id'] = data_accounts.index + 1000
            for account_name in data_accounts['Account Name'].values:
                account_id = data_accounts.loc[
                    data_accounts['Account Name'] == account_name, 'Account id'
                ].values[0]
                data.loc[data['Account Name'] == account_name, 'Account id'] = account_id
        self.data = data
    def _load_or_default(self, filename):
        """Return the frame loaded from *filename*, or the prepared
        ``self.data`` when no file is given."""
        if filename is not None:
            return uploader.load_data(filename=filename)
        return self.data
    def upload_products_class(self, file_name_product_class=None):
        """Upload the distinct product classes."""
        data = self._load_or_default(file_name_product_class)
        # De-duplicate on a copy: the previous inplace drop_duplicates
        # destructively reduced self.data for all later uploads.
        data = data.drop_duplicates('product class')
        uploader.upload_product_class(dbname=self.dbname, data=data)
    def upload_products_line(self, file_name_product_line=None):
        """Upload the distinct product lines."""
        if file_name_product_line is not None:
            data = uploader.load_data(filename=file_name_product_line)
            # NOTE(review): re-runs the transform on self.data, not on the
            # freshly loaded frame -- kept for behavioral parity.
            self.transform_and_filter()
        else:
            data = self.data
        data = data.drop_duplicates('product line')
        uploader.upload_product_line(dbname=self.dbname, data=data)
    def upload_products_type(self, file_name_product_type=None):
        """Upload the distinct product types."""
        if file_name_product_type is not None:
            data = uploader.load_data(filename=file_name_product_type)
            # NOTE(review): see upload_products_line -- transform targets
            # self.data, not the loaded frame.
            self.transform_and_filter()
        else:
            data = self.data
        data = data.drop_duplicates('product type')
        uploader.upload_product_type(dbname=self.dbname, data=data)
    def upload_products(self, file_name_products=None):
        """Upload the distinct products."""
        if file_name_products is not None:
            data = uploader.load_data(filename=file_name_products)
            self.transform_and_filter(data)
        else:
            data = self.data
        data = data.drop_duplicates('product')
        uploader.upload_products(dbname=self.dbname, data=data)
    def upload_customers(self, file_name_customers=None):
        """Upload the distinct customers.

        De-duplicates by 'Account id' when the column exists, otherwise
        by 'Account Name'.
        """
        data = self._load_or_default(file_name_customers)
        dedup_key = 'Account id' if 'Account id' in data.keys() else 'Account Name'
        data = data.drop_duplicates(dedup_key)
        uploader.upload_customers(dbname=self.dbname, data=data)
    def upload_kams(self, file_name_kams=None):
        """Upload the distinct key-account managers ('kam' column)."""
        data = self._load_or_default(file_name_kams)
        data = data.drop_duplicates('kam')
        uploader.upload_kam(dbname=self.dbname, data=data)
    def upload_sales(self, file_name_sales=None):
        """Upload the sales fact rows (no de-duplication).

        :param file_name_sales: optional alternative export to load;
            defaults to the prepared ``self.data``.
        """
        if file_name_sales is not None:
            # BUG FIX: previously loaded the module-level ``file_name``
            # global instead of this argument.
            data_sales = uploader.load_data(filename=file_name_sales)
        else:
            data_sales = self.data
        uploader.upload_sales(dbname=self.dbname, data=data_sales)
    def upload_plans(self, file_name_plans=None, skiprows=None):
        """Upload the distinct plans ('plan name' column).

        :param skiprows: forwarded to ``uploader.load_data`` (header rows
            to skip) when a file is given.
        """
        if file_name_plans is not None:
            data = uploader.load_data(filename=file_name_plans, skiprows=skiprows)
        else:
            data = self.data
        data = data.drop_duplicates('plan name')
        uploader.upload_plans(dbname=self.dbname, data=data)
# --- One-off ETL run for the Clarus database. ---
# NOTE: this runs at import time; the commented-out calls below are the
# remaining dimension uploads, re-enabled manually as needed.
file_name = '/var/www/qyapp/ExcelExport_tblFaktStd_201909020919.xlsx'
database_name = 'clarus_de'
etl = EtlBase(database_name, file_name)
# print("Importing product classes...")
# etl.upload_products_class()
#
# print('\n')
# print('\n')
# print("Importing product lines...")
# etl.upload_products_line()
#
# print('\n')
# print('\n')
# print("Importing product types...")
# etl.upload_products_type()
#
# print('\n')
# print('\n')
# print("Importing products...")
# etl.upload_products()
# print("uploading customers....")
# etl.upload_customers()
# etl.upload_kams()
print("uploading sales....")
etl.upload_sales()
# if __name__ == "__main__":
#
# file_name = '/var/www/qyapp/pfisterer_file_000001.xlsx'
#
# database_name = 'data_pfisterer_de'
#
# etl = EtlBase(database_name, file_name)
#
# print("Importing product classes...")
# etl.upload_products_class()
# etl.upload_products_line()
# print("uploading product types....")
# etl.upload_products_type()
# print("uploading products....")
# etl.upload_products()
# print("uploading customers....")
# etl.upload_customers()
# etl.upload_kams()
# print("uploading sales....")
# etl.upload_sales()
| import datetime
from api.qymatix import uploader
# from . import file_uploader
class EtlBase():
def __init__(self, dbname, file_name=None, since=None):
import datetime
from api.qymatix import uploader
self.dbname = dbname
self.file_name = file_name
self.since = since
if since is None:
self.since = datetime.date(1999, 1, 1)
self.data = uploader.load_data(filename=file_name, nrows=None)
self.transform_and_filter()
def transform_and_filter(self, data=None):
"""
:return:
"""
from pandas import to_datetime
cols = {
# 'Kundennr.': 'Account Name',
'Auftragsnr.': 'invoice',
# 'Re-/Gu-Nr.': ,
'Re-/Gu-Datum': 'Date',
'Artikelnr.': 'product',
'Artikelgruppennr.': 'product type',
# 'ME': ,
'Umsatz VK': 'price',
'Marge': 'margin',
'Warengruppennr.': 'product line',
'Menge': 'quantity',
# 'Breite': ,
# 'Positionswert': ,
# 'Pos.': ,
# 'F': ,
# 'Unterkundennr.': ,
# 'Auftragsposnr.': ,
# 'Menge ext.': ,
# 'DEK-Preis': ,
# 'LEK-Preis': ,
# 'VK-Preis': ,
# 'Umsatz EK': ,
# 'Auftr.Art': ,
# 'Auftragsartbez.': ,
# 'Artikel MC Verkauf': ,
# 'Benutzer': ,
# 'Gewicht': ,
# 'PE': ,
# 'MPVE-Bez': ,
# 'Menge/Verp.Einheit': ,
# 'VE Menge': ,
'MC Kunde': 'Account Name',
# 'Vertretername': ,
# 'Sachbearbeiter': ,
# 'Landkz.': ,
'Branchennr.': 'product class',
# 'Branchenbez.': ,
}
self.data.rename(columns=cols, inplace=True)
self.data['Date'] = to_datetime(self.data['Date'], dayfirst=True)
if self.since is not None:
self.data = self.data[self.data['Date'] > self.since]
# data = data[(data['Jahr'] == 2018)]
# data = data[data['Art'] == 'Verkauf']
# data = data[data['customer_id'] < 300000]
self.data['cost'] = self.data['price'] - self.data['margin']
data = self.data.copy()
data_accounts = self.data.copy()
if 'Account id' not in self.data.columns:
data_accounts.drop_duplicates('Account Name', inplace=True)
data_accounts['Account id'] = data_accounts.index + 1000
for n in data_accounts['Account Name'].values:
data.loc[data['Account Name'] == n, 'Account id'] = \
data_accounts[data_accounts['Account Name'] == n]['Account id'].values[0]
self.data = data
def upload_products_class(self, file_name_product_class=None):
"""
"""
from api.qymatix import uploader
if file_name_product_class is not None:
data = uploader.load_data(filename=file_name_product_class)
else:
data = self.data
data.drop_duplicates('product class', inplace=True)
uploader.upload_product_class(dbname=self.dbname, data=data)
def upload_products_line(self, file_name_product_line=None):
"""
"""
from api.qymatix import uploader
if file_name_product_line is not None:
data = uploader.load_data(filename=file_name_product_line)
self.transform_and_filter()
else:
data = self.data
data.drop_duplicates('product line', inplace=True)
uploader.upload_product_line(dbname=self.dbname, data=data)
def upload_products_type(self, file_name_product_type=None):
"""
"""
from api.qymatix import uploader
if file_name_product_type is not None:
data = uploader.load_data(filename=file_name_product_type)
self.transform_and_filter()
else:
data = self.data
data.drop_duplicates('product type', inplace=True)
uploader.upload_product_type(dbname=self.dbname, data=data)
def upload_products(self, file_name_products=None):
"""
"""
from api.qymatix import uploader
if file_name_products is not None:
data = uploader.load_data(filename=file_name_products)
self.transform_and_filter(data)
else:
data = self.data
data.drop_duplicates('product', inplace=True)
uploader.upload_products(dbname=self.dbname, data=data)
def upload_customers(self, file_name_customers=None):
"""
"""
from api.qymatix import uploader
if file_name_customers is not None:
data = uploader.load_data(filename=file_name_customers)
else:
data = self.data
if 'Account id' in data.keys():
data.drop_duplicates('Account id', inplace=True)
else:
data.drop_duplicates('Account Name', inplace=True)
uploader.upload_customers(dbname=self.dbname, data=data)
def upload_kams(self, file_name_kams=None):
"""
:param what:
:return:
"""
from api.qymatix import uploader
if file_name_kams is not None:
data = uploader.load_data(filename=file_name_kams)
else:
data = self.data
data.drop_duplicates('kam', inplace=True)
uploader.upload_kam(dbname=self.dbname, data=data)
def upload_sales(self, file_name_sales=None):
"""
:param file_name_sales:
:return:
"""
from api.qymatix import uploader
if file_name_sales is not None:
data_sales = uploader.load_data(filename=file_name)
else:
data_sales = self.data
uploader.upload_sales(dbname=self.dbname, data=data_sales)
def upload_plans(self, file_name_plans=None, skiprows=None):
"""
:param file_name_plans:
:param skiprows:
:return:
"""
from api.qymatix import uploader
if file_name_plans is not None:
data = uploader.load_data(filename=file_name_plans, skiprows=skiprows)
else:
data = self.data
data.drop_duplicates('plan name', inplace=True)
uploader.upload_plans(dbname=self.dbname, data=data)
file_name = '/var/www/qyapp/ExcelExport_tblFaktStd_201909020919.xlsx'
database_name = 'clarus_de'
etl = EtlBase(database_name, file_name)
# print("Importing product classes...")
# etl.upload_products_class()
#
# print('\n')
# print('\n')
# print("Importing product lines...")
# etl.upload_products_line()
#
# print('\n')
# print('\n')
# print("Importing product types...")
# etl.upload_products_type()
#
# print('\n')
# print('\n')
# print("Importing products...")
# etl.upload_products()
# print("uploading customers....")
# etl.upload_customers()
# etl.upload_kams()
print("uploading sales....")
etl.upload_sales()
# if __name__ == "__main__":
#
# file_name = '/var/www/qyapp/pfisterer_file_000001.xlsx'
#
# database_name = 'data_pfisterer_de'
#
# etl = EtlBase(database_name, file_name)
#
# print("Importing product classes...")
# etl.upload_products_class()
# etl.upload_products_line()
# print("uploading product types....")
# etl.upload_products_type()
# print("uploading products....")
# etl.upload_products()
# print("uploading customers....")
# etl.upload_customers()
# etl.upload_kams()
# print("uploading sales....")
# etl.upload_sales()
| en | 0.207195 | # from . import file_uploader :return: # 'Kundennr.': 'Account Name', # 'Re-/Gu-Nr.': , # 'ME': , # 'Breite': , # 'Positionswert': , # 'Pos.': , # 'F': , # 'Unterkundennr.': , # 'Auftragsposnr.': , # 'Menge ext.': , # 'DEK-Preis': , # 'LEK-Preis': , # 'VK-Preis': , # 'Umsatz EK': , # 'Auftr.Art': , # 'Auftragsartbez.': , # 'Artikel MC Verkauf': , # 'Benutzer': , # 'Gewicht': , # 'PE': , # 'MPVE-Bez': , # 'Menge/Verp.Einheit': , # 'VE Menge': , # 'Vertretername': , # 'Sachbearbeiter': , # 'Landkz.': , # 'Branchenbez.': , # data = data[(data['Jahr'] == 2018)] # data = data[data['Art'] == 'Verkauf'] # data = data[data['customer_id'] < 300000] :param what: :return: :param file_name_sales: :return: :param file_name_plans: :param skiprows: :return: # print("Importing product classes...") # etl.upload_products_class() # # print('\n') # print('\n') # print("Importing product lines...") # etl.upload_products_line() # # print('\n') # print('\n') # print("Importing product types...") # etl.upload_products_type() # # print('\n') # print('\n') # print("Importing products...") # etl.upload_products() # print("uploading customers....") # etl.upload_customers() # etl.upload_kams() # if __name__ == "__main__": # # file_name = '/var/www/qyapp/pfisterer_file_000001.xlsx' # # database_name = 'data_pfisterer_de' # # etl = EtlBase(database_name, file_name) # # print("Importing product classes...") # etl.upload_products_class() # etl.upload_products_line() # print("uploading product types....") # etl.upload_products_type() # print("uploading products....") # etl.upload_products() # print("uploading customers....") # etl.upload_customers() # etl.upload_kams() # print("uploading sales....") # etl.upload_sales() | 2.214325 | 2 |
BOJ/graph_boj/bfs_reference.py | mrbartrns/swacademy_structure | 0 | 6624353 | <gh_stars>0
# 미로 탈출
"""
n * m 크기의 직사각형 형태의 미로에 갖혔다. 미로에는 여러마리의 괴물이 있어 이를 피해 탈출해야 한다.
동빈이의 위치는 (1, 1)이며, 출구는 (n, m)이다. 한번에 한칸씩만 이동할 수 있다.
탈출하기 위한 최소 이동거리를 출력하라
"""
from collections import deque
def bfs(x, y):
    """Breadth-first flood fill from (x, y).

    Overwrites each reachable open cell (value 1) in the module-level
    ``graph`` with its step distance (start cell counted as 1) and
    returns the distance recorded at the exit cell (n-1, m-1).
    """
    queue = deque([(x, y)])
    while queue:
        cur_x, cur_y = queue.popleft()
        for step_x, step_y in zip(dx, dy):
            next_x, next_y = cur_x + step_x, cur_y + step_y
            # Skip cells outside the board.
            if not (0 <= next_x < n and 0 <= next_y < m):
                continue
            # Only unvisited open cells (still holding 1) are expanded;
            # walls (0) and already-labelled cells are ignored.
            if graph[next_x][next_y] == 1:
                graph[next_x][next_y] = graph[cur_x][cur_y] + 1
                queue.append((next_x, next_y))
    return graph[n - 1][m - 1]
# Read the board size, then the n rows of the maze (digit strings,
# 1 = open cell, 0 = wall), and print the shortest path length from
# (0, 0) to (n-1, m-1).
n, m = map(int, input().split())
# Movement deltas: up, down, left, right.
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
graph = []
for i in range(n):
    graph.append(list(map(int, input())))
# print(graph)
print(bfs(0, 0))
| # 미로 탈출
"""
n * m 크기의 직사각형 형태의 미로에 갖혔다. 미로에는 여러마리의 괴물이 있어 이를 피해 탈출해야 한다.
동빈이의 위치는 (1, 1)이며, 출구는 (n, m)이다. 한번에 한칸씩만 이동할 수 있다.
탈출하기 위한 최소 이동거리를 출력하라
"""
from collections import deque
def bfs(x, y):
que = deque()
que.append((x, y))
while que:
x, y = que.popleft()
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
if nx < 0 or nx >= n or ny < 0 or ny >= m:
continue
if graph[nx][ny] == 0:
continue
if graph[nx][ny] == 1:
graph[nx][ny] = graph[x][y] + 1
que.append((nx, ny))
return graph[n - 1][m - 1]
n, m = map(int, input().split())
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
graph = []
for i in range(n):
graph.append(list(map(int, input())))
# print(graph)
print(bfs(0, 0)) | ko | 1.000069 | # 미로 탈출 n * m 크기의 직사각형 형태의 미로에 갖혔다. 미로에는 여러마리의 괴물이 있어 이를 피해 탈출해야 한다. 동빈이의 위치는 (1, 1)이며, 출구는 (n, m)이다. 한번에 한칸씩만 이동할 수 있다. 탈출하기 위한 최소 이동거리를 출력하라 # print(graph) | 3.4522 | 3 |
src/ipyradiant/visualization/interactive_exploration.py | sanbales/ipyradiant | 0 | 6624354 | # Copyright (c) 2021 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
import traitlets as trt
import ipycytoscape
import ipywidgets as W
import rdflib
from ipycytoscape import Edge, Node
from ipyradiant.query.api import SPARQLQueryFramer
from ipyradiant.rdf2nx.uri_converter import URItoID
# Default cytoscape stylesheet: base node styling plus the "clicked",
# "temp", and "directed" classes that InteractiveViewer toggles on
# nodes/edges as the user explores the graph.
DEFAULT_CYTO_STYLE = [
    # Base look for every node (label text drawn from the _label field).
    {
        "selector": "node",
        "css": {
            "label": "data(_label)",
            "text-wrap": "wrap",
            "text-max-width": "150px",
            "text-valign": "center",
            "text-halign": "center",
            "font-size": "10",
            "font-family": '"Gill Sans", sans-serif',
            "color": "black",
            "background-color": "grey",
        },
    },
    # Light-teal highlight for temporary edges.
    {
        "selector": "edge[classes='temp-edge']",
        "css": {
            "label": "data(_label)",
            "line-color": "#a8eae5",
        },
    },
    # Nodes the user has clicked (pinned selections).
    {
        "selector": "node.clicked",
        "css": {
            "background-color": "grey",
            "line-color": "black",
            "target-arrow-color": "black",
            "source-arrow-color": "black",
        },
    },
    # Temporary nodes added by an expansion (pink until clicked).
    {
        "selector": "node.temp",
        "css": {
            "background-color": "#FFB6C1",
            "line-color": "black",
            "target-arrow-color": "black",
            "source-arrow-color": "black",
        },
    },
    # Permanent directed edges (arrowheads toward the object node).
    {
        "selector": "edge.directed",
        "style": {
            "curve-style": "bezier",
            "target-arrow-shape": "triangle",
            "line-color": "grey",
            # "label": "data(iri)",
            "font-size": "5",
        },
    },
    # Temporary edges created by an expansion.
    {
        "selector": "edge.temp",
        "style": {
            "curve-style": "bezier",
            "line-color": "#a8eae5",
            # "label": "data(iri)",
            "font-size": "5",
        },
    },
    {"selector": "edge.multiple_edges", "style": {"curve-style": "bezier"}},
]
class GetOutgoingPredicateObjects(SPARQLQueryFramer):
    """
    Return all triples for non-Literal objects (and the optional object labels).

    Run with ``s`` bound to a subject IRI (see
    ``InteractiveViewer.expand_button_clicked``) to fetch that node's
    outgoing predicate/object pairs for graph expansion.
    """
    # Class-level query string consumed by the SPARQLQueryFramer machinery.
    sparql = """
    SELECT DISTINCT ?s ?p ?o ?label
    WHERE {
        ?s ?p ?o .
        FILTER (!isLiteral(?o))
        OPTIONAL {?o rdfs:label ?label}
    }
    """
# Throughout this class, we assign the layout to self.cyto_graph_layout multiple times.
# This is so that the graph refreshes the layout every time nodes are added or removed,
# which provides an optimal viewing experience.
class InteractiveViewer(W.VBox):
    """Interactive RDF graph explorer built on ipycytoscape.

    Click a node to select it (a second click pins it with the
    "clicked" class); the buttons below the canvas expand the
    selection's outgoing triples, undo the most recent expansion, or
    remove all temporary ("temp"-classed) nodes.
    """
    # Trait declarations; concrete defaults come from the @trt.default
    # creators below.
    expand_button = trt.Instance(W.Button)
    undo_button = trt.Instance(W.Button)
    remove_temp_nodes_button = trt.Instance(W.Button)
    cyto_graph = trt.Instance(ipycytoscape.CytoscapeWidget)
    selected_node = trt.Instance(ipycytoscape.Node, allow_none=True)
    rdf_graph = trt.Instance(rdflib.graph.Graph, allow_none=True)
    cyto_style = trt.List(allow_none=True)
    cyto_graph_layout = trt.Unicode(default_value="cola")
    @trt.default("expand_button")
    def _create_expand_button(self):
        """Build the "expand selected node" button and wire its handler."""
        button = W.Button(
            description="Expand Upon Selected Node",
            layout=W.Layout(width="50%", height="40px"),
        )
        button.on_click(self.expand_button_clicked)
        return button
    @trt.default("undo_button")
    def _create_undo_button(self):
        """Build the undo button; disabled until an expansion happens."""
        button = W.Button(
            description="Undo Last Expansion",
            layout=W.Layout(width="25%", height="40px"),
            disabled=True,
        )
        button.on_click(self.undo_expansion)
        return button
    @trt.default("remove_temp_nodes_button")
    def _create_remove_temp_nodes_button(self):
        """Build the "remove temporary nodes" button."""
        button = W.Button(
            description="Remove Temporary Nodes",
            layout=W.Layout(width="25%", height="40px"),
            disabled=False,
        )
        button.on_click(self.remove_temp_nodes)
        return button
    @trt.default("selected_node")
    def _create_default_selected_node(self):
        """No node is selected until the user clicks one."""
        return None
    @trt.default("cyto_style")
    def _create_cyto_style(self):
        """Use the module-level default stylesheet."""
        return DEFAULT_CYTO_STYLE
    @trt.default("rdf_graph")
    def _create_rdf_graph(self):
        """Empty RDF graph by default; callers typically assign their own."""
        return rdflib.Graph()
    @trt.default("cyto_graph")
    def _create_cyto_graph(self):
        """Fresh cytoscape widget; configured by update_cyto_graph."""
        return ipycytoscape.CytoscapeWidget()
    @trt.default("layout")
    def _create_layout(self):
        return W.Layout(width="80%")
    @trt.observe("cyto_graph")
    def update_cyto_graph(self, change):
        """Re-apply layout/style, rewire the click callback, and rebuild
        the children whenever the cyto_graph trait is replaced."""
        self.cyto_graph.set_layout(name=self.cyto_graph_layout)
        self.cyto_graph.set_style(self.cyto_style)
        # on is a callback for cyto_graph instance (must be set on each instance)
        self.cyto_graph.on("node", "click", self.log_node_clicks)
        # Here we have to set the children again so that the changes propogate to the front end
        # automatically. Ideally this would be done with traits but did not seem to work. LINK TO GITHUB ISSUE:
        # https://github.com/jupyrdf/ipyradiant/issues/79
        self.children = (
            self.cyto_graph,
            W.HBox(
                children=[
                    self.expand_button,
                    self.undo_button,
                    self.remove_temp_nodes_button,
                ]
            ),
        )
    @trt.validate("children")
    def validate_children(self, proposal):
        """Guarantee the widget always has its canvas + button-row children."""
        children = proposal.value
        if not children:
            children = (
                self.cyto_graph,
                W.HBox(
                    children=[
                        self.expand_button,
                        self.undo_button,
                        self.remove_temp_nodes_button,
                    ]
                ),
            )
        return children
    def get_node(self, node: Node) -> Node:
        """
        This function is used to find a node given the id of a node copy.

        The click callback receives a plain dict copy of the node, so we
        look up the live Node object by id in the cytoscape graph.
        """
        for node_obj in self.cyto_graph.graph.nodes:
            if node_obj.data["id"] == node["data"]["id"]:
                return node_obj
        # TODO: Make this function return None and log a warning if not node not found.
        raise ValueError("Node not found in cytoscape.graph.nodes.")
    def log_node_clicks(self, node: Node):
        """
        Handle a cytoscape node click.  A second click on the already
        selected node promotes it to the "clicked" class (changing its
        color); every click records the node as the current selection.
        """
        node_object = self.get_node(node)
        if self.selected_node == node_object:
            node_object.classes = "clicked"
            # NOTE: Class changes won't propogate to the front end for added nodes until
            # the graph is updated.
            # To fix this we create a random node and then quickly delete it so that the changes propogate.
            self.update_cytoscape_frontend()
        self.selected_node = node_object
    def expand_button_clicked(self, button):
        """
        Expand the selected node: query its outgoing (non-Literal)
        triples from the RDF graph and add the objects as temporary
        nodes with temporary edges.  No-op when nothing is selected.
        """
        self.undo_button.disabled = False
        if self.selected_node is None:
            return None
        new_data = GetOutgoingPredicateObjects.run_query(
            graph=self.rdf_graph, s=self.selected_node.data["iri"]
        )
        objs = new_data["o"].tolist()
        preds = new_data["p"].tolist()
        labels = new_data["label"].tolist()
        # add nodes
        self.existing_node_ids = [
            node.data["id"] for node in self.cyto_graph.graph.nodes
        ]
        # Stash what was added so undo_expansion can remove exactly this batch.
        self.new_nodes = {}
        self.new_edges = {}
        for ii, x in enumerate(objs):
            if str(x) not in self.existing_node_ids:
                self.new_nodes[ii] = Node(
                    data={
                        "id": str(x),
                        "iri": x,
                        "_label": labels[ii] or str(x),
                    },
                    classes="temp",
                )
                self.cyto_graph.graph.add_node(self.new_nodes[ii])
            # An edge is added even when the target node already exists.
            self.new_edges[ii] = Edge(
                data={
                    "source": self.selected_node.data["id"],
                    "target": str(x),
                    "iri": URItoID(preds[ii]),
                },
                classes="temp",
            )
            self.cyto_graph.graph.add_edge(self.new_edges[ii])
        self.cyto_graph.set_layout(name=self.cyto_graph_layout)
    def undo_expansion(self, button):
        """
        This is a preliminary function for undoing expansions upon a node.
        As of right now, a user can only undo the most recent expansion. After doing this,
        the button will be disabled until a new expansion is made.
        """
        self.undo_button.disabled = True
        for node in self.new_nodes:
            self.cyto_graph.graph.remove_node_by_id(self.new_nodes[node].data["id"])
        for edge in self.new_edges:
            try:
                self.cyto_graph.graph.remove_edge(self.new_edges[edge])
            except ValueError:
                # edge already removed from graph because the node was removed earlier.
                pass
        self.cyto_graph.set_layout(name=self.cyto_graph_layout)
    def remove_temp_nodes(self, button):
        """
        Remove every node still carrying the 'temp' class (i.e. nodes
        that are not starting nodes and were never clicked), and convert
        the remaining edges to the permanent 'directed' style.
        """
        nodes_to_remove = []
        for node in self.cyto_graph.graph.nodes:
            if node.classes == "temp":
                nodes_to_remove.append(node.data["id"])
        for node in nodes_to_remove:
            self.cyto_graph.graph.remove_node_by_id(node)
        # change edge color
        for edge in self.cyto_graph.graph.edges:
            edge.classes = "directed"
        # propogate changes to front end using hack
        self.update_cytoscape_frontend()
        self.cyto_graph.set_layout(name=self.cyto_graph_layout)
        self.undo_button.disabled = True
    def update_cytoscape_frontend(self):
        """
        This function quickly adds and deletes a node to update cytoscape front end. Looking to improve
        it for future release.
        """
        self.cyto_graph.graph.add_node(Node(data={"id": "random node"}))
        self.cyto_graph.graph.remove_node_by_id("random node")
| # Copyright (c) 2021 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
import traitlets as trt
import ipycytoscape
import ipywidgets as W
import rdflib
from ipycytoscape import Edge, Node
from ipyradiant.query.api import SPARQLQueryFramer
from ipyradiant.rdf2nx.uri_converter import URItoID
DEFAULT_CYTO_STYLE = [
{
"selector": "node",
"css": {
"label": "data(_label)",
"text-wrap": "wrap",
"text-max-width": "150px",
"text-valign": "center",
"text-halign": "center",
"font-size": "10",
"font-family": '"Gill Sans", sans-serif',
"color": "black",
"background-color": "grey",
},
},
{
"selector": "edge[classes='temp-edge']",
"css": {
"label": "data(_label)",
"line-color": "#a8eae5",
},
},
{
"selector": "node.clicked",
"css": {
"background-color": "grey",
"line-color": "black",
"target-arrow-color": "black",
"source-arrow-color": "black",
},
},
{
"selector": "node.temp",
"css": {
"background-color": "#FFB6C1",
"line-color": "black",
"target-arrow-color": "black",
"source-arrow-color": "black",
},
},
{
"selector": "edge.directed",
"style": {
"curve-style": "bezier",
"target-arrow-shape": "triangle",
"line-color": "grey",
# "label": "data(iri)",
"font-size": "5",
},
},
{
"selector": "edge.temp",
"style": {
"curve-style": "bezier",
"line-color": "#a8eae5",
# "label": "data(iri)",
"font-size": "5",
},
},
{"selector": "edge.multiple_edges", "style": {"curve-style": "bezier"}},
]
class GetOutgoingPredicateObjects(SPARQLQueryFramer):
"""
Return all triples for non-Literal objects (and the optional object labels).
"""
sparql = """
SELECT DISTINCT ?s ?p ?o ?label
WHERE {
?s ?p ?o .
FILTER (!isLiteral(?o))
OPTIONAL {?o rdfs:label ?label}
}
"""
# Throughout this class, we assign the layout to self.cyto_graph_layout multiple times.
# This is so that the graph refreshes the layout every time nodes are added or removed,
# which provides an optimal viewing experience.
class InteractiveViewer(W.VBox):
expand_button = trt.Instance(W.Button)
undo_button = trt.Instance(W.Button)
remove_temp_nodes_button = trt.Instance(W.Button)
cyto_graph = trt.Instance(ipycytoscape.CytoscapeWidget)
selected_node = trt.Instance(ipycytoscape.Node, allow_none=True)
rdf_graph = trt.Instance(rdflib.graph.Graph, allow_none=True)
cyto_style = trt.List(allow_none=True)
cyto_graph_layout = trt.Unicode(default_value="cola")
@trt.default("expand_button")
def _create_expand_button(self):
button = W.Button(
description="Expand Upon Selected Node",
layout=W.Layout(width="50%", height="40px"),
)
button.on_click(self.expand_button_clicked)
return button
@trt.default("undo_button")
def _create_undo_button(self):
button = W.Button(
description="Undo Last Expansion",
layout=W.Layout(width="25%", height="40px"),
disabled=True,
)
button.on_click(self.undo_expansion)
return button
@trt.default("remove_temp_nodes_button")
def _create_remove_temp_nodes_button(self):
button = W.Button(
description="Remove Temporary Nodes",
layout=W.Layout(width="25%", height="40px"),
disabled=False,
)
button.on_click(self.remove_temp_nodes)
return button
@trt.default("selected_node")
def _create_default_selected_node(self):
return None
@trt.default("cyto_style")
def _create_cyto_style(self):
return DEFAULT_CYTO_STYLE
@trt.default("rdf_graph")
def _create_rdf_graph(self):
return rdflib.Graph()
@trt.default("cyto_graph")
def _create_cyto_graph(self):
return ipycytoscape.CytoscapeWidget()
@trt.default("layout")
def _create_layout(self):
return W.Layout(width="80%")
@trt.observe("cyto_graph")
def update_cyto_graph(self, change):
self.cyto_graph.set_layout(name=self.cyto_graph_layout)
self.cyto_graph.set_style(self.cyto_style)
# on is a callback for cyto_graph instance (must be set on each instance)
self.cyto_graph.on("node", "click", self.log_node_clicks)
# Here we have to set the children again so that the changes propogate to the front end
# automatically. Ideally this would be done with traits but did not seem to work. LINK TO GITHUB ISSUE:
# https://github.com/jupyrdf/ipyradiant/issues/79
self.children = (
self.cyto_graph,
W.HBox(
children=[
self.expand_button,
self.undo_button,
self.remove_temp_nodes_button,
]
),
)
@trt.validate("children")
def validate_children(self, proposal):
children = proposal.value
if not children:
children = (
self.cyto_graph,
W.HBox(
children=[
self.expand_button,
self.undo_button,
self.remove_temp_nodes_button,
]
),
)
return children
def get_node(self, node: Node) -> Node:
"""
This function is used to find a node given the id of a node copy.
"""
for node_obj in self.cyto_graph.graph.nodes:
if node_obj.data["id"] == node["data"]["id"]:
return node_obj
# TODO: Make this function return None and log a warning if not node not found.
raise ValueError("Node not found in cytoscape.graph.nodes.")
def log_node_clicks(self, node: Node):
"""
This function works with registering a click on a node. This will mark the node as selected and change the color of the
selected node.
"""
node_object = self.get_node(node)
if self.selected_node == node_object:
node_object.classes = "clicked"
# NOTE: Class changes won't propogate to the front end for added nodes until
# the graph is updated.
# To fix this we create a random node and then quickly delete it so that the changes propogate.
self.update_cytoscape_frontend()
self.selected_node = node_object
def expand_button_clicked(self, button):
"""
This function expands a node by loading in its predicates and subjects when
a node is selected and the expand button is clicked.
"""
self.undo_button.disabled = False
if self.selected_node is None:
return None
new_data = GetOutgoingPredicateObjects.run_query(
graph=self.rdf_graph, s=self.selected_node.data["iri"]
)
objs = new_data["o"].tolist()
preds = new_data["p"].tolist()
labels = new_data["label"].tolist()
# add nodes
self.existing_node_ids = [
node.data["id"] for node in self.cyto_graph.graph.nodes
]
self.new_nodes = {}
self.new_edges = {}
for ii, x in enumerate(objs):
if str(x) not in self.existing_node_ids:
self.new_nodes[ii] = Node(
data={
"id": str(x),
"iri": x,
"_label": labels[ii] or str(x),
},
classes="temp",
)
self.cyto_graph.graph.add_node(self.new_nodes[ii])
self.new_edges[ii] = Edge(
data={
"source": self.selected_node.data["id"],
"target": str(x),
"iri": URItoID(preds[ii]),
},
classes="temp",
)
self.cyto_graph.graph.add_edge(self.new_edges[ii])
self.cyto_graph.set_layout(name=self.cyto_graph_layout)
def undo_expansion(self, button):
"""
This is a preliminary function for undoing expansions upon a node.
As of right now, a user can only undo the most recent expansion. After doing this,
the button will be disabled until a new expansion is made.
"""
self.undo_button.disabled = True
for node in self.new_nodes:
self.cyto_graph.graph.remove_node_by_id(self.new_nodes[node].data["id"])
for edge in self.new_edges:
try:
self.cyto_graph.graph.remove_edge(self.new_edges[edge])
except ValueError:
# edge already removed from graph because the node was removed earlier.
pass
self.cyto_graph.set_layout(name=self.cyto_graph_layout)
def remove_temp_nodes(self, button):
"""
This is a basic function that cycles through the graph and removes all nodes that
have the 'temp' style (i.e. nodes that are not starting nodes or have not been clicked on).
"""
nodes_to_remove = []
for node in self.cyto_graph.graph.nodes:
if node.classes == "temp":
nodes_to_remove.append(node.data["id"])
for node in nodes_to_remove:
self.cyto_graph.graph.remove_node_by_id(node)
# change edge color
for edge in self.cyto_graph.graph.edges:
edge.classes = "directed"
# propogate changes to front end using hack
self.update_cytoscape_frontend()
self.cyto_graph.set_layout(name=self.cyto_graph_layout)
self.undo_button.disabled = True
def update_cytoscape_frontend(self):
"""
This function quickly adds and deletes a node to update cytoscape front end. Looking to improve
it for future release.
"""
self.cyto_graph.graph.add_node(Node(data={"id": "random node"}))
self.cyto_graph.graph.remove_node_by_id("random node")
| en | 0.863131 | # Copyright (c) 2021 ipyradiant contributors. # Distributed under the terms of the Modified BSD License. # "label": "data(iri)", # "label": "data(iri)", Return all triples for non-Literal objects (and the optional object labels). SELECT DISTINCT ?s ?p ?o ?label WHERE { ?s ?p ?o . FILTER (!isLiteral(?o)) OPTIONAL {?o rdfs:label ?label} } # Throughout this class, we assign the layout to self.cyto_graph_layout multiple times. # This is so that the graph refreshes the layout every time nodes are added or removed, # which provides an optimal viewing experience. # on is a callback for cyto_graph instance (must be set on each instance) # Here we have to set the children again so that the changes propogate to the front end # automatically. Ideally this would be done with traits but did not seem to work. LINK TO GITHUB ISSUE: # https://github.com/jupyrdf/ipyradiant/issues/79 This function is used to find a node given the id of a node copy. # TODO: Make this function return None and log a warning if not node not found. This function works with registering a click on a node. This will mark the node as selected and change the color of the selected node. # NOTE: Class changes won't propogate to the front end for added nodes until # the graph is updated. # To fix this we create a random node and then quickly delete it so that the changes propogate. This function expands a node by loading in its predicates and subjects when a node is selected and the expand button is clicked. # add nodes This is a preliminary function for undoing expansions upon a node. As of right now, a user can only undo the most recent expansion. After doing this, the button will be disabled until a new expansion is made. # edge already removed from graph because the node was removed earlier. This is a basic function that cycles through the graph and removes all nodes that have the 'temp' style (i.e. nodes that are not starting nodes or have not been clicked on). 
# change edge color # propogate changes to front end using hack This function quickly adds and deletes a node to update cytoscape front end. Looking to improve it for future release. | 1.874906 | 2 |
examples/pesum_kediyaram/to_audio.py | nv-d/open-tamil | 2 | 6624355 | # -*- coding: utf8 -*-
# This file is distributed under MIT License
# 2015 <NAME> <<EMAIL>>
#
import datetime
import time
import os
import sys
import tamil
import winsound
import wave
def get_time():
time_as_string = time.ctime()
# access only the date
today = datetime.date.today()
dnt_now = datetime.datetime.now()
# access hour, minute, second and microsecond fields
return (dnt_now.hour, dnt_now.minute, dnt_now.second, dnt_now.microsecond)
def concat_audio_files(infiles, outfile):
data = []
for infile in infiles:
w = wave.open(infile, "rb")
data.append([w.getparams(), w.readframes(w.getnframes())])
w.close()
output = wave.open(outfile, "wb")
output.setparams(data[0][0])
for itr in range(len(infiles)):
output.writeframes(data[itr][1])
output.close()
return
def say_number_in_tamil(number, cleanup=False, voice_gender="female"):
if number < 0:
raise Exception("Negative numbers are not supported")
# 1) Generate the numeral for number
actual_fn = []
numeral = tamil.numeral.num2tamilstr(number, actual_fn)
# 2) Find the relevant audio file
infiles = []
for fn in actual_fn:
infiles.append(os.path.join("data", "audio", voice_gender, fn + ".wav"))
# 3) Generate a single audio file
outfile = "audio_" + str(number).replace(".", "_") + ".wav"
concat_audio_files(infiles, outfile)
# 4) Play this newly created audio file
winsound.PlaySound(outfile, winsound.SND_NOSTOP)
# 5) Cleanup if requested
if cleanup:
os.unlink(outfile)
return
def say_time():
hh, mm, ss, us = get_time()
pos_data = ("hour", "minute", "sec", "usec")
hr_data = (hh, mm, ss, us)
# murpagal/pirpagal / kalai/maalai
# skip microseconds data
for pos, hr in zip(pos_data[0:3], hr_data[0:3]):
say_number_in_tamil(hr, cleanup=True) # ayinthu, narpathi-ettu, aympathu
# say_string_in_tamil(pos) #mani, nimidam, vinadigal
return
if len(sys.argv) >= 2:
say_time()
while True:
try:
number = input(u"Enter a number >> ")
except Exception as e:
print("Exception => ", str(e))
continue
say_number_in_tamil(number)
sys.exit(0)
# numerale = u'ஓர் ஆயிரம் புள்ளி நான்கு ஐந்து'
# filenames = ['one_thousand_prefix','thousands_0','pulli','units_4','units_5']
# # ideally compose the audio stream and run some kind of smoothing filter
# for fn in filenames:
# winsound.PlaySound(os.path.join('data','audio','female',fn+'.wav'),winsound.SND_NOSTOP) #winsound.SND_ASYNC)
| # -*- coding: utf8 -*-
# This file is distributed under MIT License
# 2015 <NAME> <<EMAIL>>
#
import datetime
import time
import os
import sys
import tamil
import winsound
import wave
def get_time():
time_as_string = time.ctime()
# access only the date
today = datetime.date.today()
dnt_now = datetime.datetime.now()
# access hour, minute, second and microsecond fields
return (dnt_now.hour, dnt_now.minute, dnt_now.second, dnt_now.microsecond)
def concat_audio_files(infiles, outfile):
data = []
for infile in infiles:
w = wave.open(infile, "rb")
data.append([w.getparams(), w.readframes(w.getnframes())])
w.close()
output = wave.open(outfile, "wb")
output.setparams(data[0][0])
for itr in range(len(infiles)):
output.writeframes(data[itr][1])
output.close()
return
def say_number_in_tamil(number, cleanup=False, voice_gender="female"):
if number < 0:
raise Exception("Negative numbers are not supported")
# 1) Generate the numeral for number
actual_fn = []
numeral = tamil.numeral.num2tamilstr(number, actual_fn)
# 2) Find the relevant audio file
infiles = []
for fn in actual_fn:
infiles.append(os.path.join("data", "audio", voice_gender, fn + ".wav"))
# 3) Generate a single audio file
outfile = "audio_" + str(number).replace(".", "_") + ".wav"
concat_audio_files(infiles, outfile)
# 4) Play this newly created audio file
winsound.PlaySound(outfile, winsound.SND_NOSTOP)
# 5) Cleanup if requested
if cleanup:
os.unlink(outfile)
return
def say_time():
hh, mm, ss, us = get_time()
pos_data = ("hour", "minute", "sec", "usec")
hr_data = (hh, mm, ss, us)
# murpagal/pirpagal / kalai/maalai
# skip microseconds data
for pos, hr in zip(pos_data[0:3], hr_data[0:3]):
say_number_in_tamil(hr, cleanup=True) # ayinthu, narpathi-ettu, aympathu
# say_string_in_tamil(pos) #mani, nimidam, vinadigal
return
if len(sys.argv) >= 2:
say_time()
while True:
try:
number = input(u"Enter a number >> ")
except Exception as e:
print("Exception => ", str(e))
continue
say_number_in_tamil(number)
sys.exit(0)
# numerale = u'ஓர் ஆயிரம் புள்ளி நான்கு ஐந்து'
# filenames = ['one_thousand_prefix','thousands_0','pulli','units_4','units_5']
# # ideally compose the audio stream and run some kind of smoothing filter
# for fn in filenames:
# winsound.PlaySound(os.path.join('data','audio','female',fn+'.wav'),winsound.SND_NOSTOP) #winsound.SND_ASYNC)
| en | 0.536063 | # -*- coding: utf8 -*- # This file is distributed under MIT License # 2015 <NAME> <<EMAIL>> # # access only the date # access hour, minute, second and microsecond fields # 1) Generate the numeral for number # 2) Find the relevant audio file # 3) Generate a single audio file # 4) Play this newly created audio file # 5) Cleanup if requested # murpagal/pirpagal / kalai/maalai # skip microseconds data # ayinthu, narpathi-ettu, aympathu # say_string_in_tamil(pos) #mani, nimidam, vinadigal # numerale = u'ஓர் ஆயிரம் புள்ளி நான்கு ஐந்து' # filenames = ['one_thousand_prefix','thousands_0','pulli','units_4','units_5'] # # ideally compose the audio stream and run some kind of smoothing filter # for fn in filenames: # winsound.PlaySound(os.path.join('data','audio','female',fn+'.wav'),winsound.SND_NOSTOP) #winsound.SND_ASYNC) | 3.086103 | 3 |
pandas/tseries/tests/test_plotting.py | bjacobowski/pandas | 0 | 6624356 | <reponame>bjacobowski/pandas<filename>pandas/tseries/tests/test_plotting.py
from datetime import datetime, timedelta, date, time
import nose
from pandas.compat import lrange, zip
import numpy as np
from numpy.testing.decorators import slow
from pandas import Index, Series, DataFrame
from pandas.tseries.index import date_range, bdate_range
from pandas.tseries.offsets import DateOffset, Week
from pandas.tseries.period import period_range, Period, PeriodIndex
from pandas.tseries.resample import DatetimeIndex
from pandas.util.testing import assert_series_equal, ensure_clean
import pandas.util.testing as tm
from pandas.tests.test_graphics import _skip_if_no_scipy_gaussian_kde
@tm.mplskip
class TestTSPlot(tm.TestCase):
def setUp(self):
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
def tearDown(self):
tm.close()
@slow
def test_ts_plot_with_tz(self):
# GH2877
index = date_range('1/1/2011', periods=2, freq='H',
tz='Europe/Brussels')
ts = Series([188.5, 328.25], index=index)
_check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
import matplotlib.pyplot as plt
df = DataFrame(np.random.randn(10, 9), index=range(10))
ax = df.plot(fontsize=2)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
self.assertEqual(label.get_fontsize(), 2)
@slow
def test_frame_inferred(self):
# inferred freq
import matplotlib.pyplot as plt
idx = date_range('1/1/1987', freq='MS', periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]}, idx)
ax = df.plot() # it works
self.assertEqual(len(ax.get_lines()), 1) #B was plotted
plt.close(plt.gcf())
self.assertRaises(TypeError, df['A'].plot)
@slow
def test_tsplot(self):
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
ax = plt.gca()
ts = tm.makeTimeSeries()
f = lambda *args, **kwds: tsplot(s, plt.Axes.plot, *args, **kwds)
for s in self.period_ser:
_check_plot_works(f, s.index.freq, ax=ax, series=s)
for s in self.datetime_ser:
_check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
for s in self.period_ser:
_check_plot_works(s.plot, ax=ax)
for s in self.datetime_ser:
_check_plot_works(s.plot, ax=ax)
ax = ts.plot(style='k')
self.assertEqual((0., 0., 0.), ax.get_lines()[0].get_color())
def test_both_style_and_color(self):
import matplotlib.pyplot as plt
ts = tm.makeTimeSeries()
self.assertRaises(ValueError, ts.plot, style='b-', color='#000099')
s = ts.reset_index(drop=True)
self.assertRaises(ValueError, s.plot, style='b-', color='#000099')
@slow
def test_high_freq(self):
freaks = ['ms', 'us']
for freq in freaks:
rng = date_range('1/1/2012', periods=100000, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot)
def test_get_datevalue(self):
from pandas.tseries.converter import get_datevalue
self.assertIsNone(get_datevalue(None, 'D'))
self.assertEqual(get_datevalue(1987, 'A'), 1987)
self.assertEqual(get_datevalue(Period(1987, 'A'), 'M'),
Period('1987-12', 'M').ordinal)
self.assertEqual(get_datevalue('1/1/1987', 'D'),
Period('1987-1-1', 'D').ordinal)
@slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
self.assertEqual(expected_string, ax.format_coord(first_x, first_y))
except (ValueError):
raise nose.SkipTest("skipping test because issue forming test comparison GH7664")
annual = Series(1, index=date_range('2014-01-01', periods=3, freq='A-DEC'))
check_format_of_first_point(annual.plot(), 't = 2014 y = 1.000000')
# note this is added to the annual plot already in existence, and changes its freq field
daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
check_format_of_first_point(daily.plot(), 't = 2014-01-01 y = 1.000000')
tm.close()
# tsplot
import matplotlib.pyplot as plt
from pandas.tseries.plotting import tsplot
tsplot(annual, plt.Axes.plot)
check_format_of_first_point(plt.gca(), 't = 2014 y = 1.000000')
tsplot(daily, plt.Axes.plot)
check_format_of_first_point(plt.gca(), 't = 2014-01-01 y = 1.000000')
@slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
@slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
rng = date_range('2001-1-1', '2001-1-10')
ts = Series(lrange(len(rng)), rng)
ts = ts[:3].append(ts[5:])
ax = ts.plot()
self.assertFalse(hasattr(ax, 'freq'))
@slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq='BQS', periods=10)
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1),
datetime(2000, 1, 6),
datetime(2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_uhf(self):
import pandas.tseries.converter as conv
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
df = DataFrame(np.random.randn(len(idx), 2), idx)
ax = df.plot()
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
rs = str(label.get_text())
if len(rs):
self.assertEqual(xp, rs)
@slow
def test_irreg_hf(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
df = DataFrame(np.random.randn(len(idx), 2), idx)
irreg = df.ix[[0, 1, 3, 4]]
ax = irreg.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
self.assertTrue((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all())
plt.clf()
fig.add_subplot(111)
df2 = df.copy()
df2.index = df.index.asobject
ax = df2.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
self.assertTrue((np.fabs(diffs[1:] - sec) < 1e-8).all())
def test_irregular_datetime64_repr_bug(self):
import matplotlib.pyplot as plt
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
ret = ser.plot()
self.assertIsNotNone(ret)
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
self.assertEqual(rs, xp)
def test_business_freq(self):
import matplotlib.pyplot as plt
bts = tm.makePeriodSeries()
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
bts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'B')
@slow
def test_business_freq_convert(self):
n = tm.N
tm.N = 300
bts = tm.makeTimeSeries().asfreq('BM')
tm.N = n
ts = bts.to_period('M')
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
ts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'M')
def test_nonzero_base(self):
# GH2571
idx = (date_range('2012-12-20', periods=24, freq='H') +
timedelta(minutes=30))
df = DataFrame(np.arange(24), index=idx)
ax = df.plot()
rs = ax.get_lines()[0].get_xdata()
self.assertFalse(Index(rs).is_normalized)
def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
ax = bts.plot()
idx = ax.get_lines()[0].get_xdata()
tm.assert_numpy_array_equal(bts.index.to_period(), PeriodIndex(idx))
@slow
def test_axis_limits(self):
import matplotlib.pyplot as plt
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(result[0], xlim[0] - 5)
self.assertEqual(result[1], xlim[1] + 10)
# string
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim('1/1/2000', '4/1/2000')
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
# datetim
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
fig = ax.get_figure()
plt.close(fig)
ser = tm.makeTimeSeries()
ax = ser.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
ax = df.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.tseries.converter as conv
self.assertEqual(conv.get_finder('B'), conv._daily_finder)
self.assertEqual(conv.get_finder('D'), conv._daily_finder)
self.assertEqual(conv.get_finder('M'), conv._monthly_finder)
self.assertEqual(conv.get_finder('Q'), conv._quarterly_finder)
self.assertEqual(conv.get_finder('A'), conv._annual_finder)
self.assertEqual(conv.get_finder('W'), conv._daily_finder)
@slow
def test_finder_daily(self):
import matplotlib.pyplot as plt
xp = Period('1999-1-1', freq='B').ordinal
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
for n in day_lst:
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_quarterly(self):
import matplotlib.pyplot as plt
xp = Period('1988Q1').ordinal
yrs = [3.5, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_monthly(self):
import matplotlib.pyplot as plt
xp = Period('Jan 1988').ordinal
yrs = [1.15, 2.5, 4, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1989Q1', 'M').ordinal
self.assertEqual(rs, xp)
@slow
def test_finder_annual(self):
import matplotlib.pyplot as plt
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, Period(xp[i], freq='A').ordinal)
plt.close(ax.get_figure())
@slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range('1/1/1999', freq='Min', periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='Min').ordinal
self.assertEqual(rs, xp)
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='H').ordinal
self.assertEqual(rs, xp)
@slow
def test_gaps(self):
import matplotlib.pyplot as plt
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
ax = ts.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
ax = ts.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
ax = ser.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
@slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
ax = low.plot()
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(len(ax.right_ax.get_lines()), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
@slow
def test_secondary_y(self):
import matplotlib.pyplot as plt
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata())
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(), 'default')
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
self.assertFalse(hasattr(ax, 'left_ax'))
self.assertTrue(hasattr(ax, 'right_ax'))
self.assertTrue(hasattr(ax2, 'left_ax'))
self.assertFalse(hasattr(ax2, 'right_ax'))
@slow
def test_secondary_y_ts(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(), 'default')
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
@slow
def test_secondary_kde(self):
    """A density (KDE) plot honors secondary_y: ticks land on the right."""
    tm._skip_if_no_scipy()
    _skip_if_no_scipy_gaussian_kde()
    import matplotlib.pyplot as plt
    ser = Series(np.random.randn(10))
    ax = ser.plot(secondary_y=True, kind='density')
    self.assertTrue(hasattr(ax, 'left_ax'))
    self.assertFalse(hasattr(ax, 'right_ax'))
    fig = ax.get_figure()
    axes = fig.get_axes()
    # second axes in the figure is the secondary one
    self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar(self):
    """A bar plot drawn with secondary_y puts its y-ticks on the right."""
    data = Series(np.random.randn(10))
    axis = data.plot(secondary_y=True, kind='bar')
    all_axes = axis.get_figure().get_axes()
    self.assertEqual(all_axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_frame(self):
    """Columns listed in secondary_y get right-side ticks; others keep
    the default tick position."""
    frame = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
    axes = frame.plot(secondary_y=['a', 'c'], subplots=True)
    expected_positions = ['right', 'default', 'right']
    for subax, position in zip(axes, expected_positions):
        self.assertEqual(subax.get_yaxis().get_ticks_position(), position)
@slow
def test_secondary_bar_frame(self):
    """Same as test_secondary_frame but for bar plots."""
    frame = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
    axes = frame.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
    expected_positions = ['right', 'default', 'right']
    for subax, position in zip(axes, expected_positions):
        self.assertEqual(subax.get_yaxis().get_ticks_position(), position)
def test_mixed_freq_regular_first(self):
    """A regular series plotted first pins the shared x-axis to its
    business-day period frequency; a later irregular subset follows it."""
    import matplotlib.pyplot as plt
    s1 = tm.makeTimeSeries()
    s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
    ax = s1.plot()
    ax2 = s2.plot(style='g')
    lines = ax2.get_lines()
    idx1 = PeriodIndex(lines[0].get_xdata())
    idx2 = PeriodIndex(lines[1].get_xdata())
    # both lines were drawn in business-day ('B') period space
    self.assertTrue(idx1.equals(s1.index.to_period('B')))
    self.assertTrue(idx2.equals(s2.index.to_period('B')))
    # x limits span the full regular series, in period ordinals
    left, right = ax2.get_xlim()
    pidx = s1.index.to_period()
    self.assertEqual(left, pidx[0].ordinal)
    self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first(self):
    """When the irregular subset is plotted first, no frequency is set on
    the axes and both lines keep raw datetime x-data."""
    import matplotlib.pyplot as plt
    s1 = tm.makeTimeSeries()
    s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
    s2.plot(style='g')
    ax = s1.plot()
    # irregular-first means no period conversion happened
    self.assertFalse(hasattr(ax, 'freq'))
    lines = ax.get_lines()
    x1 = lines[0].get_xdata()
    tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
    x2 = lines[1].get_xdata()
    tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_regular_first_df(self):
    """DataFrame variant of test_mixed_freq_regular_first."""
    # GH 9852
    import matplotlib.pyplot as plt
    s1 = tm.makeTimeSeries().to_frame()
    s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
    ax = s1.plot()
    ax2 = s2.plot(style='g', ax=ax)
    lines = ax2.get_lines()
    idx1 = PeriodIndex(lines[0].get_xdata())
    idx2 = PeriodIndex(lines[1].get_xdata())
    # both lines were drawn in business-day ('B') period space
    self.assertTrue(idx1.equals(s1.index.to_period('B')))
    self.assertTrue(idx2.equals(s2.index.to_period('B')))
    left, right = ax2.get_xlim()
    pidx = s1.index.to_period()
    self.assertEqual(left, pidx[0].ordinal)
    self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first_df(self):
    """DataFrame variant of test_mixed_freq_irregular_first."""
    # GH 9852
    import matplotlib.pyplot as plt
    s1 = tm.makeTimeSeries().to_frame()
    s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
    ax = s2.plot(style='g')
    ax = s1.plot(ax=ax)
    # irregular-first means no period conversion happened
    self.assertFalse(hasattr(ax, 'freq'))
    lines = ax.get_lines()
    x1 = lines[0].get_xdata()
    tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
    x2 = lines[1].get_xdata()
    tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_hf_first(self):
    """With the high-frequency (daily) series plotted first, the later
    monthly series is upsampled so both lines share the 'D' frequency."""
    daily_index = date_range('1/1/1999', periods=365, freq='D')
    monthly_index = date_range('1/1/1999', periods=12, freq='M')
    daily = Series(np.random.randn(len(daily_index)), daily_index)
    monthly = Series(np.random.randn(len(monthly_index)), monthly_index)
    daily.plot()
    ax = monthly.plot()
    for line in ax.get_lines():
        self.assertEqual(PeriodIndex(data=line.get_xdata()).freq, 'D')
@slow
def test_mixed_freq_alignment(self):
    """An hourly series and its minutely interpolation must start at the
    same x coordinate when plotted on the same axes."""
    ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
    ts_data = np.random.randn(12)
    ts = Series(ts_data, index=ts_ind)
    ts2 = ts.asfreq('T').interpolate()
    ax = ts.plot()
    ts2.plot(style='r')
    self.assertEqual(ax.lines[0].get_xdata()[0],
                     ax.lines[1].get_xdata()[0])
@slow
def test_mixed_freq_lf_first(self):
    """When the low-frequency series is plotted first, a later
    high-frequency plot upsamples the axis to the finer frequency."""
    import matplotlib.pyplot as plt
    idxh = date_range('1/1/1999', periods=365, freq='D')
    idxl = date_range('1/1/1999', periods=12, freq='M')
    high = Series(np.random.randn(len(idxh)), idxh)
    low = Series(np.random.randn(len(idxl)), idxl)
    low.plot(legend=True)
    ax = high.plot(legend=True)
    # both lines end up in daily period space
    for l in ax.get_lines():
        self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
    # both series contribute a legend entry
    leg = ax.get_legend()
    self.assertEqual(len(leg.texts), 2)
    plt.close(ax.get_figure())
    # same check at minute/hour granularity
    idxh = date_range('1/1/1999', periods=240, freq='T')
    idxl = date_range('1/1/1999', periods=4, freq='H')
    high = Series(np.random.randn(len(idxh)), idxh)
    low = Series(np.random.randn(len(idxl)), idxl)
    low.plot()
    ax = high.plot()
    for l in ax.get_lines():
        self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'T')
def test_mixed_freq_irreg_period(self):
    """Plotting an irregular timestamp series together with a
    PeriodIndex series must not raise."""
    full = tm.makeTimeSeries()
    picks = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]
    irregular = full[picks]
    prng = period_range('1/3/2000', periods=30, freq='B')
    periodic = Series(np.random.randn(len(prng)), prng)
    irregular.plot()
    periodic.plot()
@slow
def test_to_weekly_resampling(self):
    """A monthly series plotted after a weekly one is upsampled so both
    lines share the weekly frequency (via both .plot and tsplot)."""
    idxh = date_range('1/1/1999', periods=52, freq='W')
    idxl = date_range('1/1/1999', periods=12, freq='M')
    high = Series(np.random.randn(len(idxh)), idxh)
    low = Series(np.random.randn(len(idxl)), idxl)
    high.plot()
    ax = low.plot()
    for l in ax.get_lines():
        self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
    # tsplot
    from pandas.tseries.plotting import tsplot
    import matplotlib.pyplot as plt
    tsplot(high, plt.Axes.plot)
    lines = tsplot(low, plt.Axes.plot)
    for l in lines:
        # BUG FIX: was assertTrue(a, b), which treats the second argument
        # as a failure message and never compares the two frequencies.
        self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
@slow
def test_from_weekly_resampling(self):
    """A weekly series plotted after a monthly one keeps the weekly
    frequency; the monthly line is mapped onto weekly period ordinals."""
    idxh = date_range('1/1/1999', periods=52, freq='W')
    idxl = date_range('1/1/1999', periods=12, freq='M')
    high = Series(np.random.randn(len(idxh)), idxh)
    low = Series(np.random.randn(len(idxl)), idxl)
    low.plot()
    ax = high.plot()
    expected_h = idxh.to_period().asi8
    # weekly ordinals of the 12 monthly points (precomputed for 1999)
    expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
                           1553, 1558, 1562])
    for l in ax.get_lines():
        # BUG FIX: was assertTrue(a, b) — the second argument was a failure
        # message, so the frequencies were never actually compared.
        self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
        xdata = l.get_xdata(orig=False)
        if len(xdata) == 12:  # idxl lines
            self.assert_numpy_array_equal(xdata, expected_l)
        else:
            self.assert_numpy_array_equal(xdata, expected_h)
    tm.close()
    # tsplot
    from pandas.tseries.plotting import tsplot
    import matplotlib.pyplot as plt
    tsplot(low, plt.Axes.plot)
    lines = tsplot(high, plt.Axes.plot)
    for l in lines:
        # BUG FIX: same assertTrue(a, b) misuse as above.
        self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
        xdata = l.get_xdata(orig=False)
        if len(xdata) == 12:  # idxl lines
            self.assert_numpy_array_equal(xdata, expected_l)
        else:
            self.assert_numpy_array_equal(xdata, expected_h)
@slow
def test_from_resampling_area_line_mixed(self):
    """Mixing stacked area and line plots across weekly/monthly frames
    keeps everything in weekly period space, whichever is plotted first;
    stacked y-values must accumulate column by column."""
    idxh = date_range('1/1/1999', periods=52, freq='W')
    idxl = date_range('1/1/1999', periods=12, freq='M')
    high = DataFrame(np.random.rand(len(idxh), 3),
                     index=idxh, columns=[0, 1, 2])
    low = DataFrame(np.random.rand(len(idxl), 3),
                    index=idxl, columns=[0, 1, 2])
    # low to high
    for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
        ax = low.plot(kind=kind1, stacked=True)
        ax = high.plot(kind=kind2, stacked=True, ax=ax)
        # check low dataframe result
        # weekly ordinals of the 12 monthly points (precomputed for 1999)
        expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
                               1553, 1558, 1562])
        expected_y = np.zeros(len(expected_x))
        for i in range(3):
            l = ax.lines[i]
            self.assertEqual(PeriodIndex(l.get_xdata()).freq, idxh.freq)
            self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
            # check stacked values are correct
            expected_y += low[i].values
            self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
        # check high dataframe result
        expected_x = idxh.to_period().asi8
        expected_y = np.zeros(len(expected_x))
        for i in range(3):
            l = ax.lines[3 + i]
            self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
            self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
            expected_y += high[i].values
            self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
    # high to low
    for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
        ax = high.plot(kind=kind1, stacked=True)
        ax = low.plot(kind=kind2, stacked=True, ax=ax)
        # check high dataframe result
        expected_x = idxh.to_period().asi8
        expected_y = np.zeros(len(expected_x))
        for i in range(3):
            l = ax.lines[i]
            self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
            self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
            expected_y += high[i].values
            self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
        # check low dataframe result
        expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
                               1553, 1558, 1562])
        expected_y = np.zeros(len(expected_x))
        for i in range(3):
            l = ax.lines[3 + i]
            self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
            self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
            expected_y += low[i].values
            self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
@slow
def test_mixed_freq_second_millisecond(self):
    """Mixing second and 100-millisecond series resolves to the finer
    'L' (millisecond) frequency regardless of plotting order."""
    # GH 7772, GH 7760
    idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
    idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
    high = Series(np.random.randn(len(idxh)), idxh)
    low = Series(np.random.randn(len(idxl)), idxl)
    # high to low
    high.plot()
    ax = low.plot()
    self.assertEqual(len(ax.get_lines()), 2)
    for l in ax.get_lines():
        self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
    tm.close()
    # low to high
    low.plot()
    ax = high.plot()
    self.assertEqual(len(ax.get_lines()), 2)
    for l in ax.get_lines():
        self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
@slow
def test_irreg_dtypes(self):
    """Irregular indexes of datetime.date objects and of object-dtype
    datetime64 values both plot without error."""
    # python datetime.date index
    dates = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
    frame = DataFrame(np.random.randn(len(dates), 3),
                      Index(dates, dtype=object))
    _check_plot_works(frame.plot)
    # np.datetime64 index converted to object dtype
    didx = date_range('1/1/2000', periods=10)
    didx = didx[[0, 2, 5, 9]].asobject
    frame = DataFrame(np.random.randn(len(didx), 3), didx)
    _check_plot_works(frame.plot)
@slow
def test_time(self):
    """Tick labels on a time-of-day (datetime.time) index render as
    HH:MM:SS, both initially and after changing the x limits."""
    t = datetime(1, 1, 1, 3, 30, 0)
    deltas = np.random.randint(1, 20, 3).cumsum()
    ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
    df = DataFrame({'a': np.random.randn(len(ts)),
                    'b': np.random.randn(len(ts))},
                   index=ts)
    ax = df.plot()

    def _check_tick_labels(ax):
        # Non-empty labels must equal the tick position (seconds since
        # midnight) rendered back as HH:MM:SS.
        for tick, label in zip(ax.get_xticks(), ax.get_xticklabels()):
            m, s = divmod(int(tick), 60)
            h, m = divmod(m, 60)
            xp = label.get_text()
            if len(xp) > 0:
                rs = time(h, m, s).strftime('%H:%M:%S')
                self.assertEqual(xp, rs)

    # verify tick labels
    _check_tick_labels(ax)
    # change xlim and check the tick labels again
    ax.set_xlim('1:30', '5:00')
    _check_tick_labels(ax)
@slow
def test_time_musec(self):
    """Tick labels for a microsecond-resolution time index render as
    HH:MM:SS.ffffff."""
    t = datetime(1, 1, 1, 3, 30, 0)
    deltas = np.random.randint(1, 20, 3).cumsum()
    ts = np.array([(t + timedelta(microseconds=int(x))).time()
                   for x in deltas])
    df = DataFrame({'a': np.random.randn(len(ts)),
                    'b': np.random.randn(len(ts))},
                   index=ts)
    ax = df.plot()
    # verify tick labels
    ticks = ax.get_xticks()
    labels = ax.get_xticklabels()
    for t, l in zip(ticks, labels):
        m, s = divmod(int(t), 60)
        # NOTE(review): `us` is computed but never used below — the expected
        # string is built from time(h, m, s), whose %f is always 000000.
        # Looks like an incomplete check; confirm before tightening it.
        us = int((t - int(t)) * 1e6)
        h, m = divmod(m, 60)
        xp = l.get_text()
        if len(xp) > 0:
            rs = time(h, m, s).strftime('%H:%M:%S.%f')
            self.assertEqual(xp, rs)
@slow
def test_secondary_upsample(self):
    """Upsampling onto a secondary axis keeps both axes in daily
    period space."""
    idxh = date_range('1/1/1999', periods=365, freq='D')
    idxl = date_range('1/1/1999', periods=12, freq='M')
    high = Series(np.random.randn(len(idxh)), idxh)
    low = Series(np.random.randn(len(idxl)), idxl)
    low.plot()
    ax = high.plot(secondary_y=True)
    for l in ax.get_lines():
        self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
    # the returned axes is the secondary one
    self.assertTrue(hasattr(ax, 'left_ax'))
    self.assertFalse(hasattr(ax, 'right_ax'))
    for l in ax.left_ax.get_lines():
        self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
@slow
def test_secondary_legend(self):
    """Legend entries for secondary_y columns carry a '(right)' suffix
    unless mark_right=False; the secondary axes never grows its own
    legend; legend lines keep distinct colors."""
    import matplotlib.pyplot as plt

    def _check_labels(leg, expected):
        # Compare legend text entries against the expected label strings.
        for text, exp in zip(leg.get_texts(), expected):
            self.assertEqual(text.get_text(), exp)

    def _check_distinct_colors(leg):
        colors = set()
        for line in leg.get_lines():
            colors.add(line.get_color())
        # TODO: color cycle problems
        self.assertEqual(len(colors), 4)

    fig = plt.gcf()
    plt.clf()
    ax = fig.add_subplot(211)
    # ts
    df = tm.makeTimeDataFrame()
    ax = df.plot(secondary_y=['A', 'B'])
    leg = ax.get_legend()
    self.assertEqual(len(leg.get_lines()), 4)
    _check_labels(leg, ['A (right)', 'B (right)', 'C', 'D'])
    self.assertIsNone(ax.right_ax.get_legend())
    _check_distinct_colors(leg)
    plt.clf()
    ax = fig.add_subplot(211)
    ax = df.plot(secondary_y=['A', 'C'], mark_right=False)
    leg = ax.get_legend()
    self.assertEqual(len(leg.get_lines()), 4)
    _check_labels(leg, ['A', 'B', 'C', 'D'])
    plt.clf()
    ax = df.plot(kind='bar', secondary_y=['A'])
    _check_labels(ax.get_legend(), ['A (right)', 'B'])
    plt.clf()
    ax = df.plot(kind='bar', secondary_y=['A'], mark_right=False)
    _check_labels(ax.get_legend(), ['A', 'B'])
    plt.clf()
    ax = fig.add_subplot(211)
    df = tm.makeTimeDataFrame()
    ax = df.plot(secondary_y=['C', 'D'])
    leg = ax.get_legend()
    self.assertEqual(len(leg.get_lines()), 4)
    self.assertIsNone(ax.right_ax.get_legend())
    _check_distinct_colors(leg)
    # non-ts
    df = tm.makeDataFrame()
    plt.clf()
    ax = fig.add_subplot(211)
    ax = df.plot(secondary_y=['A', 'B'])
    leg = ax.get_legend()
    self.assertEqual(len(leg.get_lines()), 4)
    self.assertIsNone(ax.right_ax.get_legend())
    _check_distinct_colors(leg)
    plt.clf()
    ax = fig.add_subplot(211)
    ax = df.plot(secondary_y=['C', 'D'])
    leg = ax.get_legend()
    self.assertEqual(len(leg.get_lines()), 4)
    self.assertIsNone(ax.right_ax.get_legend())
    _check_distinct_colors(leg)
def test_format_date_axis(self):
    """Date tick labels produced by DataFrame.plot are rotated 30 degrees."""
    rng = date_range('1/1/2012', periods=12, freq='M')
    frame = DataFrame(np.random.randn(len(rng), 3), rng)
    xaxis = frame.plot().get_xaxis()
    for label in xaxis.get_ticklabels():
        if len(label.get_text()) > 0:
            self.assertEqual(label.get_rotation(), 30)
@slow
def test_ax_plot(self):
    """Plotting a DatetimeIndex directly through a raw matplotlib axes
    keeps the original datetimes as x data."""
    import matplotlib.pyplot as plt
    x = DatetimeIndex(start='2012-01-02', periods=10,
                      freq='D')
    y = lrange(len(x))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    lines = ax.plot(x, y, label='Y')
    tm.assert_numpy_array_equal(DatetimeIndex(lines[0].get_xdata()), x)
@slow
def test_mpl_nopandas(self):
    """plot_date with plain ordinals keeps the x values intact even
    without pandas involvement."""
    import matplotlib.pyplot as plt
    dates = [date(2008, 12, 31), date(2009, 1, 31)]
    ordinals = [d.toordinal() for d in dates]
    values1 = np.arange(10.0, 11.0, 0.5)
    values2 = np.arange(11.0, 12.0, 0.5)
    kw = dict(fmt='-', lw=4)
    plt.close('all')
    ax = plt.figure().add_subplot(111)
    ax.plot_date(ordinals, values1, **kw)
    ax.plot_date(ordinals, values2, **kw)
    line1, line2 = ax.get_lines()
    expected = np.array(ordinals)
    tm.assert_numpy_array_equal(expected, line1.get_xydata()[:, 0])
    tm.assert_numpy_array_equal(expected, line2.get_xydata()[:, 0])
@slow
def test_irregular_ts_shared_ax_xlim(self):
    """GH 2960: after plotting two halves of an irregular series on one
    axes, the x limits span the full index."""
    base = tm.makeTimeSeries()[:20]
    irregular = base[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
    # left section first, then the right section on the same axes
    ax = irregular[:5].plot()
    irregular[5:].plot(ax=ax)
    lo, hi = ax.get_xlim()
    self.assertEqual(lo, irregular.index.min().toordinal())
    self.assertEqual(hi, irregular.index.max().toordinal())
@slow
def test_secondary_y_non_ts_xlim(self):
    """GH 3490: a secondary-y plot extends, but never shrinks, the
    shared x limits (plain integer index)."""
    first = Series(1, index=[1, 2, 3, 4])
    second = Series(2, index=[5, 6, 7, 8])
    ax = first.plot()
    before = ax.get_xlim()
    second.plot(secondary_y=True, ax=ax)
    after = ax.get_xlim()
    self.assertEqual(before[0], after[0])
    self.assertTrue(before[1] < after[1])
@slow
def test_secondary_y_regular_ts_xlim(self):
    """GH 3490: same xlim-extension contract for regular DatetimeIndex
    series on primary and secondary axes."""
    first = Series(1, index=date_range(start='2000-01-01', periods=4,
                                       freq='D'))
    second = Series(2, index=date_range(start='2000-01-05', periods=4,
                                        freq='D'))
    ax = first.plot()
    before = ax.get_xlim()
    second.plot(secondary_y=True, ax=ax)
    after = ax.get_xlim()
    self.assertEqual(before[0], after[0])
    self.assertTrue(before[1] < after[1])
@slow
def test_secondary_y_mixed_freq_ts_xlim(self):
    """GH 3490: downsampling onto a secondary axis must leave the
    existing x limits untouched."""
    rng = date_range('2000-01-01', periods=10000, freq='min')
    ts = Series(1, index=rng)
    ax = ts.plot()
    left_before, right_before = ax.get_xlim()
    ts.resample('D').plot(secondary_y=True, ax=ax)
    left_after, right_after = ax.get_xlim()
    # a downsample should not have changed either limit
    self.assertEqual(left_before, left_after)
    self.assertEqual(right_before, right_after)
@slow
def test_secondary_y_irregular_ts_xlim(self):
    """GH 3490: secondary-axis limits survive a later plot on the
    primary axes for irregular series."""
    ts = tm.makeTimeSeries()[:20]
    ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
    ax = ts_irregular[:5].plot()
    # plot higher-x values on secondary axis
    ts_irregular[5:].plot(secondary_y=True, ax=ax)
    # ensure secondary limits aren't overwritten by plot on primary
    ts_irregular[:5].plot(ax=ax)
    left, right = ax.get_xlim()
    self.assertEqual(left, ts_irregular.index.min().toordinal())
    self.assertEqual(right, ts_irregular.index.max().toordinal())
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
    """Call plotting function *f*, sanity-check the axis frequency, and
    verify the figure can be saved; the figure is always closed afterwards.

    Parameters
    ----------
    f : callable
        Plotting callable to exercise (e.g. a bound ``Series.plot``).
    freq : optional
        Expected ``ax.freq`` when the axes had no prior frequency.
    series : optional
        Series whose index frequency should match ``ax.freq``.
    """
    import matplotlib.pyplot as plt
    fig = plt.gcf()
    try:
        plt.clf()
        ax = fig.add_subplot(211)
        # remember any frequency already attached to the target axes
        orig_ax = kwargs.pop('ax', plt.gca())
        orig_axfreq = getattr(orig_ax, 'freq', None)
        ret = f(*args, **kwargs)
        assert ret is not None  # do something more intelligent
        ax = kwargs.pop('ax', plt.gca())
        if series is not None:
            dfreq = series.index.freq
            if isinstance(dfreq, DateOffset):
                dfreq = dfreq.rule_code
            if orig_axfreq is None:
                assert ax.freq == dfreq
        if freq is not None and orig_axfreq is None:
            assert ax.freq == freq
        ax = fig.add_subplot(212)
        try:
            # re-plot on an explicit second subplot; best-effort only
            kwargs['ax'] = ax
            ret = f(*args, **kwargs)
            assert ret is not None  # do something more intelligent
        except Exception:
            pass
        # the figure must be serializable to disk
        with ensure_clean(return_filelike=True) as path:
            plt.savefig(path)
    finally:
        plt.close(fig)
# Allow running this test module directly under nose.
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
from datetime import datetime, timedelta, date, time
import nose
from pandas.compat import lrange, zip
import numpy as np
from numpy.testing.decorators import slow
from pandas import Index, Series, DataFrame
from pandas.tseries.index import date_range, bdate_range
from pandas.tseries.offsets import DateOffset, Week
from pandas.tseries.period import period_range, Period, PeriodIndex
from pandas.tseries.resample import DatetimeIndex
from pandas.util.testing import assert_series_equal, ensure_clean
import pandas.util.testing as tm
from pandas.tests.test_graphics import _skip_if_no_scipy_gaussian_kde
@tm.mplskip
class TestTSPlot(tm.TestCase):
def setUp(self):
    """Build per-frequency Series/DataFrame fixtures, once with
    PeriodIndex and once with DatetimeIndex indexes."""
    freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
    idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
    self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
    self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
                                columns=['A', 'B', 'C'])
                      for x in idx]
    freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
    idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
    self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
    self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
                                  columns=['A', 'B', 'C'])
                        for x in idx]
def tearDown(self):
    """Close any figures left open by the test."""
    tm.close()
@slow
def test_ts_plot_with_tz(self):
    """GH2877: plotting a tz-aware series must not raise."""
    tz_index = date_range('1/1/2011', periods=2, freq='H',
                          tz='Europe/Brussels')
    tz_series = Series([188.5, 328.25], index=tz_index)
    _check_plot_works(tz_series.plot)
def test_fontsize_set_correctly(self):
    """GH 8765: the fontsize kwarg must reach every tick label."""
    import matplotlib.pyplot as plt
    frame = DataFrame(np.random.randn(10, 9), index=range(10))
    axis = frame.plot(fontsize=2)
    all_labels = axis.get_xticklabels() + axis.get_yticklabels()
    for lbl in all_labels:
        self.assertEqual(lbl.get_fontsize(), 2)
@slow
def test_frame_inferred(self):
    """Frames whose index frequency must be inferred (plain
    DatetimeIndex, gapped, multi-step) all plot cleanly."""
    # inferred freq
    import matplotlib.pyplot as plt
    idx = date_range('1/1/1987', freq='MS', periods=100)
    idx = DatetimeIndex(idx.values, freq=None)
    df = DataFrame(np.random.randn(len(idx), 3), index=idx)
    _check_plot_works(df.plot)
    # axes freq
    idx = idx[0:40].union(idx[45:99])
    df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
    _check_plot_works(df2.plot)
    # N > 1
    idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
    idx = DatetimeIndex(idx.values, freq=None)
    df = DataFrame(np.random.randn(len(idx), 3), index=idx)
    _check_plot_works(df.plot)
def test_nonnumeric_exclude(self):
    """Non-numeric columns are dropped from a frame plot; plotting a
    purely non-numeric Series raises TypeError."""
    import matplotlib.pyplot as plt
    idx = date_range('1/1/1987', freq='A', periods=3)
    df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]}, idx)
    ax = df.plot()  # it works
    self.assertEqual(len(ax.get_lines()), 1)  # B was plotted
    plt.close(plt.gcf())
    self.assertRaises(TypeError, df['A'].plot)
@slow
def test_tsplot(self):
    """tsplot and Series.plot work for all fixture frequencies; a
    black style string yields a black line."""
    from pandas.tseries.plotting import tsplot
    import matplotlib.pyplot as plt
    ax = plt.gca()
    ts = tm.makeTimeSeries()
    # NOTE(review): the lambda closes over the loop variable `s` (late
    # binding); it works because it is only invoked inside each loop
    # iteration, while `s` is bound to the current series.
    f = lambda *args, **kwds: tsplot(s, plt.Axes.plot, *args, **kwds)
    for s in self.period_ser:
        _check_plot_works(f, s.index.freq, ax=ax, series=s)
    for s in self.datetime_ser:
        _check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
    for s in self.period_ser:
        _check_plot_works(s.plot, ax=ax)
    for s in self.datetime_ser:
        _check_plot_works(s.plot, ax=ax)
    ax = ts.plot(style='k')
    self.assertEqual((0., 0., 0.), ax.get_lines()[0].get_color())
def test_both_style_and_color(self):
    """Passing both a color-carrying style string and color= raises,
    for time-series and plain-index series alike."""
    import matplotlib.pyplot as plt
    ts = tm.makeTimeSeries()
    for series in (ts, ts.reset_index(drop=True)):
        self.assertRaises(ValueError, series.plot,
                          style='b-', color='#000099')
@slow
def test_high_freq(self):
    """Millisecond and microsecond frequency series plot without error."""
    for freq in ('ms', 'us'):
        rng = date_range('1/1/2012', periods=100000, freq=freq)
        fast_series = Series(np.random.randn(len(rng)), rng)
        _check_plot_works(fast_series.plot)
def test_get_datevalue(self):
    """get_datevalue maps None, ints, Periods, and date strings onto
    period ordinals (or passes them through)."""
    from pandas.tseries.converter import get_datevalue
    self.assertIsNone(get_datevalue(None, 'D'))
    self.assertEqual(get_datevalue(1987, 'A'), 1987)
    expected_month = Period('1987-12', 'M').ordinal
    self.assertEqual(get_datevalue(Period(1987, 'A'), 'M'), expected_month)
    expected_day = Period('1987-1-1', 'D').ordinal
    self.assertEqual(get_datevalue('1/1/1987', 'D'), expected_day)
@slow
def test_ts_plot_format_coord(self):
    """format_coord renders the cursor position using the axes' period
    frequency, for both .plot and tsplot."""
    def check_format_of_first_point(ax, expected_string):
        first_line = ax.get_lines()[0]
        first_x = first_line.get_xdata()[0].ordinal
        first_y = first_line.get_ydata()[0]
        try:
            self.assertEqual(expected_string, ax.format_coord(first_x, first_y))
        except (ValueError):
            raise nose.SkipTest("skipping test because issue forming test comparison GH7664")
    annual = Series(1, index=date_range('2014-01-01', periods=3, freq='A-DEC'))
    check_format_of_first_point(annual.plot(), 't = 2014 y = 1.000000')
    # note this is added to the annual plot already in existence, and changes its freq field
    daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
    check_format_of_first_point(daily.plot(), 't = 2014-01-01 y = 1.000000')
    tm.close()
    # tsplot
    import matplotlib.pyplot as plt
    from pandas.tseries.plotting import tsplot
    tsplot(annual, plt.Axes.plot)
    check_format_of_first_point(plt.gca(), 't = 2014 y = 1.000000')
    tsplot(daily, plt.Axes.plot)
    check_format_of_first_point(plt.gca(), 't = 2014-01-01 y = 1.000000')
@slow
def test_line_plot_period_series(self):
    """Every PeriodIndex fixture series plots at its own frequency."""
    for series in self.period_ser:
        _check_plot_works(series.plot, series.index.freq)
@slow
def test_line_plot_datetime_series(self):
    """Every DatetimeIndex fixture series plots at its rule code."""
    for series in self.datetime_ser:
        _check_plot_works(series.plot, series.index.freq.rule_code)
@slow
def test_line_plot_period_frame(self):
    """Every PeriodIndex fixture frame plots at its own frequency."""
    for frame in self.period_df:
        _check_plot_works(frame.plot, frame.index.freq)
@slow
def test_line_plot_datetime_frame(self):
    """DatetimeIndex frames plot with the matching period frequency."""
    for frame in self.datetime_df:
        rule = frame.index.freq.rule_code
        expected_freq = frame.index.to_period(rule).freq
        _check_plot_works(frame.plot, expected_freq)
@slow
def test_line_plot_inferred_freq(self):
    """Series with a plain (frequency-less) index plot via the inferred
    frequency; an irregular subset still plots."""
    for ser in self.datetime_ser:
        # strip the explicit freq by rebuilding the index from raw values
        ser = Series(ser.values, Index(np.asarray(ser.index)))
        _check_plot_works(ser.plot, ser.index.inferred_freq)
        ser = ser[[0, 3, 5, 6]]
        _check_plot_works(ser.plot)
def test_fake_inferred_business(self):
    """A gapped daily series must not have a business frequency
    incorrectly inferred onto the axes."""
    import matplotlib.pyplot as plt
    fig = plt.gcf()
    plt.clf()
    fig.add_subplot(111)
    rng = date_range('2001-1-1', '2001-1-10')
    ts = Series(lrange(len(rng)), rng)
    # punch a gap into the middle of the series
    ts = ts[:3].append(ts[5:])
    ax = ts.plot()
    self.assertFalse(hasattr(ax, 'freq'))
@slow
def test_plot_offset_freq(self):
    """Series with offset-based frequencies (B, then BQS) plot cleanly."""
    business = tm.makeTimeSeries()
    _check_plot_works(business.plot)
    quarterly_idx = date_range(business.index[0], freq='BQS', periods=10)
    quarterly = Series(np.random.randn(len(quarterly_idx)), quarterly_idx)
    _check_plot_works(quarterly.plot)
@slow
def test_plot_multiple_inferred_freq(self):
    """An index whose 5-day step frequency must be inferred still plots."""
    points = Index([datetime(2000, 1, 1),
                    datetime(2000, 1, 6),
                    datetime(2000, 1, 11)])
    inferred = Series(np.random.randn(len(points)), points)
    _check_plot_works(inferred.plot)
@slow
def test_uhf(self):
    """Ultra-high-frequency (millisecond) tick labels round-trip through
    the ordinal converter as HH:MM:SS.ffffff strings."""
    import pandas.tseries.converter as conv
    import matplotlib.pyplot as plt
    fig = plt.gcf()
    plt.clf()
    fig.add_subplot(111)
    idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
    df = DataFrame(np.random.randn(len(idx), 2), idx)
    ax = df.plot()
    axis = ax.get_xaxis()
    tlocs = axis.get_ticklocs()
    tlabels = axis.get_ticklabels()
    for loc, label in zip(tlocs, tlabels):
        # expected label text recomputed from the tick's ordinal position
        xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
        rs = str(label.get_text())
        if len(rs):
            self.assertEqual(xp, rs)
@slow
def test_irreg_hf(self):
    """Sub-daily irregular data keeps its exact spacing (in fractions of
    a day) for both datetime64 and object indexes."""
    import matplotlib.pyplot as plt
    fig = plt.gcf()
    plt.clf()
    fig.add_subplot(111)
    idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
    df = DataFrame(np.random.randn(len(idx), 2), idx)
    # NOTE(review): .ix is a long-deprecated indexer; kept as-is here.
    irreg = df.ix[[0, 1, 3, 4]]
    ax = irreg.plot()
    diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
    # one second expressed in days (matplotlib date units)
    sec = 1. / 24 / 60 / 60
    self.assertTrue((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all())
    plt.clf()
    fig.add_subplot(111)
    df2 = df.copy()
    df2.index = df.index.asobject
    ax = df2.plot()
    diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
    self.assertTrue((np.fabs(diffs[1:] - sec) < 1e-8).all())
def test_irregular_datetime64_repr_bug(self):
    """Plotting an irregular datetime64 series keeps the original index
    values as the line's x data."""
    import matplotlib.pyplot as plt
    ser = tm.makeTimeSeries()
    ser = ser[[0, 1, 2, 7]]
    fig = plt.gcf()
    plt.clf()
    ax = fig.add_subplot(211)
    ret = ser.plot()
    self.assertIsNotNone(ret)
    # each plotted x value equals the corresponding index entry
    for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
        self.assertEqual(rs, xp)
def test_business_freq(self):
    """A business-frequency period series plots starting at its first
    ordinal and keeps freq 'B' on the x data."""
    import matplotlib.pyplot as plt
    bts = tm.makePeriodSeries()
    ax = bts.plot()
    self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
                     bts.index[0].ordinal)
    idx = ax.get_lines()[0].get_xdata()
    self.assertEqual(PeriodIndex(data=idx).freqstr, 'B')
@slow
def test_business_freq_convert(self):
    """A BM-frequency series plots in monthly ('M') period space."""
    n = tm.N
    tm.N = 300  # temporarily enlarge the generated fixture series
    try:
        bts = tm.makeTimeSeries().asfreq('BM')
    finally:
        # BUG FIX: restore the module-level length even if the fixture
        # construction raises; previously a failure leaked tm.N = 300
        # into every subsequent test.
        tm.N = n
    ts = bts.to_period('M')
    ax = bts.plot()
    self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
                     ts.index[0].ordinal)
    idx = ax.get_lines()[0].get_xdata()
    self.assertEqual(PeriodIndex(data=idx).freqstr, 'M')
def test_nonzero_base(self):
    """GH2571: an index offset by half an hour must not be normalized
    to midnight when plotted."""
    shifted_idx = (date_range('2012-12-20', periods=24, freq='H') +
                   timedelta(minutes=30))
    frame = DataFrame(np.arange(24), index=shifted_idx)
    xdata = frame.plot().get_lines()[0].get_xdata()
    self.assertFalse(Index(xdata).is_normalized)
def test_dataframe(self):
    """Frame plotting converts the DatetimeIndex to periods on the x-axis."""
    frame = DataFrame({'a': tm.makeTimeSeries()})
    xdata = frame.plot().get_lines()[0].get_xdata()
    tm.assert_numpy_array_equal(frame.index.to_period(), PeriodIndex(xdata))
@slow
def test_axis_limits(self):
    """set_xlim accepts numbers, date strings, and datetimes, and the
    result round-trips through a canvas draw — for Series, DataFrame,
    and subplot axes alike."""
    import matplotlib.pyplot as plt
    def _test(ax):
        # numeric limits
        xlim = ax.get_xlim()
        ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
        ax.get_figure().canvas.draw()
        result = ax.get_xlim()
        self.assertEqual(result[0], xlim[0] - 5)
        self.assertEqual(result[1], xlim[1] + 10)
        # string
        expected = (Period('1/1/2000', ax.freq),
                    Period('4/1/2000', ax.freq))
        ax.set_xlim('1/1/2000', '4/1/2000')
        ax.get_figure().canvas.draw()
        result = ax.get_xlim()
        self.assertEqual(int(result[0]), expected[0].ordinal)
        self.assertEqual(int(result[1]), expected[1].ordinal)
        # datetime
        expected = (Period('1/1/2000', ax.freq),
                    Period('4/1/2000', ax.freq))
        ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
        ax.get_figure().canvas.draw()
        result = ax.get_xlim()
        self.assertEqual(int(result[0]), expected[0].ordinal)
        self.assertEqual(int(result[1]), expected[1].ordinal)
        fig = ax.get_figure()
        plt.close(fig)
    ser = tm.makeTimeSeries()
    ax = ser.plot()
    _test(ax)
    df = DataFrame({'a': ser, 'b': ser + 1})
    ax = df.plot()
    _test(ax)
    df = DataFrame({'a': ser, 'b': ser + 1})
    axes = df.plot(subplots=True)
    for ax in axes:
        _test(ax)
def test_get_finder(self):
    """Each base frequency maps to the expected tick-locator finder."""
    import pandas.tseries.converter as conv
    expectations = [('B', conv._daily_finder),
                    ('D', conv._daily_finder),
                    ('M', conv._monthly_finder),
                    ('Q', conv._quarterly_finder),
                    ('A', conv._annual_finder),
                    ('W', conv._daily_finder)]
    for freq, finder in expectations:
        self.assertEqual(conv.get_finder(freq), finder)
@slow
def test_finder_daily(self):
    """For business-daily ranges of many sizes, the first major tick is
    the first business day, even after nudging the left xlim."""
    import matplotlib.pyplot as plt
    xp = Period('1999-1-1', freq='B').ordinal
    day_lst = [10, 40, 252, 400, 950, 2750, 10000]
    for n in day_lst:
        rng = bdate_range('1999-1-1', periods=n)
        ser = Series(np.random.randn(len(rng)), rng)
        ax = ser.plot()
        xaxis = ax.get_xaxis()
        rs = xaxis.get_majorticklocs()[0]
        self.assertEqual(xp, rs)
        # a sub-period shift of the left limit must not move the tick
        vmin, vmax = ax.get_xlim()
        ax.set_xlim(vmin + 0.9, vmax)
        rs = xaxis.get_majorticklocs()[0]
        self.assertEqual(xp, rs)
        plt.close(ax.get_figure())
@slow
def test_finder_quarterly(self):
    """For quarterly ranges, the first major tick lands on 1988Q1,
    stable under a sub-period xlim shift."""
    import matplotlib.pyplot as plt
    xp = Period('1988Q1').ordinal
    yrs = [3.5, 11]
    for n in yrs:
        rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
        ser = Series(np.random.randn(len(rng)), rng)
        ax = ser.plot()
        xaxis = ax.get_xaxis()
        rs = xaxis.get_majorticklocs()[0]
        self.assertEqual(rs, xp)
        (vmin, vmax) = ax.get_xlim()
        ax.set_xlim(vmin + 0.9, vmax)
        rs = xaxis.get_majorticklocs()[0]
        self.assertEqual(xp, rs)
        plt.close(ax.get_figure())
@slow
def test_finder_monthly(self):
    """For monthly ranges, the first major tick lands on Jan 1988,
    stable under a sub-period xlim shift."""
    import matplotlib.pyplot as plt
    xp = Period('Jan 1988').ordinal
    yrs = [1.15, 2.5, 4, 11]
    for n in yrs:
        rng = period_range('1987Q2', periods=int(n * 12), freq='M')
        ser = Series(np.random.randn(len(rng)), rng)
        ax = ser.plot()
        xaxis = ax.get_xaxis()
        rs = xaxis.get_majorticklocs()[0]
        self.assertEqual(rs, xp)
        vmin, vmax = ax.get_xlim()
        ax.set_xlim(vmin + 0.9, vmax)
        rs = xaxis.get_majorticklocs()[0]
        self.assertEqual(xp, rs)
        plt.close(ax.get_figure())
def test_finder_monthly_long(self):
    """For a 24-year monthly range, the first major tick falls on 1989Q1."""
    rng = period_range('1988Q1', periods=24 * 12, freq='M')
    monthly = Series(np.random.randn(len(rng)), rng)
    xaxis = monthly.plot().get_xaxis()
    first_tick = xaxis.get_majorticklocs()[0]
    self.assertEqual(first_tick, Period('1989Q1', 'M').ordinal)
@slow
def test_finder_annual(self):
    """For annual ranges of growing length, the first major tick lands
    on the expected (rounded) year."""
    import matplotlib.pyplot as plt
    # expected first-tick year for each range length below
    xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
    for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
        rng = period_range('1987', periods=nyears, freq='A')
        ser = Series(np.random.randn(len(rng)), rng)
        ax = ser.plot()
        xaxis = ax.get_xaxis()
        rs = xaxis.get_majorticklocs()[0]
        self.assertEqual(rs, Period(xp[i], freq='A').ordinal)
        plt.close(ax.get_figure())
@slow
def test_finder_minutely(self):
    """For a 50-day minutely series, the first major tick is the first
    minute of the range."""
    total_minutes = 50 * 24 * 60
    rng = date_range('1/1/1999', freq='Min', periods=total_minutes)
    minutely = Series(np.random.randn(len(rng)), rng)
    xaxis = minutely.plot().get_xaxis()
    first_tick = xaxis.get_majorticklocs()[0]
    self.assertEqual(first_tick, Period('1/1/1999', freq='Min').ordinal)
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='H').ordinal
self.assertEqual(rs, xp)
@slow
def test_gaps(self):
import matplotlib.pyplot as plt
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
ax = ts.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
ax = ts.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
ax = ser.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
@slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
ax = low.plot()
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(len(ax.right_ax.get_lines()), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
@slow
def test_secondary_y(self):
import matplotlib.pyplot as plt
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata())
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(), 'default')
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
self.assertFalse(hasattr(ax, 'left_ax'))
self.assertTrue(hasattr(ax, 'right_ax'))
self.assertTrue(hasattr(ax2, 'left_ax'))
self.assertFalse(hasattr(ax2, 'right_ax'))
@slow
def test_secondary_y_ts(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(), 'default')
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
@slow
def test_secondary_kde(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
import matplotlib.pyplot as plt
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='density')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='bar')
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'default')
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'default')
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
def test_mixed_freq_regular_first(self):
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
ax = s1.plot()
ax2 = s2.plot(style='g')
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first(self):
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
s2.plot(style='g')
ax = s1.plot()
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s1.plot()
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s2.plot(style='g')
ax = s1.plot(ax=ax)
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
@slow
def test_mixed_freq_alignment(self):
ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq('T').interpolate()
ax = ts.plot()
ts2.plot(style='r')
self.assertEqual(ax.lines[0].get_xdata()[0],
ax.lines[1].get_xdata()[0])
@slow
def test_mixed_freq_lf_first(self):
import matplotlib.pyplot as plt
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot(legend=True)
ax = high.plot(legend=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
leg = ax.get_legend()
self.assertEqual(len(leg.texts), 2)
plt.close(ax.get_figure())
idxh = date_range('1/1/1999', periods=240, freq='T')
idxl = date_range('1/1/1999', periods=4, freq='H')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'T')
def test_mixed_freq_irreg_period(self):
ts = tm.makeTimeSeries()
irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
rng = period_range('1/3/2000', periods=30, freq='B')
ps = Series(np.random.randn(len(rng)), rng)
irreg.plot()
ps.plot()
@slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
# tsplot
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
tsplot(high, plt.Axes.plot)
lines = tsplot(low, plt.Axes.plot)
for l in lines:
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
@slow
def test_from_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
expected_h = idxh.to_period().asi8
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
1553, 1558, 1562])
for l in ax.get_lines():
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
tm.close()
# tsplot
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
tsplot(low, plt.Axes.plot)
lines = tsplot(high, plt.Axes.plot)
for l in lines:
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
@slow
def test_from_resampling_area_line_mixed(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = DataFrame(np.random.rand(len(idxh), 3),
index=idxh, columns=[0, 1, 2])
low = DataFrame(np.random.rand(len(idxl), 3),
index=idxl, columns=[0, 1, 2])
# low to high
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
ax = low.plot(kind=kind1, stacked=True)
ax = high.plot(kind=kind2, stacked=True, ax=ax)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
1553, 1558, 1562])
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[i]
self.assertEqual(PeriodIndex(l.get_xdata()).freq, idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
# check stacked values are correct
expected_y += low[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
# check high dataframe result
expected_x = idxh.to_period().asi8
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[3 + i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
expected_y += high[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
# high to low
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
ax = high.plot(kind=kind1, stacked=True)
ax = low.plot(kind=kind2, stacked=True, ax=ax)
# check high dataframe result
expected_x = idxh.to_period().asi8
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
expected_y += high[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
1553, 1558, 1562])
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[3 + i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
expected_y += low[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
@slow
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
# high to low
high.plot()
ax = low.plot()
self.assertEqual(len(ax.get_lines()), 2)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
tm.close()
# low to high
low.plot()
ax = high.plot()
self.assertEqual(len(ax.get_lines()), 2)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
@slow
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
_check_plot_works(df.plot)
# np.datetime64
idx = date_range('1/1/2000', periods=10)
idx = idx[[0, 2, 5, 9]].asobject
df = DataFrame(np.random.randn(len(idx), 3), idx)
_check_plot_works(df.plot)
@slow
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
ax = df.plot()
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
self.assertEqual(xp, rs)
# change xlim
ax.set_xlim('1:30', '5:00')
# check tick labels again
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
self.assertEqual(xp, rs)
@slow
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(microseconds=int(x))).time()
for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
ax = df.plot()
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
us = int((t - int(t)) * 1e6)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S.%f')
self.assertEqual(xp, rs)
@slow
def test_secondary_upsample(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot(secondary_y=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
for l in ax.left_ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
@slow
def test_secondary_legend(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
# ts
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['A', 'B'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
self.assertEqual(leg.get_texts()[1].get_text(), 'B (right)')
self.assertEqual(leg.get_texts()[2].get_text(), 'C')
self.assertEqual(leg.get_texts()[3].get_text(), 'D')
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'C'], mark_right=False)
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertEqual(leg.get_texts()[0].get_text(), 'A')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
self.assertEqual(leg.get_texts()[2].get_text(), 'C')
self.assertEqual(leg.get_texts()[3].get_text(), 'D')
plt.clf()
ax = df.plot(kind='bar', secondary_y=['A'])
leg = ax.get_legend()
self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
plt.clf()
ax = df.plot(kind='bar', secondary_y=['A'], mark_right=False)
leg = ax.get_legend()
self.assertEqual(leg.get_texts()[0].get_text(), 'A')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
plt.clf()
ax = fig.add_subplot(211)
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['C', 'D'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
# non-ts
df = tm.makeDataFrame()
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'B'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['C', 'D'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
def test_format_date_axis(self):
rng = date_range('1/1/2012', periods=12, freq='M')
df = DataFrame(np.random.randn(len(rng), 3), rng)
ax = df.plot()
xaxis = ax.get_xaxis()
for l in xaxis.get_ticklabels():
if len(l.get_text()) > 0:
self.assertEqual(l.get_rotation(), 30)
@slow
def test_ax_plot(self):
import matplotlib.pyplot as plt
x = DatetimeIndex(start='2012-01-02', periods=10,
freq='D')
y = lrange(len(x))
fig = plt.figure()
ax = fig.add_subplot(111)
lines = ax.plot(x, y, label='Y')
tm.assert_numpy_array_equal(DatetimeIndex(lines[0].get_xdata()), x)
@slow
def test_mpl_nopandas(self):
import matplotlib.pyplot as plt
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
kw = dict(fmt='-', lw=4)
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
tm.assert_numpy_array_equal(np.array([x.toordinal() for x in dates]),
line1.get_xydata()[:, 0])
tm.assert_numpy_array_equal(np.array([x.toordinal() for x in dates]),
line2.get_xydata()[:, 0])
@slow
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
# plot the left section of the irregular series, then the right section
ax = ts_irregular[:5].plot()
ts_irregular[5:].plot(ax=ax)
# check that axis limits are correct
left, right = ax.get_xlim()
self.assertEqual(left, ts_irregular.index.min().toordinal())
self.assertEqual(right, ts_irregular.index.max().toordinal())
@slow
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
index_2 = [5, 6, 7, 8]
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
ax = s1.plot()
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
self.assertTrue(right_before < right_after)
@slow
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start='2000-01-01', periods=4, freq='D')
index_2 = date_range(start='2000-01-05', periods=4, freq='D')
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
ax = s1.plot()
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
self.assertTrue(right_before < right_after)
@slow
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range('2000-01-01', periods=10000, freq='min')
ts = Series(1, index=rng)
ax = ts.plot()
left_before, right_before = ax.get_xlim()
ts.resample('D').plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
# a downsample should not have changed either limit
self.assertEqual(left_before, left_after)
self.assertEqual(right_before, right_after)
@slow
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
ax = ts_irregular[:5].plot()
# plot higher-x values on secondary axis
ts_irregular[5:].plot(secondary_y=True, ax=ax)
# ensure secondary limits aren't overwritten by plot on primary
ts_irregular[:5].plot(ax=ax)
left, right = ax.get_xlim()
self.assertEqual(left, ts_irregular.index.min().toordinal())
self.assertEqual(right, ts_irregular.index.max().toordinal())
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
import matplotlib.pyplot as plt
fig = plt.gcf()
try:
plt.clf()
ax = fig.add_subplot(211)
orig_ax = kwargs.pop('ax', plt.gca())
orig_axfreq = getattr(orig_ax, 'freq', None)
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
ax = kwargs.pop('ax', plt.gca())
if series is not None:
dfreq = series.index.freq
if isinstance(dfreq, DateOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
if freq is not None and orig_axfreq is None:
assert ax.freq == freq
ax = fig.add_subplot(212)
try:
kwargs['ax'] = ax
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
except Exception:
pass
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
plt.close(fig)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False) | en | 0.729248 | # GH2877 # For issue #8765 # inferred freq # axes freq # N > 1 # it works #B was plotted # note this is added to the annual plot already in existence, and changes its freq field # tsplot # GH2571 # string # datetim # irregular # non-ts # GH 9852 # GH 9852 # tsplot # idxl lines # tsplot # idxl lines # low to high # check low dataframe result # check stacked values are correct # check high dataframe result # high to low # check high dataframe result # check low dataframe result # GH 7772, GH 7760 # high to low # low to high # date # np.datetime64 # verify tick labels # change xlim # check tick labels again # verify tick labels # ts # TODO: color cycle problems # TODO: color cycle problems # non-ts # TODO: color cycle problems # TODO: color cycle problems # GH 2960 # plot the left section of the irregular series, then the right section # check that axis limits are correct # GH 3490 - non-timeseries with secondary y # GH 3490 - regular-timeseries with secondary y # GH 3490 - mixed frequency timeseries with secondary y # a downsample should not have changed either limit # GH 3490 - irregular-timeseries with secondary y # plot higher-x values on secondary axis # ensure secondary limits aren't overwritten by plot on primary # do something more intelligent # do something more intelligent | 2.349677 | 2 |
leetcode/easy/linkedList/test_delete_node_in_a_linked_list.py | yenbohuang/online-contest-python | 0 | 6624357 | # https://leetcode.com/problems/delete-node-in-a-linked-list/
import unittest
from ...leetcode_data_model import ListNode
class Solution(object):
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
class TestSolution(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def tearDown(self):
pass
def test_case_1(self):
root = ListNode(1)
root.next = ListNode(2)
root.next.next = ListNode(3)
root.next.next.next = ListNode(4)
self.assertEqual(str(root), "1->2->3->4")
self.solution.deleteNode(root.next.next)
self.assertEqual(str(root), "1->2->4")
if __name__ == '__main__':
unittest.main()
| # https://leetcode.com/problems/delete-node-in-a-linked-list/
import unittest
from ...leetcode_data_model import ListNode
class Solution(object):
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
class TestSolution(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def tearDown(self):
pass
def test_case_1(self):
root = ListNode(1)
root.next = ListNode(2)
root.next.next = ListNode(3)
root.next.next.next = ListNode(4)
self.assertEqual(str(root), "1->2->3->4")
self.solution.deleteNode(root.next.next)
self.assertEqual(str(root), "1->2->4")
if __name__ == '__main__':
unittest.main()
| en | 0.692952 | # https://leetcode.com/problems/delete-node-in-a-linked-list/ :type node: ListNode :rtype: void Do not return anything, modify node in-place instead. | 3.902002 | 4 |
Visualisation/voxel_to_mesh.py | ayushjain1144/SeeingByMoving | 13 | 6624358 | <gh_stars>10-100
import skimage
from skimage.measure import marching_cubes as mcl
from mayavi import mlab
import numpy as np
import ipdb
st = ipdb.set_trace
def save_voxel_to_mesh(voxel_grid, output_fname):
verts, faces, normals, values = mcl(voxel_grid, 0.0)
mlab.triangular_mesh([vert[0] for vert in verts],
[vert[1] for vert in verts],
[vert[2] for vert in verts],
faces)
faces = faces + 1
mlab.show()
thefile = open(output_fname, 'w')
for item in verts:
thefile.write("v {0} {1} {2}\n".format(item[0],item[1],item[2]))
for item in normals:
thefile.write("vn {0} {1} {2}\n".format(item[0],item[1],item[2]))
for item in faces:
thefile.write("f {0}/{0} {1}/{1} {2}/{2}\n".format(item[0],item[1],item[2]))
thefile.close()
def main():
# load voxel grid
voxel_grid = np.load('car_72_1775_1.npy' ,allow_pickle=True)
#st()
print(f'shape of voxel grid: {voxel_grid.shape}')
save_voxel_to_mesh(voxel_grid[0][0], "couch_mesh.obj")
if __name__ == "__main__":
main()
| import skimage
from skimage.measure import marching_cubes as mcl
from mayavi import mlab
import numpy as np
import ipdb
st = ipdb.set_trace
def save_voxel_to_mesh(voxel_grid, output_fname):
verts, faces, normals, values = mcl(voxel_grid, 0.0)
mlab.triangular_mesh([vert[0] for vert in verts],
[vert[1] for vert in verts],
[vert[2] for vert in verts],
faces)
faces = faces + 1
mlab.show()
thefile = open(output_fname, 'w')
for item in verts:
thefile.write("v {0} {1} {2}\n".format(item[0],item[1],item[2]))
for item in normals:
thefile.write("vn {0} {1} {2}\n".format(item[0],item[1],item[2]))
for item in faces:
thefile.write("f {0}/{0} {1}/{1} {2}/{2}\n".format(item[0],item[1],item[2]))
thefile.close()
def main():
# load voxel grid
voxel_grid = np.load('car_72_1775_1.npy' ,allow_pickle=True)
#st()
print(f'shape of voxel grid: {voxel_grid.shape}')
save_voxel_to_mesh(voxel_grid[0][0], "couch_mesh.obj")
if __name__ == "__main__":
main() | en | 0.287742 | # load voxel grid #st() | 2.277357 | 2 |
django/utils/encoding.py | mitar/django | 1 | 6624359 | import types
import urllib
import locale
import datetime
import codecs
from decimal import Decimal
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class whose __str__ returns its __unicode__ as a UTF-8 bytestring.
Useful as a mix-in.
"""
def __str__(self):
return self.__unicode__().encode('utf-8')
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a unicode object representing 's'. Treats bytestrings using the
'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_unicode(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal)
)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% in performance when s
# is an instance of unicode. This function gets called often in that
# setting.
if isinstance(s, unicode):
return s
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, basestring,):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
try:
s = unicode(str(s), encoding, errors)
except UnicodeEncodeError:
if not isinstance(s, Exception):
raise
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII data without special
# handling to display as a string. We need to handle this
# without raising a further exception. We do an
# approximation to what the Exception's standard str()
# output should be.
s = u' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
elif not isinstance(s, unicode):
# Note: We use .decode() here, instead of unicode(s, encoding,
# errors), so that if s is a SafeString, it ends up being a
# SafeUnicode at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = u' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
return s
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if isinstance(s, Promise):
return unicode(s).encode(encoding, errors)
elif not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return ' '.join([smart_str(arg, encoding, strings_only,
errors) for arg in s])
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return urllib.quote(smart_str(iri), safe="/#%[]=:;$&()+,!?*@'~")
def filepath_to_uri(path):
"""Convert an file system path to a URI portion that is suitable for
inclusion in a URL.
We are assuming input is either UTF-8 or unicode already.
This method will encode certain chars that would normally be recognized as
special chars for URIs. Note that this method does not encode the '
character, as it is a valid character within URIs. See
encodeURIComponent() JavaScript function for more details.
Returns an ASCII string containing the encoded result.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return urllib.quote(smart_str(path).replace("\\", "/"), safe="/~!*()'")
# The encoding of the default system locale but falls back to the
# given fallback encoding if the encoding is unsupported by python or could
# not be determined. See tickets #10335 and #5846
try:
DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(DEFAULT_LOCALE_ENCODING)
except:
DEFAULT_LOCALE_ENCODING = 'ascii'
| import types
import urllib
import locale
import datetime
import codecs
from decimal import Decimal
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class whose __str__ returns its __unicode__ as a UTF-8 bytestring.
Useful as a mix-in.
"""
def __str__(self):
return self.__unicode__().encode('utf-8')
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a unicode object representing 's'. Treats bytestrings using the
'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_unicode(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal)
)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% in performance when s
# is an instance of unicode. This function gets called often in that
# setting.
if isinstance(s, unicode):
return s
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, basestring,):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
try:
s = unicode(str(s), encoding, errors)
except UnicodeEncodeError:
if not isinstance(s, Exception):
raise
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII data without special
# handling to display as a string. We need to handle this
# without raising a further exception. We do an
# approximation to what the Exception's standard str()
# output should be.
s = u' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
elif not isinstance(s, unicode):
# Note: We use .decode() here, instead of unicode(s, encoding,
# errors), so that if s is a SafeString, it ends up being a
# SafeUnicode at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = u' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
return s
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if isinstance(s, Promise):
return unicode(s).encode(encoding, errors)
elif not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return ' '.join([smart_str(arg, encoding, strings_only,
errors) for arg in s])
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return urllib.quote(smart_str(iri), safe="/#%[]=:;$&()+,!?*@'~")
def filepath_to_uri(path):
"""Convert an file system path to a URI portion that is suitable for
inclusion in a URL.
We are assuming input is either UTF-8 or unicode already.
This method will encode certain chars that would normally be recognized as
special chars for URIs. Note that this method does not encode the '
character, as it is a valid character within URIs. See
encodeURIComponent() JavaScript function for more details.
Returns an ASCII string containing the encoded result.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return urllib.quote(smart_str(path).replace("\\", "/"), safe="/~!*()'")
# The encoding of the default system locale but falls back to the
# given fallback encoding if the encoding is unsupported by python or could
# not be determined. See tickets #10335 and #5846
try:
DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(DEFAULT_LOCALE_ENCODING)
except:
DEFAULT_LOCALE_ENCODING = 'ascii'
| en | 0.858916 | A class whose __str__ returns its __unicode__ as a UTF-8 bytestring. Useful as a mix-in. Returns a unicode object representing 's'. Treats bytestrings using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects. # The input is the result of a gettext_lazy() call. Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). Similar to smart_unicode, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. # Handle the common case first, saves 30-40% in performance when s # is an instance of unicode. This function gets called often in that # setting. # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII data without special # handling to display as a string. We need to handle this # without raising a further exception. We do an # approximation to what the Exception's standard str() # output should be. # Note: We use .decode() here, instead of unicode(s, encoding, # errors), so that if s is a SafeString, it ends up being a # SafeUnicode at the end. # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. Returns a bytestring version of 's', encoded as specified in 'encoding'. If strings_only is True, don't convert (some) non-string-like objects. # An Exception subclass containing non-ASCII data that doesn't # know how to print itself properly. We shouldn't raise a # further exception. Convert an Internationalized Resource Identifier (IRI) portion to a URI portion that is suitable for inclusion in a URL. This is the algorithm from section 3.1 of RFC 3987. 
However, since we are assuming input is either UTF-8 or unicode already, we can simplify things a little from the full method. Returns an ASCII string containing the encoded result. # The list of safe characters here is constructed from the "reserved" and # "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986: # reserved = gen-delims / sub-delims # gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" # sub-delims = "!" / "$" / "&" / "'" / "(" / ")" # / "*" / "+" / "," / ";" / "=" # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" # Of the unreserved characters, urllib.quote already considers all but # the ~ safe. # The % character is also added to the list of safe characters here, as the # end of section 3.1 of RFC 3987 specifically mentions that % must not be # converted. #%[]=:;$&()+,!?*@'~") Convert an file system path to a URI portion that is suitable for inclusion in a URL. We are assuming input is either UTF-8 or unicode already. This method will encode certain chars that would normally be recognized as special chars for URIs. Note that this method does not encode the ' character, as it is a valid character within URIs. See encodeURIComponent() JavaScript function for more details. Returns an ASCII string containing the encoded result. # I know about `os.sep` and `os.altsep` but I want to leave # some flexibility for hardcoding separators. # The encoding of the default system locale but falls back to the # given fallback encoding if the encoding is unsupported by python or could # not be determined. See tickets #10335 and #5846 | 2.557197 | 3 |
detection_train.py | cclauss/simpledet | 0 | 6624360 | import argparse
import importlib
import logging
import os
import pprint
import pickle as pkl
from functools import reduce
from core.detection_module import DetModule
from utils import callback
from utils.memonger_v2 import search_plan_to_layer
from utils.lr_scheduler import WarmupMultiFactorScheduler, LRSequential, AdvancedLRScheduler
from utils.load_model import load_checkpoint
from utils.patch_config import patch_config_as_nothrow
import mxnet as mx
import numpy as np
def train_net(config):
pGen, pKv, pRpn, pRoi, pBbox, pDataset, pModel, pOpt, pTest, \
transform, data_name, label_name, metric_list = config.get_config(is_train=True)
pGen = patch_config_as_nothrow(pGen)
pKv = patch_config_as_nothrow(pKv)
pRpn = patch_config_as_nothrow(pRpn)
pRoi = patch_config_as_nothrow(pRoi)
pBbox = patch_config_as_nothrow(pBbox)
pDataset = patch_config_as_nothrow(pDataset)
pModel = patch_config_as_nothrow(pModel)
pOpt = patch_config_as_nothrow(pOpt)
pTest = patch_config_as_nothrow(pTest)
ctx = [mx.gpu(int(i)) for i in pKv.gpus]
pretrain_prefix = pModel.pretrain.prefix
pretrain_epoch = pModel.pretrain.epoch
prefix = pGen.name
save_path = os.path.join("experiments", prefix)
begin_epoch = pOpt.schedule.begin_epoch
end_epoch = pOpt.schedule.end_epoch
lr_iter = pOpt.schedule.lr_iter
# only rank==0 print all debug infos
kvstore_type = "dist_sync" if os.environ.get("DMLC_ROLE") == "worker" else pKv.kvstore
kv = mx.kvstore.create(kvstore_type)
rank = kv.rank
# for distributed training using shared file system
os.makedirs(save_path, exist_ok=True)
from utils.logger import config_logger
config_logger(os.path.join(save_path, "log.txt"))
model_prefix = os.path.join(save_path, "checkpoint")
# set up logger
logger = logging.getLogger()
sym = pModel.train_symbol
# setup multi-gpu
input_batch_size = pKv.batch_image * len(ctx)
# print config
# if rank == 0:
# logger.info(pprint.pformat(config))
# load dataset and prepare imdb for training
image_sets = pDataset.image_set
roidbs = [pkl.load(open("data/cache/{}.roidb".format(i), "rb"), encoding="latin1") for i in image_sets]
roidb = reduce(lambda x, y: x + y, roidbs)
# filter empty image
roidb = [rec for rec in roidb if rec["gt_bbox"].shape[0] > 0]
# add flip roi record
flipped_roidb = []
for rec in roidb:
new_rec = rec.copy()
new_rec["flipped"] = True
flipped_roidb.append(new_rec)
roidb = roidb + flipped_roidb
from core.detection_input import AnchorLoader
train_data = AnchorLoader(
roidb=roidb,
transform=transform,
data_name=data_name,
label_name=label_name,
batch_size=input_batch_size,
shuffle=True,
kv=kv,
num_worker=pGen.loader_worker or 12,
num_collector=pGen.loader_collector or 1,
worker_queue_depth=2,
collector_queue_depth=2
)
# infer shape
worker_data_shape = dict(train_data.provide_data + train_data.provide_label)
for key in worker_data_shape:
worker_data_shape[key] = (pKv.batch_image,) + worker_data_shape[key][1:]
arg_shape, _, aux_shape = sym.infer_shape(**worker_data_shape)
_, out_shape, _ = sym.get_internals().infer_shape(**worker_data_shape)
out_shape_dict = list(zip(sym.get_internals().list_outputs(), out_shape))
_, out_shape, _ = sym.infer_shape(**worker_data_shape)
terminal_out_shape_dict = zip(sym.list_outputs(), out_shape)
if rank == 0:
logger.info('parameter shape')
logger.info(pprint.pformat([i for i in out_shape_dict if not i[0].endswith('output')]))
logger.info('intermediate output shape')
logger.info(pprint.pformat([i for i in out_shape_dict if i[0].endswith('output')]))
logger.info('terminal output shape')
logger.info(pprint.pformat([i for i in terminal_out_shape_dict]))
# memonger
if pModel.memonger:
last_block = pModel.memonger_until or ""
if rank == 0:
logger.info("do memonger up to {}".format(last_block))
type_dict = {k: np.float32 for k in worker_data_shape}
sym = search_plan_to_layer(sym, last_block, 1000, type_dict=type_dict, **worker_data_shape)
# load and initialize params
if pOpt.schedule.begin_epoch != 0:
arg_params, aux_params = load_checkpoint(model_prefix, begin_epoch)
elif pModel.from_scratch:
arg_params, aux_params = dict(), dict()
else:
arg_params, aux_params = load_checkpoint(pretrain_prefix, pretrain_epoch)
if pModel.process_weight is not None:
pModel.process_weight(sym, arg_params, aux_params)
# merge batch normalization to save memory in fix bn training
from utils.graph_optimize import merge_bn
sym, arg_params, aux_params = merge_bn(sym, arg_params, aux_params)
if pModel.random:
import time
mx.random.seed(int(time.time()))
np.random.seed(int(time.time()))
init = mx.init.Xavier(factor_type="in", rnd_type='gaussian', magnitude=2)
init.set_verbosity(verbose=True)
# create solver
fixed_param = pModel.pretrain.fixed_param
excluded_param = pModel.pretrain.excluded_param
data_names = [k[0] for k in train_data.provide_data]
label_names = [k[0] for k in train_data.provide_label]
mod = DetModule(sym, data_names=data_names, label_names=label_names,
logger=logger, context=ctx, fixed_param=fixed_param, excluded_param=excluded_param)
eval_metrics = mx.metric.CompositeEvalMetric(metric_list)
# callback
batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=pGen.log_frequency)
epoch_end_callback = callback.do_checkpoint(model_prefix)
sym.save(model_prefix + ".json")
# decide learning rate
lr_mode = pOpt.optimizer.lr_mode or 'step'
base_lr = pOpt.optimizer.lr * kv.num_workers
lr_factor = 0.1
iter_per_epoch = len(train_data) // input_batch_size
lr_iter = [it // kv.num_workers for it in lr_iter]
lr_iter = [it - iter_per_epoch * begin_epoch for it in lr_iter]
lr_iter_discount = [it for it in lr_iter if it > 0]
current_lr = base_lr * (lr_factor ** (len(lr_iter) - len(lr_iter_discount)))
if rank == 0:
logging.info('total iter {}'.format(iter_per_epoch * (end_epoch - begin_epoch)))
logging.info('lr {}, lr_iters {}'.format(current_lr, lr_iter_discount))
logging.info('lr mode: {}'.format(lr_mode))
if pOpt.warmup is not None and pOpt.schedule.begin_epoch == 0:
if rank == 0:
logging.info(
'warmup lr {}, warmup step {}'.format(
pOpt.warmup.lr,
pOpt.warmup.iter // kv.num_workers)
)
if lr_mode == 'step':
lr_scheduler = WarmupMultiFactorScheduler(
step=lr_iter_discount,
factor=lr_factor,
warmup=True,
warmup_type=pOpt.warmup.type,
warmup_lr=pOpt.warmup.lr,
warmup_step=pOpt.warmup.iter // kv.num_workers
)
elif lr_mode == 'cosine':
warmup_lr_scheduler = AdvancedLRScheduler(
mode='linear',
base_lr=pOpt.warmup.lr,
target_lr=base_lr,
niters=pOpt.warmup.iter // kv.num_workers
)
cosine_lr_scheduler = AdvancedLRScheduler(
mode='cosine',
base_lr=base_lr,
target_lr=0,
niters=(iter_per_epoch * (end_epoch - begin_epoch) - pOpt.warmup.iter) // kv.num_workers
)
lr_scheduler = LRSequential([warmup_lr_scheduler, cosine_lr_scheduler])
else:
raise NotImplementedError
else:
if lr_mode == 'step':
lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(lr_iter_discount, lr_factor)
elif lr_mode == 'cosine':
lr_scheduler = AdvancedLRScheduler(
mode='cosine',
base_lr=base_lr,
target_lr=0,
niters=iter_per_epoch * (end_epoch - begin_epoch) // kv.num_workers
)
else:
lr_scheduler = None
# optimizer
optimizer_params = dict(
momentum=pOpt.optimizer.momentum,
wd=pOpt.optimizer.wd,
learning_rate=current_lr,
lr_scheduler=lr_scheduler,
rescale_grad=1.0 / (len(pKv.gpus) * kv.num_workers),
clip_gradient=pOpt.optimizer.clip_gradient
)
if pKv.fp16:
optimizer_params['multi_precision'] = True
optimizer_params['rescale_grad'] /= 128.0
profile = pGen.profile or False
if profile:
mx.profiler.set_config(profile_all=True, filename=os.path.join(save_path, "profile.json"))
# train
mod.fit(
train_data=train_data,
eval_metric=eval_metrics,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kv,
optimizer=pOpt.optimizer.type,
optimizer_params=optimizer_params,
initializer=init,
allow_missing=True,
arg_params=arg_params,
aux_params=aux_params,
begin_epoch=begin_epoch,
num_epoch=end_epoch,
profile=profile
)
logging.info("Training has done")
time.sleep(10)
logging.info("Exiting")
def parse_args():
parser = argparse.ArgumentParser(description='Train Detection')
parser.add_argument('--config', help='config file path', type=str)
args = parser.parse_args()
config = importlib.import_module(args.config.replace('.py', '').replace('/', '.'))
return config
if __name__ == '__main__':
train_net(parse_args())
| import argparse
import importlib
import logging
import os
import pprint
import pickle as pkl
from functools import reduce
from core.detection_module import DetModule
from utils import callback
from utils.memonger_v2 import search_plan_to_layer
from utils.lr_scheduler import WarmupMultiFactorScheduler, LRSequential, AdvancedLRScheduler
from utils.load_model import load_checkpoint
from utils.patch_config import patch_config_as_nothrow
import mxnet as mx
import numpy as np
def train_net(config):
pGen, pKv, pRpn, pRoi, pBbox, pDataset, pModel, pOpt, pTest, \
transform, data_name, label_name, metric_list = config.get_config(is_train=True)
pGen = patch_config_as_nothrow(pGen)
pKv = patch_config_as_nothrow(pKv)
pRpn = patch_config_as_nothrow(pRpn)
pRoi = patch_config_as_nothrow(pRoi)
pBbox = patch_config_as_nothrow(pBbox)
pDataset = patch_config_as_nothrow(pDataset)
pModel = patch_config_as_nothrow(pModel)
pOpt = patch_config_as_nothrow(pOpt)
pTest = patch_config_as_nothrow(pTest)
ctx = [mx.gpu(int(i)) for i in pKv.gpus]
pretrain_prefix = pModel.pretrain.prefix
pretrain_epoch = pModel.pretrain.epoch
prefix = pGen.name
save_path = os.path.join("experiments", prefix)
begin_epoch = pOpt.schedule.begin_epoch
end_epoch = pOpt.schedule.end_epoch
lr_iter = pOpt.schedule.lr_iter
# only rank==0 print all debug infos
kvstore_type = "dist_sync" if os.environ.get("DMLC_ROLE") == "worker" else pKv.kvstore
kv = mx.kvstore.create(kvstore_type)
rank = kv.rank
# for distributed training using shared file system
os.makedirs(save_path, exist_ok=True)
from utils.logger import config_logger
config_logger(os.path.join(save_path, "log.txt"))
model_prefix = os.path.join(save_path, "checkpoint")
# set up logger
logger = logging.getLogger()
sym = pModel.train_symbol
# setup multi-gpu
input_batch_size = pKv.batch_image * len(ctx)
# print config
# if rank == 0:
# logger.info(pprint.pformat(config))
# load dataset and prepare imdb for training
image_sets = pDataset.image_set
roidbs = [pkl.load(open("data/cache/{}.roidb".format(i), "rb"), encoding="latin1") for i in image_sets]
roidb = reduce(lambda x, y: x + y, roidbs)
# filter empty image
roidb = [rec for rec in roidb if rec["gt_bbox"].shape[0] > 0]
# add flip roi record
flipped_roidb = []
for rec in roidb:
new_rec = rec.copy()
new_rec["flipped"] = True
flipped_roidb.append(new_rec)
roidb = roidb + flipped_roidb
from core.detection_input import AnchorLoader
train_data = AnchorLoader(
roidb=roidb,
transform=transform,
data_name=data_name,
label_name=label_name,
batch_size=input_batch_size,
shuffle=True,
kv=kv,
num_worker=pGen.loader_worker or 12,
num_collector=pGen.loader_collector or 1,
worker_queue_depth=2,
collector_queue_depth=2
)
# infer shape
worker_data_shape = dict(train_data.provide_data + train_data.provide_label)
for key in worker_data_shape:
worker_data_shape[key] = (pKv.batch_image,) + worker_data_shape[key][1:]
arg_shape, _, aux_shape = sym.infer_shape(**worker_data_shape)
_, out_shape, _ = sym.get_internals().infer_shape(**worker_data_shape)
out_shape_dict = list(zip(sym.get_internals().list_outputs(), out_shape))
_, out_shape, _ = sym.infer_shape(**worker_data_shape)
terminal_out_shape_dict = zip(sym.list_outputs(), out_shape)
if rank == 0:
logger.info('parameter shape')
logger.info(pprint.pformat([i for i in out_shape_dict if not i[0].endswith('output')]))
logger.info('intermediate output shape')
logger.info(pprint.pformat([i for i in out_shape_dict if i[0].endswith('output')]))
logger.info('terminal output shape')
logger.info(pprint.pformat([i for i in terminal_out_shape_dict]))
# memonger
if pModel.memonger:
last_block = pModel.memonger_until or ""
if rank == 0:
logger.info("do memonger up to {}".format(last_block))
type_dict = {k: np.float32 for k in worker_data_shape}
sym = search_plan_to_layer(sym, last_block, 1000, type_dict=type_dict, **worker_data_shape)
# load and initialize params
if pOpt.schedule.begin_epoch != 0:
arg_params, aux_params = load_checkpoint(model_prefix, begin_epoch)
elif pModel.from_scratch:
arg_params, aux_params = dict(), dict()
else:
arg_params, aux_params = load_checkpoint(pretrain_prefix, pretrain_epoch)
if pModel.process_weight is not None:
pModel.process_weight(sym, arg_params, aux_params)
# merge batch normalization to save memory in fix bn training
from utils.graph_optimize import merge_bn
sym, arg_params, aux_params = merge_bn(sym, arg_params, aux_params)
if pModel.random:
import time
mx.random.seed(int(time.time()))
np.random.seed(int(time.time()))
init = mx.init.Xavier(factor_type="in", rnd_type='gaussian', magnitude=2)
init.set_verbosity(verbose=True)
# create solver
fixed_param = pModel.pretrain.fixed_param
excluded_param = pModel.pretrain.excluded_param
data_names = [k[0] for k in train_data.provide_data]
label_names = [k[0] for k in train_data.provide_label]
mod = DetModule(sym, data_names=data_names, label_names=label_names,
logger=logger, context=ctx, fixed_param=fixed_param, excluded_param=excluded_param)
eval_metrics = mx.metric.CompositeEvalMetric(metric_list)
# callback
batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=pGen.log_frequency)
epoch_end_callback = callback.do_checkpoint(model_prefix)
sym.save(model_prefix + ".json")
# decide learning rate
lr_mode = pOpt.optimizer.lr_mode or 'step'
base_lr = pOpt.optimizer.lr * kv.num_workers
lr_factor = 0.1
iter_per_epoch = len(train_data) // input_batch_size
lr_iter = [it // kv.num_workers for it in lr_iter]
lr_iter = [it - iter_per_epoch * begin_epoch for it in lr_iter]
lr_iter_discount = [it for it in lr_iter if it > 0]
current_lr = base_lr * (lr_factor ** (len(lr_iter) - len(lr_iter_discount)))
if rank == 0:
logging.info('total iter {}'.format(iter_per_epoch * (end_epoch - begin_epoch)))
logging.info('lr {}, lr_iters {}'.format(current_lr, lr_iter_discount))
logging.info('lr mode: {}'.format(lr_mode))
if pOpt.warmup is not None and pOpt.schedule.begin_epoch == 0:
if rank == 0:
logging.info(
'warmup lr {}, warmup step {}'.format(
pOpt.warmup.lr,
pOpt.warmup.iter // kv.num_workers)
)
if lr_mode == 'step':
lr_scheduler = WarmupMultiFactorScheduler(
step=lr_iter_discount,
factor=lr_factor,
warmup=True,
warmup_type=pOpt.warmup.type,
warmup_lr=pOpt.warmup.lr,
warmup_step=pOpt.warmup.iter // kv.num_workers
)
elif lr_mode == 'cosine':
warmup_lr_scheduler = AdvancedLRScheduler(
mode='linear',
base_lr=pOpt.warmup.lr,
target_lr=base_lr,
niters=pOpt.warmup.iter // kv.num_workers
)
cosine_lr_scheduler = AdvancedLRScheduler(
mode='cosine',
base_lr=base_lr,
target_lr=0,
niters=(iter_per_epoch * (end_epoch - begin_epoch) - pOpt.warmup.iter) // kv.num_workers
)
lr_scheduler = LRSequential([warmup_lr_scheduler, cosine_lr_scheduler])
else:
raise NotImplementedError
else:
if lr_mode == 'step':
lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(lr_iter_discount, lr_factor)
elif lr_mode == 'cosine':
lr_scheduler = AdvancedLRScheduler(
mode='cosine',
base_lr=base_lr,
target_lr=0,
niters=iter_per_epoch * (end_epoch - begin_epoch) // kv.num_workers
)
else:
lr_scheduler = None
# optimizer
optimizer_params = dict(
momentum=pOpt.optimizer.momentum,
wd=pOpt.optimizer.wd,
learning_rate=current_lr,
lr_scheduler=lr_scheduler,
rescale_grad=1.0 / (len(pKv.gpus) * kv.num_workers),
clip_gradient=pOpt.optimizer.clip_gradient
)
if pKv.fp16:
optimizer_params['multi_precision'] = True
optimizer_params['rescale_grad'] /= 128.0
profile = pGen.profile or False
if profile:
mx.profiler.set_config(profile_all=True, filename=os.path.join(save_path, "profile.json"))
# train
mod.fit(
train_data=train_data,
eval_metric=eval_metrics,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kv,
optimizer=pOpt.optimizer.type,
optimizer_params=optimizer_params,
initializer=init,
allow_missing=True,
arg_params=arg_params,
aux_params=aux_params,
begin_epoch=begin_epoch,
num_epoch=end_epoch,
profile=profile
)
logging.info("Training has done")
time.sleep(10)
logging.info("Exiting")
def parse_args():
parser = argparse.ArgumentParser(description='Train Detection')
parser.add_argument('--config', help='config file path', type=str)
args = parser.parse_args()
config = importlib.import_module(args.config.replace('.py', '').replace('/', '.'))
return config
if __name__ == '__main__':
train_net(parse_args())
| en | 0.636597 | # only rank==0 print all debug infos # for distributed training using shared file system # set up logger # setup multi-gpu # print config # if rank == 0: # logger.info(pprint.pformat(config)) # load dataset and prepare imdb for training # filter empty image # add flip roi record # infer shape # memonger # load and initialize params # merge batch normalization to save memory in fix bn training # create solver # callback # decide learning rate # optimizer # train | 1.89022 | 2 |
sequence_search/consumer/nhmmer_parse.py | RNAcentral/sequence_search | 2 | 6624361 | <gh_stars>1-10
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import re
import os
def record_generator(f, delimiter='\n', bufsize=4096):
"""
Read file using input record separator.
Based on this SO question:
http://stackoverflow.com/questions/19600475/how-to-read-records-terminated-by-custom-separator-from-file-in-python
"""
buf = ''
while True:
newbuf = f.read(bufsize)
if not newbuf:
yield buf
return
buf += newbuf
lines = buf.split(delimiter)
for line in lines[:-1]:
yield line
buf = lines[-1]
def parse_record_description(lines):
"""
Example:
URS0000000013 Vibrio gigantis partial 16S ribosomal RNA
score bias Evalue hmmfrom hmm to alifrom ali to envfrom env to sq len acc
------ ----- --------- ------- ------- --------- --------- --------- --------- --------- ----
! 76.9 4.1 5.4e-23 67 196 .. 41 170 .. 21 175 .. 1421 0.94
"""
def parse_first_line(line):
"""Get URS id and description."""
match = re.search(r'URS[0-9A-Fa-f]{10}(_\d+)?', line)
return {
'rnacentral_id': match.group(),
'description': line.replace(match.group(), '').replace(';', '').strip(),
}
def parse_fourth_line(line):
"""Parse out hit statistics."""
line = line.replace('!', '').\
replace('?', '').\
replace('[]', '').\
replace('[.', '').\
replace('.]', '').\
replace('..', '').\
strip()
scores = re.split(r' +', line)
return {
'score': float(scores[0]),
'bias': float(scores[1]),
'e_value': float(scores[2]),
'alignment_start': float(scores[5]),
'alignment_stop': float(scores[6]),
'target_length': int(scores[9]),
}
data = parse_first_line(lines[0])
data.update(parse_fourth_line(lines[3]))
return data
def parse_alignment(lines, target_length, query_length):
"""
Example:
score: 76.9 bits
query 67 gagcggcggacgggugaguaaugccuaggaaucugccugguagugggggauaacgcucggaaacggacgcuaauaccgcauacguccuacgggaga 162
gagcggcggacgggugaguaaugccuaggaa ugccu g gugggggauaac u ggaaacg gcuaauaccgcaua ccuacggg a
URS0000000013 41 GAGCGGCGGACGGGUGAGUAAUGCCUAGGAAAUUGCCUUGAUGUGGGGGAUAACCAUUGGAAACGAUGGCUAAUACCGCAUAAUGCCUACGGGCCA 136
789********************************************************************************************* PP
query 163 aagcaggggaccuucgggccuugcgcuaucagau 196
aag ggggaccuucgggccu cgc agau
URS0000000013 137 AAGAGGGGGACCUUCGGGCCUCUCGCGUCAAGAU 170
*********************9999877777766 PP
"""
alignment = []
alignment_length = 0
matches = 0
nts_count1 = 0
nts_count2 = 0
gap_count = 0
alignment_sequence = ''
for i, line in enumerate(lines):
gaps = line.count('-') + line.count('.')
if i % 5 == 0: # query
match = re.match(r'^(\s+query\s+\d+ )(.+) \d+', line)
if match:
label = match.group(1)
block_length = len(match.group(2))
alignment_length += block_length
line = line.upper().replace('.', '-').replace('QUERY', 'Query')
line = re.sub('^\s+', '', line)
alignment.append(line)
match2 = re.match('^Query\s+\d+ ', line)
whitespace = len(match2.group(0))
nts_count1 += block_length - gaps
gap_count += gaps
elif i % 5 == 1: # matches
line = ' ' * whitespace + re.sub('\w', '|', line[len(label):])
matches += line.count('|')
alignment.append(line)
elif i % 5 == 2: # target
line = re.sub('\s+URS[0-9A-Fa-f]{10}(_\d+)?;?', 'Sbjct', line.upper())
match = re.match(r'^Sbjct\s+\d+ (.+) \d+', line)
if match:
block_length = len(match.group(1))
nts_count2 += block_length - gaps
gap_count += gaps
alignment.append(line)
try:
get_alignment_sequence = list(filter(None, line.split(" ")))[2]
alignment_sequence += get_alignment_sequence
except IndexError:
pass
elif i % 5 == 3: # skip nhmmer confidence lines
pass
elif i % 5 == 4: # blank line
alignment.append(line)
return {
'alignment': '\n'.join(alignment).strip(),
'alignment_length': alignment_length,
'gap_count': gap_count,
'match_count': matches,
'nts_count1': nts_count1,
'nts_count2': nts_count2,
'identity': (float(matches) / alignment_length) * 100,
'query_coverage': (float(nts_count1) / query_length) * 100,
'target_coverage': (float(nts_count2) / target_length) * 100,
'gaps': (float(gap_count) / alignment_length) * 100,
'alignment_sequence': alignment_sequence.replace('-', '')
}
def parse_record(text, query_length):
"""
Example record:
URS0000000013 Vibrio gigantis partial 16S ribosomal RNA
score bias Evalue hmmfrom hmm to alifrom ali to envfrom env to sq len acc
------ ----- --------- ------- ------- --------- --------- --------- --------- --------- ----
! 76.9 4.1 5.4e-23 67 196 .. 41 170 .. 21 175 .. 1421 0.94
Alignment:
score: 76.9 bits
query 67 gagcggcggacgggugaguaaugccuaggaaucugccugguagugggggauaacgcucggaaacggacgcuaauaccgcauacguccuacgggaga 162
gagcggcggacgggugaguaaugccuaggaa ugccu g gugggggauaac u ggaaacg gcuaauaccgcaua ccuacggg a
URS0000000013 41 GAGCGGCGGACGGGUGAGUAAUGCCUAGGAAAUUGCCUUGAUGUGGGGGAUAACCAUUGGAAACGAUGGCUAAUACCGCAUAAUGCCUACGGGCCA 136
789********************************************************************************************* PP
query 163 aagcaggggaccuucgggccuugcgcuaucagau 196
aag ggggaccuucgggccu cgc agau
URS0000000013 137 AAGAGGGGGACCUUCGGGCCUCUCGCGUCAAGAU 170
*********************9999877777766 PP
"""
lines = text.split('\n')
data = parse_record_description(lines[:6])
data.update(parse_alignment(lines[7:], data['target_length'], query_length))
data['query_length'] = query_length
return data
def strip_out_internal_stats(record, stats_text):
"""Delete lines with internal query statistics."""
delete = 0
lines = record.split('\n')
for i, line in enumerate(lines):
if stats_text in line:
delete = i
break
if delete:
lines = lines[:delete]
return '\n'.join(lines)
def get_query_length(record):
"""
Get query sequence length.
Example line:
Query: query [M=2905]
"""
match = re.search(r'Query: query \[M=(\d+)\]', record)
return int(match.group(1)) if match else 0
def nhmmer_parse(filename="", stats_text='Internal pipeline statistics summary'):
"""Split file into matches and return parsed data."""
with open(filename, 'r') as f:
for i, record in enumerate(record_generator(f, '>>')):
if i == 0:
# first block with job stats
query_length = get_query_length(record)
continue
if stats_text in record: # last record contains internal query statistics
record = strip_out_internal_stats(record, stats_text)
data = parse_record(record, query_length)
data['result_id'] = i
yield data
def parse_number_of_hits(filename):
command = "tail -n 10 %s | grep Total" % filename
total = os.popen(command).read()
return total if total else None
if __name__ == "__main__":
"""Run from command line for testing purposes."""
import sys
if len(sys.argv) < 2:
print('Provide full path to the input file with nhmmer search results')
sys.exit(1)
filename = sys.argv[1]
for record in nhmmer_parse(filename=filename):
print(record)
| """
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import re
import os
def record_generator(f, delimiter='\n', bufsize=4096):
"""
Read file using input record separator.
Based on this SO question:
http://stackoverflow.com/questions/19600475/how-to-read-records-terminated-by-custom-separator-from-file-in-python
"""
buf = ''
while True:
newbuf = f.read(bufsize)
if not newbuf:
yield buf
return
buf += newbuf
lines = buf.split(delimiter)
for line in lines[:-1]:
yield line
buf = lines[-1]
def parse_record_description(lines):
"""
Example:
URS0000000013 Vibrio gigantis partial 16S ribosomal RNA
score bias Evalue hmmfrom hmm to alifrom ali to envfrom env to sq len acc
------ ----- --------- ------- ------- --------- --------- --------- --------- --------- ----
! 76.9 4.1 5.4e-23 67 196 .. 41 170 .. 21 175 .. 1421 0.94
"""
def parse_first_line(line):
"""Get URS id and description."""
match = re.search(r'URS[0-9A-Fa-f]{10}(_\d+)?', line)
return {
'rnacentral_id': match.group(),
'description': line.replace(match.group(), '').replace(';', '').strip(),
}
def parse_fourth_line(line):
"""Parse out hit statistics."""
line = line.replace('!', '').\
replace('?', '').\
replace('[]', '').\
replace('[.', '').\
replace('.]', '').\
replace('..', '').\
strip()
scores = re.split(r' +', line)
return {
'score': float(scores[0]),
'bias': float(scores[1]),
'e_value': float(scores[2]),
'alignment_start': float(scores[5]),
'alignment_stop': float(scores[6]),
'target_length': int(scores[9]),
}
data = parse_first_line(lines[0])
data.update(parse_fourth_line(lines[3]))
return data
def parse_alignment(lines, target_length, query_length):
"""
Example:
score: 76.9 bits
query 67 gagcggcggacgggugaguaaugccuaggaaucugccugguagugggggauaacgcucggaaacggacgcuaauaccgcauacguccuacgggaga 162
gagcggcggacgggugaguaaugccuaggaa ugccu g gugggggauaac u ggaaacg gcuaauaccgcaua ccuacggg a
URS0000000013 41 GAGCGGCGGACGGGUGAGUAAUGCCUAGGAAAUUGCCUUGAUGUGGGGGAUAACCAUUGGAAACGAUGGCUAAUACCGCAUAAUGCCUACGGGCCA 136
789********************************************************************************************* PP
query 163 aagcaggggaccuucgggccuugcgcuaucagau 196
aag ggggaccuucgggccu cgc agau
URS0000000013 137 AAGAGGGGGACCUUCGGGCCUCUCGCGUCAAGAU 170
*********************9999877777766 PP
"""
alignment = []
alignment_length = 0
matches = 0
nts_count1 = 0
nts_count2 = 0
gap_count = 0
alignment_sequence = ''
for i, line in enumerate(lines):
gaps = line.count('-') + line.count('.')
if i % 5 == 0: # query
match = re.match(r'^(\s+query\s+\d+ )(.+) \d+', line)
if match:
label = match.group(1)
block_length = len(match.group(2))
alignment_length += block_length
line = line.upper().replace('.', '-').replace('QUERY', 'Query')
line = re.sub('^\s+', '', line)
alignment.append(line)
match2 = re.match('^Query\s+\d+ ', line)
whitespace = len(match2.group(0))
nts_count1 += block_length - gaps
gap_count += gaps
elif i % 5 == 1: # matches
line = ' ' * whitespace + re.sub('\w', '|', line[len(label):])
matches += line.count('|')
alignment.append(line)
elif i % 5 == 2: # target
line = re.sub('\s+URS[0-9A-Fa-f]{10}(_\d+)?;?', 'Sbjct', line.upper())
match = re.match(r'^Sbjct\s+\d+ (.+) \d+', line)
if match:
block_length = len(match.group(1))
nts_count2 += block_length - gaps
gap_count += gaps
alignment.append(line)
try:
get_alignment_sequence = list(filter(None, line.split(" ")))[2]
alignment_sequence += get_alignment_sequence
except IndexError:
pass
elif i % 5 == 3: # skip nhmmer confidence lines
pass
elif i % 5 == 4: # blank line
alignment.append(line)
return {
'alignment': '\n'.join(alignment).strip(),
'alignment_length': alignment_length,
'gap_count': gap_count,
'match_count': matches,
'nts_count1': nts_count1,
'nts_count2': nts_count2,
'identity': (float(matches) / alignment_length) * 100,
'query_coverage': (float(nts_count1) / query_length) * 100,
'target_coverage': (float(nts_count2) / target_length) * 100,
'gaps': (float(gap_count) / alignment_length) * 100,
'alignment_sequence': alignment_sequence.replace('-', '')
}
def parse_record(text, query_length):
"""
Example record:
URS0000000013 Vibrio gigantis partial 16S ribosomal RNA
score bias Evalue hmmfrom hmm to alifrom ali to envfrom env to sq len acc
------ ----- --------- ------- ------- --------- --------- --------- --------- --------- ----
! 76.9 4.1 5.4e-23 67 196 .. 41 170 .. 21 175 .. 1421 0.94
Alignment:
score: 76.9 bits
query 67 gagcggcggacgggugaguaaugccuaggaaucugccugguagugggggauaacgcucggaaacggacgcuaauaccgcauacguccuacgggaga 162
gagcggcggacgggugaguaaugccuaggaa ugccu g gugggggauaac u ggaaacg gcuaauaccgcaua ccuacggg a
URS0000000013 41 GAGCGGCGGACGGGUGAGUAAUGCCUAGGAAAUUGCCUUGAUGUGGGGGAUAACCAUUGGAAACGAUGGCUAAUACCGCAUAAUGCCUACGGGCCA 136
789********************************************************************************************* PP
query 163 aagcaggggaccuucgggccuugcgcuaucagau 196
aag ggggaccuucgggccu cgc agau
URS0000000013 137 AAGAGGGGGACCUUCGGGCCUCUCGCGUCAAGAU 170
*********************9999877777766 PP
"""
lines = text.split('\n')
data = parse_record_description(lines[:6])
data.update(parse_alignment(lines[7:], data['target_length'], query_length))
data['query_length'] = query_length
return data
def strip_out_internal_stats(record, stats_text):
"""Delete lines with internal query statistics."""
delete = 0
lines = record.split('\n')
for i, line in enumerate(lines):
if stats_text in line:
delete = i
break
if delete:
lines = lines[:delete]
return '\n'.join(lines)
def get_query_length(record):
"""
Get query sequence length.
Example line:
Query: query [M=2905]
"""
match = re.search(r'Query: query \[M=(\d+)\]', record)
return int(match.group(1)) if match else 0
def nhmmer_parse(filename="", stats_text='Internal pipeline statistics summary'):
"""Split file into matches and return parsed data."""
with open(filename, 'r') as f:
for i, record in enumerate(record_generator(f, '>>')):
if i == 0:
# first block with job stats
query_length = get_query_length(record)
continue
if stats_text in record: # last record contains internal query statistics
record = strip_out_internal_stats(record, stats_text)
data = parse_record(record, query_length)
data['result_id'] = i
yield data
def parse_number_of_hits(filename):
command = "tail -n 10 %s | grep Total" % filename
total = os.popen(command).read()
return total if total else None
if __name__ == "__main__":
"""Run from command line for testing purposes."""
import sys
if len(sys.argv) < 2:
print('Provide full path to the input file with nhmmer search results')
sys.exit(1)
filename = sys.argv[1]
for record in nhmmer_parse(filename=filename):
print(record) | en | 0.495856 | Copyright [2009-2019] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Read file using input record separator. Based on this SO question: http://stackoverflow.com/questions/19600475/how-to-read-records-terminated-by-custom-separator-from-file-in-python Example: URS0000000013 Vibrio gigantis partial 16S ribosomal RNA score bias Evalue hmmfrom hmm to alifrom ali to envfrom env to sq len acc ------ ----- --------- ------- ------- --------- --------- --------- --------- --------- ---- ! 76.9 4.1 5.4e-23 67 196 .. 41 170 .. 21 175 .. 1421 0.94 Get URS id and description. Parse out hit statistics. 
Example: score: 76.9 bits query 67 gagcggcggacgggugaguaaugccuaggaaucugccugguagugggggauaacgcucggaaacggacgcuaauaccgcauacguccuacgggaga 162 gagcggcggacgggugaguaaugccuaggaa ugccu g gugggggauaac u ggaaacg gcuaauaccgcaua ccuacggg a URS0000000013 41 GAGCGGCGGACGGGUGAGUAAUGCCUAGGAAAUUGCCUUGAUGUGGGGGAUAACCAUUGGAAACGAUGGCUAAUACCGCAUAAUGCCUACGGGCCA 136 789********************************************************************************************* PP query 163 aagcaggggaccuucgggccuugcgcuaucagau 196 aag ggggaccuucgggccu cgc agau URS0000000013 137 AAGAGGGGGACCUUCGGGCCUCUCGCGUCAAGAU 170 *********************9999877777766 PP # query # matches # target # skip nhmmer confidence lines # blank line Example record: URS0000000013 Vibrio gigantis partial 16S ribosomal RNA score bias Evalue hmmfrom hmm to alifrom ali to envfrom env to sq len acc ------ ----- --------- ------- ------- --------- --------- --------- --------- --------- ---- ! 76.9 4.1 5.4e-23 67 196 .. 41 170 .. 21 175 .. 1421 0.94 Alignment: score: 76.9 bits query 67 gagcggcggacgggugaguaaugccuaggaaucugccugguagugggggauaacgcucggaaacggacgcuaauaccgcauacguccuacgggaga 162 gagcggcggacgggugaguaaugccuaggaa ugccu g gugggggauaac u ggaaacg gcuaauaccgcaua ccuacggg a URS0000000013 41 GAGCGGCGGACGGGUGAGUAAUGCCUAGGAAAUUGCCUUGAUGUGGGGGAUAACCAUUGGAAACGAUGGCUAAUACCGCAUAAUGCCUACGGGCCA 136 789********************************************************************************************* PP query 163 aagcaggggaccuucgggccuugcgcuaucagau 196 aag ggggaccuucgggccu cgc agau URS0000000013 137 AAGAGGGGGACCUUCGGGCCUCUCGCGUCAAGAU 170 *********************9999877777766 PP Delete lines with internal query statistics. Get query sequence length. Example line: Query: query [M=2905] Split file into matches and return parsed data. # first block with job stats # last record contains internal query statistics Run from command line for testing purposes. | 2.589684 | 3 |
test/dialect/mssql/test_reflection.py | lxl0928/timi_sqlalchemy | 1 | 6624362 | # -*- encoding: utf-8
from sqlalchemy import Column
from sqlalchemy import DDL
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import types
from sqlalchemy import util
from sqlalchemy.databases import mssql
from sqlalchemy.dialects.mssql import base
from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode
from sqlalchemy.dialects.mssql.information_schema import tables
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import ComparesTables
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
class ReflectionTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
    """Round-trip table and type reflection tests for the MSSQL dialect.

    Each test creates schema objects against a live MSSQL backend, reflects
    them back through a fresh :class:`.MetaData` or an inspector, and checks
    that the reflected structure matches what was created.
    """

    __only_on__ = "mssql"
    __backend__ = True
    @testing.provide_metadata
    def test_basic_reflection(self):
        """Create two related tables covering many column types, reflect
        them into a new MetaData, and compare against the originals."""
        meta = self.metadata
        users = Table(
            "engine_users",
            meta,
            Column("user_id", types.INT, primary_key=True),
            Column("user_name", types.VARCHAR(20), nullable=False),
            Column("test1", types.CHAR(5), nullable=False),
            Column("test2", types.Float(5), nullable=False),
            Column("test2.5", types.Float(), nullable=False),
            Column("test3", types.Text()),
            Column("test4", types.Numeric, nullable=False),
            Column("test4.5", types.Numeric(10, 2), nullable=False),
            Column("test5", types.DateTime),
            Column(
                "parent_user_id",
                types.Integer,
                ForeignKey("engine_users.user_id"),
            ),
            Column("test6", types.DateTime, nullable=False),
            Column("test7", types.Text()),
            Column("test8", types.LargeBinary()),
            Column("test_passivedefault2", types.Integer, server_default="5"),
            Column("test9", types.BINARY(100)),
            Column("test_numeric", types.Numeric()),
        )
        addresses = Table(
            "engine_email_addresses",
            meta,
            Column("address_id", types.Integer, primary_key=True),
            Column(
                "remote_user_id", types.Integer, ForeignKey(users.c.user_id)
            ),
            Column("email_address", types.String(20)),
        )
        meta.create_all()
        meta2 = MetaData()
        reflected_users = Table(
            "engine_users", meta2, autoload=True, autoload_with=testing.db
        )
        reflected_addresses = Table(
            "engine_email_addresses",
            meta2,
            autoload=True,
            autoload_with=testing.db,
        )
        self.assert_tables_equal(users, reflected_users)
        self.assert_tables_equal(addresses, reflected_addresses)
    @testing.provide_metadata
    def _test_specific_type(self, type_obj, ddl):
        """Helper: create a one-column table with ``type_obj``, reflect it,
        and assert the reflected table compiles to the expected ``ddl``."""
        metadata = self.metadata
        table = Table("type_test", metadata, Column("col1", type_obj))
        table.create()
        m2 = MetaData()
        table2 = Table("type_test", m2, autoload_with=testing.db)
        self.assert_compile(
            schema.CreateTable(table2),
            "CREATE TABLE type_test (col1 %s NULL)" % ddl,
        )
    # Per-type reflection checks, all routed through _test_specific_type.
    def test_xml_type(self):
        self._test_specific_type(mssql.XML, "XML")
    def test_image_type(self):
        self._test_specific_type(mssql.IMAGE, "IMAGE")
    def test_money_type(self):
        self._test_specific_type(mssql.MONEY, "MONEY")
    def test_numeric_prec_scale(self):
        self._test_specific_type(mssql.NUMERIC(10, 2), "NUMERIC(10, 2)")
    def test_float(self):
        self._test_specific_type(mssql.FLOAT, "FLOAT(53)")
    def test_real(self):
        self._test_specific_type(mssql.REAL, "REAL")
    def test_float_as_real(self):
        # FLOAT(5) comes back as REAL
        self._test_specific_type(mssql.FLOAT(5), "REAL")
    @testing.provide_metadata
    def test_identity(self):
        """IDENTITY start/increment values round-trip through reflection
        into the column's mssql dialect options."""
        metadata = self.metadata
        table = Table(
            "identity_test",
            metadata,
            Column(
                "col1",
                Integer,
                mssql_identity_start=2,
                mssql_identity_increment=3,
                primary_key=True,
            ),
        )
        table.create()
        meta2 = MetaData(testing.db)
        table2 = Table("identity_test", meta2, autoload=True)
        eq_(table2.c["col1"].dialect_options["mssql"]["identity_start"], 2)
        eq_(table2.c["col1"].dialect_options["mssql"]["identity_increment"], 3)
    @testing.emits_warning("Did not recognize")
    @testing.provide_metadata
    def test_skip_types(self):
        """Columns whose type is missing from ischema_names reflect as
        NullType and emit a "Did not recognize" warning rather than failing."""
        metadata = self.metadata
        testing.db.execute(
            """
            create table foo (id integer primary key, data xml)
            """
        )
        # Shrink the dialect's type map so "xml" is unknown during reflection.
        with mock.patch.object(
            testing.db.dialect, "ischema_names", {"int": mssql.INTEGER}
        ):
            t1 = Table("foo", metadata, autoload=True)
        assert isinstance(t1.c.id.type, Integer)
        assert isinstance(t1.c.data.type, types.NullType)
    @testing.provide_metadata
    def test_cross_schema_fk_pk_name_overlaps(self):
        # test for issue #4228
        # FK reflection must not be confused by a same-named PK constraint
        # on an identically named table in a different schema.
        metadata = self.metadata
        Table(
            "subject",
            metadata,
            Column("id", Integer),
            PrimaryKeyConstraint("id", name="subj_pk"),
            schema=testing.config.test_schema,
        )
        Table(
            "referrer",
            metadata,
            Column("id", Integer, primary_key=True),
            Column(
                "sid",
                ForeignKey(
                    "%s.subject.id" % testing.config.test_schema,
                    name="fk_subject",
                ),
            ),
            schema=testing.config.test_schema,
        )
        Table(
            "subject",
            metadata,
            Column("id", Integer),
            PrimaryKeyConstraint("id", name="subj_pk"),
            schema=testing.config.test_schema_2,
        )
        metadata.create_all()
        insp = inspect(testing.db)
        # NOTE(review): expected referred_schema is the literal "test_schema";
        # presumably this matches testing.config.test_schema on the CI setup.
        eq_(
            insp.get_foreign_keys("referrer", testing.config.test_schema),
            [
                {
                    "name": "fk_subject",
                    "constrained_columns": ["sid"],
                    "referred_schema": "test_schema",
                    "referred_table": "subject",
                    "referred_columns": ["id"],
                }
            ],
        )
    @testing.provide_metadata
    def test_db_qualified_items(self):
        """Reflection works when the schema is given in fully qualified
        ``database.owner`` form."""
        metadata = self.metadata
        Table("foo", metadata, Column("id", Integer, primary_key=True))
        Table(
            "bar",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("foo_id", Integer, ForeignKey("foo.id", name="fkfoo")),
        )
        metadata.create_all()
        dbname = testing.db.scalar("select db_name()")
        owner = testing.db.scalar("SELECT user_name()")
        referred_schema = "%(dbname)s.%(owner)s" % {
            "dbname": dbname,
            "owner": owner,
        }
        inspector = inspect(testing.db)
        bar_via_db = inspector.get_foreign_keys("bar", schema=referred_schema)
        eq_(
            bar_via_db,
            [
                {
                    "referred_table": "foo",
                    "referred_columns": ["id"],
                    "referred_schema": referred_schema,
                    "name": "fkfoo",
                    "constrained_columns": ["foo_id"],
                }
            ],
        )
        assert testing.db.has_table("bar", schema=referred_schema)
        m2 = MetaData()
        Table(
            "bar",
            m2,
            schema=referred_schema,
            autoload=True,
            autoload_with=testing.db,
        )
        # The FK target "foo" is reflected under the same qualified schema.
        eq_(m2.tables["%s.foo" % referred_schema].schema, referred_schema)
    @testing.provide_metadata
    def test_indexes_cols(self):
        """A two-column index reflects with both columns present."""
        metadata = self.metadata
        t1 = Table("t", metadata, Column("x", Integer), Column("y", Integer))
        Index("foo", t1.c.x, t1.c.y)
        metadata.create_all()
        m2 = MetaData()
        t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
        eq_(set(list(t2.indexes)[0].columns), set([t2.c["x"], t2.c.y]))
    @testing.provide_metadata
    def test_indexes_cols_with_commas(self):
        """Index reflection handles a column name containing a comma."""
        metadata = self.metadata
        t1 = Table(
            "t",
            metadata,
            Column("x, col", Integer, key="x"),
            Column("y", Integer),
        )
        Index("foo", t1.c.x, t1.c.y)
        metadata.create_all()
        m2 = MetaData()
        t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
        eq_(set(list(t2.indexes)[0].columns), set([t2.c["x, col"], t2.c.y]))
    @testing.provide_metadata
    def test_indexes_cols_with_spaces(self):
        """Index reflection handles a column name containing a space."""
        metadata = self.metadata
        t1 = Table(
            "t",
            metadata,
            Column("x col", Integer, key="x"),
            Column("y", Integer),
        )
        Index("foo", t1.c.x, t1.c.y)
        metadata.create_all()
        m2 = MetaData()
        t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
        eq_(set(list(t2.indexes)[0].columns), set([t2.c["x col"], t2.c.y]))
    @testing.provide_metadata
    def test_max_ident_in_varchar_not_present(self):
        """test [ticket:3504].
        Here we are testing not just that the "max" token comes back
        as None, but also that these types accept "max" as the value
        of "length" on construction, which isn't a directly documented
        pattern however is likely in common use.
        """
        metadata = self.metadata
        Table(
            "t",
            metadata,
            Column("t1", types.String),
            Column("t2", types.Text("max")),
            Column("t3", types.Text("max")),
            Column("t4", types.LargeBinary("max")),
            Column("t5", types.VARBINARY("max")),
        )
        metadata.create_all()
        for col in inspect(testing.db).get_columns("t"):
            is_(col["type"].length, None)
            in_("max", str(col["type"].compile(dialect=testing.db.dialect)))
class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL):
    """Checks unicode coercion behavior of MSSQL information_schema columns."""

    def test_info_unicode_coercion(self):
        """Bound values pass through CoerceUnicode as text_type."""
        dialect = mssql.dialect()
        processor = CoerceUnicode().bind_processor(dialect)
        coerced = processor("a string")
        assert isinstance(coerced, util.text_type)

    def test_info_unicode_cast_no_2000(self):
        """On SQL Server 2000 the comparison compiles without a CAST."""
        dialect = mssql.dialect()
        dialect.server_version_info = base.MS_2000_VERSION
        expr = tables.c.table_name == "somename"
        self.assert_compile(
            expr,
            "[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = :table_name_1",
            dialect=dialect,
        )

    def test_info_unicode_cast(self):
        """On SQL Server 2005+ the bound value is CAST to NVARCHAR(max)."""
        dialect = mssql.dialect()
        dialect.server_version_info = base.MS_2005_VERSION
        expr = tables.c.table_name == "somename"
        self.assert_compile(
            expr,
            "[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = "
            "CAST(:table_name_1 AS NVARCHAR(max))",
            dialect=dialect,
        )
class ReflectHugeViewTest(fixtures.TestBase):
    """Verify view-definition reflection for a view whose DDL exceeds
    4000 characters (the NVARCHAR(4000) boundary)."""

    __only_on__ = "mssql"
    __backend__ = True

    # crashes on freetds 0.91, not worth it
    __skip_if__ = (lambda: testing.requires.mssql_freetds.enabled,)

    def setup(self):
        self.col_num = 150
        self.metadata = MetaData(testing.db)
        # Many long-named columns guarantee the view DDL is > 4000 chars.
        columns = [
            Column("long_named_column_number_%d" % i, Integer)
            for i in range(self.col_num)
        ]
        base_table = Table("base_table", self.metadata, *columns)
        column_list = ",".join(
            "long_named_column_number_%d" % i for i in range(self.col_num)
        )
        self.view_str = view_str = (
            "CREATE VIEW huge_named_view AS SELECT %s FROM base_table"
            % column_list
        )
        assert len(view_str) > 4000
        # Create/drop the view alongside the backing table.
        event.listen(base_table, "after_create", DDL(view_str))
        event.listen(base_table, "before_drop", DDL("DROP VIEW huge_named_view"))
        self.metadata.create_all()

    def teardown(self):
        self.metadata.drop_all()

    def test_inspect_view_definition(self):
        inspector = Inspector.from_engine(testing.db)
        reflected = inspector.get_view_definition("huge_named_view")
        eq_(reflected, self.view_str)
# -*- encoding: utf-8
from sqlalchemy import Column
from sqlalchemy import DDL
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import types
from sqlalchemy import util
from sqlalchemy.databases import mssql
from sqlalchemy.dialects.mssql import base
from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode
from sqlalchemy.dialects.mssql.information_schema import tables
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import ComparesTables
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
class ReflectionTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
    """Round-trip table and type reflection tests for the MSSQL dialect.

    Each test creates schema objects against a live MSSQL backend, reflects
    them back through a fresh :class:`.MetaData` or an inspector, and checks
    that the reflected structure matches what was created.
    """

    __only_on__ = "mssql"
    __backend__ = True
    @testing.provide_metadata
    def test_basic_reflection(self):
        """Create two related tables covering many column types, reflect
        them into a new MetaData, and compare against the originals."""
        meta = self.metadata
        users = Table(
            "engine_users",
            meta,
            Column("user_id", types.INT, primary_key=True),
            Column("user_name", types.VARCHAR(20), nullable=False),
            Column("test1", types.CHAR(5), nullable=False),
            Column("test2", types.Float(5), nullable=False),
            Column("test2.5", types.Float(), nullable=False),
            Column("test3", types.Text()),
            Column("test4", types.Numeric, nullable=False),
            Column("test4.5", types.Numeric(10, 2), nullable=False),
            Column("test5", types.DateTime),
            Column(
                "parent_user_id",
                types.Integer,
                ForeignKey("engine_users.user_id"),
            ),
            Column("test6", types.DateTime, nullable=False),
            Column("test7", types.Text()),
            Column("test8", types.LargeBinary()),
            Column("test_passivedefault2", types.Integer, server_default="5"),
            Column("test9", types.BINARY(100)),
            Column("test_numeric", types.Numeric()),
        )
        addresses = Table(
            "engine_email_addresses",
            meta,
            Column("address_id", types.Integer, primary_key=True),
            Column(
                "remote_user_id", types.Integer, ForeignKey(users.c.user_id)
            ),
            Column("email_address", types.String(20)),
        )
        meta.create_all()
        meta2 = MetaData()
        reflected_users = Table(
            "engine_users", meta2, autoload=True, autoload_with=testing.db
        )
        reflected_addresses = Table(
            "engine_email_addresses",
            meta2,
            autoload=True,
            autoload_with=testing.db,
        )
        self.assert_tables_equal(users, reflected_users)
        self.assert_tables_equal(addresses, reflected_addresses)
    @testing.provide_metadata
    def _test_specific_type(self, type_obj, ddl):
        """Helper: create a one-column table with ``type_obj``, reflect it,
        and assert the reflected table compiles to the expected ``ddl``."""
        metadata = self.metadata
        table = Table("type_test", metadata, Column("col1", type_obj))
        table.create()
        m2 = MetaData()
        table2 = Table("type_test", m2, autoload_with=testing.db)
        self.assert_compile(
            schema.CreateTable(table2),
            "CREATE TABLE type_test (col1 %s NULL)" % ddl,
        )
    # Per-type reflection checks, all routed through _test_specific_type.
    def test_xml_type(self):
        self._test_specific_type(mssql.XML, "XML")
    def test_image_type(self):
        self._test_specific_type(mssql.IMAGE, "IMAGE")
    def test_money_type(self):
        self._test_specific_type(mssql.MONEY, "MONEY")
    def test_numeric_prec_scale(self):
        self._test_specific_type(mssql.NUMERIC(10, 2), "NUMERIC(10, 2)")
    def test_float(self):
        self._test_specific_type(mssql.FLOAT, "FLOAT(53)")
    def test_real(self):
        self._test_specific_type(mssql.REAL, "REAL")
    def test_float_as_real(self):
        # FLOAT(5) comes back as REAL
        self._test_specific_type(mssql.FLOAT(5), "REAL")
    @testing.provide_metadata
    def test_identity(self):
        """IDENTITY start/increment values round-trip through reflection
        into the column's mssql dialect options."""
        metadata = self.metadata
        table = Table(
            "identity_test",
            metadata,
            Column(
                "col1",
                Integer,
                mssql_identity_start=2,
                mssql_identity_increment=3,
                primary_key=True,
            ),
        )
        table.create()
        meta2 = MetaData(testing.db)
        table2 = Table("identity_test", meta2, autoload=True)
        eq_(table2.c["col1"].dialect_options["mssql"]["identity_start"], 2)
        eq_(table2.c["col1"].dialect_options["mssql"]["identity_increment"], 3)
    @testing.emits_warning("Did not recognize")
    @testing.provide_metadata
    def test_skip_types(self):
        """Columns whose type is missing from ischema_names reflect as
        NullType and emit a "Did not recognize" warning rather than failing."""
        metadata = self.metadata
        testing.db.execute(
            """
            create table foo (id integer primary key, data xml)
            """
        )
        # Shrink the dialect's type map so "xml" is unknown during reflection.
        with mock.patch.object(
            testing.db.dialect, "ischema_names", {"int": mssql.INTEGER}
        ):
            t1 = Table("foo", metadata, autoload=True)
        assert isinstance(t1.c.id.type, Integer)
        assert isinstance(t1.c.data.type, types.NullType)
    @testing.provide_metadata
    def test_cross_schema_fk_pk_name_overlaps(self):
        # test for issue #4228
        # FK reflection must not be confused by a same-named PK constraint
        # on an identically named table in a different schema.
        metadata = self.metadata
        Table(
            "subject",
            metadata,
            Column("id", Integer),
            PrimaryKeyConstraint("id", name="subj_pk"),
            schema=testing.config.test_schema,
        )
        Table(
            "referrer",
            metadata,
            Column("id", Integer, primary_key=True),
            Column(
                "sid",
                ForeignKey(
                    "%s.subject.id" % testing.config.test_schema,
                    name="fk_subject",
                ),
            ),
            schema=testing.config.test_schema,
        )
        Table(
            "subject",
            metadata,
            Column("id", Integer),
            PrimaryKeyConstraint("id", name="subj_pk"),
            schema=testing.config.test_schema_2,
        )
        metadata.create_all()
        insp = inspect(testing.db)
        # NOTE(review): expected referred_schema is the literal "test_schema";
        # presumably this matches testing.config.test_schema on the CI setup.
        eq_(
            insp.get_foreign_keys("referrer", testing.config.test_schema),
            [
                {
                    "name": "fk_subject",
                    "constrained_columns": ["sid"],
                    "referred_schema": "test_schema",
                    "referred_table": "subject",
                    "referred_columns": ["id"],
                }
            ],
        )
    @testing.provide_metadata
    def test_db_qualified_items(self):
        """Reflection works when the schema is given in fully qualified
        ``database.owner`` form."""
        metadata = self.metadata
        Table("foo", metadata, Column("id", Integer, primary_key=True))
        Table(
            "bar",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("foo_id", Integer, ForeignKey("foo.id", name="fkfoo")),
        )
        metadata.create_all()
        dbname = testing.db.scalar("select db_name()")
        owner = testing.db.scalar("SELECT user_name()")
        referred_schema = "%(dbname)s.%(owner)s" % {
            "dbname": dbname,
            "owner": owner,
        }
        inspector = inspect(testing.db)
        bar_via_db = inspector.get_foreign_keys("bar", schema=referred_schema)
        eq_(
            bar_via_db,
            [
                {
                    "referred_table": "foo",
                    "referred_columns": ["id"],
                    "referred_schema": referred_schema,
                    "name": "fkfoo",
                    "constrained_columns": ["foo_id"],
                }
            ],
        )
        assert testing.db.has_table("bar", schema=referred_schema)
        m2 = MetaData()
        Table(
            "bar",
            m2,
            schema=referred_schema,
            autoload=True,
            autoload_with=testing.db,
        )
        # The FK target "foo" is reflected under the same qualified schema.
        eq_(m2.tables["%s.foo" % referred_schema].schema, referred_schema)
    @testing.provide_metadata
    def test_indexes_cols(self):
        """A two-column index reflects with both columns present."""
        metadata = self.metadata
        t1 = Table("t", metadata, Column("x", Integer), Column("y", Integer))
        Index("foo", t1.c.x, t1.c.y)
        metadata.create_all()
        m2 = MetaData()
        t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
        eq_(set(list(t2.indexes)[0].columns), set([t2.c["x"], t2.c.y]))
    @testing.provide_metadata
    def test_indexes_cols_with_commas(self):
        """Index reflection handles a column name containing a comma."""
        metadata = self.metadata
        t1 = Table(
            "t",
            metadata,
            Column("x, col", Integer, key="x"),
            Column("y", Integer),
        )
        Index("foo", t1.c.x, t1.c.y)
        metadata.create_all()
        m2 = MetaData()
        t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
        eq_(set(list(t2.indexes)[0].columns), set([t2.c["x, col"], t2.c.y]))
    @testing.provide_metadata
    def test_indexes_cols_with_spaces(self):
        """Index reflection handles a column name containing a space."""
        metadata = self.metadata
        t1 = Table(
            "t",
            metadata,
            Column("x col", Integer, key="x"),
            Column("y", Integer),
        )
        Index("foo", t1.c.x, t1.c.y)
        metadata.create_all()
        m2 = MetaData()
        t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
        eq_(set(list(t2.indexes)[0].columns), set([t2.c["x col"], t2.c.y]))
    @testing.provide_metadata
    def test_max_ident_in_varchar_not_present(self):
        """test [ticket:3504].
        Here we are testing not just that the "max" token comes back
        as None, but also that these types accept "max" as the value
        of "length" on construction, which isn't a directly documented
        pattern however is likely in common use.
        """
        metadata = self.metadata
        Table(
            "t",
            metadata,
            Column("t1", types.String),
            Column("t2", types.Text("max")),
            Column("t3", types.Text("max")),
            Column("t4", types.LargeBinary("max")),
            Column("t5", types.VARBINARY("max")),
        )
        metadata.create_all()
        for col in inspect(testing.db).get_columns("t"):
            is_(col["type"].length, None)
            in_("max", str(col["type"].compile(dialect=testing.db.dialect)))
class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_info_unicode_coercion(self):
dialect = mssql.dialect()
value = CoerceUnicode().bind_processor(dialect)("a string")
assert isinstance(value, util.text_type)
def test_info_unicode_cast_no_2000(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2000_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = :table_name_1",
dialect=dialect,
)
def test_info_unicode_cast(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2005_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = "
"CAST(:table_name_1 AS NVARCHAR(max))",
dialect=dialect,
)
class ReflectHugeViewTest(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
# crashes on freetds 0.91, not worth it
__skip_if__ = (lambda: testing.requires.mssql_freetds.enabled,)
def setup(self):
self.col_num = 150
self.metadata = MetaData(testing.db)
t = Table(
"base_table",
self.metadata,
*[
Column("long_named_column_number_%d" % i, Integer)
for i in range(self.col_num)
]
)
self.view_str = view_str = (
"CREATE VIEW huge_named_view AS SELECT %s FROM base_table"
% (
",".join(
"long_named_column_number_%d" % i
for i in range(self.col_num)
)
)
)
assert len(view_str) > 4000
event.listen(t, "after_create", DDL(view_str))
event.listen(t, "before_drop", DDL("DROP VIEW huge_named_view"))
self.metadata.create_all()
def teardown(self):
self.metadata.drop_all()
def test_inspect_view_definition(self):
inspector = Inspector.from_engine(testing.db)
view_def = inspector.get_view_definition("huge_named_view")
eq_(view_def, self.view_str)
| en | 0.947667 | # -*- encoding: utf-8 # FLOAT(5) comes back as REAL create table foo (id integer primary key, data xml) # test for issue #4228 test [ticket:3504]. Here we are testing not just that the "max" token comes back as None, but also that these types accept "max" as the value of "length" on construction, which isn't a directly documented pattern however is likely in common use. # crashes on freetds 0.91, not worth it | 2.208454 | 2 |
robocode-python-ls-core/src/robocode_ls_core/subprocess_wrapper.py | emanlove/robotframework-lsp | 0 | 6624363 | <reponame>emanlove/robotframework-lsp<filename>robocode-python-ls-core/src/robocode_ls_core/subprocess_wrapper.py
import subprocess
def _fix_subprocess():
import sys
from robocode_ls_core.constants import IS_PY37_ONWARDS
# Workaround for https://bugs.python.org/issue37380 in older versions of Python.
# i.e.: OSError: [WinError 6] The handle is invalid
if IS_PY37_ONWARDS:
return # This is fixed in 3.7 onwards.
_cleanup = (
subprocess._cleanup
) # Just check that there is in fact a _cleanup attribute there.
def _new_cleanup():
for inst in subprocess._active[:]:
try:
res = inst._internal_poll(_deadstate=sys.maxsize)
except OSError:
res = 1 # This is the fix.
if res is not None:
try:
subprocess._active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
subprocess._cleanup = _new_cleanup
_fix_subprocess()
| import subprocess
def _fix_subprocess():
import sys
from robocode_ls_core.constants import IS_PY37_ONWARDS
# Workaround for https://bugs.python.org/issue37380 in older versions of Python.
# i.e.: OSError: [WinError 6] The handle is invalid
if IS_PY37_ONWARDS:
return # This is fixed in 3.7 onwards.
_cleanup = (
subprocess._cleanup
) # Just check that there is in fact a _cleanup attribute there.
def _new_cleanup():
for inst in subprocess._active[:]:
try:
res = inst._internal_poll(_deadstate=sys.maxsize)
except OSError:
res = 1 # This is the fix.
if res is not None:
try:
subprocess._active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
subprocess._cleanup = _new_cleanup
_fix_subprocess() | en | 0.914415 | # Workaround for https://bugs.python.org/issue37380 in older versions of Python. # i.e.: OSError: [WinError 6] The handle is invalid # This is fixed in 3.7 onwards. # Just check that there is in fact a _cleanup attribute there. # This is the fix. # This can happen if two threads create a new Popen instance. # It's harmless that it was already removed, so ignore. | 2.360623 | 2 |
Lib/test/test_traceback.py | odeke-em/cpython | 0 | 6624364 | """Test cases for traceback module"""
from collections import namedtuple
from io import StringIO
import linecache
import sys
import unittest
import re
from test.support import run_unittest, Error, captured_output
from test.support import TESTFN, unlink, cpython_only
from test.script_helper import assert_python_ok
import textwrap
import traceback
test_code = namedtuple('code', ['co_filename', 'co_name'])
test_frame = namedtuple('frame', ['f_code', 'f_globals', 'f_locals'])
test_tb = namedtuple('tb', ['tb_frame', 'tb_lineno', 'tb_next'])
class SyntaxTracebackCases(unittest.TestCase):
# For now, a very minimal set of tests. I want to be sure that
# formatting of SyntaxErrors works based on changes for 2.1.
def get_exception_format(self, func, exc):
try:
func()
except exc as value:
return traceback.format_exception_only(exc, value)
else:
raise ValueError("call did not raise exception")
def syntax_error_with_caret(self):
compile("def fact(x):\n\treturn x!\n", "?", "exec")
def syntax_error_with_caret_2(self):
compile("1 +\n", "?", "exec")
def syntax_error_bad_indentation(self):
compile("def spam():\n print(1)\n print(2)", "?", "exec")
def syntax_error_with_caret_non_ascii(self):
compile('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', "?", "exec")
def syntax_error_bad_indentation2(self):
compile(" print(2)", "?", "exec")
def test_caret(self):
err = self.get_exception_format(self.syntax_error_with_caret,
SyntaxError)
self.assertEqual(len(err), 4)
self.assertTrue(err[1].strip() == "return x!")
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[1].find("!"), err[2].find("^")) # in the right place
err = self.get_exception_format(self.syntax_error_with_caret_2,
SyntaxError)
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[2].count('\n'), 1) # and no additional newline
self.assertEqual(err[1].find("+"), err[2].find("^")) # in the right place
err = self.get_exception_format(self.syntax_error_with_caret_non_ascii,
SyntaxError)
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[2].count('\n'), 1) # and no additional newline
self.assertEqual(err[1].find("+"), err[2].find("^")) # in the right place
def test_nocaret(self):
exc = SyntaxError("error", ("x.py", 23, None, "bad syntax"))
err = traceback.format_exception_only(SyntaxError, exc)
self.assertEqual(len(err), 3)
self.assertEqual(err[1].strip(), "bad syntax")
def test_bad_indentation(self):
err = self.get_exception_format(self.syntax_error_bad_indentation,
IndentationError)
self.assertEqual(len(err), 4)
self.assertEqual(err[1].strip(), "print(2)")
self.assertIn("^", err[2])
self.assertEqual(err[1].find(")"), err[2].find("^"))
err = self.get_exception_format(self.syntax_error_bad_indentation2,
IndentationError)
self.assertEqual(len(err), 4)
self.assertEqual(err[1].strip(), "print(2)")
self.assertIn("^", err[2])
self.assertEqual(err[1].find("p"), err[2].find("^"))
def test_base_exception(self):
# Test that exceptions derived from BaseException are formatted right
e = KeyboardInterrupt()
lst = traceback.format_exception_only(e.__class__, e)
self.assertEqual(lst, ['KeyboardInterrupt\n'])
def test_format_exception_only_bad__str__(self):
class X(Exception):
def __str__(self):
1/0
err = traceback.format_exception_only(X, X())
self.assertEqual(len(err), 1)
str_value = '<unprintable %s object>' % X.__name__
if X.__module__ in ('__main__', 'builtins'):
str_name = X.__qualname__
else:
str_name = '.'.join([X.__module__, X.__qualname__])
self.assertEqual(err[0], "%s: %s\n" % (str_name, str_value))
def test_without_exception(self):
err = traceback.format_exception_only(None, None)
self.assertEqual(err, ['None\n'])
def test_encoded_file(self):
# Test that tracebacks are correctly printed for encoded source files:
# - correct line number (Issue2384)
# - respect file encoding (Issue3975)
import tempfile, sys, subprocess, os
# The spawned subprocess has its stdout redirected to a PIPE, and its
# encoding may be different from the current interpreter, on Windows
# at least.
process = subprocess.Popen([sys.executable, "-c",
"import sys; print(sys.stdout.encoding)"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
output_encoding = str(stdout, 'ascii').splitlines()[0]
def do_test(firstlines, message, charset, lineno):
# Raise the message in a subprocess, and catch the output
try:
output = open(TESTFN, "w", encoding=charset)
output.write("""{0}if 1:
import traceback;
raise RuntimeError('{1}')
""".format(firstlines, message))
output.close()
process = subprocess.Popen([sys.executable, TESTFN],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
stdout = stdout.decode(output_encoding).splitlines()
finally:
unlink(TESTFN)
# The source lines are encoded with the 'backslashreplace' handler
encoded_message = message.encode(output_encoding,
'backslashreplace')
# and we just decoded them with the output_encoding.
message_ascii = encoded_message.decode(output_encoding)
err_line = "raise RuntimeError('{0}')".format(message_ascii)
err_msg = "RuntimeError: {0}".format(message_ascii)
self.assertIn(("line %s" % lineno), stdout[1],
"Invalid line number: {0!r} instead of {1}".format(
stdout[1], lineno))
self.assertTrue(stdout[2].endswith(err_line),
"Invalid traceback line: {0!r} instead of {1!r}".format(
stdout[2], err_line))
self.assertTrue(stdout[3] == err_msg,
"Invalid error message: {0!r} instead of {1!r}".format(
stdout[3], err_msg))
do_test("", "foo", "ascii", 3)
for charset in ("ascii", "iso-8859-1", "utf-8", "GBK"):
if charset == "ascii":
text = "foo"
elif charset == "GBK":
text = "\u4E02\u5100"
else:
text = "h\xe9 ho"
do_test("# coding: {0}\n".format(charset),
text, charset, 4)
do_test("#!shebang\n# coding: {0}\n".format(charset),
text, charset, 5)
do_test(" \t\f\n# coding: {0}\n".format(charset),
text, charset, 5)
# Issue #18960: coding spec should has no effect
do_test("0\n# coding: GBK\n", "h\xe9 ho", 'utf-8', 5)
def test_print_traceback_at_exit(self):
# Issue #22599: Ensure that it is possible to use the traceback module
# to display an exception at Python exit
code = textwrap.dedent("""
import sys
import traceback
class PrintExceptionAtExit(object):
def __init__(self):
try:
x = 1 / 0
except Exception:
self.exc_info = sys.exc_info()
# self.exc_info[1] (traceback) contains frames:
# explicitly clear the reference to self in the current
# frame to break a reference cycle
self = None
def __del__(self):
traceback.print_exception(*self.exc_info)
# Keep a reference in the module namespace to call the destructor
# when the module is unloaded
obj = PrintExceptionAtExit()
""")
rc, stdout, stderr = assert_python_ok('-c', code)
expected = [b'Traceback (most recent call last):',
b' File "<string>", line 8, in __init__',
b'ZeroDivisionError: division by zero']
self.assertEqual(stderr.splitlines(), expected)
class TracebackFormatTests(unittest.TestCase):
def some_exception(self):
raise KeyError('blah')
@cpython_only
def check_traceback_format(self, cleanup_func=None):
from _testcapi import traceback_print
try:
self.some_exception()
except KeyError:
type_, value, tb = sys.exc_info()
if cleanup_func is not None:
# Clear the inner frames, not this one
cleanup_func(tb.tb_next)
traceback_fmt = 'Traceback (most recent call last):\n' + \
''.join(traceback.format_tb(tb))
file_ = StringIO()
traceback_print(tb, file_)
python_fmt = file_.getvalue()
# Call all _tb and _exc functions
with captured_output("stderr") as tbstderr:
traceback.print_tb(tb)
tbfile = StringIO()
traceback.print_tb(tb, file=tbfile)
with captured_output("stderr") as excstderr:
traceback.print_exc()
excfmt = traceback.format_exc()
excfile = StringIO()
traceback.print_exc(file=excfile)
else:
raise Error("unable to create test traceback string")
# Make sure that Python and the traceback module format the same thing
self.assertEqual(traceback_fmt, python_fmt)
# Now verify the _tb func output
self.assertEqual(tbstderr.getvalue(), tbfile.getvalue())
# Now verify the _exc func output
self.assertEqual(excstderr.getvalue(), excfile.getvalue())
self.assertEqual(excfmt, excfile.getvalue())
# Make sure that the traceback is properly indented.
tb_lines = python_fmt.splitlines()
self.assertEqual(len(tb_lines), 5)
banner = tb_lines[0]
location, source_line = tb_lines[-2:]
self.assertTrue(banner.startswith('Traceback'))
self.assertTrue(location.startswith(' File'))
self.assertTrue(source_line.startswith(' raise'))
def test_traceback_format(self):
self.check_traceback_format()
def test_traceback_format_with_cleared_frames(self):
# Check that traceback formatting also works with a clear()ed frame
def cleanup_tb(tb):
tb.tb_frame.clear()
self.check_traceback_format(cleanup_tb)
def test_stack_format(self):
# Verify _stack functions. Note we have to use _getframe(1) to
# compare them without this frame appearing in the output
with captured_output("stderr") as ststderr:
traceback.print_stack(sys._getframe(1))
stfile = StringIO()
traceback.print_stack(sys._getframe(1), file=stfile)
self.assertEqual(ststderr.getvalue(), stfile.getvalue())
stfmt = traceback.format_stack(sys._getframe(1))
self.assertEqual(ststderr.getvalue(), "".join(stfmt))
cause_message = (
"\nThe above exception was the direct cause "
"of the following exception:\n\n")
context_message = (
"\nDuring handling of the above exception, "
"another exception occurred:\n\n")
boundaries = re.compile(
'(%s|%s)' % (re.escape(cause_message), re.escape(context_message)))
class BaseExceptionReportingTests:
def get_exception(self, exception_or_callable):
if isinstance(exception_or_callable, Exception):
return exception_or_callable
try:
exception_or_callable()
except Exception as e:
return e
def zero_div(self):
1/0 # In zero_div
def check_zero_div(self, msg):
lines = msg.splitlines()
self.assertTrue(lines[-3].startswith(' File'))
self.assertIn('1/0 # In zero_div', lines[-2])
self.assertTrue(lines[-1].startswith('ZeroDivisionError'), lines[-1])
def test_simple(self):
try:
1/0 # Marker
except ZeroDivisionError as _:
e = _
lines = self.get_report(e).splitlines()
self.assertEqual(len(lines), 4)
self.assertTrue(lines[0].startswith('Traceback'))
self.assertTrue(lines[1].startswith(' File'))
self.assertIn('1/0 # Marker', lines[2])
self.assertTrue(lines[3].startswith('ZeroDivisionError'))
def test_cause(self):
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError as e:
raise KeyError from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_context(self):
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError:
raise KeyError
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], context_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_context_suppression(self):
try:
try:
raise Exception
except:
raise ZeroDivisionError from None
except ZeroDivisionError as _:
e = _
lines = self.get_report(e).splitlines()
self.assertEqual(len(lines), 4)
self.assertTrue(lines[0].startswith('Traceback'))
self.assertTrue(lines[1].startswith(' File'))
self.assertIn('ZeroDivisionError from None', lines[2])
self.assertTrue(lines[3].startswith('ZeroDivisionError'))
def test_cause_and_context(self):
# When both a cause and a context are set, only the cause should be
# displayed and the context should be muted.
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError as _e:
e = _e
try:
xyzzy
except NameError:
raise KeyError from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_cause_recursive(self):
def inner_raise():
try:
try:
self.zero_div()
except ZeroDivisionError as e:
z = e
raise KeyError from e
except KeyError as e:
raise z from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
# The first block is the KeyError raised from the ZeroDivisionError
self.assertIn('raise KeyError from e', blocks[0])
self.assertNotIn('1/0', blocks[0])
# The second block (apart from the boundary) is the ZeroDivisionError
# re-raised from the KeyError
self.assertIn('inner_raise() # Marker', blocks[2])
self.check_zero_div(blocks[2])
def test_syntax_error_offset_at_eol(self):
# See #10186.
def e():
raise SyntaxError('', ('', 0, 5, 'hello'))
msg = self.get_report(e).splitlines()
self.assertEqual(msg[-2], " ^")
def e():
exec("x = 5 | 4 |")
msg = self.get_report(e).splitlines()
self.assertEqual(msg[-2], ' ^')
class PyExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
#
# This checks reporting through the 'traceback' module, with both
# format_exception() and print_exception().
#
def get_report(self, e):
e = self.get_exception(e)
s = ''.join(
traceback.format_exception(type(e), e, e.__traceback__))
with captured_output("stderr") as sio:
traceback.print_exception(type(e), e, e.__traceback__)
self.assertEqual(sio.getvalue(), s)
return s
class CExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
#
# This checks built-in reporting by the interpreter.
#
@cpython_only
def get_report(self, e):
from _testcapi import exception_print
e = self.get_exception(e)
with captured_output("stderr") as s:
exception_print(e)
return s.getvalue()
class MiscTracebackCases(unittest.TestCase):
#
# Check non-printing functions in traceback module
#
def test_clear(self):
def outer():
middle()
def middle():
inner()
def inner():
i = 1
1/0
try:
outer()
except:
type_, value, tb = sys.exc_info()
# Initial assertion: there's one local in the inner frame.
inner_frame = tb.tb_next.tb_next.tb_next.tb_frame
self.assertEqual(len(inner_frame.f_locals), 1)
# Clear traceback frames
traceback.clear_frames(tb)
# Local variable dict should now be empty.
self.assertEqual(len(inner_frame.f_locals), 0)
class TestFrame(unittest.TestCase):
def test_basics(self):
linecache.clearcache()
linecache.lazycache("f", globals())
f = traceback.FrameSummary("f", 1, "dummy")
self.assertEqual(
("f", 1, "dummy", '"""Test cases for traceback module"""'),
tuple(f))
self.assertEqual(None, f.locals)
def test_lazy_lines(self):
linecache.clearcache()
f = traceback.FrameSummary("f", 1, "dummy", lookup_line=False)
self.assertEqual(None, f._line)
linecache.lazycache("f", globals())
self.assertEqual(
'"""Test cases for traceback module"""',
f.line)
def test_explicit_line(self):
f = traceback.FrameSummary("f", 1, "dummy", line="line")
self.assertEqual("line", f.line)
class TestStack(unittest.TestCase):
def test_walk_stack(self):
s = list(traceback.walk_stack(None))
self.assertGreater(len(s), 10)
def test_walk_tb(self):
try:
1/0
except Exception:
_, _, tb = sys.exc_info()
s = list(traceback.walk_tb(tb))
self.assertEqual(len(s), 1)
def test_extract_stack(self):
s = traceback.StackSummary.extract(traceback.walk_stack(None))
self.assertIsInstance(s, traceback.StackSummary)
def test_extract_stack_limit(self):
s = traceback.StackSummary.extract(traceback.walk_stack(None), limit=5)
self.assertEqual(len(s), 5)
def test_extract_stack_lookup_lines(self):
linecache.clearcache()
linecache.updatecache('/foo.py', globals())
c = test_code('/foo.py', 'method')
f = test_frame(c, None, None)
s = traceback.StackSummary.extract(iter([(f, 6)]), lookup_lines=True)
linecache.clearcache()
self.assertEqual(s[0].line, "import sys")
def test_extract_stackup_deferred_lookup_lines(self):
linecache.clearcache()
c = test_code('/foo.py', 'method')
f = test_frame(c, None, None)
s = traceback.StackSummary.extract(iter([(f, 6)]), lookup_lines=False)
self.assertEqual({}, linecache.cache)
linecache.updatecache('/foo.py', globals())
self.assertEqual(s[0].line, "import sys")
def test_from_list(self):
s = traceback.StackSummary.from_list([('foo.py', 1, 'fred', 'line')])
self.assertEqual(
[' File "foo.py", line 1, in fred\n line\n'],
s.format())
def test_format_smoke(self):
# For detailed tests see the format_list tests, which consume the same
# code.
s = traceback.StackSummary.from_list([('foo.py', 1, 'fred', 'line')])
self.assertEqual(
[' File "foo.py", line 1, in fred\n line\n'],
s.format())
def test_locals(self):
linecache.updatecache('/foo.py', globals())
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1})
s = traceback.StackSummary.extract(iter([(f, 6)]), capture_locals=True)
self.assertEqual(s[0].locals, {'something': '1'})
def test_no_locals(self):
linecache.updatecache('/foo.py', globals())
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1})
s = traceback.StackSummary.extract(iter([(f, 6)]))
self.assertEqual(s[0].locals, None)
def test_format_locals(self):
def some_inner(k, v):
a = 1
b = 2
return traceback.StackSummary.extract(
traceback.walk_stack(None), capture_locals=True, limit=1)
s = some_inner(3, 4)
self.assertEqual(
[' File "' + __file__ + '", line 585, '
'in some_inner\n'
' traceback.walk_stack(None), capture_locals=True, limit=1)\n'
' a = 1\n'
' b = 2\n'
' k = 3\n'
' v = 4\n'
], s.format())
class TestTracebackException(unittest.TestCase):
def test_smoke(self):
try:
1/0
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]))
self.assertEqual(None, exc.__cause__)
self.assertEqual(None, exc.__context__)
self.assertEqual(False, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_from_exception(self):
# Check all the parameters are accepted.
def foo():
1/0
try:
foo()
except Exception as e:
exc_info = sys.exc_info()
self.expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]), limit=1, lookup_lines=False,
capture_locals=True)
self.exc = traceback.TracebackException.from_exception(
e, limit=1, lookup_lines=False, capture_locals=True)
expected_stack = self.expected_stack
exc = self.exc
self.assertEqual(None, exc.__cause__)
self.assertEqual(None, exc.__context__)
self.assertEqual(False, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_cause(self):
try:
try:
1/0
finally:
exc_info_context = sys.exc_info()
exc_context = traceback.TracebackException(*exc_info_context)
cause = Exception("cause")
raise Exception("uh oh") from cause
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]))
exc_cause = traceback.TracebackException(Exception, cause, None)
self.assertEqual(exc_cause, exc.__cause__)
self.assertEqual(exc_context, exc.__context__)
self.assertEqual(True, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_context(self):
try:
try:
1/0
finally:
exc_info_context = sys.exc_info()
exc_context = traceback.TracebackException(*exc_info_context)
raise Exception("uh oh")
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]))
self.assertEqual(None, exc.__cause__)
self.assertEqual(exc_context, exc.__context__)
self.assertEqual(False, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_limit(self):
def recurse(n):
if n:
recurse(n-1)
else:
1/0
try:
recurse(10)
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info, limit=5)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]), limit=5)
self.assertEqual(expected_stack, exc.stack)
def test_lookup_lines(self):
linecache.clearcache()
e = Exception("uh oh")
c = test_code('/foo.py', 'method')
f = test_frame(c, None, None)
tb = test_tb(f, 6, None)
exc = traceback.TracebackException(Exception, e, tb, lookup_lines=False)
self.assertEqual({}, linecache.cache)
linecache.updatecache('/foo.py', globals())
self.assertEqual(exc.stack[0].line, "import sys")
def test_locals(self):
linecache.updatecache('/foo.py', globals())
e = Exception("uh oh")
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1, 'other': 'string'})
tb = test_tb(f, 6, None)
exc = traceback.TracebackException(
Exception, e, tb, capture_locals=True)
self.assertEqual(
exc.stack[0].locals, {'something': '1', 'other': "'string'"})
def test_no_locals(self):
linecache.updatecache('/foo.py', globals())
e = Exception("uh oh")
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1})
tb = test_tb(f, 6, None)
exc = traceback.TracebackException(Exception, e, tb)
self.assertEqual(exc.stack[0].locals, None)
def test_main():
run_unittest(__name__)
if __name__ == "__main__":
test_main()
| """Test cases for traceback module"""
from collections import namedtuple
from io import StringIO
import linecache
import sys
import unittest
import re
from test.support import run_unittest, Error, captured_output
from test.support import TESTFN, unlink, cpython_only
from test.script_helper import assert_python_ok
import textwrap
import traceback
test_code = namedtuple('code', ['co_filename', 'co_name'])
test_frame = namedtuple('frame', ['f_code', 'f_globals', 'f_locals'])
test_tb = namedtuple('tb', ['tb_frame', 'tb_lineno', 'tb_next'])
class SyntaxTracebackCases(unittest.TestCase):
# For now, a very minimal set of tests. I want to be sure that
# formatting of SyntaxErrors works based on changes for 2.1.
def get_exception_format(self, func, exc):
try:
func()
except exc as value:
return traceback.format_exception_only(exc, value)
else:
raise ValueError("call did not raise exception")
def syntax_error_with_caret(self):
compile("def fact(x):\n\treturn x!\n", "?", "exec")
def syntax_error_with_caret_2(self):
compile("1 +\n", "?", "exec")
def syntax_error_bad_indentation(self):
compile("def spam():\n print(1)\n print(2)", "?", "exec")
def syntax_error_with_caret_non_ascii(self):
compile('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', "?", "exec")
def syntax_error_bad_indentation2(self):
compile(" print(2)", "?", "exec")
def test_caret(self):
err = self.get_exception_format(self.syntax_error_with_caret,
SyntaxError)
self.assertEqual(len(err), 4)
self.assertTrue(err[1].strip() == "return x!")
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[1].find("!"), err[2].find("^")) # in the right place
err = self.get_exception_format(self.syntax_error_with_caret_2,
SyntaxError)
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[2].count('\n'), 1) # and no additional newline
self.assertEqual(err[1].find("+"), err[2].find("^")) # in the right place
err = self.get_exception_format(self.syntax_error_with_caret_non_ascii,
SyntaxError)
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[2].count('\n'), 1) # and no additional newline
self.assertEqual(err[1].find("+"), err[2].find("^")) # in the right place
def test_nocaret(self):
exc = SyntaxError("error", ("x.py", 23, None, "bad syntax"))
err = traceback.format_exception_only(SyntaxError, exc)
self.assertEqual(len(err), 3)
self.assertEqual(err[1].strip(), "bad syntax")
def test_bad_indentation(self):
err = self.get_exception_format(self.syntax_error_bad_indentation,
IndentationError)
self.assertEqual(len(err), 4)
self.assertEqual(err[1].strip(), "print(2)")
self.assertIn("^", err[2])
self.assertEqual(err[1].find(")"), err[2].find("^"))
err = self.get_exception_format(self.syntax_error_bad_indentation2,
IndentationError)
self.assertEqual(len(err), 4)
self.assertEqual(err[1].strip(), "print(2)")
self.assertIn("^", err[2])
self.assertEqual(err[1].find("p"), err[2].find("^"))
def test_base_exception(self):
# Test that exceptions derived from BaseException are formatted right
e = KeyboardInterrupt()
lst = traceback.format_exception_only(e.__class__, e)
self.assertEqual(lst, ['KeyboardInterrupt\n'])
def test_format_exception_only_bad__str__(self):
class X(Exception):
def __str__(self):
1/0
err = traceback.format_exception_only(X, X())
self.assertEqual(len(err), 1)
str_value = '<unprintable %s object>' % X.__name__
if X.__module__ in ('__main__', 'builtins'):
str_name = X.__qualname__
else:
str_name = '.'.join([X.__module__, X.__qualname__])
self.assertEqual(err[0], "%s: %s\n" % (str_name, str_value))
def test_without_exception(self):
err = traceback.format_exception_only(None, None)
self.assertEqual(err, ['None\n'])
def test_encoded_file(self):
# Test that tracebacks are correctly printed for encoded source files:
# - correct line number (Issue2384)
# - respect file encoding (Issue3975)
import tempfile, sys, subprocess, os
# The spawned subprocess has its stdout redirected to a PIPE, and its
# encoding may be different from the current interpreter, on Windows
# at least.
process = subprocess.Popen([sys.executable, "-c",
"import sys; print(sys.stdout.encoding)"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
output_encoding = str(stdout, 'ascii').splitlines()[0]
def do_test(firstlines, message, charset, lineno):
# Raise the message in a subprocess, and catch the output
try:
output = open(TESTFN, "w", encoding=charset)
output.write("""{0}if 1:
import traceback;
raise RuntimeError('{1}')
""".format(firstlines, message))
output.close()
process = subprocess.Popen([sys.executable, TESTFN],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
stdout = stdout.decode(output_encoding).splitlines()
finally:
unlink(TESTFN)
# The source lines are encoded with the 'backslashreplace' handler
encoded_message = message.encode(output_encoding,
'backslashreplace')
# and we just decoded them with the output_encoding.
message_ascii = encoded_message.decode(output_encoding)
err_line = "raise RuntimeError('{0}')".format(message_ascii)
err_msg = "RuntimeError: {0}".format(message_ascii)
self.assertIn(("line %s" % lineno), stdout[1],
"Invalid line number: {0!r} instead of {1}".format(
stdout[1], lineno))
self.assertTrue(stdout[2].endswith(err_line),
"Invalid traceback line: {0!r} instead of {1!r}".format(
stdout[2], err_line))
self.assertTrue(stdout[3] == err_msg,
"Invalid error message: {0!r} instead of {1!r}".format(
stdout[3], err_msg))
do_test("", "foo", "ascii", 3)
for charset in ("ascii", "iso-8859-1", "utf-8", "GBK"):
if charset == "ascii":
text = "foo"
elif charset == "GBK":
text = "\u4E02\u5100"
else:
text = "h\xe9 ho"
do_test("# coding: {0}\n".format(charset),
text, charset, 4)
do_test("#!shebang\n# coding: {0}\n".format(charset),
text, charset, 5)
do_test(" \t\f\n# coding: {0}\n".format(charset),
text, charset, 5)
# Issue #18960: coding spec should has no effect
do_test("0\n# coding: GBK\n", "h\xe9 ho", 'utf-8', 5)
def test_print_traceback_at_exit(self):
# Issue #22599: Ensure that it is possible to use the traceback module
# to display an exception at Python exit
code = textwrap.dedent("""
import sys
import traceback
class PrintExceptionAtExit(object):
def __init__(self):
try:
x = 1 / 0
except Exception:
self.exc_info = sys.exc_info()
# self.exc_info[1] (traceback) contains frames:
# explicitly clear the reference to self in the current
# frame to break a reference cycle
self = None
def __del__(self):
traceback.print_exception(*self.exc_info)
# Keep a reference in the module namespace to call the destructor
# when the module is unloaded
obj = PrintExceptionAtExit()
""")
rc, stdout, stderr = assert_python_ok('-c', code)
expected = [b'Traceback (most recent call last):',
b' File "<string>", line 8, in __init__',
b'ZeroDivisionError: division by zero']
self.assertEqual(stderr.splitlines(), expected)
class TracebackFormatTests(unittest.TestCase):
def some_exception(self):
raise KeyError('blah')
@cpython_only
def check_traceback_format(self, cleanup_func=None):
from _testcapi import traceback_print
try:
self.some_exception()
except KeyError:
type_, value, tb = sys.exc_info()
if cleanup_func is not None:
# Clear the inner frames, not this one
cleanup_func(tb.tb_next)
traceback_fmt = 'Traceback (most recent call last):\n' + \
''.join(traceback.format_tb(tb))
file_ = StringIO()
traceback_print(tb, file_)
python_fmt = file_.getvalue()
# Call all _tb and _exc functions
with captured_output("stderr") as tbstderr:
traceback.print_tb(tb)
tbfile = StringIO()
traceback.print_tb(tb, file=tbfile)
with captured_output("stderr") as excstderr:
traceback.print_exc()
excfmt = traceback.format_exc()
excfile = StringIO()
traceback.print_exc(file=excfile)
else:
raise Error("unable to create test traceback string")
# Make sure that Python and the traceback module format the same thing
self.assertEqual(traceback_fmt, python_fmt)
# Now verify the _tb func output
self.assertEqual(tbstderr.getvalue(), tbfile.getvalue())
# Now verify the _exc func output
self.assertEqual(excstderr.getvalue(), excfile.getvalue())
self.assertEqual(excfmt, excfile.getvalue())
# Make sure that the traceback is properly indented.
tb_lines = python_fmt.splitlines()
self.assertEqual(len(tb_lines), 5)
banner = tb_lines[0]
location, source_line = tb_lines[-2:]
self.assertTrue(banner.startswith('Traceback'))
self.assertTrue(location.startswith(' File'))
self.assertTrue(source_line.startswith(' raise'))
def test_traceback_format(self):
self.check_traceback_format()
def test_traceback_format_with_cleared_frames(self):
# Check that traceback formatting also works with a clear()ed frame
def cleanup_tb(tb):
tb.tb_frame.clear()
self.check_traceback_format(cleanup_tb)
def test_stack_format(self):
# Verify _stack functions. Note we have to use _getframe(1) to
# compare them without this frame appearing in the output
with captured_output("stderr") as ststderr:
traceback.print_stack(sys._getframe(1))
stfile = StringIO()
traceback.print_stack(sys._getframe(1), file=stfile)
self.assertEqual(ststderr.getvalue(), stfile.getvalue())
stfmt = traceback.format_stack(sys._getframe(1))
self.assertEqual(ststderr.getvalue(), "".join(stfmt))
cause_message = (
"\nThe above exception was the direct cause "
"of the following exception:\n\n")
context_message = (
"\nDuring handling of the above exception, "
"another exception occurred:\n\n")
boundaries = re.compile(
'(%s|%s)' % (re.escape(cause_message), re.escape(context_message)))
class BaseExceptionReportingTests:
def get_exception(self, exception_or_callable):
if isinstance(exception_or_callable, Exception):
return exception_or_callable
try:
exception_or_callable()
except Exception as e:
return e
def zero_div(self):
1/0 # In zero_div
def check_zero_div(self, msg):
lines = msg.splitlines()
self.assertTrue(lines[-3].startswith(' File'))
self.assertIn('1/0 # In zero_div', lines[-2])
self.assertTrue(lines[-1].startswith('ZeroDivisionError'), lines[-1])
def test_simple(self):
try:
1/0 # Marker
except ZeroDivisionError as _:
e = _
lines = self.get_report(e).splitlines()
self.assertEqual(len(lines), 4)
self.assertTrue(lines[0].startswith('Traceback'))
self.assertTrue(lines[1].startswith(' File'))
self.assertIn('1/0 # Marker', lines[2])
self.assertTrue(lines[3].startswith('ZeroDivisionError'))
def test_cause(self):
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError as e:
raise KeyError from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_context(self):
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError:
raise KeyError
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], context_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_context_suppression(self):
try:
try:
raise Exception
except:
raise ZeroDivisionError from None
except ZeroDivisionError as _:
e = _
lines = self.get_report(e).splitlines()
self.assertEqual(len(lines), 4)
self.assertTrue(lines[0].startswith('Traceback'))
self.assertTrue(lines[1].startswith(' File'))
self.assertIn('ZeroDivisionError from None', lines[2])
self.assertTrue(lines[3].startswith('ZeroDivisionError'))
def test_cause_and_context(self):
# When both a cause and a context are set, only the cause should be
# displayed and the context should be muted.
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError as _e:
e = _e
try:
xyzzy
except NameError:
raise KeyError from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_cause_recursive(self):
def inner_raise():
try:
try:
self.zero_div()
except ZeroDivisionError as e:
z = e
raise KeyError from e
except KeyError as e:
raise z from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
# The first block is the KeyError raised from the ZeroDivisionError
self.assertIn('raise KeyError from e', blocks[0])
self.assertNotIn('1/0', blocks[0])
# The second block (apart from the boundary) is the ZeroDivisionError
# re-raised from the KeyError
self.assertIn('inner_raise() # Marker', blocks[2])
self.check_zero_div(blocks[2])
def test_syntax_error_offset_at_eol(self):
# See #10186.
def e():
raise SyntaxError('', ('', 0, 5, 'hello'))
msg = self.get_report(e).splitlines()
self.assertEqual(msg[-2], " ^")
def e():
exec("x = 5 | 4 |")
msg = self.get_report(e).splitlines()
self.assertEqual(msg[-2], ' ^')
class PyExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
#
# This checks reporting through the 'traceback' module, with both
# format_exception() and print_exception().
#
def get_report(self, e):
e = self.get_exception(e)
s = ''.join(
traceback.format_exception(type(e), e, e.__traceback__))
with captured_output("stderr") as sio:
traceback.print_exception(type(e), e, e.__traceback__)
self.assertEqual(sio.getvalue(), s)
return s
class CExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
#
# This checks built-in reporting by the interpreter.
#
@cpython_only
def get_report(self, e):
from _testcapi import exception_print
e = self.get_exception(e)
with captured_output("stderr") as s:
exception_print(e)
return s.getvalue()
class MiscTracebackCases(unittest.TestCase):
#
# Check non-printing functions in traceback module
#
def test_clear(self):
def outer():
middle()
def middle():
inner()
def inner():
i = 1
1/0
try:
outer()
except:
type_, value, tb = sys.exc_info()
# Initial assertion: there's one local in the inner frame.
inner_frame = tb.tb_next.tb_next.tb_next.tb_frame
self.assertEqual(len(inner_frame.f_locals), 1)
# Clear traceback frames
traceback.clear_frames(tb)
# Local variable dict should now be empty.
self.assertEqual(len(inner_frame.f_locals), 0)
class TestFrame(unittest.TestCase):
def test_basics(self):
linecache.clearcache()
linecache.lazycache("f", globals())
f = traceback.FrameSummary("f", 1, "dummy")
self.assertEqual(
("f", 1, "dummy", '"""Test cases for traceback module"""'),
tuple(f))
self.assertEqual(None, f.locals)
def test_lazy_lines(self):
linecache.clearcache()
f = traceback.FrameSummary("f", 1, "dummy", lookup_line=False)
self.assertEqual(None, f._line)
linecache.lazycache("f", globals())
self.assertEqual(
'"""Test cases for traceback module"""',
f.line)
def test_explicit_line(self):
f = traceback.FrameSummary("f", 1, "dummy", line="line")
self.assertEqual("line", f.line)
class TestStack(unittest.TestCase):
def test_walk_stack(self):
s = list(traceback.walk_stack(None))
self.assertGreater(len(s), 10)
def test_walk_tb(self):
try:
1/0
except Exception:
_, _, tb = sys.exc_info()
s = list(traceback.walk_tb(tb))
self.assertEqual(len(s), 1)
def test_extract_stack(self):
s = traceback.StackSummary.extract(traceback.walk_stack(None))
self.assertIsInstance(s, traceback.StackSummary)
def test_extract_stack_limit(self):
s = traceback.StackSummary.extract(traceback.walk_stack(None), limit=5)
self.assertEqual(len(s), 5)
def test_extract_stack_lookup_lines(self):
linecache.clearcache()
linecache.updatecache('/foo.py', globals())
c = test_code('/foo.py', 'method')
f = test_frame(c, None, None)
s = traceback.StackSummary.extract(iter([(f, 6)]), lookup_lines=True)
linecache.clearcache()
self.assertEqual(s[0].line, "import sys")
def test_extract_stackup_deferred_lookup_lines(self):
linecache.clearcache()
c = test_code('/foo.py', 'method')
f = test_frame(c, None, None)
s = traceback.StackSummary.extract(iter([(f, 6)]), lookup_lines=False)
self.assertEqual({}, linecache.cache)
linecache.updatecache('/foo.py', globals())
self.assertEqual(s[0].line, "import sys")
def test_from_list(self):
s = traceback.StackSummary.from_list([('foo.py', 1, 'fred', 'line')])
self.assertEqual(
[' File "foo.py", line 1, in fred\n line\n'],
s.format())
def test_format_smoke(self):
# For detailed tests see the format_list tests, which consume the same
# code.
s = traceback.StackSummary.from_list([('foo.py', 1, 'fred', 'line')])
self.assertEqual(
[' File "foo.py", line 1, in fred\n line\n'],
s.format())
def test_locals(self):
linecache.updatecache('/foo.py', globals())
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1})
s = traceback.StackSummary.extract(iter([(f, 6)]), capture_locals=True)
self.assertEqual(s[0].locals, {'something': '1'})
def test_no_locals(self):
linecache.updatecache('/foo.py', globals())
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1})
s = traceback.StackSummary.extract(iter([(f, 6)]))
self.assertEqual(s[0].locals, None)
def test_format_locals(self):
def some_inner(k, v):
a = 1
b = 2
return traceback.StackSummary.extract(
traceback.walk_stack(None), capture_locals=True, limit=1)
s = some_inner(3, 4)
self.assertEqual(
[' File "' + __file__ + '", line 585, '
'in some_inner\n'
' traceback.walk_stack(None), capture_locals=True, limit=1)\n'
' a = 1\n'
' b = 2\n'
' k = 3\n'
' v = 4\n'
], s.format())
class TestTracebackException(unittest.TestCase):
def test_smoke(self):
try:
1/0
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]))
self.assertEqual(None, exc.__cause__)
self.assertEqual(None, exc.__context__)
self.assertEqual(False, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_from_exception(self):
# Check all the parameters are accepted.
def foo():
1/0
try:
foo()
except Exception as e:
exc_info = sys.exc_info()
self.expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]), limit=1, lookup_lines=False,
capture_locals=True)
self.exc = traceback.TracebackException.from_exception(
e, limit=1, lookup_lines=False, capture_locals=True)
expected_stack = self.expected_stack
exc = self.exc
self.assertEqual(None, exc.__cause__)
self.assertEqual(None, exc.__context__)
self.assertEqual(False, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_cause(self):
try:
try:
1/0
finally:
exc_info_context = sys.exc_info()
exc_context = traceback.TracebackException(*exc_info_context)
cause = Exception("cause")
raise Exception("uh oh") from cause
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]))
exc_cause = traceback.TracebackException(Exception, cause, None)
self.assertEqual(exc_cause, exc.__cause__)
self.assertEqual(exc_context, exc.__context__)
self.assertEqual(True, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_context(self):
try:
try:
1/0
finally:
exc_info_context = sys.exc_info()
exc_context = traceback.TracebackException(*exc_info_context)
raise Exception("uh oh")
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]))
self.assertEqual(None, exc.__cause__)
self.assertEqual(exc_context, exc.__context__)
self.assertEqual(False, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_limit(self):
def recurse(n):
if n:
recurse(n-1)
else:
1/0
try:
recurse(10)
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info, limit=5)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]), limit=5)
self.assertEqual(expected_stack, exc.stack)
def test_lookup_lines(self):
linecache.clearcache()
e = Exception("uh oh")
c = test_code('/foo.py', 'method')
f = test_frame(c, None, None)
tb = test_tb(f, 6, None)
exc = traceback.TracebackException(Exception, e, tb, lookup_lines=False)
self.assertEqual({}, linecache.cache)
linecache.updatecache('/foo.py', globals())
self.assertEqual(exc.stack[0].line, "import sys")
def test_locals(self):
linecache.updatecache('/foo.py', globals())
e = Exception("uh oh")
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1, 'other': 'string'})
tb = test_tb(f, 6, None)
exc = traceback.TracebackException(
Exception, e, tb, capture_locals=True)
self.assertEqual(
exc.stack[0].locals, {'something': '1', 'other': "'string'"})
def test_no_locals(self):
linecache.updatecache('/foo.py', globals())
e = Exception("uh oh")
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1})
tb = test_tb(f, 6, None)
exc = traceback.TracebackException(Exception, e, tb)
self.assertEqual(exc.stack[0].locals, None)
def test_main():
run_unittest(__name__)
if __name__ == "__main__":
test_main()
| en | 0.780875 | Test cases for traceback module # For now, a very minimal set of tests. I want to be sure that # formatting of SyntaxErrors works based on changes for 2.1. # third line has caret # in the right place # third line has caret # and no additional newline # in the right place # third line has caret # and no additional newline # in the right place # Test that exceptions derived from BaseException are formatted right # Test that tracebacks are correctly printed for encoded source files: # - correct line number (Issue2384) # - respect file encoding (Issue3975) # The spawned subprocess has its stdout redirected to a PIPE, and its # encoding may be different from the current interpreter, on Windows # at least. # Raise the message in a subprocess, and catch the output {0}if 1: import traceback; raise RuntimeError('{1}') # The source lines are encoded with the 'backslashreplace' handler # and we just decoded them with the output_encoding. # coding: {0}\n".format(charset), # coding: {0}\n".format(charset), # Issue #18960: coding spec should has no effect # coding: GBK\n", "h\xe9 ho", 'utf-8', 5) # Issue #22599: Ensure that it is possible to use the traceback module # to display an exception at Python exit import sys import traceback class PrintExceptionAtExit(object): def __init__(self): try: x = 1 / 0 except Exception: self.exc_info = sys.exc_info() # self.exc_info[1] (traceback) contains frames: # explicitly clear the reference to self in the current # frame to break a reference cycle self = None def __del__(self): traceback.print_exception(*self.exc_info) # Keep a reference in the module namespace to call the destructor # when the module is unloaded obj = PrintExceptionAtExit() # Clear the inner frames, not this one # Call all _tb and _exc functions # Make sure that Python and the traceback module format the same thing # Now verify the _tb func output # Now verify the _exc func output # Make sure that the traceback is properly indented. 
# Check that traceback formatting also works with a clear()ed frame # Verify _stack functions. Note we have to use _getframe(1) to # compare them without this frame appearing in the output # In zero_div # In zero_div', lines[-2]) # Marker # Marker', lines[2]) # Marker # Marker', blocks[2]) # Marker # Marker', blocks[2]) # When both a cause and a context are set, only the cause should be # displayed and the context should be muted. # Marker # Marker', blocks[2]) # Marker # The first block is the KeyError raised from the ZeroDivisionError # The second block (apart from the boundary) is the ZeroDivisionError # re-raised from the KeyError # Marker', blocks[2]) # See #10186. # # This checks reporting through the 'traceback' module, with both # format_exception() and print_exception(). # # # This checks built-in reporting by the interpreter. # # # Check non-printing functions in traceback module # # Initial assertion: there's one local in the inner frame. # Clear traceback frames # Local variable dict should now be empty. Test cases for traceback module Test cases for traceback module # For detailed tests see the format_list tests, which consume the same # code. # Check all the parameters are accepted. | 2.771586 | 3 |
Predict_Score.py | US579/RecommenderSystems | 0 | 6624365 | <reponame>US579/RecommenderSystems
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.metrics.pairwise import cosine_similarity
import scipy.sparse as sp
from scipy.sparse.linalg import svds
from sklearn.model_selection import train_test_split
from sklearn.metrics.pairwise import pairwise_distances
import warnings
# Read Data and Cleaning
Udata_header = ['user_id', 'item_id', 'rating', 'timestamp']
m_cols = ['item_id', 'title', 'release_date', 'video_release_date', 'imdb_url']
Udata_df = pd.read_csv('data/ml-100k/u.data',sep='\t',names=Udata_header)
# Uitem_header = ['item_id', 'movie_title', 'release_date', 'video_release_date',
# 'IMDb_URL', 'unknown', 'Action', 'Adventure' ,'Animation',
# 'Childrens','Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy',
# 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi',
# 'Thriller', 'War', 'Western']
Uitem_df = pd.read_csv('data/ml-100k/u.item',sep='|',names=m_cols,encoding='latin1',usecols=range(5))
Ugenre_header = ['Type','id']
Ugenre_df = pd.read_csv('data/ml-100k/u.genre',sep='|',names=Ugenre_header)
Uuser_header = ['user_id', 'age', 'gender', 'occupation' ,'zip_code']
Uuser_df = pd.read_csv('data/ml-100k/u.user',sep='|',names=Uuser_header)
Total_df = pd.merge(Udata_df,Uitem_df,on = "item_id")
Total_df = pd.merge(Total_df,Uuser_df,on = "user_id")
SortByUser=Total_df.sort_values(by = ["user_id"])
# Modelling
df = Total_df
n_users = df.user_id.unique().shape[0]
n_items = df.item_id.unique().shape[0]
# classifying the movie according to the type
ratings = pd.DataFrame(df.groupby('title')['rating'].mean())
ratings['number_of_ratings'] = df.groupby('title')['rating'].count()
movie_matrix = df.pivot_table(index='user_id', columns='title', values='rating')
X_train,X_test = train_test_split(df,test_size=0.2)
# Create Data matrix
train_data_matrix = np.zeros((n_users,n_items))
for row in X_train.itertuples():
train_data_matrix[row[1]-1,row[2]-1] = row[3]
for row in X_test.itertuples():
train_data_matrix[row[1]-1,row[2]-1] = row[3]
train_data_matrix1 = np.zeros((n_users,n_items))
for row in df.itertuples():
train_data_matrix1[row[1]-1,row[2]-1] = row[3]
test_data_matrix = np.zeros((n_users, n_items))
for line in X_test.itertuples():
test_data_matrix[line[1]-1, line[2]-1] = line[3]
# Similarity
user_similarity = pairwise_distances(train_data_matrix, metric = "cosine")
#user_similarity = pairwise_distances(train_data_matrix, metric = "euclidean")
item_similarity = cosine_similarity(train_data_matrix.T, dense_output=True)
###############################
# #
# Predict Movie Score Part #
# #
###############################
# get similarity of testUser with allUser
def get_similarity(testUser, allUser):
return pairwise_distances(testUser,allUser, metric = "cosine")
# get matrix of topK similarity User
def get_topK(matrix,similarity,k):
similarity = similarity[0]
topK_data_matrix = []
i = len(similarity)
for j in range(i):
# 有问题
arr = similarity.argsort()[-k:]
arr_index = arr
for m in arr_index:
topK_data_matrix.append(matrix[m])
# top k mean similarity
topK_data_matrix = np.asarray(topK_data_matrix)
return topK_data_matrix
# Through User based to predict score
# The function and formula with previous one is different
def user_base_predict(testUser, topKUser):
# similarity again:
sim = pairwise_distances(testUser,topKUser, metric = "cosine")
sim2 = pairwise_distances(testUser,topKUser, metric = "cosine")
#print(sim)
for i in range(len(sim)):
for j in range(len(sim[0])):
sim[i][j] = 1/(sim[i][j]+1)
sim_avg = sim.mean(axis = 1)
pred = sim_avg * (np.dot(sim2,topKUser))
return pred
def user_base_predict2(testUser, topKUser):
r1 = topKUser.mean(axis =1)
sim = pairwise_distances(testUser,topKUser, metric = "cosine")
sim2 = pairwise_distances(testUser,topKUser, metric = "cosine")
for i in range(len(sim)):
for j in range(len(sim[0])):
sim[i][j] = 1/(sim[i][j]+1)
sim_avg = sim.mean(axis = 1)
r2 = sim_avg* (np.dot(sim2,topKUser))
diff = topKUser - r1[:,np.newaxis]
pred = r1[:,np.newaxis] + sim_avg* (np.dot(sim2,diff))
return pred
# predict all user's score
def predict_all(train_data_matrix,topK):
predict = []
for i in range(len(train_data_matrix)):
testUser = [train_data_matrix[i]]
if i == 0:
allUser = train_data_matrix[i+1:]
elif i == (len(train_data_matrix) -1):
allUser = train_data_matrix[:i]
else:
allUp = train_data_matrix[:i]
allDown = train_data_matrix[i+1:]
allUser = np.concatenate((allUp,allDown))
s = get_similarity(testUser,allUser)
topKUser = get_topK(train_data_matrix,s,topK)
prediction = user_base_predict(testUser,topKUser)
predict.append(prediction)
return np.asarray(predict)
y_predict = predict_all(train_data_matrix,10)
def predict_userMovieScore(predictall, userID):
return predictall[userID-1]
## Useing MSE to test the result:
#from sklearn.metrics import mean_squared_error
#y_true = train_data_matrix
#y_predict = np.squeeze(y_predict, axis=1)
#mean_squared_error(y_true, y_predict)
# RUN: if we want to predict the 1st user's score:
predict_userMovieScore(y_predict,1)
| import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.metrics.pairwise import cosine_similarity
import scipy.sparse as sp
from scipy.sparse.linalg import svds
from sklearn.model_selection import train_test_split
from sklearn.metrics.pairwise import pairwise_distances
import warnings
# Read Data and Cleaning
Udata_header = ['user_id', 'item_id', 'rating', 'timestamp']
m_cols = ['item_id', 'title', 'release_date', 'video_release_date', 'imdb_url']
Udata_df = pd.read_csv('data/ml-100k/u.data',sep='\t',names=Udata_header)
# Uitem_header = ['item_id', 'movie_title', 'release_date', 'video_release_date',
# 'IMDb_URL', 'unknown', 'Action', 'Adventure' ,'Animation',
# 'Childrens','Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy',
# 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi',
# 'Thriller', 'War', 'Western']
Uitem_df = pd.read_csv('data/ml-100k/u.item',sep='|',names=m_cols,encoding='latin1',usecols=range(5))
Ugenre_header = ['Type','id']
Ugenre_df = pd.read_csv('data/ml-100k/u.genre',sep='|',names=Ugenre_header)
Uuser_header = ['user_id', 'age', 'gender', 'occupation' ,'zip_code']
Uuser_df = pd.read_csv('data/ml-100k/u.user',sep='|',names=Uuser_header)
Total_df = pd.merge(Udata_df,Uitem_df,on = "item_id")
Total_df = pd.merge(Total_df,Uuser_df,on = "user_id")
SortByUser=Total_df.sort_values(by = ["user_id"])
# Modelling
df = Total_df
n_users = df.user_id.unique().shape[0]
n_items = df.item_id.unique().shape[0]
# classifying the movie according to the type
ratings = pd.DataFrame(df.groupby('title')['rating'].mean())
ratings['number_of_ratings'] = df.groupby('title')['rating'].count()
movie_matrix = df.pivot_table(index='user_id', columns='title', values='rating')
X_train,X_test = train_test_split(df,test_size=0.2)
# Create Data matrix
train_data_matrix = np.zeros((n_users,n_items))
for row in X_train.itertuples():
train_data_matrix[row[1]-1,row[2]-1] = row[3]
for row in X_test.itertuples():
train_data_matrix[row[1]-1,row[2]-1] = row[3]
train_data_matrix1 = np.zeros((n_users,n_items))
for row in df.itertuples():
train_data_matrix1[row[1]-1,row[2]-1] = row[3]
test_data_matrix = np.zeros((n_users, n_items))
for line in X_test.itertuples():
test_data_matrix[line[1]-1, line[2]-1] = line[3]
# Similarity
user_similarity = pairwise_distances(train_data_matrix, metric = "cosine")
#user_similarity = pairwise_distances(train_data_matrix, metric = "euclidean")
item_similarity = cosine_similarity(train_data_matrix.T, dense_output=True)
###############################
# #
# Predict Movie Score Part #
# #
###############################
# get similarity of testUser with allUser
def get_similarity(testUser, allUser):
return pairwise_distances(testUser,allUser, metric = "cosine")
# get matrix of topK similarity User
def get_topK(matrix,similarity,k):
similarity = similarity[0]
topK_data_matrix = []
i = len(similarity)
for j in range(i):
# 有问题
arr = similarity.argsort()[-k:]
arr_index = arr
for m in arr_index:
topK_data_matrix.append(matrix[m])
# top k mean similarity
topK_data_matrix = np.asarray(topK_data_matrix)
return topK_data_matrix
# Through User based to predict score
# The function and formula with previous one is different
def user_base_predict(testUser, topKUser):
# similarity again:
sim = pairwise_distances(testUser,topKUser, metric = "cosine")
sim2 = pairwise_distances(testUser,topKUser, metric = "cosine")
#print(sim)
for i in range(len(sim)):
for j in range(len(sim[0])):
sim[i][j] = 1/(sim[i][j]+1)
sim_avg = sim.mean(axis = 1)
pred = sim_avg * (np.dot(sim2,topKUser))
return pred
def user_base_predict2(testUser, topKUser):
r1 = topKUser.mean(axis =1)
sim = pairwise_distances(testUser,topKUser, metric = "cosine")
sim2 = pairwise_distances(testUser,topKUser, metric = "cosine")
for i in range(len(sim)):
for j in range(len(sim[0])):
sim[i][j] = 1/(sim[i][j]+1)
sim_avg = sim.mean(axis = 1)
r2 = sim_avg* (np.dot(sim2,topKUser))
diff = topKUser - r1[:,np.newaxis]
pred = r1[:,np.newaxis] + sim_avg* (np.dot(sim2,diff))
return pred
# predict all user's score
def predict_all(train_data_matrix,topK):
predict = []
for i in range(len(train_data_matrix)):
testUser = [train_data_matrix[i]]
if i == 0:
allUser = train_data_matrix[i+1:]
elif i == (len(train_data_matrix) -1):
allUser = train_data_matrix[:i]
else:
allUp = train_data_matrix[:i]
allDown = train_data_matrix[i+1:]
allUser = np.concatenate((allUp,allDown))
s = get_similarity(testUser,allUser)
topKUser = get_topK(train_data_matrix,s,topK)
prediction = user_base_predict(testUser,topKUser)
predict.append(prediction)
return np.asarray(predict)
y_predict = predict_all(train_data_matrix,10)
def predict_userMovieScore(predictall, userID):
return predictall[userID-1]
## Useing MSE to test the result:
#from sklearn.metrics import mean_squared_error
#y_true = train_data_matrix
#y_predict = np.squeeze(y_predict, axis=1)
#mean_squared_error(y_true, y_predict)
# RUN: if we want to predict the 1st user's score:
predict_userMovieScore(y_predict,1) | en | 0.475529 | # Read Data and Cleaning # Uitem_header = ['item_id', 'movie_title', 'release_date', 'video_release_date', # 'IMDb_URL', 'unknown', 'Action', 'Adventure' ,'Animation', # 'Childrens','Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', # 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', # 'Thriller', 'War', 'Western'] # Modelling # classifying the movie according to the type # Create Data matrix # Similarity #user_similarity = pairwise_distances(train_data_matrix, metric = "euclidean") ############################### # # # Predict Movie Score Part # # # ############################### # get similarity of testUser with allUser # get matrix of topK similarity User # 有问题 # top k mean similarity # Through User based to predict score # The function and formula with previous one is different # similarity again: #print(sim) # predict all user's score ## Useing MSE to test the result: #from sklearn.metrics import mean_squared_error #y_true = train_data_matrix #y_predict = np.squeeze(y_predict, axis=1) #mean_squared_error(y_true, y_predict) # RUN: if we want to predict the 1st user's score: | 2.254189 | 2 |
scraper/test_scraper.py | ecohealthalliance/grits-api | 3 | 6624366 | # coding=utf8
import unittest
import scraper
import process_resources
import logging
import translation
# Configure root logging and raise the 'scraper' module's logger to INFO so
# per-URL scraping progress is visible on the console while the tests run.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('scraper')
logger.setLevel(logging.INFO)
class TestScraper(unittest.TestCase):
    """Integration smoke tests for scraper.scrape.

    Each test fetches a live URL and checks the result is not flagged
    'unscrapable'; these hit the network and the project's scraper,
    process_resources and translation modules.

    Fixes: the second duplicate ``test_english_detection`` shadowed the first
    definition of the same name, so the first never ran under unittest; the
    duplicate is renamed below.  Bare Py2 ``print`` statements are
    parenthesized so the module also parses under Python 3.
    """
    def test_promed_1(self):
        result = scraper.scrape("http://promedmail.org/direct.php?id=20140919.436908")
        # print process_resources.extract_clean_content(result['htmlContent'])
        self.assertFalse(result.get('unscrapable'))
    def test_promed_2(self):
        result = scraper.scrape("http://www.promedmail.org/direct.php?id=3041400")
        # print process_resources.extract_clean_content(result['htmlContent'])
        self.assertFalse(result.get('unscrapable'))
    def test_link_1(self):
        result = scraper.scrape("http://news.zing.vn/nhip-song-tre/thay-giao-gay-sot-tung-bo-luat-tinh-yeu/a291427.html")
        # print process_resources.extract_clean_content(result['htmlContent'])
        self.assertFalse(result.get('unscrapable'))
    def test_link_2(self):
        result = scraper.scrape("http://news.google.com/news/url?sa=t&fd=R&usg=AFQjCNErKUuDda2EHlPu0LwpUJ0dcdDY4g&url=http://focus.stockstar.com/SS2012101000003737.shtml")
        # print process_resources.extract_clean_content(result['htmlContent'])
        self.assertFalse(result.get('unscrapable'))
    def test_link_3(self):
        result = scraper.scrape("http://www.theargus.co.uk/news/9845086.Screening_follows_new_cases_of_TB_reported_in_Sussex/")
        print(process_resources.extract_clean_content(result['htmlContent']))
        self.assertFalse(result.get('unscrapable'))
    def test_link_4(self):
        result = scraper.scrape("http://www.foodmate.net/news/yujing/2012/05/206413.html")
        # print process_resources.extract_clean_content(result['htmlContent'])
        self.assertFalse(result.get('unscrapable'))
    def test_link_5(self):
        # This article can be visited in my browser, but the server
        # sends back error messages in html comments when scraped.
        result = scraper.scrape("http://news.google.com/news/url?sa=t&fd=R&usg=AFQjCNHf5IPdc5RFjTgsO7TnHq_LW8l0-Q&url=http://www.eltribuno.info/Jujuy/218240-Suspendieron-las-clases-por-un-brote-de-influenza-B-en-pueblos-de-la-Puna-.note.aspx")
        if result.get('unscrapable'):
            print(result)
        self.assertFalse(result.get('unscrapable'))
        self.assertTrue(len(process_resources.extract_clean_content(result['htmlContent'])['content']) > 1)
    def test_english_detection(self):
        # A scraped Vietnamese article must not be classified as English.
        from translation import Translator
        my_translator = Translator(None)
        result = scraper.scrape("http://news.google.com/news/url?sa=t&fd=R&usg=AFQjCNFY1KzEAhaiZchzd5ulmoY4_4P8kA&url=http://vov.vn/Van-hoa/NSND-Thanh-Hoa-xuc-dong-hat-truoc-benh-nhan/228256.vov")
        self.assertFalse(result.get('unscrapable'))
        text_obj = process_resources.extract_clean_content(result['htmlContent'])
        self.assertFalse(my_translator.is_english(text_obj['content']))
    def test_english_translation(self):
        import config
        from translation import Translator
        my_translator = Translator(config)
        result = scraper.scrape("http://peninsulardigital.com/municipios/comondu/refuerzan-acciones-contra-el-dengue/155929")
        text_obj = process_resources.extract_clean_content(result['htmlContent'])
        translation_obj = my_translator.translate_to_english(text_obj['content'])
        self.assertFalse(translation_obj.get('error'))
    def test_cutoff(self):
        # This article is being cut off at "Tochter in die Kita bringen"
        # Goose is at fault. Using beautiful soup instead seems to avoid the
        # cutoff, however we need a method to determine which method we should
        # be using.
        result = scraper.scrape("http://www.haz.de/Hannover/Aus-der-Region/Wennigsen/Nachrichten/Kita-Kind-an-Ehec-erkrankt")
        self.assertTrue(
            process_resources.extract_clean_content(
                result['htmlContent'])['content']
            .strip()
            .endswith("<NAME>"))
    def test_pdf_querystring(self):
        # Only checks that scraping a PDF URL with a query string does not raise.
        result = scraper.scrape(
            "http://apps.who.int/iris/bitstream/10665/136645/1/roadmapupdate17Oct14_eng.pdf?ua=1")
    def test_english_detection_plain_text(self):
        # Renamed from test_english_detection: the duplicate name shadowed the
        # earlier method of the same name, which therefore never ran.
        import config
        from translation import Translator
        my_translator = Translator(config)
        self.assertFalse(my_translator.is_english("""
        Bệnh viêm não năm nay đã xuất hiện nhiều dấu hiệu bất thường...
        """))
import unittest
import scraper
import process_resources
import logging
import translation
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('scraper')
logger.setLevel(logging.INFO)
class TestScraper(unittest.TestCase):
def test_promed_1(self):
result = scraper.scrape("http://promedmail.org/direct.php?id=20140919.436908")
# print process_resources.extract_clean_content(result['htmlContent'])
self.assertFalse(result.get('unscrapable'))
def test_promed_2(self):
result = scraper.scrape("http://www.promedmail.org/direct.php?id=3041400")
# print process_resources.extract_clean_content(result['htmlContent'])
self.assertFalse(result.get('unscrapable'))
def test_link_1(self):
result = scraper.scrape("http://news.zing.vn/nhip-song-tre/thay-giao-gay-sot-tung-bo-luat-tinh-yeu/a291427.html")
# print process_resources.extract_clean_content(result['htmlContent'])
self.assertFalse(result.get('unscrapable'))
def test_link_2(self):
result = scraper.scrape("http://news.google.com/news/url?sa=t&fd=R&usg=AFQjCNErKUuDda2EHlPu0LwpUJ0dcdDY4g&url=http://focus.stockstar.com/SS2012101000003737.shtml")
# print process_resources.extract_clean_content(result['htmlContent'])
self.assertFalse(result.get('unscrapable'))
def test_link_3(self):
result = scraper.scrape("http://www.theargus.co.uk/news/9845086.Screening_follows_new_cases_of_TB_reported_in_Sussex/")
print process_resources.extract_clean_content(result['htmlContent'])
self.assertFalse(result.get('unscrapable'))
def test_link_4(self):
result = scraper.scrape("http://www.foodmate.net/news/yujing/2012/05/206413.html")
# print process_resources.extract_clean_content(result['htmlContent'])
self.assertFalse(result.get('unscrapable'))
def test_link_5(self):
# This article can be visited in my browser, but the server
# sends back error messages in html comments when scraped.
result = scraper.scrape("http://news.google.com/news/url?sa=t&fd=R&usg=AFQjCNHf5IPdc5RFjTgsO7TnHq_LW8l0-Q&url=http://www.eltribuno.info/Jujuy/218240-Suspendieron-las-clases-por-un-brote-de-influenza-B-en-pueblos-de-la-Puna-.note.aspx")
if result.get('unscrapable'):
print result
self.assertFalse(result.get('unscrapable'))
self.assertTrue(len(process_resources.extract_clean_content(result['htmlContent'])['content']) > 1)
def test_english_detection(self):
from translation import Translator
my_translator = Translator(None)
result = scraper.scrape("http://news.google.com/news/url?sa=t&fd=R&usg=AFQjCNFY1KzEAhaiZchzd5ulmoY4_4P8kA&url=http://vov.vn/Van-hoa/NSND-Thanh-Hoa-xuc-dong-hat-truoc-benh-nhan/228256.vov")
self.assertFalse(result.get('unscrapable'))
text_obj = process_resources.extract_clean_content(result['htmlContent'])
self.assertFalse(my_translator.is_english(text_obj['content']))
def test_english_translation(self):
import config
from translation import Translator
my_translator = Translator(config)
result = scraper.scrape("http://peninsulardigital.com/municipios/comondu/refuerzan-acciones-contra-el-dengue/155929")
text_obj = process_resources.extract_clean_content(result['htmlContent'])
translation_obj = my_translator.translate_to_english(text_obj['content'])
self.assertFalse(translation_obj.get('error'))
def test_cutoff(self):
# This article is being cut off at "Tochter in die Kita bringen"
# Goose is at fault. Using beautiful soup instead seems to avoid the
# cutoff, however we need a method to determine which method we should
# be using.
result = scraper.scrape("http://www.haz.de/Hannover/Aus-der-Region/Wennigsen/Nachrichten/Kita-Kind-an-Ehec-erkrankt")
self.assertTrue(
process_resources.extract_clean_content(
result['htmlContent'])['content']
.strip()
.endswith("<NAME>"))
def test_pdf_querystring(self):
result = scraper.scrape(
"http://apps.who.int/iris/bitstream/10665/136645/1/roadmapupdate17Oct14_eng.pdf?ua=1")
def test_english_detection(self):
import config
from translation import Translator
my_translator = Translator(config)
self.assertFalse(my_translator.is_english("""
Bệnh viêm não năm nay đã xuất hiện nhiều dấu hiệu bất thường...
""")) | en | 0.55513 | # coding=utf8 # print process_resources.extract_clean_content(result['htmlContent']) # print process_resources.extract_clean_content(result['htmlContent']) # print process_resources.extract_clean_content(result['htmlContent']) # print process_resources.extract_clean_content(result['htmlContent']) # print process_resources.extract_clean_content(result['htmlContent']) # This article can be visited in my browser, but the server # sends back error messages in html comments when scraped. # This article is being cut off at "Tochter in die Kita bringen" # Goose is at fault. Using beautiful soup instead seems to avoid the # cutoff, however we need a method to determine which method we should # be using. Bệnh viêm não năm nay đã xuất hiện nhiều dấu hiệu bất thường... | 2.575717 | 3 |
handy/rand/number.py | Al3xChen/handy | 5 | 6624367 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 27 22:20:38 2017
@author: Frank
"""
from random import choice
#封装函数,生成随机手机号码
def randomPN():
    """Build a random 11-digit Chinese mobile phone number as a string."""
    # 3-digit carrier prefixes; '186' appears twice in the source data, which
    # doubles its selection weight - preserved unchanged.
    prefixes = ['187', '186', '186', '158', '155', '156', '138', '135',
                '136', '177', '176', '144', '147']
    # Pick the prefix first, then draw the remaining eight random digits.
    prefix = choice(prefixes)
    digits = "1234567890"
    tail = ''.join(choice(digits) for _ in range(8))
    # Concatenate into the complete phone number.
    return prefix + tail
if __name__ == "__main__":
print(randomPN()) | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 27 22:20:38 2017
@author: Frank
"""
from random import choice
#封装函数,生成随机手机号码
def randomPN():
area_num = ['187','186','186','158','155','156','138','135','136','177','176','144','147']
#获取手机号码区域号
area_number = choice(area_num)
#生成后8位手机号码
seed = "1234567890"
sa = []
for i in range(8):
sa.append(choice(seed))
last_eightnumber = ''.join(sa)
#拼接生成完整手机号码
return area_number + last_eightnumber
if __name__ == "__main__":
print(randomPN()) | zh | 0.621582 | # -*- coding: utf-8 -*- Created on Wed Dec 27 22:20:38 2017 @author: Frank #封装函数,生成随机手机号码 #获取手机号码区域号 #生成后8位手机号码 #拼接生成完整手机号码 | 3.401988 | 3 |
Day66-75/code/example02.py | EngrSaad2/Python-100-Days | 37 | 6624368 | <reponame>EngrSaad2/Python-100-Days<gh_stars>10-100
from bs4 import BeautifulSoup
import re
def main():
    """Walk through common BeautifulSoup navigation and search APIs on a sample page."""
    html = """
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <title>首页</title>
    </head>
    <body>
        <h1>Hello, world!</h1>
        <p>这是一个<em>神奇</em>的网站!</p>
        <hr>
        <div>
            <h2>这是一个例子程序</h2>
            <p>静夜思</p>
            <p class="foo">床前明月光</p>
            <p id="bar">疑似地上霜</p>
            <p class="foo">举头望明月</p>
            <div><a href="http://www.baidu.com"><p>低头思故乡</p></a></div>
        </div>
        <a class="foo" href="http://www.qq.com">腾讯网</a>
        <img src="./img/pretty-girl.png" alt="美女">
        <img src="./img/hellokitty.png" alt="凯蒂猫">
        <img src="/static/img/pretty-girl.png" alt="美女">
        <table>
            <tr>
                <th>姓名</th>
                <th>上场时间</th>
                <th>得分</th>
                <th>篮板</th>
                <th>助攻</th>
            </tr>
        </table>
    </body>
    </html>
    """
    soup = BeautifulSoup(html, 'lxml')
    # JavaScript - document.title
    print(soup.title)
    # JavaScript - document.body.h1
    print(soup.body.h1)
    # Tag attribute access returns the FIRST matching descendant.
    print(soup.p)
    print(soup.body.p.text)
    print(soup.body.p.contents)
    for p_child in soup.body.p.children:
        print(p_child)
    # Direct children vs. all nested descendants of <body>.
    print(len([elem for elem in soup.body.children]))
    print(len([elem for elem in soup.body.descendants]))
    # A compiled regex passed to findAll/find_all matches tag NAMES (h1..h6 here).
    print(soup.findAll(re.compile(r'^h[1-6]')))
    # NOTE(review): a plain string is matched as a literal tag name, so r'^h'
    # finds nothing here - probably meant re.compile(r'^h').
    print(soup.body.find_all(r'^h'))
    print(soup.body.div.find_all(re.compile(r'^h')))
    print(soup.find_all(re.compile(r'r$')))
    # Attribute filters also accept regexes (only relative ./img paths match).
    print(soup.find_all('img', {'src': re.compile(r'\./img/\w+.png')}))
    # Any callable taking a tag works as a filter; foo (module level) is equivalent.
    print(soup.find_all(lambda x: len(x.attrs) == 2))
    print(soup.find_all(foo))
    print(soup.find_all('p', {'class': 'foo'}))
    # CSS selector API.
    for elem in soup.select('a[href]'):
        print(elem.attrs['href'])
def foo(elem):
    """Filter predicate for find_all: True when *elem* carries exactly two attributes."""
    attr_count = len(elem.attrs)
    return attr_count == 2
if __name__ == '__main__':
main()
| from bs4 import BeautifulSoup
import re
def main():
html = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>首页</title>
</head>
<body>
<h1>Hello, world!</h1>
<p>这是一个<em>神奇</em>的网站!</p>
<hr>
<div>
<h2>这是一个例子程序</h2>
<p>静夜思</p>
<p class="foo">床前明月光</p>
<p id="bar">疑似地上霜</p>
<p class="foo">举头望明月</p>
<div><a href="http://www.baidu.com"><p>低头思故乡</p></a></div>
</div>
<a class="foo" href="http://www.qq.com">腾讯网</a>
<img src="./img/pretty-girl.png" alt="美女">
<img src="./img/hellokitty.png" alt="凯蒂猫">
<img src="/static/img/pretty-girl.png" alt="美女">
<table>
<tr>
<th>姓名</th>
<th>上场时间</th>
<th>得分</th>
<th>篮板</th>
<th>助攻</th>
</tr>
</table>
</body>
</html>
"""
soup = BeautifulSoup(html, 'lxml')
# JavaScript - document.title
print(soup.title)
# JavaScript - document.body.h1
print(soup.body.h1)
print(soup.p)
print(soup.body.p.text)
print(soup.body.p.contents)
for p_child in soup.body.p.children:
print(p_child)
print(len([elem for elem in soup.body.children]))
print(len([elem for elem in soup.body.descendants]))
print(soup.findAll(re.compile(r'^h[1-6]')))
print(soup.body.find_all(r'^h'))
print(soup.body.div.find_all(re.compile(r'^h')))
print(soup.find_all(re.compile(r'r$')))
print(soup.find_all('img', {'src': re.compile(r'\./img/\w+.png')}))
print(soup.find_all(lambda x: len(x.attrs) == 2))
print(soup.find_all(foo))
print(soup.find_all('p', {'class': 'foo'}))
for elem in soup.select('a[href]'):
print(elem.attrs['href'])
def foo(elem):
return len(elem.attrs) == 2
if __name__ == '__main__':
main() | ja | 0.125663 | <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <title>首页</title> </head> <body> <h1>Hello, world!</h1> <p>这是一个<em>神奇</em>的网站!</p> <hr> <div> <h2>这是一个例子程序</h2> <p>静夜思</p> <p class="foo">床前明月光</p> <p id="bar">疑似地上霜</p> <p class="foo">举头望明月</p> <div><a href="http://www.baidu.com"><p>低头思故乡</p></a></div> </div> <a class="foo" href="http://www.qq.com">腾讯网</a> <img src="./img/pretty-girl.png" alt="美女"> <img src="./img/hellokitty.png" alt="凯蒂猫"> <img src="/static/img/pretty-girl.png" alt="美女"> <table> <tr> <th>姓名</th> <th>上场时间</th> <th>得分</th> <th>篮板</th> <th>助攻</th> </tr> </table> </body> </html> # JavaScript - document.title # JavaScript - document.body.h1 | 3.126176 | 3 |
scripts/xor_4f/xor_4f_diagnostics.py | NRuf77/proset | 0 | 6624369 | """Score proset classifier trained on the 'continuous XOR' problem with 4 features.
Copyright by <NAME>
Released under the MIT license - see LICENSE file for details
"""
import gzip
import os
import pickle
import numpy as np
from sklearn.metrics import classification_report, log_loss, roc_auc_score
import proset.utility as utility
print("* Apply user settings")
input_path = "scripts/results"
output_path = "scripts/reports"
input_file = "xor_4f_2d_95_model.gz"
export_file = input_file.replace(".gz", "_export.xlsx")
model_name = input_file.replace(".gz", "")
print("* Load model fit results")
with gzip.open(os.path.join(input_path, input_file), mode="rb") as file:
result = pickle.load(file)
print("* Generate prototype report")
scale = np.sqrt(result["model"]["transform"].var_)
offset = result["model"]["transform"].mean_
export = result["model"]["model"].export(
feature_names=result["data"]["feature_names"],
scale=scale,
offset=offset
)
utility.write_report(file_path=os.path.join(output_path, export_file), report=export)
print("* Show results")
train_features = result["model"]["transform"].transform(result["data"]["X_train"])
train_labels = result["data"]["y_train"]
test_features = result["model"]["transform"].transform(result["data"]["X_test"])
test_labels = result["data"]["y_test"]
prediction = result["model"]["model"].predict(test_features)
probabilities = result["model"]["model"].predict_proba(test_features)
active_features = result["model"]["model"].set_manager_.get_active_features()
misclassified = prediction != test_labels
print("- Hyperparameter selection")
utility.print_hyperparameter_report(result)
print("- Final model")
print("log-loss = {:.2f}".format(log_loss(y_true=test_labels, y_pred=probabilities)))
print("roc-auc = {:.2f}".format(roc_auc_score(y_true=test_labels, y_score=probabilities[:, 1])))
print("number of active features = {}".format(active_features.shape[0]))
print("number of prototypes = {}\n".format(result["model"]["model"].set_manager_.get_num_prototypes()))
print("- Selected features and weights")
utility.print_feature_report(model=result["model"]["model"], feature_names=result["data"]["feature_names"])
print("- Classification report")
print(classification_report(y_true=test_labels, y_pred=prediction))
utility.plot_select_results(result=result, model_name=model_name)
plotter = utility.ClassifierPlots(
model=result["model"]["model"],
model_name=model_name,
feature_names=result["data"]["feature_names"],
scale=scale,
offset=offset
)
ix = np.prod(result["data"]["X_train"][:, 2:], axis=1) >= 0
# select test samples which have identical class based on the first two features
x_range, y_range = plotter.plot_surface(
features=train_features[ix, :],
target=train_labels[ix],
baseline=np.ones((1, 4)) * 0.5, # fix remaining features to positive sign
plot_index=np.array([0, 1]),
comment="training samples",
use_proba=True
)
ix = np.prod(result["data"]["X_test"][:, 2:], axis=1) >= 0
# noinspection PyUnresolvedReferences
plotter.plot_surface(
features=test_features[ix, :],
target=test_labels[ix],
baseline=np.ones((1, 4)) * 0.5, # fix remaining features to positive sign
plot_index=np.array([0, 1]),
comment="test samples",
highlight=misclassified[ix],
highlight_name="misclassified",
x_range=x_range,
y_range=y_range
)
x_range, y_range = plotter.plot_batch_map(
batch=1,
features=train_features,
target=train_labels,
comment="training samples",
show_index=False
)
plotter.plot_batch_map(
batch=1,
features=test_features,
target=test_labels,
comment="test samples",
highlight=misclassified,
highlight_name="misclassified",
x_range=x_range,
y_range=y_range,
show_index=False
)
plotter.plot_batch_map(batch=1, x_range=x_range, y_range=y_range, show_index=False)
# feature plot is pointless as no pair of features carries enough information to determine the class
print("* Done")
| """Score proset classifier trained on the 'continuous XOR' problem with 4 features.
Copyright by <NAME>
Released under the MIT license - see LICENSE file for details
"""
import gzip
import os
import pickle
import numpy as np
from sklearn.metrics import classification_report, log_loss, roc_auc_score
import proset.utility as utility
print("* Apply user settings")
input_path = "scripts/results"
output_path = "scripts/reports"
input_file = "xor_4f_2d_95_model.gz"
export_file = input_file.replace(".gz", "_export.xlsx")
model_name = input_file.replace(".gz", "")
print("* Load model fit results")
with gzip.open(os.path.join(input_path, input_file), mode="rb") as file:
result = pickle.load(file)
print("* Generate prototype report")
scale = np.sqrt(result["model"]["transform"].var_)
offset = result["model"]["transform"].mean_
export = result["model"]["model"].export(
feature_names=result["data"]["feature_names"],
scale=scale,
offset=offset
)
utility.write_report(file_path=os.path.join(output_path, export_file), report=export)
print("* Show results")
train_features = result["model"]["transform"].transform(result["data"]["X_train"])
train_labels = result["data"]["y_train"]
test_features = result["model"]["transform"].transform(result["data"]["X_test"])
test_labels = result["data"]["y_test"]
prediction = result["model"]["model"].predict(test_features)
probabilities = result["model"]["model"].predict_proba(test_features)
active_features = result["model"]["model"].set_manager_.get_active_features()
misclassified = prediction != test_labels
print("- Hyperparameter selection")
utility.print_hyperparameter_report(result)
print("- Final model")
print("log-loss = {:.2f}".format(log_loss(y_true=test_labels, y_pred=probabilities)))
print("roc-auc = {:.2f}".format(roc_auc_score(y_true=test_labels, y_score=probabilities[:, 1])))
print("number of active features = {}".format(active_features.shape[0]))
print("number of prototypes = {}\n".format(result["model"]["model"].set_manager_.get_num_prototypes()))
print("- Selected features and weights")
utility.print_feature_report(model=result["model"]["model"], feature_names=result["data"]["feature_names"])
print("- Classification report")
print(classification_report(y_true=test_labels, y_pred=prediction))
utility.plot_select_results(result=result, model_name=model_name)
plotter = utility.ClassifierPlots(
model=result["model"]["model"],
model_name=model_name,
feature_names=result["data"]["feature_names"],
scale=scale,
offset=offset
)
ix = np.prod(result["data"]["X_train"][:, 2:], axis=1) >= 0
# select test samples which have identical class based on the first two features
x_range, y_range = plotter.plot_surface(
features=train_features[ix, :],
target=train_labels[ix],
baseline=np.ones((1, 4)) * 0.5, # fix remaining features to positive sign
plot_index=np.array([0, 1]),
comment="training samples",
use_proba=True
)
ix = np.prod(result["data"]["X_test"][:, 2:], axis=1) >= 0
# noinspection PyUnresolvedReferences
plotter.plot_surface(
features=test_features[ix, :],
target=test_labels[ix],
baseline=np.ones((1, 4)) * 0.5, # fix remaining features to positive sign
plot_index=np.array([0, 1]),
comment="test samples",
highlight=misclassified[ix],
highlight_name="misclassified",
x_range=x_range,
y_range=y_range
)
x_range, y_range = plotter.plot_batch_map(
batch=1,
features=train_features,
target=train_labels,
comment="training samples",
show_index=False
)
plotter.plot_batch_map(
batch=1,
features=test_features,
target=test_labels,
comment="test samples",
highlight=misclassified,
highlight_name="misclassified",
x_range=x_range,
y_range=y_range,
show_index=False
)
plotter.plot_batch_map(batch=1, x_range=x_range, y_range=y_range, show_index=False)
# feature plot is pointless as no pair of features carries enough information to determine the class
print("* Done")
| en | 0.926049 | Score proset classifier trained on the 'continuous XOR' problem with 4 features.
Copyright by <NAME>
Released under the MIT license - see LICENSE file for details # select test samples which have identical class based on the first two features # fix remaining features to positive sign # noinspection PyUnresolvedReferences # fix remaining features to positive sign # feature plot is pointless as no pair of features carries enough information to determine the class | 2.32154 | 2 |
acc_utils/errors.py | cap-lab/MidapSim | 2 | 6624370 | <gh_stars>1-10
from __future__ import absolute_import, division, print_function, unicode_literals
# Define possible errors in simulator
def _assert(cand, note=""):
if not cand:
raise ValueError(note)
class ModelBuildError(Exception):
    """Raised when construction of a MIDAP model fails."""

    _PREFIX = 'MIDAP MODEL Build Error: '

    def __init__(self, msg='Not defined'):
        # Keep the prefixed text on .msg so callers can inspect it directly.
        self.msg = self._PREFIX + msg

    def __str__(self):
        return self.msg
class ControlManagerError(Exception):
    """Raised when MIDAP control-sequence generation fails."""

    _PREFIX = 'MIDAP Control Sequence Generation Error: '

    def __init__(self, msg='Not defined'):
        self.msg = self._PREFIX + msg

    def __str__(self):
        return self.msg
class MIDAPError(Exception):
    """Raised on MIDAP hardware-simulator failures."""

    _PREFIX = 'MIDAP Hardware simulator Error: '

    def __init__(self, msg='Not defined'):
        self.msg = self._PREFIX + msg

    def __str__(self):
        return self.msg
class DataInfoTableError(Exception):
    """Raised when setting up the MIDAP data-info table fails."""

    _PREFIX = 'MIDAP Data Info Table Setup Error: '

    def __init__(self, msg='Not defined'):
        self.msg = self._PREFIX + msg

    def __str__(self):
        return self.msg
class CompilerError(Exception):
    """Raised on MIDAP compiler failures."""

    _PREFIX = 'MIDAP Compiler Error: '

    def __init__(self, msg='Not defined'):
        self.msg = self._PREFIX + msg

    def __str__(self):
        return self.msg
| from __future__ import absolute_import, division, print_function, unicode_literals
# Define possible errors in simulator
def _assert(cand, note=""):
if not cand:
raise ValueError(note)
class ModelBuildError(Exception):
def __init__(self, msg='Not defined'):
self.msg = 'MIDAP MODEL Build Error: ' + msg
def __str__(self):
return self.msg
class ControlManagerError(Exception):
def __init__(self, msg='Not defined'):
self.msg = 'MIDAP Control Sequence Generation Error: ' + msg
def __str__(self):
return self.msg
class MIDAPError(Exception):
def __init__(self, msg='Not defined'):
self.msg = 'MIDAP Hardware simulator Error: ' + msg
def __str__(self):
return self.msg
class DataInfoTableError(Exception):
def __init__(self, msg='Not defined'):
self.msg = 'MIDAP Data Info Table Setup Error: ' + msg
def __str__(self):
return self.msg
class CompilerError(Exception):
def __init__(self, msg='Not defined'):
self.msg = 'MIDAP Compiler Error: ' + msg
def __str__(self):
return self.msg | en | 0.50854 | # Define possible errors in simulator | 2.37515 | 2 |
tests/test_folder.py | phaustin/canvasapi | 0 | 6624371 | from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import requests_mock
import warnings
from canvasapi import Canvas
from canvasapi.file import File
from canvasapi.folder import Folder
from tests import settings
from tests.util import register_uris
@requests_mock.Mocker()
class TestFolder(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris({'folder': ['get_by_id']}, m)
self.folder = self.canvas.get_folder(1)
# __str__()
def test__str__(self, m):
string = str(self.folder)
self.assertIsInstance(string, str)
# list_files()
def test_list_files(self, m):
register_uris({'folder': ['list_folder_files', 'list_folder_files2']}, m)
with warnings.catch_warnings(record=True) as warning_list:
files = self.folder.list_files()
file_list = [file for file in files]
self.assertEqual(len(file_list), 4)
self.assertIsInstance(file_list[0], File)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
# get_files()
def test_get_files(self, m):
register_uris({'folder': ['list_folder_files', 'list_folder_files2']}, m)
files = self.folder.get_files()
file_list = [file for file in files]
self.assertEqual(len(file_list), 4)
self.assertIsInstance(file_list[0], File)
# delete()
def test_delete_file(self, m):
register_uris({'folder': ['delete_folder']}, m)
deleted_folder = self.folder.delete()
self.assertIsInstance(deleted_folder, Folder)
self.assertTrue(hasattr(deleted_folder, 'name'))
self.assertEqual(deleted_folder.full_name, "course_files/Folder 1")
# list_folders()
def test_list_folders(self, m):
register_uris({'folder': ['list_folders']}, m)
with warnings.catch_warnings(record=True) as warning_list:
folders = self.folder.list_folders()
folder_list = [folder for folder in folders]
self.assertEqual(len(folder_list), 2)
self.assertIsInstance(folder_list[0], Folder)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
# get_folders()
def test_get_folders(self, m):
register_uris({'folder': ['list_folders']}, m)
folders = self.folder.get_folders()
folder_list = [folder for folder in folders]
self.assertEqual(len(folder_list), 2)
self.assertIsInstance(folder_list[0], Folder)
# create_folder()
def test_create_folder(self, m):
register_uris({'folder': ['create_folder']}, m)
name_str = "<NAME>"
response = self.folder.create_folder(name=name_str)
self.assertIsInstance(response, Folder)
# update()
def test_update(self, m):
register_uris({'folder': ['update']}, m)
new_name = '<NAME>'
response = self.folder.update(name=new_name)
self.assertIsInstance(response, Folder)
self.assertEqual(self.folder.name, new_name)
# copy_file()
def test_copy_file(self, m):
register_uris({'folder': ['copy_file']}, m)
new_file = self.folder.copy_file(1)
self.assertIsInstance(new_file, File)
self.assertEqual(new_file.display_name, 'Dummy File-1')
self.assertEqual(new_file.id, 1)
| from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import requests_mock
import warnings
from canvasapi import Canvas
from canvasapi.file import File
from canvasapi.folder import Folder
from tests import settings
from tests.util import register_uris
@requests_mock.Mocker()
class TestFolder(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris({'folder': ['get_by_id']}, m)
self.folder = self.canvas.get_folder(1)
# __str__()
def test__str__(self, m):
string = str(self.folder)
self.assertIsInstance(string, str)
# list_files()
def test_list_files(self, m):
register_uris({'folder': ['list_folder_files', 'list_folder_files2']}, m)
with warnings.catch_warnings(record=True) as warning_list:
files = self.folder.list_files()
file_list = [file for file in files]
self.assertEqual(len(file_list), 4)
self.assertIsInstance(file_list[0], File)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
# get_files()
def test_get_files(self, m):
register_uris({'folder': ['list_folder_files', 'list_folder_files2']}, m)
files = self.folder.get_files()
file_list = [file for file in files]
self.assertEqual(len(file_list), 4)
self.assertIsInstance(file_list[0], File)
# delete()
def test_delete_file(self, m):
register_uris({'folder': ['delete_folder']}, m)
deleted_folder = self.folder.delete()
self.assertIsInstance(deleted_folder, Folder)
self.assertTrue(hasattr(deleted_folder, 'name'))
self.assertEqual(deleted_folder.full_name, "course_files/Folder 1")
# list_folders()
def test_list_folders(self, m):
register_uris({'folder': ['list_folders']}, m)
with warnings.catch_warnings(record=True) as warning_list:
folders = self.folder.list_folders()
folder_list = [folder for folder in folders]
self.assertEqual(len(folder_list), 2)
self.assertIsInstance(folder_list[0], Folder)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
# get_folders()
def test_get_folders(self, m):
register_uris({'folder': ['list_folders']}, m)
folders = self.folder.get_folders()
folder_list = [folder for folder in folders]
self.assertEqual(len(folder_list), 2)
self.assertIsInstance(folder_list[0], Folder)
# create_folder()
def test_create_folder(self, m):
register_uris({'folder': ['create_folder']}, m)
name_str = "<NAME>"
response = self.folder.create_folder(name=name_str)
self.assertIsInstance(response, Folder)
# update()
def test_update(self, m):
register_uris({'folder': ['update']}, m)
new_name = '<NAME>'
response = self.folder.update(name=new_name)
self.assertIsInstance(response, Folder)
self.assertEqual(self.folder.name, new_name)
# copy_file()
def test_copy_file(self, m):
register_uris({'folder': ['copy_file']}, m)
new_file = self.folder.copy_file(1)
self.assertIsInstance(new_file, File)
self.assertEqual(new_file.display_name, 'Dummy File-1')
self.assertEqual(new_file.id, 1)
| en | 0.096998 | # __str__() # list_files() # get_files() # delete() # list_folders() # get_folders() # create_folder() # update() # copy_file() | 2.365379 | 2 |
aispider/test.py | East196/aispider | 1 | 6624372 | # -*- coding: utf-8 -*-
import urllib
from bs4 import BeautifulSoup
from random import randint
import time
import json
import codecs
file_name = u"d:/anjuke.index.html"
# home = 'http://sz.fang.anjuke.com/loupan/all/'
# response = urllib2.urlopen(home)
# html = response.read()
#
# f = open(file_name, 'w')
# f.write(html)
# f = open(file_name, 'r')
# html = f.read().encode("utf-8")
# soup = BeautifulSoup(html, "lxml")
# for c in soup.children:
# print c.getpath()
from lxml import etree
parser = etree.HTMLParser()
root = etree.parse(file_name, parser)
# print root.xpath("/html/body/div[2]/div[2]/div[1]/div[4]/div[19]/div[1]/div[2]/span")[0].text
def fixed_path(path):
if path.endswith("]"):
return path[:-3]
return path
one_item_paths = []
metas = []
for e in root.iter():
path = root.getpath(e)
if path.find("div[19]") > 0:
one_item_paths.append((path, len(path)))
field_class = e.get("class")
if field_class is not None and e.text is not None and e.text.strip() != "":
field_class = field_class.split(" ")[-1:]
field_path = fixed_path(path)
attr_num = len(root.xpath(field_path))
field_is_list = attr_num > 1
item_string = root.xpath(field_path)[0].xpath("string()")
meta = (field_class, field_path, field_is_list)
try:
if [y[0] for y in metas].index(field_class) is -1:
metas.append(meta)
print(meta, item_string)
except ValueError:
metas.append(meta)
print(meta, item_string)
# text=root.xpath(path)[0].text
# if text is not None:
# if text.find("华润城润府")!=-1:
# print path
# if text.find("70000")!=-1:
# print path
class_ = root.xpath(one_item_paths[0][0])[0].get("class")
items_path = one_item_paths[0][0].replace("div[19]", "div") + "[@class='" + class_ + "']"
one_page_item_size = len(root.xpath(items_path))
print(items_path)
print(root.getpath(root.xpath(items_path)[0]))
print(root.getpath(root.xpath(items_path)[-1]))
print(one_page_item_size)
for meta in metas:
print(str(meta))
| # -*- coding: utf-8 -*-
import urllib
from bs4 import BeautifulSoup
from random import randint
import time
import json
import codecs
file_name = u"d:/anjuke.index.html"
# home = 'http://sz.fang.anjuke.com/loupan/all/'
# response = urllib2.urlopen(home)
# html = response.read()
#
# f = open(file_name, 'w')
# f.write(html)
# f = open(file_name, 'r')
# html = f.read().encode("utf-8")
# soup = BeautifulSoup(html, "lxml")
# for c in soup.children:
# print c.getpath()
from lxml import etree
parser = etree.HTMLParser()
root = etree.parse(file_name, parser)
# print root.xpath("/html/body/div[2]/div[2]/div[1]/div[4]/div[19]/div[1]/div[2]/span")[0].text
def fixed_path(path):
    """Strip a trailing positional predicate (e.g. ``[2]`` or ``[19]``) from an XPath.

    The original ``path[:-3]`` only handled single-digit indices
    ("div[19]" became "div["); cutting at the last ``[`` works for any
    index width and matches the old behavior for one digit.
    """
    if path.endswith("]"):
        # The matching "[" of the trailing predicate is the last one in the path.
        return path[:path.rfind("[")]
    return path
# Collect candidate field paths from one sample listing item and derive the
# generic XPath matching every listing item on the page.
one_item_paths = []
metas = []
for e in root.iter():
    path = root.getpath(e)
    # "div[19]" is an arbitrarily chosen sample item used as the template.
    if path.find("div[19]") > 0:
        one_item_paths.append((path, len(path)))
    field_class = e.get("class")
    if field_class is not None and e.text is not None and e.text.strip() != "":
        # Keep only the last CSS class token (as a one-element list).
        field_class = field_class.split(" ")[-1:]
        field_path = fixed_path(path)
        attr_num = len(root.xpath(field_path))
        # List-valued field: the index-stripped path matches several nodes.
        field_is_list = attr_num > 1
        item_string = root.xpath(field_path)[0].xpath("string()")
        meta = (field_class, field_path, field_is_list)
        try:
            # NOTE(review): list.index() never returns -1 (it raises
            # ValueError when absent), so this `is -1` test is dead code;
            # deduplication actually happens via the except branch.
            if [y[0] for y in metas].index(field_class) is -1:
                metas.append(meta)
                print(meta, item_string)
        except ValueError:
            # field_class not seen before: record it.
            metas.append(meta)
            print(meta, item_string)
# text=root.xpath(path)[0].text
# if text is not None:
#     if text.find("华润城润府")!=-1:
#         print path
#     if text.find("70000")!=-1:
#         print path
# Generalize the sampled item's path to all siblings with the same class.
class_ = root.xpath(one_item_paths[0][0])[0].get("class")
items_path = one_item_paths[0][0].replace("div[19]", "div") + "[@class='" + class_ + "']"
one_page_item_size = len(root.xpath(items_path))
print(items_path)
print(root.getpath(root.xpath(items_path)[0]))
print(root.getpath(root.xpath(items_path)[-1]))
print(one_page_item_size)
for meta in metas:
    print(str(meta))
| en | 0.323748 | # -*- coding: utf-8 -*- # home = 'http://sz.fang.anjuke.com/loupan/all/' # response = urllib2.urlopen(home) # html = response.read() # # f = open(file_name, 'w') # f.write(html) # f = open(file_name, 'r') # html = f.read().encode("utf-8") # soup = BeautifulSoup(html, "lxml") # for c in soup.children: # print c.getpath() # print root.xpath("/html/body/div[2]/div[2]/div[1]/div[4]/div[19]/div[1]/div[2]/span")[0].text # text=root.xpath(path)[0].text # if text is not None: # if text.find("华润城润府")!=-1: # print path # if text.find("70000")!=-1: # print path | 2.896704 | 3 |
unittests/test_models/test_db.py | BoroviyOrest/QuizzesAPI | 0 | 6624373 | import pytest
from bson import ObjectId
from pydantic import ValidationError
from models.db import DBModelMixin
@pytest.fixture
def id_data():
    """Return a raw ``_id`` payload accepted by ``DBModelMixin``."""
    payload = {'_id': '<KEY>'}
    return payload
def test_db_model_mixin_correct_data(id_data):
    """A valid ``_id`` string should be coerced into a bson ``ObjectId``."""
    instance = DBModelMixin(**id_data)
    assert instance.id == ObjectId(id_data['_id'])
@pytest.mark.parametrize('data, exception', [
    # Using the alias 'id' instead of the expected '_id' key is rejected.
    ({'id': '<KEY>'}, ValueError),
    # A string that is not a valid ObjectId fails pydantic validation.
    ({'_id': 'incorrect_object_id'}, ValidationError)
])
def test_db_model_mixin_incorrect_data(data, exception):
    """DBModelMixin should raise for misnamed or malformed id fields."""
    with pytest.raises(exception):
        DBModelMixin(**data)
| import pytest
from bson import ObjectId
from pydantic import ValidationError
from models.db import DBModelMixin
@pytest.fixture
def id_data():
    """Return a raw ``_id`` payload accepted by ``DBModelMixin``."""
    return {'_id': '<KEY>'}
def test_db_model_mixin_correct_data(id_data):
    """A valid ``_id`` string should be coerced into a bson ``ObjectId``."""
    model = DBModelMixin(**id_data)
    assert model.id == ObjectId(id_data['_id'])
@pytest.mark.parametrize('data, exception', [
    # Using the alias 'id' instead of the expected '_id' key is rejected.
    ({'id': '<KEY>'}, ValueError),
    # A string that is not a valid ObjectId fails pydantic validation.
    ({'_id': 'incorrect_object_id'}, ValidationError)
])
def test_db_model_mixin_incorrect_data(data, exception):
    """DBModelMixin should raise for misnamed or malformed id fields."""
    with pytest.raises(exception):
        DBModelMixin(**data)
| none | 1 | 2.456089 | 2 | |
grakel/kernels/pyramid_match.py | vishalbelsare/GraKeL | 0 | 6624374 | """The pyramid match kernel as in :cite:`nikolentzos2017matching`."""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import collections
import warnings
import numpy as np
from itertools import chain
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import eigs
from grakel.graph import Graph
from grakel.kernels import Kernel
# Python 2/3 cross-compatibility import
from six import itervalues
from six import iteritems
class PyramidMatch(Kernel):
    """Pyramid match kernel class.

    Kernel defined in :cite:`nikolentzos2017matching`

    Parameters
    ----------
    with_labels : bool, default=True
        A flag that determines if the kernel computation will consider labels.

    L : int, default=4
        Pyramid histogram level.

    d : int, default=6
        The dimension of the hypercube.

    Attributes
    ----------
    L : int
        Defines the histogram level of the pyramid.

    d : int
        The dimension of the hypercube.

    with_labels : bool
        Defines if to use labels in the calculation of the `pyramid_match`
        kernel.

    _num_labels : int
        The number of distinct labels, on the fit data.

    _labels : dict
        A dictionary of label enumeration, made from fitted data.

    """

    _graph_format = "adjacency"

    def __init__(self, n_jobs=None,
                 normalize=False,
                 verbose=False,
                 with_labels=True,
                 L=4,
                 d=6):
        """Initialise a `pyramid_match` kernel."""
        super(PyramidMatch, self).__init__(n_jobs=n_jobs,
                                           normalize=normalize,
                                           verbose=verbose)
        self.with_labels = with_labels
        self.L = L
        self.d = d
        self.initialized_.update({"d": False, "L": False, "with_labels": False})

    def initialize_(self):
        """Initialize all transformer arguments, needing initialization."""
        super(PyramidMatch, self).initialize_()
        if not self.initialized_["with_labels"]:
            if type(self.with_labels) != bool:
                raise TypeError('with labels must be a boolean variable')
            self.initialized_["with_labels"] = True

        if not self.initialized_["L"]:
            if type(self.L) is not int or self.L < 0:
                raise TypeError('L: the number of levels must be an integer '
                                'bigger equal to 0')
            self.initialized_["L"] = True

        if not self.initialized_["d"]:
            if type(self.d) is not int or self.d < 1:
                raise TypeError('d: hypercube dimension must be an '
                                'integer bigger than 1')
            self.initialized_["d"] = True

    def parse_input(self, X):
        """Parse and create features for pyramid_match kernel.

        Parameters
        ----------
        X : iterable
            For the input to pass the test, we must have:
            Each element must be an iterable with at most three features and at
            least one. The first that is obligatory is a valid graph structure
            (adjacency matrix or edge_dictionary) while the second is
            node_labels and the third edge_labels (that correspond to the given
            graph format). A valid input also consists of graph type objects.

        Returns
        -------
        H : list
            A list of lists of Histograms for all levels for each graph.

        """
        # BUGFIX: `collections.Iterable` was removed in Python 3.10; use the
        # collections.abc location, falling back for Python 2 interpreters.
        try:
            from collections.abc import Iterable
        except ImportError:  # Python 2 fallback
            from collections import Iterable
        if not isinstance(X, Iterable):
            raise TypeError('input must be an iterable\n')
        else:
            i = 0
            Us = []
            if self.with_labels:
                Ls = []
            for (idx, x) in enumerate(iter(X)):
                is_iter = isinstance(x, Iterable)
                if is_iter:
                    x = list(x)
                if is_iter and (len(x) == 0 or (len(x) >= 1 and not self.with_labels) or
                                (len(x) >= 2 and self.with_labels)):
                    if len(x) == 0:
                        warnings.warn('Ignoring empty element on index: ' + str(idx))
                        continue
                    elif not self.with_labels:
                        x = Graph(x[0], {}, {}, self._graph_format)
                    else:
                        x = Graph(x[0], x[1], {}, self._graph_format)
                elif not type(x) is Graph:
                    raise TypeError('each element of X must be either a graph object or a list with '
                                    'at least a graph like object and node labels dict \n')
                A = x.get_adjacency_matrix()
                if self.with_labels:
                    L = x.get_labels(purpose="adjacency")
                i += 1
                if A.shape[0] == 0:
                    # BUGFIX: keep the (num_vertices, embedding) tuple layout
                    # used everywhere else so _histogram_calculation can
                    # unpack the entry; n == 0 yields an empty histogram list.
                    Us.append((0, np.zeros((1, self.d))))
                else:
                    # Perform eigenvalue decomposition.
                    # Rows of matrix U correspond to vertex representations

                    # Embed vertices into the d-dimensional space
                    if A.shape[0] > self.d+1:
                        # If size of graph smaller than d, pad with zeros.
                        # BUGFIX: `np.float` was removed in NumPy 1.24; the
                        # builtin `float` is exactly what it aliased.
                        Lambda, U = eigs(csr_matrix(A, dtype=float),
                                         k=self.d, ncv=10*self.d)
                        idx = Lambda.argsort()[::-1]
                        U = U[:, idx]
                    else:
                        Lambda, U = np.linalg.eig(A)
                        idx = Lambda.argsort()[::-1]
                        U = U[:, idx]
                        U = U[:, :self.d]
                    # Replace all components by their absolute values
                    U = np.absolute(U)
                    Us.append((A.shape[0], U))
                if self.with_labels:
                    Ls.append(L)

        if i == 0:
            raise ValueError('parsed input is empty')

        if self.with_labels:
            # Map labels to values between 0 and |L|-1
            # where |L| is the number of distinct labels
            if self._method_calling in [1, 2]:
                self._num_labels = 0
                self._labels = set()
                for L in Ls:
                    self._labels |= set(itervalues(L))
                self._num_labels = len(self._labels)
                self._labels = {l: i for (i, l) in enumerate(self._labels)}
                return self._histogram_calculation(Us, Ls, self._labels)

            elif self._method_calling == 3:
                labels = set()
                for L in Ls:
                    labels |= set(itervalues(L))
                # Labels unseen during fit are enumerated after the fitted ones.
                rest_labels = labels - set(self._labels.keys())
                nouveau_labels = dict(chain(iteritems(self._labels),
                                            ((j, i) for (i, j) in enumerate(rest_labels, len(self._labels)))))
                return self._histogram_calculation(Us, Ls, nouveau_labels)
        else:
            return self._histogram_calculation(Us)

    def _histogram_calculation(self, Us, *args):
        """Calculate histograms.

        Parameters
        ----------
        Us : list
            List of tuples with the first element corresponding to the
            number of vertices of a graph and the second to it's
            corresponding to vertex embeddings on the d-dimensional space.

        Ls : list, optional
            List of labels corresponding to each graph.
            If provided the histograms are calculated with labels.

        Labels : dict, optional
            A big dictionary with enumeration of labels.

        Returns
        -------
        Hs : list
            List of histograms for each graph.

        """
        Hs = list()
        if len(args) == 0:
            for (i, (n, u)) in enumerate(Us):
                du = list()
                if n > 0:
                    for j in range(self.L):
                        # Number of cells along each dimension at level j
                        k = 2**j

                        # Determines the cells in which each vertex lies
                        # along each dimension since nodes lie in the unit
                        # hypercube in R^d
                        D = np.zeros((self.d, k))
                        T = np.floor(u*k)
                        T[np.where(T == k)] = k-1
                        for p in range(u.shape[0]):
                            if p >= n:
                                break
                            for q in range(u.shape[1]):
                                # Identify the cell into which the i-th
                                # vertex lies and increase its value by 1
                                D[q, int(T[p, q])] += 1
                        du.append(D)
                Hs.append(du)
        elif len(args) > 0:
            Ls = args[0]
            Labels = args[1]
            num_labels = len(Labels)
            for (i, ((n, u), L)) in enumerate(zip(Us, Ls)):
                du = list()
                if n > 0:
                    for j in range(self.L):
                        # Number of cells along each dimension at level j
                        k = 2**j

                        # To store the number of vertices that are assigned
                        # a specific label and lie in each of the 2^j cells
                        # of each dimension at level j
                        D = np.zeros((self.d*num_labels, k))
                        T = np.floor(u*k)
                        T[np.where(T == k)] = k-1
                        for p in range(u.shape[0]):
                            if p >= n:
                                break
                            for q in range(u.shape[1]):
                                # Identify the cell into which the i-th
                                # vertex lies and increase its value by 1
                                D[Labels[L[p]]*self.d + q, int(T[p, q])] += 1
                        du.append(D)
                Hs.append(du)

        return Hs

    def pairwise_operation(self, x, y):
        """Calculate a pairwise kernel between two elements.

        Parameters
        ----------
        x, y : dict
            Histograms as produced by `parse_input`.

        Returns
        -------
        kernel : number
            The kernel value.

        """
        k = 0
        if len(x) != 0 and len(y) != 0:
            intersec = np.zeros(self.L)
            for (p, xp, yp) in zip(range(self.L), x, y):
                # Calculate histogram intersection
                # (eq. 6 in :cite:`nikolentzos2017matching`)
                if xp.shape[0] < yp.shape[0]:
                    xpp, ypp = xp, yp[:xp.shape[0], :]
                elif yp.shape[0] < xp.shape[0]:
                    xpp, ypp = xp[:yp.shape[0], :], yp
                else:
                    xpp, ypp = xp, yp
                intersec[p] = np.sum(np.minimum(xpp, ypp))
                k += intersec[self.L-1]
                for p in range(self.L-1):
                    # Computes the new matches that occur at level p.
                    # These matches weight less than those that occur at
                    # higher levels (e.g. p+1 level)
                    k += (1.0/(2**(self.L-p-1)))*(intersec[p]-intersec[p+1])

        return k
| """The pyramid match kernel as in :cite:`nikolentzos2017matching`."""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import collections
import warnings
import numpy as np
from itertools import chain
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import eigs
from grakel.graph import Graph
from grakel.kernels import Kernel
# Python 2/3 cross-compatibility import
from six import itervalues
from six import iteritems
class PyramidMatch(Kernel):
    """Pyramid match kernel class.

    Kernel defined in :cite:`nikolentzos2017matching`

    Parameters
    ----------
    with_labels : bool, default=True
        A flag that determines if the kernel computation will consider labels.

    L : int, default=4
        Pyramid histogram level.

    d : int, default=6
        The dimension of the hypercube.

    Attributes
    ----------
    L : int
        Defines the histogram level of the pyramid.

    d : int
        The dimension of the hypercube.

    with_labels : bool
        Defines if to use labels in the calculation of the `pyramid_match`
        kernel.

    _num_labels : int
        The number of distinct labels, on the fit data.

    _labels : dict
        A dictionary of label enumeration, made from fitted data.

    """

    _graph_format = "adjacency"

    def __init__(self, n_jobs=None,
                 normalize=False,
                 verbose=False,
                 with_labels=True,
                 L=4,
                 d=6):
        """Initialise a `pyramid_match` kernel."""
        super(PyramidMatch, self).__init__(n_jobs=n_jobs,
                                           normalize=normalize,
                                           verbose=verbose)
        self.with_labels = with_labels
        self.L = L
        self.d = d
        self.initialized_.update({"d": False, "L": False, "with_labels": False})

    def initialize_(self):
        """Initialize all transformer arguments, needing initialization."""
        super(PyramidMatch, self).initialize_()
        if not self.initialized_["with_labels"]:
            if type(self.with_labels) != bool:
                raise TypeError('with labels must be a boolean variable')
            self.initialized_["with_labels"] = True

        if not self.initialized_["L"]:
            if type(self.L) is not int or self.L < 0:
                raise TypeError('L: the number of levels must be an integer '
                                'bigger equal to 0')
            self.initialized_["L"] = True

        if not self.initialized_["d"]:
            if type(self.d) is not int or self.d < 1:
                raise TypeError('d: hypercube dimension must be an '
                                'integer bigger than 1')
            self.initialized_["d"] = True

    def parse_input(self, X):
        """Parse and create features for pyramid_match kernel.

        Parameters
        ----------
        X : iterable
            For the input to pass the test, we must have:
            Each element must be an iterable with at most three features and at
            least one. The first that is obligatory is a valid graph structure
            (adjacency matrix or edge_dictionary) while the second is
            node_labels and the third edge_labels (that correspond to the given
            graph format). A valid input also consists of graph type objects.

        Returns
        -------
        H : list
            A list of lists of Histograms for all levels for each graph.

        """
        # BUGFIX: `collections.Iterable` was removed in Python 3.10; use the
        # collections.abc location, falling back for Python 2 interpreters.
        try:
            from collections.abc import Iterable
        except ImportError:  # Python 2 fallback
            from collections import Iterable
        if not isinstance(X, Iterable):
            raise TypeError('input must be an iterable\n')
        else:
            i = 0
            Us = []
            if self.with_labels:
                Ls = []
            for (idx, x) in enumerate(iter(X)):
                is_iter = isinstance(x, Iterable)
                if is_iter:
                    x = list(x)
                if is_iter and (len(x) == 0 or (len(x) >= 1 and not self.with_labels) or
                                (len(x) >= 2 and self.with_labels)):
                    if len(x) == 0:
                        warnings.warn('Ignoring empty element on index: ' + str(idx))
                        continue
                    elif not self.with_labels:
                        x = Graph(x[0], {}, {}, self._graph_format)
                    else:
                        x = Graph(x[0], x[1], {}, self._graph_format)
                elif not type(x) is Graph:
                    raise TypeError('each element of X must be either a graph object or a list with '
                                    'at least a graph like object and node labels dict \n')
                A = x.get_adjacency_matrix()
                if self.with_labels:
                    L = x.get_labels(purpose="adjacency")
                i += 1
                if A.shape[0] == 0:
                    # BUGFIX: keep the (num_vertices, embedding) tuple layout
                    # used everywhere else so _histogram_calculation can
                    # unpack the entry; n == 0 yields an empty histogram list.
                    Us.append((0, np.zeros((1, self.d))))
                else:
                    # Perform eigenvalue decomposition.
                    # Rows of matrix U correspond to vertex representations

                    # Embed vertices into the d-dimensional space
                    if A.shape[0] > self.d+1:
                        # If size of graph smaller than d, pad with zeros.
                        # BUGFIX: `np.float` was removed in NumPy 1.24; the
                        # builtin `float` is exactly what it aliased.
                        Lambda, U = eigs(csr_matrix(A, dtype=float),
                                         k=self.d, ncv=10*self.d)
                        idx = Lambda.argsort()[::-1]
                        U = U[:, idx]
                    else:
                        Lambda, U = np.linalg.eig(A)
                        idx = Lambda.argsort()[::-1]
                        U = U[:, idx]
                        U = U[:, :self.d]
                    # Replace all components by their absolute values
                    U = np.absolute(U)
                    Us.append((A.shape[0], U))
                if self.with_labels:
                    Ls.append(L)

        if i == 0:
            raise ValueError('parsed input is empty')

        if self.with_labels:
            # Map labels to values between 0 and |L|-1
            # where |L| is the number of distinct labels
            if self._method_calling in [1, 2]:
                self._num_labels = 0
                self._labels = set()
                for L in Ls:
                    self._labels |= set(itervalues(L))
                self._num_labels = len(self._labels)
                self._labels = {l: i for (i, l) in enumerate(self._labels)}
                return self._histogram_calculation(Us, Ls, self._labels)

            elif self._method_calling == 3:
                labels = set()
                for L in Ls:
                    labels |= set(itervalues(L))
                # Labels unseen during fit are enumerated after the fitted ones.
                rest_labels = labels - set(self._labels.keys())
                nouveau_labels = dict(chain(iteritems(self._labels),
                                            ((j, i) for (i, j) in enumerate(rest_labels, len(self._labels)))))
                return self._histogram_calculation(Us, Ls, nouveau_labels)
        else:
            return self._histogram_calculation(Us)

    def _histogram_calculation(self, Us, *args):
        """Calculate histograms.

        Parameters
        ----------
        Us : list
            List of tuples with the first element corresponding to the
            number of vertices of a graph and the second to it's
            corresponding to vertex embeddings on the d-dimensional space.

        Ls : list, optional
            List of labels corresponding to each graph.
            If provided the histograms are calculated with labels.

        Labels : dict, optional
            A big dictionary with enumeration of labels.

        Returns
        -------
        Hs : list
            List of histograms for each graph.

        """
        Hs = list()
        if len(args) == 0:
            for (i, (n, u)) in enumerate(Us):
                du = list()
                if n > 0:
                    for j in range(self.L):
                        # Number of cells along each dimension at level j
                        k = 2**j

                        # Determines the cells in which each vertex lies
                        # along each dimension since nodes lie in the unit
                        # hypercube in R^d
                        D = np.zeros((self.d, k))
                        T = np.floor(u*k)
                        T[np.where(T == k)] = k-1
                        for p in range(u.shape[0]):
                            if p >= n:
                                break
                            for q in range(u.shape[1]):
                                # Identify the cell into which the i-th
                                # vertex lies and increase its value by 1
                                D[q, int(T[p, q])] += 1
                        du.append(D)
                Hs.append(du)
        elif len(args) > 0:
            Ls = args[0]
            Labels = args[1]
            num_labels = len(Labels)
            for (i, ((n, u), L)) in enumerate(zip(Us, Ls)):
                du = list()
                if n > 0:
                    for j in range(self.L):
                        # Number of cells along each dimension at level j
                        k = 2**j

                        # To store the number of vertices that are assigned
                        # a specific label and lie in each of the 2^j cells
                        # of each dimension at level j
                        D = np.zeros((self.d*num_labels, k))
                        T = np.floor(u*k)
                        T[np.where(T == k)] = k-1
                        for p in range(u.shape[0]):
                            if p >= n:
                                break
                            for q in range(u.shape[1]):
                                # Identify the cell into which the i-th
                                # vertex lies and increase its value by 1
                                D[Labels[L[p]]*self.d + q, int(T[p, q])] += 1
                        du.append(D)
                Hs.append(du)

        return Hs

    def pairwise_operation(self, x, y):
        """Calculate a pairwise kernel between two elements.

        Parameters
        ----------
        x, y : dict
            Histograms as produced by `parse_input`.

        Returns
        -------
        kernel : number
            The kernel value.

        """
        k = 0
        if len(x) != 0 and len(y) != 0:
            intersec = np.zeros(self.L)
            for (p, xp, yp) in zip(range(self.L), x, y):
                # Calculate histogram intersection
                # (eq. 6 in :cite:`nikolentzos2017matching`)
                if xp.shape[0] < yp.shape[0]:
                    xpp, ypp = xp, yp[:xp.shape[0], :]
                elif yp.shape[0] < xp.shape[0]:
                    xpp, ypp = xp[:yp.shape[0], :], yp
                else:
                    xpp, ypp = xp, yp
                intersec[p] = np.sum(np.minimum(xpp, ypp))
                k += intersec[self.L-1]
                for p in range(self.L-1):
                    # Computes the new matches that occur at level p.
                    # These matches weight less than those that occur at
                    # higher levels (e.g. p+1 level)
                    k += (1.0/(2**(self.L-p-1)))*(intersec[p]-intersec[p+1])

        return k
| en | 0.783617 | The pyramid match kernel as in :cite:`nikolentzos2017matching`. # Author: <NAME> <<EMAIL>> # License: BSD 3 clause # Python 2/3 cross-compatibility import Pyramid match kernel class. Kernel defined in :cite:`nikolentzos2017matching` Parameters ---------- with_labels : bool, default=True A flag that determines if the kernel computation will consider labels. L : int, default=4 Pyramid histogram level. d : int, default=6 The dimension of the hypercube. Attributes ---------- L : int Defines the histogram level of the pyramid. d : int The dimension of the hypercube. with_labels : bool Defines if to use labels in the calculation of the `pyramid_match` kernel. _num_labels : int The number of distinct labels, on the fit data. _labels : dict A dictionary of label enumeration, made from fitted data. Initialise a `pyramid_match` kernel. Initialize all transformer arguments, needing initialization. Parse and create features for pyramid_match kernel. Parameters ---------- X : iterable For the input to pass the test, we must have: Each element must be an iterable with at most three features and at least one. The first that is obligatory is a valid graph structure (adjacency matrix or edge_dictionary) while the second is node_labels and the third edge_labels (that correspond to the given graph format). A valid input also consists of graph type objects. Returns ------- H : list A list of lists of Histograms for all levels for each graph. # Perform eigenvalue decomposition. # Rows of matrix U correspond to vertex representations # Embed vertices into the d-dimensional space # If size of graph smaller than d, pad with zeros # Replace all components by their absolute values # Map labels to values between 0 and |L|-1 # where |L| is the number of distinct labels Calculate histograms. 
Parameters ---------- Us : list List of tuples with the first element corresponding to the number of vertices of a graph and the second to it's corresponding to vertex embeddings on the d-dimensional space. Ls : list, optional List of labels corresponding to each graph. If provided the histograms are calculated with labels. Labels : dict, optional A big dictionary with enumeration of labels. Returns ------- Hs : list List of histograms for each graph. # Number of cells along each dimension at level j # Determines the cells in which each vertex lies # along each dimension since nodes lie in the unit # hypercube in R^d # Identify the cell into which the i-th # vertex lies and increase its value by 1 # Number of cells along each dimension at level j # To store the number of vertices that are assigned # a specific label and lie in each of the 2^j cells # of each dimension at level j # Identify the cell into which the i-th # vertex lies and increase its value by 1 Calculate a pairwise kernel between two elements. Parameters ---------- x, y : dict Histograms as produced by `parse_input`. Returns ------- kernel : number The kernel value. # Calculate histogram intersection # (eq. 6 in :cite:`nikolentzos2017matching`) # Computes the new matches that occur at level p. # These matches weight less than those that occur at # higher levels (e.g. p+1 level) | 2.783228 | 3 |
prompt_tuning/data/metrics_test.py | techthiyanes/prompt-tuning | 0 | 6624375 | # Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics."""
import unittest.mock as mock
from absl.testing import absltest
from prompt_tuning.data import metrics
class MetricsTest(absltest.TestCase):
  """Unit tests for the helpers in prompt_tuning.data.metrics."""

  def test_metric_with_examples(self):
    expected = "my_gold_value"
    fake_metric = mock.MagicMock(return_value=expected)
    targets = list(range(0, 100))
    predictions = list(range(100, 0, -1))
    field = "my_prediction_field"
    wrapped = [{field: p} for p in predictions]
    # The wrapper should unwrap the prediction dicts and forward the raw
    # prediction values to the underlying metric function.
    result = metrics.metric_with_examples(fake_metric, targets, wrapped, field)
    self.assertEqual(result, expected)
    fake_metric.assert_called_once_with(targets, predictions)

  @mock.patch("prompt_tuning.data.metrics.random.seed", autospec=True)
  def test_safe_sample_seeded(self, seed_mock):
    rng_seed = 100
    # Passing a seed should forward it to random.seed exactly once.
    metrics.safe_sample(12, [], seed=rng_seed)
    seed_mock.assert_called_once_with(rng_seed)

  def test_safe_sample_full(self):
    population = [1, 2, 3, 4, 5, 10]
    # A negative sample size means "take everything": all indices come back.
    self.assertEqual(metrics.safe_sample(-1, population, None),
                     range(len(population)))

  def test_safe_sample_extra(self):
    population = [4, 5, 6, 7, 8, 9, 100, -1]
    # Requesting more elements than exist falls back to the full index range.
    self.assertEqual(metrics.safe_sample(len(population) + 12, population, None),
                     range(len(population)))

  @mock.patch("prompt_tuning.data.metrics.random.sample", autospec=True)
  def test_safe_sample(self, sample_mock):
    population = list("This is my population lol")
    size = 3
    expected = "lol"
    sample_mock.return_value = expected
    # random.sample should be called over the index range with k=size.
    self.assertEqual(metrics.safe_sample(size, population, None), expected)
    sample_mock.assert_called_once_with(range(len(population)), k=size)

  def test_safe_sample_k(self):
    population = list("This is my population lol")
    size = 5
    self.assertLen(metrics.safe_sample(size, population, None), size)
# Run the absl test runner when executed directly as a script.
if __name__ == "__main__":
  absltest.main()
| # Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics."""
import unittest.mock as mock
from absl.testing import absltest
from prompt_tuning.data import metrics
class MetricsTest(absltest.TestCase):
  """Unit tests for the helpers in prompt_tuning.data.metrics."""

  def test_metric_with_examples(self):
    expected = "my_gold_value"
    fake_metric = mock.MagicMock(return_value=expected)
    targets = list(range(0, 100))
    predictions = list(range(100, 0, -1))
    field = "my_prediction_field"
    wrapped = [{field: p} for p in predictions]
    # The wrapper should unwrap the prediction dicts and forward the raw
    # prediction values to the underlying metric function.
    result = metrics.metric_with_examples(fake_metric, targets, wrapped, field)
    self.assertEqual(result, expected)
    fake_metric.assert_called_once_with(targets, predictions)

  @mock.patch("prompt_tuning.data.metrics.random.seed", autospec=True)
  def test_safe_sample_seeded(self, seed_mock):
    rng_seed = 100
    # Passing a seed should forward it to random.seed exactly once.
    metrics.safe_sample(12, [], seed=rng_seed)
    seed_mock.assert_called_once_with(rng_seed)

  def test_safe_sample_full(self):
    population = [1, 2, 3, 4, 5, 10]
    # A negative sample size means "take everything": all indices come back.
    self.assertEqual(metrics.safe_sample(-1, population, None),
                     range(len(population)))

  def test_safe_sample_extra(self):
    population = [4, 5, 6, 7, 8, 9, 100, -1]
    # Requesting more elements than exist falls back to the full index range.
    self.assertEqual(metrics.safe_sample(len(population) + 12, population, None),
                     range(len(population)))

  @mock.patch("prompt_tuning.data.metrics.random.sample", autospec=True)
  def test_safe_sample(self, sample_mock):
    population = list("This is my population lol")
    size = 3
    expected = "lol"
    sample_mock.return_value = expected
    # random.sample should be called over the index range with k=size.
    self.assertEqual(metrics.safe_sample(size, population, None), expected)
    sample_mock.assert_called_once_with(range(len(population)), k=size)

  def test_safe_sample_k(self):
    population = list("This is my population lol")
    size = 5
    self.assertLen(metrics.safe_sample(size, population, None), size)
# Run the absl test runner when executed directly as a script.
if __name__ == "__main__":
  absltest.main()
| en | 0.85137 | # Copyright 2022 Google. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for metrics. | 2.302775 | 2 |
python/mxnet/_ctypes/symbol.py | bill-teng/mxnet-test | 5 | 6624376 | <reponame>bill-teng/mxnet-test
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-arguments, global-statement
"""Symbolic configuration API."""
from __future__ import absolute_import as _abs
import ctypes
import sys
import numpy as _numpy
from ..base import _LIB
from ..base import c_array, c_str, mx_uint, py_str
from ..base import SymbolHandle, OpHandle
from ..base import check_call
from ..symbol_doc import _build_doc
from ..name import NameManager
from ..attribute import AttrScope
_symbol_cls = None
class SymbolBase(object):
    """Symbol is symbolic graph.

    Thin ctypes wrapper around a backend SymbolHandle; concrete symbol
    behavior is provided by the subclass registered via _set_symbol_class.
    """
    # Restrict instances to a single attribute: the C API handle.
    __slots__ = ["handle"]
    # pylint: disable=no-member
    def __init__(self, handle):
        """Initialize the function with handle

        Parameters
        ----------
        handle : SymbolHandle
            the handle to the underlying C++ Symbol
        """
        self.handle = handle

    def __del__(self):
        # Release the underlying C++ symbol when the wrapper is collected.
        check_call(_LIB.NNSymbolFree(self.handle))

    def _compose(self, *args, **kwargs):
        """Compose symbol on inputs.

        This call mutates the current symbol.

        Parameters
        ----------
        args:
            provide positional arguments

        kwargs:
            provide keyword arguments

        Returns
        -------
        the resulting symbol
        """
        name = kwargs.pop('name', None)

        if name:
            name = c_str(name)
        if len(args) != 0 and len(kwargs) != 0:
            raise TypeError('compose only accept input Symbols \
                either as positional or keyword arguments, not both')
        for arg in args:
            if not isinstance(arg, SymbolBase):
                raise TypeError('Compose expect `Symbol` as arguments')
        for val in kwargs.values():
            if not isinstance(val, SymbolBase):
                raise TypeError('Compose expect `Symbol` as arguments')

        num_args = len(args) + len(kwargs)
        if len(kwargs) != 0:
            # Keyword composition: pass the argument names alongside handles.
            keys = c_array(ctypes.c_char_p, [c_str(key) for key in kwargs.keys()])
            args = c_array(SymbolHandle, [s.handle for s in kwargs.values()])
        else:
            # Positional composition: the C API accepts NULL for the keys.
            keys = None
            args = c_array(SymbolHandle, [s.handle for s in args])
        check_call(_LIB.NNSymbolCompose(
            self.handle, name, num_args, keys, args))

    def _set_attr(self, **kwargs):
        """Set the attribute of the symbol.

        Parameters
        ----------
        **kwargs
            The attributes to set
        """
        # All attribute values are stringified before crossing the C boundary.
        keys = c_array(ctypes.c_char_p,
                       [c_str(key) for key in kwargs.keys()])
        vals = c_array(ctypes.c_char_p,
                       [c_str(str(val)) for val in kwargs.values()])
        num_args = mx_uint(len(kwargs))
        check_call(_LIB.MXSymbolSetAttrs(
            self.handle, num_args, keys, vals))

    def _set_handle(self, handle):
        """Set handle."""
        self.handle = handle

    def __reduce__(self):
        # Pickle support: rebuild through the registered symbol class with a
        # null handle, then restore state. __getstate__ is not defined here —
        # presumably supplied by the concrete subclass; TODO confirm.
        return (_symbol_cls, (None,), self.__getstate__())
def _set_symbol_class(cls):
"""Set the symbolic class to be cls"""
global _symbol_cls
_symbol_cls = cls
def _make_atomic_symbol_function(handle, name):
    """Create an atomic symbol function by handle and function name."""
    # Out-parameters filled in by the C API with the operator's metadata.
    real_name = ctypes.c_char_p()
    desc = ctypes.c_char_p()
    num_args = mx_uint()
    arg_names = ctypes.POINTER(ctypes.c_char_p)()
    arg_types = ctypes.POINTER(ctypes.c_char_p)()
    arg_descs = ctypes.POINTER(ctypes.c_char_p)()
    key_var_num_args = ctypes.c_char_p()
    ret_type = ctypes.c_char_p()

    check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
        handle, ctypes.byref(real_name), ctypes.byref(desc),
        ctypes.byref(num_args),
        ctypes.byref(arg_names),
        ctypes.byref(arg_types),
        ctypes.byref(arg_descs),
        ctypes.byref(key_var_num_args),
        ctypes.byref(ret_type)))
    narg = int(num_args.value)
    func_name = name
    key_var_num_args = py_str(key_var_num_args.value)
    ret_type = py_str(ret_type.value) if ret_type.value is not None else ''
    # Build a numpy-style docstring for the generated function from the
    # operator metadata returned by the backend.
    doc_str = _build_doc(func_name,
                         py_str(desc.value),
                         [py_str(arg_names[i]) for i in range(narg)],
                         [py_str(arg_types[i]) for i in range(narg)],
                         [py_str(arg_descs[i]) for i in range(narg)],
                         key_var_num_args,
                         ret_type)

    def creator(*args, **kwargs):
        """Activation Operator of Neural Net.
        The parameters listed below can be passed in as keyword arguments.

        Parameters
        ----------
        name : string, required.
            Name of the resulting symbol.

        Returns
        -------
        symbol: Symbol
            the resulting symbol
        """
        param_keys = []
        param_vals = []
        symbol_kwargs = {}

        attr = kwargs.pop('attr', None)
        kwargs.update(AttrScope.current.get(attr))
        name = kwargs.pop('name', None)
        if 'dtype' in kwargs:
            # Normalize dtype objects/aliases to their canonical string name.
            kwargs['dtype'] = _numpy.dtype(kwargs['dtype']).name
        # Variadic operators take an implicit count parameter carrying
        # len(args), unless the caller already supplied it explicitly.
        if key_var_num_args and key_var_num_args not in kwargs:
            param_keys.append(c_str(key_var_num_args))
            param_vals.append(c_str(str(len(args))))

        for k, v in kwargs.items():
            # Symbol-valued keyword arguments are composition inputs; every
            # other value is stringified as an operator parameter.
            if isinstance(v, SymbolBase):
                symbol_kwargs[k] = v
            else:
                param_keys.append(c_str(k))
                param_vals.append(c_str(str(v)))
        # create atomic symbol
        param_keys = c_array(ctypes.c_char_p, param_keys)
        param_vals = c_array(ctypes.c_char_p, param_vals)
        sym_handle = SymbolHandle()
        check_call(_LIB.MXSymbolCreateAtomicSymbol(
            handle,
            mx_uint(len(param_keys)),
            param_keys, param_vals,
            ctypes.byref(sym_handle)))

        if len(args) != 0 and len(symbol_kwargs) != 0:
            raise TypeError(
                '%s can only accept input'
                'Symbols either as positional or keyword arguments, not both' % func_name)
        s = _symbol_cls(sym_handle)
        # Auto-generate a symbol name from the op name if none was given.
        hint = func_name.lower()
        name = NameManager.current.get(name, hint)
        s._compose(*args, name=name, **symbol_kwargs)
        return s

    creator.__name__ = func_name
    creator.__doc__ = doc_str
    creator.__module__ = 'mxnet.symbol'
    return creator
def _init_symbol_module(symbol_class, root_namespace):
    """List and add all the atomic symbol functions to current module.

    Parameters
    ----------
    symbol_class : type
        Concrete Symbol class; registered globally via _set_symbol_class
        so generated creators can instantiate it.
    root_namespace : str
        Package root (e.g. "mxnet"); operators are attached to
        "<root>.symbol" or, for names starting with '_', to
        "<root>._symbol_internal".
    """
    _set_symbol_class(symbol_class)
    # Out-parameters for the C call: op count and a char** of op names.
    plist = ctypes.POINTER(ctypes.c_char_p)()
    size = ctypes.c_uint()
    check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
                                     ctypes.byref(plist)))
    op_names = []
    for i in range(size.value):
        op_names.append(py_str(plist[i]))
    module_obj = sys.modules["%s.symbol" % root_namespace]
    module_internal = sys.modules["%s._symbol_internal" % root_namespace]
    for name in op_names:
        hdl = OpHandle()
        check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
        # Build a Python wrapper for the backend operator.
        function = _make_atomic_symbol_function(hdl, name)
        # Underscore-prefixed ops are internal and kept out of the
        # public symbol namespace.
        if function.__name__.startswith('_'):
            setattr(module_internal, function.__name__, function)
        else:
            setattr(module_obj, function.__name__, function)
| # coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-arguments, global-statement
"""Symbolic configuration API."""
from __future__ import absolute_import as _abs
import ctypes
import sys
import numpy as _numpy
from ..base import _LIB
from ..base import c_array, c_str, mx_uint, py_str
from ..base import SymbolHandle, OpHandle
from ..base import check_call
from ..symbol_doc import _build_doc
from ..name import NameManager
from ..attribute import AttrScope
_symbol_cls = None
class SymbolBase(object):
    """Symbol is symbolic graph.

    Thin wrapper over a backend SymbolHandle; only the raw C handle is
    stored (enforced by __slots__) and all operations go through _LIB.
    """
    __slots__ = ["handle"]
    # pylint: disable=no-member
    def __init__(self, handle):
        """Initialize the function with handle

        Parameters
        ----------
        handle : SymbolHandle
            the handle to the underlying C++ Symbol
        """
        self.handle = handle
    def __del__(self):
        # Free the backend symbol when the wrapper is collected.
        check_call(_LIB.NNSymbolFree(self.handle))
    def _compose(self, *args, **kwargs):
        """Compose symbol on inputs.

        This call mutates the current symbol in place: the given
        Symbols become its inputs.  Inputs may be given positionally
        or by keyword, but not both.

        Parameters
        ----------
        args:
            provide positional arguments (all must be SymbolBase)
        kwargs:
            provide keyword arguments (all must be SymbolBase);
            'name' is popped out and used as the composed symbol name
        Returns
        -------
        the resulting symbol
        """
        name = kwargs.pop('name', None)
        if name:
            name = c_str(name)
        if len(args) != 0 and len(kwargs) != 0:
            raise TypeError('compose only accept input Symbols \
                either as positional or keyword arguments, not both')
        for arg in args:
            if not isinstance(arg, SymbolBase):
                raise TypeError('Compose expect `Symbol` as arguments')
        for val in kwargs.values():
            if not isinstance(val, SymbolBase):
                raise TypeError('Compose expect `Symbol` as arguments')
        num_args = len(args) + len(kwargs)
        # Keyword path sends parallel (keys, handles) arrays;
        # positional path sends keys=NULL and the handles only.
        if len(kwargs) != 0:
            keys = c_array(ctypes.c_char_p, [c_str(key) for key in kwargs.keys()])
            args = c_array(SymbolHandle, [s.handle for s in kwargs.values()])
        else:
            keys = None
            args = c_array(SymbolHandle, [s.handle for s in args])
        check_call(_LIB.NNSymbolCompose(
            self.handle, name, num_args, keys, args))
    def _set_attr(self, **kwargs):
        """Set the attribute of the symbol.

        Parameters
        ----------
        **kwargs
            The attributes to set; values are stringified with str().
        """
        keys = c_array(ctypes.c_char_p,
                       [c_str(key) for key in kwargs.keys()])
        vals = c_array(ctypes.c_char_p,
                       [c_str(str(val)) for val in kwargs.values()])
        num_args = mx_uint(len(kwargs))
        check_call(_LIB.MXSymbolSetAttrs(
            self.handle, num_args, keys, vals))
    def _set_handle(self, handle):
        """Set handle."""
        self.handle = handle
    def __reduce__(self):
        # Pickle protocol: rebuild via the registered symbol class with a
        # None handle, then restore state.  NOTE(review): __getstate__ is
        # not defined in this class -- presumably supplied by the subclass
        # registered through _set_symbol_class; confirm.
        return (_symbol_cls, (None,), self.__getstate__())
def _set_symbol_class(cls):
    """Set the symbolic class to be cls.

    Stores `cls` in the module-global `_symbol_cls`; creator functions
    produced by _make_atomic_symbol_function instantiate this class.
    """
    global _symbol_cls
    _symbol_cls = cls
def _make_atomic_symbol_function(handle, name):
    """Create an atomic symbol function by handle and function name.

    Queries the backend (MXSymbolGetAtomicSymbolInfo) for the operator's
    signature and documentation, then returns a Python wrapper that
    creates and composes the corresponding symbol.

    Parameters
    ----------
    handle : OpHandle
        Backend handle of the operator.
    name : str
        Operator name; becomes the wrapper's ``__name__``.

    Returns
    -------
    callable
        Function mimicking the operator's Python API; its ``__doc__``
        is generated from the backend metadata.
    """
    # Out-parameters filled by reference by the C call below.
    real_name = ctypes.c_char_p()
    desc = ctypes.c_char_p()
    num_args = mx_uint()
    arg_names = ctypes.POINTER(ctypes.c_char_p)()
    arg_types = ctypes.POINTER(ctypes.c_char_p)()
    arg_descs = ctypes.POINTER(ctypes.c_char_p)()
    key_var_num_args = ctypes.c_char_p()
    ret_type = ctypes.c_char_p()
    check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
        handle, ctypes.byref(real_name), ctypes.byref(desc),
        ctypes.byref(num_args),
        ctypes.byref(arg_names),
        ctypes.byref(arg_types),
        ctypes.byref(arg_descs),
        ctypes.byref(key_var_num_args),
        ctypes.byref(ret_type)))
    narg = int(num_args.value)
    func_name = name
    # Name of the keyword carrying the number of variadic inputs
    # (empty string for operators with a fixed argument list).
    key_var_num_args = py_str(key_var_num_args.value)
    ret_type = py_str(ret_type.value) if ret_type.value is not None else ''
    doc_str = _build_doc(func_name,
                         py_str(desc.value),
                         [py_str(arg_names[i]) for i in range(narg)],
                         [py_str(arg_types[i]) for i in range(narg)],
                         [py_str(arg_descs[i]) for i in range(narg)],
                         key_var_num_args,
                         ret_type)
    def creator(*args, **kwargs):
        """Create a new symbol for this operator.

        Placeholder docstring: it is replaced below with the generated
        operator documentation (``creator.__doc__ = doc_str``).
        """
        param_keys = []
        param_vals = []
        symbol_kwargs = {}
        # Merge attributes from the ambient AttrScope into kwargs.
        attr = kwargs.pop('attr', None)
        kwargs.update(AttrScope.current.get(attr))
        name = kwargs.pop('name', None)
        if 'dtype' in kwargs:
            # Normalize any dtype spec to its canonical numpy name.
            kwargs['dtype'] = _numpy.dtype(kwargs['dtype']).name
        if key_var_num_args and key_var_num_args not in kwargs:
            # Variadic operator: pass the number of positional inputs.
            param_keys.append(c_str(key_var_num_args))
            param_vals.append(c_str(str(len(args))))
        # Split keyword args into symbol inputs and scalar parameters.
        for k, v in kwargs.items():
            if isinstance(v, SymbolBase):
                symbol_kwargs[k] = v
            else:
                param_keys.append(c_str(k))
                param_vals.append(c_str(str(v)))
        # create atomic symbol
        param_keys = c_array(ctypes.c_char_p, param_keys)
        param_vals = c_array(ctypes.c_char_p, param_vals)
        sym_handle = SymbolHandle()
        check_call(_LIB.MXSymbolCreateAtomicSymbol(
            handle,
            mx_uint(len(param_keys)),
            param_keys, param_vals,
            ctypes.byref(sym_handle)))
        if len(args) != 0 and len(symbol_kwargs) != 0:
            # Bug fix: the original adjacent string literals were joined
            # without a separating space ("...inputSymbols...").
            raise TypeError(
                '%s can only accept input '
                'Symbols either as positional or keyword arguments, not both' % func_name)
        s = _symbol_cls(sym_handle)
        hint = func_name.lower()
        name = NameManager.current.get(name, hint)
        s._compose(*args, name=name, **symbol_kwargs)
        return s
    creator.__name__ = func_name
    creator.__doc__ = doc_str
    creator.__module__ = 'mxnet.symbol'
    return creator
def _init_symbol_module(symbol_class, root_namespace):
    """List and add all the atomic symbol functions to current module.

    Bug fix: the final ``setattr`` line had dataset/extraction residue
    ("| en | 0.516885 | ...") fused onto it, which made the statement
    syntactically invalid; the clean statement is restored.

    Parameters
    ----------
    symbol_class : type
        Concrete Symbol class registered via _set_symbol_class.
    root_namespace : str
        Package root; public operators go to "<root>.symbol",
        underscore-prefixed ones to "<root>._symbol_internal".
    """
    _set_symbol_class(symbol_class)
    # Out-parameters for the C call: op count and a char** of names.
    plist = ctypes.POINTER(ctypes.c_char_p)()
    size = ctypes.c_uint()
    check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
                                     ctypes.byref(plist)))
    op_names = []
    for i in range(size.value):
        op_names.append(py_str(plist[i]))
    module_obj = sys.modules["%s.symbol" % root_namespace]
    module_internal = sys.modules["%s._symbol_internal" % root_namespace]
    for name in op_names:
        hdl = OpHandle()
        check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
        function = _make_atomic_symbol_function(hdl, name)
        if function.__name__.startswith('_'):
            setattr(module_internal, function.__name__, function)
        else:
            setattr(module_obj, function.__name__, function)
creatorRSA.py | munozariasjm/RSA_toyModel | 0 | 6624377 | <gh_stars>0
import numpy as np
import math
def isPrime(k):
    """Return True if k is a prime number, False otherwise.

    Bug fix: the original treated every k < 2 (0, 1, negatives) as
    prime because ``range(2, k)`` is empty for those values.  Trial
    division now stops at sqrt(k) instead of scanning all of [2, k).
    """
    if k < 2:
        return False
    # A composite number has a divisor no larger than its square root.
    for z in range(2, int(k ** 0.5) + 1):
        if k % z == 0:
            return False
    return True
sec=100
def setSec(secs):
    """Set the module-level security parameter ``sec``.

    Bug fix: the original body ``sec = secs`` bound a *local* variable,
    leaving the module-level ``sec`` untouched (a silent no-op).  The
    ``global`` declaration makes the assignment take effect.
    """
    global sec
    sec = secs
def generarVals(numerito):
    """Return the toy RSA parameters (n, e, phi).

    With the fixed primes p=11, q=17 this always yields (187, 3, 160).
    ``numerito`` is kept for interface compatibility; it was only used
    by the (disabled) random prime selection shown below.

    Fixes: removed the dead ``e = 65537`` assignment and the dead loop
    that drew a random exponent and then unconditionally discarded it
    in favor of ``e = 3`` -- its only effect was perturbing the global
    NumPy RNG state.
    """
    # Random prime selection was disabled in the original source:
    # primesP = [i for i in range(13, numerito) if isPrime(i) and i % e != 1]
    # p = np.random.choice(primesP)
    # primesQ = [i for i in range(17, numerito) if isPrime(i) and i % e != 1]
    # q = np.random.choice(primesQ)
    p = 11
    q = 17
    phi = (p - 1) * (q - 1)  # Euler's totient of n = p*q
    n = p * q
    e = 3  # fixed public exponent, coprime to phi
    return n, e, phi
#print("public Key es: {}{}".format(n,e))
def getPUBLIC(sec=100):
    """Return the RSA public key pair (n, e) from generarVals(sec)."""
    modulus, exponent, _phi = generarVals(sec)
    return int(modulus), int(exponent)
i=np.random.randint(1,4)
#print("private Key es: {} ".format(int(d)))
def getPRIVATE(sec=100):
    """Return the RSA private exponent d for the generarVals parameters.

    Fix: uses integer floor division instead of the original float
    division ``((2*phi)+1)/e``, which loses precision for large phi.
    For the fixed toy parameters (e=3, phi=160) both give d=107, and
    3*107 = 321 == 1 (mod 160).

    NOTE(review): this closed form is only the modular inverse of e
    when e divides 2*phi + 1 exactly; a general implementation needs
    the extended Euclidean algorithm.
    """
    _n, e, phi = generarVals(sec)
    d = (2 * phi + 1) // e
    return int(d)
| import numpy as np
import math
def isPrime(k):
    """Primality test by trial division up to sqrt(k).

    Bug fix: values below 2 (0, 1, negatives) were reported as prime
    by the original because ``range(2, k)`` is empty for them.
    """
    if k < 2:
        return False
    for z in range(2, int(k ** 0.5) + 1):
        if k % z == 0:
            return False
    return True
sec=100
def setSec(secs):
    """Update the module-global ``sec`` (bug fix: the original wrote a
    local variable, so the call had no effect)."""
    global sec
    sec = secs
def generarVals(numerito):
    """Return the toy RSA parameters (n, e, phi) = (187, 3, 160).

    ``numerito`` is retained for interface compatibility (it bounded
    the disabled random prime search).  Dead code removed: the original
    set ``e = 65537``, then drew a random ``e`` in a loop, and finally
    overwrote both with ``e = 3`` -- the draw only mutated the global
    NumPy RNG state.
    """
    # Disabled random prime selection from the original source:
    # primesP = [i for i in range(13, numerito) if isPrime(i) and i % e != 1]
    # p = np.random.choice(primesP)
    # primesQ = [i for i in range(17, numerito) if isPrime(i) and i % e != 1]
    # q = np.random.choice(primesQ)
    p = 11
    q = 17
    phi = (p - 1) * (q - 1)
    n = p * q
    e = 3
    return n, e, phi
#print("public Key es: {}{}".format(n,e))
def getPUBLIC(sec=100):
    """Public key accessor: the (n, e) pair from generarVals(sec)."""
    n, e, _unused_phi = generarVals(sec)
    return int(n), int(e)
i=np.random.randint(1,4)
#print("private Key es: {} ".format(int(d)))
def getPRIVATE(sec=100):
    """Return the RSA private exponent d.

    Fixes: (1) the trailing dataset residue ("| en | 0.13215 | ...")
    fused onto the return statement is removed; (2) integer floor
    division replaces float division, avoiding precision loss for
    large phi while giving the same d=107 for e=3, phi=160.

    NOTE(review): d = (2*phi + 1) // e is only e's modular inverse when
    e divides 2*phi + 1; general e requires extended Euclid.
    """
    _n, e, phi = generarVals(sec)
    d = (2 * phi + 1) // e
    return int(d)
examples/kubeflow/main.py | rbrishabh/fairing | 0 | 6624378 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
INPUT_DATA_DIR = '/tmp/tensorflow/mnist/input_data/'
MAX_STEPS = 2000
BATCH_SIZE = 100
LEARNING_RATE = 0.3
HIDDEN_1 = 128
HIDDEN_2 = 32
# HACK: Ideally we would want to have a unique subpath for each instance of the job,
# but since we can't, we are instead appending HOSTNAME to the logdir
LOG_DIR = os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
'tensorflow/mnist/logs/fully_connected_feed/', os.getenv('HOSTNAME', ''))
class TensorflowModel():
    """MNIST classifier trained with a TensorFlow 1.x feed-dict loop.

    Hyperparameters come from the module-level constants
    (BATCH_SIZE, LEARNING_RATE, HIDDEN_1, HIDDEN_2, MAX_STEPS).
    """
    def train(self, **kwargs): #pylint:disable=unused-argument
        """Build the graph and run MAX_STEPS steps of training.

        Side effects: reads/downloads MNIST data under INPUT_DATA_DIR,
        writes TensorBoard summaries to LOG_DIR, prints the loss every
        100 steps.  ``**kwargs`` is accepted but ignored (the fairing
        entry point may pass extras).
        """
        tf.logging.set_verbosity(tf.logging.ERROR)
        self.data_sets = input_data.read_data_sets(INPUT_DATA_DIR)
        # Feed-dict placeholders for one batch of flattened images/labels.
        self.images_placeholder = tf.placeholder(
            tf.float32, shape=(BATCH_SIZE, mnist.IMAGE_PIXELS))
        self.labels_placeholder = tf.placeholder(tf.int32, shape=(BATCH_SIZE))
        # Two-hidden-layer network from the TF MNIST tutorial helpers.
        logits = mnist.inference(self.images_placeholder,
                                 HIDDEN_1,
                                 HIDDEN_2)
        self.loss = mnist.loss(logits, self.labels_placeholder)
        self.train_op = mnist.training(self.loss, LEARNING_RATE)
        self.summary = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.summary_writer = tf.summary.FileWriter(LOG_DIR, self.sess.graph)
        self.sess.run(init)
        data_set = self.data_sets.train
        for step in xrange(MAX_STEPS):
            # next_batch(..., False): do not use the fake-data path.
            images_feed, labels_feed = data_set.next_batch(BATCH_SIZE, False)
            feed_dict = {
                self.images_placeholder: images_feed,
                self.labels_placeholder: labels_feed,
            }
            _, loss_value = self.sess.run([self.train_op, self.loss],
                                          feed_dict=feed_dict)
            if step % 100 == 0:
                # Periodic progress report and TensorBoard summary flush.
                print("At step {}, loss = {}".format(step, loss_value))
                summary_str = self.sess.run(self.summary, feed_dict=feed_dict)
                self.summary_writer.add_summary(summary_str, step)
                self.summary_writer.flush()
if __name__ == '__main__':
    # When FAIRING_RUNTIME is unset we are on the developer machine:
    # configure Kubeflow Fairing to package this file into a Docker
    # image and submit it.  Presumably the variable is set inside the
    # built container so the else-branch runs the actual training --
    # confirm against the fairing docs.
    if os.getenv('FAIRING_RUNTIME', None) is None:
        import fairing
        fairing.config.set_preprocessor('python', input_files=[__file__])
        # '<your-registry-name>' is a placeholder that must be replaced
        # with a real container registry before running.
        fairing.config.set_builder(name='docker', registry='<your-registry-name>',
                                   base_image='tensorflow/tensorflow:1.13.1-py3')
        fairing.config.run()
    else:
        remote_train = TensorflowModel()
        remote_train.train()
| # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
INPUT_DATA_DIR = '/tmp/tensorflow/mnist/input_data/'
MAX_STEPS = 2000
BATCH_SIZE = 100
LEARNING_RATE = 0.3
HIDDEN_1 = 128
HIDDEN_2 = 32
# HACK: Ideally we would want to have a unique subpath for each instance of the job,
# but since we can't, we are instead appending HOSTNAME to the logdir
LOG_DIR = os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
'tensorflow/mnist/logs/fully_connected_feed/', os.getenv('HOSTNAME', ''))
class TensorflowModel():
def train(self, **kwargs): #pylint:disable=unused-argument
tf.logging.set_verbosity(tf.logging.ERROR)
self.data_sets = input_data.read_data_sets(INPUT_DATA_DIR)
self.images_placeholder = tf.placeholder(
tf.float32, shape=(BATCH_SIZE, mnist.IMAGE_PIXELS))
self.labels_placeholder = tf.placeholder(tf.int32, shape=(BATCH_SIZE))
logits = mnist.inference(self.images_placeholder,
HIDDEN_1,
HIDDEN_2)
self.loss = mnist.loss(logits, self.labels_placeholder)
self.train_op = mnist.training(self.loss, LEARNING_RATE)
self.summary = tf.summary.merge_all()
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.summary_writer = tf.summary.FileWriter(LOG_DIR, self.sess.graph)
self.sess.run(init)
data_set = self.data_sets.train
for step in xrange(MAX_STEPS):
images_feed, labels_feed = data_set.next_batch(BATCH_SIZE, False)
feed_dict = {
self.images_placeholder: images_feed,
self.labels_placeholder: labels_feed,
}
_, loss_value = self.sess.run([self.train_op, self.loss],
feed_dict=feed_dict)
if step % 100 == 0:
print("At step {}, loss = {}".format(step, loss_value))
summary_str = self.sess.run(self.summary, feed_dict=feed_dict)
self.summary_writer.add_summary(summary_str, step)
self.summary_writer.flush()
if __name__ == '__main__':
if os.getenv('FAIRING_RUNTIME', None) is None:
import fairing
fairing.config.set_preprocessor('python', input_files=[__file__])
fairing.config.set_builder(name='docker', registry='<your-registry-name>',
base_image='tensorflow/tensorflow:1.13.1-py3')
fairing.config.run()
else:
remote_train = TensorflowModel()
remote_train.train()
| en | 0.834099 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Trains and Evaluates the MNIST network using a feed dictionary. # pylint: disable=redefined-builtin # HACK: Ideally we would want to have a unique subpath for each instance of the job, # but since we can't, we are instead appending HOSTNAME to the logdir #pylint:disable=unused-argument | 2.275605 | 2 |
_test_projects/unittests/index.py | oren0e/cob | 2 | 6624379 | # cob: type=views mountpoint=/index
from cob import route
from . import mymodels
from flask import jsonify
@route('/list_models')
def get_all_models():
    """Return a JSON array of {'id': ...} objects, one per Person row."""
    return jsonify([{'id': p.id} for p in mymodels.Person.query.all()])
| # cob: type=views mountpoint=/index
from cob import route
from . import mymodels
from flask import jsonify
@route('/list_models')
def get_all_models():
    """GET /index/list_models: JSON list of all Person ids."""
    return jsonify([{'id': p.id} for p in mymodels.Person.query.all()])
| en | 0.727207 | # cob: type=views mountpoint=/index | 2.099873 | 2 |
tasks.py | slekwati/dotfiles-1 | 0 | 6624380 | #!/usr/bin/env python3
from invoke import task
import sys
from typing import List
from deploy_nixos import DeployHost, DeployGroup
def deploy_nixos(hosts: List[DeployHost]) -> None:
    """
    Deploy the local NixOS configuration to all given hosts in parallel.

    For each host: rsync the working tree (excluding git-ignored files
    and .git/) to /etc/nixos on the host, then run `nixos-rebuild
    switch` there.  A host's `meta` may carry `flake_attr` (flake output
    to build, appended as '#attr') and `target_host` (machine to
    activate on, default "localhost").
    """
    g = DeployGroup(hosts)
    def deploy(h: DeployHost) -> None:
        # `git ls-files --exclude-standard -oi --directory` emits the
        # ignored paths, so local build artifacts are not transferred.
        h.run_local(
            f"rsync --exclude=`git ls-files --exclude-standard -oi --directory` --exclude='.git/' -vaF --delete -e ssh . {h.user}@{h.host}:/etc/nixos",
        )
        flake_path = "/etc/nixos"
        flake_attr = h.meta.get("flake_attr")
        if flake_attr:
            flake_path += "#" + flake_attr
        target_host = h.meta.get("target_host", "localhost")
        h.run(
            f"nixos-rebuild switch --build-host localhost --target-host {target_host} --flake {flake_path}"
        )
    g.run_function(deploy)
@task
def deploy(c):
    """
    Deploy to eve, eva and localhost.

    The third entry connects to eve.r but, via its meta, builds the
    `eva` flake output and activates it on eva.r (agent forwarding
    enabled for the onward hop).
    """
    deploy_nixos(
        [
            DeployHost("eve.r"),
            DeployHost("localhost"),
            DeployHost(
                "eve.r",
                forward_agent=True,
                command_prefix="eva.r",
                meta=dict(target_host="eva.r", flake_attr="eva"),
            ),
        ]
    )
@task
def deploy_bernie(c):
    """
    Deploy the configuration to bernie.r (default flake output).
    """
    deploy_nixos([DeployHost("bernie.r")])
@task
def deploy_matchbox(c):
    """
    Deploy to matchbox: run from localhost, build the `matchbox` flake
    output and activate it on matchbox.r.
    """
    deploy_nixos(
        [
            DeployHost(
                "localhost",
                command_prefix="eva.r",
                meta=dict(target_host="matchbox.r", flake_attr="matchbox"),
            )
        ]
    )
@task
def deploy_rock(c):
    """
    Deploy to rock: build the `rock` flake output on localhost and
    activate it on rock.r.  (The original docstring wrongly said
    "matchbox".)
    """
    deploy_nixos(
        [DeployHost("localhost", meta=dict(target_host="rock.r", flake_attr="rock"))]
    )
@task
def deploy_dotfiles(c):
    """
    Deploy the home-manager dotfiles on localhost (desktop) and eve.r,
    in parallel: pull/symlink via homeshick, build the host's
    hmConfigurations activation script, and run it as user joerg.
    """
    hosts = [
        DeployHost("localhost", meta=dict(flake_attr="desktop")),
        DeployHost("eve.r", meta=dict(flake_attr="eve")),
    ]
    g = DeployGroup(hosts)
    def deploy_homemanager(host: DeployHost) -> None:
        # Heredoc executed by zsh as user joerg on the target; the
        # flake attribute selects the per-host home-manager config.
        host.run(
            f"""sudo -u joerg zsh <<'EOF'
cd $HOME
source $HOME/.zshrc
homeshick pull
homeshick symlink
homeshick cd dotfiles
nix build --out-link $HOME/.hm-activate ".#hmConfigurations.{host.meta["flake_attr"]}.activation-script"
$HOME/.hm-activate/activate
EOF"""
        )
    g.run_function(deploy_homemanager)
def wait_for_port(host: str, port: int, shutdown: bool = False) -> None:
    """Block until host:port becomes reachable, printing progress dots.

    With shutdown=True the logic is inverted: block until the port
    stops accepting connections (used while waiting for a reboot).

    Cleanups vs. the original: the unused `as ex` binding is dropped,
    the one-line `import socket, time` is split per PEP 8, and the
    duplicated sleep+dot code is factored into a helper.
    """
    import socket
    import time

    def _tick(delay: float) -> None:
        # Shared progress indicator: pause, then emit one dot.
        time.sleep(delay)
        sys.stdout.write(".")
        sys.stdout.flush()

    while True:
        try:
            with socket.create_connection((host, port), timeout=1):
                if not shutdown:
                    break  # port is open: startup wait is over
                _tick(1)  # still up: poll slowly until it goes away
        except OSError:
            if shutdown:
                break  # port is closed: shutdown wait is over
            _tick(0.01)  # not up yet: retry quickly
@task
def reboot(c, hosts=""):
    """
    Reboot hosts. example usage: fab --hosts clara.r,donna.r reboot

    `hosts` is a comma-separated list.  Each host is rebooted in turn;
    we block until its port (h.port, presumably SSH -- confirm in
    DeployHost) goes down and then comes back up.
    """
    deploy_hosts = [DeployHost(h) for h in hosts.split(",")]
    for h in deploy_hosts:
        g = DeployGroup([h])
        # Background the reboot so the remote command returns before
        # the connection is torn down.
        g.run("reboot &")
        print(f"Wait for {h.host} to shutdown", end="")
        sys.stdout.flush()
        wait_for_port(h.host, h.port, shutdown=True)
        print("")
        print(f"Wait for {h.host} to start", end="")
        sys.stdout.flush()
        wait_for_port(h.host, h.port)
        print("")
@task
def cleanup_gcroots(c, hosts=""):
    """
    Remove automatic Nix GC roots and restart nix-gc on each host in
    the comma-separated `hosts` list, allowing the store paths they
    pinned to be garbage-collected.
    """
    deploy_hosts = [DeployHost(h) for h in hosts.split(",")]
    for h in deploy_hosts:
        g = DeployGroup([h])
        # NOTE(review): `find -type s` matches *sockets*; gc-roots under
        # /nix/var/nix/gcroots/auto are symlinks (`-type l`), so this
        # command likely deletes nothing -- confirm the intended type.
        g.run("find /nix/var/nix/gcroots/auto -type s -delete")
        g.run("systemctl restart nix-gc")
| #!/usr/bin/env python3
from invoke import task
import sys
from typing import List
from deploy_nixos import DeployHost, DeployGroup
def deploy_nixos(hosts: List[DeployHost]) -> None:
"""
Deploy to all hosts in parallel
"""
g = DeployGroup(hosts)
def deploy(h: DeployHost) -> None:
h.run_local(
f"rsync --exclude=`git ls-files --exclude-standard -oi --directory` --exclude='.git/' -vaF --delete -e ssh . {h.user}@{h.host}:/etc/nixos",
)
flake_path = "/etc/nixos"
flake_attr = h.meta.get("flake_attr")
if flake_attr:
flake_path += "#" + flake_attr
target_host = h.meta.get("target_host", "localhost")
h.run(
f"nixos-rebuild switch --build-host localhost --target-host {target_host} --flake {flake_path}"
)
g.run_function(deploy)
@task
def deploy(c):
"""
Deploy to eve, eva and localhost
"""
deploy_nixos(
[
DeployHost("eve.r"),
DeployHost("localhost"),
DeployHost(
"eve.r",
forward_agent=True,
command_prefix="eva.r",
meta=dict(target_host="eva.r", flake_attr="eva"),
),
]
)
@task
def deploy_bernie(c):
"""
Deploy to bernie
"""
deploy_nixos([DeployHost("bernie.r")])
@task
def deploy_matchbox(c):
"""
Deploy to matchbox
"""
deploy_nixos(
[
DeployHost(
"localhost",
command_prefix="eva.r",
meta=dict(target_host="matchbox.r", flake_attr="matchbox"),
)
]
)
@task
def deploy_rock(c):
"""
Deploy to matchbox
"""
deploy_nixos(
[DeployHost("localhost", meta=dict(target_host="rock.r", flake_attr="rock"))]
)
@task
def deploy_dotfiles(c):
"""
Deploy to dotfiles
"""
hosts = [
DeployHost("localhost", meta=dict(flake_attr="desktop")),
DeployHost("eve.r", meta=dict(flake_attr="eve")),
]
g = DeployGroup(hosts)
def deploy_homemanager(host: DeployHost) -> None:
host.run(
f"""sudo -u joerg zsh <<'EOF'
cd $HOME
source $HOME/.zshrc
homeshick pull
homeshick symlink
homeshick cd dotfiles
nix build --out-link $HOME/.hm-activate ".#hmConfigurations.{host.meta["flake_attr"]}.activation-script"
$HOME/.hm-activate/activate
EOF"""
)
g.run_function(deploy_homemanager)
def wait_for_port(host: str, port: int, shutdown: bool = False) -> None:
import socket, time
while True:
try:
with socket.create_connection((host, port), timeout=1):
if shutdown:
time.sleep(1)
sys.stdout.write(".")
sys.stdout.flush()
else:
break
except OSError as ex:
if shutdown:
break
else:
time.sleep(0.01)
sys.stdout.write(".")
sys.stdout.flush()
@task
def reboot(c, hosts=""):
"""
Reboot hosts. example usage: fab --hosts clara.r,donna.r reboot
"""
deploy_hosts = [DeployHost(h) for h in hosts.split(",")]
for h in deploy_hosts:
g = DeployGroup([h])
g.run("reboot &")
print(f"Wait for {h.host} to shutdown", end="")
sys.stdout.flush()
wait_for_port(h.host, h.port, shutdown=True)
print("")
print(f"Wait for {h.host} to start", end="")
sys.stdout.flush()
wait_for_port(h.host, h.port)
print("")
@task
def cleanup_gcroots(c, hosts=""):
deploy_hosts = [DeployHost(h) for h in hosts.split(",")]
for h in deploy_hosts:
g = DeployGroup([h])
g.run("find /nix/var/nix/gcroots/auto -type s -delete")
g.run("systemctl restart nix-gc")
| en | 0.568665 | #!/usr/bin/env python3 Deploy to all hosts in parallel Deploy to eve, eva and localhost Deploy to bernie Deploy to matchbox Deploy to matchbox Deploy to dotfiles sudo -u joerg zsh <<'EOF' cd $HOME source $HOME/.zshrc homeshick pull homeshick symlink homeshick cd dotfiles nix build --out-link $HOME/.hm-activate ".#hmConfigurations.{host.meta["flake_attr"]}.activation-script" $HOME/.hm-activate/activate EOF Reboot hosts. example usage: fab --hosts clara.r,donna.r reboot | 2.026087 | 2 |
ledgerwallet/proto/listApps_pb2.py | Vedrillan/ledgerctl | 26 | 6624381 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: listApps.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='listApps.proto',
package='listapps',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0elistApps.proto\x12\x08listapps\"F\n\x03\x41pp\x12\r\n\x05\x66lags\x18\x01 \x01(\r\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\x12\x14\n\x0chashCodeData\x18\x03 \x01(\x0c\x12\x0c\n\x04name\x18\x04 \x01(\t\"&\n\x07\x41ppList\x12\x1b\n\x04list\x18\x01 \x03(\x0b\x32\r.listapps.Appb\x06proto3')
)
_APP = _descriptor.Descriptor(
name='App',
full_name='listapps.App',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='flags', full_name='listapps.App.flags', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hash', full_name='listapps.App.hash', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hashCodeData', full_name='listapps.App.hashCodeData', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='listapps.App.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=98,
)
_APPLIST = _descriptor.Descriptor(
name='AppList',
full_name='listapps.AppList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='list', full_name='listapps.AppList.list', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=138,
)
_APPLIST.fields_by_name['list'].message_type = _APP
DESCRIPTOR.message_types_by_name['App'] = _APP
DESCRIPTOR.message_types_by_name['AppList'] = _APPLIST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
App = _reflection.GeneratedProtocolMessageType('App', (_message.Message,), dict(
DESCRIPTOR = _APP,
__module__ = 'listApps_pb2'
# @@protoc_insertion_point(class_scope:listapps.App)
))
_sym_db.RegisterMessage(App)
AppList = _reflection.GeneratedProtocolMessageType('AppList', (_message.Message,), dict(
DESCRIPTOR = _APPLIST,
__module__ = 'listApps_pb2'
# @@protoc_insertion_point(class_scope:listapps.AppList)
))
_sym_db.RegisterMessage(AppList)
# @@protoc_insertion_point(module_scope)
| # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: listApps.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='listApps.proto',
package='listapps',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0elistApps.proto\x12\x08listapps\"F\n\x03\x41pp\x12\r\n\x05\x66lags\x18\x01 \x01(\r\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\x12\x14\n\x0chashCodeData\x18\x03 \x01(\x0c\x12\x0c\n\x04name\x18\x04 \x01(\t\"&\n\x07\x41ppList\x12\x1b\n\x04list\x18\x01 \x03(\x0b\x32\r.listapps.Appb\x06proto3')
)
_APP = _descriptor.Descriptor(
name='App',
full_name='listapps.App',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='flags', full_name='listapps.App.flags', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hash', full_name='listapps.App.hash', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hashCodeData', full_name='listapps.App.hashCodeData', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='listapps.App.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=98,
)
_APPLIST = _descriptor.Descriptor(
name='AppList',
full_name='listapps.AppList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='list', full_name='listapps.AppList.list', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=138,
)
_APPLIST.fields_by_name['list'].message_type = _APP
DESCRIPTOR.message_types_by_name['App'] = _APP
DESCRIPTOR.message_types_by_name['AppList'] = _APPLIST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
App = _reflection.GeneratedProtocolMessageType('App', (_message.Message,), dict(
DESCRIPTOR = _APP,
__module__ = 'listApps_pb2'
# @@protoc_insertion_point(class_scope:listapps.App)
))
_sym_db.RegisterMessage(App)
AppList = _reflection.GeneratedProtocolMessageType('AppList', (_message.Message,), dict(
DESCRIPTOR = _APPLIST,
__module__ = 'listApps_pb2'
# @@protoc_insertion_point(class_scope:listapps.AppList)
))
_sym_db.RegisterMessage(AppList)
# @@protoc_insertion_point(module_scope)
| en | 0.505465 | # Generated by the protocol buffer compiler. DO NOT EDIT! # source: listApps.proto # @@protoc_insertion_point(imports) # @@protoc_insertion_point(class_scope:listapps.App) # @@protoc_insertion_point(class_scope:listapps.AppList) # @@protoc_insertion_point(module_scope) | 1.167686 | 1 |
SVS/model/archive/preprocessing/kaldi_data.py | PeterGuoRuc/SVS_system | 78 | 6624382 | """Copyright [2019] [<NAME>].
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright 2019 Hitachi, Ltd. (author: <NAME>)
# Licensed under the MIT license.
# This library provides utilities for kaldi-style data directory.
from __future__ import print_function
from functools import lru_cache
import io
import numpy as np
import os
import soundfile as sf
import subprocess
import sys
def load_segments(segments_file):
    """load_segments.

    Parse a kaldi ``segments`` file into a numpy structured array with
    fields (utt, rec, st, et); return None when the file is absent.
    """
    if not os.path.exists(segments_file):
        return None
    seg_dtype = [("utt", "object"), ("rec", "object"), ("st", "f"), ("et", "f")]
    # ndmin=1 keeps a single-line file as a length-1 array, not a scalar.
    return np.loadtxt(segments_file, dtype=seg_dtype, ndmin=1)
def load_segments_hash(segments_file):
    """load_segments_hash.

    Map each utterance id to a (rec, start, end) tuple read from a kaldi
    ``segments`` file; return None when the file is absent.
    """
    if not os.path.exists(segments_file):
        return None
    segments = {}
    with open(segments_file) as f:
        for line in f:
            utt, rec, st, et = line.strip().split()
            segments[utt] = (rec, float(st), float(et))
    return segments
def load_segments_rechash(segments_file):
    """load_segments_rechash.

    Group ``segments`` entries by recording id: rec -> list of
    {utt, st, et} dicts; return None when the file is absent.
    """
    if not os.path.exists(segments_file):
        return None
    by_rec = {}
    with open(segments_file) as f:
        for line in f:
            utt, rec, st, et = line.strip().split()
            by_rec.setdefault(rec, []).append(
                {"utt": utt, "st": float(st), "et": float(et)})
    return by_rec
def load_wav_scp(wav_scp_file):
    """load_wav_scp.

    Map recording id -> wav rxfilename (a path or a piped command).
    """
    wavs = {}
    with open(wav_scp_file) as f:
        for line in f:
            # Split only once: the rxfilename itself may contain spaces.
            rec, rxfile = line.strip().split(None, 1)
            wavs[rec] = rxfile
    return wavs
@lru_cache(maxsize=1)
def load_wav(wav_rxfilename, start=0, end=None):
    """Read audio file and return data in numpy.

    float32 array. "lru_cache" holds recently loaded audio so that it can be
    called many times on the same audio file.
    OPTIMIZE: controls lru_cache size for random access,
    considering memory size

    Args:
        wav_rxfilename: kaldi-style extended filename -- a plain path,
            "-" for stdin, or a shell pipeline ending in "|".
        start: first sample index to keep.
        end: one-past-last sample index, or None to read to the end.
    Returns:
        (data, samplerate) as produced by soundfile.
    """
    if wav_rxfilename.endswith("|"):
        # input piped command: run it through the shell and buffer its
        # stdout fully before decoding (pipes cannot seek).
        p = subprocess.Popen(wav_rxfilename[:-1], shell=True, stdout=subprocess.PIPE)
        data, samplerate = sf.read(io.BytesIO(p.stdout.read()), dtype="float32")
        # cannot seek
        data = data[start:end]
    elif wav_rxfilename == "-":
        # stdin
        data, samplerate = sf.read(sys.stdin, dtype="float32")
        # cannot seek
        data = data[start:end]
    else:
        # normal wav file: let soundfile seek to the requested sample range.
        data, samplerate = sf.read(wav_rxfilename, start=start, stop=end)
    return data, samplerate
def load_utt2spk(utt2spk_file):
    """load_utt2spk.

    Map utterance id -> speaker id, one mapping per line.
    """
    utt2spk = {}
    with open(utt2spk_file) as f:
        for line in f:
            utt, spk = line.strip().split(None, 1)
            utt2spk[utt] = spk
    return utt2spk
def load_spk2utt(spk2utt_file):
    """load_spk2utt.

    Map speaker id -> list of utterance ids; return None when the file
    is absent (spk2utt is optional in a kaldi data directory).
    """
    if not os.path.exists(spk2utt_file):
        return None
    spk2utt = {}
    with open(spk2utt_file) as f:
        for line in f:
            tokens = line.strip().split()
            spk2utt[tokens[0]] = tokens[1:]
    return spk2utt
def load_reco2dur(reco2dur_file):
    """load_reco2dur.

    Map recording id -> duration in seconds; return None when the file
    is absent (reco2dur is optional in a kaldi data directory).
    """
    if not os.path.exists(reco2dur_file):
        return None
    reco2dur = {}
    with open(reco2dur_file) as f:
        for line in f:
            rec, dur = line.strip().split(None, 1)
            reco2dur[rec] = float(dur)
    return reco2dur
def process_wav(wav_rxfilename, process):
    """Return preprocessed wav_rxfilename.

    Args:
        wav_rxfilename: input rxfilename (plain path, "-" for stdin, or a
            piped command ending in "|").
        process: command which can be connected via pipe,
            use stdin and stdout
    Returns:
        wav_rxfilename: output piped command
    """
    if wav_rxfilename.endswith("|"):
        # Already a pipeline: append another stage to it.
        return "{}{}|".format(wav_rxfilename, process)
    # stdin "-" or a normal file: feed it through ``cat`` into the stage.
    return "cat {} | {} |".format(wav_rxfilename, process)
class KaldiData:
    """KaldiData.

    Thin accessor for a kaldi-style data directory: eagerly loads the
    standard metadata files (segments, utt2spk, wav.scp, reco2dur,
    spk2utt) and exposes audio loading by recording id.
    """
    def __init__(self, data_dir):
        """init.

        Args:
            data_dir: path to a kaldi data directory.  Optional files
                (segments, reco2dur, spk2utt) that are missing leave the
                corresponding attribute as None.
        """
        self.data_dir = data_dir
        # rec -> list of {utt, st, et} dicts, or None if no segments file.
        self.segments = load_segments_rechash(os.path.join(self.data_dir, "segments"))
        # utt -> speaker id.
        self.utt2spk = load_utt2spk(os.path.join(self.data_dir, "utt2spk"))
        # rec -> wav rxfilename (may be a piped command).
        self.wavs = load_wav_scp(os.path.join(self.data_dir, "wav.scp"))
        # rec -> duration in seconds, or None if no reco2dur file.
        self.reco2dur = load_reco2dur(os.path.join(self.data_dir, "reco2dur"))
        # speaker id -> list of utterance ids, or None if no spk2utt file.
        self.spk2utt = load_spk2utt(os.path.join(self.data_dir, "spk2utt"))
    def load_wav(self, recid, start=0, end=None):
        """load_wav.

        Args:
            recid: recording id (a key of wav.scp).
            start: first sample index, forwarded to the module-level load_wav.
            end: one-past-last sample index, or None for end of file.
        Returns:
            (data, rate) tuple as produced by soundfile.
        """
        data, rate = load_wav(self.wavs[recid], start, end)
        return data, rate
| """Copyright [2019] [<NAME>].
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright 2019 Hitachi, Ltd. (author: <NAME>)
# Licensed under the MIT license.
# This library provides utilities for kaldi-style data directory.
from __future__ import print_function
from functools import lru_cache
import io
import numpy as np
import os
import soundfile as sf
import subprocess
import sys
def load_segments(segments_file):
    """load_segments.

    Parse a kaldi ``segments`` file into a numpy structured array with
    fields (utt, rec, st, et); return None when the file is absent.
    """
    if not os.path.exists(segments_file):
        return None
    seg_dtype = [("utt", "object"), ("rec", "object"), ("st", "f"), ("et", "f")]
    # ndmin=1 keeps a single-line file as a length-1 array, not a scalar.
    return np.loadtxt(segments_file, dtype=seg_dtype, ndmin=1)
def load_segments_hash(segments_file):
    """load_segments_hash.

    Map each utterance id to a (rec, start, end) tuple read from a kaldi
    ``segments`` file; return None when the file is absent.
    """
    if not os.path.exists(segments_file):
        return None
    segments = {}
    with open(segments_file) as f:
        for line in f:
            utt, rec, st, et = line.strip().split()
            segments[utt] = (rec, float(st), float(et))
    return segments
def load_segments_rechash(segments_file):
    """load_segments_rechash.

    Group ``segments`` entries by recording id: rec -> list of
    {utt, st, et} dicts; return None when the file is absent.
    """
    if not os.path.exists(segments_file):
        return None
    by_rec = {}
    with open(segments_file) as f:
        for line in f:
            utt, rec, st, et = line.strip().split()
            by_rec.setdefault(rec, []).append(
                {"utt": utt, "st": float(st), "et": float(et)})
    return by_rec
def load_wav_scp(wav_scp_file):
    """load_wav_scp.

    Map recording id -> wav rxfilename (a path or a piped command).
    """
    wavs = {}
    with open(wav_scp_file) as f:
        for line in f:
            # Split only once: the rxfilename itself may contain spaces.
            rec, rxfile = line.strip().split(None, 1)
            wavs[rec] = rxfile
    return wavs
@lru_cache(maxsize=1)
def load_wav(wav_rxfilename, start=0, end=None):
    """Read audio file and return data in numpy.

    float32 array. "lru_cache" holds recently loaded audio so that it can be
    called many times on the same audio file.
    OPTIMIZE: controls lru_cache size for random access,
    considering memory size

    Args:
        wav_rxfilename: kaldi-style extended filename -- a plain path,
            "-" for stdin, or a shell pipeline ending in "|".
        start: first sample index to keep.
        end: one-past-last sample index, or None to read to the end.
    Returns:
        (data, samplerate) as produced by soundfile.
    """
    if wav_rxfilename.endswith("|"):
        # input piped command: run it through the shell and buffer its
        # stdout fully before decoding (pipes cannot seek).
        p = subprocess.Popen(wav_rxfilename[:-1], shell=True, stdout=subprocess.PIPE)
        data, samplerate = sf.read(io.BytesIO(p.stdout.read()), dtype="float32")
        # cannot seek
        data = data[start:end]
    elif wav_rxfilename == "-":
        # stdin
        data, samplerate = sf.read(sys.stdin, dtype="float32")
        # cannot seek
        data = data[start:end]
    else:
        # normal wav file: let soundfile seek to the requested sample range.
        data, samplerate = sf.read(wav_rxfilename, start=start, stop=end)
    return data, samplerate
def load_utt2spk(utt2spk_file):
    """load_utt2spk.

    Map utterance id -> speaker id, one mapping per line.
    """
    utt2spk = {}
    with open(utt2spk_file) as f:
        for line in f:
            utt, spk = line.strip().split(None, 1)
            utt2spk[utt] = spk
    return utt2spk
def load_spk2utt(spk2utt_file):
    """load_spk2utt.

    Map speaker id -> list of utterance ids; return None when the file
    is absent (spk2utt is optional in a kaldi data directory).
    """
    if not os.path.exists(spk2utt_file):
        return None
    spk2utt = {}
    with open(spk2utt_file) as f:
        for line in f:
            tokens = line.strip().split()
            spk2utt[tokens[0]] = tokens[1:]
    return spk2utt
def load_reco2dur(reco2dur_file):
    """load_reco2dur.

    Map recording id -> duration in seconds; return None when the file
    is absent (reco2dur is optional in a kaldi data directory).
    """
    if not os.path.exists(reco2dur_file):
        return None
    reco2dur = {}
    with open(reco2dur_file) as f:
        for line in f:
            rec, dur = line.strip().split(None, 1)
            reco2dur[rec] = float(dur)
    return reco2dur
def process_wav(wav_rxfilename, process):
    """Return preprocessed wav_rxfilename.

    Args:
        wav_rxfilename: input rxfilename (plain path, "-" for stdin, or a
            piped command ending in "|").
        process: command which can be connected via pipe,
            use stdin and stdout
    Returns:
        wav_rxfilename: output piped command
    """
    if wav_rxfilename.endswith("|"):
        # Already a pipeline: append another stage to it.
        return "{}{}|".format(wav_rxfilename, process)
    # stdin "-" or a normal file: feed it through ``cat`` into the stage.
    return "cat {} | {} |".format(wav_rxfilename, process)
class KaldiData:
    """KaldiData.

    Thin accessor for a kaldi-style data directory: eagerly loads the
    standard metadata files (segments, utt2spk, wav.scp, reco2dur,
    spk2utt) and exposes audio loading by recording id.
    """
    def __init__(self, data_dir):
        """init.

        Args:
            data_dir: path to a kaldi data directory.  Optional files
                (segments, reco2dur, spk2utt) that are missing leave the
                corresponding attribute as None.
        """
        self.data_dir = data_dir
        # rec -> list of {utt, st, et} dicts, or None if no segments file.
        self.segments = load_segments_rechash(os.path.join(self.data_dir, "segments"))
        # utt -> speaker id.
        self.utt2spk = load_utt2spk(os.path.join(self.data_dir, "utt2spk"))
        # rec -> wav rxfilename (may be a piped command).
        self.wavs = load_wav_scp(os.path.join(self.data_dir, "wav.scp"))
        # rec -> duration in seconds, or None if no reco2dur file.
        self.reco2dur = load_reco2dur(os.path.join(self.data_dir, "reco2dur"))
        # speaker id -> list of utterance ids, or None if no spk2utt file.
        self.spk2utt = load_spk2utt(os.path.join(self.data_dir, "spk2utt"))
    def load_wav(self, recid, start=0, end=None):
        """load_wav.

        Args:
            recid: recording id (a key of wav.scp).
            start: first sample index, forwarded to the module-level load_wav.
            end: one-past-last sample index, or None for end of file.
        Returns:
            (data, rate) tuple as produced by soundfile.
        """
        data, rate = load_wav(self.wavs[recid], start, end)
        return data, rate
| en | 0.767329 | Copyright [2019] [<NAME>]. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # Copyright 2019 Hitachi, Ltd. (author: <NAME>) # Licensed under the MIT license. # This library provides utilities for kaldi-style data directory. load_segments. # load segments file as array load_segments_hash. load_segments_rechash. load_wav_scp. # return dictionary { rec: wav_rxfilename } Read audio file and return data in numpy. float32 array."lru_cache" holds recently loaded audio so that can be called many times on the same audio file. OPTIMIZE: controls lru_cache size for random access, considering memory size # input piped command # cannot seek # stdin # cannot seek # normal wav file load_utt2spk. # returns dictionary { uttid: spkid } load_spk2utt. # returns dictionary { spkid: list of uttids } load_reco2dur. # returns dictionary { recid: duration } Return preprocessed wav_rxfilename. Args: wav_rxfilename: input process: command which can be connected via pipe, use stdin and stdout Returns: wav_rxfilename: output piped command # input piped command # stdin "-" or normal file KaldiData. init. load_wav. | 2.062063 | 2 |
H36M_BasePredModel.py | xcyan/eccv18_mtvae | 37 | 6624383 | """Base class for Human3.6M Keypoint Generation."""
import os
import numpy as np
import tensorflow as tf
import h36m_input as input_generator
import h36m_losses as losses
import utils
import model_utils
from preprocess.video_proc_utils import VideoProc
slim = tf.contrib.slim
def _get_data_from_provider(inputs, batch_size,
                            split_name, is_training=True, load_image=False):
  """Load data from input_genetator.

  Args:
    inputs: dict from input_generator.get_tfrecord holding 'landmarks',
      optionally 'images', and the pass-through 'dataset_size'.
    batch_size: number of sequences per batch.
    split_name: used only to name the batching queue.
    is_training: unused here; kept for interface compatibility.
    load_image: when True, also batch the 'images' tensor.

  Returns:
    dict with batched 'landmarks' (and 'images' when load_image is True)
    plus 'dataset_size'.
  """
  input_tuple = [inputs['landmarks']]
  if load_image:
    input_tuple.append(inputs['images'])
  tmp_outputs = tf.train.batch(input_tuple,
                               batch_size=batch_size,
                               num_threads=64,
                               capacity=batch_size*4,
                               name='batching_queues/%s' % split_name)
  outputs = dict()
  outputs['dataset_size'] = inputs['dataset_size']
  if load_image:
    outputs['landmarks'] = tmp_outputs[0]
    outputs['images'] = tmp_outputs[1]
  else:
    # BUG FIX: tf.train.batch returns a list when given a list, so take the
    # single batched tensor instead of assigning the list itself (callers
    # invoke .get_shape() / tf.tile on 'landmarks').
    outputs['landmarks'] = tmp_outputs[0]
  return outputs
class BasePredModel(object):
  """Defines Human3.6M motion generation model.

  Builds the input pipelines, preprocessing, training helpers and
  visualization ops shared by the concrete H36M keypoint predictors.
  """

  def __init__(self, params):
    """Stores the hyper-parameter namespace used by all builders."""
    self._params = params

  def get_inputs_from_placeholder(self, dataset_size, batch_size):
    """Builds placeholder-fed inputs (data supplied via feed_dict)."""
    params = self._params
    init_length = params.max_input_length
    # Placeholder mode only supports a fixed history length.
    assert params.max_input_length == params.min_input_length
    sample_length = init_length + params.max_length
    placeholder = dict()
    placeholder['landmarks'] = tf.placeholder(
        dtype=tf.float32, shape=(batch_size, sample_length, params.keypoint_dim, 2))
    inputs = dict()
    inputs.update(placeholder)
    inputs['dataset_size'] = dataset_size
    inputs['his_lens'] = tf.tile([init_length], [batch_size])
    inputs['fut_lens'] = tf.tile([params.max_length], [batch_size])
    inputs['parzen_radius'] = tf.placeholder(
        dtype=tf.float32, shape=())
    return inputs

  def get_duplicate_inputs(self, dataset_dir, dataset_name, split_name,
                           batch_size, load_image):
    """Loads a batch of input from a single source.

    The first sequence of the batch is replicated batch_size times so that
    multiple predictions can be sampled for one conditioning sequence.
    """
    params = self._params
    init_length = params.max_input_length
    sample_length = init_length + params.max_length
    assert params.max_input_length == params.min_input_length
    with tf.variable_scope('data_loading_%s/%s' % (dataset_name, split_name)):
      raw_inputs = input_generator.get_tfrecord(
          dataset_dir, dataset_name, split_name,
          shuffle=False, sample_length=sample_length)
      inputs = _get_data_from_provider(
          raw_inputs, batch_size, split_name,
          is_training=True, load_image=load_image)
      inputs['landmarks'] = tf.tile(
          inputs['landmarks'][0:1], [batch_size, 1, 1, 1])
      inputs['his_lens'] = tf.tile([init_length], [batch_size])
      inputs['fut_lens'] = tf.tile([params.max_length], [batch_size])
    assert (not load_image)
    return inputs

  def get_inputs(self, dataset_dir, dataset_name, split_name,
                 batch_size, is_training):
    """Loads given dataset and split."""
    params = self._params
    sample_length = params.max_input_length + params.max_length
    with tf.variable_scope('data_loading_%s/%s' % (dataset_name, split_name)):
      raw_inputs = input_generator.get_tfrecord(
          dataset_dir, dataset_name, split_name,
          shuffle=is_training, sample_length=sample_length)
      inputs = _get_data_from_provider(
          raw_inputs, batch_size, split_name, is_training)
      # Sample a variable history length when the configured range allows it.
      if params.min_input_length < params.max_input_length:
        inputs['his_lens'] = tf.random_uniform(
            [batch_size], minval=params.min_input_length, maxval=params.max_input_length,
            dtype=tf.int32)
      else:
        inputs['his_lens'] = tf.tile([params.max_input_length], [batch_size])
      inputs['fut_lens'] = tf.tile([params.max_length], [batch_size])
    return inputs

  def preprocess(self, raw_inputs, is_training, load_image=False):
    """Data augmentation: splits sequences into history/future halves."""
    params = self._params
    shp = raw_inputs['landmarks'].get_shape().as_list()
    quantity = shp[0]  # batch size; shp[1] (sequence length) is not needed here
    inputs = dict()
    inputs['dataset_size'] = raw_inputs['dataset_size']
    inputs['his_lens'] = raw_inputs['his_lens']
    inputs['fut_lens'] = raw_inputs['fut_lens']
    inputs['his_landmarks'], inputs['fut_landmarks'] = \
      input_generator.split_pts_seq(
        raw_inputs['landmarks'], inputs['his_lens'], inputs['fut_lens'],
        params.max_input_length, params.max_length, is_training)
    # Gather, per sequence, the landmark frame at the history/future boundary.
    seq_idx = tf.range(0, quantity, dtype=tf.int32, name='range')
    seq_idx = tf.stack([seq_idx, inputs['his_lens']], axis=1)
    inputs['last_landmarks'] = tf.gather_nd(inputs['his_landmarks'], seq_idx)
    inputs['last_landmarks'] = tf.reshape(
        inputs['last_landmarks'], [quantity, 1, params.keypoint_dim, 2])
    assert (not load_image)
    return inputs

  def save_html_page(self, save_dir, num_repeat, batch_size):
    """Writes an index.html that displays the exported gt/pred gifs."""
    content = '<html><body><h1>Visualization</h1><table boder="1" style="width=100%">'
    content = content + '<tr><td>ground truth</td><td>pred (multiple trajectories)</td>'
    content = content + '</td>'
    # range (not the Python 2-only xrange) keeps this Python 3 compatible.
    for t in range(num_repeat):
      content = content + '\n<tr>'
      content = content + '<td><img src=\"%02d_gt.gif\" style=\"width:255px;height:255px;\"></td>' % t
      content += '<td><img src=\"%02d_pred.gif\" style=\"width:%dpx;height:255px;\"></td>' % (t, batch_size * 255)
      content += '</tr>'
    content += '</html>'
    with open(os.path.join(save_dir, 'index.html'), 'w') as f:
      f.write(content)
      f.flush()
      f.close()

  def get_init_fn(self, scopes):
    """Initialize assignment operator function used while training."""
    return model_utils.get_init_fn(scopes, self._params.init_model)

  def get_train_op_for_scope(self, loss, optimizer, scopes):
    """Returns a train op updating only variables in the given scopes."""
    train_op = model_utils.get_train_op_for_scope(
        loss, optimizer, scopes, self._params.clip_gradient_norm)
    return train_op

  def get_regularization_loss(self, outputs, scopes):
    """Returns the weight regularization loss for the given scopes."""
    params = self._params
    reg_loss = losses.regularization_loss(scopes, params)
    return reg_loss

  def get_visualization_op(self, inputs, outputs, log_dir, counter, output_length=None):
    """Builds a py_func op that renders gt/pred skeleton gifs into log_dir."""
    params = self._params
    batch_size = self._params.batch_size
    img_size = self._params.img_size
    ob_lens = inputs['his_lens']
    gt_lens = inputs['fut_lens']
    if output_length is None:
      pred_lens = gt_lens
    else:
      pred_lens = tf.zeros_like(gt_lens, dtype=tf.int32) + output_length
    # Landmarks are stored normalized; scale back to pixel coordinates.
    ob_landmarks = inputs['his_landmarks'] * params.img_size
    pred_landmarks = outputs['fut_landmarks'] * params.img_size
    gt_landmarks = inputs['fut_landmarks'] * params.img_size
    def write_grid(base_id, his_lens, gt_lens, pred_lens,
                   his_landmarks, gt_landmarks, pred_landmarks):
      """Python function executed via tf.py_func (side effect: writes gifs)."""
      tmp_dir = os.path.join(log_dir, 'tmp')
      utils.force_mkdir(tmp_dir)
      video_proc_lib = VideoProc()
      #############################
      ## Plot the history frames ##
      #############################
      his_video = np.zeros((his_lens[0], img_size, img_size, 3), dtype=np.float32)
      for t in range(his_lens[0]):
        his_video[t] = utils.visualize_landmarks(his_video[t], his_landmarks[0][t])
        his_video[t] = utils.visualize_h36m_skeleton(his_video[t], his_landmarks[0][t])
        his_video[t] = utils.visualize_boundary(his_video[t], colormap='green')
      #################################
      ## Plot the gt (future) frames ##
      #################################
      gt_video = np.zeros((gt_lens[0], img_size, img_size, 3), dtype=np.float32)
      for t in range(gt_lens[0]):
        gt_video[t] = utils.visualize_landmarks(gt_video[t], gt_landmarks[0][t])
        gt_video[t] = utils.visualize_h36m_skeleton(gt_video[t], gt_landmarks[0][t])
        gt_video[t] = utils.visualize_boundary(gt_video[t], colormap='blue')
      merged_video = np.concatenate((his_video, gt_video), axis=0)
      video_proc_lib.save_img_seq_to_video(
          merged_video, log_dir, '%02d_gt.gif' % base_id, frame_rate=7.5, codec=None, override=True)
      ###################################
      ## Plot the pred (future) frames ##
      ###################################
      raw_gif_list = []
      for i in range(batch_size):
        print(base_id * batch_size + i)
        pred_video = np.zeros((pred_lens[i], img_size, img_size, 3), dtype=np.float32)
        for t in range(pred_lens[i]):
          pred_video[t] = utils.visualize_landmarks(pred_video[t], pred_landmarks[i][t])
          pred_video[t] = utils.visualize_h36m_skeleton(pred_video[t], pred_landmarks[i][t])
          pred_video[t] = utils.visualize_boundary(pred_video[t], colormap='red')
        merged_video = np.concatenate((his_video, pred_video), axis=0)
        video_proc_lib.save_img_seq_to_video(
            merged_video, log_dir, '%02d_pred%02d.gif' % (base_id, i),
            frame_rate=7.5, codec=None, override=True)
        raw_gif_list.append('%02d_pred%02d.gif' % (base_id, i))
      # Tile the per-sample gifs into one wide strip for the html page.
      video_proc_lib.merge_video_side_by_side(
          log_dir, raw_gif_list, '%02d_pred.gif' % base_id,
          override=True)
      return 0
    save_op = tf.py_func(
        func=write_grid,
        inp=[counter, ob_lens, gt_lens, pred_lens, ob_landmarks, gt_landmarks, pred_landmarks],
        Tout=[tf.int64], name='write_grid')[0]
    return save_op
| """Base class for Human3.6M Keypoint Generation."""
import os
import numpy as np
import tensorflow as tf
import h36m_input as input_generator
import h36m_losses as losses
import utils
import model_utils
from preprocess.video_proc_utils import VideoProc
slim = tf.contrib.slim
def _get_data_from_provider(inputs, batch_size,
                            split_name, is_training=True, load_image=False):
  """Load data from input_genetator.

  Args:
    inputs: dict from input_generator.get_tfrecord holding 'landmarks',
      optionally 'images', and the pass-through 'dataset_size'.
    batch_size: number of sequences per batch.
    split_name: used only to name the batching queue.
    is_training: unused here; kept for interface compatibility.
    load_image: when True, also batch the 'images' tensor.

  Returns:
    dict with batched 'landmarks' (and 'images' when load_image is True)
    plus 'dataset_size'.
  """
  input_tuple = [inputs['landmarks']]
  if load_image:
    input_tuple.append(inputs['images'])
  tmp_outputs = tf.train.batch(input_tuple,
                               batch_size=batch_size,
                               num_threads=64,
                               capacity=batch_size*4,
                               name='batching_queues/%s' % split_name)
  outputs = dict()
  outputs['dataset_size'] = inputs['dataset_size']
  if load_image:
    outputs['landmarks'] = tmp_outputs[0]
    outputs['images'] = tmp_outputs[1]
  else:
    # BUG FIX: tf.train.batch returns a list when given a list, so take the
    # single batched tensor instead of assigning the list itself (callers
    # invoke .get_shape() / tf.tile on 'landmarks').
    outputs['landmarks'] = tmp_outputs[0]
  return outputs
class BasePredModel(object):
  """Defines Human3.6M motion generation model.

  Builds the input pipelines, preprocessing, training helpers and
  visualization ops shared by the concrete H36M keypoint predictors.
  """

  def __init__(self, params):
    """Stores the hyper-parameter namespace used by all builders."""
    self._params = params

  def get_inputs_from_placeholder(self, dataset_size, batch_size):
    """Builds placeholder-fed inputs (data supplied via feed_dict)."""
    params = self._params
    init_length = params.max_input_length
    # Placeholder mode only supports a fixed history length.
    assert params.max_input_length == params.min_input_length
    sample_length = init_length + params.max_length
    placeholder = dict()
    placeholder['landmarks'] = tf.placeholder(
        dtype=tf.float32, shape=(batch_size, sample_length, params.keypoint_dim, 2))
    inputs = dict()
    inputs.update(placeholder)
    inputs['dataset_size'] = dataset_size
    inputs['his_lens'] = tf.tile([init_length], [batch_size])
    inputs['fut_lens'] = tf.tile([params.max_length], [batch_size])
    inputs['parzen_radius'] = tf.placeholder(
        dtype=tf.float32, shape=())
    return inputs

  def get_duplicate_inputs(self, dataset_dir, dataset_name, split_name,
                           batch_size, load_image):
    """Loads a batch of input from a single source.

    The first sequence of the batch is replicated batch_size times so that
    multiple predictions can be sampled for one conditioning sequence.
    """
    params = self._params
    init_length = params.max_input_length
    sample_length = init_length + params.max_length
    assert params.max_input_length == params.min_input_length
    with tf.variable_scope('data_loading_%s/%s' % (dataset_name, split_name)):
      raw_inputs = input_generator.get_tfrecord(
          dataset_dir, dataset_name, split_name,
          shuffle=False, sample_length=sample_length)
      inputs = _get_data_from_provider(
          raw_inputs, batch_size, split_name,
          is_training=True, load_image=load_image)
      inputs['landmarks'] = tf.tile(
          inputs['landmarks'][0:1], [batch_size, 1, 1, 1])
      inputs['his_lens'] = tf.tile([init_length], [batch_size])
      inputs['fut_lens'] = tf.tile([params.max_length], [batch_size])
    assert (not load_image)
    return inputs

  def get_inputs(self, dataset_dir, dataset_name, split_name,
                 batch_size, is_training):
    """Loads given dataset and split."""
    params = self._params
    sample_length = params.max_input_length + params.max_length
    with tf.variable_scope('data_loading_%s/%s' % (dataset_name, split_name)):
      raw_inputs = input_generator.get_tfrecord(
          dataset_dir, dataset_name, split_name,
          shuffle=is_training, sample_length=sample_length)
      inputs = _get_data_from_provider(
          raw_inputs, batch_size, split_name, is_training)
      # Sample a variable history length when the configured range allows it.
      if params.min_input_length < params.max_input_length:
        inputs['his_lens'] = tf.random_uniform(
            [batch_size], minval=params.min_input_length, maxval=params.max_input_length,
            dtype=tf.int32)
      else:
        inputs['his_lens'] = tf.tile([params.max_input_length], [batch_size])
      inputs['fut_lens'] = tf.tile([params.max_length], [batch_size])
    return inputs

  def preprocess(self, raw_inputs, is_training, load_image=False):
    """Data augmentation: splits sequences into history/future halves."""
    params = self._params
    shp = raw_inputs['landmarks'].get_shape().as_list()
    quantity = shp[0]  # batch size; shp[1] (sequence length) is not needed here
    inputs = dict()
    inputs['dataset_size'] = raw_inputs['dataset_size']
    inputs['his_lens'] = raw_inputs['his_lens']
    inputs['fut_lens'] = raw_inputs['fut_lens']
    inputs['his_landmarks'], inputs['fut_landmarks'] = \
      input_generator.split_pts_seq(
        raw_inputs['landmarks'], inputs['his_lens'], inputs['fut_lens'],
        params.max_input_length, params.max_length, is_training)
    # Gather, per sequence, the landmark frame at the history/future boundary.
    seq_idx = tf.range(0, quantity, dtype=tf.int32, name='range')
    seq_idx = tf.stack([seq_idx, inputs['his_lens']], axis=1)
    inputs['last_landmarks'] = tf.gather_nd(inputs['his_landmarks'], seq_idx)
    inputs['last_landmarks'] = tf.reshape(
        inputs['last_landmarks'], [quantity, 1, params.keypoint_dim, 2])
    assert (not load_image)
    return inputs

  def save_html_page(self, save_dir, num_repeat, batch_size):
    """Writes an index.html that displays the exported gt/pred gifs."""
    content = '<html><body><h1>Visualization</h1><table boder="1" style="width=100%">'
    content = content + '<tr><td>ground truth</td><td>pred (multiple trajectories)</td>'
    content = content + '</td>'
    # range (not the Python 2-only xrange) keeps this Python 3 compatible.
    for t in range(num_repeat):
      content = content + '\n<tr>'
      content = content + '<td><img src=\"%02d_gt.gif\" style=\"width:255px;height:255px;\"></td>' % t
      content += '<td><img src=\"%02d_pred.gif\" style=\"width:%dpx;height:255px;\"></td>' % (t, batch_size * 255)
      content += '</tr>'
    content += '</html>'
    with open(os.path.join(save_dir, 'index.html'), 'w') as f:
      f.write(content)
      f.flush()
      f.close()

  def get_init_fn(self, scopes):
    """Initialize assignment operator function used while training."""
    return model_utils.get_init_fn(scopes, self._params.init_model)

  def get_train_op_for_scope(self, loss, optimizer, scopes):
    """Returns a train op updating only variables in the given scopes."""
    train_op = model_utils.get_train_op_for_scope(
        loss, optimizer, scopes, self._params.clip_gradient_norm)
    return train_op

  def get_regularization_loss(self, outputs, scopes):
    """Returns the weight regularization loss for the given scopes."""
    params = self._params
    reg_loss = losses.regularization_loss(scopes, params)
    return reg_loss

  def get_visualization_op(self, inputs, outputs, log_dir, counter, output_length=None):
    """Builds a py_func op that renders gt/pred skeleton gifs into log_dir."""
    params = self._params
    batch_size = self._params.batch_size
    img_size = self._params.img_size
    ob_lens = inputs['his_lens']
    gt_lens = inputs['fut_lens']
    if output_length is None:
      pred_lens = gt_lens
    else:
      pred_lens = tf.zeros_like(gt_lens, dtype=tf.int32) + output_length
    # Landmarks are stored normalized; scale back to pixel coordinates.
    ob_landmarks = inputs['his_landmarks'] * params.img_size
    pred_landmarks = outputs['fut_landmarks'] * params.img_size
    gt_landmarks = inputs['fut_landmarks'] * params.img_size
    def write_grid(base_id, his_lens, gt_lens, pred_lens,
                   his_landmarks, gt_landmarks, pred_landmarks):
      """Python function executed via tf.py_func (side effect: writes gifs)."""
      tmp_dir = os.path.join(log_dir, 'tmp')
      utils.force_mkdir(tmp_dir)
      video_proc_lib = VideoProc()
      #############################
      ## Plot the history frames ##
      #############################
      his_video = np.zeros((his_lens[0], img_size, img_size, 3), dtype=np.float32)
      for t in range(his_lens[0]):
        his_video[t] = utils.visualize_landmarks(his_video[t], his_landmarks[0][t])
        his_video[t] = utils.visualize_h36m_skeleton(his_video[t], his_landmarks[0][t])
        his_video[t] = utils.visualize_boundary(his_video[t], colormap='green')
      #################################
      ## Plot the gt (future) frames ##
      #################################
      gt_video = np.zeros((gt_lens[0], img_size, img_size, 3), dtype=np.float32)
      for t in range(gt_lens[0]):
        gt_video[t] = utils.visualize_landmarks(gt_video[t], gt_landmarks[0][t])
        gt_video[t] = utils.visualize_h36m_skeleton(gt_video[t], gt_landmarks[0][t])
        gt_video[t] = utils.visualize_boundary(gt_video[t], colormap='blue')
      merged_video = np.concatenate((his_video, gt_video), axis=0)
      video_proc_lib.save_img_seq_to_video(
          merged_video, log_dir, '%02d_gt.gif' % base_id, frame_rate=7.5, codec=None, override=True)
      ###################################
      ## Plot the pred (future) frames ##
      ###################################
      raw_gif_list = []
      for i in range(batch_size):
        print(base_id * batch_size + i)
        pred_video = np.zeros((pred_lens[i], img_size, img_size, 3), dtype=np.float32)
        for t in range(pred_lens[i]):
          pred_video[t] = utils.visualize_landmarks(pred_video[t], pred_landmarks[i][t])
          pred_video[t] = utils.visualize_h36m_skeleton(pred_video[t], pred_landmarks[i][t])
          pred_video[t] = utils.visualize_boundary(pred_video[t], colormap='red')
        merged_video = np.concatenate((his_video, pred_video), axis=0)
        video_proc_lib.save_img_seq_to_video(
            merged_video, log_dir, '%02d_pred%02d.gif' % (base_id, i),
            frame_rate=7.5, codec=None, override=True)
        raw_gif_list.append('%02d_pred%02d.gif' % (base_id, i))
      # Tile the per-sample gifs into one wide strip for the html page.
      video_proc_lib.merge_video_side_by_side(
          log_dir, raw_gif_list, '%02d_pred.gif' % base_id,
          override=True)
      return 0
    save_op = tf.py_func(
        func=write_grid,
        inp=[counter, ob_lens, gt_lens, pred_lens, ob_landmarks, gt_landmarks, pred_landmarks],
        Tout=[tf.int64], name='write_grid')[0]
    return save_op
| de | 0.300657 | Base class for Human3.6M Keypoint Generation. Load data from input_genetator. Defines Human3.6M motion generation model. # # Loads a batch of input from a single source. Loads given dataset and split. # Data augmentation. Initialize assignment operator function used while training. Python function. ############################# ## Plot the history frames ## ############################# ################################# ## Plot the gt (future) frames ## ################################# ################################### ## Plot the pred (future) frames ## ################################### | 2.315866 | 2 |
examples/combat_sim/entities.py | LearnPythonAndMakeGames/ecs | 6 | 6624384 | #!/usr/bin/env python
import sys
from ecs import Entity
from components import Health, Damage
# Alias the lazy xrange to range on Python 2.  BUG FIX: the original test
# used sys.platform, which is an OS identifier ('linux', 'win32', ...) and
# can never start with '2'; the interpreter version must be checked instead.
if sys.version_info[0] == 2:
    range = xrange  # noqa: F821 -- xrange only exists on Python 2
def setup_entities(number_of_cowboys=100, number_of_aliens=100):
    '''Sets up all the entities'''

    def _spawn(kind, index, cur_hp, max_hp, normal, critical, crit_pct):
        # Build one entity of the given kind with its combat components.
        entity = Entity('{}-{:02}'.format(kind, index))
        entity.health = Health(entity, current=cur_hp, max=max_hp)
        entity.damage = Damage(entity, normal=normal, critical=critical,
                               critical_percent=crit_pct)
        entity.humanoid = kind
        return entity

    cowboys = [_spawn('cowboy', i, 51, 53, 12, 17, 19)
               for i in range(number_of_cowboys)]
    aliens = [_spawn('alien', i, 102, 102, 6, 8, 20)
              for i in range(number_of_aliens)]
    # Convenient dictionary splitting the two types of entities
    return {'cowboys': cowboys, 'aliens': aliens}
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| #!/usr/bin/env python
import sys
from ecs import Entity
from components import Health, Damage
# Alias the lazy xrange to range on Python 2.  BUG FIX: the original test
# used sys.platform, which is an OS identifier ('linux', 'win32', ...) and
# can never start with '2'; the interpreter version must be checked instead.
if sys.version_info[0] == 2:
    range = xrange  # noqa: F821 -- xrange only exists on Python 2
def setup_entities(number_of_cowboys=100, number_of_aliens=100):
    '''Sets up all the entities'''

    def _spawn(kind, index, cur_hp, max_hp, normal, critical, crit_pct):
        # Build one entity of the given kind with its combat components.
        entity = Entity('{}-{:02}'.format(kind, index))
        entity.health = Health(entity, current=cur_hp, max=max_hp)
        entity.damage = Damage(entity, normal=normal, critical=critical,
                               critical_percent=crit_pct)
        entity.humanoid = kind
        return entity

    cowboys = [_spawn('cowboy', i, 51, 53, 12, 17, 19)
               for i in range(number_of_cowboys)]
    aliens = [_spawn('alien', i, 102, 102, 6, 8, 20)
              for i in range(number_of_aliens)]
    # Convenient dictionary splitting the two types of entities
    return {'cowboys': cowboys, 'aliens': aliens}
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| en | 0.711847 | #!/usr/bin/env python Sets up all the entities # Generate entities and add components to them # Convenient dictionary splitting the two types of entities | 2.826732 | 3 |
3_6Dpose_estimator/train_YOLO/scripts/gt_single_object.py | Minipeps/betapose | 66 | 6624385 | <gh_stars>10-100
import os
import yaml
import random
from tqdm import tqdm
from shutil import copyfile
opj = os.path.join

# Dataset layout / conversion constants for the LINEMOD -> darknet export.
NUM_SEQS = 15
CLASS_NAMES = ('ape', 'bvise', 'bowl', 'camera', 'can', 'cat', 'cup',
               'driller', 'duck', 'eggbo', 'glue', 'holepuncher', 'iron', 'lamp', 'phone')
SIXD = '/home/data/sixd/hinterstoisser/test'
DARKNET = '/home/projects/detection/darknet/data_linemod_gt'
SEQS = ['%02d' % (i+1) for i in range(NUM_SEQS)]
GT_RATIO = 0.1  # fraction of frames randomly sampled into the darknet set
WIDTH = 640
HEIGHT = 480

if __name__ == '__main__':
    tbar = tqdm(SEQS, ascii=True, ncols=80)
    for idx, seq in enumerate(tbar):
        CLASS_NAME = CLASS_NAMES[int(seq)-1]
        SIXD_IMGS = opj(SIXD, seq, 'rgb')
        DARKNET_IMGS = opj(DARKNET, seq, 'images')
        SIXD_ANNO = opj(SIXD, seq, 'gt.yml')
        with open(SIXD_ANNO) as f:
            # BUG FIX: yaml.load without a Loader is deprecated and can
            # execute arbitrary tags; gt.yml holds only plain scalars and
            # lists, so safe_load is behavior-preserving.
            gt_info = yaml.safe_load(f)
        IMG_PATHS = []
        if not os.path.exists(DARKNET_IMGS):
            os.makedirs(DARKNET_IMGS, exist_ok=True)
        for img in tqdm(os.listdir(SIXD_IMGS), ascii=True, ncols=80):
            if random.random() < GT_RATIO:
                # image: copy the sampled frame into the darknet tree
                src_img = opj(SIXD_IMGS, img)
                dst_img = opj(DARKNET_IMGS, img)
                copyfile(src_img, dst_img)
                IMG_PATHS.append(dst_img)
                # anno: convert the gt box to normalized YOLO (cx, cy, w, h)
                img_name = src_img.split('.')[-2].split('/')[-1]
                bbox = gt_info[int(img_name)][0]['obj_bb']
                bbox[0] = (bbox[0] + bbox[2] / 2) / WIDTH
                bbox[1] = (bbox[1] + bbox[3] / 2) / HEIGHT
                bbox[2] = bbox[2] / WIDTH
                bbox[3] = bbox[3] / HEIGHT
                dst_anno = dst_img.replace('.png', '.txt')
                # NOTE(review): append mode duplicates lines when the script
                # is re-run over an existing output tree.
                with open(dst_anno, 'a') as f:
                    f.write("0 %f %f %f %f\n" % (bbox[0], bbox[1], bbox[2], bbox[3]))
        DARKNET_TRAIN_LIST = opj(DARKNET, seq, 'all.txt')
        DARKNET_DATA_CFG = opj(DARKNET, seq, CLASS_NAME + '.data')
        DARKNET_NAMES = opj(DARKNET, seq, CLASS_NAME + '.names')
        with open(DARKNET_TRAIN_LIST, 'a') as f:
            for img_path in IMG_PATHS:
                f.write(img_path + '\n')
        with open(DARKNET_DATA_CFG, 'a') as f:
            f.write('classes = 1\n')
            f.write('train = ' + opj('data_linemod_gt', seq, 'all.txt') + '\n')
            f.write('val = ' + opj('data_linemod_gt', seq, 'all.txt') + '\n')
            f.write('names = ' + opj('data_linemod_gt', seq, CLASS_NAME + '.names') + '\n')
            f.write('backup = ' + opj('backup_linemod_gt', seq))
        with open(DARKNET_NAMES, 'a') as f:
            f.write(CLASS_NAME)
"""Export LINEMOD (SIXD/Hinterstoisser) ground truth into darknet format.

For every test sequence, a GT_RATIO fraction of the RGB frames is copied
into the darknet data directory together with a YOLO-style label file
(class 0, normalised centre-x / centre-y / width / height), plus the
per-class ``all.txt`` image list, ``<class>.data`` config and
``<class>.names`` file.
"""
import os
import random
from shutil import copyfile

import yaml
from tqdm import tqdm

opj = os.path.join

NUM_SEQS = 15
CLASS_NAMES = ('ape', 'bvise', 'bowl', 'camera', 'can', 'cat', 'cup',
               'driller', 'duck', 'eggbo', 'glue', 'holepuncher', 'iron', 'lamp', 'phone')
SIXD = '/home/data/sixd/hinterstoisser/test'
DARKNET = '/home/projects/detection/darknet/data_linemod_gt'
SEQS = ['%02d' % (i + 1) for i in range(NUM_SEQS)]
GT_RATIO = 0.1   # fraction of frames randomly sampled as ground truth
WIDTH = 640      # LINEMOD RGB frame size, used to normalise boxes
HEIGHT = 480

if __name__ == '__main__':
    tbar = tqdm(SEQS, ascii=True, ncols=80)
    for idx, seq in enumerate(tbar):
        CLASS_NAME = CLASS_NAMES[int(seq) - 1]
        SIXD_IMGS = opj(SIXD, seq, 'rgb')
        DARKNET_IMGS = opj(DARKNET, seq, 'images')
        SIXD_ANNO = opj(SIXD, seq, 'gt.yml')
        with open(SIXD_ANNO) as f:
            # safe_load: gt.yml is plain data; yaml.load without a Loader is
            # deprecated and unsafe on untrusted input.
            gt_info = yaml.safe_load(f)
        IMG_PATHS = []
        # exist_ok=True makes the previous `if os.path.exists(...) == False`
        # guard redundant.
        os.makedirs(DARKNET_IMGS, exist_ok=True)
        for img in tqdm(os.listdir(SIXD_IMGS), ascii=True, ncols=80):
            if random.random() < GT_RATIO:
                # image: copy the sampled frame into the darknet tree
                src_img = opj(SIXD_IMGS, img)
                dst_img = opj(DARKNET_IMGS, img)
                copyfile(src_img, dst_img)
                IMG_PATHS.append(dst_img)
                # anno: convert [x, y, w, h] (pixels, top-left corner) to
                # darknet [cx, cy, w, h] normalised by the image size.
                # splitext on the bare filename is robust against extra dots
                # or separators in the directory part (the old
                # split('.')/split('/') chain was not).
                img_name = os.path.splitext(img)[0]
                bbox = gt_info[int(img_name)][0]['obj_bb']
                bbox[0] = (bbox[0] + bbox[2] / 2) / WIDTH
                bbox[1] = (bbox[1] + bbox[3] / 2) / HEIGHT
                bbox[2] = bbox[2] / WIDTH
                bbox[3] = bbox[3] / HEIGHT
                dst_anno = dst_img.replace('.png', '.txt')
                with open(dst_anno, 'a') as f:
                    f.write("0 %f %f %f %f\n" % (bbox[0], bbox[1], bbox[2], bbox[3]))
        DARKNET_TRAIN_LIST = opj(DARKNET, seq, 'all.txt')
        DARKNET_DATA_CFG = opj(DARKNET, seq, CLASS_NAME + '.data')
        DARKNET_NAMES = opj(DARKNET, seq, CLASS_NAME + '.names')
        with open(DARKNET_TRAIN_LIST, 'a') as f:
            for img_path in IMG_PATHS:
                f.write(img_path + '\n')
        with open(DARKNET_DATA_CFG, 'a') as f:
            f.write('classes = 1\n')
            f.write('train = ' + opj('data_linemod_gt', seq, 'all.txt') + '\n')
            f.write('val = ' + opj('data_linemod_gt', seq, 'all.txt') + '\n')
            f.write('names = ' + opj('data_linemod_gt', seq, CLASS_NAME + '.names') + '\n')
            f.write('backup = ' + opj('backup_linemod_gt', seq))
        with open(DARKNET_NAMES, 'a') as f:
            f.write(CLASS_NAME)
f.write(CLASS_NAME) | la | 0.810422 | # image # anno | 2.309425 | 2 |
pay-api/src/pay_api/services/statement_settings.py | thorwolpert/sbc-pay | 4 | 6624386 | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service class to control all the operations related to statements."""
from datetime import date, datetime, timedelta
from flask import current_app
from pay_api.models import StatementSettings as StatementSettingsModel
from pay_api.models import StatementSettingsSchema as StatementSettingsModelSchema
from pay_api.models.payment_account import PaymentAccount as PaymentAccountModel
from pay_api.utils.enums import StatementFrequency
from pay_api.utils.util import current_local_time, get_first_and_last_dates_of_month, get_week_start_and_end_date
class StatementSettings:  # pylint:disable=too-many-instance-attributes
    """Service to manage statement-settings related operations.

    Wraps a ``StatementSettingsModel`` DAO: the private ``_dao`` property
    lazily creates the model, and its setter mirrors the model's columns
    onto this service object.
    """

    def __init__(self):
        """Return a Statement Service Object."""
        self.__dao = None
        self._id: int = None  # populated once a DAO is attached
        self._frequency = None
        self._payment_account_id = None
        self._from_date = None
        self._to_date = None

    @property
    def _dao(self):
        # Lazily instantiate the backing model on first access.
        if not self.__dao:
            self.__dao = StatementSettingsModel()
        return self.__dao

    @_dao.setter
    def _dao(self, value):
        # Attach an existing model and copy its columns onto the service.
        self.__dao = value
        self.id: int = self._dao.id
        self.frequency: str = self._dao.frequency
        self.payment_account_id: int = self._dao.payment_account_id
        self.from_date: datetime = self._dao.from_date
        self.to_date: datetime = self._dao.to_date

    @property
    def frequency(self):
        """Return the frequency for the statement settings."""
        return self._frequency

    @frequency.setter
    def frequency(self, value: str):
        """Set the frequency for the statement settings."""
        self._frequency = value
        self._dao.frequency = value

    @property
    def id(self):
        """Return the _id."""
        return self._id

    @id.setter
    def id(self, value: int):
        """Set the id."""
        self._id = value
        self._dao.id = value

    @property
    def from_date(self):
        """Return the from_date of the statement setting."""
        return self._from_date

    @from_date.setter
    def from_date(self, value: date):
        """Set the from_date for the statement setting."""
        self._from_date = value
        self._dao.from_date = value

    @property
    def payment_account_id(self):
        """Return the payment_account_id."""
        return self._payment_account_id

    @payment_account_id.setter
    def payment_account_id(self, value: int):
        """Set the payment_account_id."""
        self._payment_account_id = value
        self._dao.payment_account_id = value

    @property
    def to_date(self):
        """Return the to_date of the statement setting."""
        return self._to_date

    @to_date.setter
    def to_date(self, value: date):
        """Set the to_date for the statement setting."""
        self._to_date = value
        self._dao.to_date = value

    def asdict(self):
        """Return the statement settings as a python dict."""
        statements_settings_schema = StatementSettingsModelSchema()
        d = statements_settings_schema.dump(self._dao)
        return d

    @staticmethod
    def find_by_account_id(auth_account_id: str):
        """Return the latest settings plus the next start date for every frequency.

        Returns ``None`` when the account has no settings yet.
        """
        current_app.logger.debug(f'<find_by_account_id {auth_account_id}')
        statements_settings = StatementSettingsModel.find_latest_settings(auth_account_id)
        if statements_settings is None:
            return None
        all_settings = []
        # iterate and find the next start date for every possible frequency:
        # each candidate frequency could only start the day after the longer
        # of (current frequency, candidate) finishes its current period
        for freq in StatementFrequency:
            max_frequency = StatementSettings._find_longest_frequency(statements_settings.frequency, freq.value)
            last_date = StatementSettings._get_end_of(max_frequency)
            all_settings.append({
                'frequency': freq.name,
                'start_date': last_date + timedelta(days=1)
            })
        statements_settings_schema = StatementSettingsModelSchema()
        settings_details = {
            'current_frequency': statements_settings_schema.dump(statements_settings),
            'frequencies': all_settings
        }
        current_app.logger.debug('>statements_find_by_account_id')
        return settings_details

    @staticmethod
    def update_statement_settings(auth_account_id: str, frequency: str):
        """Update the statement frequency for an account.

        Rather than checking frequency transitions individually, this finds
        the longest of the current and the new frequency, lets the current
        setting run until the end of that period, and starts the new
        frequency the day after.
        """
        statements_settings_schema = StatementSettingsModelSchema()
        today = datetime.today()
        current_statements_settings = StatementSettingsModel.find_active_settings(auth_account_id, today)
        payment_account: PaymentAccountModel = PaymentAccountModel.find_by_auth_account_id(auth_account_id)
        if current_statements_settings is None:
            # no frequency yet - first time accessing the statement settings,
            # so create a new record effective immediately
            statements_settings = StatementSettingsModel(frequency=frequency,
                                                         payment_account_id=payment_account.id)
            statements_settings.save()
            return statements_settings_schema.dump(statements_settings)

        # Check whether the latest record is the active one; if not, end the
        # latest (future-dated) one now. This handles rapid frequency
        # changes: e.g. daily -> monthly -> weekly, where the monthly record
        # never took effect before being superseded.
        future_statements_settings = StatementSettingsModel.find_latest_settings(auth_account_id)
        if future_statements_settings is not None and current_statements_settings.id != future_statements_settings.id:
            future_statements_settings.to_date = today
            future_statements_settings.save()

        max_frequency = StatementSettings._find_longest_frequency(current_statements_settings.frequency, frequency)
        last_date = StatementSettings._get_end_of(max_frequency)
        current_statements_settings.to_date = last_date
        current_statements_settings.save()
        # the new frequency becomes effective the day after the current one ends
        new_statements_settings = StatementSettingsModel(frequency=frequency,
                                                         payment_account_id=payment_account.id,
                                                         from_date=last_date + timedelta(days=1))
        new_statements_settings.save()
        return statements_settings_schema.dump(new_statements_settings)

    @staticmethod
    def _find_longest_frequency(old_frequency, new_frequency):
        """Return the longest frequency of the two passed inputs."""
        # ordered shortest -> longest, so the larger index wins
        freq_list = [StatementFrequency.DAILY.value,
                     StatementFrequency.WEEKLY.value,
                     StatementFrequency.MONTHLY.value]
        max_index = max(freq_list.index(old_frequency), freq_list.index(new_frequency))
        return freq_list[max_index]

    @staticmethod
    def _get_end_of(frequency: str):
        """Return the end of the current day, week or month for *frequency*."""
        today = datetime.today()
        end_date = current_local_time()  # default: DAILY ends today
        if frequency == StatementFrequency.WEEKLY.value:
            end_date = get_week_start_and_end_date()[1]
        if frequency == StatementFrequency.MONTHLY.value:
            end_date = get_first_and_last_dates_of_month(today.month, today.year)[1]
        return end_date
 | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service class to control all the operations related to statements."""
from datetime import date, datetime, timedelta
from flask import current_app
from pay_api.models import StatementSettings as StatementSettingsModel
from pay_api.models import StatementSettingsSchema as StatementSettingsModelSchema
from pay_api.models.payment_account import PaymentAccount as PaymentAccountModel
from pay_api.utils.enums import StatementFrequency
from pay_api.utils.util import current_local_time, get_first_and_last_dates_of_month, get_week_start_and_end_date
class StatementSettings:  # pylint:disable=too-many-instance-attributes
    """Service to manage statement-settings related operations.

    Wraps a ``StatementSettingsModel`` DAO: the private ``_dao`` property
    lazily creates the model, and its setter mirrors the model's columns
    onto this service object.
    """

    def __init__(self):
        """Return a Statement Service Object."""
        self.__dao = None
        self._id: int = None  # populated once a DAO is attached
        self._frequency = None
        self._payment_account_id = None
        self._from_date = None
        self._to_date = None

    @property
    def _dao(self):
        # Lazily instantiate the backing model on first access.
        if not self.__dao:
            self.__dao = StatementSettingsModel()
        return self.__dao

    @_dao.setter
    def _dao(self, value):
        # Attach an existing model and copy its columns onto the service.
        self.__dao = value
        self.id: int = self._dao.id
        self.frequency: str = self._dao.frequency
        self.payment_account_id: int = self._dao.payment_account_id
        self.from_date: datetime = self._dao.from_date
        self.to_date: datetime = self._dao.to_date

    @property
    def frequency(self):
        """Return the frequency for the statement settings."""
        return self._frequency

    @frequency.setter
    def frequency(self, value: str):
        """Set the frequency for the statement settings."""
        self._frequency = value
        self._dao.frequency = value

    @property
    def id(self):
        """Return the _id."""
        return self._id

    @id.setter
    def id(self, value: int):
        """Set the id."""
        self._id = value
        self._dao.id = value

    @property
    def from_date(self):
        """Return the from_date of the statement setting."""
        return self._from_date

    @from_date.setter
    def from_date(self, value: date):
        """Set the from_date for the statement setting."""
        self._from_date = value
        self._dao.from_date = value

    @property
    def payment_account_id(self):
        """Return the payment_account_id."""
        return self._payment_account_id

    @payment_account_id.setter
    def payment_account_id(self, value: int):
        """Set the payment_account_id."""
        self._payment_account_id = value
        self._dao.payment_account_id = value

    @property
    def to_date(self):
        """Return the to_date of the statement setting."""
        return self._to_date

    @to_date.setter
    def to_date(self, value: date):
        """Set the to_date for the statement setting."""
        self._to_date = value
        self._dao.to_date = value

    def asdict(self):
        """Return the statement settings as a python dict."""
        statements_settings_schema = StatementSettingsModelSchema()
        d = statements_settings_schema.dump(self._dao)
        return d

    @staticmethod
    def find_by_account_id(auth_account_id: str):
        """Return the latest settings plus the next start date for every frequency.

        Returns ``None`` when the account has no settings yet.
        """
        current_app.logger.debug(f'<find_by_account_id {auth_account_id}')
        statements_settings = StatementSettingsModel.find_latest_settings(auth_account_id)
        if statements_settings is None:
            return None
        all_settings = []
        # iterate and find the next start date for every possible frequency:
        # each candidate frequency could only start the day after the longer
        # of (current frequency, candidate) finishes its current period
        for freq in StatementFrequency:
            max_frequency = StatementSettings._find_longest_frequency(statements_settings.frequency, freq.value)
            last_date = StatementSettings._get_end_of(max_frequency)
            all_settings.append({
                'frequency': freq.name,
                'start_date': last_date + timedelta(days=1)
            })
        statements_settings_schema = StatementSettingsModelSchema()
        settings_details = {
            'current_frequency': statements_settings_schema.dump(statements_settings),
            'frequencies': all_settings
        }
        current_app.logger.debug('>statements_find_by_account_id')
        return settings_details

    @staticmethod
    def update_statement_settings(auth_account_id: str, frequency: str):
        """Update the statement frequency for an account.

        Rather than checking frequency transitions individually, this finds
        the longest of the current and the new frequency, lets the current
        setting run until the end of that period, and starts the new
        frequency the day after.
        """
        statements_settings_schema = StatementSettingsModelSchema()
        today = datetime.today()
        current_statements_settings = StatementSettingsModel.find_active_settings(auth_account_id, today)
        payment_account: PaymentAccountModel = PaymentAccountModel.find_by_auth_account_id(auth_account_id)
        if current_statements_settings is None:
            # no frequency yet - first time accessing the statement settings,
            # so create a new record effective immediately
            statements_settings = StatementSettingsModel(frequency=frequency,
                                                         payment_account_id=payment_account.id)
            statements_settings.save()
            return statements_settings_schema.dump(statements_settings)

        # Check whether the latest record is the active one; if not, end the
        # latest (future-dated) one now. This handles rapid frequency
        # changes: e.g. daily -> monthly -> weekly, where the monthly record
        # never took effect before being superseded.
        future_statements_settings = StatementSettingsModel.find_latest_settings(auth_account_id)
        if future_statements_settings is not None and current_statements_settings.id != future_statements_settings.id:
            future_statements_settings.to_date = today
            future_statements_settings.save()

        max_frequency = StatementSettings._find_longest_frequency(current_statements_settings.frequency, frequency)
        last_date = StatementSettings._get_end_of(max_frequency)
        current_statements_settings.to_date = last_date
        current_statements_settings.save()
        # the new frequency becomes effective the day after the current one ends
        new_statements_settings = StatementSettingsModel(frequency=frequency,
                                                         payment_account_id=payment_account.id,
                                                         from_date=last_date + timedelta(days=1))
        new_statements_settings.save()
        return statements_settings_schema.dump(new_statements_settings)

    @staticmethod
    def _find_longest_frequency(old_frequency, new_frequency):
        """Return the longest frequency of the two passed inputs."""
        # ordered shortest -> longest, so the larger index wins
        freq_list = [StatementFrequency.DAILY.value,
                     StatementFrequency.WEEKLY.value,
                     StatementFrequency.MONTHLY.value]
        max_index = max(freq_list.index(old_frequency), freq_list.index(new_frequency))
        return freq_list[max_index]

    @staticmethod
    def _get_end_of(frequency: str):
        """Return the end of the current day, week or month for *frequency*."""
        today = datetime.today()
        end_date = current_local_time()  # default: DAILY ends today
        if frequency == StatementFrequency.WEEKLY.value:
            end_date = get_week_start_and_end_date()[1]
        if frequency == StatementFrequency.MONTHLY.value:
            end_date = get_first_and_last_dates_of_month(today.month, today.year)[1]
        return end_date
| en | 0.876177 | # Copyright © 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Service class to control all the operations related to statements. # pylint:disable=too-many-instance-attributes Service to manage statement related operations. Return a Statement Service Object. Return the for the statement settings. Set the frequency for the statement settings. Return the _id. Set the id. Return the from_date of the statement setting. Set the from for the statement setting. Return the account_id. Set the account_id. Return the to_date of the statement setting. Set the to_date for the statement setting. Return the invoice as a python dict. Find statements by account id. # iterate and find the next start date to all frequencies Update statements by account id. rather than checking frequency changes by individual if , it just applies the following logic. find the maximum frequency of current one and new one ;and calculate the date which it will keep on going. # no frequency yet.first time accessing the statement settings.so create a new record # check if the latest one is the active one.. if not , inactivate the latest one. # this handles the case of quickly changing of frequencies.. # changed from daily to monthly but then changed back to weekly.. # the monthly didn't get applied ,but even before that its being changed to weekly Return the longest frequency in the passed inputs. Return the end of either week or month. | 1.911011 | 2 |
mesh.py | MarcoCiaramella/Voxelize | 1 | 6624387 | from voxel import Voxel
from color import Color
class Mesh:
    """Voxel mesh built from an image stack, exportable as an ASCII PLY file.

    NOTE(review): assumes ``images_manager`` exposes ``size_x``/``size_y``/
    ``size_z``, ``not_alpha(x, y, z)`` and ``get_colors(x, y, z)`` -- confirm
    against the images-manager implementation.
    """

    # printf-style fragments of the ASCII PLY header; the two %d slots are
    # the total vertex count and the total face count
    PLY_HEADER_TOP = "ply\nformat ascii 1.0\nelement vertex %d\n"
    PLY_HEADER_VERTEX = "property float x\nproperty float y\nproperty float z\n"
    PLY_HEADER_NORMAL = "property float nx\nproperty float ny\nproperty float nz\n"
    PLY_HEADER_COLOR = "property uchar red\nproperty uchar green\nproperty uchar blue\n"
    PLY_HEADER_BOTTOM = "element face %d\nproperty list uchar uint vertex_indices\nend_header\n"

    def __init__(self,images_manager):
        """Build the voxel grid and switch on every non-transparent voxel."""
        self.images_manager = images_manager
        self.size_x = images_manager.size_x
        self.size_y = images_manager.size_y
        self.size_z = images_manager.size_z
        self.voxels = []
        self.__build()
        self.__check()

    def __build(self):
        """Allocate the x/y/z voxel grid, then mark and count active voxels."""
        for x in range(self.size_x):
            arr_x = []
            self.voxels.append(arr_x)
            for y in range(self.size_y):
                arr_y = []
                arr_x.append(arr_y)
                for z in range(self.size_z):
                    arr_y.append(Voxel(x,y,z))
        self.num_voxels = 0
        # a voxel is "on" when the corresponding pixel is not transparent
        for x in range(self.size_x):
            for y in range(self.size_y):
                for z in range(self.size_z):
                    if self.images_manager.not_alpha(x,y,z):
                        self.voxels[x][y][z].on = True
                        self.num_voxels += 1

    def coloring(self):
        """Assign per-vertex colors to every active voxel."""
        for x in range(self.size_x):
            for y in range(self.size_y):
                for z in range(self.size_z):
                    if self.voxels[x][y][z].on:
                        self.voxels[x][y][z].coloring(self.images_manager.get_colors(x,y,z))

    def export_ply(self,name):
        """Write the active voxels to file *name* as an ASCII PLY mesh of quads."""
        # header: vertex/face counts derive from the number of active voxels
        content = "%s%s%s%s%s"%(
            Mesh.PLY_HEADER_TOP%(self.num_voxels*Voxel.NUM_VERTICES),
            Mesh.PLY_HEADER_VERTEX,
            Mesh.PLY_HEADER_NORMAL,
            Mesh.PLY_HEADER_COLOR,
            Mesh.PLY_HEADER_BOTTOM%(self.num_voxels*Voxel.NUM_FACES)
        )
        with open(name,'w') as f:
            f.write(content)
        content = ''
        # vertex block: position, normal and RGB for each voxel vertex
        for x in range(self.size_x):
            for y in range(self.size_y):
                for z in range(self.size_z):
                    if self.voxels[x][y][z].on:
                        i = 0
                        while i < Voxel.NUM_VERTICES:
                            v = i*3   # stride into flat xyz array
                            n = i*3   # stride into flat normal array
                            c = i*4   # stride into flat color array (4 components; only RGB exported)
                            vx = self.voxels[x][y][z].vertices[v]
                            vy = self.voxels[x][y][z].vertices[v+1]
                            vz = self.voxels[x][y][z].vertices[v+2]
                            nx = self.voxels[x][y][z].normals[n]
                            ny = self.voxels[x][y][z].normals[n+1]
                            nz = self.voxels[x][y][z].normals[n+2]
                            r = self.voxels[x][y][z].colors[c]
                            g = self.voxels[x][y][z].colors[c+1]
                            b = self.voxels[x][y][z].colors[c+2]
                            i += 1
                            content += '%f %f %f %f %f %f %d %d %d\n'%(vx,vy,vz,nx,ny,nz,r,g,b)
        with open(name,'a') as f:
            f.write(content)
        content = ''
        v = 0
        # face block: one quad per face; vertices were emitted consecutively
        # above, so each face simply consumes the next four indices
        for x in range(self.size_x):
            for y in range(self.size_y):
                for z in range(self.size_z):
                    if self.voxels[x][y][z].on:
                        i = 0
                        while i < Voxel.NUM_FACES:
                            i += 1
                            content += '4 %d %d %d %d\n'%(v,v+1,v+2,v+3)
                            v += 4
        with open(name,'a') as f:
            f.write(content)

    def __check(self):
        """Print a quick sanity summary of the build."""
        print("num voxels: "+str(self.num_voxels))
| from voxel import Voxel
from color import Color
class Mesh:
    """Voxel mesh built from an image stack, exportable as an ASCII PLY file.

    NOTE(review): assumes ``images_manager`` exposes ``size_x``/``size_y``/
    ``size_z``, ``not_alpha(x, y, z)`` and ``get_colors(x, y, z)`` -- confirm
    against the images-manager implementation.
    """

    # printf-style fragments of the ASCII PLY header; the two %d slots are
    # the total vertex count and the total face count
    PLY_HEADER_TOP = "ply\nformat ascii 1.0\nelement vertex %d\n"
    PLY_HEADER_VERTEX = "property float x\nproperty float y\nproperty float z\n"
    PLY_HEADER_NORMAL = "property float nx\nproperty float ny\nproperty float nz\n"
    PLY_HEADER_COLOR = "property uchar red\nproperty uchar green\nproperty uchar blue\n"
    PLY_HEADER_BOTTOM = "element face %d\nproperty list uchar uint vertex_indices\nend_header\n"

    def __init__(self,images_manager):
        """Build the voxel grid and switch on every non-transparent voxel."""
        self.images_manager = images_manager
        self.size_x = images_manager.size_x
        self.size_y = images_manager.size_y
        self.size_z = images_manager.size_z
        self.voxels = []
        self.__build()
        self.__check()

    def __build(self):
        """Allocate the x/y/z voxel grid, then mark and count active voxels."""
        for x in range(self.size_x):
            arr_x = []
            self.voxels.append(arr_x)
            for y in range(self.size_y):
                arr_y = []
                arr_x.append(arr_y)
                for z in range(self.size_z):
                    arr_y.append(Voxel(x,y,z))
        self.num_voxels = 0
        # a voxel is "on" when the corresponding pixel is not transparent
        for x in range(self.size_x):
            for y in range(self.size_y):
                for z in range(self.size_z):
                    if self.images_manager.not_alpha(x,y,z):
                        self.voxels[x][y][z].on = True
                        self.num_voxels += 1

    def coloring(self):
        """Assign per-vertex colors to every active voxel."""
        for x in range(self.size_x):
            for y in range(self.size_y):
                for z in range(self.size_z):
                    if self.voxels[x][y][z].on:
                        self.voxels[x][y][z].coloring(self.images_manager.get_colors(x,y,z))

    def export_ply(self,name):
        """Write the active voxels to file *name* as an ASCII PLY mesh of quads."""
        # header: vertex/face counts derive from the number of active voxels
        content = "%s%s%s%s%s"%(
            Mesh.PLY_HEADER_TOP%(self.num_voxels*Voxel.NUM_VERTICES),
            Mesh.PLY_HEADER_VERTEX,
            Mesh.PLY_HEADER_NORMAL,
            Mesh.PLY_HEADER_COLOR,
            Mesh.PLY_HEADER_BOTTOM%(self.num_voxels*Voxel.NUM_FACES)
        )
        with open(name,'w') as f:
            f.write(content)
        content = ''
        # vertex block: position, normal and RGB for each voxel vertex
        for x in range(self.size_x):
            for y in range(self.size_y):
                for z in range(self.size_z):
                    if self.voxels[x][y][z].on:
                        i = 0
                        while i < Voxel.NUM_VERTICES:
                            v = i*3   # stride into flat xyz array
                            n = i*3   # stride into flat normal array
                            c = i*4   # stride into flat color array (4 components; only RGB exported)
                            vx = self.voxels[x][y][z].vertices[v]
                            vy = self.voxels[x][y][z].vertices[v+1]
                            vz = self.voxels[x][y][z].vertices[v+2]
                            nx = self.voxels[x][y][z].normals[n]
                            ny = self.voxels[x][y][z].normals[n+1]
                            nz = self.voxels[x][y][z].normals[n+2]
                            r = self.voxels[x][y][z].colors[c]
                            g = self.voxels[x][y][z].colors[c+1]
                            b = self.voxels[x][y][z].colors[c+2]
                            i += 1
                            content += '%f %f %f %f %f %f %d %d %d\n'%(vx,vy,vz,nx,ny,nz,r,g,b)
        with open(name,'a') as f:
            f.write(content)
        content = ''
        v = 0
        # face block: one quad per face; vertices were emitted consecutively
        # above, so each face simply consumes the next four indices
        for x in range(self.size_x):
            for y in range(self.size_y):
                for z in range(self.size_z):
                    if self.voxels[x][y][z].on:
                        i = 0
                        while i < Voxel.NUM_FACES:
                            i += 1
                            content += '4 %d %d %d %d\n'%(v,v+1,v+2,v+3)
                            v += 4
        with open(name,'a') as f:
            f.write(content)

    def __check(self):
        """Print a quick sanity summary of the build."""
        print("num voxels: "+str(self.num_voxels))
| none | 1 | 2.777761 | 3 | |
DataStructure and algorithms/LinkedList/reverseLL.py | Rajatkhatri7/Project-Milap | 0 | 6624388 | <reponame>Rajatkhatri7/Project-Milap
#!/usr/bin/env python3
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        """Store *data* with no successor.

        Bug fix: the original assigned the builtin ``next`` function
        (``self.next = next``); a fresh node's successor must be ``None``.
        """
        self.data = data
        self.next = None
class LinkedList:
    """Minimal singly linked list supporting append, print and reversal."""

    def __init__(self):
        self.head = None  # first node, or None for an empty list

    def appending(self, data):
        """Append a node holding *data* at the tail (O(n) traversal).

        The original duplicated the node construction in both branches;
        a single construction with an empty-list guard is equivalent.
        """
        new_node = Node(data)
        new_node.next = None
        if self.head is None:
            self.head = new_node
            return
        last_node = self.head
        while last_node.next:
            last_node = last_node.next
        last_node.next = new_node

    def print_list(self):
        """Print each node's data, one per line, from head to tail."""
        ptr = self.head
        while ptr:
            print(ptr.data)
            ptr = ptr.next

    def reverse_iteratiion(self):  # (sic) misspelled name kept for existing callers
        """Reverse the list in place by re-pointing each node's ``next``."""
        prev = None
        current = self.head
        while current is not None:  # idiomatic `is not None` instead of `!= None`
            nxt = current.next
            current.next = prev
            prev = current
            current = nxt
        self.head = prev

    def recursive_reverse(self):
        """Reverse the list in place using recursion."""
        def _reverse_reverse(current, prev):
            # base case: walked past the tail; prev is the new head
            if not current:
                return prev
            nxt = current.next
            current.next = prev
            return _reverse_reverse(nxt, current)
        self.head = _reverse_reverse(current=self.head, prev=None)
# Demo: build the list A -> B -> C -> D, then show both reversal strategies.
llist = LinkedList()
llist.appending("A")
llist.appending("B")
llist.appending("C")
llist.appending("D")
print("Original List")
llist.print_list()
print("reversing by recursion: ")
llist.recursive_reverse()
llist.print_list()
print("reverse the list via iteration: ")
llist.reverse_iteratiion()
# see in the results swapping happens: the second reversal restores the
# original A -> B -> C -> D order in the final print below
llist.print_list() | #!/usr/bin/env python3
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        """Store *data* with no successor.

        Bug fix: the original assigned the builtin ``next`` function
        (``self.next = next``); a fresh node's successor must be ``None``.
        """
        self.data = data
        self.next = None
class LinkedList:
    """Minimal singly linked list supporting append, print and reversal."""

    def __init__(self):
        self.head = None  # first node, or None for an empty list

    def appending(self, data):
        """Append a node holding *data* at the tail (O(n) traversal).

        The original duplicated the node construction in both branches;
        a single construction with an empty-list guard is equivalent.
        """
        new_node = Node(data)
        new_node.next = None
        if self.head is None:
            self.head = new_node
            return
        last_node = self.head
        while last_node.next:
            last_node = last_node.next
        last_node.next = new_node

    def print_list(self):
        """Print each node's data, one per line, from head to tail."""
        ptr = self.head
        while ptr:
            print(ptr.data)
            ptr = ptr.next

    def reverse_iteratiion(self):  # (sic) misspelled name kept for existing callers
        """Reverse the list in place by re-pointing each node's ``next``."""
        prev = None
        current = self.head
        while current is not None:  # idiomatic `is not None` instead of `!= None`
            nxt = current.next
            current.next = prev
            prev = current
            current = nxt
        self.head = prev

    def recursive_reverse(self):
        """Reverse the list in place using recursion."""
        def _reverse_reverse(current, prev):
            # base case: walked past the tail; prev is the new head
            if not current:
                return prev
            nxt = current.next
            current.next = prev
            return _reverse_reverse(nxt, current)
        self.head = _reverse_reverse(current=self.head, prev=None)
# Demo: build the list A -> B -> C -> D, then show both reversal strategies.
llist = LinkedList()
llist.appending("A")
llist.appending("B")
llist.appending("C")
llist.appending("D")
print("Original List")
llist.print_list()
print("reversing by recursion: ")
llist.recursive_reverse()
llist.print_list()
print("reverse the list via iteration: ")
llist.reverse_iteratiion()
# see in the results swapping happens: the second reversal restores the
# original A -> B -> C -> D order in the final print below
llist.print_list() | en | 0.618715 | #!/usr/bin/env python3 #swapping via iteration #base case when current reach the end of linked list ,current is None #see in the results swapping happens | 4.247748 | 4 |
roialign/roi_align/crop_and_resize.py | chan4899/pytorch-mask-rcnn | 0 | 6624389 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from . import crop_and_resize as _backend
class CropAndResizeFunction(Function):
    """Autograd bridge to the compiled crop_and_resize kernels.

    Uses the legacy stateful ``torch.autograd.Function`` API: an instance is
    created per call and carries the crop configuration between ``forward``
    and ``backward``.
    """

    def __init__(self, crop_height, crop_width, extrapolation_value=0):
        # Output crop size; extrapolation_value fills samples taken outside
        # the image bounds (NOTE(review): assumed to follow
        # tf.image.crop_and_resize semantics -- confirm against the kernels).
        self.crop_height = crop_height
        self.crop_width = crop_width
        self.extrapolation_value = extrapolation_value

    def forward(self, image, boxes, box_ind):
        # NOTE(review): crops starts shaped like *image*; presumably the
        # backend resizes/fills it to (num_boxes, C, crop_h, crop_w) --
        # confirm against the C/CUDA extension.
        crops = torch.zeros_like(image)

        # dispatch to the GPU or CPU kernel depending on where *image* lives
        if image.is_cuda:
            _backend.crop_and_resize_gpu_forward(
                image, boxes, box_ind,
                self.extrapolation_value, self.crop_height, self.crop_width, crops)
        else:
            _backend.crop_and_resize_forward(
                image, boxes, box_ind,
                self.extrapolation_value, self.crop_height, self.crop_width, crops)

        # save for backward
        self.im_size = image.size()
        self.save_for_backward(boxes, box_ind)

        return crops

    def backward(self, grad_outputs):
        boxes, box_ind = self.saved_tensors

        grad_outputs = grad_outputs.contiguous()
        # Gradient w.r.t. the input image, restored to the original image
        # size. NOTE(review): resize_ leaves grown elements uninitialized;
        # this assumes the backend writes every element of grad_image --
        # confirm against the kernels.
        grad_image = torch.zeros_like(grad_outputs).resize_(*self.im_size)

        if grad_outputs.is_cuda:
            _backend.crop_and_resize_gpu_backward(
                grad_outputs, boxes, box_ind, grad_image
            )
        else:
            _backend.crop_and_resize_backward(
                grad_outputs, boxes, box_ind, grad_image
            )

        # no gradients for the boxes or the box indices
        return grad_image, None, None
class CropAndResize(nn.Module):
    """nn.Module wrapper for the TensorFlow-style crop-and-resize op.

    Semantics follow tf.image.crop_and_resize; see
    https://www.tensorflow.org/api_docs/python/tf/image/crop_and_resize
    """

    def __init__(self, crop_height, crop_width, extrapolation_value=0):
        super(CropAndResize, self).__init__()
        self.crop_height, self.crop_width = crop_height, crop_width
        self.extrapolation_value = extrapolation_value

    def forward(self, image, boxes, box_ind):
        crop_fn = CropAndResizeFunction(self.crop_height,
                                        self.crop_width,
                                        self.extrapolation_value)
        return crop_fn(image, boxes, box_ind)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from . import crop_and_resize as _backend
class CropAndResizeFunction(Function):
    """Autograd bridge to the compiled crop_and_resize kernels.

    Uses the legacy stateful ``torch.autograd.Function`` API: an instance is
    created per call and carries the crop configuration between ``forward``
    and ``backward``.
    """

    def __init__(self, crop_height, crop_width, extrapolation_value=0):
        # Output crop size; extrapolation_value fills samples taken outside
        # the image bounds (NOTE(review): assumed to follow
        # tf.image.crop_and_resize semantics -- confirm against the kernels).
        self.crop_height = crop_height
        self.crop_width = crop_width
        self.extrapolation_value = extrapolation_value

    def forward(self, image, boxes, box_ind):
        # NOTE(review): crops starts shaped like *image*; presumably the
        # backend resizes/fills it to (num_boxes, C, crop_h, crop_w) --
        # confirm against the C/CUDA extension.
        crops = torch.zeros_like(image)

        # dispatch to the GPU or CPU kernel depending on where *image* lives
        if image.is_cuda:
            _backend.crop_and_resize_gpu_forward(
                image, boxes, box_ind,
                self.extrapolation_value, self.crop_height, self.crop_width, crops)
        else:
            _backend.crop_and_resize_forward(
                image, boxes, box_ind,
                self.extrapolation_value, self.crop_height, self.crop_width, crops)

        # save for backward
        self.im_size = image.size()
        self.save_for_backward(boxes, box_ind)

        return crops

    def backward(self, grad_outputs):
        boxes, box_ind = self.saved_tensors

        grad_outputs = grad_outputs.contiguous()
        # Gradient w.r.t. the input image, restored to the original image
        # size. NOTE(review): resize_ leaves grown elements uninitialized;
        # this assumes the backend writes every element of grad_image --
        # confirm against the kernels.
        grad_image = torch.zeros_like(grad_outputs).resize_(*self.im_size)

        if grad_outputs.is_cuda:
            _backend.crop_and_resize_gpu_backward(
                grad_outputs, boxes, box_ind, grad_image
            )
        else:
            _backend.crop_and_resize_backward(
                grad_outputs, boxes, box_ind, grad_image
            )

        # no gradients for the boxes or the box indices
        return grad_image, None, None
class CropAndResize(nn.Module):
    """nn.Module wrapper for the TensorFlow-style crop-and-resize op.

    Semantics follow tf.image.crop_and_resize; see
    https://www.tensorflow.org/api_docs/python/tf/image/crop_and_resize
    """

    def __init__(self, crop_height, crop_width, extrapolation_value=0):
        super(CropAndResize, self).__init__()
        self.crop_height, self.crop_width = crop_height, crop_width
        self.extrapolation_value = extrapolation_value

    def forward(self, image, boxes, box_ind):
        crop_fn = CropAndResizeFunction(self.crop_height,
                                        self.crop_width,
                                        self.extrapolation_value)
        return crop_fn(image, boxes, box_ind)
| en | 0.787265 | # save for backward Crop and resize ported from tensorflow See more details on https://www.tensorflow.org/api_docs/python/tf/image/crop_and_resize | 2.655502 | 3 |
var/spack/repos/builtin/packages/openblas/package.py | pbrady/spack | 348 | 6624390 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
from spack import *
from spack.package_test import compare_output_file, compile_c_and_execute
class Openblas(MakefilePackage):
    """OpenBLAS: An optimized BLAS library"""

    homepage = 'https://www.openblas.net'
    url = 'https://github.com/xianyi/OpenBLAS/archive/v0.2.19.tar.gz'
    git = 'https://github.com/xianyi/OpenBLAS.git'

    version('develop', branch='develop')
    version('0.3.19', sha256='947f51bfe50c2a0749304fbe373e00e7637600b0a47b78a51382aeb30ca08562')
    version('0.3.18', sha256='1632c1e8cca62d8bed064b37747e331a1796fc46f688626337362bf0d16aeadb')
    version('0.3.17', sha256='df2934fa33d04fd84d839ca698280df55c690c86a5a1133b3f7266fce1de279f')
    version('0.3.16', sha256='fa19263c5732af46d40d3adeec0b2c77951b67687e670fb6ba52ea3950460d79')
    version('0.3.15', sha256='30a99dec977594b387a17f49904523e6bc8dd88bd247266e83485803759e4bbe')
    version('0.3.14', sha256='d381935d26f9cae8e4bbd7d7f278435adf8e3a90920edf284bb9ad789ee9ad60')
    version('0.3.13', sha256='79197543b17cc314b7e43f7a33148c308b0807cd6381ee77f77e15acf3e6459e')
    version('0.3.12', sha256='65a7d3a4010a4e3bd5c0baa41a234797cd3a1735449a4a5902129152601dc57b')
    version('0.3.11', sha256='bc4617971179e037ae4e8ebcd837e46db88422f7b365325bd7aba31d1921a673')
    version('0.3.10', sha256='0484d275f87e9b8641ff2eecaa9df2830cbe276ac79ad80494822721de6e1693')
    version('0.3.9', sha256='17d4677264dfbc4433e97076220adc79b050e4f8a083ea3f853a53af253bc380')
    version('0.3.8', sha256='8f86ade36f0dbed9ac90eb62575137388359d97d8f93093b38abe166ad7ef3a8')
    version('0.3.7', sha256='bde136122cef3dd6efe2de1c6f65c10955bbb0cc01a520c2342f5287c28f9379')
    version('0.3.6', sha256='e64c8fe083832ffbc1459ab6c72f71d53afd3b36e8497c922a15a06b72e9002f')
    version('0.3.5', sha256='0950c14bd77c90a6427e26210d6dab422271bc86f9fc69126725833ecdaa0e85')
    version('0.3.4', sha256='4b4b4453251e9edb5f57465bf2b3cf67b19d811d50c8588cdf2ea1f201bb834f')
    version('0.3.3', sha256='49d88f4494ae780e3d7fa51769c00d982d7cdb73e696054ac3baa81d42f13bab')
    version('0.3.2', sha256='e8ba64f6b103c511ae13736100347deb7121ba9b41ba82052b1a018a65c0cb15')
    version('0.3.1', sha256='1f5e956f35f3acdd3c74516e955d797a320c2e0135e31d838cbdb3ea94d0eb33')
    version('0.3.0', sha256='cf51543709abe364d8ecfb5c09a2b533d2b725ea1a66f203509b21a8e9d8f1a1')
    version('0.2.20', sha256='5ef38b15d9c652985774869efd548b8e3e972e1e99475c673b25537ed7bcf394')
    version('0.2.19', sha256='9c40b5e4970f27c5f6911cb0a28aa26b6c83f17418b69f8e5a116bb983ca8557')
    version('0.2.18', sha256='7d9f8d4ea4a65ab68088f3bb557f03a7ac9cb5036ef2ba30546c3a28774a4112')
    version('0.2.17', sha256='0fe836dfee219ff4cadcc3567fb2223d9e0da5f60c7382711fb9e2c35ecf0dbf')
    version('0.2.16', sha256='766f350d0a4be614812d535cead8c816fc3ad3b9afcd93167ea5e4df9d61869b')
    version('0.2.15', sha256='73c40ace5978282224e5e122a41c8388c5a19e65a6f2329c2b7c0b61bacc9044')

    # Build-time options exposed to users; each maps onto one or more
    # make variables assembled in make_defs below.
    variant('ilp64', default=False, description='Force 64-bit Fortran native integers')
    variant('pic', default=True, description='Build position independent code')
    variant('shared', default=True, description='Build shared libraries')
    variant('consistent_fpcsr', default=False, description='Synchronize FP CSR between threads (x86/x86_64 only)')
    variant('bignuma', default=False, description='Enable experimental support for up to 1024 CPUs/Cores and 128 numa nodes')
    variant('symbol_suffix', default='none', description='Set a symbol suffix')
    variant('locking', default=True, description='Build with thread safety')
    variant(
        'threads', default='none',
        description='Multithreading support',
        values=('pthreads', 'openmp', 'none'),
        multi=False
    )

    # virtual dependency
    provides('blas')
    provides('lapack')

    # OpenBLAS >=3.0 has an official way to disable internal parallel builds
    patch('make.patch', when='@0.2.16:0.2.20')
    # This patch is in a pull request to OpenBLAS that has not been handled
    # https://github.com/xianyi/OpenBLAS/pull/915
    # UPD: the patch has been merged starting version 0.2.20
    patch('openblas_icc.patch', when='@:0.2.19%intel')
    patch('openblas_icc_openmp.patch', when='@:0.2.20%intel@16.0:')
    patch('openblas_icc_fortran.patch', when='@:0.3.14%intel@16.0:')
    patch('openblas_icc_fortran2.patch', when='@:0.3.14%intel@18.0:')
    # See https://github.com/spack/spack/issues/15385
    patch('lapack-0.3.9-xerbl.patch', when='@0.3.8:0.3.9 %intel')

    # Fixes compilation error on POWER8 with GCC 7
    # https://github.com/xianyi/OpenBLAS/pull/1098
    patch('power8.patch', when='@0.2.18:0.2.19 %gcc@7.1.0: target=power8')

    # Change file comments to work around clang 3.9 assembler bug
    # https://github.com/xianyi/OpenBLAS/pull/982
    patch('openblas0.2.19.diff', when='@0.2.19')

    # Fix CMake export symbol error
    # https://github.com/xianyi/OpenBLAS/pull/1703
    patch('openblas-0.3.2-cmake.patch', when='@0.3.1:0.3.2')

    # Disable experimental TLS code that lead to many threading issues
    # https://github.com/xianyi/OpenBLAS/issues/1735#issuecomment-422954465
    # https://github.com/xianyi/OpenBLAS/issues/1761#issuecomment-421039174
    # https://github.com/xianyi/OpenBLAS/pull/1765
    patch('https://github.com/xianyi/OpenBLAS/commit/4d183e5567346f80f2ef97eb98f8601c47f8cb56.patch',
          sha256='714aea33692304a50bd0ccde42590c176c82ded4a8ac7f06e573dc8071929c33',
          when='@0.3.3')

    # Fix parallel build issues on filesystems
    # with missing sub-second timestamp resolution
    patch('https://github.com/xianyi/OpenBLAS/commit/79ea839b635d1fd84b6ce8a47e086f01d64198e6.patch',
          sha256='f1b066a4481a50678caeb7656bf3e6764f45619686ac465f257c8017a2dc1ff0',
          when='@0.3.0:0.3.3')

    # Fix https://github.com/xianyi/OpenBLAS/issues/2431
    # Patch derived from https://github.com/xianyi/OpenBLAS/pull/2424
    patch('openblas-0.3.8-darwin.patch', when='@0.3.8 platform=darwin')

    # Fix ICE in LLVM 9.0.0 https://github.com/xianyi/OpenBLAS/pull/2329
    # Patch as in https://github.com/xianyi/OpenBLAS/pull/2597
    patch('openblas_appleclang11.patch', when='@0.3.8:0.3.9 %apple-clang@11.0.3')

    # There was an error in Reference-LAPACK that is triggeret by Xcode12
    # fixed upstream by https://github.com/xianyi/OpenBLAS/pull/2808 and
    # should be included in post 0.3.10 versions. Application to earlier
    # versions was not tested.
    # See also https://github.com/xianyi/OpenBLAS/issues/2870
    patch('https://github.com/xianyi/OpenBLAS/commit/f42e84d46c52f4ee1e05af8f365cd85de8a77b95.patch',
          sha256='7b1eec78d1b1f55d3a3f1249696be7da0e2e1cd3b7fadae852e97dc860f8a7fd',
          when='@0.3.8:0.3.10 %apple-clang@12.0.0:')

    # Add conditions to f_check to determine the Fujitsu compiler
    # See https://github.com/xianyi/OpenBLAS/pull/3010
    # UPD: the patch has been merged starting version 0.3.13
    patch('openblas_fujitsu.patch', when='@:0.3.10 %fj')
    patch('openblas_fujitsu_v0.3.11.patch', when='@0.3.11:0.3.12 %fj')
    patch('openblas_fujitsu2.patch', when='@0.3.10:0.3.12 %fj')

    # Use /usr/bin/env perl in build scripts
    patch('0001-use-usr-bin-env-perl.patch', when='@:0.3.13')

    # Known-bad compiler / version / variant combinations.
    # See https://github.com/spack/spack/issues/19932#issuecomment-733452619
    conflicts('%gcc@7.0.0:7.3,8.0.0:8.2', when='@0.3.11:')
    # See https://github.com/xianyi/OpenBLAS/issues/3074
    conflicts('%gcc@:10.1', when='@0.3.13 target=ppc64le:')
    # See https://github.com/spack/spack/issues/3036
    conflicts('%intel@16', when='@0.2.15:0.2.19')
    conflicts('+consistent_fpcsr', when='threads=none',
              msg='FPCSR consistency only applies to multithreading')
    conflicts('threads=pthreads', when='~locking', msg='Pthread support requires +locking')
    conflicts('threads=openmp', when='%apple-clang', msg="Apple's clang does not support OpenMP")
    conflicts('threads=openmp @:0.2.19', when='%clang', msg='OpenBLAS @:0.2.19 does not support OpenMP with clang!')

    # Build scripts (f_check and friends) are written in Perl.
    depends_on('perl', type='build')

    @property
    def parallel(self):
        """Whether spack may drive this package's make with ``-j N``."""
        # unclear whether setting `-j N` externally was supported before 0.3
        return self.spec.version >= Version('0.3.0')

    @run_before('edit')
    def check_compilers(self):
        """Abort early unless a Fortran compiler is available."""
        # As of 06/2016 there is no mechanism to specify that packages which
        # depends on Blas/Lapack need C or/and Fortran symbols. For now
        # require both.
        if self.compiler.fc is None:
            raise InstallError(
                'OpenBLAS requires both C and Fortran compilers!'
            )

    @staticmethod
    def _read_targets(target_file):
        """Parse a list of available targets from the OpenBLAS/TargetList.txt
        file.
        """
        micros = []
        # Target names in TargetList.txt are upper-case identifiers;
        # anything else on a line (headers, blanks) is skipped.
        re_target = re.compile(r'^[A-Z0-9_]+$')
        for line in target_file:
            match = re_target.match(line)
            if match is not None:
                micros.append(line.strip().lower())
        return micros

    def _microarch_target_args(self):
        """Given a spack microarchitecture and a list of targets found in
        OpenBLAS' TargetList.txt, determine the best command-line arguments.

        Returns a list of ``ARCH=...`` / ``TARGET=...`` / ``DYNAMIC_ARCH=...``
        make definitions.
        """
        # Read available openblas targets
        targetlist_name = join_path(self.stage.source_path, "TargetList.txt")
        if os.path.exists(targetlist_name):
            with open(targetlist_name) as f:
                available_targets = self._read_targets(f)
        else:
            available_targets = []

        # Get our build microarchitecture
        microarch = self.spec.target

        # List of arguments returned by this function
        args = []

        # List of available architectures, and possible aliases
        openblas_arch = set(['alpha', 'arm', 'ia64', 'mips', 'mips64',
                             'power', 'riscv64', 'sparc', 'zarch'])
        openblas_arch_map = {
            'amd64': 'x86_64',
            'powerpc64': 'power',
            'i386': 'x86',
            'aarch64': 'arm64',
        }
        openblas_arch.update(openblas_arch_map.keys())
        openblas_arch.update(openblas_arch_map.values())

        # Add spack-only microarchitectures to list
        skylake = set(["skylake", "skylake_avx512"])
        available_targets = set(available_targets) | skylake | openblas_arch

        # Find closest ancestor that is known to build in blas
        if microarch.name not in available_targets:
            for microarch in microarch.ancestors:
                if microarch.name in available_targets:
                    break

        if self.version >= Version("0.3"):
            # 'ARCH' argument causes build errors in older OpenBLAS
            # see https://github.com/spack/spack/issues/15385
            arch_name = microarch.family.name
            if arch_name in openblas_arch:
                # Apply possible spack->openblas arch name mapping
                arch_name = openblas_arch_map.get(arch_name, arch_name)
                args.append('ARCH=' + arch_name)

        if microarch.vendor == 'generic' and microarch.name != 'riscv64':
            # User requested a generic platform, or we couldn't find a good
            # match for the requested one. Allow OpenBLAS to determine
            # an optimized kernel at run time, including older CPUs, while
            # forcing it not to add flags for the current host compiler.
            args.append('DYNAMIC_ARCH=1')
            if self.spec.version >= Version('0.3.12'):
                # These are necessary to prevent OpenBLAS from targeting the
                # host architecture on newer version of OpenBLAS, but they
                # cause build errors on 0.3.5 .
                args.extend(['DYNAMIC_OLDER=1', 'TARGET=GENERIC'])
        elif microarch.name in skylake:
            # Special case for renaming skylake family
            args.append('TARGET=SKYLAKEX')
            if microarch.name == "skylake":
                # Special case for disabling avx512 instructions
                args.append('NO_AVX512=1')
        elif microarch.name == 'riscv64':
            # Special case for renaming the generic riscv64 uarch to the
            # corresponding OpenBLAS target. riscv64 does not yet support
            # DYNAMIC_ARCH or TARGET=GENERIC. Once it does, this special
            # case can go away.
            args.append('TARGET=' + "RISCV64_GENERIC")
        else:
            args.append('TARGET=' + microarch.name.upper())

        return args

    @property
    def make_defs(self):
        """Make variable definitions shared by the build, test and
        install targets; derived from the spec's variants and compiler.
        """
        # Configure fails to pick up fortran from FC=/abs/path/to/fc, but
        # works fine with FC=/abs/path/to/gfortran.
        # When mixing compilers make sure that
        # $SPACK_ROOT/lib/spack/env/<compiler> have symlinks with reasonable
        # names and hack them inside lib/spack/spack/compilers/<compiler>.py
        make_defs = [
            'CC={0}'.format(spack_cc),
            'FC={0}'.format(spack_fc),
        ]

        # force OpenBLAS to use externally defined parallel build
        if self.spec.version < Version('0.3'):
            make_defs.append('MAKE_NO_J=1')  # flag defined by our make.patch
        else:
            make_defs.append('MAKE_NB_JOBS=0')  # flag provided by OpenBLAS

        # Add target and architecture flags
        make_defs += self._microarch_target_args()

        if '~shared' in self.spec:
            if '+pic' in self.spec:
                make_defs.extend([
                    'CFLAGS={0}'.format(self.compiler.cc_pic_flag),
                    'FFLAGS={0}'.format(self.compiler.f77_pic_flag)
                ])
            make_defs += ['NO_SHARED=1']

        # fix missing _dggsvd_ and _sggsvd_
        if self.spec.satisfies('@0.2.16'):
            make_defs += ['BUILD_LAPACK_DEPRECATED=1']

        # serial, but still thread-safe version
        if self.spec.satisfies('@0.3.7:'):
            if '+locking' in self.spec:
                make_defs += ['USE_LOCKING=1']
            else:
                make_defs += ['USE_LOCKING=0']

        # Add support for multithreading
        if self.spec.satisfies('threads=openmp'):
            make_defs += ['USE_OPENMP=1', 'USE_THREAD=1']
        elif self.spec.satisfies('threads=pthreads'):
            make_defs += ['USE_OPENMP=0', 'USE_THREAD=1']
        else:
            make_defs += ['USE_OPENMP=0', 'USE_THREAD=0']

        # 64bit ints
        if '+ilp64' in self.spec:
            make_defs += ['INTERFACE64=1']

        suffix = self.spec.variants['symbol_suffix'].value
        if suffix != 'none':
            make_defs += ['SYMBOLSUFFIX={0}'.format(suffix)]

        # Synchronize floating-point control and status register (FPCSR)
        # between threads (x86/x86_64 only).
        if '+consistent_fpcsr' in self.spec:
            make_defs += ['CONSISTENT_FPCSR=1']

        # Flang/f18 does not provide ETIME as an intrinsic
        if self.spec.satisfies('%clang'):
            make_defs.append('TIMER=INT_CPU_TIME')

        # Prevent errors in `as` assembler from newer instructions
        if self.spec.satisfies('%gcc@:4.8.4'):
            make_defs.append('NO_AVX2=1')

        # Fujitsu Compiler dose not add Fortran runtime in rpath.
        if self.spec.satisfies('%fj'):
            make_defs.append('LDFLAGS=-lfj90i -lfj90f -lfjsrcinfo -lelf')

        # Newer versions of openblas will try to find ranlib in the compiler's
        # prefix, for instance, .../lib/spack/env/gcc/ranlib, which will fail.
        if self.spec.satisfies('@0.3.13:'):
            make_defs.append('RANLIB=ranlib')

        if self.spec.satisfies('+bignuma'):
            make_defs.append('BIGNUMA=1')

        return make_defs

    @property
    def headers(self):
        """The package's public headers (``cblas.h`` and ``lapacke.h``)."""
        # As in netlib-lapack, the only public headers for cblas and lapacke in
        # openblas are cblas.h and lapacke.h. The remaining headers are private
        # headers either included in one of these two headers, or included in
        # one of the source files implementing functions declared in these
        # headers.
        return find_headers(['cblas', 'lapacke'], self.prefix.include)

    @property
    def libs(self):
        """The installed OpenBLAS libraries, honoring the shared and
        symbol_suffix variants.
        """
        spec = self.spec

        # Look for openblas{symbol_suffix}
        name = 'libopenblas'
        search_shared = bool(spec.variants['shared'].value)
        suffix = spec.variants['symbol_suffix'].value
        if suffix != 'none':
            name += suffix

        return find_libraries(name, spec.prefix, shared=search_shared, recursive=True)

    @property
    def build_targets(self):
        """Make targets for the build phase (static libs, netlib LAPACK,
        and optionally the shared libs).
        """
        targets = ['libs', 'netlib']

        # Build shared if variant is set.
        if '+shared' in self.spec:
            targets += ['shared']

        return self.make_defs + targets

    @run_after('build')
    @on_package_attributes(run_tests=True)
    def check_build(self):
        """Run OpenBLAS' own test suite (only when --test is requested)."""
        make('tests', *self.make_defs, parallel=False)

    @property
    def install_targets(self):
        """Make targets and variables for the install phase."""
        make_args = [
            'install',
            'PREFIX={0}'.format(self.prefix),
        ]
        return make_args + self.make_defs

    @run_after('install')
    @on_package_attributes(run_tests=True)
    def check_install(self):
        """Compile and run a small CBLAS dgemm program against the
        installed library and compare its output with a blessed file.
        """
        spec = self.spec
        # Openblas may pass its own test but still fail to compile Lapack
        # symbols. To make sure we get working Blas and Lapack, do a small
        # test.
        source_file = join_path(os.path.dirname(self.module.__file__),
                                'test_cblas_dgemm.c')
        blessed_file = join_path(os.path.dirname(self.module.__file__),
                                 'test_cblas_dgemm.output')

        include_flags = spec['openblas'].headers.cpp_flags
        link_flags = spec['openblas'].libs.ld_flags
        if self.compiler.name == 'intel':
            link_flags += ' -lifcore'
        if self.spec.satisfies('threads=pthreads'):
            link_flags += ' -lpthread'
        if spec.satisfies('threads=openmp'):
            link_flags += ' -lpthread ' + self.compiler.openmp_flag

        output = compile_c_and_execute(
            source_file, [include_flags], link_flags.split()
        )
        compare_output_file(output, blessed_file)
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
from spack import *
from spack.package_test import compare_output_file, compile_c_and_execute
class Openblas(MakefilePackage):
"""OpenBLAS: An optimized BLAS library"""
homepage = 'https://www.openblas.net'
url = 'https://github.com/xianyi/OpenBLAS/archive/v0.2.19.tar.gz'
git = 'https://github.com/xianyi/OpenBLAS.git'
version('develop', branch='develop')
version('0.3.19', sha256='947f51bfe50c2a0749304fbe373e00e7637600b0a47b78a51382aeb30ca08562')
version('0.3.18', sha256='1632c1e8cca62d8bed064b37747e331a1796fc46f688626337362bf0d16aeadb')
version('0.3.17', sha256='df2934fa33d04fd84d839ca698280df55c690c86a5a1133b3f7266fce1de279f')
version('0.3.16', sha256='fa19263c5732af46d40d3adeec0b2c77951b67687e670fb6ba52ea3950460d79')
version('0.3.15', sha256='30a99dec977594b387a17f49904523e6bc8dd88bd247266e83485803759e4bbe')
version('0.3.14', sha256='d381935d26f9cae8e4bbd7d7f278435adf8e3a90920edf284bb9ad789ee9ad60')
version('0.3.13', sha256='79197543b17cc314b7e43f7a33148c308b0807cd6381ee77f77e15acf3e6459e')
version('0.3.12', sha256='65a7d3a4010a4e3bd5c0baa41a234797cd3a1735449a4a5902129152601dc57b')
version('0.3.11', sha256='bc4617971179e037ae4e8ebcd837e46db88422f7b365325bd7aba31d1921a673')
version('0.3.10', sha256='0484d275f87e9b8641ff2eecaa9df2830cbe276ac79ad80494822721de6e1693')
version('0.3.9', sha256='17d4677264dfbc4433e97076220adc79b050e4f8a083ea3f853a53af253bc380')
version('0.3.8', sha256='8f86ade36f0dbed9ac90eb62575137388359d97d8f93093b38abe166ad7ef3a8')
version('0.3.7', sha256='bde136122cef3dd6efe2de1c6f65c10955bbb0cc01a520c2342f5287c28f9379')
version('0.3.6', sha256='e64c8fe083832ffbc1459ab6c72f71d53afd3b36e8497c922a15a06b72e9002f')
version('0.3.5', sha256='0950c14bd77c90a6427e26210d6dab422271bc86f9fc69126725833ecdaa0e85')
version('0.3.4', sha256='4b4b4453251e9edb5f57465bf2b3cf67b19d811d50c8588cdf2ea1f201bb834f')
version('0.3.3', sha256='49d88f4494ae780e3d7fa51769c00d982d7cdb73e696054ac3baa81d42f13bab')
version('0.3.2', sha256='e8ba64f6b103c511ae13736100347deb7121ba9b41ba82052b1a018a65c0cb15')
version('0.3.1', sha256='1f5e956f35f3acdd3c74516e955d797a320c2e0135e31d838cbdb3ea94d0eb33')
version('0.3.0', sha256='cf51543709abe364d8ecfb5c09a2b533d2b725ea1a66f203509b21a8e9d8f1a1')
version('0.2.20', sha256='5ef38b15d9c652985774869efd548b8e3e972e1e99475c673b25537ed7bcf394')
version('0.2.19', sha256='9c40b5e4970f27c5f6911cb0a28aa26b6c83f17418b69f8e5a116bb983ca8557')
version('0.2.18', sha256='7d9f8d4ea4a65ab68088f3bb557f03a7ac9cb5036ef2ba30546c3a28774a4112')
version('0.2.17', sha256='0fe836dfee219ff4cadcc3567fb2223d9e0da5f60c7382711fb9e2c35ecf0dbf')
version('0.2.16', sha256='766f350d0a4be614812d535cead8c816fc3ad3b9afcd93167ea5e4df9d61869b')
version('0.2.15', sha256='73c40ace5978282224e5e122a41c8388c5a19e65a6f2329c2b7c0b61bacc9044')
variant('ilp64', default=False, description='Force 64-bit Fortran native integers')
variant('pic', default=True, description='Build position independent code')
variant('shared', default=True, description='Build shared libraries')
variant('consistent_fpcsr', default=False, description='Synchronize FP CSR between threads (x86/x86_64 only)')
variant('bignuma', default=False, description='Enable experimental support for up to 1024 CPUs/Cores and 128 numa nodes')
variant('symbol_suffix', default='none', description='Set a symbol suffix')
variant('locking', default=True, description='Build with thread safety')
variant(
'threads', default='none',
description='Multithreading support',
values=('pthreads', 'openmp', 'none'),
multi=False
)
# virtual dependency
provides('blas')
provides('lapack')
# OpenBLAS >=3.0 has an official way to disable internal parallel builds
patch('make.patch', when='@0.2.16:0.2.20')
# This patch is in a pull request to OpenBLAS that has not been handled
# https://github.com/xianyi/OpenBLAS/pull/915
# UPD: the patch has been merged starting version 0.2.20
patch('openblas_icc.patch', when='@:0.2.19%intel')
patch('openblas_icc_openmp.patch', when='@:0.2.20%intel@16.0:')
patch('openblas_icc_fortran.patch', when='@:0.3.14%intel@16.0:')
patch('openblas_icc_fortran2.patch', when='@:0.3.14%intel@18.0:')
# See https://github.com/spack/spack/issues/15385
patch('lapack-0.3.9-xerbl.patch', when='@0.3.8:0.3.9 %intel')
# Fixes compilation error on POWER8 with GCC 7
# https://github.com/xianyi/OpenBLAS/pull/1098
patch('power8.patch', when='@0.2.18:0.2.19 %gcc@7.1.0: target=power8')
# Change file comments to work around clang 3.9 assembler bug
# https://github.com/xianyi/OpenBLAS/pull/982
patch('openblas0.2.19.diff', when='@0.2.19')
# Fix CMake export symbol error
# https://github.com/xianyi/OpenBLAS/pull/1703
patch('openblas-0.3.2-cmake.patch', when='@0.3.1:0.3.2')
# Disable experimental TLS code that lead to many threading issues
# https://github.com/xianyi/OpenBLAS/issues/1735#issuecomment-422954465
# https://github.com/xianyi/OpenBLAS/issues/1761#issuecomment-421039174
# https://github.com/xianyi/OpenBLAS/pull/1765
patch('https://github.com/xianyi/OpenBLAS/commit/4d183e5567346f80f2ef97eb98f8601c47f8cb56.patch',
sha256='714aea33692304a50bd0ccde42590c176c82ded4a8ac7f06e573dc8071929c33',
when='@0.3.3')
# Fix parallel build issues on filesystems
# with missing sub-second timestamp resolution
patch('https://github.com/xianyi/OpenBLAS/commit/79ea839b635d1fd84b6ce8a47e086f01d64198e6.patch',
sha256='f1b066a4481a50678caeb7656bf3e6764f45619686ac465f257c8017a2dc1ff0',
when='@0.3.0:0.3.3')
# Fix https://github.com/xianyi/OpenBLAS/issues/2431
# Patch derived from https://github.com/xianyi/OpenBLAS/pull/2424
patch('openblas-0.3.8-darwin.patch', when='@0.3.8 platform=darwin')
# Fix ICE in LLVM 9.0.0 https://github.com/xianyi/OpenBLAS/pull/2329
# Patch as in https://github.com/xianyi/OpenBLAS/pull/2597
patch('openblas_appleclang11.patch', when='@0.3.8:0.3.9 %apple-clang@11.0.3')
# There was an error in Reference-LAPACK that is triggeret by Xcode12
# fixed upstream by https://github.com/xianyi/OpenBLAS/pull/2808 and
# should be included in post 0.3.10 versions. Application to earlier
# versions was not tested.
# See also https://github.com/xianyi/OpenBLAS/issues/2870
patch('https://github.com/xianyi/OpenBLAS/commit/f42e84d46c52f4ee1e05af8f365cd85de8a77b95.patch',
sha256='7b1eec78d1b1f55d3a3f1249696be7da0e2e1cd3b7fadae852e97dc860f8a7fd',
when='@0.3.8:0.3.10 %apple-clang@12.0.0:')
# Add conditions to f_check to determine the Fujitsu compiler
# See https://github.com/xianyi/OpenBLAS/pull/3010
# UPD: the patch has been merged starting version 0.3.13
patch('openblas_fujitsu.patch', when='@:0.3.10 %fj')
patch('openblas_fujitsu_v0.3.11.patch', when='@0.3.11:0.3.12 %fj')
patch('openblas_fujitsu2.patch', when='@0.3.10:0.3.12 %fj')
# Use /usr/bin/env perl in build scripts
patch('0001-use-usr-bin-env-perl.patch', when='@:0.3.13')
# See https://github.com/spack/spack/issues/19932#issuecomment-733452619
conflicts('%gcc@7.0.0:7.3,8.0.0:8.2', when='@0.3.11:')
# See https://github.com/xianyi/OpenBLAS/issues/3074
conflicts('%gcc@:10.1', when='@0.3.13 target=ppc64le:')
# See https://github.com/spack/spack/issues/3036
conflicts('%intel@16', when='@0.2.15:0.2.19')
conflicts('+consistent_fpcsr', when='threads=none',
msg='FPCSR consistency only applies to multithreading')
conflicts('threads=pthreads', when='~locking', msg='Pthread support requires +locking')
conflicts('threads=openmp', when='%apple-clang', msg="Apple's clang does not support OpenMP")
conflicts('threads=openmp @:0.2.19', when='%clang', msg='OpenBLAS @:0.2.19 does not support OpenMP with clang!')
depends_on('perl', type='build')
@property
def parallel(self):
# unclear whether setting `-j N` externally was supported before 0.3
return self.spec.version >= Version('0.3.0')
@run_before('edit')
def check_compilers(self):
# As of 06/2016 there is no mechanism to specify that packages which
# depends on Blas/Lapack need C or/and Fortran symbols. For now
# require both.
if self.compiler.fc is None:
raise InstallError(
'OpenBLAS requires both C and Fortran compilers!'
)
@staticmethod
def _read_targets(target_file):
"""Parse a list of available targets from the OpenBLAS/TargetList.txt
file.
"""
micros = []
re_target = re.compile(r'^[A-Z0-9_]+$')
for line in target_file:
match = re_target.match(line)
if match is not None:
micros.append(line.strip().lower())
return micros
def _microarch_target_args(self):
"""Given a spack microarchitecture and a list of targets found in
OpenBLAS' TargetList.txt, determine the best command-line arguments.
"""
# Read available openblas targets
targetlist_name = join_path(self.stage.source_path, "TargetList.txt")
if os.path.exists(targetlist_name):
with open(targetlist_name) as f:
available_targets = self._read_targets(f)
else:
available_targets = []
# Get our build microarchitecture
microarch = self.spec.target
# List of arguments returned by this function
args = []
# List of available architectures, and possible aliases
openblas_arch = set(['alpha', 'arm', 'ia64', 'mips', 'mips64',
'power', 'riscv64', 'sparc', 'zarch'])
openblas_arch_map = {
'amd64': 'x86_64',
'powerpc64': 'power',
'i386': 'x86',
'aarch64': 'arm64',
}
openblas_arch.update(openblas_arch_map.keys())
openblas_arch.update(openblas_arch_map.values())
# Add spack-only microarchitectures to list
skylake = set(["skylake", "skylake_avx512"])
available_targets = set(available_targets) | skylake | openblas_arch
# Find closest ancestor that is known to build in blas
if microarch.name not in available_targets:
for microarch in microarch.ancestors:
if microarch.name in available_targets:
break
if self.version >= Version("0.3"):
# 'ARCH' argument causes build errors in older OpenBLAS
# see https://github.com/spack/spack/issues/15385
arch_name = microarch.family.name
if arch_name in openblas_arch:
# Apply possible spack->openblas arch name mapping
arch_name = openblas_arch_map.get(arch_name, arch_name)
args.append('ARCH=' + arch_name)
if microarch.vendor == 'generic' and microarch.name != 'riscv64':
# User requested a generic platform, or we couldn't find a good
# match for the requested one. Allow OpenBLAS to determine
# an optimized kernel at run time, including older CPUs, while
# forcing it not to add flags for the current host compiler.
args.append('DYNAMIC_ARCH=1')
if self.spec.version >= Version('0.3.12'):
# These are necessary to prevent OpenBLAS from targeting the
# host architecture on newer version of OpenBLAS, but they
# cause build errors on 0.3.5 .
args.extend(['DYNAMIC_OLDER=1', 'TARGET=GENERIC'])
elif microarch.name in skylake:
# Special case for renaming skylake family
args.append('TARGET=SKYLAKEX')
if microarch.name == "skylake":
# Special case for disabling avx512 instructions
args.append('NO_AVX512=1')
elif microarch.name == 'riscv64':
# Special case for renaming the generic riscv64 uarch to the
# corresponding OpenBLAS target. riscv64 does not yet support
# DYNAMIC_ARCH or TARGET=GENERIC. Once it does, this special
# case can go away.
args.append('TARGET=' + "RISCV64_GENERIC")
else:
args.append('TARGET=' + microarch.name.upper())
return args
@property
def make_defs(self):
# Configure fails to pick up fortran from FC=/abs/path/to/fc, but
# works fine with FC=/abs/path/to/gfortran.
# When mixing compilers make sure that
# $SPACK_ROOT/lib/spack/env/<compiler> have symlinks with reasonable
# names and hack them inside lib/spack/spack/compilers/<compiler>.py
make_defs = [
'CC={0}'.format(spack_cc),
'FC={0}'.format(spack_fc),
]
# force OpenBLAS to use externally defined parallel build
if self.spec.version < Version('0.3'):
make_defs.append('MAKE_NO_J=1') # flag defined by our make.patch
else:
make_defs.append('MAKE_NB_JOBS=0') # flag provided by OpenBLAS
# Add target and architecture flags
make_defs += self._microarch_target_args()
if '~shared' in self.spec:
if '+pic' in self.spec:
make_defs.extend([
'CFLAGS={0}'.format(self.compiler.cc_pic_flag),
'FFLAGS={0}'.format(self.compiler.f77_pic_flag)
])
make_defs += ['NO_SHARED=1']
# fix missing _dggsvd_ and _sggsvd_
if self.spec.satisfies('@0.2.16'):
make_defs += ['BUILD_LAPACK_DEPRECATED=1']
# serial, but still thread-safe version
if self.spec.satisfies('@0.3.7:'):
if '+locking' in self.spec:
make_defs += ['USE_LOCKING=1']
else:
make_defs += ['USE_LOCKING=0']
# Add support for multithreading
if self.spec.satisfies('threads=openmp'):
make_defs += ['USE_OPENMP=1', 'USE_THREAD=1']
elif self.spec.satisfies('threads=pthreads'):
make_defs += ['USE_OPENMP=0', 'USE_THREAD=1']
else:
make_defs += ['USE_OPENMP=0', 'USE_THREAD=0']
# 64bit ints
if '+ilp64' in self.spec:
make_defs += ['INTERFACE64=1']
suffix = self.spec.variants['symbol_suffix'].value
if suffix != 'none':
make_defs += ['SYMBOLSUFFIX={0}'.format(suffix)]
# Synchronize floating-point control and status register (FPCSR)
# between threads (x86/x86_64 only).
if '+consistent_fpcsr' in self.spec:
make_defs += ['CONSISTENT_FPCSR=1']
# Flang/f18 does not provide ETIME as an intrinsic
if self.spec.satisfies('%clang'):
make_defs.append('TIMER=INT_CPU_TIME')
# Prevent errors in `as` assembler from newer instructions
if self.spec.satisfies('%gcc@:4.8.4'):
make_defs.append('NO_AVX2=1')
# Fujitsu Compiler dose not add Fortran runtime in rpath.
if self.spec.satisfies('%fj'):
make_defs.append('LDFLAGS=-lfj90i -lfj90f -lfjsrcinfo -lelf')
# Newer versions of openblas will try to find ranlib in the compiler's
# prefix, for instance, .../lib/spack/env/gcc/ranlib, which will fail.
if self.spec.satisfies('@0.3.13:'):
make_defs.append('RANLIB=ranlib')
if self.spec.satisfies('+bignuma'):
make_defs.append('BIGNUMA=1')
return make_defs
@property
def headers(self):
# As in netlib-lapack, the only public headers for cblas and lapacke in
# openblas are cblas.h and lapacke.h. The remaining headers are private
# headers either included in one of these two headers, or included in
# one of the source files implementing functions declared in these
# headers.
return find_headers(['cblas', 'lapacke'], self.prefix.include)
@property
def libs(self):
spec = self.spec
# Look for openblas{symbol_suffix}
name = 'libopenblas'
search_shared = bool(spec.variants['shared'].value)
suffix = spec.variants['symbol_suffix'].value
if suffix != 'none':
name += suffix
return find_libraries(name, spec.prefix, shared=search_shared, recursive=True)
@property
def build_targets(self):
targets = ['libs', 'netlib']
# Build shared if variant is set.
if '+shared' in self.spec:
targets += ['shared']
return self.make_defs + targets
@run_after('build')
@on_package_attributes(run_tests=True)
def check_build(self):
make('tests', *self.make_defs, parallel=False)
@property
def install_targets(self):
make_args = [
'install',
'PREFIX={0}'.format(self.prefix),
]
return make_args + self.make_defs
    @run_after('install')
    @on_package_attributes(run_tests=True)
    def check_install(self):
        """Smoke-test the installed library by compiling and running a small
        CBLAS dgemm program and comparing its output against a blessed
        reference file shipped next to this package file."""
        spec = self.spec
        # Openblas may pass its own test but still fail to compile Lapack
        # symbols. To make sure we get working Blas and Lapack, do a small
        # test.
        source_file = join_path(os.path.dirname(self.module.__file__),
                                'test_cblas_dgemm.c')
        blessed_file = join_path(os.path.dirname(self.module.__file__),
                                 'test_cblas_dgemm.output')
        include_flags = spec['openblas'].headers.cpp_flags
        link_flags = spec['openblas'].libs.ld_flags
        # The Intel Fortran runtime must be linked explicitly when the
        # package was built with the Intel compiler.
        if self.compiler.name == 'intel':
            link_flags += ' -lifcore'
        if self.spec.satisfies('threads=pthreads'):
            link_flags += ' -lpthread'
        if spec.satisfies('threads=openmp'):
            link_flags += ' -lpthread ' + self.compiler.openmp_flag
        output = compile_c_and_execute(
            source_file, [include_flags], link_flags.split()
        )
        compare_output_file(output, blessed_file)
| en | 0.836932 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) OpenBLAS: An optimized BLAS library # virtual dependency # OpenBLAS >=3.0 has an official way to disable internal parallel builds # This patch is in a pull request to OpenBLAS that has not been handled # https://github.com/xianyi/OpenBLAS/pull/915 # UPD: the patch has been merged starting version 0.2.20 # See https://github.com/spack/spack/issues/15385 # Fixes compilation error on POWER8 with GCC 7 # https://github.com/xianyi/OpenBLAS/pull/1098 # Change file comments to work around clang 3.9 assembler bug # https://github.com/xianyi/OpenBLAS/pull/982 # Fix CMake export symbol error # https://github.com/xianyi/OpenBLAS/pull/1703 # Disable experimental TLS code that lead to many threading issues # https://github.com/xianyi/OpenBLAS/issues/1735#issuecomment-422954465 # https://github.com/xianyi/OpenBLAS/issues/1761#issuecomment-421039174 # https://github.com/xianyi/OpenBLAS/pull/1765 # Fix parallel build issues on filesystems # with missing sub-second timestamp resolution # Fix https://github.com/xianyi/OpenBLAS/issues/2431 # Patch derived from https://github.com/xianyi/OpenBLAS/pull/2424 # Fix ICE in LLVM 9.0.0 https://github.com/xianyi/OpenBLAS/pull/2329 # Patch as in https://github.com/xianyi/OpenBLAS/pull/2597 # There was an error in Reference-LAPACK that is triggeret by Xcode12 # fixed upstream by https://github.com/xianyi/OpenBLAS/pull/2808 and # should be included in post 0.3.10 versions. Application to earlier # versions was not tested. 
# See also https://github.com/xianyi/OpenBLAS/issues/2870 # Add conditions to f_check to determine the Fujitsu compiler # See https://github.com/xianyi/OpenBLAS/pull/3010 # UPD: the patch has been merged starting version 0.3.13 # Use /usr/bin/env perl in build scripts # See https://github.com/spack/spack/issues/19932#issuecomment-733452619 # See https://github.com/xianyi/OpenBLAS/issues/3074 # See https://github.com/spack/spack/issues/3036 # unclear whether setting `-j N` externally was supported before 0.3 # As of 06/2016 there is no mechanism to specify that packages which # depends on Blas/Lapack need C or/and Fortran symbols. For now # require both. Parse a list of available targets from the OpenBLAS/TargetList.txt file. Given a spack microarchitecture and a list of targets found in OpenBLAS' TargetList.txt, determine the best command-line arguments. # Read available openblas targets # Get our build microarchitecture # List of arguments returned by this function # List of available architectures, and possible aliases # Add spack-only microarchitectures to list # Find closest ancestor that is known to build in blas # 'ARCH' argument causes build errors in older OpenBLAS # see https://github.com/spack/spack/issues/15385 # Apply possible spack->openblas arch name mapping # User requested a generic platform, or we couldn't find a good # match for the requested one. Allow OpenBLAS to determine # an optimized kernel at run time, including older CPUs, while # forcing it not to add flags for the current host compiler. # These are necessary to prevent OpenBLAS from targeting the # host architecture on newer version of OpenBLAS, but they # cause build errors on 0.3.5 . # Special case for renaming skylake family # Special case for disabling avx512 instructions # Special case for renaming the generic riscv64 uarch to the # corresponding OpenBLAS target. riscv64 does not yet support # DYNAMIC_ARCH or TARGET=GENERIC. Once it does, this special # case can go away. 
# Configure fails to pick up fortran from FC=/abs/path/to/fc, but # works fine with FC=/abs/path/to/gfortran. # When mixing compilers make sure that # $SPACK_ROOT/lib/spack/env/<compiler> have symlinks with reasonable # names and hack them inside lib/spack/spack/compilers/<compiler>.py # force OpenBLAS to use externally defined parallel build # flag defined by our make.patch # flag provided by OpenBLAS # Add target and architecture flags # fix missing _dggsvd_ and _sggsvd_ # serial, but still thread-safe version # Add support for multithreading # 64bit ints # Synchronize floating-point control and status register (FPCSR) # between threads (x86/x86_64 only). # Flang/f18 does not provide ETIME as an intrinsic # Prevent errors in `as` assembler from newer instructions # Fujitsu Compiler dose not add Fortran runtime in rpath. # Newer versions of openblas will try to find ranlib in the compiler's # prefix, for instance, .../lib/spack/env/gcc/ranlib, which will fail. # As in netlib-lapack, the only public headers for cblas and lapacke in # openblas are cblas.h and lapacke.h. The remaining headers are private # headers either included in one of these two headers, or included in # one of the source files implementing functions declared in these # headers. # Look for openblas{symbol_suffix} # Build shared if variant is set. # Openblas may pass its own test but still fail to compile Lapack # symbols. To make sure we get working Blas and Lapack, do a small # test. | 1.729921 | 2 |
api/handlers.py | gabriellsesam/oracle-transform | 0 | 6624391 | def generic_handler(dbconnection, variables, logger, entity):
"""For a given entity, I query the given database and return the entity appended with 'query_result'
:param OracleDB dbconnection: Connection to a given database.
:param VariablesConfig variables: Object with environment variables.
:param sesam_logger logger: Logger to log info/errors to.
:param dict entity: Entity with keys to be used for the query.
:returns: Entity appended with query result.
:rtype: dict
"""
if entity:
entity['query_result'] = dbconnection.do_query(str(variables.query).format(**entity))
return entity
else:
logger.warning(f'Input entity is None! generic_handler Returning None...')
| def generic_handler(dbconnection, variables, logger, entity):
"""For a given entity, I query the given database and return the entity appended with 'query_result'
:param OracleDB dbconnection: Connection to a given database.
:param VariablesConfig variables: Object with environment variables.
:param sesam_logger logger: Logger to log info/errors to.
:param dict entity: Entity with keys to be used for the query.
:returns: Entity appended with query result.
:rtype: dict
"""
if entity:
entity['query_result'] = dbconnection.do_query(str(variables.query).format(**entity))
return entity
else:
logger.warning(f'Input entity is None! generic_handler Returning None...')
| en | 0.623142 | For a given entity, I query the given database and return the entity appended with 'query_result' :param OracleDB dbconnection: Connection to a given database. :param VariablesConfig variables: Object with environment variables. :param sesam_logger logger: Logger to log info/errors to. :param dict entity: Entity with keys to be used for the query. :returns: Entity appended with query result. :rtype: dict | 2.921747 | 3 |
caffe2/contrib/fakelowp/test/test_fc_nnpi_fp16.py | Stonepia/pytorch | 1 | 6624392 | <gh_stars>1-10
import numpy as np
import unittest
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import datetime
import caffe2.python.serialized_test.serialized_test_util as serial
core.GlobalInit(["caffe2", "--caffe2_log_level=-3", "--glow_global_fp16=1"])
GLOW_MATMUL_RTOL = 0
class FCTest(serial.SerializedTestCase):
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_clip(self, seed):
np.random.seed(seed)
m, n, k = 8, 8, 8
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "W0", "b0", "W1", "b1"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X", "W0", "b0"],
["X1"],
)
)
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X1", "W1", "b1"],
["Y"],
)
)
workspace.GlobalInit(
['caffe2', '--caffe2_log_level=0', '--glow_global_fp16=1',
'--glow_clip_fp16', '--glow_global_fp16_constants=1'])
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = np.full((n, k), 65536.0, dtype)
b0 = np.random.randint(low=1, high=3, size=(n)).astype(dtype)
W1 = np.random.randint(low=1, high=3, size=(n, k)).astype(dtype)
b1 = np.random.randint(low=1, high=3, size=(n)).astype(dtype)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
workspace.FeedBlob("W1", W1)
workspace.FeedBlob("b1", b1)
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False
)
X = np.random.randint(low=1, high=3, size=(m, k)).astype(dtype)
workspace.FeedBlob("X", X)
workspace.CreateNet(pred_net_onnxified)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
np.testing.assert_allclose(Y_glow, np.full((m, n), 65504.0, dtype))
@given(
m=st.integers(4, 50),
k=st.integers(4, 50),
n=st.integers(4, 50),
seed=st.integers(0, 65534)
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_fc_exercise(self, m, k, n, seed):
""" Test that the matmul engine is working, this doesn't test
precision
"""
np.random.seed(seed)
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "W0", "b0"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X", "W0", "b0"],
["Y"],
)
)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = np.random.randint(low=1, high=3, size=(n, k)).astype(dtype)
b0 = np.random.randint(low=1, high=3, size=(n)).astype(dtype)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
X0 = np.random.randint(low=1, high=3, size=(m, k)).astype(dtype)
workspace.FeedBlob("X", X0)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net)
num_iterations = 2
for _ in range(num_iterations):
X0 = np.random.randint(low=1, high=3, size=(m, k)).astype(dtype)
workspace.FeedBlob("X", X0)
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
# Run caffe2 net
workspace.RunNet(pred_net.name)
Y_c2 = workspace.FetchBlob('Y')
if not np.allclose(Y_c2, Y_glow):
print_test_debug_info("fc", {
"seed": seed,
"m": m,
"k": k,
"n": n,
"X": X0,
"W0": W0,
"b0": b0,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff": np.abs((Y_c2 - Y_glow) / Y_c2)})
assert(0)
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_fc_numeric_cases(self, seed):
""" Test numerics, use examples found from the unit test.
Use Fp16FCAcc16NNPI as a reference.
"""
np.random.seed(seed)
m = 1
k = 20
n = 1
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "W0", "b0"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X", "W0", "b0"],
["Y"],
)
)
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred"
pred_net_ref.external_input.extend(["X", "W0", "b0"])
pred_net_ref.external_output.append("Y")
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
"Fp16FCAcc32NNPI",
["X", "W0", "b0"],
["Y"],
)
)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = np.array([[0.04882812, 0.21520996, 0.1027832, 0.04489136,
-0.07635498, 0.14587402,
-0.06240845, 0.3918457, 0.46362305, -0.11657715,
0.29174805, 0.02890015,
0.0680542, 0.4255371, -0.42895508, -0.4128418,
-0.47973633, 0.33251953,
0.27807617, 0.3701172]], dtype=np.float32)
b0 = [0.47851562]
b0 = np.array(b0, dtype=np.float32)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
X_inputs = [
np.array([[
-2.94921875e-01, -3.58642578e-01, -1.92871094e-01,
2.81250000e-01, -1.30126953e-01, 2.32696533e-02,
-4.55566406e-01, -2.31811523e-01, -1.95190430e-01,
-7.76977539e-02, -1.29394531e-01, 2.94677734e-01,
8.96453857e-04, 4.97314453e-01, -6.07604980e-02,
2.55371094e-01, 3.49853516e-01, -1.37695312e-01,
2.95410156e-01, -3.67187500e-01]], dtype=np.float32),
np.array([[
-0.4494629, -0.22192383, -0.1640625, 0.11480713,
-0.09851074, -0.02084351,
0.19091797, -0.17468262, -0.47485352, 0.07489014,
0.03897095, 0.00197601,
0.02835083, -0.27294922, 0.26757812, -0.20996094,
-0.31103516, -0.41601562,
0.09918213, -0.07696533]], dtype=np.float32),
np.array([[
0.01150513, -0.20507812, 0.46704102, 0.00906372,
0.19848633, 0.3720703,
0.46557617, -0.47436523, -0.35107422, -0.0362854,
-0.20812988, 0.41918945,
0.09716797, 0.19897461, 0.3876953, -0.0165863,
0.23535156, 0.29956055,
0.24389648, -0.23486328]], dtype=np.float32)
]
# keep onnxifi happy by feeding something with a shape
workspace.FeedBlob("X", X_inputs[0])
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net_ref)
for i in range(len(X_inputs)):
workspace.FeedBlob("X", X_inputs[i])
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob('Y')
diff = np.abs((Y_c2 - Y_glow) / (Y_c2 + 1e-8))
rowdiff = np.max(diff, axis=1)
n_offenders = np.count_nonzero(rowdiff[rowdiff > GLOW_MATMUL_RTOL])
if n_offenders > 0:
print_test_debug_info("fc", {
"seed": seed,
"iter": i,
"m": m,
"k": k,
"n": n,
"X": X0,
"W0": W0,
"b0": b0,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff": diff,
"rowdiff": rowdiff})
assert(0)
@given(
m=st.integers(1, 50),
k=st.integers(1, 1000),
n=st.integers(1, 50),
seed=st.integers(0, 65534),
use_packed=st.integers(0, 2)
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_fc_num0(self, seed, m, k, n, use_packed):
""" Test numerics, fix a dimension and determine the ranges of error.
Use Fp16FCAcc16 as a reference.
"""
W = "W_packed" if use_packed else "W0"
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", W, "b0"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FbFCPacked" if use_packed else "FC",
["X", W, "b0"],
["Y"],
)
)
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred"
pred_net_ref.external_input.extend(["X", W, "b0"])
pred_net_ref.external_output.append("Y")
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
"Fp16FCAcc32NNPI",
["X", W, "b0"],
["Y"],
)
)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = 10 * (np.random.rand(n, k) - 0.5).astype(np.float16).astype(np.float32)
b0 = 1 * (np.random.rand(n) - 0.5).astype(np.float16).astype(np.float32)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
workspace.RunOperatorOnce(
core.CreateOperator(
"FbGemmPack",
['W0'],
['W_packed'],
no_packing=True,
)
)
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
X0 = np.random.rand(m, k).astype(dtype) - 0.5
workspace.FeedBlob("X", X0)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net_ref)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
# Run caffe2 net
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob('Y')
diff = np.abs((Y_c2 - Y_glow) / (Y_c2 + 1e-8))
rowdiff = np.max(diff, axis=1)
n_offenders = np.count_nonzero(rowdiff[rowdiff > GLOW_MATMUL_RTOL])
if n_offenders > 0:
print_test_debug_info("fc", {
"seed": seed,
"use_packed": use_packed,
"m": m,
"k": k,
"n": n,
"X": X0.shape,
"W0": W0.shape,
"b0": b0.shape,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff": diff,
"rowdiff": rowdiff})
assert(0)
if __name__ == '__main__':
unittest.main()
| import numpy as np
import unittest
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import datetime
import caffe2.python.serialized_test.serialized_test_util as serial
core.GlobalInit(["caffe2", "--caffe2_log_level=-3", "--glow_global_fp16=1"])
GLOW_MATMUL_RTOL = 0
class FCTest(serial.SerializedTestCase):
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_clip(self, seed):
np.random.seed(seed)
m, n, k = 8, 8, 8
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "W0", "b0", "W1", "b1"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X", "W0", "b0"],
["X1"],
)
)
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X1", "W1", "b1"],
["Y"],
)
)
workspace.GlobalInit(
['caffe2', '--caffe2_log_level=0', '--glow_global_fp16=1',
'--glow_clip_fp16', '--glow_global_fp16_constants=1'])
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = np.full((n, k), 65536.0, dtype)
b0 = np.random.randint(low=1, high=3, size=(n)).astype(dtype)
W1 = np.random.randint(low=1, high=3, size=(n, k)).astype(dtype)
b1 = np.random.randint(low=1, high=3, size=(n)).astype(dtype)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
workspace.FeedBlob("W1", W1)
workspace.FeedBlob("b1", b1)
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False
)
X = np.random.randint(low=1, high=3, size=(m, k)).astype(dtype)
workspace.FeedBlob("X", X)
workspace.CreateNet(pred_net_onnxified)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
np.testing.assert_allclose(Y_glow, np.full((m, n), 65504.0, dtype))
@given(
m=st.integers(4, 50),
k=st.integers(4, 50),
n=st.integers(4, 50),
seed=st.integers(0, 65534)
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_fc_exercise(self, m, k, n, seed):
""" Test that the matmul engine is working, this doesn't test
precision
"""
np.random.seed(seed)
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "W0", "b0"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X", "W0", "b0"],
["Y"],
)
)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = np.random.randint(low=1, high=3, size=(n, k)).astype(dtype)
b0 = np.random.randint(low=1, high=3, size=(n)).astype(dtype)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
X0 = np.random.randint(low=1, high=3, size=(m, k)).astype(dtype)
workspace.FeedBlob("X", X0)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net)
num_iterations = 2
for _ in range(num_iterations):
X0 = np.random.randint(low=1, high=3, size=(m, k)).astype(dtype)
workspace.FeedBlob("X", X0)
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
# Run caffe2 net
workspace.RunNet(pred_net.name)
Y_c2 = workspace.FetchBlob('Y')
if not np.allclose(Y_c2, Y_glow):
print_test_debug_info("fc", {
"seed": seed,
"m": m,
"k": k,
"n": n,
"X": X0,
"W0": W0,
"b0": b0,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff": np.abs((Y_c2 - Y_glow) / Y_c2)})
assert(0)
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_fc_numeric_cases(self, seed):
""" Test numerics, use examples found from the unit test.
Use Fp16FCAcc16NNPI as a reference.
"""
np.random.seed(seed)
m = 1
k = 20
n = 1
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "W0", "b0"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X", "W0", "b0"],
["Y"],
)
)
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred"
pred_net_ref.external_input.extend(["X", "W0", "b0"])
pred_net_ref.external_output.append("Y")
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
"Fp16FCAcc32NNPI",
["X", "W0", "b0"],
["Y"],
)
)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = np.array([[0.04882812, 0.21520996, 0.1027832, 0.04489136,
-0.07635498, 0.14587402,
-0.06240845, 0.3918457, 0.46362305, -0.11657715,
0.29174805, 0.02890015,
0.0680542, 0.4255371, -0.42895508, -0.4128418,
-0.47973633, 0.33251953,
0.27807617, 0.3701172]], dtype=np.float32)
b0 = [0.47851562]
b0 = np.array(b0, dtype=np.float32)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
X_inputs = [
np.array([[
-2.94921875e-01, -3.58642578e-01, -1.92871094e-01,
2.81250000e-01, -1.30126953e-01, 2.32696533e-02,
-4.55566406e-01, -2.31811523e-01, -1.95190430e-01,
-7.76977539e-02, -1.29394531e-01, 2.94677734e-01,
8.96453857e-04, 4.97314453e-01, -6.07604980e-02,
2.55371094e-01, 3.49853516e-01, -1.37695312e-01,
2.95410156e-01, -3.67187500e-01]], dtype=np.float32),
np.array([[
-0.4494629, -0.22192383, -0.1640625, 0.11480713,
-0.09851074, -0.02084351,
0.19091797, -0.17468262, -0.47485352, 0.07489014,
0.03897095, 0.00197601,
0.02835083, -0.27294922, 0.26757812, -0.20996094,
-0.31103516, -0.41601562,
0.09918213, -0.07696533]], dtype=np.float32),
np.array([[
0.01150513, -0.20507812, 0.46704102, 0.00906372,
0.19848633, 0.3720703,
0.46557617, -0.47436523, -0.35107422, -0.0362854,
-0.20812988, 0.41918945,
0.09716797, 0.19897461, 0.3876953, -0.0165863,
0.23535156, 0.29956055,
0.24389648, -0.23486328]], dtype=np.float32)
]
# keep onnxifi happy by feeding something with a shape
workspace.FeedBlob("X", X_inputs[0])
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net_ref)
for i in range(len(X_inputs)):
workspace.FeedBlob("X", X_inputs[i])
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob('Y')
diff = np.abs((Y_c2 - Y_glow) / (Y_c2 + 1e-8))
rowdiff = np.max(diff, axis=1)
n_offenders = np.count_nonzero(rowdiff[rowdiff > GLOW_MATMUL_RTOL])
if n_offenders > 0:
print_test_debug_info("fc", {
"seed": seed,
"iter": i,
"m": m,
"k": k,
"n": n,
"X": X0,
"W0": W0,
"b0": b0,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff": diff,
"rowdiff": rowdiff})
assert(0)
@given(
m=st.integers(1, 50),
k=st.integers(1, 1000),
n=st.integers(1, 50),
seed=st.integers(0, 65534),
use_packed=st.integers(0, 2)
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_fc_num0(self, seed, m, k, n, use_packed):
""" Test numerics, fix a dimension and determine the ranges of error.
Use Fp16FCAcc16 as a reference.
"""
W = "W_packed" if use_packed else "W0"
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", W, "b0"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FbFCPacked" if use_packed else "FC",
["X", W, "b0"],
["Y"],
)
)
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred"
pred_net_ref.external_input.extend(["X", W, "b0"])
pred_net_ref.external_output.append("Y")
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
"Fp16FCAcc32NNPI",
["X", W, "b0"],
["Y"],
)
)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = 10 * (np.random.rand(n, k) - 0.5).astype(np.float16).astype(np.float32)
b0 = 1 * (np.random.rand(n) - 0.5).astype(np.float16).astype(np.float32)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
workspace.RunOperatorOnce(
core.CreateOperator(
"FbGemmPack",
['W0'],
['W_packed'],
no_packing=True,
)
)
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
X0 = np.random.rand(m, k).astype(dtype) - 0.5
workspace.FeedBlob("X", X0)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net_ref)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
# Run caffe2 net
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob('Y')
diff = np.abs((Y_c2 - Y_glow) / (Y_c2 + 1e-8))
rowdiff = np.max(diff, axis=1)
n_offenders = np.count_nonzero(rowdiff[rowdiff > GLOW_MATMUL_RTOL])
if n_offenders > 0:
print_test_debug_info("fc", {
"seed": seed,
"use_packed": use_packed,
"m": m,
"k": k,
"n": n,
"X": X0.shape,
"W0": W0.shape,
"b0": b0.shape,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff": diff,
"rowdiff": rowdiff})
assert(0)
if __name__ == '__main__':
unittest.main() | en | 0.86203 | # noqa Test that the matmul engine is working, this doesn't test precision # Run Glow net # Run caffe2 net Test numerics, use examples found from the unit test. Use Fp16FCAcc16NNPI as a reference. # keep onnxifi happy by feeding something with a shape # Run Glow net Test numerics, fix a dimension and determine the ranges of error. Use Fp16FCAcc16 as a reference. # Run caffe2 net | 2.117378 | 2 |
nscan/nscan.py | Unam3dd/Train-2018-2020 | 4 | 6624393 | <reponame>Unam3dd/Train-2018-2020<gh_stars>1-10
#!/usr/bin/python2
#-*- coding:utf-8 -*-
import socket
import subprocess
import os
import netifaces
import threading
import thread
from datetime import datetime
import time
import shlex
import platform
import sys
import argparse
banner = '''
███▄ █ ██████ ▄████▄ ▄▄▄ ███▄ █
██ ▀█ █ ▒██ ▒ ▒██▀ ▀█ ▒████▄ ██ ▀█ █
▓██ ▀█ ██▒░ ▓██▄ ▒▓█ ▄ ▒██ ▀█▄ ▓██ ▀█ ██▒
▓██▒ ▐▌██▒ ▒ ██▒▒▓▓▄ ▄██▒░██▄▄▄▄██ ▓██▒ ▐▌██▒
▒██░ ▓██░▒██████▒▒▒ ▓███▀ ░ ▓█ ▓██▒▒██░ ▓██░
░ ▒░ ▒ ▒ ▒ ▒▓▒ ▒ ░░ ░▒ ▒ ░ ▒▒ ▓▒█░░ ▒░ ▒ ▒
░ ░░ ░ ▒░░ ░▒ ░ ░ ░ ▒ ▒ ▒▒ ░░ ░░ ░ ▒░
░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░
░ ░ ░ ░ ░ ░ ░
░
\033[1;92mBy Dxvistxr
\033[1;96mCreated For Scan Network
\033[1;92m[\033[1:94m*\033[1;92m] Github : \033[1;96mDxvistxr\033[00m
'''
def clear_func():
    """Clear the terminal screen using the platform's native command."""
    # platform.system() gives a clean OS name. The original substring tests
    # on platform.platform() ran 'cls' on any non-Linux system (including
    # macOS) and invoked the command twice for no reason.
    if platform.system() == 'Windows':
        os.system('cls')
    else:
        os.system('clear')
def try_ping(host):
    """Ping *host* once and, when it answers, print its IP and hostname.

    Uses the platform-appropriate ping invocation: Windows takes '-n' for
    the packet count and discards output into NUL, Unix-like systems take
    '-c' and discard into /dev/null. Hosts that do not answer are skipped
    silently; resolution or subprocess errors are reported.
    """
    try:
        # Original code mixed the flags up: the "Windows" branch redirected
        # into /dev/null (a Unix path) and the Unix branch passed '-b'
        # (broadcast) to unicast addresses, which fails on many systems.
        if platform.system() == 'Windows':
            req_ping = os.system('ping -n 1 %s > NUL' % (host))
        else:
            req_ping = os.system('ping -c 1 %s > /dev/null' % (host))
        if req_ping == 0:
            get_hostname = socket.gethostbyaddr(host)
            t = datetime.now().strftime('%H:%M:%S')
            print('\033[1;92m(\033[1;96m%s\033[1;92m) \033[1;96m[\033[1;92mIP\033[1;96m] : \033[00m%s \t \033[1;92m[\033[1;96mHostname\033[1;92m] : \033[00m%s' % (t, host, get_hostname[0]))
    except Exception as error_req_host:
        print('\033[1;91m[\033[1;94m*\033[1;91m] %s' % (error_req_host))
def start_scanner():
    """Discover the default gateway and ping every host in its /24 network.

    Prints the gateway address and interface, then fires one try_ping
    thread per host (x.y.z.0 .. x.y.z.255), pacing thread creation with a
    short sleep to avoid a thundering herd.
    """
    gtw = netifaces.gateways()
    interface = gtw['default'][2][1]
    gtw_ip = gtw['default'][2][0]
    # Derive the /24 prefix from the dotted quads. The original sliced the
    # first 10 characters (gtw_ip[:10]), which produces a wrong prefix for
    # addresses like 10.0.0.1 or 192.168.100.1.
    scanner_gtw_ip = '.'.join(gtw_ip.split('.')[:3]) + '.'
    try:
        clear_func()
        print('\033[1;92m'+banner)
        print('\t\033[1;92m[\033[1;94m*\033[1;92m] Gateway : %s' % (gtw_ip))
        print('\t\033[1;92m[\033[1;94m*\033[1;92m] Interface : %s' % (interface))
        for i in range(256):
            thread.start_new_thread(try_ping, (scanner_gtw_ip + str(i),))
            time.sleep(0.1)
    except Exception as error_start_scanner:
        print(error_start_scanner)
if __name__ == '__main__':
    # Entry point: reset the terminal, show the banner and start the scan.
    clear_func()
    clear_func()
    print('\033[1;92m'+banner)
    start_scanner()
| #!/usr/bin/python2
#-*- coding:utf-8 -*-
import socket
import subprocess
import os
import netifaces
import threading
import thread
from datetime import datetime
import time
import shlex
import platform
import sys
import argparse
banner = '''
███▄ █ ██████ ▄████▄ ▄▄▄ ███▄ █
██ ▀█ █ ▒██ ▒ ▒██▀ ▀█ ▒████▄ ██ ▀█ █
▓██ ▀█ ██▒░ ▓██▄ ▒▓█ ▄ ▒██ ▀█▄ ▓██ ▀█ ██▒
▓██▒ ▐▌██▒ ▒ ██▒▒▓▓▄ ▄██▒░██▄▄▄▄██ ▓██▒ ▐▌██▒
▒██░ ▓██░▒██████▒▒▒ ▓███▀ ░ ▓█ ▓██▒▒██░ ▓██░
░ ▒░ ▒ ▒ ▒ ▒▓▒ ▒ ░░ ░▒ ▒ ░ ▒▒ ▓▒█░░ ▒░ ▒ ▒
░ ░░ ░ ▒░░ ░▒ ░ ░ ░ ▒ ▒ ▒▒ ░░ ░░ ░ ▒░
░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░
░ ░ ░ ░ ░ ░ ░
░
\033[1;92mBy Dxvistxr
\033[1;96mCreated For Scan Network
\033[1;92m[\033[1:94m*\033[1;92m] Github : \033[1;96mDxvistxr\033[00m
'''
def clear_func():
if 'Linux' not in platform.platform():
os.system('cls')
os.system('cls')
elif 'Windows' not in platform.platform():
os.system('clear')
os.system('clear')
def try_ping(host):
try:
if 'Linux' not in platform.platform():
req_ping = os.system('ping -n 1 %s > /dev/null' % (host))
if req_ping ==0:
get_hostname = socket.gethostbyaddr(host)
t = datetime.now().strftime('%H:%M:%S')
print('\033[1;92m(\033[1;96m%s\033[1;92m) \033[1;96m[\033[1;92mIP\033[1;96m] : \033[00m%s \t \033[1;92m[\033[1;96mHostname\033[1;92m] : \033[00m%s' % (t,host,get_hostname[0]))
else:
pass
elif 'Windows' not in platform.platform():
req_ping = os.system('ping -c 1 -b %s > /dev/null' % (host))
if req_ping ==0:
get_hostname = socket.gethostbyaddr(host)
t = datetime.now().strftime('%H:%M:%S')
print('\033[1;92m(\033[1;96m%s\033[1;92m) \033[1;96m[\033[1;92mIP\033[1;96m] : \033[00m%s \t \033[1;92m[\033[1;96mHostname\033[1;92m] : \033[00m%s' % (t,host,get_hostname[0]))
else:
pass
except Exception as error_req_host:
print('\033[1;91m[\033[1;94m*\033[1;91m] %s' % (error_req_host))
def start_scanner():
gtw = netifaces.gateways()
interface = gtw['default'][2][1]
gtw_ip = gtw['default'][2][0]
scanner_gtw_ip = gtw_ip[:10]
try:
clear_func()
print('\033[1;92m'+banner)
print('\t\033[1;92m[\033[1;94m*\033[1;92m] Gateway : %s' % (gtw_ip))
print('\t\033[1;92m[\033[1;94m*\033[1;92m] Interface : %s' % (interface))
i = 0
while i<256:
thread.start_new_thread(try_ping,(scanner_gtw_ip+str(i),))
time.sleep(0.1)
i = i+1
except Exception as error_start_scanner:
print(error_start_scanner)
if __name__ == '__main__':
clear_func()
clear_func()
print('\033[1;92m'+banner)
start_scanner() | zh | 0.156745 | #!/usr/bin/python2 #-*- coding:utf-8 -*- ███▄ █ ██████ ▄████▄ ▄▄▄ ███▄ █ ██ ▀█ █ ▒██ ▒ ▒██▀ ▀█ ▒████▄ ██ ▀█ █ ▓██ ▀█ ██▒░ ▓██▄ ▒▓█ ▄ ▒██ ▀█▄ ▓██ ▀█ ██▒ ▓██▒ ▐▌██▒ ▒ ██▒▒▓▓▄ ▄██▒░██▄▄▄▄██ ▓██▒ ▐▌██▒ ▒██░ ▓██░▒██████▒▒▒ ▓███▀ ░ ▓█ ▓██▒▒██░ ▓██░ ░ ▒░ ▒ ▒ ▒ ▒▓▒ ▒ ░░ ░▒ ▒ ░ ▒▒ ▓▒█░░ ▒░ ▒ ▒ ░ ░░ ░ ▒░░ ░▒ ░ ░ ░ ▒ ▒ ▒▒ ░░ ░░ ░ ▒░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \033[1;92mBy Dxvistxr \033[1;96mCreated For Scan Network \033[1;92m[\033[1:94m*\033[1;92m] Github : \033[1;96mDxvistxr\033[00m | 2.455667 | 2 |
testsuite/tests/apicast/parameters/apicast_path_routing/test_apicast_path_routing.py | dlaso99/3scale-tests | 5 | 6624394 | """Rewrite of spec/openshift_specs/path_based_routing_two_backends_spec.rb
When `APICAST_PATH_ROUTING` parameter is set to true, the gateway will use path-based routing
in addition to the default host-based routing.
The API request will be routed to the first service that has a matching mapping rule,
from the list of services for which the value of the Host header of the request matches the Public Base URL.
"""
from urllib.parse import urlparse
import pytest
from testsuite.echoed_request import EchoedRequest
from testsuite.capabilities import Capability
pytestmark = pytest.mark.required_capabilities(Capability.STANDARD_GATEWAY, Capability.CUSTOM_ENVIRONMENT)
@pytest.fixture(scope="module")
def gateway_environment(gateway_environment):
"""Enables path routing on gateway"""
gateway_environment.update({"APICAST_PATH_ROUTING": True})
return gateway_environment
def test_get_route_request_returns_ok(client, private_base_url):
"""Call to mapping /get returns 200 OK."""
response = client.get("/get")
echoed = EchoedRequest.create(response)
assert response.status_code == 200
assert echoed.headers["Host"] == urlparse(private_base_url()).hostname
def test_echo_route_request_returns_ok(client2, private_base_url):
"""Call to mapping /echo returns 200 OK."""
response = client2.get("/echo")
assert response.status_code == 200
echoed = EchoedRequest.create(response)
assert echoed.headers["Host"] == urlparse(private_base_url("echo_api")).hostname
def test_not_mapped_route_returns_not_found(application2, api_client):
"""Call to not mapped route /anything/blah returns 404 Not Found.
Path-based routing fails and it fallback to the default host-based routing.
"""
client = api_client(application2, disable_retry_status_list={404})
assert client.get("/anything/blah").status_code == 404
| """Rewrite of spec/openshift_specs/path_based_routing_two_backends_spec.rb
When `APICAST_PATH_ROUTING` parameter is set to true, the gateway will use path-based routing
in addition to the default host-based routing.
The API request will be routed to the first service that has a matching mapping rule,
from the list of services for which the value of the Host header of the request matches the Public Base URL.
"""
from urllib.parse import urlparse
import pytest
from testsuite.echoed_request import EchoedRequest
from testsuite.capabilities import Capability
pytestmark = pytest.mark.required_capabilities(Capability.STANDARD_GATEWAY, Capability.CUSTOM_ENVIRONMENT)
@pytest.fixture(scope="module")
def gateway_environment(gateway_environment):
"""Enables path routing on gateway"""
gateway_environment.update({"APICAST_PATH_ROUTING": True})
return gateway_environment
def test_get_route_request_returns_ok(client, private_base_url):
"""Call to mapping /get returns 200 OK."""
response = client.get("/get")
echoed = EchoedRequest.create(response)
assert response.status_code == 200
assert echoed.headers["Host"] == urlparse(private_base_url()).hostname
def test_echo_route_request_returns_ok(client2, private_base_url):
"""Call to mapping /echo returns 200 OK."""
response = client2.get("/echo")
assert response.status_code == 200
echoed = EchoedRequest.create(response)
assert echoed.headers["Host"] == urlparse(private_base_url("echo_api")).hostname
def test_not_mapped_route_returns_not_found(application2, api_client):
"""Call to not mapped route /anything/blah returns 404 Not Found.
Path-based routing fails and it fallback to the default host-based routing.
"""
client = api_client(application2, disable_retry_status_list={404})
assert client.get("/anything/blah").status_code == 404
| en | 0.857678 | Rewrite of spec/openshift_specs/path_based_routing_two_backends_spec.rb When `APICAST_PATH_ROUTING` parameter is set to true, the gateway will use path-based routing in addition to the default host-based routing. The API request will be routed to the first service that has a matching mapping rule, from the list of services for which the value of the Host header of the request matches the Public Base URL. Enables path routing on gateway Call to mapping /get returns 200 OK. Call to mapping /echo returns 200 OK. Call to not mapped route /anything/blah returns 404 Not Found. Path-based routing fails and it fallback to the default host-based routing. | 2.622244 | 3 |
tests/test_cloudwatch/test_cloudwatch_boto3.py | akingscote/moto | 5,460 | 6624395 | import boto3
import pytest
import pytz
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from dateutil.tz import tzutc
from decimal import Decimal
from freezegun import freeze_time
from operator import itemgetter
from uuid import uuid4
from moto import mock_cloudwatch, mock_s3
from moto.core import ACCOUNT_ID
@mock_cloudwatch
def test_put_metric_data_no_dimensions():
conn = boto3.client("cloudwatch", region_name="us-east-1")
conn.put_metric_data(
Namespace="tester", MetricData=[dict(MetricName="metric", Value=1.5)]
)
metrics = conn.list_metrics()["Metrics"]
metrics.should.have.length_of(1)
metric = metrics[0]
metric["Namespace"].should.equal("tester")
metric["MetricName"].should.equal("metric")
@mock_cloudwatch
def test_put_metric_data_can_not_have_nan():
client = boto3.client("cloudwatch", region_name="us-west-2")
utc_now = datetime.now(tz=pytz.utc)
with pytest.raises(ClientError) as exc:
client.put_metric_data(
Namespace="mynamespace",
MetricData=[
{
"MetricName": "mymetric",
"Timestamp": utc_now,
"Value": Decimal("NaN"),
"Unit": "Count",
}
],
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValue")
err["Message"].should.equal(
"The value NaN for parameter MetricData.member.1.Value is invalid."
)
@mock_cloudwatch
def test_put_metric_data_with_statistics():
conn = boto3.client("cloudwatch", region_name="us-east-1")
utc_now = datetime.now(tz=pytz.utc)
conn.put_metric_data(
Namespace="tester",
MetricData=[
dict(
MetricName="statmetric",
Timestamp=utc_now,
# no Value to test https://github.com/spulec/moto/issues/1615
StatisticValues=dict(
SampleCount=123.0, Sum=123.0, Minimum=123.0, Maximum=123.0
),
Unit="Milliseconds",
StorageResolution=123,
)
],
)
metrics = conn.list_metrics()["Metrics"]
metrics.should.have.length_of(1)
metric = metrics[0]
metric["Namespace"].should.equal("tester")
metric["MetricName"].should.equal("statmetric")
# TODO: test statistics - https://github.com/spulec/moto/issues/1615
@mock_cloudwatch
def test_get_metric_statistics():
conn = boto3.client("cloudwatch", region_name="us-east-1")
utc_now = datetime.now(tz=pytz.utc)
conn.put_metric_data(
Namespace="tester",
MetricData=[dict(MetricName="metric", Value=1.5, Timestamp=utc_now)],
)
stats = conn.get_metric_statistics(
Namespace="tester",
MetricName="metric",
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
Period=60,
Statistics=["SampleCount", "Sum"],
)
stats["Datapoints"].should.have.length_of(1)
datapoint = stats["Datapoints"][0]
datapoint["SampleCount"].should.equal(1.0)
datapoint["Sum"].should.equal(1.5)
@mock_cloudwatch
def test_get_metric_statistics_dimensions():
conn = boto3.client("cloudwatch", region_name="us-east-1")
utc_now = datetime.now(tz=pytz.utc)
# put metric data with different dimensions
dimensions1 = [{"Name": "dim1", "Value": "v1"}]
dimensions2 = dimensions1 + [{"Name": "dim2", "Value": "v2"}]
metric_name = "metr-stats-dims"
conn.put_metric_data(
Namespace="tester",
MetricData=[
dict(
MetricName=metric_name,
Value=1,
Timestamp=utc_now,
Dimensions=dimensions1,
)
],
)
conn.put_metric_data(
Namespace="tester",
MetricData=[
dict(
MetricName=metric_name,
Value=2,
Timestamp=utc_now,
Dimensions=dimensions1,
)
],
)
conn.put_metric_data(
Namespace="tester",
MetricData=[
dict(
MetricName=metric_name,
Value=6,
Timestamp=utc_now,
Dimensions=dimensions2,
)
],
)
# list of (<kwargs>, <expectedSum>, <expectedAverage>)
params_list = (
# get metric stats with no restriction on dimensions
({}, 9, 3),
# get metric stats for dimensions1 (should also cover dimensions2)
({"Dimensions": dimensions1}, 9, 3),
# get metric stats for dimensions2 only
({"Dimensions": dimensions2}, 6, 6),
)
for params in params_list:
stats = conn.get_metric_statistics(
Namespace="tester",
MetricName=metric_name,
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
Period=60,
Statistics=["Average", "Sum"],
**params[0],
)
print(stats)
stats["Datapoints"].should.have.length_of(1)
datapoint = stats["Datapoints"][0]
datapoint["Sum"].should.equal(params[1])
datapoint["Average"].should.equal(params[2])
@mock_cloudwatch
def test_duplicate_put_metric_data():
conn = boto3.client("cloudwatch", region_name="us-east-1")
utc_now = datetime.now(tz=pytz.utc)
conn.put_metric_data(
Namespace="tester",
MetricData=[
dict(
MetricName="metric",
Dimensions=[{"Name": "Name", "Value": "B"}],
Value=1.5,
Timestamp=utc_now,
)
],
)
result = conn.list_metrics(
Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}]
)["Metrics"]
len(result).should.equal(1)
conn.put_metric_data(
Namespace="tester",
MetricData=[
dict(
MetricName="metric",
Dimensions=[{"Name": "Name", "Value": "B"}],
Value=1.5,
Timestamp=utc_now,
)
],
)
result = conn.list_metrics(
Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}]
)["Metrics"]
len(result).should.equal(1)
result.should.equal(
[
{
"Namespace": "tester",
"MetricName": "metric",
"Dimensions": [{"Name": "Name", "Value": "B"}],
}
]
)
conn.put_metric_data(
Namespace="tester",
MetricData=[
dict(
MetricName="metric",
Dimensions=[
{"Name": "Name", "Value": "B"},
{"Name": "Name", "Value": "C"},
],
Value=1.5,
Timestamp=utc_now,
)
],
)
result = conn.list_metrics(
Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}]
)["Metrics"]
result.should.equal(
[
{
"Namespace": "tester",
"MetricName": "metric",
"Dimensions": [{"Name": "Name", "Value": "B"}],
},
{
"Namespace": "tester",
"MetricName": "metric",
"Dimensions": [
{"Name": "Name", "Value": "B"},
{"Name": "Name", "Value": "C"},
],
},
]
)
result = conn.list_metrics(
Namespace="tester", Dimensions=[{"Name": "Name", "Value": "C"}]
)["Metrics"]
result.should.equal(
[
{
"Namespace": "tester",
"MetricName": "metric",
"Dimensions": [
{"Name": "Name", "Value": "B"},
{"Name": "Name", "Value": "C"},
],
}
]
)
@mock_cloudwatch
@freeze_time("2020-02-10 18:44:05")
def test_custom_timestamp():
utc_now = datetime.now(tz=pytz.utc)
time = "2020-02-10T18:44:09Z"
cw = boto3.client("cloudwatch", "eu-west-1")
cw.put_metric_data(
Namespace="tester",
MetricData=[dict(MetricName="metric1", Value=1.5, Timestamp=time)],
)
cw.put_metric_data(
Namespace="tester",
MetricData=[
dict(MetricName="metric2", Value=1.5, Timestamp=datetime(2020, 2, 10))
],
)
cw.get_metric_statistics(
Namespace="tester",
MetricName="metric",
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
Period=60,
Statistics=["SampleCount", "Sum"],
)
# TODO: What are we actually testing here?
@mock_cloudwatch
def test_list_metrics():
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
# Verify namespace has to exist
res = cloudwatch.list_metrics(Namespace="unknown/")["Metrics"]
res.should.be.empty
# Create some metrics to filter on
create_metrics(cloudwatch, namespace="list_test_1/", metrics=4, data_points=2)
create_metrics(cloudwatch, namespace="list_test_2/", metrics=4, data_points=2)
# Verify we can retrieve everything
res = cloudwatch.list_metrics()["Metrics"]
len(res).should.equal(16) # 2 namespaces * 4 metrics * 2 data points
# Verify we can filter by namespace/metric name
res = cloudwatch.list_metrics(Namespace="list_test_1/")["Metrics"]
len(res).should.equal(8) # 1 namespace * 4 metrics * 2 data points
res = cloudwatch.list_metrics(Namespace="list_test_1/", MetricName="metric1")[
"Metrics"
]
len(res).should.equal(2) # 1 namespace * 1 metrics * 2 data points
# Verify format
res.should.equal(
[
{"Namespace": "list_test_1/", "Dimensions": [], "MetricName": "metric1",},
{"Namespace": "list_test_1/", "Dimensions": [], "MetricName": "metric1",},
]
)
# Verify unknown namespace still has no results
res = cloudwatch.list_metrics(Namespace="unknown/")["Metrics"]
res.should.be.empty
@mock_cloudwatch
def test_list_metrics_paginated():
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
# Verify that only a single page of metrics is returned
cloudwatch.list_metrics().shouldnt.have.key("NextToken")
# Verify we can't pass a random NextToken
with pytest.raises(ClientError) as e:
cloudwatch.list_metrics(NextToken=str(uuid4()))
e.value.response["Error"]["Message"].should.equal(
"Request parameter NextToken is invalid"
)
# Add a boatload of metrics
create_metrics(cloudwatch, namespace="test", metrics=100, data_points=1)
# Verify that a single page is returned until we've reached 500
first_page = cloudwatch.list_metrics()
first_page["Metrics"].shouldnt.be.empty
len(first_page["Metrics"]).should.equal(100)
create_metrics(cloudwatch, namespace="test", metrics=200, data_points=2)
first_page = cloudwatch.list_metrics()
len(first_page["Metrics"]).should.equal(500)
first_page.shouldnt.contain("NextToken")
# Verify that adding more data points results in pagination
create_metrics(cloudwatch, namespace="test", metrics=60, data_points=10)
first_page = cloudwatch.list_metrics()
len(first_page["Metrics"]).should.equal(500)
first_page["NextToken"].shouldnt.be.empty
# Retrieve second page - and verify there's more where that came from
second_page = cloudwatch.list_metrics(NextToken=first_page["NextToken"])
len(second_page["Metrics"]).should.equal(500)
second_page.should.contain("NextToken")
# Last page should only have the last 100 results, and no NextToken (indicating that pagination is finished)
third_page = cloudwatch.list_metrics(NextToken=second_page["NextToken"])
len(third_page["Metrics"]).should.equal(100)
third_page.shouldnt.contain("NextToken")
# Verify that we can't reuse an existing token
with pytest.raises(ClientError) as e:
cloudwatch.list_metrics(NextToken=first_page["NextToken"])
e.value.response["Error"]["Message"].should.equal(
"Request parameter NextToken is invalid"
)
@mock_cloudwatch
def test_list_metrics_without_value():
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
# Create some metrics to filter on
create_metrics_with_dimensions(cloudwatch, namespace="MyNamespace", data_points=3)
# Verify we can filter by namespace/metric name
res = cloudwatch.list_metrics(Namespace="MyNamespace")["Metrics"]
res.should.have.length_of(3)
# Verify we can filter by Dimension without value
results = cloudwatch.list_metrics(
Namespace="MyNamespace", MetricName="MyMetric", Dimensions=[{"Name": "D1"}]
)["Metrics"]
results.should.have.length_of(1)
results[0]["Namespace"].should.equals("MyNamespace")
results[0]["MetricName"].should.equal("MyMetric")
results[0]["Dimensions"].should.equal([{"Name": "D1", "Value": "V1"}])
def create_metrics(cloudwatch, namespace, metrics=5, data_points=5):
for i in range(0, metrics):
metric_name = "metric" + str(i)
for j in range(0, data_points):
cloudwatch.put_metric_data(
Namespace=namespace,
MetricData=[{"MetricName": metric_name, "Value": j, "Unit": "Seconds"}],
)
def create_metrics_with_dimensions(cloudwatch, namespace, data_points=5):
for j in range(0, data_points):
cloudwatch.put_metric_data(
Namespace=namespace,
MetricData=[
{
"MetricName": "MyMetric",
"Dimensions": [{"Name": f"D{j}", "Value": f"V{j}"}],
"Unit": "Seconds",
}
],
)
@mock_cloudwatch
def test_get_metric_data_within_timeframe():
utc_now = datetime.now(tz=pytz.utc)
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
namespace1 = "my_namespace/"
# put metric data
values = [0, 2, 4, 3.5, 7, 100]
cloudwatch.put_metric_data(
Namespace=namespace1,
MetricData=[
{"MetricName": "metric1", "Value": val, "Unit": "Seconds"} for val in values
],
)
# get_metric_data
stats = ["Average", "Sum", "Minimum", "Maximum"]
response = cloudwatch.get_metric_data(
MetricDataQueries=[
{
"Id": "result_" + stat,
"MetricStat": {
"Metric": {"Namespace": namespace1, "MetricName": "metric1"},
"Period": 60,
"Stat": stat,
},
}
for stat in stats
],
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
)
#
# Assert Average/Min/Max/Sum is returned as expected
avg = [
res for res in response["MetricDataResults"] if res["Id"] == "result_Average"
][0]
avg["Label"].should.equal("metric1 Average")
avg["StatusCode"].should.equal("Complete")
[int(val) for val in avg["Values"]].should.equal([19])
sum_ = [res for res in response["MetricDataResults"] if res["Id"] == "result_Sum"][
0
]
sum_["Label"].should.equal("metric1 Sum")
sum_["StatusCode"].should.equal("Complete")
[val for val in sum_["Values"]].should.equal([sum(values)])
min_ = [
res for res in response["MetricDataResults"] if res["Id"] == "result_Minimum"
][0]
min_["Label"].should.equal("metric1 Minimum")
min_["StatusCode"].should.equal("Complete")
[int(val) for val in min_["Values"]].should.equal([0])
max_ = [
res for res in response["MetricDataResults"] if res["Id"] == "result_Maximum"
][0]
max_["Label"].should.equal("metric1 Maximum")
max_["StatusCode"].should.equal("Complete")
[int(val) for val in max_["Values"]].should.equal([100])
@mock_cloudwatch
def test_get_metric_data_partially_within_timeframe():
utc_now = datetime.now(tz=pytz.utc)
yesterday = utc_now - timedelta(days=1)
last_week = utc_now - timedelta(days=7)
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
namespace1 = "my_namespace/"
# put metric data
cloudwatch.put_metric_data(
Namespace=namespace1,
MetricData=[
{
"MetricName": "metric1",
"Value": 10,
"Unit": "Seconds",
"Timestamp": utc_now,
}
],
)
cloudwatch.put_metric_data(
Namespace=namespace1,
MetricData=[
{
"MetricName": "metric1",
"Value": 20,
"Unit": "Seconds",
"Timestamp": yesterday,
}
],
)
cloudwatch.put_metric_data(
Namespace=namespace1,
MetricData=[
{
"MetricName": "metric1",
"Value": 50,
"Unit": "Seconds",
"Timestamp": last_week,
},
{
"MetricName": "metric1",
"Value": 10,
"Unit": "Seconds",
"Timestamp": last_week + timedelta(seconds=10),
},
{
"MetricName": "metric1",
"Value": 20,
"Unit": "Seconds",
"Timestamp": last_week + timedelta(seconds=15),
},
{
"MetricName": "metric1",
"Value": 40,
"Unit": "Seconds",
"Timestamp": last_week + timedelta(seconds=30),
},
],
)
# data for average, min, max
def get_data(start, end, stat="Sum", scanBy="TimestampAscending"):
# get_metric_data
response = cloudwatch.get_metric_data(
MetricDataQueries=[
{
"Id": "result",
"MetricStat": {
"Metric": {"Namespace": namespace1, "MetricName": "metric1"},
"Period": 60,
"Stat": stat,
},
}
],
StartTime=start,
EndTime=end,
ScanBy=scanBy,
)
return response
response = get_data(
start=yesterday - timedelta(seconds=60), end=utc_now + timedelta(seconds=60),
)
# Assert Last week's data is not returned
len(response["MetricDataResults"]).should.equal(1)
sum_ = response["MetricDataResults"][0]
sum_["Label"].should.equal("metric1 Sum")
sum_["StatusCode"].should.equal("Complete")
sum_["Values"].should.equal([20.0, 10.0])
response = get_data(
start=yesterday - timedelta(seconds=60),
end=utc_now + timedelta(seconds=60),
scanBy="TimestampDescending",
)
response["MetricDataResults"][0]["Values"].should.equal([10.0, 20.0])
response = get_data(
start=last_week - timedelta(seconds=1),
end=utc_now + timedelta(seconds=60),
stat="Average",
)
# assert average
response["MetricDataResults"][0]["Values"].should.equal([30.0, 20.0, 10.0])
response = get_data(
start=last_week - timedelta(seconds=1),
end=utc_now + timedelta(seconds=60),
stat="Maximum",
)
# assert maximum
response["MetricDataResults"][0]["Values"].should.equal([50.0, 20.0, 10.0])
response = get_data(
start=last_week - timedelta(seconds=1),
end=utc_now + timedelta(seconds=60),
stat="Minimum",
)
# assert minimum
response["MetricDataResults"][0]["Values"].should.equal([10.0, 20.0, 10.0])
@mock_cloudwatch
def test_get_metric_data_outside_timeframe():
utc_now = datetime.now(tz=pytz.utc)
last_week = utc_now - timedelta(days=7)
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
namespace1 = "my_namespace/"
# put metric data
cloudwatch.put_metric_data(
Namespace=namespace1,
MetricData=[
{
"MetricName": "metric1",
"Value": 50,
"Unit": "Seconds",
"Timestamp": last_week,
}
],
)
# get_metric_data
response = cloudwatch.get_metric_data(
MetricDataQueries=[
{
"Id": "result",
"MetricStat": {
"Metric": {"Namespace": namespace1, "MetricName": "metric1"},
"Period": 60,
"Stat": "Sum",
},
}
],
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
)
#
# Assert Last week's data is not returned
len(response["MetricDataResults"]).should.equal(1)
response["MetricDataResults"][0]["Id"].should.equal("result")
response["MetricDataResults"][0]["StatusCode"].should.equal("Complete")
response["MetricDataResults"][0]["Values"].should.equal([])
@mock_cloudwatch
def test_get_metric_data_for_multiple_metrics():
utc_now = datetime.now(tz=pytz.utc)
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
namespace = "my_namespace/"
# put metric data
cloudwatch.put_metric_data(
Namespace=namespace,
MetricData=[
{
"MetricName": "metric1",
"Value": 50,
"Unit": "Seconds",
"Timestamp": utc_now,
}
],
)
cloudwatch.put_metric_data(
Namespace=namespace,
MetricData=[
{
"MetricName": "metric2",
"Value": 25,
"Unit": "Seconds",
"Timestamp": utc_now,
}
],
)
# get_metric_data
response = cloudwatch.get_metric_data(
MetricDataQueries=[
{
"Id": "result1",
"MetricStat": {
"Metric": {"Namespace": namespace, "MetricName": "metric1"},
"Period": 60,
"Stat": "Sum",
},
},
{
"Id": "result2",
"MetricStat": {
"Metric": {"Namespace": namespace, "MetricName": "metric2"},
"Period": 60,
"Stat": "Sum",
},
},
],
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
)
#
len(response["MetricDataResults"]).should.equal(2)
res1 = [res for res in response["MetricDataResults"] if res["Id"] == "result1"][0]
res1["Values"].should.equal([50.0])
res2 = [res for res in response["MetricDataResults"] if res["Id"] == "result2"][0]
res2["Values"].should.equal([25.0])
@mock_cloudwatch
@mock_s3
def test_cloudwatch_return_s3_metrics():
utc_now = datetime.now(tz=pytz.utc)
bucket_name = "examplebucket"
cloudwatch = boto3.client("cloudwatch", "eu-west-3")
# given
s3 = boto3.resource("s3")
s3_client = boto3.client("s3")
bucket = s3.Bucket(bucket_name)
bucket.create(CreateBucketConfiguration={"LocationConstraint": "eu-west-3"})
bucket.put_object(Body=b"ABCD", Key="file.txt")
# when
metrics = cloudwatch.list_metrics(
Dimensions=[{"Name": "BucketName", "Value": bucket_name}]
)["Metrics"]
# then
metrics.should.have.length_of(2)
metrics.should.contain(
{
"Namespace": "AWS/S3",
"MetricName": "NumberOfObjects",
"Dimensions": [
{"Name": "StorageType", "Value": "AllStorageTypes"},
{"Name": "BucketName", "Value": bucket_name},
],
}
)
metrics.should.contain(
{
"Namespace": "AWS/S3",
"MetricName": "BucketSizeBytes",
"Dimensions": [
{"Name": "StorageType", "Value": "StandardStorage"},
{"Name": "BucketName", "Value": bucket_name},
],
}
)
# when
stats = cloudwatch.get_metric_statistics(
Namespace="AWS/S3",
MetricName="BucketSizeBytes",
Dimensions=[
{"Name": "BucketName", "Value": bucket_name},
{"Name": "StorageType", "Value": "StandardStorage"},
],
StartTime=utc_now - timedelta(days=2),
EndTime=utc_now,
Period=86400,
Statistics=["Average"],
Unit="Bytes",
)
# then
stats.should.have.key("Label").equal("BucketSizeBytes")
stats.should.have.key("Datapoints").length_of(1)
data_point = stats["Datapoints"][0]
data_point.should.have.key("Average").being.above(0)
data_point.should.have.key("Unit").being.equal("Bytes")
# when
stats = cloudwatch.get_metric_statistics(
Namespace="AWS/S3",
MetricName="NumberOfObjects",
Dimensions=[
{"Name": "BucketName", "Value": bucket_name},
{"Name": "StorageType", "Value": "AllStorageTypes"},
],
StartTime=utc_now - timedelta(days=2),
EndTime=utc_now,
Period=86400,
Statistics=["Average"],
)
# then
stats.should.have.key("Label").equal("NumberOfObjects")
stats.should.have.key("Datapoints").length_of(1)
data_point = stats["Datapoints"][0]
data_point.should.have.key("Average").being.equal(1)
data_point.should.have.key("Unit").being.equal("Count")
s3_client.delete_object(Bucket=bucket_name, Key="file.txt")
s3_client.delete_bucket(Bucket=bucket_name)
@mock_cloudwatch
def test_put_metric_alarm():
# given
region_name = "eu-central-1"
client = boto3.client("cloudwatch", region_name=region_name)
alarm_name = "test-alarm"
sns_topic_arn = f"arn:aws:sns:${region_name}:${ACCOUNT_ID}:test-topic"
# when
client.put_metric_alarm(
AlarmName=alarm_name,
AlarmDescription="test alarm",
ActionsEnabled=True,
OKActions=[sns_topic_arn],
AlarmActions=[sns_topic_arn],
InsufficientDataActions=[sns_topic_arn],
MetricName="5XXError",
Namespace="AWS/ApiGateway",
Statistic="Sum",
Dimensions=[
{"Name": "ApiName", "Value": "test-api"},
{"Name": "Stage", "Value": "default"},
],
Period=60,
Unit="Seconds",
EvaluationPeriods=1,
DatapointsToAlarm=1,
Threshold=1.0,
ComparisonOperator="GreaterThanOrEqualToThreshold",
TreatMissingData="notBreaching",
Tags=[{"Key": "key-1", "Value": "value-1"}],
)
# then
alarms = client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"]
alarms.should.have.length_of(1)
alarm = alarms[0]
alarm["AlarmName"].should.equal(alarm_name)
alarm["AlarmArn"].should.equal(
f"arn:aws:cloudwatch:{region_name}:{ACCOUNT_ID}:alarm:{alarm_name}"
)
alarm["AlarmDescription"].should.equal("test alarm")
alarm["AlarmConfigurationUpdatedTimestamp"].should.be.a(datetime)
alarm["AlarmConfigurationUpdatedTimestamp"].tzinfo.should.equal(tzutc())
alarm["ActionsEnabled"].should.be.ok
alarm["OKActions"].should.equal([sns_topic_arn])
alarm["AlarmActions"].should.equal([sns_topic_arn])
alarm["InsufficientDataActions"].should.equal([sns_topic_arn])
alarm["StateValue"].should.equal("OK")
alarm["StateReason"].should.equal("Unchecked: Initial alarm creation")
alarm["StateUpdatedTimestamp"].should.be.a(datetime)
alarm["StateUpdatedTimestamp"].tzinfo.should.equal(tzutc())
alarm["MetricName"].should.equal("5XXError")
alarm["Namespace"].should.equal("AWS/ApiGateway")
alarm["Statistic"].should.equal("Sum")
sorted(alarm["Dimensions"], key=itemgetter("Name")).should.equal(
sorted(
[
{"Name": "ApiName", "Value": "test-api"},
{"Name": "Stage", "Value": "default"},
],
key=itemgetter("Name"),
)
)
alarm["Period"].should.equal(60)
alarm["Unit"].should.equal("Seconds")
alarm["EvaluationPeriods"].should.equal(1)
alarm["DatapointsToAlarm"].should.equal(1)
alarm["Threshold"].should.equal(1.0)
alarm["ComparisonOperator"].should.equal("GreaterThanOrEqualToThreshold")
alarm["TreatMissingData"].should.equal("notBreaching")
@mock_cloudwatch
def test_put_metric_alarm_with_percentile():
# given
region_name = "eu-central-1"
client = boto3.client("cloudwatch", region_name=region_name)
alarm_name = "test-alarm"
# when
client.put_metric_alarm(
AlarmName=alarm_name,
AlarmDescription="test alarm",
ActionsEnabled=True,
MetricName="5XXError",
Namespace="AWS/ApiGateway",
ExtendedStatistic="p90",
Dimensions=[
{"Name": "ApiName", "Value": "test-api"},
{"Name": "Stage", "Value": "default"},
],
Period=60,
Unit="Seconds",
EvaluationPeriods=1,
DatapointsToAlarm=1,
Threshold=1.0,
ComparisonOperator="GreaterThanOrEqualToThreshold",
TreatMissingData="notBreaching",
EvaluateLowSampleCountPercentile="ignore",
)
# then
alarms = client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"]
alarms.should.have.length_of(1)
alarm = alarms[0]
alarm["AlarmName"].should.equal(alarm_name)
alarm["AlarmArn"].should.equal(
f"arn:aws:cloudwatch:{region_name}:{ACCOUNT_ID}:alarm:{alarm_name}"
)
alarm["AlarmDescription"].should.equal("test alarm")
alarm["AlarmConfigurationUpdatedTimestamp"].should.be.a(datetime)
alarm["AlarmConfigurationUpdatedTimestamp"].tzinfo.should.equal(tzutc())
alarm["ActionsEnabled"].should.be.ok
alarm["StateValue"].should.equal("OK")
alarm["StateReason"].should.equal("Unchecked: Initial alarm creation")
alarm["StateUpdatedTimestamp"].should.be.a(datetime)
alarm["StateUpdatedTimestamp"].tzinfo.should.equal(tzutc())
alarm["MetricName"].should.equal("5XXError")
alarm["Namespace"].should.equal("AWS/ApiGateway")
alarm["ExtendedStatistic"].should.equal("p90")
sorted(alarm["Dimensions"], key=itemgetter("Name")).should.equal(
sorted(
[
{"Name": "ApiName", "Value": "test-api"},
{"Name": "Stage", "Value": "default"},
],
key=itemgetter("Name"),
)
)
alarm["Period"].should.equal(60)
alarm["Unit"].should.equal("Seconds")
alarm["EvaluationPeriods"].should.equal(1)
alarm["DatapointsToAlarm"].should.equal(1)
alarm["Threshold"].should.equal(1.0)
alarm["ComparisonOperator"].should.equal("GreaterThanOrEqualToThreshold")
alarm["TreatMissingData"].should.equal("notBreaching")
alarm["EvaluateLowSampleCountPercentile"].should.equal("ignore")
@mock_cloudwatch
def test_put_metric_alarm_with_anomaly_detection():
# given
region_name = "eu-central-1"
client = boto3.client("cloudwatch", region_name=region_name)
alarm_name = "test-alarm"
metrics = [
{
"Id": "m1",
"ReturnData": True,
"MetricStat": {
"Metric": {
"MetricName": "CPUUtilization",
"Namespace": "AWS/EC2",
"Dimensions": [
{"Name": "instanceId", "Value": "i-1234567890abcdef0"}
],
},
"Stat": "Average",
"Period": 60,
},
},
{
"Id": "t1",
"ReturnData": False,
"Expression": "ANOMALY_DETECTION_BAND(m1, 3)",
},
]
# when
client.put_metric_alarm(
AlarmName=alarm_name,
ActionsEnabled=True,
Metrics=metrics,
EvaluationPeriods=2,
ComparisonOperator="GreaterThanOrEqualToThreshold",
ThresholdMetricId="t1",
)
# then
alarms = client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"]
alarms.should.have.length_of(1)
alarm = alarms[0]
alarm["AlarmName"].should.equal(alarm_name)
alarm["AlarmArn"].should.equal(
f"arn:aws:cloudwatch:{region_name}:{ACCOUNT_ID}:alarm:{alarm_name}"
)
alarm["AlarmConfigurationUpdatedTimestamp"].should.be.a(datetime)
alarm["AlarmConfigurationUpdatedTimestamp"].tzinfo.should.equal(tzutc())
alarm["StateValue"].should.equal("OK")
alarm["StateReason"].should.equal("Unchecked: Initial alarm creation")
alarm["StateUpdatedTimestamp"].should.be.a(datetime)
alarm["StateUpdatedTimestamp"].tzinfo.should.equal(tzutc())
alarm["EvaluationPeriods"].should.equal(2)
alarm["ComparisonOperator"].should.equal("GreaterThanOrEqualToThreshold")
alarm["Metrics"].should.equal(metrics)
alarm["ThresholdMetricId"].should.equal("t1")
@mock_cloudwatch
def test_put_metric_alarm_error_extended_statistic():
# given
region_name = "eu-central-1"
client = boto3.client("cloudwatch", region_name=region_name)
alarm_name = "test-alarm"
# when
with pytest.raises(ClientError) as e:
client.put_metric_alarm(
AlarmName=alarm_name,
ActionsEnabled=True,
MetricName="5XXError",
Namespace="AWS/ApiGateway",
ExtendedStatistic="90",
Dimensions=[
{"Name": "ApiName", "Value": "test-api"},
{"Name": "Stage", "Value": "default"},
],
Period=60,
Unit="Seconds",
EvaluationPeriods=1,
DatapointsToAlarm=1,
Threshold=1.0,
ComparisonOperator="GreaterThanOrEqualToThreshold",
TreatMissingData="notBreaching",
)
# then
ex = e.value
ex.operation_name.should.equal("PutMetricAlarm")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("InvalidParameterValue")
ex.response["Error"]["Message"].should.equal(
"The value 90 for parameter ExtendedStatistic is not supported."
)
@mock_cloudwatch
def test_put_metric_alarm_error_evaluate_low_sample_count_percentile():
# given
region_name = "eu-central-1"
client = boto3.client("cloudwatch", region_name=region_name)
alarm_name = "test-alarm"
# when
with pytest.raises(ClientError) as e:
client.put_metric_alarm(
AlarmName=alarm_name,
ActionsEnabled=True,
MetricName="5XXError",
Namespace="AWS/ApiGateway",
ExtendedStatistic="p90",
Dimensions=[
{"Name": "ApiName", "Value": "test-api"},
{"Name": "Stage", "Value": "default"},
],
Period=60,
Unit="Seconds",
EvaluationPeriods=1,
DatapointsToAlarm=1,
Threshold=1.0,
ComparisonOperator="GreaterThanOrEqualToThreshold",
TreatMissingData="notBreaching",
EvaluateLowSampleCountPercentile="unknown",
)
# then
ex = e.value
ex.operation_name.should.equal("PutMetricAlarm")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ValidationError")
ex.response["Error"]["Message"].should.equal(
"Option unknown is not supported. "
"Supported options for parameter EvaluateLowSampleCountPercentile are evaluate and ignore."
)
| import boto3
import pytest
import pytz
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from dateutil.tz import tzutc
from decimal import Decimal
from freezegun import freeze_time
from operator import itemgetter
from uuid import uuid4
from moto import mock_cloudwatch, mock_s3
from moto.core import ACCOUNT_ID
@mock_cloudwatch
def test_put_metric_data_no_dimensions():
    """A datapoint published without dimensions shows up in list_metrics."""
    client = boto3.client("cloudwatch", region_name="us-east-1")
    client.put_metric_data(
        Namespace="tester",
        MetricData=[{"MetricName": "metric", "Value": 1.5}],
    )
    listed = client.list_metrics()["Metrics"]
    listed.should.have.length_of(1)
    first = listed[0]
    first["Namespace"].should.equal("tester")
    first["MetricName"].should.equal("metric")
@mock_cloudwatch
def test_put_metric_data_can_not_have_nan():
    """Publishing a NaN value is rejected with InvalidParameterValue."""
    client = boto3.client("cloudwatch", region_name="us-west-2")
    now = datetime.now(tz=pytz.utc)
    payload = [
        {
            "MetricName": "mymetric",
            "Timestamp": now,
            "Value": Decimal("NaN"),
            "Unit": "Count",
        }
    ]
    with pytest.raises(ClientError) as exc_info:
        client.put_metric_data(Namespace="mynamespace", MetricData=payload)
    error = exc_info.value.response["Error"]
    error["Code"].should.equal("InvalidParameterValue")
    error["Message"].should.equal(
        "The value NaN for parameter MetricData.member.1.Value is invalid."
    )
@mock_cloudwatch
def test_put_metric_data_with_statistics():
    """A StatisticValues-only datapoint (no Value) still registers the metric."""
    client = boto3.client("cloudwatch", region_name="us-east-1")
    now = datetime.now(tz=pytz.utc)
    stat_values = {
        "SampleCount": 123.0,
        "Sum": 123.0,
        "Minimum": 123.0,
        "Maximum": 123.0,
    }
    client.put_metric_data(
        Namespace="tester",
        MetricData=[
            {
                "MetricName": "statmetric",
                "Timestamp": now,
                # no Value to test https://github.com/spulec/moto/issues/1615
                "StatisticValues": stat_values,
                "Unit": "Milliseconds",
                "StorageResolution": 123,
            }
        ],
    )
    listed = client.list_metrics()["Metrics"]
    listed.should.have.length_of(1)
    listed[0]["Namespace"].should.equal("tester")
    listed[0]["MetricName"].should.equal("statmetric")
    # TODO: test statistics - https://github.com/spulec/moto/issues/1615
@mock_cloudwatch
def test_get_metric_statistics():
    """SampleCount/Sum statistics are computed for a single datapoint."""
    client = boto3.client("cloudwatch", region_name="us-east-1")
    now = datetime.now(tz=pytz.utc)
    client.put_metric_data(
        Namespace="tester",
        MetricData=[{"MetricName": "metric", "Value": 1.5, "Timestamp": now}],
    )
    window = timedelta(seconds=60)
    stats = client.get_metric_statistics(
        Namespace="tester",
        MetricName="metric",
        StartTime=now - window,
        EndTime=now + window,
        Period=60,
        Statistics=["SampleCount", "Sum"],
    )
    stats["Datapoints"].should.have.length_of(1)
    point = stats["Datapoints"][0]
    point["SampleCount"].should.equal(1.0)
    point["Sum"].should.equal(1.5)
@mock_cloudwatch
def test_get_metric_statistics_dimensions():
    """Statistics honour dimension filters.

    A filter on ``dimensions1`` also matches datapoints published with the
    superset ``dimensions2``, while a filter on ``dimensions2`` matches only
    its own datapoints.  (Fixed: removed a stray debug ``print`` left in the
    assertion loop.)
    """
    conn = boto3.client("cloudwatch", region_name="us-east-1")
    utc_now = datetime.now(tz=pytz.utc)
    # put metric data with different dimensions
    dimensions1 = [{"Name": "dim1", "Value": "v1"}]
    dimensions2 = dimensions1 + [{"Name": "dim2", "Value": "v2"}]
    metric_name = "metr-stats-dims"
    conn.put_metric_data(
        Namespace="tester",
        MetricData=[
            dict(
                MetricName=metric_name,
                Value=1,
                Timestamp=utc_now,
                Dimensions=dimensions1,
            )
        ],
    )
    conn.put_metric_data(
        Namespace="tester",
        MetricData=[
            dict(
                MetricName=metric_name,
                Value=2,
                Timestamp=utc_now,
                Dimensions=dimensions1,
            )
        ],
    )
    conn.put_metric_data(
        Namespace="tester",
        MetricData=[
            dict(
                MetricName=metric_name,
                Value=6,
                Timestamp=utc_now,
                Dimensions=dimensions2,
            )
        ],
    )
    # list of (<kwargs>, <expectedSum>, <expectedAverage>)
    params_list = (
        # get metric stats with no restriction on dimensions
        ({}, 9, 3),
        # get metric stats for dimensions1 (should also cover dimensions2)
        ({"Dimensions": dimensions1}, 9, 3),
        # get metric stats for dimensions2 only
        ({"Dimensions": dimensions2}, 6, 6),
    )
    for params in params_list:
        stats = conn.get_metric_statistics(
            Namespace="tester",
            MetricName=metric_name,
            StartTime=utc_now - timedelta(seconds=60),
            EndTime=utc_now + timedelta(seconds=60),
            Period=60,
            Statistics=["Average", "Sum"],
            **params[0],
        )
        stats["Datapoints"].should.have.length_of(1)
        datapoint = stats["Datapoints"][0]
        datapoint["Sum"].should.equal(params[1])
        datapoint["Average"].should.equal(params[2])
@mock_cloudwatch
def test_duplicate_put_metric_data():
    """Re-publishing a datapoint with identical dimensions must not create a
    duplicate metric entry; a different dimension set creates a new one."""
    conn = boto3.client("cloudwatch", region_name="us-east-1")
    utc_now = datetime.now(tz=pytz.utc)
    conn.put_metric_data(
        Namespace="tester",
        MetricData=[
            dict(
                MetricName="metric",
                Dimensions=[{"Name": "Name", "Value": "B"}],
                Value=1.5,
                Timestamp=utc_now,
            )
        ],
    )
    result = conn.list_metrics(
        Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}]
    )["Metrics"]
    len(result).should.equal(1)
    # same metric + same dimensions again -> still exactly one metric listed
    conn.put_metric_data(
        Namespace="tester",
        MetricData=[
            dict(
                MetricName="metric",
                Dimensions=[{"Name": "Name", "Value": "B"}],
                Value=1.5,
                Timestamp=utc_now,
            )
        ],
    )
    result = conn.list_metrics(
        Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}]
    )["Metrics"]
    len(result).should.equal(1)
    result.should.equal(
        [
            {
                "Namespace": "tester",
                "MetricName": "metric",
                "Dimensions": [{"Name": "Name", "Value": "B"}],
            }
        ]
    )
    # same metric name but an extra dimension -> a second, distinct metric
    conn.put_metric_data(
        Namespace="tester",
        MetricData=[
            dict(
                MetricName="metric",
                Dimensions=[
                    {"Name": "Name", "Value": "B"},
                    {"Name": "Name", "Value": "C"},
                ],
                Value=1.5,
                Timestamp=utc_now,
            )
        ],
    )
    # filtering on the "B" dimension matches both metrics (subset match)
    result = conn.list_metrics(
        Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}]
    )["Metrics"]
    result.should.equal(
        [
            {
                "Namespace": "tester",
                "MetricName": "metric",
                "Dimensions": [{"Name": "Name", "Value": "B"}],
            },
            {
                "Namespace": "tester",
                "MetricName": "metric",
                "Dimensions": [
                    {"Name": "Name", "Value": "B"},
                    {"Name": "Name", "Value": "C"},
                ],
            },
        ]
    )
    # filtering on the "C" dimension matches only the two-dimension metric
    result = conn.list_metrics(
        Namespace="tester", Dimensions=[{"Name": "Name", "Value": "C"}]
    )["Metrics"]
    result.should.equal(
        [
            {
                "Namespace": "tester",
                "MetricName": "metric",
                "Dimensions": [
                    {"Name": "Name", "Value": "B"},
                    {"Name": "Name", "Value": "C"},
                ],
            }
        ]
    )
@mock_cloudwatch
@freeze_time("2020-02-10 18:44:05")
def test_custom_timestamp():
    """Datapoints may carry an explicit Timestamp, either as an ISO-8601
    string or as a naive datetime, without the put call raising."""
    utc_now = datetime.now(tz=pytz.utc)
    time = "2020-02-10T18:44:09Z"
    cw = boto3.client("cloudwatch", "eu-west-1")
    cw.put_metric_data(
        Namespace="tester",
        MetricData=[dict(MetricName="metric1", Value=1.5, Timestamp=time)],
    )
    cw.put_metric_data(
        Namespace="tester",
        MetricData=[
            dict(MetricName="metric2", Value=1.5, Timestamp=datetime(2020, 2, 10))
        ],
    )
    # NOTE(review): this queries "metric" (never published above) and ignores
    # the response, so it only proves the call does not raise; consider
    # asserting on the returned datapoints.
    cw.get_metric_statistics(
        Namespace="tester",
        MetricName="metric",
        StartTime=utc_now - timedelta(seconds=60),
        EndTime=utc_now + timedelta(seconds=60),
        Period=60,
        Statistics=["SampleCount", "Sum"],
    )
    # TODO: What are we actually testing here?
@mock_cloudwatch
def test_list_metrics():
    """list_metrics returns one entry per published datapoint and can be
    filtered by Namespace and MetricName."""
    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
    # Verify namespace has to exist
    res = cloudwatch.list_metrics(Namespace="unknown/")["Metrics"]
    res.should.be.empty
    # Create some metrics to filter on
    create_metrics(cloudwatch, namespace="list_test_1/", metrics=4, data_points=2)
    create_metrics(cloudwatch, namespace="list_test_2/", metrics=4, data_points=2)
    # Verify we can retrieve everything
    res = cloudwatch.list_metrics()["Metrics"]
    len(res).should.equal(16)  # 2 namespaces * 4 metrics * 2 data points
    # Verify we can filter by namespace/metric name
    res = cloudwatch.list_metrics(Namespace="list_test_1/")["Metrics"]
    len(res).should.equal(8)  # 1 namespace * 4 metrics * 2 data points
    res = cloudwatch.list_metrics(Namespace="list_test_1/", MetricName="metric1")[
        "Metrics"
    ]
    len(res).should.equal(2)  # 1 namespace * 1 metrics * 2 data points
    # Verify format
    res.should.equal(
        [
            {"Namespace": "list_test_1/", "Dimensions": [], "MetricName": "metric1",},
            {"Namespace": "list_test_1/", "Dimensions": [], "MetricName": "metric1",},
        ]
    )
    # Verify unknown namespace still has no results
    res = cloudwatch.list_metrics(Namespace="unknown/")["Metrics"]
    res.should.be.empty
@mock_cloudwatch
def test_list_metrics_paginated():
    """list_metrics pages at 500 entries; NextToken is single-use and an
    unknown/reused token is rejected."""
    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
    # Verify that only a single page of metrics is returned
    cloudwatch.list_metrics().shouldnt.have.key("NextToken")
    # Verify we can't pass a random NextToken
    with pytest.raises(ClientError) as e:
        cloudwatch.list_metrics(NextToken=str(uuid4()))
    e.value.response["Error"]["Message"].should.equal(
        "Request parameter NextToken is invalid"
    )
    # Add a boatload of metrics
    create_metrics(cloudwatch, namespace="test", metrics=100, data_points=1)
    # Verify that a single page is returned until we've reached 500
    first_page = cloudwatch.list_metrics()
    first_page["Metrics"].shouldnt.be.empty
    len(first_page["Metrics"]).should.equal(100)
    create_metrics(cloudwatch, namespace="test", metrics=200, data_points=2)
    first_page = cloudwatch.list_metrics()
    len(first_page["Metrics"]).should.equal(500)
    first_page.shouldnt.contain("NextToken")
    # Verify that adding more data points results in pagination
    create_metrics(cloudwatch, namespace="test", metrics=60, data_points=10)
    first_page = cloudwatch.list_metrics()
    len(first_page["Metrics"]).should.equal(500)
    first_page["NextToken"].shouldnt.be.empty
    # Retrieve second page - and verify there's more where that came from
    second_page = cloudwatch.list_metrics(NextToken=first_page["NextToken"])
    len(second_page["Metrics"]).should.equal(500)
    second_page.should.contain("NextToken")
    # Last page should only have the last 100 results, and no NextToken (indicating that pagination is finished)
    third_page = cloudwatch.list_metrics(NextToken=second_page["NextToken"])
    len(third_page["Metrics"]).should.equal(100)
    third_page.shouldnt.contain("NextToken")
    # Verify that we can't reuse an existing token
    with pytest.raises(ClientError) as e:
        cloudwatch.list_metrics(NextToken=first_page["NextToken"])
    e.value.response["Error"]["Message"].should.equal(
        "Request parameter NextToken is invalid"
    )
@mock_cloudwatch
def test_list_metrics_without_value():
    """A Dimensions filter may omit "Value" and match on the name alone."""
    client = boto3.client("cloudwatch", "eu-west-1")
    # Publish three datapoints, each with its own D<j>/V<j> dimension
    create_metrics_with_dimensions(client, namespace="MyNamespace", data_points=3)
    # All three are visible when filtering only by namespace
    everything = client.list_metrics(Namespace="MyNamespace")["Metrics"]
    everything.should.have.length_of(3)
    # A name-only dimension filter matches exactly the D1 metric
    filtered = client.list_metrics(
        Namespace="MyNamespace", MetricName="MyMetric", Dimensions=[{"Name": "D1"}]
    )["Metrics"]
    filtered.should.have.length_of(1)
    match = filtered[0]
    match["Namespace"].should.equals("MyNamespace")
    match["MetricName"].should.equal("MyMetric")
    match["Dimensions"].should.equal([{"Name": "D1", "Value": "V1"}])
def create_metrics(cloudwatch, namespace, metrics=5, data_points=5):
    """Publish *data_points* values for each of *metrics* metrics.

    Metric names are "metric0" .. "metric<metrics-1>"; the j-th datapoint of
    every metric carries the value j with Unit "Seconds".
    """
    for index in range(metrics):
        name = f"metric{index}"
        for value in range(data_points):
            cloudwatch.put_metric_data(
                Namespace=namespace,
                MetricData=[{"MetricName": name, "Value": value, "Unit": "Seconds"}],
            )
def create_metrics_with_dimensions(cloudwatch, namespace, data_points=5):
    """Publish *data_points* "MyMetric" datapoints, each with one distinct
    dimension {"Name": "D<j>", "Value": "V<j>"}."""
    for idx in range(data_points):
        dimension = {"Name": f"D{idx}", "Value": f"V{idx}"}
        cloudwatch.put_metric_data(
            Namespace=namespace,
            MetricData=[
                {
                    "MetricName": "MyMetric",
                    "Dimensions": [dimension],
                    "Unit": "Seconds",
                }
            ],
        )
@mock_cloudwatch
def test_get_metric_data_within_timeframe():
    """get_metric_data computes Average/Sum/Minimum/Maximum over datapoints
    that fall inside [StartTime, EndTime]."""
    utc_now = datetime.now(tz=pytz.utc)
    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
    namespace1 = "my_namespace/"
    # put metric data
    values = [0, 2, 4, 3.5, 7, 100]
    cloudwatch.put_metric_data(
        Namespace=namespace1,
        MetricData=[
            {"MetricName": "metric1", "Value": val, "Unit": "Seconds"} for val in values
        ],
    )
    # get_metric_data: one query per statistic, Id encodes which one
    stats = ["Average", "Sum", "Minimum", "Maximum"]
    response = cloudwatch.get_metric_data(
        MetricDataQueries=[
            {
                "Id": "result_" + stat,
                "MetricStat": {
                    "Metric": {"Namespace": namespace1, "MetricName": "metric1"},
                    "Period": 60,
                    "Stat": stat,
                },
            }
            for stat in stats
        ],
        StartTime=utc_now - timedelta(seconds=60),
        EndTime=utc_now + timedelta(seconds=60),
    )
    #
    # Assert Average/Min/Max/Sum is returned as expected
    avg = [
        res for res in response["MetricDataResults"] if res["Id"] == "result_Average"
    ][0]
    avg["Label"].should.equal("metric1 Average")
    avg["StatusCode"].should.equal("Complete")
    [int(val) for val in avg["Values"]].should.equal([19])
    sum_ = [res for res in response["MetricDataResults"] if res["Id"] == "result_Sum"][
        0
    ]
    sum_["Label"].should.equal("metric1 Sum")
    sum_["StatusCode"].should.equal("Complete")
    [val for val in sum_["Values"]].should.equal([sum(values)])
    min_ = [
        res for res in response["MetricDataResults"] if res["Id"] == "result_Minimum"
    ][0]
    min_["Label"].should.equal("metric1 Minimum")
    min_["StatusCode"].should.equal("Complete")
    [int(val) for val in min_["Values"]].should.equal([0])
    max_ = [
        res for res in response["MetricDataResults"] if res["Id"] == "result_Maximum"
    ][0]
    max_["Label"].should.equal("metric1 Maximum")
    max_["StatusCode"].should.equal("Complete")
    [int(val) for val in max_["Values"]].should.equal([100])
@mock_cloudwatch
def test_get_metric_data_partially_within_timeframe():
    """Only datapoints inside [StartTime, EndTime] contribute to the result;
    ScanBy controls the ordering of the returned values."""
    utc_now = datetime.now(tz=pytz.utc)
    yesterday = utc_now - timedelta(days=1)
    last_week = utc_now - timedelta(days=7)
    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
    namespace1 = "my_namespace/"
    # put metric data: one datapoint now, one yesterday
    cloudwatch.put_metric_data(
        Namespace=namespace1,
        MetricData=[
            {
                "MetricName": "metric1",
                "Value": 10,
                "Unit": "Seconds",
                "Timestamp": utc_now,
            }
        ],
    )
    cloudwatch.put_metric_data(
        Namespace=namespace1,
        MetricData=[
            {
                "MetricName": "metric1",
                "Value": 20,
                "Unit": "Seconds",
                "Timestamp": yesterday,
            }
        ],
    )
    # four datapoints last week, all within the same 60-second period
    cloudwatch.put_metric_data(
        Namespace=namespace1,
        MetricData=[
            {
                "MetricName": "metric1",
                "Value": 50,
                "Unit": "Seconds",
                "Timestamp": last_week,
            },
            {
                "MetricName": "metric1",
                "Value": 10,
                "Unit": "Seconds",
                "Timestamp": last_week + timedelta(seconds=10),
            },
            {
                "MetricName": "metric1",
                "Value": 20,
                "Unit": "Seconds",
                "Timestamp": last_week + timedelta(seconds=15),
            },
            {
                "MetricName": "metric1",
                "Value": 40,
                "Unit": "Seconds",
                "Timestamp": last_week + timedelta(seconds=30),
            },
        ],
    )
    # data for average, min, max
    def get_data(start, end, stat="Sum", scanBy="TimestampAscending"):
        # get_metric_data
        response = cloudwatch.get_metric_data(
            MetricDataQueries=[
                {
                    "Id": "result",
                    "MetricStat": {
                        "Metric": {"Namespace": namespace1, "MetricName": "metric1"},
                        "Period": 60,
                        "Stat": stat,
                    },
                }
            ],
            StartTime=start,
            EndTime=end,
            ScanBy=scanBy,
        )
        return response
    response = get_data(
        start=yesterday - timedelta(seconds=60), end=utc_now + timedelta(seconds=60),
    )
    # Assert Last week's data is not returned
    len(response["MetricDataResults"]).should.equal(1)
    sum_ = response["MetricDataResults"][0]
    sum_["Label"].should.equal("metric1 Sum")
    sum_["StatusCode"].should.equal("Complete")
    sum_["Values"].should.equal([20.0, 10.0])
    # same window, descending order reverses the values
    response = get_data(
        start=yesterday - timedelta(seconds=60),
        end=utc_now + timedelta(seconds=60),
        scanBy="TimestampDescending",
    )
    response["MetricDataResults"][0]["Values"].should.equal([10.0, 20.0])
    response = get_data(
        start=last_week - timedelta(seconds=1),
        end=utc_now + timedelta(seconds=60),
        stat="Average",
    )
    # assert average
    response["MetricDataResults"][0]["Values"].should.equal([30.0, 20.0, 10.0])
    response = get_data(
        start=last_week - timedelta(seconds=1),
        end=utc_now + timedelta(seconds=60),
        stat="Maximum",
    )
    # assert maximum
    response["MetricDataResults"][0]["Values"].should.equal([50.0, 20.0, 10.0])
    response = get_data(
        start=last_week - timedelta(seconds=1),
        end=utc_now + timedelta(seconds=60),
        stat="Minimum",
    )
    # assert minimum
    response["MetricDataResults"][0]["Values"].should.equal([10.0, 20.0, 10.0])
@mock_cloudwatch
def test_get_metric_data_outside_timeframe():
    """Datapoints older than the query window yield an empty Values list."""
    now = datetime.now(tz=pytz.utc)
    last_week = now - timedelta(days=7)
    client = boto3.client("cloudwatch", "eu-west-1")
    namespace1 = "my_namespace/"
    # publish a single datapoint, dated a week ago
    client.put_metric_data(
        Namespace=namespace1,
        MetricData=[
            {
                "MetricName": "metric1",
                "Value": 50,
                "Unit": "Seconds",
                "Timestamp": last_week,
            }
        ],
    )
    # query only the two minutes around "now"
    query = {
        "Id": "result",
        "MetricStat": {
            "Metric": {"Namespace": namespace1, "MetricName": "metric1"},
            "Period": 60,
            "Stat": "Sum",
        },
    }
    response = client.get_metric_data(
        MetricDataQueries=[query],
        StartTime=now - timedelta(seconds=60),
        EndTime=now + timedelta(seconds=60),
    )
    # last week's datapoint must not contribute anything
    results = response["MetricDataResults"]
    results.should.have.length_of(1)
    results[0]["Id"].should.equal("result")
    results[0]["StatusCode"].should.equal("Complete")
    results[0]["Values"].should.equal([])
@mock_cloudwatch
def test_get_metric_data_for_multiple_metrics():
    """A single get_metric_data call can query several metrics, keyed back
    to the caller by the per-query Id."""
    utc_now = datetime.now(tz=pytz.utc)
    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
    namespace = "my_namespace/"
    # put metric data
    cloudwatch.put_metric_data(
        Namespace=namespace,
        MetricData=[
            {
                "MetricName": "metric1",
                "Value": 50,
                "Unit": "Seconds",
                "Timestamp": utc_now,
            }
        ],
    )
    cloudwatch.put_metric_data(
        Namespace=namespace,
        MetricData=[
            {
                "MetricName": "metric2",
                "Value": 25,
                "Unit": "Seconds",
                "Timestamp": utc_now,
            }
        ],
    )
    # get_metric_data: two queries in one request
    response = cloudwatch.get_metric_data(
        MetricDataQueries=[
            {
                "Id": "result1",
                "MetricStat": {
                    "Metric": {"Namespace": namespace, "MetricName": "metric1"},
                    "Period": 60,
                    "Stat": "Sum",
                },
            },
            {
                "Id": "result2",
                "MetricStat": {
                    "Metric": {"Namespace": namespace, "MetricName": "metric2"},
                    "Period": 60,
                    "Stat": "Sum",
                },
            },
        ],
        StartTime=utc_now - timedelta(seconds=60),
        EndTime=utc_now + timedelta(seconds=60),
    )
    #
    len(response["MetricDataResults"]).should.equal(2)
    res1 = [res for res in response["MetricDataResults"] if res["Id"] == "result1"][0]
    res1["Values"].should.equal([50.0])
    res2 = [res for res in response["MetricDataResults"] if res["Id"] == "result2"][0]
    res2["Values"].should.equal([25.0])
@mock_cloudwatch
@mock_s3
def test_cloudwatch_return_s3_metrics():
    """Creating an S3 bucket with content surfaces the built-in AWS/S3
    NumberOfObjects and BucketSizeBytes metrics in CloudWatch."""
    utc_now = datetime.now(tz=pytz.utc)
    bucket_name = "examplebucket"
    cloudwatch = boto3.client("cloudwatch", "eu-west-3")
    # given
    # NOTE(review): the s3 resource/client use the default region while the
    # bucket is created in eu-west-3 - confirm this is intentional.
    s3 = boto3.resource("s3")
    s3_client = boto3.client("s3")
    bucket = s3.Bucket(bucket_name)
    bucket.create(CreateBucketConfiguration={"LocationConstraint": "eu-west-3"})
    bucket.put_object(Body=b"ABCD", Key="file.txt")
    # when
    metrics = cloudwatch.list_metrics(
        Dimensions=[{"Name": "BucketName", "Value": bucket_name}]
    )["Metrics"]
    # then
    metrics.should.have.length_of(2)
    metrics.should.contain(
        {
            "Namespace": "AWS/S3",
            "MetricName": "NumberOfObjects",
            "Dimensions": [
                {"Name": "StorageType", "Value": "AllStorageTypes"},
                {"Name": "BucketName", "Value": bucket_name},
            ],
        }
    )
    metrics.should.contain(
        {
            "Namespace": "AWS/S3",
            "MetricName": "BucketSizeBytes",
            "Dimensions": [
                {"Name": "StorageType", "Value": "StandardStorage"},
                {"Name": "BucketName", "Value": bucket_name},
            ],
        }
    )
    # when: query the bucket-size statistic over the last two days
    stats = cloudwatch.get_metric_statistics(
        Namespace="AWS/S3",
        MetricName="BucketSizeBytes",
        Dimensions=[
            {"Name": "BucketName", "Value": bucket_name},
            {"Name": "StorageType", "Value": "StandardStorage"},
        ],
        StartTime=utc_now - timedelta(days=2),
        EndTime=utc_now,
        Period=86400,
        Statistics=["Average"],
        Unit="Bytes",
    )
    # then
    stats.should.have.key("Label").equal("BucketSizeBytes")
    stats.should.have.key("Datapoints").length_of(1)
    data_point = stats["Datapoints"][0]
    data_point.should.have.key("Average").being.above(0)
    data_point.should.have.key("Unit").being.equal("Bytes")
    # when: query the object-count statistic
    stats = cloudwatch.get_metric_statistics(
        Namespace="AWS/S3",
        MetricName="NumberOfObjects",
        Dimensions=[
            {"Name": "BucketName", "Value": bucket_name},
            {"Name": "StorageType", "Value": "AllStorageTypes"},
        ],
        StartTime=utc_now - timedelta(days=2),
        EndTime=utc_now,
        Period=86400,
        Statistics=["Average"],
    )
    # then
    stats.should.have.key("Label").equal("NumberOfObjects")
    stats.should.have.key("Datapoints").length_of(1)
    data_point = stats["Datapoints"][0]
    data_point.should.have.key("Average").being.equal(1)
    data_point.should.have.key("Unit").being.equal("Count")
    # clean up so the mock S3 state does not leak into other assertions
    s3_client.delete_object(Bucket=bucket_name, Key="file.txt")
    s3_client.delete_bucket(Bucket=bucket_name)
@mock_cloudwatch
def test_put_metric_alarm():
    """PutMetricAlarm stores every field and describe_alarms returns them.

    Fixed: the SNS topic ARN used shell/JS-style ``${...}`` placeholders
    inside a Python f-string, which produced a malformed ARN containing
    literal "$" characters (``arn:aws:sns:$eu-central-1:$.../test-topic``).
    Plain ``{...}`` placeholders build a well-formed ARN.
    """
    # given
    region_name = "eu-central-1"
    client = boto3.client("cloudwatch", region_name=region_name)
    alarm_name = "test-alarm"
    sns_topic_arn = f"arn:aws:sns:{region_name}:{ACCOUNT_ID}:test-topic"
    # when
    client.put_metric_alarm(
        AlarmName=alarm_name,
        AlarmDescription="test alarm",
        ActionsEnabled=True,
        OKActions=[sns_topic_arn],
        AlarmActions=[sns_topic_arn],
        InsufficientDataActions=[sns_topic_arn],
        MetricName="5XXError",
        Namespace="AWS/ApiGateway",
        Statistic="Sum",
        Dimensions=[
            {"Name": "ApiName", "Value": "test-api"},
            {"Name": "Stage", "Value": "default"},
        ],
        Period=60,
        Unit="Seconds",
        EvaluationPeriods=1,
        DatapointsToAlarm=1,
        Threshold=1.0,
        ComparisonOperator="GreaterThanOrEqualToThreshold",
        TreatMissingData="notBreaching",
        Tags=[{"Key": "key-1", "Value": "value-1"}],
    )
    # then
    alarms = client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"]
    alarms.should.have.length_of(1)
    alarm = alarms[0]
    alarm["AlarmName"].should.equal(alarm_name)
    alarm["AlarmArn"].should.equal(
        f"arn:aws:cloudwatch:{region_name}:{ACCOUNT_ID}:alarm:{alarm_name}"
    )
    alarm["AlarmDescription"].should.equal("test alarm")
    alarm["AlarmConfigurationUpdatedTimestamp"].should.be.a(datetime)
    alarm["AlarmConfigurationUpdatedTimestamp"].tzinfo.should.equal(tzutc())
    alarm["ActionsEnabled"].should.be.ok
    alarm["OKActions"].should.equal([sns_topic_arn])
    alarm["AlarmActions"].should.equal([sns_topic_arn])
    alarm["InsufficientDataActions"].should.equal([sns_topic_arn])
    alarm["StateValue"].should.equal("OK")
    alarm["StateReason"].should.equal("Unchecked: Initial alarm creation")
    alarm["StateUpdatedTimestamp"].should.be.a(datetime)
    alarm["StateUpdatedTimestamp"].tzinfo.should.equal(tzutc())
    alarm["MetricName"].should.equal("5XXError")
    alarm["Namespace"].should.equal("AWS/ApiGateway")
    alarm["Statistic"].should.equal("Sum")
    # dimension order is not guaranteed - compare sorted
    sorted(alarm["Dimensions"], key=itemgetter("Name")).should.equal(
        sorted(
            [
                {"Name": "ApiName", "Value": "test-api"},
                {"Name": "Stage", "Value": "default"},
            ],
            key=itemgetter("Name"),
        )
    )
    alarm["Period"].should.equal(60)
    alarm["Unit"].should.equal("Seconds")
    alarm["EvaluationPeriods"].should.equal(1)
    alarm["DatapointsToAlarm"].should.equal(1)
    alarm["Threshold"].should.equal(1.0)
    alarm["ComparisonOperator"].should.equal("GreaterThanOrEqualToThreshold")
    alarm["TreatMissingData"].should.equal("notBreaching")
@mock_cloudwatch
def test_put_metric_alarm_with_percentile():
    """An alarm defined with ExtendedStatistic ("p90") and
    EvaluateLowSampleCountPercentile is stored and returned verbatim."""
    # given
    region_name = "eu-central-1"
    client = boto3.client("cloudwatch", region_name=region_name)
    alarm_name = "test-alarm"
    # when
    client.put_metric_alarm(
        AlarmName=alarm_name,
        AlarmDescription="test alarm",
        ActionsEnabled=True,
        MetricName="5XXError",
        Namespace="AWS/ApiGateway",
        ExtendedStatistic="p90",
        Dimensions=[
            {"Name": "ApiName", "Value": "test-api"},
            {"Name": "Stage", "Value": "default"},
        ],
        Period=60,
        Unit="Seconds",
        EvaluationPeriods=1,
        DatapointsToAlarm=1,
        Threshold=1.0,
        ComparisonOperator="GreaterThanOrEqualToThreshold",
        TreatMissingData="notBreaching",
        EvaluateLowSampleCountPercentile="ignore",
    )
    # then
    alarms = client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"]
    alarms.should.have.length_of(1)
    alarm = alarms[0]
    alarm["AlarmName"].should.equal(alarm_name)
    alarm["AlarmArn"].should.equal(
        f"arn:aws:cloudwatch:{region_name}:{ACCOUNT_ID}:alarm:{alarm_name}"
    )
    alarm["AlarmDescription"].should.equal("test alarm")
    alarm["AlarmConfigurationUpdatedTimestamp"].should.be.a(datetime)
    alarm["AlarmConfigurationUpdatedTimestamp"].tzinfo.should.equal(tzutc())
    alarm["ActionsEnabled"].should.be.ok
    alarm["StateValue"].should.equal("OK")
    alarm["StateReason"].should.equal("Unchecked: Initial alarm creation")
    alarm["StateUpdatedTimestamp"].should.be.a(datetime)
    alarm["StateUpdatedTimestamp"].tzinfo.should.equal(tzutc())
    alarm["MetricName"].should.equal("5XXError")
    alarm["Namespace"].should.equal("AWS/ApiGateway")
    alarm["ExtendedStatistic"].should.equal("p90")
    # dimension order is not guaranteed - compare sorted
    sorted(alarm["Dimensions"], key=itemgetter("Name")).should.equal(
        sorted(
            [
                {"Name": "ApiName", "Value": "test-api"},
                {"Name": "Stage", "Value": "default"},
            ],
            key=itemgetter("Name"),
        )
    )
    alarm["Period"].should.equal(60)
    alarm["Unit"].should.equal("Seconds")
    alarm["EvaluationPeriods"].should.equal(1)
    alarm["DatapointsToAlarm"].should.equal(1)
    alarm["Threshold"].should.equal(1.0)
    alarm["ComparisonOperator"].should.equal("GreaterThanOrEqualToThreshold")
    alarm["TreatMissingData"].should.equal("notBreaching")
    alarm["EvaluateLowSampleCountPercentile"].should.equal("ignore")
@mock_cloudwatch
def test_put_metric_alarm_with_anomaly_detection():
    """An alarm defined via Metrics math queries (anomaly-detection band)
    and ThresholdMetricId round-trips through describe_alarms."""
    # given
    region_name = "eu-central-1"
    client = boto3.client("cloudwatch", region_name=region_name)
    alarm_name = "test-alarm"
    metrics = [
        {
            "Id": "m1",
            "ReturnData": True,
            "MetricStat": {
                "Metric": {
                    "MetricName": "CPUUtilization",
                    "Namespace": "AWS/EC2",
                    "Dimensions": [
                        {"Name": "instanceId", "Value": "i-1234567890abcdef0"}
                    ],
                },
                "Stat": "Average",
                "Period": 60,
            },
        },
        {
            "Id": "t1",
            "ReturnData": False,
            "Expression": "ANOMALY_DETECTION_BAND(m1, 3)",
        },
    ]
    # when
    client.put_metric_alarm(
        AlarmName=alarm_name,
        ActionsEnabled=True,
        Metrics=metrics,
        EvaluationPeriods=2,
        ComparisonOperator="GreaterThanOrEqualToThreshold",
        ThresholdMetricId="t1",
    )
    # then
    alarms = client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"]
    alarms.should.have.length_of(1)
    alarm = alarms[0]
    alarm["AlarmName"].should.equal(alarm_name)
    alarm["AlarmArn"].should.equal(
        f"arn:aws:cloudwatch:{region_name}:{ACCOUNT_ID}:alarm:{alarm_name}"
    )
    alarm["AlarmConfigurationUpdatedTimestamp"].should.be.a(datetime)
    alarm["AlarmConfigurationUpdatedTimestamp"].tzinfo.should.equal(tzutc())
    alarm["StateValue"].should.equal("OK")
    alarm["StateReason"].should.equal("Unchecked: Initial alarm creation")
    alarm["StateUpdatedTimestamp"].should.be.a(datetime)
    alarm["StateUpdatedTimestamp"].tzinfo.should.equal(tzutc())
    alarm["EvaluationPeriods"].should.equal(2)
    alarm["ComparisonOperator"].should.equal("GreaterThanOrEqualToThreshold")
    alarm["Metrics"].should.equal(metrics)
    alarm["ThresholdMetricId"].should.equal("t1")
@mock_cloudwatch
def test_put_metric_alarm_error_extended_statistic():
    """PutMetricAlarm must reject an ExtendedStatistic that is not a
    percentile ("p..." form) with a 400 InvalidParameterValue."""
    # given
    region_name = "eu-central-1"
    client = boto3.client("cloudwatch", region_name=region_name)
    alarm_name = "test-alarm"
    # when
    with pytest.raises(ClientError) as e:
        client.put_metric_alarm(
            AlarmName=alarm_name,
            ActionsEnabled=True,
            MetricName="5XXError",
            Namespace="AWS/ApiGateway",
            ExtendedStatistic="90",  # invalid: must be "p90", not a bare number
            Dimensions=[
                {"Name": "ApiName", "Value": "test-api"},
                {"Name": "Stage", "Value": "default"},
            ],
            Period=60,
            Unit="Seconds",
            EvaluationPeriods=1,
            DatapointsToAlarm=1,
            Threshold=1.0,
            ComparisonOperator="GreaterThanOrEqualToThreshold",
            TreatMissingData="notBreaching",
        )
    # then
    ex = e.value
    ex.operation_name.should.equal("PutMetricAlarm")
    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
    ex.response["Error"]["Code"].should.contain("InvalidParameterValue")
    ex.response["Error"]["Message"].should.equal(
        "The value 90 for parameter ExtendedStatistic is not supported."
    )
@mock_cloudwatch
def test_put_metric_alarm_error_evaluate_low_sample_count_percentile():
    """PutMetricAlarm must reject an EvaluateLowSampleCountPercentile value
    other than "evaluate"/"ignore" with a 400 ValidationError."""
    # given
    region_name = "eu-central-1"
    client = boto3.client("cloudwatch", region_name=region_name)
    alarm_name = "test-alarm"
    # when
    with pytest.raises(ClientError) as e:
        client.put_metric_alarm(
            AlarmName=alarm_name,
            ActionsEnabled=True,
            MetricName="5XXError",
            Namespace="AWS/ApiGateway",
            ExtendedStatistic="p90",  # valid percentile; the failure comes from the option below
            Dimensions=[
                {"Name": "ApiName", "Value": "test-api"},
                {"Name": "Stage", "Value": "default"},
            ],
            Period=60,
            Unit="Seconds",
            EvaluationPeriods=1,
            DatapointsToAlarm=1,
            Threshold=1.0,
            ComparisonOperator="GreaterThanOrEqualToThreshold",
            TreatMissingData="notBreaching",
            EvaluateLowSampleCountPercentile="unknown",  # unsupported option under test
        )
    # then
    ex = e.value
    ex.operation_name.should.equal("PutMetricAlarm")
    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
    ex.response["Error"]["Code"].should.contain("ValidationError")
    ex.response["Error"]["Message"].should.equal(
        "Option unknown is not supported. "
        "Supported options for parameter EvaluateLowSampleCountPercentile are evaluate and ignore."
    )
summary/sumy/sklearn/multiclass.py | WangWenjun559/MITS | 6 | 6624396 | <reponame>WangWenjun559/MITS
"""
Multiclass and multilabel classification strategies
===================================================
This module implements multiclass learning algorithms:
- one-vs-the-rest / one-vs-all
- one-vs-one
- error correcting output codes
The estimators provided in this module are meta-estimators: they require a base
estimator to be provided in their constructor. For example, it is possible to
use these estimators to turn a binary classifier or a regressor into a
multiclass classifier. It is also possible to use these estimators with
multiclass estimators in the hope that their accuracy or runtime performance
improves.
All classifiers in scikit-learn implement multiclass classification; you
only need to use this module if you want to experiment with custom multiclass
strategies.
The one-vs-the-rest meta-classifier also implements a `predict_proba` method,
so long as such a method is implemented by the base classifier. This method
returns probabilities of class membership in both the single label and
multilabel case. Note that in the multilabel case, probabilities are the
marginal probability that a given sample falls in the given class. As such, in
the multilabel case the sum of these probabilities over all possible labels
for a given sample *will not* sum to unity, as they do in the single label
case.
"""
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import array
import numpy as np
import warnings
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, clone, is_classifier
from .base import MetaEstimatorMixin, is_regressor
from .preprocessing import LabelBinarizer
from .metrics.pairwise import euclidean_distances
from .utils import check_random_state
from .utils.validation import _num_samples
from .utils.validation import check_consistent_length
from .utils.validation import check_is_fitted
from .utils import deprecated
from .externals.joblib import Parallel
from .externals.joblib import delayed
__all__ = [
"OneVsRestClassifier",
"OneVsOneClassifier",
"OutputCodeClassifier",
]
def _fit_binary(estimator, X, y, classes=None):
    """Fit one binary sub-problem.

    Falls back to a constant predictor when ``y`` contains a single
    label; otherwise fits a fresh clone of ``estimator``.
    """
    labels = np.unique(y)
    if len(labels) != 1:
        fitted = clone(estimator)
        fitted.fit(X, y)
        return fitted
    # Degenerate problem: every training sample carries the same label.
    if classes is not None:
        idx = 0 if y[0] == -1 else y[0]
        warnings.warn("Label %s is present in all training examples." %
                      str(classes[idx]))
    return _ConstantPredictor().fit(X, labels)
def _predict_binary(estimator, X):
    """Return a 1-d confidence score for each sample in ``X``.

    Regressors predict directly; classifiers prefer
    ``decision_function`` and fall back to the positive-class column of
    ``predict_proba``.
    """
    if is_regressor(estimator):
        return estimator.predict(X)
    try:
        return np.ravel(estimator.decision_function(X))
    except (AttributeError, NotImplementedError):
        # No usable decision_function: use P(positive class) instead.
        return estimator.predict_proba(X)[:, 1]
def _check_estimator(estimator):
"""Make sure that an estimator implements the necessary methods."""
if (not hasattr(estimator, "decision_function") and
not hasattr(estimator, "predict_proba")):
raise ValueError("The base estimator should implement "
"decision_function or predict_proba!")
@deprecated("fit_ovr is deprecated and will be removed in 0.18. "
            "Use the OneVsRestClassifier instead.")
def fit_ovr(estimator, X, y, n_jobs=1):
    """Fit a one-vs-the-rest strategy.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    X : (sparse) array-like, shape = [n_samples, n_features]
        Data.

    y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]
        Multi-class targets. An indicator matrix turns on multilabel
        classification.

    n_jobs : int, optional, default: 1
        Number of parallel jobs used by the underlying classifier.

    Returns
    -------
    estimators : list of estimators object
        The list of fitted estimators.

    lb : fitted LabelBinarizer
        The binarizer used to encode the targets.
    """
    # Bug fix: the two deprecation-message fragments above were joined
    # without a space, rendering "0.18.Use". Delegate to the class-based
    # implementation; this wrapper exists for backward compatibility only.
    ovr = OneVsRestClassifier(estimator, n_jobs=n_jobs).fit(X, y)
    return ovr.estimators_, ovr.label_binarizer_
@deprecated("predict_ovr is deprecated and will be removed in 0.18. "
            "Use the OneVsRestClassifier instead.")
def predict_ovr(estimators, label_binarizer, X):
    """Predict multi-class targets using the one vs rest strategy.

    Parameters
    ----------
    estimators : list of `n_classes` estimators
        Estimators used for predictions. The list must be homogeneous with
        respect to the type of estimators. fit_ovr supplies this list as
        part of its output.

    label_binarizer : LabelBinarizer object
        Object used to transform multiclass labels to binary labels and
        vice-versa. fit_ovr supplies this object as part of its output.

    X : (sparse) array-like, shape = [n_samples, n_features]
        Data.

    Returns
    -------
    y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes].
        Predicted multi-class targets.
    """
    # Bug fix: the deprecation message fragments were previously joined
    # without a space ("0.18.Use"). Constant predictors (fitted on a
    # single-label problem) are exempt from the homogeneity check.
    e_types = {type(e) for e in estimators
               if not isinstance(e, _ConstantPredictor)}
    if len(e_types) > 1:
        raise ValueError("List of estimators must contain estimators of the"
                         " same type but contains types {0}".format(e_types))
    # Rebuild a OneVsRestClassifier around the pre-fitted pieces and reuse
    # its predict implementation.
    ovr = OneVsRestClassifier(clone(estimators[0]))
    ovr.estimators_ = estimators
    ovr.label_binarizer_ = label_binarizer
    return ovr.predict(X)
@deprecated("predict_proba_ovr is deprecated and will be removed in 0.18. "
            "Use the OneVsRestClassifier instead.")
def predict_proba_ovr(estimators, X, is_multilabel):
    """Estimate class probabilities with fitted one-vs-rest estimators.

    Parameters
    ----------
    estimators : list of estimators
        Fitted binary estimators, one per class (as returned by fit_ovr).

    X : (sparse) array-like, shape = [n_samples, n_features]
        Data.

    is_multilabel : boolean
        If False, each row of the result is normalized to sum to one.

    Returns
    -------
    Y : array, shape = [n_samples, n_classes]
        Positive-class probability for each sample and class.
    """
    # Bug fix: the deprecation message fragments were previously joined
    # without a space ("0.18.Use").
    e_types = {type(e) for e in estimators
               if not isinstance(e, _ConstantPredictor)}
    if len(e_types) > 1:
        raise ValueError("List of estimators must contain estimators of the"
                         " same type but contains types {0}".format(e_types))
    # Column j holds P(class j) for every sample.
    Y = np.array([e.predict_proba(X)[:, 1] for e in estimators]).T
    if not is_multilabel:
        # Single-label case: rows must form a probability distribution.
        Y /= np.sum(Y, axis=1)[:, np.newaxis]
    return Y
class _ConstantPredictor(BaseEstimator):
    """Estimator that always replays the single target seen at fit time."""

    def fit(self, X, y):
        # Nothing to learn: remember the constant target.
        self.y_ = y
        return self

    def predict(self, X):
        check_is_fitted(self, 'y_')
        return np.repeat(self.y_, X.shape[0])

    def decision_function(self, X):
        check_is_fitted(self, 'y_')
        return np.repeat(self.y_, X.shape[0])

    def predict_proba(self, X):
        check_is_fitted(self, 'y_')
        # One identical [P(negative), P(positive)] row per sample.
        row = np.hstack([1 - self.y_, self.y_])
        return np.repeat([row], X.shape[0], axis=0)
class OneVsRestClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """One-vs-the-rest (OvR) multiclass/multilabel strategy

    Also known as one-vs-all, this strategy consists in fitting one classifier
    per class. For each classifier, the class is fitted against all the other
    classes. In addition to its computational efficiency (only `n_classes`
    classifiers are needed), one advantage of this approach is its
    interpretability. Since each class is represented by one and one classifier
    only, it is possible to gain knowledge about the class by inspecting its
    corresponding classifier. This is the most commonly used strategy for
    multiclass classification and is a fair default choice.

    This strategy can also be used for multilabel learning, where a classifier
    is used to predict multiple labels for instance, by fitting on a 2-d matrix
    in which cell [i, j] is 1 if sample i has label j and 0 otherwise.
    In the multilabel learning literature, OvR is also known as the binary
    relevance method.

    Read more in the :ref:`User Guide <ovr_classification>`.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.

    Attributes
    ----------
    estimators_ : list of `n_classes` estimators
        Estimators used for predictions.

    classes_ : array, shape = [`n_classes`]
        Class labels.

    label_binarizer_ : LabelBinarizer object
        Object used to transform multiclass labels to binary labels and
        vice-versa.

    multilabel_ : boolean
        Whether a OneVsRestClassifier is a multilabel classifier.
    """

    def __init__(self, estimator, n_jobs=1):
        self.estimator = estimator
        self.n_jobs = n_jobs

    def fit(self, X, y):
        """Fit underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]
            Multi-class targets. An indicator matrix turns on multilabel
            classification.

        Returns
        -------
        self
        """
        # A sparse LabelBinarizer, with sparse_output=True, has been shown to
        # outperform or match a dense label binarizer in all cases and has also
        # resulted in less or equal memory consumption in the fit_ovr function
        # overall.
        self.label_binarizer_ = LabelBinarizer(sparse_output=True)
        Y = self.label_binarizer_.fit_transform(y)
        # CSC layout so that iterating over Y.T yields one sparse column
        # (one binary problem) at a time.
        Y = Y.tocsc()
        columns = (col.toarray().ravel() for col in Y.T)
        # In cases where individual estimators are very fast to train setting
        # n_jobs > 1 in can results in slower performance due to the overhead
        # of spawning threads. See joblib issue #112.
        # One binary estimator per class; the `classes` argument only feeds
        # the degenerate-label warning inside _fit_binary.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)(
            self.estimator, X, column, classes=[
                "not %s" % self.label_binarizer_.classes_[i],
                self.label_binarizer_.classes_[i]])
            for i, column in enumerate(columns))

        return self

    def predict(self, X):
        """Predict multi-class targets using underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        Returns
        -------
        y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes].
            Predicted multi-class targets.
        """
        check_is_fitted(self, 'estimators_')
        # Decision-function scores are signed (threshold 0); probability
        # scores live in [0, 1] (threshold 0.5).
        if (hasattr(self.estimators_[0], "decision_function") and
                is_classifier(self.estimators_[0])):
            thresh = 0
        else:
            thresh = .5

        n_samples = _num_samples(X)
        if self.label_binarizer_.y_type_ == "multiclass":
            # Single-label case: pick the class whose binary estimator
            # returns the largest confidence for each sample.
            maxima = np.empty(n_samples, dtype=float)
            maxima.fill(-np.inf)
            argmaxima = np.zeros(n_samples, dtype=int)
            for i, e in enumerate(self.estimators_):
                pred = _predict_binary(e, X)
                np.maximum(maxima, pred, out=maxima)
                argmaxima[maxima == pred] = i
            return self.label_binarizer_.classes_[np.array(argmaxima.T)]
        else:
            # Multilabel (or binary) case: build a sparse indicator matrix
            # column by column and let the binarizer map it back to labels.
            indices = array.array('i')
            indptr = array.array('i', [0])
            for e in self.estimators_:
                indices.extend(np.where(_predict_binary(e, X) > thresh)[0])
                indptr.append(len(indices))
            data = np.ones(len(indices), dtype=int)
            indicator = sp.csc_matrix((data, indices, indptr),
                                      shape=(n_samples, len(self.estimators_)))
            return self.label_binarizer_.inverse_transform(indicator)

    def predict_proba(self, X):
        """Probability estimates.

        The returned estimates for all classes are ordered by label of classes.

        Note that in the multilabel case, each sample can have any number of
        labels. This returns the marginal probability that the given sample has
        the label in question. For example, it is entirely consistent that two
        labels both have a 90% probability of applying to a given sample.

        In the single label multiclass case, the rows of the returned matrix
        sum to 1.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : (sparse) array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        """
        check_is_fitted(self, 'estimators_')
        # Y[i,j] gives the probability that sample i has the label j.
        # In the multi-label case, these are not disjoint.
        Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T

        if len(self.estimators_) == 1:
            # Only one estimator, but we still want to return probabilities
            # for two classes.
            Y = np.concatenate(((1 - Y), Y), axis=1)

        if not self.multilabel_:
            # Then, probabilities should be normalized to 1.
            Y /= np.sum(Y, axis=1)[:, np.newaxis]
        return Y

    def decision_function(self, X):
        """Returns the distance of each sample from the decision boundary for
        each class. This can only be used with estimators which implement the
        decision_function method.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
        """
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimators_[0], "decision_function"):
            raise AttributeError(
                "Base estimator doesn't have a decision_function attribute.")
        # One column of scores per binary estimator.
        return np.array([est.decision_function(X).ravel()
                         for est in self.estimators_]).T

    @property
    def multilabel_(self):
        """Whether this is a multilabel classifier"""
        return self.label_binarizer_.y_type_.startswith('multilabel')

    @property
    def classes_(self):
        # Class labels, delegated to the fitted label binarizer.
        return self.label_binarizer_.classes_

    @property
    def coef_(self):
        # Stacked coefficients of the underlying linear estimators, one
        # row block per class. Raises if the base estimator is not linear.
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimators_[0], "coef_"):
            raise AttributeError(
                "Base estimator doesn't have a coef_ attribute.")
        coefs = [e.coef_ for e in self.estimators_]
        if sp.issparse(coefs[0]):
            return sp.vstack(coefs)
        return np.vstack(coefs)

    @property
    def intercept_(self):
        # Stacked intercepts of the underlying linear estimators.
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimators_[0], "intercept_"):
            raise AttributeError(
                "Base estimator doesn't have an intercept_ attribute.")
        return np.array([e.intercept_.ravel() for e in self.estimators_])
def _fit_ovo_binary(estimator, X, y, i, j):
    """Fit a single binary estimator (one-vs-one).

    Restricts the training set to samples whose target is class ``i`` or
    class ``j``, relabels them 0/1, and delegates to ``_fit_binary``.
    """
    cond = np.logical_or(y == i, y == j)
    y = y[cond]
    # Bug fix: `np.int` is a deprecated (and later removed) alias of the
    # builtin `int`; use `int` directly. Same dtype, forward compatible.
    y_binary = np.empty(y.shape, int)
    y_binary[y == i] = 0
    y_binary[y == j] = 1
    ind = np.arange(X.shape[0])
    return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])
@deprecated("fit_ovo is deprecated and will be removed in 0.18. "
            "Use the OneVsOneClassifier instead.")
def fit_ovo(estimator, X, y, n_jobs=1):
    """Fit a one-vs-one strategy; return its estimators and classes.

    Bug fix: the deprecation-message fragments were previously joined
    without a space, rendering "0.18.Use".
    """
    ovo = OneVsOneClassifier(estimator, n_jobs=n_jobs).fit(X, y)
    return ovo.estimators_, ovo.classes_
@deprecated("predict_ovo is deprecated and will be removed in 0.18. "
            "Use the OneVsOneClassifier instead.")
def predict_ovo(estimators, classes, X):
    """Make predictions using the one-vs-one strategy.

    Bug fix: the deprecation-message fragments were previously joined
    without a space, rendering "0.18.Use". Constant predictors are exempt
    from the homogeneity check.
    """
    e_types = {type(e) for e in estimators
               if not isinstance(e, _ConstantPredictor)}
    if len(e_types) > 1:
        raise ValueError("List of estimators must contain estimators of the"
                         " same type but contains types {0}".format(e_types))
    # Rebuild a OneVsOneClassifier around the pre-fitted pieces and reuse
    # its predict implementation.
    ovo = OneVsOneClassifier(clone(estimators[0]))
    ovo.estimators_ = estimators
    ovo.classes_ = classes
    return ovo.predict(X)
class OneVsOneClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """One-vs-one multiclass strategy

    This strategy consists in fitting one classifier per class pair.
    At prediction time, the class which received the most votes is selected.
    Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers,
    this method is usually slower than one-vs-the-rest, due to its
    O(n_classes^2) complexity. However, this method may be advantageous for
    algorithms such as kernel algorithms which don't scale well with
    `n_samples`. This is because each individual learning problem only involves
    a small subset of the data whereas, with one-vs-the-rest, the complete
    dataset is used `n_classes` times.

    Read more in the :ref:`User Guide <ovo_classification>`.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.

    Attributes
    ----------
    estimators_ : list of `n_classes * (n_classes - 1) / 2` estimators
        Estimators used for predictions.

    classes_ : numpy array of shape [n_classes]
        Array containing labels.
    """

    def __init__(self, estimator, n_jobs=1):
        self.estimator = estimator
        self.n_jobs = n_jobs

    def fit(self, X, y):
        """Fit underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        y : array-like, shape = [n_samples]
            Multi-class targets.

        Returns
        -------
        self
        """
        y = np.asarray(y)
        check_consistent_length(X, y)

        self.classes_ = np.unique(y)
        n_classes = self.classes_.shape[0]
        # One estimator per unordered class pair (i, j) with i < j; the
        # iteration order here fixes the order of estimators_ that
        # decision_function relies on below.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_ovo_binary)(
                self.estimator, X, y, self.classes_[i], self.classes_[j])
            for i in range(n_classes) for j in range(i + 1, n_classes))

        return self

    def predict(self, X):
        """Estimate the best class label for each sample in X.

        This is implemented as ``argmax(decision_function(X), axis=1)`` which
        will return the label of the class with most votes by estimators
        predicting the outcome of a decision for each possible class pair.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted multi-class targets.
        """
        Y = self.decision_function(X)
        return self.classes_[Y.argmax(axis=1)]

    def decision_function(self, X):
        """Decision function for the OneVsOneClassifier.

        The decision values for the samples are computed by adding the
        normalized sum of pair-wise classification confidence levels to the
        votes in order to disambiguate between the decision values when the
        votes for all the classes are equal leading to a tie.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        Y : array-like, shape = [n_samples, n_classes]
        """
        check_is_fitted(self, 'estimators_')

        n_samples = X.shape[0]
        n_classes = self.classes_.shape[0]
        votes = np.zeros((n_samples, n_classes))
        sum_of_confidences = np.zeros((n_samples, n_classes))

        # k walks estimators_ in the same (i, j) order used in fit. Each
        # pairwise estimator votes 0 -> class i, 1 -> class j; its raw
        # confidence is subtracted from i and added to j.
        k = 0
        for i in range(n_classes):
            for j in range(i + 1, n_classes):
                pred = self.estimators_[k].predict(X)
                confidence_levels_ij = _predict_binary(self.estimators_[k], X)
                sum_of_confidences[:, i] -= confidence_levels_ij
                sum_of_confidences[:, j] += confidence_levels_ij
                votes[pred == 0, i] += 1
                votes[pred == 1, j] += 1
                k += 1

        max_confidences = sum_of_confidences.max()
        min_confidences = sum_of_confidences.min()

        if max_confidences == min_confidences:
            # All confidences identical: nothing to break ties with.
            return votes

        # Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
        # The motivation is to use confidence levels as a way to break ties in
        # the votes without switching any decision made based on a difference
        # of 1 vote.
        eps = np.finfo(sum_of_confidences.dtype).eps
        max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
        scale = (0.5 - eps) / max_abs_confidence
        return votes + sum_of_confidences * scale
@deprecated("fit_ecoc is deprecated and will be removed in 0.18. "
            "Use the OutputCodeClassifier instead.")
def fit_ecoc(estimator, X, y, code_size=1.5, random_state=None, n_jobs=1):
    """Fit an error-correcting output-code strategy.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    X : (sparse) array-like, shape = [n_samples, n_features]
        Data.

    y : numpy array of shape [n_samples]
        Multi-class targets.

    code_size : float, optional
        Percentage of the number of classes to be used to create the code book.

    random_state : numpy.RandomState, optional
        The generator used to initialize the codebook. Defaults to
        numpy.random.

    n_jobs : int, optional, default: 1
        Number of parallel jobs used by the underlying classifier.

    Returns
    -------
    estimators : list of `int(n_classes * code_size)` estimators
        Estimators used for predictions.

    classes : numpy array of shape [n_classes]
        Array containing labels.

    code_book_ : numpy array of shape [n_classes, code_size]
        Binary array containing the code of each class.
    """
    # Bug fix: the deprecation-message fragments above were joined without
    # a space, rendering "0.18.Use". Note `code_size` was silently ignored
    # by this wrapper before; it is now forwarded to the classifier.
    ecoc = OutputCodeClassifier(estimator, code_size=code_size,
                                random_state=random_state,
                                n_jobs=n_jobs).fit(X, y)
    return ecoc.estimators_, ecoc.classes_, ecoc.code_book_
@deprecated("predict_ecoc is deprecated and will be removed in 0.18. "
            "Use the OutputCodeClassifier instead.")
def predict_ecoc(estimators, classes, code_book, X):
    """Make predictions using the error-correcting output-code strategy.

    Bug fix: the deprecation-message fragments were previously joined
    without a space, rendering "0.18.Use".
    """
    # Rebuild an OutputCodeClassifier around the pre-fitted pieces and
    # reuse its predict implementation.
    ecoc = OutputCodeClassifier(clone(estimators[0]))
    ecoc.classes_ = classes
    ecoc.estimators_ = estimators
    ecoc.code_book_ = code_book
    return ecoc.predict(X)
class OutputCodeClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """(Error-Correcting) Output-Code multiclass strategy

    Output-code based strategies consist in representing each class with a
    binary code (an array of 0s and 1s). At fitting time, one binary
    classifier per bit in the code book is fitted. At prediction time, the
    classifiers are used to project new points in the class space and the class
    closest to the points is chosen. The main advantage of these strategies is
    that the number of classifiers used can be controlled by the user, either
    for compressing the model (0 < code_size < 1) or for making the model more
    robust to errors (code_size > 1). See the documentation for more details.

    Read more in the :ref:`User Guide <ecoc>`.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    code_size : float
        Percentage of the number of classes to be used to create the code book.
        A number between 0 and 1 will require fewer classifiers than
        one-vs-the-rest. A number greater than 1 will require more classifiers
        than one-vs-the-rest.

    random_state : numpy.RandomState, optional
        The generator used to initialize the codebook. Defaults to
        numpy.random.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.

    Attributes
    ----------
    estimators_ : list of `int(n_classes * code_size)` estimators
        Estimators used for predictions.

    classes_ : numpy array of shape [n_classes]
        Array containing labels.

    code_book_ : numpy array of shape [n_classes, code_size]
        Binary array containing the code of each class.

    References
    ----------

    .. [1] "Solving multiclass learning problems via error-correcting output
       codes", Journal of Artificial Intelligence Research 2, 1995.

    .. [2] "The error coding method and PICTs", Journal of Computational and
       Graphical statistics 7, 1998.

    .. [3] "The Elements of Statistical Learning", page 606 (second-edition),
       2008.
    """

    def __init__(self, estimator, code_size=1.5, random_state=None, n_jobs=1):
        self.estimator = estimator
        self.code_size = code_size
        self.random_state = random_state
        self.n_jobs = n_jobs

    def fit(self, X, y):
        """Fit underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        y : numpy array of shape [n_samples]
            Multi-class targets.

        Returns
        -------
        self
        """
        if self.code_size <= 0:
            # Bug fix: the format placeholder used to be {1}, which raised
            # IndexError instead of the intended ValueError.
            raise ValueError("code_size should be greater than 0, got {0}"
                             "".format(self.code_size))

        _check_estimator(self.estimator)
        random_state = check_random_state(self.random_state)

        self.classes_ = np.unique(y)
        n_classes = self.classes_.shape[0]
        code_size_ = int(n_classes * self.code_size)

        # FIXME: there are more elaborate methods than generating the codebook
        # randomly.
        self.code_book_ = random_state.random_sample((n_classes, code_size_))
        self.code_book_[self.code_book_ > 0.5] = 1

        # Match the code values to the score range of the base estimator:
        # decision_function scores are signed, predict_proba are not.
        if hasattr(self.estimator, "decision_function"):
            self.code_book_[self.code_book_ != 1] = -1
        else:
            self.code_book_[self.code_book_ != 1] = 0

        classes_index = dict((c, i) for i, c in enumerate(self.classes_))

        # Bug fix: `np.int` is a deprecated (and later removed) alias of
        # the builtin `int`; use `int` directly.
        Y = np.array([self.code_book_[classes_index[y[i]]]
                      for i in range(X.shape[0])], dtype=int)

        # One binary estimator per code-book bit (column of Y).
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_binary)(self.estimator, X, Y[:, i])
            for i in range(Y.shape[1]))

        return self

    def predict(self, X):
        """Predict multi-class targets using underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted multi-class targets.
        """
        check_is_fitted(self, 'estimators_')
        # Project samples into code space, then pick the class whose code
        # word is closest in Euclidean distance.
        Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
        pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
        return self.classes_[pred]
"""
Multiclass and multilabel classification strategies
===================================================
This module implements multiclass learning algorithms:
- one-vs-the-rest / one-vs-all
- one-vs-one
- error correcting output codes
The estimators provided in this module are meta-estimators: they require a base
estimator to be provided in their constructor. For example, it is possible to
use these estimators to turn a binary classifier or a regressor into a
multiclass classifier. It is also possible to use these estimators with
multiclass estimators in the hope that their accuracy or runtime performance
improves.
All classifiers in scikit-learn implement multiclass classification; you
only need to use this module if you want to experiment with custom multiclass
strategies.
The one-vs-the-rest meta-classifier also implements a `predict_proba` method,
so long as such a method is implemented by the base classifier. This method
returns probabilities of class membership in both the single label and
multilabel case. Note that in the multilabel case, probabilities are the
marginal probability that a given sample falls in the given class. As such, in
the multilabel case the sum of these probabilities over all possible labels
for a given sample *will not* sum to unity, as they do in the single label
case.
"""
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import array
import numpy as np
import warnings
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, clone, is_classifier
from .base import MetaEstimatorMixin, is_regressor
from .preprocessing import LabelBinarizer
from .metrics.pairwise import euclidean_distances
from .utils import check_random_state
from .utils.validation import _num_samples
from .utils.validation import check_consistent_length
from .utils.validation import check_is_fitted
from .utils import deprecated
from .externals.joblib import Parallel
from .externals.joblib import delayed
__all__ = [
"OneVsRestClassifier",
"OneVsOneClassifier",
"OutputCodeClassifier",
]
def _fit_binary(estimator, X, y, classes=None):
    """Fit one binary sub-problem.

    Falls back to a constant predictor when ``y`` contains a single
    label; otherwise fits a fresh clone of ``estimator``.
    """
    labels = np.unique(y)
    if len(labels) != 1:
        fitted = clone(estimator)
        fitted.fit(X, y)
        return fitted
    # Degenerate problem: every training sample carries the same label.
    if classes is not None:
        idx = 0 if y[0] == -1 else y[0]
        warnings.warn("Label %s is present in all training examples." %
                      str(classes[idx]))
    return _ConstantPredictor().fit(X, labels)
def _predict_binary(estimator, X):
    """Return a 1-d confidence score for each sample in ``X``.

    Regressors predict directly; classifiers prefer
    ``decision_function`` and fall back to the positive-class column of
    ``predict_proba``.
    """
    if is_regressor(estimator):
        return estimator.predict(X)
    try:
        return np.ravel(estimator.decision_function(X))
    except (AttributeError, NotImplementedError):
        # No usable decision_function: use P(positive class) instead.
        return estimator.predict_proba(X)[:, 1]
def _check_estimator(estimator):
"""Make sure that an estimator implements the necessary methods."""
if (not hasattr(estimator, "decision_function") and
not hasattr(estimator, "predict_proba")):
raise ValueError("The base estimator should implement "
"decision_function or predict_proba!")
@deprecated("fit_ovr is deprecated and will be removed in 0.18. "
            "Use the OneVsRestClassifier instead.")
def fit_ovr(estimator, X, y, n_jobs=1):
    """Fit a one-vs-the-rest strategy.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    X : (sparse) array-like, shape = [n_samples, n_features]
        Data.

    y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]
        Multi-class targets. An indicator matrix turns on multilabel
        classification.

    n_jobs : int, optional, default: 1
        Number of parallel jobs used by the underlying classifier.

    Returns
    -------
    estimators : list of estimators object
        The list of fitted estimators.

    lb : fitted LabelBinarizer
        The binarizer used to encode the targets.
    """
    # Bug fix: the two deprecation-message fragments above were joined
    # without a space, rendering "0.18.Use". Delegate to the class-based
    # implementation; this wrapper exists for backward compatibility only.
    ovr = OneVsRestClassifier(estimator, n_jobs=n_jobs).fit(X, y)
    return ovr.estimators_, ovr.label_binarizer_
@deprecated("predict_ovr is deprecated and will be removed in 0.18. "
            "Use the OneVsRestClassifier instead.")
def predict_ovr(estimators, label_binarizer, X):
    """Predict multi-class targets using the one vs rest strategy.

    Parameters
    ----------
    estimators : list of `n_classes` estimators
        Estimators used for predictions. The list must be homogeneous with
        respect to the type of estimators. fit_ovr supplies this list as
        part of its output.

    label_binarizer : LabelBinarizer object
        Object used to transform multiclass labels to binary labels and
        vice-versa. fit_ovr supplies this object as part of its output.

    X : (sparse) array-like, shape = [n_samples, n_features]
        Data.

    Returns
    -------
    y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes].
        Predicted multi-class targets.
    """
    # Bug fix: the deprecation message fragments were previously joined
    # without a space ("0.18.Use"). Constant predictors (fitted on a
    # single-label problem) are exempt from the homogeneity check.
    e_types = {type(e) for e in estimators
               if not isinstance(e, _ConstantPredictor)}
    if len(e_types) > 1:
        raise ValueError("List of estimators must contain estimators of the"
                         " same type but contains types {0}".format(e_types))
    # Rebuild a OneVsRestClassifier around the pre-fitted pieces and reuse
    # its predict implementation.
    ovr = OneVsRestClassifier(clone(estimators[0]))
    ovr.estimators_ = estimators
    ovr.label_binarizer_ = label_binarizer
    return ovr.predict(X)
@deprecated("predict_proba_ovr is deprecated and will be removed in 0.18. "
            "Use the OneVsRestClassifier instead.")
def predict_proba_ovr(estimators, X, is_multilabel):
    """Estimate class probabilities with fitted one-vs-rest estimators.

    Parameters
    ----------
    estimators : list of estimators
        Fitted binary estimators, one per class (as returned by fit_ovr).

    X : (sparse) array-like, shape = [n_samples, n_features]
        Data.

    is_multilabel : boolean
        If False, each row of the result is normalized to sum to one.

    Returns
    -------
    Y : array, shape = [n_samples, n_classes]
        Positive-class probability for each sample and class.
    """
    # Bug fix: the deprecation message fragments were previously joined
    # without a space ("0.18.Use").
    e_types = {type(e) for e in estimators
               if not isinstance(e, _ConstantPredictor)}
    if len(e_types) > 1:
        raise ValueError("List of estimators must contain estimators of the"
                         " same type but contains types {0}".format(e_types))
    # Column j holds P(class j) for every sample.
    Y = np.array([e.predict_proba(X)[:, 1] for e in estimators]).T
    if not is_multilabel:
        # Single-label case: rows must form a probability distribution.
        Y /= np.sum(Y, axis=1)[:, np.newaxis]
    return Y
class _ConstantPredictor(BaseEstimator):
    """Estimator that always replays the single target seen at fit time."""

    def fit(self, X, y):
        # Nothing to learn: remember the constant target.
        self.y_ = y
        return self

    def predict(self, X):
        check_is_fitted(self, 'y_')
        return np.repeat(self.y_, X.shape[0])

    def decision_function(self, X):
        check_is_fitted(self, 'y_')
        return np.repeat(self.y_, X.shape[0])

    def predict_proba(self, X):
        check_is_fitted(self, 'y_')
        # One identical [P(negative), P(positive)] row per sample.
        row = np.hstack([1 - self.y_, self.y_])
        return np.repeat([row], X.shape[0], axis=0)
class OneVsRestClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-the-rest (OvR) multiclass/multilabel strategy
Also known as one-vs-all, this strategy consists in fitting one classifier
per class. For each classifier, the class is fitted against all the other
classes. In addition to its computational efficiency (only `n_classes`
classifiers are needed), one advantage of this approach is its
interpretability. Since each class is represented by one and one classifier
only, it is possible to gain knowledge about the class by inspecting its
corresponding classifier. This is the most commonly used strategy for
multiclass classification and is a fair default choice.
This strategy can also be used for multilabel learning, where a classifier
is used to predict multiple labels for instance, by fitting on a 2-d matrix
in which cell [i, j] is 1 if sample i has label j and 0 otherwise.
In the multilabel learning literature, OvR is also known as the binary
relevance method.
Read more in the :ref:`User Guide <ovr_classification>`.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_classes` estimators
Estimators used for predictions.
classes_ : array, shape = [`n_classes`]
Class labels.
label_binarizer_ : LabelBinarizer object
Object used to transform multiclass labels to binary labels and
vice-versa.
multilabel_ : boolean
Whether a OneVsRestClassifier is a multilabel classifier.
"""
    def __init__(self, estimator, n_jobs=1):
        # Store constructor arguments unchanged (scikit-learn convention:
        # no validation in __init__).
        self.estimator = estimator
        self.n_jobs = n_jobs
    def fit(self, X, y):
        """Fit underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]
            Multi-class targets. An indicator matrix turns on multilabel
            classification.

        Returns
        -------
        self
        """
        # A sparse LabelBinarizer, with sparse_output=True, has been shown to
        # outperform or match a dense label binarizer in all cases and has also
        # resulted in less or equal memory consumption in the fit_ovr function
        # overall.
        self.label_binarizer_ = LabelBinarizer(sparse_output=True)
        Y = self.label_binarizer_.fit_transform(y)
        # CSC layout so that iterating over Y.T yields one sparse column
        # (one binary problem) at a time.
        Y = Y.tocsc()
        columns = (col.toarray().ravel() for col in Y.T)
        # In cases where individual estimators are very fast to train setting
        # n_jobs > 1 in can results in slower performance due to the overhead
        # of spawning threads. See joblib issue #112.
        # One binary estimator per class; the `classes` argument only feeds
        # the degenerate-label warning inside _fit_binary.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)(
            self.estimator, X, column, classes=[
                "not %s" % self.label_binarizer_.classes_[i],
                self.label_binarizer_.classes_[i]])
            for i, column in enumerate(columns))
        return self
    def predict(self, X):
        """Predict multi-class targets using underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        Returns
        -------
        y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes].
            Predicted multi-class targets.
        """
        check_is_fitted(self, 'estimators_')
        # Decision-function scores are signed (threshold 0); probability
        # scores live in [0, 1] (threshold 0.5).
        if (hasattr(self.estimators_[0], "decision_function") and
                is_classifier(self.estimators_[0])):
            thresh = 0
        else:
            thresh = .5
        n_samples = _num_samples(X)
        if self.label_binarizer_.y_type_ == "multiclass":
            # Single-label case: pick the class whose binary estimator
            # returns the largest confidence for each sample.
            maxima = np.empty(n_samples, dtype=float)
            maxima.fill(-np.inf)
            argmaxima = np.zeros(n_samples, dtype=int)
            for i, e in enumerate(self.estimators_):
                pred = _predict_binary(e, X)
                np.maximum(maxima, pred, out=maxima)
                argmaxima[maxima == pred] = i
            return self.label_binarizer_.classes_[np.array(argmaxima.T)]
        else:
            # Multilabel (or binary) case: build a sparse indicator matrix
            # column by column and let the binarizer map it back to labels.
            indices = array.array('i')
            indptr = array.array('i', [0])
            for e in self.estimators_:
                indices.extend(np.where(_predict_binary(e, X) > thresh)[0])
                indptr.append(len(indices))
            data = np.ones(len(indices), dtype=int)
            indicator = sp.csc_matrix((data, indices, indptr),
                                      shape=(n_samples, len(self.estimators_)))
            return self.label_binarizer_.inverse_transform(indicator)
    def predict_proba(self, X):
        """Probability estimates.

        The returned estimates for all classes are ordered by label of classes.

        Note that in the multilabel case, each sample can have any number of
        labels. This returns the marginal probability that the given sample has
        the label in question. For example, it is entirely consistent that two
        labels both have a 90% probability of applying to a given sample.

        In the single label multiclass case, the rows of the returned matrix
        sum to 1.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : (sparse) array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        """
        check_is_fitted(self, 'estimators_')
        # Y[i,j] gives the probability that sample i has the label j.
        # In the multi-label case, these are not disjoint.
        Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T
        if len(self.estimators_) == 1:
            # Only one estimator, but we still want to return probabilities
            # for two classes.
            Y = np.concatenate(((1 - Y), Y), axis=1)
        if not self.multilabel_:
            # Then, probabilities should be normalized to 1.
            Y /= np.sum(Y, axis=1)[:, np.newaxis]
        return Y
def decision_function(self, X):
"""Returns the distance of each sample from the decision boundary for
each class. This can only be used with estimators which implement the
decision_function method.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
"""
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "decision_function"):
raise AttributeError(
"Base estimator doesn't have a decision_function attribute.")
return np.array([est.decision_function(X).ravel()
for est in self.estimators_]).T
    @property
    def multilabel_(self):
        """Whether this is a multilabel classifier"""
        # y_type_ is assigned by the LabelBinarizer during fit; values
        # prefixed with "multilabel" indicate indicator-matrix targets.
        return self.label_binarizer_.y_type_.startswith('multilabel')
    @property
    def classes_(self):
        # Class labels, as recorded by the LabelBinarizer fitted in fit().
        return self.label_binarizer_.classes_
@property
def coef_(self):
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "coef_"):
raise AttributeError(
"Base estimator doesn't have a coef_ attribute.")
coefs = [e.coef_ for e in self.estimators_]
if sp.issparse(coefs[0]):
return sp.vstack(coefs)
return np.vstack(coefs)
@property
def intercept_(self):
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "intercept_"):
raise AttributeError(
"Base estimator doesn't have an intercept_ attribute.")
return np.array([e.intercept_.ravel() for e in self.estimators_])
def _fit_ovo_binary(estimator, X, y, i, j):
    """Fit a single binary estimator (one-vs-one).

    Keeps only the samples whose target is class ``i`` or class ``j``,
    relabels class ``i`` as 0 and class ``j`` as 1, and fits ``estimator``
    on that subset via ``_fit_binary``.
    """
    cond = np.logical_or(y == i, y == j)
    y = y[cond]
    # Use the builtin ``int`` instead of the ``np.int`` alias, which is
    # deprecated and removed in NumPy >= 1.24; the resulting dtype is the
    # same platform integer.
    y_binary = np.empty(y.shape, int)
    y_binary[y == i] = 0
    y_binary[y == j] = 1
    # Index through an integer array so this works for both dense arrays
    # and sparse matrices.
    ind = np.arange(X.shape[0])
    return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])
# NOTE: a space was added before "Use" — the original adjacent string
# literals rendered as "...removed in 0.18.Use the ...".
@deprecated("fit_ovo is deprecated and will be removed in 0.18. "
            "Use the OneVsOneClassifier instead.")
def fit_ovo(estimator, X, y, n_jobs=1):
    """Fit a one-vs-one strategy (deprecated wrapper).

    Returns the fitted pairwise estimators and the class labels of a
    ``OneVsOneClassifier`` fitted on (X, y).
    """
    ovo = OneVsOneClassifier(estimator, n_jobs=n_jobs).fit(X, y)
    return ovo.estimators_, ovo.classes_
# NOTE: a space was added before "Use" — the original adjacent string
# literals rendered as "...removed in 0.18.Use the ...".
@deprecated("predict_ovo is deprecated and will be removed in 0.18. "
            "Use the OneVsOneClassifier instead.")
def predict_ovo(estimators, classes, X):
    """Make predictions using the one-vs-one strategy."""
    # All real estimators must be of one type so a representative can be
    # cloned to build the OneVsOneClassifier shell.
    e_types = set([type(e) for e in estimators if not
                   isinstance(e, _ConstantPredictor)])
    if len(e_types) > 1:
        raise ValueError("List of estimators must contain estimators of the"
                         " same type but contains types {0}".format(e_types))
    # Build an unfitted classifier and graft the pre-fitted state onto it.
    ovo = OneVsOneClassifier(clone(estimators[0]))
    ovo.estimators_ = estimators
    ovo.classes_ = classes
    return ovo.predict(X)
class OneVsOneClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """One-vs-one multiclass strategy

    This strategy consists in fitting one classifier per class pair.
    At prediction time, the class which received the most votes is selected.
    Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers,
    this method is usually slower than one-vs-the-rest, due to its
    O(n_classes^2) complexity. However, this method may be advantageous for
    algorithms such as kernel algorithms which don't scale well with
    `n_samples`. This is because each individual learning problem only involves
    a small subset of the data whereas, with one-vs-the-rest, the complete
    dataset is used `n_classes` times.

    Read more in the :ref:`User Guide <ovo_classification>`.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.

    Attributes
    ----------
    estimators_ : list of `n_classes * (n_classes - 1) / 2` estimators
        Estimators used for predictions.

    classes_ : numpy array of shape [n_classes]
        Array containing labels.
    """
    def __init__(self, estimator, n_jobs=1):
        self.estimator = estimator
        self.n_jobs = n_jobs
    def fit(self, X, y):
        """Fit underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        y : array-like, shape = [n_samples]
            Multi-class targets.

        Returns
        -------
        self
        """
        y = np.asarray(y)
        check_consistent_length(X, y)
        self.classes_ = np.unique(y)
        n_classes = self.classes_.shape[0]
        # One binary sub-problem per unordered class pair (i, j) with i < j;
        # the list order of estimators_ matches this double-loop order, and
        # decision_function below relies on that ordering.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_ovo_binary)(
                self.estimator, X, y, self.classes_[i], self.classes_[j])
            for i in range(n_classes) for j in range(i + 1, n_classes))
        return self
    def predict(self, X):
        """Estimate the best class label for each sample in X.

        This is implemented as ``argmax(decision_function(X), axis=1)`` which
        will return the label of the class with most votes by estimators
        predicting the outcome of a decision for each possible class pair.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted multi-class targets.
        """
        Y = self.decision_function(X)
        return self.classes_[Y.argmax(axis=1)]
    def decision_function(self, X):
        """Decision function for the OneVsOneClassifier.

        The decision values for the samples are computed by adding the
        normalized sum of pair-wise classification confidence levels to the
        votes in order to disambiguate between the decision values when the
        votes for all the classes are equal leading to a tie.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        Y : array-like, shape = [n_samples, n_classes]
        """
        check_is_fitted(self, 'estimators_')
        n_samples = X.shape[0]
        n_classes = self.classes_.shape[0]
        votes = np.zeros((n_samples, n_classes))
        sum_of_confidences = np.zeros((n_samples, n_classes))
        # k walks estimators_ in the same (i, j) pair order used in fit().
        k = 0
        for i in range(n_classes):
            for j in range(i + 1, n_classes):
                pred = self.estimators_[k].predict(X)
                confidence_levels_ij = _predict_binary(self.estimators_[k], X)
                # The binary score leans toward class j when positive, so it
                # is subtracted from class i and added to class j.
                sum_of_confidences[:, i] -= confidence_levels_ij
                sum_of_confidences[:, j] += confidence_levels_ij
                # A prediction of 0 is a vote for class i, of 1 for class j.
                votes[pred == 0, i] += 1
                votes[pred == 1, j] += 1
                k += 1
        max_confidences = sum_of_confidences.max()
        min_confidences = sum_of_confidences.min()
        # All confidences identical: nothing to break ties with.
        if max_confidences == min_confidences:
            return votes
        # Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
        # The motivation is to use confidence levels as a way to break ties in
        # the votes without switching any decision made based on a difference
        # of 1 vote.
        eps = np.finfo(sum_of_confidences.dtype).eps
        max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
        scale = (0.5 - eps) / max_abs_confidence
        return votes + sum_of_confidences * scale
# NOTE: a space was added before "Use" — the original adjacent string
# literals rendered as "...removed in 0.18.Use the ...".
@deprecated("fit_ecoc is deprecated and will be removed in 0.18. "
            "Use the OutputCodeClassifier instead.")
def fit_ecoc(estimator, X, y, code_size=1.5, random_state=None, n_jobs=1):
    """Fit an error-correcting output-code strategy.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    code_size : float, optional
        Percentage of the number of classes to be used to create the code book.

    random_state : numpy.RandomState, optional
        The generator used to initialize the codebook. Defaults to
        numpy.random.

    Returns
    --------
    estimators : list of `int(n_classes * code_size)` estimators
        Estimators used for predictions.

    classes : numpy array of shape [n_classes]
        Array containing labels.

    code_book_ : numpy array of shape [n_classes, code_size]
        Binary array containing the code of each class.
    """
    ecoc = OutputCodeClassifier(estimator, random_state=random_state,
                                n_jobs=n_jobs).fit(X, y)
    return ecoc.estimators_, ecoc.classes_, ecoc.code_book_
# NOTE: a space was added before "Use" — the original adjacent string
# literals rendered as "...removed in 0.18.Use the ...".
@deprecated("predict_ecoc is deprecated and will be removed in 0.18. "
            "Use the OutputCodeClassifier instead.")
def predict_ecoc(estimators, classes, code_book, X):
    """Make predictions using the error-correcting output-code strategy."""
    # Build an unfitted classifier shell and graft the pre-fitted state
    # (estimators, labels, code book) onto it before predicting.
    ecoc = OutputCodeClassifier(clone(estimators[0]))
    ecoc.classes_ = classes
    ecoc.estimators_ = estimators
    ecoc.code_book_ = code_book
    return ecoc.predict(X)
class OutputCodeClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """(Error-Correcting) Output-Code multiclass strategy

    Output-code based strategies consist in representing each class with a
    binary code (an array of 0s and 1s). At fitting time, one binary
    classifier per bit in the code book is fitted. At prediction time, the
    classifiers are used to project new points in the class space and the class
    closest to the points is chosen. The main advantage of these strategies is
    that the number of classifiers used can be controlled by the user, either
    for compressing the model (0 < code_size < 1) or for making the model more
    robust to errors (code_size > 1). See the documentation for more details.

    Read more in the :ref:`User Guide <ecoc>`.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    code_size : float
        Percentage of the number of classes to be used to create the code book.
        A number between 0 and 1 will require fewer classifiers than
        one-vs-the-rest. A number greater than 1 will require more classifiers
        than one-vs-the-rest.

    random_state : numpy.RandomState, optional
        The generator used to initialize the codebook. Defaults to
        numpy.random.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.

    Attributes
    ----------
    estimators_ : list of `int(n_classes * code_size)` estimators
        Estimators used for predictions.

    classes_ : numpy array of shape [n_classes]
        Array containing labels.

    code_book_ : numpy array of shape [n_classes, code_size]
        Binary array containing the code of each class.

    References
    ----------

    .. [1] "Solving multiclass learning problems via error-correcting output
       codes",
       Journal of Artificial Intelligence Research 2,
       1995.

    .. [2] "The error coding method and PICTs",
       Journal of Computational and Graphical statistics 7,
       1998.

    .. [3] "The Elements of Statistical Learning",
       page 606 (second-edition)
       2008.
    """
    def __init__(self, estimator, code_size=1.5, random_state=None, n_jobs=1):
        self.estimator = estimator
        self.code_size = code_size
        self.random_state = random_state
        self.n_jobs = n_jobs
    def fit(self, X, y):
        """Fit underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        y : numpy array of shape [n_samples]
            Multi-class targets.

        Returns
        -------
        self
        """
        if self.code_size <= 0:
            # BUGFIX: the format index was {1}, which raised IndexError
            # (only one positional argument) instead of the intended
            # ValueError message. {0} formats self.code_size correctly.
            raise ValueError("code_size should be greater than 0, got {0}"
                             "".format(self.code_size))
        _check_estimator(self.estimator)
        random_state = check_random_state(self.random_state)
        self.classes_ = np.unique(y)
        n_classes = self.classes_.shape[0]
        code_size_ = int(n_classes * self.code_size)
        # FIXME: there are more elaborate methods than generating the codebook
        # randomly.
        self.code_book_ = random_state.random_sample((n_classes, code_size_))
        self.code_book_[self.code_book_ > 0.5] = 1
        if hasattr(self.estimator, "decision_function"):
            # Margin-based estimators use {-1, 1} codes.
            self.code_book_[self.code_book_ != 1] = -1
        else:
            # Probability-based estimators use {0, 1} codes.
            self.code_book_[self.code_book_ != 1] = 0
        classes_index = dict((c, i) for i, c in enumerate(self.classes_))
        # Expand each target label to its code word; one binary problem is
        # then fitted per code-book column.
        # (``int`` replaces the deprecated ``np.int`` alias, removed in
        # NumPy >= 1.24; the dtype is identical.)
        Y = np.array([self.code_book_[classes_index[y[i]]]
                      for i in range(X.shape[0])], dtype=int)
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_binary)(self.estimator, X, Y[:, i])
            for i in range(Y.shape[1]))
        return self
    def predict(self, X):
        """Predict multi-class targets using underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted multi-class targets.
        """
        check_is_fitted(self, 'estimators_')
        # Project samples into code space, then pick the class whose code
        # word is closest in Euclidean distance.
        Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
        pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
        return self.classes_[pred]
An indicator matrix turns on multilabel classification. Returns ------- estimators : list of estimators object The list of fitted estimator. lb : fitted LabelBinarizer Predict multi-class targets using the one vs rest strategy. Parameters ---------- estimators : list of `n_classes` estimators, Estimators used for predictions. The list must be homogeneous with respect to the type of estimators. fit_ovr supplies this list as part of its output. label_binarizer : LabelBinarizer object, Object used to transform multiclass labels to binary labels and vice-versa. fit_ovr supplies this object as part of its output. X : (sparse) array-like, shape = [n_samples, n_features] Data. Returns ------- y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]. Predicted multi-class targets. # Then, probabilities should be normalized to 1. One-vs-the-rest (OvR) multiclass/multilabel strategy Also known as one-vs-all, this strategy consists in fitting one classifier per class. For each classifier, the class is fitted against all the other classes. In addition to its computational efficiency (only `n_classes` classifiers are needed), one advantage of this approach is its interpretability. Since each class is represented by one and one classifier only, it is possible to gain knowledge about the class by inspecting its corresponding classifier. This is the most commonly used strategy for multiclass classification and is a fair default choice. This strategy can also be used for multilabel learning, where a classifier is used to predict multiple labels for instance, by fitting on a 2-d matrix in which cell [i, j] is 1 if sample i has label j and 0 otherwise. In the multilabel learning literature, OvR is also known as the binary relevance method. Read more in the :ref:`User Guide <ovr_classification>`. Parameters ---------- estimator : estimator object An estimator object implementing `fit` and one of `decision_function` or `predict_proba`. 
n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Attributes ---------- estimators_ : list of `n_classes` estimators Estimators used for predictions. classes_ : array, shape = [`n_classes`] Class labels. label_binarizer_ : LabelBinarizer object Object used to transform multiclass labels to binary labels and vice-versa. multilabel_ : boolean Whether a OneVsRestClassifier is a multilabel classifier. Fit underlying estimators. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes] Multi-class targets. An indicator matrix turns on multilabel classification. Returns ------- self # A sparse LabelBinarizer, with sparse_output=True, has been shown to # outpreform or match a dense label binarizer in all cases and has also # resulted in less or equal memory consumption in the fit_ovr function # overall. # In cases where individual estimators are very fast to train setting # n_jobs > 1 in can results in slower performance due to the overhead # of spawning threads. See joblib issue #112. Predict multi-class targets using underlying estimators. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. Returns ------- y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]. Predicted multi-class targets. Probability estimates. The returned estimates for all classes are ordered by label of classes. Note that in the multilabel case, each sample can have any number of labels. This returns the marginal probability that the given sample has the label in question. For example, it is entirely consistent that two labels both have a 90% probability of applying to a given sample. 
In the single label multiclass case, the rows of the returned matrix sum to 1. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : (sparse) array-like, shape = [n_samples, n_classes] Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. # Y[i,j] gives the probability that sample i has the label j. # In the multi-label case, these are not disjoint. # Only one estimator, but we still want to return probabilities # for two classes. # Then, probabilities should be normalized to 1. Returns the distance of each sample from the decision boundary for each class. This can only be used with estimators which implement the decision_function method. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples, n_classes] Whether this is a multilabel classifier Fit a single binary estimator (one-vs-one). Make predictions using the one-vs-one strategy. One-vs-one multiclass strategy This strategy consists in fitting one classifier per class pair. At prediction time, the class which received the most votes is selected. Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers, this method is usually slower than one-vs-the-rest, due to its O(n_classes^2) complexity. However, this method may be advantageous for algorithms such as kernel algorithms which don't scale well with `n_samples`. This is because each individual learning problem only involves a small subset of the data whereas, with one-vs-the-rest, the complete dataset is used `n_classes` times. Read more in the :ref:`User Guide <ovo_classification>`. Parameters ---------- estimator : estimator object An estimator object implementing `fit` and one of `decision_function` or `predict_proba`. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If -1 all CPUs are used. 
If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Attributes ---------- estimators_ : list of `n_classes * (n_classes - 1) / 2` estimators Estimators used for predictions. classes_ : numpy array of shape [n_classes] Array containing labels. Fit underlying estimators. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. y : array-like, shape = [n_samples] Multi-class targets. Returns ------- self Estimate the best class label for each sample in X. This is implemented as ``argmax(decision_function(X), axis=1)`` which will return the label of the class with most votes by estimators predicting the outcome of a decision for each possible class pair. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. Returns ------- y : numpy array of shape [n_samples] Predicted multi-class targets. Decision function for the OneVsOneClassifier. The decision values for the samples are computed by adding the normalized sum of pair-wise classification confidence levels to the votes in order to disambiguate between the decision values when the votes for all the classes are equal leading to a tie. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- Y : array-like, shape = [n_samples, n_classes] # Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes. # The motivation is to use confidence levels as a way to break ties in # the votes without switching any decision made based on a difference # of 1 vote. Fit an error-correcting output-code strategy. Parameters ---------- estimator : estimator object An estimator object implementing `fit` and one of `decision_function` or `predict_proba`. code_size : float, optional Percentage of the number of classes to be used to create the code book. 
random_state : numpy.RandomState, optional The generator used to initialize the codebook. Defaults to numpy.random. Returns -------- estimators : list of `int(n_classes * code_size)` estimators Estimators used for predictions. classes : numpy array of shape [n_classes] Array containing labels. code_book_ : numpy array of shape [n_classes, code_size] Binary array containing the code of each class. Make predictions using the error-correcting output-code strategy. (Error-Correcting) Output-Code multiclass strategy Output-code based strategies consist in representing each class with a binary code (an array of 0s and 1s). At fitting time, one binary classifier per bit in the code book is fitted. At prediction time, the classifiers are used to project new points in the class space and the class closest to the points is chosen. The main advantage of these strategies is that the number of classifiers used can be controlled by the user, either for compressing the model (0 < code_size < 1) or for making the model more robust to errors (code_size > 1). See the documentation for more details. Read more in the :ref:`User Guide <ecoc>`. Parameters ---------- estimator : estimator object An estimator object implementing `fit` and one of `decision_function` or `predict_proba`. code_size : float Percentage of the number of classes to be used to create the code book. A number between 0 and 1 will require fewer classifiers than one-vs-the-rest. A number greater than 1 will require more classifiers than one-vs-the-rest. random_state : numpy.RandomState, optional The generator used to initialize the codebook. Defaults to numpy.random. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. 
Attributes ---------- estimators_ : list of `int(n_classes * code_size)` estimators Estimators used for predictions. classes_ : numpy array of shape [n_classes] Array containing labels. code_book_ : numpy array of shape [n_classes, code_size] Binary array containing the code of each class. References ---------- .. [1] "Solving multiclass learning problems via error-correcting output codes", <NAME>., <NAME>., Journal of Artificial Intelligence Research 2, 1995. .. [2] "The error coding method and PICTs", <NAME>., <NAME>., Journal of Computational and Graphical statistics 7, 1998. .. [3] "The Elements of Statistical Learning", <NAME>., <NAME>., <NAME>., page 606 (second-edition) 2008. Fit underlying estimators. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. y : numpy array of shape [n_samples] Multi-class targets. Returns ------- self # FIXME: there are more elaborate methods than generating the codebook # randomly. Predict multi-class targets using underlying estimators. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. Returns ------- y : numpy array of shape [n_samples] Predicted multi-class targets. | 3.153011 | 3 |
dffml/cli/dataflow.py | sauravsrijan/dffml | 0 | 6624397 | import sys
import pathlib
import hashlib
import contextlib
from ..base import BaseConfig
from ..df.base import BaseOrchestrator
from ..df.types import DataFlow, Stage, Operation, Input
from ..df.memory import (
MemoryOrchestrator,
MemoryInputSet,
MemoryInputSetConfig,
StringInputSetContext,
)
from ..config.config import BaseConfigLoader
from ..config.json import JSONConfigLoader
from ..source.source import SubsetSources
from ..util.data import merge
from ..util.entrypoint import load
from ..util.cli.arg import Arg
from ..util.cli.cmd import CMD
from ..util.cli.cmds import SourcesCMD, KeysCMD
from ..util.cli.parser import ParseInputsAction
class Merge(CMD):
    """Merge multiple exported DataFlows into a single DataFlow and print
    the result using the chosen ConfigLoader."""
    arg_dataflows = Arg(
        "dataflows", help="DataFlows to merge", nargs="+", type=pathlib.Path
    )
    arg_config = Arg(
        "-config",
        help="ConfigLoader to use for exporting",
        type=BaseConfigLoader.load,
        default=JSONConfigLoader,
    )
    arg_not_linked = Arg(
        "-not-linked",
        dest="not_linked",
        help="Do not export dataflows as linked",
        default=False,
        action="store_true",
    )
    async def run(self):
        # The merged dataflow
        merged: Dict[str, Any] = {}
        # For entering ConfigLoader contexts
        async with contextlib.AsyncExitStack() as exit_stack:
            # Load config loaders we'll need as we see their file types
            parsers: Dict[str, BaseConfigLoader] = {}
            for path in self.dataflows:
                # load_file picks (and caches in ``parsers``) a loader based
                # on the file's extension, then returns the parsed dict.
                _, exported = await BaseConfigLoader.load_file(
                    parsers, exit_stack, path
                )
                # Later dataflows are merged on top of earlier ones.
                merge(merged, exported)
            # Export the dataflow
            dataflow = DataFlow._fromdict(**merged)
            async with self.config(BaseConfig()) as configloader:
                async with configloader() as loader:
                    exported = dataflow.export(linked=not self.not_linked)
                    # dumpb returns bytes; decode for printing to stdout.
                    print((await loader.dumpb(exported)).decode())
class Create(CMD):
    """Create a DataFlow covering the given operations and print its
    export using the chosen ConfigLoader."""
    arg_operations = Arg(
        "operations", nargs="+", help="Operations to create a dataflow for"
    )
    arg_config = Arg(
        "-config",
        help="ConfigLoader to use",
        type=BaseConfigLoader.load,
        default=JSONConfigLoader,
    )
    arg_not_linked = Arg(
        "-not-linked",
        dest="not_linked",
        help="Do not export dataflows as linked",
        default=False,
        action="store_true",
    )
    async def run(self):
        # Resolve each operation specifier: entrypoint-style "pkg:name"
        # specifiers go through load(), bare names through Operation.load().
        operations = []
        for spec in self.operations:
            if ":" in spec:
                operations.extend(load(spec))
            else:
                operations.append(Operation.load(spec))
        async with self.config(BaseConfig()) as configloader:
            async with configloader() as loader:
                flow = DataFlow.auto(*operations)
                serialized = await loader.dumpb(
                    flow.export(linked=not self.not_linked)
                )
                print(serialized.decode())
class RunCMD(SourcesCMD):
    """Shared command-line arguments for the dataflow ``run`` subcommands.

    Subclasses (RunAllRepos / RunRepoSet) supply the repo-selection logic;
    this base class only declares the arguments and instantiates the
    orchestrator.
    """
    arg_sources = SourcesCMD.arg_sources.modify(required=False)
    arg_caching = Arg(
        "-caching",
        help="Skip running DataFlow if a repo already contains these features",
        nargs="+",
        required=False,
        # NOTE(review): a shared mutable default ([]) — assumes Arg copies
        # defaults per parse; confirm Arg's behavior.
        default=[],
    )
    arg_no_update = Arg(
        "-no-update",
        # NOTE(review): this flag *disables* updating repos (see
        # RunAllRepos.run_dataflow); the help text reads like the opposite.
        help="Update repo with sources",
        required=False,
        default=False,
        action="store_true",
    )
    arg_no_strict = Arg(
        "-no-strict",
        help="Do not exit on operation exceptions, just log errors",
        dest="no_strict",
        required=False,
        default=False,
        action="store_true",
    )
    arg_dataflow = Arg(
        "-dataflow", help="File containing exported DataFlow", required=True
    )
    arg_config = Arg(
        "-config",
        # When None, the loader is inferred from the dataflow file extension
        # (see RunAllRepos.run).
        help="ConfigLoader to use for importing DataFlow",
        type=BaseConfigLoader.load,
        default=None,
    )
    arg_orchestrator = Arg(
        "-orchestrator", type=BaseOrchestrator.load, default=MemoryOrchestrator
    )
    arg_inputs = Arg(
        "-inputs",
        nargs="+",
        action=ParseInputsAction,
        default=[],
        help="Other inputs to add under each ctx (repo's src_url will "
        + "be used as the context)",
    )
    arg_repo_def = Arg(
        "-repo-def",
        # Falsy default signals "do not add repo.src_url as an input".
        default=False,
        type=str,
        help="Definition to be used for repo.src_url."
        + "If set, repo.src_url will be added to the set of inputs "
        + "under each context (which is also the repo's src_url)",
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bind any extra command-line config onto the orchestrator class,
        # producing the configured orchestrator used by run().
        self.orchestrator = self.orchestrator.withconfig(self.extra_config)
class RunAllRepos(RunCMD):
    """Run dataflow for all repos in sources"""
    async def repos(self, sctx):
        """
        Yield every repo from the source context.

        This method exists so that it can be overriden by RunRepoSet
        """
        async for repo in sctx.repos():
            yield repo
    async def run_dataflow(self, orchestrator, sources, dataflow):
        """Run ``dataflow`` once per repo and yield each repo after its
        results have been stored (and optionally persisted)."""
        # Orchestrate the running of these operations
        async with orchestrator(dataflow) as octx, sources() as sctx:
            # Add our inputs to the input network with the context being the
            # repo src_url
            inputs = []
            async for repo in self.repos(sctx):
                # Skip running DataFlow if repo already has features
                existing_features = repo.features()
                if self.caching and all(
                    map(
                        lambda cached: cached in existing_features,
                        self.caching,
                    )
                ):
                    continue
                # Inputs shared via -inputs, given under this repo's context.
                repo_inputs = []
                for value, def_name in self.inputs:
                    repo_inputs.append(
                        Input(
                            value=value,
                            definition=dataflow.definitions[def_name],
                        )
                    )
                # Optionally feed the repo's own src_url in as an input
                # under the definition named by -repo-def.
                if self.repo_def:
                    repo_inputs.append(
                        Input(
                            value=repo.src_url,
                            definition=dataflow.definitions[self.repo_def],
                        )
                    )
                # TODO(p1) When OrchestratorContext is fixed to accept an
                # asyncgenerator we won't have to build a list
                inputs.append(
                    MemoryInputSet(
                        MemoryInputSetConfig(
                            ctx=StringInputSetContext(repo.src_url),
                            inputs=repo_inputs,
                        )
                    )
                )
            # Every repo was cached; nothing to run.
            if not inputs:
                return
            async for ctx, results in octx.run(
                *inputs, strict=not self.no_strict
            ):
                ctx_str = (await ctx.handle()).as_string()
                # TODO(p4) Make a RepoInputSetContext which would let us
                # store the repo instead of recalling it by the URL
                repo = await sctx.repo(ctx_str)
                # Store the results
                repo.evaluated(results)
                yield repo
                # Persist the repo back to its source unless -no-update.
                if not self.no_update:
                    await sctx.update(repo)
    async def run(self):
        """Load the exported DataFlow from disk and stream the repos
        produced by run_dataflow."""
        dataflow_path = pathlib.Path(self.dataflow)
        config_cls = self.config
        if config_cls is None:
            # Infer the ConfigLoader from the file extension (e.g. ".json").
            config_type = dataflow_path.suffix.replace(".", "")
            config_cls = BaseConfigLoader.load(config_type)
        async with config_cls.withconfig(self.extra_config) as configloader:
            async with configloader() as loader:
                exported = await loader.loadb(dataflow_path.read_bytes())
                dataflow = DataFlow._fromdict(**exported)
        async with self.orchestrator as orchestrator, self.sources as sources:
            async for repo in self.run_dataflow(
                orchestrator, sources, dataflow
            ):
                yield repo
class RunRepoSet(RunAllRepos, KeysCMD):
"""Run dataflow for single repo or set of repos"""
async def repos(self, sctx):
for src_url in self.keys:
yield await sctx.repo(src_url)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sources = SubsetSources(*self.sources, keys=self.keys)
class RunRepos(CMD):
"""Run DataFlow and assign output to a repo"""
_set = RunRepoSet
_all = RunAllRepos
class Run(CMD):
"""Run dataflow"""
repos = RunRepos
class Diagram(CMD):
arg_stages = Arg(
"-stages",
help="Which stages to display: (processing, cleanup, output)",
nargs="+",
default=[],
required=False,
)
arg_simple = Arg(
"-simple",
help="Don't display input and output names",
default=False,
action="store_true",
required=False,
)
arg_display = Arg(
"-display",
help="How to display (TD: top down, LR, RL, BT)",
default="TD",
required=False,
)
arg_dataflow = Arg("dataflow", help="File containing exported DataFlow")
arg_config = Arg(
"-config",
help="ConfigLoader to use for importing",
type=BaseConfigLoader.load,
default=None,
)
async def run(self):
dataflow_path = pathlib.Path(self.dataflow)
config_cls = self.config
if config_cls is None:
config_type = dataflow_path.suffix.replace(".", "")
config_cls = BaseConfigLoader.load(config_type)
async with config_cls.withconfig(self.extra_config) as configloader:
async with configloader() as loader:
exported = await loader.loadb(dataflow_path.read_bytes())
dataflow = DataFlow._fromdict(**exported)
print(f"graph {self.display}")
for stage in Stage:
# Skip stage if not wanted
if self.stages and stage.value not in self.stages:
continue
stage_node = hashlib.md5(
("stage." + stage.value).encode()
).hexdigest()
if len(self.stages) != 1:
print(f"subgraph {stage_node}[{stage.value.title()} Stage]")
print(f"style {stage_node} fill:#afd388b5,stroke:#a4ca7a")
for instance_name, operation in dataflow.operations.items():
if operation.stage != stage:
continue
subgraph_node = hashlib.md5(
("subgraph." + instance_name).encode()
).hexdigest()
node = hashlib.md5(instance_name.encode()).hexdigest()
if not self.simple:
print(f"subgraph {subgraph_node}[{instance_name}]")
print(f"style {subgraph_node} fill:#fff4de,stroke:#cece71")
print(f"{node}[{operation.instance_name}]")
for input_name in operation.inputs.keys():
input_node = hashlib.md5(
("input." + instance_name + "." + input_name).encode()
).hexdigest()
if not self.simple:
print(f"{input_node}({input_name})")
print(f"{input_node} --> {node}")
for output_name in operation.outputs.keys():
output_node = hashlib.md5(
(
"output." + instance_name + "." + output_name
).encode()
).hexdigest()
if not self.simple:
print(f"{output_node}({output_name})")
print(f"{node} --> {output_node}")
if not self.simple:
print(f"end")
if len(self.stages) != 1:
print(f"end")
if len(self.stages) != 1:
print(f"subgraph inputs[Inputs]")
print(f"style inputs fill:#f6dbf9,stroke:#a178ca")
for instance_name, input_flow in dataflow.flow.items():
operation = dataflow.operations[instance_name]
if self.stages and not operation.stage.value in self.stages:
continue
node = hashlib.md5(instance_name.encode()).hexdigest()
for input_name, sources in input_flow.inputs.items():
for source in sources:
# TODO Put various sources in their own "Inputs" subgraphs
if isinstance(source, str):
input_definition = operation.inputs[input_name]
seed_input_node = hashlib.md5(
(source + "." + input_definition.name).encode()
).hexdigest()
print(f"{seed_input_node}({input_definition.name})")
if len(self.stages) == 1:
print(
f"style {seed_input_node} fill:#f6dbf9,stroke:#a178ca"
)
if not self.simple:
input_node = hashlib.md5(
(
"input." + instance_name + "." + input_name
).encode()
).hexdigest()
print(f"{seed_input_node} --> {input_node}")
else:
print(f"{seed_input_node} --> {node}")
else:
if not self.simple:
source_output_node = hashlib.md5(
(
"output."
+ ".".join(list(source.items())[0])
).encode()
).hexdigest()
input_node = hashlib.md5(
(
"input." + instance_name + "." + input_name
).encode()
).hexdigest()
print(f"{source_output_node} --> {input_node}")
else:
source_operation_node = hashlib.md5(
list(source.keys())[0].encode()
).hexdigest()
print(f"{source_operation_node} --> {node}")
if len(self.stages) != 1:
print(f"end")
# Name collision
class Dataflow(CMD):
merge = Merge
create = Create
run = Run
diagram = Diagram
| import sys
import pathlib
import hashlib
import contextlib
from ..base import BaseConfig
from ..df.base import BaseOrchestrator
from ..df.types import DataFlow, Stage, Operation, Input
from ..df.memory import (
MemoryOrchestrator,
MemoryInputSet,
MemoryInputSetConfig,
StringInputSetContext,
)
from ..config.config import BaseConfigLoader
from ..config.json import JSONConfigLoader
from ..source.source import SubsetSources
from ..util.data import merge
from ..util.entrypoint import load
from ..util.cli.arg import Arg
from ..util.cli.cmd import CMD
from ..util.cli.cmds import SourcesCMD, KeysCMD
from ..util.cli.parser import ParseInputsAction
class Merge(CMD):
arg_dataflows = Arg(
"dataflows", help="DataFlows to merge", nargs="+", type=pathlib.Path
)
arg_config = Arg(
"-config",
help="ConfigLoader to use for exporting",
type=BaseConfigLoader.load,
default=JSONConfigLoader,
)
arg_not_linked = Arg(
"-not-linked",
dest="not_linked",
help="Do not export dataflows as linked",
default=False,
action="store_true",
)
async def run(self):
# The merged dataflow
merged: Dict[str, Any] = {}
# For entering ConfigLoader contexts
async with contextlib.AsyncExitStack() as exit_stack:
# Load config loaders we'll need as we see their file types
parsers: Dict[str, BaseConfigLoader] = {}
for path in self.dataflows:
_, exported = await BaseConfigLoader.load_file(
parsers, exit_stack, path
)
merge(merged, exported)
# Export the dataflow
dataflow = DataFlow._fromdict(**merged)
async with self.config(BaseConfig()) as configloader:
async with configloader() as loader:
exported = dataflow.export(linked=not self.not_linked)
print((await loader.dumpb(exported)).decode())
class Create(CMD):
arg_operations = Arg(
"operations", nargs="+", help="Operations to create a dataflow for"
)
arg_config = Arg(
"-config",
help="ConfigLoader to use",
type=BaseConfigLoader.load,
default=JSONConfigLoader,
)
arg_not_linked = Arg(
"-not-linked",
dest="not_linked",
help="Do not export dataflows as linked",
default=False,
action="store_true",
)
async def run(self):
operations = []
for load_operation in self.operations:
if ":" in load_operation:
operations += list(load(load_operation))
else:
operations += [Operation.load(load_operation)]
async with self.config(BaseConfig()) as configloader:
async with configloader() as loader:
dataflow = DataFlow.auto(*operations)
exported = dataflow.export(linked=not self.not_linked)
print((await loader.dumpb(exported)).decode())
class RunCMD(SourcesCMD):
arg_sources = SourcesCMD.arg_sources.modify(required=False)
arg_caching = Arg(
"-caching",
help="Skip running DataFlow if a repo already contains these features",
nargs="+",
required=False,
default=[],
)
arg_no_update = Arg(
"-no-update",
help="Update repo with sources",
required=False,
default=False,
action="store_true",
)
arg_no_strict = Arg(
"-no-strict",
help="Do not exit on operation exceptions, just log errors",
dest="no_strict",
required=False,
default=False,
action="store_true",
)
arg_dataflow = Arg(
"-dataflow", help="File containing exported DataFlow", required=True
)
arg_config = Arg(
"-config",
help="ConfigLoader to use for importing DataFlow",
type=BaseConfigLoader.load,
default=None,
)
arg_orchestrator = Arg(
"-orchestrator", type=BaseOrchestrator.load, default=MemoryOrchestrator
)
arg_inputs = Arg(
"-inputs",
nargs="+",
action=ParseInputsAction,
default=[],
help="Other inputs to add under each ctx (repo's src_url will "
+ "be used as the context)",
)
arg_repo_def = Arg(
"-repo-def",
default=False,
type=str,
help="Definition to be used for repo.src_url."
+ "If set, repo.src_url will be added to the set of inputs "
+ "under each context (which is also the repo's src_url)",
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.orchestrator = self.orchestrator.withconfig(self.extra_config)
class RunAllRepos(RunCMD):
"""Run dataflow for all repos in sources"""
async def repos(self, sctx):
"""
This method exists so that it can be overriden by RunRepoSet
"""
async for repo in sctx.repos():
yield repo
async def run_dataflow(self, orchestrator, sources, dataflow):
# Orchestrate the running of these operations
async with orchestrator(dataflow) as octx, sources() as sctx:
# Add our inputs to the input network with the context being the
# repo src_url
inputs = []
async for repo in self.repos(sctx):
# Skip running DataFlow if repo already has features
existing_features = repo.features()
if self.caching and all(
map(
lambda cached: cached in existing_features,
self.caching,
)
):
continue
repo_inputs = []
for value, def_name in self.inputs:
repo_inputs.append(
Input(
value=value,
definition=dataflow.definitions[def_name],
)
)
if self.repo_def:
repo_inputs.append(
Input(
value=repo.src_url,
definition=dataflow.definitions[self.repo_def],
)
)
# TODO(p1) When OrchestratorContext is fixed to accept an
# asyncgenerator we won't have to build a list
inputs.append(
MemoryInputSet(
MemoryInputSetConfig(
ctx=StringInputSetContext(repo.src_url),
inputs=repo_inputs,
)
)
)
if not inputs:
return
async for ctx, results in octx.run(
*inputs, strict=not self.no_strict
):
ctx_str = (await ctx.handle()).as_string()
# TODO(p4) Make a RepoInputSetContext which would let us
# store the repo instead of recalling it by the URL
repo = await sctx.repo(ctx_str)
# Store the results
repo.evaluated(results)
yield repo
if not self.no_update:
await sctx.update(repo)
async def run(self):
dataflow_path = pathlib.Path(self.dataflow)
config_cls = self.config
if config_cls is None:
config_type = dataflow_path.suffix.replace(".", "")
config_cls = BaseConfigLoader.load(config_type)
async with config_cls.withconfig(self.extra_config) as configloader:
async with configloader() as loader:
exported = await loader.loadb(dataflow_path.read_bytes())
dataflow = DataFlow._fromdict(**exported)
async with self.orchestrator as orchestrator, self.sources as sources:
async for repo in self.run_dataflow(
orchestrator, sources, dataflow
):
yield repo
class RunRepoSet(RunAllRepos, KeysCMD):
"""Run dataflow for single repo or set of repos"""
async def repos(self, sctx):
for src_url in self.keys:
yield await sctx.repo(src_url)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sources = SubsetSources(*self.sources, keys=self.keys)
class RunRepos(CMD):
"""Run DataFlow and assign output to a repo"""
_set = RunRepoSet
_all = RunAllRepos
class Run(CMD):
"""Run dataflow"""
repos = RunRepos
class Diagram(CMD):
arg_stages = Arg(
"-stages",
help="Which stages to display: (processing, cleanup, output)",
nargs="+",
default=[],
required=False,
)
arg_simple = Arg(
"-simple",
help="Don't display input and output names",
default=False,
action="store_true",
required=False,
)
arg_display = Arg(
"-display",
help="How to display (TD: top down, LR, RL, BT)",
default="TD",
required=False,
)
arg_dataflow = Arg("dataflow", help="File containing exported DataFlow")
arg_config = Arg(
"-config",
help="ConfigLoader to use for importing",
type=BaseConfigLoader.load,
default=None,
)
async def run(self):
dataflow_path = pathlib.Path(self.dataflow)
config_cls = self.config
if config_cls is None:
config_type = dataflow_path.suffix.replace(".", "")
config_cls = BaseConfigLoader.load(config_type)
async with config_cls.withconfig(self.extra_config) as configloader:
async with configloader() as loader:
exported = await loader.loadb(dataflow_path.read_bytes())
dataflow = DataFlow._fromdict(**exported)
print(f"graph {self.display}")
for stage in Stage:
# Skip stage if not wanted
if self.stages and stage.value not in self.stages:
continue
stage_node = hashlib.md5(
("stage." + stage.value).encode()
).hexdigest()
if len(self.stages) != 1:
print(f"subgraph {stage_node}[{stage.value.title()} Stage]")
print(f"style {stage_node} fill:#afd388b5,stroke:#a4ca7a")
for instance_name, operation in dataflow.operations.items():
if operation.stage != stage:
continue
subgraph_node = hashlib.md5(
("subgraph." + instance_name).encode()
).hexdigest()
node = hashlib.md5(instance_name.encode()).hexdigest()
if not self.simple:
print(f"subgraph {subgraph_node}[{instance_name}]")
print(f"style {subgraph_node} fill:#fff4de,stroke:#cece71")
print(f"{node}[{operation.instance_name}]")
for input_name in operation.inputs.keys():
input_node = hashlib.md5(
("input." + instance_name + "." + input_name).encode()
).hexdigest()
if not self.simple:
print(f"{input_node}({input_name})")
print(f"{input_node} --> {node}")
for output_name in operation.outputs.keys():
output_node = hashlib.md5(
(
"output." + instance_name + "." + output_name
).encode()
).hexdigest()
if not self.simple:
print(f"{output_node}({output_name})")
print(f"{node} --> {output_node}")
if not self.simple:
print(f"end")
if len(self.stages) != 1:
print(f"end")
if len(self.stages) != 1:
print(f"subgraph inputs[Inputs]")
print(f"style inputs fill:#f6dbf9,stroke:#a178ca")
for instance_name, input_flow in dataflow.flow.items():
operation = dataflow.operations[instance_name]
if self.stages and not operation.stage.value in self.stages:
continue
node = hashlib.md5(instance_name.encode()).hexdigest()
for input_name, sources in input_flow.inputs.items():
for source in sources:
# TODO Put various sources in their own "Inputs" subgraphs
if isinstance(source, str):
input_definition = operation.inputs[input_name]
seed_input_node = hashlib.md5(
(source + "." + input_definition.name).encode()
).hexdigest()
print(f"{seed_input_node}({input_definition.name})")
if len(self.stages) == 1:
print(
f"style {seed_input_node} fill:#f6dbf9,stroke:#a178ca"
)
if not self.simple:
input_node = hashlib.md5(
(
"input." + instance_name + "." + input_name
).encode()
).hexdigest()
print(f"{seed_input_node} --> {input_node}")
else:
print(f"{seed_input_node} --> {node}")
else:
if not self.simple:
source_output_node = hashlib.md5(
(
"output."
+ ".".join(list(source.items())[0])
).encode()
).hexdigest()
input_node = hashlib.md5(
(
"input." + instance_name + "." + input_name
).encode()
).hexdigest()
print(f"{source_output_node} --> {input_node}")
else:
source_operation_node = hashlib.md5(
list(source.keys())[0].encode()
).hexdigest()
print(f"{source_operation_node} --> {node}")
if len(self.stages) != 1:
print(f"end")
# Name collision
class Dataflow(CMD):
merge = Merge
create = Create
run = Run
diagram = Diagram
| en | 0.841806 | # The merged dataflow # For entering ConfigLoader contexts # Load config loaders we'll need as we see their file types # Export the dataflow Run dataflow for all repos in sources This method exists so that it can be overriden by RunRepoSet # Orchestrate the running of these operations # Add our inputs to the input network with the context being the # repo src_url # Skip running DataFlow if repo already has features # TODO(p1) When OrchestratorContext is fixed to accept an # asyncgenerator we won't have to build a list # TODO(p4) Make a RepoInputSetContext which would let us # store the repo instead of recalling it by the URL # Store the results Run dataflow for single repo or set of repos Run DataFlow and assign output to a repo Run dataflow # Skip stage if not wanted #afd388b5,stroke:#a4ca7a") #fff4de,stroke:#cece71") #f6dbf9,stroke:#a178ca") # TODO Put various sources in their own "Inputs" subgraphs #f6dbf9,stroke:#a178ca" # Name collision | 2.042589 | 2 |
timm/models/dla.py | guitarmind/pytorch-image-models | 80 | 6624398 | """ Deep Layer Aggregation and DLA w/ Res2Net
DLA original adapted from Official Pytorch impl at:
DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484
Res2Net additions from: https://github.com/gasvn/Res2Net/
Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import load_pretrained
from .layers import SelectAdaptivePool2d
from .registry import register_model
__all__ = ['DLA']
def _cfg(url='', **kwargs):
    """Build the default pretrained-config dict for a DLA variant.

    Any keyword overrides in ``kwargs`` replace the corresponding defaults.
    """
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
        'crop_pct': 0.875, 'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'base_layer.0', 'classifier': 'fc',
    }
    cfg.update(kwargs)
    return cfg
# Pretrained-weight configs for every DLA variant, keyed by model name.
# URLs point at the original DLA release (dl.yf.io) and the timm-hosted
# Res2Net/Res2NeXt DLA conversions.
default_cfgs = {
    'dla34': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth'),
    'dla46_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth'),
    'dla46x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth'),
    'dla60x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth'),
    'dla60': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth'),
    'dla60x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth'),
    'dla102': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth'),
    'dla102x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth'),
    'dla102x2': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth'),
    'dla169': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth'),
    'dla60_res2net': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth'),
    'dla60_res2next': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth'),
}
class DlaBasic(nn.Module):
    """DLA basic residual block: two 3x3 conv-BN stages with an additive skip.

    Extra keyword args (e.g. cardinality/base_width passed by DlaTree) are
    accepted and ignored via ``**_`` so it is interchangeable with the
    bottleneck blocks.
    """

    def __init__(self, inplanes, planes, stride=1, dilation=1, **_):
        super(DlaBasic, self).__init__()
        # NOTE: attribute names (conv1/bn1/...) must match the pretrained
        # state dict keys, so they are kept as-is.
        self.conv1 = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.stride = stride

    def forward(self, x, residual=None):
        # The caller (DlaTree) may supply a projected shortcut; default to x.
        shortcut = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = self.relu(out + shortcut)
        return out
class DlaBottleneck(nn.Module):
    """DLA/DLA-X bottleneck: 1x1 reduce -> 3x3 (optionally grouped) -> 1x1 expand.

    With cardinality > 1 the 3x3 conv is a grouped (ResNeXt-style) conv; the
    intermediate width is scaled by ``base_width / 64`` times the cardinality.
    """
    expansion = 2

    def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64):
        super(DlaBottleneck, self).__init__()
        self.stride = stride
        # Bottleneck width before the final 1x1 expansion.
        mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) // self.expansion
        self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.conv2 = nn.Conv2d(
            mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation,
            bias=False, dilation=dilation, groups=cardinality)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(outplanes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, residual=None):
        # The caller (DlaTree) may supply a projected shortcut; default to x.
        shortcut = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + shortcut)
class DlaBottle2neck(nn.Module):
    """ Res2Net/Res2NeXT DLA Bottleneck
    Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py
    """
    expansion = 2
    def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4):
        super(DlaBottle2neck, self).__init__()
        # stride > 1 marks the first block of a stage: the hierarchical
        # split-to-split residuals are disabled and the pass-through split
        # is average-pooled instead.
        self.is_first = stride > 1
        self.scale = scale
        mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality)
        mid_planes = mid_planes // self.expansion
        # Channel width of each of the `scale` splits produced by conv1.
        self.width = mid_planes
        self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes * scale)
        # One 3x3 (grouped) conv per split, except the last split which is
        # passed through (or pooled) — hence scale - 1 convs (min 1).
        num_scale_convs = max(1, scale - 1)
        convs = []
        bns = []
        for _ in range(num_scale_convs):
            convs.append(nn.Conv2d(
                mid_planes, mid_planes, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation, groups=cardinality, bias=False))
            bns.append(nn.BatchNorm2d(mid_planes))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        if self.is_first:
            # Only created (and only accessed in forward) when downsampling.
            self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
        self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(outplanes)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x, residual=None):
        if residual is None:
            residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        # Split channels into `scale` groups; each conv's input is its split
        # plus the previous split's output (hierarchical residual), except in
        # the first block of a stage where splits are processed independently.
        spx = torch.split(out, self.width, 1)
        spo = []
        for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):
            sp = spx[i] if i == 0 or self.is_first else sp + spx[i]
            sp = conv(sp)
            sp = bn(sp)
            sp = self.relu(sp)
            spo.append(sp)
        if self.scale > 1:
            # Last split bypasses the convs; pool it when downsampling so the
            # spatial size matches the conv outputs.
            spo.append(self.pool(spx[-1]) if self.is_first else spx[-1])
        out = torch.cat(spo, 1)
        out = self.conv3(out)
        out = self.bn3(out)
        out += residual
        out = self.relu(out)
        return out
class DlaRoot(nn.Module):
    """Aggregation node: concatenate child features, fuse with a 1x1 conv + BN,
    optionally add the first child back as a residual, then ReLU.
    """

    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(DlaRoot, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        children = x
        out = self.bn(self.conv(torch.cat(children, 1)))
        if self.residual:
            # Skip connection from the first child (must match out_channels).
            out = out + children[0]
        return self.relu(out)
class DlaTree(nn.Module):
    """Recursive aggregation tree of depth `levels`.

    At levels == 1 the tree is two blocks joined by a DlaRoot; at greater
    depth each child is itself a DlaTree one level shallower, and the root
    lives in the deepest tree2 (root_dim accumulates the extra channels).
    """
    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 dilation=1, cardinality=1, base_width=64,
                 level_root=False, root_dim=0, root_kernel_size=1, root_residual=False):
        super(DlaTree, self).__init__()
        if root_dim == 0:
            # Default: root fuses the two child outputs.
            root_dim = 2 * out_channels
        if level_root:
            # The stage input is also fed to the root.
            root_dim += in_channels
        cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width)
        if levels == 1:
            # Leaf: two plain blocks, first one carries the stride.
            self.tree1 = block(in_channels, out_channels, stride, **cargs)
            self.tree2 = block(out_channels, out_channels, 1, **cargs)
        else:
            cargs.update(dict(root_kernel_size=root_kernel_size, root_residual=root_residual))
            self.tree1 = DlaTree(
                levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs)
            self.tree2 = DlaTree(
                levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs)
        if levels == 1:
            self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        # Spatial downsample for the shortcut path when striding.
        self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else None
        # Channel projection for the shortcut when in/out channels differ.
        self.project = None
        if in_channels != out_channels:
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(out_channels)
            )
        self.levels = levels
    def forward(self, x, residual=None, children=None):
        children = [] if children is None else children
        # FIXME the way downsample / project are used here and residual is passed to next level up
        # the tree, the residual is overridden and some project weights are thus never used and
        # have no gradients. This appears to be an issue with the original model / weights.
        bottom = self.downsample(x) if self.downsample is not None else x
        residual = self.project(bottom) if self.project is not None else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            # Leaf: aggregate both block outputs (plus any accumulated
            # children) through the root node.
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            # Interior: pass x1 down so the deepest root can aggregate it.
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """Deep Layer Aggregation network (https://arxiv.org/abs/1707.06484).

    Args:
        levels: number of blocks (or tree depth) per stage, 6 entries.
        channels: output channels per stage, 6 entries.
        num_classes: classifier output size.
        in_chans: input image channels.
        cardinality, base_width: grouped-conv parameters forwarded to blocks.
        block: residual block class (DlaBasic / DlaBottleneck / DlaBottle2neck).
        residual_root: add residual connections inside DlaRoot nodes.
        linear_root: accepted for interface compatibility; unused here.
        drop_rate: dropout probability before the classifier.
        global_pool: pooling type for SelectAdaptivePool2d.
    """
    def __init__(self, levels, channels, num_classes=1000, in_chans=3, cardinality=1, base_width=64,
                 block=DlaBottle2neck, residual_root=False, linear_root=False,
                 drop_rate=0.0, global_pool='avg'):
        super(DLA, self).__init__()
        self.channels = channels
        self.num_classes = num_classes
        self.cardinality = cardinality
        self.base_width = base_width
        self.drop_rate = drop_rate
        # Stem: 7x7 stride-1 conv (no downsampling at the stem in DLA).
        self.base_layer = nn.Sequential(
            nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False),
            nn.BatchNorm2d(channels[0]),
            nn.ReLU(inplace=True))
        # Stages 0-1 are plain conv stacks; stages 2-5 are aggregation trees,
        # each halving spatial resolution.
        self.level0 = self._make_conv_level(channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2)
        cargs = dict(cardinality=cardinality, base_width=base_width, root_residual=residual_root)
        self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs)
        self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs)
        self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs)
        self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs)
        self.num_features = channels[-1]
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        # Classifier is a 1x1 conv applied to the pooled feature map.
        self.fc = nn.Conv2d(self.num_features * self.global_pool.feat_mult(), num_classes, 1, bias=True)
        # He-style init for convs; BN to identity (weight 1, bias 0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack `convs` conv-BN-ReLU units; only the first carries `stride`."""
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                nn.BatchNorm2d(planes),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)
    def get_classifier(self):
        """Return the classifier module (a 1x1 Conv2d, or Identity)."""
        return self.fc
    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace pooling and classifier head for `num_classes` outputs.

        num_classes == 0 removes the classifier (replaced by Identity).
        """
        self.num_classes = num_classes
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        if num_classes:
            num_features = self.num_features * self.global_pool.feat_mult()
            self.fc = nn.Conv2d(num_features, num_classes, kernel_size=1, bias=True)
        else:
            self.fc = nn.Identity()
    def forward_features(self, x):
        """Run the stem and all six stages; returns the final feature map."""
        x = self.base_layer(x)
        x = self.level0(x)
        x = self.level1(x)
        x = self.level2(x)
        x = self.level3(x)
        x = self.level4(x)
        x = self.level5(x)
        return x
    def forward(self, x):
        x = self.forward_features(x)
        x = self.global_pool(x)
        if self.drop_rate > 0.:
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        # fc is a 1x1 conv, so flatten the trailing 1x1 spatial dims.
        x = self.fc(x)
        return x.flatten(1)
@register_model
def dla60_res2net(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-60 with Res2Net bottleneck blocks."""
    cfg = default_cfgs['dla60_res2net']
    model = DLA(
        levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),
        block=DlaBottle2neck, cardinality=1, base_width=28,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla60_res2next(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-60 with Res2NeXt (grouped Res2Net) bottleneck blocks."""
    cfg = default_cfgs['dla60_res2next']
    model = DLA(
        levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),
        block=DlaBottle2neck, cardinality=8, base_width=4,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla34(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-34 (basic residual blocks)."""
    cfg = default_cfgs['dla34']
    model = DLA(
        levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512],
        block=DlaBasic, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla46_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-46-C (compact, bottleneck blocks)."""
    cfg = default_cfgs['dla46_c']
    model = DLA(
        levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256],
        block=DlaBottleneck, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla46x_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-X-46-C (compact, grouped bottleneck blocks)."""
    cfg = default_cfgs['dla46x_c']
    model = DLA(
        levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256],
        block=DlaBottleneck, cardinality=32, base_width=4,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla60x_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-X-60-C (compact, grouped bottleneck blocks)."""
    cfg = default_cfgs['dla60x_c']
    model = DLA(
        levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256],
        block=DlaBottleneck, cardinality=32, base_width=4,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla60(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-60 (bottleneck blocks)."""
    cfg = default_cfgs['dla60']
    model = DLA(
        levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla60x(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-X-60 (grouped bottleneck blocks)."""
    cfg = default_cfgs['dla60x']
    model = DLA(
        levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, cardinality=32, base_width=4,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla102(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-102 (bottleneck blocks, residual root nodes)."""
    cfg = default_cfgs['dla102']
    model = DLA(
        levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, residual_root=True,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla102x(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-X-102 (grouped bottleneck blocks, residual root nodes)."""
    cfg = default_cfgs['dla102x']
    model = DLA(
        levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, cardinality=32, base_width=4, residual_root=True,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla102x2(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-X-102 64x4d (cardinality-64 bottleneck blocks, residual roots)."""
    cfg = default_cfgs['dla102x2']
    model = DLA(
        levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, cardinality=64, base_width=4, residual_root=True,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla169(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-169 (bottleneck blocks, residual root nodes)."""
    cfg = default_cfgs['dla169']
    model = DLA(
        levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, residual_root=True,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
| """ Deep Layer Aggregation and DLA w/ Res2Net
DLA original adapted from Official Pytorch impl at:
DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484
Res2Net additions from: https://github.com/gasvn/Res2Net/
Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import load_pretrained
from .layers import SelectAdaptivePool2d
from .registry import register_model
__all__ = ['DLA']
def _cfg(url='', **kwargs):
    """Return a default pretrained-model config dict; kwargs override defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
        'crop_pct': 0.875, 'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'base_layer.0', 'classifier': 'fc',
    }
    cfg.update(kwargs)
    return cfg
# Pretrained checkpoint configs keyed by model name (values built by _cfg()).
default_cfgs = {
    'dla34': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth'),
    'dla46_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth'),
    'dla46x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth'),
    'dla60x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth'),
    'dla60': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth'),
    'dla60x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth'),
    'dla102': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth'),
    'dla102x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth'),
    'dla102x2': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth'),
    'dla169': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth'),
    'dla60_res2net': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth'),
    'dla60_res2next': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth'),
}
class DlaBasic(nn.Module):
    """Basic two-conv residual block used by the smaller DLA variants."""

    def __init__(self, inplanes, planes, stride=1, dilation=1, **_):
        super(DlaBasic, self).__init__()
        conv_kwargs = dict(kernel_size=3, padding=dilation, bias=False, dilation=dilation)
        self.conv1 = nn.Conv2d(inplanes, planes, stride=stride, **conv_kwargs)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, stride=1, **conv_kwargs)
        self.bn2 = nn.BatchNorm2d(planes)
        self.stride = stride

    def forward(self, x, residual=None):
        # The shortcut may be supplied by the caller (e.g. a DlaTree projection).
        shortcut = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = self.relu(out + shortcut)
        return out
class DlaBottleneck(nn.Module):
    """1x1 -> (grouped) 3x3 -> 1x1 bottleneck block for DLA / DLA-X."""
    expansion = 2

    def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64):
        super(DlaBottleneck, self).__init__()
        self.stride = stride
        # Internal width follows the ResNeXt convention, then is reduced
        # by the block's expansion factor.
        width = int(math.floor(outplanes * (base_width / 64)) * cardinality) // self.expansion
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = nn.Conv2d(
            width, width, kernel_size=3, stride=stride, padding=dilation,
            bias=False, dilation=dilation, groups=cardinality)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(outplanes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, residual=None):
        shortcut = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + shortcut)
class DlaBottle2neck(nn.Module):
    """ Res2Net/Res2NeXT DLA Bottleneck
    Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py
    """
    expansion = 2
    def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4):
        super(DlaBottle2neck, self).__init__()
        # A strided ("first") block downsamples; in that case the untouched
        # final scale branch is pooled and branches are not accumulated.
        self.is_first = stride > 1
        self.scale = scale
        mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality)
        mid_planes = mid_planes // self.expansion
        # Channel width of each scale branch; conv1 expands to width * scale.
        self.width = mid_planes
        self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes * scale)
        # scale - 1 grouped 3x3 convs (at least one); the last branch is
        # passed through (or pooled) rather than convolved.
        num_scale_convs = max(1, scale - 1)
        convs = []
        bns = []
        for _ in range(num_scale_convs):
            convs.append(nn.Conv2d(
                mid_planes, mid_planes, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation, groups=cardinality, bias=False))
            bns.append(nn.BatchNorm2d(mid_planes))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        if self.is_first:
            # Bypass branch must match the stride of the conv branches.
            self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
        self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(outplanes)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x, residual=None):
        if residual is None:
            residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        # Split channels into `scale` branches of `self.width` channels each.
        spx = torch.split(out, self.width, 1)
        spo = []
        for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):
            # Hierarchical residual: each branch adds the previous branch's
            # output before its conv, except on strided blocks.
            sp = spx[i] if i == 0 or self.is_first else sp + spx[i]
            sp = conv(sp)
            sp = bn(sp)
            sp = self.relu(sp)
            spo.append(sp)
        if self.scale > 1:
            # Final branch bypasses the convs entirely (pooled when striding).
            spo.append(self.pool(spx[-1]) if self.is_first else spx[-1])
        out = torch.cat(spo, 1)
        out = self.conv3(out)
        out = self.bn3(out)
        out += residual
        out = self.relu(out)
        return out
class DlaRoot(nn.Module):
    """Aggregation node: concat children, 1x1 conv + BN, optional residual add."""

    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(DlaRoot, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1, stride=1, bias=False,
            padding=(kernel_size - 1) // 2)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        children = x
        out = self.bn(self.conv(torch.cat(children, 1)))
        if self.residual:
            # Skip connection from the first child (deepest tree output).
            out = out + children[0]
        return self.relu(out)
class DlaTree(nn.Module):
    # Recursive aggregation tree: at depth 1, two blocks feed a DlaRoot; at
    # greater depth, two subtrees are chained and the deepest root merges the
    # accumulated `children` list.
    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 dilation=1, cardinality=1, base_width=64,
                 level_root=False, root_dim=0, root_kernel_size=1, root_residual=False):
        super(DlaTree, self).__init__()
        if root_dim == 0:
            # Default root input: the two child block outputs, concatenated.
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels
        cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width)
        if levels == 1:
            # Leaf: two residual blocks, the first possibly strided.
            self.tree1 = block(in_channels, out_channels, stride, **cargs)
            self.tree2 = block(out_channels, out_channels, 1, **cargs)
        else:
            cargs.update(dict(root_kernel_size=root_kernel_size, root_residual=root_residual))
            self.tree1 = DlaTree(
                levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs)
            self.tree2 = DlaTree(
                levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs)
        if levels == 1:
            self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        # Shortcut helpers: spatial downsample and/or channel projection.
        self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else None
        self.project = None
        if in_channels != out_channels:
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(out_channels)
            )
        self.levels = levels
    def forward(self, x, residual=None, children=None):
        children = [] if children is None else children
        # FIXME the way downsample / project are used here and residual is passed to next level up
        # the tree, the residual is overridden and some project weights are thus never used and
        # have no gradients. This appears to be an issue with the original model / weights.
        bottom = self.downsample(x) if self.downsample is not None else x
        residual = self.project(bottom) if self.project is not None else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            # Leaf: merge the two block outputs plus any carried children.
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """Deep Layer Aggregation classification network.

    Attribute names (base_layer, level0..level5, global_pool, fc) are kept
    stable for checkpoint compatibility.
    """

    def __init__(self, levels, channels, num_classes=1000, in_chans=3, cardinality=1, base_width=64,
                 block=DlaBottle2neck, residual_root=False, linear_root=False,
                 drop_rate=0.0, global_pool='avg'):
        super(DLA, self).__init__()
        self.channels = channels
        self.num_classes = num_classes
        self.cardinality = cardinality
        self.base_width = base_width
        self.drop_rate = drop_rate
        # Stem: 7x7 conv keeping spatial size, then two plain conv stages.
        self.base_layer = nn.Sequential(
            nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False),
            nn.BatchNorm2d(channels[0]),
            nn.ReLU(inplace=True))
        self.level0 = self._make_conv_level(channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2)
        tree_kwargs = dict(cardinality=cardinality, base_width=base_width, root_residual=residual_root)
        self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **tree_kwargs)
        self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **tree_kwargs)
        self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **tree_kwargs)
        self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **tree_kwargs)
        self.num_features = channels[-1]
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.fc = nn.Conv2d(self.num_features * self.global_pool.feat_mult(), num_classes, 1, bias=True)
        # He-style init for convs; unit gamma / zero beta for batch norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack `convs` conv-BN-ReLU triplets; only the first may stride."""
        layers = []
        for idx in range(convs):
            layers += [
                nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if idx == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                nn.BatchNorm2d(planes),
                nn.ReLU(inplace=True)]
            inplanes = planes
        return nn.Sequential(*layers)

    def get_classifier(self):
        """Return the classifier head (a 1x1 conv)."""
        return self.fc

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the pooling + classifier head for `num_classes` outputs."""
        self.num_classes = num_classes
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        if num_classes:
            in_features = self.num_features * self.global_pool.feat_mult()
            self.fc = nn.Conv2d(in_features, num_classes, kernel_size=1, bias=True)
        else:
            self.fc = nn.Identity()

    def forward_features(self, x):
        x = self.base_layer(x)
        for stage in (self.level0, self.level1, self.level2, self.level3, self.level4, self.level5):
            x = stage(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.global_pool(x)
        if self.drop_rate > 0.:
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        return self.fc(x).flatten(1)
@register_model
def dla60_res2net(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-60 with Res2Net bottleneck blocks."""
    cfg = default_cfgs['dla60_res2net']
    model = DLA(
        levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),
        block=DlaBottle2neck, cardinality=1, base_width=28,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla60_res2next(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-60 with Res2NeXt (grouped Res2Net) bottleneck blocks."""
    cfg = default_cfgs['dla60_res2next']
    model = DLA(
        levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),
        block=DlaBottle2neck, cardinality=8, base_width=4,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla34(pretrained=None, num_classes=1000, in_chans=3, **kwargs):  # DLA-34
    """Construct a DLA-34 model (basic residual blocks)."""
    cfg = default_cfgs['dla34']
    model = DLA(
        [1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512], block=DlaBasic,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla46_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs):  # DLA-46-C
    """Construct a compact DLA-46-C model."""
    cfg = default_cfgs['dla46_c']
    model = DLA(
        levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256],
        block=DlaBottleneck, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla46x_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs):  # DLA-X-46-C
    """Construct a compact DLA-X-46-C model (grouped bottlenecks)."""
    cfg = default_cfgs['dla46x_c']
    model = DLA(
        levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256],
        block=DlaBottleneck, cardinality=32, base_width=4,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla60x_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs):  # DLA-X-60-C
    """Construct a compact DLA-X-60-C model (grouped bottlenecks)."""
    cfg = default_cfgs['dla60x_c']
    model = DLA(
        [1, 1, 1, 2, 3, 1], [16, 32, 64, 64, 128, 256],
        block=DlaBottleneck, cardinality=32, base_width=4,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla60(pretrained=None, num_classes=1000, in_chans=3, **kwargs):  # DLA-60
    """Construct a DLA-60 model (bottleneck blocks)."""
    cfg = default_cfgs['dla60']
    model = DLA(
        [1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla60x(pretrained=None, num_classes=1000, in_chans=3, **kwargs):  # DLA-X-60
    """Construct a DLA-X-60 model (grouped bottlenecks)."""
    cfg = default_cfgs['dla60x']
    model = DLA(
        [1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, cardinality=32, base_width=4,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla102(pretrained=None, num_classes=1000, in_chans=3, **kwargs):  # DLA-102
    """Construct a DLA-102 model (bottleneck blocks, residual roots)."""
    cfg = default_cfgs['dla102']
    model = DLA(
        [1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, residual_root=True,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla102x(pretrained=None, num_classes=1000, in_chans=3, **kwargs):  # DLA-X-102
    """Construct a DLA-X-102 model (grouped bottlenecks, residual roots)."""
    cfg = default_cfgs['dla102x']
    model = DLA(
        [1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, cardinality=32, base_width=4, residual_root=True,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla102x2(pretrained=None, num_classes=1000, in_chans=3, **kwargs):  # DLA-X-102 64
    """Construct a DLA-X-102-64 model (cardinality 64 grouped bottlenecks)."""
    cfg = default_cfgs['dla102x2']
    model = DLA(
        [1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, cardinality=64, base_width=4, residual_root=True,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
@register_model
def dla169(pretrained=None, num_classes=1000, in_chans=3, **kwargs):  # DLA-169
    """Construct a DLA-169 model (bottleneck blocks, residual roots)."""
    cfg = default_cfgs['dla169']
    model = DLA(
        [1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
        block=DlaBottleneck, residual_root=True,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
| en | 0.748606 | Deep Layer Aggregation and DLA w/ Res2Net DLA original adapted from Official Pytorch impl at: DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 Res2Net additions from: https://github.com/gasvn/Res2Net/ Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 DLA Basic DLA/DLA-X Bottleneck Res2Net/Res2NeXT DLA Bottleneck Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py # FIXME the way downsample / project are used here and residual is passed to next level up # the tree, the residual is overridden and some project weights are thus never used and # have no gradients. This appears to be an issue with the original model / weights. # DLA-34 # DLA-46-C # DLA-X-46-C # DLA-X-60-C # DLA-60 # DLA-X-60 # DLA-102 # DLA-X-102 # DLA-X-102 64 # DLA-169 | 2.200943 | 2 |
molecule/f5bigip_gtm_monitor_http/tests/test_default.py | erjac77/ansible-role-f5 | 1 | 6624399 | <filename>molecule/f5bigip_gtm_monitor_http/tests/test_default.py<gh_stars>1-10
from ansible_vault import Vault
from f5.bigip import ManagementRoot
def test_default(host):
    """Verify the provisioned BIG-IP GTM HTTP monitor matches the role defaults.

    Decrypts the vault-encrypted connection vars, connects to the device and
    asserts the monitor's name, partition and description.
    """
    # `host_vars` instead of `vars` to avoid shadowing the builtin.
    host_vars = host.ansible.get_variables()
    # Context manager so the password file handle is always closed.
    with open("../../molecule/.vault_pass", "r") as fp:
        vault_pass = fp.read().replace("\n", "")
    vault = Vault(vault_pass)
    decrypted_host = vault.load(host_vars["ansible_host"]["__ansible_vault"])
    decrypted_user = vault.load(host_vars["http_user"]["__ansible_vault"])
    decrypted_pass = vault.load(host_vars["http_pass"]["__ansible_vault"])
    mgmt = ManagementRoot(decrypted_host, decrypted_user, decrypted_pass)
    monitor = mgmt.tm.gtm.monitor.https.http.load(
        name="my_http_monitor", partition="Common"
    )
    assert monitor.name == "my_http_monitor"
    assert monitor.partition == "Common"
    assert monitor.description == "My http monitor"
| <filename>molecule/f5bigip_gtm_monitor_http/tests/test_default.py<gh_stars>1-10
from ansible_vault import Vault
from f5.bigip import ManagementRoot
def test_default(host):
    """Verify the provisioned BIG-IP GTM HTTP monitor matches the role defaults.

    Decrypts the vault-encrypted connection vars, connects to the device and
    asserts the monitor's name, partition and description.
    """
    # `host_vars` instead of `vars` to avoid shadowing the builtin.
    host_vars = host.ansible.get_variables()
    # Context manager so the password file handle is always closed.
    with open("../../molecule/.vault_pass", "r") as fp:
        vault_pass = fp.read().replace("\n", "")
    vault = Vault(vault_pass)
    decrypted_host = vault.load(host_vars["ansible_host"]["__ansible_vault"])
    decrypted_user = vault.load(host_vars["http_user"]["__ansible_vault"])
    decrypted_pass = vault.load(host_vars["http_pass"]["__ansible_vault"])
    mgmt = ManagementRoot(decrypted_host, decrypted_user, decrypted_pass)
    monitor = mgmt.tm.gtm.monitor.https.http.load(
        name="my_http_monitor", partition="Common"
    )
    assert monitor.name == "my_http_monitor"
    assert monitor.partition == "Common"
    assert monitor.description == "My http monitor"
| none | 1 | 2.003715 | 2 | |
hexrd/ui/powder_calibration_dialog.py | psavery/hexrdgui | 0 | 6624400 | <reponame>psavery/hexrdgui<filename>hexrd/ui/powder_calibration_dialog.py<gh_stars>0
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.ui_loader import UiLoader
class PowderCalibrationDialog:
    """Modal dialog for editing powder calibration settings.

    Reads the current values from HexrdConfig on construction and writes
    them back when the dialog is accepted.
    """

    # Map config-file peak-type keys to their combo-box display names.
    _PK_DISPLAY = {'pvoigt': 'PVoigt', 'gaussian': 'Gaussian'}

    def __init__(self, parent=None):
        self.ui = UiLoader().load_file('powder_calibration_dialog.ui', parent)
        self.update_gui_from_config()
        self.setup_connections()

    def setup_connections(self):
        # No signal wiring needed yet; kept for API symmetry.
        pass

    def update_gui_from_config(self):
        """Populate the widgets from the current calibration config."""
        powder = HexrdConfig().config['calibration']['powder']
        pk_type = powder['pk_type']
        self.ui.tth_tolerance.setValue(powder['tth_tol'])
        self.ui.eta_tolerance.setValue(powder['eta_tol'])
        self.ui.peak_fit_type.setCurrentText(self._PK_DISPLAY.get(pk_type, pk_type))

    def exec_(self):
        """Run the dialog; on accept, write values back and return True."""
        if not self.ui.exec_():
            return False
        powder = HexrdConfig().config['calibration']['powder']
        powder['tth_tol'] = self.ui.tth_tolerance.value()
        powder['eta_tol'] = self.ui.eta_tolerance.value()
        powder['pk_type'] = self.ui.peak_fit_type.currentText().lower()
        return True
| from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.ui_loader import UiLoader
class PowderCalibrationDialog:
    """Modal dialog for editing powder calibration settings.

    Reads the current values from HexrdConfig on construction and writes
    them back when the dialog is accepted.
    """

    # Map config-file peak-type keys to their combo-box display names.
    _PK_DISPLAY = {'pvoigt': 'PVoigt', 'gaussian': 'Gaussian'}

    def __init__(self, parent=None):
        self.ui = UiLoader().load_file('powder_calibration_dialog.ui', parent)
        self.update_gui_from_config()
        self.setup_connections()

    def setup_connections(self):
        # No signal wiring needed yet; kept for API symmetry.
        pass

    def update_gui_from_config(self):
        """Populate the widgets from the current calibration config."""
        powder = HexrdConfig().config['calibration']['powder']
        pk_type = powder['pk_type']
        self.ui.tth_tolerance.setValue(powder['tth_tol'])
        self.ui.eta_tolerance.setValue(powder['eta_tol'])
        self.ui.peak_fit_type.setCurrentText(self._PK_DISPLAY.get(pk_type, pk_type))

    def exec_(self):
        """Run the dialog; on accept, write values back and return True."""
        if not self.ui.exec_():
            return False
        powder = HexrdConfig().config['calibration']['powder']
        powder['tth_tol'] = self.ui.tth_tolerance.value()
        powder['eta_tol'] = self.ui.eta_tolerance.value()
        powder['pk_type'] = self.ui.peak_fit_type.currentText().lower()
        return True
return True | none | 1 | 1.998263 | 2 | |
MinorScripts/Sort_Line/Sort_Line.py | Rickodesea/GeneralScripts | 0 | 6624401 | <filename>MinorScripts/Sort_Line/Sort_Line.py
import argparse
import re
def readtext_lines(name="text.txt"):
    """Return the lines of `name` (newlines kept).

    Fixes: removed the unreachable `return ""` after the with-block return,
    and parameterized the filename (default keeps the old behavior).
    """
    with open(name, "r") as f:
        return f.readlines()
def write(name, slist):
    """Write each string in `slist` to file `name`, one per line."""
    with open(name, "w") as f:
        f.writelines(s + "\n" for s in slist)
def main():
    """CLI entry point: sort the lines of text.txt alphabetically.

    Fixes: the -o/--output option was parsed but never used; it now writes
    the sorted lines to the given file, otherwise prints to stdout.
    """
    arg = argparse.ArgumentParser(description='Sort Lines Alphabetically')
    arg.add_argument('-o', '--output', help='File to write the output to. Otherwise, output to stdout.')
    cmd = arg.parse_args()
    sortedlines = [line.strip() for line in sorted(readtext_lines())]
    if cmd.output:
        write(cmd.output, sortedlines)
    else:
        for line in sortedlines:
            print(line)
if __name__ == "__main__":
    main()  # run the CLI when executed as a script
| <filename>MinorScripts/Sort_Line/Sort_Line.py
import argparse
import re
def readtext_lines(name="text.txt"):
    """Return the lines of `name` (newlines kept).

    Fixes: removed the unreachable `return ""` after the with-block return,
    and parameterized the filename (default keeps the old behavior).
    """
    with open(name, "r") as f:
        return f.readlines()
def write(name, slist):
    """Write each string in `slist` to file `name`, one per line."""
    with open(name, "w") as f:
        f.writelines(s + "\n" for s in slist)
def main():
    """CLI entry point: sort the lines of text.txt alphabetically.

    Fixes: the -o/--output option was parsed but never used; it now writes
    the sorted lines to the given file, otherwise prints to stdout.
    """
    arg = argparse.ArgumentParser(description='Sort Lines Alphabetically')
    arg.add_argument('-o', '--output', help='File to write the output to. Otherwise, output to stdout.')
    cmd = arg.parse_args()
    sortedlines = [line.strip() for line in sorted(readtext_lines())]
    if cmd.output:
        write(cmd.output, sortedlines)
    else:
        for line in sortedlines:
            print(line)
if __name__ == "__main__":
    main()  # run the CLI when executed as a script
| none | 1 | 3.804686 | 4 | |
samples/cli/accelbyte_py_sdk_cli/platform/_public_get_category.py | AccelByte/accelbyte-python-sdk | 0 | 6624402 | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-platform-service (4.10.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.platform import public_get_category as public_get_category_internal
from accelbyte_py_sdk.api.platform.models import CategoryInfo
from accelbyte_py_sdk.api.platform.models import ErrorEntity
@click.command()
@click.argument("category_path", type=str)
@click.option("--language", "language", type=str)
@click.option("--store_id", "store_id", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def public_get_category(
    category_path: str,
    language: Optional[str] = None,
    store_id: Optional[str] = None,
    namespace: Optional[str] = None,
    login_as: Optional[str] = None,
    login_with_auth: Optional[str] = None,
    doc: Optional[bool] = None,
):
    """CLI wrapper for the platform ``publicGetCategory`` SDK call.

    With --doc, prints the wrapped SDK function's docstring and exits.
    Otherwise authenticates (raw --login_with_auth header, or login as
    --login_as) and prints the result as YAML.
    """
    if doc:
        click.echo(public_get_category_internal.__doc__)
        return
    x_additional_headers = None
    if login_with_auth:
        # Caller supplied a raw Authorization header; skip interactive login.
        x_additional_headers = {
            "Authorization": login_with_auth
        }
    else:
        login_as_internal(login_as)
    result, error = public_get_category_internal(
        category_path=category_path,
        language=language,
        store_id=store_id,
        namespace=namespace,
        x_additional_headers=x_additional_headers,
    )
    if error:
        raise Exception(f"publicGetCategory failed: {str(error)}")
    click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
# Operation metadata consumed by the CLI command registry.
public_get_category.operation_id = "publicGetCategory"
public_get_category.is_deprecated = False
| # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-platform-service (4.10.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.platform import public_get_category as public_get_category_internal
from accelbyte_py_sdk.api.platform.models import CategoryInfo
from accelbyte_py_sdk.api.platform.models import ErrorEntity
@click.command()
@click.argument("category_path", type=str)
@click.option("--language", "language", type=str)
@click.option("--store_id", "store_id", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def public_get_category(
    category_path: str,
    language: Optional[str] = None,
    store_id: Optional[str] = None,
    namespace: Optional[str] = None,
    login_as: Optional[str] = None,
    login_with_auth: Optional[str] = None,
    doc: Optional[bool] = None,
):
    """CLI wrapper for the platform ``publicGetCategory`` SDK call.

    With --doc, prints the wrapped SDK function's docstring and exits.
    Otherwise authenticates (raw --login_with_auth header, or login as
    --login_as) and prints the result as YAML.
    """
    if doc:
        click.echo(public_get_category_internal.__doc__)
        return
    x_additional_headers = None
    if login_with_auth:
        # Caller supplied a raw Authorization header; skip interactive login.
        x_additional_headers = {
            "Authorization": login_with_auth
        }
    else:
        login_as_internal(login_as)
    result, error = public_get_category_internal(
        category_path=category_path,
        language=language,
        store_id=store_id,
        namespace=namespace,
        x_additional_headers=x_additional_headers,
    )
    if error:
        raise Exception(f"publicGetCategory failed: {str(error)}")
    click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
# Operation metadata consumed by the CLI command registry.
public_get_category.operation_id = "publicGetCategory"
public_get_category.is_deprecated = False
| en | 0.656805 | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved. # This is licensed software from AccelByte Inc, for limitations # and restrictions contact your company contract manager. # # Code generated. DO NOT EDIT! # template_file: python-cli-command.j2 # justice-platform-service (4.10.0) # pylint: disable=duplicate-code # pylint: disable=line-too-long # pylint: disable=missing-function-docstring # pylint: disable=missing-module-docstring # pylint: disable=too-many-arguments # pylint: disable=too-many-branches # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-lines # pylint: disable=too-many-locals # pylint: disable=too-many-public-methods # pylint: disable=too-many-return-statements # pylint: disable=too-many-statements # pylint: disable=unused-import | 1.826384 | 2 |
sympy/integrals/integrals.py | skieffer/sympy | 0 | 6624403 | <reponame>skieffer/sympy
from sympy.concrete.expr_with_limits import AddWithLimits
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import diff
from sympy.core.logic import fuzzy_bool
from sympy.core.mul import Mul
from sympy.core.numbers import oo, pi
from sympy.core.relational import Ne
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, Wild)
from sympy.core.sympify import sympify
from sympy.functions import Piecewise, sqrt, piecewise_fold, tan, cot, atan
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.complexes import Abs, sign
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.integrals.manualintegrate import manualintegrate
from sympy.integrals.trigonometry import trigintegrate
from sympy.integrals.meijerint import meijerint_definite, meijerint_indefinite
from sympy.matrices import MatrixBase
from sympy.polys import Poly, PolynomialError
from sympy.series import limit
from sympy.series.order import Order
from sympy.series.formal import FormalPowerSeries
from sympy.simplify.fu import sincos_to_sum
from sympy.tensor.functions import shape
from sympy.utilities.misc import filldedent
from sympy.utilities.exceptions import SymPyDeprecationWarning
class Integral(AddWithLimits):
"""Represents unevaluated integral."""
__slots__ = ('is_commutative',)
    def __new__(cls, function, *symbols, **assumptions):
        """Create an unevaluated integral.

        Explanation
        ===========

        Arguments are an integrand followed by one or more limits.

        If no limits are given and there is only one free symbol in the
        expression, that symbol will be used, otherwise an error will be
        raised.

        >>> from sympy import Integral
        >>> from sympy.abc import x, y
        >>> Integral(x)
        Integral(x, x)
        >>> Integral(y)
        Integral(y, y)

        When limits are provided, they are interpreted as follows (using
        ``x`` as though it were the variable of integration):

            (x,) or x - indefinite integral
            (x, a) - "evaluate at" integral is an abstract antiderivative
            (x, a, b) - definite integral

        The ``as_dummy`` method can be used to see which symbols cannot be
        targeted by subs: those with a prepended underscore cannot be
        changed with ``subs``. (Also, the integration variables themselves --
        the first element of a limit -- can never be changed by subs.)

        >>> i = Integral(x, x)
        >>> at = Integral(x, (x, x))
        >>> i.as_dummy()
        Integral(x, x)
        >>> at.as_dummy()
        Integral(_0, (_0, x))

        """
        # This will help other classes define their own definitions of
        # behaviour with Integral (delegation hook, checked before
        # constructing the Integral itself).
        if hasattr(function, '_eval_Integral'):
            return function._eval_Integral(*symbols, **assumptions)
        if isinstance(function, Poly):
            # Poly integrands are deprecated (GH issue 18613); warn but
            # still construct the object for backwards compatibility.
            SymPyDeprecationWarning(
                feature="Using integrate/Integral with Poly",
                issue=18613,
                deprecated_since_version="1.6",
                useinstead="the as_expr or integrate methods of Poly").warn()
        # Limit parsing/validation is handled by the AddWithLimits base.
        obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions)
        return obj
def __getnewargs__(self):
return (self.function,) + tuple([tuple(xab) for xab in self.limits])
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the
integral is evaluated. This is useful if one is trying to
determine whether an integral depends on a certain
symbol or not.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, y, 1)).free_symbols
{y}
See Also
========
sympy.concrete.expr_with_limits.ExprWithLimits.function
sympy.concrete.expr_with_limits.ExprWithLimits.limits
sympy.concrete.expr_with_limits.ExprWithLimits.variables
"""
return AddWithLimits.free_symbols.fget(self)
    def _eval_is_zero(self):
        # This is a very naive and quick test, not intended to do the integral to
        # answer whether it is zero or not, e.g. Integral(sin(x), (x, 0, 2*pi))
        # is zero but this routine should return None for that case. But, like
        # Mul, there are trivial situations for which the integral will be
        # zero so we check for those.
        if self.function.is_zero:
            return True
        got_none = False
        for l in self.limits:
            if len(l) == 3:
                # Definite limit (x, a, b): a zero-width interval gives zero.
                z = (l[1] == l[2]) or (l[1] - l[2]).is_zero
                if z:
                    return True
                elif z is None:
                    # Could not decide for this limit; remember so we avoid
                    # returning a definite False later.
                    got_none = True
        free = self.function.free_symbols
        for xab in self.limits:
            if len(xab) == 1:
                free.add(xab[0])
                continue
            if len(xab) == 2 and xab[0] not in free:
                # "Evaluate at" form (x, a) with x absent from the integrand:
                # the result is a*function, which is zero when a is zero.
                if xab[1].is_zero:
                    return True
                elif xab[1].is_zero is None:
                    got_none = True
            # take integration symbol out of free since it will be replaced
            # with the free symbols in the limits
            free.discard(xab[0])
            # add in the new symbols
            for i in xab[1:]:
                free.update(i.free_symbols)
        if self.function.is_zero is False and got_none is False:
            return False
    def transform(self, x, u):
        r"""
        Performs a change of variables from `x` to `u` using the relationship
        given by `x` and `u` which will define the transformations `f` and `F`
        (which are inverses of each other) as follows:

        1) If `x` is a Symbol (which is a variable of integration) then `u`
        will be interpreted as some function, f(u), with inverse F(u).
        This, in effect, just makes the substitution of x with f(x).

        2) If `u` is a Symbol then `x` will be interpreted as some function,
        F(x), with inverse f(u). This is commonly referred to as
        u-substitution.

        Once f and F have been identified, the transformation is made as
        follows:

        .. math:: \int_a^b x \mathrm{d}x \rightarrow \int_{F(a)}^{F(b)} f(x)
                  \frac{\mathrm{d}}{\mathrm{d}x}

        where `F(x)` is the inverse of `f(x)` and the limits and integrand have
        been corrected so as to retain the same value after integration.

        Notes
        =====

        The mappings, F(x) or f(u), must lead to a unique integral. Linear
        or rational linear expression, ``2*x``, ``1/x`` and ``sqrt(x)``, will
        always work; quadratic expressions like ``x**2 - 1`` are acceptable
        as long as the resulting integrand does not depend on the sign of
        the solutions (see examples).

        The integral will be returned unchanged if ``x`` is not a variable of
        integration.

        ``x`` must be (or contain) only one of the integration variables. If
        ``u`` has more than one free symbol then it should be sent as a tuple
        (``u``, ``uvar``) where ``uvar`` identifies which variable is replacing
        the integration variable.
        XXX can it contain another integration variable?

        Examples
        ========

        >>> from sympy.abc import a, x, u
        >>> from sympy import Integral, cos, sqrt

        >>> i = Integral(x*cos(x**2 - 1), (x, 0, 1))

        transform can change the variable of integration

        >>> i.transform(x, u)
        Integral(u*cos(u**2 - 1), (u, 0, 1))

        transform can perform u-substitution as long as a unique
        integrand is obtained:

        >>> i.transform(x**2 - 1, u)
        Integral(cos(u)/2, (u, -1, 0))

        This attempt fails because x = +/-sqrt(u + 1) and the
        sign does not cancel out of the integrand:

        >>> Integral(cos(x**2 - 1), (x, 0, 1)).transform(x**2 - 1, u)
        Traceback (most recent call last):
        ...
        ValueError:
        The mapping between F(x) and f(u) did not give a unique integrand.

        transform can do a substitution. Here, the previous
        result is transformed back into the original expression
        using "u-substitution":

        >>> ui = _
        >>> _.transform(sqrt(u + 1), x) == i
        True

        We can accomplish the same with a regular substitution:

        >>> ui.transform(u, x**2 - 1) == i
        True

        If the `x` does not contain a symbol of integration then
        the integral will be returned unchanged. Integral `i` does
        not have an integration variable `a` so no change is made:

        >>> i.transform(a, x) == i
        True

        When `u` has more than one free symbol the symbol that is
        replacing `x` must be identified by passing `u` as a tuple:

        >>> Integral(x, (x, 0, 1)).transform(x, (u + a, u))
        Integral(a + u, (u, -a, 1 - a))
        >>> Integral(x, (x, 0, 1)).transform(x, (u + a, a))
        Integral(a + u, (a, -u, 1 - u))

        See Also
        ========

        sympy.concrete.expr_with_limits.ExprWithLimits.variables : Lists the integration variables
        as_dummy : Replace integration variables with dummy ones
        """
        from sympy.solvers.solvers import solve, posify
        # d is a private placeholder used while solving and substituting.
        d = Dummy('d')
        # F(x) may involve at most one of this integral's variables.
        xfree = x.free_symbols.intersection(self.variables)
        if len(xfree) > 1:
            raise ValueError(
                'F(x) can only contain one of: %s' % self.variables)
        xvar = xfree.pop() if xfree else d
        if xvar not in self.variables:
            # x does not involve an integration variable: nothing to do.
            return self
        u = sympify(u)
        if isinstance(u, Expr):
            ufree = u.free_symbols
            if len(ufree) == 0:
                raise ValueError(filldedent('''
                f(u) cannot be a constant'''))
            if len(ufree) > 1:
                raise ValueError(filldedent('''
                When f(u) has more than one free symbol, the one replacing x
                must be identified: pass f(u) as (f(u), u)'''))
            uvar = ufree.pop()
        else:
            # u was passed as a tuple (expr, symbol).
            u, uvar = u
            if uvar not in u.free_symbols:
                raise ValueError(filldedent('''
                Expecting a tuple (expr, symbol) where symbol identified
                a free symbol in expr, but symbol is not in expr's free
                symbols.'''))
            if not isinstance(uvar, Symbol):
                # This probably never evaluates to True
                raise ValueError(filldedent('''
                Expecting a tuple (expr, symbol) but didn't get
                a symbol; got %s''' % uvar))
        if x.is_Symbol and u.is_Symbol:
            # Simple renaming of the integration variable.
            return self.xreplace({x: u})
        if not x.is_Symbol and not u.is_Symbol:
            raise ValueError('either x or u must be a symbol')
        if uvar == xvar:
            # Name collision: route the substitution through the dummy d.
            return self.transform(x, (u.subs(uvar, d), d)).xreplace({d: uvar})
        if uvar in self.limits:
            raise ValueError(filldedent('''
            u must contain the same variable as in x
            or a variable that is not already an integration variable'''))
        if not x.is_Symbol:
            # Case 2 (u-substitution): x is F(x); solve F(x) = f(u) for x
            # to obtain the candidate inverses f.
            F = [x.subs(xvar, d)]
            soln = solve(u - x, xvar, check=False)
            if not soln:
                raise ValueError('no solution for solve(F(x) - f(u), x)')
            f = [fi.subs(uvar, d) for fi in soln]
        else:
            # Case 1 (direct substitution): u is f(u); solve for F.
            f = [u.subs(uvar, d)]
            # posify assumes positivity so solve() can invert more maps.
            pdiff, reps = posify(u - x)
            puvar = uvar.subs([(v, k) for k, v in reps.items()])
            soln = [s.subs(reps) for s in solve(pdiff, puvar)]
            if not soln:
                raise ValueError('no solution for solve(F(x) - f(u), u)')
            F = [fi.subs(xvar, d) for fi in soln]
        # New integrand is f(u)*f'(u); all candidate inverses must agree
        # or the mapping is ambiguous.
        newfuncs = {(self.function.subs(xvar, fi)*fi.diff(d)
                    ).subs(d, uvar) for fi in f}
        if len(newfuncs) > 1:
            raise ValueError(filldedent('''
            The mapping between F(x) and f(u) did not give
            a unique integrand.'''))
        newfunc = newfuncs.pop()

        def _calc_limit_1(F, a, b):
            """
            replace d with a, using subs if possible, otherwise limit
            where sign of b is considered
            """
            wok = F.subs(d, a)
            if wok is S.NaN or wok.is_finite is False and a.is_finite:
                return limit(sign(b)*F, d, a)
            return wok

        def _calc_limit(a, b):
            """
            replace d with a, using subs if possible, otherwise limit
            where sign of b is considered
            """
            avals = list({_calc_limit_1(Fi, a, b) for Fi in F})
            if len(avals) > 1:
                raise ValueError(filldedent('''
                The mapping between F(x) and f(u) did not
                give a unique limit.'''))
            return avals[0]
        newlimits = []
        for xab in self.limits:
            sym = xab[0]
            if sym == xvar:
                if len(xab) == 3:
                    a, b = xab[1:]
                    a, b = _calc_limit(a, b), _calc_limit(b, a)
                    if fuzzy_bool(a - b > 0):
                        # keep limits ordered; flip integrand sign to compensate
                        a, b = b, a
                        newfunc = -newfunc
                    newlimits.append((uvar, a, b))
                elif len(xab) == 2:
                    a = _calc_limit(xab[1], 1)
                    newlimits.append((uvar, a))
                else:
                    newlimits.append(uvar)
            else:
                # limits over other variables are untouched
                newlimits.append(xab)
        return self.func(newfunc, *newlimits)
    def doit(self, **hints):
        """
        Perform the integration using any hints given.

        Examples
        ========

        >>> from sympy import Piecewise, S
        >>> from sympy.abc import x, t
        >>> p = x**2 + Piecewise((0, x/t < 0), (1, True))
        >>> p.integrate((t, S(4)/5, 1), (x, -1, 1))
        1/3

        See Also
        ========

        sympy.integrals.trigonometry.trigintegrate
        sympy.integrals.heurisch.heurisch
        sympy.integrals.rationaltools.ratint
        as_sum : Approximate the integral using a sum
        """
        from sympy.concrete.summations import Sum
        if not hints.get('integrals', True):
            return self
        deep = hints.get('deep', True)
        meijerg = hints.get('meijerg', None)
        conds = hints.get('conds', 'piecewise')
        risch = hints.get('risch', None)
        heurisch = hints.get('heurisch', None)
        manual = hints.get('manual', None)
        # The four method flags are mutually exclusive; requesting one
        # explicitly disables the other three.
        if len(list(filter(None, (manual, meijerg, risch, heurisch)))) > 1:
            raise ValueError("At most one of manual, meijerg, risch, heurisch can be True")
        elif manual:
            meijerg = risch = heurisch = False
        elif meijerg:
            manual = risch = heurisch = False
        elif risch:
            manual = meijerg = heurisch = False
        elif heurisch:
            manual = meijerg = risch = False
        eval_kwargs = dict(meijerg=meijerg, risch=risch, manual=manual, heurisch=heurisch,
            conds=conds)
        if conds not in ('separate', 'piecewise', 'none'):
            raise ValueError('conds must be one of "separate", "piecewise", '
                             '"none", got: %s' % conds)
        if risch and any(len(xab) > 1 for xab in self.limits):
            raise ValueError('risch=True is only allowed for indefinite integrals.')
        # check for the trivial zero
        if self.is_zero:
            return S.Zero
        # hacks to handle integrals of
        # nested summations
        if isinstance(self.function, Sum):
            if any(v in self.function.limits[0] for v in self.variables):
                raise ValueError('Limit of the sum cannot be an integration variable.')
            if any(l.is_infinite for l in self.function.limits[0][1:]):
                return self
            _i = self
            _sum = self.function
            # swap the order: integrate the summand, then re-apply the Sum
            return _sum.func(_i.func(_sum.function, *_i.limits).doit(), *_sum.limits).doit()
        # now compute and check the function
        function = self.function
        if deep:
            function = function.doit(**hints)
        if function.is_zero:
            return S.Zero
        # hacks to handle special cases
        if isinstance(function, MatrixBase):
            # NOTE(review): self.limits (a tuple of limit tuples) is passed
            # as a single positional argument here; verify that
            # self.func(f, *self.limits) is not what was intended.
            return function.applyfunc(
                lambda f: self.func(f, self.limits).doit(**hints))
        if isinstance(function, FormalPowerSeries):
            if len(self.limits) > 1:
                raise NotImplementedError
            xab = self.limits[0]
            if len(xab) > 1:
                return function.integrate(xab, **eval_kwargs)
            else:
                return function.integrate(xab[0], **eval_kwargs)
        # There is no trivial answer and special handling
        # is done so continue
        # first make sure any definite limits have integration
        # variables with matching assumptions
        reps = {}
        for xab in self.limits:
            if len(xab) != 3:
                continue
            x, a, b = xab
            l = (a, b)
            # Use a dummy with assumptions matching the interval so that
            # downstream simplifications can fire; undone afterwards.
            if all(i.is_nonnegative for i in l) and not x.is_nonnegative:
                d = Dummy(positive=True)
            elif all(i.is_nonpositive for i in l) and not x.is_nonpositive:
                d = Dummy(negative=True)
            elif all(i.is_real for i in l) and not x.is_real:
                d = Dummy(real=True)
            else:
                d = None
            if d:
                reps[x] = d
        if reps:
            undo = {v: k for k, v in reps.items()}
            did = self.xreplace(reps).doit(**hints)
            if isinstance(did, tuple):  # when separate=True
                did = tuple([i.xreplace(undo) for i in did])
            else:
                did = did.xreplace(undo)
            return did
        # continue with existing assumptions
        undone_limits = []
        # ulj = free symbols of any undone limits' upper and lower limits
        ulj = set()
        for xab in self.limits:
            # compute uli, the free symbols in the
            # Upper and Lower limits of limit I
            if len(xab) == 1:
                uli = set(xab[:1])
            elif len(xab) == 2:
                uli = xab[1].free_symbols
            elif len(xab) == 3:
                uli = xab[1].free_symbols.union(xab[2].free_symbols)
            # this integral can be done as long as there is no blocking
            # limit that has been undone. An undone limit is blocking if
            # it contains an integration variable that is in this limit's
            # upper or lower free symbols or vice versa
            if xab[0] in ulj or any(v[0] in uli for v in undone_limits):
                undone_limits.append(xab)
                ulj.update(uli)
                function = self.func(*([function] + [xab]))
                factored_function = function.factor()
                if not isinstance(factored_function, Integral):
                    function = factored_function
                continue
            if function.has(Abs, sign) and (
                    (len(xab) < 3 and all(x.is_extended_real for x in xab)) or
                    (len(xab) == 3 and all(x.is_extended_real and not x.is_infinite for
                     x in xab[1:]))):
                # some improper integrals are better off with Abs
                xr = Dummy("xr", real=True)
                function = (function.xreplace({xab[0]: xr})
                    .rewrite(Piecewise).xreplace({xr: xab[0]}))
            elif function.has(Min, Max):
                function = function.rewrite(Piecewise)
            if (function.has(Piecewise) and
                    not isinstance(function, Piecewise)):
                function = piecewise_fold(function)
            if isinstance(function, Piecewise):
                if len(xab) == 1:
                    antideriv = function._eval_integral(xab[0],
                                                        **eval_kwargs)
                else:
                    antideriv = self._eval_integral(
                        function, xab[0], **eval_kwargs)
            else:
                # There are a number of tradeoffs in using the
                # Meijer G method. It can sometimes be a lot faster
                # than other methods, and sometimes slower. And
                # there are certain types of integrals for which it
                # is more likely to work than others. These
                # heuristics are incorporated in deciding what
                # integration methods to try, in what order. See the
                # integrate() docstring for details.
                def try_meijerg(function, xab):
                    # Attempt a definite integral via meijerint_definite;
                    # returns None when not applicable or unsuccessful.
                    ret = None
                    if len(xab) == 3 and meijerg is not False:
                        x, a, b = xab
                        try:
                            res = meijerint_definite(function, x, a, b)
                        except NotImplementedError:
                            from sympy.integrals.meijerint import _debug
                            _debug('NotImplementedError '
                                'from meijerint_definite')
                            res = None
                        if res is not None:
                            f, cond = res
                            if conds == 'piecewise':
                                ret = Piecewise(
                                    (f, cond),
                                    (self.func(
                                    function, (x, a, b)), True))
                            elif conds == 'separate':
                                if len(self.limits) != 1:
                                    raise ValueError(filldedent('''
                                        conds=separate not supported in
                                        multiple integrals'''))
                                ret = f, cond
                            else:
                                ret = f
                    return ret
                meijerg1 = meijerg
                # Prefer the definite Meijer G path for improper real
                # integrals (infinite limits).
                if (meijerg is not False and
                        len(xab) == 3 and xab[1].is_extended_real and xab[2].is_extended_real
                        and not function.is_Poly and
                        (xab[1].has(oo, -oo) or xab[2].has(oo, -oo))):
                    ret = try_meijerg(function, xab)
                    if ret is not None:
                        function = ret
                        continue
                    meijerg1 = False
                # If the special meijerg code did not succeed in
                # finding a definite integral, then the code using
                # meijerint_indefinite will not either (it might
                # find an antiderivative, but the answer is likely
                # to be nonsensical). Thus if we are requested to
                # only use Meijer G-function methods, we give up at
                # this stage. Otherwise we just disable G-function
                # methods.
                if meijerg1 is False and meijerg is True:
                    antideriv = None
                else:
                    antideriv = self._eval_integral(
                        function, xab[0], **eval_kwargs)
                    if antideriv is None and meijerg is True:
                        ret = try_meijerg(function, xab)
                        if ret is not None:
                            function = ret
                            continue
            final = hints.get('final', True)
            # doit may be iterated but floor terms making atan and acot
            # continuous should only be added in the final round
            if (final and not isinstance(antideriv, Integral) and
                    antideriv is not None):
                for atan_term in antideriv.atoms(atan):
                    atan_arg = atan_term.args[0]
                    # Checking `atan_arg` to be linear combination of `tan` or `cot`
                    for tan_part in atan_arg.atoms(tan):
                        x1 = Dummy('x1')
                        tan_exp1 = atan_arg.subs(tan_part, x1)
                        # The coefficient of `tan` should be constant
                        coeff = tan_exp1.diff(x1)
                        if x1 not in coeff.free_symbols:
                            a = tan_part.args[0]
                            antideriv = antideriv.subs(atan_term, Add(atan_term,
                                sign(coeff)*pi*floor((a-pi/2)/pi)))
                    for cot_part in atan_arg.atoms(cot):
                        x1 = Dummy('x1')
                        cot_exp1 = atan_arg.subs(cot_part, x1)
                        # The coefficient of `cot` should be constant
                        coeff = cot_exp1.diff(x1)
                        if x1 not in coeff.free_symbols:
                            a = cot_part.args[0]
                            antideriv = antideriv.subs(atan_term, Add(atan_term,
                                sign(coeff)*pi*floor((a)/pi)))
            if antideriv is None:
                # Could not integrate this limit: record it and move on.
                undone_limits.append(xab)
                function = self.func(*([function] + [xab])).factor()
                factored_function = function.factor()
                if not isinstance(factored_function, Integral):
                    function = factored_function
                continue
            else:
                if len(xab) == 1:
                    function = antideriv
                else:
                    if len(xab) == 3:
                        x, a, b = xab
                    elif len(xab) == 2:
                        x, b = xab
                        a = None
                    else:
                        raise NotImplementedError
                    if deep:
                        if isinstance(a, Basic):
                            a = a.doit(**hints)
                        if isinstance(b, Basic):
                            b = b.doit(**hints)
                    if antideriv.is_Poly:
                        gens = list(antideriv.gens)
                        gens.remove(x)
                        antideriv = antideriv.as_expr()
                        function = antideriv._eval_interval(x, a, b)
                        function = Poly(function, *gens)
                    else:
                        def is_indef_int(g, x):
                            # True for an unevaluated indefinite Integral in x
                            return (isinstance(g, Integral) and
                                    any(i == (x,) for i in g.limits))

                        def eval_factored(f, x, a, b):
                            # _eval_interval for integrals with
                            # (constant) factors
                            # a single indefinite integral is assumed
                            args = []
                            for g in Mul.make_args(f):
                                if is_indef_int(g, x):
                                    args.append(g._eval_interval(x, a, b))
                                else:
                                    args.append(g)
                            return Mul(*args)
                        integrals, others, piecewises = [], [], []
                        for f in Add.make_args(antideriv):
                            if any(is_indef_int(g, x)
                                   for g in Mul.make_args(f)):
                                integrals.append(f)
                            elif any(isinstance(g, Piecewise)
                                     for g in Mul.make_args(f)):
                                piecewises.append(piecewise_fold(f))
                            else:
                                others.append(f)
                        uneval = Add(*[eval_factored(f, x, a, b)
                                       for f in integrals])
                        try:
                            evalued = Add(*others)._eval_interval(x, a, b)
                            evalued_pw = piecewise_fold(Add(*piecewises))._eval_interval(x, a, b)
                            function = uneval + evalued + evalued_pw
                        except NotImplementedError:
                            # This can happen if _eval_interval depends in a
                            # complicated way on limits that cannot be computed
                            undone_limits.append(xab)
                            function = self.func(*([function] + [xab]))
                            factored_function = function.factor()
                            if not isinstance(factored_function, Integral):
                                function = factored_function
        return function
    def _eval_derivative(self, sym):
        """Evaluate the derivative of the current Integral object by
        differentiating under the integral sign [1], using the Fundamental
        Theorem of Calculus [2] when possible.

        Explanation
        ===========

        Whenever an Integral is encountered that is equivalent to zero or
        has an integrand that is independent of the variable of integration
        those integrals are performed. All others are returned as Integral
        instances which can be resolved with doit() (provided they are integrable).

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign
        .. [2] https://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus

        Examples
        ========

        >>> from sympy import Integral
        >>> from sympy.abc import x, y
        >>> i = Integral(x + y, y, (y, 1, x))
        >>> i.diff(x)
        Integral(x + y, (y, x)) + Integral(1, y, (y, 1, x))
        >>> i.doit().diff(x) == i.diff(x).doit()
        True
        >>> i.diff(y)
        0

        The previous must be true since there is no y in the evaluated integral:

        >>> i.free_symbols
        {x}
        >>> i.doit()
        2*x**3/3 - x/2 - 1/6
        """
        # differentiate under the integral sign; we do not
        # check for regularity conditions (TODO), see issue 4215
        # get limits and the function
        f, limits = self.function, list(self.limits)
        # the order matters if variables of integration appear in the limits
        # so work our way in from the outside to the inside.
        limit = limits.pop(-1)
        if len(limit) == 3:
            x, a, b = limit
        elif len(limit) == 2:
            x, b = limit
            a = None
        else:
            a = b = None
            x = limit[0]
        if limits:  # f is the argument to an integral
            f = self.func(f, *tuple(limits))

        # assemble the pieces
        def _do(f, ab):
            # Leibniz boundary term: f at the limit `ab` times d(ab)/d(sym).
            dab_dsym = diff(ab, sym)
            if not dab_dsym:
                return S.Zero
            if isinstance(f, Integral):
                # make bare integration variables explicit as (x, x) so
                # the substitution below hits the limits, not the dummy
                limits = [(x, x) if (len(l) == 1 and l[0] == x) else l
                          for l in f.limits]
                f = self.func(f.function, *limits)
            return f.subs(x, ab)*dab_dsym
        rv = S.Zero
        if b is not None:
            rv += _do(f, b)
        if a is not None:
            rv -= _do(f, a)
        if len(limit) == 1 and sym == x:
            # the dummy variable *is* also the real-world variable
            arg = f
            rv += arg
        else:
            # the dummy variable might match sym but it's
            # only a dummy and the actual variable is determined
            # by the limits, so mask off the variable of integration
            # while differentiating
            u = Dummy('u')
            arg = f.subs(x, u).diff(sym).subs(u, x)
            if arg:
                rv += self.func(arg, Tuple(x, a, b))
        return rv
    def _eval_integral(self, f, x, meijerg=None, risch=None, manual=None,
                       heurisch=None, conds='piecewise',final=None):
        """
        Calculate the anti-derivative to the function f(x).

        Explanation
        ===========

        The following algorithms are applied (roughly in this order):

        1. Simple heuristics (based on pattern matching and integral table):

           - most frequently used functions (e.g. polynomials, products of
             trig functions)

        2. Integration of rational functions:

           - A complete algorithm for integrating rational functions is
             implemented (the Lazard-Rioboo-Trager algorithm). The algorithm
             also uses the partial fraction decomposition algorithm
             implemented in apart() as a preprocessor to make this process
             faster. Note that the integral of a rational function is always
             elementary, but in general, it may include a RootSum.

        3. Full Risch algorithm:

           - The Risch algorithm is a complete decision
             procedure for integrating elementary functions, which means that
             given any elementary function, it will either compute an
             elementary antiderivative, or else prove that none exists.
             Currently, part of transcendental case is implemented, meaning
             elementary integrals containing exponentials, logarithms, and
             (soon!) trigonometric functions can be computed. The algebraic
             case, e.g., functions containing roots, is much more difficult
             and is not implemented yet.

           - If the routine fails (because the integrand is not elementary, or
             because a case is not implemented yet), it continues on to the
             next algorithms below. If the routine proves that the integrals
             is nonelementary, it still moves on to the algorithms below,
             because we might be able to find a closed-form solution in terms
             of special functions. If risch=True, however, it will stop here.

        4. The Meijer G-Function algorithm:

           - This algorithm works by first rewriting the integrand in terms of
             very general Meijer G-Function (meijerg in SymPy), integrating
             it, and then rewriting the result back, if possible. This
             algorithm is particularly powerful for definite integrals (which
             is actually part of a different method of Integral), since it can
             compute closed-form solutions of definite integrals even when no
             closed-form indefinite integral exists. But it also is capable
             of computing many indefinite integrals as well.

           - Another advantage of this method is that it can use some results
             about the Meijer G-Function to give a result in terms of a
             Piecewise expression, which allows to express conditionally
             convergent integrals.

           - Setting meijerg=True will cause integrate() to use only this
             method.

        5. The "manual integration" algorithm:

           - This algorithm tries to mimic how a person would find an
             antiderivative by hand, for example by looking for a
             substitution or applying integration by parts. This algorithm
             does not handle as many integrands but can return results in a
             more familiar form.

           - Sometimes this algorithm can evaluate parts of an integral; in
             this case integrate() will try to evaluate the rest of the
             integrand using the other methods here.

           - Setting manual=True will cause integrate() to use only this
             method.

        6. The Heuristic Risch algorithm:

           - This is a heuristic version of the Risch algorithm, meaning that
             it is not deterministic. This is tried as a last resort because
             it can be very slow. It is still used because not enough of the
             full Risch algorithm is implemented, so that there are still some
             integrals that can only be computed using this method. The goal
             is to implement enough of the Risch and Meijer G-function methods
             so that this can be deleted.

             Setting heurisch=True will cause integrate() to use only this
             method. Set heurisch=False to not use it.
        """
        from sympy.integrals.deltafunctions import deltaintegrate
        from sympy.integrals.singularityfunctions import singularityintegrate
        from sympy.integrals.heurisch import heurisch as heurisch_, heurisch_wrapper
        from sympy.integrals.rationaltools import ratint
        from sympy.integrals.risch import risch_integrate
        if risch:
            # risch=True: the full Risch algorithm is the only method allowed
            try:
                return risch_integrate(f, x, conds=conds)
            except NotImplementedError:
                return None
        if manual:
            # manual=True: only manualintegrate may be used
            try:
                result = manualintegrate(f, x)
                if result is not None and result.func != Integral:
                    return result
            except (ValueError, PolynomialError):
                pass
        eval_kwargs = dict(meijerg=meijerg, risch=risch, manual=manual,
            heurisch=heurisch, conds=conds)
        # if it is a poly(x) then let the polynomial integrate itself (fast)
        #
        # It is important to make this check first, otherwise the other code
        # will return a sympy expression instead of a Polynomial.
        #
        # see Polynomial for details.
        if isinstance(f, Poly) and not (manual or meijerg or risch):
            SymPyDeprecationWarning(
                feature="Using integrate/Integral with Poly",
                issue=18613,
                deprecated_since_version="1.6",
                useinstead="the as_expr or integrate methods of Poly").warn()
            return f.integrate(x)
        # Piecewise antiderivatives need to call special integrate.
        if isinstance(f, Piecewise):
            return f.piecewise_integrate(x, **eval_kwargs)
        # let's cut it short if `f` does not depend on `x`; if
        # x is only a dummy, that will be handled below
        if not f.has(x):
            return f*x
        # try to convert to poly(x) and then integrate if successful (fast)
        poly = f.as_poly(x)
        if poly is not None and not (manual or meijerg or risch):
            return poly.integrate().as_expr()
        if risch is not False:
            try:
                result, i = risch_integrate(f, x, separate_integral=True,
                    conds=conds)
            except NotImplementedError:
                pass
            else:
                if i:
                    # There was a nonelementary integral. Try integrating it.
                    # if no part of the NonElementaryIntegral is integrated by
                    # the Risch algorithm, then use the original function to
                    # integrate, instead of re-written one
                    if result == 0:
                        from sympy.integrals.risch import NonElementaryIntegral
                        return NonElementaryIntegral(f, x).doit(risch=False)
                    else:
                        return result + i.doit(risch=False)
                else:
                    return result
        # since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ...
        # we are going to handle Add terms separately,
        # if `f` is not Add -- we only have one term
        # Note that in general, this is a bad idea, because Integral(g1) +
        # Integral(g2) might not be computable, even if Integral(g1 + g2) is.
        # For example, Integral(x**x + x**x*log(x)). But many heuristics only
        # work term-wise. So we compute this step last, after trying
        # risch_integrate. We also try risch_integrate again in this loop,
        # because maybe the integral is a sum of an elementary part and a
        # nonelementary part (like erf(x) + exp(x)). risch_integrate() is
        # quite fast, so this is acceptable.
        parts = []
        args = Add.make_args(f)
        for g in args:
            # pull out the part of the term that is constant in x
            coeff, g = g.as_independent(x)
            # g(x) = const
            if g is S.One and not meijerg:
                parts.append(coeff*x)
                continue
            # g(x) = expr + O(x**n)
            order_term = g.getO()
            if order_term is not None:
                h = self._eval_integral(g.removeO(), x, **eval_kwargs)
                if h is not None:
                    h_order_expr = self._eval_integral(order_term.expr, x, **eval_kwargs)
                    if h_order_expr is not None:
                        h_order_term = order_term.func(
                            h_order_expr, *order_term.variables)
                        parts.append(coeff*(h + h_order_term))
                        continue
                # NOTE: if there is O(x**n) and we fail to integrate then
                # there is no point in trying other methods because they
                # will fail, too.
                return None
            #               c
            # g(x) = (a*x+b)
            if g.is_Pow and not g.exp.has(x) and not meijerg:
                a = Wild('a', exclude=[x])
                b = Wild('b', exclude=[x])
                M = g.base.match(a*x + b)
                if M is not None:
                    if g.exp == -1:
                        h = log(g.base)
                    elif conds != 'piecewise':
                        h = g.base**(g.exp + 1) / (g.exp + 1)
                    else:
                        # with conds='piecewise', guard the exp == -1 case
                        h1 = log(g.base)
                        h2 = g.base**(g.exp + 1) / (g.exp + 1)
                        h = Piecewise((h2, Ne(g.exp, -1)), (h1, True))
                    parts.append(coeff * h / M[a])
                    continue
            #        poly(x)
            # g(x) = -------
            #        poly(x)
            if g.is_rational_function(x) and not (manual or meijerg or risch):
                parts.append(coeff * ratint(g, x))
                continue
            if not (manual or meijerg or risch):
                # g(x) = Mul(trig)
                h = trigintegrate(g, x, conds=conds)
                if h is not None:
                    parts.append(coeff * h)
                    continue
                # g(x) has at least a DiracDelta term
                h = deltaintegrate(g, x)
                if h is not None:
                    parts.append(coeff * h)
                    continue
                # g(x) has at least a Singularity Function term
                h = singularityintegrate(g, x)
                if h is not None:
                    parts.append(coeff * h)
                    continue
            # Try risch again.
            if risch is not False:
                try:
                    h, i = risch_integrate(g, x,
                        separate_integral=True, conds=conds)
                except NotImplementedError:
                    h = None
                else:
                    if i:
                        h = h + i.doit(risch=False)
                    parts.append(coeff*h)
                    continue
            # fall back to heurisch
            if heurisch is not False:
                try:
                    if conds == 'piecewise':
                        h = heurisch_wrapper(g, x, hints=[])
                    else:
                        h = heurisch_(g, x, hints=[])
                except PolynomialError:
                    # XXX: this exception means there is a bug in the
                    # implementation of heuristic Risch integration
                    # algorithm.
                    h = None
            else:
                h = None
            if meijerg is not False and h is None:
                # rewrite using G functions
                try:
                    h = meijerint_indefinite(g, x)
                except NotImplementedError:
                    from sympy.integrals.meijerint import _debug
                    # NOTE(review): this is the indefinite path but the debug
                    # message says "meijerint_definite" -- looks like a
                    # copy-paste; confirm before changing the message text.
                    _debug('NotImplementedError from meijerint_definite')
                if h is not None:
                    parts.append(coeff * h)
                    continue
            if h is None and manual is not False:
                try:
                    result = manualintegrate(g, x)
                    if result is not None and not isinstance(result, Integral):
                        if result.has(Integral) and not manual:
                            # Try to have other algorithms do the integrals
                            # manualintegrate can't handle,
                            # unless we were asked to use manual only.
                            # Keep the rest of eval_kwargs in case another
                            # method was set to False already
                            new_eval_kwargs = eval_kwargs
                            new_eval_kwargs["manual"] = False
                            new_eval_kwargs["final"] = False
                            result = result.func(*[
                                arg.doit(**new_eval_kwargs) if
                                arg.has(Integral) else arg
                                for arg in result.args
                            ]).expand(multinomial=False,
                                      log=False,
                                      power_exp=False,
                                      power_base=False)
                        if not result.has(Integral):
                            parts.append(coeff * result)
                            continue
                except (ValueError, PolynomialError):
                    # can't handle some SymPy expressions
                    pass
            # if we failed maybe it was because we had
            # a product that could have been expanded,
            # so let's try an expansion of the whole
            # thing before giving up; we don't try this
            # at the outset because there are things
            # that cannot be solved unless they are
            # NOT expanded e.g., x**x*(1+log(x)). There
            # should probably be a checker somewhere in this
            # routine to look for such cases and try to do
            # collection on the expressions if they are already
            # in an expanded form
            if not h and len(args) == 1:
                f = sincos_to_sum(f).expand(mul=True, deep=False)
                if f.is_Add:
                    # Note: risch will be identical on the expanded
                    # expression, but maybe it will be able to pick out parts,
                    # like x*(exp(x) + erf(x)).
                    return self._eval_integral(f, x, **eval_kwargs)
            if h is not None:
                parts.append(coeff * h)
            else:
                return None
        return Add(*parts)
def _eval_lseries(self, x, logx=None, cdir=0):
expr = self.as_dummy()
symb = x
for l in expr.limits:
if x in l[1:]:
symb = l[0]
break
for term in expr.function.lseries(symb, logx):
yield integrate(term, *expr.limits)
def _eval_nseries(self, x, n, logx=None, cdir=0):
expr = self.as_dummy()
symb = x
for l in expr.limits:
if x in l[1:]:
symb = l[0]
break
terms, order = expr.function.nseries(
x=symb, n=n, logx=logx).as_coeff_add(Order)
order = [o.subs(symb, x) for o in order]
return integrate(terms, *expr.limits) + Add(*order)*x
def _eval_as_leading_term(self, x, logx=None, cdir=0):
series_gen = self.args[0].lseries(x)
for leading_term in series_gen:
if leading_term != 0:
break
return integrate(leading_term, *self.args[1:])
def _eval_simplify(self, **kwargs):
from sympy.core.exprtools import factor_terms
from sympy.simplify.simplify import simplify
expr = factor_terms(self)
if isinstance(expr, Integral):
return expr.func(*[simplify(i, **kwargs) for i in expr.args])
return expr.simplify(**kwargs)
def as_sum(self, n=None, method="midpoint", evaluate=True):
"""
Approximates a definite integral by a sum.
Parameters
==========
n :
The number of subintervals to use, optional.
method :
One of: 'left', 'right', 'midpoint', 'trapezoid'.
evaluate : bool
If False, returns an unevaluated Sum expression. The default
is True, evaluate the sum.
Notes
=====
These methods of approximate integration are described in [1].
Examples
========
>>> from sympy import sin, sqrt
>>> from sympy.abc import x, n
>>> from sympy.integrals import Integral
>>> e = Integral(sin(x), (x, 3, 7))
>>> e
Integral(sin(x), (x, 3, 7))
For demonstration purposes, this interval will only be split into 2
regions, bounded by [3, 5] and [5, 7].
The left-hand rule uses function evaluations at the left of each
interval:
>>> e.as_sum(2, 'left')
2*sin(5) + 2*sin(3)
The midpoint rule uses evaluations at the center of each interval:
>>> e.as_sum(2, 'midpoint')
2*sin(4) + 2*sin(6)
The right-hand rule uses function evaluations at the right of each
interval:
>>> e.as_sum(2, 'right')
2*sin(5) + 2*sin(7)
The trapezoid rule uses function evaluations on both sides of the
intervals. This is equivalent to taking the average of the left and
right hand rule results:
>>> e.as_sum(2, 'trapezoid')
2*sin(5) + sin(3) + sin(7)
>>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == _
True
Here, the discontinuity at x = 0 can be avoided by using the
midpoint or right-hand method:
>>> e = Integral(1/sqrt(x), (x, 0, 1))
>>> e.as_sum(5).n(4)
1.730
>>> e.as_sum(10).n(4)
1.809
>>> e.doit().n(4) # the actual value is 2
2.000
The left- or trapezoid method will encounter the discontinuity and
return infinity:
>>> e.as_sum(5, 'left')
zoo
The number of intervals can be symbolic. If omitted, a dummy symbol
will be used for it.
>>> e = Integral(x**2, (x, 0, 2))
>>> e.as_sum(n, 'right').expand()
8/3 + 4/n + 4/(3*n**2)
This shows that the midpoint rule is more accurate, as its error
term decays as the square of n:
>>> e.as_sum(method='midpoint').expand()
8/3 - 2/(3*_n**2)
A symbolic sum is returned with evaluate=False:
>>> e.as_sum(n, 'midpoint', evaluate=False)
2*Sum((2*_k/n - 1/n)**2, (_k, 1, n))/n
See Also
========
Integral.doit : Perform the integration using any hints
References
==========
.. [1] https://en.wikipedia.org/wiki/Riemann_sum#Methods
"""
from sympy.concrete.summations import Sum
limits = self.limits
if len(limits) > 1:
raise NotImplementedError(
"Multidimensional midpoint rule not implemented yet")
else:
limit = limits[0]
if (len(limit) != 3 or limit[1].is_finite is False or
limit[2].is_finite is False):
raise ValueError("Expecting a definite integral over "
"a finite interval.")
if n is None:
n = Dummy('n', integer=True, positive=True)
else:
n = sympify(n)
if (n.is_positive is False or n.is_integer is False or
n.is_finite is False):
raise ValueError("n must be a positive integer, got %s" % n)
x, a, b = limit
dx = (b - a)/n
k = Dummy('k', integer=True, positive=True)
f = self.function
if method == "left":
result = dx*Sum(f.subs(x, a + (k-1)*dx), (k, 1, n))
elif method == "right":
result = dx*Sum(f.subs(x, a + k*dx), (k, 1, n))
elif method == "midpoint":
result = dx*Sum(f.subs(x, a + k*dx - dx/2), (k, 1, n))
elif method == "trapezoid":
result = dx*((f.subs(x, a) + f.subs(x, b))/2 +
Sum(f.subs(x, a + k*dx), (k, 1, n - 1)))
else:
raise ValueError("Unknown method %s" % method)
return result.doit() if evaluate else result
    def principal_value(self, **kwargs):
        """
        Compute the Cauchy Principal Value of the definite integral of a real function in the given interval
        on the real axis.
        Explanation
        ===========
        In mathematics, the Cauchy principal value, is a method for assigning values to certain improper
        integrals which would otherwise be undefined.
        Examples
        ========
        >>> from sympy import oo
        >>> from sympy.integrals.integrals import Integral
        >>> from sympy.abc import x
        >>> Integral(x+1, (x, -oo, oo)).principal_value()
        oo
        >>> f = 1 / (x**3)
        >>> Integral(f, (x, -oo, oo)).principal_value()
        0
        >>> Integral(f, (x, -10, 10)).principal_value()
        0
        >>> Integral(f, (x, -10, oo)).principal_value() + Integral(f, (x, -oo, 10)).principal_value()
        0
        References
        ==========
        .. [1] https://en.wikipedia.org/wiki/Cauchy_principal_value
        .. [2] http://mathworld.wolfram.com/CauchyPrincipalValue.html
        """
        from sympy.calculus import singularities
        # Exactly one definite limit (x, a, b) is supported.
        if len(self.limits) != 1 or len(list(self.limits[0])) != 3:
            raise ValueError("You need to insert a variable, lower_limit, and upper_limit correctly to calculate "
                             "cauchy's principal value")
        x, a, b = self.limits[0]
        # Endpoints must be comparable (numerically orderable) with a <= b.
        if not (a.is_comparable and b.is_comparable and a <= b):
            raise ValueError("The lower_limit must be smaller than or equal to the upper_limit to calculate "
                             "cauchy's principal value. Also, a and b need to be comparable.")
        if a == b:
            return 0
        # r is the shrinking exclusion radius placed around each interior
        # singularity when taking the symmetric limit below.
        r = Dummy('r')
        f = self.function
        # Interior singularities of f on [a, b]; a singularity sitting exactly
        # on an endpoint makes the principal value undefined.
        singularities_list = [s for s in singularities(f, x) if s.is_comparable and a <= s <= b]
        for i in singularities_list:
            if (i == b) or (i == a):
                raise ValueError(
                    'The principal value is not defined in the given interval due to singularity at %d.' % (i))
        # If no closed-form antiderivative is found, give up and return self
        # unevaluated.
        F = integrate(f, x, **kwargs)
        if F.has(Integral):
            return self
        # NOTE(review): `is` works here only because oo/-oo are SymPy
        # singletons; `==` would be the more conventional test — confirm.
        if a is -oo and b is oo:
            # Doubly infinite interval: symmetric limit lim_{R->oo} F(R) - F(-R).
            I = limit(F - F.subs(x, -x), x, oo)
        else:
            # Fundamental theorem of calculus with one-sided limits at the
            # endpoints (handles endpoint divergence of F).
            I = limit(F, x, b, '-') - limit(F, x, a, '+')
        # Add the symmetric contribution around each interior singularity s:
        # lim_{r->0+} F(s - r) - F(s + r).
        for s in singularities_list:
            I += limit(((F.subs(x, s - r)) - F.subs(x, s + r)), r, 0, '+')
        return I
def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs):
    """integrate(f, var, ...)

    Explanation
    ===========

    Compute definite or indefinite integral of one or more variables
    using Risch-Norman algorithm and table lookup. This procedure is
    able to handle elementary algebraic and transcendental functions
    and also a huge class of special functions, including Airy,
    Bessel, Whittaker and Lambert.

    var can be:

    - a symbol -- indefinite integration
    - a tuple (symbol, a) -- indefinite integration with result
      given with `a` replacing `symbol`
    - a tuple (symbol, a, b) -- definite integration

    Several variables can be specified, in which case the result is
    multiple integration. (If var is omitted and the integrand is
    univariate, the indefinite integral in that variable will be performed.)

    Indefinite integrals are returned without terms that are independent
    of the integration variables. (see examples)

    Definite improper integrals often entail delicate convergence
    conditions. Pass conds='piecewise', 'separate' or 'none' to have
    these returned, respectively, as a Piecewise function, as a separate
    result (i.e. result will be a tuple), or not at all (default is
    'piecewise').

    **Strategy**

    SymPy uses various approaches to definite integration. One method is to
    find an antiderivative for the integrand, and then use the fundamental
    theorem of calculus. Various functions are implemented to integrate
    polynomial, rational and trigonometric functions, and integrands
    containing DiracDelta terms.

    SymPy also implements the part of the Risch algorithm, which is a decision
    procedure for integrating elementary functions, i.e., the algorithm can
    either find an elementary antiderivative, or prove that one does not
    exist. There is also a (very successful, albeit somewhat slow) general
    implementation of the heuristic Risch algorithm. This algorithm will
    eventually be phased out as more of the full Risch algorithm is
    implemented. See the docstring of Integral._eval_integral() for more
    details on computing the antiderivative using algebraic methods.

    The option risch=True can be used to use only the (full) Risch algorithm.
    This is useful if you want to know if an elementary function has an
    elementary antiderivative. If the indefinite Integral returned by this
    function is an instance of NonElementaryIntegral, that means that the
    Risch algorithm has proven that integral to be non-elementary. Note that
    by default, additional methods (such as the Meijer G method outlined
    below) are tried on these integrals, as they may be expressible in terms
    of special functions, so if you only care about elementary answers, use
    risch=True. Also note that an unevaluated Integral returned by this
    function is not necessarily a NonElementaryIntegral, even with risch=True,
    as it may just be an indication that the particular part of the Risch
    algorithm needed to integrate that function is not yet implemented.

    Another family of strategies comes from re-writing the integrand in
    terms of so-called Meijer G-functions. Indefinite integrals of a
    single G-function can always be computed, and the definite integral
    of a product of two G-functions can be computed from zero to
    infinity. Various strategies are implemented to rewrite integrands
    as G-functions, and use this information to compute integrals (see
    the ``meijerint`` module).

    The option manual=True can be used to use only an algorithm that tries
    to mimic integration by hand. This algorithm does not handle as many
    integrands as the other algorithms implemented but may return results in
    a more familiar form. The ``manualintegrate`` module has functions that
    return the steps used (see the module docstring for more information).

    In general, the algebraic methods work best for computing
    antiderivatives of (possibly complicated) combinations of elementary
    functions. The G-function methods work best for computing definite
    integrals from zero to infinity of moderately complicated
    combinations of special functions, or indefinite integrals of very
    simple combinations of special functions.

    The strategy employed by the integration code is as follows:

    - If computing a definite integral, and both limits are real,
      and at least one limit is +- oo, try the G-function method of
      definite integration first.
    - Try to find an antiderivative, using all available methods, ordered
      by performance (that is try fastest method first, slowest last; in
      particular polynomial integration is tried first, Meijer
      G-functions second to last, and heuristic Risch last).
    - If still not successful, try G-functions irrespective of the
      limits.

    The option meijerg=True, False, None can be used to, respectively:
    always use G-function methods and no others, never use G-function
    methods, or use all available methods (in order as described above).
    It defaults to None.

    Examples
    ========

    >>> from sympy import integrate, log, exp, oo
    >>> from sympy.abc import a, x, y

    >>> integrate(x*y, x)
    x**2*y/2

    >>> integrate(log(x), x)
    x*log(x) - x

    >>> integrate(log(x), (x, 1, a))
    a*log(a) - a + 1

    >>> integrate(x)
    x**2/2

    Terms that are independent of x are dropped by indefinite integration:

    >>> from sympy import sqrt
    >>> integrate(sqrt(1 + x), (x, 0, x))
    2*(x + 1)**(3/2)/3 - 2/3
    >>> integrate(sqrt(1 + x), x)
    2*(x + 1)**(3/2)/3

    >>> integrate(x*y)
    Traceback (most recent call last):
    ...
    ValueError: specify integration variables to integrate x*y

    Note that ``integrate(x)`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.

    >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'
    Piecewise((gamma(a + 1), re(a) > -1),
        (Integral(x**a*exp(-x), (x, 0, oo)), True))

    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')
    gamma(a + 1)

    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')
    (gamma(a + 1), -re(a) < 1)

    See Also
    ========

    Integral, Integral.doit
    """
    # Flags forwarded to Integral.doit; deep=False because the Integral we
    # construct here is the outermost node and its arguments were supplied
    # directly by the caller.
    eval_hints = dict(deep=False, meijerg=meijerg, conds=conds,
                      risch=risch, heurisch=heurisch, manual=manual)
    expr = Integral(*args, **kwargs)
    if not isinstance(expr, Integral):
        # Integral.__new__ may hand back a non-Integral (e.g. when the
        # integrand defines _eval_Integral); evaluate any Integral
        # sub-arguments it contains and rebuild the expression.
        evaluated = [arg.doit(**eval_hints) if isinstance(arg, Integral) else arg
                     for arg in expr.args]
        return expr.func(*evaluated)
    return expr.doit(**eval_hints)
def line_integrate(field, curve, vars):
    """line_integrate(field, Curve, variables)

    Compute the line integral.

    Examples
    ========

    >>> from sympy import Curve, line_integrate, E, ln
    >>> from sympy.abc import x, y, t
    >>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2)))
    >>> line_integrate(x + y, C, [x, y])
    3*sqrt(2)

    See Also
    ========

    sympy.integrals.integrals.integrate, Integral
    """
    from sympy.geometry import Curve
    F = sympify(field)
    # Validate all arguments up front before doing symbolic work.
    if not F:
        raise ValueError(
            "Expecting function specifying field as first argument.")
    if not isinstance(curve, Curve):
        raise ValueError("Expecting Curve entity as second argument.")
    if not is_sequence(vars):
        raise ValueError("Expecting ordered iterable for variables.")
    if len(curve.functions) != len(vars):
        raise ValueError("Field variable size does not match curve dimension.")
    if curve.parameter in vars:
        raise ValueError("Curve parameter clashes with field parameters.")
    # Substitute the parametrization into the field, F(r) -> F(r(t)), while
    # accumulating the squared speed |r'(t)|**2 for the arc-length factor.
    integrand = F
    speed_sq = 0
    for var, component in zip(vars, curve.functions):
        dcomp = diff(component, curve.parameter)
        speed_sq = speed_sq + dcomp*dcomp
        integrand = integrand.subs(var, component)
    # ds = |r'(t)| dt, so the full integrand is F(r(t)) * sqrt(|r'(t)|**2).
    integrand = integrand*sqrt(speed_sq)
    return Integral(integrand, curve.limits).doit(deep=False)
### Property function dispatching ###
@shape.register(Integral)
def _(expr):
    # An Integral has the shape of its integrand: integrating an
    # array/matrix-valued expression does not change its shape.
    return shape(expr.function)
| from sympy.concrete.expr_with_limits import AddWithLimits
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import diff
from sympy.core.logic import fuzzy_bool
from sympy.core.mul import Mul
from sympy.core.numbers import oo, pi
from sympy.core.relational import Ne
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, Wild)
from sympy.core.sympify import sympify
from sympy.functions import Piecewise, sqrt, piecewise_fold, tan, cot, atan
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.complexes import Abs, sign
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.integrals.manualintegrate import manualintegrate
from sympy.integrals.trigonometry import trigintegrate
from sympy.integrals.meijerint import meijerint_definite, meijerint_indefinite
from sympy.matrices import MatrixBase
from sympy.polys import Poly, PolynomialError
from sympy.series import limit
from sympy.series.order import Order
from sympy.series.formal import FormalPowerSeries
from sympy.simplify.fu import sincos_to_sum
from sympy.tensor.functions import shape
from sympy.utilities.misc import filldedent
from sympy.utilities.exceptions import SymPyDeprecationWarning
class Integral(AddWithLimits):
"""Represents unevaluated integral."""
__slots__ = ('is_commutative',)
def __new__(cls, function, *symbols, **assumptions):
"""Create an unevaluated integral.
Explanation
===========
Arguments are an integrand followed by one or more limits.
If no limits are given and there is only one free symbol in the
expression, that symbol will be used, otherwise an error will be
raised.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x)
Integral(x, x)
>>> Integral(y)
Integral(y, y)
When limits are provided, they are interpreted as follows (using
``x`` as though it were the variable of integration):
(x,) or x - indefinite integral
(x, a) - "evaluate at" integral is an abstract antiderivative
(x, a, b) - definite integral
The ``as_dummy`` method can be used to see which symbols cannot be
targeted by subs: those with a prepended underscore cannot be
changed with ``subs``. (Also, the integration variables themselves --
the first element of a limit -- can never be changed by subs.)
>>> i = Integral(x, x)
>>> at = Integral(x, (x, x))
>>> i.as_dummy()
Integral(x, x)
>>> at.as_dummy()
Integral(_0, (_0, x))
"""
#This will help other classes define their own definitions
#of behaviour with Integral.
if hasattr(function, '_eval_Integral'):
return function._eval_Integral(*symbols, **assumptions)
if isinstance(function, Poly):
SymPyDeprecationWarning(
feature="Using integrate/Integral with Poly",
issue=18613,
deprecated_since_version="1.6",
useinstead="the as_expr or integrate methods of Poly").warn()
obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def __getnewargs__(self):
return (self.function,) + tuple([tuple(xab) for xab in self.limits])
    @property
    def free_symbols(self):
        """
        This method returns the symbols that will exist when the
        integral is evaluated. This is useful if one is trying to
        determine whether an integral depends on a certain
        symbol or not.
        Examples
        ========
        >>> from sympy import Integral
        >>> from sympy.abc import x, y
        >>> Integral(x, (x, y, 1)).free_symbols
        {y}
        See Also
        ========
        sympy.concrete.expr_with_limits.ExprWithLimits.function
        sympy.concrete.expr_with_limits.ExprWithLimits.limits
        sympy.concrete.expr_with_limits.ExprWithLimits.variables
        """
        # Delegate to the base-class property, which excludes bound
        # integration variables and includes symbols from the limits.
        return AddWithLimits.free_symbols.fget(self)
    def _eval_is_zero(self):
        # This is a very naive and quick test, not intended to do the integral to
        # answer whether it is zero or not, e.g. Integral(sin(x), (x, 0, 2*pi))
        # is zero but this routine should return None for that case. But, like
        # Mul, there are trivial situations for which the integral will be
        # zero so we check for those.
        #
        # Three-valued logic throughout: True = definitely zero,
        # False = definitely nonzero, None (implicit) = unknown.
        if self.function.is_zero:
            return True
        got_none = False
        # A definite limit with equal endpoints makes the integral zero.
        for l in self.limits:
            if len(l) == 3:
                z = (l[1] == l[2]) or (l[1] - l[2]).is_zero
                if z:
                    return True
                elif z is None:
                    got_none = True
        free = self.function.free_symbols
        for xab in self.limits:
            if len(xab) == 1:
                free.add(xab[0])
                continue
            # An "evaluate at" limit (x, a) with a == 0 zeroes the result
            # when the integrand does not depend on x.
            if len(xab) == 2 and xab[0] not in free:
                if xab[1].is_zero:
                    return True
                elif xab[1].is_zero is None:
                    got_none = True
            # take integration symbol out of free since it will be replaced
            # with the free symbols in the limits
            free.discard(xab[0])
            # add in the new symbols
            for i in xab[1:]:
                free.update(i.free_symbols)
        # Only claim "nonzero" when nothing along the way was indeterminate;
        # otherwise fall through and return None (unknown).
        if self.function.is_zero is False and got_none is False:
            return False
    def transform(self, x, u):
        r"""
        Performs a change of variables from `x` to `u` using the relationship
        given by `x` and `u` which will define the transformations `f` and `F`
        (which are inverses of each other) as follows:
        1) If `x` is a Symbol (which is a variable of integration) then `u`
        will be interpreted as some function, f(u), with inverse F(u).
        This, in effect, just makes the substitution of x with f(x).
        2) If `u` is a Symbol then `x` will be interpreted as some function,
        F(x), with inverse f(u). This is commonly referred to as
        u-substitution.
        Once f and F have been identified, the transformation is made as
        follows:
        .. math:: \int_a^b x \mathrm{d}x \rightarrow \int_{F(a)}^{F(b)} f(x)
                  \frac{\mathrm{d}}{\mathrm{d}x}
        where `F(x)` is the inverse of `f(x)` and the limits and integrand have
        been corrected so as to retain the same value after integration.
        Notes
        =====
        The mappings, F(x) or f(u), must lead to a unique integral. Linear
        or rational linear expression, ``2*x``, ``1/x`` and ``sqrt(x)``, will
        always work; quadratic expressions like ``x**2 - 1`` are acceptable
        as long as the resulting integrand does not depend on the sign of
        the solutions (see examples).
        The integral will be returned unchanged if ``x`` is not a variable of
        integration.
        ``x`` must be (or contain) only one of the integration variables. If
        ``u`` has more than one free symbol then it should be sent as a tuple
        (``u``, ``uvar``) where ``uvar`` identifies which variable is replacing
        the integration variable.
        XXX can it contain another integration variable?
        Examples
        ========
        >>> from sympy.abc import a, x, u
        >>> from sympy import Integral, cos, sqrt
        >>> i = Integral(x*cos(x**2 - 1), (x, 0, 1))
        transform can change the variable of integration
        >>> i.transform(x, u)
        Integral(u*cos(u**2 - 1), (u, 0, 1))
        transform can perform u-substitution as long as a unique
        integrand is obtained:
        >>> i.transform(x**2 - 1, u)
        Integral(cos(u)/2, (u, -1, 0))
        This attempt fails because x = +/-sqrt(u + 1) and the
        sign does not cancel out of the integrand:
        >>> Integral(cos(x**2 - 1), (x, 0, 1)).transform(x**2 - 1, u)
        Traceback (most recent call last):
        ...
        ValueError:
        The mapping between F(x) and f(u) did not give a unique integrand.
        transform can do a substitution. Here, the previous
        result is transformed back into the original expression
        using "u-substitution":
        >>> ui = _
        >>> _.transform(sqrt(u + 1), x) == i
        True
        We can accomplish the same with a regular substitution:
        >>> ui.transform(u, x**2 - 1) == i
        True
        If the `x` does not contain a symbol of integration then
        the integral will be returned unchanged. Integral `i` does
        not have an integration variable `a` so no change is made:
        >>> i.transform(a, x) == i
        True
        When `u` has more than one free symbol the symbol that is
        replacing `x` must be identified by passing `u` as a tuple:
        >>> Integral(x, (x, 0, 1)).transform(x, (u + a, u))
        Integral(a + u, (u, -a, 1 - a))
        >>> Integral(x, (x, 0, 1)).transform(x, (u + a, a))
        Integral(a + u, (a, -u, 1 - u))
        See Also
        ========
        sympy.concrete.expr_with_limits.ExprWithLimits.variables : Lists the integration variables
        as_dummy : Replace integration variables with dummy ones
        """
        from sympy.solvers.solvers import solve, posify
        # d is a neutral placeholder variable used while manipulating both
        # the forward map f and the inverse map F.
        d = Dummy('d')
        # x may only involve a single integration variable.
        xfree = x.free_symbols.intersection(self.variables)
        if len(xfree) > 1:
            raise ValueError(
                'F(x) can only contain one of: %s' % self.variables)
        xvar = xfree.pop() if xfree else d
        if xvar not in self.variables:
            # x does not involve an integration variable: nothing to do.
            return self
        u = sympify(u)
        if isinstance(u, Expr):
            # The replacement variable is inferred from u's free symbols,
            # so u must have exactly one.
            ufree = u.free_symbols
            if len(ufree) == 0:
                raise ValueError(filldedent('''
                f(u) cannot be a constant'''))
            if len(ufree) > 1:
                raise ValueError(filldedent('''
                When f(u) has more than one free symbol, the one replacing x
                must be identified: pass f(u) as (f(u), u)'''))
            uvar = ufree.pop()
        else:
            # u was passed as a tuple (expr, symbol) naming the new variable.
            u, uvar = u
            if uvar not in u.free_symbols:
                raise ValueError(filldedent('''
                Expecting a tuple (expr, symbol) where symbol identified
                a free symbol in expr, but symbol is not in expr's free
                symbols.'''))
            if not isinstance(uvar, Symbol):
                # This probably never evaluates to True
                raise ValueError(filldedent('''
                Expecting a tuple (expr, symbol) but didn't get
                a symbol; got %s''' % uvar))
        if x.is_Symbol and u.is_Symbol:
            # Pure renaming of the integration variable.
            return self.xreplace({x: u})
        if not x.is_Symbol and not u.is_Symbol:
            raise ValueError('either x or u must be a symbol')
        if uvar == xvar:
            # Same symbol on both sides: route through the dummy d to avoid
            # a name clash, then rename back.
            return self.transform(x, (u.subs(uvar, d), d)).xreplace({d: uvar})
        if uvar in self.limits:
            raise ValueError(filldedent('''
            u must contain the same variable as in x
            or a variable that is not already an integration variable'''))
        if not x.is_Symbol:
            # Case 2 (u-substitution): x is F(xvar); invert by solving
            # u - x == 0 for xvar to obtain the candidate f's.
            F = [x.subs(xvar, d)]
            soln = solve(u - x, xvar, check=False)
            if not soln:
                raise ValueError('no solution for solve(F(x) - f(u), x)')
            f = [fi.subs(uvar, d) for fi in soln]
        else:
            # Case 1: u is f(uvar); invert by solving for uvar. posify is
            # used so that solve can make progress with assumption-free
            # symbols; its replacements are undone afterwards.
            f = [u.subs(uvar, d)]
            pdiff, reps = posify(u - x)
            puvar = uvar.subs([(v, k) for k, v in reps.items()])
            soln = [s.subs(reps) for s in solve(pdiff, puvar)]
            if not soln:
                raise ValueError('no solution for solve(F(x) - f(u), u)')
            F = [fi.subs(xvar, d) for fi in soln]
        # Each solution branch fi must yield the same transformed integrand
        # (integrand(f) * f'), otherwise the substitution is ambiguous.
        newfuncs = {(self.function.subs(xvar, fi)*fi.diff(d)
                        ).subs(d, uvar) for fi in f}
        if len(newfuncs) > 1:
            raise ValueError(filldedent('''
            The mapping between F(x) and f(u) did not give
            a unique integrand.'''))
        newfunc = newfuncs.pop()

        def _calc_limit_1(F, a, b):
            """
            replace d with a, using subs if possible, otherwise limit
            where sign of b is considered
            """
            wok = F.subs(d, a)
            if wok is S.NaN or wok.is_finite is False and a.is_finite:
                return limit(sign(b)*F, d, a)
            return wok

        def _calc_limit(a, b):
            """
            replace d with a in every inverse-map branch; all branches must
            agree on the resulting limit, which is then returned
            """
            avals = list({_calc_limit_1(Fi, a, b) for Fi in F})
            if len(avals) > 1:
                raise ValueError(filldedent('''
                The mapping between F(x) and f(u) did not
                give a unique limit.'''))
            return avals[0]
        # Map each limit through the transformation, keeping limits for
        # other integration variables untouched.
        newlimits = []
        for xab in self.limits:
            sym = xab[0]
            if sym == xvar:
                if len(xab) == 3:
                    a, b = xab[1:]
                    a, b = _calc_limit(a, b), _calc_limit(b, a)
                    if fuzzy_bool(a - b > 0):
                        # Keep limits ordered; flipping them negates the
                        # integrand to preserve the value.
                        a, b = b, a
                        newfunc = -newfunc
                    newlimits.append((uvar, a, b))
                elif len(xab) == 2:
                    a = _calc_limit(xab[1], 1)
                    newlimits.append((uvar, a))
                else:
                    newlimits.append(uvar)
            else:
                newlimits.append(xab)
        return self.func(newfunc, *newlimits)
    def doit(self, **hints):
        """
        Perform the integration using any hints given.
        Examples
        ========
        >>> from sympy import Piecewise, S
        >>> from sympy.abc import x, t
        >>> p = x**2 + Piecewise((0, x/t < 0), (1, True))
        >>> p.integrate((t, S(4)/5, 1), (x, -1, 1))
        1/3
        See Also
        ========
        sympy.integrals.trigonometry.trigintegrate
        sympy.integrals.heurisch.heurisch
        sympy.integrals.rationaltools.ratint
        as_sum : Approximate the integral using a sum
        """
        from sympy.concrete.summations import Sum
        if not hints.get('integrals', True):
            return self

        deep = hints.get('deep', True)
        meijerg = hints.get('meijerg', None)
        conds = hints.get('conds', 'piecewise')
        risch = hints.get('risch', None)
        heurisch = hints.get('heurisch', None)
        manual = hints.get('manual', None)
        # The algorithm-selection flags are mutually exclusive; setting one
        # disables the others.
        if len(list(filter(None, (manual, meijerg, risch, heurisch)))) > 1:
            raise ValueError("At most one of manual, meijerg, risch, heurisch can be True")
        elif manual:
            meijerg = risch = heurisch = False
        elif meijerg:
            manual = risch = heurisch = False
        elif risch:
            manual = meijerg = heurisch = False
        elif heurisch:
            manual = meijerg = risch = False
        eval_kwargs = dict(meijerg=meijerg, risch=risch, manual=manual, heurisch=heurisch,
            conds=conds)
        if conds not in ('separate', 'piecewise', 'none'):
            raise ValueError('conds must be one of "separate", "piecewise", '
                             '"none", got: %s' % conds)
        if risch and any(len(xab) > 1 for xab in self.limits):
            raise ValueError('risch=True is only allowed for indefinite integrals.')

        # check for the trivial zero
        if self.is_zero:
            return S.Zero

        # hacks to handle integrals of
        # nested summations
        if isinstance(self.function, Sum):
            if any(v in self.function.limits[0] for v in self.variables):
                raise ValueError('Limit of the sum cannot be an integration variable.')
            if any(l.is_infinite for l in self.function.limits[0][1:]):
                return self
            # Swap the order: integrate the summand, then re-apply the Sum.
            _i = self
            _sum = self.function
            return _sum.func(_i.func(_sum.function, *_i.limits).doit(), *_sum.limits).doit()

        # now compute and check the function
        function = self.function
        if deep:
            function = function.doit(**hints)
        if function.is_zero:
            return S.Zero

        # hacks to handle special cases
        if isinstance(function, MatrixBase):
            # Integrate a matrix integrand entrywise.
            return function.applyfunc(
                lambda f: self.func(f, self.limits).doit(**hints))

        if isinstance(function, FormalPowerSeries):
            if len(self.limits) > 1:
                raise NotImplementedError
            xab = self.limits[0]
            if len(xab) > 1:
                return function.integrate(xab, **eval_kwargs)
            else:
                return function.integrate(xab[0], **eval_kwargs)

        # There is no trivial answer and special handling
        # is done so continue

        # first make sure any definite limits have integration
        # variables with matching assumptions
        reps = {}
        for xab in self.limits:
            if len(xab) != 3:
                continue
            x, a, b = xab
            l = (a, b)
            # Replace the integration variable with a Dummy carrying the
            # assumptions implied by the limits; this helps later
            # simplification and is undone before returning.
            if all(i.is_nonnegative for i in l) and not x.is_nonnegative:
                d = Dummy(positive=True)
            elif all(i.is_nonpositive for i in l) and not x.is_nonpositive:
                d = Dummy(negative=True)
            elif all(i.is_real for i in l) and not x.is_real:
                d = Dummy(real=True)
            else:
                d = None
            if d:
                reps[x] = d
        if reps:
            undo = {v: k for k, v in reps.items()}
            did = self.xreplace(reps).doit(**hints)
            if isinstance(did, tuple):  # when separate=True
                did = tuple([i.xreplace(undo) for i in did])
            else:
                did = did.xreplace(undo)
            return did

        # continue with existing assumptions
        undone_limits = []
        # ulj = free symbols of any undone limits' upper and lower limits
        ulj = set()
        for xab in self.limits:
            # compute uli, the free symbols in the
            # Upper and Lower limits of limit I
            if len(xab) == 1:
                uli = set(xab[:1])
            elif len(xab) == 2:
                uli = xab[1].free_symbols
            elif len(xab) == 3:
                uli = xab[1].free_symbols.union(xab[2].free_symbols)
            # this integral can be done as long as there is no blocking
            # limit that has been undone. An undone limit is blocking if
            # it contains an integration variable that is in this limit's
            # upper or lower free symbols or vice versa
            if xab[0] in ulj or any(v[0] in uli for v in undone_limits):
                undone_limits.append(xab)
                ulj.update(uli)
                function = self.func(*([function] + [xab]))
                factored_function = function.factor()
                if not isinstance(factored_function, Integral):
                    function = factored_function
                continue
            if function.has(Abs, sign) and (
                (len(xab) < 3 and all(x.is_extended_real for x in xab)) or
                (len(xab) == 3 and all(x.is_extended_real and not x.is_infinite for
                 x in xab[1:]))):
                # some improper integrals are better off with Abs
                xr = Dummy("xr", real=True)
                function = (function.xreplace({xab[0]: xr})
                    .rewrite(Piecewise).xreplace({xr: xab[0]}))
            elif function.has(Min, Max):
                function = function.rewrite(Piecewise)
            if (function.has(Piecewise) and
                    not isinstance(function, Piecewise)):
                function = piecewise_fold(function)
            if isinstance(function, Piecewise):
                if len(xab) == 1:
                    antideriv = function._eval_integral(xab[0],
                        **eval_kwargs)
                else:
                    antideriv = self._eval_integral(
                        function, xab[0], **eval_kwargs)
            else:
                # There are a number of tradeoffs in using the
                # Meijer G method. It can sometimes be a lot faster
                # than other methods, and sometimes slower. And
                # there are certain types of integrals for which it
                # is more likely to work than others. These
                # heuristics are incorporated in deciding what
                # integration methods to try, in what order. See the
                # integrate() docstring for details.
                def try_meijerg(function, xab):
                    ret = None
                    if len(xab) == 3 and meijerg is not False:
                        x, a, b = xab
                        try:
                            res = meijerint_definite(function, x, a, b)
                        except NotImplementedError:
                            from sympy.integrals.meijerint import _debug
                            _debug('NotImplementedError '
                                'from meijerint_definite')
                            res = None
                        if res is not None:
                            f, cond = res
                            if conds == 'piecewise':
                                ret = Piecewise(
                                    (f, cond),
                                    (self.func(
                                    function, (x, a, b)), True))
                            elif conds == 'separate':
                                if len(self.limits) != 1:
                                    raise ValueError(filldedent('''
                                        conds=separate not supported in
                                        multiple integrals'''))
                                ret = f, cond
                            else:
                                ret = f
                    return ret

                meijerg1 = meijerg
                # Prefer the definite Meijer G path when integrating a
                # non-polynomial over a real interval with an infinite end.
                if (meijerg is not False and
                        len(xab) == 3 and xab[1].is_extended_real and xab[2].is_extended_real
                        and not function.is_Poly and
                        (xab[1].has(oo, -oo) or xab[2].has(oo, -oo))):
                    ret = try_meijerg(function, xab)
                    if ret is not None:
                        function = ret
                        continue
                    meijerg1 = False
                # If the special meijerg code did not succeed in
                # finding a definite integral, then the code using
                # meijerint_indefinite will not either (it might
                # find an antiderivative, but the answer is likely
                # to be nonsensical). Thus if we are requested to
                # only use Meijer G-function methods, we give up at
                # this stage. Otherwise we just disable G-function
                # methods.
                if meijerg1 is False and meijerg is True:
                    antideriv = None
                else:
                    antideriv = self._eval_integral(
                        function, xab[0], **eval_kwargs)
                    if antideriv is None and meijerg is True:
                        ret = try_meijerg(function, xab)
                        if ret is not None:
                            function = ret
                            continue

            final = hints.get('final', True)
            # doit may be iterated but floor terms making atan and acot
            # continuous should only be added in the final round
            if (final and not isinstance(antideriv, Integral) and
                antideriv is not None):
                for atan_term in antideriv.atoms(atan):
                    atan_arg = atan_term.args[0]
                    # Checking `atan_arg` to be linear combination of `tan` or `cot`
                    for tan_part in atan_arg.atoms(tan):
                        x1 = Dummy('x1')
                        tan_exp1 = atan_arg.subs(tan_part, x1)
                        # The coefficient of `tan` should be constant
                        coeff = tan_exp1.diff(x1)
                        if x1 not in coeff.free_symbols:
                            a = tan_part.args[0]
                            antideriv = antideriv.subs(atan_term, Add(atan_term,
                                sign(coeff)*pi*floor((a-pi/2)/pi)))
                    for cot_part in atan_arg.atoms(cot):
                        x1 = Dummy('x1')
                        cot_exp1 = atan_arg.subs(cot_part, x1)
                        # The coefficient of `cot` should be constant
                        coeff = cot_exp1.diff(x1)
                        if x1 not in coeff.free_symbols:
                            a = cot_part.args[0]
                            antideriv = antideriv.subs(atan_term, Add(atan_term,
                                sign(coeff)*pi*floor((a)/pi)))

            if antideriv is None:
                undone_limits.append(xab)
                # NOTE(review): unlike the two sibling sites (the undone-limit
                # branch above and the NotImplementedError handler below),
                # .factor() is called here on the rebuilt Integral *before*
                # the factored_function check, so `function` ends up factored
                # even when the factor result is still an Integral — confirm
                # whether the trailing .factor() on this line is intentional.
                function = self.func(*([function] + [xab])).factor()
                factored_function = function.factor()
                if not isinstance(factored_function, Integral):
                    function = factored_function
                continue
            else:
                if len(xab) == 1:
                    # Indefinite limit: the antiderivative is the answer.
                    function = antideriv
                else:
                    if len(xab) == 3:
                        x, a, b = xab
                    elif len(xab) == 2:
                        x, b = xab
                        a = None
                    else:
                        raise NotImplementedError

                    if deep:
                        if isinstance(a, Basic):
                            a = a.doit(**hints)
                        if isinstance(b, Basic):
                            b = b.doit(**hints)

                    if antideriv.is_Poly:
                        gens = list(antideriv.gens)
                        gens.remove(x)

                        antideriv = antideriv.as_expr()

                        function = antideriv._eval_interval(x, a, b)
                        function = Poly(function, *gens)
                    else:
                        def is_indef_int(g, x):
                            return (isinstance(g, Integral) and
                                    any(i == (x,) for i in g.limits))

                        def eval_factored(f, x, a, b):
                            # _eval_interval for integrals with
                            # (constant) factors
                            # a single indefinite integral is assumed
                            args = []
                            for g in Mul.make_args(f):
                                if is_indef_int(g, x):
                                    args.append(g._eval_interval(x, a, b))
                                else:
                                    args.append(g)
                            return Mul(*args)

                        # Split the antiderivative into terms containing an
                        # unevaluated indefinite Integral, Piecewise terms,
                        # and plain terms, and evaluate each group over the
                        # interval in its own way.
                        integrals, others, piecewises = [], [], []
                        for f in Add.make_args(antideriv):
                            if any(is_indef_int(g, x)
                                   for g in Mul.make_args(f)):
                                integrals.append(f)
                            elif any(isinstance(g, Piecewise)
                                     for g in Mul.make_args(f)):
                                piecewises.append(piecewise_fold(f))
                            else:
                                others.append(f)
                        uneval = Add(*[eval_factored(f, x, a, b)
                                       for f in integrals])
                        try:
                            evalued = Add(*others)._eval_interval(x, a, b)
                            evalued_pw = piecewise_fold(Add(*piecewises))._eval_interval(x, a, b)
                            function = uneval + evalued + evalued_pw
                        except NotImplementedError:
                            # This can happen if _eval_interval depends in a
                            # complicated way on limits that cannot be computed
                            undone_limits.append(xab)
                            function = self.func(*([function] + [xab]))
                            factored_function = function.factor()
                            if not isinstance(factored_function, Integral):
                                function = factored_function
        return function
def _eval_derivative(self, sym):
    """Evaluate the derivative of the current Integral object by
    differentiating under the integral sign [1], using the Fundamental
    Theorem of Calculus [2] when possible.

    Explanation
    ===========

    Whenever an Integral is encountered that is equivalent to zero or
    has an integrand that is independent of the variable of integration
    those integrals are performed. All others are returned as Integral
    instances which can be resolved with doit() (provided they are integrable).

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign
    .. [2] https://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x, y
    >>> i = Integral(x + y, y, (y, 1, x))
    >>> i.diff(x)
    Integral(x + y, (y, x)) + Integral(1, y, (y, 1, x))
    >>> i.doit().diff(x) == i.diff(x).doit()
    True
    >>> i.diff(y)
    0

    The previous must be true since there is no y in the evaluated integral:

    >>> i.free_symbols
    {x}
    >>> i.doit()
    2*x**3/3 - x/2 - 1/6
    """
    # differentiate under the integral sign; we do not
    # check for regularity conditions (TODO), see issue 4215

    # get limits and the function
    f, limits = self.function, list(self.limits)

    # the order matters if variables of integration appear in the limits
    # so work our way in from the outside to the inside: peel off the
    # outermost limit and treat everything inside it as the integrand.
    limit = limits.pop(-1)
    if len(limit) == 3:
        x, a, b = limit
    elif len(limit) == 2:
        x, b = limit
        a = None
    else:
        # 1-tuple limit: indefinite integral, no endpoints
        a = b = None
        x = limit[0]

    if limits:  # f is the argument to an integral
        f = self.func(f, *tuple(limits))

    # assemble the pieces
    def _do(f, ab):
        # One Leibniz boundary term: f evaluated at endpoint ``ab``
        # times d(ab)/d(sym); zero when the endpoint is independent of sym.
        dab_dsym = diff(ab, sym)
        if not dab_dsym:
            return S.Zero
        if isinstance(f, Integral):
            # replace bare 1-tuple limits (x,) with (x, x) so the
            # endpoint substitution below targets the right variable
            limits = [(x, x) if (len(l) == 1 and l[0] == x) else l
                      for l in f.limits]
            f = self.func(f.function, *limits)
        return f.subs(x, ab)*dab_dsym

    rv = S.Zero
    if b is not None:
        rv += _do(f, b)
    if a is not None:
        rv -= _do(f, a)
    if len(limit) == 1 and sym == x:
        # the dummy variable *is* also the real-world variable
        arg = f
        rv += arg
    else:
        # the dummy variable might match sym but it's
        # only a dummy and the actual variable is determined
        # by the limits, so mask off the variable of integration
        # while differentiating
        u = Dummy('u')
        arg = f.subs(x, u).diff(sym).subs(u, x)
        if arg:
            rv += self.func(arg, Tuple(x, a, b))
    return rv
def _eval_integral(self, f, x, meijerg=None, risch=None, manual=None,
                   heurisch=None, conds='piecewise', final=None):
    """
    Calculate the anti-derivative to the function f(x).

    Explanation
    ===========

    The following algorithms are applied (roughly in this order):

    1. Simple heuristics (based on pattern matching and integral table):

       - most frequently used functions (e.g. polynomials, products of
         trig functions)

    2. Integration of rational functions:

       - A complete algorithm for integrating rational functions is
         implemented (the Lazard-Rioboo-Trager algorithm). The algorithm
         also uses the partial fraction decomposition algorithm
         implemented in apart() as a preprocessor to make this process
         faster. Note that the integral of a rational function is always
         elementary, but in general, it may include a RootSum.

    3. Full Risch algorithm:

       - The Risch algorithm is a complete decision
         procedure for integrating elementary functions, which means that
         given any elementary function, it will either compute an
         elementary antiderivative, or else prove that none exists.
         Currently, part of transcendental case is implemented, meaning
         elementary integrals containing exponentials, logarithms, and
         (soon!) trigonometric functions can be computed. The algebraic
         case, e.g., functions containing roots, is much more difficult
         and is not implemented yet.

       - If the routine fails (because the integrand is not elementary, or
         because a case is not implemented yet), it continues on to the
         next algorithms below. If the routine proves that the integrals
         is nonelementary, it still moves on to the algorithms below,
         because we might be able to find a closed-form solution in terms
         of special functions. If risch=True, however, it will stop here.

    4. The Meijer G-Function algorithm:

       - This algorithm works by first rewriting the integrand in terms of
         very general Meijer G-Function (meijerg in SymPy), integrating
         it, and then rewriting the result back, if possible. This
         algorithm is particularly powerful for definite integrals (which
         is actually part of a different method of Integral), since it can
         compute closed-form solutions of definite integrals even when no
         closed-form indefinite integral exists. But it also is capable
         of computing many indefinite integrals as well.

       - Another advantage of this method is that it can use some results
         about the Meijer G-Function to give a result in terms of a
         Piecewise expression, which allows to express conditionally
         convergent integrals.

       - Setting meijerg=True will cause integrate() to use only this
         method.

    5. The "manual integration" algorithm:

       - This algorithm tries to mimic how a person would find an
         antiderivative by hand, for example by looking for a
         substitution or applying integration by parts. This algorithm
         does not handle as many integrands but can return results in a
         more familiar form.

       - Sometimes this algorithm can evaluate parts of an integral; in
         this case integrate() will try to evaluate the rest of the
         integrand using the other methods here.

       - Setting manual=True will cause integrate() to use only this
         method.

    6. The Heuristic Risch algorithm:

       - This is a heuristic version of the Risch algorithm, meaning that
         it is not deterministic. This is tried as a last resort because
         it can be very slow. It is still used because not enough of the
         full Risch algorithm is implemented, so that there are still some
         integrals that can only be computed using this method. The goal
         is to implement enough of the Risch and Meijer G-function methods
         so that this can be deleted.

         Setting heurisch=True will cause integrate() to use only this
         method. Set heurisch=False to not use it.

    Returns ``None`` when no method succeeds, signalling the caller to
    leave the integral unevaluated.
    """
    from sympy.integrals.deltafunctions import deltaintegrate
    from sympy.integrals.singularityfunctions import singularityintegrate
    from sympy.integrals.heurisch import heurisch as heurisch_, heurisch_wrapper
    from sympy.integrals.rationaltools import ratint
    from sympy.integrals.risch import risch_integrate

    if risch:
        try:
            return risch_integrate(f, x, conds=conds)
        except NotImplementedError:
            return None

    if manual:
        try:
            result = manualintegrate(f, x)
            if result is not None and result.func != Integral:
                return result
        except (ValueError, PolynomialError):
            pass

    eval_kwargs = dict(meijerg=meijerg, risch=risch, manual=manual,
                       heurisch=heurisch, conds=conds)

    # if it is a poly(x) then let the polynomial integrate itself (fast)
    #
    # It is important to make this check first, otherwise the other code
    # will return a sympy expression instead of a Polynomial.
    #
    # see Polynomial for details.
    if isinstance(f, Poly) and not (manual or meijerg or risch):
        SymPyDeprecationWarning(
            feature="Using integrate/Integral with Poly",
            issue=18613,
            deprecated_since_version="1.6",
            useinstead="the as_expr or integrate methods of Poly").warn()
        return f.integrate(x)

    # Piecewise antiderivatives need to call special integrate.
    if isinstance(f, Piecewise):
        return f.piecewise_integrate(x, **eval_kwargs)

    # let's cut it short if `f` does not depend on `x`; if
    # x is only a dummy, that will be handled below
    if not f.has(x):
        return f*x

    # try to convert to poly(x) and then integrate if successful (fast)
    poly = f.as_poly(x)
    if poly is not None and not (manual or meijerg or risch):
        return poly.integrate().as_expr()

    if risch is not False:
        try:
            result, i = risch_integrate(f, x, separate_integral=True,
                                        conds=conds)
        except NotImplementedError:
            pass
        else:
            if i:
                # There was a nonelementary integral. Try integrating it.
                # if no part of the NonElementaryIntegral is integrated by
                # the Risch algorithm, then use the original function to
                # integrate, instead of re-written one
                if result == 0:
                    from sympy.integrals.risch import NonElementaryIntegral
                    return NonElementaryIntegral(f, x).doit(risch=False)
                else:
                    return result + i.doit(risch=False)
            else:
                return result

    # since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ...
    # we are going to handle Add terms separately,
    # if `f` is not Add -- we only have one term

    # Note that in general, this is a bad idea, because Integral(g1) +
    # Integral(g2) might not be computable, even if Integral(g1 + g2) is.
    # For example, Integral(x**x + x**x*log(x)).  But many heuristics only
    # work term-wise.  So we compute this step last, after trying
    # risch_integrate.  We also try risch_integrate again in this loop,
    # because maybe the integral is a sum of an elementary part and a
    # nonelementary part (like erf(x) + exp(x)).  risch_integrate() is
    # quite fast, so this is acceptable.
    parts = []
    args = Add.make_args(f)
    for g in args:
        coeff, g = g.as_independent(x)

        # g(x) = const
        if g is S.One and not meijerg:
            parts.append(coeff*x)
            continue

        # g(x) = expr + O(x**n)
        order_term = g.getO()
        if order_term is not None:
            h = self._eval_integral(g.removeO(), x, **eval_kwargs)
            if h is not None:
                h_order_expr = self._eval_integral(order_term.expr, x, **eval_kwargs)
                if h_order_expr is not None:
                    h_order_term = order_term.func(
                        h_order_expr, *order_term.variables)
                    parts.append(coeff*(h + h_order_term))
                    continue

            # NOTE: if there is O(x**n) and we fail to integrate then
            # there is no point in trying other methods because they
            # will fail, too.
            return None

        #               c
        # g(x) = (a*x+b)
        if g.is_Pow and not g.exp.has(x) and not meijerg:
            a = Wild('a', exclude=[x])
            b = Wild('b', exclude=[x])

            M = g.base.match(a*x + b)

            if M is not None:
                if g.exp == -1:
                    h = log(g.base)
                elif conds != 'piecewise':
                    h = g.base**(g.exp + 1) / (g.exp + 1)
                else:
                    h1 = log(g.base)
                    h2 = g.base**(g.exp + 1) / (g.exp + 1)
                    h = Piecewise((h2, Ne(g.exp, -1)), (h1, True))

                parts.append(coeff * h / M[a])
                continue

        #        poly(x)
        # g(x) = -------
        #        poly(x)
        if g.is_rational_function(x) and not (manual or meijerg or risch):
            parts.append(coeff * ratint(g, x))
            continue

        if not (manual or meijerg or risch):
            # g(x) = Mul(trig)
            h = trigintegrate(g, x, conds=conds)
            if h is not None:
                parts.append(coeff * h)
                continue

            # g(x) has at least a DiracDelta term
            h = deltaintegrate(g, x)
            if h is not None:
                parts.append(coeff * h)
                continue

            # g(x) has at least a Singularity Function term
            h = singularityintegrate(g, x)
            if h is not None:
                parts.append(coeff * h)
                continue

        # Try risch again.
        if risch is not False:
            try:
                h, i = risch_integrate(g, x,
                                       separate_integral=True, conds=conds)
            except NotImplementedError:
                h = None
            else:
                if i:
                    h = h + i.doit(risch=False)

                parts.append(coeff*h)
                continue

        # fall back to heurisch
        if heurisch is not False:
            try:
                if conds == 'piecewise':
                    h = heurisch_wrapper(g, x, hints=[])
                else:
                    h = heurisch_(g, x, hints=[])
            except PolynomialError:
                # XXX: this exception means there is a bug in the
                # implementation of heuristic Risch integration
                # algorithm.
                h = None
        else:
            h = None

        if meijerg is not False and h is None:
            # rewrite using G functions
            try:
                h = meijerint_indefinite(g, x)
            except NotImplementedError:
                from sympy.integrals.meijerint import _debug
                _debug('NotImplementedError from meijerint_definite')
            if h is not None:
                parts.append(coeff * h)
                continue

        if h is None and manual is not False:
            try:
                result = manualintegrate(g, x)
                if result is not None and not isinstance(result, Integral):
                    if result.has(Integral) and not manual:
                        # Try to have other algorithms do the integrals
                        # manualintegrate can't handle,
                        # unless we were asked to use manual only.
                        # Keep the rest of eval_kwargs in case another
                        # method was set to False already.
                        # Make a shallow copy: assigning would alias
                        # eval_kwargs, and mutating it here would disable
                        # manual integration for every later term of this
                        # Add loop and for recursive calls.
                        new_eval_kwargs = dict(eval_kwargs)
                        new_eval_kwargs["manual"] = False
                        new_eval_kwargs["final"] = False
                        result = result.func(*[
                            arg.doit(**new_eval_kwargs) if
                            arg.has(Integral) else arg
                            for arg in result.args
                        ]).expand(multinomial=False,
                                  log=False,
                                  power_exp=False,
                                  power_base=False)
                    if not result.has(Integral):
                        parts.append(coeff * result)
                        continue
            except (ValueError, PolynomialError):
                # can't handle some SymPy expressions
                pass

        # if we failed maybe it was because we had
        # a product that could have been expanded,
        # so let's try an expansion of the whole
        # thing before giving up; we don't try this
        # at the outset because there are things
        # that cannot be solved unless they are
        # NOT expanded e.g., x**x*(1+log(x)). There
        # should probably be a checker somewhere in this
        # routine to look for such cases and try to do
        # collection on the expressions if they are already
        # in an expanded form
        if not h and len(args) == 1:
            f = sincos_to_sum(f).expand(mul=True, deep=False)
            if f.is_Add:
                # Note: risch will be identical on the expanded
                # expression, but maybe it will be able to pick out parts,
                # like x*(exp(x) + erf(x)).
                return self._eval_integral(f, x, **eval_kwargs)

        if h is not None:
            parts.append(coeff * h)
        else:
            return None

    return Add(*parts)
def _eval_lseries(self, x, logx=None, cdir=0):
    """Yield the termwise integral of the integrand's lazy series in ``x``."""
    dummy_expr = self.as_dummy()
    # If x appears inside some integration limit, expand with respect to
    # that limit's (dummy) integration variable rather than x itself.
    series_var = next(
        (lim[0] for lim in dummy_expr.limits if x in lim[1:]), x)
    for piece in dummy_expr.function.lseries(series_var, logx):
        yield integrate(piece, *dummy_expr.limits)
def _eval_nseries(self, x, n, logx=None, cdir=0):
    """Compute the nseries by expanding the integrand, then integrating termwise."""
    dummy_expr = self.as_dummy()
    # If x appears inside some integration limit, expand with respect to
    # that limit's (dummy) integration variable rather than x itself.
    series_var = next(
        (lim[0] for lim in dummy_expr.limits if x in lim[1:]), x)
    series = dummy_expr.function.nseries(x=series_var, n=n, logx=logx)
    terms, order_terms = series.as_coeff_add(Order)
    # Map the order terms back from the dummy to x; integrating an O(...)
    # term contributes that order multiplied by the expansion variable.
    order_sum = Add(*[o.subs(series_var, x) for o in order_terms])
    return integrate(terms, *dummy_expr.limits) + order_sum*x
def _eval_as_leading_term(self, x, logx=None, cdir=0):
    """Integrate the first nonzero term of the integrand's series in ``x``."""
    # Walk the lazy series of the integrand until a nonzero term appears;
    # if every yielded term is zero, the last one (zero) is integrated.
    for term in self.args[0].lseries(x):
        if term != 0:
            return integrate(term, *self.args[1:])
    return integrate(term, *self.args[1:])
def _eval_simplify(self, **kwargs):
    """Simplify by factoring out x-independent terms, then simplifying the parts."""
    from sympy.core.exprtools import factor_terms
    from sympy.simplify.simplify import simplify

    factored = factor_terms(self)
    if not isinstance(factored, Integral):
        # Factoring produced something other than a bare Integral (e.g. a
        # constant times an Integral); simplify that expression instead.
        return factored.simplify(**kwargs)
    simplified_args = [simplify(arg, **kwargs) for arg in factored.args]
    return factored.func(*simplified_args)
def as_sum(self, n=None, method="midpoint", evaluate=True):
    """
    Approximates a definite integral by a sum.

    Parameters
    ==========

    n :
        The number of subintervals to use, optional.
    method :
        One of: 'left', 'right', 'midpoint', 'trapezoid'.
    evaluate : bool
        If False, returns an unevaluated Sum expression. The default
        is True, evaluate the sum.

    Notes
    =====

    These methods of approximate integration are described in [1].

    Examples
    ========

    >>> from sympy import sin, sqrt
    >>> from sympy.abc import x, n
    >>> from sympy.integrals import Integral
    >>> e = Integral(sin(x), (x, 3, 7))
    >>> e
    Integral(sin(x), (x, 3, 7))

    For demonstration purposes, this interval will only be split into 2
    regions, bounded by [3, 5] and [5, 7].

    The left-hand rule uses function evaluations at the left of each
    interval:

    >>> e.as_sum(2, 'left')
    2*sin(5) + 2*sin(3)

    The midpoint rule uses evaluations at the center of each interval:

    >>> e.as_sum(2, 'midpoint')
    2*sin(4) + 2*sin(6)

    The right-hand rule uses function evaluations at the right of each
    interval:

    >>> e.as_sum(2, 'right')
    2*sin(5) + 2*sin(7)

    The trapezoid rule uses function evaluations on both sides of the
    intervals. This is equivalent to taking the average of the left and
    right hand rule results:

    >>> e.as_sum(2, 'trapezoid')
    2*sin(5) + sin(3) + sin(7)
    >>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == _
    True

    Here, the discontinuity at x = 0 can be avoided by using the
    midpoint or right-hand method:

    >>> e = Integral(1/sqrt(x), (x, 0, 1))
    >>> e.as_sum(5).n(4)
    1.730
    >>> e.as_sum(10).n(4)
    1.809
    >>> e.doit().n(4)  # the actual value is 2
    2.000

    The left- or trapezoid method will encounter the discontinuity and
    return infinity:

    >>> e.as_sum(5, 'left')
    zoo

    The number of intervals can be symbolic. If omitted, a dummy symbol
    will be used for it.

    >>> e = Integral(x**2, (x, 0, 2))
    >>> e.as_sum(n, 'right').expand()
    8/3 + 4/n + 4/(3*n**2)

    This shows that the midpoint rule is more accurate, as its error
    term decays as the square of n:

    >>> e.as_sum(method='midpoint').expand()
    8/3 - 2/(3*_n**2)

    A symbolic sum is returned with evaluate=False:

    >>> e.as_sum(n, 'midpoint', evaluate=False)
    2*Sum((2*_k/n - 1/n)**2, (_k, 1, n))/n

    See Also
    ========

    Integral.doit : Perform the integration using any hints

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Riemann_sum#Methods
    """
    from sympy.concrete.summations import Sum
    limits = self.limits
    if len(limits) > 1:
        raise NotImplementedError(
            "Multidimensional midpoint rule not implemented yet")
    else:
        limit = limits[0]
        # must be a definite integral (x, a, b) with finite endpoints;
        # ``is False`` keeps symbolic (unknown-finiteness) endpoints allowed
        if (len(limit) != 3 or limit[1].is_finite is False or
                limit[2].is_finite is False):
            raise ValueError("Expecting a definite integral over "
                             "a finite interval.")
    if n is None:
        # no count given: use a symbolic number of subintervals
        n = Dummy('n', integer=True, positive=True)
    else:
        n = sympify(n)
    if (n.is_positive is False or n.is_integer is False or
            n.is_finite is False):
        raise ValueError("n must be a positive integer, got %s" % n)
    x, a, b = limit
    # dx: common width of each of the n subintervals
    dx = (b - a)/n
    k = Dummy('k', integer=True, positive=True)
    f = self.function

    if method == "left":
        # sample at the left endpoint of the k-th subinterval
        result = dx*Sum(f.subs(x, a + (k-1)*dx), (k, 1, n))
    elif method == "right":
        # sample at the right endpoint of the k-th subinterval
        result = dx*Sum(f.subs(x, a + k*dx), (k, 1, n))
    elif method == "midpoint":
        # sample at the midpoint of the k-th subinterval
        result = dx*Sum(f.subs(x, a + k*dx - dx/2), (k, 1, n))
    elif method == "trapezoid":
        # endpoints weighted 1/2, interior points weighted 1
        result = dx*((f.subs(x, a) + f.subs(x, b))/2 +
                     Sum(f.subs(x, a + k*dx), (k, 1, n - 1)))
    else:
        raise ValueError("Unknown method %s" % method)
    return result.doit() if evaluate else result
def principal_value(self, **kwargs):
    """
    Compute the Cauchy Principal Value of the definite integral of a real function in the given interval
    on the real axis.

    Explanation
    ===========

    In mathematics, the Cauchy principal value, is a method for assigning values to certain improper
    integrals which would otherwise be undefined.

    Examples
    ========

    >>> from sympy import oo
    >>> from sympy.integrals.integrals import Integral
    >>> from sympy.abc import x
    >>> Integral(x+1, (x, -oo, oo)).principal_value()
    oo
    >>> f = 1 / (x**3)
    >>> Integral(f, (x, -oo, oo)).principal_value()
    0
    >>> Integral(f, (x, -10, 10)).principal_value()
    0
    >>> Integral(f, (x, -10, oo)).principal_value() + Integral(f, (x, -oo, 10)).principal_value()
    0

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Cauchy_principal_value
    .. [2] http://mathworld.wolfram.com/CauchyPrincipalValue.html
    """
    from sympy.calculus import singularities
    # exactly one definite limit (x, a, b) is required
    if len(self.limits) != 1 or len(list(self.limits[0])) != 3:
        raise ValueError("You need to insert a variable, lower_limit, and upper_limit correctly to calculate "
                         "cauchy's principal value")
    x, a, b = self.limits[0]
    if not (a.is_comparable and b.is_comparable and a <= b):
        raise ValueError("The lower_limit must be smaller than or equal to the upper_limit to calculate "
                         "cauchy's principal value. Also, a and b need to be comparable.")
    if a == b:
        return 0
    r = Dummy('r')
    f = self.function
    # comparable singularities of f lying within [a, b]
    singularities_list = [s for s in singularities(f, x) if s.is_comparable and a <= s <= b]
    # singularities at the endpoints make the principal value undefined
    for i in singularities_list:
        if (i == b) or (i == a):
            raise ValueError(
                'The principal value is not defined in the given interval due to singularity at %d.' % (i))
    # antiderivative of the integrand
    F = integrate(f, x, **kwargs)
    if F.has(Integral):
        # could not find a closed-form antiderivative
        return self
    # NOTE(review): this identity test presumes -oo and oo are singleton
    # objects so ``is`` matches any representation of them -- confirm.
    if a is -oo and b is oo:
        # doubly infinite interval: take the symmetric limit
        I = limit(F - F.subs(x, -x), x, oo)
    else:
        # one-sided limits at the endpoints
        I = limit(F, x, b, '-') - limit(F, x, a, '+')
    # add the symmetric contribution around each interior singularity
    for s in singularities_list:
        I += limit(((F.subs(x, s - r)) - F.subs(x, s + r)), r, 0, '+')
    return I
def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs):
    """integrate(f, var, ...)

    Explanation
    ===========

    Compute definite or indefinite integral of one or more variables
    using Risch-Norman algorithm and table lookup. This procedure is
    able to handle elementary algebraic and transcendental functions
    and also a huge class of special functions, including Airy,
    Bessel, Whittaker and Lambert.

    var can be:

    - a symbol -- indefinite integration
    - a tuple (symbol, a) -- indefinite integration with result
      given with `a` replacing `symbol`
    - a tuple (symbol, a, b) -- definite integration

    Several variables can be specified, in which case the result is
    multiple integration. (If var is omitted and the integrand is
    univariate, the indefinite integral in that variable will be performed.)

    Indefinite integrals are returned without terms that are independent
    of the integration variables. (see examples)

    Definite improper integrals often entail delicate convergence
    conditions. Pass conds='piecewise', 'separate' or 'none' to have
    these returned, respectively, as a Piecewise function, as a separate
    result (i.e. result will be a tuple), or not at all (default is
    'piecewise').

    **Strategy**

    SymPy uses various approaches to definite integration. One method is to
    find an antiderivative for the integrand, and then use the fundamental
    theorem of calculus. Various functions are implemented to integrate
    polynomial, rational and trigonometric functions, and integrands
    containing DiracDelta terms.

    SymPy also implements the part of the Risch algorithm, which is a decision
    procedure for integrating elementary functions, i.e., the algorithm can
    either find an elementary antiderivative, or prove that one does not
    exist. There is also a (very successful, albeit somewhat slow) general
    implementation of the heuristic Risch algorithm. This algorithm will
    eventually be phased out as more of the full Risch algorithm is
    implemented. See the docstring of Integral._eval_integral() for more
    details on computing the antiderivative using algebraic methods.

    The option risch=True can be used to use only the (full) Risch algorithm.
    This is useful if you want to know if an elementary function has an
    elementary antiderivative. If the indefinite Integral returned by this
    function is an instance of NonElementaryIntegral, that means that the
    Risch algorithm has proven that integral to be non-elementary. Note that
    by default, additional methods (such as the Meijer G method outlined
    below) are tried on these integrals, as they may be expressible in terms
    of special functions, so if you only care about elementary answers, use
    risch=True. Also note that an unevaluated Integral returned by this
    function is not necessarily a NonElementaryIntegral, even with risch=True,
    as it may just be an indication that the particular part of the Risch
    algorithm needed to integrate that function is not yet implemented.

    Another family of strategies comes from re-writing the integrand in
    terms of so-called Meijer G-functions. Indefinite integrals of a
    single G-function can always be computed, and the definite integral
    of a product of two G-functions can be computed from zero to
    infinity. Various strategies are implemented to rewrite integrands
    as G-functions, and use this information to compute integrals (see
    the ``meijerint`` module).

    The option manual=True can be used to use only an algorithm that tries
    to mimic integration by hand. This algorithm does not handle as many
    integrands as the other algorithms implemented but may return results in
    a more familiar form. The ``manualintegrate`` module has functions that
    return the steps used (see the module docstring for more information).

    In general, the algebraic methods work best for computing
    antiderivatives of (possibly complicated) combinations of elementary
    functions. The G-function methods work best for computing definite
    integrals from zero to infinity of moderately complicated
    combinations of special functions, or indefinite integrals of very
    simple combinations of special functions.

    The strategy employed by the integration code is as follows:

    - If computing a definite integral, and both limits are real,
      and at least one limit is +- oo, try the G-function method of
      definite integration first.
    - Try to find an antiderivative, using all available methods, ordered
      by performance (that is try fastest method first, slowest last; in
      particular polynomial integration is tried first, Meijer
      G-functions second to last, and heuristic Risch last).
    - If still not successful, try G-functions irrespective of the
      limits.

    The option meijerg=True, False, None can be used to, respectively:
    always use G-function methods and no others, never use G-function
    methods, or use all available methods (in order as described above).
    It defaults to None.

    Examples
    ========

    >>> from sympy import integrate, log, exp, oo
    >>> from sympy.abc import a, x, y

    >>> integrate(x*y, x)
    x**2*y/2

    >>> integrate(log(x), x)
    x*log(x) - x

    >>> integrate(log(x), (x, 1, a))
    a*log(a) - a + 1

    >>> integrate(x)
    x**2/2

    Terms that are independent of x are dropped by indefinite integration:

    >>> from sympy import sqrt
    >>> integrate(sqrt(1 + x), (x, 0, x))
    2*(x + 1)**(3/2)/3 - 2/3

    >>> integrate(sqrt(1 + x), x)
    2*(x + 1)**(3/2)/3

    >>> integrate(x*y)
    Traceback (most recent call last):
    ...
    ValueError: specify integration variables to integrate x*y

    Note that ``integrate(x)`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.

    >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'
    Piecewise((gamma(a + 1), re(a) > -1),
    (Integral(x**a*exp(-x), (x, 0, oo)), True))

    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')
    gamma(a + 1)

    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')
    (gamma(a + 1), -re(a) < 1)

    See Also
    ========

    Integral, Integral.doit
    """
    # forward the method-selection flags to doit(); deep=False because the
    # Integral constructed here is already the outermost expression
    doit_flags = {
        'deep': False,
        'meijerg': meijerg,
        'conds': conds,
        'risch': risch,
        'heurisch': heurisch,
        'manual': manual
    }
    integral = Integral(*args, **kwargs)

    if isinstance(integral, Integral):
        return integral.doit(**doit_flags)
    else:
        # Integral(...) auto-evaluated to some other expression; evaluate
        # any Integral sub-expressions that remain among its arguments
        new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a
                    for a in integral.args]
        return integral.func(*new_args)
def line_integrate(field, curve, vars):
    """line_integrate(field, Curve, variables)

    Compute the line integral.

    Examples
    ========

    >>> from sympy import Curve, line_integrate, E, ln
    >>> from sympy.abc import x, y, t
    >>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2)))
    >>> line_integrate(x + y, C, [x, y])
    3*sqrt(2)

    See Also
    ========

    sympy.integrals.integrals.integrate, Integral
    """
    from sympy.geometry import Curve

    F = sympify(field)
    if not F:
        raise ValueError(
            "Expecting function specifying field as first argument.")
    if not isinstance(curve, Curve):
        raise ValueError("Expecting Curve entity as second argument.")
    if not is_sequence(vars):
        raise ValueError("Expecting ordered iterable for variables.")
    if len(curve.functions) != len(vars):
        raise ValueError("Field variable size does not match curve dimension.")
    if curve.parameter in vars:
        raise ValueError("Curve parameter clashes with field parameters.")

    # Substitute the curve's parametric functions into the field, while
    # accumulating the squared speed |r'(t)|**2 for the arc-length element.
    integrand = F
    speed_sq = 0
    for sym, func in zip(vars, curve.functions):
        dfdt = diff(func, curve.parameter)
        speed_sq = speed_sq + (dfdt * dfdt)
        integrand = integrand.subs(sym, func)
    # F(r(t)) * |r'(t)| dt, integrated over the curve's parameter range
    integrand = integrand * sqrt(speed_sq)
    return Integral(integrand, curve.limits).doit(deep=False)
### Property function dispatching ###

@shape.register(Integral)
def _(expr):
    # An Integral has the array shape of its integrand: integrating over
    # scalar limits does not change the shape of the expression.
    return shape(expr.function)
# take integration symbol out of free since it will be replaced # with the free symbols in the limits # add in the new symbols Performs a change of variables from `x` to `u` using the relationship given by `x` and `u` which will define the transformations `f` and `F` (which are inverses of each other) as follows: 1) If `x` is a Symbol (which is a variable of integration) then `u` will be interpreted as some function, f(u), with inverse F(u). This, in effect, just makes the substitution of x with f(x). 2) If `u` is a Symbol then `x` will be interpreted as some function, F(x), with inverse f(u). This is commonly referred to as u-substitution. Once f and F have been identified, the transformation is made as follows: .. math:: \int_a^b x \mathrm{d}x \rightarrow \int_{F(a)}^{F(b)} f(x) \frac{\mathrm{d}}{\mathrm{d}x} where `F(x)` is the inverse of `f(x)` and the limits and integrand have been corrected so as to retain the same value after integration. Notes ===== The mappings, F(x) or f(u), must lead to a unique integral. Linear or rational linear expression, ``2*x``, ``1/x`` and ``sqrt(x)``, will always work; quadratic expressions like ``x**2 - 1`` are acceptable as long as the resulting integrand does not depend on the sign of the solutions (see examples). The integral will be returned unchanged if ``x`` is not a variable of integration. ``x`` must be (or contain) only one of of the integration variables. If ``u`` has more than one free symbol then it should be sent as a tuple (``u``, ``uvar``) where ``uvar`` identifies which variable is replacing the integration variable. XXX can it contain another integration variable? 
Examples ======== >>> from sympy.abc import a, x, u >>> from sympy import Integral, cos, sqrt >>> i = Integral(x*cos(x**2 - 1), (x, 0, 1)) transform can change the variable of integration >>> i.transform(x, u) Integral(u*cos(u**2 - 1), (u, 0, 1)) transform can perform u-substitution as long as a unique integrand is obtained: >>> i.transform(x**2 - 1, u) Integral(cos(u)/2, (u, -1, 0)) This attempt fails because x = +/-sqrt(u + 1) and the sign does not cancel out of the integrand: >>> Integral(cos(x**2 - 1), (x, 0, 1)).transform(x**2 - 1, u) Traceback (most recent call last): ... ValueError: The mapping between F(x) and f(u) did not give a unique integrand. transform can do a substitution. Here, the previous result is transformed back into the original expression using "u-substitution": >>> ui = _ >>> _.transform(sqrt(u + 1), x) == i True We can accomplish the same with a regular substitution: >>> ui.transform(u, x**2 - 1) == i True If the `x` does not contain a symbol of integration then the integral will be returned unchanged. Integral `i` does not have an integration variable `a` so no change is made: >>> i.transform(a, x) == i True When `u` has more than one free symbol the symbol that is replacing `x` must be identified by passing `u` as a tuple: >>> Integral(x, (x, 0, 1)).transform(x, (u + a, u)) Integral(a + u, (u, -a, 1 - a)) >>> Integral(x, (x, 0, 1)).transform(x, (u + a, a)) Integral(a + u, (a, -u, 1 - u)) See Also ======== sympy.concrete.expr_with_limits.ExprWithLimits.variables : Lists the integration variables as_dummy : Replace integration variables with dummy ones f(u) cannot be a constant When f(u) has more than one free symbol, the one replacing x must be identified: pass f(u) as (f(u), u) Expecting a tuple (expr, symbol) where symbol identified a free symbol in expr, but symbol is not in expr's free symbols. 
# This probably never evaluates to True Expecting a tuple (expr, symbol) but didn't get a symbol; got %s u must contain the same variable as in x or a variable that is not already an integration variable The mapping between F(x) and f(u) did not give a unique integrand. replace d with a, using subs if possible, otherwise limit where sign of b is considered replace d with a, using subs if possible, otherwise limit where sign of b is considered The mapping between F(x) and f(u) did not give a unique limit. Perform the integration using any hints given. Examples ======== >>> from sympy import Piecewise, S >>> from sympy.abc import x, t >>> p = x**2 + Piecewise((0, x/t < 0), (1, True)) >>> p.integrate((t, S(4)/5, 1), (x, -1, 1)) 1/3 See Also ======== sympy.integrals.trigonometry.trigintegrate sympy.integrals.heurisch.heurisch sympy.integrals.rationaltools.ratint as_sum : Approximate the integral using a sum # check for the trivial zero # hacks to handle integrals of # nested summations # now compute and check the function # hacks to handle special cases # There is no trivial answer and special handling # is done so continue # first make sure any definite limits have integration # variables with matching assumptions # when separate=True # continue with existing assumptions # ulj = free symbols of any undone limits' upper and lower limits # compute uli, the free symbols in the # Upper and Lower limits of limit I # this integral can be done as long as there is no blocking # limit that has been undone. An undone limit is blocking if # it contains an integration variable that is in this limit's # upper or lower free symbols or vice versa # some improper integrals are better off with Abs # There are a number of tradeoffs in using the # Meijer G method. It can sometimes be a lot faster # than other methods, and sometimes slower. And # there are certain types of integrals for which it # is more likely to work than others. 
These # heuristics are incorporated in deciding what # integration methods to try, in what order. See the # integrate() docstring for details. conds=separate not supported in multiple integrals # If the special meijerg code did not succeed in # finding a definite integral, then the code using # meijerint_indefinite will not either (it might # find an antiderivative, but the answer is likely # to be nonsensical). Thus if we are requested to # only use Meijer G-function methods, we give up at # this stage. Otherwise we just disable G-function # methods. # dotit may be iterated but floor terms making atan and acot # continous should only be added in the final round # Checking `atan_arg` to be linear combination of `tan` or `cot` # The coefficient of `tan` should be constant # The coefficient of `cot` should be constant # _eval_interval for integrals with # (constant) factors # a single indefinite integral is assumed # This can happen if _eval_interval depends in a # complicated way on limits that cannot be computed Evaluate the derivative of the current Integral object by differentiating under the integral sign [1], using the Fundamental Theorem of Calculus [2] when possible. Explanation =========== Whenever an Integral is encountered that is equivalent to zero or has an integrand that is independent of the variable of integration those integrals are performed. All others are returned as Integral instances which can be resolved with doit() (provided they are integrable). References ========== .. [1] https://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign .. 
[2] https://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus Examples ======== >>> from sympy import Integral >>> from sympy.abc import x, y >>> i = Integral(x + y, y, (y, 1, x)) >>> i.diff(x) Integral(x + y, (y, x)) + Integral(1, y, (y, 1, x)) >>> i.doit().diff(x) == i.diff(x).doit() True >>> i.diff(y) 0 The previous must be true since there is no y in the evaluated integral: >>> i.free_symbols {x} >>> i.doit() 2*x**3/3 - x/2 - 1/6 # differentiate under the integral sign; we do not # check for regularity conditions (TODO), see issue 4215 # get limits and the function # the order matters if variables of integration appear in the limits # so work our way in from the outside to the inside. # f is the argument to an integral # assemble the pieces # the dummy variable *is* also the real-world variable # the dummy variable might match sym but it's # only a dummy and the actual variable is determined # by the limits, so mask off the variable of integration # while differentiating Calculate the anti-derivative to the function f(x). Explanation =========== The following algorithms are applied (roughly in this order): 1. Simple heuristics (based on pattern matching and integral table): - most frequently used functions (e.g. polynomials, products of trig functions) 2. Integration of rational functions: - A complete algorithm for integrating rational functions is implemented (the Lazard-Rioboo-Trager algorithm). The algorithm also uses the partial fraction decomposition algorithm implemented in apart() as a preprocessor to make this process faster. Note that the integral of a rational function is always elementary, but in general, it may include a RootSum. 3. Full Risch algorithm: - The Risch algorithm is a complete decision procedure for integrating elementary functions, which means that given any elementary function, it will either compute an elementary antiderivative, or else prove that none exists. 
Currently, part of transcendental case is implemented, meaning elementary integrals containing exponentials, logarithms, and (soon!) trigonometric functions can be computed. The algebraic case, e.g., functions containing roots, is much more difficult and is not implemented yet. - If the routine fails (because the integrand is not elementary, or because a case is not implemented yet), it continues on to the next algorithms below. If the routine proves that the integrals is nonelementary, it still moves on to the algorithms below, because we might be able to find a closed-form solution in terms of special functions. If risch=True, however, it will stop here. 4. The Meijer G-Function algorithm: - This algorithm works by first rewriting the integrand in terms of very general Meijer G-Function (meijerg in SymPy), integrating it, and then rewriting the result back, if possible. This algorithm is particularly powerful for definite integrals (which is actually part of a different method of Integral), since it can compute closed-form solutions of definite integrals even when no closed-form indefinite integral exists. But it also is capable of computing many indefinite integrals as well. - Another advantage of this method is that it can use some results about the Meijer G-Function to give a result in terms of a Piecewise expression, which allows to express conditionally convergent integrals. - Setting meijerg=True will cause integrate() to use only this method. 5. The "manual integration" algorithm: - This algorithm tries to mimic how a person would find an antiderivative by hand, for example by looking for a substitution or applying integration by parts. This algorithm does not handle as many integrands but can return results in a more familiar form. - Sometimes this algorithm can evaluate parts of an integral; in this case integrate() will try to evaluate the rest of the integrand using the other methods here. 
- Setting manual=True will cause integrate() to use only this method. 6. The Heuristic Risch algorithm: - This is a heuristic version of the Risch algorithm, meaning that it is not deterministic. This is tried as a last resort because it can be very slow. It is still used because not enough of the full Risch algorithm is implemented, so that there are still some integrals that can only be computed using this method. The goal is to implement enough of the Risch and Meijer G-function methods so that this can be deleted. Setting heurisch=True will cause integrate() to use only this method. Set heurisch=False to not use it. # if it is a poly(x) then let the polynomial integrate itself (fast) # # It is important to make this check first, otherwise the other code # will return a sympy expression instead of a Polynomial. # # see Polynomial for details. # Piecewise antiderivatives need to call special integrate. # let's cut it short if `f` does not depend on `x`; if # x is only a dummy, that will be handled below # try to convert to poly(x) and then integrate if successful (fast) # There was a nonelementary integral. Try integrating it. # if no part of the NonElementaryIntegral is integrated by # the Risch algorithm, then use the original function to # integrate, instead of re-written one # since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ... # we are going to handle Add terms separately, # if `f` is not Add -- we only have one term # Note that in general, this is a bad idea, because Integral(g1) + # Integral(g2) might not be computable, even if Integral(g1 + g2) is. # For example, Integral(x**x + x**x*log(x)). But many heuristics only # work term-wise. So we compute this step last, after trying # risch_integrate. We also try risch_integrate again in this loop, # because maybe the integral is a sum of an elementary part and a # nonelementary part (like erf(x) + exp(x)). risch_integrate() is # quite fast, so this is acceptable. 
# g(x) = const # g(x) = expr + O(x**n) # NOTE: if there is O(x**n) and we fail to integrate then # there is no point in trying other methods because they # will fail, too. # c # g(x) = (a*x+b) # poly(x) # g(x) = ------- # poly(x) # g(x) = Mul(trig) # g(x) has at least a DiracDelta term # g(x) has at least a Singularity Function term # Try risch again. # fall back to heurisch # XXX: this exception means there is a bug in the # implementation of heuristic Risch integration # algorithm. # rewrite using G functions # Try to have other algorithms do the integrals # manualintegrate can't handle, # unless we were asked to use manual only. # Keep the rest of eval_kwargs in case another # method was set to False already # can't handle some SymPy expressions # if we failed maybe it was because we had # a product that could have been expanded, # so let's try an expansion of the whole # thing before giving up; we don't try this # at the outset because there are things # that cannot be solved unless they are # NOT expanded e.g., x**x*(1+log(x)). There # should probably be a checker somewhere in this # routine to look for such cases and try to do # collection on the expressions if they are already # in an expanded form # Note: risch will be identical on the expanded # expression, but maybe it will be able to pick out parts, # like x*(exp(x) + erf(x)). Approximates a definite integral by a sum. Parameters ========== n : The number of subintervals to use, optional. method : One of: 'left', 'right', 'midpoint', 'trapezoid'. evaluate : bool If False, returns an unevaluated Sum expression. The default is True, evaluate the sum. Notes ===== These methods of approximate integration are described in [1]. 
Examples ======== >>> from sympy import sin, sqrt >>> from sympy.abc import x, n >>> from sympy.integrals import Integral >>> e = Integral(sin(x), (x, 3, 7)) >>> e Integral(sin(x), (x, 3, 7)) For demonstration purposes, this interval will only be split into 2 regions, bounded by [3, 5] and [5, 7]. The left-hand rule uses function evaluations at the left of each interval: >>> e.as_sum(2, 'left') 2*sin(5) + 2*sin(3) The midpoint rule uses evaluations at the center of each interval: >>> e.as_sum(2, 'midpoint') 2*sin(4) + 2*sin(6) The right-hand rule uses function evaluations at the right of each interval: >>> e.as_sum(2, 'right') 2*sin(5) + 2*sin(7) The trapezoid rule uses function evaluations on both sides of the intervals. This is equivalent to taking the average of the left and right hand rule results: >>> e.as_sum(2, 'trapezoid') 2*sin(5) + sin(3) + sin(7) >>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == _ True Here, the discontinuity at x = 0 can be avoided by using the midpoint or right-hand method: >>> e = Integral(1/sqrt(x), (x, 0, 1)) >>> e.as_sum(5).n(4) 1.730 >>> e.as_sum(10).n(4) 1.809 >>> e.doit().n(4) # the actual value is 2 2.000 The left- or trapezoid method will encounter the discontinuity and return infinity: >>> e.as_sum(5, 'left') zoo The number of intervals can be symbolic. If omitted, a dummy symbol will be used for it. >>> e = Integral(x**2, (x, 0, 2)) >>> e.as_sum(n, 'right').expand() 8/3 + 4/n + 4/(3*n**2) This shows that the midpoint rule is more accurate, as its error term decays as the square of n: >>> e.as_sum(method='midpoint').expand() 8/3 - 2/(3*_n**2) A symbolic sum is returned with evaluate=False: >>> e.as_sum(n, 'midpoint', evaluate=False) 2*Sum((2*_k/n - 1/n)**2, (_k, 1, n))/n See Also ======== Integral.doit : Perform the integration using any hints References ========== .. 
[1] https://en.wikipedia.org/wiki/Riemann_sum#Methods Compute the Cauchy Principal Value of the definite integral of a real function in the given interval on the real axis. Explanation =========== In mathematics, the Cauchy principal value, is a method for assigning values to certain improper integrals which would otherwise be undefined. Examples ======== >>> from sympy import oo >>> from sympy.integrals.integrals import Integral >>> from sympy.abc import x >>> Integral(x+1, (x, -oo, oo)).principal_value() oo >>> f = 1 / (x**3) >>> Integral(f, (x, -oo, oo)).principal_value() 0 >>> Integral(f, (x, -10, 10)).principal_value() 0 >>> Integral(f, (x, -10, oo)).principal_value() + Integral(f, (x, -oo, 10)).principal_value() 0 References ========== .. [1] https://en.wikipedia.org/wiki/Cauchy_principal_value .. [2] http://mathworld.wolfram.com/CauchyPrincipalValue.html integrate(f, var, ...) Explanation =========== Compute definite or indefinite integral of one or more variables using Risch-Norman algorithm and table lookup. This procedure is able to handle elementary algebraic and transcendental functions and also a huge class of special functions, including Airy, Bessel, Whittaker and Lambert. var can be: - a symbol -- indefinite integration - a tuple (symbol, a) -- indefinite integration with result given with `a` replacing `symbol` - a tuple (symbol, a, b) -- definite integration Several variables can be specified, in which case the result is multiple integration. (If var is omitted and the integrand is univariate, the indefinite integral in that variable will be performed.) Indefinite integrals are returned without terms that are independent of the integration variables. (see examples) Definite improper integrals often entail delicate convergence conditions. Pass conds='piecewise', 'separate' or 'none' to have these returned, respectively, as a Piecewise function, as a separate result (i.e. result will be a tuple), or not at all (default is 'piecewise'). 
**Strategy** SymPy uses various approaches to definite integration. One method is to find an antiderivative for the integrand, and then use the fundamental theorem of calculus. Various functions are implemented to integrate polynomial, rational and trigonometric functions, and integrands containing DiracDelta terms. SymPy also implements the part of the Risch algorithm, which is a decision procedure for integrating elementary functions, i.e., the algorithm can either find an elementary antiderivative, or prove that one does not exist. There is also a (very successful, albeit somewhat slow) general implementation of the heuristic Risch algorithm. This algorithm will eventually be phased out as more of the full Risch algorithm is implemented. See the docstring of Integral._eval_integral() for more details on computing the antiderivative using algebraic methods. The option risch=True can be used to use only the (full) Risch algorithm. This is useful if you want to know if an elementary function has an elementary antiderivative. If the indefinite Integral returned by this function is an instance of NonElementaryIntegral, that means that the Risch algorithm has proven that integral to be non-elementary. Note that by default, additional methods (such as the Meijer G method outlined below) are tried on these integrals, as they may be expressible in terms of special functions, so if you only care about elementary answers, use risch=True. Also note that an unevaluated Integral returned by this function is not necessarily a NonElementaryIntegral, even with risch=True, as it may just be an indication that the particular part of the Risch algorithm needed to integrate that function is not yet implemented. Another family of strategies comes from re-writing the integrand in terms of so-called Meijer G-functions. Indefinite integrals of a single G-function can always be computed, and the definite integral of a product of two G-functions can be computed from zero to infinity. 
Various strategies are implemented to rewrite integrands as G-functions, and use this information to compute integrals (see the ``meijerint`` module). The option manual=True can be used to use only an algorithm that tries to mimic integration by hand. This algorithm does not handle as many integrands as the other algorithms implemented but may return results in a more familiar form. The ``manualintegrate`` module has functions that return the steps used (see the module docstring for more information). In general, the algebraic methods work best for computing antiderivatives of (possibly complicated) combinations of elementary functions. The G-function methods work best for computing definite integrals from zero to infinity of moderately complicated combinations of special functions, or indefinite integrals of very simple combinations of special functions. The strategy employed by the integration code is as follows: - If computing a definite integral, and both limits are real, and at least one limit is +- oo, try the G-function method of definite integration first. - Try to find an antiderivative, using all available methods, ordered by performance (that is try fastest method first, slowest last; in particular polynomial integration is tried first, Meijer G-functions second to last, and heuristic Risch last). - If still not successful, try G-functions irrespective of the limits. The option meijerg=True, False, None can be used to, respectively: always use G-function methods and no others, never use G-function methods, or use all available methods (in order as described above). It defaults to None. 
Examples ======== >>> from sympy import integrate, log, exp, oo >>> from sympy.abc import a, x, y >>> integrate(x*y, x) x**2*y/2 >>> integrate(log(x), x) x*log(x) - x >>> integrate(log(x), (x, 1, a)) a*log(a) - a + 1 >>> integrate(x) x**2/2 Terms that are independent of x are dropped by indefinite integration: >>> from sympy import sqrt >>> integrate(sqrt(1 + x), (x, 0, x)) 2*(x + 1)**(3/2)/3 - 2/3 >>> integrate(sqrt(1 + x), x) 2*(x + 1)**(3/2)/3 >>> integrate(x*y) Traceback (most recent call last): ... ValueError: specify integration variables to integrate x*y Note that ``integrate(x)`` syntax is meant only for convenience in interactive sessions and should be avoided in library code. >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise' Piecewise((gamma(a + 1), re(a) > -1), (Integral(x**a*exp(-x), (x, 0, oo)), True)) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none') gamma(a + 1) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate') (gamma(a + 1), -re(a) < 1) See Also ======== Integral, Integral.doit line_integrate(field, Curve, variables) Compute the line integral. Examples ======== >>> from sympy import Curve, line_integrate, E, ln >>> from sympy.abc import x, y, t >>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2))) >>> line_integrate(x + y, C, [x, y]) 3*sqrt(2) See Also ======== sympy.integrals.integrals.integrate, Integral # Calculate derivatives for line parameter functions # F(r) -> F(r(t)) and finally F(r(t)*r'(t)) # ...arc length ### Property function dispatching ### | 1.896061 | 2 |
tests/providers/http/operators/test_http.py | gtossou/airflow | 2 | 6624404 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import requests_mock
from airflow.exceptions import AirflowException
from airflow.providers.http.operators.http import SimpleHttpOperator
@mock.patch.dict('os.environ', AIRFLOW_CONN_HTTP_EXAMPLE='http://www.example.com')
class TestSimpleHttpOp(unittest.TestCase):
@requests_mock.mock()
def test_response_in_logs(self, m):
"""
Test that when using SimpleHttpOperator with 'GET',
the log contains 'Example Domain' in it
"""
m.get('http://www.example.com', text='Example.com fake response')
operator = SimpleHttpOperator(
task_id='test_HTTP_op',
method='GET',
endpoint='/',
http_conn_id='HTTP_EXAMPLE',
log_response=True,
)
with mock.patch.object(operator.log, 'info') as mock_info:
operator.execute(None)
calls = [mock.call('Example.com fake response'), mock.call('Example.com fake response')]
mock_info.has_calls(calls)
@requests_mock.mock()
def test_response_in_logs_after_failed_check(self, m):
"""
Test that when using SimpleHttpOperator with log_response=True,
the response is logged even if request_check fails
"""
def response_check(response):
return response.text != 'invalid response'
m.get('http://www.example.com', text='invalid response')
operator = SimpleHttpOperator(
task_id='test_HTTP_op',
method='GET',
endpoint='/',
http_conn_id='HTTP_EXAMPLE',
log_response=True,
response_check=response_check,
)
with mock.patch.object(operator.log, 'info') as mock_info:
self.assertRaises(AirflowException, operator.execute, None)
calls = [mock.call('Calling HTTP method'), mock.call('invalid response')]
mock_info.assert_has_calls(calls, any_order=True)
@requests_mock.mock()
def test_filters_response(self, m):
m.get('http://www.example.com', json={'value': 5})
operator = SimpleHttpOperator(
task_id='test_HTTP_op',
method='GET',
endpoint='/',
http_conn_id='HTTP_EXAMPLE',
response_filter=lambda response: response.json(),
)
result = operator.execute(None)
assert result == {'value': 5}
| #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import requests_mock
from airflow.exceptions import AirflowException
from airflow.providers.http.operators.http import SimpleHttpOperator
@mock.patch.dict('os.environ', AIRFLOW_CONN_HTTP_EXAMPLE='http://www.example.com')
class TestSimpleHttpOp(unittest.TestCase):
@requests_mock.mock()
def test_response_in_logs(self, m):
"""
Test that when using SimpleHttpOperator with 'GET',
the log contains 'Example Domain' in it
"""
m.get('http://www.example.com', text='Example.com fake response')
operator = SimpleHttpOperator(
task_id='test_HTTP_op',
method='GET',
endpoint='/',
http_conn_id='HTTP_EXAMPLE',
log_response=True,
)
with mock.patch.object(operator.log, 'info') as mock_info:
operator.execute(None)
calls = [mock.call('Example.com fake response'), mock.call('Example.com fake response')]
mock_info.has_calls(calls)
@requests_mock.mock()
def test_response_in_logs_after_failed_check(self, m):
"""
Test that when using SimpleHttpOperator with log_response=True,
the response is logged even if request_check fails
"""
def response_check(response):
return response.text != 'invalid response'
m.get('http://www.example.com', text='invalid response')
operator = SimpleHttpOperator(
task_id='test_HTTP_op',
method='GET',
endpoint='/',
http_conn_id='HTTP_EXAMPLE',
log_response=True,
response_check=response_check,
)
with mock.patch.object(operator.log, 'info') as mock_info:
self.assertRaises(AirflowException, operator.execute, None)
calls = [mock.call('Calling HTTP method'), mock.call('invalid response')]
mock_info.assert_has_calls(calls, any_order=True)
@requests_mock.mock()
def test_filters_response(self, m):
m.get('http://www.example.com', json={'value': 5})
operator = SimpleHttpOperator(
task_id='test_HTTP_op',
method='GET',
endpoint='/',
http_conn_id='HTTP_EXAMPLE',
response_filter=lambda response: response.json(),
)
result = operator.execute(None)
assert result == {'value': 5}
| en | 0.836834 | # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. Test that when using SimpleHttpOperator with 'GET', the log contains 'Example Domain' in it Test that when using SimpleHttpOperator with log_response=True, the response is logged even if request_check fails | 2.274054 | 2 |
jupyter_notebook_config.py | DigiKlausur/e2x-nbtools | 0 | 6624405 | <gh_stars>0
from e2x_nbtools.contents.filecheckpoints import E2XFileCheckpoints
c = get_config()
c.ContentsManager.checkpoints_class = E2XFileCheckpoints
c.ContentsManager.checkpoints_class.number_of_checkpoints = 3
| from e2x_nbtools.contents.filecheckpoints import E2XFileCheckpoints
c = get_config()
c.ContentsManager.checkpoints_class = E2XFileCheckpoints
c.ContentsManager.checkpoints_class.number_of_checkpoints = 3 | none | 1 | 1.244221 | 1 | |
example.py | elebow/confuse | 257 | 6624406 | #!/usr/bin/env python
import example
example.main()
| #!/usr/bin/env python
import example
example.main()
| ru | 0.26433 | #!/usr/bin/env python | 1.277698 | 1 |
testsuite/utils/callinfo.py | itrofimow/yandex-taxi-testsuite | 0 | 6624407 | <filename>testsuite/utils/callinfo.py
import asyncio
import inspect
import typing
class BaseError(Exception):
"""Base exception class for this module."""
class CallQueueError(BaseError):
pass
class CallQueueEmptyError(CallQueueError):
"""Call queue is empty error."""
class CallQueueTimeoutError(CallQueueError):
"""Timed out while waiting for call."""
class AsyncCallQueue:
"""Function wrapper that puts information about function call into async
queue.
This class provides methods to wait/check function underlying function
calls.
"""
def __init__(self, func: typing.Callable):
self._func = func
self._name = func.__name__
self._queue: asyncio.Queue = asyncio.Queue()
self._get_callinfo = callinfo(func)
self._is_coro = inspect.iscoroutinefunction(func)
async def __call__(self, *args, **kwargs):
"""Call underlying function."""
try:
if self._is_coro:
return await self._func(*args, **kwargs)
return self._func(*args, **kwargs)
finally:
await self._queue.put((args, kwargs))
def flush(self) -> None:
"""Clear call queue."""
self._queue = asyncio.Queue()
@property
def has_calls(self) -> bool:
"""Returns ``True`` if call queue is not empty."""
return self.times_called > 0
@property
def times_called(self) -> int:
"""Returns call queue length."""
return self._queue.qsize()
def next_call(self) -> dict:
"""Pops call from queue and return its arguments dict.
Raises ``CallQueueError`` if queue is empty
"""
try:
return self._get_callinfo(*self._queue.get_nowait())
except asyncio.queues.QueueEmpty:
raise CallQueueEmptyError(
'No calls for %s() left in the queue' % (self._name,),
)
async def wait_call(self, timeout=10.0) -> dict:
"""Wait for fucntion to be called. Pops call from queue. Blocks if
it's empty.
:param timeout: timeout in seconds
Raises ``CallQueueTimeoutError`` if queue is empty for ``timeout``
seconds.
"""
try:
item = await asyncio.wait_for(self._queue.get(), timeout=timeout)
return self._get_callinfo(*item)
except asyncio.TimeoutError:
raise CallQueueTimeoutError(
'Timeout while waiting for %s() to be called' % (self._name,),
)
def getfullargspec(func):
if isinstance(func, staticmethod):
func = func.__func__
func = getattr(func, '__wrapped__', func)
return inspect.getfullargspec(func)
def callinfo(func):
func_spec = getfullargspec(func)
func_varkw = func_spec.varkw
func_kwonlyargs = func_spec.kwonlyargs
func_kwonlydefaults = func_spec.kwonlydefaults
func_args = func_spec.args
func_varargs = func_spec.varargs
defaults = func_spec.defaults or ()
func_defaults = dict(zip(func_args[-len(defaults) :], defaults))
def callinfo_getter(args, kwargs):
dct = dict(zip(func_args, args))
for argname in func_args[len(args) :]:
if argname in kwargs:
dct[argname] = kwargs[argname]
else:
dct[argname] = func_defaults.get(argname)
if func_varargs is not None:
dct[func_varargs] = args[len(dct) :]
for argname in func_kwonlyargs:
if argname in kwargs:
dct[argname] = kwargs[argname]
else:
dct[argname] = func_kwonlydefaults[argname]
if func_varkw is not None:
dct[func_varkw] = {k: v for k, v in kwargs.items() if k not in dct}
return dct
return callinfo_getter
def acallqueue(func: typing.Callable) -> AsyncCallQueue:
"""Turn function into async call queue.
:param func: async or sync callable, can be decorated with @staticmethod
"""
if isinstance(func, AsyncCallQueue):
return func
if isinstance(func, staticmethod):
func = func.__func__
return AsyncCallQueue(func)
| <filename>testsuite/utils/callinfo.py
import asyncio
import inspect
import typing
class BaseError(Exception):
"""Base exception class for this module."""
class CallQueueError(BaseError):
pass
class CallQueueEmptyError(CallQueueError):
"""Call queue is empty error."""
class CallQueueTimeoutError(CallQueueError):
"""Timed out while waiting for call."""
class AsyncCallQueue:
"""Function wrapper that puts information about function call into async
queue.
This class provides methods to wait/check function underlying function
calls.
"""
def __init__(self, func: typing.Callable):
self._func = func
self._name = func.__name__
self._queue: asyncio.Queue = asyncio.Queue()
self._get_callinfo = callinfo(func)
self._is_coro = inspect.iscoroutinefunction(func)
async def __call__(self, *args, **kwargs):
"""Call underlying function."""
try:
if self._is_coro:
return await self._func(*args, **kwargs)
return self._func(*args, **kwargs)
finally:
await self._queue.put((args, kwargs))
def flush(self) -> None:
"""Clear call queue."""
self._queue = asyncio.Queue()
@property
def has_calls(self) -> bool:
"""Returns ``True`` if call queue is not empty."""
return self.times_called > 0
@property
def times_called(self) -> int:
"""Returns call queue length."""
return self._queue.qsize()
def next_call(self) -> dict:
"""Pops call from queue and return its arguments dict.
Raises ``CallQueueError`` if queue is empty
"""
try:
return self._get_callinfo(*self._queue.get_nowait())
except asyncio.queues.QueueEmpty:
raise CallQueueEmptyError(
'No calls for %s() left in the queue' % (self._name,),
)
async def wait_call(self, timeout=10.0) -> dict:
"""Wait for fucntion to be called. Pops call from queue. Blocks if
it's empty.
:param timeout: timeout in seconds
Raises ``CallQueueTimeoutError`` if queue is empty for ``timeout``
seconds.
"""
try:
item = await asyncio.wait_for(self._queue.get(), timeout=timeout)
return self._get_callinfo(*item)
except asyncio.TimeoutError:
raise CallQueueTimeoutError(
'Timeout while waiting for %s() to be called' % (self._name,),
)
def getfullargspec(func):
if isinstance(func, staticmethod):
func = func.__func__
func = getattr(func, '__wrapped__', func)
return inspect.getfullargspec(func)
def callinfo(func):
func_spec = getfullargspec(func)
func_varkw = func_spec.varkw
func_kwonlyargs = func_spec.kwonlyargs
func_kwonlydefaults = func_spec.kwonlydefaults
func_args = func_spec.args
func_varargs = func_spec.varargs
defaults = func_spec.defaults or ()
func_defaults = dict(zip(func_args[-len(defaults) :], defaults))
def callinfo_getter(args, kwargs):
dct = dict(zip(func_args, args))
for argname in func_args[len(args) :]:
if argname in kwargs:
dct[argname] = kwargs[argname]
else:
dct[argname] = func_defaults.get(argname)
if func_varargs is not None:
dct[func_varargs] = args[len(dct) :]
for argname in func_kwonlyargs:
if argname in kwargs:
dct[argname] = kwargs[argname]
else:
dct[argname] = func_kwonlydefaults[argname]
if func_varkw is not None:
dct[func_varkw] = {k: v for k, v in kwargs.items() if k not in dct}
return dct
return callinfo_getter
def acallqueue(func: typing.Callable) -> AsyncCallQueue:
"""Turn function into async call queue.
:param func: async or sync callable, can be decorated with @staticmethod
"""
if isinstance(func, AsyncCallQueue):
return func
if isinstance(func, staticmethod):
func = func.__func__
return AsyncCallQueue(func)
| en | 0.72873 | Base exception class for this module. Call queue is empty error. Timed out while waiting for call. Function wrapper that puts information about function call into async queue. This class provides methods to wait/check function underlying function calls. Call underlying function. Clear call queue. Returns ``True`` if call queue is not empty. Returns call queue length. Pops call from queue and return its arguments dict. Raises ``CallQueueError`` if queue is empty Wait for fucntion to be called. Pops call from queue. Blocks if it's empty. :param timeout: timeout in seconds Raises ``CallQueueTimeoutError`` if queue is empty for ``timeout`` seconds. Turn function into async call queue. :param func: async or sync callable, can be decorated with @staticmethod | 2.678311 | 3 |
Tutorial/playon_fem/plotFEMDomain.py | kazulagi/plantfem_min | 21 | 6624408 | <reponame>kazulagi/plantfem_min
import json
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
json_file = open('domain.json', 'r')
json_object = json.load(json_file)
# メッシュオブジェクト中の節点座標配列を取り出す
nodcoord = np.array(json_object["mesh"]["NodCoord"])
# 以下、matplotlibで描画
#x軸とy軸にラベル付け
fig = plt.figure()
ax = Axes3D(fig)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
# 節点を描画
x = nodcoord[:,0]
y = nodcoord[:,1]
z = nodcoord[:,2]
ax.plot(x,y,z,marker="o",linestyle='None')
# 図を表示
plt.show() | import json
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
json_file = open('domain.json', 'r')
json_object = json.load(json_file)
# メッシュオブジェクト中の節点座標配列を取り出す
nodcoord = np.array(json_object["mesh"]["NodCoord"])
# 以下、matplotlibで描画
#x軸とy軸にラベル付け
fig = plt.figure()
ax = Axes3D(fig)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
# 節点を描画
x = nodcoord[:,0]
y = nodcoord[:,1]
z = nodcoord[:,2]
ax.plot(x,y,z,marker="o",linestyle='None')
# 図を表示
plt.show() | ja | 0.986686 | # メッシュオブジェクト中の節点座標配列を取り出す # 以下、matplotlibで描画 #x軸とy軸にラベル付け # 節点を描画 # 図を表示 | 2.880128 | 3 |
pypi-tools/examples/example-package/tests/test.py | strangeworks/cloud-builders-community | 0 | 6624409 | from myPackage import somePython
def test_fahrToKelv():
'''
make sure freezing is calculated correctly
'''
assert somePython.fahrToKelv(32) == 273.15, 'incorrect freezing point!'
| from myPackage import somePython
def test_fahrToKelv():
'''
make sure freezing is calculated correctly
'''
assert somePython.fahrToKelv(32) == 273.15, 'incorrect freezing point!'
| en | 0.971626 | make sure freezing is calculated correctly | 2.555982 | 3 |
pyroomacoustics/denoise/spectral_subtraction.py | HemaZ/pyroomacoustics | 1 | 6624410 | <reponame>HemaZ/pyroomacoustics
# Single Channel Noise Removal using Spectral Subtraction
# Copyright (C) 2019 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
import numpy as np
class SpectralSub(object):
"""
Here we have a class for performing **single channel** noise reduction via
spectral subtraction. The instantaneous signal energy and noise floor is
estimated at each time instance (for each frequency bin) and this is used
to compute a gain filter with which to perform spectral subtraction.
For a given frame `n`, the gain for frequency bin `k` is given by:
.. math::
G[k, n] = \max \\left \{ \\left ( \dfrac{P[k, n]-\\beta P_N[k, n]}{P[k, n]} \\right )^\\alpha, G_{min} \\right \},
where :math:`G_{min} = 10^{-(db\_reduc/20)}` and :math:`db\_reduc` is the
maximum reduction (in dB) that we are willing to perform for each bin (a
high value can actually be detrimental, see below). The instantaneous
energy :math:`P[k,n]` is computed by simply squaring the frequency
amplitude at the bin `k`. The time-frequency decomposition of the input
signal is typically done with the STFT and overlapping frames. The noise
estimate :math:`P_N[k, n]` for frequency bin `k` is given by looking back a
certain number of frames :math:`L` and selecting the bin with the lowest
energy:
.. math::
P_N[k, n] = \min_{[n-L, n]} P[k, n]
This approach works best when the SNR is positive and the noise is rather
stationary. An alternative approach for the noise estimate (also in the
case of stationary noise) would be to apply a lowpass filter for each
frequency bin.
With a large suppression, i.e. large values for :math:`db\_reduc`, we can
observe a typical artefact of such spectral subtraction approaches, namely
"musical noise".
`Here <https://www.vocal.com/noise-reduction/musical-noise/>`_ is nice
article about noise reduction and musical noise.
Adjusting the constants :math:`\\beta` and :math:`\\alpha` also presents a
trade-off between suppression and undesirable artefacts, i.e. more
noticeable musical noise.
Below is an example of how to use this class to emulate a streaming/online
input. A full example can be found
`here <https://github.com/LCAV/pyroomacoustics/blob/master/examples/noise_reduction_spectral_subtraction.py>`__.
::
# initialize STFT and SpectralSub objects
nfft = 512
stft = pra.transform.STFT(nfft, hop=nfft//2,
analysis_window=pra.hann(nfft))
scnr = pra.denoise.SpectralSub(nfft, db_reduc=10, lookback=5,
beta=20, alpha=3)
# apply block-by-block
for n in range(num_blocks):
# go to frequency domain for noise reduction
stft.analysis(mono_noisy)
gain_filt = scnr.compute_gain_filter(stft.X)
# estimating input convolved with unknown response
mono_denoised = stft.synthesis(gain_filt*stft.X)
There also exists a "one-shot" function.
::
# import or create `noisy_signal`
denoised_signal = apply_spectral_sub(noisy_signal, nfft=512,
db_reduc=10, lookback=5,
beta=20, alpha=3)
Parameters
----------
nfft: int
FFT size. Length of gain filter, i.e. the number of frequency bins, is
given by ``nfft//2+1``.
db_reduc: float
Maximum reduction in dB for each bin.
lookback: int
How many frames to look back for the noise estimate.
beta: float
Overestimation factor to "push" the gain filter value (at each
frequency) closer to the dB reduction specified by ``db_reduc``.
alpha: float, optional
Exponent factor to modify transition behavior towards the dB reduction
specified by ``db_reduc``. Default is 1.
"""
def __init__(self, nfft, db_reduc, lookback, beta, alpha=1):
self.beta = beta
self.alpha = alpha
self.n_bins = nfft//2+1
self.p_prev = np.zeros((self.n_bins, lookback+1))
self.gmin = 10**(-db_reduc/20)
self.p_sn = np.zeros(self.n_bins)
self.p_n = np.zeros(self.n_bins)
def compute_gain_filter(self, X):
"""
Parameters
----------
X: numpy array
Complex spectrum of length ``nfft//2+1``.
Returns
-------
numpy array
Gain filter to multiply given spectrum with.
"""
# estimate of signal + noise at current time
self.p_sn[:] = np.real(np.conj(X)*X)
# estimate of noise level
self.p_prev[:, -1] = self.p_sn
self.p_n[:] = np.min(self.p_prev, axis=1)
# compute gain filter
gain_filter = [max((max(self.p_sn[k]-self.beta*self.p_n[k], 0) /
self.p_sn[k])**self.alpha, self.gmin)
for k in range(self.n_bins)]
# update
self.p_prev = np.roll(self.p_prev, -1, axis=1)
return gain_filter
def apply_spectral_sub(noisy_signal, nfft=512, db_reduc=25, lookback=12,
beta=30, alpha=1):
"""
One-shot function to apply spectral subtraction approach.
Parameters
----------
noisy_signal : numpy array
Real signal in time domain.
nfft: int
FFT size. Length of gain filter, i.e. the number of frequency bins, is
given by ``nfft//2+1``.
db_reduc: float
Maximum reduction in dB for each bin.
lookback: int
How many frames to look back for the noise estimate.
beta: float
Overestimation factor to "push" the gain filter value (at each
frequency) closer to the dB reduction specified by ``db_reduc``.
alpha: float, optional
Exponent factor to modify transition behavior towards the dB reduction
specified by ``db_reduc``. Default is 1.
Returns
-------
numpy array
Enhanced/denoised signal.
"""
from pyroomacoustics import hann
from pyroomacoustics.transform import STFT
hop = nfft // 2
window = hann(nfft, flag='asymmetric', length='full')
stft = STFT(nfft, hop=hop, analysis_window=window, streaming=True)
scnr = SpectralSub(nfft, db_reduc, lookback, beta, alpha)
processed_audio = np.zeros(noisy_signal.shape)
n = 0
while noisy_signal.shape[0] - n >= hop:
# SCNR in frequency domain
stft.analysis(noisy_signal[n:(n + hop), ])
gain_filt = scnr.compute_gain_filter(stft.X)
# back to time domain
processed_audio[n:n + hop, ] = stft.synthesis(gain_filt * stft.X)
# update step
n += hop
return processed_audio
| # Single Channel Noise Removal using Spectral Subtraction
# Copyright (C) 2019 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
import numpy as np
class SpectralSub(object):
"""
Here we have a class for performing **single channel** noise reduction via
spectral subtraction. The instantaneous signal energy and noise floor is
estimated at each time instance (for each frequency bin) and this is used
to compute a gain filter with which to perform spectral subtraction.
For a given frame `n`, the gain for frequency bin `k` is given by:
.. math::
G[k, n] = \max \\left \{ \\left ( \dfrac{P[k, n]-\\beta P_N[k, n]}{P[k, n]} \\right )^\\alpha, G_{min} \\right \},
where :math:`G_{min} = 10^{-(db\_reduc/20)}` and :math:`db\_reduc` is the
maximum reduction (in dB) that we are willing to perform for each bin (a
high value can actually be detrimental, see below). The instantaneous
energy :math:`P[k,n]` is computed by simply squaring the frequency
amplitude at the bin `k`. The time-frequency decomposition of the input
signal is typically done with the STFT and overlapping frames. The noise
estimate :math:`P_N[k, n]` for frequency bin `k` is given by looking back a
certain number of frames :math:`L` and selecting the bin with the lowest
energy:
.. math::
P_N[k, n] = \min_{[n-L, n]} P[k, n]
This approach works best when the SNR is positive and the noise is rather
stationary. An alternative approach for the noise estimate (also in the
case of stationary noise) would be to apply a lowpass filter for each
frequency bin.
With a large suppression, i.e. large values for :math:`db\_reduc`, we can
observe a typical artefact of such spectral subtraction approaches, namely
"musical noise".
`Here <https://www.vocal.com/noise-reduction/musical-noise/>`_ is nice
article about noise reduction and musical noise.
Adjusting the constants :math:`\\beta` and :math:`\\alpha` also presents a
trade-off between suppression and undesirable artefacts, i.e. more
noticeable musical noise.
Below is an example of how to use this class to emulate a streaming/online
input. A full example can be found
`here <https://github.com/LCAV/pyroomacoustics/blob/master/examples/noise_reduction_spectral_subtraction.py>`__.
::
# initialize STFT and SpectralSub objects
nfft = 512
stft = pra.transform.STFT(nfft, hop=nfft//2,
analysis_window=pra.hann(nfft))
scnr = pra.denoise.SpectralSub(nfft, db_reduc=10, lookback=5,
beta=20, alpha=3)
# apply block-by-block
for n in range(num_blocks):
# go to frequency domain for noise reduction
stft.analysis(mono_noisy)
gain_filt = scnr.compute_gain_filter(stft.X)
# estimating input convolved with unknown response
mono_denoised = stft.synthesis(gain_filt*stft.X)
There also exists a "one-shot" function.
::
# import or create `noisy_signal`
denoised_signal = apply_spectral_sub(noisy_signal, nfft=512,
db_reduc=10, lookback=5,
beta=20, alpha=3)
Parameters
----------
nfft: int
FFT size. Length of gain filter, i.e. the number of frequency bins, is
given by ``nfft//2+1``.
db_reduc: float
Maximum reduction in dB for each bin.
lookback: int
How many frames to look back for the noise estimate.
beta: float
Overestimation factor to "push" the gain filter value (at each
frequency) closer to the dB reduction specified by ``db_reduc``.
alpha: float, optional
Exponent factor to modify transition behavior towards the dB reduction
specified by ``db_reduc``. Default is 1.
"""
def __init__(self, nfft, db_reduc, lookback, beta, alpha=1):
self.beta = beta
self.alpha = alpha
self.n_bins = nfft//2+1
self.p_prev = np.zeros((self.n_bins, lookback+1))
self.gmin = 10**(-db_reduc/20)
self.p_sn = np.zeros(self.n_bins)
self.p_n = np.zeros(self.n_bins)
def compute_gain_filter(self, X):
"""
Parameters
----------
X: numpy array
Complex spectrum of length ``nfft//2+1``.
Returns
-------
numpy array
Gain filter to multiply given spectrum with.
"""
# estimate of signal + noise at current time
self.p_sn[:] = np.real(np.conj(X)*X)
# estimate of noise level
self.p_prev[:, -1] = self.p_sn
self.p_n[:] = np.min(self.p_prev, axis=1)
# compute gain filter
gain_filter = [max((max(self.p_sn[k]-self.beta*self.p_n[k], 0) /
self.p_sn[k])**self.alpha, self.gmin)
for k in range(self.n_bins)]
# update
self.p_prev = np.roll(self.p_prev, -1, axis=1)
return gain_filter
def apply_spectral_sub(noisy_signal, nfft=512, db_reduc=25, lookback=12,
beta=30, alpha=1):
"""
One-shot function to apply spectral subtraction approach.
Parameters
----------
noisy_signal : numpy array
Real signal in time domain.
nfft: int
FFT size. Length of gain filter, i.e. the number of frequency bins, is
given by ``nfft//2+1``.
db_reduc: float
Maximum reduction in dB for each bin.
lookback: int
How many frames to look back for the noise estimate.
beta: float
Overestimation factor to "push" the gain filter value (at each
frequency) closer to the dB reduction specified by ``db_reduc``.
alpha: float, optional
Exponent factor to modify transition behavior towards the dB reduction
specified by ``db_reduc``. Default is 1.
Returns
-------
numpy array
Enhanced/denoised signal.
"""
from pyroomacoustics import hann
from pyroomacoustics.transform import STFT
hop = nfft // 2
window = hann(nfft, flag='asymmetric', length='full')
stft = STFT(nfft, hop=hop, analysis_window=window, streaming=True)
scnr = SpectralSub(nfft, db_reduc, lookback, beta, alpha)
processed_audio = np.zeros(noisy_signal.shape)
n = 0
while noisy_signal.shape[0] - n >= hop:
# SCNR in frequency domain
stft.analysis(noisy_signal[n:(n + hop), ])
gain_filt = scnr.compute_gain_filter(stft.X)
# back to time domain
processed_audio[n:n + hop, ] = stft.synthesis(gain_filt * stft.X)
# update step
n += hop
return processed_audio | en | 0.778641 | # Single Channel Noise Removal using Spectral Subtraction # Copyright (C) 2019 <NAME>, <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # You should have received a copy of the MIT License along with this program. If # not, see <https://opensource.org/licenses/MIT>. Here we have a class for performing **single channel** noise reduction via spectral subtraction. The instantaneous signal energy and noise floor is estimated at each time instance (for each frequency bin) and this is used to compute a gain filter with which to perform spectral subtraction. For a given frame `n`, the gain for frequency bin `k` is given by: .. 
math:: G[k, n] = \max \\left \{ \\left ( \dfrac{P[k, n]-\\beta P_N[k, n]}{P[k, n]} \\right )^\\alpha, G_{min} \\right \}, where :math:`G_{min} = 10^{-(db\_reduc/20)}` and :math:`db\_reduc` is the maximum reduction (in dB) that we are willing to perform for each bin (a high value can actually be detrimental, see below). The instantaneous energy :math:`P[k,n]` is computed by simply squaring the frequency amplitude at the bin `k`. The time-frequency decomposition of the input signal is typically done with the STFT and overlapping frames. The noise estimate :math:`P_N[k, n]` for frequency bin `k` is given by looking back a certain number of frames :math:`L` and selecting the bin with the lowest energy: .. math:: P_N[k, n] = \min_{[n-L, n]} P[k, n] This approach works best when the SNR is positive and the noise is rather stationary. An alternative approach for the noise estimate (also in the case of stationary noise) would be to apply a lowpass filter for each frequency bin. With a large suppression, i.e. large values for :math:`db\_reduc`, we can observe a typical artefact of such spectral subtraction approaches, namely "musical noise". `Here <https://www.vocal.com/noise-reduction/musical-noise/>`_ is nice article about noise reduction and musical noise. Adjusting the constants :math:`\\beta` and :math:`\\alpha` also presents a trade-off between suppression and undesirable artefacts, i.e. more noticeable musical noise. Below is an example of how to use this class to emulate a streaming/online input. A full example can be found `here <https://github.com/LCAV/pyroomacoustics/blob/master/examples/noise_reduction_spectral_subtraction.py>`__. 
:: # initialize STFT and SpectralSub objects nfft = 512 stft = pra.transform.STFT(nfft, hop=nfft//2, analysis_window=pra.hann(nfft)) scnr = pra.denoise.SpectralSub(nfft, db_reduc=10, lookback=5, beta=20, alpha=3) # apply block-by-block for n in range(num_blocks): # go to frequency domain for noise reduction stft.analysis(mono_noisy) gain_filt = scnr.compute_gain_filter(stft.X) # estimating input convolved with unknown response mono_denoised = stft.synthesis(gain_filt*stft.X) There also exists a "one-shot" function. :: # import or create `noisy_signal` denoised_signal = apply_spectral_sub(noisy_signal, nfft=512, db_reduc=10, lookback=5, beta=20, alpha=3) Parameters ---------- nfft: int FFT size. Length of gain filter, i.e. the number of frequency bins, is given by ``nfft//2+1``. db_reduc: float Maximum reduction in dB for each bin. lookback: int How many frames to look back for the noise estimate. beta: float Overestimation factor to "push" the gain filter value (at each frequency) closer to the dB reduction specified by ``db_reduc``. alpha: float, optional Exponent factor to modify transition behavior towards the dB reduction specified by ``db_reduc``. Default is 1. Parameters ---------- X: numpy array Complex spectrum of length ``nfft//2+1``. Returns ------- numpy array Gain filter to multiply given spectrum with. # estimate of signal + noise at current time # estimate of noise level # compute gain filter # update One-shot function to apply spectral subtraction approach. Parameters ---------- noisy_signal : numpy array Real signal in time domain. nfft: int FFT size. Length of gain filter, i.e. the number of frequency bins, is given by ``nfft//2+1``. db_reduc: float Maximum reduction in dB for each bin. lookback: int How many frames to look back for the noise estimate. beta: float Overestimation factor to "push" the gain filter value (at each frequency) closer to the dB reduction specified by ``db_reduc``. 
alpha: float, optional Exponent factor to modify transition behavior towards the dB reduction specified by ``db_reduc``. Default is 1. Returns ------- numpy array Enhanced/denoised signal. # SCNR in frequency domain # back to time domain # update step | 2.237237 | 2 |
Tools/scripts/patchcheck.py | jaswdr/cpython | 1 | 6624411 | #!/usr/bin/env python3
"""Check proposed changes for common issues."""
import re
import sys
import shutil
import os.path
import subprocess
import sysconfig
import reindent
import untabify
# Excluded directories which are copies of external libraries:
# don't check their coding style
EXCLUDE_DIRS = [os.path.join('Modules', '_ctypes', 'libffi_osx'),
os.path.join('Modules', '_ctypes', 'libffi_msvc'),
os.path.join('Modules', '_decimal', 'libmpdec'),
os.path.join('Modules', 'expat'),
os.path.join('Modules', 'zlib')]
SRCDIR = sysconfig.get_config_var('srcdir')
def n_files_str(count):
"""Return 'N file(s)' with the proper plurality on 'file'."""
return "{} file{}".format(count, "s" if count != 1 else "")
def status(message, modal=False, info=None):
"""Decorator to output status info to stdout."""
def decorated_fxn(fxn):
def call_fxn(*args, **kwargs):
sys.stdout.write(message + ' ... ')
sys.stdout.flush()
result = fxn(*args, **kwargs)
if not modal and not info:
print("done")
elif info:
print(info(result))
else:
print("yes" if result else "NO")
return result
return call_fxn
return decorated_fxn
def get_git_branch():
"""Get the symbolic name for the current git branch"""
cmd = "git rev-parse --abbrev-ref HEAD".split()
try:
return subprocess.check_output(cmd,
stderr=subprocess.DEVNULL,
cwd=SRCDIR,
encoding='UTF-8')
except subprocess.CalledProcessError:
return None
def get_git_upstream_remote():
"""Get the remote name to use for upstream branches
Uses "upstream" if it exists, "origin" otherwise
"""
cmd = "git remote get-url upstream".split()
try:
subprocess.check_output(cmd,
stderr=subprocess.DEVNULL,
cwd=SRCDIR,
encoding='UTF-8')
except subprocess.CalledProcessError:
return "origin"
return "upstream"
def get_git_remote_default_branch(remote_name):
"""Get the name of the default branch for the given remote
It is typically called 'main', but may differ
"""
cmd = "git remote show {}".format(remote_name).split()
try:
remote_info = subprocess.check_output(cmd,
stderr=subprocess.DEVNULL,
cwd=SRCDIR,
encoding='UTF-8')
except subprocess.CalledProcessError:
return None
for line in remote_info.splitlines():
if "HEAD branch:" in line:
base_branch = line.split(":")[1].strip()
return base_branch
return None
@status("Getting base branch for PR",
info=lambda x: x if x is not None else "not a PR branch")
def get_base_branch():
if not os.path.exists(os.path.join(SRCDIR, '.git')):
# Not a git checkout, so there's no base branch
return None
upstream_remote = get_git_upstream_remote()
version = sys.version_info
if version.releaselevel == 'alpha':
base_branch = get_git_remote_default_branch(upstream_remote)
else:
base_branch = "{0.major}.{0.minor}".format(version)
this_branch = get_git_branch()
if this_branch is None or this_branch == base_branch:
# Not on a git PR branch, so there's no base branch
return None
return upstream_remote + "/" + base_branch
@status("Getting the list of files that have been added/changed",
info=lambda x: n_files_str(len(x)))
def changed_files(base_branch=None):
"""Get the list of changed or added files from git."""
if os.path.exists(os.path.join(SRCDIR, '.git')):
# We just use an existence check here as:
# directory = normal git checkout/clone
# file = git worktree directory
if base_branch:
cmd = 'git diff --name-status ' + base_branch
else:
cmd = 'git status --porcelain'
filenames = []
with subprocess.Popen(cmd.split(),
stdout=subprocess.PIPE,
cwd=SRCDIR) as st:
for line in st.stdout:
line = line.decode().rstrip()
status_text, filename = line.split(maxsplit=1)
status = set(status_text)
# modified, added or unmerged files
if not status.intersection('MAU'):
continue
if ' -> ' in filename:
# file is renamed
filename = filename.split(' -> ', 2)[1].strip()
filenames.append(filename)
else:
sys.exit('need a git checkout to get modified files')
filenames2 = []
for filename in filenames:
# Normalize the path to be able to match using .startswith()
filename = os.path.normpath(filename)
if any(filename.startswith(path) for path in EXCLUDE_DIRS):
# Exclude the file
continue
filenames2.append(filename)
return filenames2
def report_modified_files(file_paths):
count = len(file_paths)
if count == 0:
return n_files_str(count)
else:
lines = ["{}:".format(n_files_str(count))]
for path in file_paths:
lines.append(" {}".format(path))
return "\n".join(lines)
@status("Fixing Python file whitespace", info=report_modified_files)
def normalize_whitespace(file_paths):
"""Make sure that the whitespace for .py files have been normalized."""
reindent.makebackup = False # No need to create backups.
fixed = [path for path in file_paths if path.endswith('.py') and
reindent.check(os.path.join(SRCDIR, path))]
return fixed
@status("Fixing C file whitespace", info=report_modified_files)
def normalize_c_whitespace(file_paths):
"""Report if any C files """
fixed = []
for path in file_paths:
abspath = os.path.join(SRCDIR, path)
with open(abspath, 'r') as f:
if '\t' not in f.read():
continue
untabify.process(abspath, 8, verbose=False)
fixed.append(path)
return fixed
ws_re = re.compile(br'\s+(\r?\n)$')
@status("Fixing docs whitespace", info=report_modified_files)
def normalize_docs_whitespace(file_paths):
fixed = []
for path in file_paths:
abspath = os.path.join(SRCDIR, path)
try:
with open(abspath, 'rb') as f:
lines = f.readlines()
new_lines = [ws_re.sub(br'\1', line) for line in lines]
if new_lines != lines:
shutil.copyfile(abspath, abspath + '.bak')
with open(abspath, 'wb') as f:
f.writelines(new_lines)
fixed.append(path)
except Exception as err:
print('Cannot fix %s: %s' % (path, err))
return fixed
@status("Docs modified", modal=True)
def docs_modified(file_paths):
"""Report if any file in the Doc directory has been changed."""
return bool(file_paths)
@status("Misc/ACKS updated", modal=True)
def credit_given(file_paths):
"""Check if Misc/ACKS has been changed."""
return os.path.join('Misc', 'ACKS') in file_paths
@status("Misc/NEWS.d updated with `blurb`", modal=True)
def reported_news(file_paths):
"""Check if Misc/NEWS.d has been changed."""
return any(p.startswith(os.path.join('Misc', 'NEWS.d', 'next'))
for p in file_paths)
@status("configure regenerated", modal=True, info=str)
def regenerated_configure(file_paths):
"""Check if configure has been regenerated."""
if 'configure.ac' in file_paths:
return "yes" if 'configure' in file_paths else "no"
else:
return "not needed"
@status("pyconfig.h.in regenerated", modal=True, info=str)
def regenerated_pyconfig_h_in(file_paths):
"""Check if pyconfig.h.in has been regenerated."""
if 'configure.ac' in file_paths:
return "yes" if 'pyconfig.h.in' in file_paths else "no"
else:
return "not needed"
def travis(pull_request):
if pull_request == 'false':
print('Not a pull request; skipping')
return
base_branch = get_base_branch()
file_paths = changed_files(base_branch)
python_files = [fn for fn in file_paths if fn.endswith('.py')]
c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
doc_files = [fn for fn in file_paths if fn.startswith('Doc') and
fn.endswith(('.rst', '.inc'))]
fixed = []
fixed.extend(normalize_whitespace(python_files))
fixed.extend(normalize_c_whitespace(c_files))
fixed.extend(normalize_docs_whitespace(doc_files))
if not fixed:
print('No whitespace issues found')
else:
print(f'Please fix the {len(fixed)} file(s) with whitespace issues')
print('(on UNIX you can run `make patchcheck` to make the fixes)')
sys.exit(1)
def main():
base_branch = get_base_branch()
file_paths = changed_files(base_branch)
python_files = [fn for fn in file_paths if fn.endswith('.py')]
c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
doc_files = [fn for fn in file_paths if fn.startswith('Doc') and
fn.endswith(('.rst', '.inc'))]
misc_files = {p for p in file_paths if p.startswith('Misc')}
# PEP 8 whitespace rules enforcement.
normalize_whitespace(python_files)
# C rules enforcement.
normalize_c_whitespace(c_files)
# Doc whitespace enforcement.
normalize_docs_whitespace(doc_files)
# Docs updated.
docs_modified(doc_files)
# Misc/ACKS changed.
credit_given(misc_files)
# Misc/NEWS changed.
reported_news(misc_files)
# Regenerated configure, if necessary.
regenerated_configure(file_paths)
# Regenerated pyconfig.h.in, if necessary.
regenerated_pyconfig_h_in(file_paths)
# Test suite run and passed.
if python_files or c_files:
end = " and check for refleaks?" if c_files else "?"
print()
print("Did you run the test suite" + end)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--travis',
help='Perform pass/fail checks')
args = parser.parse_args()
if args.travis:
travis(args.travis)
else:
main()
| #!/usr/bin/env python3
"""Check proposed changes for common issues."""
import re
import sys
import shutil
import os.path
import subprocess
import sysconfig
import reindent
import untabify
# Excluded directories which are copies of external libraries:
# don't check their coding style
EXCLUDE_DIRS = [os.path.join('Modules', '_ctypes', 'libffi_osx'),
os.path.join('Modules', '_ctypes', 'libffi_msvc'),
os.path.join('Modules', '_decimal', 'libmpdec'),
os.path.join('Modules', 'expat'),
os.path.join('Modules', 'zlib')]
SRCDIR = sysconfig.get_config_var('srcdir')
def n_files_str(count):
"""Return 'N file(s)' with the proper plurality on 'file'."""
return "{} file{}".format(count, "s" if count != 1 else "")
def status(message, modal=False, info=None):
"""Decorator to output status info to stdout."""
def decorated_fxn(fxn):
def call_fxn(*args, **kwargs):
sys.stdout.write(message + ' ... ')
sys.stdout.flush()
result = fxn(*args, **kwargs)
if not modal and not info:
print("done")
elif info:
print(info(result))
else:
print("yes" if result else "NO")
return result
return call_fxn
return decorated_fxn
def get_git_branch():
"""Get the symbolic name for the current git branch"""
cmd = "git rev-parse --abbrev-ref HEAD".split()
try:
return subprocess.check_output(cmd,
stderr=subprocess.DEVNULL,
cwd=SRCDIR,
encoding='UTF-8')
except subprocess.CalledProcessError:
return None
def get_git_upstream_remote():
"""Get the remote name to use for upstream branches
Uses "upstream" if it exists, "origin" otherwise
"""
cmd = "git remote get-url upstream".split()
try:
subprocess.check_output(cmd,
stderr=subprocess.DEVNULL,
cwd=SRCDIR,
encoding='UTF-8')
except subprocess.CalledProcessError:
return "origin"
return "upstream"
def get_git_remote_default_branch(remote_name):
"""Get the name of the default branch for the given remote
It is typically called 'main', but may differ
"""
cmd = "git remote show {}".format(remote_name).split()
try:
remote_info = subprocess.check_output(cmd,
stderr=subprocess.DEVNULL,
cwd=SRCDIR,
encoding='UTF-8')
except subprocess.CalledProcessError:
return None
for line in remote_info.splitlines():
if "HEAD branch:" in line:
base_branch = line.split(":")[1].strip()
return base_branch
return None
@status("Getting base branch for PR",
info=lambda x: x if x is not None else "not a PR branch")
def get_base_branch():
if not os.path.exists(os.path.join(SRCDIR, '.git')):
# Not a git checkout, so there's no base branch
return None
upstream_remote = get_git_upstream_remote()
version = sys.version_info
if version.releaselevel == 'alpha':
base_branch = get_git_remote_default_branch(upstream_remote)
else:
base_branch = "{0.major}.{0.minor}".format(version)
this_branch = get_git_branch()
if this_branch is None or this_branch == base_branch:
# Not on a git PR branch, so there's no base branch
return None
return upstream_remote + "/" + base_branch
@status("Getting the list of files that have been added/changed",
info=lambda x: n_files_str(len(x)))
def changed_files(base_branch=None):
"""Get the list of changed or added files from git."""
if os.path.exists(os.path.join(SRCDIR, '.git')):
# We just use an existence check here as:
# directory = normal git checkout/clone
# file = git worktree directory
if base_branch:
cmd = 'git diff --name-status ' + base_branch
else:
cmd = 'git status --porcelain'
filenames = []
with subprocess.Popen(cmd.split(),
stdout=subprocess.PIPE,
cwd=SRCDIR) as st:
for line in st.stdout:
line = line.decode().rstrip()
status_text, filename = line.split(maxsplit=1)
status = set(status_text)
# modified, added or unmerged files
if not status.intersection('MAU'):
continue
if ' -> ' in filename:
# file is renamed
filename = filename.split(' -> ', 2)[1].strip()
filenames.append(filename)
else:
sys.exit('need a git checkout to get modified files')
filenames2 = []
for filename in filenames:
# Normalize the path to be able to match using .startswith()
filename = os.path.normpath(filename)
if any(filename.startswith(path) for path in EXCLUDE_DIRS):
# Exclude the file
continue
filenames2.append(filename)
return filenames2
def report_modified_files(file_paths):
count = len(file_paths)
if count == 0:
return n_files_str(count)
else:
lines = ["{}:".format(n_files_str(count))]
for path in file_paths:
lines.append(" {}".format(path))
return "\n".join(lines)
@status("Fixing Python file whitespace", info=report_modified_files)
def normalize_whitespace(file_paths):
"""Make sure that the whitespace for .py files have been normalized."""
reindent.makebackup = False # No need to create backups.
fixed = [path for path in file_paths if path.endswith('.py') and
reindent.check(os.path.join(SRCDIR, path))]
return fixed
@status("Fixing C file whitespace", info=report_modified_files)
def normalize_c_whitespace(file_paths):
"""Report if any C files """
fixed = []
for path in file_paths:
abspath = os.path.join(SRCDIR, path)
with open(abspath, 'r') as f:
if '\t' not in f.read():
continue
untabify.process(abspath, 8, verbose=False)
fixed.append(path)
return fixed
ws_re = re.compile(br'\s+(\r?\n)$')
@status("Fixing docs whitespace", info=report_modified_files)
def normalize_docs_whitespace(file_paths):
fixed = []
for path in file_paths:
abspath = os.path.join(SRCDIR, path)
try:
with open(abspath, 'rb') as f:
lines = f.readlines()
new_lines = [ws_re.sub(br'\1', line) for line in lines]
if new_lines != lines:
shutil.copyfile(abspath, abspath + '.bak')
with open(abspath, 'wb') as f:
f.writelines(new_lines)
fixed.append(path)
except Exception as err:
print('Cannot fix %s: %s' % (path, err))
return fixed
@status("Docs modified", modal=True)
def docs_modified(file_paths):
"""Report if any file in the Doc directory has been changed."""
return bool(file_paths)
@status("Misc/ACKS updated", modal=True)
def credit_given(file_paths):
"""Check if Misc/ACKS has been changed."""
return os.path.join('Misc', 'ACKS') in file_paths
@status("Misc/NEWS.d updated with `blurb`", modal=True)
def reported_news(file_paths):
"""Check if Misc/NEWS.d has been changed."""
return any(p.startswith(os.path.join('Misc', 'NEWS.d', 'next'))
for p in file_paths)
@status("configure regenerated", modal=True, info=str)
def regenerated_configure(file_paths):
"""Check if configure has been regenerated."""
if 'configure.ac' in file_paths:
return "yes" if 'configure' in file_paths else "no"
else:
return "not needed"
@status("pyconfig.h.in regenerated", modal=True, info=str)
def regenerated_pyconfig_h_in(file_paths):
"""Check if pyconfig.h.in has been regenerated."""
if 'configure.ac' in file_paths:
return "yes" if 'pyconfig.h.in' in file_paths else "no"
else:
return "not needed"
def travis(pull_request):
if pull_request == 'false':
print('Not a pull request; skipping')
return
base_branch = get_base_branch()
file_paths = changed_files(base_branch)
python_files = [fn for fn in file_paths if fn.endswith('.py')]
c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
doc_files = [fn for fn in file_paths if fn.startswith('Doc') and
fn.endswith(('.rst', '.inc'))]
fixed = []
fixed.extend(normalize_whitespace(python_files))
fixed.extend(normalize_c_whitespace(c_files))
fixed.extend(normalize_docs_whitespace(doc_files))
if not fixed:
print('No whitespace issues found')
else:
print(f'Please fix the {len(fixed)} file(s) with whitespace issues')
print('(on UNIX you can run `make patchcheck` to make the fixes)')
sys.exit(1)
def main():
base_branch = get_base_branch()
file_paths = changed_files(base_branch)
python_files = [fn for fn in file_paths if fn.endswith('.py')]
c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
doc_files = [fn for fn in file_paths if fn.startswith('Doc') and
fn.endswith(('.rst', '.inc'))]
misc_files = {p for p in file_paths if p.startswith('Misc')}
# PEP 8 whitespace rules enforcement.
normalize_whitespace(python_files)
# C rules enforcement.
normalize_c_whitespace(c_files)
# Doc whitespace enforcement.
normalize_docs_whitespace(doc_files)
# Docs updated.
docs_modified(doc_files)
# Misc/ACKS changed.
credit_given(misc_files)
# Misc/NEWS changed.
reported_news(misc_files)
# Regenerated configure, if necessary.
regenerated_configure(file_paths)
# Regenerated pyconfig.h.in, if necessary.
regenerated_pyconfig_h_in(file_paths)
# Test suite run and passed.
if python_files or c_files:
end = " and check for refleaks?" if c_files else "?"
print()
print("Did you run the test suite" + end)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--travis',
help='Perform pass/fail checks')
args = parser.parse_args()
if args.travis:
travis(args.travis)
else:
main()
| en | 0.842187 | #!/usr/bin/env python3 Check proposed changes for common issues. # Excluded directories which are copies of external libraries: # don't check their coding style Return 'N file(s)' with the proper plurality on 'file'. Decorator to output status info to stdout. Get the symbolic name for the current git branch Get the remote name to use for upstream branches Uses "upstream" if it exists, "origin" otherwise Get the name of the default branch for the given remote It is typically called 'main', but may differ # Not a git checkout, so there's no base branch # Not on a git PR branch, so there's no base branch Get the list of changed or added files from git. # We just use an existence check here as: # directory = normal git checkout/clone # file = git worktree directory # modified, added or unmerged files # file is renamed # Normalize the path to be able to match using .startswith() # Exclude the file Make sure that the whitespace for .py files have been normalized. # No need to create backups. Report if any C files Report if any file in the Doc directory has been changed. Check if Misc/ACKS has been changed. Check if Misc/NEWS.d has been changed. Check if configure has been regenerated. Check if pyconfig.h.in has been regenerated. # PEP 8 whitespace rules enforcement. # C rules enforcement. # Doc whitespace enforcement. # Docs updated. # Misc/ACKS changed. # Misc/NEWS changed. # Regenerated configure, if necessary. # Regenerated pyconfig.h.in, if necessary. # Test suite run and passed. | 2.119668 | 2 |
apps/simple_history.py | kevingduck/transmission | 19 | 6624412 | <gh_stars>10-100
"""
Copyright 2019 ShipChain, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import logging
import json
from functools import lru_cache
from django.conf import settings
from django.core.serializers import serialize
from django.db import models
from django.utils.timezone import now
from django.db.models.fields.proxy import OrderWrt
from simple_history.signals import post_create_historical_record, pre_create_historical_record
from simple_history.models import HistoricalRecords, ModelChange, ModelDelta, transform_field
LOG = logging.getLogger('transmission')
def _model_to_dict(model):
return json.loads(serialize("json", [model]))[0]["fields"]
def get_user(request=None, **kwargs):
if request and request.user:
return request.user.id
return None
class HistoricalChangesMixin:
def diff(self, old_history, json_fields_only=False):
changes_map = {}
if json_fields_only:
for field_name in self.json_fields:
current_values = getattr(self, field_name, None)
old_values = getattr(old_history, field_name, None)
if not old_values and isinstance(current_values, dict):
old_values = {f: None for f in current_values.keys()}
elif not current_values and isinstance(old_values, dict):
current_values = {f: None for f in old_values.keys()}
elif not old_values and not current_values:
continue
changes_map[field_name] = self.build_changes(current_values, old_values, old_history,
from_json_field=True)
return changes_map
current_values = _model_to_dict(self.instance)
if old_history:
old_values = _model_to_dict(old_history.instance)
else:
old_values = {f: None for f in current_values.keys()}
return self.build_changes(current_values, old_values, old_history)
def build_changes(self, new_obj_dict, old_obj_dict, old_historical_obj, from_json_field=False):
changes = []
changed_fields = []
for field, new_value in new_obj_dict.items():
if field in old_obj_dict:
old_value = old_obj_dict[field]
if old_value != new_value:
changes.append(ModelChange(field, old_value, new_value))
elif from_json_field:
changes.append(ModelChange(field, None, new_value))
changed_fields.append(field)
return ModelDelta(changes, changed_fields, old_historical_obj, self)
@property
@lru_cache()
def json_fields(self):
list_json_fields = []
for field in self.instance._meta.get_fields():
if field.__class__.__name__.lower() == 'jsonfield':
list_json_fields.append(field.name)
return list_json_fields
class TxmHistoricalRecords(HistoricalRecords):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bases += (HistoricalChangesMixin,)
self.manager_name = 'history'
self.user_id_field = True
self.get_user = get_user
def _get_history_user_fields(self):
if self.user_id_field:
# We simply track the updated_by field
history_user_fields = {
"history_user": models.CharField(null=True, blank=True, max_length=36),
}
else:
history_user_fields = {}
return history_user_fields
def copy_fields(self, model):
fields = {}
for field in self.fields_included(model):
field = copy.copy(field)
field.remote_field = copy.copy(field.remote_field)
if isinstance(field, OrderWrt):
field.__class__ = models.IntegerField
if isinstance(field, models.ForeignKey):
old_field = field
old_swappable = old_field.swappable
old_field.swappable = False
try:
_name, _path, args, field_args = old_field.deconstruct()
finally:
old_field.swappable = old_swappable
if getattr(old_field, "one_to_one", False) or \
isinstance(old_field, (models.OneToOneField, models.ForeignKey)):
field_type = models.ForeignKey
if old_field.name in settings.RELATED_FIELDS_WITH_HISTORY_MAP.keys():
field_args["to"] = f'shipments.Historical' \
f'{settings.RELATED_FIELDS_WITH_HISTORY_MAP[old_field.name]}'
else:
field_type = type(old_field)
if field_args.get("to", None) == "self":
field_args["to"] = old_field.model
# Override certain arguments passed when creating the field
# so that they work for the historical field.
field_args.update(
db_constraint=False,
null=True,
blank=True,
primary_key=False,
db_index=True,
serialize=True,
unique=False,
on_delete=models.DO_NOTHING,
)
field = field_type(*args, **field_args)
field.name = old_field.name
else:
transform_field(field)
fields[field.name] = field
return fields
def create_historical_record(self, instance, history_type, using=None):
history_date = getattr(instance, "_history_date", now())
history_user = self.get_history_user(instance)
history_change_reason = getattr(instance, "changeReason", None)
manager = getattr(instance, self.manager_name)
attrs = {}
for field in self.fields_included(instance):
related_instance = getattr(instance, field.name, None)
if related_instance and hasattr(related_instance, 'history'):
attrs[field.name] = related_instance.history.first()
else:
attrs[field.name] = getattr(instance, field.name)
history_instance = manager.model(
history_date=history_date,
history_type=history_type,
history_user=history_user,
history_change_reason=history_change_reason,
**attrs
)
pre_create_historical_record.send(
sender=manager.model,
instance=instance,
history_date=history_date,
history_user=history_user,
history_change_reason=history_change_reason,
history_instance=history_instance,
using=using,
)
history_instance.save(using=using)
post_create_historical_record.send(
sender=manager.model,
instance=instance,
history_instance=history_instance,
history_date=history_date,
history_user=history_user,
history_change_reason=history_change_reason,
using=using,
)
def get_extra_fields(self, model, fields):
extra_fields = super().get_extra_fields(model, fields)
extra_fields['history_date'] = models.DateTimeField(db_index=True)
return extra_fields
class AnonymousHistoricalMixin:
@classmethod
def get_class_instance(cls):
return cls
def anonymous_historical_change(self, only_history=False, history_type='~', user=None, **kwargs):
"""
Update a shipment and / or create a related anonymous historical record.
:param kwargs: key value fields to update.
:param only_history: Boolean for creating a historical record without changes to the object instance
:param history_type: Type for the historical object in creation, '+' or '~' respectively for
created and changed
:param user: Authoring of the historical record
"""
def create_historical_instance(obj, h_type):
# Manual creation of a historical object
TxmHistoricalRecords().create_historical_record(obj, h_type)
h_instance = obj.history.first()
h_instance.history_user = user
h_instance.updated_by = user
h_instance.save()
return h_instance
if only_history:
historical_object = create_historical_instance(self, history_type)
LOG.debug(f'Created anonymous historical record: {historical_object.id}')
return historical_object
instance = self.get_class_instance().objects.filter(id=self.id)
instance.update(**kwargs)
instance = instance.first()
historical_instance = create_historical_instance(instance, history_type)
LOG.debug(f'Updated shipment: {instance.id}, with: {kwargs} and created related anonymous historical record: '
f'{historical_instance.id}')
| """
Copyright 2019 ShipChain, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import logging
import json
from functools import lru_cache
from django.conf import settings
from django.core.serializers import serialize
from django.db import models
from django.utils.timezone import now
from django.db.models.fields.proxy import OrderWrt
from simple_history.signals import post_create_historical_record, pre_create_historical_record
from simple_history.models import HistoricalRecords, ModelChange, ModelDelta, transform_field
LOG = logging.getLogger('transmission')
def _model_to_dict(model):
return json.loads(serialize("json", [model]))[0]["fields"]
def get_user(request=None, **kwargs):
if request and request.user:
return request.user.id
return None
class HistoricalChangesMixin:
def diff(self, old_history, json_fields_only=False):
changes_map = {}
if json_fields_only:
for field_name in self.json_fields:
current_values = getattr(self, field_name, None)
old_values = getattr(old_history, field_name, None)
if not old_values and isinstance(current_values, dict):
old_values = {f: None for f in current_values.keys()}
elif not current_values and isinstance(old_values, dict):
current_values = {f: None for f in old_values.keys()}
elif not old_values and not current_values:
continue
changes_map[field_name] = self.build_changes(current_values, old_values, old_history,
from_json_field=True)
return changes_map
current_values = _model_to_dict(self.instance)
if old_history:
old_values = _model_to_dict(old_history.instance)
else:
old_values = {f: None for f in current_values.keys()}
return self.build_changes(current_values, old_values, old_history)
def build_changes(self, new_obj_dict, old_obj_dict, old_historical_obj, from_json_field=False):
changes = []
changed_fields = []
for field, new_value in new_obj_dict.items():
if field in old_obj_dict:
old_value = old_obj_dict[field]
if old_value != new_value:
changes.append(ModelChange(field, old_value, new_value))
elif from_json_field:
changes.append(ModelChange(field, None, new_value))
changed_fields.append(field)
return ModelDelta(changes, changed_fields, old_historical_obj, self)
@property
@lru_cache()
def json_fields(self):
list_json_fields = []
for field in self.instance._meta.get_fields():
if field.__class__.__name__.lower() == 'jsonfield':
list_json_fields.append(field.name)
return list_json_fields
class TxmHistoricalRecords(HistoricalRecords):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bases += (HistoricalChangesMixin,)
self.manager_name = 'history'
self.user_id_field = True
self.get_user = get_user
def _get_history_user_fields(self):
if self.user_id_field:
# We simply track the updated_by field
history_user_fields = {
"history_user": models.CharField(null=True, blank=True, max_length=36),
}
else:
history_user_fields = {}
return history_user_fields
def copy_fields(self, model):
fields = {}
for field in self.fields_included(model):
field = copy.copy(field)
field.remote_field = copy.copy(field.remote_field)
if isinstance(field, OrderWrt):
field.__class__ = models.IntegerField
if isinstance(field, models.ForeignKey):
old_field = field
old_swappable = old_field.swappable
old_field.swappable = False
try:
_name, _path, args, field_args = old_field.deconstruct()
finally:
old_field.swappable = old_swappable
if getattr(old_field, "one_to_one", False) or \
isinstance(old_field, (models.OneToOneField, models.ForeignKey)):
field_type = models.ForeignKey
if old_field.name in settings.RELATED_FIELDS_WITH_HISTORY_MAP.keys():
field_args["to"] = f'shipments.Historical' \
f'{settings.RELATED_FIELDS_WITH_HISTORY_MAP[old_field.name]}'
else:
field_type = type(old_field)
if field_args.get("to", None) == "self":
field_args["to"] = old_field.model
# Override certain arguments passed when creating the field
# so that they work for the historical field.
field_args.update(
db_constraint=False,
null=True,
blank=True,
primary_key=False,
db_index=True,
serialize=True,
unique=False,
on_delete=models.DO_NOTHING,
)
field = field_type(*args, **field_args)
field.name = old_field.name
else:
transform_field(field)
fields[field.name] = field
return fields
def create_historical_record(self, instance, history_type, using=None):
history_date = getattr(instance, "_history_date", now())
history_user = self.get_history_user(instance)
history_change_reason = getattr(instance, "changeReason", None)
manager = getattr(instance, self.manager_name)
attrs = {}
for field in self.fields_included(instance):
related_instance = getattr(instance, field.name, None)
if related_instance and hasattr(related_instance, 'history'):
attrs[field.name] = related_instance.history.first()
else:
attrs[field.name] = getattr(instance, field.name)
history_instance = manager.model(
history_date=history_date,
history_type=history_type,
history_user=history_user,
history_change_reason=history_change_reason,
**attrs
)
pre_create_historical_record.send(
sender=manager.model,
instance=instance,
history_date=history_date,
history_user=history_user,
history_change_reason=history_change_reason,
history_instance=history_instance,
using=using,
)
history_instance.save(using=using)
post_create_historical_record.send(
sender=manager.model,
instance=instance,
history_instance=history_instance,
history_date=history_date,
history_user=history_user,
history_change_reason=history_change_reason,
using=using,
)
def get_extra_fields(self, model, fields):
extra_fields = super().get_extra_fields(model, fields)
extra_fields['history_date'] = models.DateTimeField(db_index=True)
return extra_fields
class AnonymousHistoricalMixin:
@classmethod
def get_class_instance(cls):
return cls
def anonymous_historical_change(self, only_history=False, history_type='~', user=None, **kwargs):
"""
Update a shipment and / or create a related anonymous historical record.
:param kwargs: key value fields to update.
:param only_history: Boolean for creating a historical record without changes to the object instance
:param history_type: Type for the historical object in creation, '+' or '~' respectively for
created and changed
:param user: Authoring of the historical record
"""
def create_historical_instance(obj, h_type):
# Manual creation of a historical object
TxmHistoricalRecords().create_historical_record(obj, h_type)
h_instance = obj.history.first()
h_instance.history_user = user
h_instance.updated_by = user
h_instance.save()
return h_instance
if only_history:
historical_object = create_historical_instance(self, history_type)
LOG.debug(f'Created anonymous historical record: {historical_object.id}')
return historical_object
instance = self.get_class_instance().objects.filter(id=self.id)
instance.update(**kwargs)
instance = instance.first()
historical_instance = create_historical_instance(instance, history_type)
LOG.debug(f'Updated shipment: {instance.id}, with: {kwargs} and created related anonymous historical record: '
f'{historical_instance.id}') | en | 0.831746 | Copyright 2019 ShipChain, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # We simply track the updated_by field # Override certain arguments passed when creating the field # so that they work for the historical field. Update a shipment and / or create a related anonymous historical record. :param kwargs: key value fields to update. :param only_history: Boolean for creating a historical record without changes to the object instance :param history_type: Type for the historical object in creation, '+' or '~' respectively for created and changed :param user: Authoring of the historical record # Manual creation of a historical object | 1.764202 | 2 |
fdia_simulation/tests/filters/test_filters_ta.py | QDucasse/FDIA_simulation | 7 | 6624413 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 09:19:36 2019
@author: qde
"""
import unittest
import numpy as np
from pprint import pprint
from copy import deepcopy
from math import sqrt,atan2, exp
from nose.tools import raises
from numpy.linalg import inv
from scipy.linalg import block_diag
from fdia_simulation.models import Radar, LabeledMeasurement
from fdia_simulation.filters import RadarFilterTA, MultipleRadarsFilterTA, MultiplePeriodRadarsFilterTA
class RadarFilterTATestCase(unittest.TestCase):
    """Unit tests for RadarFilterTA: a single-radar extended Kalman filter
    with a Thrust Acceleration (TA) motion model.

    State vector (9-dim): [x, vx, ax, y, vy, ay, z, vz, az].
    Measurement (3-dim): spherical coordinates (r, theta, phi) of the
    target as seen from the radar.
    """

    def setUp(self):
        # One radar at the origin; q is the process-noise intensity.
        self.radar = Radar(x=0,y=0)
        self.q = 10.
        self.filter_ta = RadarFilterTA(dim_x = 9, dim_z = 3, q = self.q,radar = self.radar)

    # ==========================================================================
    # ========================= Initialization tests ===========================

    def test_initial_F(self):
        # TA model: position/velocity blocks are coupled through exp(dt) terms.
        dt = self.filter_ta.dt
        edt = exp(dt)
        F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
                      [0, edt, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1,edt-1, 0, 0, 0, 0],
                      [0, 0, 0, 0, edt, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1,edt-1, 0],
                      [0, 0, 0, 0, 0, 0, 0, edt, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]])
        self.assertTrue(np.array_equal(self.filter_ta.F,F))

    def test_initial_Q(self):
        # Process noise only enters on the acceleration components (rows 2,5,8).
        dt = self.filter_ta.dt
        q = self.q
        Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0,dt, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0,dt, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0,dt]])
        self.assertTrue(np.array_equal(self.filter_ta.Q,Q))

    def test_initial_R(self):
        # Measurement noise for one radar: diag over (r, theta, phi).
        dt = self.filter_ta.dt
        R = np.array([[1., 0.   , 0.   ],
                      [0., 0.001, 0.   ],
                      [0., 0.   , 0.001]])
        self.assertTrue(np.array_equal(self.filter_ta.R,R))

    def test_initial_positions(self):
        # Default state is initialized near-zero (1e-6) in every component.
        x0 = self.filter_ta.x[0,0]
        y0 = self.filter_ta.x[3,0]
        z0 = self.filter_ta.x[6,0]
        self.assertEqual(x0, 1e-6)
        self.assertEqual(y0, 1e-6)
        self.assertEqual(z0, 1e-6)

    def test_initial_velocities(self):
        vx0 = self.filter_ta.x[1,0]
        vy0 = self.filter_ta.x[4,0]
        vz0 = self.filter_ta.x[7,0]
        self.assertEqual(vx0, 1e-6)
        self.assertEqual(vy0, 1e-6)
        self.assertEqual(vz0, 1e-6)

    def test_initial_accelerations(self):
        vx0 = self.filter_ta.x[2,0]
        vy0 = self.filter_ta.x[5,0]
        vz0 = self.filter_ta.x[8,0]
        self.assertEqual(vx0, 1e-6)
        self.assertEqual(vy0, 1e-6)
        self.assertEqual(vz0, 1e-6)

    def test_initial_radar_positions(self):
        # The filter mirrors the radar position it was built with (origin here).
        x_rad = self.filter_ta.x_rad
        y_rad = self.filter_ta.y_rad
        z_rad = self.filter_ta.z_rad
        self.assertEqual(x_rad, 0.)
        self.assertEqual(y_rad, 0.)
        self.assertEqual(z_rad, 0.)

    # ==========================================================================
    # ========================= Q/F generation tests ===========================

    def test_F_computing(self):
        # compute_F must both return F and store it on the filter.
        dt = 5.
        self.filter_ta.dt = dt
        edt = exp(dt)
        F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
                      [0, edt, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1,edt-1, 0, 0, 0, 0],
                      [0, 0, 0, 0, edt, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1,edt-1, 0],
                      [0, 0, 0, 0, 0, 0, 0, edt, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]])
        computed_F = self.filter_ta.compute_F(self.filter_ta.x)
        self.assertTrue(np.array_equal(self.filter_ta.F,F))
        self.assertTrue(np.array_equal(computed_F,F))

    def test_Q_computing(self):
        # compute_Q must both return Q and store it on the filter.
        dt = 5.
        q = 20.
        Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0,dt, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0,dt, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0,dt]])
        self.filter_ta.dt = dt
        computed_Q = self.filter_ta.compute_Q(q)
        self.assertTrue(np.array_equal(self.filter_ta.Q,Q))
        self.assertTrue(np.array_equal(computed_Q,Q))

    # ==========================================================================
    # ========================= hx/HJacob tests ================================

    def test_HJacob_computing(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        x = X[0,0]
        y = X[3,0]
        z = X[6,0]
        # Jacobian of the cartesian -> (r, theta, phi) measurement function.
        H = np.array([[x/sqrt(x**2 + y**2 + z**2), 0, 0, y/sqrt(x**2 + y**2 + z**2), 0, 0, z/sqrt(x**2 + y**2 + z**2),0 ,0],
                      [-y/(x**2 + y**2), 0, 0, x/(x**2 + y**2), 0, 0, 0, 0, 0],
                      [-x*z/(sqrt(x**2 + y**2)*(x**2 + y**2 + z**2)), 0, 0, -y*z/(sqrt(x**2 + y**2)*(x**2 + y**2 + z**2)), 0, 0, sqrt(x**2 + y**2)/(x**2 + y**2 + z**2), 0, 0]])
        computed_H = self.filter_ta.HJacob(X)
        self.assertTrue(np.array_equal(computed_H,H))

    def test_hx_computing(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        x = X[0,0]
        y = X[3,0]
        z = X[6,0]
        # Expected spherical measurement of the state position.
        r = sqrt(x**2 + y**2 + z**2)
        theta = atan2(y,x)
        phi = atan2(z,sqrt(x**2 + y**2))
        Zk = np.array([[r,theta,phi]]).T
        computed_Zk = self.filter_ta.hx(X)
        self.assertTrue(np.array_equal(Zk,computed_Zk))

    # ==========================================================================
    # ========================= predict/update cycle tests =====================

    def test_residual_of(self):
        # residual = z - H(x) @ x_prior, with H linearized at the current state.
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        X_prior = np.array([[2000, 200, 20, 2000, 200, 20, 8000, 2, 10]]).T
        z = np.array([[200, 10, 10]]).T
        computed_resid = z - self.filter_ta.HJacob(X)@X_prior
        self.filter_ta.x = X
        self.filter_ta.x_prior = X_prior
        resid = self.filter_ta.residual_of(z)
        self.assertTrue(np.array_equal(computed_resid,resid))

    def test_predict(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        filt = self.filter_ta
        filt.x = X
        pre_F = deepcopy(filt.F)
        F = filt.compute_F(X)
        P = filt.P
        Q = filt.Q
        # Hand-computed EKF prediction equations.
        predicted_X = F@X
        predicted_P = F@P@F.T + Q
        filt.F = pre_F # Needed to keep F unaltered as before the predict step
        filt.predict()
        self.assertTrue(np.array_equal(predicted_X,filt.x))
        self.assertTrue(np.array_equal(predicted_P,filt.P))
        self.assertTrue(np.array_equal(predicted_X,filt.x_prior))
        self.assertTrue(np.array_equal(predicted_P,filt.P_prior))

    def test_update(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        z = np.array([[200, 10, 10]]).T
        filt = self.filter_ta
        filt.x = X
        filt.predict()
        # Re-derive the EKF update by hand (Joseph-form covariance update).
        H = filt.HJacob(filt.x)
        S = H@filt.P@H.T + filt.R
        K = filt.P@H.T@inv(S)
        hx = filt.hx(filt.x)
        y = z - hx
        new_X = filt.x + K@y
        IKH = (filt._I - K@H)
        new_P = (IKH@filt.P)@IKH.T + (K@filt.R)@K.T
        filt.update(z)
        self.assertTrue(np.array_equal(filt.P,new_P))
        self.assertTrue(np.array_equal(filt.x,new_X))
class MultipleRadarsTATestCase(unittest.TestCase):
    """Unit tests for MultipleRadarsFilterTA: the TA model observed by two
    radars simultaneously, so the measurement, R and the Jacobian H are
    vertically stacked (6 rows: 3 per radar)."""

    def setUp(self):
        # Two radars at distinct positions; target initialized at (100, 100).
        self.radar1 = Radar(x=800,y=800)
        self.radar2 = Radar(x=200,y=200)
        radars = [self.radar1,self.radar2]
        self.multiple_ta = MultipleRadarsFilterTA(dim_x = 9, dim_z = 3, q = 1., radars = radars,
                                                  x0 = 100, y0 = 100)

    # ==========================================================================
    # ========================= Initialization tests ===========================

    def test_initial_radar_positions(self):
        positions = [[self.radar1.x,self.radar1.y,self.radar1.z],[self.radar2.x,self.radar2.y,self.radar2.z]]
        computed_positions = self.multiple_ta.radar_positions
        self.assertEqual(computed_positions,positions)

    def test_initial_R(self):
        # Block-diagonal stack of the two radars' (r, theta, phi) noises.
        dt = self.multiple_ta.dt
        R = np.array([[1., 0. , 0. , 0., 0. , 0. ],
                      [0., 0.001, 0. , 0., 0. , 0. ],
                      [0., 0. , 0.001, 0., 0. , 0. ],
                      [0., 0. , 0. , 1., 0. , 0. ],
                      [0., 0. , 0. , 0., 0.001, 0. ],
                      [0., 0. , 0. , 0., 0. , 0.001]])
        self.assertTrue(np.array_equal(self.multiple_ta.R,R))

    def test_initial_F(self):
        # State model is unchanged by the number of radars.
        dt = self.multiple_ta.dt
        edt = exp(dt)
        F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
                      [0, edt, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1,edt-1, 0, 0, 0, 0],
                      [0, 0, 0, 0, edt, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1,edt-1, 0],
                      [0, 0, 0, 0, 0, 0, 0, edt, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]])
        self.assertTrue(np.array_equal(self.multiple_ta.F,F))

    # ==========================================================================
    # ========================= Q/F generation tests ===========================

    def test_F_computing(self):
        dt = 5.
        self.multiple_ta.dt = dt
        edt = exp(dt)
        F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
                      [0, edt, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1,edt-1, 0, 0, 0, 0],
                      [0, 0, 0, 0, edt, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1,edt-1, 0],
                      [0, 0, 0, 0, 0, 0, 0, edt, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]])
        computed_F = self.multiple_ta.compute_F(self.multiple_ta.x)
        self.assertTrue(np.array_equal(self.multiple_ta.F,F))
        self.assertTrue(np.array_equal(computed_F,F))

    def test_Q_computing(self):
        dt = 5.
        q = 20.
        # Noise on acceleration components only, as in the single-radar case.
        Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0,dt, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0,dt, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0,dt]])
        self.multiple_ta.dt = dt
        computed_Q = self.multiple_ta.compute_Q(q)
        self.assertTrue(np.array_equal(self.multiple_ta.Q,Q))
        self.assertTrue(np.array_equal(computed_Q,Q))

    # ==========================================================================
    # ============================= HJacob/hx generation =======================

    def test_HJacob_computing(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        # Positions relative to each radar (measurements are radar-centric).
        x1 = X[0,0] - self.radar1.x
        y1 = X[3,0] - self.radar1.y
        z1 = X[6,0] - self.radar1.z
        x2 = X[0,0] - self.radar2.x
        y2 = X[3,0] - self.radar2.y
        z2 = X[6,0] - self.radar2.z
        # Stacked Jacobian: rows 0-2 for radar1, rows 3-5 for radar2.
        H = np.array([[x1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, y1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, z1/sqrt(x1**2 + y1**2 + z1**2),0 ,0],
                      [-y1/(x1**2 + y1**2), 0, 0, x1/(x1**2 + y1**2), 0, 0, 0, 0, 0],
                      [-x1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, -y1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, sqrt(x1**2 + y1**2)/(x1**2 + y1**2 + z1**2), 0, 0],
                      [x2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, y2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, z2/sqrt(x2**2 + y2**2 + z2**2),0 ,0],
                      [-y2/(x2**2 + y2**2), 0, 0, x2/(x2**2 + y2**2), 0, 0, 0, 0, 0],
                      [-x2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, -y2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, sqrt(x2**2 + y2**2)/(x2**2 + y2**2 + z2**2), 0, 0]])
        computed_H = self.multiple_ta.HJacob(X)
        self.assertTrue(np.array_equal(computed_H,H))

    def test_hx_computing(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        x1 = X[0,0] - self.radar1.x
        y1 = X[3,0] - self.radar1.y
        z1 = X[6,0] - self.radar1.z
        x2 = X[0,0] - self.radar2.x
        y2 = X[3,0] - self.radar2.y
        z2 = X[6,0] - self.radar2.z
        # Expected stacked measurement: (r, theta, phi) per radar.
        r1 = sqrt(x1**2 + y1**2 + z1**2)
        theta1 = atan2(y1,x1)
        phi1 = atan2(z1,sqrt(x1**2 + y1**2))
        r2 = sqrt(x2**2 + y2**2 + z2**2)
        theta2 = atan2(y2,x2)
        phi2 = atan2(z2,sqrt(x2**2 + y2**2))
        Zk = np.array([[r1,theta1,phi1,r2,theta2,phi2]]).T
        computed_Zk = self.multiple_ta.hx(X)
        self.assertTrue(np.array_equal(Zk,computed_Zk))

    # ==========================================================================
    # ========================= predict/update cycle tests =====================

    def test_residual_of(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        X_prior = np.array([[2000, 200, 20, 2000, 200, 20, 8000, 2, 10]]).T
        z = np.array([[200, 10, 10, 210, 9, 8]]).T
        computed_resid = z - self.multiple_ta.HJacob(X)@X_prior
        self.multiple_ta.x = X
        self.multiple_ta.x_prior = X_prior
        resid = self.multiple_ta.residual_of(z)
        self.assertTrue(np.array_equal(computed_resid,resid))

    def test_predict(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        filt = self.multiple_ta
        filt.x = X
        # Hand-computed EKF prediction equations.
        predicted_X = filt.F@filt.x
        predicted_P = filt.F@filt.P@filt.F.T + filt.Q
        filt.predict()
        self.assertTrue(np.array_equal(predicted_X,filt.x))
        self.assertTrue(np.array_equal(predicted_P,filt.P))
        self.assertTrue(np.array_equal(predicted_X,filt.x_prior))
        self.assertTrue(np.array_equal(predicted_P,filt.P_prior))

    def test_update(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        z = np.array([[200, 10, 10, 210, 9, 8]]).T
        filt = self.multiple_ta
        filt.x = X
        filt.predict()
        # Re-derive the EKF update by hand (Joseph-form covariance update).
        H = filt.HJacob(filt.x)
        S = H@filt.P@H.T + filt.R
        K = filt.P@H.T@inv(S)
        hx = filt.hx(filt.x)
        y = z - hx
        new_X = filt.x + K@y
        IKH = (filt._I - K@H)
        new_P = (IKH@filt.P)@IKH.T + (K@filt.R)@K.T
        filt.update(z)
        self.assertTrue(np.allclose(filt.P,new_P))
        self.assertTrue(np.allclose(filt.x,new_X))
class MultiplePeriodRadarsTATestCase(unittest.TestCase):
    """Unit tests for MultiplePeriodRadarsFilterTA: two radars reporting at
    their own periods, so each measurement is tagged with its radar of
    origin and only that radar's block of hx/H is non-zero.

    Bug fixed here: ``test_HJacob_computing_tag_is_0`` was defined twice, so
    the second definition silently shadowed the first (unittest discovers
    tests by method name) and the tag-0 case was never run.  Worse, the
    first body built H from undefined names (x1/y1/z1) while computing the
    radar2 offsets (x2/y2/z2), so it would have raised NameError if it had
    run.  The tag-0 test now checks radar1's block, and the second test is
    renamed ``test_HJacob_computing_tag_is_1``.
    """

    def setUp(self):
        # Two radars at distinct positions; target initialized at (100, 100).
        self.radar1 = Radar(x=800,y=800)
        self.radar2 = Radar(x=200,y=200)
        radars = [self.radar1,self.radar2]
        self.q = 10.
        self.multiplef_ta = MultiplePeriodRadarsFilterTA(dim_x = 9, dim_z = 3, q = self.q,
                                                         radars = radars,
                                                         x0 = 100, y0 = 100)

    # ==========================================================================
    # ========================= Initialization tests ===========================

    def test_initial_radar_positions(self):
        positions = [[self.radar1.x,self.radar1.y,self.radar1.z],[self.radar2.x,self.radar2.y,self.radar2.z]]
        computed_positions = self.multiplef_ta.radar_positions
        self.assertEqual(computed_positions,positions)

    def test_initial_R(self):
        # Block-diagonal stack of the two radars' (r, theta, phi) noises.
        R = np.array([[1., 0. , 0. , 0., 0. , 0. ],
                      [0., 0.001, 0. , 0., 0. , 0. ],
                      [0., 0. , 0.001, 0., 0. , 0. ],
                      [0., 0. , 0. , 1., 0. , 0. ],
                      [0., 0. , 0. , 0., 0.001, 0. ],
                      [0., 0. , 0. , 0., 0. , 0.001]])
        self.assertTrue(np.array_equal(self.multiplef_ta.R,R))

    def test_initial_F(self):
        # State model is unchanged by the number of radars.
        dt = self.multiplef_ta.dt
        edt = exp(dt)
        F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
                      [0, edt, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1,edt-1, 0, 0, 0, 0],
                      [0, 0, 0, 0, edt, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1,edt-1, 0],
                      [0, 0, 0, 0, 0, 0, 0, edt, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]])
        self.assertTrue(np.array_equal(self.multiplef_ta.F,F))

    def test_initial_Q(self):
        # Noise on acceleration components only.
        dt = self.multiplef_ta.dt
        q = self.q
        Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0,dt, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0,dt, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0,dt]])
        self.assertTrue(np.array_equal(self.multiplef_ta.Q,Q))

    def test_tag_radars(self):
        # The filter assigns each radar a tag equal to its index in the list.
        self.assertEqual(self.radar1.tag, 0)
        self.assertEqual(self.radar2.tag, 1)

    # ==========================================================================
    # ========================= Q/F generation tests ===========================

    def test_F_computing(self):
        dt = 5.
        edt = exp(dt)
        F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
                      [0, edt, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1,edt-1, 0, 0, 0, 0],
                      [0, 0, 0, 0, edt, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1,edt-1, 0],
                      [0, 0, 0, 0, 0, 0, 0, edt, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]])
        self.multiplef_ta.dt = dt
        computed_F = self.multiplef_ta.compute_F(self.multiplef_ta.x)
        self.assertTrue(np.array_equal(self.multiplef_ta.F,F))
        self.assertTrue(np.array_equal(computed_F,F))

    def test_Q_computing(self):
        dt = 5.
        q = 20.
        Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0,dt, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0,dt, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0,dt]])
        self.multiplef_ta.dt = dt
        computed_Q = self.multiplef_ta.compute_Q(q)
        self.assertTrue(np.array_equal(self.multiplef_ta.Q,Q))
        self.assertTrue(np.array_equal(computed_Q,Q))

    # ==========================================================================
    # ============================= HJacob/hx generation =======================

    def test_HJacob_computing_tag_is_0(self):
        # Tag 0 -> radar1: only the first three rows of H are populated.
        # (Fixed: previously computed radar2 offsets and referenced undefined
        # x1/y1/z1, and was shadowed by a duplicate method name below.)
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        tag = 0
        x1 = X[0,0] - self.radar1.x
        y1 = X[3,0] - self.radar1.y
        z1 = X[6,0] - self.radar1.z
        H = np.array([[x1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, y1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, z1/sqrt(x1**2 + y1**2 + z1**2),0 ,0],
                      [-y1/(x1**2 + y1**2), 0, 0, x1/(x1**2 + y1**2), 0, 0, 0, 0, 0],
                      [-x1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, -y1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, sqrt(x1**2 + y1**2)/(x1**2 + y1**2 + z1**2), 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0]])
        computed_H = self.multiplef_ta.HJacob(X,tag = tag)
        self.assertTrue(np.array_equal(computed_H,H))

    def test_HJacob_computing_tag_is_1(self):
        # Tag 1 -> radar2: only the last three rows of H are populated.
        # (Fixed: was a duplicate of the tag-0 method name.)
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        tag = 1
        x2 = X[0,0] - self.radar2.x
        y2 = X[3,0] - self.radar2.y
        z2 = X[6,0] - self.radar2.z
        H = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [x2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, y2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, z2/sqrt(x2**2 + y2**2 + z2**2),0 ,0],
                      [-y2/(x2**2 + y2**2), 0, 0, x2/(x2**2 + y2**2), 0, 0, 0, 0, 0],
                      [-x2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, -y2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, sqrt(x2**2 + y2**2)/(x2**2 + y2**2 + z2**2), 0, 0]])
        computed_H = self.multiplef_ta.HJacob(X,tag = tag)
        self.assertTrue(np.array_equal(computed_H,H))

    def test_hx_computing_tag_is_0(self):
        # Tag 0 -> radar1's measurement block filled, radar2's block zeroed.
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        tag = 0
        x1 = X[0,0] - self.radar1.x
        y1 = X[3,0] - self.radar1.y
        z1 = X[6,0] - self.radar1.z
        r1 = sqrt(x1**2 + y1**2 + z1**2)
        theta1 = atan2(y1,x1)
        phi1 = atan2(z1,sqrt(x1**2 + y1**2))
        r2 = 0
        theta2 = 0
        phi2 = 0
        Zk = np.array([[r1,theta1,phi1,r2,theta2,phi2]]).T
        computed_Zk = self.multiplef_ta.hx(X, tag = tag)
        self.assertTrue(np.array_equal(Zk,computed_Zk))

    def test_hx_computing_tag_is_1(self):
        # Tag 1 -> radar2's measurement block filled, radar1's block zeroed.
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        tag = 1
        x2 = X[0,0] - self.radar2.x
        y2 = X[3,0] - self.radar2.y
        z2 = X[6,0] - self.radar2.z
        r1 = 0
        theta1 = 0
        phi1 = 0
        r2 = sqrt(x2**2 + y2**2 + z2**2)
        theta2 = atan2(y2,x2)
        phi2 = atan2(z2,sqrt(x2**2 + y2**2))
        Zk = np.array([[r1,theta1,phi1,r2,theta2,phi2]]).T
        computed_Zk = self.multiplef_ta.hx(X, tag = tag)
        self.assertTrue(np.array_equal(Zk,computed_Zk))

    # ==========================================================================
    # ========================= predict/update cycle tests =====================

    def test_residual_of(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        X_prior = np.array([[2000, 200, 20, 2000, 200, 20, 8000, 2, 10]]).T
        z = np.array([[200, 10, 10]]).T
        tag = 0
        # The 3-dim tagged measurement is expanded to the full 6-dim vector.
        z_input = self.multiplef_ta.gen_complete_measurement(tag = tag, z = z)
        computed_resid = z_input - self.multiplef_ta.HJacob(X,tag = 0)@X_prior
        self.multiplef_ta.x = X
        self.multiplef_ta.x_prior = X_prior
        resid = self.multiplef_ta.residual_of(z = z, tag = tag)
        self.assertTrue(np.array_equal(computed_resid,resid))

    def test_predict(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        filt = self.multiplef_ta
        filt.x = X
        # Hand-computed EKF prediction equations.
        predicted_X = filt.F@filt.x
        predicted_P = filt.F@filt.P@filt.F.T + filt.Q
        filt.predict()
        self.assertTrue(np.array_equal(predicted_X,filt.x))
        self.assertTrue(np.array_equal(predicted_P,filt.P))
        self.assertTrue(np.array_equal(predicted_X,filt.x_prior))
        self.assertTrue(np.array_equal(predicted_P,filt.P_prior))

    def test_update_times(self):
        # update() must advance _last_t to the measurement time and set dt to
        # the elapsed time since the previous measurement.
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        tag = 1
        time = 1.
        z = np.array([[210, 9, 8]]).T
        labeled_z = LabeledMeasurement(tag = tag, time = 1., value = z)
        filt = self.multiplef_ta
        filt.x = X
        filt._last_t = 0.5
        dt = time - filt._last_t
        new_last_t = time
        filt.predict()
        filt.update(labeled_z)
        self.assertEqual(new_last_t, filt._last_t)
        self.assertEqual(dt, filt.dt)

    def test_update(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        tag = 0
        z = np.array([[200, 10, 10]]).T
        labeled_z = LabeledMeasurement(tag = tag, value = z, time = 1.)
        filt = self.multiplef_ta
        filt.x = X
        filt.predict()
        # Re-derive the EKF update by hand (Joseph-form covariance update)
        # using the tag-selected H/hx and the expanded measurement vector.
        H = filt.HJacob(filt.x, tag = tag)
        S = H@filt.P@H.T + filt.R
        K = filt.P@H.T@inv(S)
        hx = filt.hx(filt.x, tag = tag)
        z_input = filt.gen_complete_measurement(tag = tag, z = z)
        y = z_input - hx
        new_X = filt.x + K@y
        IKH = (filt._I - K@H)
        new_P = (IKH@filt.P)@IKH.T + (K@filt.R)@K.T
        filt.update(labeled_z)
        self.assertTrue(np.allclose(filt.P,new_P))
        self.assertTrue(np.allclose(filt.x,new_X))
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| # -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 09:19:36 2019
@author: qde
"""
import unittest
import numpy as np
from pprint import pprint
from copy import deepcopy
from math import sqrt,atan2, exp
from nose.tools import raises
from numpy.linalg import inv
from scipy.linalg import block_diag
from fdia_simulation.models import Radar, LabeledMeasurement
from fdia_simulation.filters import RadarFilterTA, MultipleRadarsFilterTA, MultiplePeriodRadarsFilterTA
class RadarFilterTATestCase(unittest.TestCase):
    """Unit tests for RadarFilterTA: a single-radar extended Kalman filter
    with a Thrust Acceleration (TA) motion model.

    State vector (9-dim): [x, vx, ax, y, vy, ay, z, vz, az].
    Measurement (3-dim): spherical coordinates (r, theta, phi) of the
    target as seen from the radar.
    """

    def setUp(self):
        # One radar at the origin; q is the process-noise intensity.
        self.radar = Radar(x=0,y=0)
        self.q = 10.
        self.filter_ta = RadarFilterTA(dim_x = 9, dim_z = 3, q = self.q,radar = self.radar)

    # ==========================================================================
    # ========================= Initialization tests ===========================

    def test_initial_F(self):
        # TA model: position/velocity blocks are coupled through exp(dt) terms.
        dt = self.filter_ta.dt
        edt = exp(dt)
        F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
                      [0, edt, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1,edt-1, 0, 0, 0, 0],
                      [0, 0, 0, 0, edt, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1,edt-1, 0],
                      [0, 0, 0, 0, 0, 0, 0, edt, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]])
        self.assertTrue(np.array_equal(self.filter_ta.F,F))

    def test_initial_Q(self):
        # Process noise only enters on the acceleration components (rows 2,5,8).
        dt = self.filter_ta.dt
        q = self.q
        Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0,dt, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0,dt, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0,dt]])
        self.assertTrue(np.array_equal(self.filter_ta.Q,Q))

    def test_initial_R(self):
        # Measurement noise for one radar: diag over (r, theta, phi).
        dt = self.filter_ta.dt
        R = np.array([[1., 0.   , 0.   ],
                      [0., 0.001, 0.   ],
                      [0., 0.   , 0.001]])
        self.assertTrue(np.array_equal(self.filter_ta.R,R))

    def test_initial_positions(self):
        # Default state is initialized near-zero (1e-6) in every component.
        x0 = self.filter_ta.x[0,0]
        y0 = self.filter_ta.x[3,0]
        z0 = self.filter_ta.x[6,0]
        self.assertEqual(x0, 1e-6)
        self.assertEqual(y0, 1e-6)
        self.assertEqual(z0, 1e-6)

    def test_initial_velocities(self):
        vx0 = self.filter_ta.x[1,0]
        vy0 = self.filter_ta.x[4,0]
        vz0 = self.filter_ta.x[7,0]
        self.assertEqual(vx0, 1e-6)
        self.assertEqual(vy0, 1e-6)
        self.assertEqual(vz0, 1e-6)

    def test_initial_accelerations(self):
        vx0 = self.filter_ta.x[2,0]
        vy0 = self.filter_ta.x[5,0]
        vz0 = self.filter_ta.x[8,0]
        self.assertEqual(vx0, 1e-6)
        self.assertEqual(vy0, 1e-6)
        self.assertEqual(vz0, 1e-6)

    def test_initial_radar_positions(self):
        # The filter mirrors the radar position it was built with (origin here).
        x_rad = self.filter_ta.x_rad
        y_rad = self.filter_ta.y_rad
        z_rad = self.filter_ta.z_rad
        self.assertEqual(x_rad, 0.)
        self.assertEqual(y_rad, 0.)
        self.assertEqual(z_rad, 0.)

    # ==========================================================================
    # ========================= Q/F generation tests ===========================

    def test_F_computing(self):
        # compute_F must both return F and store it on the filter.
        dt = 5.
        self.filter_ta.dt = dt
        edt = exp(dt)
        F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
                      [0, edt, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1,edt-1, 0, 0, 0, 0],
                      [0, 0, 0, 0, edt, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1,edt-1, 0],
                      [0, 0, 0, 0, 0, 0, 0, edt, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]])
        computed_F = self.filter_ta.compute_F(self.filter_ta.x)
        self.assertTrue(np.array_equal(self.filter_ta.F,F))
        self.assertTrue(np.array_equal(computed_F,F))

    def test_Q_computing(self):
        # compute_Q must both return Q and store it on the filter.
        dt = 5.
        q = 20.
        Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0,dt, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0,dt, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0,dt]])
        self.filter_ta.dt = dt
        computed_Q = self.filter_ta.compute_Q(q)
        self.assertTrue(np.array_equal(self.filter_ta.Q,Q))
        self.assertTrue(np.array_equal(computed_Q,Q))

    # ==========================================================================
    # ========================= hx/HJacob tests ================================

    def test_HJacob_computing(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        x = X[0,0]
        y = X[3,0]
        z = X[6,0]
        # Jacobian of the cartesian -> (r, theta, phi) measurement function.
        H = np.array([[x/sqrt(x**2 + y**2 + z**2), 0, 0, y/sqrt(x**2 + y**2 + z**2), 0, 0, z/sqrt(x**2 + y**2 + z**2),0 ,0],
                      [-y/(x**2 + y**2), 0, 0, x/(x**2 + y**2), 0, 0, 0, 0, 0],
                      [-x*z/(sqrt(x**2 + y**2)*(x**2 + y**2 + z**2)), 0, 0, -y*z/(sqrt(x**2 + y**2)*(x**2 + y**2 + z**2)), 0, 0, sqrt(x**2 + y**2)/(x**2 + y**2 + z**2), 0, 0]])
        computed_H = self.filter_ta.HJacob(X)
        self.assertTrue(np.array_equal(computed_H,H))

    def test_hx_computing(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        x = X[0,0]
        y = X[3,0]
        z = X[6,0]
        # Expected spherical measurement of the state position.
        r = sqrt(x**2 + y**2 + z**2)
        theta = atan2(y,x)
        phi = atan2(z,sqrt(x**2 + y**2))
        Zk = np.array([[r,theta,phi]]).T
        computed_Zk = self.filter_ta.hx(X)
        self.assertTrue(np.array_equal(Zk,computed_Zk))

    # ==========================================================================
    # ========================= predict/update cycle tests =====================

    def test_residual_of(self):
        # residual = z - H(x) @ x_prior, with H linearized at the current state.
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        X_prior = np.array([[2000, 200, 20, 2000, 200, 20, 8000, 2, 10]]).T
        z = np.array([[200, 10, 10]]).T
        computed_resid = z - self.filter_ta.HJacob(X)@X_prior
        self.filter_ta.x = X
        self.filter_ta.x_prior = X_prior
        resid = self.filter_ta.residual_of(z)
        self.assertTrue(np.array_equal(computed_resid,resid))

    def test_predict(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        filt = self.filter_ta
        filt.x = X
        pre_F = deepcopy(filt.F)
        F = filt.compute_F(X)
        P = filt.P
        Q = filt.Q
        # Hand-computed EKF prediction equations.
        predicted_X = F@X
        predicted_P = F@P@F.T + Q
        filt.F = pre_F # Needed to keep F unaltered as before the predict step
        filt.predict()
        self.assertTrue(np.array_equal(predicted_X,filt.x))
        self.assertTrue(np.array_equal(predicted_P,filt.P))
        self.assertTrue(np.array_equal(predicted_X,filt.x_prior))
        self.assertTrue(np.array_equal(predicted_P,filt.P_prior))

    def test_update(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        z = np.array([[200, 10, 10]]).T
        filt = self.filter_ta
        filt.x = X
        filt.predict()
        # Re-derive the EKF update by hand (Joseph-form covariance update).
        H = filt.HJacob(filt.x)
        S = H@filt.P@H.T + filt.R
        K = filt.P@H.T@inv(S)
        hx = filt.hx(filt.x)
        y = z - hx
        new_X = filt.x + K@y
        IKH = (filt._I - K@H)
        new_P = (IKH@filt.P)@IKH.T + (K@filt.R)@K.T
        filt.update(z)
        self.assertTrue(np.array_equal(filt.P,new_P))
        self.assertTrue(np.array_equal(filt.x,new_X))
class MultipleRadarsTATestCase(unittest.TestCase):
    """Unit tests for MultipleRadarsFilterTA: the TA model observed by two
    radars simultaneously, so the measurement, R and the Jacobian H are
    vertically stacked (6 rows: 3 per radar)."""

    def setUp(self):
        # Two radars at distinct positions; target initialized at (100, 100).
        self.radar1 = Radar(x=800,y=800)
        self.radar2 = Radar(x=200,y=200)
        radars = [self.radar1,self.radar2]
        self.multiple_ta = MultipleRadarsFilterTA(dim_x = 9, dim_z = 3, q = 1., radars = radars,
                                                  x0 = 100, y0 = 100)

    # ==========================================================================
    # ========================= Initialization tests ===========================

    def test_initial_radar_positions(self):
        positions = [[self.radar1.x,self.radar1.y,self.radar1.z],[self.radar2.x,self.radar2.y,self.radar2.z]]
        computed_positions = self.multiple_ta.radar_positions
        self.assertEqual(computed_positions,positions)

    def test_initial_R(self):
        # Block-diagonal stack of the two radars' (r, theta, phi) noises.
        dt = self.multiple_ta.dt
        R = np.array([[1., 0. , 0. , 0., 0. , 0. ],
                      [0., 0.001, 0. , 0., 0. , 0. ],
                      [0., 0. , 0.001, 0., 0. , 0. ],
                      [0., 0. , 0. , 1., 0. , 0. ],
                      [0., 0. , 0. , 0., 0.001, 0. ],
                      [0., 0. , 0. , 0., 0. , 0.001]])
        self.assertTrue(np.array_equal(self.multiple_ta.R,R))

    def test_initial_F(self):
        # State model is unchanged by the number of radars.
        dt = self.multiple_ta.dt
        edt = exp(dt)
        F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
                      [0, edt, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1,edt-1, 0, 0, 0, 0],
                      [0, 0, 0, 0, edt, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1,edt-1, 0],
                      [0, 0, 0, 0, 0, 0, 0, edt, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]])
        self.assertTrue(np.array_equal(self.multiple_ta.F,F))

    # ==========================================================================
    # ========================= Q/F generation tests ===========================

    def test_F_computing(self):
        dt = 5.
        self.multiple_ta.dt = dt
        edt = exp(dt)
        F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
                      [0, edt, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1,edt-1, 0, 0, 0, 0],
                      [0, 0, 0, 0, edt, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1,edt-1, 0],
                      [0, 0, 0, 0, 0, 0, 0, edt, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]])
        computed_F = self.multiple_ta.compute_F(self.multiple_ta.x)
        self.assertTrue(np.array_equal(self.multiple_ta.F,F))
        self.assertTrue(np.array_equal(computed_F,F))

    def test_Q_computing(self):
        dt = 5.
        q = 20.
        # Noise on acceleration components only, as in the single-radar case.
        Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0,dt, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0,dt, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0,dt]])
        self.multiple_ta.dt = dt
        computed_Q = self.multiple_ta.compute_Q(q)
        self.assertTrue(np.array_equal(self.multiple_ta.Q,Q))
        self.assertTrue(np.array_equal(computed_Q,Q))

    # ==========================================================================
    # ============================= HJacob/hx generation =======================

    def test_HJacob_computing(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        # Positions relative to each radar (measurements are radar-centric).
        x1 = X[0,0] - self.radar1.x
        y1 = X[3,0] - self.radar1.y
        z1 = X[6,0] - self.radar1.z
        x2 = X[0,0] - self.radar2.x
        y2 = X[3,0] - self.radar2.y
        z2 = X[6,0] - self.radar2.z
        # Stacked Jacobian: rows 0-2 for radar1, rows 3-5 for radar2.
        H = np.array([[x1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, y1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, z1/sqrt(x1**2 + y1**2 + z1**2),0 ,0],
                      [-y1/(x1**2 + y1**2), 0, 0, x1/(x1**2 + y1**2), 0, 0, 0, 0, 0],
                      [-x1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, -y1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, sqrt(x1**2 + y1**2)/(x1**2 + y1**2 + z1**2), 0, 0],
                      [x2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, y2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, z2/sqrt(x2**2 + y2**2 + z2**2),0 ,0],
                      [-y2/(x2**2 + y2**2), 0, 0, x2/(x2**2 + y2**2), 0, 0, 0, 0, 0],
                      [-x2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, -y2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, sqrt(x2**2 + y2**2)/(x2**2 + y2**2 + z2**2), 0, 0]])
        computed_H = self.multiple_ta.HJacob(X)
        self.assertTrue(np.array_equal(computed_H,H))

    def test_hx_computing(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        x1 = X[0,0] - self.radar1.x
        y1 = X[3,0] - self.radar1.y
        z1 = X[6,0] - self.radar1.z
        x2 = X[0,0] - self.radar2.x
        y2 = X[3,0] - self.radar2.y
        z2 = X[6,0] - self.radar2.z
        # Expected stacked measurement: (r, theta, phi) per radar.
        r1 = sqrt(x1**2 + y1**2 + z1**2)
        theta1 = atan2(y1,x1)
        phi1 = atan2(z1,sqrt(x1**2 + y1**2))
        r2 = sqrt(x2**2 + y2**2 + z2**2)
        theta2 = atan2(y2,x2)
        phi2 = atan2(z2,sqrt(x2**2 + y2**2))
        Zk = np.array([[r1,theta1,phi1,r2,theta2,phi2]]).T
        computed_Zk = self.multiple_ta.hx(X)
        self.assertTrue(np.array_equal(Zk,computed_Zk))

    # ==========================================================================
    # ========================= predict/update cycle tests =====================

    def test_residual_of(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        X_prior = np.array([[2000, 200, 20, 2000, 200, 20, 8000, 2, 10]]).T
        z = np.array([[200, 10, 10, 210, 9, 8]]).T
        computed_resid = z - self.multiple_ta.HJacob(X)@X_prior
        self.multiple_ta.x = X
        self.multiple_ta.x_prior = X_prior
        resid = self.multiple_ta.residual_of(z)
        self.assertTrue(np.array_equal(computed_resid,resid))

    def test_predict(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        filt = self.multiple_ta
        filt.x = X
        # Hand-computed EKF prediction equations.
        predicted_X = filt.F@filt.x
        predicted_P = filt.F@filt.P@filt.F.T + filt.Q
        filt.predict()
        self.assertTrue(np.array_equal(predicted_X,filt.x))
        self.assertTrue(np.array_equal(predicted_P,filt.P))
        self.assertTrue(np.array_equal(predicted_X,filt.x_prior))
        self.assertTrue(np.array_equal(predicted_P,filt.P_prior))

    def test_update(self):
        X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
        z = np.array([[200, 10, 10, 210, 9, 8]]).T
        filt = self.multiple_ta
        filt.x = X
        filt.predict()
        # Re-derive the EKF update by hand (Joseph-form covariance update).
        H = filt.HJacob(filt.x)
        S = H@filt.P@H.T + filt.R
        K = filt.P@H.T@inv(S)
        hx = filt.hx(filt.x)
        y = z - hx
        new_X = filt.x + K@y
        IKH = (filt._I - K@H)
        new_P = (IKH@filt.P)@IKH.T + (K@filt.R)@K.T
        filt.update(z)
        self.assertTrue(np.allclose(filt.P,new_P))
        self.assertTrue(np.allclose(filt.x,new_X))
class MultiplePeriodRadarsTATestCase(unittest.TestCase):
def setUp(self):
self.radar1 = Radar(x=800,y=800)
self.radar2 = Radar(x=200,y=200)
radars = [self.radar1,self.radar2]
self.q = 10.
self.multiplef_ta = MultiplePeriodRadarsFilterTA(dim_x = 9, dim_z = 3, q = self.q,
radars = radars,
x0 = 100, y0 = 100)
# ==========================================================================
# ========================= Initialization tests ===========================
def test_initial_radar_positions(self):
positions = [[self.radar1.x,self.radar1.y,self.radar1.z],[self.radar2.x,self.radar2.y,self.radar2.z]]
computed_positions = self.multiplef_ta.radar_positions
self.assertEqual(computed_positions,positions)
def test_initial_R(self):
dt = self.multiplef_ta.dt
R = np.array([[1., 0. , 0. , 0., 0. , 0. ],
[0., 0.001, 0. , 0., 0. , 0. ],
[0., 0. , 0.001, 0., 0. , 0. ],
[0., 0. , 0. , 1., 0. , 0. ],
[0., 0. , 0. , 0., 0.001, 0. ],
[0., 0. , 0. , 0., 0. , 0.001]])
self.assertTrue(np.array_equal(self.multiplef_ta.R,R))
def test_initial_F(self):
dt = self.multiplef_ta.dt
edt = exp(dt)
F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
[0, edt, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1,edt-1, 0, 0, 0, 0],
[0, 0, 0, 0, edt, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1,edt-1, 0],
[0, 0, 0, 0, 0, 0, 0, edt, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]])
self.assertTrue(np.array_equal(self.multiplef_ta.F,F))
def test_initial_Q(self):
dt = self.multiplef_ta.dt
q = self.q
Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0,dt, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0,dt, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0,dt]])
self.assertTrue(np.array_equal(self.multiplef_ta.Q,Q))
def test_tag_radars(self):
self.assertEqual(self.radar1.tag, 0)
self.assertEqual(self.radar2.tag, 1)
# ==========================================================================
# ========================= Q/F generation tests ===========================
def test_F_computing(self):
dt = 5.
edt = exp(dt)
F = np.array([[1,edt-1, 0, 0, 0, 0, 0, 0, 0],
[0, edt, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1,edt-1, 0, 0, 0, 0],
[0, 0, 0, 0, edt, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1,edt-1, 0],
[0, 0, 0, 0, 0, 0, 0, edt, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]])
self.multiplef_ta.dt = dt
computed_F = self.multiplef_ta.compute_F(self.multiplef_ta.x)
self.assertTrue(np.array_equal(self.multiplef_ta.F,F))
self.assertTrue(np.array_equal(computed_F,F))
def test_Q_computing(self):
dt = 5.
q = 20.
Q = q*np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0,dt, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0,dt, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0,dt]])
self.multiplef_ta.dt = dt
computed_Q = self.multiplef_ta.compute_Q(q)
self.assertTrue(np.array_equal(self.multiplef_ta.Q,Q))
self.assertTrue(np.array_equal(computed_Q,Q))
# ==========================================================================
# ============================= HJacob/hx generation =======================
def test_HJacob_computing_tag_is_0(self):
X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
tag = 0
x2 = X[0,0] - self.radar2.x
y2 = X[3,0] - self.radar2.y
z2 = X[6,0] - self.radar2.z
H = np.array([[x1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, y1/sqrt(x1**2 + y1**2 + z1**2), 0, 0, z1/sqrt(x1**2 + y1**2 + z1**2),0 ,0],
[-y1/(x1**2 + y1**2), 0, 0, x1/(x1**2 + y1**2), 0, 0, 0, 0, 0],
[-x1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, -y1*z1/(sqrt(x1**2 + y1**2)*(x1**2 + y1**2 + z1**2)), 0, 0, sqrt(x1**2 + y1**2)/(x1**2 + y1**2 + z1**2), 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
computed_H = self.multiplef_ta.HJacob(X,tag = tag)
self.assertTrue(np.array_equal(computed_H,H))
def test_HJacob_computing_tag_is_0(self):
X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
tag = 1
x2 = X[0,0] - self.radar2.x
y2 = X[3,0] - self.radar2.y
z2 = X[6,0] - self.radar2.z
H = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[x2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, y2/sqrt(x2**2 + y2**2 + z2**2), 0, 0, z2/sqrt(x2**2 + y2**2 + z2**2),0 ,0],
[-y2/(x2**2 + y2**2), 0, 0, x2/(x2**2 + y2**2), 0, 0, 0, 0, 0],
[-x2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, -y2*z2/(sqrt(x2**2 + y2**2)*(x2**2 + y2**2 + z2**2)), 0, 0, sqrt(x2**2 + y2**2)/(x2**2 + y2**2 + z2**2), 0, 0]])
computed_H = self.multiplef_ta.HJacob(X,tag = tag)
self.assertTrue(np.array_equal(computed_H,H))
def test_hx_computing_tag_is_0(self):
X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
tag = 0
x1 = X[0,0] - self.radar1.x
y1 = X[3,0] - self.radar1.y
z1 = X[6,0] - self.radar1.z
r1 = sqrt(x1**2 + y1**2 + z1**2)
theta1 = atan2(y1,x1)
phi1 = atan2(z1,sqrt(x1**2 + y1**2))
r2 = 0
theta2 = 0
phi2 = 0
Zk = np.array([[r1,theta1,phi1,r2,theta2,phi2]]).T
computed_Zk = self.multiplef_ta.hx(X, tag = tag)
self.assertTrue(np.array_equal(Zk,computed_Zk))
def test_hx_computing_tag_is_1(self):
X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
tag = 1
x2 = X[0,0] - self.radar2.x
y2 = X[3,0] - self.radar2.y
z2 = X[6,0] - self.radar2.z
r1 = 0
theta1 = 0
phi1 = 0
r2 = sqrt(x2**2 + y2**2 + z2**2)
theta2 = atan2(y2,x2)
phi2 = atan2(z2,sqrt(x2**2 + y2**2))
Zk = np.array([[r1,theta1,phi1,r2,theta2,phi2]]).T
computed_Zk = self.multiplef_ta.hx(X, tag = tag)
self.assertTrue(np.array_equal(Zk,computed_Zk))
# ==========================================================================
# ========================= predict/update cycle tests =====================
def test_residual_of(self):
X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
X_prior = np.array([[2000, 200, 20, 2000, 200, 20, 8000, 2, 10]]).T
z = np.array([[200, 10, 10]]).T
tag = 0
z_input = self.multiplef_ta.gen_complete_measurement(tag = tag, z = z)
computed_resid = z_input - self.multiplef_ta.HJacob(X,tag = 0)@X_prior
self.multiplef_ta.x = X
self.multiplef_ta.x_prior = X_prior
resid = self.multiplef_ta.residual_of(z = z, tag = tag)
self.assertTrue(np.array_equal(computed_resid,resid))
def test_predict(self):
X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
filt = self.multiplef_ta
filt.x = X
predicted_X = filt.F@filt.x
predicted_P = filt.F@filt.P@filt.F.T + filt.Q
filt.predict()
self.assertTrue(np.array_equal(predicted_X,filt.x))
self.assertTrue(np.array_equal(predicted_P,filt.P))
self.assertTrue(np.array_equal(predicted_X,filt.x_prior))
self.assertTrue(np.array_equal(predicted_P,filt.P_prior))
def test_update_times(self):
X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
tag = 1
time = 1.
z = np.array([[210, 9, 8]]).T
labeled_z = LabeledMeasurement(tag = tag, time = 1., value = z)
filt = self.multiplef_ta
filt.x = X
filt._last_t = 0.5
dt = time - filt._last_t
new_last_t = time
filt.predict()
filt.update(labeled_z)
self.assertEqual(new_last_t, filt._last_t)
self.assertEqual(dt, filt.dt)
def test_update(self):
X = np.array([[1000, 100, 10, 1000, 100, 10, 8000, 2, 10]]).T
tag = 0
z = np.array([[200, 10, 10]]).T
labeled_z = LabeledMeasurement(tag = tag, value = z, time = 1.)
filt = self.multiplef_ta
filt.x = X
filt.predict()
H = filt.HJacob(filt.x, tag = tag)
S = H@filt.P@H.T + filt.R
K = filt.P@H.T@inv(S)
hx = filt.hx(filt.x, tag = tag)
z_input = filt.gen_complete_measurement(tag = tag, z = z)
y = z_input - hx
new_X = filt.x + K@y
IKH = (filt._I - K@H)
new_P = (IKH@filt.P)@IKH.T + (K@filt.R)@K.T
filt.update(labeled_z)
self.assertTrue(np.allclose(filt.P,new_P))
self.assertTrue(np.allclose(filt.x,new_X))
if __name__ == "__main__":
unittest.main() | en | 0.436819 | # -*- coding: utf-8 -*- Created on Mon Jul 22 09:19:36 2019 @author: qde # ========================================================================== # ========================= Initialization tests =========================== # ========================================================================== # ========================= Q/F generation tests =========================== # ========================================================================== # ========================= hx/HJacob tests ================================ # ========================================================================== # ========================= predict/update cycle tests ===================== # Needed to keep F unaltered as before the predict step # ========================================================================== # ========================= Initialization tests =========================== # ========================================================================== # ========================= Q/F generation tests =========================== # ========================================================================== # ============================= HJacob/hx generation ======================= # ========================================================================== # ========================= predict/update cycle tests ===================== # ========================================================================== # ========================= Initialization tests =========================== # ========================================================================== # ========================= Q/F generation tests =========================== # ========================================================================== # ============================= HJacob/hx generation ======================= # ========================================================================== # ========================= predict/update cycle tests 
===================== | 2.291819 | 2 |
gas/outside_solution.py | corbinmcneill/codejam | 0 | 6624414 | #!/usr/bin/env python3
# Round 1C 2012
import sys
from fractions import Fraction
from math import sqrt
line = sys.stdin.readline()
fields = line.split()
assert len(fields) == 1
ntc = int(fields[0])
def solve(d, a, other_car):
wait_time = Fraction(0)
first = True
for time, distance in other_car:
if distance > d:
if first:
break
time = last_time + (time - last_time) * (d - last_distance) / (distance - last_distance)
distance = d
first = False
arrival_time = sqrt(2 * distance / a)
if arrival_time < time:
cur_wait_time = time - arrival_time
else:
cur_wait_time = Fraction(0)
if cur_wait_time > wait_time:
wait_time = cur_wait_time
last_time, last_distance = time, distance
arrival_time = sqrt(2 * d / a)
return wait_time + arrival_time
for tc in range(1, ntc + 1):
line = sys.stdin.readline()
fields = line.split()
assert len(fields) == 3
d = Fraction(fields[0])
n = int(fields[1])
a = int(fields[2])
other_car = []
for _ in range(n):
line = sys.stdin.readline()
fields = line.split()
assert len(fields) == 2
time = Fraction(fields[0])
distance = Fraction(fields[1])
other_car.append((time, distance))
line = sys.stdin.readline()
fields = line.split()
assert len(fields) == a
print('Case #{0}:'.format(tc))
for i in range(a):
accel = Fraction(fields[i])
ans = solve(d, accel, other_car)
print(ans)
| #!/usr/bin/env python3
# Round 1C 2012
import sys
from fractions import Fraction
from math import sqrt
line = sys.stdin.readline()
fields = line.split()
assert len(fields) == 1
ntc = int(fields[0])
def solve(d, a, other_car):
wait_time = Fraction(0)
first = True
for time, distance in other_car:
if distance > d:
if first:
break
time = last_time + (time - last_time) * (d - last_distance) / (distance - last_distance)
distance = d
first = False
arrival_time = sqrt(2 * distance / a)
if arrival_time < time:
cur_wait_time = time - arrival_time
else:
cur_wait_time = Fraction(0)
if cur_wait_time > wait_time:
wait_time = cur_wait_time
last_time, last_distance = time, distance
arrival_time = sqrt(2 * d / a)
return wait_time + arrival_time
for tc in range(1, ntc + 1):
line = sys.stdin.readline()
fields = line.split()
assert len(fields) == 3
d = Fraction(fields[0])
n = int(fields[1])
a = int(fields[2])
other_car = []
for _ in range(n):
line = sys.stdin.readline()
fields = line.split()
assert len(fields) == 2
time = Fraction(fields[0])
distance = Fraction(fields[1])
other_car.append((time, distance))
line = sys.stdin.readline()
fields = line.split()
assert len(fields) == a
print('Case #{0}:'.format(tc))
for i in range(a):
accel = Fraction(fields[i])
ans = solve(d, accel, other_car)
print(ans)
| en | 0.175623 | #!/usr/bin/env python3 # Round 1C 2012 #{0}:'.format(tc)) | 3.663368 | 4 |
01_syntax/09_set.py | amamov/Pythonic | 2 | 6624415 | #### set : 데이터의 중복이 없고 순서가 없다. 데이터의 중복을 확인할때 주로 많이 사용하는 자료형이다.
#### 순서(X), 중복(X), 수정(O), 삭제(O)
"""
교집합 연산 : & ex) set1 & set2
합집합 연산 : | ex) set1 | set2
차집합 연산 : -
"""
my_set_1 = set((1, 2, 3))
my_set_2 = set((3, 4, 5))
print(my_set_1) # {1, 2, 3}
print(my_set_1 & my_set_2) # {3}
print(my_set_1 | my_set_2) # {1, 2, 3, 4, 5}
print(my_set_1 - my_set_2) # {1, 2}
# 추가
my_set_1.add(18)
print(my_set_1) # {18, 1, 2, 3}
# 제거
my_set_1.remove(18)
print(my_set_1) # {1, 2, 3} | #### set : 데이터의 중복이 없고 순서가 없다. 데이터의 중복을 확인할때 주로 많이 사용하는 자료형이다.
#### 순서(X), 중복(X), 수정(O), 삭제(O)
"""
교집합 연산 : & ex) set1 & set2
합집합 연산 : | ex) set1 | set2
차집합 연산 : -
"""
my_set_1 = set((1, 2, 3))
my_set_2 = set((3, 4, 5))
print(my_set_1) # {1, 2, 3}
print(my_set_1 & my_set_2) # {3}
print(my_set_1 | my_set_2) # {1, 2, 3, 4, 5}
print(my_set_1 - my_set_2) # {1, 2}
# 추가
my_set_1.add(18)
print(my_set_1) # {18, 1, 2, 3}
# 제거
my_set_1.remove(18)
print(my_set_1) # {1, 2, 3} | ko | 0.997709 | #### set : 데이터의 중복이 없고 순서가 없다. 데이터의 중복을 확인할때 주로 많이 사용하는 자료형이다. #### 순서(X), 중복(X), 수정(O), 삭제(O) 교집합 연산 : & ex) set1 & set2 합집합 연산 : | ex) set1 | set2 차집합 연산 : - # {1, 2, 3} # {3} # {1, 2, 3, 4, 5} # {1, 2} # 추가 # {18, 1, 2, 3} # 제거 # {1, 2, 3} | 4.185008 | 4 |
hello-strongbox-pypi/setup.py | BjoernAkAManf/strongbox-examples | 0 | 6624416 | <filename>hello-strongbox-pypi/setup.py<gh_stars>0
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="hello-world-pypi",
packages = ['hello-world-pypi'],
license='Apache 2.0',
version="1.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="Sample Hello world package",
long_description="This is long description",
long_description_content_type="text/markdown",
url="https://github.com/anki2189/strongbox-examples",
keywords = ['Hello', 'world', 'pypi'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'pip-hello-world',
],
) | <filename>hello-strongbox-pypi/setup.py<gh_stars>0
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="hello-world-pypi",
packages = ['hello-world-pypi'],
license='Apache 2.0',
version="1.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="Sample Hello world package",
long_description="This is long description",
long_description_content_type="text/markdown",
url="https://github.com/anki2189/strongbox-examples",
keywords = ['Hello', 'world', 'pypi'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'pip-hello-world',
],
) | none | 1 | 1.845983 | 2 | |
volview/slice_widget.py | cmbruns/volview | 0 | 6624417 | import math
from PySide6 import QtCore, QtGui, QtWidgets
from PySide6.QtCore import Qt
class SliceWidget(QtWidgets.QWidget):
"""
Canvas widget showing 2D slice of volume rendering stuff.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.eye_image = QtGui.QPixmap("EyeIcon2.png")
self.scaled_eye = self.eye_image.scaledToWidth(
self.eye_image.width() // 2,
Qt.SmoothTransformation,
)
self.setMouseTracking(True) # enable hover for mouseMoveEvent
self.angle = 0
def _eye_xy(self):
x = self.width() / 2
y = self.height() - 40
return x, y
def mouseMoveEvent(self, event: QtGui.QMouseEvent):
x = event.pos().x()
y = event.pos().y()
ex, ey = self._eye_xy()
dx = x - ex
dy = ey - y
self.angle = math.degrees(math.atan2(dx, dy))
# print(self.angle)
self.update()
def paintEvent(self, event: QtGui.QPaintEvent):
# print("paintEvent")
super().paintEvent(event)
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
x, y = self._eye_xy()
painter.translate(x, y)
img = self.scaled_eye
x = -img.width() / 2
y = -img.height() / 2
painter.rotate(self.angle)
# view ray
pen = QtGui.QPen(Qt.green)
pen.setStyle(Qt.DashLine)
pen.setWidth(2)
pen.setBrush(QtGui.QBrush(QtGui.QColor(0, 255, 0, 100)))
painter.setPen(pen)
painter.drawLine(0, 0, 0, -(self.height() + self.width()))
# picture of eye
painter.translate(x, y)
painter.drawPixmap(0, 0, img)
| import math
from PySide6 import QtCore, QtGui, QtWidgets
from PySide6.QtCore import Qt
class SliceWidget(QtWidgets.QWidget):
"""
Canvas widget showing 2D slice of volume rendering stuff.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.eye_image = QtGui.QPixmap("EyeIcon2.png")
self.scaled_eye = self.eye_image.scaledToWidth(
self.eye_image.width() // 2,
Qt.SmoothTransformation,
)
self.setMouseTracking(True) # enable hover for mouseMoveEvent
self.angle = 0
def _eye_xy(self):
x = self.width() / 2
y = self.height() - 40
return x, y
def mouseMoveEvent(self, event: QtGui.QMouseEvent):
x = event.pos().x()
y = event.pos().y()
ex, ey = self._eye_xy()
dx = x - ex
dy = ey - y
self.angle = math.degrees(math.atan2(dx, dy))
# print(self.angle)
self.update()
def paintEvent(self, event: QtGui.QPaintEvent):
# print("paintEvent")
super().paintEvent(event)
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
x, y = self._eye_xy()
painter.translate(x, y)
img = self.scaled_eye
x = -img.width() / 2
y = -img.height() / 2
painter.rotate(self.angle)
# view ray
pen = QtGui.QPen(Qt.green)
pen.setStyle(Qt.DashLine)
pen.setWidth(2)
pen.setBrush(QtGui.QBrush(QtGui.QColor(0, 255, 0, 100)))
painter.setPen(pen)
painter.drawLine(0, 0, 0, -(self.height() + self.width()))
# picture of eye
painter.translate(x, y)
painter.drawPixmap(0, 0, img)
| en | 0.608695 | Canvas widget showing 2D slice of volume rendering stuff. # enable hover for mouseMoveEvent # print(self.angle) # print("paintEvent") # view ray # picture of eye | 2.731307 | 3 |
setup.py | lrq3000/fdict | 8 | 6624418 | <reponame>lrq3000/fdict
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
from subprocess import check_call
from io import open as io_open
# For Makefile parsing
import shlex
try: # pragma: no cover
import ConfigParser
import StringIO
except ImportError: # pragma: no cover
import configparser as ConfigParser
import io as StringIO
import re
# Makefile auxiliary functions #
RE_MAKE_CMD = re.compile('^\t(@\+?)(make)?', flags=re.M)
def parse_makefile_aliases(filepath):
'''
Parse a makefile to find commands and substitute variables. Expects a
makefile with only aliases and a line return between each command.
Returns a dict, with a list of commands for each alias.
'''
# -- Parsing the Makefile using ConfigParser
# Adding a fake section to make the Makefile a valid Ini file
ini_str = '[root]\n'
with io_open(filepath, mode='r') as fd:
ini_str = ini_str + RE_MAKE_CMD.sub('\t', fd.read())
ini_fp = StringIO.StringIO(ini_str)
# Parse using ConfigParser
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
# Fetch the list of aliases
aliases = config.options('root')
# -- Extracting commands for each alias
commands = {}
for alias in aliases:
if alias.lower() in ['.phony']:
continue
# strip the first line return, and then split by any line return
commands[alias] = config.get('root', alias).lstrip('\n').split('\n')
# -- Commands substitution
# Loop until all aliases are substituted by their commands:
# Check each command of each alias, and if there is one command that is to
# be substituted by an alias, try to do it right away. If this is not
# possible because this alias itself points to other aliases , then stop
# and put the current alias back in the queue to be processed again later.
# Create the queue of aliases to process
aliases_todo = list(commands.keys())
# Create the dict that will hold the full commands
commands_new = {}
# Loop until we have processed all aliases
while aliases_todo:
# Pick the first alias in the queue
alias = aliases_todo.pop(0)
# Create a new entry in the resulting dict
commands_new[alias] = []
# For each command of this alias
for cmd in commands[alias]:
# Ignore self-referencing (alias points to itself)
if cmd == alias:
pass
# Substitute full command
elif cmd in aliases and cmd in commands_new:
# Append all the commands referenced by the alias
commands_new[alias].extend(commands_new[cmd])
# Delay substituting another alias, waiting for the other alias to
# be substituted first
elif cmd in aliases and cmd not in commands_new:
# Delete the current entry to avoid other aliases
# to reference this one wrongly (as it is empty)
del commands_new[alias]
aliases_todo.append(alias)
break
# Full command (no aliases)
else:
commands_new[alias].append(cmd)
commands = commands_new
del commands_new
# -- Prepending prefix to avoid conflicts with standard setup.py commands
# for alias in commands.keys():
# commands['make_'+alias] = commands[alias]
# del commands[alias]
return commands
def execute_makefile_commands(commands, alias, verbose=False):
cmds = commands[alias]
for cmd in cmds:
# Parse string in a shell-like fashion
# (incl quoted strings and comments)
parsed_cmd = shlex.split(cmd, comments=True)
# Execute command if not empty (ie, not just a comment)
if parsed_cmd:
if verbose:
print("Running command: " + cmd)
# Launch the command and wait to finish (synchronized call)
check_call(parsed_cmd,
cwd=os.path.dirname(os.path.abspath(__file__)))
# Main setup.py config #
# Get version from fdict/_version.py
__version__ = None
version_file = os.path.join(os.path.dirname(__file__), 'fdict', '_version.py')
with io_open(version_file, mode='r') as fd:
exec(fd.read())
# Executing makefile commands if specified
if sys.argv[1].lower().strip() == 'make':
# Filename of the makefile
fpath = os.path.join(os.path.dirname(__file__), 'Makefile')
# Parse the makefile, substitute the aliases and extract the commands
commands = parse_makefile_aliases(fpath)
# If no alias (only `python setup.py make`), print the list of aliases
if len(sys.argv) < 3 or sys.argv[-1] == '--help':
print("Shortcut to use commands via aliases. List of aliases:")
print('\n'.join(alias for alias in sorted(commands.keys())))
# Else process the commands for this alias
else:
arg = sys.argv[-1]
# if unit testing, we do nothing (we just checked the makefile parsing)
if arg == 'none':
sys.exit(0)
# else if the alias exists, we execute its commands
elif arg in commands.keys():
execute_makefile_commands(commands, arg, verbose=True)
# else the alias cannot be found
else:
raise Exception("Provided alias cannot be found: make " + arg)
# Stop the processing of setup.py here:
# It's important to avoid setup.py raising an error because of the command
# not being standard
sys.exit(0)
# Python package config #
README_rst = ''
fndoc = os.path.join(os.path.dirname(__file__), 'README.rst')
with io_open(fndoc, mode='r', encoding='utf-8') as fd:
README_rst = fd.read()
setup(
name='fdict',
version=__version__,
description='Easy out-of-core computing of recursive dict',
license='MIT Licence',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/LRQ3000/fdict',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
platforms=['any'],
packages=['fdict'],
long_description=README_rst,
classifiers=[
# Trove classifiers
# (https://pypi.python.org/pypi?%3Aaction=list_classifiers)
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Framework :: IPython',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: SunOS/Solaris',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'Intended Audience :: Developers',
],
keywords='dict file disk out-of-core bigdata',
test_suite='nose.collector',
tests_require=['nose', 'coverage'],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
from subprocess import check_call
from io import open as io_open
# For Makefile parsing
import shlex
try: # pragma: no cover
import ConfigParser
import StringIO
except ImportError: # pragma: no cover
import configparser as ConfigParser
import io as StringIO
import re
# Makefile auxiliary functions #
RE_MAKE_CMD = re.compile('^\t(@\+?)(make)?', flags=re.M)
def parse_makefile_aliases(filepath):
'''
Parse a makefile to find commands and substitute variables. Expects a
makefile with only aliases and a line return between each command.
Returns a dict, with a list of commands for each alias.
'''
# -- Parsing the Makefile using ConfigParser
# Adding a fake section to make the Makefile a valid Ini file
ini_str = '[root]\n'
with io_open(filepath, mode='r') as fd:
ini_str = ini_str + RE_MAKE_CMD.sub('\t', fd.read())
ini_fp = StringIO.StringIO(ini_str)
# Parse using ConfigParser
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
# Fetch the list of aliases
aliases = config.options('root')
# -- Extracting commands for each alias
commands = {}
for alias in aliases:
if alias.lower() in ['.phony']:
continue
# strip the first line return, and then split by any line return
commands[alias] = config.get('root', alias).lstrip('\n').split('\n')
# -- Commands substitution
# Loop until all aliases are substituted by their commands:
# Check each command of each alias, and if there is one command that is to
# be substituted by an alias, try to do it right away. If this is not
# possible because this alias itself points to other aliases , then stop
# and put the current alias back in the queue to be processed again later.
# Create the queue of aliases to process
aliases_todo = list(commands.keys())
# Create the dict that will hold the full commands
commands_new = {}
# Loop until we have processed all aliases
while aliases_todo:
# Pick the first alias in the queue
alias = aliases_todo.pop(0)
# Create a new entry in the resulting dict
commands_new[alias] = []
# For each command of this alias
for cmd in commands[alias]:
# Ignore self-referencing (alias points to itself)
if cmd == alias:
pass
# Substitute full command
elif cmd in aliases and cmd in commands_new:
# Append all the commands referenced by the alias
commands_new[alias].extend(commands_new[cmd])
# Delay substituting another alias, waiting for the other alias to
# be substituted first
elif cmd in aliases and cmd not in commands_new:
# Delete the current entry to avoid other aliases
# to reference this one wrongly (as it is empty)
del commands_new[alias]
aliases_todo.append(alias)
break
# Full command (no aliases)
else:
commands_new[alias].append(cmd)
commands = commands_new
del commands_new
# -- Prepending prefix to avoid conflicts with standard setup.py commands
# for alias in commands.keys():
# commands['make_'+alias] = commands[alias]
# del commands[alias]
return commands
def execute_makefile_commands(commands, alias, verbose=False):
cmds = commands[alias]
for cmd in cmds:
# Parse string in a shell-like fashion
# (incl quoted strings and comments)
parsed_cmd = shlex.split(cmd, comments=True)
# Execute command if not empty (ie, not just a comment)
if parsed_cmd:
if verbose:
print("Running command: " + cmd)
# Launch the command and wait to finish (synchronized call)
check_call(parsed_cmd,
cwd=os.path.dirname(os.path.abspath(__file__)))
# Main setup.py config #
# Get version from fdict/_version.py
__version__ = None
version_file = os.path.join(os.path.dirname(__file__), 'fdict', '_version.py')
with io_open(version_file, mode='r') as fd:
exec(fd.read())
# Executing makefile commands if specified
if sys.argv[1].lower().strip() == 'make':
# Filename of the makefile
fpath = os.path.join(os.path.dirname(__file__), 'Makefile')
# Parse the makefile, substitute the aliases and extract the commands
commands = parse_makefile_aliases(fpath)
# If no alias (only `python setup.py make`), print the list of aliases
if len(sys.argv) < 3 or sys.argv[-1] == '--help':
print("Shortcut to use commands via aliases. List of aliases:")
print('\n'.join(alias for alias in sorted(commands.keys())))
# Else process the commands for this alias
else:
arg = sys.argv[-1]
# if unit testing, we do nothing (we just checked the makefile parsing)
if arg == 'none':
sys.exit(0)
# else if the alias exists, we execute its commands
elif arg in commands.keys():
execute_makefile_commands(commands, arg, verbose=True)
# else the alias cannot be found
else:
raise Exception("Provided alias cannot be found: make " + arg)
# Stop the processing of setup.py here:
# It's important to avoid setup.py raising an error because of the command
# not being standard
sys.exit(0)
# Python package config #
README_rst = ''
fndoc = os.path.join(os.path.dirname(__file__), 'README.rst')
with io_open(fndoc, mode='r', encoding='utf-8') as fd:
README_rst = fd.read()
setup(
name='fdict',
version=__version__,
description='Easy out-of-core computing of recursive dict',
license='MIT Licence',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/LRQ3000/fdict',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
platforms=['any'],
packages=['fdict'],
long_description=README_rst,
classifiers=[
# Trove classifiers
# (https://pypi.python.org/pypi?%3Aaction=list_classifiers)
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Framework :: IPython',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: SunOS/Solaris',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'Intended Audience :: Developers',
],
keywords='dict file disk out-of-core bigdata',
test_suite='nose.collector',
tests_require=['nose', 'coverage'],
) | en | 0.794776 | #!/usr/bin/env python # -*- coding: utf-8 -*- # For Makefile parsing # pragma: no cover # pragma: no cover # Makefile auxiliary functions # Parse a makefile to find commands and substitute variables. Expects a makefile with only aliases and a line return between each command. Returns a dict, with a list of commands for each alias. # -- Parsing the Makefile using ConfigParser # Adding a fake section to make the Makefile a valid Ini file # Parse using ConfigParser # Fetch the list of aliases # -- Extracting commands for each alias # strip the first line return, and then split by any line return # -- Commands substitution # Loop until all aliases are substituted by their commands: # Check each command of each alias, and if there is one command that is to # be substituted by an alias, try to do it right away. If this is not # possible because this alias itself points to other aliases , then stop # and put the current alias back in the queue to be processed again later. # Create the queue of aliases to process # Create the dict that will hold the full commands # Loop until we have processed all aliases # Pick the first alias in the queue # Create a new entry in the resulting dict # For each command of this alias # Ignore self-referencing (alias points to itself) # Substitute full command # Append all the commands referenced by the alias # Delay substituting another alias, waiting for the other alias to # be substituted first # Delete the current entry to avoid other aliases # to reference this one wrongly (as it is empty) # Full command (no aliases) # -- Prepending prefix to avoid conflicts with standard setup.py commands # for alias in commands.keys(): # commands['make_'+alias] = commands[alias] # del commands[alias] # Parse string in a shell-like fashion # (incl quoted strings and comments) # Execute command if not empty (ie, not just a comment) # Launch the command and wait to finish (synchronized call) # Main setup.py config # # Get version from 
fdict/_version.py # Executing makefile commands if specified # Filename of the makefile # Parse the makefile, substitute the aliases and extract the commands # If no alias (only `python setup.py make`), print the list of aliases # Else process the commands for this alias # if unit testing, we do nothing (we just checked the makefile parsing) # else if the alias exists, we execute its commands # else the alias cannot be found # Stop the processing of setup.py here: # It's important to avoid setup.py raising an error because of the command # not being standard # Python package config # # Trove classifiers # (https://pypi.python.org/pypi?%3Aaction=list_classifiers) | 2.188389 | 2 |
tools/wraptypes/preprocessor.py | regular/pyglet-avbin-optimizations | 2 | 6624419 | #!/usr/bin/env python
'''Preprocess a C source file.
Limitations:
* Whitespace is not preserved.
* # and ## operators not handled.
Reference is C99:
* http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
* Also understands Objective-C #import directive
* Also understands GNU #include_next
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import operator
import os.path
import cPickle
import re
import sys
import lex
from lex import TOKEN
import yacc
# Token names shared by the lexer and every yacc grammar in this module.
tokens = (
    # Preprocessing-token categories (C99 6.4).
    'HEADER_NAME', 'IDENTIFIER', 'PP_NUMBER', 'CHARACTER_CONSTANT',
    'STRING_LITERAL', 'OTHER',
    # Multi-character punctuators.
    'PTR_OP', 'INC_OP', 'DEC_OP', 'LEFT_OP', 'RIGHT_OP', 'LE_OP', 'GE_OP',
    'EQ_OP', 'NE_OP', 'AND_OP', 'OR_OP', 'MUL_ASSIGN', 'DIV_ASSIGN',
    'MOD_ASSIGN', 'ADD_ASSIGN', 'SUB_ASSIGN', 'LEFT_ASSIGN', 'RIGHT_ASSIGN',
    'AND_ASSIGN', 'XOR_ASSIGN', 'OR_ASSIGN', 'HASH_HASH', 'PERIOD',
    'ELLIPSIS',
    # Directive keywords (recognised only after a line-initial '#').
    'IF', 'IFDEF', 'IFNDEF', 'ELIF', 'ELSE', 'ENDIF', 'INCLUDE',
    'INCLUDE_NEXT', 'DEFINE', 'UNDEF', 'LINE', 'ERROR', 'PRAGMA', 'DEFINED',
    'IMPORT',
    # NEWLINE terminates a directive; LPAREN is a '(' with no preceding
    # whitespace (significant for function-like macro definitions).
    'NEWLINE', 'LPAREN'
)
# Named sub-patterns, expanded into token regexes by sub() below
# (e.g. '{D}' becomes '[0-9]').  Names follow the classic lex conventions.
subs = {
    'D': '[0-9]',               # decimal digit
    'L': '[a-zA-Z_]',           # identifier-start letter
    'H': '[a-fA-F0-9]',         # hexadecimal digit
    'E': '[Ee][+-]?{D}+',       # float exponent
    'FS': '[FflL]',             # float suffix
    'IS': '[uUlL]*',            # integer suffix
}
# Helper: expand '{name}' placeholders in a pattern with entries from
# ``subs`` so that token regexes can be written lex-style.
sub_pattern = re.compile('{([^}]*)}')

def sub_repl_match(m):
    # Look up the placeholder name captured by sub_pattern.
    return subs[m.group(1)]

def sub(s):
    # Replace every '{name}' occurrence in ``s`` with its expansion.
    return sub_pattern.sub(sub_repl_match, s)

# Fully-expanded regexes shared by several lexer rules below.
CHARACTER_CONSTANT = sub(r"L?'(\\.|[^\\'])+'")
STRING_LITERAL = sub(r'L?"(\\.|[^\\"])*"')
IDENTIFIER = sub('{L}({L}|{D})*')
# --------------------------------------------------------------------------
# Token value types
# --------------------------------------------------------------------------
# Numbers represented as int and float types.
# For all other tokens, type is just str representation.
class StringLiteral(str):
    # Value of a STRING_LITERAL token: the unquoted, unescaped text.
    def __new__(cls, value):
        # ``value`` still carries the surrounding double quotes.
        assert value[0] == '"' and value[-1] == '"'
        # Unescaping probably not perfect but close enough.
        # NOTE(review): 'string_escape' is a Python-2-only codec; this
        # module predates Python 3.
        value = value[1:-1].decode('string_escape')
        return str.__new__(cls, value)
class SystemHeaderName(str):
    '''Value of a HEADER_NAME token: the header path without the angle
    brackets.  repr() restores the brackets for readable diagnostics.
    '''
    def __new__(cls, value):
        assert value[0] == '<' and value[-1] == '>'
        inner = value[1:-1]
        return str.__new__(cls, inner)

    def __repr__(self):
        return '<' + str(self) + '>'
# --------------------------------------------------------------------------
# Token declarations
# --------------------------------------------------------------------------
# All C punctuators, including the digraph spellings (<: :> <% %> %:%:).
# Maps source text to (escaped regex, token type); punctuator_regex() below
# joins the regexes into one alternation for the lexer.
punctuators = {
    # value: (regex, type)
    r'...': (r'\.\.\.', 'ELLIPSIS'),
    r'>>=': (r'>>=', 'RIGHT_ASSIGN'),
    r'<<=': (r'<<=', 'LEFT_ASSIGN'),
    r'+=': (r'\+=', 'ADD_ASSIGN'),
    r'-=': (r'-=', 'SUB_ASSIGN'),
    r'*=': (r'\*=', 'MUL_ASSIGN'),
    r'/=': (r'/=', 'DIV_ASSIGN'),
    r'%=': (r'%=', 'MOD_ASSIGN'),
    r'&=': (r'&=', 'AND_ASSIGN'),
    r'^=': (r'\^=', 'XOR_ASSIGN'),
    r'|=': (r'\|=', 'OR_ASSIGN'),
    r'>>': (r'>>', 'RIGHT_OP'),
    r'<<': (r'<<', 'LEFT_OP'),
    r'++': (r'\+\+', 'INC_OP'),
    r'--': (r'--', 'DEC_OP'),
    r'->': (r'->', 'PTR_OP'),
    r'&&': (r'&&', 'AND_OP'),
    r'||': (r'\|\|', 'OR_OP'),
    r'<=': (r'<=', 'LE_OP'),
    r'>=': (r'>=', 'GE_OP'),
    r'==': (r'==', 'EQ_OP'),
    r'!=': (r'!=', 'NE_OP'),
    r'<:': (r'<:', '['),
    r':>': (r':>', ']'),
    r'<%': (r'<%', '{'),
    r'%>': (r'%>', '}'),
    r'%:%:': (r'%:%:', 'HASH_HASH'),
    r';': (r';', ';'),
    r'{': (r'{', '{'),
    r'}': (r'}', '}'),
    r',': (r',', ','),
    r':': (r':', ':'),
    r'=': (r'=', '='),
    r')': (r'\)', ')'),
    r'[': (r'\[', '['),
    r']': (r']', ']'),
    r'.': (r'\.', 'PERIOD'),
    r'&': (r'&', '&'),
    r'!': (r'!', '!'),
    r'~': (r'~', '~'),
    r'-': (r'-', '-'),
    r'+': (r'\+', '+'),
    r'*': (r'\*', '*'),
    r'/': (r'/', '/'),
    r'%': (r'%', '%'),
    r'<': (r'<', '<'),
    r'>': (r'>', '>'),
    r'^': (r'\^', '^'),
    r'|': (r'\|', '|'),
    r'?': (r'\?', '?'),
    r'#': (r'\#', '#'),
}
def punctuator_regex(punctuators):
    '''Build one alternation regex matching any punctuator.

    Longer patterns are placed first so the lexer always prefers the
    longest match (e.g. '>>=' before '>>' before '>').
    '''
    punctuator_regexes = [v[0] for v in punctuators.values()]
    # Sort by descending pattern length.  This key-based sort replaces the
    # Python-2-only cmp-based sort (``-cmp(len(a), len(b))``) and produces
    # the same, stable ordering.
    punctuator_regexes.sort(key=len, reverse=True)
    return '(%s)' % '|'.join(punctuator_regexes)
def t_clinecomment(t):
    r'//[^\n]*'
    # NOTE(review): a '//' comment does not consume its trailing newline;
    # the newline is tokenized separately and bumps lineno again, so this
    # increment looks like a double count -- confirm before relying on
    # reported line numbers.
    t.lexer.lineno += 1
def t_cr(t):
    r'\r'
    # Skip over CR characters.  Only necessary on urlopen'd files.
    # (Falling through without a return discards the token.)
# C /* comments */. Copied from the ylex.py example in PLY: it's not 100%
# correct for ANSI C, but close enough for anything that's not crazy.
def t_ccomment(t):
    r'/\*(.|\n)*?\*/'
    # Account for every newline swallowed inside the /* ... */ body so
    # subsequent tokens report correct line numbers.
    t.lexer.lineno += t.value.count('\n')
def t_header_name(t):
    r'<([\/]?[^\/\*\n>])*[\/]?>(?=[ \t\f\v\r\n])'
    # Matches a <...> system header name.
    # Should allow any character from charset, but that wreaks havok (skips
    # comment delimiter, for instance), so also don't permit '*' or '//'
    # The non-matching group at the end prevents false-positives with
    # operators like '>='.
    # In the event of a false positive (e.g. "if (a < b || c > d)"), the
    # token will be split and rescanned if it appears in a text production;
    # see PreprocessorParser.write.
    # Is also r'"[^\n"]"', but handled in STRING_LITERAL instead.
    t.type = 'HEADER_NAME'
    t.value = SystemHeaderName(t.value)
    return t
def t_directive(t):
    r'\#[ \t]*(ifdef|ifndef|if|elif|else|endif|define|undef|include_next|include|import|line|error|pragma)'
    if t.lexer.lasttoken in ('NEWLINE', None):
        # A '#' at the start of a (logical) line begins a directive; the
        # token type becomes the upper-cased keyword (IF, DEFINE, ...).
        t.type = t.value[1:].lstrip().upper()
    else:
        # TODO
        # Mid-line '#': emit the '#' as a punctuator and queue the keyword
        # to be re-emitted as a plain identifier.
        t.type = '#'
        t.lexer.nexttoken = ('IDENTIFIER', t.value[1:].lstrip())
    return t
@TOKEN(punctuator_regex(punctuators))
def t_punctuator(t):
    # Map the matched source text back to its token type via the table.
    t.type = punctuators[t.value][1]
    return t
@TOKEN(IDENTIFIER)
def t_identifier(t):
    # 'defined' is special-cased so #if expressions can recognise it.
    if t.value == 'defined':
        t.type = 'DEFINED'
    else:
        t.type = 'IDENTIFIER'
    return t
# missing: universal-character-constant
@TOKEN(sub(r'({D}|\.{D})({D}|{L}|e[+-]|E[+-]|p[+-]|P[+-]|\.)*'))
def t_pp_number(t):
    # C99 pp-number: deliberately permissive; real numeric conversion
    # happens later in ConstantExpressionGrammar.p_constant.
    t.type = 'PP_NUMBER'
    return t
@TOKEN(CHARACTER_CONSTANT)
def t_character_constant(t):
    # e.g. 'a' or L'\n'; value is kept as raw source text.
    t.type = 'CHARACTER_CONSTANT'
    return t
@TOKEN(STRING_LITERAL)
def t_string_literal(t):
    # Wrap in StringLiteral, which strips the quotes and unescapes.
    t.type = 'STRING_LITERAL'
    t.value = StringLiteral(t.value)
    return t
def t_lparen(t):
    r'\('
    if t.lexpos == 0 or t.lexer.lexdata[t.lexpos-1] not in (' \t\f\v\n'):
        # '(' immediately following a token (no intervening whitespace):
        # this distinguishes function-like macro definitions in #define.
        t.type = 'LPAREN'
    else:
        t.type = '('
    return t
def t_continuation(t):
    r'\\\n'
    # Backslash-newline line continuation: count the line but emit nothing.
    t.lexer.lineno += 1
    return None
def t_newline(t):
    r'\n'
    # Newlines are significant: they terminate preprocessor directives.
    t.lexer.lineno += 1
    t.type = 'NEWLINE'
    return t
def t_error(t):
    # Any unmatched character is passed through as an OTHER token rather
    # than aborting the lex (C99 allows such tokens in skipped groups).
    t.type = 'OTHER'
    return t
# Inter-token whitespace silently skipped by the lexer (newline excluded).
t_ignore = ' \t\v\f'
# --------------------------------------------------------------------------
# Expression Object Model
# --------------------------------------------------------------------------
class EvaluationContext(object):
    '''Interface for evaluating expression nodes.
    '''
    def is_defined(self, identifier):
        # The base context knows no macros; subclasses (e.g.
        # PreprocessorNamespace) override this.
        return False
class ExpressionNode(object):
    '''Base class for constant-expression AST nodes.'''

    def evaluate(self, context):
        # Unknown/unsupported expressions evaluate to 0, as in the C
        # preprocessor.
        return 0

    def __str__(self):
        return ''
class ConstantExpressionNode(ExpressionNode):
    '''A literal numeric value.'''

    def __init__(self, value):
        self.value = value

    def evaluate(self, context):
        # A constant ignores the evaluation context entirely.
        return self.value

    def __str__(self):
        return str(self.value)
class UnaryExpressionNode(ExpressionNode):
    '''Application of a unary operator to a single operand.'''

    def __init__(self, op, op_str, child):
        self.op = op            # callable implementing the operator
        self.op_str = op_str    # source spelling, used by __str__
        self.child = child

    def evaluate(self, context):
        operand = self.child.evaluate(context)
        return self.op(operand)

    def __str__(self):
        return '(%s %s)' % (self.op_str, self.child)
class BinaryExpressionNode(ExpressionNode):
    '''Application of a (non-short-circuit) binary operator.'''

    def __init__(self, op, op_str, left, right):
        self.op = op            # callable implementing the operator
        self.op_str = op_str    # source spelling, used by __str__
        self.left = left
        self.right = right

    def evaluate(self, context):
        lhs = self.left.evaluate(context)
        rhs = self.right.evaluate(context)
        return self.op(lhs, rhs)

    def __str__(self):
        return '(%s %s %s)' % (self.left, self.op_str, self.right)
class LogicalAndExpressionNode(ExpressionNode):
    '''Short-circuit &&: the right operand is only evaluated when the
    left is truthy; the operand value itself is returned (C semantics
    via Python's own ``and``).'''

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def evaluate(self, context):
        lhs = self.left.evaluate(context)
        if not lhs:
            return lhs
        return self.right.evaluate(context)

    def __str__(self):
        return '(%s && %s)' % (self.left, self.right)
class LogicalOrExpressionNode(ExpressionNode):
    '''Short-circuit ||: the right operand is only evaluated when the
    left is falsy; the operand value itself is returned.'''

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def evaluate(self, context):
        lhs = self.left.evaluate(context)
        if lhs:
            return lhs
        return self.right.evaluate(context)

    def __str__(self):
        return '(%s || %s)' % (self.left, self.right)
class ConditionalExpressionNode(ExpressionNode):
    '''Ternary ?: expression; only the selected branch is evaluated.'''

    def __init__(self, condition, left, right):
        self.condition = condition
        self.left = left
        self.right = right

    def evaluate(self, context):
        branch = self.left if self.condition.evaluate(context) else self.right
        return branch.evaluate(context)

    def __str__(self):
        return '(%s ? %s : %s)' % (self.condition, self.left, self.right)
# --------------------------------------------------------------------------
# Lexers
# --------------------------------------------------------------------------
class PreprocessorLexer(lex.Lexer):
    '''Token source for the preprocessor.

    Extends the generated lexer with a stack of input buffers so that
    #include'd files are lexed in place, and records the type of the last
    token returned (used by t_directive to spot line-initial '#').
    '''
    def __init__(self):
        lex.Lexer.__init__(self)
        self.filename = '<input>'
    def input(self, data, filename=None):
        if filename:
            self.filename = filename
        self.lasttoken = None
        self.input_stack = []
        lex.Lexer.input(self, data)
    def push_input(self, data, filename):
        # Suspend the current buffer and start lexing ``data`` instead.
        self.input_stack.append(
            (self.lexdata, self.lexpos, self.filename, self.lineno))
        self.lexdata = data
        self.lexpos = 0
        self.lineno = 1
        self.filename = filename
        self.lexlen = len(self.lexdata)
    def pop_input(self):
        # Resume the buffer that was active before the last push_input().
        self.lexdata, self.lexpos, self.filename, self.lineno = \
            self.input_stack.pop()
        self.lexlen = len(self.lexdata)
    def token(self):
        result = lex.Lexer.token(self)
        while result is None and self.input_stack:
            # End of an included file: fall back to the including file.
            self.pop_input()
            result = lex.Lexer.token(self)
        if result:
            self.lasttoken = result.type
            # Tag each token with the file it came from, for diagnostics.
            result.filename = self.filename
        else:
            self.lasttoken = None
        return result
class TokenListLexer(object):
    '''Present a fixed token sequence through the lexer interface.

    Used to re-parse already-lexed (macro-expanded) tokens with the
    constant-expression grammars.
    '''

    def __init__(self, tokens):
        self.tokens = tokens
        self.pos = 0

    def token(self):
        # Return the next stored token, or None once exhausted.
        if self.pos >= len(self.tokens):
            return None
        tok = self.tokens[self.pos]
        self.pos += 1
        return tok
def symbol_to_token(sym):
    # Normalise a parser stack entry to the underlying lex token:
    # non-terminals (YaccSymbol) wrap their token in .value, terminals
    # are already LexTokens.
    if isinstance(sym, yacc.YaccSymbol):
        return sym.value
    elif isinstance(sym, lex.LexToken):
        return sym
    else:
        assert False, 'Not a symbol: %r' % sym
def create_token(type, value, production=None):
    '''Create a token of type and value, at the position where 'production'
    was reduced. Don't specify production if the token is built-in'''
    t = lex.LexToken()
    t.type = type
    t.value = value
    t.lexpos = -1
    if production:
        # Inherit position info from the production's first symbol.
        t.lineno = production.slice[1].lineno
        t.filename = production.slice[1].filename
    else:
        # Synthetic token with no source location.
        t.lineno = -1
        t.filename = '<builtin>'
    return t
# --------------------------------------------------------------------------
# Grammars
# --------------------------------------------------------------------------
class Grammar(object):
    '''Base class for the yacc grammars in this module.

    Caches one parser prototype per subclass so the (expensive) parse
    table construction happens only once per grammar.
    '''
    prototype = None
    name = 'grammar'
    @classmethod
    def get_prototype(cls):
        if not cls.prototype:
            instance = cls()
            # Give each grammar its own table-module name to avoid clashes.
            tabmodule = '%stab' % cls.name
            cls.prototype = yacc.yacc(module=instance, tabmodule=tabmodule)
        return cls.prototype
class PreprocessorGrammar(Grammar):
    '''Grammar of a complete preprocessing file (C99 6.10).

    Reductions act directly on p.parser (a PreprocessorParser):
    directives are executed as they are recognised, and surviving text
    lines are macro-expanded and appended to the parser's output.
    Method docstrings are the yacc productions and must not be altered.
    '''
    tokens = tokens
    name = 'pp'
    def p_preprocessing_file(self, p):
        '''preprocessing_file : group_opt
        '''
    def p_group_opt(self, p):
        '''group_opt : group
                     |
        '''
    def p_group(self, p):
        '''group : group_part
                 | group group_part
        '''
    def p_group_part(self, p):
        '''group_part : if_section
                      | control_line
                      | text_line
        '''
    def p_if_section(self, p):
        '''if_section : if_group elif_groups_opt else_group_opt endif_line
        '''
    def p_if_group(self, p):
        '''if_group : if_line group_opt
        '''
    def p_if_line(self, p):
        '''if_line : IF replaced_constant_expression NEWLINE
                   | IFDEF IDENTIFIER NEWLINE
                   | IFNDEF IDENTIFIER NEWLINE
        '''
        if p.parser.enable_declaratives():
            type = p.slice[1].type
            if type == 'IF':
                if p[2]:
                    result = p[2].evaluate(p.parser.namespace)
                else:
                    # error
                    result = False
            elif type == 'IFDEF':
                result = p.parser.namespace.is_defined(p[2])
            elif type == 'IFNDEF':
                result = not p.parser.namespace.is_defined(p[2])
                # Surface #ifndef guards to consumers of the token stream.
                p.parser.write((create_token('PP_IFNDEF', p[2], p),))
        else:
            # Enclosing region is disabled; the condition is irrelevant.
            result = False
        p.parser.condition_if(result)
    def p_elif_groups_opt(self, p):
        '''elif_groups_opt : elif_groups
                           |
        '''
    def p_elif_groups(self, p):
        '''elif_groups : elif_group
                       | elif_groups elif_group
        '''
    def p_elif_group(self, p):
        '''elif_group : elif_line group_opt
        '''
    def p_elif_line(self, p):
        '''elif_line : ELIF replaced_elif_constant_expression NEWLINE
        '''
        result = p[2].evaluate(p.parser.namespace)
        p.parser.condition_elif(result)
    def p_else_group_opt(self, p):
        '''else_group_opt : else_group
                          |
        '''
    def p_else_group(self, p):
        '''else_group : else_line group_opt
        '''
    def p_else_line(self, p):
        '''else_line : ELSE NEWLINE
        '''
        p.parser.condition_else()
    def p_endif_line(self, p):
        '''endif_line : ENDIF pp_tokens_opt NEWLINE
        '''
        # pp_tokens needed (ignored) here for Apple.
        p.parser.condition_endif()
    def p_control_line(self, p):
        '''control_line : include_line NEWLINE
                        | define_object
                        | define_function
                        | undef_line
                        | LINE pp_tokens NEWLINE
                        | error_line
                        | PRAGMA pp_tokens_opt NEWLINE
        '''
    def p_include_line(self, p):
        '''include_line : INCLUDE pp_tokens
                        | INCLUDE_NEXT pp_tokens
                        | IMPORT pp_tokens
        '''
        if p.parser.enable_declaratives():
            tokens = p[2]
            # Macro-expand first: the header name may itself be a macro.
            tokens = p.parser.namespace.apply_macros(tokens)
            if len(tokens) > 0:
                if p.slice[1].type == 'INCLUDE':
                    if tokens[0].type == 'STRING_LITERAL':
                        p.parser.include(tokens[0].value)
                        return
                    elif tokens[0].type == 'HEADER_NAME':
                        p.parser.include_system(tokens[0].value)
                        return
                elif p.slice[1].type == 'INCLUDE_NEXT':
                    p.parser.include_next(tokens[0].value, p.slice[1].filename)
                    return
                else:
                    if tokens[0].type == 'STRING_LITERAL':
                        p.parser.import_(tokens[0].value)
                        return
                    elif tokens[0].type == 'HEADER_NAME':
                        p.parser.import_system(tokens[0].value)
                        return
            # TODO
            print >> sys.stderr, 'Invalid #include'
    def p_define_object(self, p):
        '''define_object : DEFINE IDENTIFIER replacement_list NEWLINE
        '''
        if p.parser.enable_declaratives():
            p.parser.namespace.define_object(p[2], p[3])
            # Try to parse replacement list as an expression
            tokens = p.parser.namespace.apply_macros(p[3])
            lexer = TokenListLexer(tokens)
            expr_parser = StrictConstantExpressionParser(lexer,
                                                         p.parser.namespace)
            value = expr_parser.parse(debug=False)
            if value is not None:
                value = value.evaluate(p.parser.namespace)
                p.parser.write(
                    (create_token('PP_DEFINE_CONSTANT', (p[2], value), p),))
            else:
                # Didn't parse, pass on as string
                value = ' '.join([str(t.value) for t in p[3]])
                p.parser.write((create_token('PP_DEFINE', (p[2], value), p),))
    def p_define_function(self, p):
        '''define_function : DEFINE IDENTIFIER LPAREN define_function_params ')' pp_tokens_opt NEWLINE
        '''
        if p.parser.enable_declaratives():
            p.parser.namespace.define_function(p[2], p[4], p[6])
    def p_define_function_params(self, p):
        '''define_function_params : identifier_list_opt
                                  | ELLIPSIS
                                  | identifier_list ',' ELLIPSIS
        '''
        if len(p) == 2:
            # NOTE(review): p[1] holds the token *value* ('...'), so the
            # comparison against the token *type* name 'ELLIPSIS' below
            # can never be true -- confirm whether the bare-ELLIPSIS case
            # was ever exercised.
            if p[1] == 'ELLIPSIS':
                p[0] = ('...',)
            else:
                p[0] = p[1]
        else:
            p[0] = p[1] + ('...',)
    def p_undef_line(self, p):
        '''undef_line : UNDEF IDENTIFIER NEWLINE
        '''
        if p.parser.enable_declaratives():
            p.parser.namespace.undef(p[2])
    def p_error_line(self, p):
        '''error_line : ERROR pp_tokens_opt NEWLINE
        '''
        if p.parser.enable_declaratives():
            p.parser.error(' '.join([t.value for t in p[2]]),
                           p.slice[1].filename, p.slice[1].lineno)
    def p_text_line(self, p):
        '''text_line : pp_tokens_opt NEWLINE
        '''
        if p.parser.enable_declaratives():
            tokens = p[1]
            # Ordinary source line: macro-expand and emit.
            tokens = p.parser.namespace.apply_macros(tokens)
            p.parser.write(tokens)
    def p_replacement_list(self, p):
        '''replacement_list :
                            | preprocessing_token_no_lparen
                            | preprocessing_token_no_lparen pp_tokens
        '''
        if len(p) == 3:
            p[0] = (p[1],) + p[2]
        elif len(p) == 2:
            p[0] = (p[1],)
        else:
            p[0] = ()
    def p_identifier_list_opt(self, p):
        '''identifier_list_opt : identifier_list
                               |
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = ()
    def p_identifier_list(self, p):
        '''identifier_list : IDENTIFIER
                           | identifier_list ',' IDENTIFIER
        '''
        if len(p) > 2:
            p[0] = p[1] + (p[3],)
        else:
            p[0] = (p[1],)
    def p_replaced_constant_expression(self, p):
        '''replaced_constant_expression : pp_tokens'''
        if p.parser.enable_conditionals():
            tokens = p[1]
            tokens = p.parser.namespace.apply_macros(tokens)
            lexer = TokenListLexer(tokens)
            parser = ConstantExpressionParser(lexer, p.parser.namespace)
            p[0] = parser.parse(debug=True)
        else:
            # Disabled region: skip evaluation, pretend the result is 0.
            p[0] = ConstantExpressionNode(0)
    def p_replaced_elif_constant_expression(self, p):
        '''replaced_elif_constant_expression : pp_tokens'''
        if p.parser.enable_elif_conditionals():
            tokens = p[1]
            tokens = p.parser.namespace.apply_macros(tokens)
            lexer = TokenListLexer(tokens)
            parser = ConstantExpressionParser(lexer, p.parser.namespace)
            p[0] = parser.parse(debug=True)
        else:
            # A previous arm already fired (or parent disabled): skip.
            p[0] = ConstantExpressionNode(0)
    def p_pp_tokens_opt(self, p):
        '''pp_tokens_opt : pp_tokens
                         |
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = ()
    def p_pp_tokens(self, p):
        '''pp_tokens : preprocessing_token
                     | pp_tokens preprocessing_token
        '''
        if len(p) == 2:
            p[0] = (p[1],)
        else:
            p[0] = p[1] + (p[2],)
    def p_preprocessing_token_no_lparen(self, p):
        '''preprocessing_token_no_lparen : HEADER_NAME
                                         | IDENTIFIER
                                         | PP_NUMBER
                                         | CHARACTER_CONSTANT
                                         | STRING_LITERAL
                                         | punctuator
                                         | DEFINED
                                         | OTHER
        '''
        p[0] = symbol_to_token(p.slice[1])
    def p_preprocessing_token(self, p):
        '''preprocessing_token : preprocessing_token_no_lparen
                               | LPAREN
        '''
        p[0] = symbol_to_token(p.slice[1])
    def p_punctuator(self, p):
        '''punctuator : ELLIPSIS
                      | RIGHT_ASSIGN
                      | LEFT_ASSIGN
                      | ADD_ASSIGN
                      | SUB_ASSIGN
                      | MUL_ASSIGN
                      | DIV_ASSIGN
                      | MOD_ASSIGN
                      | AND_ASSIGN
                      | XOR_ASSIGN
                      | OR_ASSIGN
                      | RIGHT_OP
                      | LEFT_OP
                      | INC_OP
                      | DEC_OP
                      | PTR_OP
                      | AND_OP
                      | OR_OP
                      | LE_OP
                      | GE_OP
                      | EQ_OP
                      | NE_OP
                      | HASH_HASH
                      | ';'
                      | '{'
                      | '}'
                      | ','
                      | ':'
                      | '='
                      | '('
                      | ')'
                      | '['
                      | ']'
                      | PERIOD
                      | '&'
                      | '!'
                      | '~'
                      | '-'
                      | '+'
                      | '*'
                      | '/'
                      | '%'
                      | '<'
                      | '>'
                      | '^'
                      | '|'
                      | '?'
                      | '#'
        '''
        p[0] = symbol_to_token(p.slice[1])
    def p_error(self, t):
        if not t:
            # Crap, no way to get to Parser instance.  FIXME TODO
            print >> sys.stderr, 'Syntax error at end of file.'
        else:
            # TODO
            print >> sys.stderr, '%s:%d Syntax error at %r' % \
                (t.lexer.filename, t.lexer.lineno, t.value)
            #t.lexer.cparser.handle_error('Syntax error at %r' % t.value,
            #     t.lexer.filename, t.lexer.lineno)
        # Don't alter lexer: default behaviour is to pass error production
        # up until it hits the catch-all at declaration, at which point
        # parsing continues (synchronisation).
class ConstantExpressionParseException(Exception):
    '''Raised when a token sequence cannot be parsed as a constant
    expression.'''
class ConstantExpressionGrammar(Grammar):
    '''Grammar for C constant expressions used in #if/#elif directives.

    Builds an ExpressionNode AST; the root is stored in p.parser.result.
    Undefined identifiers evaluate to 0, as the C preprocessor requires.
    Method docstrings are the yacc productions and must not be altered.
    '''
    name = 'expr'
    tokens = tokens
    def p_constant_expression(self, p):
        '''constant_expression : conditional_expression
        '''
        p[0] = p[1]
        p.parser.result = p[0]
    def p_constant(self, p):
        '''constant : PP_NUMBER
        '''
        # Strip any integer suffix (L/l/U/u), then try hex, octal and
        # decimal integers before falling back to float; unparseable
        # numbers become 0.
        value = p[1].rstrip('LlUu')
        try:
            if value[:2] == '0x':
                value = int(value[2:], 16)
            elif value[0] == '0':
                value = int(value, 8)
            else:
                value = int(value)
        except ValueError:
            value = value.rstrip('eEfF')
            try:
                value = float(value)
            except ValueError:
                value = 0
        p[0] = ConstantExpressionNode(value)
    def p_identifier(self, p):
        '''identifier : IDENTIFIER
        '''
        # Undefined identifier in a #if expression evaluates to 0.
        p[0] = ConstantExpressionNode(0)
    def p_primary_expression(self, p):
        '''primary_expression : constant
                              | identifier
                              | '(' expression ')'
                              | LPAREN expression ')'
        '''
        if p[1] == '(':
            p[0] = p[2]
        else:
            p[0] = p[1]
    def p_postfix_expression(self, p):
        '''postfix_expression : primary_expression
        '''
        p[0] = p[1]
    def p_unary_expression(self, p):
        '''unary_expression : postfix_expression
                            | unary_operator cast_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        elif type(p[1]) == tuple:
            # unary_operator reduces to (op, op_str)
            p[0] = UnaryExpressionNode(p[1][0], p[1][1], p[2])
        else:
            # TODO
            p[0] = None
    def p_unary_operator(self, p):
        '''unary_operator : '+'
                          | '-'
                          | '~'
                          | '!'
        '''
        # reduces to (op, op_str)
        p[0] = ({
            '+': operator.pos,
            '-': operator.neg,
            '~': operator.inv,
            '!': operator.not_}[p[1]], p[1])
    def p_cast_expression(self, p):
        '''cast_expression : unary_expression
        '''
        p[0] = p[len(p) - 1]
    def p_multiplicative_expression(self, p):
        '''multiplicative_expression : cast_expression
                          | multiplicative_expression '*' cast_expression
                          | multiplicative_expression '/' cast_expression
                          | multiplicative_expression '%' cast_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            # NOTE: operator.div is Python 2 only (truncating for ints).
            p[0] = BinaryExpressionNode({
                '*': operator.mul,
                '/': operator.div,
                '%': operator.mod}[p[2]], p[2], p[1], p[3])
    def p_additive_expression(self, p):
        '''additive_expression : multiplicative_expression
                           | additive_expression '+' multiplicative_expression
                           | additive_expression '-' multiplicative_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode({
                '+': operator.add,
                '-': operator.sub}[p[2]], p[2], p[1], p[3])
    def p_shift_expression(self, p):
        '''shift_expression : additive_expression
                            | shift_expression LEFT_OP additive_expression
                            | shift_expression RIGHT_OP additive_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode({
                '<<': operator.lshift,
                '>>': operator.rshift}[p[2]], p[2], p[1], p[3])
    def p_relational_expression(self, p):
        '''relational_expression : shift_expression
                                 | relational_expression '<' shift_expression
                                 | relational_expression '>' shift_expression
                                 | relational_expression LE_OP shift_expression
                                 | relational_expression GE_OP shift_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode({
                '>': operator.gt,
                '<': operator.lt,
                '<=': operator.le,
                '>=': operator.ge}[p[2]], p[2], p[1], p[3])
    def p_equality_expression(self, p):
        '''equality_expression : relational_expression
                               | equality_expression EQ_OP relational_expression
                               | equality_expression NE_OP relational_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode({
                '==': operator.eq,
                '!=': operator.ne}[p[2]], p[2], p[1], p[3])
    def p_and_expression(self, p):
        '''and_expression : equality_expression
                          | and_expression '&' equality_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode(operator.and_, '&', p[1], p[3])
    def p_exclusive_or_expression(self, p):
        '''exclusive_or_expression : and_expression
                                   | exclusive_or_expression '^' and_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode(operator.xor, '^', p[1], p[3])
    def p_inclusive_or_expression(self, p):
        '''inclusive_or_expression : exclusive_or_expression
                   | inclusive_or_expression '|' exclusive_or_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode(operator.or_, '|', p[1], p[3])
    def p_logical_and_expression(self, p):
        '''logical_and_expression : inclusive_or_expression
                  | logical_and_expression AND_OP inclusive_or_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = LogicalAndExpressionNode(p[1], p[3])
    def p_logical_or_expression(self, p):
        '''logical_or_expression : logical_and_expression
                  | logical_or_expression OR_OP logical_and_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = LogicalOrExpressionNode(p[1], p[3])
    def p_conditional_expression(self, p):
        '''conditional_expression : logical_or_expression
              | logical_or_expression '?' expression ':' conditional_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = ConditionalExpressionNode(p[1], p[3], p[5])
    def p_assignment_expression(self, p):
        '''assignment_expression : conditional_expression
                 | unary_expression assignment_operator assignment_expression
        '''
        # TODO assignment
        if len(p) == 2:
            p[0] = p[1]
    def p_assignment_operator(self, p):
        '''assignment_operator : '='
                               | MUL_ASSIGN
                               | DIV_ASSIGN
                               | MOD_ASSIGN
                               | ADD_ASSIGN
                               | SUB_ASSIGN
                               | LEFT_ASSIGN
                               | RIGHT_ASSIGN
                               | AND_ASSIGN
                               | XOR_ASSIGN
                               | OR_ASSIGN
        '''
    def p_expression(self, p):
        '''expression : assignment_expression
                      | expression ',' assignment_expression
        '''
        # TODO sequence
        if len(p) == 2:
            p[0] = p[1]
    def p_error(self, t):
        # Any syntax error aborts the expression parse; the caller treats
        # the result as "not a constant expression".
        raise ConstantExpressionParseException()
class StrictConstantExpressionGrammar(ConstantExpressionGrammar):
    '''Variant used to decide whether a macro body is a pure constant:
    any identifier aborts the parse instead of evaluating to 0.
    '''
    name = 'strict_expr'
    tokens = tokens
    def p_identifier(self, p):
        '''identifier : IDENTIFIER
        '''
        raise ConstantExpressionParseException()
class ExecutionState(object):
    '''Liveness of one #if/#elif/#else conditional level.

    enabled         -- tokens in the current arm are emitted
    context_enabled -- some arm of this conditional has already fired
    parent_enabled  -- the enclosing conditional context is live
    '''

    def __init__(self, parent_enabled, enabled):
        self.parent_enabled = parent_enabled
        self.context_enabled = enabled
        self.enabled = parent_enabled and enabled

    def enable(self, result):
        '''Enter an #elif/#else arm whose condition evaluated to ``result``.'''
        if not result:
            self.enabled = False
            return
        # Fire only if no earlier arm fired and the parent context is live.
        self.enabled = self.parent_enabled and not self.context_enabled
        self.context_enabled = True
class PreprocessorParser(yacc.Parser):
    '''Top-level preprocessor driver.

    Lexes and parses a C source file, executing directives as they are
    reduced and collecting the surviving, macro-expanded tokens in
    ``self.output``.
    '''
    def __init__(self, gcc_search_path=True):
        yacc.Parser.__init__(self)
        self.lexer = lex.lex(cls=PreprocessorLexer)
        PreprocessorGrammar.get_prototype().init_parser(self)

        # Map system header name to data, overrides path search and open()
        self.system_headers = {}

        self.include_path = ['/usr/local/include', '/usr/include']
        if sys.platform == 'darwin':
            self.framework_path = ['/System/Library/Frameworks',
                                   '/Library/Frameworks']
        else:
            self.framework_path = []
        if gcc_search_path:
            self.add_gcc_search_path()

        self.lexer.filename = ''
        self.defines = {}
        self.namespace = PreprocessorNamespace()

    def define(self, name, value):
        '''Predefine ``name`` as an object-like macro for the next parse().'''
        self.defines[name] = value

    def add_gcc_search_path(self):
        # Ask the locally installed gcc for its private include directory.
        from subprocess import Popen, PIPE
        path = Popen('gcc -print-file-name=include',
                     shell=True, stdout=PIPE).communicate()[0].strip()
        if path:
            self.include_path.append(path)

    def parse(self, filename=None, data=None, namespace=None, debug=False):
        '''Preprocess ``filename`` (or raw ``data``); tokens land in
        self.output.  An explicit ``namespace`` carries macro state across
        parses.'''
        self.output = []
        if not namespace:
            namespace = self.namespace
        for name, value in self.defines.items():
            namespace.define_object(name, (create_token('IDENTIFIER', value),))
        self.namespace = namespace
        self.imported_headers = set()
        # Sentinel state: the top-level file is always "enabled".
        self.condition_stack = [ExecutionState(True, True)]
        if filename:
            if not data:
                data = open(filename, 'r').read()
            self.lexer.input(data, filename)
        elif data:
            self.lexer.input(data, '<input>')
        return yacc.Parser.parse(self, debug=debug)

    def push_file(self, filename, data=None):
        print >> sys.stderr, filename
        if not data:
            data = open(filename).read()
        self.lexer.push_input(data, filename)

    def include(self, header):
        '''Handle #include "header".'''
        path = self.get_header_path(header)
        if path:
            self.push_file(path)
        else:
            print >> sys.stderr, '"%s" not found' % header # TODO

    def include_system(self, header):
        '''Handle #include <header>.'''
        if header in self.system_headers:
            # In-memory override takes precedence over the search path.
            self.push_file(header, self.system_headers[header])
            return
        path = self.get_system_header_path(header)
        if path:
            self.push_file(path)
        else:
            print >> sys.stderr, '"%s" not found' % header # TODO

    def include_next(self, header, reference):
        '''Handle GNU #include_next: first match *after* the directory
        that produced ``reference``.'''
        # XXX doesn't go via get_system_header
        next = False
        for path in self.include_path:
            p = os.path.join(path, header)
            if os.path.exists(p):
                if next:
                    self.push_file(p)
                    return
                elif p == reference:
                    next = True
        print >> sys.stderr, '%s: cannot include_next from %s' % \
            (header, reference) # TODO

    def import_(self, header):
        '''Handle Objective-C #import "header" (include-once).'''
        path = self.get_header_path(header)
        if path:
            if path not in self.imported_headers:
                self.imported_headers.add(path)
                self.push_file(path)
        else:
            print >> sys.stderr, '"%s" not found' % header # TODO

    def import_system(self, header):
        '''Handle Objective-C #import <header> (include-once).'''
        if header in self.system_headers:
            # BUG FIX: this branch previously tested an undefined local
            # ``path`` (NameError at runtime); key the include-once set
            # on the header name for in-memory headers.
            if header not in self.imported_headers:
                self.imported_headers.add(header)
                self.push_file(header, self.system_headers[header])
            return
        path = self.get_system_header_path(header)
        if path:
            if path not in self.imported_headers:
                self.imported_headers.add(path)
                self.push_file(path)
        else:
            print >> sys.stderr, '"%s" not found' % header # TODO

    def get_header_path(self, header):
        '''Resolve a quoted header name to a filesystem path, or None.

        BUG FIX: previously this also pushed the local file onto the
        lexer itself; every caller pushes the returned path as well, so
        local headers were parsed twice (and #import's include-once
        guard was bypassed).  Resolution is now side-effect free.
        '''
        p = os.path.join(os.path.dirname(self.lexer.filename), header)
        if os.path.exists(p):
            return p
        elif sys.platform == 'darwin':
            p = self.get_framework_header_path(header)
            if not p:
                p = self.get_system_header_path(header)
            return p

    def get_system_header_path(self, header):
        '''Search the include path (and, on OS X, frameworks) for header.'''
        for path in self.include_path:
            p = os.path.join(path, header)
            if os.path.exists(p):
                return p
        if sys.platform == 'darwin':
            return self.get_framework_header_path(header)

    def get_framework_header_path(self, header):
        '''Resolve 'Framework/Header.h' inside OS X framework bundles.'''
        if '/' in header:
            # header is 'Framework/Framework.h' (e.g. OpenGL/OpenGL.h).
            framework, header = header.split('/', 1)

            paths = self.framework_path[:]
            # Add ancestor frameworks of current file
            localpath = ''
            for parent in self.lexer.filename.split('.framework/')[:-1]:
                localpath += parent + '.framework'
                paths.append(os.path.join(localpath, 'Frameworks'))
            for path in paths:
                p = os.path.join(path, '%s.framework' % framework,
                                 'Headers', header)
                if os.path.exists(p):
                    return p

    def error(self, message, filename, line):
        '''Report an #error directive.'''
        print >> sys.stderr, '%s:%d #error %s' % (filename, line, message)

    def condition_if(self, result):
        # Push a new conditional context for #if/#ifdef/#ifndef.
        self.condition_stack.append(
            ExecutionState(self.condition_stack[-1].enabled, result))

    def condition_elif(self, result):
        self.condition_stack[-1].enable(result)

    def condition_else(self):
        self.condition_stack[-1].enable(True)

    def condition_endif(self):
        self.condition_stack.pop()

    def enable_declaratives(self):
        # True when tokens/directives in the current region are live.
        return self.condition_stack[-1].enabled

    def enable_conditionals(self):
        return self.condition_stack[-1].enabled

    def enable_elif_conditionals(self):
        # An #elif condition is evaluated only when no earlier arm of this
        # conditional fired and the enclosing context is live.
        return self.condition_stack[-1].parent_enabled and \
               not self.condition_stack[-1].context_enabled

    def write(self, tokens):
        '''Append expanded tokens to the output stream.'''
        for t in tokens:
            if t.type == 'HEADER_NAME':
                # token was mis-parsed.  Do it again, without the '<', '>'.
                ta = create_token('<', '<')
                ta.filename = t.filename
                ta.lineno = t.lineno
                self.output.append(ta)

                l = lex.lex(cls=PreprocessorLexer)
                l.input(t.value, t.filename)
                l.lineno = t.lineno
                tb = l.token()
                while tb is not None:
                    if hasattr(tb, 'lexer'):
                        del tb.lexer
                    self.output.append(tb)
                    tb = l.token()

                tc = create_token('>', '>')
                tc.filename = t.filename
                tc.lineno = t.lineno
                self.output.append(tc)
                continue
            if hasattr(t, 'lexer'):
                # Drop the lexer backreference so tokens stay picklable.
                del t.lexer
            self.output.append(t)

    def get_memento(self):
        # Snapshot of which macro names are currently defined.
        return (set(self.namespace.objects.keys()),
                set(self.namespace.functions.keys()))
class ConstantExpressionParser(yacc.Parser):
    '''Parse a token list into a constant-expression AST.

    parse() returns the AST root, or None when the tokens do not form a
    valid expression.
    '''
    _const_grammar = ConstantExpressionGrammar
    def __init__(self, lexer, namespace):
        yacc.Parser.__init__(self)
        self.lexer = lexer
        self.namespace = namespace
        self._const_grammar.get_prototype().init_parser(self)
    def parse(self, debug=False):
        self.result = None
        try:
            yacc.Parser.parse(self, lexer=self.lexer, debug=debug)
        except ConstantExpressionParseException:
            # XXX warning here?
            # Parse failure: self.result stays None.
            pass
        return self.result
class StrictConstantExpressionParser(ConstantExpressionParser):
    # As above, but identifiers abort the parse instead of evaluating to 0.
    _const_grammar = StrictConstantExpressionGrammar
class PreprocessorNamespace(EvaluationContext):
def __init__(self, gcc_macros=True,
stdc_macros=True,
workaround_macros=True):
self.objects = {}
self.functions = {}
if stdc_macros:
self.add_stdc_macros()
if gcc_macros:
self.add_gcc_macros()
if workaround_macros:
self.add_workaround_macros()
def add_stdc_macros(self):
'''Add macros defined in 6.10.8 except __FILE__ and __LINE__.
This is potentially dangerous, as this preprocessor is not ISO
compliant in many ways (the most obvious is the lack of # and ##
operators). It is required for Apple headers, however, which
otherwise assume some truly bizarre syntax is ok.
'''
import time
date = time.strftime('%b %d %Y') # XXX %d should have leading space
t = time.strftime('%H:%M:S')
self.define_object('__DATE__',
(create_token('STRING_LITERAL', date),))
self.define_object('__TIME__',
(create_token('STRING_LITERAL', t),))
self.define_object('__STDC__',
(create_token('PP_NUMBER', '1'),))
self.define_object('__STDC_HOSTED__',
(create_token('PP_NUMBER', '1'),))
self.define_object('__STDC_VERSION',
(create_token('PP_NUMBER', '199901L'),))
def add_gcc_macros(self):
import platform
import sys
gcc_macros = ('__GLIBC_HAVE_LONG_LONG', '__GNUC__',)
# Get these from `gcc -E -dD empty.c`
machine_macros = {
'x86_64': ('__amd64', '__amd64__', '__x86_64', '__x86_64__',
'__tune_k8__', '__MMX__', '__SSE__', '__SSE2__',
'__SSE_MATH__', '__k8', '__k8__'),
'Power Macintosh': ('_ARCH_PPC', '__BIG_ENDIAN__', '_BIG_ENDIAN',
'__ppc__', '__POWERPC__'),
# TODO everyone else.
}.get(platform.machine(), ())
platform_macros = {
'linux2': ('__gnu_linux__', '__linux', '__linux__', 'linux',
'__unix', '__unix__', 'unix'),
'darwin': ('__MACH__', '__APPLE__', '__DYNAMIC__', '__APPLE_CC__'),
'win32': ('_WIN32',),
# TODO everyone else
}.get(sys.platform, ())
tok1 = lex.LexToken()
tok1.type = 'PP_NUMBER'
tok1.value = '1'
tok1.lineno = -1
tok1.lexpos = -1
for macro in machine_macros + platform_macros + gcc_macros:
self.define_object(macro, (tok1,))
self.define_object('inline', ())
self.define_object('__inline', ())
self.define_object('__inline__', ())
self.define_object('__const', (create_token('IDENTIFIER', 'const'),))
def add_workaround_macros(self):
if sys.platform == 'darwin':
self.define_object('CF_INLINE', ())
def is_defined(self, name):
return name in self.objects or name in self.functions
def undef(self, name):
if name in self.objects:
del self.objects[name]
if name in self.functions:
del self.functions[name]
def define_object(self, name, replacements):
# TODO check not already existing in objects or functions
for r in replacements:
if hasattr(r, 'lexer'):
del r.lexer
self.objects[name] = replacements
def define_function(self, name, params, replacements):
# TODO check not already existing in objects or functions
for r in replacements:
if hasattr(r, 'lexer'):
del r.lexer
replacements = list(replacements)
params = list(params)
numargs = len(params)
for i, t in enumerate(replacements):
if hasattr(t, 'lexer'):
del t.lexer
if t.type == 'IDENTIFIER' and t.value in params:
replacements[i] = params.index(t.value)
elif t.type == 'IDENTIFIER' and t.value == '__VA_ARGS__' and \
'...' in params:
replacements[i] = len(params) - 1
self.functions[name] = replacements, numargs
    def apply_macros(self, tokens, replacing=None):
        """Return *tokens* with macros recursively expanded.

        *replacing* names the macro currently being expanded so that a
        self-referential macro is not expanded again (prevents infinite
        recursion, per the usual cpp rule).
        """
        repl = []
        i = 0
        while i < len(tokens):
            token = tokens[i]
            if token.type == 'IDENTIFIER' and token.value in self.objects:
                # Object-like macro: splice in its (re-expanded) replacement.
                r = self.objects[token.value]
                if token.value != replacing and r:
                    repl += self.apply_macros(r, token.value)
            elif token.type == 'IDENTIFIER' and \
                 token.value in self.functions and \
                 len(tokens) - i > 2 and \
                 tokens[i+1].value == '(':
                # Function-like macro invocation: collect the argument lists.
                r, numargs = self.functions[token.value][:]
                # build params list
                i += 2
                params = [[]]
                parens = 0    # balance parentheses within each arg
                while i < len(tokens):
                    if tokens[i].value == ',' and parens == 0 and \
                       len(params) < numargs:
                        params.append([])
                    elif tokens[i].value == ')' and parens == 0:
                        break
                    else:
                        if tokens[i].value == '(':
                            parens += 1
                        elif tokens[i].value == ')':
                            parens -= 1
                        params[-1].append(tokens[i])
                    i += 1
                if token.value != replacing and r:
                    # Substitute integer placeholders (parameter indices, see
                    # define_function) with the collected argument tokens,
                    # then re-expand the result.
                    newr = []
                    for t in r:
                        if type(t) == int:
                            newr += params[t]
                        else:
                            newr.append(t)
                    repl += self.apply_macros(newr, token.value)
            elif token.type == 'DEFINED':
                # defined(X) / defined X -> a literal '1' or '0' token at the
                # position of the 'defined' keyword.
                if len(tokens) - i > 3 and \
                   tokens[i + 1].type in ('(', 'LPAREN') and \
                   tokens[i + 2].type == 'IDENTIFIER' and \
                   tokens[i + 3].type == ')':
                    result = self.is_defined(tokens[i + 2].value)
                    i += 3
                elif len(tokens) - i > 1 and \
                     tokens[i + 1].type == 'IDENTIFIER':
                    result = self.is_defined(tokens[i + 1].value)
                    i += 1
                else:
                    # TODO
                    print >> sys.stderr, 'Invalid use of "defined"'
                    result = 0
                t = lex.LexToken()
                t.value = str(int(result))
                t.type = 'PP_NUMBER'
                t.lexpos = token.lexpos
                t.lineno = token.lineno
                repl.append(t)
            else:
                # Not a macro: pass the token through unchanged.
                repl.append(token)
            i += 1
        return repl
def copy(self):
n = PreprocessorNamespace(gcc_macros=False, workaround_macros=False)
n.functions = self.functions.copy()
n.objects = self.objects.copy()
return n
# Command-line entry point: preprocess the file named by argv[1] and dump
# the surviving token values, space-separated, to stdout (Python 2 print).
if __name__ == '__main__':
    filename = sys.argv[1]
    parser = PreprocessorParser()
    parser.parse(filename, debug=True)
    print ' '.join([str(t.value) for t in parser.output])
| #!/usr/bin/env python
'''Preprocess a C source file.
Limitations:
* Whitespace is not preserved.
* # and ## operators not handled.
Reference is C99:
* http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
* Also understands Objective-C #import directive
* Also understands GNU #include_next
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import operator
import os.path
import cPickle
import re
import sys
import lex
from lex import TOKEN
import yacc
# Token type names shared by the preprocessor lexer and both PLY grammars
# below (PLY requires the module-level name 'tokens').
tokens = (
    'HEADER_NAME', 'IDENTIFIER', 'PP_NUMBER', 'CHARACTER_CONSTANT',
    'STRING_LITERAL', 'OTHER',
    'PTR_OP', 'INC_OP', 'DEC_OP', 'LEFT_OP', 'RIGHT_OP', 'LE_OP', 'GE_OP',
    'EQ_OP', 'NE_OP', 'AND_OP', 'OR_OP', 'MUL_ASSIGN', 'DIV_ASSIGN',
    'MOD_ASSIGN', 'ADD_ASSIGN', 'SUB_ASSIGN', 'LEFT_ASSIGN', 'RIGHT_ASSIGN',
    'AND_ASSIGN', 'XOR_ASSIGN', 'OR_ASSIGN', 'HASH_HASH', 'PERIOD',
    'ELLIPSIS',
    # Directive keywords (only recognised at the start of a line).
    'IF', 'IFDEF', 'IFNDEF', 'ELIF', 'ELSE', 'ENDIF', 'INCLUDE',
    'INCLUDE_NEXT', 'DEFINE', 'UNDEF', 'LINE', 'ERROR', 'PRAGMA', 'DEFINED',
    'IMPORT',
    # LPAREN is a '(' not preceded by whitespace (function-macro call).
    'NEWLINE', 'LPAREN'
)
# Named sub-patterns used to compose the lexer's regular expressions.
subs = {
    'D': '[0-9]',
    'L': '[a-zA-Z_]',
    'H': '[a-fA-F0-9]',
    'E': '[Ee][+-]?{D}+',
    'FS': '[FflL]',
    'IS': '[uUlL]*',
}
# Helper: substitute {foo} with subs[foo] in string (makes regexes more lexy)
sub_pattern = re.compile('{([^}]*)}')
def sub_repl_match(m):
    """Return the replacement text for one ``{name}`` occurrence."""
    return subs[m.group(1)]
def sub(s):
    """Expand every ``{name}`` placeholder in *s* using ``subs``."""
    return sub_pattern.sub(sub_repl_match, s)
CHARACTER_CONSTANT = sub(r"L?'(\\.|[^\\'])+'")
STRING_LITERAL = sub(r'L?"(\\.|[^\\"])*"')
IDENTIFIER = sub('{L}({L}|{D})*')
# --------------------------------------------------------------------------
# Token value types
# --------------------------------------------------------------------------
# Numbers represented as int and float types.
# For all other tokens, type is just str representation.
class StringLiteral(str):
    # A str whose value is the unescaped contents of a quoted C string
    # literal (surrounding quotes removed).
    def __new__(cls, value):
        assert value[0] == '"' and value[-1] == '"'
        # Unescaping probably not perfect but close enough.
        # NOTE: the 'string_escape' codec is Python 2 only.
        value = value[1:-1].decode('string_escape')
        return str.__new__(cls, value)
class SystemHeaderName(str):
    """A header name written as ``<name>``.

    Holds just ``name``; repr() restores the angle brackets.
    """
    def __new__(cls, value):
        assert value[0] == '<' and value[-1] == '>'
        inner = value[1:-1]
        return str.__new__(cls, inner)

    def __repr__(self):
        return '<%s>' % str(self)
# --------------------------------------------------------------------------
# Token declarations
# --------------------------------------------------------------------------
# All C punctuators: literal -> (escaped regex, token type).  Digraphs
# (<: :> <% %> %:%:) map straight to their canonical token.  Note '(' is
# deliberately absent: it is handled by t_lparen so that a parenthesis not
# preceded by whitespace can be distinguished (LPAREN) for macro calls.
punctuators = {
    # value: (regex, type)
    r'...': (r'\.\.\.', 'ELLIPSIS'),
    r'>>=': (r'>>=', 'RIGHT_ASSIGN'),
    r'<<=': (r'<<=', 'LEFT_ASSIGN'),
    r'+=': (r'\+=', 'ADD_ASSIGN'),
    r'-=': (r'-=', 'SUB_ASSIGN'),
    r'*=': (r'\*=', 'MUL_ASSIGN'),
    r'/=': (r'/=', 'DIV_ASSIGN'),
    r'%=': (r'%=', 'MOD_ASSIGN'),
    r'&=': (r'&=', 'AND_ASSIGN'),
    r'^=': (r'\^=', 'XOR_ASSIGN'),
    r'|=': (r'\|=', 'OR_ASSIGN'),
    r'>>': (r'>>', 'RIGHT_OP'),
    r'<<': (r'<<', 'LEFT_OP'),
    r'++': (r'\+\+', 'INC_OP'),
    r'--': (r'--', 'DEC_OP'),
    r'->': (r'->', 'PTR_OP'),
    r'&&': (r'&&', 'AND_OP'),
    r'||': (r'\|\|', 'OR_OP'),
    r'<=': (r'<=', 'LE_OP'),
    r'>=': (r'>=', 'GE_OP'),
    r'==': (r'==', 'EQ_OP'),
    r'!=': (r'!=', 'NE_OP'),
    r'<:': (r'<:', '['),
    r':>': (r':>', ']'),
    r'<%': (r'<%', '{'),
    r'%>': (r'%>', '}'),
    r'%:%:': (r'%:%:', 'HASH_HASH'),
    r';': (r';', ';'),
    r'{': (r'{', '{'),
    r'}': (r'}', '}'),
    r',': (r',', ','),
    r':': (r':', ':'),
    r'=': (r'=', '='),
    r')': (r'\)', ')'),
    r'[': (r'\[', '['),
    r']': (r']', ']'),
    r'.': (r'\.', 'PERIOD'),
    r'&': (r'&', '&'),
    r'!': (r'!', '!'),
    r'~': (r'~', '~'),
    r'-': (r'-', '-'),
    r'+': (r'\+', '+'),
    r'*': (r'\*', '*'),
    r'/': (r'/', '/'),
    r'%': (r'%', '%'),
    r'<': (r'<', '<'),
    r'>': (r'>', '>'),
    r'^': (r'\^', '^'),
    r'|': (r'\|', '|'),
    r'?': (r'\?', '?'),
    r'#': (r'\#', '#'),
}
def punctuator_regex(punctuators):
    """Build an alternation regex matching any punctuator.

    Longer patterns are placed first so that e.g. ``>>=`` is preferred over
    ``>>`` at the same position.
    """
    punctuator_regexes = [v[0] for v in punctuators.values()]
    # Sort by descending pattern length.  The key-based form replaces the
    # Python-2-only cmp/lambda comparator and is stable.
    punctuator_regexes.sort(key=len, reverse=True)
    return '(%s)' % '|'.join(punctuator_regexes)
def t_clinecomment(t):
    r'//[^\n]*'
    # Discard the comment (no return value -> no token).  The pattern does
    # NOT consume the trailing newline, so t_newline still fires and counts
    # the line; incrementing lineno here as well double-counted every
    # commented line.
def t_cr(t):
    r'\r'
    # Skip over CR characters. Only necessary on urlopen'd files.

# C /* comments */. Copied from the ylex.py example in PLY: it's not 100%
# correct for ANSI C, but close enough for anything that's not crazy.
def t_ccomment(t):
    r'/\*(.|\n)*?\*/'
    # Keep line numbers accurate across multi-line comments.
    t.lexer.lineno += t.value.count('\n')

def t_header_name(t):
    r'<([\/]?[^\/\*\n>])*[\/]?>(?=[ \t\f\v\r\n])'
    # Should allow any character from charset, but that wreaks havok (skips
    # comment delimiter, for instance), so also don't permit '*' or '//'
    # The non-matching group at the end prevents false-positives with
    # operators like '>='.
    # In the event of a false positive (e.g. "if (a < b || c > d)"), the
    # token will be split and rescanned if it appears in a text production;
    # see PreprocessorParser.write.
    # Is also r'"[^\n"]"', but handled in STRING_LITERAL instead.
    t.type = 'HEADER_NAME'
    t.value = SystemHeaderName(t.value)
    return t

def t_directive(t):
    r'\#[ \t]*(ifdef|ifndef|if|elif|else|endif|define|undef|include_next|include|import|line|error|pragma)'
    # A directive only counts at the start of a line; otherwise emit a bare
    # '#' and queue the keyword as an ordinary identifier.
    if t.lexer.lasttoken in ('NEWLINE', None):
        t.type = t.value[1:].lstrip().upper()
    else:
        # TODO
        t.type = '#'
        t.lexer.nexttoken = ('IDENTIFIER', t.value[1:].lstrip())
    return t

@TOKEN(punctuator_regex(punctuators))
def t_punctuator(t):
    # Map the matched literal to its token type via the punctuators table.
    t.type = punctuators[t.value][1]
    return t

@TOKEN(IDENTIFIER)
def t_identifier(t):
    # 'defined' is special-cased for conditional expressions.
    if t.value == 'defined':
        t.type = 'DEFINED'
    else:
        t.type = 'IDENTIFIER'
    return t

# missing: universal-character-constant
@TOKEN(sub(r'({D}|\.{D})({D}|{L}|e[+-]|E[+-]|p[+-]|P[+-]|\.)*'))
def t_pp_number(t):
    t.type = 'PP_NUMBER'
    return t

@TOKEN(CHARACTER_CONSTANT)
def t_character_constant(t):
    t.type = 'CHARACTER_CONSTANT'
    return t

@TOKEN(STRING_LITERAL)
def t_string_literal(t):
    t.type = 'STRING_LITERAL'
    t.value = StringLiteral(t.value)
    return t

def t_lparen(t):
    r'\('
    # LPAREN is a '(' with no preceding whitespace -- this is how a
    # function-like macro invocation is distinguished from grouping.
    if t.lexpos == 0 or t.lexer.lexdata[t.lexpos-1] not in (' \t\f\v\n'):
        t.type = 'LPAREN'
    else:
        t.type = '('
    return t

def t_continuation(t):
    r'\\\n'
    # A backslash-newline splice: count the line but emit nothing.
    t.lexer.lineno += 1
    return None

def t_newline(t):
    r'\n'
    t.lexer.lineno += 1
    t.type = 'NEWLINE'
    return t

def t_error(t):
    # Unknown characters become OTHER tokens rather than aborting the lex.
    t.type = 'OTHER'
    return t

t_ignore = ' \t\v\f'
# --------------------------------------------------------------------------
# Expression Object Model
# --------------------------------------------------------------------------
class EvaluationContext(object):
    """Interface consulted while evaluating expression nodes."""
    def is_defined(self, identifier):
        # The base context knows no macros.
        return False

class ExpressionNode(object):
    """Abstract expression-tree node; evaluates to 0 and prints as ''."""
    def evaluate(self, context):
        return 0

    def __str__(self):
        return ''

class ConstantExpressionNode(ExpressionNode):
    """A literal numeric value."""
    def __init__(self, value):
        self.value = value

    def evaluate(self, context):
        return self.value

    def __str__(self):
        return str(self.value)

class UnaryExpressionNode(ExpressionNode):
    """A unary operator applied to a child node."""
    def __init__(self, op, op_str, child):
        self.op = op
        self.op_str = op_str
        self.child = child

    def evaluate(self, context):
        operand = self.child.evaluate(context)
        return self.op(operand)

    def __str__(self):
        return '(%s %s)' % (self.op_str, self.child)

class BinaryExpressionNode(ExpressionNode):
    """A strict (non-short-circuit) binary operator over two children."""
    def __init__(self, op, op_str, left, right):
        self.op = op
        self.op_str = op_str
        self.left = left
        self.right = right

    def evaluate(self, context):
        lhs = self.left.evaluate(context)
        rhs = self.right.evaluate(context)
        return self.op(lhs, rhs)

    def __str__(self):
        return '(%s %s %s)' % (self.left, self.op_str, self.right)

class LogicalAndExpressionNode(ExpressionNode):
    """Short-circuiting '&&'; the right side only runs if the left is truthy."""
    def __init__(self, left, right):
        self.left = left
        self.right = right

    def evaluate(self, context):
        return self.left.evaluate(context) and self.right.evaluate(context)

    def __str__(self):
        return '(%s && %s)' % (self.left, self.right)

class LogicalOrExpressionNode(ExpressionNode):
    """Short-circuiting '||'; the right side only runs if the left is falsy."""
    def __init__(self, left, right):
        self.left = left
        self.right = right

    def evaluate(self, context):
        return self.left.evaluate(context) or self.right.evaluate(context)

    def __str__(self):
        return '(%s || %s)' % (self.left, self.right)

class ConditionalExpressionNode(ExpressionNode):
    """The C ternary operator: condition ? left : right."""
    def __init__(self, condition, left, right):
        self.condition = condition
        self.left = left
        self.right = right

    def evaluate(self, context):
        if self.condition.evaluate(context):
            return self.left.evaluate(context)
        return self.right.evaluate(context)

    def __str__(self):
        return '(%s ? %s : %s)' % (self.condition, self.left, self.right)
# --------------------------------------------------------------------------
# Lexers
# --------------------------------------------------------------------------
class PreprocessorLexer(lex.Lexer):
    # A lex.Lexer with a stack of input buffers, so that #include can switch
    # to the included file and resume the includer when it is exhausted.
    def __init__(self):
        lex.Lexer.__init__(self)
        self.filename = '<input>'

    def input(self, data, filename=None):
        if filename:
            self.filename = filename
        self.lasttoken = None
        self.input_stack = []
        lex.Lexer.input(self, data)

    def push_input(self, data, filename):
        # Save the current buffer/position and start lexing *data* instead.
        self.input_stack.append(
            (self.lexdata, self.lexpos, self.filename, self.lineno))
        self.lexdata = data
        self.lexpos = 0
        self.lineno = 1
        self.filename = filename
        self.lexlen = len(self.lexdata)

    def pop_input(self):
        # Restore the most recently saved buffer and keep lexing from there.
        self.lexdata, self.lexpos, self.filename, self.lineno = \
            self.input_stack.pop()
        self.lexlen = len(self.lexdata)

    def token(self):
        result = lex.Lexer.token(self)
        # End of an included file: fall back through the input stack until a
        # buffer yields a token (or the stack is empty -> real EOF).
        while result is None and self.input_stack:
            self.pop_input()
            result = lex.Lexer.token(self)
        if result:
            # lasttoken lets t_directive decide whether '#' starts a line;
            # tokens are tagged with the file they came from.
            self.lasttoken = result.type
            result.filename = self.filename
        else:
            self.lasttoken = None
        return result
class TokenListLexer(object):
    """Present a pre-computed token sequence through the lexer interface."""
    def __init__(self, tokens):
        self.tokens = tokens
        self.pos = 0

    def token(self):
        """Return the next token, or None once the sequence is exhausted."""
        if self.pos >= len(self.tokens):
            return None
        tok = self.tokens[self.pos]
        self.pos += 1
        return tok
def symbol_to_token(sym):
    """Extract the LexToken behind a parser symbol.

    YaccSymbols wrap their token in ``.value``; bare LexTokens pass through.
    Anything else is a programming error.
    """
    if isinstance(sym, yacc.YaccSymbol):
        return sym.value
    if isinstance(sym, lex.LexToken):
        return sym
    assert False, 'Not a symbol: %r' % sym
def create_token(type, value, production=None):
    """Build a LexToken of *type*/*value*.

    When *production* is given, the token adopts the source position of that
    production's first symbol; otherwise it is marked as '<builtin>'.
    """
    t = lex.LexToken()
    t.type = type
    t.value = value
    t.lexpos = -1
    if production is None:
        t.lineno = -1
        t.filename = '<builtin>'
    else:
        t.lineno = production.slice[1].lineno
        t.filename = production.slice[1].filename
    return t
# --------------------------------------------------------------------------
# Grammars
# --------------------------------------------------------------------------
class Grammar(object):
    # Shared PLY-parser factory: each subclass builds its yacc parser once
    # and caches it in `prototype`; `name` keys the generated table module.
    prototype = None
    name = 'grammar'

    @classmethod
    def get_prototype(cls):
        if not cls.prototype:
            instance = cls()
            tabmodule = '%stab' % cls.name
            cls.prototype = yacc.yacc(module=instance, tabmodule=tabmodule)
        return cls.prototype
class PreprocessorGrammar(Grammar):
    # The directive-level grammar (C99 6.10 "preprocessing-file").  The PLY
    # docstrings in the p_* methods ARE the grammar; actions drive the
    # PreprocessorParser (p.parser) which tracks #if state and output.
    tokens = tokens
    name = 'pp'

    def p_preprocessing_file(self, p):
        '''preprocessing_file : group_opt
        '''

    def p_group_opt(self, p):
        '''group_opt : group
                     |
        '''

    def p_group(self, p):
        '''group : group_part
                 | group group_part
        '''

    def p_group_part(self, p):
        '''group_part : if_section
                      | control_line
                      | text_line
        '''

    def p_if_section(self, p):
        '''if_section : if_group elif_groups_opt else_group_opt endif_line
        '''

    def p_if_group(self, p):
        '''if_group : if_line group_opt
        '''

    def p_if_line(self, p):
        '''if_line : IF replaced_constant_expression NEWLINE
                   | IFDEF IDENTIFIER NEWLINE
                   | IFNDEF IDENTIFIER NEWLINE
        '''
        # Evaluate the condition only when the enclosing region is active;
        # otherwise the whole section is disabled regardless.
        if p.parser.enable_declaratives():
            type = p.slice[1].type
            if type == 'IF':
                if p[2]:
                    result = p[2].evaluate(p.parser.namespace)
                else:
                    # error
                    result = False
            elif type == 'IFDEF':
                result = p.parser.namespace.is_defined(p[2])
            elif type == 'IFNDEF':
                result = not p.parser.namespace.is_defined(p[2])
                # Emit a marker token so downstream tools can spot
                # include-guard style #ifndef blocks.
                p.parser.write((create_token('PP_IFNDEF', p[2], p),))
        else:
            result = False
        p.parser.condition_if(result)

    def p_elif_groups_opt(self, p):
        '''elif_groups_opt : elif_groups
                           |
        '''

    def p_elif_groups(self, p):
        '''elif_groups : elif_group
                       | elif_groups elif_group
        '''

    def p_elif_group(self, p):
        '''elif_group : elif_line group_opt
        '''

    def p_elif_line(self, p):
        '''elif_line : ELIF replaced_elif_constant_expression NEWLINE
        '''
        result = p[2].evaluate(p.parser.namespace)
        p.parser.condition_elif(result)

    def p_else_group_opt(self, p):
        '''else_group_opt : else_group
                          |
        '''

    def p_else_group(self, p):
        '''else_group : else_line group_opt
        '''

    def p_else_line(self, p):
        '''else_line : ELSE NEWLINE
        '''
        p.parser.condition_else()

    def p_endif_line(self, p):
        '''endif_line : ENDIF pp_tokens_opt NEWLINE
        '''
        # pp_tokens needed (ignored) here for Apple.
        p.parser.condition_endif()

    def p_control_line(self, p):
        '''control_line : include_line NEWLINE
                        | define_object
                        | define_function
                        | undef_line
                        | LINE pp_tokens NEWLINE
                        | error_line
                        | PRAGMA pp_tokens_opt NEWLINE
        '''

    def p_include_line(self, p):
        '''include_line : INCLUDE pp_tokens
                        | INCLUDE_NEXT pp_tokens
                        | IMPORT pp_tokens
        '''
        # Macro-expand the operand first (the header name may itself be a
        # macro), then dispatch on directive and operand form.
        if p.parser.enable_declaratives():
            tokens = p[2]
            tokens = p.parser.namespace.apply_macros(tokens)
            if len(tokens) > 0:
                if p.slice[1].type == 'INCLUDE':
                    if tokens[0].type == 'STRING_LITERAL':
                        p.parser.include(tokens[0].value)
                        return
                    elif tokens[0].type == 'HEADER_NAME':
                        p.parser.include_system(tokens[0].value)
                        return
                elif p.slice[1].type == 'INCLUDE_NEXT':
                    p.parser.include_next(tokens[0].value, p.slice[1].filename)
                    return
                else:
                    if tokens[0].type == 'STRING_LITERAL':
                        p.parser.import_(tokens[0].value)
                        return
                    elif tokens[0].type == 'HEADER_NAME':
                        p.parser.import_system(tokens[0].value)
                        return
            # TODO
            print >> sys.stderr, 'Invalid #include'

    def p_define_object(self, p):
        '''define_object : DEFINE IDENTIFIER replacement_list NEWLINE
        '''
        if p.parser.enable_declaratives():
            p.parser.namespace.define_object(p[2], p[3])

            # Try to parse replacement list as an expression
            tokens = p.parser.namespace.apply_macros(p[3])
            lexer = TokenListLexer(tokens)
            expr_parser = StrictConstantExpressionParser(lexer,
                                                         p.parser.namespace)
            value = expr_parser.parse(debug=False)
            if value is not None:
                # Constant-valued macro: emit name and computed value.
                value = value.evaluate(p.parser.namespace)
                p.parser.write(
                    (create_token('PP_DEFINE_CONSTANT', (p[2], value), p),))
            else:
                # Didn't parse, pass on as string
                value = ' '.join([str(t.value) for t in p[3]])
                p.parser.write((create_token('PP_DEFINE', (p[2], value), p),))

    def p_define_function(self, p):
        '''define_function : DEFINE IDENTIFIER LPAREN define_function_params ')' pp_tokens_opt NEWLINE
        '''
        if p.parser.enable_declaratives():
            p.parser.namespace.define_function(p[2], p[4], p[6])

    def p_define_function_params(self, p):
        '''define_function_params : identifier_list_opt
                                  | ELLIPSIS
                                  | identifier_list ',' ELLIPSIS
        '''
        if len(p) == 2:
            # NOTE(review): p[1] here is the token's *value* ('...'), not the
            # type name 'ELLIPSIS', so this branch appears never to fire for
            # a bare varargs list -- confirm against the ELLIPSIS production.
            if p[1] == 'ELLIPSIS':
                p[0] = ('...',)
            else:
                p[0] = p[1]
        else:
            p[0] = p[1] + ('...',)

    def p_undef_line(self, p):
        '''undef_line : UNDEF IDENTIFIER NEWLINE
        '''
        if p.parser.enable_declaratives():
            p.parser.namespace.undef(p[2])

    def p_error_line(self, p):
        '''error_line : ERROR pp_tokens_opt NEWLINE
        '''
        if p.parser.enable_declaratives():
            p.parser.error(' '.join([t.value for t in p[2]]),
                           p.slice[1].filename, p.slice[1].lineno)

    def p_text_line(self, p):
        '''text_line : pp_tokens_opt NEWLINE
        '''
        # Ordinary source text: expand macros and forward to the output.
        if p.parser.enable_declaratives():
            tokens = p[1]
            tokens = p.parser.namespace.apply_macros(tokens)
            p.parser.write(tokens)

    def p_replacement_list(self, p):
        '''replacement_list :
                            | preprocessing_token_no_lparen
                            | preprocessing_token_no_lparen pp_tokens
        '''
        # The first token must not be LPAREN, or the define would have been
        # parsed as a function-like macro.
        if len(p) == 3:
            p[0] = (p[1],) + p[2]
        elif len(p) == 2:
            p[0] = (p[1],)
        else:
            p[0] = ()

    def p_identifier_list_opt(self, p):
        '''identifier_list_opt : identifier_list
                               |
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = ()

    def p_identifier_list(self, p):
        '''identifier_list : IDENTIFIER
                           | identifier_list ',' IDENTIFIER
        '''
        if len(p) > 2:
            p[0] = p[1] + (p[3],)
        else:
            p[0] = (p[1],)

    def p_replaced_constant_expression(self, p):
        '''replaced_constant_expression : pp_tokens'''
        # #if operand: macro-expand, then parse as a constant expression.
        # Skipped entirely when this region is disabled.
        if p.parser.enable_conditionals():
            tokens = p[1]
            tokens = p.parser.namespace.apply_macros(tokens)
            lexer = TokenListLexer(tokens)
            parser = ConstantExpressionParser(lexer, p.parser.namespace)
            p[0] = parser.parse(debug=True)
        else:
            p[0] = ConstantExpressionNode(0)

    def p_replaced_elif_constant_expression(self, p):
        '''replaced_elif_constant_expression : pp_tokens'''
        # Same as above, but an #elif is only evaluated when no earlier
        # branch of the conditional has fired.
        if p.parser.enable_elif_conditionals():
            tokens = p[1]
            tokens = p.parser.namespace.apply_macros(tokens)
            lexer = TokenListLexer(tokens)
            parser = ConstantExpressionParser(lexer, p.parser.namespace)
            p[0] = parser.parse(debug=True)
        else:
            p[0] = ConstantExpressionNode(0)

    def p_pp_tokens_opt(self, p):
        '''pp_tokens_opt : pp_tokens
                         |
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = ()

    def p_pp_tokens(self, p):
        '''pp_tokens : preprocessing_token
                     | pp_tokens preprocessing_token
        '''
        if len(p) == 2:
            p[0] = (p[1],)
        else:
            p[0] = p[1] + (p[2],)

    def p_preprocessing_token_no_lparen(self, p):
        '''preprocessing_token_no_lparen : HEADER_NAME
                                         | IDENTIFIER
                                         | PP_NUMBER
                                         | CHARACTER_CONSTANT
                                         | STRING_LITERAL
                                         | punctuator
                                         | DEFINED
                                         | OTHER
        '''
        p[0] = symbol_to_token(p.slice[1])

    def p_preprocessing_token(self, p):
        '''preprocessing_token : preprocessing_token_no_lparen
                               | LPAREN
        '''
        p[0] = symbol_to_token(p.slice[1])

    def p_punctuator(self, p):
        '''punctuator : ELLIPSIS
                      | RIGHT_ASSIGN
                      | LEFT_ASSIGN
                      | ADD_ASSIGN
                      | SUB_ASSIGN
                      | MUL_ASSIGN
                      | DIV_ASSIGN
                      | MOD_ASSIGN
                      | AND_ASSIGN
                      | XOR_ASSIGN
                      | OR_ASSIGN
                      | RIGHT_OP
                      | LEFT_OP
                      | INC_OP
                      | DEC_OP
                      | PTR_OP
                      | AND_OP
                      | OR_OP
                      | LE_OP
                      | GE_OP
                      | EQ_OP
                      | NE_OP
                      | HASH_HASH
                      | ';'
                      | '{'
                      | '}'
                      | ','
                      | ':'
                      | '='
                      | '('
                      | ')'
                      | '['
                      | ']'
                      | PERIOD
                      | '&'
                      | '!'
                      | '~'
                      | '-'
                      | '+'
                      | '*'
                      | '/'
                      | '%'
                      | '<'
                      | '>'
                      | '^'
                      | '|'
                      | '?'
                      | '#'
        '''
        p[0] = symbol_to_token(p.slice[1])

    def p_error(self, t):
        if not t:
            # Crap, no way to get to Parser instance.  FIXME TODO
            print >> sys.stderr, 'Syntax error at end of file.'
        else:
            # TODO
            print >> sys.stderr, '%s:%d Syntax error at %r' % \
                (t.lexer.filename, t.lexer.lineno, t.value)
            #t.lexer.cparser.handle_error('Syntax error at %r' % t.value,
            #    t.lexer.filename, t.lexer.lineno)
        # Don't alter lexer: default behaviour is to pass error production
        # up until it hits the catch-all at declaration, at which point
        # parsing continues (synchronisation).
class ConstantExpressionParseException(Exception):
    # Raised by the expression grammars' p_error (and the strict grammar's
    # p_identifier) to abort parsing; callers return None instead.
    pass
class ConstantExpressionGrammar(Grammar):
    # C constant-expression grammar used for #if/#elif operands.  Builds an
    # ExpressionNode tree and stores the root in p.parser.result.
    name = 'expr'
    tokens = tokens

    def p_constant_expression(self, p):
        '''constant_expression : conditional_expression
        '''
        p[0] = p[1]
        p.parser.result = p[0]

    def p_constant(self, p):
        '''constant : PP_NUMBER
        '''
        # Parse a C numeric literal: strip suffixes, honour 0x/0 prefixes,
        # fall back to float, and finally to 0 for anything unparseable.
        value = p[1].rstrip('LlUu')
        try:
            if value[:2] == '0x':
                value = int(value[2:], 16)
            elif value[0] == '0':
                value = int(value, 8)
            else:
                value = int(value)
        except ValueError:
            value = value.rstrip('eEfF')
            try:
                value = float(value)
            except ValueError:
                value = 0
        p[0] = ConstantExpressionNode(value)

    def p_identifier(self, p):
        '''identifier : IDENTIFIER
        '''
        # An undefined identifier in a preprocessor expression is 0 (cpp rule).
        p[0] = ConstantExpressionNode(0)

    def p_primary_expression(self, p):
        '''primary_expression : constant
                              | identifier
                              | '(' expression ')'
                              | LPAREN expression ')'
        '''
        # Both '(' and LPAREN token values are '(', so this test covers both.
        if p[1] == '(':
            p[0] = p[2]
        else:
            p[0] = p[1]

    def p_postfix_expression(self, p):
        '''postfix_expression : primary_expression
        '''
        p[0] = p[1]

    def p_unary_expression(self, p):
        '''unary_expression : postfix_expression
                            | unary_operator cast_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        elif type(p[1]) == tuple:
            # unary_operator reduces to (op, op_str)
            p[0] = UnaryExpressionNode(p[1][0], p[1][1], p[2])
        else:
            # TODO
            p[0] = None

    def p_unary_operator(self, p):
        '''unary_operator : '+'
                          | '-'
                          | '~'
                          | '!'
        '''
        # reduces to (op, op_str)
        p[0] = ({
            '+': operator.pos,
            '-': operator.neg,
            '~': operator.inv,
            '!': operator.not_}[p[1]], p[1])

    def p_cast_expression(self, p):
        '''cast_expression : unary_expression
        '''
        p[0] = p[len(p) - 1]

    def p_multiplicative_expression(self, p):
        '''multiplicative_expression : cast_expression
                        | multiplicative_expression '*' cast_expression
                        | multiplicative_expression '/' cast_expression
                        | multiplicative_expression '%' cast_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            # NOTE: operator.div is Python 2 only (this module targets
            # Python 2); it is C-style truncating division for ints.
            p[0] = BinaryExpressionNode({
                '*': operator.mul,
                '/': operator.div,
                '%': operator.mod}[p[2]], p[2], p[1], p[3])

    def p_additive_expression(self, p):
        '''additive_expression : multiplicative_expression
                        | additive_expression '+' multiplicative_expression
                        | additive_expression '-' multiplicative_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode({
                '+': operator.add,
                '-': operator.sub}[p[2]], p[2], p[1], p[3])

    def p_shift_expression(self, p):
        '''shift_expression : additive_expression
                            | shift_expression LEFT_OP additive_expression
                            | shift_expression RIGHT_OP additive_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode({
                '<<': operator.lshift,
                '>>': operator.rshift}[p[2]], p[2], p[1], p[3])

    def p_relational_expression(self, p):
        '''relational_expression : shift_expression
                          | relational_expression '<' shift_expression
                          | relational_expression '>' shift_expression
                          | relational_expression LE_OP shift_expression
                          | relational_expression GE_OP shift_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode({
                '>': operator.gt,
                '<': operator.lt,
                '<=': operator.le,
                '>=': operator.ge}[p[2]], p[2], p[1], p[3])

    def p_equality_expression(self, p):
        '''equality_expression : relational_expression
                        | equality_expression EQ_OP relational_expression
                        | equality_expression NE_OP relational_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode({
                '==': operator.eq,
                '!=': operator.ne}[p[2]], p[2], p[1], p[3])

    def p_and_expression(self, p):
        '''and_expression : equality_expression
                          | and_expression '&' equality_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode(operator.and_, '&', p[1], p[3])

    def p_exclusive_or_expression(self, p):
        '''exclusive_or_expression : and_expression
                          | exclusive_or_expression '^' and_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode(operator.xor, '^', p[1], p[3])

    def p_inclusive_or_expression(self, p):
        '''inclusive_or_expression : exclusive_or_expression
                   | inclusive_or_expression '|' exclusive_or_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryExpressionNode(operator.or_, '|', p[1], p[3])

    def p_logical_and_expression(self, p):
        '''logical_and_expression : inclusive_or_expression
                  | logical_and_expression AND_OP inclusive_or_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            # Short-circuit node, unlike the strict BinaryExpressionNode.
            p[0] = LogicalAndExpressionNode(p[1], p[3])

    def p_logical_or_expression(self, p):
        '''logical_or_expression : logical_and_expression
                  | logical_or_expression OR_OP logical_and_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = LogicalOrExpressionNode(p[1], p[3])

    def p_conditional_expression(self, p):
        '''conditional_expression : logical_or_expression
            | logical_or_expression '?' expression ':' conditional_expression
        '''
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = ConditionalExpressionNode(p[1], p[3], p[5])

    def p_assignment_expression(self, p):
        '''assignment_expression : conditional_expression
                | unary_expression assignment_operator assignment_expression
        '''
        # TODO assignment
        if len(p) == 2:
            p[0] = p[1]

    def p_assignment_operator(self, p):
        '''assignment_operator : '='
                               | MUL_ASSIGN
                               | DIV_ASSIGN
                               | MOD_ASSIGN
                               | ADD_ASSIGN
                               | SUB_ASSIGN
                               | LEFT_ASSIGN
                               | RIGHT_ASSIGN
                               | AND_ASSIGN
                               | XOR_ASSIGN
                               | OR_ASSIGN
        '''

    def p_expression(self, p):
        '''expression : assignment_expression
                      | expression ',' assignment_expression
        '''
        # TODO sequence
        if len(p) == 2:
            p[0] = p[1]

    def p_error(self, t):
        raise ConstantExpressionParseException()
class StrictConstantExpressionGrammar(ConstantExpressionGrammar):
    # Like ConstantExpressionGrammar, but rejects expressions that contain
    # identifiers instead of treating them as 0.  Used to decide whether a
    # #define's replacement list is a pure constant.
    name = 'strict_expr'
    tokens = tokens

    def p_identifier(self, p):
        '''identifier : IDENTIFIER
        '''
        raise ConstantExpressionParseException()
class ExecutionState(object):
    """Activity state of one #if/#elif/#else conditional level.

    ``parent_enabled``  -- the enclosing region was emitting tokens.
    ``context_enabled`` -- some branch of this conditional has already fired.
    ``enabled``         -- tokens are currently being emitted.
    """
    def __init__(self, parent_enabled, enabled):
        self.enabled = parent_enabled and enabled
        self.context_enabled = enabled
        self.parent_enabled = parent_enabled

    def enable(self, result):
        """Enter an #elif/#else branch whose condition evaluated to *result*."""
        if not result:
            self.enabled = False
            return
        # Active only if the parent is active and no earlier branch fired.
        self.enabled = self.parent_enabled and not self.context_enabled
        self.context_enabled = True
class PreprocessorParser(yacc.Parser):
    """Top-level driver: lexes and parses a C translation unit, executing
    preprocessor directives and collecting the surviving tokens in
    ``self.output``."""
    def __init__(self, gcc_search_path=True):
        yacc.Parser.__init__(self)
        self.lexer = lex.lex(cls=PreprocessorLexer)
        PreprocessorGrammar.get_prototype().init_parser(self)

        # Map system header name to data, overrides path search and open()
        self.system_headers = {}

        self.include_path = ['/usr/local/include', '/usr/include']
        if sys.platform == 'darwin':
            self.framework_path = ['/System/Library/Frameworks',
                                   '/Library/Frameworks']
        else:
            self.framework_path = []

        if gcc_search_path:
            self.add_gcc_search_path()

        self.lexer.filename = ''

        self.defines = {}
        self.namespace = PreprocessorNamespace()

    def define(self, name, value):
        """Predefine an object-like macro before parsing."""
        self.defines[name] = value

    def add_gcc_search_path(self):
        # Ask the local gcc where its private include directory lives and
        # append it to the search path.
        from subprocess import Popen, PIPE
        path = Popen('gcc -print-file-name=include',
                     shell=True, stdout=PIPE).communicate()[0].strip()
        if path:
            self.include_path.append(path)

    def parse(self, filename=None, data=None, namespace=None, debug=False):
        """Preprocess *filename* (or raw *data*); results go to self.output."""
        self.output = []
        if not namespace:
            namespace = self.namespace
        for name, value in self.defines.items():
            namespace.define_object(name, (create_token('IDENTIFIER', value),))
        self.namespace = namespace
        self.imported_headers = set()
        # Sentinel state so directives at file scope are always enabled.
        self.condition_stack = [ExecutionState(True, True)]
        if filename:
            if not data:
                data = open(filename, 'r').read()
            self.lexer.input(data, filename)
        elif data:
            self.lexer.input(data, '<input>')
        return yacc.Parser.parse(self, debug=debug)

    def push_file(self, filename, data=None):
        # Switch the lexer to *filename*; the includer resumes afterwards.
        print >> sys.stderr, filename
        if not data:
            data = open(filename).read()
        self.lexer.push_input(data, filename)

    def include(self, header):
        """Handle ``#include "header"``."""
        path = self.get_header_path(header)
        if path:
            self.push_file(path)
        else:
            print >> sys.stderr, '"%s" not found' % header # TODO

    def include_system(self, header):
        """Handle ``#include <header>``."""
        # Try registered in-memory headers before the filesystem.
        if header in self.system_headers:
            self.push_file(header, self.system_headers[header])
            return
        path = self.get_system_header_path(header)
        if path:
            self.push_file(path)
        else:
            print >> sys.stderr, '"%s" not found' % header # TODO

    def include_next(self, header, reference):
        """Handle GNU ``#include_next``: resume the search after *reference*."""
        # XXX doesn't go via get_system_header
        next = False
        for path in self.include_path:
            p = os.path.join(path, header)
            if os.path.exists(p):
                if next:
                    self.push_file(p)
                    return
                elif p == reference:
                    next = True
        print >> sys.stderr, '%s: cannot include_next from %s' % \
            (header, reference) # TODO

    def import_(self, header):
        """Handle Objective-C ``#import "header"`` (include-once)."""
        path = self.get_header_path(header)
        if path:
            if path not in self.imported_headers:
                self.imported_headers.add(path)
                self.push_file(path)
        else:
            print >> sys.stderr, '"%s" not found' % header # TODO

    def import_system(self, header):
        """Handle Objective-C ``#import <header>`` (include-once)."""
        if header in self.system_headers:
            # Bug fix: this branch previously tested an undefined local
            # 'path' (NameError); in-memory headers are keyed by name.
            if header not in self.imported_headers:
                self.imported_headers.add(header)
                self.push_file(header, self.system_headers[header])
            return
        path = self.get_system_header_path(header)
        if path:
            if path not in self.imported_headers:
                self.imported_headers.add(path)
                self.push_file(path)
        else:
            print >> sys.stderr, '"%s" not found' % header # TODO

    def get_header_path(self, header):
        """Resolve a quoted include: relative to the including file first,
        then frameworks (macOS), then the system search path.

        Returns the path or None; never pushes the file itself.  (It used
        to call push_file here as well, which made every caller include the
        file twice and defeated #import's once-only tracking.)
        """
        p = os.path.join(os.path.dirname(self.lexer.filename), header)
        if os.path.exists(p):
            return p
        if sys.platform == 'darwin':
            p = self.get_framework_header_path(header)
            if p:
                return p
        # Quoted includes fall back to the <...> search path, as in cpp.
        return self.get_system_header_path(header)

    def get_system_header_path(self, header):
        """Search the include path (and macOS frameworks) for *header*."""
        for path in self.include_path:
            p = os.path.join(path, header)
            if os.path.exists(p):
                return p
        if sys.platform == 'darwin':
            return self.get_framework_header_path(header)

    def get_framework_header_path(self, header):
        """Resolve 'Framework/Header.h' against the framework search path,
        including frameworks enclosing the current file."""
        if '/' in header:
            # header is 'Framework/Framework.h' (e.g. OpenGL/OpenGL.h).
            framework, header = header.split('/', 1)

            paths = self.framework_path[:]
            # Add ancestor frameworks of current file
            localpath = ''
            for parent in self.lexer.filename.split('.framework/')[:-1]:
                localpath += parent + '.framework'
                paths.append(os.path.join(localpath, 'Frameworks'))
            for path in paths:
                p = os.path.join(path, '%s.framework' % framework,
                                 'Headers', header)
                if os.path.exists(p):
                    return p

    def error(self, message, filename, line):
        """Report a #error directive."""
        print >> sys.stderr, '%s:%d #error %s' % (filename, line, message)

    def condition_if(self, result):
        self.condition_stack.append(
            ExecutionState(self.condition_stack[-1].enabled, result))

    def condition_elif(self, result):
        self.condition_stack[-1].enable(result)

    def condition_else(self):
        self.condition_stack[-1].enable(True)

    def condition_endif(self):
        self.condition_stack.pop()

    def enable_declaratives(self):
        return self.condition_stack[-1].enabled

    def enable_conditionals(self):
        return self.condition_stack[-1].enabled

    def enable_elif_conditionals(self):
        # An #elif operand is only evaluated when no earlier branch of the
        # current conditional has fired and the parent region is active.
        return self.condition_stack[-1].parent_enabled and \
               not self.condition_stack[-1].context_enabled

    def write(self, tokens):
        """Append *tokens* to the output, splitting mis-lexed HEADER_NAMEs."""
        for t in tokens:
            if t.type == 'HEADER_NAME':
                # token was mis-parsed.  Do it again, without the '<', '>'.
                ta = create_token('<', '<')
                ta.filename = t.filename
                ta.lineno = t.lineno
                self.output.append(ta)

                l = lex.lex(cls=PreprocessorLexer)
                l.input(t.value, t.filename)
                l.lineno = t.lineno
                tb = l.token()
                while tb is not None:
                    if hasattr(tb, 'lexer'):
                        del tb.lexer
                    self.output.append(tb)
                    tb = l.token()

                tc = create_token('>', '>')
                tc.filename = t.filename
                tc.lineno = t.lineno
                self.output.append(tc)

                continue
            if hasattr(t, 'lexer'):
                del t.lexer
            self.output.append(t)

    def get_memento(self):
        """Snapshot the currently-defined macro names."""
        return (set(self.namespace.objects.keys()),
                set(self.namespace.functions.keys()))
class ConstantExpressionParser(yacc.Parser):
    # Parses a token sequence into an ExpressionNode tree; parse() returns
    # the root node, or None when the expression does not parse.
    _const_grammar = ConstantExpressionGrammar

    def __init__(self, lexer, namespace):
        yacc.Parser.__init__(self)
        self.lexer = lexer
        self.namespace = namespace
        self._const_grammar.get_prototype().init_parser(self)

    def parse(self, debug=False):
        # The grammar's top production stores its result in self.result.
        self.result = None
        try:
            yacc.Parser.parse(self, lexer=self.lexer, debug=debug)
        except ConstantExpressionParseException:
            # XXX warning here?
            pass
        return self.result
class StrictConstantExpressionParser(ConstantExpressionParser):
    # Variant that fails (returns None) on expressions containing
    # identifiers, used to recognise constant-valued #defines.
    _const_grammar = StrictConstantExpressionGrammar
class PreprocessorNamespace(EvaluationContext):
    def __init__(self, gcc_macros=True,
                 stdc_macros=True,
                 workaround_macros=True):
        # name -> replacement token tuple, for object-like macros
        self.objects = {}
        # name -> (replacement list, argument count), for function-like macros
        self.functions = {}

        if stdc_macros:
            self.add_stdc_macros()

        if gcc_macros:
            self.add_gcc_macros()

        if workaround_macros:
            self.add_workaround_macros()
def add_stdc_macros(self):
'''Add macros defined in 6.10.8 except __FILE__ and __LINE__.
This is potentially dangerous, as this preprocessor is not ISO
compliant in many ways (the most obvious is the lack of # and ##
operators). It is required for Apple headers, however, which
otherwise assume some truly bizarre syntax is ok.
'''
import time
date = time.strftime('%b %d %Y') # XXX %d should have leading space
t = time.strftime('%H:%M:S')
self.define_object('__DATE__',
(create_token('STRING_LITERAL', date),))
self.define_object('__TIME__',
(create_token('STRING_LITERAL', t),))
self.define_object('__STDC__',
(create_token('PP_NUMBER', '1'),))
self.define_object('__STDC_HOSTED__',
(create_token('PP_NUMBER', '1'),))
self.define_object('__STDC_VERSION',
(create_token('PP_NUMBER', '199901L'),))
def add_gcc_macros(self):
import platform
import sys
gcc_macros = ('__GLIBC_HAVE_LONG_LONG', '__GNUC__',)
# Get these from `gcc -E -dD empty.c`
machine_macros = {
'x86_64': ('__amd64', '__amd64__', '__x86_64', '__x86_64__',
'__tune_k8__', '__MMX__', '__SSE__', '__SSE2__',
'__SSE_MATH__', '__k8', '__k8__'),
'Power Macintosh': ('_ARCH_PPC', '__BIG_ENDIAN__', '_BIG_ENDIAN',
'__ppc__', '__POWERPC__'),
# TODO everyone else.
}.get(platform.machine(), ())
platform_macros = {
'linux2': ('__gnu_linux__', '__linux', '__linux__', 'linux',
'__unix', '__unix__', 'unix'),
'darwin': ('__MACH__', '__APPLE__', '__DYNAMIC__', '__APPLE_CC__'),
'win32': ('_WIN32',),
# TODO everyone else
}.get(sys.platform, ())
tok1 = lex.LexToken()
tok1.type = 'PP_NUMBER'
tok1.value = '1'
tok1.lineno = -1
tok1.lexpos = -1
for macro in machine_macros + platform_macros + gcc_macros:
self.define_object(macro, (tok1,))
self.define_object('inline', ())
self.define_object('__inline', ())
self.define_object('__inline__', ())
self.define_object('__const', (create_token('IDENTIFIER', 'const'),))
def add_workaround_macros(self):
if sys.platform == 'darwin':
self.define_object('CF_INLINE', ())
def is_defined(self, name):
return name in self.objects or name in self.functions
def undef(self, name):
if name in self.objects:
del self.objects[name]
if name in self.functions:
del self.functions[name]
def define_object(self, name, replacements):
# TODO check not already existing in objects or functions
for r in replacements:
if hasattr(r, 'lexer'):
del r.lexer
self.objects[name] = replacements
def define_function(self, name, params, replacements):
# TODO check not already existing in objects or functions
for r in replacements:
if hasattr(r, 'lexer'):
del r.lexer
replacements = list(replacements)
params = list(params)
numargs = len(params)
for i, t in enumerate(replacements):
if hasattr(t, 'lexer'):
del t.lexer
if t.type == 'IDENTIFIER' and t.value in params:
replacements[i] = params.index(t.value)
elif t.type == 'IDENTIFIER' and t.value == '__VA_ARGS__' and \
'...' in params:
replacements[i] = len(params) - 1
self.functions[name] = replacements, numargs
def apply_macros(self, tokens, replacing=None):
repl = []
i = 0
while i < len(tokens):
token = tokens[i]
if token.type == 'IDENTIFIER' and token.value in self.objects:
r = self.objects[token.value]
if token.value != replacing and r:
repl += self.apply_macros(r, token.value)
elif token.type == 'IDENTIFIER' and \
token.value in self.functions and \
len(tokens) - i > 2 and \
tokens[i+1].value == '(':
r, numargs = self.functions[token.value][:]
# build params list
i += 2
params = [[]]
parens = 0 # balance parantheses within each arg
while i < len(tokens):
if tokens[i].value == ',' and parens == 0 and \
len(params) < numargs:
params.append([])
elif tokens[i].value == ')' and parens == 0:
break
else:
if tokens[i].value == '(':
parens += 1
elif tokens[i].value == ')':
parens -= 1
params[-1].append(tokens[i])
i += 1
if token.value != replacing and r:
newr = []
for t in r:
if type(t) == int:
newr += params[t]
else:
newr.append(t)
repl += self.apply_macros(newr, token.value)
elif token.type == 'DEFINED':
if len(tokens) - i > 3 and \
tokens[i + 1].type in ('(', 'LPAREN') and \
tokens[i + 2].type == 'IDENTIFIER' and \
tokens[i + 3].type == ')':
result = self.is_defined(tokens[i + 2].value)
i += 3
elif len(tokens) - i > 1 and \
tokens[i + 1].type == 'IDENTIFIER':
result = self.is_defined(tokens[i + 1].value)
i += 1
else:
# TODO
print >> sys.stderr, 'Invalid use of "defined"'
result = 0
t = lex.LexToken()
t.value = str(int(result))
t.type = 'PP_NUMBER'
t.lexpos = token.lexpos
t.lineno = token.lineno
repl.append(t)
else:
repl.append(token)
i += 1
return repl
def copy(self):
n = PreprocessorNamespace(gcc_macros=False, workaround_macros=False)
n.functions = self.functions.copy()
n.objects = self.objects.copy()
return n
if __name__ == '__main__':
    # Preprocess the file named on the command line and dump the
    # resulting token values, space-separated, to stdout.
    filename = sys.argv[1]
    parser = PreprocessorParser()
    parser.parse(filename, debug=True)
    # Write via sys.stdout instead of the Python 2-only `print`
    # statement so the script parses under both Python 2 and 3.
    sys.stdout.write(' '.join([str(t.value) for t in parser.output]) + '\n')
| en | 0.470174 | #!/usr/bin/env python Preprocess a C source file. Limitations: * Whitespace is not preserved. * # and ## operators not handled. Reference is C99: * http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf * Also understands Objective-C #import directive * Also understands GNU #include_next # Helper: substitute {foo} with subs[foo] in string (makes regexes more lexy) # -------------------------------------------------------------------------- # Token value types # -------------------------------------------------------------------------- # Numbers represented as int and float types. # For all other tokens, type is just str representation. # Unescaping probably not perfect but close enough. # -------------------------------------------------------------------------- # Token declarations # -------------------------------------------------------------------------- # value: (regex, type) #', '#'), # Skip over CR characters. Only necessary on urlopen'd files. # C /* comments */. Copied from the ylex.py example in PLY: it's not 100% # correct for ANSI C, but close enough for anything that's not crazy. # Should allow any character from charset, but that wreaks havok (skips # comment delimiter, for instance), so also don't permit '*' or '//' # The non-matching group at the end prevents false-positives with # operators like '>='. # In the event of a false positive (e.g. "if (a < b || c > d)"), the # token will be split and rescanned if it appears in a text production; # see PreprocessorParser.write. # Is also r'"[^\n"]"', but handled in STRING_LITERAL instead. #[ \t]*(ifdef|ifndef|if|elif|else|endif|define|undef|include_next|include|import|line|error|pragma)' # TODO # missing: universal-character-constant # -------------------------------------------------------------------------- # Expression Object Model # -------------------------------------------------------------------------- Interface for evaluating expression nodes. 
# -------------------------------------------------------------------------- # Lexers # -------------------------------------------------------------------------- Create a token of type and value, at the position where 'production' was reduced. Don't specify production if the token is built-in # -------------------------------------------------------------------------- # Grammars # -------------------------------------------------------------------------- preprocessing_file : group_opt group_opt : group | group : group_part | group group_part group_part : if_section | control_line | text_line if_section : if_group elif_groups_opt else_group_opt endif_line if_group : if_line group_opt if_line : IF replaced_constant_expression NEWLINE | IFDEF IDENTIFIER NEWLINE | IFNDEF IDENTIFIER NEWLINE # error elif_groups_opt : elif_groups | elif_groups : elif_group | elif_groups elif_group elif_group : elif_line group_opt elif_line : ELIF replaced_elif_constant_expression NEWLINE else_group_opt : else_group | else_group : else_line group_opt else_line : ELSE NEWLINE endif_line : ENDIF pp_tokens_opt NEWLINE # pp_tokens needed (ignored) here for Apple. 
control_line : include_line NEWLINE | define_object | define_function | undef_line | LINE pp_tokens NEWLINE | error_line | PRAGMA pp_tokens_opt NEWLINE include_line : INCLUDE pp_tokens | INCLUDE_NEXT pp_tokens | IMPORT pp_tokens # TODO #include' define_object : DEFINE IDENTIFIER replacement_list NEWLINE # Try to parse replacement list as an expression # Didn't parse, pass on as string define_function : DEFINE IDENTIFIER LPAREN define_function_params ')' pp_tokens_opt NEWLINE define_function_params : identifier_list_opt | ELLIPSIS | identifier_list ',' ELLIPSIS undef_line : UNDEF IDENTIFIER NEWLINE error_line : ERROR pp_tokens_opt NEWLINE text_line : pp_tokens_opt NEWLINE replacement_list : | preprocessing_token_no_lparen | preprocessing_token_no_lparen pp_tokens identifier_list_opt : identifier_list | identifier_list : IDENTIFIER | identifier_list ',' IDENTIFIER replaced_constant_expression : pp_tokens replaced_elif_constant_expression : pp_tokens pp_tokens_opt : pp_tokens | pp_tokens : preprocessing_token | pp_tokens preprocessing_token preprocessing_token_no_lparen : HEADER_NAME | IDENTIFIER | PP_NUMBER | CHARACTER_CONSTANT | STRING_LITERAL | punctuator | DEFINED | OTHER preprocessing_token : preprocessing_token_no_lparen | LPAREN punctuator : ELLIPSIS | RIGHT_ASSIGN | LEFT_ASSIGN | ADD_ASSIGN | SUB_ASSIGN | MUL_ASSIGN | DIV_ASSIGN | MOD_ASSIGN | AND_ASSIGN | XOR_ASSIGN | OR_ASSIGN | RIGHT_OP | LEFT_OP | INC_OP | DEC_OP | PTR_OP | AND_OP | OR_OP | LE_OP | GE_OP | EQ_OP | NE_OP | HASH_HASH | ';' | '{' | '}' | ',' | ':' | '=' | '(' | ')' | '[' | ']' | PERIOD | '&' | '!' | '~' | '-' | '+' | '*' | '/' | '%' | '<' | '>' | '^' | '|' | '?' | '#' # Crap, no way to get to Parser instance. FIXME TODO # TODO #t.lexer.cparser.handle_error('Syntax error at %r' % t.value, # t.lexer.filename, t.lexer.lineno) # Don't alter lexer: default behaviour is to pass error production # up until it hits the catch-all at declaration, at which point # parsing continues (synchronisation). 
constant_expression : conditional_expression constant : PP_NUMBER identifier : IDENTIFIER primary_expression : constant | identifier | '(' expression ')' | LPAREN expression ')' postfix_expression : primary_expression unary_expression : postfix_expression | unary_operator cast_expression # unary_operator reduces to (op, op_str) # TODO unary_operator : '+' | '-' | '~' | '!' # reduces to (op, op_str) cast_expression : unary_expression multiplicative_expression : cast_expression | multiplicative_expression '*' cast_expression | multiplicative_expression '/' cast_expression | multiplicative_expression '%' cast_expression additive_expression : multiplicative_expression | additive_expression '+' multiplicative_expression | additive_expression '-' multiplicative_expression shift_expression : additive_expression | shift_expression LEFT_OP additive_expression | shift_expression RIGHT_OP additive_expression relational_expression : shift_expression | relational_expression '<' shift_expression | relational_expression '>' shift_expression | relational_expression LE_OP shift_expression | relational_expression GE_OP shift_expression equality_expression : relational_expression | equality_expression EQ_OP relational_expression | equality_expression NE_OP relational_expression and_expression : equality_expression | and_expression '&' equality_expression exclusive_or_expression : and_expression | exclusive_or_expression '^' and_expression inclusive_or_expression : exclusive_or_expression | inclusive_or_expression '|' exclusive_or_expression logical_and_expression : inclusive_or_expression | logical_and_expression AND_OP inclusive_or_expression logical_or_expression : logical_and_expression | logical_or_expression OR_OP logical_and_expression conditional_expression : logical_or_expression | logical_or_expression '?' 
expression ':' conditional_expression assignment_expression : conditional_expression | unary_expression assignment_operator assignment_expression # TODO assignment assignment_operator : '=' | MUL_ASSIGN | DIV_ASSIGN | MOD_ASSIGN | ADD_ASSIGN | SUB_ASSIGN | LEFT_ASSIGN | RIGHT_ASSIGN | AND_ASSIGN | XOR_ASSIGN | OR_ASSIGN expression : assignment_expression | expression ',' assignment_expression # TODO sequence identifier : IDENTIFIER # Map system header name to data, overrides path search and open() # TODO # TODO # XXX doesn't go via get_system_header # TODO # TODO # TODO # header is 'Framework/Framework.h' (e.g. OpenGL/OpenGL.h). # Add ancestor frameworks of current file #error %s' % (filename, line, message) # token was mis-parsed. Do it again, without the '<', '>'. # XXX warning here? Add macros defined in 6.10.8 except __FILE__ and __LINE__. This is potentially dangerous, as this preprocessor is not ISO compliant in many ways (the most obvious is the lack of # and ## operators). It is required for Apple headers, however, which otherwise assume some truly bizarre syntax is ok. # XXX %d should have leading space # Get these from `gcc -E -dD empty.c` # TODO everyone else. # TODO everyone else # TODO check not already existing in objects or functions # TODO check not already existing in objects or functions # build params list # balance parantheses within each arg # TODO | 1.915795 | 2 |
q3/q3/bootstrap/tests/benSAP1/cpu_D_latch_8bitreg_ei_test.py | virtimus/makaronLab | 2 | 6624420 | from q3.api import *
import q3.bootstrap.tests.benSAP1.common as cmn
import q3.bootstrap.tests.benSAP1.cpu_D_latch_8bitreg_ei as reg8bit
if __name__ == '__main__':
modv = modvAdd('testtt1')
m =modv.module()
m1 = reg8bit.makeDlatch8bitregEI('cpu-D-latch-8bitreg-ei1',m)
m2 = reg8bit.makeDlatch8bitregEI('cpu-D-latch-8bitreg-ei2',m)
m3 = reg8bit.makeDlatch8bitregEI('cpu-D-latch-8bitreg-ei3',m)
mb1 = m.modAdd('BUSI')
mb2 = m.modAdd('BUSO')
mb1.iAdd('I1',size=8)
mb1.oAdd('O1',size=8)
mb2.ioAdd('I1',size=8, ioType = IoType.DYNAMIC, direction = direction.LEFT)
mb2.oAdd('O1',size=8 )
mb1.view().setInvert(True)
mb2.view().setInvert(True)
m.iAdd('ID',size=8)
e1 = m.iAdd('ENABLEB1')
l1 =m.iAdd('LOADB1')
e2 = m.iAdd('ENABLEB2')
l2 = m.iAdd('LOADB2')
e3 = m.iAdd('ENABLEB3')
l3 = m.iAdd('LOADB3')
clk = m.iAdd('CLK')
m1.setPos(0,-200)
m2.setPos(0,-50)
m3.setPos(0,100)
mb1.setPos(-140,-300)
mb2.setPos(140.0,-300.0)
mb1.setSigFormula('O1','=I1.value()')
mb2.setSigFormula('O1','=I1.value()')
m1.nod('O').c(mb2.n('I1'))
m2.nod('O').c(mb2.n('I1'))
m3.nod('O').c(mb2.n('I1'))
m.nod('ID').c(mb2.n('I1'))
mb2.n('O1').c(mb1.n('I1'))
mb1.n('O1').c(m1.n('D'))
mb1.n('O1').c(m2.n('D'))
mb1.n('O1').c(m3.n('D'))
e1.c(m1.n('ENABLEB'))
e2.c(m2.n('ENABLEB'))
e3.c(m3.n('ENABLEB'))
e1.intSignal().setValue(True)
e2.intSignal().setValue(True)
e3.intSignal().setValue(True)
l1.c(m1.n('LOADB'))
l2.c(m2.n('LOADB'))
l3.c(m3.n('LOADB'))
l1.intSignal().setValue(True)
l2.intSignal().setValue(True)
l3.intSignal().setValue(True)
clk.c(m1.n('CLK'))
clk.c(m2.n('CLK'))
clk.c(m3.n('CLK'))
#mod('cpu-D-latch-8bitreg-ei1').n('8O').setIntSignalAsDrive()
| from q3.api import *
import q3.bootstrap.tests.benSAP1.common as cmn
import q3.bootstrap.tests.benSAP1.cpu_D_latch_8bitreg_ei as reg8bit
if __name__ == '__main__':
modv = modvAdd('testtt1')
m =modv.module()
m1 = reg8bit.makeDlatch8bitregEI('cpu-D-latch-8bitreg-ei1',m)
m2 = reg8bit.makeDlatch8bitregEI('cpu-D-latch-8bitreg-ei2',m)
m3 = reg8bit.makeDlatch8bitregEI('cpu-D-latch-8bitreg-ei3',m)
mb1 = m.modAdd('BUSI')
mb2 = m.modAdd('BUSO')
mb1.iAdd('I1',size=8)
mb1.oAdd('O1',size=8)
mb2.ioAdd('I1',size=8, ioType = IoType.DYNAMIC, direction = direction.LEFT)
mb2.oAdd('O1',size=8 )
mb1.view().setInvert(True)
mb2.view().setInvert(True)
m.iAdd('ID',size=8)
e1 = m.iAdd('ENABLEB1')
l1 =m.iAdd('LOADB1')
e2 = m.iAdd('ENABLEB2')
l2 = m.iAdd('LOADB2')
e3 = m.iAdd('ENABLEB3')
l3 = m.iAdd('LOADB3')
clk = m.iAdd('CLK')
m1.setPos(0,-200)
m2.setPos(0,-50)
m3.setPos(0,100)
mb1.setPos(-140,-300)
mb2.setPos(140.0,-300.0)
mb1.setSigFormula('O1','=I1.value()')
mb2.setSigFormula('O1','=I1.value()')
m1.nod('O').c(mb2.n('I1'))
m2.nod('O').c(mb2.n('I1'))
m3.nod('O').c(mb2.n('I1'))
m.nod('ID').c(mb2.n('I1'))
mb2.n('O1').c(mb1.n('I1'))
mb1.n('O1').c(m1.n('D'))
mb1.n('O1').c(m2.n('D'))
mb1.n('O1').c(m3.n('D'))
e1.c(m1.n('ENABLEB'))
e2.c(m2.n('ENABLEB'))
e3.c(m3.n('ENABLEB'))
e1.intSignal().setValue(True)
e2.intSignal().setValue(True)
e3.intSignal().setValue(True)
l1.c(m1.n('LOADB'))
l2.c(m2.n('LOADB'))
l3.c(m3.n('LOADB'))
l1.intSignal().setValue(True)
l2.intSignal().setValue(True)
l3.intSignal().setValue(True)
clk.c(m1.n('CLK'))
clk.c(m2.n('CLK'))
clk.c(m3.n('CLK'))
#mod('cpu-D-latch-8bitreg-ei1').n('8O').setIntSignalAsDrive()
| zh | 0.097471 | #mod('cpu-D-latch-8bitreg-ei1').n('8O').setIntSignalAsDrive() | 1.798779 | 2 |
deeprobust/image/netmodels/train_model.py | CrownX/DeepRobust | 3 | 6624421 | """
This function help to train model of different archtecture easily. Select model archtecture and training data, then output corresponding model.
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F #233
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
from PIL import Image
def train(model, data, device, maxepoch, data_path = './', save_per_epoch = 10, seed = 100):
"""train.
Parameters
----------
model :
model(option:'CNN', 'ResNet18', 'ResNet34', 'ResNet50', 'densenet', 'vgg11', 'vgg13', 'vgg16', 'vgg19')
data :
data(option:'MNIST','CIFAR10')
device :
device(option:'cpu', 'cuda')
maxepoch :
training epoch
data_path :
data path(default = './')
save_per_epoch :
save_per_epoch(default = 10)
seed :
seed
Examples
--------
>>>import deeprobust.image.netmodels.train_model as trainmodel
>>>trainmodel.train('CNN', 'MNIST', 'cuda', 20)
"""
torch.manual_seed(seed)
train_loader, test_loader = feed_dataset(data, data_path)
if (model == 'CNN'):
import deeprobust.image.netmodels.CNN as MODEL
#from deeprobust.image.netmodels.CNN import Net
train_net = MODEL.Net().to(device)
elif (model == 'ResNet18'):
import deeprobust.image.netmodels.resnet as MODEL
train_net = MODEL.ResNet18().to(device)
elif (model == 'ResNet34'):
import deeprobust.image.netmodels.resnet as MODEL
train_net = MODEL.ResNet34().to(device)
elif (model == 'ResNet50'):
import deeprobust.image.netmodels.resnet as MODEL
train_net = MODEL.ResNet50().to(device)
elif (model == 'densenet'):
import deeprobust.image.netmodels.densenet as MODEL
train_net = MODEL.densenet_cifar().to(device)
elif (model == 'vgg11'):
import deeprobust.image.netmodels.vgg as MODEL
train_net = MODEL.VGG('VGG11').to(device)
elif (model == 'vgg13'):
import deeprobust.image.netmodels.vgg as MODEL
train_net = MODEL.VGG('VGG13').to(device)
elif (model == 'vgg16'):
import deeprobust.image.netmodels.vgg as MODEL
train_net = MODEL.VGG('VGG16').to(device)
elif (model == 'vgg19'):
import deeprobust.image.netmodels.vgg as MODEL
train_net = MODEL.VGG('VGG19').to(device)
optimizer = optim.SGD(train_net.parameters(), lr= 0.1, momentum=0.5)
scheduler = lr_scheduler.StepLR(optimizer, step_size = 100, gamma = 0.1)
save_model = True
for epoch in range(1, maxepoch + 1): ## 5 batches
print(epoch)
MODEL.train(train_net, device, train_loader, optimizer, epoch)
MODEL.test(train_net, device, test_loader)
if (save_model and (epoch % (save_per_epoch) == 0 or epoch == maxepoch)):
if os.path.isdir('./trained_models/'):
print('Save model.')
torch.save(train_net.state_dict(), './trained_models/'+ data + "_" + model + "_epoch_" + str(epoch) + ".pt")
else:
os.mkdir('./trained_models/')
print('Make directory and save model.')
torch.save(train_net.state_dict(), './trained_models/'+ data + "_" + model + "_epoch_" + str(epoch) + ".pt")
scheduler.step()
def feed_dataset(data, data_dict):
if(data == 'CIFAR10'):
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_val = transforms.Compose([
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(data_dict, train=True, download = True,
transform=transform_train),
batch_size= 128, shuffle=True) #, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(data_dict, train=False, download = True,
transform=transform_val),
batch_size= 1000, shuffle=True) #, **kwargs)
elif(data == 'MNIST'):
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(data_dict, train=True, download = True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=128,
shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, download = True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=1000,
shuffle=True)
elif(data == 'ImageNet'):
pass
return train_loader, test_loader
| """
This function help to train model of different archtecture easily. Select model archtecture and training data, then output corresponding model.
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F #233
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
from PIL import Image
def train(model, data, device, maxepoch, data_path = './', save_per_epoch = 10, seed = 100):
"""train.
Parameters
----------
model :
model(option:'CNN', 'ResNet18', 'ResNet34', 'ResNet50', 'densenet', 'vgg11', 'vgg13', 'vgg16', 'vgg19')
data :
data(option:'MNIST','CIFAR10')
device :
device(option:'cpu', 'cuda')
maxepoch :
training epoch
data_path :
data path(default = './')
save_per_epoch :
save_per_epoch(default = 10)
seed :
seed
Examples
--------
>>>import deeprobust.image.netmodels.train_model as trainmodel
>>>trainmodel.train('CNN', 'MNIST', 'cuda', 20)
"""
torch.manual_seed(seed)
train_loader, test_loader = feed_dataset(data, data_path)
if (model == 'CNN'):
import deeprobust.image.netmodels.CNN as MODEL
#from deeprobust.image.netmodels.CNN import Net
train_net = MODEL.Net().to(device)
elif (model == 'ResNet18'):
import deeprobust.image.netmodels.resnet as MODEL
train_net = MODEL.ResNet18().to(device)
elif (model == 'ResNet34'):
import deeprobust.image.netmodels.resnet as MODEL
train_net = MODEL.ResNet34().to(device)
elif (model == 'ResNet50'):
import deeprobust.image.netmodels.resnet as MODEL
train_net = MODEL.ResNet50().to(device)
elif (model == 'densenet'):
import deeprobust.image.netmodels.densenet as MODEL
train_net = MODEL.densenet_cifar().to(device)
elif (model == 'vgg11'):
import deeprobust.image.netmodels.vgg as MODEL
train_net = MODEL.VGG('VGG11').to(device)
elif (model == 'vgg13'):
import deeprobust.image.netmodels.vgg as MODEL
train_net = MODEL.VGG('VGG13').to(device)
elif (model == 'vgg16'):
import deeprobust.image.netmodels.vgg as MODEL
train_net = MODEL.VGG('VGG16').to(device)
elif (model == 'vgg19'):
import deeprobust.image.netmodels.vgg as MODEL
train_net = MODEL.VGG('VGG19').to(device)
optimizer = optim.SGD(train_net.parameters(), lr= 0.1, momentum=0.5)
scheduler = lr_scheduler.StepLR(optimizer, step_size = 100, gamma = 0.1)
save_model = True
for epoch in range(1, maxepoch + 1): ## 5 batches
print(epoch)
MODEL.train(train_net, device, train_loader, optimizer, epoch)
MODEL.test(train_net, device, test_loader)
if (save_model and (epoch % (save_per_epoch) == 0 or epoch == maxepoch)):
if os.path.isdir('./trained_models/'):
print('Save model.')
torch.save(train_net.state_dict(), './trained_models/'+ data + "_" + model + "_epoch_" + str(epoch) + ".pt")
else:
os.mkdir('./trained_models/')
print('Make directory and save model.')
torch.save(train_net.state_dict(), './trained_models/'+ data + "_" + model + "_epoch_" + str(epoch) + ".pt")
scheduler.step()
def feed_dataset(data, data_dict):
if(data == 'CIFAR10'):
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_val = transforms.Compose([
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(data_dict, train=True, download = True,
transform=transform_train),
batch_size= 128, shuffle=True) #, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(data_dict, train=False, download = True,
transform=transform_val),
batch_size= 1000, shuffle=True) #, **kwargs)
elif(data == 'MNIST'):
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(data_dict, train=True, download = True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=128,
shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, download = True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=1000,
shuffle=True)
elif(data == 'ImageNet'):
pass
return train_loader, test_loader
| en | 0.288343 | This function help to train model of different archtecture easily. Select model archtecture and training data, then output corresponding model. #233 train. Parameters ---------- model : model(option:'CNN', 'ResNet18', 'ResNet34', 'ResNet50', 'densenet', 'vgg11', 'vgg13', 'vgg16', 'vgg19') data : data(option:'MNIST','CIFAR10') device : device(option:'cpu', 'cuda') maxepoch : training epoch data_path : data path(default = './') save_per_epoch : save_per_epoch(default = 10) seed : seed Examples -------- >>>import deeprobust.image.netmodels.train_model as trainmodel >>>trainmodel.train('CNN', 'MNIST', 'cuda', 20) #from deeprobust.image.netmodels.CNN import Net ## 5 batches #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), #, **kwargs) #, **kwargs) | 3.114006 | 3 |
World/Object/Unit/Constants/NPCFlags.py | sundayz/idewave-core | 10 | 6624422 | <reponame>sundayz/idewave-core
from enum import Enum
class NPCFlags(Enum):
NONE = 0
GOSSIP = 1
QUESTGIVER = 2
VENDOR = 4
FLIGHTMASTER = 8
TRAINER = 16
SPIRITHEALER = 32
SPIRITGUIDE = 64
INNKEEPER = 128
BANKER = 256
PETITIONER = 512
TABARDDESIGNER = 1024
BATTLEMASTER = 2048
AUCTIONEER = 4096
STABLEMASTER = 8192
REPAIR = 16384
OUTDOOR_PVP = 536870912
| from enum import Enum
class NPCFlags(Enum):
NONE = 0
GOSSIP = 1
QUESTGIVER = 2
VENDOR = 4
FLIGHTMASTER = 8
TRAINER = 16
SPIRITHEALER = 32
SPIRITGUIDE = 64
INNKEEPER = 128
BANKER = 256
PETITIONER = 512
TABARDDESIGNER = 1024
BATTLEMASTER = 2048
AUCTIONEER = 4096
STABLEMASTER = 8192
REPAIR = 16384
OUTDOOR_PVP = 536870912 | none | 1 | 1.782918 | 2 | |
Calculadora de IMC.py | wobegone/Calculadora_de_IMC | 0 | 6624423 | <reponame>wobegone/Calculadora_de_IMC
peso = int(input('¿Cuál es tu peso?(en kg): '))
altura = int(input('¿Cuál es tu altura?(en cm): '))
altura2 = altura / 100
imc = peso / ((altura2) ** 2)
total = round(imc,2)
print('Tu índice de masa corporal es '+str(total))
#incluido
if total >= 18 and total < 25:
print('Tu IBM está normal')
elif total >= 25:
print('Tienes sobrepeso (REVISAR)')
elif total < 18:
print('Tienes bajo peso (REVISAR)') | peso = int(input('¿Cuál es tu peso?(en kg): '))
altura = int(input('¿Cuál es tu altura?(en cm): '))
altura2 = altura / 100
imc = peso / ((altura2) ** 2)
total = round(imc,2)
print('Tu índice de masa corporal es '+str(total))
#incluido
if total >= 18 and total < 25:
print('Tu IBM está normal')
elif total >= 25:
print('Tienes sobrepeso (REVISAR)')
elif total < 18:
print('Tienes bajo peso (REVISAR)') | none | 1 | 3.723524 | 4 | |
parsl/monitoring/visualization/plots/default/workflow_resource_plots.py | aquanauts/parsl | 0 | 6624424 | <reponame>aquanauts/parsl
import math
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from plotly.offline import plot
def resource_distribution_plot(df_resources, df_task, type='psutil_process_time_user', label='CPU Time Distribution', option='avg', columns=20,):
# E.g., psutil_process_time_user or psutil_process_memory_percent
min_range = min(df_resources[type].astype('float'))
max_range = max(df_resources[type].astype('float'))
time_step = (max_range - min_range) / columns
if min_range == max_range:
x_axis = [min_range]
else:
x_axis = []
for i in np.arange(min_range, max_range + time_step, time_step):
x_axis.append(i)
apps_dict = dict()
for i in range(len(df_task)):
row = df_task.iloc[i]
apps_dict[row['task_id']] = []
def y_axis_setup():
items = [0] * len(x_axis)
for app, tasks in apps_dict.items():
if option == 'avg':
task = df_resources[df_resources['task_id'] ==
app][type].astype('float').mean()
elif option == 'max':
task = df_resources[df_resources['task_id'] == app][type].astype('float').max()
for i in range(len(x_axis) - 1):
a = task >= x_axis[i]
b = task < x_axis[i + 1]
if a and b:
items[i] += 1
break
if task >= x_axis[-1]:
items[-1] += 1
return items
if "memory" not in type:
xaxis = dict(autorange=True,
title='CPU user time (seconds)')
else:
xaxis = dict(autorange=True,
title='Memory usage (bytes)')
fig = go.Figure(
data=[go.Bar(x=x_axis,
y=y_axis_setup(),
name='tasks')],
layout=go.Layout(xaxis=xaxis,
yaxis=dict(title='Tasks'),
title=label + '(' + option + ')'))
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
def resource_time_series(tasks, type='psutil_process_time_user', label='CPU user time'):
tasks['epoch_time'] = (pd.to_datetime(
tasks['timestamp']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
step = int(tasks['resource_monitoring_interval'][0])
start = tasks['epoch_time'].min()
end = tasks['epoch_time'].max()
tasks['relative_time'] = tasks['epoch_time'] - start
if end != start:
bins = pd.cut(tasks['relative_time'],
range(0, end - start + 1, step),
include_lowest=True)
df = tasks.groupby(bins, as_index=False)[type].mean()
df['time'] = step * df.index
fig = go.Figure(
data=[go.Scatter(x=df['time'],
y=df[type],
)],
layout=go.Layout(xaxis=dict(autorange=True,
title='Time (seconds)'),
yaxis=dict(title=label),
title=label))
else:
fig = go.Figure(
data=[go.Scatter(x=[0],
y=[tasks[type].mean()],
)],
layout=go.Layout(xaxis=dict(autorange=True,
title='Time (seconds)'),
yaxis=dict(title=label),
title=label))
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
def worker_efficiency(task, node):
try:
node['epoch_time'] = (pd.to_datetime(
node['reg_time']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
task['epoch_time_start'] = (pd.to_datetime(
task['task_time_submitted']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
task['epoch_time_running'] = (pd.to_datetime(
task['task_time_running']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
task['epoch_time_returned'] = (pd.to_datetime(
task['task_time_returned']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
start = int(min(task['epoch_time_start'].min(), node['epoch_time'].min()))
end = int(task['epoch_time_returned'].max())
worker_plot = [0] * (end - start + 1)
total_workers = node['worker_count'].sum()
for i, row in task.iterrows():
if math.isnan(row['epoch_time_running']):
# skip tasks with no running start time.
continue
for j in range(int(row['epoch_time_running']), int(row['epoch_time_returned']) + 1):
worker_plot[j - start] += 1
fig = go.Figure(
data=[go.Scatter(x=list(range(0, end - start + 1)),
y=worker_plot,
name='Total busy workers',
),
go.Scatter(x=list(range(0, end - start + 1)),
y=[total_workers] * (end - start + 1),
name='Total online workers',
)
],
layout=go.Layout(xaxis=dict(autorange=True,
title='Time (seconds)'),
yaxis=dict(title='Number of workers'),
title="Worker efficiency"))
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
except Exception as e:
print(e)
return "The worker efficiency plot cannot be generated due to missing data."
def resource_efficiency(resource, node, label='CPU'):
    """Plot per-second resource usage of all tasks against node capacity.

    Args:
        resource: DataFrame of per-task resource samples; must contain
            'timestamp', 'task_id' and the psutil_* columns used below.
            NOTE: mutated in place (epoch/relative time columns added).
        node: DataFrame of node registrations with 'reg_time' and either
            'cpu_count' (label='CPU') or 'total_memory' (label='mem').
            NOTE: also mutated in place.
        label: 'CPU' to plot CPU-core usage, 'mem' for resident memory.

    Returns:
        A plotly HTML <div> string, or an error message on failure.
    """
    try:
        # Convert wall-clock timestamps to integer epoch seconds.
        resource['epoch_time'] = (pd.to_datetime(
            resource['timestamp']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
        node['epoch_time'] = (pd.to_datetime(
            node['reg_time']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
        resource = resource.sort_values(by='epoch_time')
        start = min(resource['epoch_time'].min(), node['epoch_time'].min())
        end = resource['epoch_time'].max()
        resource['relative_time'] = resource['epoch_time'] - start
        node['relative_time'] = node['epoch_time'] - start
        # One accumulator bucket per second of the run.
        task_plot = [0] * (end - start + 1)
        if label == 'CPU':
            total = node['cpu_count'].sum()
        elif label == 'mem':
            # total_memory is reported in bytes; convert to GB.
            total = node['total_memory'].sum() / 1024 / 1024 / 1024
        resource['total_cpu_time'] = resource['psutil_process_time_user'] + resource['psutil_process_time_system']
        for task_id in resource['task_id'].unique():
            # Work on an explicit copy: assigning new columns to a boolean
            # slice of `resource` raises SettingWithCopyWarning and its
            # behavior is not guaranteed across pandas versions.
            tmp = resource[resource['task_id'] == task_id].copy()
            tmp['last_timestamp'] = tmp['relative_time'].shift(1)
            if label == 'CPU':
                tmp['last_cputime'] = tmp['total_cpu_time'].shift(1)
            for index, row in tmp.iterrows():
                if np.isnan(row['last_timestamp']):
                    # First sample of this task: nothing to diff against.
                    continue
                for i in range(int(row['last_timestamp']), int(row['relative_time'])):
                    if label == 'CPU':
                        # Average number of CPU cores used over the interval.
                        diff = (row['total_cpu_time'] - row['last_cputime']) / (row['relative_time'] - row['last_timestamp'])
                    elif label == 'mem':
                        # Resident set size in GB.
                        diff = row['psutil_process_memory_resident'] / 1024 / 1024 / 1024
                    task_plot[i] += diff
        if label == 'CPU':
            name1 = 'Used CPU cores'
            name2 = 'Total CPU cores'
            yaxis = 'Number of CPU cores'
            title = 'CPU usage'
        elif label == 'mem':
            name1 = 'Used memory'
            name2 = 'Total memory'
            yaxis = 'Memory (GB)'
            title = 'Memory usage'
        fig = go.Figure(
            data=[go.Scatter(x=list(range(0, end - start + 1)),
                             y=task_plot,
                             name=name1,
                             ),
                  go.Scatter(x=list(range(0, end - start + 1)),
                             y=[total] * (end - start + 1),
                             name=name2,
                             )
                  ],
            layout=go.Layout(xaxis=dict(autorange=True,
                                        title='Time (seconds)'),
                             yaxis=dict(title=yaxis),
                             title=title))
        return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
    except Exception as e:
        # Best-effort: degrade to a message rather than fail the report.
        print(e)
        return "The resource efficiency plot cannot be generated because of exception {}.".format(e)
import math
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from plotly.offline import plot
def resource_distribution_plot(df_resources, df_task, type='psutil_process_time_user', label='CPU Time Distribution', option='avg', columns=20,):
    """Render a histogram of per-task resource usage as an HTML <div>.

    Each task contributes one sample — the mean or max of its resource
    readings, per *option* — and the samples are binned into *columns*
    equal-width bins.

    Args:
        df_resources: DataFrame of resource samples with a 'task_id'
            column and the *type* column.
        df_task: DataFrame with one row per task ('task_id' column).
        type: resource column to plot, e.g. psutil_process_time_user or
            psutil_process_memory_percent.
        label: plot-title prefix.
        option: 'avg' or 'max' aggregation over each task's samples.
        columns: number of histogram bins.

    Returns:
        A plotly HTML <div> string.

    Raises:
        ValueError: if *option* is neither 'avg' nor 'max'.
    """
    # Convert once; the column may arrive as strings.
    values = df_resources[type].astype('float')
    min_range = min(values)
    max_range = max(values)
    time_step = (max_range - min_range) / columns
    if min_range == max_range:
        # Degenerate case: every sample identical -> a single bin.
        x_axis = [min_range]
    else:
        x_axis = [edge for edge in np.arange(min_range, max_range + time_step, time_step)]
    def y_axis_setup():
        """Count, per bin edge, how many tasks fall into that bin."""
        items = [0] * len(x_axis)
        for app in df_task['task_id'].unique():
            if option == 'avg':
                sample = df_resources[df_resources['task_id'] ==
                                      app][type].astype('float').mean()
            elif option == 'max':
                sample = df_resources[df_resources['task_id'] == app][type].astype('float').max()
            else:
                raise ValueError("option must be 'avg' or 'max', got {!r}".format(option))
            for i in range(len(x_axis) - 1):
                if x_axis[i] <= sample < x_axis[i + 1]:
                    items[i] += 1
                    break
            if sample >= x_axis[-1]:
                # Beyond the last edge (or the single-bin case).
                items[-1] += 1
        return items
    if "memory" not in type:
        xaxis = dict(autorange=True,
                     title='CPU user time (seconds)')
    else:
        xaxis = dict(autorange=True,
                     title='Memory usage (bytes)')
    fig = go.Figure(
        data=[go.Bar(x=x_axis,
                     y=y_axis_setup(),
                     name='tasks')],
        layout=go.Layout(xaxis=xaxis,
                         yaxis=dict(title='Tasks'),
                         title=label + '(' + option + ')'))
    return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
def resource_time_series(tasks, type='psutil_process_time_user', label='CPU user time'):
    """Plot the mean of a resource metric over time, bucketed by the
    monitoring interval.

    Args:
        tasks: DataFrame of resource samples with 'timestamp' and
            'resource_monitoring_interval' columns.  NOTE: mutated in
            place (epoch_time / relative_time columns are added).
        type: resource column to average per time bucket.
        label: axis/plot title.

    Returns:
        A plotly HTML <div> string.
    """
    # Convert wall-clock timestamps to integer epoch seconds.
    tasks['epoch_time'] = (pd.to_datetime(
        tasks['timestamp']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
    # NOTE(review): assumes a row with index label 0 exists and that all
    # rows share one monitoring interval — confirm against the caller.
    step = int(tasks['resource_monitoring_interval'][0])
    start = tasks['epoch_time'].min()
    end = tasks['epoch_time'].max()
    tasks['relative_time'] = tasks['epoch_time'] - start
    if end != start:
        # Bucket samples into `step`-second bins and average within each.
        bins = pd.cut(tasks['relative_time'],
                      range(0, end - start + 1, step),
                      include_lowest=True)
        df = tasks.groupby(bins, as_index=False)[type].mean()
        df['time'] = step * df.index
        fig = go.Figure(
            data=[go.Scatter(x=df['time'],
                             y=df[type],
                             )],
            layout=go.Layout(xaxis=dict(autorange=True,
                                        title='Time (seconds)'),
                             yaxis=dict(title=label),
                             title=label))
    else:
        # All samples landed in the same second: plot a single point.
        fig = go.Figure(
            data=[go.Scatter(x=[0],
                             y=[tasks[type].mean()],
                             )],
            layout=go.Layout(xaxis=dict(autorange=True,
                                        title='Time (seconds)'),
                             yaxis=dict(title=label),
                             title=label))
    return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
def worker_efficiency(task, node):
    """Plot, per second of the run, busy workers versus online workers.

    Both *task* and *node* are mutated in place: integer epoch-second
    columns are added.  Returns a plotly HTML <div> string, or an error
    message when the monitoring data is incomplete.
    """
    try:
        epoch = pd.Timestamp("1970-01-01")
        one_second = pd.Timedelta('1s')
        # Normalise every timestamp column to integer epoch seconds.
        node['epoch_time'] = (pd.to_datetime(node['reg_time']) - epoch) // one_second
        task['epoch_time_start'] = (
            pd.to_datetime(task['task_time_submitted']) - epoch) // one_second
        task['epoch_time_running'] = (
            pd.to_datetime(task['task_time_running']) - epoch) // one_second
        task['epoch_time_returned'] = (
            pd.to_datetime(task['task_time_returned']) - epoch) // one_second
        start = int(min(task['epoch_time_start'].min(), node['epoch_time'].min()))
        end = int(task['epoch_time_returned'].max())
        num_seconds = end - start + 1
        busy = [0] * num_seconds
        total_workers = node['worker_count'].sum()
        for _, row in task.iterrows():
            if math.isnan(row['epoch_time_running']):
                # The task never reached the running state; skip it.
                continue
            first = int(row['epoch_time_running'])
            last = int(row['epoch_time_returned'])
            # One worker is busy for every second this task was running.
            for second in range(first, last + 1):
                busy[second - start] += 1
        timeline = list(range(0, num_seconds))
        fig = go.Figure(
            data=[
                go.Scatter(x=timeline,
                           y=busy,
                           name='Total busy workers',
                           ),
                go.Scatter(x=timeline,
                           y=[total_workers] * num_seconds,
                           name='Total online workers',
                           ),
            ],
            layout=go.Layout(xaxis=dict(autorange=True,
                                        title='Time (seconds)'),
                             yaxis=dict(title='Number of workers'),
                             title="Worker efficiency"))
        return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
    except Exception as e:
        print(e)
        return "The worker efficiency plot cannot be generated due to missing data."
def resource_efficiency(resource, node, label='CPU'):
try:
resource['epoch_time'] = (pd.to_datetime(
resource['timestamp']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
node['epoch_time'] = (pd.to_datetime(
node['reg_time']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
resource = resource.sort_values(by='epoch_time')
start = min(resource['epoch_time'].min(), node['epoch_time'].min())
end = resource['epoch_time'].max()
resource['relative_time'] = resource['epoch_time'] - start
node['relative_time'] = node['epoch_time'] - start
task_plot = [0] * (end - start + 1)
if label == 'CPU':
total = node['cpu_count'].sum()
elif label == 'mem':
total = node['total_memory'].sum() / 1024 / 1024 / 1024
resource['total_cpu_time'] = resource['psutil_process_time_user'] + resource['psutil_process_time_system']
for task_id in resource['task_id'].unique():
tmp = resource[resource['task_id'] == task_id]
tmp['last_timestamp'] = tmp['relative_time'].shift(1)
if label == 'CPU':
tmp['last_cputime'] = tmp['total_cpu_time'].shift(1)
for index, row in tmp.iterrows():
if np.isnan(row['last_timestamp']):
continue
for i in range(int(row['last_timestamp']), int(row['relative_time'])):
if label == 'CPU':
diff = (row['total_cpu_time'] - row['last_cputime']) / (row['relative_time'] - row['last_timestamp'])
elif label == 'mem':
diff = row['psutil_process_memory_resident'] / 1024 / 1024 / 1024
task_plot[i] += diff
if label == 'CPU':
name1 = 'Used CPU cores'
name2 = 'Total CPU cores'
yaxis = 'Number of CPU cores'
title = 'CPU usage'
elif label == 'mem':
name1 = 'Used memory'
name2 = 'Total memory'
yaxis = 'Memory (GB)'
title = 'Memory usage'
fig = go.Figure(
data=[go.Scatter(x=list(range(0, end - start + 1)),
y=task_plot,
name=name1,
),
go.Scatter(x=list(range(0, end - start + 1)),
y=[total] * (end - start + 1),
name=name2,
)
],
layout=go.Layout(xaxis=dict(autorange=True,
title='Time (seconds)'),
yaxis=dict(title=yaxis),
title=title))
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
except Exception as e:
print(e)
return "The resource efficiency plot cannot be generated because of exception {}.".format(e) | en | 0.874965 | # E.g., psutil_process_time_user or psutil_process_memory_percent # skip tasks with no running start time. | 2.697743 | 3 |
ucsmsdk/mometa/trig/TrigLocalAbsWindow.py | anoop1984/python_sdk | 0 | 6624425 | <reponame>anoop1984/python_sdk
"""This module contains the general information for TrigLocalAbsWindow ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class TrigLocalAbsWindowConsts():
    """Enumerated string values accepted by TrigLocalAbsWindow properties."""
    CONCUR_CAP_UNLIMITED = "unlimited"
    PROC_BREAK_NONE = "none"
    PROC_CAP_UNLIMITED = "unlimited"
    TIME_CAP_NONE = "none"
    TIME_CAPPED_FALSE = "false"
    TIME_CAPPED_NO = "no"
    TIME_CAPPED_TRUE = "true"
    TIME_CAPPED_YES = "yes"
class TrigLocalAbsWindow(ManagedObject):
    """This is TrigLocalAbsWindow class."""
    # NOTE: this module is auto-generated SDK metadata; the tables below
    # mirror the UCS model and should not be edited by hand.
    consts = TrigLocalAbsWindowConsts()
    naming_props = set([])
    # Class-level metadata: XML tag, default RN, min version, access, mask,
    # allowed parents and supported verbs for this managed object.
    mo_meta = MoMeta("TrigLocalAbsWindow", "trigLocalAbsWindow", "local-abs-default", VersionMeta.Version211a, "InputOutput", 0x3ff, [], ["admin"], [u'trigLocalSched', u'trigSched'], [], ["Add", "Get", "Set"])
    # Per-property metadata: type, version introduced, access, field mask,
    # validation regex and allowed value ranges.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "concur_cap": MoPropertyMeta("concur_cap", "concurCap", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["unlimited"], ["0-65535"]),
        "date": MoPropertyMeta("date", "date", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "job_count": MoPropertyMeta("job_count", "jobCount", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
        "proc_break": MoPropertyMeta("proc_break", "procBreak", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""[0-9]+:([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9](\.[0-9]{1,3})?""", ["none"], ["0-4294967295"]),
        "proc_cap": MoPropertyMeta("proc_cap", "procCap", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["unlimited"], ["0-65535"]),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x100, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "time_cap": MoPropertyMeta("time_cap", "timeCap", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""[0-9]+:([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9](\.[0-9]{1,3})?""", ["none"], ["0-4294967295"]),
        "time_capped": MoPropertyMeta("time_capped", "timeCapped", "string", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
    }
    # Mapping from XML attribute names to python property names.
    prop_map = {
        "childAction": "child_action",
        "concurCap": "concur_cap",
        "date": "date",
        "dn": "dn",
        "jobCount": "job_count",
        "name": "name",
        "procBreak": "proc_break",
        "procCap": "proc_cap",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
        "timeCap": "time_cap",
        "timeCapped": "time_capped",
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        """Initialize the MO under *parent_mo_or_dn*; property values may
        be supplied as keyword arguments (see prop_meta for validation)."""
        self._dirty_mask = 0
        self.child_action = None
        self.concur_cap = None
        self.date = None
        self.job_count = None
        self.name = None
        self.proc_break = None
        self.proc_cap = None
        self.sacl = None
        self.status = None
        self.time_cap = None
        self.time_capped = None
        ManagedObject.__init__(self, "TrigLocalAbsWindow", parent_mo_or_dn, **kwargs)
| """This module contains the general information for TrigLocalAbsWindow ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class TrigLocalAbsWindowConsts():
CONCUR_CAP_UNLIMITED = "unlimited"
PROC_BREAK_NONE = "none"
PROC_CAP_UNLIMITED = "unlimited"
TIME_CAP_NONE = "none"
TIME_CAPPED_FALSE = "false"
TIME_CAPPED_NO = "no"
TIME_CAPPED_TRUE = "true"
TIME_CAPPED_YES = "yes"
class TrigLocalAbsWindow(ManagedObject):
"""This is TrigLocalAbsWindow class."""
consts = TrigLocalAbsWindowConsts()
naming_props = set([])
mo_meta = MoMeta("TrigLocalAbsWindow", "trigLocalAbsWindow", "local-abs-default", VersionMeta.Version211a, "InputOutput", 0x3ff, [], ["admin"], [u'trigLocalSched', u'trigSched'], [], ["Add", "Get", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"concur_cap": MoPropertyMeta("concur_cap", "concurCap", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["unlimited"], ["0-65535"]),
"date": MoPropertyMeta("date", "date", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"job_count": MoPropertyMeta("job_count", "jobCount", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"proc_break": MoPropertyMeta("proc_break", "procBreak", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""[0-9]+:([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9](\.[0-9]{1,3})?""", ["none"], ["0-4294967295"]),
"proc_cap": MoPropertyMeta("proc_cap", "procCap", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["unlimited"], ["0-65535"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x100, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"time_cap": MoPropertyMeta("time_cap", "timeCap", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""[0-9]+:([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9](\.[0-9]{1,3})?""", ["none"], ["0-4294967295"]),
"time_capped": MoPropertyMeta("time_capped", "timeCapped", "string", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
}
prop_map = {
"childAction": "child_action",
"concurCap": "concur_cap",
"date": "date",
"dn": "dn",
"jobCount": "job_count",
"name": "name",
"procBreak": "proc_break",
"procCap": "proc_cap",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"timeCap": "time_cap",
"timeCapped": "time_capped",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.concur_cap = None
self.date = None
self.job_count = None
self.name = None
self.proc_break = None
self.proc_cap = None
self.sacl = None
self.status = None
self.time_cap = None
self.time_capped = None
ManagedObject.__init__(self, "TrigLocalAbsWindow", parent_mo_or_dn, **kwargs) | en | 0.350234 | This module contains the general information for TrigLocalAbsWindow ManagedObject. This is TrigLocalAbsWindow class. ((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1} ([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1} [\-\.:_a-zA-Z0-9]{0,16} [0-9]+:([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9](\.[0-9]{1,3})? ((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1} ((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1} [0-9]+:([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9](\.[0-9]{1,3})? | 1.909034 | 2 |
python/ray/tests/test_memstat.py | dsctt/ray | 1 | 6624426 | import numpy as np
import os
import time
import pytest
import ray
from ray.cluster_utils import Cluster, cluster_not_supported
from ray.internal.internal_api import memory_summary
# RayConfig to enable recording call sites during ObjectRej creations.
ray_config = {"record_ref_creation_sites": True}
# Unique strings.
DRIVER_PID = "Driver"
WORKER_PID = "Worker"
UNKNOWN_SIZE = " ? "
# Reference status.
PINNED_IN_MEMORY = "PINNED_IN_MEMORY"
LOCAL_REF = "LOCAL_REFERENCE"
USED_BY_PENDING_TASK = "USED_BY_PENDING_TASK"
CAPTURED_IN_OBJECT = "CAPTURED_IN_OBJECT"
ACTOR_HANDLE = "ACTOR_HANDLE"
# Call sites.
PUT_OBJ = "(put object)"
TASK_CALL_OBJ = "(task call)"
ACTOR_TASK_CALL_OBJ = "(actor call)"
DESER_TASK_ARG = "(deserialize task arg)"
# Only 22 characters can be matched because longer strings are wrapped around.
DESER_ACTOR_TASK_ARG = "(deserialize actor tas"
# Group by and sort by parameters.
NODE_ADDRESS = "node address"
STACK_TRACE = "stack trace"
PID = "pid"
OBJECT_SIZE = "object size"
REFERENCE_TYPE = "reference type"
def data_lines(memory_str):
    """Yield the lines of *memory_str* that describe a tracked object
    reference, i.e. lines carrying any known reference-status tag."""
    reference_tags = (
        PINNED_IN_MEMORY,
        LOCAL_REF,
        USED_BY_PENDING_TASK,
        CAPTURED_IN_OBJECT,
        ACTOR_HANDLE,
    )
    for line in memory_str.split("\n"):
        if any(tag in line for tag in reference_tags):
            yield line
def num_objects(memory_str):
    """Return the number of object-reference lines in *memory_str*."""
    return sum(1 for _ in data_lines(memory_str))
def count(memory_str, substr):
    """Return how many lines of *memory_str* contain *substr*.

    Only the first 42 characters of *substr* are matched, because longer
    strings are wrapped around in the memory-summary output.
    """
    needle = substr[:42]
    return sum(needle in line for line in memory_str.split("\n"))
@pytest.mark.parametrize(
    "ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_driver_put_ref(ray_start_regular):
    """A driver-side ray.put shows up as one driver-owned reference and
    disappears once the last local reference is deleted."""
    address = ray_start_regular["address"]
    info = memory_summary(address)
    assert num_objects(info) == 0, info
    x_id = ray.put("HI")
    info = memory_summary(address)
    print(info)
    assert num_objects(info) == 1, info
    assert count(info, DRIVER_PID) == 1, info
    assert count(info, WORKER_PID) == 0, info
    del x_id
    info = memory_summary(address)
    assert num_objects(info) == 0, info
@pytest.mark.parametrize(
    "ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_worker_task_refs(ray_start_regular):
    """References created by a remote task (argument, put, return value)
    are attributed to the right process and cleaned up afterwards."""
    address = ray_start_regular["address"]
    @ray.remote
    def f(y):
        # The summary is captured from inside the worker, while its task
        # argument and local put are still alive.
        from ray.internal.internal_api import memory_summary
        x_id = ray.put("HI")
        info = memory_summary(address)
        del x_id
        return info
    x_id = f.remote(np.zeros(100000))
    info = ray.get(x_id)
    print(info)
    assert num_objects(info) == 4, info
    # Task argument plus task return ids.
    assert count(info, TASK_CALL_OBJ) == 2, info
    assert count(info, DRIVER_PID) == 2, info
    assert count(info, WORKER_PID) == 2, info
    assert count(info, LOCAL_REF) == 2, info
    assert count(info, PINNED_IN_MEMORY) == 1, info
    assert count(info, PUT_OBJ) == 1, info
    assert count(info, DESER_TASK_ARG) == 1, info
    assert count(info, UNKNOWN_SIZE) == 1, info
    print(ray_start_regular)
    # After the task finishes, only the driver's reference to the task
    # return value should remain.
    info = memory_summary(address)
    print(info)
    assert num_objects(info) == 1, info
    assert count(info, DRIVER_PID) == 1, info
    assert count(info, TASK_CALL_OBJ) == 1, info
    assert count(info, UNKNOWN_SIZE) == 0, info
    assert count(info, x_id.hex()) == 1, info
    del x_id
    info = memory_summary(address)
    assert num_objects(info) == 0, info
@pytest.mark.parametrize(
    "ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_actor_task_refs(ray_start_regular):
    """References held by an actor (handle, pending task args, captured
    objects) are reported and released when the actor dies."""
    address = ray_start_regular["address"]
    @ray.remote
    class Actor:
        def __init__(self):
            self.refs = []
        def f(self, x):
            # Keep the argument alive in the actor, then report the summary.
            from ray.internal.internal_api import memory_summary
            self.refs.append(x)
            return memory_summary(address)
    def make_actor():
        return Actor.remote()
    actor = make_actor()
    x_id = actor.f.remote(np.zeros(100000))
    info = ray.get(x_id)
    print(info)
    # Note, the actor will always hold a handle to the actor itself.
    assert num_objects(info) == 5, info
    # Actor handle, task argument id, task return id.
    assert count(info, ACTOR_TASK_CALL_OBJ) == 3, info
    assert count(info, DRIVER_PID) == 3, info
    assert count(info, WORKER_PID) == 2, info
    assert count(info, LOCAL_REF) == 1, info
    assert count(info, PINNED_IN_MEMORY) == 1, info
    assert count(info, USED_BY_PENDING_TASK) == 1, info
    assert count(info, ACTOR_HANDLE) == 2, info
    assert count(info, DESER_ACTOR_TASK_ARG) == 1, info
    del x_id
    # These should accumulate in the actor.
    for _ in range(5):
        ray.get(actor.f.remote([ray.put(np.zeros(100000))]))
    info = memory_summary(address)
    print(info)
    assert count(info, DESER_ACTOR_TASK_ARG) == 5, info
    assert count(info, ACTOR_TASK_CALL_OBJ) == 1, info
    # Cleanup.
    del actor
    time.sleep(1)
    info = memory_summary(address)
    assert num_objects(info) == 0, info
@pytest.mark.parametrize(
    "ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_nested_object_refs(ray_start_regular):
    """Refs captured inside other objects are reported as
    CAPTURED_IN_OBJECT even after their local references are deleted."""
    address = ray_start_regular["address"]
    x_id = ray.put(np.zeros(100000))
    y_id = ray.put([x_id])
    z_id = ray.put([y_id])
    del x_id, y_id
    info = memory_summary(address)
    print(info)
    assert num_objects(info) == 3, info
    assert count(info, LOCAL_REF) == 1, info
    assert count(info, CAPTURED_IN_OBJECT) == 2, info
    del z_id
@pytest.mark.parametrize(
    "ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_pinned_object_call_site(ray_start_regular):
    """A ray.get'd buffer pins the object in memory; the report reflects
    each combination of local ref and pinned buffer."""
    address = ray_start_regular["address"]
    # Local ref only.
    x_id = ray.put(np.zeros(100000))
    info = memory_summary(address)
    print(info)
    assert num_objects(info) == 1, info
    assert count(info, LOCAL_REF) == 1, info
    assert count(info, PINNED_IN_MEMORY) == 0, info
    # Local ref + pinned buffer.
    buf = ray.get(x_id)
    info = memory_summary(address)
    print(info)
    assert num_objects(info) == 1, info
    assert count(info, LOCAL_REF) == 0, info
    assert count(info, PINNED_IN_MEMORY) == 1, info
    # Just pinned buffer.
    del x_id
    info = memory_summary(address)
    print(info)
    assert num_objects(info) == 1, info
    assert count(info, LOCAL_REF) == 0, info
    assert count(info, PINNED_IN_MEMORY) == 1, info
    # Nothing.
    del buf
    info = memory_summary(address)
    print(info)
    assert num_objects(info) == 0, info
@pytest.mark.xfail(cluster_not_supported, reason="cluster not supported")
def test_multi_node_stats(shutdown_only):
    """Memory stats are aggregated across all nodes of a cluster."""
    # NOTE(mwtian): using env var only enables the feature on workers, while
    # using head_node_args={"_system_config": ray_config} only enables the
    # feature on the driver.
    os.environ["RAY_record_ref_creation_sites"] = "1"
    cluster = Cluster()
    for _ in range(2):
        cluster.add_node(num_cpus=1)
    ray.init(address=cluster.address)
    @ray.remote(num_cpus=1)
    class Actor:
        def __init__(self):
            # Pin one object per actor so each node reports a put.
            self.ref = ray.put(np.zeros(100000))
        def ping(self):
            pass
    # Each actor will be on a different node.
    a = Actor.remote()
    b = Actor.remote()
    ray.get(a.ping.remote())
    ray.get(b.ping.remote())
    # Verify we have collected stats across the nodes.
    info = memory_summary(cluster.address)
    print(info)
    assert count(info, PUT_OBJ) == 2, info
@pytest.mark.parametrize(
    "ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_group_by_sort_by(ray_start_regular):
    """memory_summary honors the group_by / sort_by parameters by emitting
    the corresponding section headers and sort keys."""
    address = ray_start_regular["address"]
    @ray.remote
    def f(y):
        # Capture three differently-grouped summaries from the worker while
        # the put object and the task argument are still live.
        x_id = ray.put("HI")
        info_a = memory_summary(
            address, group_by="STACK_TRACE", sort_by="REFERENCE_TYPE"
        )
        info_b = memory_summary(address, group_by="NODE_ADDRESS", sort_by="OBJECT_SIZE")
        info_c = memory_summary(address, group_by="NODE_ADDRESS", sort_by="PID")
        del x_id
        return info_a, info_b, info_c
    x_id = f.remote(np.zeros(100000))
    info_a, info_b, info_c = ray.get(x_id)
    print(info_c)
    assert count(info_a, STACK_TRACE) == 7, info_a
    assert count(info_a, REFERENCE_TYPE) == 1, info_a
    assert count(info_b, NODE_ADDRESS) == 3, info_b
    assert count(info_b, OBJECT_SIZE) == 1, info_b
    assert count(info_c, NODE_ADDRESS) == 3, info_c
    assert count(info_c, PID) == 1, info_c
@pytest.mark.parametrize(
    "ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_memory_used_output(ray_start_regular):
    """The summary reports plasma usage both as a human-readable total and
    as exact byte counts."""
    address = ray_start_regular["address"]
    import numpy as np
    # 8 MiB of int8 ones; kept alive only by the plasma pin.
    _ = ray.put(np.ones(8 * 1024 * 1024, dtype=np.int8))
    info = memory_summary(address)
    print(info)
    assert count(info, "Plasma memory usage 8 MiB") == 1, info
    assert count(info, "8388861.0 B") == 2, info
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| import numpy as np
import os
import time
import pytest
import ray
from ray.cluster_utils import Cluster, cluster_not_supported
from ray.internal.internal_api import memory_summary
# RayConfig to enable recording call sites during ObjectRej creations.
ray_config = {"record_ref_creation_sites": True}
# Unique strings.
DRIVER_PID = "Driver"
WORKER_PID = "Worker"
UNKNOWN_SIZE = " ? "
# Reference status.
PINNED_IN_MEMORY = "PINNED_IN_MEMORY"
LOCAL_REF = "LOCAL_REFERENCE"
USED_BY_PENDING_TASK = "USED_BY_PENDING_TASK"
CAPTURED_IN_OBJECT = "CAPTURED_IN_OBJECT"
ACTOR_HANDLE = "ACTOR_HANDLE"
# Call sites.
PUT_OBJ = "(put object)"
TASK_CALL_OBJ = "(task call)"
ACTOR_TASK_CALL_OBJ = "(actor call)"
DESER_TASK_ARG = "(deserialize task arg)"
# Only 22 characters can be matched because longer strings are wrapped around.
DESER_ACTOR_TASK_ARG = "(deserialize actor tas"
# Group by and sort by parameters.
NODE_ADDRESS = "node address"
STACK_TRACE = "stack trace"
PID = "pid"
OBJECT_SIZE = "object size"
REFERENCE_TYPE = "reference type"
def data_lines(memory_str):
    """Yield the lines of *memory_str* that carry any known
    reference-status tag (i.e. describe a tracked object reference)."""
    reference_tags = (
        PINNED_IN_MEMORY,
        LOCAL_REF,
        USED_BY_PENDING_TASK,
        CAPTURED_IN_OBJECT,
        ACTOR_HANDLE,
    )
    for line in memory_str.split("\n"):
        if any(tag in line for tag in reference_tags):
            yield line
def num_objects(memory_str):
    """Return the number of object-reference lines in *memory_str*."""
    return sum(1 for _ in data_lines(memory_str))
def count(memory_str, substr):
    """Return the number of lines in *memory_str* containing *substr*.

    Matching is limited to the first 42 characters of *substr* because
    longer strings are wrapped around in the memory-summary output.
    """
    needle = substr[:42]
    return sum(needle in line for line in memory_str.split("\n"))
@pytest.mark.parametrize(
"ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_driver_put_ref(ray_start_regular):
address = ray_start_regular["address"]
info = memory_summary(address)
assert num_objects(info) == 0, info
x_id = ray.put("HI")
info = memory_summary(address)
print(info)
assert num_objects(info) == 1, info
assert count(info, DRIVER_PID) == 1, info
assert count(info, WORKER_PID) == 0, info
del x_id
info = memory_summary(address)
assert num_objects(info) == 0, info
@pytest.mark.parametrize(
"ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_worker_task_refs(ray_start_regular):
address = ray_start_regular["address"]
@ray.remote
def f(y):
from ray.internal.internal_api import memory_summary
x_id = ray.put("HI")
info = memory_summary(address)
del x_id
return info
x_id = f.remote(np.zeros(100000))
info = ray.get(x_id)
print(info)
assert num_objects(info) == 4, info
# Task argument plus task return ids.
assert count(info, TASK_CALL_OBJ) == 2, info
assert count(info, DRIVER_PID) == 2, info
assert count(info, WORKER_PID) == 2, info
assert count(info, LOCAL_REF) == 2, info
assert count(info, PINNED_IN_MEMORY) == 1, info
assert count(info, PUT_OBJ) == 1, info
assert count(info, DESER_TASK_ARG) == 1, info
assert count(info, UNKNOWN_SIZE) == 1, info
print(ray_start_regular)
info = memory_summary(address)
print(info)
assert num_objects(info) == 1, info
assert count(info, DRIVER_PID) == 1, info
assert count(info, TASK_CALL_OBJ) == 1, info
assert count(info, UNKNOWN_SIZE) == 0, info
assert count(info, x_id.hex()) == 1, info
del x_id
info = memory_summary(address)
assert num_objects(info) == 0, info
@pytest.mark.parametrize(
"ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_actor_task_refs(ray_start_regular):
address = ray_start_regular["address"]
@ray.remote
class Actor:
def __init__(self):
self.refs = []
def f(self, x):
from ray.internal.internal_api import memory_summary
self.refs.append(x)
return memory_summary(address)
def make_actor():
return Actor.remote()
actor = make_actor()
x_id = actor.f.remote(np.zeros(100000))
info = ray.get(x_id)
print(info)
# Note, the actor will always hold a handle to the actor itself.
assert num_objects(info) == 5, info
# Actor handle, task argument id, task return id.
assert count(info, ACTOR_TASK_CALL_OBJ) == 3, info
assert count(info, DRIVER_PID) == 3, info
assert count(info, WORKER_PID) == 2, info
assert count(info, LOCAL_REF) == 1, info
assert count(info, PINNED_IN_MEMORY) == 1, info
assert count(info, USED_BY_PENDING_TASK) == 1, info
assert count(info, ACTOR_HANDLE) == 2, info
assert count(info, DESER_ACTOR_TASK_ARG) == 1, info
del x_id
# These should accumulate in the actor.
for _ in range(5):
ray.get(actor.f.remote([ray.put(np.zeros(100000))]))
info = memory_summary(address)
print(info)
assert count(info, DESER_ACTOR_TASK_ARG) == 5, info
assert count(info, ACTOR_TASK_CALL_OBJ) == 1, info
# Cleanup.
del actor
time.sleep(1)
info = memory_summary(address)
assert num_objects(info) == 0, info
@pytest.mark.parametrize(
"ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_nested_object_refs(ray_start_regular):
address = ray_start_regular["address"]
x_id = ray.put(np.zeros(100000))
y_id = ray.put([x_id])
z_id = ray.put([y_id])
del x_id, y_id
info = memory_summary(address)
print(info)
assert num_objects(info) == 3, info
assert count(info, LOCAL_REF) == 1, info
assert count(info, CAPTURED_IN_OBJECT) == 2, info
del z_id
@pytest.mark.parametrize(
"ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_pinned_object_call_site(ray_start_regular):
address = ray_start_regular["address"]
# Local ref only.
x_id = ray.put(np.zeros(100000))
info = memory_summary(address)
print(info)
assert num_objects(info) == 1, info
assert count(info, LOCAL_REF) == 1, info
assert count(info, PINNED_IN_MEMORY) == 0, info
# Local ref + pinned buffer.
buf = ray.get(x_id)
info = memory_summary(address)
print(info)
assert num_objects(info) == 1, info
assert count(info, LOCAL_REF) == 0, info
assert count(info, PINNED_IN_MEMORY) == 1, info
# Just pinned buffer.
del x_id
info = memory_summary(address)
print(info)
assert num_objects(info) == 1, info
assert count(info, LOCAL_REF) == 0, info
assert count(info, PINNED_IN_MEMORY) == 1, info
# Nothing.
del buf
info = memory_summary(address)
print(info)
assert num_objects(info) == 0, info
@pytest.mark.xfail(cluster_not_supported, reason="cluster not supported")
def test_multi_node_stats(shutdown_only):
# NOTE(mwtian): using env var only enables the feature on workers, while
# using head_node_args={"_system_config": ray_config} only enables the
# feature on the driver.
os.environ["RAY_record_ref_creation_sites"] = "1"
cluster = Cluster()
for _ in range(2):
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
@ray.remote(num_cpus=1)
class Actor:
def __init__(self):
self.ref = ray.put(np.zeros(100000))
def ping(self):
pass
# Each actor will be on a different node.
a = Actor.remote()
b = Actor.remote()
ray.get(a.ping.remote())
ray.get(b.ping.remote())
# Verify we have collected stats across the nodes.
info = memory_summary(cluster.address)
print(info)
assert count(info, PUT_OBJ) == 2, info
@pytest.mark.parametrize(
    "ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_group_by_sort_by(ray_start_regular):
    # Exercise memory_summary()'s group_by/sort_by options from inside a task
    # and check that the expected headers/labels appear in each formatted
    # output.  Exact counts depend on the summary's text layout.
    address = ray_start_regular["address"]

    @ray.remote
    def f(y):
        x_id = ray.put("HI")
        info_a = memory_summary(
            address, group_by="STACK_TRACE", sort_by="REFERENCE_TYPE"
        )
        info_b = memory_summary(address, group_by="NODE_ADDRESS", sort_by="OBJECT_SIZE")
        info_c = memory_summary(address, group_by="NODE_ADDRESS", sort_by="PID")
        del x_id
        return info_a, info_b, info_c

    x_id = f.remote(np.zeros(100000))
    info_a, info_b, info_c = ray.get(x_id)
    print(info_c)
    assert count(info_a, STACK_TRACE) == 7, info_a
    assert count(info_a, REFERENCE_TYPE) == 1, info_a
    assert count(info_b, NODE_ADDRESS) == 3, info_b
    assert count(info_b, OBJECT_SIZE) == 1, info_b
    assert count(info_c, NODE_ADDRESS) == 3, info_c
    assert count(info_c, PID) == 1, info_c
@pytest.mark.parametrize(
    "ray_start_regular", [{"_system_config": ray_config}], indirect=True
)
def test_memory_used_output(ray_start_regular):
    # An 8 MiB int8 array should show up in the plasma usage header and in the
    # per-object size column.  8388861 B > 8 MiB (8388608 B) -- presumably the
    # extra bytes are object metadata overhead; TODO confirm.
    address = ray_start_regular["address"]
    import numpy as np

    _ = ray.put(np.ones(8 * 1024 * 1024, dtype=np.int8))
    info = memory_summary(address)
    print(info)
    assert count(info, "Plasma memory usage 8 MiB") == 1, info
    assert count(info, "8388861.0 B") == 2, info
if __name__ == "__main__":
    # Allow running this test file directly; delegate to pytest.
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| en | 0.778435 | # RayConfig to enable recording call sites during ObjectRej creations. # Unique strings. # Reference status. # Call sites. # Only 22 characters can be matched because longer strings are wrapped around. # Group by and sort by parameters. # Task argument plus task return ids. # Note, the actor will always hold a handle to the actor itself. # Actor handle, task argument id, task return id. # These should accumulate in the actor. # Cleanup. # Local ref only. # Local ref + pinned buffer. # Just pinned buffer. # Nothing. # NOTE(mwtian): using env var only enables the feature on workers, while # using head_node_args={"_system_config": ray_config} only enables the # feature on the driver. # Each actor will be on a different node. # Verify we have collected stats across the nodes. | 2.225714 | 2 |
core/completion.py | gnprice/oil | 0 | 6624427 | #!/usr/bin/env python
# Copyright 2016 <NAME>. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
completion.py - Tab completion.
TODO: Is this specific to osh/oil, or common?
Architecture:
Completion should run in threads? For two reasons:
- Completion can be slow -- e.g. completion for distributed resources
- Because readline has a weird interface, and then you can implement
"iterators" in C++ or oil. They just push onto a PIPE. Use a netstring
protocol and self-pipe?
- completion can be in another process anyway?
Does that mean the user code gets run in an entirely separate interpreter? The
whole lexer/parser/cmd_exec combo has to be thread-safe. Does it get a copy of
the same startup state?
bash note: most of this stuff is in pcomplete.c and bashline.c (4K lines!).
Uses ITEMLIST with a bunch of flags.
"""
from __future__ import print_function
import atexit
import posix
import sys
import time
#import traceback
from osh.meta import ast, runtime
from core import alloc
from core import os_path
from core import state
from core import util
import libc
command_e = ast.command_e
value_e = runtime.value_e
completion_state_e = runtime.completion_state_e
log = util.log
class _RetryCompletion(Exception):
  """Raised to restart completion from scratch.

  Implements bash's 'exit 124' protocol: when a completion function exits
  with status 124, the shell retries the whole completion.
  """
class NullCompleter(object):
  """A completer that never yields any candidates."""

  def Matches(self, comp):
    # No matches, ever.
    return []


# Shared do-nothing instance, used as the default for __first/__fallback.
_NULL_COMPLETER = NullCompleter()
class CompletionLookup(object):
  """Stores completion hooks registered by the user.

  Lookup order for a command name (see GetCompleterForName):
    1. Exact match on the registered name (e.g. '/usr/bin/git').
    2. Match on the basename (e.g. 'git').
    3. First registered glob pattern matching the basename (e.g. '*.sh').
    4. The '__fallback' completer (complete -D).
  """

  def __init__(self):
    # command name -> ChainedCompleter
    # There are pseudo commands __first and __fallback for -E and -D.
    self.lookup = {
        '__fallback': _NULL_COMPLETER,
        '__first': _NULL_COMPLETER,
    }
    # So you can register *.sh, unlike bash.  List of (glob, [actions]),
    # searched linearly.
    self.patterns = []

  def PrintSpecs(self):
    """Print all registered completers, for debugging / 'complete' output."""
    for name in sorted(self.lookup):
      print('%-15r %s' % (name, self.lookup[name]))
    print('---')
    for pat, chain in self.patterns:
      print('%s = %s' % (pat, chain))

  def RegisterName(self, name, chain):
    """Register a completion action with a name.

    Used by the 'complete' builtin.
    """
    self.lookup[name] = chain

  def RegisterGlob(self, glob_pat, chain):
    """Register a completer for any command whose basename matches glob_pat."""
    self.patterns.append((glob_pat, chain))

  def GetFirstCompleter(self):
    """Return the completer used for the first word on the line (-E)."""
    return self.lookup['__first']

  def GetCompleterForName(self, argv0):
    """
    Args:
      argv0: A finished argv0 to lookup
    """
    if not argv0:
      return self.GetFirstCompleter()

    chain = self.lookup.get(argv0)  # NOTE: Could be ''
    if chain:
      return chain

    key = os_path.basename(argv0)
    # BUG FIX: the basename lookup result used to be assigned to an unused
    # 'actions' variable while the stale 'chain' was re-tested, so completers
    # registered under a basename were never found.
    chain = self.lookup.get(key)
    if chain:
      return chain

    for glob_pat, chain in self.patterns:
      #log('Matching %r %r', key, glob_pat)
      if libc.fnmatch(glob_pat, key):
        return chain

    # Nothing matched
    return self.lookup['__fallback']
class CompletionApi(object):
  """Input state for completion actions, mirroring bash's COMP_* variables."""

  def __init__(self, line='', begin=0, end=0):
    """
    Args:
      line: the current input buffer (COMP_LINE)
      begin: index of the first char of the word being completed
      end: cursor position (COMP_POINT)

    index == -1 means we were invoked by compgen rather than by TAB.
    """
    self.line = line
    self.begin = begin
    self.end = end
    # NOTE: COMP_WORDBREAKS is initliazed in Mem().

  def Update(self, words=None, index=0, to_complete=''):
    """Record the word-split results; called after parsing the line."""
    self.words = words or []  # COMP_WORDS
    self.index = index  # COMP_CWORD
    self.to_complete = to_complete  # partial word at the cursor

  def GetApiInput(self):
    """Returns argv and comp_words."""
    first = self.words[0]
    if self.index == -1:
      # Called directly by compgen, not by hitting TAB: there is no previous
      # word and we're not completing anything.
      previous = ''
      comp_words = []
    else:
      previous = self.words[self.index - 1] if self.index != 0 else ''
      comp_words = self.words
    return [first, self.to_complete, previous], comp_words
#
# Actions
#
class CompletionAction(object):
  """Base class for things that produce candidate words.

  Subclasses include literal word lists, shell functions, the file system,
  variables, and external commands.
  """

  def __init__(self):
    pass

  def Matches(self, comp):
    pass


class WordsAction(CompletionAction):
  """Complete against a fixed list of words (compgen -W).

  NOTE: Have to split the words passed to -W.  Using IFS or something else?
  """

  def __init__(self, words, delay=None):
    self.words = words
    self.delay = delay  # artificial per-match delay (used for testing)

  def Matches(self, comp):
    prefix = comp.to_complete
    for candidate in self.words:
      if not candidate.startswith(prefix):
        continue
      if self.delay:
        time.sleep(self.delay)
      yield candidate
class FileSystemAction(CompletionAction):
  """Complete paths from the file system.

  Directories will have a / suffix.

  TODO: We need a variant that tests for an executable bit.
  """
  def __init__(self, dirs_only=False):
    # dirs_only: yield only directories (like bash's 'compgen -d')
    self.dirs_only = dirs_only

  def Matches(self, comp):
    to_complete = comp.to_complete
    # Split the partial path into a directory to list and a base to rejoin
    # candidates with.
    i = to_complete.rfind('/')
    if i == -1:  # it looks like 'foo'
      to_list = '.'
      base = ''
    elif i == 0:  # it's an absolute path to_complete like / or /b
      to_list ='/'
      base = '/'
    else:
      to_list = to_complete[:i]
      base = to_list
    #log('to_list %r', to_list)
    try:
      names = posix.listdir(to_list)
    except OSError as e:
      return  # nothing
    for name in names:
      path = os_path.join(base, name)
      if path.startswith(to_complete):
        if self.dirs_only:
          # NOTE(review): dirs_only yields WITHOUT the trailing '/', unlike
          # the branch below -- presumably intentional; confirm against bash.
          if os_path.isdir(path):
            yield path
        else:
          if os_path.isdir(path):
            # Trailing slash so the user can keep completing into the dir.
            yield path + '/'
          else:
            yield path
class ShellFuncAction(CompletionAction):
  """Call a user-defined function using bash's completion protocol."""

  def __init__(self, ex, func):
    # ex: the shell Executor; used to set COMP_* vars and run the function
    # func: the user-defined function object (from 'complete -F')
    self.ex = ex
    self.func = func

  def __repr__(self):
    # TODO: Add file and line number here!
    return '<ShellFuncAction %r>' % (self.func.name,)

  def log(self, *args):
    self.ex.debug_f.log(*args)

  def Matches(self, comp):
    # Runs the function with COMP_WORDS/COMP_CWORD/COMP_LINE/COMP_POINT set,
    # then returns the contents of COMPREPLY.  Raises _RetryCompletion on
    # exit status 124 (bash's retry convention).

    # TODO: Delete COMPREPLY here?  It doesn't seem to be defined in bash by
    # default.
    argv, comp_words = comp.GetApiInput()
    state.SetGlobalArray(self.ex.mem, 'COMP_WORDS', comp_words)
    state.SetGlobalString(self.ex.mem, 'COMP_CWORD', str(comp.index))
    state.SetGlobalString(self.ex.mem, 'COMP_LINE', comp.line)
    state.SetGlobalString(self.ex.mem, 'COMP_POINT', str(comp.end))

    self.log('Running completion function %r with arguments %s',
             self.func.name, argv)

    status = self.ex.RunFuncForCompletion(self.func, argv)
    if status == 124:
      self.log('Got status 124 from %r', self.func.name)
      raise _RetryCompletion()

    # Lame: COMP_REPLY would follow the naming convention!
    val = state.GetGlobal(self.ex.mem, 'COMPREPLY')
    if val.tag == value_e.Undef:
      util.error('Ran function %s but COMPREPLY was not defined', self.func.name)
      return []

    if val.tag != value_e.StrArray:
      log('ERROR: COMPREPLY should be an array, got %s', val)
      return []
    self.log('COMPREPLY %s', val)

    # Return this all at once so we don't have a generator.  COMPREPLY happens
    # all at once anyway.
    return val.strs
class VariablesAction(object):
  """compgen -A variable: yield every shell variable name.

  NOTE: no prefix filtering here; the caller (e.g. ChainedCompleter) filters.
  """

  def __init__(self, mem):
    self.mem = mem

  def Matches(self, comp):
    for name in self.mem.VarNames():
      yield name
class VariablesActionInternal(object):
  """Complete $VAR when we parse the '$' prefix ourselves.

  TODO: Also need to complete ${P (BracedVarSub)
  """

  def __init__(self, mem):
    self.mem = mem

  def Matches(self, comp):
    partial = comp.to_complete
    assert partial.startswith('$')
    partial = partial[1:]
    for name in self.mem.VarNames():
      if name.startswith(partial):
        # Yield the full word, space-terminated, since we bypass readline's
        # word splitting here.
        yield '$' + name + ' '
class ExternalCommandAction(object):
  """Complete commands in $PATH.

  NOTE: -A command in bash is FIVE things: aliases, builtins, functions,
  keywords, etc.
  """

  def __init__(self, mem):
    """
    Args:
      mem: for looking up Path
    """
    self.mem = mem
    # Should we list everything executable in $PATH here?  And then whenever
    # $PATH is changed, regenerated it?
    # Or we can cache directory listings?  What if the contents of the dir
    # changed?
    # Can we look at the dir timestamp?
    #
    # (dir, timestamp) -> list of entries perhaps?  And then every time you
    # hit tab, do you have to check the timestamp?  It should be cached by
    # the kernel, so yes.
    self.ext = []

    # (dir, timestamp) -> list
    # NOTE: This cache assumes that listing a directory is slower than
    # statting it to get the mtime.  That may not be true on all systems?
    # Either way you are reading blocks of metadata.  But I guess /bin on
    # many systems is huge, and will require lots of sys calls.
    self.cache = {}

  def Matches(self, comp):
    """
    TODO: Cache is never cleared.

    - When we get a newer timestamp, we should clear the old one.
    - When PATH is changed, we can remove old entries.
    """
    val = self.mem.GetVar('PATH')
    if val.tag != value_e.Str:
      # No matches if not a string
      return
    path_dirs = val.s.split(':')
    #print(path_dirs)

    names = []
    for d in path_dirs:
      try:
        st = posix.stat(d)
      except OSError as e:
        # There could be a directory that doesn't exist in the $PATH.
        continue
      key = (d, st.st_mtime)
      listing = self.cache.get(key)
      if listing is None:
        listing = posix.listdir(d)
        self.cache[key] = listing
      names.extend(listing)

    # TODO: Shouldn't do the prefix / space thing ourselves.  readline does
    # that at the END of the line.
    # BUG FIX: iterate over 'names' (entries accumulated from ALL $PATH
    # dirs).  The old code iterated 'listing', which only held the LAST
    # directory's entries, so commands in earlier $PATH dirs were missed.
    for word in names:
      if word.startswith(comp.to_complete):
        yield word + ' '
class GlobPredicate(object):
  """Expand into files that match a pattern.  !*.py filters them.

  Weird syntax:
  -X *.py or -X !*.py

  Also & is a placeholder for the string being completed?.  Yeah I probably
  want to get rid of this feature.
  """
  def __init__(self, glob_pat):
    # glob_pat: pattern from 'complete -X', matched with fnmatch below
    self.glob_pat = glob_pat

  def __call__(self, match):
    # NOTE(review): the '!' negation mentioned in the docstring is not
    # handled here -- presumably stripped by the caller; TODO confirm.
    return libc.fnmatch(self.glob_pat, match)
class ChainedCompleter(object):
  """A completer that tries a bunch of them in order.

  NOTE: plus_dirs happens AFTER filtering with predicates?  We add BACK the
  dirs, e.g. -A file -X '!*.sh' -o plusdirs.

  NOTE: plusdirs can just create another chained completer.  I think you
  should probably get rid of the predicate.  That should just be a Filter().
  prefix and suffix can be adhoc for now I guess, since they are trivial.
  """

  def __init__(self, actions, predicate=None, prefix='', suffix=''):
    # actions: list of CompletionAction-like objects, queried in order
    self.actions = actions
    # TODO: predicate is for GlobPredicate, for -X
    self.predicate = predicate or (lambda word: True)
    # prefix/suffix: wrapped around every yielded match (compgen -P / -S)
    self.prefix = prefix
    self.suffix = suffix

  def Matches(self, comp, filter_func_matches=True):
    # Yields each action's matches that start with comp.to_complete and pass
    # the predicate; shell-function matches bypass the prefix filter when
    # filter_func_matches is False (bash's compgen -F behavior).

    # NOTE: This has to be evaluated eagerly so we get the _RetryCompletion
    # exception.
    for a in self.actions:
      for match in a.Matches(comp):
        # Special case hack to match bash for compgen -F.  It doesn't filter
        # by to_complete!
        show = (
            match.startswith(comp.to_complete) and self.predicate(match) or
            (isinstance(a, ShellFuncAction) and not filter_func_matches)
        )
        # There are two kinds of filters: changing the string, and filtering
        # the set of strings.  So maybe have modifiers AND filters?  A triple.
        if show:
          yield self.prefix + match + self.suffix

    # Prefix is the current one?

    # What if the cursor is not at the end of line?  See readline interface.
    # That's OK -- we just truncate the line at the cursor?
    # Hm actually zsh does something smarter, and which is probably
    # preferable.  It completes the word that

  def __str__(self):
    return '<ChainedCompleter %s %s %r %r>' % (
        self.actions, self.predicate, self.prefix, self.suffix)
class DummyParser(object):
  """Naive whitespace word-splitter, a stand-in for the real parser."""

  def GetWords(self, buf):
    """Split the buffer on whitespace.

    A trailing space means a new empty word is being completed, e.g.
    'grep ' -> ['grep', ''].
    """
    parts = buf.split()
    if buf.endswith(' '):
      parts.append('')
    return parts
def _FindLastSimpleCommand(node):
  """Return the last SimpleCommand node in a command AST, or None.

  The last thing has to be a simple command.  Cases:

  echo a; echo b
  ls | wc -l
  test -f foo && hello
  """
  if node.tag == command_e.SimpleCommand:
    return node
  if node.tag == command_e.Sentence:
    return node.child
  if node.tag == command_e.TimeBlock:
    child = node.pipeline
    if child.tag == command_e.SimpleCommand:
      return child
    if child.tag == command_e.Pipeline:
      # NOTE(review): returns the FIRST child here, while the generic case
      # below returns the LAST child -- presumably intentional; confirm.
      return child.children[0]
  if node.tag == command_e.Assignment:
    return None
  if node.tag == command_e.ControlFlow:
    return None

  # Any other node type is expected to have children (CommandList, AndOr,
  # Pipeline, ...).
  assert hasattr(node, 'children'), node

  n = len(node.children)
  if n == 0:
    return None

  # Go as deep as we need.
  return _FindLastSimpleCommand(node.children[-1])
def _GetCompletionType(w_parser, c_parser, ev, debug_f):
  """Classify what to complete by actually parsing the partial line.

  Parser returns completion state.
  Then we translate that into completion_state_e.

  Returns:
    comp_type
    prefix: the prefix to complete
    comp_words: list of words. First word is used for dispatching.

  TODO: what about hash table name?
  """
  # TODO: Fill these in
  comp_type = completion_state_e.FIRST
  prefix = ''
  words = []

  try:
    node = c_parser.ParseLogicalLine()
  except util.ParseError as e:
    # A partial line frequently fails to parse; fall back to the defaults.
    return comp_type, prefix, words  # EARLY RETURN

  # Inspect state after parsing. Hm I'm getting the newline. Can I view the
  # one before that?
  cur_token = w_parser.cur_token
  prev_token = w_parser.PrevToken()
  cur_word = w_parser.cursor
  comp_state = c_parser.GetCompletionState()

  com_node = None
  if node:
    # These 4 should all parse
    if node.tag == command_e.SimpleCommand:
      # NOTE: prev_token can be ;, then complete a new one
      #print('WORDS', node.words)
      # TODO:
      # - EvalVarSub depends on memory
      # - EvalTildeSub needs to be somewhere else
      # - EvalCommandSub needs to be
      #
      # maybe write a version of Executor._EvalWordSequence that doesn't do
      # CommandSub. Or honestly you can just reuse it for now. Can you pass
      # the same cmd_exec in? What about side effects? I guess it can't
      # really have any. It can only have them on the file system. Hm.
      # Defining funcitons? Yeah if you complete partial functions that could
      # be bad. That is, you could change the name of the function.
      argv = []
      for w in node.words:
        try:
          # TODO: Should we call EvalWordSequence? But turn globbing off? It
          # can do splitting and such.
          val = ev.EvalWordToString(w)
        except util.FatalRuntimeError:
          # Why would it fail?
          continue
        if val.tag == value_e.Str:
          argv.append(val.s)
        else:
          pass
          # Oh I have to handle $@ on the command line?
      #print(argv)
      com_node = node
    elif node.tag == command_e.CommandList:  # echo a; echo b
      com_node = _FindLastSimpleCommand(node)
    elif node.tag == command_e.AndOr:  # echo a && echo b
      com_node = _FindLastSimpleCommand(node)
    elif node.tag == command_e.Pipeline:  # echo a | wc -l
      com_node = _FindLastSimpleCommand(node)
    else:
      # Return NONE? Not handling it for now
      pass
  else:  # No node.
    pass

  # TODO: Need to show buf... Need a multiline display for debugging?
  if 0:
    debug_f.log('prev_token %s cur_token %s cur_word %s',
                prev_token, cur_token, cur_word)
    debug_f.log('comp_state %s error %s', comp_state, c_parser.Error())
    # This one can be multiple lines
    debug_f.log('node: %s %s', repr(node) if node else '<Parse Error>',
                node.tag if node else '')
    # This one can be multiple lines
    debug_f.log('com_node: %s', repr(com_node) if com_node else '<None>')

  # IMPORTANT: if the last token is Id.Ignored_Space, then we want to add a
  # dummy word! empty word

  # initial simple algorithm
  # If we got a node:
  # 1. Look at c_parser.LastCompletionState()
  # 1. don't complete unless it's SIMPLE_COMMAND?
  # 2. look at node.words -- first word or not?
  # 3. EvalStatic() of the first word
  # If we got None:
  # 1. Look at c_parser.LastCompletionState()
  # 2. If it is $ or ${, complete var names
  #
  # Is there any case where we shoudl fall back on buf.split()?

  # Now parse it. And then look at the AST, but don't eval? Or actually we
  # CAN eval, but we probably don't want to.
  #
  # completion state also has to know about ${pre<TAB> and ${foo[pre<TAB>
  # Those are invalid parses. But the LAST TOKEN is the one we want to
  # complete? Will it be a proper group of LIT tokens? I don't think you
  # complete anything else besides that?
  #
  # $<TAB> will be Id.Lit_Other -- but you might want to special case
  # $na<TAB> will be VS_NAME

  # NOTE: The LineLexer adds \n to the buf? Should we disable it and add \0?

  # I guess the shortest way to do it is to just Eval(), and even run command
  # sub. Or maybe SafeEval() for command sub returns __DUMMY__ or None or some
  # other crap.
  # I guess in oil you could have some arbitrarily long function in $split(bar,
  # baz). That is what you would want to run the completion in a subprocess
  # with a timeout.

  # NOTE(review): comp_type/prefix/words are currently never updated after
  # initialization -- this strategy is still behind 'if 0' in RootCompleter.
  return comp_type, prefix, words
def _GetCompletionType1(parser, buf):
  """Classify what's being completed, using naive whitespace splitting.

  Args:
    parser: a DummyParser (anything with a GetWords() method).
    buf: the current line buffer.

  Returns:
    (comp_type, to_complete, words): a completion_state_e value, the partial
    word at the cursor, and the full list of split words.
  """
  words = parser.GetWords(buf)  # just does a dummy split for now

  n = len(words)
  # Complete variables
  # TODO: Parser should tell if we saw $, ${, but are NOT in a single quoted
  # state.  And also we didn't see $${, which would be a special var.  Oil
  # rules are almost the same.
  if n > 0 and words[-1].startswith('$'):
    comp_type = completion_state_e.VAR_NAME
    to_complete = words[-1]

  # Otherwise complete words
  elif n == 0:
    comp_type = completion_state_e.FIRST
    to_complete = ''
  elif n == 1:
    comp_type = completion_state_e.FIRST
    to_complete = words[-1]
  else:
    comp_type = completion_state_e.REST
    to_complete = words[-1]

  # NOTE: removed the unused 'comp_index' local; the caller
  # (RootCompleter.Matches) computes the word index itself.
  return comp_type, to_complete, words
class RootCompleter(object):
  """
  Provide completion of a buffer according to the configured rules.
  """
  def __init__(self, ev, comp_lookup, var_comp, parse_ctx, progress_f,
               debug_f):
    # ev: word evaluator, used by the (disabled) parser-based strategy
    self.ev = ev
    # comp_lookup: CompletionLookup of user-registered completers
    self.comp_lookup = comp_lookup
    # This can happen in any position, with any command
    self.var_comp = var_comp
    self.parse_ctx = parse_ctx
    # progress_f: status-line writer, shown while (slow) completions run
    self.progress_f = progress_f
    self.debug_f = debug_f

    # This simply splits words!
    self.parser = DummyParser()  # TODO: remove

  def Matches(self, comp):
    # Yields completion candidates for the current CompletionApi state.
    arena = alloc.SideArena('<completion>')

    # Two strategies:
    # 1. COMP_WORDBREAKS like bash. set_completer_delims()
    # 2. Use the actual OSH parser. Parse these cases:
    # - echo
    # - $VA
    # - ${VA
    # - $(echo h)
    # - <(echo h)
    # - >(echo h)
    # - ``
    # - $(( VA # This should be a variable name
    # - while false; do <TAB>
    # - if <TAB>
    # - while <TAB> -- bash gets this wrong!
    # - command <TAB> -- bash-completion fills this in
    # - alias completion?
    # - alias ll='ls -l'
    # - also var expansion?
    # foo=ls
    # $foo <TAB> (even ZSH doesn't seem to handle this)
    #
    # the empty completer is consistently wrong. Only works in the first
    # position.
    #
    # I think bash-completion is fighting with bash?
    #
    # completing aliases -- someone mentioned about zsh
    if 0:
      w_parser, c_parser = self.parse_ctx.MakeParserForCompletion(comp.line, arena)
      comp_type, to_complete, comp_words = _GetCompletionType(
          w_parser, c_parser, self.ev, self.debug_f)
    else:
      comp_type, to_complete, comp_words = _GetCompletionType1(self.parser, comp.line)

    index = len(comp_words) - 1  # COMP_CWORD is -1 when it's empty

    # After parsing
    comp.Update(words=comp_words, index=index, to_complete=to_complete)

    # Dispatch to a completer chain based on what kind of thing is being
    # completed.
    if comp_type == completion_state_e.VAR_NAME:
      # Non-user chain
      chain = self.var_comp
    elif comp_type == completion_state_e.HASH_KEY:
      # Non-user chain
      chain = 'TODO'
    elif comp_type == completion_state_e.REDIR_FILENAME:
      # Non-user chain
      chain = FileSystemAction()
    elif comp_type == completion_state_e.FIRST:
      chain = self.comp_lookup.GetFirstCompleter()
    elif comp_type == completion_state_e.REST:
      chain = self.comp_lookup.GetCompleterForName(comp_words[0])
    elif comp_type == completion_state_e.NONE:
      # Null chain? No completion? For example,
      # ${a:- <TAB> -- we have no idea what to put here
      chain = 'TODO'
    else:
      raise AssertionError(comp_type)

    self.progress_f.Write('Completing %r ... (Ctrl-C to cancel)', comp.line)
    start_time = time.time()

    self.debug_f.log('Using %s', chain)

    i = 0
    for m in chain.Matches(comp):
      # TODO: need to dedupe these
      yield m
      i += 1
      # Update the progress line after every match, since actions can be slow.
      elapsed = time.time() - start_time
      plural = '' if i == 1 else 'es'
      self.progress_f.Write(
          '... %d match%s for %r in %.2f seconds (Ctrl-C to cancel)', i,
          plural, comp.line, elapsed)

    elapsed = time.time() - start_time
    plural = '' if i == 1 else 'es'
    self.progress_f.Write(
        'Found %d match%s for %r in %.2f seconds', i,
        plural, comp.line, elapsed)
    done = True  # NOTE(review): unused variable

    # TODO: Have to de-dupe and sort these? Because 'echo' is a builtin as
    # well as a command, and we don't want to show it twice. Although then
    # it's not incremental. We can still show progress though. Need
    # status_line.
class ReadlineCompleter(object):
  """Adapter between GNU readline's entry-function protocol and RootCompleter."""

  def __init__(self, readline_mod, root_comp, debug_f):
    self.readline_mod = readline_mod
    self.root_comp = root_comp
    self.debug_f = debug_f

    self.comp_iter = None  # current completion being processed

  def _GetNextCompletion(self, state):
    # state == 0 is readline's signal for a fresh completion attempt: build
    # a new candidate iterator from the current line buffer.
    if state == 0:
      # TODO: Tokenize it according to our language. If this is $PS2, we also
      # need previous lines! Could make a VirtualLineReader instead of
      # StringLineReader?
      buf = self.readline_mod.get_line_buffer()

      # Begin: the index of the first char of the 'word' in the line. Words
      # are parsed according to readline delims (which we won't use).
      begin = self.readline_mod.get_begidx()

      # The current position of the cursor. The thing being completed.
      end = self.readline_mod.get_endidx()

      comp = CompletionApi(line=buf, begin=begin, end=end)

      self.debug_f.log(
          'line: %r / begin - end: %d - %d, part: %r', buf, begin, end,
          buf[begin:end])

      self.comp_iter = self.root_comp.Matches(comp)

    assert self.comp_iter is not None, self.comp_iter

    done = False
    while not done:
      self.debug_f.log('comp_iter.next()')
      try:
        next_completion = self.comp_iter.next()
        done = True
      except _RetryCompletion:
        # TODO: Is it OK to retry here? Shouldn't we retry in
        # RootCompleter, after we already know the words? That seems to run
        # into some problems with Python generators and exceptions.
        # I kind of want the 'g.send()' pattern to "prime the generator",
        # revealing the first exception.
        pass
      except StopIteration:
        next_completion = None  # sentinel?
        done = True

    return next_completion

  def __call__(self, unused_word, state):
    """Return a single match."""
    # NOTE: The readline library tokenizes words. We bypass that and use
    # get_line_buffer(). So we get 'for x in l' instead of just 'l'.

    #self.debug_f.log(0, 'word %r state %s', unused_word, state)
    try:
      return self._GetNextCompletion(state)
    except Exception as e:
      # readline swallows exceptions from the completer, so log them here
      # rather than losing them silently.
      #traceback.print_exc()
      self.debug_f.log('Unhandled exception while completing: %s', e)
    except SystemExit as e:
      # Because readline ignores SystemExit!
      posix._exit(e.code)
def InitReadline(readline_mod, complete_cb):
  """Configure readline: history file, TAB binding, completer, delims.

  Args:
    readline_mod: the readline module (injected for testability).
    complete_cb: the completion entry function (a ReadlineCompleter).
  """
  home_dir = posix.environ.get('HOME')
  if home_dir is None:
    home_dir = util.GetHomeDir()
    if home_dir is None:
      print("Couldn't find home dir in $HOME or /etc/passwd", file=sys.stderr)
      return
  history_filename = os_path.join(home_dir, 'oil_history')

  try:
    readline_mod.read_history_file(history_filename)
  except IOError:
    # Missing history file is normal on first run.
    pass

  # The 'atexit' module is a small wrapper around sys.exitfunc.
  atexit.register(readline_mod.write_history_file, history_filename)
  readline_mod.parse_and_bind("tab: complete")

  # How does this map to C?
  # https://cnswww.cns.cwru.edu/php/chet/readline/readline.html#SEC45
  readline_mod.set_completer(complete_cb)

  # NOTE: This apparently matters for -a -n completion -- why? Is space the
  # right value?
  # http://web.mit.edu/gnu/doc/html/rlman_2.html#SEC39
  # "The basic list of characters that signal a break between words for the
  # completer routine. The default value of this variable is the characters
  # which break words for completion in Bash, i.e., " \t\n\"\\'`@$><=;|&{(""
  #
  # Hm I don't get this.
  readline_mod.set_completer_delims(' ')
def Init(readline_mod, root_comp, debug_f):
  """Hook up tab completion: build the readline callback and install it."""
  callback = ReadlineCompleter(readline_mod, root_comp, debug_f)
  InitReadline(readline_mod, callback)
if __name__ == '__main__':
  # Manual demo: this does basic filename copmletion using readline's default
  # completer (ours is not installed here).
  import readline
  readline.parse_and_bind('tab: complete')
  while True:
    x = raw_input('$ ')  # NOTE: Python 2 builtin
    print(x)
| #!/usr/bin/env python
# Copyright 2016 <NAME>. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
completion.py - Tab completion.
TODO: Is this specific to osh/oil, or common?
Architecture:
Completion should run in threads? For two reasons:
- Completion can be slow -- e.g. completion for distributed resources
- Because readline has a weird interface, and then you can implement
"iterators" in C++ or oil. They just push onto a PIPE. Use a netstring
protocol and self-pipe?
- completion can be in another process anyway?
Does that mean the user code gets run in an entirely separate interpreter? The
whole lexer/parser/cmd_exec combo has to be thread-safe. Does it get a copy of
the same startup state?
bash note: most of this stuff is in pcomplete.c and bashline.c (4K lines!).
Uses ITEMLIST with a bunch of flags.
"""
from __future__ import print_function
import atexit
import posix
import sys
import time
#import traceback
from osh.meta import ast, runtime
from core import alloc
from core import os_path
from core import state
from core import util
import libc
command_e = ast.command_e
value_e = runtime.value_e
completion_state_e = runtime.completion_state_e
log = util.log
class _RetryCompletion(Exception):
"""For the 'exit 124' protocol."""
pass
class NullCompleter(object):
def Matches(self, comp):
return []
_NULL_COMPLETER = NullCompleter()
class CompletionLookup(object):
  """Stores completion hooks registered by the user.

  Lookup order for a command name (see GetCompleterForName):
    1. Exact match on the registered name (e.g. '/usr/bin/git').
    2. Match on the basename (e.g. 'git').
    3. First registered glob pattern matching the basename (e.g. '*.sh').
    4. The '__fallback' completer (complete -D).
  """

  def __init__(self):
    # command name -> ChainedCompleter
    # There are pseudo commands __first and __fallback for -E and -D.
    self.lookup = {
        '__fallback': _NULL_COMPLETER,
        '__first': _NULL_COMPLETER,
    }
    # So you can register *.sh, unlike bash.  List of (glob, [actions]),
    # searched linearly.
    self.patterns = []

  def PrintSpecs(self):
    """Print all registered completers, for debugging / 'complete' output."""
    for name in sorted(self.lookup):
      print('%-15r %s' % (name, self.lookup[name]))
    print('---')
    for pat, chain in self.patterns:
      print('%s = %s' % (pat, chain))

  def RegisterName(self, name, chain):
    """Register a completion action with a name.

    Used by the 'complete' builtin.
    """
    self.lookup[name] = chain

  def RegisterGlob(self, glob_pat, chain):
    """Register a completer for any command whose basename matches glob_pat."""
    self.patterns.append((glob_pat, chain))

  def GetFirstCompleter(self):
    """Return the completer used for the first word on the line (-E)."""
    return self.lookup['__first']

  def GetCompleterForName(self, argv0):
    """
    Args:
      argv0: A finished argv0 to lookup
    """
    if not argv0:
      return self.GetFirstCompleter()

    chain = self.lookup.get(argv0)  # NOTE: Could be ''
    if chain:
      return chain

    key = os_path.basename(argv0)
    # BUG FIX: the basename lookup result used to be assigned to an unused
    # 'actions' variable while the stale 'chain' was re-tested, so completers
    # registered under a basename were never found.
    chain = self.lookup.get(key)
    if chain:
      return chain

    for glob_pat, chain in self.patterns:
      #log('Matching %r %r', key, glob_pat)
      if libc.fnmatch(glob_pat, key):
        return chain

    # Nothing matched
    return self.lookup['__fallback']
class CompletionApi(object):
def __init__(self, line='', begin=0, end=0):
"""
Args:
index: if -1, then we're running through compgen
"""
self.line = line
self.begin = begin
self.end = end
# NOTE: COMP_WORDBREAKS is initliazed in Mem().
def Update(self, words=None, index=0, to_complete=''):
"""Added after we've done parsing."""
self.words = words or [] # COMP_WORDS
self.index = index # COMP_CWORD
self.to_complete = to_complete #
def GetApiInput(self):
"""Returns argv and comp_words."""
command = self.words[0]
if self.index == -1: # called directly by compgen, not by hitting TAB
prev = ''
comp_words = [] # not completing anything
else:
prev = '' if self.index == 0 else self.words[self.index - 1]
comp_words = self.words
return [command, self.to_complete, prev], comp_words
#
# Actions
#
class CompletionAction(object):
"""Returns a list of words.
Function
Literal words
"""
def __init__(self):
pass
def Matches(self, comp):
pass
class WordsAction(CompletionAction):
# NOTE: Have to split the words passed to -W. Using IFS or something else?
def __init__(self, words, delay=None):
self.words = words
self.delay = delay
def Matches(self, comp):
for w in self.words:
if w.startswith(comp.to_complete):
if self.delay:
time.sleep(self.delay)
yield w
class FileSystemAction(CompletionAction):
"""Complete paths from the file system.
Directories will have a / suffix.
TODO: We need a variant that tests for an executable bit.
"""
def __init__(self, dirs_only=False):
self.dirs_only = dirs_only
def Matches(self, comp):
to_complete = comp.to_complete
i = to_complete.rfind('/')
if i == -1: # it looks like 'foo'
to_list = '.'
base = ''
elif i == 0: # it's an absolute path to_complete like / or /b
to_list ='/'
base = '/'
else:
to_list = to_complete[:i]
base = to_list
#log('to_list %r', to_list)
try:
names = posix.listdir(to_list)
except OSError as e:
return # nothing
for name in names:
path = os_path.join(base, name)
if path.startswith(to_complete):
if self.dirs_only:
if os_path.isdir(path):
yield path
else:
if os_path.isdir(path):
yield path + '/'
else:
yield path
class ShellFuncAction(CompletionAction):
"""Call a user-defined function using bash's completion protocol."""
def __init__(self, ex, func):
self.ex = ex
self.func = func
def __repr__(self):
# TODO: Add file and line number here!
return '<ShellFuncAction %r>' % (self.func.name,)
def log(self, *args):
self.ex.debug_f.log(*args)
def Matches(self, comp):
# TODO: Delete COMPREPLY here? It doesn't seem to be defined in bash by
# default.
argv, comp_words = comp.GetApiInput()
state.SetGlobalArray(self.ex.mem, 'COMP_WORDS', comp_words)
state.SetGlobalString(self.ex.mem, 'COMP_CWORD', str(comp.index))
state.SetGlobalString(self.ex.mem, 'COMP_LINE', comp.line)
state.SetGlobalString(self.ex.mem, 'COMP_POINT', str(comp.end))
self.log('Running completion function %r with arguments %s',
self.func.name, argv)
status = self.ex.RunFuncForCompletion(self.func, argv)
if status == 124:
self.log('Got status 124 from %r', self.func.name)
raise _RetryCompletion()
# Lame: COMP_REPLY would follow the naming convention!
val = state.GetGlobal(self.ex.mem, 'COMPREPLY')
if val.tag == value_e.Undef:
util.error('Ran function %s but COMPREPLY was not defined', self.func.name)
return []
if val.tag != value_e.StrArray:
log('ERROR: COMPREPLY should be an array, got %s', val)
return []
self.log('COMPREPLY %s', val)
# Return this all at once so we don't have a generator. COMPREPLY happens
# all at once anyway.
return val.strs
class VariablesAction(object):
"""compgen -A variable."""
def __init__(self, mem):
self.mem = mem
def Matches(self, comp):
for var_name in self.mem.VarNames():
yield var_name
class VariablesActionInternal(object):
"""When we parse $VAR ourselves.
TODO: Also need to complete ${P (BracedVarSub)
"""
def __init__(self, mem):
self.mem = mem
def Matches(self, comp):
to_complete = comp.to_complete
assert to_complete.startswith('$')
to_complete = to_complete[1:]
for name in self.mem.VarNames():
if name.startswith(to_complete):
yield '$' + name + ' ' # full word
class ExternalCommandAction(object):
  """Complete commands in $PATH.

  NOTE: -A command in bash is FIVE things: aliases, builtins, functions,
  keywords, etc.
  """

  def __init__(self, mem):
    """
    Args:
      mem: for looking up Path
    """
    self.mem = mem
    # Should we list everything executable in $PATH here?  And then whenever
    # $PATH is changed, regenerated it?
    # Or we can cache directory listings?  What if the contents of the dir
    # changed?
    # Can we look at the dir timestamp?
    #
    # (dir, timestamp) -> list of entries perhaps?  And then every time you
    # hit tab, do you have to check the timestamp?  It should be cached by
    # the kernel, so yes.
    self.ext = []

    # (dir, timestamp) -> list
    # NOTE: This cache assumes that listing a directory is slower than
    # statting it to get the mtime.  That may not be true on all systems?
    # Either way you are reading blocks of metadata.  But I guess /bin on
    # many systems is huge, and will require lots of sys calls.
    self.cache = {}

  def Matches(self, comp):
    """
    TODO: Cache is never cleared.

    - When we get a newer timestamp, we should clear the old one.
    - When PATH is changed, we can remove old entries.
    """
    val = self.mem.GetVar('PATH')
    if val.tag != value_e.Str:
      # No matches if not a string
      return
    path_dirs = val.s.split(':')
    #print(path_dirs)

    names = []
    for d in path_dirs:
      try:
        st = posix.stat(d)
      except OSError as e:
        # There could be a directory that doesn't exist in the $PATH.
        continue
      key = (d, st.st_mtime)
      listing = self.cache.get(key)
      if listing is None:
        listing = posix.listdir(d)
        self.cache[key] = listing
      names.extend(listing)

    # TODO: Shouldn't do the prefix / space thing ourselves.  readline does
    # that at the END of the line.
    # BUG FIX: iterate over 'names' (entries accumulated from ALL $PATH
    # dirs).  The old code iterated 'listing', which only held the LAST
    # directory's entries, so commands in earlier $PATH dirs were missed.
    for word in names:
      if word.startswith(comp.to_complete):
        yield word + ' '
class GlobPredicate(object):
  """Filter predicate: keep words matching a glob.  !*.py filters them out.

  Weird syntax:
  -X *.py or -X !*.py

  Also & is a placeholder for the string being completed?  Yeah I probably
  want to get rid of this feature.
  """

  def __init__(self, glob_pat):
    # The glob pattern to match candidate words against.
    self.glob_pat = glob_pat

  def __call__(self, match):
    """Return True if 'match' fits the stored glob pattern."""
    pattern = self.glob_pat
    return libc.fnmatch(pattern, match)
class ChainedCompleter(object):
  """A completer that tries a bunch of them in order.

  NOTE: plus_dirs happens AFTER filtering with predicates?  We add BACK the
  dirs, e.g. -A file -X '!*.sh' -o plusdirs.

  NOTE: plusdirs can just create another chained completer.  I think you
  should probably get rid of the predicate.  That should just be a Filter().
  prefix and suffix can be adhoc for now I guess, since they are trivial.
  """

  def __init__(self, actions, predicate=None, prefix='', suffix=''):
    # actions: list of completion actions, consulted in order.
    self.actions = actions
    # TODO: predicate is for GlobPredicate, for -X.
    # Defaults to a predicate that accepts every word.
    self.predicate = predicate or (lambda word: True)
    # Literal strings wrapped around every emitted match.
    self.prefix = prefix
    self.suffix = suffix

  def Matches(self, comp, filter_func_matches=True):
    """Yield decorated matches from each action in turn.

    Args:
      comp: completion context with a .to_complete prefix.
      filter_func_matches: when False, matches from a ShellFuncAction skip
        the prefix/predicate filter (mirrors bash's compgen -F behavior).
    """
    # NOTE: This has to be evaluated eagerly so we get the _RetryCompletion
    # exception.
    for a in self.actions:
      for match in a.Matches(comp):
        # Special case hack to match bash for compgen -F.  It doesn't filter by
        # to_complete!
        # Precedence note: 'and' binds tighter than 'or', i.e. this is
        # (startswith AND predicate) OR (func-action bypass).
        show = (
            match.startswith(comp.to_complete) and self.predicate(match) or
            (isinstance(a, ShellFuncAction) and not filter_func_matches)
        )

        # There are two kinds of filters: changing the string, and filtering
        # the set of strings.  So maybe have modifiers AND filters?  A triple.
        if show:
          yield self.prefix + match + self.suffix

    # Prefix is the current one?

    # What if the cursor is not at the end of line?  See readline interface.
    # That's OK -- we just truncate the line at the cursor?
    # Hm actually zsh does something smarter, and which is probably preferable.
    # It completes the word that

  def __str__(self):
    return '<ChainedCompleter %s %s %r %r>' % (
        self.actions, self.predicate, self.prefix, self.suffix)
class DummyParser(object):
  """Placeholder word splitter used until the real parser path is enabled."""

  def GetWords(self, buf):
    """Split buf on whitespace, appending '' after a trailing space.

    'grep ' -> ['grep', ''], so the (empty) second word is the one being
    completed.
    """
    parts = buf.split()
    if buf.endswith(' '):
      parts.append('')
    return parts
def _FindLastSimpleCommand(node):
  """Recursively find the trailing SimpleCommand node, or None.

  The last thing has to be a simple command.  Cases:

  echo a; echo b
  ls | wc -l
  test -f foo && hello
  """
  if node.tag == command_e.SimpleCommand:
    return node
  if node.tag == command_e.Sentence:
    # A command terminated by ; or & -- unwrap it.
    return node.child
  if node.tag == command_e.TimeBlock:
    child = node.pipeline
    if child.tag == command_e.SimpleCommand:
      return child
    if child.tag == command_e.Pipeline:
      # NOTE(review): returns the FIRST pipeline child here, while the
      # generic case below takes children[-1] -- confirm this asymmetry
      # is intentional.
      return child.children[0]
  if node.tag == command_e.Assignment:
    return None
  if node.tag == command_e.ControlFlow:
    return None

  # Any other node type is expected to be a container with children.
  assert hasattr(node, 'children'), node
  n = len(node.children)
  if n == 0:
    return None

  # Go as deep as we need.
  return _FindLastSimpleCommand(node.children[-1])
def _GetCompletionType(w_parser, c_parser, ev, debug_f):
  """
  Parser returns completion state.
  Then we translate that into completion_state_e.

  Returns:
    comp_type
    prefix: the prefix to complete
    comp_words: list of words.  First word is used for dispatching.

    TODO: what about hash table name?

  NOTE: As currently written, comp_type/prefix/words are never reassigned
  after their defaults below, so this function always returns
  (completion_state_e.FIRST, '', []).  The parse results computed here
  (argv, com_node, ...) are inspected but not yet used -- this is
  work-in-progress scaffolding.
  """
  # TODO: Fill these in
  comp_type = completion_state_e.FIRST
  prefix = ''
  words = []

  try:
    node = c_parser.ParseLogicalLine()
  except util.ParseError as e:
    # Partial input is expected mid-completion; fall back to the defaults.
    return comp_type, prefix, words  # EARLY RETURN

  # Inspect state after parsing.  Hm I'm getting the newline.  Can I view the
  # one before that?
  cur_token = w_parser.cur_token
  prev_token = w_parser.PrevToken()
  cur_word = w_parser.cursor
  comp_state = c_parser.GetCompletionState()

  com_node = None
  if node:
    # These 4 should all parse
    if node.tag == command_e.SimpleCommand:
      # NOTE: prev_token can be ;, then complete a new one
      #print('WORDS', node.words)
      # TODO:
      # - EvalVarSub depends on memory
      # - EvalTildeSub needs to be somewhere else
      # - EvalCommandSub needs to be
      #
      # maybe write a version of Executor._EvalWordSequence that doesn't do
      # CommandSub.  Or honestly you can just reuse it for now.  Can you pass
      # the same cmd_exec in?  What about side effects?  I guess it can't
      # really have any.  It can only have them on the file system.  Hm.
      # Defining funcitons?  Yeah if you complete partial functions that could
      # be bad.  That is, you could change the name of the function.

      argv = []
      for w in node.words:
        try:
          # TODO: Should we call EvalWordSequence?  But turn globbing off?  It
          # can do splitting and such.
          val = ev.EvalWordToString(w)
        except util.FatalRuntimeError:
          # Why would it fail?
          continue
        if val.tag == value_e.Str:
          argv.append(val.s)
        else:
          pass
          # Oh I have to handle $@ on the command line?

      #print(argv)
      com_node = node

    elif node.tag == command_e.CommandList:  # echo a; echo b
      com_node = _FindLastSimpleCommand(node)
    elif node.tag == command_e.AndOr:  # echo a && echo b
      com_node = _FindLastSimpleCommand(node)
    elif node.tag == command_e.Pipeline:  # echo a | wc -l
      com_node = _FindLastSimpleCommand(node)
    else:
      # Return NONE?  Not handling it for now
      pass
  else:  # No node.
    pass

  # TODO: Need to show buf... Need a multiline display for debugging?
  if 0:
    debug_f.log('prev_token %s  cur_token %s  cur_word %s',
                prev_token, cur_token, cur_word)
    debug_f.log('comp_state %s  error %s', comp_state, c_parser.Error())
    # This one can be multiple lines
    debug_f.log('node: %s %s', repr(node) if node else '<Parse Error>',
                node.tag if node else '')
    # This one can be multiple lines
    debug_f.log('com_node: %s', repr(com_node) if com_node else '<None>')

  # IMPORTANT: if the last token is Id.Ignored_Space, then we want to add a
  # dummy word!  empty word

  # initial simple algorithm
  # If we got a node:
  #   1. Look at c_parser.LastCompletionState()
  #   1. don't complete unless it's SIMPLE_COMMAND?
  #   2. look at node.words -- first word or not?
  #   3. EvalStatic() of the first word

  # If we got None:
  #   1. Look at c_parser.LastCompletionState()
  #   2. If it is $ or ${, complete var names
  #
  # Is there any case where we shoudl fall back on buf.split()?

  # Now parse it.  And then look at the AST, but don't eval?  Or actually we
  # CAN eval, but we probably don't want to.
  #
  # completion state also has to know about ${pre<TAB>  and ${foo[pre<TAB>
  # Those are invalid parses.  But the LAST TOKEN is the one we want to
  # complete?  Will it be a proper group of LIT tokens?  I don't think you
  # complete anything else besides that?
  #
  # $<TAB> will be Id.Lit_Other -- but you might want to special case
  # $na<TAB> will be VS_NAME

  # NOTE: The LineLexer adds \n to the buf?  Should we disable it and add \0?

  # I guess the shortest way to do it is to just Eval(), and even run command
  # sub.  Or maybe SafeEval() for command sub returns __DUMMY__ or None or
  # some other crap.

  # I guess in oil you could have some arbitrarily long function in
  # $split(bar, baz).  That is what you would want to run the completion in a
  # subprocess with a timeout.
  return comp_type, prefix, words
def _GetCompletionType1(parser, buf):
words = parser.GetWords(buf) # just does a dummy split for now
n = len(words)
# Complete variables
# TODO: Parser should tell if we saw $, ${, but are NOT in a single quoted
# state. And also we didn't see $${, which would be a special var. Oil
# rules are almost the same.
if n > 0 and words[-1].startswith('$'):
comp_type = completion_state_e.VAR_NAME
to_complete = words[-1]
# Otherwise complete words
elif n == 0:
comp_type = completion_state_e.FIRST
to_complete = ''
elif n == 1:
comp_type = completion_state_e.FIRST
to_complete = words[-1]
else:
comp_type = completion_state_e.REST
to_complete = words[-1]
comp_index = len(words) - 1
return comp_type, to_complete, words
class RootCompleter(object):
  """
  Provide completion of a buffer according to the configured rules.

  This is the top-level dispatcher: it classifies what is being completed
  (first word, later word, variable name, ...), picks the appropriate
  completer chain, and streams its matches while writing progress updates.
  """

  def __init__(self, ev, comp_lookup, var_comp, parse_ctx, progress_f,
               debug_f):
    # ev: word evaluator, used by the (currently disabled) parser path.
    self.ev = ev
    # comp_lookup: CompletionLookup registry of per-command completers.
    self.comp_lookup = comp_lookup
    # This can happen in any position, with any command
    self.var_comp = var_comp
    self.parse_ctx = parse_ctx
    self.progress_f = progress_f
    self.debug_f = debug_f

    # This simply splits words!
    self.parser = DummyParser()  # TODO: remove

  def Matches(self, comp):
    """Generator yielding completion candidates for the current buffer."""
    arena = alloc.SideArena('<completion>')

    # Two strategies:
    # 1. COMP_WORDBREAKS like bash.  set_completer_delims()
    # 2. Use the actual OSH parser.  Parse these cases:
    #   - echo
    #   - $VA
    #   - ${VA
    #   - $(echo h)
    #   - <(echo h)
    #   - >(echo h)
    #   - ``
    #   - $(( VA    # This should be a variable name
    #   - while false; do <TAB>
    #   - if <TAB>
    #   - while <TAB> -- bash gets this wrong!
    #   - command <TAB> -- bash-completion fills this in
    #   - alias completion?
    #     - alias ll='ls -l'
    #   - also var expansion?
    #     foo=ls
    #     $foo <TAB>    (even ZSH doesn't seem to handle this)
    #
    # the empty completer is consistently wrong.  Only works in the first
    # position.
    #
    # I think bash-completion is fighting with bash?
    #
    # completing aliases -- someone mentioned about zsh

    # The parser-based path is disabled; the dummy word splitter is used.
    if 0:
      w_parser, c_parser = self.parse_ctx.MakeParserForCompletion(comp.line, arena)
      comp_type, to_complete, comp_words = _GetCompletionType(
          w_parser, c_parser, self.ev, self.debug_f)
    else:
      comp_type, to_complete, comp_words = _GetCompletionType1(self.parser, comp.line)

    index = len(comp_words) - 1  # COMP_CWORD is -1 when it's empty

    # After parsing
    comp.Update(words=comp_words, index=index, to_complete=to_complete)

    # Pick the completer chain for this kind of completion.
    if comp_type == completion_state_e.VAR_NAME:
      # Non-user chain
      chain = self.var_comp
    elif comp_type == completion_state_e.HASH_KEY:
      # Non-user chain.  NOTE(review): 'TODO' is a string placeholder --
      # reaching chain.Matches() below would raise AttributeError.
      chain = 'TODO'
    elif comp_type == completion_state_e.REDIR_FILENAME:
      # Non-user chain
      chain = FileSystemAction()

    elif comp_type == completion_state_e.FIRST:
      chain = self.comp_lookup.GetFirstCompleter()
    elif comp_type == completion_state_e.REST:
      chain = self.comp_lookup.GetCompleterForName(comp_words[0])

    elif comp_type == completion_state_e.NONE:
      # Null chain?  No completion?  For example,
      # ${a:- <TAB>  -- we have no idea what to put here
      chain = 'TODO'
    else:
      raise AssertionError(comp_type)

    self.progress_f.Write('Completing %r ... (Ctrl-C to cancel)', comp.line)
    start_time = time.time()

    self.debug_f.log('Using %s', chain)

    i = 0
    for m in chain.Matches(comp):
      # TODO: need to dedupe these
      yield m
      i += 1
      # Progress is updated after every match, since a single action (e.g. a
      # slow shell function) can take arbitrarily long.
      elapsed = time.time() - start_time
      plural = '' if i == 1 else 'es'
      self.progress_f.Write(
          '... %d match%s for %r in %.2f seconds (Ctrl-C to cancel)', i,
          plural, comp.line, elapsed)

    elapsed = time.time() - start_time
    plural = '' if i == 1 else 'es'
    self.progress_f.Write(
        'Found %d match%s for %r in %.2f seconds', i,
        plural, comp.line, elapsed)
    # NOTE(review): 'done' is assigned but never read.
    done = True

    # TODO: Have to de-dupe and sort these?  Because 'echo' is a builtin as
    # well as a command, and we don't want to show it twice.  Although then
    # it's not incremental.  We can still show progress though.  Need
    # status_line.
class ReadlineCompleter(object):
  """Adapter between GNU readline's completer callback and RootCompleter.

  readline calls the object repeatedly with an increasing 'state' integer;
  state 0 starts a new completion and each call returns one match (or None
  when exhausted).
  """

  def __init__(self, readline_mod, root_comp, debug_f):
    self.readline_mod = readline_mod
    self.root_comp = root_comp
    self.debug_f = debug_f

    self.comp_iter = None  # current completion being processed

  def _GetNextCompletion(self, state):
    if state == 0:
      # TODO: Tokenize it according to our language.  If this is $PS2, we
      # also need previous lines!  Could make a VirtualLineReader instead of
      # StringLineReader?
      buf = self.readline_mod.get_line_buffer()

      # Begin: the index of the first char of the 'word' in the line.  Words
      # are parsed according to readline delims (which we won't use).
      begin = self.readline_mod.get_begidx()

      # The current position of the cursor.  The thing being completed.
      end = self.readline_mod.get_endidx()

      comp = CompletionApi(line=buf, begin=begin, end=end)

      self.debug_f.log(
          'line: %r / begin - end: %d - %d, part: %r', buf, begin, end,
          buf[begin:end])

      self.comp_iter = self.root_comp.Matches(comp)

    assert self.comp_iter is not None, self.comp_iter

    done = False
    while not done:
      self.debug_f.log('comp_iter.next()')

      try:
        # NOTE: .next() is Python 2 iterator protocol.
        next_completion = self.comp_iter.next()
        done = True
      except _RetryCompletion:
        # TODO: Is it OK to retry here?  Shouldn't we retry in
        # RootCompleter, after we already know the words?  That seems to run
        # into some problems with Python generators and exceptions.
        # I kind of want the 'g.send()' pattern to "prime the generator",
        # revealing the first exception.
        pass
      except StopIteration:
        next_completion = None  # sentinel: tells readline we're done
        done = True

    return next_completion

  def __call__(self, unused_word, state):
    """Return a single match."""
    # NOTE: The readline library tokenizes words.  We bypass that and use
    # get_line_buffer().  So we get 'for x in l' instead of just 'l'.

    #self.debug_f.log(0, 'word %r state %s', unused_word, state)
    try:
      return self._GetNextCompletion(state)
    except Exception as e:
      # readline swallows exceptions from the completer; log so they are
      # at least visible.  (Returns None implicitly, ending completion.)
      #traceback.print_exc()
      self.debug_f.log('Unhandled exception while completing: %s', e)
    except SystemExit as e:
      # Because readline ignores SystemExit!
      posix._exit(e.code)
def InitReadline(readline_mod, complete_cb):
  """Configure persistent history and tab completion on readline.

  Args:
    readline_mod: the readline module (injected for testability).
    complete_cb: callable following readline's completer protocol.
  """
  # Locate the home directory: $HOME first, then /etc/passwd.
  home_dir = posix.environ.get('HOME')
  if home_dir is None:
    home_dir = util.GetHomeDir()
  if home_dir is None:
    print("Couldn't find home dir in $HOME or /etc/passwd", file=sys.stderr)
    return

  history_filename = os_path.join(home_dir, 'oil_history')

  try:
    readline_mod.read_history_file(history_filename)
  except IOError:
    pass  # no history file yet; that's fine

  # Persist history on exit.  (atexit is a small wrapper around
  # sys.exitfunc.)
  atexit.register(readline_mod.write_history_file, history_filename)
  readline_mod.parse_and_bind("tab: complete")

  # How does this map to C?
  # https://cnswww.cns.cwru.edu/php/chet/readline/readline.html#SEC45
  readline_mod.set_completer(complete_cb)

  # NOTE: This apparently matters for -a -n completion -- why?  Is space the
  # right value?
  # http://web.mit.edu/gnu/doc/html/rlman_2.html#SEC39
  # "The basic list of characters that signal a break between words for the
  # completer routine.  The default value of this variable is the characters
  # which break words for completion in Bash, i.e., " \t\n\"\\'`@$><=;|&{(""
  #
  # Hm I don't get this.
  readline_mod.set_completer_delims(' ')
def Init(readline_mod, root_comp, debug_f):
  """Install a ReadlineCompleter for root_comp as readline's completer."""
  InitReadline(readline_mod, ReadlineCompleter(readline_mod, root_comp, debug_f))
if __name__ == '__main__':
  # Demo: basic filename completion using readline's built-in completer
  # (none of the machinery above is wired up here).
  # NOTE: raw_input is Python 2; loop exits via EOFError on Ctrl-D.
  import readline
  readline.parse_and_bind('tab: complete')
  while True:
    x = raw_input('$ ')
    print(x)
TODO: We need a variant that tests for an executable bit. # it looks like 'foo' # it's an absolute path to_complete like / or /b #log('to_list %r', to_list) # nothing Call a user-defined function using bash's completion protocol. # TODO: Add file and line number here! # TODO: Delete COMPREPLY here? It doesn't seem to be defined in bash by # default. # Lame: COMP_REPLY would follow the naming convention! # Return this all at once so we don't have a generator. COMPREPLY happens # all at once anyway. compgen -A variable. When we parse $VAR ourselves. TODO: Also need to complete ${P (BracedVarSub) # full word Complete commands in $PATH. NOTE: -A command in bash is FIVE things: aliases, builtins, functions, keywords, etc. Args: mem: for looking up Path # Should we list everything executable in $PATH here? And then whenever # $PATH is changed, regenerated it? # Or we can cache directory listings? What if the contents of the dir # changed? # Can we look at the dir timestamp? # # (dir, timestamp) -> list of entries perhaps? And then every time you hit # tab, do you have to check the timestamp? It should be cached by the # kernel, so yes. # (dir, timestamp) -> list # NOTE: This cache assumes that listing a directory is slower than statting # it to get the mtime. That may not be true on all systems? Either way # you are reading blocks of metadata. But I guess /bin on many systems is # huge, and will require lots of sys calls. TODO: Cache is never cleared. - When we get a newer timestamp, we should clear the old one. - When PATH is changed, we can remove old entries. # No matches if not a string #print(path_dirs) # There could be a directory that doesn't exist in the $PATH. # TODO: Shouldn't do the prefix / space thing ourselves. readline does # that at the END of the line. Expand into files that match a pattern. !*.py filters them. Weird syntax: -X *.py or -X !*.py Also & is a placeholder for the string being completed?. Yeah I probably want to get rid of this feature. 
A completer that tries a bunch of them in order. NOTE: plus_dirs happens AFTER filtering with predicates? We add BACK the dirs, e.g. -A file -X '!*.sh' -o plusdirs. NOTE: plusdirs can just create another chained completer. I think you should probably get rid of the predicate. That should just be a Filter(). prefix and suffix can be adhoc for now I guess, since they are trivial. # TODO: predicate is for GlobPredicate, for -X # NOTE: This has to be evaluated eagerly so we get the _RetryCompletion # exception. # Special case hack to match bash for compgen -F. It doesn't filter by # to_complete! # There are two kinds of filters: changing the string, and filtering # the set of strings. So maybe have modifiers AND filters? A triple. # Prefix is the current one? # What if the cursor is not at the end of line? See readline interface. # That's OK -- we just truncate the line at the cursor? # Hm actually zsh does something smarter, and which is probably preferable. # It completes the word that # 'grep ' -> ['grep', ''], so we're completing the second word The last thing has to be a simple command. Cases: echo a; echo b ls | wc -l test -f foo && hello # Go as deep as we need. Parser returns completion state. Then we translate that into completion_state_e. Returns: comp_type prefix: the prefix to complete comp_words: list of words. First word is used for dispatching. TODO: what about hash table name? # TODO: Fill these in # EARLY RETURN # Inspect state after parsing. Hm I'm getting the newline. Can I view the # one before that? # These 4 should all parse # NOTE: prev_token can be ;, then complete a new one #print('WORDS', node.words) # TODO: # - EvalVarSub depends on memory # - EvalTildeSub needs to be somewhere else # - EvalCommandSub needs to be # # maybe write a version of Executor._EvalWordSequence that doesn't do # CommandSub. Or honestly you can just reuse it for now. Can you pass # the same cmd_exec in? What about side effects? I guess it can't # really have any. 
It can only have them on the file system. Hm. # Defining funcitons? Yeah if you complete partial functions that could # be bad. That is, you could change the name of the function. # TODO: Should we call EvalWordSequence? But turn globbing off? It # can do splitting and such. # Why would it fail? # Oh I have to handle $@ on the command line? #print(argv) # echo a; echo b # echo a && echo b # echo a | wc -l # Return NONE? Not handling it for now # No node. # TODO: Need to show buf... Need a multiline display for debugging? # This one can be multiple lines # This one can be multiple lines # IMPORTANT: if the last token is Id.Ignored_Space, then we want to add a # dummy word! empty word # initial simple algorithm # If we got a node: # 1. Look at c_parser.LastCompletionState() # 1. don't complete unless it's SIMPLE_COMMAND? # 2. look at node.words -- first word or not? # 3. EvalStatic() of the first word # If we got None: # 1. Look at c_parser.LastCompletionState() # 2. If it is $ or ${, complete var names # # Is there any case where we shoudl fall back on buf.split()? # Now parse it. And then look at the AST, but don't eval? Or actually we # CAN eval, but we probably don't want to. # # completion state also has to know about ${pre<TAB> and ${foo[pre<TAB> # Those are invalid parses. But the LAST TOKEN is the one we want to # complete? Will it be a proper group of LIT tokens? I don't think you # complete anything else besides that? # # $<TAB> will be Id.Lit_Other -- but you might want to special case # $na<TAB> will be VS_NAME # NOTE: The LineLexer adds \n to the buf? Should we disable it and add \0? # I guess the shortest way to do it is to just Eval(), and even run command # sub. Or maybe SafeEval() for command sub returns __DUMMY__ or None or some # other crap. # I guess in oil you could have some arbitrarily long function in $split(bar, # baz). That is what you would want to run the completion in a subprocess # with a timeout. 
# just does a dummy split for now # Complete variables # TODO: Parser should tell if we saw $, ${, but are NOT in a single quoted # state. And also we didn't see $${, which would be a special var. Oil # rules are almost the same. # Otherwise complete words Provide completion of a buffer according to the configured rules. # This can happen in any position, with any command # This simply splits words! # TODO: remove # Two strategies: # 1. COMP_WORDBREAKS like bash. set_completer_delims() # 2. Use the actual OSH parser. Parse these cases: # - echo # - $VA # - ${VA # - $(echo h) # - <(echo h) # - >(echo h) # - `` # - $(( VA # This should be a variable name # - while false; do <TAB> # - if <TAB> # - while <TAB> -- bash gets this wrong! # - command <TAB> -- bash-completion fills this in # - alias completion? # - alias ll='ls -l' # - also var expansion? # foo=ls # $foo <TAB> (even ZSH doesn't seem to handle this) # # the empty completer is consistently wrong. Only works in the first # position. # # I think bash-completion is fighting with bash? # # completing aliases -- someone mentioned about zsh # COMP_CWORD is -1 when it's empty # After parsing # Non-user chain # Non-user chain # Non-user chain # Null chain? No completion? For example, # ${a:- <TAB> -- we have no idea what to put here # TODO: need to dedupe these # TODO: Have to de-dupe and sort these? Because 'echo' is a builtin as # well as a command, and we don't want to show it twice. Although then # it's not incremental. We can still show progress though. Need # status_line. # current completion being processed # TODO: Tokenize it according to our language. If this is $PS2, we also # need previous lines! Could make a VirtualLineReader instead of # StringLineReader? # Begin: the index of the first char of the 'word' in the line. Words # are parsed according to readline delims (which we won't use). # The current position of the cursor. The thing being completed. # TODO: Is it OK to retry here? 
Shouldn't we retry in # RootCompleter, after we already know the words? That seems to run # into some problems with Python generators and exceptions. # I kind of want the 'g.send()' pattern to "prime the generator", # revealing the first exception. # sentinel? Return a single match. # NOTE: The readline library tokenizes words. We bypass that and use # get_line_buffer(). So we get 'for x in l' instead of just 'l'. #self.debug_f.log(0, 'word %r state %s', unused_word, state) #traceback.print_exc() # Because readline ignores SystemExit! # The 'atexit' module is a small wrapper around sys.exitfunc. # How does this map to C? # https://cnswww.cns.cwru.edu/php/chet/readline/readline.html#SEC45 # NOTE: This apparently matters for -a -n completion -- why? Is space the # right value? # http://web.mit.edu/gnu/doc/html/rlman_2.html#SEC39 # "The basic list of characters that signal a break between words for the # completer routine. The default value of this variable is the characters # which break words for completion in Bash, i.e., " \t\n\"\\'`@$><=;|&{("" # # Hm I don't get this. # This does basic filename copmletion | 1.854989 | 2 |
monai/transforms/spatial/dictionary.py | ajabri/MONAI | 1 | 6624428 | <filename>monai/transforms/spatial/dictionary.py<gh_stars>1-10
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of dictionary-based wrappers around the "vanilla" transforms for spatial operations
defined in :py:class:`monai.transforms.spatial.array`.
Class names are ended with 'd' to denote dictionary-based transforms.
"""
from typing import Any, Dict, Hashable, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from monai.config import DtypeLike, KeysCollection
from monai.networks.layers.simplelayers import GaussianFilter
from monai.transforms.croppad.array import CenterSpatialCrop
from monai.transforms.spatial.array import (
Affine,
Flip,
Orientation,
Rand2DElastic,
Rand3DElastic,
RandAffine,
Resize,
Rotate,
Rotate90,
Spacing,
Zoom,
)
from monai.transforms.transform import MapTransform, RandomizableTransform
from monai.transforms.utils import create_grid
from monai.utils import (
GridSampleMode,
GridSamplePadMode,
InterpolateMode,
NumpyPadMode,
ensure_tuple,
ensure_tuple_rep,
fall_back_tuple,
)
__all__ = [
"Spacingd",
"Orientationd",
"Rotate90d",
"RandRotate90d",
"Resized",
"Affined",
"RandAffined",
"Rand2DElasticd",
"Rand3DElasticd",
"Flipd",
"RandFlipd",
"RandAxisFlipd",
"Rotated",
"RandRotated",
"Zoomd",
"RandZoomd",
"SpacingD",
"SpacingDict",
"OrientationD",
"OrientationDict",
"Rotate90D",
"Rotate90Dict",
"RandRotate90D",
"RandRotate90Dict",
"ResizeD",
"ResizeDict",
"AffineD",
"AffineDict",
"RandAffineD",
"RandAffineDict",
"Rand2DElasticD",
"Rand2DElasticDict",
"Rand3DElasticD",
"Rand3DElasticDict",
"FlipD",
"FlipDict",
"RandFlipD",
"RandFlipDict",
"RandAxisFlipD",
"RandAxisFlipDict",
"RotateD",
"RotateDict",
"RandRotateD",
"RandRotateDict",
"ZoomD",
"ZoomDict",
"RandZoomD",
"RandZoomDict",
]
GridSampleModeSequence = Union[Sequence[Union[GridSampleMode, str]], GridSampleMode, str]
GridSamplePadModeSequence = Union[Sequence[Union[GridSamplePadMode, str]], GridSamplePadMode, str]
InterpolateModeSequence = Union[Sequence[Union[InterpolateMode, str]], InterpolateMode, str]
NumpyPadModeSequence = Union[Sequence[Union[NumpyPadMode, str]], NumpyPadMode, str]
class Spacingd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.Spacing`.
This transform assumes the ``data`` dictionary has a key for the input
data's metadata and contains `affine` field. The key is formed by ``key_{meta_key_postfix}``.
After resampling the input array, this transform will write the new affine
to the `affine` field of metadata which is formed by ``key_{meta_key_postfix}``.
see also:
:py:class:`monai.transforms.Spacing`
"""
def __init__(
self,
keys: KeysCollection,
pixdim: Sequence[float],
diagonal: bool = False,
mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER,
align_corners: Union[Sequence[bool], bool] = False,
dtype: Optional[Union[Sequence[DtypeLike], DtypeLike]] = np.float64,
meta_key_postfix: str = "meta_dict",
allow_missing_keys: bool = False,
) -> None:
"""
Args:
pixdim: output voxel spacing.
diagonal: whether to resample the input to have a diagonal affine matrix.
If True, the input data is resampled to the following affine::
np.diag((pixdim_0, pixdim_1, pixdim_2, 1))
This effectively resets the volume to the world coordinate system (RAS+ in nibabel).
The original orientation, rotation, shearing are not preserved.
If False, the axes orientation, orthogonal rotation and
translations components from the original affine will be
preserved in the target affine. This option will not flip/swap
axes against the original ones.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
It also can be a sequence of string, each element corresponds to a key in ``keys``.
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
It also can be a sequence of string, each element corresponds to a key in ``keys``.
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
It also can be a sequence of bool, each element corresponds to a key in ``keys``.
dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
It also can be a sequence of dtypes, each element corresponds to a key in ``keys``.
meta_key_postfix: use `key_{postfix}` to to fetch the meta data according to the key data,
default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
allow_missing_keys: don't raise exception if key is missing.
Raises:
TypeError: When ``meta_key_postfix`` is not a ``str``.
"""
super().__init__(keys, allow_missing_keys)
self.spacing_transform = Spacing(pixdim, diagonal=diagonal)
self.mode = ensure_tuple_rep(mode, len(self.keys))
self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
self.dtype = ensure_tuple_rep(dtype, len(self.keys))
if not isinstance(meta_key_postfix, str):
raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
self.meta_key_postfix = meta_key_postfix
def __call__(
    self, data: Mapping[Union[Hashable, str], Dict[str, np.ndarray]]
) -> Dict[Union[Hashable, str], Union[np.ndarray, Dict[str, np.ndarray]]]:
    """Resample each selected array to the target pixdim and update the ``affine``
    entry of its companion ``key_{meta_key_postfix}`` metadata dictionary."""
    d: Dict = dict(data)
    iterator = self.key_iterator(d, self.mode, self.padding_mode, self.align_corners, self.dtype)
    for key, mode, padding_mode, align_corners, dtype in iterator:
        meta = d[f"{key}_{self.meta_key_postfix}"]
        # resample the array using the affine stored in its metadata dict
        resampled, _, updated_affine = self.spacing_transform(
            data_array=np.asarray(d[key]),
            affine=meta["affine"],
            mode=mode,
            padding_mode=padding_mode,
            align_corners=align_corners,
            dtype=dtype,
        )
        d[key] = resampled
        # write the post-resampling affine back so downstream transforms see it
        meta["affine"] = updated_affine
    return d
class Orientationd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Orientation`.

    For every key, a metadata dictionary is looked up under ``key_{meta_key_postfix}``
    whose `affine` field describes the current orientation. After the array has been
    reoriented, the updated affine is written back into that same metadata dictionary.
    """

    def __init__(
        self,
        keys: KeysCollection,
        axcodes: Optional[str] = None,
        as_closest_canonical: bool = False,
        labels: Optional[Sequence[Tuple[str, str]]] = tuple(zip("LPI", "RAS")),
        meta_key_postfix: str = "meta_dict",
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            axcodes: N elements sequence for spatial ND input's orientation.
                e.g. axcodes='RAS' represents 3D orientation:
                (Left, Right), (Posterior, Anterior), (Inferior, Superior).
                default orientation labels options are: 'L' and 'R' for the first dimension,
                'P' and 'A' for the second, 'I' and 'S' for the third.
            as_closest_canonical: if True, load the image as closest to canonical axis format.
            labels: optional, None or sequence of (2,) sequences, each being labels for
                (beginning, end) of an output axis.
                Defaults to ``(('L', 'R'), ('P', 'A'), ('I', 'S'))``.
            meta_key_postfix: use `key_{postfix}` to fetch the meta data according to the key data,
                default is `meta_dict`; the meta data is a dictionary object.
                For example, to handle key `image`, read/write affine matrices from the
                metadata `image_meta_dict` dictionary's `affine` field.
            allow_missing_keys: don't raise exception if key is missing.

        Raises:
            TypeError: When ``meta_key_postfix`` is not a ``str``.

        See Also:
            `nibabel.orientations.ornt2axcodes`.
        """
        super().__init__(keys, allow_missing_keys)
        self.ornt_transform = Orientation(axcodes=axcodes, as_closest_canonical=as_closest_canonical, labels=labels)
        if not isinstance(meta_key_postfix, str):
            raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
        self.meta_key_postfix = meta_key_postfix

    def __call__(
        self, data: Mapping[Union[Hashable, str], Dict[str, np.ndarray]]
    ) -> Dict[Union[Hashable, str], Union[np.ndarray, Dict[str, np.ndarray]]]:
        d: Dict = dict(data)
        for key in self.key_iterator(d):
            meta = d[f"{key}_{self.meta_key_postfix}"]
            # reorient using the stored affine, then record the new affine
            reoriented, _, updated_affine = self.ornt_transform(d[key], affine=meta["affine"])
            d[key] = reoriented
            meta["affine"] = updated_affine
        return d
class Rotate90d(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Rotate90`.
    """

    def __init__(
        self, keys: KeysCollection, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1), allow_missing_keys: bool = False
    ) -> None:
        """
        Args:
            k: number of times to rotate by 90 degrees.
            spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.
                Default: (0, 1), i.e. the first two axes in spatial dimensions.
            allow_missing_keys: don't raise exception if key is missing.
        """
        super().__init__(keys, allow_missing_keys)
        self.rotator = Rotate90(k, spatial_axes)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        d = dict(data)
        # apply the same deterministic rotation to every selected key
        for item_key in self.key_iterator(d):
            d[item_key] = self.rotator(d[item_key])
        return d
class RandRotate90d(RandomizableTransform, MapTransform):
    """
    Dictionary-based version :py:class:`monai.transforms.RandRotate90`.
    With probability `prob`, input arrays are rotated by 90 degrees
    in the plane specified by `spatial_axes`.
    """

    def __init__(
        self,
        keys: KeysCollection,
        prob: float = 0.1,
        max_k: int = 3,
        spatial_axes: Tuple[int, int] = (0, 1),
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
                See also: :py:class:`monai.transforms.compose.MapTransform`
            prob: probability of rotating.
                (Default 0.1, with 10% probability it returns a rotated array.)
            max_k: number of rotations will be sampled from `np.random.randint(max_k) + 1`.
                (Default 3)
            spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.
                Default: (0, 1), this is the first two axis in spatial dimensions.
            allow_missing_keys: don't raise exception if key is missing.
        """
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.max_k = max_k
        self.spatial_axes = spatial_axes
        # number of 90-degree turns drawn at each `randomize()` call
        self._rand_k = 0

    def randomize(self, data: Optional[Any] = None) -> None:
        # draw k first, then the do/don't decision, to keep the RNG stream stable
        self._rand_k = self.R.randint(self.max_k) + 1
        super().randomize(None)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        # return type narrowed from Mapping to Dict for consistency with the
        # other dictionary transforms (a Dict is what is actually returned)
        self.randomize()
        d = dict(data)
        if not self._do_transform:
            return d
        # only build the rotator when we will actually use it
        rotator = Rotate90(self._rand_k, self.spatial_axes)
        for key in self.key_iterator(d):
            d[key] = rotator(d[key])
        return d
class Resized(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Resize`.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`monai.transforms.compose.MapTransform`
        spatial_size: expected shape of spatial dimensions after resize operation.
            if the components of the `spatial_size` are non-positive values, the transform will use the
            corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
            to `(32, 64)` if the second spatial dimension size of img is `64`.
        mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
            The interpolation mode. Defaults to ``"area"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        align_corners: This only has an effect when mode is
            'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
            It also can be a sequence of bool or None, each element corresponds to a key in ``keys``.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        spatial_size: Union[Sequence[int], int],
        mode: InterpolateModeSequence = InterpolateMode.AREA,
        align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        # broadcast per-key options so each key gets its own mode/align_corners
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
        self.resizer = Resize(spatial_size=spatial_size)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        d = dict(data)
        for key, interp_mode, corners in self.key_iterator(d, self.mode, self.align_corners):
            d[key] = self.resizer(d[key], mode=interp_mode, align_corners=corners)
        return d
class Affined(RandomizableTransform, MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Affine`.

    NOTE(review): this transform is deterministic and never calls
    ``RandomizableTransform.__init__`` (only ``MapTransform.__init__`` below),
    so the ``RandomizableTransform`` base looks unused here — confirm whether
    callers rely on the ``isinstance`` relationship before dropping it.
    """

    def __init__(
        self,
        keys: KeysCollection,
        rotate_params: Optional[Union[Sequence[float], float]] = None,
        shear_params: Optional[Union[Sequence[float], float]] = None,
        translate_params: Optional[Union[Sequence[float], float]] = None,
        scale_params: Optional[Union[Sequence[float], float]] = None,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = False,
        device: Optional[torch.device] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
            rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D.
                Defaults to no rotation.
            shear_params: a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing.
            translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in
                pixel/voxel relative to the center of the input image. Defaults to no translation.
            scale_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Defaults to no scaling.
            spatial_size: output image spatial size.
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if the components of the `spatial_size` are non-positive values, the transform will use the
                corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
                to `(32, 64)` if the second spatial dimension size of img is `64`.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            as_tensor_output: the computation is implemented using pytorch tensors, this option specifies
                whether to convert it back to numpy arrays.
            device: device on which the tensor will be allocated.
            allow_missing_keys: don't raise exception if key is missing.

        See also:
            - :py:class:`monai.transforms.compose.MapTransform`
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
        """
        MapTransform.__init__(self, keys, allow_missing_keys)
        self.affine = Affine(
            rotate_params=rotate_params,
            shear_params=shear_params,
            translate_params=translate_params,
            scale_params=scale_params,
            spatial_size=spatial_size,
            as_tensor_output=as_tensor_output,
            device=device,
        )
        # per-key interpolation / padding settings
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))

    def __call__(
        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
        d = dict(data)
        for key, interp_mode, pad_mode in self.key_iterator(d, self.mode, self.padding_mode):
            d[key] = self.affine(d[key], mode=interp_mode, padding_mode=pad_mode)
        return d
class RandAffined(RandomizableTransform, MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.RandAffine`.
    """

    def __init__(
        self,
        keys: KeysCollection,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        prob: float = 0.1,
        rotate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        shear_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        translate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        scale_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
            spatial_size: output image spatial size.
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if the components of the `spatial_size` are non-positive values, the transform will use the
                corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
                to `(32, 64)` if the second spatial dimension size of img is `64`.
            prob: probability of returning a randomized affine grid.
                defaults to 0.1, with 10% chance returns a randomized grid.
            rotate_range: angle range in radians. If element `i` is iterable, then
                `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
                for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can
                be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range
                `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0
                and nothing for the remaining dimensions.
            shear_range: shear_range with format matching `rotate_range`.
            translate_range: translate_range with format matching `rotate_range`.
            scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result.
                This allows 0 to correspond to no change (i.e., a scaling of 1).
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            as_tensor_output: the computation is implemented using pytorch tensors, this option specifies
                whether to convert it back to numpy arrays.
            device: device on which the tensor will be allocated.
            allow_missing_keys: don't raise exception if key is missing.

        See also:
            - :py:class:`monai.transforms.compose.MapTransform`
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
        """
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.rand_affine = RandAffine(
            prob=1.0,  # the probability gate is applied by this wrapper, not the inner transform
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            spatial_size=spatial_size,
            as_tensor_output=as_tensor_output,
            device=device,
        )
        # per-key interpolation / padding settings, broadcast to len(keys)
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))

    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "RandAffined":
        # keep the wrapped RandAffine's RNG in sync with this transform's RNG
        self.rand_affine.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self

    def randomize(self, data: Optional[Any] = None) -> None:
        # draws both the do/don't decision and the inner affine parameters
        super().randomize(None)
        self.rand_affine.randomize()

    def __call__(
        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
        d = dict(data)
        self.randomize()
        # NOTE(review): reads data[self.keys[0]], which raises KeyError when the
        # first key is absent even if allow_missing_keys=True — confirm intent.
        # shape[1:] assumes channel-first arrays (spatial dims follow dim 0) — TODO confirm.
        sp_size = fall_back_tuple(self.rand_affine.spatial_size, data[self.keys[0]].shape[1:])
        if self._do_transform:
            # randomized affine grid for this call
            grid = self.rand_affine.rand_affine_grid(spatial_size=sp_size)
        else:
            # identity grid: arrays are still resampled so output size stays sp_size
            grid = create_grid(spatial_size=sp_size)
        # one shared grid is applied to every key, with per-key mode/padding
        for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode):
            d[key] = self.rand_affine.resampler(d[key], grid, mode=mode, padding_mode=padding_mode)
        return d
class Rand2DElasticd(RandomizableTransform, MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Rand2DElastic`.
    """

    def __init__(
        self,
        keys: KeysCollection,
        spacing: Union[Tuple[float, float], float],
        magnitude_range: Tuple[float, float],
        spatial_size: Optional[Union[Tuple[int, int], int]] = None,
        prob: float = 0.1,
        rotate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        shear_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        translate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        scale_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = False,
        device: Optional[torch.device] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
            spacing: distance in between the control points.
            magnitude_range: 2 int numbers, the random offsets will be generated from
                ``uniform[magnitude[0], magnitude[1])``.
            spatial_size: specifying output image spatial size [h, w].
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if the components of the `spatial_size` are non-positive values, the transform will use the
                corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
                to `(32, 64)` if the second spatial dimension size of img is `64`.
            prob: probability of returning a randomized affine grid.
                defaults to 0.1, with 10% chance returns a randomized grid,
                otherwise returns a ``spatial_size`` centered area extracted from the input image.
            rotate_range: angle range in radians. If element `i` is iterable, then
                `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
                for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can
                be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range
                `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0
                and nothing for the remaining dimensions.
            shear_range: shear_range with format matching `rotate_range`.
            translate_range: translate_range with format matching `rotate_range`.
            scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result.
                This allows 0 to correspond to no change (i.e., a scaling of 1).
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            as_tensor_output: the computation is implemented using pytorch tensors, this option specifies
                whether to convert it back to numpy arrays.
            device: device on which the tensor will be allocated.
            allow_missing_keys: don't raise exception if key is missing.

        See also:
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
            - :py:class:`Affine` for the affine transformation parameters configurations.
        """
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.rand_2d_elastic = Rand2DElastic(
            spacing=spacing,
            magnitude_range=magnitude_range,
            prob=1.0,  # the probability gate is applied by this wrapper, not the inner transform
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            spatial_size=spatial_size,
            as_tensor_output=as_tensor_output,
            device=device,
        )
        # per-key interpolation / padding settings, broadcast to len(keys)
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))

    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Rand2DElasticd":
        # keep the wrapped transform's RNG in sync with this transform's RNG
        self.rand_2d_elastic.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self

    def randomize(self, spatial_size: Sequence[int]) -> None:
        # draws the do/don't decision, then the inner deformation parameters
        super().randomize(None)
        self.rand_2d_elastic.randomize(spatial_size)

    def __call__(
        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
        d = dict(data)
        # NOTE(review): reads data[self.keys[0]], which raises KeyError when the
        # first key is absent even if allow_missing_keys=True — confirm intent.
        # shape[1:] assumes channel-first arrays (spatial dims follow dim 0) — TODO confirm.
        sp_size = fall_back_tuple(self.rand_2d_elastic.spatial_size, data[self.keys[0]].shape[1:])
        self.randomize(spatial_size=sp_size)
        if self._do_transform:
            # coarse control-point offsets, composed with a random affine ...
            grid = self.rand_2d_elastic.deform_grid(spatial_size=sp_size)
            grid = self.rand_2d_elastic.rand_affine_grid(grid=grid)
            # ... then upsampled by the control-point spacing to full resolution
            grid = torch.nn.functional.interpolate(  # type: ignore
                recompute_scale_factor=True,
                input=grid.unsqueeze(0),
                scale_factor=ensure_tuple_rep(self.rand_2d_elastic.deform_grid.spacing, 2),
                mode=InterpolateMode.BICUBIC.value,
                align_corners=False,
            )
            # trim the upsampled grid back to exactly sp_size
            grid = CenterSpatialCrop(roi_size=sp_size)(grid[0])
        else:
            # identity grid: arrays are still resampled so output size stays sp_size
            grid = create_grid(spatial_size=sp_size)
        # one shared grid is applied to every key, with per-key mode/padding
        for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode):
            d[key] = self.rand_2d_elastic.resampler(d[key], grid, mode=mode, padding_mode=padding_mode)
        return d
class Rand3DElasticd(RandomizableTransform, MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Rand3DElastic`.
    """

    def __init__(
        self,
        keys: KeysCollection,
        sigma_range: Tuple[float, float],
        magnitude_range: Tuple[float, float],
        spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,
        prob: float = 0.1,
        rotate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        shear_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        translate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        scale_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = False,
        device: Optional[torch.device] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
            sigma_range: a Gaussian kernel with standard deviation sampled from
                ``uniform[sigma_range[0], sigma_range[1])`` will be used to smooth the random offset grid.
            magnitude_range: the random offsets on the grid will be generated from
                ``uniform[magnitude[0], magnitude[1])``.
            spatial_size: specifying output image spatial size [h, w, d].
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if the components of the `spatial_size` are non-positive values, the transform will use the
                corresponding components of img size. For example, `spatial_size=(32, 32, -1)` will be adapted
                to `(32, 32, 64)` if the third spatial dimension size of img is `64`.
            prob: probability of returning a randomized affine grid.
                defaults to 0.1, with 10% chance returns a randomized grid,
                otherwise returns a ``spatial_size`` centered area extracted from the input image.
            rotate_range: angle range in radians. If element `i` is iterable, then
                `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
                for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can
                be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range
                `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0
                and nothing for the remaining dimensions.
            shear_range: shear_range with format matching `rotate_range`.
            translate_range: translate_range with format matching `rotate_range`.
            scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result.
                This allows 0 to correspond to no change (i.e., a scaling of 1).
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            as_tensor_output: the computation is implemented using pytorch tensors, this option specifies
                whether to convert it back to numpy arrays.
            device: device on which the tensor will be allocated.
            allow_missing_keys: don't raise exception if key is missing.

        See also:
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
            - :py:class:`Affine` for the affine transformation parameters configurations.
        """
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.rand_3d_elastic = Rand3DElastic(
            sigma_range=sigma_range,
            magnitude_range=magnitude_range,
            prob=1.0,  # the probability gate is applied by this wrapper, not the inner transform
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            spatial_size=spatial_size,
            as_tensor_output=as_tensor_output,
            device=device,
        )
        # per-key interpolation / padding settings, broadcast to len(keys)
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))

    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Rand3DElasticd":
        # keep the wrapped transform's RNG in sync with this transform's RNG
        self.rand_3d_elastic.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self

    def randomize(self, grid_size: Sequence[int]) -> None:
        # draws the do/don't decision, then the inner offsets/affine parameters
        super().randomize(None)
        self.rand_3d_elastic.randomize(grid_size)

    def __call__(
        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
        d = dict(data)
        # NOTE(review): reads data[self.keys[0]], which raises KeyError when the
        # first key is absent even if allow_missing_keys=True — confirm intent.
        # shape[1:] assumes channel-first arrays (spatial dims follow dim 0) — TODO confirm.
        sp_size = fall_back_tuple(self.rand_3d_elastic.spatial_size, data[self.keys[0]].shape[1:])
        self.randomize(grid_size=sp_size)
        grid = create_grid(spatial_size=sp_size)
        if self._do_transform:
            device = self.rand_3d_elastic.device
            grid = torch.tensor(grid).to(device)
            # Gaussian-smooth the random offsets, scale by magnitude, and add them
            # to the first 3 (coordinate) channels of the identity grid
            gaussian = GaussianFilter(spatial_dims=3, sigma=self.rand_3d_elastic.sigma, truncated=3.0).to(device)
            offset = torch.tensor(self.rand_3d_elastic.rand_offset, device=device).unsqueeze(0)
            grid[:3] += gaussian(offset)[0] * self.rand_3d_elastic.magnitude
            # compose with the random affine transform
            grid = self.rand_3d_elastic.rand_affine_grid(grid=grid)
        # one shared grid is applied to every key, with per-key mode/padding
        for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode):
            d[key] = self.rand_3d_elastic.resampler(d[key], grid, mode=mode, padding_mode=padding_mode)
        return d
class Flipd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Flip`.

    See `numpy.flip` for additional details:
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html

    Args:
        keys: Keys to pick data for transformation.
        spatial_axis: Spatial axes along which to flip over. Default is None.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        spatial_axis: Optional[Union[Sequence[int], int]] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self.flipper = Flip(spatial_axis=spatial_axis)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        d = dict(data)
        # flip every selected key along the configured spatial axes
        for item_key in self.key_iterator(d):
            d[item_key] = self.flipper(d[item_key])
        return d
class RandFlipd(RandomizableTransform, MapTransform):
    """
    Dictionary-based version :py:class:`monai.transforms.RandFlip`.

    See `numpy.flip` for additional details:
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html

    Args:
        keys: Keys to pick data for transformation.
        prob: Probability of flipping.
        spatial_axis: Spatial axes along which to flip over. Default is None.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        prob: float = 0.1,
        spatial_axis: Optional[Union[Sequence[int], int]] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.spatial_axis = spatial_axis
        # the deterministic flip is prepared once; randomness only gates its use
        self.flipper = Flip(spatial_axis=spatial_axis)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        self.randomize(None)
        d = dict(data)
        for item_key in self.key_iterator(d):
            if not self._do_transform:
                continue
            d[item_key] = self.flipper(d[item_key])
        return d
class RandAxisFlipd(RandomizableTransform, MapTransform):
    """
    Dictionary-based version :py:class:`monai.transforms.RandAxisFlip`.

    See `numpy.flip` for additional details:
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html

    Args:
        keys: Keys to pick data for transformation.
        prob: Probability of flipping.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(self, keys: KeysCollection, prob: float = 0.1, allow_missing_keys: bool = False) -> None:
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        # spatial axis sampled at each call; set by randomize()
        self._axis: Optional[int] = None

    def randomize(self, data: np.ndarray) -> None:
        super().randomize(None)
        # data is channel-first, so it has data.ndim - 1 spatial axes to pick from
        self._axis = self.R.randint(data.ndim - 1)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        d = dict(data)
        # Fix: sample the flip axis from the first *available* key instead of
        # unconditionally reading data[self.keys[0]], which raised KeyError when
        # the first key was absent even with allow_missing_keys=True.
        first_key = next((k for k in self.keys if k in d), None)
        if first_key is None:
            if not self.allow_missing_keys:
                raise KeyError(f"None of the keys {self.keys} are present in the data.")
            return d
        self.randomize(data=d[first_key])
        flipper = Flip(spatial_axis=self._axis)
        for key in self.key_iterator(d):
            if self._do_transform:
                d[key] = flipper(d[key])
        return d
class Rotated(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Rotate`.

    Args:
        keys: Keys to pick data for transformation.
        angle: Rotation angle(s) in radians.
        keep_size: If it is False, the output shape is adapted so that the
            input array is contained completely in the output.
            If it is True, the output shape is the same as the input. Default is True.
        mode: {``"bilinear"``, ``"nearest"``}
            Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
            Padding mode for outside grid values. Defaults to ``"border"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        align_corners: Defaults to False.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            It also can be a sequence of bool, each element corresponds to a key in ``keys``.
        dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
            If None, use the data type of input data. To be compatible with other modules,
            the output data type is always ``np.float32``.
            It also can be a sequence of dtype or None, each element corresponds to a key in ``keys``.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        angle: Union[Sequence[float], float],
        keep_size: bool = True,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER,
        align_corners: Union[Sequence[bool], bool] = False,
        dtype: Union[Sequence[DtypeLike], DtypeLike] = np.float64,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self.rotator = Rotate(angle=angle, keep_size=keep_size)
        # broadcast the per-key resampling options to len(keys)
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
        self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
        self.dtype = ensure_tuple_rep(dtype, len(self.keys))

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        d = dict(data)
        iterator = self.key_iterator(d, self.mode, self.padding_mode, self.align_corners, self.dtype)
        for key, interp_mode, pad_mode, corners, out_dtype in iterator:
            d[key] = self.rotator(
                d[key],
                mode=interp_mode,
                padding_mode=pad_mode,
                align_corners=corners,
                dtype=out_dtype,
            )
        return d
class RandRotated(RandomizableTransform, MapTransform):
    """
    Dictionary-based version :py:class:`monai.transforms.RandRotate`
    Randomly rotates the input arrays.

    Args:
        keys: Keys to pick data for transformation.
        range_x: Range of rotation angle in radians in the plane defined by the first and second axes.
            If single number, angle is uniformly sampled from (-range_x, range_x).
        range_y: Range of rotation angle in radians in the plane defined by the first and third axes.
            If single number, angle is uniformly sampled from (-range_y, range_y).
        range_z: Range of rotation angle in radians in the plane defined by the second and third axes.
            If single number, angle is uniformly sampled from (-range_z, range_z).
        prob: Probability of rotation.
        keep_size: If it is False, the output shape is adapted so that the
            input array is contained completely in the output.
            If it is True, the output shape is the same as the input. Default is True.
        mode: {``"bilinear"``, ``"nearest"``}
            Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
            Padding mode for outside grid values. Defaults to ``"border"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        align_corners: Defaults to False.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
            It also can be a sequence of bool, each element corresponds to a key in ``keys``.
        dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
            If None, use the data type of input data. To be compatible with other modules,
            the output data type is always ``np.float32``.
            It also can be a sequence of dtype or None, each element corresponds to a key in ``keys``.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        range_x: Union[Tuple[float, float], float] = 0.0,
        range_y: Union[Tuple[float, float], float] = 0.0,
        range_z: Union[Tuple[float, float], float] = 0.0,
        prob: float = 0.1,
        keep_size: bool = True,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER,
        align_corners: Union[Sequence[bool], bool] = False,
        dtype: Union[Sequence[DtypeLike], DtypeLike] = np.float64,
        allow_missing_keys: bool = False,
    ) -> None:
        # explicit base-class inits (not super()) so each base gets exactly its own arguments
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        # normalize each range to a sorted (low, high) pair; a scalar x becomes (-x, x)
        self.range_x = ensure_tuple(range_x)
        if len(self.range_x) == 1:
            self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))
        self.range_y = ensure_tuple(range_y)
        if len(self.range_y) == 1:
            self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))
        self.range_z = ensure_tuple(range_z)
        if len(self.range_z) == 1:
            self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))
        self.keep_size = keep_size
        # resampling options are replicated to one value per key
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
        self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
        self.dtype = ensure_tuple_rep(dtype, len(self.keys))
        # last sampled angles (radians); refreshed by `randomize` on every call
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0

    def randomize(self, data: Optional[Any] = None) -> None:
        # base class draws the probability gate first, then the three angles are
        # drawn in a fixed x, y, z order — this ordering fixes RNG reproducibility
        super().randomize(None)
        self.x = self.R.uniform(low=self.range_x[0], high=self.range_x[1])
        self.y = self.R.uniform(low=self.range_y[0], high=self.range_y[1])
        self.z = self.R.uniform(low=self.range_z[0], high=self.range_z[1])

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        # randomize unconditionally so the RNG stream advances even when the
        # transform does not fire
        self.randomize()
        d = dict(data)
        if not self._do_transform:
            return d
        # NOTE(review): uses the first key to decide 2D vs 3D; with
        # allow_missing_keys=True a missing first key raises here — confirm against callers.
        # ndim == 3 presumably means channel-first 2D data (C, H, W) — TODO confirm.
        rotator = Rotate(
            angle=self.x if d[self.keys[0]].ndim == 3 else (self.x, self.y, self.z),
            keep_size=self.keep_size,
        )
        for key, mode, padding_mode, align_corners, dtype in self.key_iterator(
            d, self.mode, self.padding_mode, self.align_corners, self.dtype
        ):
            d[key] = rotator(
                d[key],
                mode=mode,
                padding_mode=padding_mode,
                align_corners=align_corners,
                dtype=dtype,
            )
        return d
class Zoomd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Zoom`.

    Args:
        keys: Keys to pick data for transformation.
        zoom: zoom factor along the spatial axes; a float applies the same
            factor to every spatial axis, a sequence supplies one factor per axis.
        mode: interpolation mode, one of ``"nearest"``, ``"linear"``,
            ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``;
            defaults to ``"area"``. May also be a sequence with one entry per key.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
        padding_mode: numpy padding mode used after zooming, e.g. ``"constant"``,
            ``"edge"``, ``"reflect"``, ``"wrap"`` or a callable.
            See also: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
        align_corners: only effective when mode is 'linear', 'bilinear',
            'bicubic' or 'trilinear'; default None. May also be a sequence of
            bool or None with one entry per key.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
        keep_size: whether to keep the original size (padding if needed), default True.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        zoom: Union[Sequence[float], float],
        mode: InterpolateModeSequence = InterpolateMode.AREA,
        padding_mode: NumpyPadModeSequence = NumpyPadMode.EDGE,
        align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None,
        keep_size: bool = True,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        # each option may be a single value or a per-key sequence; normalize to per-key tuples
        n_keys = len(self.keys)
        self.mode = ensure_tuple_rep(mode, n_keys)
        self.padding_mode = ensure_tuple_rep(padding_mode, n_keys)
        self.align_corners = ensure_tuple_rep(align_corners, n_keys)
        # one deterministic zoomer shared by all keys
        self.zoomer = Zoom(zoom=zoom, keep_size=keep_size)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        """Zoom every selected array in ``data`` by the configured factor."""
        out = dict(data)
        options = self.key_iterator(out, self.mode, self.padding_mode, self.align_corners)
        for key, mode, padding_mode, align_corners in options:
            out[key] = self.zoomer(out[key], mode=mode, padding_mode=padding_mode, align_corners=align_corners)
        return out
class RandZoomd(RandomizableTransform, MapTransform):
    """
    Dict-based version :py:class:`monai.transforms.RandZoom`.

    Args:
        keys: Keys to pick data for transformation.
        prob: Probability of zooming.
        min_zoom: Min zoom factor. Can be float or sequence same size as image.
            If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
            to keep the original spatial shape ratio.
            If a sequence, min_zoom should contain one value for each spatial axis.
            If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
        max_zoom: Max zoom factor. Can be float or sequence same size as image.
            If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
            to keep the original spatial shape ratio.
            If a sequence, max_zoom should contain one value for each spatial axis.
            If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
        mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
            The interpolation mode. Defaults to ``"area"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        padding_mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``, ``"median"``,
            ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``, ``"<function>"``}
            The mode to pad data after zooming.
            See also: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
        align_corners: This only has an effect when mode is
            'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
            It also can be a sequence of bool or None, each element corresponds to a key in ``keys``.
        keep_size: Should keep original size (pad if needed), default is True.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        prob: float = 0.1,
        min_zoom: Union[Sequence[float], float] = 0.9,
        max_zoom: Union[Sequence[float], float] = 1.1,
        mode: InterpolateModeSequence = InterpolateMode.AREA,
        padding_mode: NumpyPadModeSequence = NumpyPadMode.EDGE,
        align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None,
        keep_size: bool = True,
        allow_missing_keys: bool = False,
    ) -> None:
        # explicit base-class inits (not super()) so each base gets exactly its own arguments
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.min_zoom = ensure_tuple(min_zoom)
        self.max_zoom = ensure_tuple(max_zoom)
        if len(self.min_zoom) != len(self.max_zoom):
            raise AssertionError("min_zoom and max_zoom must have same length.")
        # resampling options are replicated to one value per key
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
        self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
        self.keep_size = keep_size
        # last sampled zoom factors; refreshed by `randomize` and possibly
        # re-expanded in `__call__` to match the image dimensionality
        self._zoom: Sequence[float] = [1.0]

    def randomize(self, data: Optional[Any] = None) -> None:
        # probability gate first, then one uniform draw per (min, max) pair —
        # this ordering fixes RNG reproducibility
        super().randomize(None)
        self._zoom = [self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)]

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        # match the spatial dim of first item
        self.randomize()
        d = dict(data)
        if not self._do_transform:
            return d
        # NOTE(review): assumes the first key is present; with allow_missing_keys=True
        # a missing first key raises here — confirm against callers.
        img_dims = data[self.keys[0]].ndim
        if len(self._zoom) == 1:
            # to keep the spatial shape ratio, use same random zoom factor for all dims
            self._zoom = ensure_tuple_rep(self._zoom[0], img_dims - 1)
        elif len(self._zoom) == 2 and img_dims > 3:
            # if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim
            self._zoom = ensure_tuple_rep(self._zoom[0], img_dims - 2) + ensure_tuple(self._zoom[-1])
        # a single zoomer built from the (possibly expanded) factors is applied to every key
        zoomer = Zoom(self._zoom, keep_size=self.keep_size)
        for key, mode, padding_mode, align_corners in self.key_iterator(
            d, self.mode, self.padding_mode, self.align_corners
        ):
            d[key] = zoomer(
                d[key],
                mode=mode,
                padding_mode=padding_mode,
                align_corners=align_corners,
            )
        return d
# Backwards-compatible CamelCase / *Dict aliases for the dictionary-based transforms.
SpacingD = SpacingDict = Spacingd
OrientationD = OrientationDict = Orientationd
Rotate90D = Rotate90Dict = Rotate90d
RandRotate90D = RandRotate90Dict = RandRotate90d
ResizeD = ResizeDict = Resized
AffineD = AffineDict = Affined
RandAffineD = RandAffineDict = RandAffined
Rand2DElasticD = Rand2DElasticDict = Rand2DElasticd
Rand3DElasticD = Rand3DElasticDict = Rand3DElasticd
FlipD = FlipDict = Flipd
RandFlipD = RandFlipDict = RandFlipd
RandAxisFlipD = RandAxisFlipDict = RandAxisFlipd
RotateD = RotateDict = Rotated
RandRotateD = RandRotateDict = RandRotated
ZoomD = ZoomDict = Zoomd
RandZoomD = RandZoomDict = RandZoomd
# monai/transforms/spatial/dictionary.py
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of dictionary-based wrappers around the "vanilla" transforms for spatial operations
defined in :py:class:`monai.transforms.spatial.array`.
Class names are ended with 'd' to denote dictionary-based transforms.
"""
from typing import Any, Dict, Hashable, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from monai.config import DtypeLike, KeysCollection
from monai.networks.layers.simplelayers import GaussianFilter
from monai.transforms.croppad.array import CenterSpatialCrop
from monai.transforms.spatial.array import (
Affine,
Flip,
Orientation,
Rand2DElastic,
Rand3DElastic,
RandAffine,
Resize,
Rotate,
Rotate90,
Spacing,
Zoom,
)
from monai.transforms.transform import MapTransform, RandomizableTransform
from monai.transforms.utils import create_grid
from monai.utils import (
GridSampleMode,
GridSamplePadMode,
InterpolateMode,
NumpyPadMode,
ensure_tuple,
ensure_tuple_rep,
fall_back_tuple,
)
# Public API of this module: the dictionary-based transforms plus their
# CamelCase / *Dict aliases.
__all__ = [
    "Spacingd",
    "Orientationd",
    "Rotate90d",
    "RandRotate90d",
    "Resized",
    "Affined",
    "RandAffined",
    "Rand2DElasticd",
    "Rand3DElasticd",
    "Flipd",
    "RandFlipd",
    "RandAxisFlipd",
    "Rotated",
    "RandRotated",
    "Zoomd",
    "RandZoomd",
    "SpacingD",
    "SpacingDict",
    "OrientationD",
    "OrientationDict",
    "Rotate90D",
    "Rotate90Dict",
    "RandRotate90D",
    "RandRotate90Dict",
    "ResizeD",
    "ResizeDict",
    "AffineD",
    "AffineDict",
    "RandAffineD",
    "RandAffineDict",
    "Rand2DElasticD",
    "Rand2DElasticDict",
    "Rand3DElasticD",
    "Rand3DElasticDict",
    "FlipD",
    "FlipDict",
    "RandFlipD",
    "RandFlipDict",
    "RandAxisFlipD",
    "RandAxisFlipDict",
    "RotateD",
    "RotateDict",
    "RandRotateD",
    "RandRotateDict",
    "ZoomD",
    "ZoomDict",
    "RandZoomD",
    "RandZoomDict",
]

# Type aliases: each resampling option may be given once (applied to every key)
# or as a sequence with one entry per key.
GridSampleModeSequence = Union[Sequence[Union[GridSampleMode, str]], GridSampleMode, str]
GridSamplePadModeSequence = Union[Sequence[Union[GridSamplePadMode, str]], GridSamplePadMode, str]
InterpolateModeSequence = Union[Sequence[Union[InterpolateMode, str]], InterpolateMode, str]
NumpyPadModeSequence = Union[Sequence[Union[NumpyPadMode, str]], NumpyPadMode, str]
class Spacingd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Spacing`.

    This transform assumes the ``data`` dictionary has a key for the input
    data's metadata and contains `affine` field. The key is formed by ``key_{meta_key_postfix}``.

    After resampling the input array, this transform will write the new affine
    to the `affine` field of metadata which is formed by ``key_{meta_key_postfix}``.

    see also:
        :py:class:`monai.transforms.Spacing`
    """

    def __init__(
        self,
        keys: KeysCollection,
        pixdim: Sequence[float],
        diagonal: bool = False,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER,
        align_corners: Union[Sequence[bool], bool] = False,
        dtype: Optional[Union[Sequence[DtypeLike], DtypeLike]] = np.float64,
        meta_key_postfix: str = "meta_dict",
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            pixdim: output voxel spacing.
            diagonal: whether to resample the input to have a diagonal affine matrix.
                If True, the input data is resampled to the following affine::

                    np.diag((pixdim_0, pixdim_1, pixdim_2, 1))

                This effectively resets the volume to the world coordinate system (RAS+ in nibabel).
                The original orientation, rotation, shearing are not preserved.

                If False, the axes orientation, orthogonal rotation and
                translations components from the original affine will be
                preserved in the target affine. This option will not flip/swap
                axes against the original ones.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of bool, each element corresponds to a key in ``keys``.
            dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
                If None, use the data type of input data. To be compatible with other modules,
                the output data type is always ``np.float32``.
                It also can be a sequence of dtypes, each element corresponds to a key in ``keys``.
            meta_key_postfix: use `key_{postfix}` to fetch the meta data according to the key data,
                default is `meta_dict`, the meta data is a dictionary object.
                For example, to handle key `image`, read/write affine matrices from the
                metadata `image_meta_dict` dictionary's `affine` field.
            allow_missing_keys: don't raise exception if key is missing.

        Raises:
            TypeError: When ``meta_key_postfix`` is not a ``str``.

        """
        super().__init__(keys, allow_missing_keys)
        # one Spacing transform is shared by all keys; the per-key options below
        # are passed to it at call time
        self.spacing_transform = Spacing(pixdim, diagonal=diagonal)
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
        self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
        self.dtype = ensure_tuple_rep(dtype, len(self.keys))
        if not isinstance(meta_key_postfix, str):
            raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
        self.meta_key_postfix = meta_key_postfix

    def __call__(
        self, data: Mapping[Union[Hashable, str], Dict[str, np.ndarray]]
    ) -> Dict[Union[Hashable, str], Union[np.ndarray, Dict[str, np.ndarray]]]:
        d: Dict = dict(data)
        for key, mode, padding_mode, align_corners, dtype in self.key_iterator(
            d, self.mode, self.padding_mode, self.align_corners, self.dtype
        ):
            # metadata lives under e.g. "image_meta_dict" for key "image"
            meta_data = d[f"{key}_{self.meta_key_postfix}"]
            # resample array of each corresponding key
            # using affine fetched from d[affine_key]
            d[key], _, new_affine = self.spacing_transform(
                data_array=np.asarray(d[key]),
                affine=meta_data["affine"],
                mode=mode,
                padding_mode=padding_mode,
                align_corners=align_corners,
                dtype=dtype,
            )
            # set the 'affine' key: the metadata dict is updated in place
            meta_data["affine"] = new_affine
        return d
class Orientationd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Orientation`.

    Expects the ``data`` dictionary to carry, for every selected key, a
    metadata dictionary under ``key_{meta_key_postfix}`` containing an
    `affine` field. After reorienting an array, the updated affine is written
    back into that same metadata dictionary.

    Args:
        axcodes: N elements sequence for spatial ND input's orientation,
            e.g. ``axcodes='RAS'`` for 3D orientation
            (Left, Right), (Posterior, Anterior), (Inferior, Superior).
            Default orientation labels are 'L'/'R' for the first dimension,
            'P'/'A' for the second, 'I'/'S' for the third.
        as_closest_canonical: if True, load the image as closest to canonical axis format.
        labels: optional, None or sequence of (2,) sequences giving labels for
            (beginning, end) of each output axis.
            Defaults to ``(('L', 'R'), ('P', 'A'), ('I', 'S'))``.
        meta_key_postfix: use `key_{postfix}` to fetch the metadata for each key;
            default is `meta_dict`. For example, for key `image` the affine is
            read from and written to ``image_meta_dict['affine']``.
        allow_missing_keys: don't raise exception if key is missing.

    Raises:
        TypeError: When ``meta_key_postfix`` is not a ``str``.

    See Also:
        `nibabel.orientations.ornt2axcodes`.
    """

    def __init__(
        self,
        keys: KeysCollection,
        axcodes: Optional[str] = None,
        as_closest_canonical: bool = False,
        labels: Optional[Sequence[Tuple[str, str]]] = tuple(zip("LPI", "RAS")),
        meta_key_postfix: str = "meta_dict",
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        # a single Orientation transform serves every key
        self.ornt_transform = Orientation(axcodes=axcodes, as_closest_canonical=as_closest_canonical, labels=labels)
        if not isinstance(meta_key_postfix, str):
            raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
        self.meta_key_postfix = meta_key_postfix

    def __call__(
        self, data: Mapping[Union[Hashable, str], Dict[str, np.ndarray]]
    ) -> Dict[Union[Hashable, str], Union[np.ndarray, Dict[str, np.ndarray]]]:
        d: Dict = dict(data)
        for key in self.key_iterator(d):
            meta = d[f"{key}_{self.meta_key_postfix}"]
            reoriented, _, updated_affine = self.ornt_transform(d[key], affine=meta["affine"])
            d[key] = reoriented
            # record the post-reorientation affine in the metadata (in place)
            meta["affine"] = updated_affine
        return d
class Rotate90d(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Rotate90`.

    Args:
        k: number of times to rotate by 90 degrees.
        spatial_axes: two ints selecting the plane of rotation;
            default ``(0, 1)``, i.e. the first two spatial axes.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self, keys: KeysCollection, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1), allow_missing_keys: bool = False
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        # one deterministic rotator shared by all keys
        self.rotator = Rotate90(k, spatial_axes)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        """Rotate every selected array by k * 90 degrees in the configured plane."""
        out = dict(data)
        for key in self.key_iterator(out):
            out[key] = self.rotator(out[key])
        return out
class RandRotate90d(RandomizableTransform, MapTransform):
    """
    Dictionary-based version :py:class:`monai.transforms.RandRotate90`.

    With probability `prob`, rotates each selected array by a random multiple
    of 90 degrees in the plane given by `spatial_axes`.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`monai.transforms.compose.MapTransform`
        prob: probability of rotating (default 0.1: 10% chance of rotation).
        max_k: the rotation count is sampled as ``np.random.randint(max_k) + 1``
            (default 3).
        spatial_axes: two ints selecting the plane of rotation;
            default ``(0, 1)``, i.e. the first two spatial axes.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        prob: float = 0.1,
        max_k: int = 3,
        spatial_axes: Tuple[int, int] = (0, 1),
        allow_missing_keys: bool = False,
    ) -> None:
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.max_k = max_k
        self.spatial_axes = spatial_axes
        # last sampled rotation count; refreshed by `randomize` on every call
        self._rand_k = 0

    def randomize(self, data: Optional[Any] = None) -> None:
        # draw the rotation count first, then the probability gate —
        # this exact order fixes RNG reproducibility
        self._rand_k = self.R.randint(self.max_k) + 1
        super().randomize(None)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Mapping[Hashable, np.ndarray]:
        self.randomize()
        out = dict(data)
        # build the rotator unconditionally so argument validation behaves the
        # same whether or not the transform fires
        rotator = Rotate90(self._rand_k, self.spatial_axes)
        for key in self.key_iterator(out):
            if self._do_transform:
                out[key] = rotator(out[key])
        return out
class Resized(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Resize`.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`monai.transforms.compose.MapTransform`
        spatial_size: target spatial shape after resizing. Non-positive
            components are replaced by the matching component of the input
            image size, e.g. ``spatial_size=(32, -1)`` becomes ``(32, 64)``
            for an input whose second spatial dimension is 64.
        mode: interpolation mode, one of ``"nearest"``, ``"linear"``,
            ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``;
            defaults to ``"area"``. May also be a sequence with one entry per key.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
        align_corners: only effective when mode is 'linear', 'bilinear',
            'bicubic' or 'trilinear'; default None. May also be a sequence of
            bool or None with one entry per key.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        spatial_size: Union[Sequence[int], int],
        mode: InterpolateModeSequence = InterpolateMode.AREA,
        align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        # each option may be a single value or a per-key sequence; normalize to per-key tuples
        n_keys = len(self.keys)
        self.mode = ensure_tuple_rep(mode, n_keys)
        self.align_corners = ensure_tuple_rep(align_corners, n_keys)
        # one deterministic resizer shared by all keys
        self.resizer = Resize(spatial_size=spatial_size)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        """Resize every selected array in ``data`` to the configured spatial size."""
        out = dict(data)
        for key, mode, align_corners in self.key_iterator(out, self.mode, self.align_corners):
            out[key] = self.resizer(out[key], mode=mode, align_corners=align_corners)
        return out
class Affined(RandomizableTransform, MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Affine`.
    """

    def __init__(
        self,
        keys: KeysCollection,
        rotate_params: Optional[Union[Sequence[float], float]] = None,
        shear_params: Optional[Union[Sequence[float], float]] = None,
        translate_params: Optional[Union[Sequence[float], float]] = None,
        scale_params: Optional[Union[Sequence[float], float]] = None,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = False,
        device: Optional[torch.device] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
            rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D.
                Defaults to no rotation.
            shear_params: a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing.
            translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in
                pixel/voxel relative to the center of the input image. Defaults to no translation.
            scale_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Defaults to no scaling.
            spatial_size: output image spatial size.
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if the components of the `spatial_size` are non-positive values, the transform will use the
                corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
                to `(32, 64)` if the second spatial dimension size of img is `64`.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            as_tensor_output: the computation is implemented using pytorch tensors, this option specifies
                whether to convert it back to numpy arrays.
            device: device on which the tensor will be allocated.
            allow_missing_keys: don't raise exception if key is missing.

        See also:
            - :py:class:`monai.transforms.compose.MapTransform`
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
        """
        # NOTE(review): only MapTransform is initialized here — RandomizableTransform.__init__
        # is never called even though it is a base class. Affined performs no randomization
        # itself; confirm the inheritance is intentional before changing.
        MapTransform.__init__(self, keys, allow_missing_keys)
        # a single deterministic Affine transform shared by all keys
        self.affine = Affine(
            rotate_params=rotate_params,
            shear_params=shear_params,
            translate_params=translate_params,
            scale_params=scale_params,
            spatial_size=spatial_size,
            as_tensor_output=as_tensor_output,
            device=device,
        )
        # resampling options are replicated to one value per key
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))

    def __call__(
        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
        d = dict(data)
        for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode):
            d[key] = self.affine(d[key], mode=mode, padding_mode=padding_mode)
        return d
class RandAffined(RandomizableTransform, MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.RandAffine`.
"""
def __init__(
self,
keys: KeysCollection,
spatial_size: Optional[Union[Sequence[int], int]] = None,
prob: float = 0.1,
rotate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
shear_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
translate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
scale_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION,
as_tensor_output: bool = True,
device: Optional[torch.device] = None,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if the components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
prob: probability of returning a randomized affine grid.
defaults to 0.1, with 10% chance returns a randomized grid.
rotate_range: angle range in radians. If element `i` is iterable, then
`uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can
be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range
`[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0
and nothing for the remaining dimensions.
shear_range: shear_range with format matching `rotate_range`.
translate_range: translate_range with format matching `rotate_range`.
scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result.
This allows 0 to correspond to no change (i.e., a scaling of 1).
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
It also can be a sequence of string, each element corresponds to a key in ``keys``.
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"reflection"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
It also can be a sequence of string, each element corresponds to a key in ``keys``.
as_tensor_output: the computation is implemented using pytorch tensors, this option specifies
whether to convert it back to numpy arrays.
device: device on which the tensor will be allocated.
allow_missing_keys: don't raise exception if key is missing.
See also:
- :py:class:`monai.transforms.compose.MapTransform`
- :py:class:`RandAffineGrid` for the random affine parameters configurations.
"""
MapTransform.__init__(self, keys, allow_missing_keys)
RandomizableTransform.__init__(self, prob)
self.rand_affine = RandAffine(
prob=1.0, # because probability handled in this class
rotate_range=rotate_range,
shear_range=shear_range,
translate_range=translate_range,
scale_range=scale_range,
spatial_size=spatial_size,
as_tensor_output=as_tensor_output,
device=device,
)
self.mode = ensure_tuple_rep(mode, len(self.keys))
self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
def set_random_state(
self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
) -> "RandAffined":
self.rand_affine.set_random_state(seed, state)
super().set_random_state(seed, state)
return self
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
self.rand_affine.randomize()
    def __call__(
        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
        d = dict(data)
        # always consume the RNG so runs are reproducible regardless of prob
        self.randomize()
        # resolve non-positive spatial_size components from the first item's
        # spatial shape (shape[0] is the channel dimension)
        sp_size = fall_back_tuple(self.rand_affine.spatial_size, data[self.keys[0]].shape[1:])
        if self._do_transform:
            # one randomized affine grid shared by all keys
            grid = self.rand_affine.rand_affine_grid(spatial_size=sp_size)
        else:
            # identity grid: output is a spatial-size centered area of the input
            grid = create_grid(spatial_size=sp_size)
        for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode):
            d[key] = self.rand_affine.resampler(d[key], grid, mode=mode, padding_mode=padding_mode)
        return d
class Rand2DElasticd(RandomizableTransform, MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Rand2DElastic`.
    """
    def __init__(
        self,
        keys: KeysCollection,
        spacing: Union[Tuple[float, float], float],
        magnitude_range: Tuple[float, float],
        spatial_size: Optional[Union[Tuple[int, int], int]] = None,
        prob: float = 0.1,
        rotate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        shear_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        translate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        scale_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = False,
        device: Optional[torch.device] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
            spacing: distance in between the control points.
            magnitude_range: two float values, the random offsets will be generated from
                ``uniform[magnitude[0], magnitude[1])``.
            spatial_size: specifying output image spatial size [h, w].
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if the components of the `spatial_size` are non-positive values, the transform will use the
                corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
                to `(32, 64)` if the second spatial dimension size of img is `64`.
            prob: probability of returning a randomized affine grid.
                defaults to 0.1, with 10% chance returns a randomized grid,
                otherwise returns a ``spatial_size`` centered area extracted from the input image.
            rotate_range: angle range in radians. If element `i` is iterable, then
                `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
                for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can
                be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range
                `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0
                and nothing for the remaining dimensions.
            shear_range: shear_range with format matching `rotate_range`.
            translate_range: translate_range with format matching `rotate_range`.
            scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result.
                This allows 0 to correspond to no change (i.e., a scaling of 1).
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            as_tensor_output: the computation is implemented using pytorch tensors, this option specifies
                whether to convert it back to numpy arrays.
            device: device on which the tensor will be allocated.
            allow_missing_keys: don't raise exception if key is missing.
        See also:
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
            - :py:class:`Affine` for the affine transformation parameters configurations.
        """
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        # the wrapped array transform always applies (prob=1.0); the per-call
        # apply/skip decision is made by this dictionary-level wrapper instead
        self.rand_2d_elastic = Rand2DElastic(
            spacing=spacing,
            magnitude_range=magnitude_range,
            prob=1.0,  # because probability controlled by this class
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            spatial_size=spatial_size,
            as_tensor_output=as_tensor_output,
            device=device,
        )
        # broadcast per-key interpolation/padding settings to match `keys`
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Rand2DElasticd":
        # seed the wrapped transform first so the two RNG streams stay in a
        # fixed relative order; returns self to allow chaining
        self.rand_2d_elastic.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self
    def randomize(self, spatial_size: Sequence[int]) -> None:
        # sample the apply/skip decision, then the deformation parameters
        super().randomize(None)
        self.rand_2d_elastic.randomize(spatial_size)
    def __call__(
        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
        d = dict(data)
        # resolve non-positive spatial_size components from the first item's spatial shape
        sp_size = fall_back_tuple(self.rand_2d_elastic.spatial_size, data[self.keys[0]].shape[1:])
        self.randomize(spatial_size=sp_size)
        if self._do_transform:
            # coarse random offsets sampled at control-point resolution
            grid = self.rand_2d_elastic.deform_grid(spatial_size=sp_size)
            # compose the elastic deformation with the random affine
            grid = self.rand_2d_elastic.rand_affine_grid(grid=grid)
            # upsample the control-point grid to voxel resolution (scale by the
            # control-point spacing), then crop back to the requested size
            grid = torch.nn.functional.interpolate(  # type: ignore
                recompute_scale_factor=True,
                input=grid.unsqueeze(0),
                scale_factor=ensure_tuple_rep(self.rand_2d_elastic.deform_grid.spacing, 2),
                mode=InterpolateMode.BICUBIC.value,
                align_corners=False,
            )
            grid = CenterSpatialCrop(roi_size=sp_size)(grid[0])
        else:
            # identity grid: output is a spatial-size centered area of the input
            grid = create_grid(spatial_size=sp_size)
        # the same deformation grid is applied to every key, with per-key
        # interpolation mode and padding mode
        for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode):
            d[key] = self.rand_2d_elastic.resampler(d[key], grid, mode=mode, padding_mode=padding_mode)
        return d
class Rand3DElasticd(RandomizableTransform, MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Rand3DElastic`.
    """
    def __init__(
        self,
        keys: KeysCollection,
        sigma_range: Tuple[float, float],
        magnitude_range: Tuple[float, float],
        spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,
        prob: float = 0.1,
        rotate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        shear_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        translate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        scale_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = False,
        device: Optional[torch.device] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
            sigma_range: a Gaussian kernel with standard deviation sampled from
                ``uniform[sigma_range[0], sigma_range[1])`` will be used to smooth the random offset grid.
            magnitude_range: the random offsets on the grid will be generated from
                ``uniform[magnitude[0], magnitude[1])``.
            spatial_size: specifying output image spatial size [h, w, d].
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if the components of the `spatial_size` are non-positive values, the transform will use the
                corresponding components of img size. For example, `spatial_size=(32, 32, -1)` will be adapted
                to `(32, 32, 64)` if the third spatial dimension size of img is `64`.
            prob: probability of returning a randomized affine grid.
                defaults to 0.1, with 10% chance returns a randomized grid,
                otherwise returns a ``spatial_size`` centered area extracted from the input image.
            rotate_range: angle range in radians. If element `i` is iterable, then
                `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
                for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can
                be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range
                `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0
                and nothing for the remaining dimensions.
            shear_range: shear_range with format matching `rotate_range`.
            translate_range: translate_range with format matching `rotate_range`.
            scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result.
                This allows 0 to correspond to no change (i.e., a scaling of 1).
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
                It also can be a sequence of string, each element corresponds to a key in ``keys``.
            as_tensor_output: the computation is implemented using pytorch tensors, this option specifies
                whether to convert it back to numpy arrays.
            device: device on which the tensor will be allocated.
            allow_missing_keys: don't raise exception if key is missing.
        See also:
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
            - :py:class:`Affine` for the affine transformation parameters configurations.
        """
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        # the wrapped array transform always applies (prob=1.0); the per-call
        # apply/skip decision is made by this dictionary-level wrapper instead
        self.rand_3d_elastic = Rand3DElastic(
            sigma_range=sigma_range,
            magnitude_range=magnitude_range,
            prob=1.0,  # because probability controlled by this class
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            spatial_size=spatial_size,
            as_tensor_output=as_tensor_output,
            device=device,
        )
        # broadcast per-key interpolation/padding settings to match `keys`
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Rand3DElasticd":
        # seed the wrapped transform first so the two RNG streams stay in a
        # fixed relative order; returns self to allow chaining
        self.rand_3d_elastic.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self
    def randomize(self, grid_size: Sequence[int]) -> None:
        # sample the apply/skip decision, then the offset/affine parameters
        super().randomize(None)
        self.rand_3d_elastic.randomize(grid_size)
    def __call__(
        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
        d = dict(data)
        # resolve non-positive spatial_size components from the first item's spatial shape
        sp_size = fall_back_tuple(self.rand_3d_elastic.spatial_size, data[self.keys[0]].shape[1:])
        self.randomize(grid_size=sp_size)
        grid = create_grid(spatial_size=sp_size)
        if self._do_transform:
            device = self.rand_3d_elastic.device
            grid = torch.tensor(grid).to(device)
            # smooth the sampled random offsets with a Gaussian kernel, scale by
            # the sampled magnitude, and add them to the spatial grid coordinates
            gaussian = GaussianFilter(spatial_dims=3, sigma=self.rand_3d_elastic.sigma, truncated=3.0).to(device)
            offset = torch.tensor(self.rand_3d_elastic.rand_offset, device=device).unsqueeze(0)
            grid[:3] += gaussian(offset)[0] * self.rand_3d_elastic.magnitude
            # compose the elastic deformation with the random affine
            grid = self.rand_3d_elastic.rand_affine_grid(grid=grid)
        # the same deformation grid is applied to every key, with per-key
        # interpolation mode and padding mode
        for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode):
            d[key] = self.rand_3d_elastic.resampler(d[key], grid, mode=mode, padding_mode=padding_mode)
        return d
class Flipd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Flip`.
    Applies the same flip to every selected key.
    See `numpy.flip` for additional details:
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html
    Args:
        keys: Keys to pick data for transformation.
        spatial_axis: Spatial axes along which to flip over. Default is None.
        allow_missing_keys: don't raise exception if key is missing.
    """
    def __init__(
        self,
        keys: KeysCollection,
        spatial_axis: Optional[Union[Sequence[int], int]] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self.flipper = Flip(spatial_axis=spatial_axis)
    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        # copy the mapping, then flip each selected entry in place
        result = dict(data)
        for key in self.key_iterator(result):
            result[key] = self.flipper(result[key])
        return result
class RandFlipd(RandomizableTransform, MapTransform):
    """
    Dictionary-based version :py:class:`monai.transforms.RandFlip`.
    With probability ``prob``, flips every selected key along ``spatial_axis``.
    See `numpy.flip` for additional details:
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html
    Args:
        keys: Keys to pick data for transformation.
        prob: Probability of flipping.
        spatial_axis: Spatial axes along which to flip over. Default is None.
        allow_missing_keys: don't raise exception if key is missing.
    """
    def __init__(
        self,
        keys: KeysCollection,
        prob: float = 0.1,
        spatial_axis: Optional[Union[Sequence[int], int]] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.spatial_axis = spatial_axis
        self.flipper = Flip(spatial_axis=spatial_axis)
    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        # decide once per call whether every key gets flipped
        self.randomize(None)
        d = dict(data)
        if not self._do_transform:
            return d
        for key in self.key_iterator(d):
            d[key] = self.flipper(d[key])
        return d
class RandAxisFlipd(RandomizableTransform, MapTransform):
    """
    Dictionary-based version :py:class:`monai.transforms.RandAxisFlip`.
    With probability ``prob``, flips every selected key along one randomly
    chosen spatial axis (the same axis for all keys in a call).
    See `numpy.flip` for additional details:
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html
    Args:
        keys: Keys to pick data for transformation.
        prob: Probability of flipping.
        allow_missing_keys: don't raise exception if key is missing.
    """
    def __init__(self, keys: KeysCollection, prob: float = 0.1, allow_missing_keys: bool = False) -> None:
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        # spatial axis chosen by the latest call to `randomize`
        self._axis: Optional[int] = None
    def randomize(self, data: np.ndarray) -> None:
        super().randomize(None)
        # pick one of the data.ndim - 1 spatial axes (dim 0 is assumed to be
        # the channel dimension — consistent with the other transforms here)
        self._axis = self.R.randint(data.ndim - 1)
    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        self.randomize(data=data[self.keys[0]])
        d = dict(data)
        if not self._do_transform:
            return d
        flipper = Flip(spatial_axis=self._axis)
        for key in self.key_iterator(d):
            d[key] = flipper(d[key])
        return d
class Rotated(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Rotate`.
    Args:
        keys: Keys to pick data for transformation.
        angle: Rotation angle(s) in radians.
        keep_size: If False, the output shape adapts so the rotated input is
            contained completely in the output.
            If True, the output shape is the same as the input. Default is True.
        mode: {``"bilinear"``, ``"nearest"``}
            Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
            Padding mode for outside grid values. Defaults to ``"border"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        align_corners: Defaults to False.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            It also can be a sequence of bool, each element corresponds to a key in ``keys``.
        dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
            If None, use the data type of input data. To be compatible with other modules,
            the output data type is always ``np.float32``.
            It also can be a sequence of dtype or None, each element corresponds to a key in ``keys``.
        allow_missing_keys: don't raise exception if key is missing.
    """
    def __init__(
        self,
        keys: KeysCollection,
        angle: Union[Sequence[float], float],
        keep_size: bool = True,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER,
        align_corners: Union[Sequence[bool], bool] = False,
        dtype: Union[Sequence[DtypeLike], DtypeLike] = np.float64,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self.rotator = Rotate(angle=angle, keep_size=keep_size)
        # broadcast per-key resampling options to match the number of keys
        n_keys = len(self.keys)
        self.mode = ensure_tuple_rep(mode, n_keys)
        self.padding_mode = ensure_tuple_rep(padding_mode, n_keys)
        self.align_corners = ensure_tuple_rep(align_corners, n_keys)
        self.dtype = ensure_tuple_rep(dtype, n_keys)
    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        d = dict(data)
        per_key = self.key_iterator(d, self.mode, self.padding_mode, self.align_corners, self.dtype)
        for key, mode, padding_mode, align_corners, dtype in per_key:
            d[key] = self.rotator(
                d[key], mode=mode, padding_mode=padding_mode, align_corners=align_corners, dtype=dtype
            )
        return d
class RandRotated(RandomizableTransform, MapTransform):
    """
    Dictionary-based version :py:class:`monai.transforms.RandRotate`
    Randomly rotates the input arrays.
    Args:
        keys: Keys to pick data for transformation.
        range_x: Range of rotation angle in radians in the plane defined by the first and second axes.
            If single number, angle is uniformly sampled from (-range_x, range_x).
        range_y: Range of rotation angle in radians in the plane defined by the first and third axes.
            If single number, angle is uniformly sampled from (-range_y, range_y).
        range_z: Range of rotation angle in radians in the plane defined by the second and third axes.
            If single number, angle is uniformly sampled from (-range_z, range_z).
        prob: Probability of rotation.
        keep_size: If it is False, the output shape is adapted so that the
            input array is contained completely in the output.
            If it is True, the output shape is the same as the input. Default is True.
        mode: {``"bilinear"``, ``"nearest"``}
            Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
            Padding mode for outside grid values. Defaults to ``"border"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        align_corners: Defaults to False.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
            It also can be a sequence of bool, each element corresponds to a key in ``keys``.
        dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
            If None, use the data type of input data. To be compatible with other modules,
            the output data type is always ``np.float32``.
            It also can be a sequence of dtype or None, each element corresponds to a key in ``keys``.
        allow_missing_keys: don't raise exception if key is missing.
    """
    def __init__(
        self,
        keys: KeysCollection,
        range_x: Union[Tuple[float, float], float] = 0.0,
        range_y: Union[Tuple[float, float], float] = 0.0,
        range_z: Union[Tuple[float, float], float] = 0.0,
        prob: float = 0.1,
        keep_size: bool = True,
        mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
        padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER,
        align_corners: Union[Sequence[bool], bool] = False,
        dtype: Union[Sequence[DtypeLike], DtypeLike] = np.float64,
        allow_missing_keys: bool = False,
    ) -> None:
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        # a single range value x is normalized to the symmetric interval (-x, x)
        self.range_x = ensure_tuple(range_x)
        if len(self.range_x) == 1:
            self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))
        self.range_y = ensure_tuple(range_y)
        if len(self.range_y) == 1:
            self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))
        self.range_z = ensure_tuple(range_z)
        if len(self.range_z) == 1:
            self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))
        self.keep_size = keep_size
        # broadcast per-key resampling options to match `keys`
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
        self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
        self.dtype = ensure_tuple_rep(dtype, len(self.keys))
        # most recently sampled rotation angles (radians), one per plane
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
    def randomize(self, data: Optional[Any] = None) -> None:
        # sample apply/skip, then one angle per plane in a fixed x, y, z order
        # (the order matters for reproducibility of the RNG stream)
        super().randomize(None)
        self.x = self.R.uniform(low=self.range_x[0], high=self.range_x[1])
        self.y = self.R.uniform(low=self.range_y[0], high=self.range_y[1])
        self.z = self.R.uniform(low=self.range_z[0], high=self.range_z[1])
    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        # always consume the RNG so runs are reproducible regardless of prob
        self.randomize()
        d = dict(data)
        if not self._do_transform:
            return d
        # ndim == 3 means channel + 2 spatial dims: a single in-plane angle;
        # otherwise all three angles are passed for a 3D rotation
        rotator = Rotate(
            angle=self.x if d[self.keys[0]].ndim == 3 else (self.x, self.y, self.z),
            keep_size=self.keep_size,
        )
        for key, mode, padding_mode, align_corners, dtype in self.key_iterator(
            d, self.mode, self.padding_mode, self.align_corners, self.dtype
        ):
            d[key] = rotator(
                d[key],
                mode=mode,
                padding_mode=padding_mode,
                align_corners=align_corners,
                dtype=dtype,
            )
        return d
class Zoomd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.Zoom`.
    Args:
        keys: Keys to pick data for transformation.
        zoom: The zoom factor along the spatial axes.
            If a float, zoom is the same for each spatial axis.
            If a sequence, zoom should contain one value for each spatial axis.
        mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
            The interpolation mode. Defaults to ``"area"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        padding_mode: {``"constant"``, ``"edge``", ``"linear_ramp``", ``"maximum``", ``"mean``", `"median``",
            ``"minimum``", `"reflect``", ``"symmetric``", ``"wrap``", ``"empty``", ``"<function>``"}
            The mode to pad data after zooming.
            See also: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
        align_corners: This only has an effect when mode is
            'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
            It also can be a sequence of bool or None, each element corresponds to a key in ``keys``.
        keep_size: Should keep original size (pad if needed), default is True.
        allow_missing_keys: don't raise exception if key is missing.
    """
    def __init__(
        self,
        keys: KeysCollection,
        zoom: Union[Sequence[float], float],
        mode: InterpolateModeSequence = InterpolateMode.AREA,
        padding_mode: NumpyPadModeSequence = NumpyPadMode.EDGE,
        align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None,
        keep_size: bool = True,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self.zoomer = Zoom(zoom=zoom, keep_size=keep_size)
        # broadcast per-key resampling options to match the number of keys
        n_keys = len(self.keys)
        self.mode = ensure_tuple_rep(mode, n_keys)
        self.padding_mode = ensure_tuple_rep(padding_mode, n_keys)
        self.align_corners = ensure_tuple_rep(align_corners, n_keys)
    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        result = dict(data)
        per_key = self.key_iterator(result, self.mode, self.padding_mode, self.align_corners)
        for key, mode, padding_mode, align_corners in per_key:
            result[key] = self.zoomer(
                result[key], mode=mode, padding_mode=padding_mode, align_corners=align_corners
            )
        return result
class RandZoomd(RandomizableTransform, MapTransform):
    """
    Dict-based version :py:class:`monai.transforms.RandZoom`.
    Args:
        keys: Keys to pick data for transformation.
        prob: Probability of zooming.
        min_zoom: Min zoom factor. Can be float or sequence same size as image.
            If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
            to keep the original spatial shape ratio.
            If a sequence, min_zoom should contain one value for each spatial axis.
            If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
        max_zoom: Max zoom factor. Can be float or sequence same size as image.
            If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
            to keep the original spatial shape ratio.
            If a sequence, max_zoom should contain one value for each spatial axis.
            If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
        mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
            The interpolation mode. Defaults to ``"area"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        padding_mode: {``"constant"``, ``"edge``", ``"linear_ramp``", ``"maximum``", ``"mean``", `"median``",
            ``"minimum``", `"reflect``", ``"symmetric``", ``"wrap``", ``"empty``", ``"<function>``"}
            The mode to pad data after zooming.
            See also: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
        align_corners: This only has an effect when mode is
            'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
            It also can be a sequence of bool or None, each element corresponds to a key in ``keys``.
        keep_size: Should keep original size (pad if needed), default is True.
        allow_missing_keys: don't raise exception if key is missing.
    """
    def __init__(
        self,
        keys: KeysCollection,
        prob: float = 0.1,
        min_zoom: Union[Sequence[float], float] = 0.9,
        max_zoom: Union[Sequence[float], float] = 1.1,
        mode: InterpolateModeSequence = InterpolateMode.AREA,
        padding_mode: NumpyPadModeSequence = NumpyPadMode.EDGE,
        align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None,
        keep_size: bool = True,
        allow_missing_keys: bool = False,
    ) -> None:
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.min_zoom = ensure_tuple(min_zoom)
        self.max_zoom = ensure_tuple(max_zoom)
        # each axis needs a matching (min, max) pair to sample from
        if len(self.min_zoom) != len(self.max_zoom):
            raise AssertionError("min_zoom and max_zoom must have same length.")
        # broadcast per-key resampling options to match `keys`
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
        self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
        self.keep_size = keep_size
        # most recently sampled zoom factors; re-broadcast per call in __call__
        self._zoom: Sequence[float] = [1.0]
    def randomize(self, data: Optional[Any] = None) -> None:
        # sample apply/skip, then one factor per configured (min, max) pair
        super().randomize(None)
        self._zoom = [self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)]
    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        # match the spatial dim of first item
        # always consume the RNG so runs are reproducible regardless of prob
        self.randomize()
        d = dict(data)
        if not self._do_transform:
            return d
        img_dims = data[self.keys[0]].ndim
        if len(self._zoom) == 1:
            # to keep the spatial shape ratio, use same random zoom factor for all dims
            self._zoom = ensure_tuple_rep(self._zoom[0], img_dims - 1)
        elif len(self._zoom) == 2 and img_dims > 3:
            # if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim
            self._zoom = ensure_tuple_rep(self._zoom[0], img_dims - 2) + ensure_tuple(self._zoom[-1])
        # one Zoom instance with the sampled factors, shared by all keys
        zoomer = Zoom(self._zoom, keep_size=self.keep_size)
        for key, mode, padding_mode, align_corners in self.key_iterator(
            d, self.mode, self.padding_mode, self.align_corners
        ):
            d[key] = zoomer(
                d[key],
                mode=mode,
                padding_mode=padding_mode,
                align_corners=align_corners,
            )
        return d
# Alternate capitalized aliases for the dictionary-based transforms defined above.
SpacingD = SpacingDict = Spacingd
OrientationD = OrientationDict = Orientationd
Rotate90D = Rotate90Dict = Rotate90d
RandRotate90D = RandRotate90Dict = RandRotate90d
ResizeD = ResizeDict = Resized
AffineD = AffineDict = Affined
RandAffineD = RandAffineDict = RandAffined
Rand2DElasticD = Rand2DElasticDict = Rand2DElasticd
Rand3DElasticD = Rand3DElasticDict = Rand3DElasticd
FlipD = FlipDict = Flipd
RandFlipD = RandFlipDict = RandFlipd
RandAxisFlipD = RandAxisFlipDict = RandAxisFlipd
RotateD = RotateDict = Rotated
RandRotateD = RandRotateDict = RandRotated
ZoomD = ZoomDict = Zoomd
RandZoomD = RandZoomDict = RandZoomd
| en | 0.651416 | # Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. A collection of dictionary-based wrappers around the "vanilla" transforms for spatial operations defined in :py:class:`monai.transforms.spatial.array`. Class names are ended with 'd' to denote dictionary-based transforms. Dictionary-based wrapper of :py:class:`monai.transforms.Spacing`. This transform assumes the ``data`` dictionary has a key for the input data's metadata and contains `affine` field. The key is formed by ``key_{meta_key_postfix}``. After resampling the input array, this transform will write the new affine to the `affine` field of metadata which is formed by ``key_{meta_key_postfix}``. see also: :py:class:`monai.transforms.Spacing` Args: pixdim: output voxel spacing. diagonal: whether to resample the input to have a diagonal affine matrix. If True, the input data is resampled to the following affine:: np.diag((pixdim_0, pixdim_1, pixdim_2, 1)) This effectively resets the volume to the world coordinate system (RAS+ in nibabel). The original orientation, rotation, shearing are not preserved. If False, the axes orientation, orthogonal rotation and translations components from the original affine will be preserved in the target affine. This option will not flip/swap axes against the original ones. mode: {``"bilinear"``, ``"nearest"``} Interpolation mode to calculate output values. Defaults to ``"bilinear"``. 
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} Padding mode for outside grid values. Defaults to ``"border"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. align_corners: Geometrically, we consider the pixels of the input as squares rather than points. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of bool, each element corresponds to a key in ``keys``. dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision. If None, use the data type of input data. To be compatible with other modules, the output data type is always ``np.float32``. It also can be a sequence of dtypes, each element corresponds to a key in ``keys``. meta_key_postfix: use `key_{postfix}` to to fetch the meta data according to the key data, default is `meta_dict`, the meta data is a dictionary object. For example, to handle key `image`, read/write affine matrices from the metadata `image_meta_dict` dictionary's `affine` field. allow_missing_keys: don't raise exception if key is missing. Raises: TypeError: When ``meta_key_postfix`` is not a ``str``. # resample array of each corresponding key # using affine fetched from d[affine_key] # set the 'affine' key Dictionary-based wrapper of :py:class:`monai.transforms.Orientation`. This transform assumes the ``data`` dictionary has a key for the input data's metadata and contains `affine` field. The key is formed by ``key_{meta_key_postfix}``. After reorienting the input array, this transform will write the new affine to the `affine` field of metadata which is formed by ``key_{meta_key_postfix}``. Args: axcodes: N elements sequence for spatial ND input's orientation. e.g. 
axcodes='RAS' represents 3D orientation: (Left, Right), (Posterior, Anterior), (Inferior, Superior). default orientation labels options are: 'L' and 'R' for the first dimension, 'P' and 'A' for the second, 'I' and 'S' for the third. as_closest_canonical: if True, load the image as closest to canonical axis format. labels: optional, None or sequence of (2,) sequences (2,) sequences are labels for (beginning, end) of output axis. Defaults to ``(('L', 'R'), ('P', 'A'), ('I', 'S'))``. meta_key_postfix: use `key_{postfix}` to to fetch the meta data according to the key data, default is `meta_dict`, the meta data is a dictionary object. For example, to handle key `image`, read/write affine matrices from the metadata `image_meta_dict` dictionary's `affine` field. allow_missing_keys: don't raise exception if key is missing. Raises: TypeError: When ``meta_key_postfix`` is not a ``str``. See Also: `nibabel.orientations.ornt2axcodes`. Dictionary-based wrapper of :py:class:`monai.transforms.Rotate90`. Args: k: number of times to rotate by 90 degrees. spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes. Default: (0, 1), this is the first two axis in spatial dimensions. allow_missing_keys: don't raise exception if key is missing. Dictionary-based version :py:class:`monai.transforms.RandRotate90`. With probability `prob`, input arrays are rotated by 90 degrees in the plane specified by `spatial_axes`. Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` prob: probability of rotating. (Default 0.1, with 10% probability it returns a rotated array.) max_k: number of rotations will be sampled from `np.random.randint(max_k) + 1`. (Default 3) spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes. Default: (0, 1), this is the first two axis in spatial dimensions. allow_missing_keys: don't raise exception if key is missing. 
Dictionary-based wrapper of :py:class:`monai.transforms.Resize`. Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` spatial_size: expected shape of spatial dimensions after resize operation. if the components of the `spatial_size` are non-positive values, the transform will use the corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted to `(32, 64)` if the second spatial dimension size of img is `64`. mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``} The interpolation mode. Defaults to ``"area"``. See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate It also can be a sequence of string, each element corresponds to a key in ``keys``. align_corners: This only has an effect when mode is 'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None. See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate It also can be a sequence of bool or None, each element corresponds to a key in ``keys``. allow_missing_keys: don't raise exception if key is missing. Dictionary-based wrapper of :py:class:`monai.transforms.Affine`. Args: keys: keys of the corresponding items to be transformed. rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D. Defaults to no rotation. shear_params: a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing. translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in pixel/voxel relative to the center of the input image. Defaults to no translation. scale_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Defaults to no scaling. spatial_size: output image spatial size. if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1, the transform will use the spatial size of `img`. 
if the components of the `spatial_size` are non-positive values, the transform will use the corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted to `(32, 64)` if the second spatial dimension size of img is `64`. mode: {``"bilinear"``, ``"nearest"``} Interpolation mode to calculate output values. Defaults to ``"bilinear"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} Padding mode for outside grid values. Defaults to ``"reflection"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. as_tensor_output: the computation is implemented using pytorch tensors, this option specifies whether to convert it back to numpy arrays. device: device on which the tensor will be allocated. allow_missing_keys: don't raise exception if key is missing. See also: - :py:class:`monai.transforms.compose.MapTransform` - :py:class:`RandAffineGrid` for the random affine parameters configurations. Dictionary-based wrapper of :py:class:`monai.transforms.RandAffine`. Args: keys: keys of the corresponding items to be transformed. spatial_size: output image spatial size. if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1, the transform will use the spatial size of `img`. if the components of the `spatial_size` are non-positive values, the transform will use the corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted to `(32, 64)` if the second spatial dimension size of img is `64`. prob: probability of returning a randomized affine grid. defaults to 0.1, with 10% chance returns a randomized grid. rotate_range: angle range in radians. 
If element `i` is iterable, then `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 and nothing for the remaining dimensions. shear_range: shear_range with format matching `rotate_range`. translate_range: translate_range with format matching `rotate_range`. scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result. This allows 0 to correspond to no change (i.e., a scaling of 1). mode: {``"bilinear"``, ``"nearest"``} Interpolation mode to calculate output values. Defaults to ``"bilinear"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} Padding mode for outside grid values. Defaults to ``"reflection"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. as_tensor_output: the computation is implemented using pytorch tensors, this option specifies whether to convert it back to numpy arrays. device: device on which the tensor will be allocated. allow_missing_keys: don't raise exception if key is missing. See also: - :py:class:`monai.transforms.compose.MapTransform` - :py:class:`RandAffineGrid` for the random affine parameters configurations. # because probability handled in this class Dictionary-based wrapper of :py:class:`monai.transforms.Rand2DElastic`. Args: keys: keys of the corresponding items to be transformed. spacing: distance in between the control points. 
magnitude_range: 2 int numbers, the random offsets will be generated from ``uniform[magnitude[0], magnitude[1])``. spatial_size: specifying output image spatial size [h, w]. if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1, the transform will use the spatial size of `img`. if the components of the `spatial_size` are non-positive values, the transform will use the corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted to `(32, 64)` if the second spatial dimension size of img is `64`. prob: probability of returning a randomized affine grid. defaults to 0.1, with 10% chance returns a randomized grid, otherwise returns a ``spatial_size`` centered area extracted from the input image. rotate_range: angle range in radians. If element `i` is iterable, then `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 and nothing for the remaining dimensions. shear_range: shear_range with format matching `rotate_range`. translate_range: translate_range with format matching `rotate_range`. scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result. This allows 0 to correspond to no change (i.e., a scaling of 1). mode: {``"bilinear"``, ``"nearest"``} Interpolation mode to calculate output values. Defaults to ``"bilinear"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} Padding mode for outside grid values. Defaults to ``"reflection"``. 
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. as_tensor_output: the computation is implemented using pytorch tensors, this option specifies whether to convert it back to numpy arrays. device: device on which the tensor will be allocated. allow_missing_keys: don't raise exception if key is missing. See also: - :py:class:`RandAffineGrid` for the random affine parameters configurations. - :py:class:`Affine` for the affine transformation parameters configurations. # because probability controlled by this class # type: ignore Dictionary-based wrapper of :py:class:`monai.transforms.Rand3DElastic`. Args: keys: keys of the corresponding items to be transformed. sigma_range: a Gaussian kernel with standard deviation sampled from ``uniform[sigma_range[0], sigma_range[1])`` will be used to smooth the random offset grid. magnitude_range: the random offsets on the grid will be generated from ``uniform[magnitude[0], magnitude[1])``. spatial_size: specifying output image spatial size [h, w, d]. if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1, the transform will use the spatial size of `img`. if the components of the `spatial_size` are non-positive values, the transform will use the corresponding components of img size. For example, `spatial_size=(32, 32, -1)` will be adapted to `(32, 32, 64)` if the third spatial dimension size of img is `64`. prob: probability of returning a randomized affine grid. defaults to 0.1, with 10% chance returns a randomized grid, otherwise returns a ``spatial_size`` centered area extracted from the input image. rotate_range: angle range in radians. If element `i` is iterable, then `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can be altered on a per-dimension basis. 
E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 and nothing for the remaining dimensions. shear_range: shear_range with format matching `rotate_range`. translate_range: translate_range with format matching `rotate_range`. scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result. This allows 0 to correspond to no change (i.e., a scaling of 1). mode: {``"bilinear"``, ``"nearest"``} Interpolation mode to calculate output values. Defaults to ``"bilinear"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} Padding mode for outside grid values. Defaults to ``"reflection"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. as_tensor_output: the computation is implemented using pytorch tensors, this option specifies whether to convert it back to numpy arrays. device: device on which the tensor will be allocated. allow_missing_keys: don't raise exception if key is missing. See also: - :py:class:`RandAffineGrid` for the random affine parameters configurations. - :py:class:`Affine` for the affine transformation parameters configurations. # because probability controlled by this class Dictionary-based wrapper of :py:class:`monai.transforms.Flip`. See `numpy.flip` for additional details. https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html Args: keys: Keys to pick data for transformation. spatial_axis: Spatial axes along which to flip over. Default is None. allow_missing_keys: don't raise exception if key is missing. Dictionary-based version :py:class:`monai.transforms.RandFlip`. See `numpy.flip` for additional details. 
https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html Args: keys: Keys to pick data for transformation. prob: Probability of flipping. spatial_axis: Spatial axes along which to flip over. Default is None. allow_missing_keys: don't raise exception if key is missing. Dictionary-based version :py:class:`monai.transforms.RandAxisFlip`. See `numpy.flip` for additional details. https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html Args: keys: Keys to pick data for transformation. prob: Probability of flipping. allow_missing_keys: don't raise exception if key is missing. Dictionary-based wrapper of :py:class:`monai.transforms.Rotate`. Args: keys: Keys to pick data for transformation. angle: Rotation angle(s) in radians. keep_size: If it is False, the output shape is adapted so that the input array is contained completely in the output. If it is True, the output shape is the same as the input. Default is True. mode: {``"bilinear"``, ``"nearest"``} Interpolation mode to calculate output values. Defaults to ``"bilinear"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} Padding mode for outside grid values. Defaults to ``"border"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. align_corners: Defaults to False. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of bool, each element corresponds to a key in ``keys``. dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision. If None, use the data type of input data. To be compatible with other modules, the output data type is always ``np.float32``. It also can be a sequence of dtype or None, each element corresponds to a key in ``keys``. 
allow_missing_keys: don't raise exception if key is missing. Dictionary-based version :py:class:`monai.transforms.RandRotate` Randomly rotates the input arrays. Args: keys: Keys to pick data for transformation. range_x: Range of rotation angle in radians in the plane defined by the first and second axes. If single number, angle is uniformly sampled from (-range_x, range_x). range_y: Range of rotation angle in radians in the plane defined by the first and third axes. If single number, angle is uniformly sampled from (-range_y, range_y). range_z: Range of rotation angle in radians in the plane defined by the second and third axes. If single number, angle is uniformly sampled from (-range_z, range_z). prob: Probability of rotation. keep_size: If it is False, the output shape is adapted so that the input array is contained completely in the output. If it is True, the output shape is the same as the input. Default is True. mode: {``"bilinear"``, ``"nearest"``} Interpolation mode to calculate output values. Defaults to ``"bilinear"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} Padding mode for outside grid values. Defaults to ``"border"``. See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample It also can be a sequence of string, each element corresponds to a key in ``keys``. align_corners: Defaults to False. See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate It also can be a sequence of bool, each element corresponds to a key in ``keys``. dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision. If None, use the data type of input data. To be compatible with other modules, the output data type is always ``np.float32``. It also can be a sequence of dtype or None, each element corresponds to a key in ``keys``. 
allow_missing_keys: don't raise exception if key is missing. Dictionary-based wrapper of :py:class:`monai.transforms.Zoom`. Args: keys: Keys to pick data for transformation. zoom: The zoom factor along the spatial axes. If a float, zoom is the same for each spatial axis. If a sequence, zoom should contain one value for each spatial axis. mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``} The interpolation mode. Defaults to ``"area"``. See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate It also can be a sequence of string, each element corresponds to a key in ``keys``. padding_mode: {``"constant"``, ``"edge``", ``"linear_ramp``", ``"maximum``", ``"mean``", `"median``", ``"minimum``", `"reflect``", ``"symmetric``", ``"wrap``", ``"empty``", ``"<function>``"} The mode to pad data after zooming. See also: https://numpy.org/doc/stable/reference/generated/numpy.pad.html align_corners: This only has an effect when mode is 'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None. See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate It also can be a sequence of bool or None, each element corresponds to a key in ``keys``. keep_size: Should keep original size (pad if needed), default is True. allow_missing_keys: don't raise exception if key is missing. Dict-based version :py:class:`monai.transforms.RandZoom`. Args: keys: Keys to pick data for transformation. prob: Probability of zooming. min_zoom: Min zoom factor. Can be float or sequence same size as image. If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims to keep the original spatial shape ratio. If a sequence, min_zoom should contain one value for each spatial axis. If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio. max_zoom: Max zoom factor. Can be float or sequence same size as image. 
If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims to keep the original spatial shape ratio. If a sequence, max_zoom should contain one value for each spatial axis. If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio. mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``} The interpolation mode. Defaults to ``"area"``. See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate It also can be a sequence of string, each element corresponds to a key in ``keys``. padding_mode: {``"constant"``, ``"edge``", ``"linear_ramp``", ``"maximum``", ``"mean``", `"median``", ``"minimum``", `"reflect``", ``"symmetric``", ``"wrap``", ``"empty``", ``"<function>``"} The mode to pad data after zooming. See also: https://numpy.org/doc/stable/reference/generated/numpy.pad.html align_corners: This only has an effect when mode is 'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None. See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate It also can be a sequence of bool or None, each element corresponds to a key in ``keys``. keep_size: Should keep original size (pad if needed), default is True. allow_missing_keys: don't raise exception if key is missing. # match the spatial dim of first item # to keep the spatial shape ratio, use same random zoom factor for all dims # if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim | 1.524124 | 2 |
habitat_baselines/rl/ppo/policy.py | tommyz94/objectnav | 0 | 6624429 | <reponame>tommyz94/objectnav
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Dict
import numpy as np
from gym import spaces
import torch
from torch import nn as nn
from torch.jit import Final
from habitat.tasks.nav.nav import (
ImageGoalSensor,
)
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.utils import CategoricalNet, Flatten
from habitat_baselines.rl.models.rnn_state_encoder import RNNStateEncoder
import habitat_baselines.rl.models.resnet as resnet
from habitat_baselines.rl.models.resnet import ResNetEncoder
from habitat_baselines.common.running_mean_and_var import RunningMeanAndVar
from habitat_baselines.rl.models.simple_cnn import SimpleCNN
GOAL_EMBEDDING_SIZE = 32
@torch.jit.script
def _process_depth(observations: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
if "depth" in observations:
depth_observations = observations["depth"]
depth_observations = torch.clamp(depth_observations, 0.0, 10.0)
depth_observations /= 10.0
observations["depth"] = depth_observations
return observations
class ObservationSequential(nn.Sequential):
    r"""Sequential, but with a ``Dict[str, Tensor]`` annotation on ``forward``
    for JIT compatibility when forwarding observation dictionaries.

    ``nn.Sequential.forward`` is annotated for tensor inputs; TorchScript needs
    the dict-of-tensors signature spelled out explicitly.
    """
    # Fix: the original placed this description as a bare string statement
    # inside __init__, where it was a no-op rather than the class docstring.
    # The pass-through __init__ was redundant and has been removed.

    def forward(self, x: Dict[str, torch.Tensor]):
        # Same iteration as nn.Sequential.forward, retyped for dict inputs.
        for module in self:
            x = module(x)
        return x
class Policy(nn.Module):
    r"""Base actor-critic policy: a ``net`` backbone producing features, plus a
    categorical action head and a scalar critic head built on top of them.

    NOTE(review): ``observation_space`` defaults to None but is dereferenced
    unconditionally in ``__init__`` (``observation_space.spaces``) — confirm
    that every caller actually passes it.
    """
    # The following configurations are used in the trainer to create the appropriate rollout
    # As well as the appropriate auxiliary task wiring
    # Whether to use multiple beliefs
    IS_MULTIPLE_BELIEF = False
    # Whether to section a single belief for auxiliary tasks, keeping a single GRU core
    IS_SECTIONED = False
    # Whether the fusion module is an RNN (see RecurrentAttentivePolicy)
    IS_RECURRENT = False
    # Has JIT support
    IS_JITTABLE = False
    # Policy fuses multiple inputs
    LATE_FUSION = True
    def __init__(self, net, dim_actions, observation_space=None, config=None, **kwargs):
        """
        Args:
            net: feature backbone; must expose ``output_size``.
            dim_actions: number of discrete actions.
            observation_space: full observation space; used to size the
                RGB(+depth) running-normalization buffer.
            config: optional config object read for ``ACTOR_HEAD_LAYERS`` and
                ``CRITIC_HEAD_LAYERS`` (both default to 1).
        """
        super().__init__()
        self.net = net
        self.dim_actions = dim_actions
        actor_head_layers = getattr(config, "ACTOR_HEAD_LAYERS", 1)
        critic_head_layers = getattr(config, "CRITIC_HEAD_LAYERS", 1)
        # NOTE(review): both heads are sized for 2x net.output_size —
        # presumably the features fed to them are a concatenation of two
        # belief/feature vectors; confirm against the net implementation.
        self.action_distribution = CategoricalNet(
            self.net.output_size*2, self.dim_actions, layers=actor_head_layers
        )
        self.critic = CriticHead(self.net.output_size*2, layers=critic_head_layers)
        if "rgb" in observation_space.spaces:
            # Joint running normalization over RGB channels plus depth
            # channels when a depth sensor is present.
            self.running_mean_and_var = RunningMeanAndVar(
                observation_space.spaces["rgb"].shape[-1]
                + (
                    observation_space.spaces["depth"].shape[-1]
                    if "depth" in observation_space.spaces
                    else 0
                ),
                initial_count=1e4,
            )
        else:
            self.running_mean_and_var = None
    def forward(self, *x):
        # Use act / get_value / evaluate_actions instead of calling directly.
        raise NotImplementedError
    def _preprocess_obs(self, observations):
        """Cast observations to the policy's dtype, normalize depth to [0, 1],
        and apply running mean/var normalization to the RGB(+depth) channels.

        4-D observations are permuted NHWC -> NCHW for normalization and
        permuted back before returning.
        """
        dtype = next(self.parameters()).dtype
        observations = {k: v.to(dtype=dtype) for k, v in observations.items()}
        # since this seems to be what running_mean_and_var is expecting
        observations = {k: v.permute(0, 3, 1, 2) if len(v.size()) == 4 else v for k, v in observations.items()}
        observations = _process_depth(observations)
        if "rgb" in observations:
            # Scale uint8-style RGB into [0, 1] before normalization.
            rgb = observations["rgb"].to(dtype=next(self.parameters()).dtype) / 255.0
            x = [rgb]
            if "depth" in observations:
                x.append(observations["depth"])
            x = self.running_mean_and_var(torch.cat(x, 1))
            # this preprocesses depth and rgb, but not semantics. we're still embedding that in our encoder
            observations["rgb"] = x[:, 0:3]
            if "depth" in observations:
                observations["depth"] = x[:, 3:]
        # ! Permute them back, because the rest of our code expects unpermuted
        observations = {k: v.permute(0, 2, 3, 1) if len(v.size()) == 4 else v for k, v in observations.items()}
        return observations
    def act(
        self,
        observations,
        rnn_hidden_states,
        prev_actions,
        masks,
        deterministic=False,
        **kwargs
    ):
        """Select an action for one environment step.

        Returns:
            Tuple of (value, action, action_log_probs, rnn_hidden_states).
            ``deterministic=True`` takes the distribution mode instead of sampling.
        """
        observations = self._preprocess_obs(observations)
        features, rnn_hidden_states = self.net(
            observations, rnn_hidden_states, prev_actions, masks
        )
        distribution = self.action_distribution(features)
        value = self.critic(features)
        if deterministic:
            action = distribution.mode()
        else:
            action = distribution.sample()
        action_log_probs = distribution.log_probs(action)
        return value, action, action_log_probs, rnn_hidden_states
    def get_value(self, observations, rnn_hidden_states, prev_actions, masks):
        """Return only the critic's value estimate for the given inputs."""
        observations = self._preprocess_obs(observations)
        features, _ = self.net(
            observations, rnn_hidden_states, prev_actions, masks
        )
        return self.critic(features)
    def evaluate_actions(
        self, observations, rnn_hidden_states, prev_actions, masks, action
    ):
        """Score previously taken actions (used in the PPO update).

        Returns an 8-tuple; the trailing Nones are presumably placeholders for
        auxiliary-task outputs consumed by the trainer — confirm against it.
        """
        observations = self._preprocess_obs(observations)
        features, rnn_hidden_states = self.net(
            observations, rnn_hidden_states, prev_actions, masks
        )
        distribution = self.action_distribution(features)
        value = self.critic(features)
        action_log_probs = distribution.log_probs(action)
        distribution_entropy = distribution.entropy().mean()
        return value, action_log_probs, distribution_entropy, rnn_hidden_states, features, None, None, None
class CriticHead(nn.Module):
    """Scalar value head: maps policy features to a single state-value estimate.

    With ``layers == 1`` this is a single linear projection; otherwise a
    two-layer MLP with a ReLU in between. The first linear layer is always
    initialized with an orthogonal weight and a zero bias.
    """

    HIDDEN_SIZE = 32

    def __init__(self, input_size, layers=1):
        super().__init__()
        if layers == 1:
            head = nn.Linear(input_size, 1)
            first_linear = head
        else:  # only up to 2 layers are supported
            head = nn.Sequential(
                nn.Linear(input_size, self.HIDDEN_SIZE),
                nn.ReLU(),
                nn.Linear(self.HIDDEN_SIZE, 1),
            )
            first_linear = head[0]
        nn.init.orthogonal_(first_linear.weight)
        nn.init.constant_(first_linear.bias, 0)
        self.fc = head

    def forward(self, x):
        return self.fc(x)
@baseline_registry.register_policy
class PointNavBaselinePolicy(Policy):
    """PointNav policy: a BaselineNet backbone feeding the shared actor-critic
    heads defined on :class:`Policy`.

    Args:
        observation_space: full observation space of the task.
        action_space: discrete action space; ``action_space.n`` sizes the head.
        goal_sensor_uuid: observation key of the point-goal sensor.
        hidden_size: width of the visual embedding / RNN state.
    """

    def __init__(
        self,
        observation_space,
        action_space,
        goal_sensor_uuid=None,
        hidden_size=512,
        **kwargs,
    ):
        super().__init__(
            BaselineNet(
                observation_space=observation_space,
                hidden_size=hidden_size,
                goal_sensor_uuid=goal_sensor_uuid,
            ),
            action_space.n,
            # Fix: forward observation_space (and any extra kwargs) to Policy.
            # Previously it was dropped, and Policy.__init__ dereferences
            # observation_space.spaces, crashing on the default None.
            observation_space=observation_space,
            **kwargs,
        )
class Net(nn.Module, metaclass=abc.ABCMeta):
    """Abstract interface for policy backbones: maps (observations, recurrent
    hidden states, previous actions, masks) to (features, new hidden states).
    """
    @abc.abstractmethod
    def forward(self, observations, rnn_hidden_states, prev_actions, masks):
        pass
    @property
    @abc.abstractmethod
    def output_size(self):
        # Width of the feature vector produced by forward().
        pass
    @property
    @abc.abstractmethod
    def num_recurrent_layers(self):
        # Number of recurrent layers in the state encoder (sizes hidden state).
        pass
    @property
    @abc.abstractmethod
    def is_blind(self):
        # True when the net consumes no visual observations.
        pass
class BaselineNet(Net):
    r"""Network which passes the input image through a ResNet CNN and
    integrates the result (plus goal and low-dimensional sensor readings)
    over time with an RNN.

    Args:
        observation_space: observation space providing the visual keys consumed
            by ResNetEncoder and any keys named in ``additional_sensors``.
        hidden_size: width of the visual embedding and of the RNN state.
        goal_sensor_uuid: observation key of the goal sensor; ``None`` or
            ``"no_sensor"`` disables goal conditioning.
        additional_sensors: names of low-dimensional sensors whose raw values
            are concatenated onto the RNN input (e.g. ``["gps", "compass"]``).
    """

    def __init__(
        self,
        observation_space,
        hidden_size,
        goal_sensor_uuid=None,
        additional_sensors=None,  # low-dim sensors corresponding to registered names
    ):
        # Fix: nn.Module.__init__ must run before any submodule assignment;
        # without it, registering self.visual_encoder below raises
        # "cannot assign module before Module.__init__() call".
        super().__init__()
        # Fix: avoid a mutable default argument shared across instances.
        if additional_sensors is None:
            additional_sensors = []
        self.goal_sensor_uuid = goal_sensor_uuid
        self.additional_sensors = additional_sensors
        self._n_input_goal = 0
        # Fix: _hidden_size must be set before _initialize_goal_encoder,
        # which reads it in the image-goal branch.
        self._hidden_size = hidden_size
        if goal_sensor_uuid is not None and goal_sensor_uuid != "no_sensor":
            self._initialize_goal_encoder(observation_space)
        resnet_baseplanes = 32
        backbone = "resnet18"
        visual_resnet = ResNetEncoder(
            observation_space,
            baseplanes=resnet_baseplanes,
            ngroups=resnet_baseplanes // 2,
            make_backbone=getattr(resnet, backbone),
        )
        # Project the flattened CNN output down to hidden_size.
        self.visual_encoder = ObservationSequential(
            visual_resnet,
            Flatten(),
            nn.Linear(np.prod(visual_resnet.output_shape), hidden_size),
            nn.ReLU(True),
        )
        # RNN input = visual embedding (+ goal encoding) (+ raw extra sensors).
        final_embedding_size = (0 if self.is_blind else self._hidden_size) + self._n_input_goal
        for sensor in additional_sensors:
            final_embedding_size += observation_space.spaces[sensor].shape[0]
        self.state_encoder = RNNStateEncoder(final_embedding_size, self._hidden_size)
        self.train()

    @property
    def output_size(self):
        # Width of the features returned by forward (the RNN hidden size).
        return self._hidden_size

    @property
    def is_blind(self):
        # A visual encoder is always constructed, so this net is never blind.
        return False

    @property
    def num_recurrent_layers(self):
        return self.state_encoder.num_recurrent_layers

    def _initialize_goal_encoder(self, observation_space):
        """Set self._n_input_goal and, for image goals, build a goal CNN."""
        if self.goal_sensor_uuid == ImageGoalSensor.cls_uuid:
            goal_observation_space = spaces.Dict(
                {"rgb": observation_space.spaces[ImageGoalSensor.cls_uuid]}
            )
            self.goal_visual_encoder = SimpleCNN(
                goal_observation_space, self._hidden_size
            )
            self._n_input_goal = self._hidden_size
        else:
            # Low-dimensional goal (e.g. a point goal vector): used directly.
            self._n_input_goal = observation_space.spaces[
                self.goal_sensor_uuid
            ].shape[0]

    def get_target_encoding(self, observations):
        """Return the encoded goal observation for the current batch."""
        if self.goal_sensor_uuid == ImageGoalSensor.cls_uuid:
            image_goal = observations[ImageGoalSensor.cls_uuid]
            return self.goal_visual_encoder({"rgb": image_goal})
        return observations[self.goal_sensor_uuid]

    def _append_additional_sensors(self, x, observations):
        # Concatenate raw low-dim sensor readings onto the feature list.
        for sensor in self.additional_sensors:
            x.append(observations[sensor])
        return x

    def forward(self, observations, rnn_hidden_states, prev_actions, masks):
        """Encode observations and advance the recurrent state.

        Returns:
            Tuple of (features, new_rnn_hidden_states).
        """
        x = []
        if not self.is_blind:
            x.append(self.visual_encoder(observations))
        # Fix: also skip the goal encoding for "no_sensor" (consistent with
        # __init__, which reserves no goal width in that case).
        if self.goal_sensor_uuid is not None and self.goal_sensor_uuid != "no_sensor":
            x.append(self.get_target_encoding(observations))
        x = self._append_additional_sensors(x, observations)
        x = torch.cat(x, dim=-1)  # t x n x -1
        x, rnn_hidden_states = self.state_encoder(x, rnn_hidden_states, masks)
        return x, rnn_hidden_states
@baseline_registry.register_policy
class ObjectNavBaselinePolicy(Policy):
    """ObjectNav policy: a BaselineNet backbone over the visual observations
    plus GPS and compass sensors, with the object-category goal looked up in a
    learned embedding table."""

    def __init__(
        self,
        observation_space,
        action_space,
        goal_sensor_uuid=None,
        hidden_size=512,
        **kwargs,
    ):
        super().__init__(
            BaselineNet(
                observation_space=observation_space,
                hidden_size=hidden_size,
                goal_sensor_uuid=goal_sensor_uuid,
                additional_sensors=["gps", "compass"],
            ),
            action_space.n,
            # Fix: forward observation_space (and any extra kwargs) to Policy.
            # Previously it was dropped, and Policy.__init__ dereferences
            # observation_space.spaces, crashing on the default None.
            observation_space=observation_space,
            **kwargs,
        )

    # NOTE(review): BaselineNet.__init__ calls _initialize_goal_encoder /
    # get_target_encoding on the *net* instance, so these overrides on the
    # policy never affect the BaselineNet constructed above — confirm whether
    # they should instead live on a BaselineNet subclass.
    def _initialize_goal_encoder(self, observation_space):
        """Embed the integer object-category goal into GOAL_EMBEDDING_SIZE dims."""
        self._n_input_goal = GOAL_EMBEDDING_SIZE
        goal_space = observation_space.spaces[
            self.goal_sensor_uuid
        ]
        # low is 0, high is given (see object nav task); assumes `high` is a
        # scalar upper bound on the category id — TODO confirm space type.
        self.goal_embedder = nn.Embedding(goal_space.high + 1, self._n_input_goal)

    def get_target_encoding(self, observations):
        """Look up the embedding row for the goal object category."""
        return self.goal_embedder(observations[self.goal_sensor_uuid])
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Dict
import numpy as np
from gym import spaces
import torch
from torch import nn as nn
from torch.jit import Final
from habitat.tasks.nav.nav import (
ImageGoalSensor,
)
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.utils import CategoricalNet, Flatten
from habitat_baselines.rl.models.rnn_state_encoder import RNNStateEncoder
import habitat_baselines.rl.models.resnet as resnet
from habitat_baselines.rl.models.resnet import ResNetEncoder
from habitat_baselines.common.running_mean_and_var import RunningMeanAndVar
from habitat_baselines.rl.models.simple_cnn import SimpleCNN
GOAL_EMBEDDING_SIZE = 32
@torch.jit.script
def _process_depth(observations: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
if "depth" in observations:
depth_observations = observations["depth"]
depth_observations = torch.clamp(depth_observations, 0.0, 10.0)
depth_observations /= 10.0
observations["depth"] = depth_observations
return observations
class ObservationSequential(nn.Sequential):
    r"""Sequential, but with a ``Dict[str, Tensor]`` annotation on ``forward``
    for JIT compatibility when forwarding observation dictionaries.

    ``nn.Sequential.forward`` is annotated for tensor inputs; TorchScript needs
    the dict-of-tensors signature spelled out explicitly.
    """
    # Fix: the original placed this description as a bare string statement
    # inside __init__, where it was a no-op rather than the class docstring.
    # The pass-through __init__ was redundant and has been removed.

    def forward(self, x: Dict[str, torch.Tensor]):
        # Same iteration as nn.Sequential.forward, retyped for dict inputs.
        for module in self:
            x = module(x)
        return x
class Policy(nn.Module):
# The following configurations are used in the trainer to create the appropriate rollout
# As well as the appropriate auxiliary task wiring
# Whether to use multiple beliefs
IS_MULTIPLE_BELIEF = False
# Whether to section a single belief for auxiliary tasks, keeping a single GRU core
IS_SECTIONED = False
# Whether the fusion module is an RNN (see RecurrentAttentivePolicy)
IS_RECURRENT = False
# Has JIT support
IS_JITTABLE = False
# Policy fuses multiple inputs
LATE_FUSION = True
def __init__(self, net, dim_actions, observation_space=None, config=None, **kwargs):
super().__init__()
self.net = net
self.dim_actions = dim_actions
actor_head_layers = getattr(config, "ACTOR_HEAD_LAYERS", 1)
critic_head_layers = getattr(config, "CRITIC_HEAD_LAYERS", 1)
self.action_distribution = CategoricalNet(
self.net.output_size*2, self.dim_actions, layers=actor_head_layers
)
self.critic = CriticHead(self.net.output_size*2, layers=critic_head_layers)
if "rgb" in observation_space.spaces:
self.running_mean_and_var = RunningMeanAndVar(
observation_space.spaces["rgb"].shape[-1]
+ (
observation_space.spaces["depth"].shape[-1]
if "depth" in observation_space.spaces
else 0
),
initial_count=1e4,
)
else:
self.running_mean_and_var = None
def forward(self, *x):
raise NotImplementedError
def _preprocess_obs(self, observations):
dtype = next(self.parameters()).dtype
observations = {k: v.to(dtype=dtype) for k, v in observations.items()}
# since this seems to be what running_mean_and_var is expecting
observations = {k: v.permute(0, 3, 1, 2) if len(v.size()) == 4 else v for k, v in observations.items()}
observations = _process_depth(observations)
if "rgb" in observations:
rgb = observations["rgb"].to(dtype=next(self.parameters()).dtype) / 255.0
x = [rgb]
if "depth" in observations:
x.append(observations["depth"])
x = self.running_mean_and_var(torch.cat(x, 1))
# this preprocesses depth and rgb, but not semantics. we're still embedding that in our encoder
observations["rgb"] = x[:, 0:3]
if "depth" in observations:
observations["depth"] = x[:, 3:]
# ! Permute them back, because the rest of our code expects unpermuted
observations = {k: v.permute(0, 2, 3, 1) if len(v.size()) == 4 else v for k, v in observations.items()}
return observations
def act(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
deterministic=False,
**kwargs
):
observations = self._preprocess_obs(observations)
features, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
distribution = self.action_distribution(features)
value = self.critic(features)
if deterministic:
action = distribution.mode()
else:
action = distribution.sample()
action_log_probs = distribution.log_probs(action)
return value, action, action_log_probs, rnn_hidden_states
def get_value(self, observations, rnn_hidden_states, prev_actions, masks):
observations = self._preprocess_obs(observations)
features, _ = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
return self.critic(features)
def evaluate_actions(
self, observations, rnn_hidden_states, prev_actions, masks, action
):
observations = self._preprocess_obs(observations)
features, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
distribution = self.action_distribution(features)
value = self.critic(features)
action_log_probs = distribution.log_probs(action)
distribution_entropy = distribution.entropy().mean()
return value, action_log_probs, distribution_entropy, rnn_hidden_states, features, None, None, None
class CriticHead(nn.Module):
HIDDEN_SIZE = 32
def __init__(self, input_size, layers=1):
super().__init__()
if layers == 1:
self.fc = nn.Linear(input_size, 1)
nn.init.orthogonal_(self.fc.weight)
nn.init.constant_(self.fc.bias, 0)
else: # Only support 2 layers max
self.fc = nn.Sequential(
nn.Linear(input_size, self.HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(self.HIDDEN_SIZE, 1)
)
nn.init.orthogonal_(self.fc[0].weight)
nn.init.constant_(self.fc[0].bias, 0)
def forward(self, x):
return self.fc(x)
@baseline_registry.register_policy
class PointNavBaselinePolicy(Policy):
def __init__(
self,
observation_space,
action_space,
goal_sensor_uuid=None,
hidden_size=512,
**kwargs,
):
super().__init__(
BaselineNet(
observation_space=observation_space,
hidden_size=hidden_size,
goal_sensor_uuid=goal_sensor_uuid,
),
action_space.n,
)
class Net(nn.Module, metaclass=abc.ABCMeta):
@abc.abstractmethod
def forward(self, observations, rnn_hidden_states, prev_actions, masks):
pass
@property
@abc.abstractmethod
def output_size(self):
pass
@property
@abc.abstractmethod
def num_recurrent_layers(self):
pass
@property
@abc.abstractmethod
def is_blind(self):
pass
class BaselineNet(Net):
r"""Network which passes the input image through CNN and passes through RNN.
"""
def __init__(
self,
observation_space,
hidden_size,
goal_sensor_uuid=None,
additional_sensors=[] # low dim sensors corresponding to registered name
):
# TODO OURS
self.goal_sensor_uuid = goal_sensor_uuid
self.additional_sensors = additional_sensors
self._n_input_goal = 0
if goal_sensor_uuid is not None and goal_sensor_uuid != "no_sensor":
self.goal_sensor_uuid = goal_sensor_uuid
self._initialize_goal_encoder(observation_space)
# END
self._hidden_size = hidden_size
resnet_baseplanes = 32
backbone="resnet18"
visual_resnet = ResNetEncoder(
observation_space,
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
)
self.visual_encoder = ObservationSequential(
visual_resnet,
Flatten(),
nn.Linear(
np.prod(visual_resnet.output_shape), hidden_size
),
nn.ReLU(True),
)
final_embedding_size = (0 if self.is_blind else self._hidden_size) + self._n_input_goal
for sensor in additional_sensors:
final_embedding_size += observation_space.spaces[sensor].shape[0]
self.state_encoder = RNNStateEncoder(final_embedding_size, self._hidden_size)
self.train()
@property
def output_size(self):
return self._hidden_size
@property
def is_blind(self):
return False
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
def _initialize_goal_encoder(self, observation_space):
if self.goal_sensor_uuid == ImageGoalSensor.cls_uuid:
goal_observation_space = spaces.Dict(
{"rgb": observation_space.spaces[ImageGoalSensor.cls_uuid]}
)
self.goal_visual_encoder = SimpleCNN(
goal_observation_space, self._hidden_size
)
self._n_input_goal = self._hidden_size
else:
self._n_input_goal = observation_space.spaces[
self.goal_sensor_uuid
].shape[0]
# if (
# IntegratedPointGoalGPSAndCompassSensor.cls_uuid
# in observation_space.spaces
# ):
# self._n_input_goal = observation_space.spaces[
# IntegratedPointGoalGPSAndCompassSensor.cls_uuid
# ].shape[0]
# elif PointGoalSensor.cls_uuid in observation_space.spaces:
# self._n_input_goal = observation_space.spaces[
# PointGoalSensor.cls_uuid
# ].shape[0]
# elif ImageGoalSensor.cls_uuid in observation_space.spaces:
# goal_observation_space = spaces.Dict(
# {"rgb": observation_space.spaces[ImageGoalSensor.cls_uuid]}
# )
# self.goal_visual_encoder = SimpleCNN(
# goal_observation_space, hidden_size
# )
# self._n_input_goal = hidden_size
def get_target_encoding(self, observations):
if self.goal_sensor_uuid == ImageGoalSensor.cls_uuid:
image_goal = observations[ImageGoalSensor.cls_uuid]
return self.goal_visual_encoder({"rgb": image_goal})
# if IntegratedPointGoalGPSAndCompassSensor.cls_uuid in observations:
# target_encoding = observations[
# IntegratedPointGoalGPSAndCompassSensor.cls_uuid
# ]
# elif PointGoalSensor.cls_uuid in observations:
# target_encoding = observations[PointGoalSensor.cls_uuid]
# elif ImageGoalSensor.cls_uuid in observations:
# image_goal = observations[ImageGoalSensor.cls_uuid]
# target_encoding = self.goal_visual_encoder({"rgb": image_goal})
return observations[self.goal_sensor_uuid]
def _append_additional_sensors(self, x, observations):
for sensor in self.additional_sensors:
x.append(observations[sensor])
return x
def forward(self, observations, rnn_hidden_states, prev_actions, masks):
x = []
if not self.is_blind:
perception_embed = self.visual_encoder(observations)
x.append(perception_embed)
if self.goal_sensor_uuid is not None:
x.append(self.get_target_encoding(observations))
x = self._append_additional_sensors(x, observations)
x = torch.cat(x, dim=-1) # t x n x -1
x, rnn_hidden_states = self.state_encoder(x, rnn_hidden_states, masks)
return x, rnn_hidden_states
@baseline_registry.register_policy
class ObjectNavBaselinePolicy(Policy):
def __init__(
self,
observation_space,
action_space,
goal_sensor_uuid=None,
hidden_size=512,
**kwargs,
):
super().__init__(
BaselineNet(
observation_space=observation_space,
hidden_size=hidden_size,
goal_sensor_uuid=goal_sensor_uuid,
additional_sensors=["gps", "compass"]
),
action_space.n,
)
def _initialize_goal_encoder(self, observation_space):
self._n_input_goal = GOAL_EMBEDDING_SIZE
goal_space = observation_space.spaces[
self.goal_sensor_uuid
]
self.goal_embedder = nn.Embedding(goal_space.high + 1, self._n_input_goal) # low is 0, high is given (see object nav task)
def get_target_encoding(self, observations):
return self.goal_embedder(observations[self.goal_sensor_uuid]) | en | 0.768273 | #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. Sequential, but with annotation for JIT compatibility in forwarding of dict # copied from sequential # The following configurations are used in the trainer to create the appropriate rollout # As well as the appropriate auxiliary task wiring # Whether to use multiple beliefs # Whether to section a single belief for auxiliary tasks, keeping a single GRU core # Whether the fusion module is an RNN (see RecurrentAttentivePolicy) # Has JIT support # Policy fuses multiple inputs # since this seems to be what running_mean_and_var is expecting # this preprocesses depth and rgb, but not semantics. we're still embedding that in our encoder # ! Permute them back, because the rest of our code expects unpermuted # Only support 2 layers max Network which passes the input image through CNN and passes through RNN. 
# low dim sensors corresponding to registered name # TODO OURS # END # if ( # IntegratedPointGoalGPSAndCompassSensor.cls_uuid # in observation_space.spaces # ): # self._n_input_goal = observation_space.spaces[ # IntegratedPointGoalGPSAndCompassSensor.cls_uuid # ].shape[0] # elif PointGoalSensor.cls_uuid in observation_space.spaces: # self._n_input_goal = observation_space.spaces[ # PointGoalSensor.cls_uuid # ].shape[0] # elif ImageGoalSensor.cls_uuid in observation_space.spaces: # goal_observation_space = spaces.Dict( # {"rgb": observation_space.spaces[ImageGoalSensor.cls_uuid]} # ) # self.goal_visual_encoder = SimpleCNN( # goal_observation_space, hidden_size # ) # self._n_input_goal = hidden_size # if IntegratedPointGoalGPSAndCompassSensor.cls_uuid in observations: # target_encoding = observations[ # IntegratedPointGoalGPSAndCompassSensor.cls_uuid # ] # elif PointGoalSensor.cls_uuid in observations: # target_encoding = observations[PointGoalSensor.cls_uuid] # elif ImageGoalSensor.cls_uuid in observations: # image_goal = observations[ImageGoalSensor.cls_uuid] # target_encoding = self.goal_visual_encoder({"rgb": image_goal}) # t x n x -1 # low is 0, high is given (see object nav task) | 2.235401 | 2 |
filu/apps.py | pythonmentor/teiva-p11 | 0 | 6624430 | <filename>filu/apps.py
from django.apps import AppConfig
class FiluConfig(AppConfig):
name = "filu"
| <filename>filu/apps.py
from django.apps import AppConfig
class FiluConfig(AppConfig):
name = "filu"
| none | 1 | 1.305364 | 1 | |
dripy/survey.py | bthornton191/drilling | 0 | 6624431 | <reponame>bthornton191/drilling<gh_stars>0
import pandas
from numpy import arange
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from thornpy import plotting
class SurveyData():
"""Stores data from a well survey.
Parameters
----------
filename : str
Filename of raw survey data file. Can be in excel format or csv format.
vendor : str
Vendor that produced the file. This determines the expected format of the file. For options see the keys of :attr:`VENDOR_SETTINGS`.
Attributes
----------
x : pandas.Series
x coordinates of wellbore
y : pandas.Series
y coordinates of wellbore
z : pandas.Series
z coordinates of wellbore
tvd : pandas.Series
True vertical depth of wellbore
md : pandas.Series
Measured depth of wellbore
VENDOR_SETTINGS : dict
Class attribute containing information about how the survey file should be read depending on what vendor provided it.
"""
VENDOR_SETTINGS = {
'leam': {
'read_settings': {
'skiprows': list(arange(13)) + [14],
'header': 0,
},
'column_names': {
'x': 'N-S ',
'y': 'E-W ',
'z': 'TVD',
'md': 'MD'
}
}
}
def __init__(self, filename, vendor='leam', _corva_params=None):
if _corva_params is None:
if filename.endswith('.csv'):
self.data = pandas.read_csv(filename, **self.VENDOR_SETTINGS[vendor]['read_settings'])
elif filename.endswith('.xls') or filename.endswith('.xlsx'):
self.data = pandas.read_excel(filename, **self.VENDOR_SETTINGS[vendor]['read_settings'])
self.data.dropna(subset=[self.VENDOR_SETTINGS[vendor]['column_names']['md']], inplace=True)
self.x = self.data[self.VENDOR_SETTINGS[vendor]['column_names']['x']]
self.y = self.data[self.VENDOR_SETTINGS[vendor]['column_names']['y']]
self.z = self.data[self.VENDOR_SETTINGS[vendor]['column_names']['z']]
self.tvd = self.data[self.VENDOR_SETTINGS[vendor]['column_names']['z']]
self.md = self.data[self.VENDOR_SETTINGS[vendor]['column_names']['md']]
else:
def plot_3d(self, figure=None):
"""Plots the survey data on a 3d
Parameters
----------
figure : matplotlib.figure.Figure, optional
An existing (the default is None, which [default_description])
Returns
-------
matplotlib.figure.Figure
3D plot of wellbore
"""
figure = plotting.plot_3d(x=self.x-self.x[0], y=self.y-self.y[0], z=-self.z, x_label='X (ft)', y_label='Y (ft)', z_label='Z (ft)', figure=figure)
return figure
| import pandas
from numpy import arange
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from thornpy import plotting
class SurveyData():
"""Stores data from a well survey.
Parameters
----------
filename : str
Filename of raw survey data file. Can be in excel format or csv format.
vendor : str
Vendor that produced the file. This determines the expected format of the file. For options see the keys of :attr:`VENDOR_SETTINGS`.
Attributes
----------
x : pandas.Series
x coordinates of wellbore
y : pandas.Series
y coordinates of wellbore
z : pandas.Series
z coordinates of wellbore
tvd : pandas.Series
True vertical depth of wellbore
md : pandas.Series
Measured depth of wellbore
VENDOR_SETTINGS : dict
Class attribute containing information about how the survey file should be read depending on what vendor provided it.
"""
VENDOR_SETTINGS = {
'leam': {
'read_settings': {
'skiprows': list(arange(13)) + [14],
'header': 0,
},
'column_names': {
'x': 'N-S ',
'y': 'E-W ',
'z': 'TVD',
'md': 'MD'
}
}
}
def __init__(self, filename, vendor='leam', _corva_params=None):
if _corva_params is None:
if filename.endswith('.csv'):
self.data = pandas.read_csv(filename, **self.VENDOR_SETTINGS[vendor]['read_settings'])
elif filename.endswith('.xls') or filename.endswith('.xlsx'):
self.data = pandas.read_excel(filename, **self.VENDOR_SETTINGS[vendor]['read_settings'])
self.data.dropna(subset=[self.VENDOR_SETTINGS[vendor]['column_names']['md']], inplace=True)
self.x = self.data[self.VENDOR_SETTINGS[vendor]['column_names']['x']]
self.y = self.data[self.VENDOR_SETTINGS[vendor]['column_names']['y']]
self.z = self.data[self.VENDOR_SETTINGS[vendor]['column_names']['z']]
self.tvd = self.data[self.VENDOR_SETTINGS[vendor]['column_names']['z']]
self.md = self.data[self.VENDOR_SETTINGS[vendor]['column_names']['md']]
else:
def plot_3d(self, figure=None):
"""Plots the survey data on a 3d
Parameters
----------
figure : matplotlib.figure.Figure, optional
An existing (the default is None, which [default_description])
Returns
-------
matplotlib.figure.Figure
3D plot of wellbore
"""
figure = plotting.plot_3d(x=self.x-self.x[0], y=self.y-self.y[0], z=-self.z, x_label='X (ft)', y_label='Y (ft)', z_label='Z (ft)', figure=figure)
return figure | en | 0.513307 | Stores data from a well survey. Parameters ---------- filename : str Filename of raw survey data file. Can be in excel format or csv format. vendor : str Vendor that produced the file. This determines the expected format of the file. For options see the keys of :attr:`VENDOR_SETTINGS`. Attributes ---------- x : pandas.Series x coordinates of wellbore y : pandas.Series y coordinates of wellbore z : pandas.Series z coordinates of wellbore tvd : pandas.Series True vertical depth of wellbore md : pandas.Series Measured depth of wellbore VENDOR_SETTINGS : dict Class attribute containing information about how the survey file should be read depending on what vendor provided it. Plots the survey data on a 3d Parameters ---------- figure : matplotlib.figure.Figure, optional An existing (the default is None, which [default_description]) Returns ------- matplotlib.figure.Figure 3D plot of wellbore | 3.040629 | 3 |
djangoserver/server/logic/graph/test_example.py | simonneuville/runamic_server | 1 | 6624432 | from django.test import TestCase
from server.logic.graph.graph import Graph
from server.logic.graph.poison import PoisonedGraph
class TestExample(TestCase):
"""
Example of how to use this graph library
Please read this carefully if you're going to work with it.
"""
"""
While this example is still useful in understanding how the graph works,
the example itself does not work anymore, since the ffi boundary screwed
dynamic graph entries.
Check the Rust tests for more information.
"""
def example(self):
nodes = [(0, "A"), (1, "B"), (2, "C"), (3, "D")]
edges = [(0, "AB", 1), (1, "BC", 2), (2, "CA", 0), (0, "AD", 3)]
graph = Graph(nodes, edges)
self.assertEquals(graph.get(0), "A")
self.assertEquals(graph.get(1), "B")
self.assertEquals(graph.get_conn_idval(0), [(1, "AB"), (3, "AD")])
self.assertEquals(graph.get_edges(0), ["AB", "AD"])
self.assertEquals(graph.get_connids(0), [1, 3])
self.assertEquals(sorted(graph.list_ids()), [0, 1, 2, 3])
generator = graph.gen_dijkstra(0, lambda _: 1, lambda x: -len(x))
collect = [(graph.get(node_id), length)
for node_id, length, _ in generator]
self.assertEquals(collect,
[("A", 0), ("B", -2), ("D", -2), ("C", -4)])
graph = graph.map_graph(lambda n: n * 2, lambda e: e * 3)
self.assertEquals(graph.get(0), "AA")
self.assertEquals(graph.get_edges(0), ["ABABAB", "ADADAD"])
nodes = [(0, "A")]
edges = [(0, "AD", 3)]
poison = PoisonedGraph(nodes, edges, graph)
self.assertEquals(poison.get(0), "A")
self.assertEquals(poison.get(1), "BB")
generator = poison.gen_dijkstra(0, lambda _: 1, lambda x: -len(x))
collect = [(poison.get(node_id), length)
for node_id, length, _ in generator]
self.assertEquals(collect,
[("A", 0), ("BB", -6), ("DD", -2), ("CC", -12)])
generator = poison.gen_dijkstra(0, lambda x: len(x), lambda x: -len(x))
collect = [(poison.get(node_id), length)
for node_id, length, _ in generator]
self.assertEquals(collect,
[("A", 0), ("DD", -2), ("BB", -6), ("CC", -12)])
| from django.test import TestCase
from server.logic.graph.graph import Graph
from server.logic.graph.poison import PoisonedGraph
class TestExample(TestCase):
"""
Example of how to use this graph library
Please read this carefully if you're going to work with it.
"""
"""
While this example is still useful in understanding how the graph works,
the example itself does not work anymore, since the ffi boundary screwed
dynamic graph entries.
Check the Rust tests for more information.
"""
def example(self):
nodes = [(0, "A"), (1, "B"), (2, "C"), (3, "D")]
edges = [(0, "AB", 1), (1, "BC", 2), (2, "CA", 0), (0, "AD", 3)]
graph = Graph(nodes, edges)
self.assertEquals(graph.get(0), "A")
self.assertEquals(graph.get(1), "B")
self.assertEquals(graph.get_conn_idval(0), [(1, "AB"), (3, "AD")])
self.assertEquals(graph.get_edges(0), ["AB", "AD"])
self.assertEquals(graph.get_connids(0), [1, 3])
self.assertEquals(sorted(graph.list_ids()), [0, 1, 2, 3])
generator = graph.gen_dijkstra(0, lambda _: 1, lambda x: -len(x))
collect = [(graph.get(node_id), length)
for node_id, length, _ in generator]
self.assertEquals(collect,
[("A", 0), ("B", -2), ("D", -2), ("C", -4)])
graph = graph.map_graph(lambda n: n * 2, lambda e: e * 3)
self.assertEquals(graph.get(0), "AA")
self.assertEquals(graph.get_edges(0), ["ABABAB", "ADADAD"])
nodes = [(0, "A")]
edges = [(0, "AD", 3)]
poison = PoisonedGraph(nodes, edges, graph)
self.assertEquals(poison.get(0), "A")
self.assertEquals(poison.get(1), "BB")
generator = poison.gen_dijkstra(0, lambda _: 1, lambda x: -len(x))
collect = [(poison.get(node_id), length)
for node_id, length, _ in generator]
self.assertEquals(collect,
[("A", 0), ("BB", -6), ("DD", -2), ("CC", -12)])
generator = poison.gen_dijkstra(0, lambda x: len(x), lambda x: -len(x))
collect = [(poison.get(node_id), length)
for node_id, length, _ in generator]
self.assertEquals(collect,
[("A", 0), ("DD", -2), ("BB", -6), ("CC", -12)])
| en | 0.955293 | Example of how to use this graph library Please read this carefully if you're going to work with it. While this example is still useful in understanding how the graph works, the example itself does not work anymore, since the ffi boundary screwed dynamic graph entries. Check the Rust tests for more information. | 2.862687 | 3 |
library/periphery.py | lompal/USBIPManager | 24 | 6624433 | <filename>library/periphery.py
from library import bar, compatibility, config, ini, lang, log, queue
from os import path
from json import load
from threading import Thread, Event
from asyncio import get_event_loop, sleep
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QLabel, QWidgetAction, QAction
from paramiko import SSHClient, AutoAddPolicy, ssh_exception
# noinspection PyPep8Naming, PyMethodMayBeStatic
class Base(metaclass=config.Singleton):
    """Common base for periphery managers.

    On construction it installs instance-level generator factories
    (``_getGlobalHeading`` / ``_getLocalHeading``) which shadow the
    class-level empty generators defined below.
    """
    def __init__(self, base, ip_addr):
        self._base = base
        self._ip_addr = ip_addr
        self._dmn_config = ini.Daemon(self._base, self._ip_addr)
        self._heading = ('Global', 'Local')
        self._lang = None
        # Shadow the class-level dummy generators with scope-specific ones
        for scope in self._heading:
            setattr(self, f'_get{scope}Heading', self._get_heading(scope))

    def _get_heading(self, scope):
        """Build a generator function yielding a heading separator action."""
        def _template():
            """Yield a single widget action holding a gray, centered label."""
            caption = QLabel(getattr(self._lang, f'Action{scope}Heading'))
            caption.setStyleSheet('QLabel {color: gray;}')
            caption.setAlignment(Qt.AlignCenter)
            separator = QWidgetAction(None)
            separator.setDefaultWidget(caption)
            separator.setObjectName(f'{self._lang.__name__}{scope}')
            yield separator
        return _template

    def _getGlobalHeading(self):
        """Empty fallback generator - replaced per instance in __init__."""
        return
        yield

    def _getLocalHeading(self):
        """Empty fallback generator - replaced per instance in __init__."""
        return
        yield
# noinspection PyPep8Naming
class SSH(Base, metaclass=config.Singleton):
    """ SSH connection manager

    Maintains a single paramiko connection per daemon IP address and exposes
    open/close/exec helpers used by the other periphery managers.
    """
    def __init__(self, base, ip_addr):
        super().__init__(base, ip_addr)
        self._loop = get_event_loop()
        self._manager = queue.Manager(self._base)
        self._name = f'SSH connection : {self._ip_addr}'
        self._log = log.Manager(self._base)
        self._lang = lang.SSH
        self._ssh_param = f'{self._ip_addr}:{self._dmn_config.ssh_port}'
        # Auto-accept unknown host keys
        self._connection = SSHClient()
        self._connection.set_missing_host_key_policy(AutoAddPolicy())

    def _action_param(self, param):
        """ Switch-case structure - get action type depending on the current connection state """
        return {
            False: self._open,
            True: self.close
        }.get(param)

    def _exec(self):
        """ Open the SSH connection - daemon thread """
        try:
            self._connection.connect(
                self._ip_addr,
                self._dmn_config.ssh_port,
                self._dmn_config.ssh_usr,
                self._dmn_config.ssh_pwd
            )
        except ssh_exception.NoValidConnectionsError:
            self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.NoValidConnectionsError}')
        except ssh_exception.AuthenticationException:
            self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.AuthenticationException}')
        else:
            self._log.setSuccess(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.OpenSuccess}')
        finally:
            # BUGFIX: the event was previously set only on success, leaving
            # __open blocked on _event.wait() forever when the connection
            # attempt failed - always release the waiter
            self._event.set()

    def __open(self):
        """ Open the SSH connection - inner function

        Runs the blocking paramiko connect in a separate thread and waits on
        an Event before returning the resulting transport (None on failure).
        """
        # The event must exist before the thread starts: _exec sets it once
        # the connection attempt has finished
        self._event = Event()
        self._thread = Thread(target=self._exec, name=self._name)
        self._thread.start()
        self._event.wait()
        return self._connection.get_transport()

    def action(self):
        """ Global/Local daemon action over SSH - generator """
        for action in self._getGlobalHeading():
            yield action
        # Toggle action: open when the connection is closed, close otherwise
        _trigger = self._action_param(self.isOpen())
        _name = _trigger.__name__.strip('_')
        _icon = QIcon(f'icon/ssh_{_name}.png')
        _lang = getattr(self._lang, f'ActionGlobal{_name.capitalize()}')
        _action = QAction(_icon, f'{_lang}')
        _action.setObjectName(f'{_name}')
        _action.triggered.connect(lambda __bool: _trigger())
        yield _action
        for action in self._getLocalHeading():
            yield action
        yield from ()

    def _open(self):
        """ Open the SSH connection - calling coroutine """
        self._manager.exec(self.open, self._name)

    async def open(self):
        """ Open the SSH connection - coroutine """
        if not self._dmn_config.ssh:
            return self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.EnableRequired}')
        if self.isOpen():
            return self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.AforeOpen}')
        # Run the blocking connect in the default executor to keep the loop responsive
        return await self._loop.run_in_executor(None, self.__open)

    def close(self):
        """ Close the SSH connection """
        if not self._dmn_config.ssh:
            return self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.EnableRequired}')
        if not self.isOpen():
            return self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.AforeClose}')
        self._connection.close()
        if not self.isOpen():
            return self._log.setWarning(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.CloseSuccess}')
        return self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.CloseError}')

    def isOpen(self):
        """ Check if the SSH connection is open """
        return self._connection.get_transport() is not None

    async def establish(self, message):
        """ Check if the SSH connection is close / Establish connection """
        if not self.isOpen():
            self._log.setInformation(f'{message} {self._ip_addr} : {self._lang.ConnectionRequired}')
            await self.open()
        return self.isOpen()

    def exec(self, action):
        """ Execute a custom action over the SSH connection / Get the PID """
        if not self.isOpen():
            return None, None, None, None
        # 'echo $$' prints the shell PID; 'exec' replaces the shell with the
        # action so the printed PID belongs to the running command
        _stdin, _stdout, _stderr = self._connection.exec_command(f'echo $$; exec {action}')
        _pid = _stdout.readline().strip()
        return _pid, _stdin, _stdout, _stderr

    def kill(self, pid):
        """ Kill the long-running action over SSH connection """
        return self.exec(f'sudo kill {pid}')

    # noinspection PyMethodMayBeStatic
    def isError(self, channel):
        """ Get the exit status from the action on the daemon """
        # TODO Checking code for everything
        return channel.channel.recv_exit_status()
# noinspection PyPep8Naming
class USB(Base, metaclass=config.Singleton):
    """ USB device recharging manager

    Power-cycles individual USB hub ports (or the whole hub) on a remote
    daemon over the shared SSH connection.
    """
    def __init__(self, base, obj, ip_addr):
        super().__init__(base, ip_addr)
        self._obj = obj
        self._loop = get_event_loop()
        self._manager = queue.Manager(self._base)
        self._rchrg_name = f'USB recharge : {self._ip_addr}'
        self._glob_rchrg_name = f'USB global recharge : {self._ip_addr}'
        self._bar = bar.Manager(self._obj.progress)
        self._usb_comp = compatibility.USB(self._base, self._ip_addr)
        self._ssh = SSH(self._base, self._ip_addr)
        self._log = log.Manager(self._base)
        self._lang = lang.USB
        # (location, hole) pairs collected while building the context menu
        self._rchrg = list()

    async def __actionGlobal(self):
        """ Recharge the entire USB hub - coroutine """
        if not await self._ssh.establish(self._lang.LogSeparator):
            return self._log.setInformation(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RechargeCancel}')
        if not self._rchrg:
            # BUGFIX: guard against an empty hub configuration - previously
            # this raised ZeroDivisionError computing the progress step
            return self._log.setInformation(
                f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RechargeCancel}')
        # Progress-bar step per device and per-device settle timeout
        _ep = 100 / len(self._rchrg)
        _span = self._dmn_config.hub_cfg_tmo
        for device in self._rchrg:
            _location, _hole = device
            self.recharge(_location, _hole, _ep, False)
            await sleep(_span + 0.25)

    def __recharge(self, location, hole):
        """ Recharge the USB device - inner function

        Powers the port off, then back on, and logs the outcome based on the
        exit status of the power-on command.
        """
        self._ssh.exec(self._usb_comp.off(location, hole))
        _query = self._usb_comp.on(location, hole)
        _echo = self._ssh.exec(_query)
        if not all(_echo):
            return self._log.setError(
                f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RechargeQuery} : {_query}')
        _pid, _stdin, _stdout, _stderr = _echo
        _param = self._ssh.isError(_stdout)
        _log, _message = self._action_param(_param)
        return _log(f'{self._lang.LogSeparator} {self._ip_addr} : {_message} : ID {location} #{hole}')

    def _action_param(self, param):
        """ Switch-case structure - get message type depending on the SSH received exit status """
        # BUGFIX: default to the error pair - the previous default of 0 made
        # the tuple unpacking in __recharge crash on any exit status other
        # than 0 or 1 (e.g. a killed or missing command)
        return {
            0: (self._log.setSuccess, self._lang.RechargeSuccess),
            1: (self._log.setError, self._lang.RechargeError)
        }.get(param, (self._log.setError, self._lang.RechargeError))

    def _actionGlobal(self):
        """ Recharge the entire USB hub - calling coroutine """
        self._manager.exec(self.__actionGlobal, self._glob_rchrg_name)

    async def _recharge(self, location, hole, ep=None, hang=True):
        """ Recharge the USB device - coroutine

        :param ep: progress-bar step; None when triggered individually
        :param hang: wait for the settle timeout before returning
        """
        if not await self._ssh.establish(self._lang.LogSeparator):
            return self._log.setInformation(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RechargeCancel}')
        _span = self._dmn_config.hub_cfg_tmo
        self._bar.setRange(_span, ep, self._obj.menu)
        await self._loop.run_in_executor(None, self.__recharge, location, hole)
        if hang:
            await sleep(_span)

    def action(self):
        """ Global/Local USB device recharge action - generator """
        for action in self._getGlobalHeading():
            yield action
        _action = QAction(QIcon('icon/reload.png'), f'{self._lang.ActionGlobalRecharge}')
        _action.setEnabled(self._dmn_config.ssh)
        _action.setObjectName(f'{self._lang.__name__}')
        _action.triggered.connect(lambda __bool: self._actionGlobal())
        yield _action
        for action in self._getLocalHeading():
            yield action
        try:
            with open(path.join('hub', f'{self._dmn_config.hub_cfg}.json')) as fp:
                _hub_cfg = load(fp)
        except FileNotFoundError:
            # No hub configuration - no local recharge actions to offer
            return
        self._rchrg = list()
        for location in _hub_cfg:
            _hub_param = _hub_cfg[location]
            for hole in _hub_param:
                _active, _name = _hub_param[hole]
                if not _active:
                    continue
                _icon = QIcon('icon/reload_single.png')
                _action = QAction(_icon, f'{self._lang.ActionLocalRecharge} : ID {location} #{hole}')
                if _name:
                    _action.setText(f'{_action.text()} : [{_name}]')
                _action.setEnabled(self._dmn_config.ssh)
                _action.setObjectName(f'{location}{hole}')
                # Default-argument binding captures the current loop values
                _action.triggered.connect(
                    lambda __bool, __location=location, __hole=hole: self.recharge(__location, __hole))
                self._rchrg.append((location, hole))
                yield _action

    def recharge(self, location, hole, ep=None, hang=True):
        """ Recharge the USB device - calling coroutine """
        self._manager.exec(self._recharge, self._rchrg_name, location, hole, ep, hang)
| <filename>library/periphery.py
from library import bar, compatibility, config, ini, lang, log, queue
from os import path
from json import load
from threading import Thread, Event
from asyncio import get_event_loop, sleep
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QLabel, QWidgetAction, QAction
from paramiko import SSHClient, AutoAddPolicy, ssh_exception
# noinspection PyPep8Naming, PyMethodMayBeStatic
class Base(metaclass=config.Singleton):
    """Common base for periphery managers.

    On construction it installs instance-level generator factories
    (``_getGlobalHeading`` / ``_getLocalHeading``) which shadow the
    class-level empty generators defined below.
    """
    def __init__(self, base, ip_addr):
        self._base = base
        self._ip_addr = ip_addr
        self._dmn_config = ini.Daemon(self._base, self._ip_addr)
        self._heading = ('Global', 'Local')
        self._lang = None
        # Shadow the class-level dummy generators with scope-specific ones
        for scope in self._heading:
            setattr(self, f'_get{scope}Heading', self._get_heading(scope))

    def _get_heading(self, scope):
        """Build a generator function yielding a heading separator action."""
        def _template():
            """Yield a single widget action holding a gray, centered label."""
            caption = QLabel(getattr(self._lang, f'Action{scope}Heading'))
            caption.setStyleSheet('QLabel {color: gray;}')
            caption.setAlignment(Qt.AlignCenter)
            separator = QWidgetAction(None)
            separator.setDefaultWidget(caption)
            separator.setObjectName(f'{self._lang.__name__}{scope}')
            yield separator
        return _template

    def _getGlobalHeading(self):
        """Empty fallback generator - replaced per instance in __init__."""
        return
        yield

    def _getLocalHeading(self):
        """Empty fallback generator - replaced per instance in __init__."""
        return
        yield
# noinspection PyPep8Naming
class SSH(Base, metaclass=config.Singleton):
    """ SSH connection manager.

    Wraps one paramiko SSHClient per daemon IP; opening is done on a worker
    thread (via the asyncio default executor) so the event loop is not
    blocked, while close() is synchronous.
    """
    def __init__(self, base, ip_addr):
        super().__init__(base, ip_addr)
        self._loop = get_event_loop()
        self._manager = queue.Manager(self._base)
        self._name = f'SSH connection : {self._ip_addr}'
        self._log = log.Manager(self._base)
        self._lang = lang.SSH
        # "ip:port" string used as the prefix of every log message below.
        self._ssh_param = f'{self._ip_addr}:{self._dmn_config.ssh_port}'
        self._connection = SSHClient()
        # Accept unknown host keys automatically (no known_hosts checking).
        self._connection.set_missing_host_key_policy(AutoAddPolicy())
    def _action_param(self, param):
        """ Switch-case structure - get action type depending on the current connection state.

        :param param: current isOpen() state
        :return: bound method - _open (queue an async open) when closed,
                 close (synchronous) when open
        """
        return {
            False: self._open,
            True: self.close
        }.get(param)
    def _exec(self):
        """ Open the SSH connection - daemon thread """
        # NOTE(review): the thread is not actually created with daemon=True -
        # confirm whether that is intended (see __open).
        try:
            self._connection.connect(
                self._ip_addr,
                self._dmn_config.ssh_port,
                self._dmn_config.ssh_usr,
                self._dmn_config.ssh_pwd
            )
        except ssh_exception.NoValidConnectionsError:
            self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.NoValidConnectionsError}')
        except ssh_exception.AuthenticationException:
            self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.AuthenticationException}')
        else:
            self._log.setSuccess(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.OpenSuccess}')
        # The event is set on every path (success or handled failure), so
        # __open's wait() below can never deadlock.
        return self._event.set()
    def __open(self):
        """ Open the SSH connection - inner function.

        Blocks the executor thread until the connect attempt has finished.
        :return: the live transport, or None if the connection failed
        """
        self._event = Event()
        self._thread = Thread(target=self._exec, name=self._name)
        self._thread.start()
        self._event.wait()
        return self._connection.get_transport()
    def action(self):
        """ Global/Local daemon action over SSH - generator """
        for action in self._getGlobalHeading():
            yield action
        _trigger = self._action_param(self.isOpen())
        # '_open' -> 'open', 'close' -> 'close'; reused for the icon file name
        # and the lang attribute lookup below.
        _name = _trigger.__name__.strip('_')
        _icon = QIcon(f'icon/ssh_{_name}.png')
        _lang = getattr(self._lang, f'ActionGlobal{_name.capitalize()}')
        _action = QAction(_icon, f'{_lang}')
        _action.setObjectName(f'{_name}')
        _action.triggered.connect(lambda __bool: _trigger())
        yield _action
        for action in self._getLocalHeading():
            yield action
        yield from ()
    def _open(self):
        """ Open the SSH connection - calling coroutine """
        self._manager.exec(self.open, self._name)
    async def open(self):
        """ Open the SSH connection - coroutine """
        if not self._dmn_config.ssh:
            return self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.EnableRequired}')
        if self.isOpen():
            return self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.AforeOpen}')
        # Run the blocking connect in the default thread-pool executor.
        return await self._loop.run_in_executor(None, self.__open)
    def close(self):
        """ Close the SSH connection """
        if not self._dmn_config.ssh:
            return self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.EnableRequired}')
        if not self.isOpen():
            return self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.AforeClose}')
        self._connection.close()
        # Re-check: after a successful close the transport becomes None.
        if not self.isOpen():
            return self._log.setWarning(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.CloseSuccess}')
        return self._log.setError(f'{self._lang.LogSeparator} {self._ssh_param} : {self._lang.CloseError}')
    def isOpen(self):
        """ Check if the SSH connection is open """
        return self._connection.get_transport() is not None
    async def establish(self, message):
        """ Check if the SSH connection is close / Establish connection.

        :param message: log-message prefix supplied by the caller
        :return: True when a connection is available after the attempt
        """
        if not self.isOpen():
            self._log.setInformation(f'{message} {self._ip_addr} : {self._lang.ConnectionRequired}')
            await self.open()
        return self.isOpen()
    def exec(self, action):
        """ Execute a custom action over the SSH connection / Get the PID.

        The remote shell first echoes its own PID and then exec-replaces
        itself with `action`, so the echoed PID *is* the action's PID
        (consumed by kill()).
        :return: (pid, stdin, stdout, stderr), or four Nones when closed
        """
        if not self.isOpen():
            return None, None, None, None
        _stdin, _stdout, _stderr = self._connection.exec_command(f'echo $$; exec {action}')
        _pid = _stdout.readline().strip()
        return _pid, _stdin, _stdout, _stderr
    def kill(self, pid):
        """ Kill the long-running action over SSH connection """
        # NOTE(review): relies on passwordless sudo on the daemon - confirm.
        return self.exec(f'sudo kill {pid}')
    # noinspection PyMethodMayBeStatic
    def isError(self, channel):
        """ Get the exit status from the action on the daemon.

        Blocks until the remote command finishes (paramiko
        Channel.recv_exit_status semantics).
        """
        # TODO Checking code for everything
        return channel.channel.recv_exit_status()
# noinspection PyPep8Naming
class USB(Base, metaclass=config.Singleton):
    """ USB device recharging manager.

    Builds the recharge menu actions from the hub JSON configuration and
    power-cycles USB ports on the remote daemon through the shared SSH
    manager.
    """
    def __init__(self, base, obj, ip_addr):
        super().__init__(base, ip_addr)
        self._obj = obj  # GUI object providing .progress (bar) and .menu
        self._loop = get_event_loop()
        self._manager = queue.Manager(self._base)
        self._rchrg_name = f'USB recharge : {self._ip_addr}'
        self._glob_rchrg_name = f'USB global recharge : {self._ip_addr}'
        self._bar = bar.Manager(self._obj.progress)
        self._usb_comp = compatibility.USB(self._base, self._ip_addr)
        self._ssh = SSH(self._base, self._ip_addr)
        self._log = log.Manager(self._base)
        self._lang = lang.USB
        # (location, hole) pairs of the active ports; filled by action().
        self._rchrg = list()
    async def __actionGlobal(self):
        """ Recharge the entire USB hub - coroutine """
        if not await self._ssh.establish(self._lang.LogSeparator):
            return self._log.setInformation(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RechargeCancel}')
        if not self._rchrg:
            # No active ports enumerated yet (menu never built or empty hub
            # config) - bail out instead of dividing by zero below.
            return self._log.setInformation(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RechargeCancel}')
        _ep = 100 / len(self._rchrg)  # progress-bar share per device
        _span = self._dmn_config.hub_cfg_tmo
        for device in self._rchrg:
            _location, _hole = device
            self.recharge(_location, _hole, _ep, False)
            # Give each queued recharge its full timeout (plus a small
            # margin) before dispatching the next one.
            await sleep(_span + 0.25)
    def __recharge(self, location, hole):
        """ Recharge the USB device - inner function """
        # Power the port off, then back on; the off-query result is ignored.
        self._ssh.exec(self._usb_comp.off(location, hole))
        _query = self._usb_comp.on(location, hole)
        _echo = self._ssh.exec(_query)
        if not all(_echo):
            return self._log.setError(
                f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RechargeQuery} : {_query}')
        _pid, _stdin, _stdout, _stderr = _echo
        _param = self._ssh.isError(_stdout)  # blocks until the remote command exits
        _log, _message = self._action_param(_param)
        return _log(f'{self._lang.LogSeparator} {self._ip_addr} : {_message} : ID {location} #{hole}')
    def _action_param(self, param):
        """ Switch-case structure - get message type depending on the SSH received exit status.

        :param param: remote exit status (0 on success)
        :return: (logging method, message) tuple
        """
        # BUGFIX: the previous fallback value was the int 0, which crashed the
        # tuple unpacking in __recharge() for any exit status other than 0/1
        # (e.g. a signal-terminated command). Any non-zero status is an error.
        return {
            0: (self._log.setSuccess, self._lang.RechargeSuccess),
            1: (self._log.setError, self._lang.RechargeError)
        }.get(param, (self._log.setError, self._lang.RechargeError))
    def _actionGlobal(self):
        """ Recharge the entire USB hub - calling coroutine """
        self._manager.exec(self.__actionGlobal, self._glob_rchrg_name)
    async def _recharge(self, location, hole, ep=None, hang=True):
        """ Recharge the USB device - coroutine.

        :param ep: progress-bar increment, or None for a full bar
        :param hang: wait out the configured timeout before returning
        """
        if not await self._ssh.establish(self._lang.LogSeparator):
            return self._log.setInformation(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RechargeCancel}')
        _span = self._dmn_config.hub_cfg_tmo
        self._bar.setRange(_span, ep, self._obj.menu)
        # Run the blocking SSH round-trip off the event loop.
        await self._loop.run_in_executor(None, self.__recharge, location, hole)
        if hang:
            await sleep(_span)
    def action(self):
        """ Global/Local USB device recharge action - generator """
        for action in self._getGlobalHeading():
            yield action
        _action = QAction(QIcon('icon/reload.png'), f'{self._lang.ActionGlobalRecharge}')
        _action.setEnabled(self._dmn_config.ssh)
        _action.setObjectName(f'{self._lang.__name__}')
        _action.triggered.connect(lambda __bool: self._actionGlobal())
        yield _action
        for action in self._getLocalHeading():
            yield action
        try:
            with open(path.join('hub', f'{self._dmn_config.hub_cfg}.json')) as fp:
                _hub_cfg = load(fp)
        except FileNotFoundError:
            # Missing hub configuration: only the global actions are offered.
            return
        self._rchrg = list()
        for location in _hub_cfg:
            _hub_param = _hub_cfg[location]
            for hole in _hub_param:
                _active, _name = _hub_param[hole]
                if not _active:
                    continue
                _icon = QIcon('icon/reload_single.png')
                _action = QAction(_icon, f'{self._lang.ActionLocalRecharge} : ID {location} #{hole}')
                if _name:
                    _action.setText(f'{_action.text()} : [{_name}]')
                _action.setEnabled(self._dmn_config.ssh)
                _action.setObjectName(f'{location}{hole}')
                # Default-argument trick pins the current location/hole for
                # each lambda (late binding would reuse the last pair).
                _action.triggered.connect(
                    lambda __bool, __location=location, __hole=hole: self.recharge(__location, __hole))
                self._rchrg.append((location, hole))
                yield _action
    def recharge(self, location, hole, ep=None, hang=True):
        """ Recharge the USB device - calling coroutine """
        self._manager.exec(self._recharge, self._rchrg_name, location, hole, ep, hang)
| en | 0.666954 | # noinspection PyPep8Naming, PyMethodMayBeStatic Base class for periphery manager Function template for separator instance Peripheral action separator - generator Global peripheral action separator - dummy function to resolve reference issue Local peripheral action separator - dummy function to resolve reference issue # noinspection PyPep8Naming SSH connection manager Switch-case structure - get action type depending on the current connection state Open the SSH connection - daemon thread Open the SSH connection - inner function Global/Local daemon action over SSH - generator Open the SSH connection - calling coroutine Open the SSH connection - coroutine Close the SSH connection Check if the SSH connection is open Check if the SSH connection is close / Establish connection Execute a custom action over the SSH connection / Get the PID Kill the long-running action over SSH connection # noinspection PyMethodMayBeStatic Get the exit status from the action on the daemon # TODO Checking code for everything # noinspection PyPep8Naming USB device recharging manager Recharge the entire USB hub - coroutine Recharge the USB device - inner function #{hole}') Switch-case structure - get message type depending on the SSH received exit status Recharge the entire USB hub - calling coroutine Recharge the USB device - coroutine Global/Local USB device recharge action - generator #{hole}') Recharge the USB device - calling coroutine | 2.236718 | 2 |
main_seq2seq.py | saparina/SeaRNN-open | 47 | 6624434 | import argparse
import random
from itertools import chain
import numpy as np
import torch.backends.cudnn
import optimization
import train
from datasets.conll import ConllDataset
from datasets.nmt import NmtDataset
from datasets.ocr import OcrDataset
from evaluation.logging_utils import restore_from_checkpoint
from models import EncoderRNN, DecoderRNN
"""
Entry point.
"""
# Command-line interface: every hyper-parameter and path of the training run
# is configured here; the argparse help strings double as documentation.
parser = argparse.ArgumentParser()
# dataset
parser.add_argument('--dataset', required=True, help='ocr | conll | nmt')
parser.add_argument('--dataroot', required=True, help='Path to dataset')
parser.add_argument('--split', type=str, default='valid', help='Split (only used for NMT)')
parser.add_argument('--split_id', type=int, default=0, help='Split ID (only used for OCR)')
parser.add_argument('--revert_input_sequence', action='store_true', help='Revert input sequence')
parser.add_argument('--max_train_items', type=int, default=None, help='Training set is cropped to this number of items')
parser.add_argument('--num_buckets', type=int, default=1,
                    help='Number of buckets used to group the input data, default 1 (no buckets)')
# CONLL specific settings
parser.add_argument('--min_word_count', type=int, default=10,
                    help='Minimum number of word entries required to be in the dictionary')
parser.add_argument('--max_seq_length', type=int, default=None, help='Max length of sequences for training')
parser.add_argument('--lower_case', action='store_true', help='Put all the words to lower case')
parser.add_argument('--senna_emb', type=str, default='', help='Path to Senna embedding')
# model
parser.add_argument('--memory_size', type=int, default=128, help='RNN memory size, default=128')
parser.add_argument('--memory_size_encoder', type=int, default=None,
                    help='Memory size of RNN cells in the encoder, default - same as the decoder if not BRNN otherwise'
                         ' twice smaller')
parser.add_argument('--rnn_depth', type=int, default=1, help='Depth of RNN layers')
parser.add_argument('--bidirectional', action='store_true', help='Use bidirectional encoder')
parser.add_argument('--attention', action='store_true', help='Use attention mechanism')
parser.add_argument('--attn_type', type=str, default='matrix', help='Type of attention: matrix | sum-tanh')
parser.add_argument('--input_feed', type=int, default=0,
                    help='Feed the context vector at each time step as additional input (via concatenation with the '
                         'word embeddings) to the decoder.')
parser.add_argument('--decoder_emb_size', type=int, default=None,
                    help='Size of the decoder embedding, default - the same as the decoder hidden units')
parser.add_argument('--dropout', type=float, default=0.0, help='Dropout between RNN layers (default: 0 - no dropout)')
parser.add_argument('--target_noise_std', type=float, default=1e-5,
                    help='When selecting a target label, we add noise to the costs to break ties randomly')
parser.add_argument('--encoder_file', type=str, default='', help='File to load the encoder model')
parser.add_argument('--decoder_file', type=str, default='', help='File to load the decoder model')
parser.add_argument('--checkpoint_file', type=str, default='', help='File to load encoder, decoder, optimizer')
parser.add_argument('--decoding', type=str, default='greedy', help='Decoding algorithm: greedy|beam')
parser.add_argument('--beam_size', type=int, default=1, help='Size of the beam if using beam search for decoding')
parser.add_argument('--beam_scaling', type=float, default=1.0, help='scaling factor for output distributions')
# training approach
parser.add_argument('--loss', type=str, default='hamming',
                    help='hamming | hamming-unnorm | bleu-smoothed | bleu1-smoothed | gleu | sentence-F1')
parser.add_argument('--rollin', type=str, default='gt', help='gt | learned | mixed | mixed-cells')
parser.add_argument('--rollout', type=str, default='gt', help='gt | learned | mixed | mixed-matched | focused-costing')
parser.add_argument('--reference_policy', type=str, default='copy-gt',
                    help='copy-gt | bleu-best-suffix | bleu1-best-suffix | gleu-best-suffix | maximize-F1')
parser.add_argument('--num_cells_to_rollout', type=int, default=100, help='Number of cells to do rollout')
parser.add_argument('--objective', type=str, default='mle',
                    help='mle | target-learning | target-learning-all-labels | loss-softmax | kl | inverse_kl | js | l2 | svm-cs')
parser.add_argument('--temperature', type=int, default=1, help='Temperature used for KL, LLCAS and other losses')
# NOTE(review): temperature is parsed as int - confirm fractional temperatures
# are intentionally unsupported.
parser.add_argument('--obj_normalization', type=str, default='cell-per-seq-batch',
                    help="How to normalize training loss: 'none' | 'batch' | 'cell-global' | 'cell-per-seq-batch' | "
                         "'batch-maxlen'")
parser.add_argument('--rollin_ref_prob', type=float, default=0.5,
                    help='Probability to pick reference rollin in mixed strategies')
parser.add_argument('--rollout_ref_prob', type=float, default=0.5,
                    help='Probability to pick reference rollout in mixed strategies')
parser.add_argument('--data_sampling', type=str, default='shuffle', help='shuffle | random | fixed-order')
parser.add_argument('--scheduled_sampling', type=str, default='none', help='none | sigmoid')
parser.add_argument('--fc_initial_value', type=int, default=0, help='Amount of learned steps in FC rollouts')
parser.add_argument('--fc_increment', type=int, default=0,
                    help='Increment to the amount of learned steps in focused costing rollouts')
parser.add_argument('--fc_epoch', type=int, default=1e8,
                    help='Number of steps before increase of focused costing learned steps in rollouts')
# NOTE(review): the default above stays the *float* 1e8 (argparse applies
# type=int only to command-line strings) - confirm downstream comparisons
# tolerate a float.
# labels sampling
parser.add_argument('--sample_labels_uniform', type=int, default=30,
                    help='Number of tokens to sample uniformly, default=30 (all labels for OCR and CONLL)')
parser.add_argument('--sample_labels_policy_topk', type=int, default=5,
                    help='Number of best tokens according to the current policy')
parser.add_argument('--sample_labels_neighbors', type=int, default=2,
                    help='Number of neighboring labels (on each side) in ground truth (skipped words in nmt)')
parser.add_argument('--targeted_sampling', action='store_true', help='Whether or not to do targeted sampling')
parser.add_argument('--ts_threshold', type=float, default=0.0, help='Threshold to pick actions in targeted sampling')
parser.add_argument('--ts_max_samples', type=int, default=0,
                    help='Maximum amount of samples to pick in targeted sampling')
# optimization
parser.add_argument('--optim_method', default='adam',
                    help='Optimization method: sgd | adagrad | adadelta | adam | adamax | asgd | rmsprop | rprop')
parser.add_argument('--batch_size', type=int, default=64, help='input batch size, default=64')
parser.add_argument('--learning_rate', type=float, default=None,
                    help='learning rate, default: sgd - 0.5, adagrad - 0.01, adadelta - 1, adam - 0.001, '
                         'adamax - 0.002, asgd - 0.01, rmsprop - 0.01, rprop - 0.01')
parser.add_argument('--max_iter', type=int, default=10000, help='Number of iterations to train, default=10000')
parser.add_argument('--max_grad_norm', type=float, default=5, help='Maximum gradient norm. Renormalize if necessary.')
parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay (L2 regularization), default=0.0')
parser.add_argument('--param_init', type=float, default=0.0,
                    help='Parameters are initialized over uniform distribution with support (-param_init, param_init)')
parser.add_argument('--change_learning_rate', action='store_true', help='Change the learning rate when warm starting')
# parameters of the schedule to anneal learning rate
parser.add_argument('--anneal_learning_rate', action='store_true',
                    help='Anneal the learning rate with torch.optim.lr_scheduler.ReduceLROnPlateau strategy')
parser.add_argument('--lr_quantity_to_monitor', default='log_loss_val', type=str,
                    help='Monitor this quantity when deciding to anneal learning rate')
parser.add_argument('--lr_quantity_mode', default='min', type=str,
                    help='"min" | "max" depending on whether the quantity of interest is supposed to go up or down')
parser.add_argument('--lr_quantity_epsilon', default=1e-3, type=float,
                    help='Quantity improvement has to be at least this much to be significant '
                         '(this threshold is relative to the characteritic value)')
parser.add_argument('--lr_reduce_factor', default=0.5, type=float,
                    help='Multiply learning rate by this factor when decreasing, default: 0.5')
parser.add_argument('--lr_min_value', default=1e-5, type=float, help='The minimal value of learning rate')
parser.add_argument('--lr_patience', default=1000, type=int,
                    help='Wait for this number of steps before annealing the learning rate after previous lr decrease')
parser.add_argument('--lr_initial_patience', default=0, type=int,
                    help='Wait for this number of steps before annealing the learning rate initially '
                         '(e.g. for warm starting)')
parser.add_argument('--lr_cooldown', default=5000, type=int,
                    help='Number of calls to wait before resuming normal operation after lr has been reduced.')
parser.add_argument('--lr_quantity_smoothness', default=0, type=int,
                    help='When deciding to reduce LR use sliding window averages of this width.')
# logging
parser.add_argument('--log_path', type=str, default='', help='Where to store results and models (default: do not save)')
parser.add_argument('--print_iter', type=int, default=10, help='Print after this number of steps')
parser.add_argument('--eval_iter', type=int, default=200, help='Evaluate after this number of steps')
parser.add_argument('--save_iter', type=int, default=1000, help='Save models at these iterations')
parser.add_argument('--eval_size', type=int, default=10000, help='Max number of items for intermediate evaluations')
# misc
parser.add_argument('--cuda', type=int, default=1, help='GPU vs CPU')
parser.add_argument('--free_random_seed', action='store_true', help='Fix random seed or not.')
parser.add_argument('--random_seed', type=int, default=42, help='Random seed, default=42')
parser.add_argument('--rollout_batch_size', type=int, default=512,
                    help='Size of the batch to use for rollout computations, default 512 is safe for 12G GPUs')
opt = parser.parse_args()
# set this to use faster convolutions
# (cudnn.benchmark auto-tunes algorithms; results may be non-deterministic)
opt.cuda = torch.cuda.is_available() and opt.cuda == 1
if opt.cuda:
    torch.backends.cudnn.benchmark = True
# print all the options
print(opt)
# random seed
# Seed every RNG source (python, numpy, torch CPU, and all GPUs) unless the
# user explicitly asked for a free-running seed.
if not opt.free_random_seed:
    random.seed(opt.random_seed)
    np.random.seed(opt.random_seed)
    torch.manual_seed(opt.random_seed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.random_seed)
# load the dataset
# Each branch builds a train/validation pair and sets opt.output_fixed_size
# (whether output sequences have a fixed length for this task).
if opt.dataset == 'ocr':
    train_set = OcrDataset(opt.dataroot, opt.batch_size, is_train=True, split_id=opt.split_id,
                           num_buckets=opt.num_buckets, revert_input_sequence=opt.revert_input_sequence,
                           max_num_items=opt.max_train_items)
    val_set = OcrDataset(opt.dataroot, opt.batch_size, is_train=False, split_id=opt.split_id,
                         revert_input_sequence=opt.revert_input_sequence)
    opt.output_fixed_size = True
elif opt.dataset == 'conll':
    train_set = ConllDataset(opt.dataroot, opt.batch_size, is_train=True,
                             min_word_count=opt.min_word_count, max_seq_length=opt.max_seq_length,
                             num_buckets=opt.num_buckets, revert_input_sequence=opt.revert_input_sequence,
                             max_num_items=opt.max_train_items, senna_emb=opt.senna_emb)
    # Validation reuses the training dictionaries so label ids are consistent.
    val_set = ConllDataset(opt.dataroot, opt.batch_size, is_train=False, dicts=train_set.dicts,
                           revert_input_sequence=opt.revert_input_sequence)
    opt.output_fixed_size = True
elif opt.dataset == 'nmt':
    train_set = NmtDataset(opt.dataroot, opt.batch_size, split='train',
                           revert_input_sequence=opt.revert_input_sequence, input_embedding_size=opt.memory_size,
                           num_buckets=opt.num_buckets, max_num_items=opt.max_train_items)
    # NOTE(review): the --split option is not used here; validation is
    # hard-coded to 'valid' - confirm this is intended.
    val_set = NmtDataset(opt.dataroot, opt.batch_size, split='valid',
                         revert_input_sequence=opt.revert_input_sequence)
    opt.output_fixed_size = False
    opt.output_unknown_word_token = train_set.output_rare_word_token
else:
    raise (RuntimeError("Unknown dataset"))
# Maximum prediction length and a per-input length callback, consumed by the
# training loop via opt.
opt.max_pred_length = train_set.get_max_output_length()
opt.get_train_output_length = train_set.get_max_output_length_per_input
# function to init params
def init_params(model):
    """In-place uniform re-initialization of every parameter of *model*.

    Values are drawn from U(-opt.param_init, opt.param_init); reads the
    module-level `opt` namespace.
    """
    bound = opt.param_init
    for param in model.parameters():
        param.data.uniform_(-bound, bound)
# encoder
input_embedding, input_embedding_size = train_set.get_embedding_layer()
if opt.memory_size_encoder is None:
    # A bidirectional encoder concatenates both directions, so each direction
    # gets half the decoder memory to keep the combined state size matching.
    encoder_state_size = (opt.memory_size // 2) if opt.bidirectional else opt.memory_size
else:
    encoder_state_size = opt.memory_size_encoder
encoder = EncoderRNN(input_embedding_size, encoder_state_size, num_layers=opt.rnn_depth,
                     input_embedding=input_embedding, bidirectional=opt.bidirectional, dropout=opt.dropout)
print(encoder)
# param_init defaults to 0.0 (falsy): custom re-initialization only happens
# when the user explicitly sets a positive bound.
if opt.param_init:
    init_params(encoder)
# decoder
decoder = DecoderRNN(opt.memory_size, train_set.num_output_labels, train_set.output_end_of_string_token,
                     num_layers=opt.rnn_depth, emb_size=opt.decoder_emb_size, use_attention=opt.attention,
                     encoder_state_size=encoder_state_size, bidirectional_encoder=opt.bidirectional,
                     dropout=opt.dropout, input_feed=opt.input_feed, attn_type=opt.attn_type)
print(decoder)
if opt.param_init:
    init_params(decoder)
# optimizer
def good_params(model):
    """Return an iterator over the trainable (requires_grad) parameters of *model*."""
    return (param for param in model.parameters() if param.requires_grad)
# `parameters` is a lazy, single-use iterator over the trainable parameters of
# both models; it is consumed exactly once by create_optimizer below.
parameters = chain(good_params(encoder), good_params(decoder))
optimizer = optimization.create_optimizer(parameters, opt)
# check number of parameters
# (counts *all* parameters, including frozen ones such as fixed embeddings)
num_params_encoder = sum([p.nelement() for p in encoder.parameters()])
num_params_decoder = sum([p.nelement() for p in decoder.parameters()])
print('Number of parameters: encoder - %d; decoder - %d' % (num_params_encoder, num_params_decoder))
# restore from checkpoint
encoder, decoder, optimizer = restore_from_checkpoint(encoder, decoder, optimizer, opt)
# move models to GPU
# NOTE(review): the optimizer is created before .cuda(); this is safe only if
# create_optimizer keeps references to the same Parameter objects that
# Module.cuda() moves in place - confirm.
if opt.cuda:
    encoder.cuda()
    decoder.cuda()
# start training
train.train_seq2seq(encoder, decoder, optimizer, train_set,
                    train_set.num_output_labels, opt, dataset_val=val_set)
| import argparse
import random
from itertools import chain
import numpy as np
import torch.backends.cudnn
import optimization
import train
from datasets.conll import ConllDataset
from datasets.nmt import NmtDataset
from datasets.ocr import OcrDataset
from evaluation.logging_utils import restore_from_checkpoint
from models import EncoderRNN, DecoderRNN
"""
Entry point.
"""
parser = argparse.ArgumentParser()
# dataset
parser.add_argument('--dataset', required=True, help='ocr | conll | nmt')
parser.add_argument('--dataroot', required=True, help='Path to dataset')
parser.add_argument('--split', type=str, default='valid', help='Split (only used for NMT)')
parser.add_argument('--split_id', type=int, default=0, help='Split ID (only used for OCR)')
parser.add_argument('--revert_input_sequence', action='store_true', help='Revert input sequence')
parser.add_argument('--max_train_items', type=int, default=None, help='Training set is cropped to this number of items')
parser.add_argument('--num_buckets', type=int, default=1,
help='Number of buckets used to group the input data, default 1 (no buckets)')
# CONLL specific settings
parser.add_argument('--min_word_count', type=int, default=10,
help='Minimum number of word entries required to be in the dictionary')
parser.add_argument('--max_seq_length', type=int, default=None, help='Max length of sequences for training')
parser.add_argument('--lower_case', action='store_true', help='Put all the words to lower case')
parser.add_argument('--senna_emb', type=str, default='', help='Path to Senna embedding')
# model
parser.add_argument('--memory_size', type=int, default=128, help='RNN memory size, default=128')
parser.add_argument('--memory_size_encoder', type=int, default=None,
help='Memory size of RNN cells in the encoder, default - same as the decoder if not BRNN otherwise'
' twice smaller')
parser.add_argument('--rnn_depth', type=int, default=1, help='Depth of RNN layers')
parser.add_argument('--bidirectional', action='store_true', help='Use bidirectional encoder')
parser.add_argument('--attention', action='store_true', help='Use attention mechanism')
parser.add_argument('--attn_type', type=str, default='matrix', help='Type of attention: matrix | sum-tanh')
parser.add_argument('--input_feed', type=int, default=0,
help='Feed the context vector at each time step as additional input (via concatenation with the '
'word embeddings) to the decoder.')
parser.add_argument('--decoder_emb_size', type=int, default=None,
help='Size of the decoder embedding, default - the same as the decoder hidden units')
parser.add_argument('--dropout', type=float, default=0.0, help='Dropout between RNN layers (default: 0 - no dropout)')
parser.add_argument('--target_noise_std', type=float, default=1e-5,
help='When selecting a target label, we add noise to the costs to break ties randomly')
parser.add_argument('--encoder_file', type=str, default='', help='File to load the encoder model')
parser.add_argument('--decoder_file', type=str, default='', help='File to load the decoder model')
parser.add_argument('--checkpoint_file', type=str, default='', help='File to load encoder, decoder, optimizer')
parser.add_argument('--decoding', type=str, default='greedy', help='Decoding algorithm: greedy|beam')
parser.add_argument('--beam_size', type=int, default=1, help='Size of the beam if using beam search for decoding')
parser.add_argument('--beam_scaling', type=float, default=1.0, help='scaling factor for output distributions')
# training approach
parser.add_argument('--loss', type=str, default='hamming',
help='hamming | hamming-unnorm | bleu-smoothed | bleu1-smoothed | gleu | sentence-F1')
parser.add_argument('--rollin', type=str, default='gt', help='gt | learned | mixed | mixed-cells')
parser.add_argument('--rollout', type=str, default='gt', help='gt | learned | mixed | mixed-matched | focused-costing')
parser.add_argument('--reference_policy', type=str, default='copy-gt',
help='copy-gt | bleu-best-suffix | bleu1-best-suffix | gleu-best-suffix | maximize-F1')
parser.add_argument('--num_cells_to_rollout', type=int, default=100, help='Number of cells to do rollout')
parser.add_argument('--objective', type=str, default='mle',
help='mle | target-learning | target-learning-all-labels | loss-softmax | kl | inverse_kl | js | l2 | svm-cs')
parser.add_argument('--temperature', type=int, default=1, help='Temperature used for KL, LLCAS and other losses')
parser.add_argument('--obj_normalization', type=str, default='cell-per-seq-batch',
help="How to normalize training loss: 'none' | 'batch' | 'cell-global' | 'cell-per-seq-batch' | "
"'batch-maxlen'")
parser.add_argument('--rollin_ref_prob', type=float, default=0.5,
help='Probability to pick reference rollin in mixed strategies')
parser.add_argument('--rollout_ref_prob', type=float, default=0.5,
help='Probability to pick reference rollout in mixed strategies')
parser.add_argument('--data_sampling', type=str, default='shuffle', help='shuffle | random | fixed-order')
parser.add_argument('--scheduled_sampling', type=str, default='none', help='none | sigmoid')
parser.add_argument('--fc_initial_value', type=int, default=0, help='Amount of learned steps in FC rollouts')
parser.add_argument('--fc_increment', type=int, default=0,
help='Increment to the amount of learned steps in focused costing rollouts')
parser.add_argument('--fc_epoch', type=int, default=1e8,
help='Number of steps before increase of focused costing learned steps in rollouts')
# labels sampling
parser.add_argument('--sample_labels_uniform', type=int, default=30,
help='Number of tokens to sample uniformly, default=30 (all labels for OCR and CONLL)')
parser.add_argument('--sample_labels_policy_topk', type=int, default=5,
help='Number of best tokens according to the current policy')
parser.add_argument('--sample_labels_neighbors', type=int, default=2,
help='Number of neighboring labels (on each side) in ground truth (skipped words in nmt)')
parser.add_argument('--targeted_sampling', action='store_true', help='Whether or not to do targeted sampling')
parser.add_argument('--ts_threshold', type=float, default=0.0, help='Threshold to pick actions in targeted sampling')
parser.add_argument('--ts_max_samples', type=int, default=0,
help='Maximum amount of samples to pick in targeted sampling')
# optimization
parser.add_argument('--optim_method', default='adam',
help='Optimization method: sgd | adagrad | adadelta | adam | adamax | asgd | rmsprop | rprop')
parser.add_argument('--batch_size', type=int, default=64, help='input batch size, default=64')
parser.add_argument('--learning_rate', type=float, default=None,
help='learning rate, default: sgd - 0.5, adagrad - 0.01, adadelta - 1, adam - 0.001, '
'adamax - 0.002, asgd - 0.01, rmsprop - 0.01, rprop - 0.01')
parser.add_argument('--max_iter', type=int, default=10000, help='Number of iterations to train, default=10000')
parser.add_argument('--max_grad_norm', type=float, default=5, help='Maximum gradient norm. Renormalize if necessary.')
parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay (L2 regularization), default=0.0')
parser.add_argument('--param_init', type=float, default=0.0,
help='Parameters are initialized over uniform distribution with support (-param_init, param_init)')
parser.add_argument('--change_learning_rate', action='store_true', help='Change the learning rate when warm starting')
# parameters of the schedule to anneal learning rate
parser.add_argument('--anneal_learning_rate', action='store_true',
help='Anneal the learning rate with torch.optim.lr_scheduler.ReduceLROnPlateau strategy')
parser.add_argument('--lr_quantity_to_monitor', default='log_loss_val', type=str,
help='Monitor this quantity when deciding to anneal learning rate')
parser.add_argument('--lr_quantity_mode', default='min', type=str,
help='"min" | "max" depending on whether the quantity of interest is supposed to go up or down')
parser.add_argument('--lr_quantity_epsilon', default=1e-3, type=float,
help='Quantity improvement has to be at least this much to be significant '
'(this threshold is relative to the characteritic value)')
parser.add_argument('--lr_reduce_factor', default=0.5, type=float,
help='Multiply learning rate by this factor when decreasing, default: 0.5')
parser.add_argument('--lr_min_value', default=1e-5, type=float, help='The minimal value of learning rate')
parser.add_argument('--lr_patience', default=1000, type=int,
help='Wait for this number of steps before annealing the learning rate after previous lr decrease')
parser.add_argument('--lr_initial_patience', default=0, type=int,
help='Wait for this number of steps before annealing the learning rate initially '
'(e.g. for warm starting)')
parser.add_argument('--lr_cooldown', default=5000, type=int,
help='Number of calls to wait before resuming normal operation after lr has been reduced.')
parser.add_argument('--lr_quantity_smoothness', default=0, type=int,
help='When deciding to reduce LR use sliding window averages of this width.')
# logging
parser.add_argument('--log_path', type=str, default='', help='Where to store results and models (default: do not save)')
parser.add_argument('--print_iter', type=int, default=10, help='Print after this number of steps')
parser.add_argument('--eval_iter', type=int, default=200, help='Evaluate after this number of steps')
parser.add_argument('--save_iter', type=int, default=1000, help='Save models at these iterations')
parser.add_argument('--eval_size', type=int, default=10000, help='Max number of items for intermediate evaluations')
# misc
parser.add_argument('--cuda', type=int, default=1, help='GPU vs CPU')
parser.add_argument('--free_random_seed', action='store_true', help='Fix random seed or not.')
parser.add_argument('--random_seed', type=int, default=42, help='Random seed, default=42')
parser.add_argument('--rollout_batch_size', type=int, default=512,
help='Size of the batch to use for rollout computations, default 512 is safe for 12G GPUs')
opt = parser.parse_args()
# set this to use faster convolutions
opt.cuda = torch.cuda.is_available() and opt.cuda == 1
if opt.cuda:
torch.backends.cudnn.benchmark = True
# print all the options
print(opt)
# random seed
if not opt.free_random_seed:
random.seed(opt.random_seed)
np.random.seed(opt.random_seed)
torch.manual_seed(opt.random_seed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.random_seed)
# load the dataset
if opt.dataset == 'ocr':
train_set = OcrDataset(opt.dataroot, opt.batch_size, is_train=True, split_id=opt.split_id,
num_buckets=opt.num_buckets, revert_input_sequence=opt.revert_input_sequence,
max_num_items=opt.max_train_items)
val_set = OcrDataset(opt.dataroot, opt.batch_size, is_train=False, split_id=opt.split_id,
revert_input_sequence=opt.revert_input_sequence)
opt.output_fixed_size = True
elif opt.dataset == 'conll':
train_set = ConllDataset(opt.dataroot, opt.batch_size, is_train=True,
min_word_count=opt.min_word_count, max_seq_length=opt.max_seq_length,
num_buckets=opt.num_buckets, revert_input_sequence=opt.revert_input_sequence,
max_num_items=opt.max_train_items, senna_emb=opt.senna_emb)
val_set = ConllDataset(opt.dataroot, opt.batch_size, is_train=False, dicts=train_set.dicts,
revert_input_sequence=opt.revert_input_sequence)
opt.output_fixed_size = True
elif opt.dataset == 'nmt':
train_set = NmtDataset(opt.dataroot, opt.batch_size, split='train',
revert_input_sequence=opt.revert_input_sequence, input_embedding_size=opt.memory_size,
num_buckets=opt.num_buckets, max_num_items=opt.max_train_items)
val_set = NmtDataset(opt.dataroot, opt.batch_size, split='valid',
revert_input_sequence=opt.revert_input_sequence)
opt.output_fixed_size = False
opt.output_unknown_word_token = train_set.output_rare_word_token
else:
raise (RuntimeError("Unknown dataset"))
opt.max_pred_length = train_set.get_max_output_length()
opt.get_train_output_length = train_set.get_max_output_length_per_input
# function to init params
    def init_params(model):
        """Uniformly initialize every parameter of *model* in [-param_init, param_init]."""
        for p in model.parameters():
            # In-place uniform init; the range comes from the --param_init CLI option.
            p.data.uniform_(-opt.param_init, opt.param_init)
# encoder
input_embedding, input_embedding_size = train_set.get_embedding_layer()
if opt.memory_size_encoder is None:
encoder_state_size = (opt.memory_size // 2) if opt.bidirectional else opt.memory_size
else:
encoder_state_size = opt.memory_size_encoder
encoder = EncoderRNN(input_embedding_size, encoder_state_size, num_layers=opt.rnn_depth,
input_embedding=input_embedding, bidirectional=opt.bidirectional, dropout=opt.dropout)
print(encoder)
if opt.param_init:
init_params(encoder)
# decoder
decoder = DecoderRNN(opt.memory_size, train_set.num_output_labels, train_set.output_end_of_string_token,
num_layers=opt.rnn_depth, emb_size=opt.decoder_emb_size, use_attention=opt.attention,
encoder_state_size=encoder_state_size, bidirectional_encoder=opt.bidirectional,
dropout=opt.dropout, input_feed=opt.input_feed, attn_type=opt.attn_type)
print(decoder)
if opt.param_init:
init_params(decoder)
# optimizer
    def good_params(model):
        """Return an iterator over only the trainable (requires_grad) parameters of *model*."""
        return filter(lambda p: p.requires_grad, model.parameters())
parameters = chain(good_params(encoder), good_params(decoder))
optimizer = optimization.create_optimizer(parameters, opt)
# check number of parameters
num_params_encoder = sum([p.nelement() for p in encoder.parameters()])
num_params_decoder = sum([p.nelement() for p in decoder.parameters()])
print('Number of parameters: encoder - %d; decoder - %d' % (num_params_encoder, num_params_decoder))
# restore from checkpoint
encoder, decoder, optimizer = restore_from_checkpoint(encoder, decoder, optimizer, opt)
# move models to GPU
if opt.cuda:
encoder.cuda()
decoder.cuda()
# start training
train.train_seq2seq(encoder, decoder, optimizer, train_set,
train_set.num_output_labels, opt, dataset_val=val_set)
| en | 0.612064 | Entry point. # dataset # CONLL specific settings # model # training approach # labels sampling # optimization # parameters of the schedule to anneal learning rate # logging # misc # set this to use faster convolutions # print all the options # random seed # load the dataset # function to init params # encoder # decoder # optimizer # check number of parameters # restore from checkpoint # move models to GPU # start training | 2.168174 | 2 |
libs/util/decorators.py | chicosilva/csms | 0 | 6624435 | <gh_stars>0
# coding=utf-8
def threaded(func):
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func | # coding=utf-8
def threaded(func):
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func | en | 0.644078 | # coding=utf-8 | 2.954667 | 3 |
HSM/utils/db.py | 18F/10x-MLaaS | 13 | 6624436 | <filename>HSM/utils/db.py
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, JSON, Integer, String, ForeignKey
from sqlalchemy.orm import relationship, sessionmaker
from utils.config import SQLALCHEMY_URI
Base = declarative_base()
class DataAccessLayer:
    """Container for the SQLAlchemy engine and session factory.

    Call :meth:`connect` once at startup; afterwards new sessions are
    created via ``dal.Session()``.
    """
    def __init__(self):
        # Engine is created lazily in connect(); None until then.
        self.engine = None
        self.conn_string = SQLALCHEMY_URI
    def connect(self):
        """Create the engine, create all mapped tables, and build a session factory."""
        # echo=True logs every emitted SQL statement (debug aid).
        self.engine = create_engine(self.conn_string, echo=True)
        Base.metadata.create_all(self.engine)
        self.Session = sessionmaker(bind=self.engine)
dal = DataAccessLayer()
class Data(Base):
    """ORM model for the ``data`` table; one row per record."""
    __tablename__ = 'data'
    id = Column(Integer, primary_key=True, index=True)
    # Text feature used for filtering; presumably free-form response text
    # — TODO confirm against the ingestion code.
    filter_feature = Column(String(10000), nullable=True)
    # Integer validation label/flag for this record.
    validation = Column(Integer)
    # One-to-one (uselist=False) companion row holding the JSON payload.
    support_data = relationship("SupportData", uselist=False, back_populates="data")
class SupportData(Base):
    """ORM model for the ``support_data`` table: JSON payload linked 1:1 to Data."""
    __tablename__ = 'support_data'
    id = Column(Integer, primary_key=True, index=True)
    # Arbitrary JSON blob of supporting fields for the linked Data row.
    support_data = Column(JSON)
    data_id = Column(Integer, ForeignKey('data.id'), nullable=False)
    data = relationship("Data", back_populates="support_data")
| <filename>HSM/utils/db.py
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, JSON, Integer, String, ForeignKey
from sqlalchemy.orm import relationship, sessionmaker
from utils.config import SQLALCHEMY_URI
Base = declarative_base()
class DataAccessLayer:
    """Container for the SQLAlchemy engine and session factory.

    Call :meth:`connect` once at startup; afterwards new sessions are
    created via ``dal.Session()``.
    """
    def __init__(self):
        # Engine is created lazily in connect(); None until then.
        self.engine = None
        self.conn_string = SQLALCHEMY_URI
    def connect(self):
        """Create the engine, create all mapped tables, and build a session factory."""
        # echo=True logs every emitted SQL statement (debug aid).
        self.engine = create_engine(self.conn_string, echo=True)
        Base.metadata.create_all(self.engine)
        self.Session = sessionmaker(bind=self.engine)
dal = DataAccessLayer()
class Data(Base):
    """ORM model for the ``data`` table; one row per record."""
    __tablename__ = 'data'
    id = Column(Integer, primary_key=True, index=True)
    # Text feature used for filtering; presumably free-form response text
    # — TODO confirm against the ingestion code.
    filter_feature = Column(String(10000), nullable=True)
    # Integer validation label/flag for this record.
    validation = Column(Integer)
    # One-to-one (uselist=False) companion row holding the JSON payload.
    support_data = relationship("SupportData", uselist=False, back_populates="data")
class SupportData(Base):
    """ORM model for the ``support_data`` table: JSON payload linked 1:1 to Data."""
    __tablename__ = 'support_data'
    id = Column(Integer, primary_key=True, index=True)
    # Arbitrary JSON blob of supporting fields for the linked Data row.
    support_data = Column(JSON)
    data_id = Column(Integer, ForeignKey('data.id'), nullable=False)
    data = relationship("Data", back_populates="support_data")
| none | 1 | 2.506149 | 3 | |
EigVal.py | OceanNuclear/Statistics | 0 | 6624437 | <reponame>OceanNuclear/Statistics<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as rn
from numpy import array as ary
from numpy import sqrt
from numpy.linalg import svd, eig, eigvals, inv, pinv
def set_offdiag(mat, triu, inplace=True):
    """Set the off-diagonal elements of a symmetric matrix.

    Parameters
    ----------
    mat : numpy.ndarray
        Square matrix to fill; mutated in place when ``inplace`` is True.
    triu : array_like
        Values for the upper triangle (row-major order, diagonal excluded).
        Each value is mirrored to the corresponding lower-triangle entry.
    inplace : bool, optional
        When True (default), modify and return ``mat``; otherwise work on a copy.

    Returns
    -------
    numpy.ndarray
        ``mat`` itself, or the filled copy when ``inplace`` is False.
    """
    values = ary(triu).flatten()
    # Pick the array to fill once, then run a single shared loop: the original
    # duplicated the identical loop in both branches and shadowed the builtin
    # ``ord`` as its loop variable.
    target = mat if inplace else mat.copy()
    upper_indices = ary(np.triu_indices_from(target, k=1)).T
    # zip() stops at the shorter sequence, so supplying fewer values than
    # upper-triangle slots leaves the remaining entries untouched
    # (same behaviour as the original implementation).
    for (i, j), value in zip(upper_indices, values):
        target[i, j] = value  # upper triangle
        target[j, i] = value  # mirrored lower triangle
    return target
if __name__=="__main__":
main_diag = [sqrt(1),.01]
covar_mat = np.diag(ary(main_diag, dtype=float))
set_offdiag(covar_mat, [0], inplace=True)
eigval, eigvec = eig(covar_mat)
print("eigval=", eigval)
print("eigvec=\n", eigvec)
xy = rn.multivariate_normal([0,0], covar_mat, size=1000)
x, y = xy.T
ax = plt.subplot()
ax.scatter(x,y)
ax.set_aspect(1) # equal aspect ratio
plt.show()
plt.clf() | import numpy as np
import matplotlib.pyplot as plt
import numpy.random as rn
from numpy import array as ary
from numpy import sqrt
from numpy.linalg import svd, eig, eigvals, inv, pinv
def set_offdiag(mat, triu, inplace=True):
    """Set the off-diagonal elements of a symmetric matrix.

    Parameters
    ----------
    mat : numpy.ndarray
        Square matrix to fill; mutated in place when ``inplace`` is True.
    triu : array_like
        Values for the upper triangle (row-major order, diagonal excluded).
        Each value is mirrored to the corresponding lower-triangle entry.
    inplace : bool, optional
        When True (default), modify and return ``mat``; otherwise work on a copy.

    Returns
    -------
    numpy.ndarray
        ``mat`` itself, or the filled copy when ``inplace`` is False.
    """
    values = ary(triu).flatten()
    # Pick the array to fill once, then run a single shared loop: the original
    # duplicated the identical loop in both branches and shadowed the builtin
    # ``ord`` as its loop variable.
    target = mat if inplace else mat.copy()
    upper_indices = ary(np.triu_indices_from(target, k=1)).T
    # zip() stops at the shorter sequence, so supplying fewer values than
    # upper-triangle slots leaves the remaining entries untouched
    # (same behaviour as the original implementation).
    for (i, j), value in zip(upper_indices, values):
        target[i, j] = value  # upper triangle
        target[j, i] = value  # mirrored lower triangle
    return target
if __name__=="__main__":
main_diag = [sqrt(1),.01]
covar_mat = np.diag(ary(main_diag, dtype=float))
set_offdiag(covar_mat, [0], inplace=True)
eigval, eigvec = eig(covar_mat)
print("eigval=", eigval)
print("eigvec=\n", eigvec)
xy = rn.multivariate_normal([0,0], covar_mat, size=1000)
x, y = xy.T
ax = plt.subplot()
ax.scatter(x,y)
ax.set_aspect(1) # equal aspect ratio
plt.show()
plt.clf() | en | 0.672509 | sets the off-diagonal elements of a symmetric matrix when the top triangle's values are given. # equal aspect ratio | 2.888093 | 3 |
PaddleCV/rcnn/infer.py | XiaoguangHu01/models | 4 | 6624438 | <reponame>XiaoguangHu01/models<gh_stars>1-10
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import numpy as np
from eval_helper import *
import paddle
import paddle.fluid as fluid
import reader
from utility import print_arguments, parse_args, check_gpu
import models.model_builder as model_builder
import models.resnet as resnet
from config import cfg
from data_utils import DatasetPath
def infer():
    """Run single-image inference with the R-CNN model and draw the results.

    Loads category names from the COCO validation annotations when available
    (falling back to the built-in COCO17 label map), restores pretrained
    weights, runs one image from ``cfg.image_path`` through the network and
    draws the predicted boxes (and masks when ``cfg.MASK_ON``) on the image.
    """
    try:
        from pycocotools.coco import COCO

        data_path = DatasetPath('val')
        test_list = data_path.get_file_list()
        coco_api = COCO(test_list)
        # COCO category ids are sparse; remap them to contiguous ids starting
        # at 1 (0 is reserved for the background class below).
        cat_id_to_num_id_map = {
            v: i + 1
            for i, v in enumerate(coco_api.getCatIds())
        }
        category_ids = coco_api.getCatIds()
        labels_map = {
            cat_id_to_num_id_map[item['id']]: item['name']
            for item in coco_api.loadCats(category_ids)
        }
        labels_map[0] = 'background'
    except Exception:
        # NOTE: was a bare ``except:``; catching Exception keeps the fallback
        # behaviour without swallowing KeyboardInterrupt/SystemExit.
        print("The COCO dataset or COCO API is not exist, use the default "
              "mapping of class index and real category name on COCO17.")
        assert cfg.dataset == 'coco2017'
        labels_map = coco17_labels()

    image_shape = [3, cfg.TEST.max_size, cfg.TEST.max_size]
    model = model_builder.RCNN(
        add_conv_body_func=resnet.add_ResNet50_conv4_body,
        add_roi_box_head_func=resnet.add_ResNet_roi_conv5_head,
        use_pyreader=False,
        mode='infer')
    model.build_model(image_shape)
    pred_boxes = model.eval_bbox_out()
    if cfg.MASK_ON:
        masks = model.eval_mask_out()
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    # yapf: disable
    if not os.path.exists(cfg.pretrained_model):
        raise ValueError("Model path [%s] does not exist." % (cfg.pretrained_model))

    def if_exist(var):
        # Only load variables that actually exist in the checkpoint directory.
        return os.path.exists(os.path.join(cfg.pretrained_model, var.name))
    fluid.io.load_vars(exe, cfg.pretrained_model, predicate=if_exist)
    # yapf: enable
    infer_reader = reader.infer(cfg.image_path)
    feeder = fluid.DataFeeder(place=place, feed_list=model.feeds())

    fetch_list = [pred_boxes, masks] if cfg.MASK_ON else [pred_boxes]
    data = next(infer_reader())
    # im_info carries the preprocessed image metadata needed for mask resizing.
    im_info = [data[0][1]]
    result = exe.run(fetch_list=[v.name for v in fetch_list],
                     feed=feeder.feed(data),
                     return_numpy=False)
    nmsed_out = result[0]
    image = None
    if cfg.MASK_ON:
        masks_v = result[1]
        # Convert the mask-head output into per-box segmentations and draw them.
        segms_out = segm_results(nmsed_out, masks_v, im_info)
        image = draw_mask_on_image(cfg.image_path, segms_out,
                                   cfg.draw_threshold)
    draw_bounding_box_on_image(cfg.image_path, nmsed_out, cfg.draw_threshold,
                               labels_map, image)
if __name__ == '__main__':
    args = parse_args()
    print_arguments(args)
    # Validate the --use_gpu setting against the available hardware.
    check_gpu(args.use_gpu)
    infer()
| # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import numpy as np
from eval_helper import *
import paddle
import paddle.fluid as fluid
import reader
from utility import print_arguments, parse_args, check_gpu
import models.model_builder as model_builder
import models.resnet as resnet
from config import cfg
from data_utils import DatasetPath
def infer():
    """Run single-image inference with the R-CNN model and draw the results.

    Loads category names from the COCO validation annotations when available
    (falling back to the built-in COCO17 label map), restores pretrained
    weights, runs one image from ``cfg.image_path`` through the network and
    draws the predicted boxes (and masks when ``cfg.MASK_ON``) on the image.
    """
    try:
        from pycocotools.coco import COCO

        data_path = DatasetPath('val')
        test_list = data_path.get_file_list()
        coco_api = COCO(test_list)
        # COCO category ids are sparse; remap them to contiguous ids starting
        # at 1 (0 is reserved for the background class below).
        cat_id_to_num_id_map = {
            v: i + 1
            for i, v in enumerate(coco_api.getCatIds())
        }
        category_ids = coco_api.getCatIds()
        labels_map = {
            cat_id_to_num_id_map[item['id']]: item['name']
            for item in coco_api.loadCats(category_ids)
        }
        labels_map[0] = 'background'
    except Exception:
        # NOTE: was a bare ``except:``; catching Exception keeps the fallback
        # behaviour without swallowing KeyboardInterrupt/SystemExit.
        print("The COCO dataset or COCO API is not exist, use the default "
              "mapping of class index and real category name on COCO17.")
        assert cfg.dataset == 'coco2017'
        labels_map = coco17_labels()

    image_shape = [3, cfg.TEST.max_size, cfg.TEST.max_size]
    model = model_builder.RCNN(
        add_conv_body_func=resnet.add_ResNet50_conv4_body,
        add_roi_box_head_func=resnet.add_ResNet_roi_conv5_head,
        use_pyreader=False,
        mode='infer')
    model.build_model(image_shape)
    pred_boxes = model.eval_bbox_out()
    if cfg.MASK_ON:
        masks = model.eval_mask_out()
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    # yapf: disable
    if not os.path.exists(cfg.pretrained_model):
        raise ValueError("Model path [%s] does not exist." % (cfg.pretrained_model))

    def if_exist(var):
        # Only load variables that actually exist in the checkpoint directory.
        return os.path.exists(os.path.join(cfg.pretrained_model, var.name))
    fluid.io.load_vars(exe, cfg.pretrained_model, predicate=if_exist)
    # yapf: enable
    infer_reader = reader.infer(cfg.image_path)
    feeder = fluid.DataFeeder(place=place, feed_list=model.feeds())

    fetch_list = [pred_boxes, masks] if cfg.MASK_ON else [pred_boxes]
    data = next(infer_reader())
    # im_info carries the preprocessed image metadata needed for mask resizing.
    im_info = [data[0][1]]
    result = exe.run(fetch_list=[v.name for v in fetch_list],
                     feed=feeder.feed(data),
                     return_numpy=False)
    nmsed_out = result[0]
    image = None
    if cfg.MASK_ON:
        masks_v = result[1]
        # Convert the mask-head output into per-box segmentations and draw them.
        segms_out = segm_results(nmsed_out, masks_v, im_info)
        image = draw_mask_on_image(cfg.image_path, segms_out,
                                   cfg.draw_threshold)
    draw_bounding_box_on_image(cfg.image_path, nmsed_out, cfg.draw_threshold,
                               labels_map, image)
if __name__ == '__main__':
args = parse_args()
print_arguments(args)
check_gpu(args.use_gpu)
infer() | en | 0.851415 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # yapf: disable # yapf: enable | 2.019158 | 2 |
test/io_tests/tunnel_test.py | gr0vity-dev/xknx | 0 | 6624439 | """Unit test for KNX/IP Tunnelling Request/Response."""
import asyncio
from unittest.mock import AsyncMock, Mock, patch
import pytest
from xknx import XKNX
from xknx.dpt import DPTArray
from xknx.io import Tunnel
from xknx.knxip import (
HPAI,
CEMIFrame,
ConnectRequest,
ConnectResponse,
DisconnectRequest,
DisconnectResponse,
KNXIPFrame,
TunnellingAck,
TunnellingRequest,
)
from xknx.knxip.knxip_enum import CEMIMessageCode
from xknx.telegram import IndividualAddress, Telegram, TelegramDirection
from xknx.telegram.apci import GroupValueWrite
@pytest.mark.asyncio
class TestTunnel:
"""Test class for xknx/io/Tunnel objects."""
def setup_method(self):
"""Set up test class."""
# pylint: disable=attribute-defined-outside-init
self.xknx = XKNX()
self.tg_received_mock = Mock()
self.tunnel = Tunnel(
self.xknx,
gateway_ip="192.168.1.2",
gateway_port=3671,
local_ip="192.168.1.1",
local_port=0,
telegram_received_callback=self.tg_received_mock,
auto_reconnect=False,
auto_reconnect_wait=3,
route_back=False,
)
@patch("xknx.io.Tunnel._send_tunnelling_ack")
def test_tunnel_request_received(self, send_ack_mock):
"""Test Tunnel for calling send_ack on normal frames."""
# LDataInd GroupValueWrite from 1.1.22 to to 5/1/22 with DPT9 payload 0C 3F
# communication_channel_id: 0x02 sequence_counter: 0x21
raw = bytes.fromhex("0610 0420 0017 04 02 21 00 2900bcd011162916030080 0c 3f")
_cemi = CEMIFrame(self.xknx)
_cemi.from_knx(raw[10:])
telegram = _cemi.telegram
telegram.direction = TelegramDirection.INCOMING
self.tunnel.udp_client.data_received_callback(raw, ("192.168.1.2", 3671))
self.tg_received_mock.assert_called_once_with(telegram)
send_ack_mock.assert_called_once_with(0x02, 0x21)
@patch("xknx.io.Tunnel._send_tunnelling_ack")
def test_tunnel_request_received_cemi_too_small(self, send_ack_mock):
"""Test Tunnel sending ACK for unsupported frames."""
# LDataInd T_Connect from 1.0.250 to 1.0.255 (xknx tunnel endpoint) - ETS Line-Scan
# <UnsupportedCEMIMessage description="CEMI too small. Length: 10; CEMI: 2900b06010fa10ff0080" />
# communication_channel_id: 0x02 sequence_counter: 0x81
raw = bytes.fromhex("0610 0420 0014 04 02 81 00 2900b06010fa10ff0080")
self.tunnel.udp_client.data_received_callback(raw, ("192.168.1.2", 3671))
self.tg_received_mock.assert_not_called()
send_ack_mock.assert_called_once_with(0x02, 0x81)
@patch("xknx.io.Tunnel._send_tunnelling_ack")
def test_tunnel_request_received_apci_unsupported(self, send_ack_mock):
"""Test Tunnel sending ACK for unsupported frames."""
# LDataInd Unsupported Extended APCI from 0.0.1 to 0/0/0 broadcast
# <UnsupportedCEMIMessage description="APCI not supported: 0b1111111000 in CEMI: 2900b0d0000100000103f8" />
# communication_channel_id: 0x02 sequence_counter: 0x4f
raw = bytes.fromhex("0610 0420 0015 04 02 4f 00 2900b0d0000100000103f8")
self.tunnel.udp_client.data_received_callback(raw, ("192.168.1.2", 3671))
self.tg_received_mock.assert_not_called()
send_ack_mock.assert_called_once_with(0x02, 0x4F)
async def test_tunnel_wait_for_l2_confirmation(self, time_travel):
"""Test tunnel waits for L_DATA.con before sending another L_DATA.req."""
self.tunnel.udp_client.send = Mock()
self.tunnel.communication_channel = 1
test_telegram = Telegram(payload=GroupValueWrite(DPTArray((1,))))
test_ack = KNXIPFrame.init_from_body(
TunnellingAck(self.xknx, sequence_counter=23)
)
confirmation = KNXIPFrame.init_from_body(
TunnellingRequest(
self.xknx,
communication_channel_id=1,
sequence_counter=23,
cemi=CEMIFrame.init_from_telegram(
self.xknx, test_telegram, code=CEMIMessageCode.L_DATA_CON
),
)
)
task = asyncio.create_task(self.tunnel.send_telegram(test_telegram))
await time_travel(0)
self.tunnel.udp_client.handle_knxipframe(test_ack, HPAI())
await time_travel(0)
assert not task.done()
assert self.tunnel.udp_client.send.call_count == 1
self.tunnel.udp_client.handle_knxipframe(confirmation, HPAI())
await time_travel(0)
assert task.done()
# one call for the outgoing request and one for the ACK for the confirmation
assert self.tunnel.udp_client.send.call_count == 2
await task
async def test_tunnel_connect_send_disconnect(self, time_travel):
"""Test initiating a tunnelling connection."""
local_addr = ("192.168.1.1", 12345)
gateway_control_addr = ("192.168.1.2", 3671)
gateway_data_addr = ("192.168.1.2", 56789)
self.tunnel.udp_client.connect = AsyncMock()
self.tunnel.udp_client.getsockname = Mock(return_value=local_addr)
self.tunnel.udp_client.send = Mock()
self.tunnel.udp_client.stop = AsyncMock()
# Connect
connect_request = ConnectRequest(
self.xknx,
control_endpoint=HPAI(*local_addr),
data_endpoint=HPAI(*local_addr),
)
connect_frame = KNXIPFrame.init_from_body(connect_request)
connection_task = asyncio.create_task(self.tunnel.connect())
await time_travel(0)
self.tunnel.udp_client.connect.assert_called_once()
self.tunnel.udp_client.send.assert_called_once_with(connect_frame)
connect_response_frame = KNXIPFrame.init_from_body(
ConnectResponse(
self.xknx,
communication_channel=23,
data_endpoint=HPAI(*gateway_data_addr),
identifier=7,
)
)
self.tunnel.udp_client.handle_knxipframe(
connect_response_frame, gateway_control_addr
)
await connection_task
assert self.tunnel._data_endpoint_addr == gateway_data_addr
assert self.tunnel._src_address == IndividualAddress(7)
# Send - use data endpoint
self.tunnel.udp_client.send.reset_mock()
test_telegram = Telegram(payload=GroupValueWrite(DPTArray((1,))))
test_telegram_frame = KNXIPFrame.init_from_body(
TunnellingRequest(
self.xknx,
communication_channel_id=23,
sequence_counter=0,
cemi=CEMIFrame.init_from_telegram(
self.xknx,
test_telegram,
code=CEMIMessageCode.L_DATA_REQ,
src_addr=IndividualAddress(7),
),
)
)
asyncio.create_task(self.tunnel.send_telegram(test_telegram))
await time_travel(0)
self.tunnel.udp_client.send.assert_called_once_with(
test_telegram_frame, addr=gateway_data_addr
)
# skip ack and confirmation
# Disconnect
self.tunnel.udp_client.send.reset_mock()
disconnect_request = DisconnectRequest(
self.xknx, communication_channel_id=23, control_endpoint=HPAI(*local_addr)
)
disconnect_frame = KNXIPFrame.init_from_body(disconnect_request)
disconnection_task = asyncio.create_task(self.tunnel.disconnect())
await time_travel(0)
self.tunnel.udp_client.send.assert_called_once_with(disconnect_frame)
disconnect_response_frame = KNXIPFrame.init_from_body(
DisconnectResponse(
self.xknx,
communication_channel_id=23,
)
)
self.tunnel.udp_client.handle_knxipframe(
disconnect_response_frame, gateway_control_addr
)
await disconnection_task
assert self.tunnel._data_endpoint_addr is None
self.tunnel.udp_client.stop.assert_called_once()
| """Unit test for KNX/IP Tunnelling Request/Response."""
import asyncio
from unittest.mock import AsyncMock, Mock, patch
import pytest
from xknx import XKNX
from xknx.dpt import DPTArray
from xknx.io import Tunnel
from xknx.knxip import (
HPAI,
CEMIFrame,
ConnectRequest,
ConnectResponse,
DisconnectRequest,
DisconnectResponse,
KNXIPFrame,
TunnellingAck,
TunnellingRequest,
)
from xknx.knxip.knxip_enum import CEMIMessageCode
from xknx.telegram import IndividualAddress, Telegram, TelegramDirection
from xknx.telegram.apci import GroupValueWrite
@pytest.mark.asyncio
class TestTunnel:
"""Test class for xknx/io/Tunnel objects."""
def setup_method(self):
"""Set up test class."""
# pylint: disable=attribute-defined-outside-init
self.xknx = XKNX()
self.tg_received_mock = Mock()
self.tunnel = Tunnel(
self.xknx,
gateway_ip="192.168.1.2",
gateway_port=3671,
local_ip="192.168.1.1",
local_port=0,
telegram_received_callback=self.tg_received_mock,
auto_reconnect=False,
auto_reconnect_wait=3,
route_back=False,
)
@patch("xknx.io.Tunnel._send_tunnelling_ack")
def test_tunnel_request_received(self, send_ack_mock):
"""Test Tunnel for calling send_ack on normal frames."""
# LDataInd GroupValueWrite from 1.1.22 to to 5/1/22 with DPT9 payload 0C 3F
# communication_channel_id: 0x02 sequence_counter: 0x21
raw = bytes.fromhex("0610 0420 0017 04 02 21 00 2900bcd011162916030080 0c 3f")
_cemi = CEMIFrame(self.xknx)
_cemi.from_knx(raw[10:])
telegram = _cemi.telegram
telegram.direction = TelegramDirection.INCOMING
self.tunnel.udp_client.data_received_callback(raw, ("192.168.1.2", 3671))
self.tg_received_mock.assert_called_once_with(telegram)
send_ack_mock.assert_called_once_with(0x02, 0x21)
@patch("xknx.io.Tunnel._send_tunnelling_ack")
def test_tunnel_request_received_cemi_too_small(self, send_ack_mock):
"""Test Tunnel sending ACK for unsupported frames."""
# LDataInd T_Connect from 1.0.250 to 1.0.255 (xknx tunnel endpoint) - ETS Line-Scan
# <UnsupportedCEMIMessage description="CEMI too small. Length: 10; CEMI: 2900b06010fa10ff0080" />
# communication_channel_id: 0x02 sequence_counter: 0x81
raw = bytes.fromhex("0610 0420 0014 04 02 81 00 2900b06010fa10ff0080")
self.tunnel.udp_client.data_received_callback(raw, ("192.168.1.2", 3671))
self.tg_received_mock.assert_not_called()
send_ack_mock.assert_called_once_with(0x02, 0x81)
@patch("xknx.io.Tunnel._send_tunnelling_ack")
def test_tunnel_request_received_apci_unsupported(self, send_ack_mock):
"""Test Tunnel sending ACK for unsupported frames."""
# LDataInd Unsupported Extended APCI from 0.0.1 to 0/0/0 broadcast
# <UnsupportedCEMIMessage description="APCI not supported: 0b1111111000 in CEMI: 2900b0d0000100000103f8" />
# communication_channel_id: 0x02 sequence_counter: 0x4f
raw = bytes.fromhex("0610 0420 0015 04 02 4f 00 2900b0d0000100000103f8")
self.tunnel.udp_client.data_received_callback(raw, ("192.168.1.2", 3671))
self.tg_received_mock.assert_not_called()
send_ack_mock.assert_called_once_with(0x02, 0x4F)
async def test_tunnel_wait_for_l2_confirmation(self, time_travel):
"""Test tunnel waits for L_DATA.con before sending another L_DATA.req."""
self.tunnel.udp_client.send = Mock()
self.tunnel.communication_channel = 1
test_telegram = Telegram(payload=GroupValueWrite(DPTArray((1,))))
test_ack = KNXIPFrame.init_from_body(
TunnellingAck(self.xknx, sequence_counter=23)
)
confirmation = KNXIPFrame.init_from_body(
TunnellingRequest(
self.xknx,
communication_channel_id=1,
sequence_counter=23,
cemi=CEMIFrame.init_from_telegram(
self.xknx, test_telegram, code=CEMIMessageCode.L_DATA_CON
),
)
)
task = asyncio.create_task(self.tunnel.send_telegram(test_telegram))
await time_travel(0)
self.tunnel.udp_client.handle_knxipframe(test_ack, HPAI())
await time_travel(0)
assert not task.done()
assert self.tunnel.udp_client.send.call_count == 1
self.tunnel.udp_client.handle_knxipframe(confirmation, HPAI())
await time_travel(0)
assert task.done()
# one call for the outgoing request and one for the ACK for the confirmation
assert self.tunnel.udp_client.send.call_count == 2
await task
async def test_tunnel_connect_send_disconnect(self, time_travel):
"""Test initiating a tunnelling connection."""
local_addr = ("192.168.1.1", 12345)
gateway_control_addr = ("192.168.1.2", 3671)
gateway_data_addr = ("192.168.1.2", 56789)
self.tunnel.udp_client.connect = AsyncMock()
self.tunnel.udp_client.getsockname = Mock(return_value=local_addr)
self.tunnel.udp_client.send = Mock()
self.tunnel.udp_client.stop = AsyncMock()
# Connect
connect_request = ConnectRequest(
self.xknx,
control_endpoint=HPAI(*local_addr),
data_endpoint=HPAI(*local_addr),
)
connect_frame = KNXIPFrame.init_from_body(connect_request)
connection_task = asyncio.create_task(self.tunnel.connect())
await time_travel(0)
self.tunnel.udp_client.connect.assert_called_once()
self.tunnel.udp_client.send.assert_called_once_with(connect_frame)
connect_response_frame = KNXIPFrame.init_from_body(
ConnectResponse(
self.xknx,
communication_channel=23,
data_endpoint=HPAI(*gateway_data_addr),
identifier=7,
)
)
self.tunnel.udp_client.handle_knxipframe(
connect_response_frame, gateway_control_addr
)
await connection_task
assert self.tunnel._data_endpoint_addr == gateway_data_addr
assert self.tunnel._src_address == IndividualAddress(7)
# Send - use data endpoint
self.tunnel.udp_client.send.reset_mock()
test_telegram = Telegram(payload=GroupValueWrite(DPTArray((1,))))
test_telegram_frame = KNXIPFrame.init_from_body(
TunnellingRequest(
self.xknx,
communication_channel_id=23,
sequence_counter=0,
cemi=CEMIFrame.init_from_telegram(
self.xknx,
test_telegram,
code=CEMIMessageCode.L_DATA_REQ,
src_addr=IndividualAddress(7),
),
)
)
asyncio.create_task(self.tunnel.send_telegram(test_telegram))
await time_travel(0)
self.tunnel.udp_client.send.assert_called_once_with(
test_telegram_frame, addr=gateway_data_addr
)
# skip ack and confirmation
# Disconnect
self.tunnel.udp_client.send.reset_mock()
disconnect_request = DisconnectRequest(
self.xknx, communication_channel_id=23, control_endpoint=HPAI(*local_addr)
)
disconnect_frame = KNXIPFrame.init_from_body(disconnect_request)
disconnection_task = asyncio.create_task(self.tunnel.disconnect())
await time_travel(0)
self.tunnel.udp_client.send.assert_called_once_with(disconnect_frame)
disconnect_response_frame = KNXIPFrame.init_from_body(
DisconnectResponse(
self.xknx,
communication_channel_id=23,
)
)
self.tunnel.udp_client.handle_knxipframe(
disconnect_response_frame, gateway_control_addr
)
await disconnection_task
assert self.tunnel._data_endpoint_addr is None
self.tunnel.udp_client.stop.assert_called_once()
| en | 0.691568 | Unit test for KNX/IP Tunnelling Request/Response. Test class for xknx/io/Tunnel objects. Set up test class. # pylint: disable=attribute-defined-outside-init Test Tunnel for calling send_ack on normal frames. # LDataInd GroupValueWrite from 1.1.22 to to 5/1/22 with DPT9 payload 0C 3F # communication_channel_id: 0x02 sequence_counter: 0x21 Test Tunnel sending ACK for unsupported frames. # LDataInd T_Connect from 1.0.250 to 1.0.255 (xknx tunnel endpoint) - ETS Line-Scan # <UnsupportedCEMIMessage description="CEMI too small. Length: 10; CEMI: 2900b06010fa10ff0080" /> # communication_channel_id: 0x02 sequence_counter: 0x81 Test Tunnel sending ACK for unsupported frames. # LDataInd Unsupported Extended APCI from 0.0.1 to 0/0/0 broadcast # <UnsupportedCEMIMessage description="APCI not supported: 0b1111111000 in CEMI: 2900b0d0000100000103f8" /> # communication_channel_id: 0x02 sequence_counter: 0x4f Test tunnel waits for L_DATA.con before sending another L_DATA.req. # one call for the outgoing request and one for the ACK for the confirmation Test initiating a tunnelling connection. # Connect # Send - use data endpoint # skip ack and confirmation # Disconnect | 2.38469 | 2 |
Example_Reddit_Bot2_Crosspost.py | Roibal/Reddit-Twitter-Social-Media-Bots | 15 | 6624440 | """
The purpose of this bot is to repost high-quality content to various subreddits
"""
import praw
import time
import random
#Create Bot1 with login and private key - username - password
bot1 = praw.Reddit(user_agent='crosspost_bot1 v0.1', client_id='', client_secret='',
username='', password='')
#Create List of topics to search for (cryptocurrency)
crypto_subject_list = ['Bitcoin', 'BTC', 'Binance', 'Ethereum', 'Litecoin', 'Cryptocurrency', 'Bitconnect']
#Create List of Subreddits to post content from
subreddit_crosspost_list = ['Bitcoin+Cryptocurrency+Cryptomarkets+Cryptotrading']
#Create List of crossposted titles
crosspost_title_list = []
#Time in seconds to pause between crossposts - 10 minutes each acct
pause_time = 60*10
#Go through posts in popular tech subreddit, if matches 'list' of words, crosspost
#Choose dedicated Subreddit (Technology/Business/Politics)
source_sub_lists = ['Cryptocurrency', 'Cryptomarkets']
def run():
    """Scan the configured subreddits and crosspost crypto-related posts.

    Prints every post title it inspects, crossposts each not-yet-seen post
    whose title contains one of the cryptocurrency keywords to a randomly
    chosen destination subreddit, and pauses between crossposts.
    """
    for subreddit_name in subreddit_crosspost_list:
        source = bot1.subreddit(subreddit_name)
        for submission in source.submissions():
            print(submission.title)
            for keyword in crypto_subject_list:
                # Case-insensitive substring match against the post title.
                if keyword.lower() not in submission.title.lower():
                    continue
                # Skip titles that were already crossposted this session.
                if submission.title in crosspost_title_list:
                    continue
                submission.crosspost(random.choice(source_sub_lists))
                print('crossposted')
                crosspost_title_list.append(submission.title)
                time.sleep(pause_time)
            time.sleep(1)
    print(crosspost_title_list)
if __name__=='__main__':
    # Start the crosspost loop when executed as a script.
    run()
| """
The purpose of this bot is to repost high-quality content to various subreddits
"""
import praw
import time
import random
#Create Bot1 with login and private key - username - password
bot1 = praw.Reddit(user_agent='crosspost_bot1 v0.1', client_id='', client_secret='',
username='', password='')
#Create List of topics to search for (cryptocurrency)
crypto_subject_list = ['Bitcoin', 'BTC', 'Binance', 'Ethereum', 'Litecoin', 'Cryptocurrency', 'Bitconnect']
#Create List of Subreddits to post content from
subreddit_crosspost_list = ['Bitcoin+Cryptocurrency+Cryptomarkets+Cryptotrading']
#Create List of crossposted titles
crosspost_title_list = []
#Time in seconds to pause between crossposts - 10 minutes each acct
pause_time = 60*10
#Go through posts in popular tech subreddit, if matches 'list' of words, crosspost
#Choose dedicated Subreddit (Technology/Business/Politics)
source_sub_lists = ['Cryptocurrency', 'Cryptomarkets']
def run():
    """Scan the configured subreddits and crosspost crypto-related posts.

    Prints every post title it inspects, crossposts each not-yet-seen post
    whose title contains one of the cryptocurrency keywords to a randomly
    chosen destination subreddit, and pauses between crossposts.
    """
    for subreddit_name in subreddit_crosspost_list:
        source = bot1.subreddit(subreddit_name)
        for submission in source.submissions():
            print(submission.title)
            for keyword in crypto_subject_list:
                # Case-insensitive substring match against the post title.
                if keyword.lower() not in submission.title.lower():
                    continue
                # Skip titles that were already crossposted this session.
                if submission.title in crosspost_title_list:
                    continue
                submission.crosspost(random.choice(source_sub_lists))
                print('crossposted')
                crosspost_title_list.append(submission.title)
                time.sleep(pause_time)
            time.sleep(1)
    print(crosspost_title_list)
if __name__=='__main__':
run()
| en | 0.904268 | The purpose of this bot is to repost high-quality content to various subreddits #Create Bot1 with login and private key - username - password #Create List of topics to search for (cryptocurrency) #Create List of Subreddits to post content from #Create List of crossposted titles #Time in seconds to pause between crossposts - 10 minutes each acct #Go through posts in popular tech subreddit, if matches 'list' of words, crosspost #Choose dedicated Subreddit (Technology/Business/Politics) #Create For Loop for words in subject list #check if word is contained in post title #If cryptocurrency-related post is found, crosspost | 3.250826 | 3 |
resizeimage/apps.py | gilvanti/resize_image | 0 | 6624441 | from django.apps import AppConfig
class ResizeimageConfig(AppConfig):
name = 'resizeimage'
| from django.apps import AppConfig
class ResizeimageConfig(AppConfig):
name = 'resizeimage'
| none | 1 | 1.170072 | 1 | |
partname_resolver/units/capacitanceTolerance.py | sakoPO/partname-resolver | 0 | 6624442 | <reponame>sakoPO/partname-resolver
from decimal import Decimal
from .range_base import RangeBase
from partname_resolver.units.capacitance import Capacitance
class Tolerance(RangeBase):
def __init__(self, tolerance_min, tolerance_max=None):
if tolerance_max is None:
if tolerance_min.find('%') != -1:
self.is_relative = True
self.min = Decimal(tolerance_min.rstrip('%')) * Decimal('-1')
if self.min > 0:
raise ValueError
self.max = abs(self.min)
else:
self.is_relative = False
self.min = Capacitance(tolerance_min)
self.max = self.min
else:
if tolerance_min.find('%') != -1 and tolerance_max.find('%') != -1:
self.is_relative = True
self.min = Decimal(tolerance_min.rstrip('%'))
self.max = Decimal(tolerance_max.rstrip('%'))
else:
self.is_relative = False
if tolerance_min[0] == '-' and tolerance_max[0] == '+':
self.min = Capacitance(tolerance_min[1:len(tolerance_min)])
self.max = Capacitance(tolerance_max[1:len(tolerance_max)])
else:
raise ValueError
def __eq__(self, other):
return self.is_relative == other.is_relative and self.min == other.min and self.max == other.max
def __repr__(self):
return self.__str__()
def __str__(self):
if self.is_relative:
if abs(self.min) == abs(self.max):
return '\u00B1' + str(abs(self.min)) + "%"
else:
return str(self.min) + "%...+" + str(self.max) + "%"
else:
if self.min == self.max:
return '\u00B1' + str(self.min)
else:
return "-" + str(self.min) + "...+" + str(self.max)
| from decimal import Decimal
from .range_base import RangeBase
from partname_resolver.units.capacitance import Capacitance
class Tolerance(RangeBase):
def __init__(self, tolerance_min, tolerance_max=None):
if tolerance_max is None:
if tolerance_min.find('%') != -1:
self.is_relative = True
self.min = Decimal(tolerance_min.rstrip('%')) * Decimal('-1')
if self.min > 0:
raise ValueError
self.max = abs(self.min)
else:
self.is_relative = False
self.min = Capacitance(tolerance_min)
self.max = self.min
else:
if tolerance_min.find('%') != -1 and tolerance_max.find('%') != -1:
self.is_relative = True
self.min = Decimal(tolerance_min.rstrip('%'))
self.max = Decimal(tolerance_max.rstrip('%'))
else:
self.is_relative = False
if tolerance_min[0] == '-' and tolerance_max[0] == '+':
self.min = Capacitance(tolerance_min[1:len(tolerance_min)])
self.max = Capacitance(tolerance_max[1:len(tolerance_max)])
else:
raise ValueError
def __eq__(self, other):
return self.is_relative == other.is_relative and self.min == other.min and self.max == other.max
def __repr__(self):
return self.__str__()
def __str__(self):
if self.is_relative:
if abs(self.min) == abs(self.max):
return '\u00B1' + str(abs(self.min)) + "%"
else:
return str(self.min) + "%...+" + str(self.max) + "%"
else:
if self.min == self.max:
return '\u00B1' + str(self.min)
else:
return "-" + str(self.min) + "...+" + str(self.max) | none | 1 | 2.850926 | 3 | |
sale_order_payment_mode/__manifest__.py | odooerpdevelopers/odoo14-addons | 1 | 6624443 | <filename>sale_order_payment_mode/__manifest__.py
# -*- coding: utf-8 -*-
{
'name': "Modo de pago en Sale Order",
'summary': """
Modulo de prueba para el curso de Odoo 14
""",
'description': """
Modulo de prueba para el curso de Odoo 14
""",
'author': "<NAME>",
'website': "https://www.agenciadigitaltresplatos.com",
'category': 'Tools',
'version': '0.1',
'depends': ['sale'],
# always loaded
'data': [
'views/sale_order_view.xml'
],
}
| <filename>sale_order_payment_mode/__manifest__.py
# -*- coding: utf-8 -*-
{
'name': "Modo de pago en Sale Order",
'summary': """
Modulo de prueba para el curso de Odoo 14
""",
'description': """
Modulo de prueba para el curso de Odoo 14
""",
'author': "<NAME>",
'website': "https://www.agenciadigitaltresplatos.com",
'category': 'Tools',
'version': '0.1',
'depends': ['sale'],
# always loaded
'data': [
'views/sale_order_view.xml'
],
}
| es | 0.843335 | # -*- coding: utf-8 -*- Modulo de prueba para el curso de Odoo 14 Modulo de prueba para el curso de Odoo 14 # always loaded | 0.751588 | 1 |
tests/testunits/testdevices/testadbdevice/testadbdevice.py | rsnakamura/oldape | 0 | 6624444 | from unittest import TestCase
from mock import MagicMock
from nose.tools import raises
from apetools.devices import adbdevice
LOG_MESSAGE = "how now frau cow"
class AdbDeviceTest(TestCase):
def setUp(self):
self.connection = MagicMock()
self.adbdevice = adbdevice.AdbDevice(self.connection)
return
def test_log(self):
self.adbdevice.log(LOG_MESSAGE)
self.connection.log.assert_called_with(LOG_MESSAGE)
return
# end class AdbDeviceTest
| from unittest import TestCase
from mock import MagicMock
from nose.tools import raises
from apetools.devices import adbdevice
LOG_MESSAGE = "how now frau cow"
class AdbDeviceTest(TestCase):
def setUp(self):
self.connection = MagicMock()
self.adbdevice = adbdevice.AdbDevice(self.connection)
return
def test_log(self):
self.adbdevice.log(LOG_MESSAGE)
self.connection.log.assert_called_with(LOG_MESSAGE)
return
# end class AdbDeviceTest
| en | 0.400913 | # end class AdbDeviceTest | 2.695653 | 3 |
pygsp/graphs/nngraphs/bunny.py | jafluri/pygsp | 341 | 6624445 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from pygsp import utils
from pygsp.graphs import NNGraph # prevent circular import in Python < 3.5
class Bunny(NNGraph):
r"""Stanford bunny (NN-graph).
References
----------
See :cite:`turk1994zippered`.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> G = graphs.Bunny()
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(121)
>>> ax2 = fig.add_subplot(122, projection='3d')
>>> _ = ax1.spy(G.W, markersize=0.1)
>>> _ = _ = G.plot(ax=ax2)
"""
def __init__(self, **kwargs):
data = utils.loadmat('pointclouds/bunny')
plotting = {
'vertex_size': 10,
'elevation': -90,
'azimuth': 90,
'distance': 8,
}
super(Bunny, self).__init__(Xin=data['bunny'],
epsilon=0.02, NNtype='radius',
center=False, rescale=False,
plotting=plotting, **kwargs)
| # -*- coding: utf-8 -*-
from pygsp import utils
from pygsp.graphs import NNGraph # prevent circular import in Python < 3.5
class Bunny(NNGraph):
r"""Stanford bunny (NN-graph).
References
----------
See :cite:`turk1994zippered`.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> G = graphs.Bunny()
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(121)
>>> ax2 = fig.add_subplot(122, projection='3d')
>>> _ = ax1.spy(G.W, markersize=0.1)
>>> _ = _ = G.plot(ax=ax2)
"""
def __init__(self, **kwargs):
data = utils.loadmat('pointclouds/bunny')
plotting = {
'vertex_size': 10,
'elevation': -90,
'azimuth': 90,
'distance': 8,
}
super(Bunny, self).__init__(Xin=data['bunny'],
epsilon=0.02, NNtype='radius',
center=False, rescale=False,
plotting=plotting, **kwargs) | en | 0.484385 | # -*- coding: utf-8 -*- # prevent circular import in Python < 3.5 Stanford bunny (NN-graph). References ---------- See :cite:`turk1994zippered`. Examples -------- >>> import matplotlib.pyplot as plt >>> G = graphs.Bunny() >>> fig = plt.figure() >>> ax1 = fig.add_subplot(121) >>> ax2 = fig.add_subplot(122, projection='3d') >>> _ = ax1.spy(G.W, markersize=0.1) >>> _ = _ = G.plot(ax=ax2) | 3.159535 | 3 |
visualization.py | naikshubham/Tensorboard-Visualization | 1 | 6624446 | import os,cv2
import numpy as np
import matplotlib.pyplot as plt
import pickle
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
tf.__version__
PATH = 'D:/Projects/tensorboard/tensorboard_printedVshandwritten/'
LOG_DIR = PATH+ 'embedding-logs/'
#metadata = os.path.join(LOG_DIR, 'metadata2.tsv')
#%%
data_path = 'D:/Office/MS/finding_sign_on_form/keras_model/new_train_data/'
images = [file for file in os.listdir(data_path) if file.endswith('.jpg') or file.endswith('.JPG')]
img_data=[]
for img in images:
input_img=cv2.imread(os.path.join(data_path, img), 0)
input_img_resize=cv2.resize(input_img,(100,32))
img_data.append(input_img_resize)
img_data = np.array(img_data)
feature_vectors = np.loadtxt('./feature_files/feature_vectors_4000_samples.txt')
print ("feature_vectors_shape:",feature_vectors.shape)
print ("num of images:",feature_vectors.shape[0])
print ("size of individual feature vector:",feature_vectors.shape[1])
num_of_samples=feature_vectors.shape[0]
num_of_samples_each_class = 2000
features = tf.Variable(feature_vectors, name='features')
y = np.ones((num_of_samples,),dtype='int64')
y[0:2000]=0
y[2000:4000]=1
names = ['handwritten','printed']
#with open(metadata, 'w') as metadata_file:
# for row in range(210):
# c = y[row]
# metadata_file.write('{}\n'.format(c))
metadata_file = open(os.path.join(LOG_DIR, 'metadata_4_classes.tsv'), 'w')
metadata_file.write('Class\tName\n')
k=2000 # num of samples in each class
j=0
#for i in range(210):
# metadata_file.write('%06d\t%s\n' % (i, names[y[i]]))
for i in range(num_of_samples):
c = names[y[i]]
if i%k==0:
j=j+1
metadata_file.write('{}\t{}\n'.format(j,c))
#metadata_file.write('%06d\t%s\n' % (j, c))
metadata_file.close()
def images_to_sprite(data):
"""Creates the sprite image along with any necessary padding
Args:
data: NxHxW[x3] tensor containing the images.
Returns:
data: Properly shaped HxWx3 image with any necessary padding.
"""
if len(data.shape) == 3:
data = np.tile(data[...,np.newaxis], (1,1,1,3))
data = data.astype(np.float32)
min = np.min(data.reshape((data.shape[0], -1)), axis=1)
data = (data.transpose(1,2,3,0) - min).transpose(3,0,1,2)
max = np.max(data.reshape((data.shape[0], -1)), axis=1)
data = (data.transpose(1,2,3,0) / max).transpose(3,0,1,2)
# Inverting the colors seems to look better for MNIST
#data = 1 - data
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = ((0, n ** 2 - data.shape[0]), (0, 0),
(0, 0)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant',
constant_values=0)
# Tile the individual thumbnails into an image.
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3)
+ tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
data = (data * 255).astype(np.uint8)
return data
#%%
sprite = images_to_sprite(img_data)
cv2.imwrite(os.path.join(LOG_DIR, 'sprite_4_classes.png'), sprite)
#scipy.misc.imsave(os.path.join(LOG_DIR, 'sprite.png'), sprite)
#%%
with tf.Session() as sess:
saver = tf.train.Saver([features])
sess.run(features.initializer)
saver.save(sess, os.path.join(LOG_DIR, 'images_4_classes.ckpt'))
config = projector.ProjectorConfig()
# One can add multiple embeddings.
embedding = config.embeddings.add()
embedding.tensor_name = features.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = os.path.join(LOG_DIR, 'metadata_4_classes.tsv')
# Comment out if you don't want sprites
embedding.sprite.image_path = os.path.join(LOG_DIR, 'sprite_4_classes.png')
embedding.sprite.single_image_dim.extend([img_data.shape[1], img_data.shape[1]])
# Saves a config file that TensorBoard will read during startup.
projector.visualize_embeddings(tf.summary.FileWriter(LOG_DIR), config)
| import os,cv2
import numpy as np
import matplotlib.pyplot as plt
import pickle
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
tf.__version__
PATH = 'D:/Projects/tensorboard/tensorboard_printedVshandwritten/'
LOG_DIR = PATH+ 'embedding-logs/'
#metadata = os.path.join(LOG_DIR, 'metadata2.tsv')
#%%
data_path = 'D:/Office/MS/finding_sign_on_form/keras_model/new_train_data/'
images = [file for file in os.listdir(data_path) if file.endswith('.jpg') or file.endswith('.JPG')]
img_data=[]
for img in images:
input_img=cv2.imread(os.path.join(data_path, img), 0)
input_img_resize=cv2.resize(input_img,(100,32))
img_data.append(input_img_resize)
img_data = np.array(img_data)
feature_vectors = np.loadtxt('./feature_files/feature_vectors_4000_samples.txt')
print ("feature_vectors_shape:",feature_vectors.shape)
print ("num of images:",feature_vectors.shape[0])
print ("size of individual feature vector:",feature_vectors.shape[1])
num_of_samples=feature_vectors.shape[0]
num_of_samples_each_class = 2000
features = tf.Variable(feature_vectors, name='features')
y = np.ones((num_of_samples,),dtype='int64')
y[0:2000]=0
y[2000:4000]=1
names = ['handwritten','printed']
#with open(metadata, 'w') as metadata_file:
# for row in range(210):
# c = y[row]
# metadata_file.write('{}\n'.format(c))
metadata_file = open(os.path.join(LOG_DIR, 'metadata_4_classes.tsv'), 'w')
metadata_file.write('Class\tName\n')
k=2000 # num of samples in each class
j=0
#for i in range(210):
# metadata_file.write('%06d\t%s\n' % (i, names[y[i]]))
for i in range(num_of_samples):
c = names[y[i]]
if i%k==0:
j=j+1
metadata_file.write('{}\t{}\n'.format(j,c))
#metadata_file.write('%06d\t%s\n' % (j, c))
metadata_file.close()
def images_to_sprite(data):
"""Creates the sprite image along with any necessary padding
Args:
data: NxHxW[x3] tensor containing the images.
Returns:
data: Properly shaped HxWx3 image with any necessary padding.
"""
if len(data.shape) == 3:
data = np.tile(data[...,np.newaxis], (1,1,1,3))
data = data.astype(np.float32)
min = np.min(data.reshape((data.shape[0], -1)), axis=1)
data = (data.transpose(1,2,3,0) - min).transpose(3,0,1,2)
max = np.max(data.reshape((data.shape[0], -1)), axis=1)
data = (data.transpose(1,2,3,0) / max).transpose(3,0,1,2)
# Inverting the colors seems to look better for MNIST
#data = 1 - data
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = ((0, n ** 2 - data.shape[0]), (0, 0),
(0, 0)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant',
constant_values=0)
# Tile the individual thumbnails into an image.
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3)
+ tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
data = (data * 255).astype(np.uint8)
return data
#%%
sprite = images_to_sprite(img_data)
cv2.imwrite(os.path.join(LOG_DIR, 'sprite_4_classes.png'), sprite)
#scipy.misc.imsave(os.path.join(LOG_DIR, 'sprite.png'), sprite)
#%%
with tf.Session() as sess:
saver = tf.train.Saver([features])
sess.run(features.initializer)
saver.save(sess, os.path.join(LOG_DIR, 'images_4_classes.ckpt'))
config = projector.ProjectorConfig()
# One can add multiple embeddings.
embedding = config.embeddings.add()
embedding.tensor_name = features.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = os.path.join(LOG_DIR, 'metadata_4_classes.tsv')
# Comment out if you don't want sprites
embedding.sprite.image_path = os.path.join(LOG_DIR, 'sprite_4_classes.png')
embedding.sprite.single_image_dim.extend([img_data.shape[1], img_data.shape[1]])
# Saves a config file that TensorBoard will read during startup.
projector.visualize_embeddings(tf.summary.FileWriter(LOG_DIR), config)
| en | 0.639644 | #metadata = os.path.join(LOG_DIR, 'metadata2.tsv') #%% #with open(metadata, 'w') as metadata_file: # for row in range(210): # c = y[row] # metadata_file.write('{}\n'.format(c)) # num of samples in each class #for i in range(210): # metadata_file.write('%06d\t%s\n' % (i, names[y[i]])) #metadata_file.write('%06d\t%s\n' % (j, c)) Creates the sprite image along with any necessary padding Args: data: NxHxW[x3] tensor containing the images. Returns: data: Properly shaped HxWx3 image with any necessary padding. # Inverting the colors seems to look better for MNIST #data = 1 - data # Tile the individual thumbnails into an image. #%% #scipy.misc.imsave(os.path.join(LOG_DIR, 'sprite.png'), sprite) #%% # One can add multiple embeddings. # Link this tensor to its metadata file (e.g. labels). # Comment out if you don't want sprites # Saves a config file that TensorBoard will read during startup. | 2.678783 | 3 |
src/luminol/__init__.py | nevinkjohn/luminol | 1,042 | 6624447 | <gh_stars>1000+
# coding=utf-8
"""
© 2015 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from luminol import exceptions
class Luminol(object):
def __init__(self, anomalies, correlations):
"""
:param list anomalies: a list of `Anomaly` objects.
`Anomaly` is defined in luminol.modules.anomaly.
:param dict correlations: a dict represents correlated metrics(`TimeSeries` object) to each anomaly.
each key-value pair looks like this:
`Anomaly` --> [metric1, metric2, metric3 ...].
"""
self.anomalies = anomalies
self.correlations = correlations
self._analyze_root_causes()
# TODO(yaguo): Replace this with valid root cause analysis.
def _analyze_root_causes(self):
"""
Conduct root cause analysis.
The first metric of the list is taken as the root cause right now.
"""
causes = {}
for a in self.anomalies:
try:
causes[a] = self.correlations[a][0]
except IndexError:
raise exceptions.InvalidDataFormat('luminol.luminol: dict correlations contains empty list.')
self.causes = causes
def get_root_causes(self):
"""
Get root causes.
:return dict: a dict represents root causes for each anomaly.
"""
return getattr(self, 'causes', None)
| # coding=utf-8
"""
© 2015 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from luminol import exceptions
class Luminol(object):
def __init__(self, anomalies, correlations):
"""
:param list anomalies: a list of `Anomaly` objects.
`Anomaly` is defined in luminol.modules.anomaly.
:param dict correlations: a dict represents correlated metrics(`TimeSeries` object) to each anomaly.
each key-value pair looks like this:
`Anomaly` --> [metric1, metric2, metric3 ...].
"""
self.anomalies = anomalies
self.correlations = correlations
self._analyze_root_causes()
# TODO(yaguo): Replace this with valid root cause analysis.
def _analyze_root_causes(self):
"""
Conduct root cause analysis.
The first metric of the list is taken as the root cause right now.
"""
causes = {}
for a in self.anomalies:
try:
causes[a] = self.correlations[a][0]
except IndexError:
raise exceptions.InvalidDataFormat('luminol.luminol: dict correlations contains empty list.')
self.causes = causes
def get_root_causes(self):
"""
Get root causes.
:return dict: a dict represents root causes for each anomaly.
"""
return getattr(self, 'causes', None) | en | 0.834887 | # coding=utf-8 © 2015 LinkedIn Corp. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. :param list anomalies: a list of `Anomaly` objects. `Anomaly` is defined in luminol.modules.anomaly. :param dict correlations: a dict represents correlated metrics(`TimeSeries` object) to each anomaly. each key-value pair looks like this: `Anomaly` --> [metric1, metric2, metric3 ...]. # TODO(yaguo): Replace this with valid root cause analysis. Conduct root cause analysis. The first metric of the list is taken as the root cause right now. Get root causes. :return dict: a dict represents root causes for each anomaly. | 2.588281 | 3 |
twitter/oauth.py | igeeker/v2ex | 161 | 6624448 | <gh_stars>100-1000
# coding=utf-8
"""
The MIT License
Copyright (c) 2007 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occured.'):
self.message = message
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
def _utf8_str(s):
"""Convert unicode to utf-8."""
if isinstance(s, unicode):
return s.encode("utf-8")
else:
return str(s)
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class OAuthConsumer(object):
"""Consumer of OAuth authentication.
OAuthConsumer is a data type that represents the identity of the Consumer
via its shared secret with the Service Provider.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
class OAuthToken(object):
"""OAuthToken is a data type that represents an End User via either an access
or request token.
key -- the token
secret -- the token secret
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
def from_string(s):
""" Returns a token from something like:
oauth_token_secret=xxx&oauth_token=xxx
"""
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
token = OAuthToken(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
class OAuthRequest(object):
"""OAuthRequest represents the request and can be serialized.
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
- oauth_verifier
... any additional parameters, as defined by the Service Provider.
"""
parameters = None # OAuth parameters.
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter(
'oauth_nonce')
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
parameters = {}
for k, v in self.parameters.iteritems():
# Ignore oauth parameters.
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
auth_header = 'OAuth realm="%s"' % realm
# Add the oauth parameters.
if self.parameters:
for k, v in self.parameters.iteritems():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
for k, v in self.parameters.iteritems()])
def to_url(self):
"""Serialize as a URL for a GET request."""
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
params = self.parameters
try:
# Exclude the signature if it exists.
del params['oauth_signature']
except:
pass
# Escape key values before sorting.
key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
for k,v in params.items()]
# Sort lexicographically, first after key, then after value.
key_values.sort()
# Combine key value pairs into a string.
return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
def get_normalized_http_method(self):
"""Uppercases the http method."""
return self.http_method.upper()
def get_normalized_http_url(self):
"""Parses the URL and rebuilds it to be scheme://host/path."""
parts = urlparse.urlparse(self.http_url)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
return '%s://%s%s' % (scheme, netloc, path)
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of build_signature."""
# Set the signature method.
self.set_parameter('oauth_signature_method',
signature_method.get_name())
# Set the signature.
self.set_parameter('oauth_signature',
self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
"""Calls the build signature method within the signature method."""
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None,
callback=None, verifier=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.callback:
parameters['oauth_callback'] = token.callback
# 1.0a support for verifier.
if verifier:
parameters['oauth_verifier'] = verifier
elif callback:
# 1.0a support for callback in the request token request.
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
    """A worker to check the validity of a request against a data store.

    Configured with an OAuthDataStore plus a mapping of signature-method
    name -> OAuthSignatureMethod; implements the service-provider side of
    OAuth 1.0/1.0a: issuing request tokens, exchanging them for access
    tokens, and verifying signed API calls.
    """

    timestamp_threshold = 300 # In seconds, five minutes.
    version = VERSION
    signature_methods = None
    data_store = None

    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}

    def set_data_store(self, data_store):
        """Replace the backing data store."""
        self.data_store = data_store

    def get_data_store(self):
        """Return the backing data store."""
        return self.data_store

    def add_signature_method(self, signature_method):
        """Register a signature method under its wire name."""
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods

    def fetch_request_token(self, oauth_request):
        """Processes a request_token request and returns the
        request token on success.
        """
        try:
            # Get the request token for authorization.
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # No token required for the initial token request.
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            try:
                callback = self.get_callback(oauth_request)
            except OAuthError:
                callback = None # 1.0, no callback specified.
            self._check_signature(oauth_request, consumer, None)
            # Fetch a new token.
            token = self.data_store.fetch_request_token(consumer, callback)
        return token

    def fetch_access_token(self, oauth_request):
        """Processes an access_token request and returns the
        access token on success.
        """
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        try:
            verifier = self._get_verifier(oauth_request)
        except OAuthError:
            verifier = None # 1.0 request without a verifier.
        # Get the request token.
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(consumer, token, verifier)
        return new_token

    def verify_request(self, oauth_request):
        """Verifies an api call and checks all the parameters."""
        # -> consumer and token
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # Get the access token.
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters

    def authorize_token(self, token, user):
        """Authorize a request token."""
        return self.data_store.authorize_request_token(token, user)

    def get_callback(self, oauth_request):
        """Get the callback URL."""
        return oauth_request.get_parameter('oauth_callback')

    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _get_version(self, oauth_request):
        """Verify the correct version request for this server."""
        try:
            version = oauth_request.get_parameter('oauth_version')
        except:
            # Absent oauth_version defaults to the protocol version.
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version

    def _get_signature_method(self, oauth_request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = oauth_request.get_parameter(
                'oauth_signature_method')
        except:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the '
                'following: %s' % (signature_method, signature_method_names))
        return signature_method

    def _get_consumer(self, oauth_request):
        """Resolve oauth_consumer_key via the data store or raise."""
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer

    def _get_token(self, oauth_request, token_type='access'):
        """Try to find the token for the provided request token key."""
        token_field = oauth_request.get_parameter('oauth_token')
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token

    def _get_verifier(self, oauth_request):
        """Return the 1.0a oauth_verifier parameter."""
        return oauth_request.get_parameter('oauth_verifier')

    def _check_signature(self, oauth_request, consumer, token):
        """Validate timestamp, nonce and signature or raise OAuthError."""
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except:
            raise OAuthError('Missing signature.')
        # Validate the signature.
        valid_sig = signature_method.check_signature(oauth_request, consumer,
            token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(
                oauth_request, consumer, token)
            raise OAuthError('Invalid signature. Expected signature base '
                'string: %s' % base)
        # NOTE(review): dead code -- the signature is recomputed here but
        # the result is never used.
        built = signature_method.build_signature(oauth_request, consumer, token)

    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = abs(now - timestamp)
        if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' %
                (timestamp, now, self.timestamp_threshold))

    def _check_nonce(self, consumer, token, nonce):
        """Verify that the nonce is uniqueish."""
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
    """Abstract consumer-side worker that executes OAuth requests.

    Concrete subclasses implement the protocol's three HTTP round-trips;
    this base class only stores the consumer/token pair.
    """

    # Class-level defaults so the attributes exist before __init__ runs.
    consumer = None
    token = None

    def __init__(self, oauth_consumer, oauth_token):
        self.consumer = oauth_consumer
        self.token = oauth_token

    def get_consumer(self):
        """Return the OAuthConsumer this client acts for."""
        return self.consumer

    def get_token(self):
        """Return the OAuthToken this client currently holds."""
        return self.token

    def fetch_request_token(self, oauth_request):
        """Obtain a request token; subclasses must override. -> OAuthToken."""
        raise NotImplementedError

    def fetch_access_token(self, oauth_request):
        """Exchange for an access token; subclasses must override. -> OAuthToken."""
        raise NotImplementedError

    def access_resource(self, oauth_request):
        """Fetch a protected resource; subclasses must override."""
        raise NotImplementedError
class OAuthDataStore(object):
    """Abstract persistence layer used to look up consumers and tokens.

    Every method must be overridden by a concrete store; the base class
    only documents the interface OAuthServer relies on.
    NOTE(review): OAuthServer._get_token calls
    ``lookup_token(token_type, token)`` with two arguments, so concrete
    stores in the wild follow that call shape rather than this signature.
    """

    def lookup_consumer(self, key):
        """Resolve a consumer key. -> OAuthConsumer."""
        raise NotImplementedError

    def lookup_token(self, oauth_consumer, token_type, token_token):
        """Resolve a request/access token. -> OAuthToken."""
        raise NotImplementedError

    def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
        """Return the nonce if it was already used, else None. -> OAuthToken."""
        raise NotImplementedError

    def fetch_request_token(self, oauth_consumer, oauth_callback):
        """Create and persist a new request token. -> OAuthToken."""
        raise NotImplementedError

    def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
        """Trade an authorized request token for an access token. -> OAuthToken."""
        raise NotImplementedError

    def authorize_request_token(self, oauth_token, user):
        """Mark a request token as authorized by *user*. -> OAuthToken."""
        raise NotImplementedError
class OAuthSignatureMethod(object):
    """Strategy interface for computing and checking request signatures."""

    def get_name(self):
        """Return the wire name of this method (e.g. 'HMAC-SHA1'). -> str."""
        raise NotImplementedError

    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        """Return the (key, raw) pair the signature is derived from."""
        raise NotImplementedError

    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        """Return the signature string for the request. -> str."""
        raise NotImplementedError

    def check_signature(self, oauth_request, consumer, token, signature):
        """Recompute the signature and compare it with the supplied one."""
        return self.build_signature(oauth_request, consumer, token) == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    """HMAC-SHA1 signatures (OAuth Core 1.0, section 9.2)."""

    def get_name(self):
        return 'HMAC-SHA1'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return (key, raw): the HMAC key and the signature base string."""
        # Base string: METHOD&URL&PARAMS, each component percent-escaped.
        raw = '&'.join((
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        ))
        # Key: consumer secret, '&', then the token secret when present.
        key = '%s&' % escape(consumer.secret)
        if token:
            key = key + escape(token.secret)
        return key, raw

    def build_signature(self, oauth_request, consumer, token):
        """Compute the base64-encoded HMAC-SHA1 digest of the base string."""
        key, raw = self.build_signature_base_string(
            oauth_request, consumer, token)
        try:
            import hashlib # Python 2.5+
            hashed = hmac.new(key, raw, hashlib.sha1)
        except:
            import sha # Pre-2.5 fallback, long deprecated.
            hashed = hmac.new(key, raw, sha)
        # b2a_base64 appends a trailing newline; drop it.
        return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signatures (OAuth Core 1.0, section 9.4): no hashing --
    the escaped shared secrets themselves act as the signature. Only safe
    over a secure transport such as HTTPS.
    """

    def get_name(self):
        """Wire name of this signature method."""
        return 'PLAINTEXT'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Concatenates the consumer key and secret."""
        # For PLAINTEXT the key and the raw string are identical:
        # consumer-secret '&' token-secret (token part optional).
        sig = '%s&' % escape(consumer.secret)
        if token:
            sig = sig + escape(token.secret)
        return sig, sig
def build_signature(self, oauth_request, consumer, token):
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
return key | # coding=utf-8
"""
The MIT License
Copyright (c) 2007 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
    """Generic exception class for OAuth failures.

    The message is stored on ``self.message`` for backward compatibility
    and is also passed to RuntimeError so that ``str(err)`` and exception
    logging show the actual error text (the previous implementation never
    called the superclass constructor, leaving ``str(err)`` empty).
    """
    def __init__(self, message='OAuth error occured.'):
        # The 'occured' misspelling is kept: it is an observable default
        # value existing callers may compare against.
        RuntimeError.__init__(self, message)
        self.message = message
def build_authenticate_header(realm=''):
    """Build the WWW-Authenticate header sent with a 401 response."""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
def escape(s):
    """Percent-escape a string for OAuth, including any '/'."""
    # Only '~' is exempt (RFC 3986 unreserved) so that reserved
    # characters such as '/' are always encoded.
    return urllib.quote(s, safe='~')
def _utf8_str(s):
    """Coerce any value to a UTF-8 byte string."""
    # Unicode text is encoded; everything else goes through str().
    if isinstance(s, unicode):
        return s.encode("utf-8")
    return str(s)
def generate_timestamp():
    """Return the current time as integer seconds since the epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Return a pseudorandom nonce of *length* decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Return a pseudorandom 1.0a verifier of *length* decimal digits."""
    return ''.join(str(random.randint(0, 9)) for _ in range(length))
class OAuthConsumer(object):
    """Consumer identity: the key/secret pair shared with the provider.

    OAuthConsumer is a data type that represents the identity of the
    Consumer via its shared secret with the Service Provider.
    """

    # Class-level defaults so the attributes always exist.
    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
class OAuthToken(object):
    """An End User credential: either a request token or an access token.

    key -- the token
    secret -- the token secret
    """

    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

    def set_callback(self, callback):
        """Attach a callback URL and flag it as confirmed (1.0a)."""
        self.callback = callback
        self.callback_confirmed = 'true'

    def set_verifier(self, verifier=None):
        """Set the 1.0a verifier, generating a random one when omitted."""
        self.verifier = generate_verifier() if verifier is None else verifier

    def get_callback_url(self):
        """Return the callback URL with oauth_verifier appended, if any."""
        if not (self.callback and self.verifier):
            return self.callback
        # Append the oauth_verifier to the existing query string.
        parts = urlparse.urlparse(self.callback)
        scheme, netloc, path, params, query, fragment = parts[:6]
        verifier_part = 'oauth_verifier=%s' % self.verifier
        query = '%s&%s' % (query, verifier_part) if query else verifier_part
        return urlparse.urlunparse((scheme, netloc, path, params,
            query, fragment))

    def to_string(self):
        """Serialize as application/x-www-form-urlencoded body text."""
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }
        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)

    def from_string(s):
        """Deserialize from 'oauth_token_secret=xxx&oauth_token=xxx'."""
        params = cgi.parse_qs(s, keep_blank_values=False)
        token = OAuthToken(params['oauth_token'][0],
            params['oauth_token_secret'][0])
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass # 1.0 tokens carry no confirmation flag.
        return token
    from_string = staticmethod(from_string)

    def __str__(self):
        return self.to_string()
class OAuthRequest(object):
    """OAuthRequest represents the request and can be serialized.

    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
        - oauth_verifier
        ... any additional parameters, as defined by the Service Provider.
    """

    parameters = None # OAuth parameters.
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION

    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}

    def set_parameter(self, parameter, value):
        """Set a single (OAuth or application) parameter."""
        self.parameters[parameter] = value

    def get_parameter(self, parameter):
        """Return a parameter value, raising OAuthError when absent."""
        try:
            return self.parameters[parameter]
        except:
            raise OAuthError('Parameter not found: %s' % parameter)

    def _get_timestamp_nonce(self):
        """Return the (oauth_timestamp, oauth_nonce) pair."""
        return self.get_parameter('oauth_timestamp'), self.get_parameter(
            'oauth_nonce')

    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        parameters = {}
        for k, v in self.parameters.iteritems():
            # Ignore oauth parameters.
            if k.find('oauth_') < 0:
                parameters[k] = v
        return parameters

    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        auth_header = 'OAuth realm="%s"' % realm
        # Add the oauth parameters.
        if self.parameters:
            for k, v in self.parameters.iteritems():
                if k[:6] == 'oauth_':
                    auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}

    def to_postdata(self):
        """Serialize as post data for a POST request."""
        return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
            for k, v in self.parameters.iteritems()])

    def to_url(self):
        """Serialize as a URL for a GET request."""
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())

    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        params = self.parameters
        try:
            # Exclude the signature if it exists.
            # NOTE(review): 'params' aliases self.parameters (no copy), so
            # this deletes oauth_signature from the request object itself
            # as a side effect of signing/verification.
            del params['oauth_signature']
        except:
            pass
        # Escape key values before sorting.
        key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
            for k,v in params.items()]
        # Sort lexicographically, first after key, then after value.
        key_values.sort()
        # Combine key value pairs into a string.
        return '&'.join(['%s=%s' % (k, v) for k, v in key_values])

    def get_normalized_http_method(self):
        """Uppercases the http method."""
        return self.http_method.upper()

    def get_normalized_http_url(self):
        """Parses the URL and rebuilds it to be scheme://host/path."""
        parts = urlparse.urlparse(self.http_url)
        scheme, netloc, path = parts[:3]
        # Exclude default port numbers.
        if scheme == 'http' and netloc[-3:] == ':80':
            netloc = netloc[:-3]
        elif scheme == 'https' and netloc[-4:] == ':443':
            netloc = netloc[:-4]
        return '%s://%s%s' % (scheme, netloc, path)

    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of build_signature."""
        # Set the signature method.
        self.set_parameter('oauth_signature_method',
            signature_method.get_name())
        # Set the signature.
        self.set_parameter('oauth_signature',
            self.build_signature(signature_method, consumer, token))

    def build_signature(self, signature_method, consumer, token):
        """Calls the build signature method within the signature method."""
        return signature_method.build_signature(self, consumer, token)

    def from_request(http_method, http_url, headers=None, parameters=None,
            query_string=None):
        """Combines multiple parameter sources."""
        if parameters is None:
            parameters = {}
        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = OAuthRequest._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise OAuthError('Unable to parse OAuth parameters from '
                        'Authorization header.')
        # GET or POST query string.
        if query_string:
            query_params = OAuthRequest._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = OAuthRequest._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return OAuthRequest(http_method, http_url, parameters)
        return None
    from_request = staticmethod(from_request)

    def from_consumer_and_token(oauth_consumer, token=None,
            callback=None, verifier=None, http_method=HTTP_METHOD,
            http_url=None, parameters=None):
        """Build a signed-request skeleton for a consumer (and token)."""
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': oauth_consumer.key,
            'oauth_timestamp': generate_timestamp(),
            'oauth_nonce': generate_nonce(),
            'oauth_version': OAuthRequest.version,
        }
        # Caller-supplied parameters win over the generated defaults.
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
            if token.callback:
                parameters['oauth_callback'] = token.callback
            # 1.0a support for verifier.
            if verifier:
                parameters['oauth_verifier'] = verifier
        elif callback:
            # 1.0a support for callback in the request token request.
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_consumer_and_token = staticmethod(from_consumer_and_token)

    def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
            http_url=None, parameters=None):
        """Build a token-authorization request for *token*."""
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_token_and_callback = staticmethod(from_token_and_callback)

    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params
    _split_header = staticmethod(_split_header)

    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = cgi.parse_qs(param_str, keep_blank_values=False)
        # parse_qs yields value lists; keep only the (unquoted) first value.
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
    _split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
    """A worker to check the validity of a request against a data store.

    Configured with an OAuthDataStore plus a mapping of signature-method
    name -> OAuthSignatureMethod; implements the service-provider side of
    OAuth 1.0/1.0a: issuing request tokens, exchanging them for access
    tokens, and verifying signed API calls.
    """

    timestamp_threshold = 300 # In seconds, five minutes.
    version = VERSION
    signature_methods = None
    data_store = None

    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}

    def set_data_store(self, data_store):
        """Replace the backing data store."""
        self.data_store = data_store

    def get_data_store(self):
        """Return the backing data store."""
        return self.data_store

    def add_signature_method(self, signature_method):
        """Register a signature method under its wire name."""
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods

    def fetch_request_token(self, oauth_request):
        """Processes a request_token request and returns the
        request token on success.
        """
        try:
            # Get the request token for authorization.
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # No token required for the initial token request.
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            try:
                callback = self.get_callback(oauth_request)
            except OAuthError:
                callback = None # 1.0, no callback specified.
            self._check_signature(oauth_request, consumer, None)
            # Fetch a new token.
            token = self.data_store.fetch_request_token(consumer, callback)
        return token

    def fetch_access_token(self, oauth_request):
        """Processes an access_token request and returns the
        access token on success.
        """
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        try:
            verifier = self._get_verifier(oauth_request)
        except OAuthError:
            verifier = None # 1.0 request without a verifier.
        # Get the request token.
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(consumer, token, verifier)
        return new_token

    def verify_request(self, oauth_request):
        """Verifies an api call and checks all the parameters."""
        # -> consumer and token
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # Get the access token.
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters

    def authorize_token(self, token, user):
        """Authorize a request token."""
        return self.data_store.authorize_request_token(token, user)

    def get_callback(self, oauth_request):
        """Get the callback URL."""
        return oauth_request.get_parameter('oauth_callback')

    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _get_version(self, oauth_request):
        """Verify the correct version request for this server."""
        try:
            version = oauth_request.get_parameter('oauth_version')
        except:
            # Absent oauth_version defaults to the protocol version.
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version

    def _get_signature_method(self, oauth_request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = oauth_request.get_parameter(
                'oauth_signature_method')
        except:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the '
                'following: %s' % (signature_method, signature_method_names))
        return signature_method

    def _get_consumer(self, oauth_request):
        """Resolve oauth_consumer_key via the data store or raise."""
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer

    def _get_token(self, oauth_request, token_type='access'):
        """Try to find the token for the provided request token key."""
        token_field = oauth_request.get_parameter('oauth_token')
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token

    def _get_verifier(self, oauth_request):
        """Return the 1.0a oauth_verifier parameter."""
        return oauth_request.get_parameter('oauth_verifier')

    def _check_signature(self, oauth_request, consumer, token):
        """Validate timestamp, nonce and signature or raise OAuthError."""
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except:
            raise OAuthError('Missing signature.')
        # Validate the signature.
        valid_sig = signature_method.check_signature(oauth_request, consumer,
            token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(
                oauth_request, consumer, token)
            raise OAuthError('Invalid signature. Expected signature base '
                'string: %s' % base)
        # NOTE(review): dead code -- the signature is recomputed here but
        # the result is never used.
        built = signature_method.build_signature(oauth_request, consumer, token)

    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = abs(now - timestamp)
        if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' %
                (timestamp, now, self.timestamp_threshold))

    def _check_nonce(self, consumer, token, nonce):
        """Verify that the nonce is uniqueish."""
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
    """Abstract consumer-side worker that executes OAuth requests.

    Concrete subclasses implement the protocol's three HTTP round-trips;
    this base class only stores the consumer/token pair.
    """

    # Class-level defaults so the attributes exist before __init__ runs.
    consumer = None
    token = None

    def __init__(self, oauth_consumer, oauth_token):
        self.consumer = oauth_consumer
        self.token = oauth_token

    def get_consumer(self):
        """Return the OAuthConsumer this client acts for."""
        return self.consumer

    def get_token(self):
        """Return the OAuthToken this client currently holds."""
        return self.token

    def fetch_request_token(self, oauth_request):
        """Obtain a request token; subclasses must override. -> OAuthToken."""
        raise NotImplementedError

    def fetch_access_token(self, oauth_request):
        """Exchange for an access token; subclasses must override. -> OAuthToken."""
        raise NotImplementedError

    def access_resource(self, oauth_request):
        """Fetch a protected resource; subclasses must override."""
        raise NotImplementedError
class OAuthDataStore(object):
    """Abstract persistence layer used to look up consumers and tokens.

    Every method must be overridden by a concrete store; the base class
    only documents the interface OAuthServer relies on.
    NOTE(review): OAuthServer._get_token calls
    ``lookup_token(token_type, token)`` with two arguments, so concrete
    stores in the wild follow that call shape rather than this signature.
    """

    def lookup_consumer(self, key):
        """Resolve a consumer key. -> OAuthConsumer."""
        raise NotImplementedError

    def lookup_token(self, oauth_consumer, token_type, token_token):
        """Resolve a request/access token. -> OAuthToken."""
        raise NotImplementedError

    def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
        """Return the nonce if it was already used, else None. -> OAuthToken."""
        raise NotImplementedError

    def fetch_request_token(self, oauth_consumer, oauth_callback):
        """Create and persist a new request token. -> OAuthToken."""
        raise NotImplementedError

    def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
        """Trade an authorized request token for an access token. -> OAuthToken."""
        raise NotImplementedError

    def authorize_request_token(self, oauth_token, user):
        """Mark a request token as authorized by *user*. -> OAuthToken."""
        raise NotImplementedError
class OAuthSignatureMethod(object):
    """Strategy interface for computing and checking request signatures."""

    def get_name(self):
        """Return the wire name of this method (e.g. 'HMAC-SHA1'). -> str."""
        raise NotImplementedError

    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        """Return the (key, raw) pair the signature is derived from."""
        raise NotImplementedError

    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        """Return the signature string for the request. -> str."""
        raise NotImplementedError

    def check_signature(self, oauth_request, consumer, token, signature):
        """Recompute the signature and compare it with the supplied one."""
        return self.build_signature(oauth_request, consumer, token) == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    """HMAC-SHA1 signatures (OAuth Core 1.0, section 9.2)."""

    def get_name(self):
        return 'HMAC-SHA1'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return (key, raw): the HMAC key and the signature base string."""
        # Base string: METHOD&URL&PARAMS, each component percent-escaped.
        raw = '&'.join((
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        ))
        # Key: consumer secret, '&', then the token secret when present.
        key = '%s&' % escape(consumer.secret)
        if token:
            key = key + escape(token.secret)
        return key, raw

    def build_signature(self, oauth_request, consumer, token):
        """Compute the base64-encoded HMAC-SHA1 digest of the base string."""
        key, raw = self.build_signature_base_string(
            oauth_request, consumer, token)
        try:
            import hashlib # Python 2.5+
            hashed = hmac.new(key, raw, hashlib.sha1)
        except:
            import sha # Pre-2.5 fallback, long deprecated.
            hashed = hmac.new(key, raw, sha)
        # b2a_base64 appends a trailing newline; drop it.
        return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signatures (OAuth Core 1.0, section 9.4): no hashing --
    the escaped shared secrets themselves act as the signature. Only safe
    over a secure transport such as HTTPS.
    """

    def get_name(self):
        """Wire name of this signature method."""
        return 'PLAINTEXT'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Concatenates the consumer key and secret."""
        # For PLAINTEXT the key and the raw string are identical:
        # consumer-secret '&' token-secret (token part optional).
        sig = '%s&' % escape(consumer.secret)
        if token:
            sig = sig + escape(token.secret)
        return sig, sig
def build_signature(self, oauth_request, consumer, token):
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
return key | en | 0.67629 | # coding=utf-8 The MIT License Copyright (c) 2007 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Hi Blaine! Generic exception class. Optional WWW-Authenticate header (401 error) Escape a URL including any /. Convert unicode to utf-8. Get seconds since epoch (UTC). Generate pseudorandom number. Generate pseudorandom number. Consumer of OAuth authentication. OAuthConsumer is a data type that represents the identity of the Consumer via its shared secret with the Service Provider. OAuthToken is a data type that represents an End User via either an access or request token. key -- the token secret -- the token secret # Append the oauth_verifier. Returns a token from something like: oauth_token_secret=xxx&oauth_token=xxx # 1.0, no callback confirmed. OAuthRequest represents the request and can be serialized. OAuth parameters: - oauth_consumer_key - oauth_token - oauth_signature_method - oauth_signature - oauth_timestamp - oauth_nonce - oauth_version - oauth_verifier ... 
any additional parameters, as defined by the Service Provider. # OAuth parameters. Get any non-OAuth parameters. # Ignore oauth parameters. Serialize as a header for an HTTPAuth request. # Add the oauth parameters. Serialize as post data for a POST request. Serialize as a URL for a GET request. Return a string that contains the parameters that must be signed. # Exclude the signature if it exists. # Escape key values before sorting. # Sort lexicographically, first after key, then after value. # Combine key value pairs into a string. Uppercases the http method. Parses the URL and rebuilds it to be scheme://host/path. # Exclude default port numbers. Set the signature parameter to the result of build_signature. # Set the signature method. # Set the signature. Calls the build signature method within the signature method. Combines multiple parameter sources. # Headers # Check that the authorization header is OAuth. # Get the parameters from the header. # GET or POST query string. # URL parameters. # query # 1.0a support for verifier. # 1.0a support for callback in the request token request. Turn Authorization: header into parameters. # Ignore realm parameter. # Remove whitespace. # Split key-value. # Remove quotes and unescape the value. Turn URL string into parameters. A worker to check the validity of a request against a data store. # In seconds, five minutes. Processes a request_token request and returns the request token on success. # Get the request token for authorization. # No token required for the initial token request. # 1.0, no callback specified. # Fetch a new token. Processes an access_token request and returns the access token on success. # Get the request token. Verifies an api call and checks all the parameters. # -> consumer and token # Get the access token. Authorize a request token. Get the callback URL. Optional support for the authenticate header. Verify the correct version request for this server. Figure out the signature with some defaults. 
# Get the signature method object. Try to find the token for the provided request token key. # Validate the signature. Verify that timestamp is recentish. Verify that the nonce is uniqueish. OAuthClient is a worker to attempt to execute a request. -> OAuthToken. -> OAuthToken. -> Some protected resource. A database abstraction used to lookup consumers and tokens. -> OAuthConsumer. -> OAuthToken. -> OAuthToken. -> OAuthToken. -> OAuthToken. -> OAuthToken. A strategy class that implements a signature method. -> str. -> str key, str raw. -> str. Builds the base signature string. # HMAC object. # 2.5 # Deprecated # Calculate the digest base 64. Concatenates the consumer key and secret. | 2.538307 | 3 |
mltk/rl/replay.py | lqf96/mltk | 0 | 6624449 | from typing import Generic, Optional
from mltk.types.gym import Env
import torch as th
from sortedcontainers import SortedList
import mltk.util as mu
from mltk.adt import Deque, RecDeque, RecDequeSchema
from .types import Transition
__all__ = [
"ReplayBuffer"
]
class ReplayBuffer():
    """Fixed-capacity experience replay buffer for RL transitions.

    Transitions are stored column-wise in a :class:`RecDeque`. The steps at
    which episodes begin are tracked in a sorted list so that
    :meth:`sample_seqs` can reject candidate sequences that would straddle
    an episode boundary.
    """

    def __init__(self, env: "Env", capacity: int, extras_schema: RecDequeSchema = {},
        dtype: Optional[th.dtype] = None, rand: th.Generator = th.default_generator):
        """Create a replay buffer sized to the given environment.

        Args:
            env: Environment whose observation and action spaces define the
                stored tensor shapes and dtypes.
            capacity: Maximum number of transitions retained.
            extras_schema: Schema of extra per-transition columns to store
                alongside the standard ones.
            dtype: Floating-point dtype used for rewards (and for
                observations/actions whose spaces are floating-point);
                defaults to the current PyTorch default dtype.
            rand: Random generator used when sampling sequences.
        """
        self.dtype = dtype = th.get_default_dtype() if dtype is None else dtype
        self.rand = rand

        obs_space = env.observation_space
        action_space = env.action_space
        # Data type for observation and action space
        obs_dtype = mu.force_float(mu.as_th_dtype(obs_space.dtype), dtype)
        action_dtype = mu.force_float(mu.as_th_dtype(action_space.dtype), dtype)
        # Data schema of the replay buffer; extras first so the standard
        # columns cannot be silently overridden
        replay_schema: RecDequeSchema = dict(
            extras_schema,
            observation=(obs_space.shape, obs_dtype),
            action=(action_space.shape, action_dtype),
            reward=dtype,
            done=th.bool
        ) # type: ignore
        # Experience buffer
        self.buf = RecDeque.from_schema(replay_schema, max_len=capacity)
        # Total number of transitions ever appended (not capped by capacity)
        self._steps = 0
        # Step numbers at which a new episode began, kept sorted for fast
        # range queries in `sample_seqs`
        self._episode_begin_steps = SortedList()

    @property
    def capacity(self) -> int:
        """Maximum number of transitions the buffer can hold."""
        observations: Deque = self.buf.observation
        # Capacity of the replay buffer is always bounded
        assert observations.max_len is not None
        return observations.max_len

    def append(self, transition: Transition, **kwargs):
        """Append one transition (plus any extra columns) to the buffer."""
        buf = self.buf
        # Record the begin of an episode *before* appending: the incoming
        # transition starts a new episode iff the buffer is empty or the
        # previous transition ended an episode. (Previously this check ran
        # after the append, which made `not buf` unreachable and recorded
        # episode *end* steps instead of begin steps, so sampled sequences
        # could straddle two episodes.)
        if not buf or buf.done[-1]:
            self._episode_begin_steps.add(self._steps)
        # Add experience to buffer
        buf.append(
            observation=transition.observation,
            action=transition.action,
            reward=transition.reward,
            done=transition.done,
            **kwargs
        )
        # Update number of steps
        self._steps += 1

    def sample_seqs(self, n_seqs: int, seq_len: int) -> th.Tensor:
        """Sample begin indices for ``n_seqs`` sequences of ``seq_len`` steps.

        Candidate sequences containing an episode begin strictly inside
        their range are rejected and resampled, so every returned sequence
        lies within a single episode.

        Returns:
            Int64 tensor of shape ``(n_seqs,)`` holding sequence begin
            indices into the buffer.
        """
        rand = self.rand
        episode_begin_steps = self._episode_begin_steps
        # Current replay buffer size
        buf_len = len(self.buf)
        seq_begin_indices = th.empty((n_seqs,), dtype=th.int64)
        # Offset between number of steps and buffer indices (non-zero once
        # old transitions have been evicted)
        offset = self._steps-buf_len

        i = 0
        while i<n_seqs:
            # Sample sequence begin index
            seq_begin_index = th.randint(buf_len-seq_len, (), generator=rand).item()
            # Compute corresponding number of steps
            seq_begin_step = offset+seq_begin_index
            try:
                # Try to find an episode begin strictly inside the sequence
                # range (both bounds exclusive: a sequence may *start* at an
                # episode begin)
                next(episode_begin_steps.irange(
                    seq_begin_step, seq_begin_step+seq_len, inclusive=(False, False)
                ))
                # Episode begin found; resample sequence begin index
                continue
            except StopIteration:
                # Store sequence begin index
                seq_begin_indices[i] = seq_begin_index
                i += 1

        return seq_begin_indices
| from typing import Generic, Optional
from mltk.types.gym import Env
import torch as th
from sortedcontainers import SortedList
import mltk.util as mu
from mltk.adt import Deque, RecDeque, RecDequeSchema
from .types import Transition
__all__ = [
"ReplayBuffer"
]
class ReplayBuffer():
def __init__(self, env: "Env", capacity: int, extras_schema: RecDequeSchema = {},
dtype: Optional[th.dtype] = None, rand: th.Generator = th.default_generator):
self.dtype = dtype = th.get_default_dtype() if dtype is None else dtype
self.rand = rand
obs_space = env.observation_space
action_space = env.action_space
# Data type for observation and action space
obs_dtype = mu.force_float(mu.as_th_dtype(obs_space.dtype), dtype)
action_dtype = mu.force_float(mu.as_th_dtype(action_space.dtype), dtype)
# Data schema of the replay buffer
replay_schema: RecDequeSchema = dict(
extras_schema,
observation=(obs_space.shape, obs_dtype),
action=(action_space.shape, action_dtype),
reward=dtype,
done=th.bool
) # type: ignore
# Experience buffer
self.buf = RecDeque.from_schema(replay_schema, max_len=capacity)
self._steps = 0
self._episode_begin_steps = SortedList()
@property
def capacity(self) -> int:
observations: Deque = self.buf.observation
# Capacity of the replay buffer is always bounded
assert observations.max_len is not None
return observations.max_len
def append(self, transition: Transition, **kwargs):
buf = self.buf
# Add experience to buffer
buf.append(
observation=transition.observation,
action=transition.action,
reward=transition.reward,
done=transition.done,
**kwargs
)
# Record begin of an episode
if not buf or buf.done[-1]:
self._episode_begin_steps.add(self._steps)
# Update number of steps
self._steps += 1
def sample_seqs(self, n_seqs: int, seq_len: int) -> th.Tensor:
rand = self.rand
episode_begin_steps = self._episode_begin_steps
# Current replay buffer size
buf_len = len(self.buf)
seq_begin_indices = th.empty((n_seqs,), dtype=th.int64)
# Offset between number of steps and indices
offset = self._steps-buf_len
i = 0
while i<n_seqs:
# Sample sequence begin index
seq_begin_index = th.randint(buf_len-seq_len, (), generator=rand).item()
# Compute corresponding number of steps
seq_begin_step = offset+seq_begin_index
try:
# Try to find episode begin within sequence range
next(episode_begin_steps.irange(
seq_begin_step, seq_begin_step+seq_len, inclusive=(False, False)
))
# Episode begin found; resample sequence begin index
continue
except StopIteration:
# Store sequence begin index
seq_begin_indices[i] = seq_begin_index
i += 1
return seq_begin_indices
| en | 0.837288 | # Data type for observation and action space # Data schema of the replay buffer # type: ignore # Experience buffer # Capacity of the replay buffer is always bounded # Add experience to buffer # Record begin of an episode # Update number of steps # Current replay buffer size # Offset between number of steps and indices # Sample sequence begin index # Compute corresponding number of steps # Try to find episode begin within sequence range # Episode begin found; resample sequence begin index # Store sequence begin index | 2.070978 | 2 |
bert_e/server/webhook.py | scality/bert-e | 0 | 6624450 | <reponame>scality/bert-e<filename>bert_e/server/webhook.py
# Copyright 2016-2018 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines the server webhook endpoints."""
import json
import logging
from flask import Blueprint, Response, current_app, request
from ..git_host import github
from ..git_host.bitbucket import BuildStatus, PullRequest
from ..git_host.cache import BUILD_STATUS_CACHE
from ..job import CommitJob, PullRequestJob
from .auth import requires_basic_auth
LOG = logging.getLogger(__name__)
blueprint = Blueprint('Bert-E server webhook endpoints', __name__)
def handle_bitbucket_repo_event(bert_e, event, json_data):
    """Handle a Bitbucket webhook sent on a repository event."""
    if event not in ('commit_status_created', 'commit_status_updated'):
        # Only build-status events are actionable at the repository level
        return
    commit_status = json_data['commit_status']
    build_status = commit_status['state']
    key = commit_status['key']
    build_url = commit_status['url']
    # The commit sha1 is the last path component of the commit link
    commit_sha1 = commit_status['links']['commit']['href'].split('/')[-1]
    LOG.debug("New build status on commit %s", commit_sha1)
    # Refresh the cache unless it already holds a successful build
    cached = BUILD_STATUS_CACHE[key].get(commit_sha1, None)
    if not cached or cached.state != 'SUCCESSFUL':
        BUILD_STATUS_CACHE[key].set(
            commit_sha1, BuildStatus(bert_e.client, **commit_status))
    # A build that merely started carries no actionable information
    if build_status == 'INPROGRESS':
        LOG.debug("The build just started on %s, ignoring event",
                  commit_sha1)
        return
    LOG.info('The build status of commit <%s> has been updated to %s. '
             'More information at %s',
             commit_sha1, build_status, build_url)
    return CommitJob(bert_e=bert_e, commit=commit_sha1)
def handle_bitbucket_pr_event(bert_e, event, json_data):
    """Handle a Bitbucket webhook sent on a pull request event."""
    pr_data = json_data['pullrequest']
    LOG.info('The pull request <%s> has been updated', pr_data['id'])
    return PullRequestJob(
        bert_e=bert_e,
        pull_request=PullRequest(bert_e.client, **pr_data),
    )
def handle_github_pr_event(bert_e, json_data):
    """Handle a GitHub webhook sent on a pull request update event."""
    pr_event = github.PullRequestEvent(client=bert_e.client, **json_data)
    pull_request = pr_event.pull_request
    if pr_event.action == "closed":
        # Nothing to do once a pull request is closed
        LOG.debug('PR #%s closed, ignoring event', pull_request.id)
        return None
    return PullRequestJob(bert_e=bert_e, pull_request=pull_request)
def handle_github_issue_comment(bert_e, json_data):
    """Handle a GitHub webhook sent on an issue comment event."""
    comment_event = github.IssueCommentEvent(client=bert_e.client, **json_data)
    pr = comment_event.pull_request
    if not pr:
        # Comment on a plain issue (no associated PR): nothing to do
        return None
    return PullRequestJob(bert_e=bert_e, pull_request=pr)
def handle_github_pr_review_event(bert_e, json_data):
    """Handle a GitHub webhook sent on a pull request review event."""
    review_event = github.PullRequestReviewEvent(client=bert_e.client,
                                                 **json_data)
    pull_request = review_event.pull_request
    LOG.debug("A review was submitted or dismissed on pull request #%d",
              pull_request.id)
    return PullRequestJob(bert_e=bert_e, pull_request=pull_request)
def handle_github_status_event(bert_e, json_data):
    """Handle a GitHub webhook sent on a commit sha1 build status event."""
    status_event = github.StatusEvent(client=bert_e.client, **json_data)
    status = status_event.status
    sha1 = status_event.commit
    LOG.debug("New build status on commit %s", sha1)
    # Keep the cache fresh, but never replace an already-successful build
    cached = BUILD_STATUS_CACHE[status.key].get(sha1)
    if not cached or cached.state != 'SUCCESSFUL':
        BUILD_STATUS_CACHE[status.key].set(sha1, status)
    if status.state == 'INPROGRESS':
        # A freshly started build carries no actionable information
        LOG.debug("The build just started on %s, ignoring event", sha1)
        return None
    return CommitJob(bert_e=bert_e, commit=sha1)
def handle_github_check_run_event(bert_e, json_data):
    """Handle a GitHub webhook sent on a check run event."""
    # Pass the git host client under the `client` keyword, consistent with
    # every other event constructor in this module; the original passed it
    # as `bert_e=bert_e.client`, a mismatched keyword for the same value.
    event = github.CheckRunEvent(client=bert_e.client, **json_data)
    return CommitJob(bert_e=bert_e, commit=event.commit)
@blueprint.route('/bitbucket', methods=['POST'])
@requires_basic_auth
def parse_bitbucket_webhook():
    """Entrypoint for handling a Bitbucket webhook."""
    # The X-Event-Key header has the form "<entity>:<event>",
    # for example, repo:push.
    entity, event = request.headers.get('X-Event-Key').split(':')
    json_data = json.loads(request.data.decode())
    LOG.debug('Received webhook from bitbucket:\n%s', json.dumps(json_data,
                                                                 indent=4))
    repository = json_data['repository']
    repo_owner = repository['owner']['username']
    repo_slug = repository['name']
    expected_repo = current_app.bert_e.project_repo
    # Reject events that do not match the configured repository
    if repo_owner != expected_repo.owner:
        LOG.error('received repo_owner (%s) incompatible with settings',
                  repo_owner)
        return Response('Internal Server Error', 500)
    if repo_slug != expected_repo.slug:
        LOG.error('received repo_slug (%s) incompatible with settings',
                  repo_slug)
        return Response('Internal Server Error', 500)
    if entity == 'repo':
        job = handle_bitbucket_repo_event(current_app.bert_e, event, json_data)
    elif entity == 'pullrequest':
        job = handle_bitbucket_pr_event(current_app.bert_e, event, json_data)
    else:
        job = None
    if not job:
        LOG.debug('Ignoring unhandled event %s:%s', entity, event)
        return Response('OK', 200)
    current_app.bert_e.put_job(job)
    return Response('OK', 200)
@blueprint.route('/github', methods=['POST'])
@requires_basic_auth
def parse_github_webhook():
    """Entrypoint for handling a GitHub webhook."""
    bert_e = current_app.bert_e
    if bert_e.settings.repository_host != 'github':
        LOG.error('Received github webhook but Bert-E is configured '
                  'for %s', bert_e.settings.repository_host)
        return Response('Internal Server Error', 500)
    json_data = json.loads(request.data.decode())
    LOG.debug('Received webhook from github:\n%s', json.dumps(json_data,
                                                              indent=4))
    full_name = json_data.get('repository', {}).get('full_name')
    if full_name != bert_e.project_repo.full_name:
        LOG.debug('Received webhook for %s whereas I\'m handling %s. '
                  'Ignoring', full_name,
                  bert_e.project_repo.full_name)
        return Response('Internal Server Error', 500)
    event = request.headers.get('X-Github-Event')
    LOG.debug("Received '%s' event", event)
    # Dispatch on the event type; events without a handler yield no job.
    handlers = {
        'pull_request': handle_github_pr_event,
        'issue_comment': handle_github_issue_comment,
        'pull_request_review': handle_github_pr_review_event,
        'status': handle_github_status_event,
        'check_run': handle_github_check_run_event,
    }
    handler = handlers.get(event)
    job = handler(bert_e, json_data) if handler is not None else None
    if job is None:
        LOG.debug('Ignoring event.')
        return Response('OK', 200)
    bert_e.put_job(job)
    return Response('Accepted', 202)
| # Copyright 2016-2018 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines the server webhook endpoints."""
import json
import logging
from flask import Blueprint, Response, current_app, request
from ..git_host import github
from ..git_host.bitbucket import BuildStatus, PullRequest
from ..git_host.cache import BUILD_STATUS_CACHE
from ..job import CommitJob, PullRequestJob
from .auth import requires_basic_auth
LOG = logging.getLogger(__name__)
blueprint = Blueprint('Bert-E server webhook endpoints', __name__)
def handle_bitbucket_repo_event(bert_e, event, json_data):
"""Handle a Bitbucket webhook sent on a repository event."""
if event in ['commit_status_created', 'commit_status_updated']:
build_status = json_data['commit_status']['state']
key = json_data['commit_status']['key']
build_url = json_data['commit_status']['url']
commit_url = json_data['commit_status']['links']['commit']['href']
commit_sha1 = commit_url.split('/')[-1]
LOG.debug("New build status on commit %s", commit_sha1)
# If we don't have a successful build for this sha1, update the cache
cached = BUILD_STATUS_CACHE[key].get(commit_sha1, None)
if not cached or cached.state != 'SUCCESSFUL':
status = BuildStatus(bert_e.client, **json_data['commit_status'])
BUILD_STATUS_CACHE[key].set(commit_sha1, status)
# Ignore notifications that the build started
if build_status == 'INPROGRESS':
LOG.debug("The build just started on %s, ignoring event",
commit_sha1)
return
LOG.info('The build status of commit <%s> has been updated to %s. '
'More information at %s',
commit_sha1, build_status, build_url)
return CommitJob(bert_e=bert_e, commit=commit_sha1)
def handle_bitbucket_pr_event(bert_e, event, json_data):
"""Handle a Bitbucket webhook sent on a pull request event."""
pr_id = json_data['pullrequest']['id']
pr = PullRequest(bert_e.client, **json_data['pullrequest'])
LOG.info('The pull request <%s> has been updated', pr_id)
return PullRequestJob(bert_e=bert_e, pull_request=pr)
def handle_github_pr_event(bert_e, json_data):
"""Handle a GitHub webhook sent on a pull request update event."""
event = github.PullRequestEvent(client=bert_e.client, **json_data)
pr = event.pull_request
if event.action != "closed":
return PullRequestJob(bert_e=bert_e, pull_request=pr)
else:
LOG.debug('PR #%s closed, ignoring event', pr.id)
def handle_github_issue_comment(bert_e, json_data):
"""Handle a GitHub webhook sent on an issue comment event."""
event = github.IssueCommentEvent(client=bert_e.client, **json_data)
pr = event.pull_request
if pr:
return PullRequestJob(bert_e=bert_e, pull_request=pr)
def handle_github_pr_review_event(bert_e, json_data):
"""Handle a GitHub webhook sent on a pull request review event."""
event = github.PullRequestReviewEvent(client=bert_e.client, **json_data)
pr = event.pull_request
LOG.debug("A review was submitted or dismissed on pull request #%d", pr.id)
return PullRequestJob(bert_e=bert_e, pull_request=pr)
def handle_github_status_event(bert_e, json_data):
"""Handle a GitHub webhook sent on a commit sha1 build status event."""
event = github.StatusEvent(client=bert_e.client, **json_data)
status = event.status
LOG.debug("New build status on commit %s", event.commit)
cached = BUILD_STATUS_CACHE[status.key].get(event.commit)
if not cached or cached.state != 'SUCCESSFUL':
BUILD_STATUS_CACHE[status.key].set(event.commit, status)
if status.state == 'INPROGRESS':
LOG.debug("The build just started on %s, ignoring event", event.commit)
return
return CommitJob(bert_e=bert_e, commit=event.commit)
def handle_github_check_run_event(bert_e, json_data):
event = github.CheckRunEvent(bert_e=bert_e.client, **json_data)
return CommitJob(bert_e=bert_e, commit=event.commit)
@blueprint.route('/bitbucket', methods=['POST'])
@requires_basic_auth
def parse_bitbucket_webhook():
"""Entrypoint for handling a Bitbucket webhook."""
# The event key of the event that triggers the webhook
# for example, repo:push.
entity, event = request.headers.get('X-Event-Key').split(':')
json_data = json.loads(request.data.decode())
LOG.debug('Received webhook from bitbucket:\n%s', json.dumps(json_data,
indent=4))
repo_owner = json_data['repository']['owner']['username']
repo_slug = json_data['repository']['name']
if repo_owner != current_app.bert_e.project_repo.owner:
LOG.error('received repo_owner (%s) incompatible with settings',
repo_owner)
return Response('Internal Server Error', 500)
if repo_slug != current_app.bert_e.project_repo.slug:
LOG.error('received repo_slug (%s) incompatible with settings',
repo_slug)
return Response('Internal Server Error', 500)
job = None
if entity == 'repo':
job = handle_bitbucket_repo_event(current_app.bert_e, event, json_data)
if entity == 'pullrequest':
job = handle_bitbucket_pr_event(current_app.bert_e, event, json_data)
if not job:
LOG.debug('Ignoring unhandled event %s:%s', entity, event)
return Response('OK', 200)
current_app.bert_e.put_job(job)
return Response('OK', 200)
@blueprint.route('/github', methods=['POST'])
@requires_basic_auth
def parse_github_webhook():
"""Entrypoint for handling a GitHub webhook."""
if current_app.bert_e.settings.repository_host != 'github':
LOG.error('Received github webhook but Bert-E is configured '
'for %s', current_app.bert_e.settings.repository_host)
return Response('Internal Server Error', 500)
json_data = json.loads(request.data.decode())
LOG.debug('Received webhook from github:\n%s', json.dumps(json_data,
indent=4))
full_name = json_data.get('repository', {}).get('full_name')
if full_name != current_app.bert_e.project_repo.full_name:
LOG.debug('Received webhook for %s whereas I\'m handling %s. '
'Ignoring', full_name,
current_app.bert_e.project_repo.full_name)
return Response('Internal Server Error', 500)
event = request.headers.get('X-Github-Event')
job = None
LOG.debug("Received '%s' event", event)
if event == 'pull_request':
job = handle_github_pr_event(current_app.bert_e, json_data)
elif event == 'issue_comment':
job = handle_github_issue_comment(current_app.bert_e, json_data)
elif event == 'pull_request_review':
job = handle_github_pr_review_event(current_app.bert_e, json_data)
elif event == 'status':
job = handle_github_status_event(current_app.bert_e, json_data)
elif event == 'check_run':
job = handle_github_check_run_event(current_app.bert_e, json_data)
if job is None:
LOG.debug('Ignoring event.')
return Response('OK', 200)
current_app.bert_e.put_job(job)
return Response('Accepted', 202) | en | 0.780689 | # Copyright 2016-2018 Scality # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This module defines the server webhook endpoints. Handle a Bitbucket webhook sent on a repository event. # If we don't have a successful build for this sha1, update the cache # Ignore notifications that the build started Handle a Bitbucket webhook sent on a pull request event. Handle a GitHub webhook sent on a pull request update event. #%s closed, ignoring event', pr.id) Handle a GitHub webhook sent on an issue comment event. Handle a GitHub webhook sent on a pull request review event. #%d", pr.id) Handle a GitHub webhook sent on a commit sha1 build status event. Entrypoint for handling a Bitbucket webhook. # The event key of the event that triggers the webhook # for example, repo:push. Entrypoint for handling a GitHub webhook. | 1.916589 | 2 |