hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f5b0e701cd31fe438d6456b8dfe17515a304be | 11,956 | py | Python | app/user/tests/test_user_api.py | grotvignelli/6chan_project | 5fb6f86abc444b2701cf3c5890a7a8947e45534f | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | grotvignelli/6chan_project | 5fb6f86abc444b2701cf3c5890a7a8947e45534f | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | grotvignelli/6chan_project | 5fb6f86abc444b2701cf3c5890a7a8947e45534f | [
"MIT"
] | null | null | null | import datetime
import tempfile
import os
import shutil
from unittest.mock import patch
from PIL import Image
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.conf import settings
from rest_framework import status
from rest_framework.test import APIClient
SIGNUP_USER_URL = reverse('user:signup')
TOKEN_URL = reverse('user:signin')
PROFILE_URL = reverse('user:profile')
CHANGE_PASSWORD_URL = reverse('user:change-password')
def create_payload(**params):
    """Build a default signup payload, with any keyword overrides applied.

    Returns a dict with 'email', 'username' and 'password' keys; callers
    may override any of them (or add extra keys) via keyword arguments.
    """
    payload = {
        'email': 'test@gmail.com',
        'username': 'testuser',
        'password': 'testpass',
    }
    payload.update(params)
    return payload
def create_user(**params):
    """Create and return a user through the active Django user model."""
    user_model = get_user_model()
    return user_model.objects.create_user(**params)
class PublicUserApiTests(TestCase):
    """Test publicly user API"""
    # Unauthenticated requests against the signup / signin / profile endpoints.

    def setUp(self):
        self.client = APIClient()

    def tearDown(self):
        # Clean up any avatar files written to MEDIA_ROOT during a test.
        directory = 'uploads/avatar'
        path = os.path.join(settings.MEDIA_ROOT, directory)
        shutil.rmtree(path, ignore_errors=True)

    def test_create_user_successful(self):
        """Test creating a new user in API is successful"""
        payload = create_payload()
        res = self.client.post(SIGNUP_USER_URL, payload)
        user = get_user_model().objects.get(
            email=payload['email'],
            username=payload['username']
        )
        # A user created without an avatar is expected to get this default.
        default_avatar_name = 'uploads/defaults/default.png'
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertEqual(user.email, payload['email'])
        self.assertEqual(user.username, payload['username'])
        self.assertEqual(user.avatar.name, default_avatar_name)
        self.assertTrue(user.check_password(payload['password']))
        # The password must never be echoed back in the response body.
        self.assertNotIn('password', res.data)

    def test_create_user_with_dob(self):
        """Test creating a new user in API with date of birth"""
        payload = create_payload(
            date_of_birth=datetime.date(1992, 12, 25),
        )
        res = self.client.post(SIGNUP_USER_URL, payload)
        user = get_user_model().objects.get(
            email=payload['email'],
            username=payload['username']
        )
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertEqual(user.date_of_birth, payload['date_of_birth'])

    @patch('uuid.uuid4')
    def test_create_user_with_avatar(self, mock_uuid):
        """Test creating a new user in API with avatar"""
        # uuid4 is mocked so the uploaded file gets a predictable name.
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            uuid = 'test-uuid'
            mock_uuid.return_value = uuid
            image = Image.new('RGB', (100, 100))
            image.save(ntf, format='JPEG')
            ntf.seek(0)
            payload = create_payload(avatar=ntf)
            res = self.client.post(
                SIGNUP_USER_URL, payload, format='multipart'
            )
        user = get_user_model().objects.get(
            email=payload['email'],
            username=payload['username']
        )
        # NOTE(review): the '/app/' prefix looks container-specific — confirm
        # this matches the deployment's MEDIA_ROOT layout.
        filepath = os.path.join(
            '/app/' + settings.MEDIA_ROOT,
            f'uploads/avatar/{uuid}.jpg'
        )
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertIn('avatar', res.data)
        self.assertEqual(user.avatar.path, filepath)

    def test_create_user_invalid_email(self):
        """Test creating a new user with invalid payload
        (email blank)"""
        payload = create_payload(email='')
        res = self.client.post(SIGNUP_USER_URL, payload)
        is_exists = get_user_model().objects.filter(
            email=payload['email'],
            username=payload['username']
        ).exists()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(is_exists)

    def test_create_user_invalid_username(self):
        """Test creating a new user with invalid payload
        (username blank)"""
        payload = create_payload(username='')
        res = self.client.post(SIGNUP_USER_URL, payload)
        is_exists = get_user_model().objects.filter(
            email=payload['email'],
            username=payload['username']
        ).exists()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(is_exists)

    def test_create_with_exists_user(self):
        """Test creating a new user with existing user
        raises 400 status code"""
        payload = create_payload()
        create_user(**payload)
        res = self.client.post(SIGNUP_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_user_password_too_short(self):
        """Test creating a new user with password less than 6 character
        raises 400 status code"""
        payload = create_payload(password='wr')
        res = self.client.post(SIGNUP_USER_URL, payload)
        is_exists = get_user_model().objects.filter(
            email=payload['email'],
            username=payload['username']
        ).exists()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(is_exists)

    def test_create_token_for_user(self):
        """Test create a token for authenticated user"""
        payload = create_payload()
        create_user(**payload)
        res = self.client.post(TOKEN_URL, {
            'username': payload['username'],
            'password': payload['password'],
        })
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('token', res.data)

    def test_create_token_with_invalid_credentials(self):
        """Test create a token for user with invalid credentials"""
        create_user(**create_payload())
        payload = {
            'username': 'testuser',
            'password': 'wrong'
        }
        res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)

    def test_create_token_no_user(self):
        """Test that creating token with no user existing
        is raises 400 status code"""
        payload = {
            'username': 'testuser',
            'password': 'testpass'
        }
        res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)

    def test_create_token_missing_fields(self):
        """Test that creating token with missing fields required
        is raises error"""
        res = self.client.post(TOKEN_URL, {'username': 'user', 'password': ''})
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)

    def test_retrieve_user_profile_unauthorized(self):
        """Test retrieve user profile with no authenticated user"""
        res = self.client.get(PROFILE_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
    """Test access private user API (with authenticated user)"""
    # Every test runs with a force-authenticated default user.

    def setUp(self):
        self.client = APIClient()
        self.user = create_user(**create_payload())
        self.client.force_authenticate(user=self.user)

    def tearDown(self):
        # Clean up any avatar files written to MEDIA_ROOT during a test.
        directory = 'uploads/avatar'
        path = os.path.join(settings.MEDIA_ROOT, directory)
        shutil.rmtree(path, ignore_errors=True)

    def test_retrieve_user_profile(self):
        """Test that retrieving user profile with authenticated user"""
        res = self.client.get(PROFILE_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data['username'], self.user.username)
        self.assertEqual(res.data['email'], self.user.email)

    def test_post_method_not_allowed(self):
        """Test that POST method on user profile endpoint is not allowed"""
        res = self.client.post(PROFILE_URL, {})
        self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_update_user_profile(self):
        """Test updating user profile successful"""
        payload = {
            'username': 'newname',
            'email': 'newemail@gmail.com'
        }
        res = self.client.patch(PROFILE_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(self.user.username, payload['username'])
        self.assertEqual(self.user.email, payload['email'])

    @patch('uuid.uuid4')
    def test_update_avatar_user(self, mock_uuid):
        """Test updating avatar profile user is successful"""
        # uuid4 is mocked so the uploaded file gets a predictable name.
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            uuid = 'test-uuid'
            mock_uuid.return_value = uuid
            image = Image.new('RGB', (100, 100))
            image.save(ntf, format='JPEG')
            ntf.seek(0)
            res = self.client.patch(
                PROFILE_URL, {'avatar': ntf}, format='multipart'
            )
        filename = f'uploads/avatar/{uuid}.jpg'
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(self.user.avatar.name, filename)

    def test_update_password_not_allowed(self):
        """Test that updating password in profile endpoint is not allowed"""
        new_pass = 'newpass'
        self.client.patch(
            PROFILE_URL, {'password': new_pass}
        )
        self.user.refresh_from_db()
        # The password must be unchanged; only the change-password endpoint
        # may modify it.
        self.assertFalse(self.user.check_password(new_pass))
        # TODO how to make response status code to be 400 bad request

    def test_change_password_endpoint(self):
        """Test that change password in change-password endpoint is worked"""
        payload = {
            'old_password': 'testpass',
            'new_password': 'newpass',
            'confirm_password': 'newpass'
        }
        res = self.client.patch(CHANGE_PASSWORD_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertTrue(self.user.check_password(payload['new_password']))
        # Password fields must not be echoed back in the response.
        self.assertNotIn('new_password', res.data)
        self.assertNotIn('confirm_password', res.data)

    def test_change_password_invalid_old_password(self):
        """Test that change password with invalid old password
        is raises error"""
        payload = {
            'old_password': 'wrong',
            'new_password': 'newpass',
            'confirm_password': 'newpass'
        }
        res = self.client.patch(CHANGE_PASSWORD_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(self.user.check_password(payload['new_password']))

    def test_change_password_invalid_confirm_password(self):
        """Test that change password with different confirm password
        is raises error"""
        payload = {
            'old_password': 'testpass',
            'new_password': 'newpass',
            'confirm_password': 'wrong'
        }
        res = self.client.patch(CHANGE_PASSWORD_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(self.user.check_password(payload['new_password']))

    def test_change_password_too_short(self):
        """Test that change password with less than 6 character
        is raises error"""
        payload = {
            'old_password': 'testpass',
            'new_password': 'pw',
            'confirm_password': 'pw'
        }
        res = self.client.patch(CHANGE_PASSWORD_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(self.user.check_password(payload['new_password']))
| 34.062678 | 79 | 0.640683 | import datetime
import tempfile
import os
import shutil
from unittest.mock import patch
from PIL import Image
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.conf import settings
from rest_framework import status
from rest_framework.test import APIClient
SIGNUP_USER_URL = reverse('user:signup')
TOKEN_URL = reverse('user:signin')
PROFILE_URL = reverse('user:profile')
CHANGE_PASSWORD_URL = reverse('user:change-password')
def create_payload(**params):
    """Return a default signup payload dict, with overrides from params."""
    defaults = {
        'email': 'test@gmail.com',
        'username': 'testuser',
        'password': 'testpass'
    }
    defaults.update(**params)
    return defaults
def create_user(**params):
    """Create and return a user via the active Django user model."""
    return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
    """Unauthenticated tests for the signup / signin / profile endpoints."""

    def setUp(self):
        self.client = APIClient()

    def tearDown(self):
        # Clean up avatar files written to MEDIA_ROOT during a test.
        directory = 'uploads/avatar'
        path = os.path.join(settings.MEDIA_ROOT, directory)
        shutil.rmtree(path, ignore_errors=True)

    def test_create_user_successful(self):
        """Signup with a valid payload creates the user."""
        payload = create_payload()
        res = self.client.post(SIGNUP_USER_URL, payload)
        user = get_user_model().objects.get(
            email=payload['email'],
            username=payload['username']
        )
        # A user created without an avatar gets this default image.
        default_avatar_name = 'uploads/defaults/default.png'
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertEqual(user.email, payload['email'])
        self.assertEqual(user.username, payload['username'])
        self.assertEqual(user.avatar.name, default_avatar_name)
        self.assertTrue(user.check_password(payload['password']))
        self.assertNotIn('password', res.data)

    def test_create_user_with_dob(self):
        """Signup accepts an optional date_of_birth field."""
        payload = create_payload(
            date_of_birth=datetime.date(1992, 12, 25),
        )
        res = self.client.post(SIGNUP_USER_URL, payload)
        user = get_user_model().objects.get(
            email=payload['email'],
            username=payload['username']
        )
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertEqual(user.date_of_birth, payload['date_of_birth'])

    @patch('uuid.uuid4')
    def test_create_user_with_avatar(self, mock_uuid):
        """Signup accepts an avatar upload; uuid4 mocked for a stable name."""
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            uuid = 'test-uuid'
            mock_uuid.return_value = uuid
            image = Image.new('RGB', (100, 100))
            image.save(ntf, format='JPEG')
            ntf.seek(0)
            payload = create_payload(avatar=ntf)
            res = self.client.post(
                SIGNUP_USER_URL, payload, format='multipart'
            )
        user = get_user_model().objects.get(
            email=payload['email'],
            username=payload['username']
        )
        filepath = os.path.join(
            '/app/' + settings.MEDIA_ROOT,
            f'uploads/avatar/{uuid}.jpg'
        )
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertIn('avatar', res.data)
        self.assertEqual(user.avatar.path, filepath)

    def test_create_user_invalid_email(self):
        """Signup with a blank email is rejected."""
        payload = create_payload(email='')
        res = self.client.post(SIGNUP_USER_URL, payload)
        is_exists = get_user_model().objects.filter(
            email=payload['email'],
            username=payload['username']
        ).exists()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(is_exists)

    def test_create_user_invalid_username(self):
        """Signup with a blank username is rejected."""
        payload = create_payload(username='')
        res = self.client.post(SIGNUP_USER_URL, payload)
        is_exists = get_user_model().objects.filter(
            email=payload['email'],
            username=payload['username']
        ).exists()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(is_exists)

    def test_create_with_exists_user(self):
        """Signup with an already-existing user returns 400."""
        payload = create_payload()
        create_user(**payload)
        res = self.client.post(SIGNUP_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_user_password_too_short(self):
        """Signup with a too-short password returns 400."""
        payload = create_payload(password='wr')
        res = self.client.post(SIGNUP_USER_URL, payload)
        is_exists = get_user_model().objects.filter(
            email=payload['email'],
            username=payload['username']
        ).exists()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(is_exists)

    def test_create_token_for_user(self):
        """Signin with valid credentials returns a token."""
        payload = create_payload()
        create_user(**payload)
        res = self.client.post(TOKEN_URL, {
            'username': payload['username'],
            'password': payload['password'],
        })
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('token', res.data)

    def test_create_token_with_invalid_credentials(self):
        """Signin with a wrong password returns 400, no token."""
        create_user(**create_payload())
        payload = {
            'username': 'testuser',
            'password': 'wrong'
        }
        res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)

    def test_create_token_no_user(self):
        """Signin for a non-existent user returns 400, no token."""
        payload = {
            'username': 'testuser',
            'password': 'testpass'
        }
        res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)

    def test_create_token_missing_fields(self):
        """Signin with a blank required field returns 400, no token."""
        res = self.client.post(TOKEN_URL, {'username': 'user', 'password': ''})
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)

    def test_retrieve_user_profile_unauthorized(self):
        """Profile retrieval without authentication returns 401."""
        res = self.client.get(PROFILE_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
    """Authenticated tests for the profile and change-password endpoints."""

    def setUp(self):
        self.client = APIClient()
        self.user = create_user(**create_payload())
        self.client.force_authenticate(user=self.user)

    def tearDown(self):
        # Clean up avatar files written to MEDIA_ROOT during a test.
        directory = 'uploads/avatar'
        path = os.path.join(settings.MEDIA_ROOT, directory)
        shutil.rmtree(path, ignore_errors=True)

    def test_retrieve_user_profile(self):
        """Profile GET returns the authenticated user's fields."""
        res = self.client.get(PROFILE_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data['username'], self.user.username)
        self.assertEqual(res.data['email'], self.user.email)

    def test_post_method_not_allowed(self):
        """POST on the profile endpoint returns 405."""
        res = self.client.post(PROFILE_URL, {})
        self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_update_user_profile(self):
        """PATCH on the profile endpoint updates username/email."""
        payload = {
            'username': 'newname',
            'email': 'newemail@gmail.com'
        }
        res = self.client.patch(PROFILE_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(self.user.username, payload['username'])
        self.assertEqual(self.user.email, payload['email'])

    @patch('uuid.uuid4')
    def test_update_avatar_user(self, mock_uuid):
        """PATCH with a multipart avatar updates the image; uuid4 mocked."""
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            uuid = 'test-uuid'
            mock_uuid.return_value = uuid
            image = Image.new('RGB', (100, 100))
            image.save(ntf, format='JPEG')
            ntf.seek(0)
            res = self.client.patch(
                PROFILE_URL, {'avatar': ntf}, format='multipart'
            )
        filename = f'uploads/avatar/{uuid}.jpg'
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(self.user.avatar.name, filename)

    def test_update_password_not_allowed(self):
        """Password cannot be changed through the profile endpoint."""
        new_pass = 'newpass'
        self.client.patch(
            PROFILE_URL, {'password': new_pass}
        )
        self.user.refresh_from_db()
        self.assertFalse(self.user.check_password(new_pass))

    def test_change_password_endpoint(self):
        """The change-password endpoint updates the password."""
        payload = {
            'old_password': 'testpass',
            'new_password': 'newpass',
            'confirm_password': 'newpass'
        }
        res = self.client.patch(CHANGE_PASSWORD_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertTrue(self.user.check_password(payload['new_password']))
        # Password fields must not be echoed back in the response.
        self.assertNotIn('new_password', res.data)
        self.assertNotIn('confirm_password', res.data)

    def test_change_password_invalid_old_password(self):
        """A wrong old password is rejected with 400."""
        payload = {
            'old_password': 'wrong',
            'new_password': 'newpass',
            'confirm_password': 'newpass'
        }
        res = self.client.patch(CHANGE_PASSWORD_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(self.user.check_password(payload['new_password']))

    def test_change_password_invalid_confirm_password(self):
        """A mismatched confirm password is rejected with 400."""
        payload = {
            'old_password': 'testpass',
            'new_password': 'newpass',
            'confirm_password': 'wrong'
        }
        res = self.client.patch(CHANGE_PASSWORD_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(self.user.check_password(payload['new_password']))

    def test_change_password_too_short(self):
        """A too-short new password is rejected with 400."""
        payload = {
            'old_password': 'testpass',
            'new_password': 'pw',
            'confirm_password': 'pw'
        }
        res = self.client.patch(CHANGE_PASSWORD_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(self.user.check_password(payload['new_password']))
| true | true |
f7f5b19149a76915f1a380024cfde6affe4247ef | 31,533 | py | Python | commands/feature_matrix_construction/main/run_pwRK3.py | cancerregulome/gidget | 6c9e9a37f9992267c7505c7a396ff7e2638599ab | [
"MIT"
] | 3 | 2016-02-22T21:29:23.000Z | 2020-09-19T07:38:21.000Z | commands/feature_matrix_construction/main/run_pwRK3.py | cancerregulome/gidget | 6c9e9a37f9992267c7505c7a396ff7e2638599ab | [
"MIT"
] | 1 | 2015-01-16T02:33:59.000Z | 2015-01-16T02:33:59.000Z | commands/feature_matrix_construction/main/run_pwRK3.py | cancerregulome/gidget | 6c9e9a37f9992267c7505c7a396ff7e2638599ab | [
"MIT"
] | 2 | 2015-12-27T08:40:12.000Z | 2021-03-01T06:30:23.000Z | #!/usr/bin/env python
import argparse
import commands
import getpass
import os
import os.path
import sys
import time
from env import gidgetConfigVars
import miscIO
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def cleanString(aType):
    """Return aType with every ':' character removed.

    e.g. 'N:GEXP:TP53' -> 'NGEXPTP53'.
    """
    # str.replace does the same per-character filtering as the original
    # loop-and-concatenate, in a single pass and without O(n^2) string
    # building.  Works identically under Python 2 and 3.
    return aType.replace(":", "")
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getFeatureIndex(indexString, featureMatrixFile):
    """Return the 0-based feature (row) index matching indexString.

    Scans the tab-delimited feature matrix; a row matches when its first
    column contains indexString.  With multiple candidates, an exact
    name match wins, then a match on the 3rd ':'-delimited token (the
    gene/feature symbol).  Exits the process on no match or ambiguity.
    """
    print " <%s> <%s> " % (indexString, featureMatrixFile)
    matchList = []
    indexList = []
    fh = file(featureMatrixFile)
    ii = 0
    for aLine in fh:
        if (aLine.find(indexString) >= 0):
            tokenList = aLine.split('\t')
            if (tokenList[0].find(indexString) >= 0):
                matchList += [tokenList[0]]
                # (ii - 1): skip the header line so indices are row-based.
                indexList += [(ii - 1)]
        ii += 1
    if (len(matchList) == 0):
        print " no matching feature ??? ", indexString
        sys.exit(-1)
    if (len(matchList) == 1):
        return (indexList[0])
    # Ambiguous: prefer an exact feature-name match first.
    for ii in range(len(matchList)):
        if (matchList[ii] == indexString):
            return (indexList[ii])
    # Then try matching the 3rd ':'-delimited token of the feature name.
    for ii in range(len(matchList)):
        tokenList = matchList[ii].split(':')
        if (tokenList[2] == indexString):
            return (indexList[ii])
    print " in getFeatureIndex ... too many possible matches ??? "
    print matchList
    sys.exit(-1)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getIndexRanges(tsvFile, aType):
    """Return a list of (start, stop) row-index ranges for features of aType.

    aType is a feature type code (e.g. "GEXP") matched as ":GEXP:" in the
    feature name, or "ANY" to select every row.  Contiguous matching rows
    are merged into ranges, then any range longer than max(100, numI/20)
    is split into chunks of at most that size (for work partitioning).
    Returns [] when nothing matches.
    """
    print " in getIndexRanges ... ", tsvFile, aType
    typeList = ["CLIN", "CNVR", "GEXP", "GNAB",
                "METH", "MIRN", "RPPA", "SAMP"]
    if (aType in typeList):
        aType = ":" + aType + ":"
    iRanges = []
    iList = []
    fh = file(tsvFile)
    # Skip the header line before scanning feature rows.
    aLine = fh.readline()
    done = 0
    ii = 0
    while not done:
        aLine = fh.readline()
        aLine = aLine.strip()
        # A very short line is treated as end-of-data.
        if (len(aLine) < 5):
            done = 1
        else:
            tokenList = aLine.split('\t')
            if (aType=="ANY"):
                iList += [ii]
            elif (tokenList[0].find(aType) >= 0):
                iList += [ii]
            ii += 1
    # if ( ii%10000 == 0 ): print ii, len(tokenList)
    fh.close()
    numI = len(iList)
    if ( numI < 1 ): return ( [] )
    print " numI = ", numI
    print iList[:5]
    print iList[-5:]
    # Collapse the sorted index list into contiguous (start, stop) ranges.
    iStart = iList[0]
    for ii in range(1, numI):
        if (iList[ii] > (iList[ii - 1] + 1)):
            iRanges += [(iStart, iList[ii - 1])]
            iStart = iList[ii]
    iRanges += [(iStart, iList[-1])]
    print " len(iRanges) = ", len(iRanges)
    print iRanges[:5]
    print iRanges[-5:]
    # now make sure that none of the ranges are too big ...
    maxRngSize = max ( 100, (numI/20) )
    print " --> maxRngSize = ", maxRngSize
    newRanges = []
    for aTuple in iRanges:
        iStart = aTuple[0]
        iStop = aTuple[1]
        if ((iStop - iStart) < maxRngSize):
            newRanges += [aTuple]
        else:
            # Split an oversized range into maxRngSize-sized chunks.
            jStart = iStart
            jStop = jStart + maxRngSize
            while (jStop < iStop):
                bTuple = (jStart, min(jStop, iStop))
                newRanges += [bTuple]
                jStart = jStop
                jStop = jStart + maxRngSize
            bTuple = (jStart, min(jStop, iStop))
            newRanges += [bTuple]
    print " original # of range blocks : ", len(iRanges)
    print iRanges[:5], iRanges[-5:]
    print " new # of range blocks : ", len(newRanges)
    print newRanges[:5], newRanges[-5:]
    return (newRanges)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getNumFeat(featureMatrixFile):
    """Return the number of feature rows: total line count minus the header."""
    fh = file(featureMatrixFile)
    numLines = miscIO.num_lines(fh)
    numFeat = numLines - 1
    fh.close()
    return (numFeat)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getNumSamples(featureMatrixFile):
    """Return the number of samples: tab-delimited column count minus the
    feature-name column."""
    fh = file(featureMatrixFile)
    numCols = miscIO.num_cols(fh, '\t')
    numSamples = numCols - 1
    fh.close()
    return (numSamples)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# input file is assumed to end in .tsv
# this function checks to see if the binFile exists and is up to date
# with respect to the tsvFile ... if necessary, it will call prep4pairwise
# to create the bin file
def preProcessTSV(tsvFile):
    """Ensure an up-to-date .bin companion exists for tsvFile; return its path.

    tsvFile must end in ".tsv".  If the .bin file is missing or older than
    the .tsv, stale .bin/.cat files are deleted and prep4pairwise.py is
    invoked (via the TCGAFMP_PYTHON3 interpreter) to regenerate them.
    Exits the process if regeneration fails.
    """
    tsvTime = os.path.getmtime(tsvFile)
    # print tsvTime
    binFile = tsvFile[:-4] + ".bin"
    catFile = tsvFile[:-4] + ".cat"
    try:
        binTime = os.path.getmtime(binFile)
        # print binTime
    except:
        # Missing .bin file: force regeneration below.
        binTime = 0
    if (tsvTime > binTime):
        # just to be sure, delete the *.bin and *.cat files ...
        cmdString = "rm -fr %s" % binFile
        (status, output) = commands.getstatusoutput(cmdString)
        cmdString = "rm -fr %s" % catFile
        (status, output) = commands.getstatusoutput(cmdString)
        print " creating bin file "
        cmdString = "%s %s/prep4pairwise.py %s" % (gidgetConfigVars['TCGAFMP_PYTHON3'], gidgetConfigVars['TCGAFMP_PAIRWISE_ROOT'], tsvFile)
        (status, output) = commands.getstatusoutput(cmdString)
        if (status != 0):
            print " (a) ERROR ??? failed to execute command ??? "
            print cmdString
            print status
            print output
            sys.exit(-1)
        print " --> bin file created "
        # verify that the bin file actually exists now, otherwise bail ...
        try:
            binTime = os.path.getmtime(binFile)
        except:
            print " "
            print " FATAL ERROR ... prep4pairwise has failed "
            print " "
            print cmdString
            print status
            print output
            sys.exit(-1)
    else:
        print " bin file already up to date "
    return (binFile)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getLocalScratchDir():
    """Return a usable scratch directory path.

    Prefers TCGAFMP_LOCAL_SCRATCH; falls back to TCGAFMP_CLUSTER_SCRATCH.
    Exits the process if neither directory exists.
    """
    defaultscratch = gidgetConfigVars['TCGAFMP_CLUSTER_SCRATCH']
    localscratch = gidgetConfigVars['TCGAFMP_LOCAL_SCRATCH']
    if (not os.path.exists(localscratch)):
        if (not os.path.exists(defaultscratch)):
            print " FATAL ERROR ... need access to some scratch space !!! "
            sys.exit(-1)
        else:
            print " --> using this scratch directory : ", defaultscratch
            return ( defaultscratch )
    else:
        print " --> using this scratch directory : ", localscratch
        return ( localscratch )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def tuplesOverlap(iTuple, jTuple):
    """Report whether two (start, stop) index ranges overlap (1) or not (0).

    NOTE: the overlap check is temporarily disabled upstream, so this
    currently ALWAYS returns 0.
    """
    # Short-circuit while the check is turned off.
    return (0)
    # --- disabled logic, kept for when the check is re-enabled ---
    # Ranges overlap unless one starts at or after the other's stop,
    # e.g. (4767, 4867) vs (4807, 6000) would overlap.
    disjoint = (iTuple[0] >= jTuple[1]) or (jTuple[0] >= iTuple[1])
    if disjoint:
        return (0)
    return (1)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# tmpDir13 : <.../TCGA/pw_scratch/YNNROlQo.1388771679.87.scratch>
# localDir : </local/<user>/pw_scratch/>
def copyScratchFiles ( tmpDir13, localDir ):
    """Copy a scratch directory into localDir if it is not already there.

    tmpDir13 is a scratch dir (e.g. .../pw_scratch/<name>.scratch); if it
    does not live under localDir it is recursively copied there, waiting
    (via sleeps and watchDir) for in-flight *.pw writes to settle before
    and after the copy.  Returns the path to use afterwards.
    """
    print " in copyScratchFiles ... <%s> <%s> " % ( tmpDir13, localDir )
    if ( not tmpDir13.startswith(localDir) ):
        sleepTime=60
        time.sleep(sleepTime)
        # Wait until no *.pw file in the source dir is still being written.
        watchDir ( tmpDir13 )
        # Extract the last path component (the scratch dir's own name).
        ii = len(tmpDir13) - 3
        while ( tmpDir13[ii] != "/" ): ii -= 1
        sName = tmpDir13[ii+1:]
        ## print ii, sName
        cmdString = "cp -fr %s %s/" % ( tmpDir13, localDir )
        print " DOING COPY ... cmdString : <%s> " % cmdString
        (status, output) = commands.getstatusoutput(cmdString)
        newDir = localDir + "/" + sName
        print " --> newDir : <%s> " % newDir
        time.sleep(sleepTime)
        # Wait for the copied files to settle as well.
        watchDir ( newDir )
        print " --> returning <%s> " % newDir
        return ( newDir )
    else:
        print " NOT copying scratch files ... "
        print " --> returning <%s> " % tmpDir13
        return ( tmpDir13 )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def lastModTime(aDir):
    """Return the newest modification time among *.pw files directly in
    aDir, or -1 when the directory holds no *.pw files."""
    pwTimes = [os.path.getmtime(aDir + "/" + entry)
               for entry in os.listdir(aDir)
               if entry.endswith(".pw")]
    # Including -1 reproduces the original "start at -1, keep the max" scan.
    return max([-1] + pwTimes)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def watchDir ( aDir ):
    """Block until the '.pw' files in aDir stop being modified.

    Polls lastModTime() every 20 seconds and loops while the newest
    modification time keeps advancing; bails out via sys.exit(-1) after
    ~100 polls.  Used to make sure cluster jobs have finished writing
    their output files before they are read or copied.
    """
    # normalize away a trailing slash
    if ( aDir[-1] == "/" ): aDir = aDir[:-1]
    t1 = lastModTime ( aDir )
    print " watchDir t1 ", t1
    nLoop = 0
    sleepTime = 20
    time.sleep(sleepTime)
    t2 = lastModTime ( aDir )
    print " watchDir ", t1, t2, nLoop
    # keep waiting as long as the newest .pw file keeps getting newer
    while ( t2 > t1 ):
        t1 = t2
        time.sleep(sleepTime)
        t2 = lastModTime ( aDir )
        nLoop += 1
        print " watchDir ", t1, t2, nLoop
        # safety valve: give up after ~100 polls (~30+ minutes)
        if ( nLoop > 100 ):
            print " BAILING out of watchDir ... ERROR ... EXITING "
            sys.exit(-1)
    # NOTE(review): indentation was ambiguous in the original paste; these
    # final sleeps are placed after the polling loop (extra settle time
    # for the filesystem) -- confirm against the original file
    time.sleep(sleepTime)
    time.sleep(sleepTime)
    print " leaving watchDir "
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
if __name__ == "__main__":
# ALL necessary inputs should be handled using this ArgumentParser ... there shouldn't
# be any 'left-over' arguments ... any unrecognized command-line inputs will result
# in an error like:
# rkpw_list_gen.py: error: unrecognized arguments: abc def
parser = argparse.ArgumentParser(
description='Create runlist for pairwise')
parser.add_argument('--min-ct-cell', '-minct',
action='store', default=3, type=int)
parser.add_argument('--min-mx-cell', '-minmx',
action='store', default=3, type=int)
parser.add_argument('--min-samples', '-M',
action='store', default=11, type=int)
parser.add_argument('--pvalue', '-p', action='store',
default=0.000001, type=float)
parser.add_argument('--adjP', '-a', action='store_true')
parser.add_argument('--all', '-A', action='store_true')
parser.add_argument('--one', '-O', action='store')
parser.add_argument('--byType', '-T', action='store_true')
parser.add_argument('--type1', '-t1', action='store')
parser.add_argument('--type2', '-t2', action='store')
parser.add_argument('--verbosity', '-v',
action='store', default=0, type=int)
parser.add_argument('--tsvFile', '-f', action='store', required=True)
parser.add_argument('--forRE', '-R', action='store_true')
parser.add_argument('--forLisa', '-L', action='store_true')
parser.add_argument('--useBC', '-B', action='store',
default=99, type=float)
if (len(sys.argv) < 2):
print " "
print " Output of this script is a tab-delimited file with 12 columns, and "
print " one line for each significant pairwise association: "
print " "
print " # 1 feature A "
print " # 2 feature B (order is alphabetical, and has no effect on result) "
print " # 3 Spearman correlation coefficient (range is [-1,+1], or NA "
print " # 4 number of samples used for pairwise test (non-NA overlap of feature A and feature B) "
print " # 5 -log10(p-value) (uncorrected) "
print " # 6 log10(Bonferroni correction factor) "
print " # 7 -log10(corrected p-value) [ col #7 = min ( (col #5 - col #6), 0 ) ] "
print " # 8 # of non-NA samples in feature A that were not used in pairwise test "
print " # 9 -log(p-value) that the samples from A that were not used are 'different' from those that were "
print " #10 (same as col #8 but for feature B) "
print " #11 (same as col #9 but for feature B) "
print " #12 genomic distance between features A and B (or 500000000) "
print " "
print " ERROR -- bad command line arguments "
args = parser.parse_args()
print args
print " (a) TIME ", time.asctime(time.localtime(time.time()))
# at this point we should have a Namespace called 'args' that looks something like this:
# Namespace ( tsvFile=['test.tsv'],
# runFile=['test.run'],
## byname=False, input=None,
# min_ct_cell=5, one=None, all=True,
# pvalue=1e-06, tail=0, verbosity=0 )
if (0):
# NEW 19feb13 : need to have either "forRE" or "forLisa" specified so that we know
# what type of post-processing to invoke ...
if (args.forRE):
if (args.forLisa):
print " ERROR : must choose either --forRE or --forLisa, not both "
sys.exit(-1)
else:
if (not args.forLisa):
print " ERROR : must specify either --forRE or --forLisa "
sys.exit(-1)
# note that we must either have an integer (or string) value in 'one'
# OR 'all' must be TRUE
# OR 'byType' must be TRUE
# new 02jan14 : when using the --one option, we can also use the --byType
# option and only specificy *one* of the types ...
print args
indexString = ''
if (args.all):
print " --> running ALL by ALL "
args.byType = False
args.one = None
elif (args.one != None):
if (args.byType):
if (args.type1 == None and args.type2 == None):
print " ERROR ... when using the one and byType options together, either type1 or type2 must be specified "
elif (args.type1 != None and args.type2 != None):
print " ERROR ... when using the one and byType options together, you can specific only type1 or type2, not both "
else:
if (args.type1 == None):
args.type1 = args.type2
args.type2 = None
args.all = False
indexString = str(args.one)
print " --> running <%s> by <%s> " % (args.one, args.type1)
else:
print " --> running <%s> by ALL " % (args.one)
args.all = False
args.byType = False
indexString = str(args.one)
elif (args.byType):
if (args.type1 == None or args.type2 == None):
print " ERROR ... when using the byType option, type1 and type2 must be specified "
sys.exit(-1)
print " --> running <%s> by <%s> " % (args.type1, args.type2)
args.all = False
args.one = None
else:
print " ERROR ... invalid settings for --all or --byType or --one "
sys.exit(-1)
# get the tsv feature matrix file and also the number of features it
# contains
tsvFile = args.tsvFile
print " input tsv file name <%s> " % tsvFile
if (not os.path.exists(tsvFile)):
print " <%s> is not a valid file, exiting ... " % tsvFile
sys.exit(-1)
if (not tsvFile.endswith(".tsv")):
print " <%s> input file should be a TSV file " % tsvFile
sys.exit(-1)
if (tsvFile[0] != "/"):
print " absolute path name for input file <%s> is required " % tsvFile
sys.exit(-1)
numFeat = getNumFeat(tsvFile)
print " --> number of features : ", numFeat
numSamples = getNumSamples(tsvFile)
print " --> number of samples : ", numSamples
# if we are doing the 'byType' option, we need to figure out which indices
# are involved ...
if (args.byType):
iRanges1 = getIndexRanges(tsvFile, args.type1)
if (args.type2 != None):
iRanges2 = getIndexRanges(tsvFile, args.type2)
else:
try:
index = int(args.one)
except:
index = getFeatureIndex(args.one, args.tsvFile)
iRanges2 = [ ( index, index ) ]
print " single index range : ", iRanges2
# if the user wants to use the "adjP" option, then we set the p-value based on
# the number of samples ... right now the approach is to do 1.e-X where X=5+(N/100)
# and N is the number of samples
if (args.adjP):
args.pvalue = (1. / 100000.) / float(10. ** (int(numSamples / 100)))
print " --> setting pvalue threshold to : ", args.pvalue
# we need to pre-process the tsv file (unless it appears to have already
# been done)
binFile = preProcessTSV(tsvFile)
# create a random name for this particular run ...
# and then make a subdirectory for the outputs ...
curJobName = miscIO.make_random_fname()
print " "
print " randomly generated job name : <%s> " % curJobName
print " "
tmpDir13 = "%s/%s" % (gidgetConfigVars['TCGAFMP_CLUSTER_SCRATCH'], curJobName)
cmdString = "mkdir %s" % tmpDir13
(status, output) = commands.getstatusoutput(cmdString)
if (not os.path.exists(tmpDir13)):
print " mkdir command failed ??? "
print cmdString
sys.exit(-1)
# open the jobInfo file ...
jobFile = tmpDir13 + "/jobInfo.txt"
try:
fh = file(jobFile, 'w')
except:
print " failed to open output file <%s>, exiting ... " % jobFile
sys.exit(-1)
fh.write("tsvFile = %s\n" % args.tsvFile)
if (args.all):
fh.write("all = TRUE\n")
elif (args.byType):
fh.write("type1 = %s\n" % args.type1)
if (args.type2 != None):
fh.write("type2 = %s\n" % args.type2)
elif (args.one):
try:
index = int(args.one)
except:
index = getFeatureIndex(args.one, args.tsvFile)
print " --> got this index : ", index
fh.write("one = %d\n" % index)
if (args.useBC < 1.):
fh.write("useBC = %g\n" % args.useBC)
if (args.adjP):
fh.write("adjP = TRUE\n")
fh.write("pvalue = %f\n" % args.pvalue)
fh.write("min-samples = %d\n" % args.min_samples)
fh.write("min-ct-cell = %d\n" % args.min_ct_cell)
fh.write("min-mx-cell = %d\n" % args.min_mx_cell)
fh.close()
# next open the runFile ...
runFile = tmpDir13 + "/runList.txt"
try:
fh = file(runFile, 'w')
except:
print " failed to open output file <%s>, exiting ... " % runFile
sys.exit(-1)
pythonbin = sys.executable
golempwd = "PASSWD_HERE"
fhC = file (gidgetConfigVars['TCGAFMP_CLUSTER_SCRATCH'] + "/config", 'r' )
aLine = fhC.readline()
fhC.close()
aLine = aLine.strip()
golempwd = aLine
print " got this p ... <%s> " % golempwd
print " "
one_vs_all_flag = 0
if (args.all):
print " --> handling the all by all option ... "
# handle the all by all option ...
# calling with these options:
# --outer index:index:1 --inner +1::1
# changing this 02Jan14 ... to limit the # of tasks being sent to the cluster
maxJobs = 500
nFpJ = max ( 100, (numFeat/maxJobs) )
print " --> number of features per task : ", nFpJ
iStart = 0
numJobs = 0
while iStart < numFeat:
iStop = min ( (iStart + nFpJ), numFeat )
outName = tmpDir13 + "/" + str(numJobs) + ".pw"
## cmdString = "1 " + gidgetConfigVars['TCGAFMP_PAIRWISE_ROOT'] + "/pairwise-2.1.2"
cmdString = "1 " + gidgetConfigVars['TCGAFMP_PAIRWISE_ROOT'] + "/pairwise-3.0.1rc1-rel"
cmdString += " --pvalue %g --min-ct-cell %d --min-mx-cell %d --min-samples %d" \
% (args.pvalue, args.min_ct_cell, args.min_mx_cell, args.min_samples)
cmdString += " --outer %d:%d:1 --inner +1::1 %s %s " \
% (iStart, iStop, binFile, outName)
fh.write("%s\n" % cmdString)
numJobs += 1
iStart += nFpJ
elif (args.byType):
print " --> handling the byType option ... "
try:
print " ", args.type1
except:
doNothing=1
try:
print " ", args.type2
except:
doNothing=1
numJobs = 0
# print " index ranges: "
# print iRanges1
# print iRanges2
for iTuple in iRanges1:
for jTuple in iRanges2:
outName = tmpDir13 + "/" + str(numJobs) + ".pw"
## cmdString = "1 " + gidgetConfigVars['TCGAFMP_PAIRWISE_ROOT'] + "/pairwise-2.1.2"
cmdString = "1 " + gidgetConfigVars['TCGAFMP_PAIRWISE_ROOT'] + "/pairwise-3.0.1rc1-rel"
cmdString += " --pvalue %g --min-ct-cell %d --min-mx-cell %d --min-samples %d" \
% (args.pvalue, args.min_ct_cell, args.min_mx_cell, args.min_samples)
# here we need to adjust things so that we don't do the same
# comparison twice ...
if (tuplesOverlap(iTuple, jTuple)):
# print " handling overlapping tuples ", iTuple, jTuple
jStart = jTuple[0] - iTuple[0] + 1
jStop = jTuple[1] - iTuple[0] + 1
if (jStart < 1):
print " ERROR ??? ", iTuple, jTuple
sys.exit(-1)
cmdString += " --outer %d:%d:1 --inner +%d:%d:1 %s %s " \
% (iTuple[0], iTuple[1] + 1, jStart, jStop, binFile, outName)
else:
# print " handling NONoverlapping tuples ", iTuple,
# jTuple
cmdString += " --outer %d:%d:1 --inner %d:%d:1 %s %s " \
% (iTuple[0], iTuple[1] + 1, jTuple[0], jTuple[1] + 1, binFile, outName)
fh.write("%s\n" % cmdString)
# print numJobs, cmdString
# print "%s" % cmdString
numJobs += 1
else:
one_vs_all_flag = 1
print " --> handling the one vs all option ... ", index
# handle the single index vs all option ...
# ( note that the single-index vs a specified "type" is handled above )
outName = tmpDir13 + "/" + str(index) + ".pw"
## cmdString = "1 " + gidgetConfigVars['TCGAFMP_PAIRWISE_ROOT'] + "/pairwise-2.1.2"
cmdString = "1 " + gidgetConfigVars['TCGAFMP_PAIRWISE_ROOT'] + "/pairwise-3.0.1rc1-rel"
cmdString += " --pvalue %g --min-ct-cell %d --min-mx-cell %d --min-samples %d" \
% (args.pvalue, args.min_ct_cell, args.min_mx_cell, args.min_samples)
cmdString += " --outer %d:%d:1 --inner 0::1 %s %s " \
% (index, index + 1, binFile, outName)
fh.write("%s\n" % cmdString)
numJobs = 1
fh.close()
if ( numJobs < 1 ):
print " "
print " Bailing out now because there is nothing to do ... "
print " "
sys.exit(-1)
print " "
print " ********************************************* "
print " Number of jobs about to be launched : ", numJobs
print " ********************************************* "
print " (b) TIME ", time.asctime(time.localtime(time.time()))
print " "
# ok, now we want to actually launch the jobs ...
cmdString = "python %s/main/golem.py " % gidgetConfigVars['TCGAFMP_ROOT_DIR']
cmdString += "http://glados.systemsbiology.net:7083 -p " + golempwd + " "
cmdString += "-L pairwise-3.0.1rc1-rel -u "
cmdString += getpass.getuser() + " "
cmdString += "runlist " + runFile
print cmdString
(status, output) = commands.getstatusoutput(cmdString)
print status
print output
print " "
print " "
print " --------------- "
done = 0
while not done:
numOutFiles = 0
for aName in os.listdir(tmpDir13):
if (aName.endswith(".pw")):
numOutFiles += 1
print numOutFiles
if (numOutFiles == numJobs):
done = 1
else:
tSleep = max(10, int((numJobs - numOutFiles) / 20))
if (args.byType): tSleep = min(20,tSleep)
print " ( sleeping for %.0f seconds ) " % float(tSleep)
time.sleep(tSleep)
print " should be done !!! ", numOutFiles, numJobs
if ( 0 ):
tSleep = 120
time.sleep(tSleep)
else:
# now we need to poll to make sure that the last file is done
# being written ...
watchDir ( tmpDir13 )
print " (c) TIME ", time.asctime(time.localtime(time.time()))
# make sure that we have a local scratch directory to use for the sorting
localDir = getLocalScratchDir()
print " "
print " now we should move or copy stuff ... "
print " tmpDir13 : <%s> " % tmpDir13
print " localDir : <%s> " % localDir
tmpDir13 = copyScratchFiles ( tmpDir13, localDir )
print " "
## NOTE that from now on, tmpDir13 hopefully points to a LOCAL scratch directory ...
# if there was only one job, then we're done now ...
if ((numJobs == 1) and (not args.byType) and (one_vs_all_flag==1)):
print " handling a one-by-all run ... "
if (args.useBC < 1.):
print " --> will filter on Bonferonni-corrected p-value with threshold of ", args.useBC
# first we run post_pwRK2.py which writes
# out something that looks like the output from runPWPV
iOne = index
cmdString = "python %s/main/post_pwRK2.py %s %s %d %g" % (
gidgetConfigVars['TCGAFMP_ROOT_DIR'], tmpDir13, tsvFile, iOne, args.useBC)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print status, output
cmdString = "sort -grk 5 --temporary-directory=%s %s/post_proc_all.tsv >& %s/%d.all.pwpv.sort" % \
(localDir, tmpDir13, tmpDir13, iOne)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print status, output
cmdString = "mv %s/%d.all.pwpv.sort %s.%d.all.pwpv.sort" % (tmpDir13,
iOne, tsvFile[:-4], iOne)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print status, output
print "\n\n DONE \n\n"
print " (d) TIME ", time.asctime(time.localtime(time.time()))
cmdString = "rm -fr %s" % tmpDir13
print " final command : <%s> " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
sys.exit(-1)
# now that the job is finished, we need to handle the post-processing
if (args.forRE):
print " post-processing for RE ... "
if (args.useBC < 1.):
print " --> will filter on Bonferonni-corrected p-value with threshold of ", args.useBC
# first we run post_pwRK2.py which concatenates them all and writes
# out something that looks like the output from runPWPV
cmdString = "python %s/main/post_pwRK2.py %s %s -1 %g" % (
gidgetConfigVars['TCGAFMP_ROOT_DIR'], tmpDir13, tsvFile, args.useBC)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " STATUS : ", status
print " OUTPUT : ", output
print " (d) TIME ", time.asctime(time.localtime(time.time()))
# and then we run the script that sorts and trims the output file
cmdString = "%s/shscript/proc_pwpv2.sh %s" % (gidgetConfigVars['TCGAFMP_ROOT_DIR'], tmpDir13)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " STATUS : ", status
print " OUTPUT : ", output
print " (e) TIME ", time.asctime(time.localtime(time.time()))
# and now we move the files that we want to keep ...
if (args.byType):
cmdString = "uniq %s/post_proc_all.short.sort.mapped.noPathway > %s.%s.%s.pwpv.forRE" % \
(tmpDir13, tsvFile[:-4], cleanString(args.type1),
cleanString(args.type2))
else:
cmdString = "mv %s/post_proc_all.short.sort.mapped.noPathway %s.pwpv.forRE" % (
tmpDir13, tsvFile[:-4])
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (f) TIME ", time.asctime(time.localtime(time.time()))
if (args.byType):
cmdString = "mv %s/post_proc_all.tsv %s.%s.%s.pwpv" % \
(tmpDir13, tsvFile[:-4], cleanString(args.type1),
cleanString(args.type2))
else:
cmdString = "mv %s/post_proc_all.tsv %s.pwpv" % (tmpDir13,
tsvFile[:-4])
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (g) TIME ", time.asctime(time.localtime(time.time()))
elif (args.forLisa):
print " post-processing for Lisa's pancan analysis ... "
print " (d) TIME ", time.asctime(time.localtime(time.time()))
if (args.useBC < 1.):
print " --> will filter on Bonferonni-corrected p-value with threshold of ", args.useBC
# first we run post_pwRK2.py which concatenates them all and writes
# out something that looks like the output from runPWPV
cmdString = "python %s/main/post_pwRK2.py %s %s -1 %g" % (
gidgetConfigVars['TCGAFMP_ROOT_DIR'], tmpDir13, tsvFile, args.useBC)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (e) TIME ", time.asctime(time.localtime(time.time()))
# at this point we have post_proc_all.tsv
# and post_proc_all.NGEXP.NGEXP.tmp
cmdString = "%s/shscript/proc_pancan.sh %s" % (gidgetConfigVars['TCGAFMP_ROOT_DIR'], tmpDir13)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (f) TIME ", time.asctime(time.localtime(time.time()))
# and now we move the files that we want to keep ...
cmdString = "mv %s/post_proc_all.NGEXP.NGEXP.tmp.sort.top1M %s.pwpv.NGEXP.NGEXP.top1M" % (
tmpDir13, tsvFile[:-4])
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (g) TIME ", time.asctime(time.localtime(time.time()))
cmdString = "mv %s/post_proc_all.NGEXP.NGEXP.tmp.sort %s.pwpv.NGEXP.NGEXP.all" % (
tmpDir13, tsvFile[:-4])
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (h) TIME ", time.asctime(time.localtime(time.time()))
cmdString = "mv %s/post_proc_all.tsv %s.pwpv" % (tmpDir13, tsvFile[:-4])
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (i) TIME ", time.asctime(time.localtime(time.time()))
else:
print " ************************************************** "
print " *** NO POST-PROCESSING ??? OUTPUTS MAY BE LOST *** "
print " ************************************************** "
cmdString = "rm -fr %s" % tmpDir13
print " final command : <%s> " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print "\n\n DONE \n\n"
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
| 36.244828 | 139 | 0.534171 |
import argparse
import commands
import getpass
import os
import os.path
import sys
import time
from env import gidgetConfigVars
import miscIO
outName = tmpDir13 + "/" + str(numJobs) + ".pw"
## cmdString = "1 " + gidgetConfigVars['TCGAFMP_PAIRWISE_ROOT'] + "/pairwise-2.1.2"
cmdString = "1 " + gidgetConfigVars['TCGAFMP_PAIRWISE_ROOT'] + "/pairwise-3.0.1rc1-rel"
cmdString += " --pvalue %g --min-ct-cell %d --min-mx-cell %d --min-samples %d" \
% (args.pvalue, args.min_ct_cell, args.min_mx_cell, args.min_samples)
# here we need to adjust things so that we don't do the same
if (tuplesOverlap(iTuple, jTuple)):
jStart = jTuple[0] - iTuple[0] + 1
jStop = jTuple[1] - iTuple[0] + 1
if (jStart < 1):
print " ERROR ??? ", iTuple, jTuple
sys.exit(-1)
cmdString += " --outer %d:%d:1 --inner +%d:%d:1 %s %s " \
% (iTuple[0], iTuple[1] + 1, jStart, jStop, binFile, outName)
else:
cmdString += " --outer %d:%d:1 --inner %d:%d:1 %s %s " \
% (iTuple[0], iTuple[1] + 1, jTuple[0], jTuple[1] + 1, binFile, outName)
fh.write("%s\n" % cmdString)
numJobs += 1
else:
one_vs_all_flag = 1
print " --> handling the one vs all option ... ", index
outName = tmpDir13 + "/" + str(index) + ".pw"
-3.0.1rc1-rel"
cmdString += " --pvalue %g --min-ct-cell %d --min-mx-cell %d --min-samples %d" \
% (args.pvalue, args.min_ct_cell, args.min_mx_cell, args.min_samples)
cmdString += " --outer %d:%d:1 --inner 0::1 %s %s " \
% (index, index + 1, binFile, outName)
fh.write("%s\n" % cmdString)
numJobs = 1
fh.close()
if ( numJobs < 1 ):
print " "
print " Bailing out now because there is nothing to do ... "
print " "
sys.exit(-1)
print " "
print " ********************************************* "
print " Number of jobs about to be launched : ", numJobs
print " ********************************************* "
print " (b) TIME ", time.asctime(time.localtime(time.time()))
print " "
cmdString = "python %s/main/golem.py " % gidgetConfigVars['TCGAFMP_ROOT_DIR']
cmdString += "http://glados.systemsbiology.net:7083 -p " + golempwd + " "
cmdString += "-L pairwise-3.0.1rc1-rel -u "
cmdString += getpass.getuser() + " "
cmdString += "runlist " + runFile
print cmdString
(status, output) = commands.getstatusoutput(cmdString)
print status
print output
print " "
print " "
print " --------------- "
done = 0
while not done:
numOutFiles = 0
for aName in os.listdir(tmpDir13):
if (aName.endswith(".pw")):
numOutFiles += 1
print numOutFiles
if (numOutFiles == numJobs):
done = 1
else:
tSleep = max(10, int((numJobs - numOutFiles) / 20))
if (args.byType): tSleep = min(20,tSleep)
print " ( sleeping for %.0f seconds ) " % float(tSleep)
time.sleep(tSleep)
print " should be done !!! ", numOutFiles, numJobs
if ( 0 ):
tSleep = 120
time.sleep(tSleep)
else:
watchDir ( tmpDir13 )
print " (c) TIME ", time.asctime(time.localtime(time.time()))
localDir = getLocalScratchDir()
print " "
print " now we should move or copy stuff ... "
print " tmpDir13 : <%s> " % tmpDir13
print " localDir : <%s> " % localDir
tmpDir13 = copyScratchFiles ( tmpDir13, localDir )
print " "
print " handling a one-by-all run ... "
if (args.useBC < 1.):
print " --> will filter on Bonferonni-corrected p-value with threshold of ", args.useBC
# first we run post_pwRK2.py which writes
# out something that looks like the output from runPWPV
iOne = index
cmdString = "python %s/main/post_pwRK2.py %s %s %d %g" % (
gidgetConfigVars['TCGAFMP_ROOT_DIR'], tmpDir13, tsvFile, iOne, args.useBC)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print status, output
cmdString = "sort -grk 5 --temporary-directory=%s %s/post_proc_all.tsv >& %s/%d.all.pwpv.sort" % \
(localDir, tmpDir13, tmpDir13, iOne)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print status, output
cmdString = "mv %s/%d.all.pwpv.sort %s.%d.all.pwpv.sort" % (tmpDir13,
iOne, tsvFile[:-4], iOne)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print status, output
print "\n\n DONE \n\n"
print " (d) TIME ", time.asctime(time.localtime(time.time()))
cmdString = "rm -fr %s" % tmpDir13
print " final command : <%s> " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
sys.exit(-1)
# now that the job is finished, we need to handle the post-processing
if (args.forRE):
print " post-processing for RE ... "
if (args.useBC < 1.):
print " --> will filter on Bonferonni-corrected p-value with threshold of ", args.useBC
# first we run post_pwRK2.py which concatenates them all and writes
# out something that looks like the output from runPWPV
cmdString = "python %s/main/post_pwRK2.py %s %s -1 %g" % (
gidgetConfigVars['TCGAFMP_ROOT_DIR'], tmpDir13, tsvFile, args.useBC)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " STATUS : ", status
print " OUTPUT : ", output
print " (d) TIME ", time.asctime(time.localtime(time.time()))
# and then we run the script that sorts and trims the output file
cmdString = "%s/shscript/proc_pwpv2.sh %s" % (gidgetConfigVars['TCGAFMP_ROOT_DIR'], tmpDir13)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " STATUS : ", status
print " OUTPUT : ", output
print " (e) TIME ", time.asctime(time.localtime(time.time()))
# and now we move the files that we want to keep ...
if (args.byType):
cmdString = "uniq %s/post_proc_all.short.sort.mapped.noPathway > %s.%s.%s.pwpv.forRE" % \
(tmpDir13, tsvFile[:-4], cleanString(args.type1),
cleanString(args.type2))
else:
cmdString = "mv %s/post_proc_all.short.sort.mapped.noPathway %s.pwpv.forRE" % (
tmpDir13, tsvFile[:-4])
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (f) TIME ", time.asctime(time.localtime(time.time()))
if (args.byType):
cmdString = "mv %s/post_proc_all.tsv %s.%s.%s.pwpv" % \
(tmpDir13, tsvFile[:-4], cleanString(args.type1),
cleanString(args.type2))
else:
cmdString = "mv %s/post_proc_all.tsv %s.pwpv" % (tmpDir13,
tsvFile[:-4])
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (g) TIME ", time.asctime(time.localtime(time.time()))
elif (args.forLisa):
print " post-processing for Lisa's pancan analysis ... "
print " (d) TIME ", time.asctime(time.localtime(time.time()))
if (args.useBC < 1.):
print " --> will filter on Bonferonni-corrected p-value with threshold of ", args.useBC
cmdString = "python %s/main/post_pwRK2.py %s %s -1 %g" % (
gidgetConfigVars['TCGAFMP_ROOT_DIR'], tmpDir13, tsvFile, args.useBC)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (e) TIME ", time.asctime(time.localtime(time.time()))
cmdString = "%s/shscript/proc_pancan.sh %s" % (gidgetConfigVars['TCGAFMP_ROOT_DIR'], tmpDir13)
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (f) TIME ", time.asctime(time.localtime(time.time()))
cmdString = "mv %s/post_proc_all.NGEXP.NGEXP.tmp.sort.top1M %s.pwpv.NGEXP.NGEXP.top1M" % (
tmpDir13, tsvFile[:-4])
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (g) TIME ", time.asctime(time.localtime(time.time()))
cmdString = "mv %s/post_proc_all.NGEXP.NGEXP.tmp.sort %s.pwpv.NGEXP.NGEXP.all" % (
tmpDir13, tsvFile[:-4])
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (h) TIME ", time.asctime(time.localtime(time.time()))
cmdString = "mv %s/post_proc_all.tsv %s.pwpv" % (tmpDir13, tsvFile[:-4])
print " < %s > " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print " (i) TIME ", time.asctime(time.localtime(time.time()))
else:
print " ************************************************** "
print " *** NO POST-PROCESSING ??? OUTPUTS MAY BE LOST *** "
print " ************************************************** "
cmdString = "rm -fr %s" % tmpDir13
print " final command : <%s> " % cmdString
(status, output) = commands.getstatusoutput(cmdString)
print "\n\n DONE \n\n"
| false | true |
f7f5b260d4ffc764a73dfd6af01d53b0896e49f8 | 1,506 | py | Python | sqlglot/optimizer/eliminate_subqueries.py | kelsin/sqlglot | e1cfecfa77c43d9fea070c888e4b266b32475c0f | [
"MIT"
] | null | null | null | sqlglot/optimizer/eliminate_subqueries.py | kelsin/sqlglot | e1cfecfa77c43d9fea070c888e4b266b32475c0f | [
"MIT"
] | null | null | null | sqlglot/optimizer/eliminate_subqueries.py | kelsin/sqlglot | e1cfecfa77c43d9fea070c888e4b266b32475c0f | [
"MIT"
] | null | null | null | import itertools
import sqlglot
import sqlglot.expressions as exp
from sqlglot.optimizer.simplify import simplify
from sqlglot.optimizer.scope import traverse_scope
def eliminate_subqueries(expression):
    """
    Rewrite duplicate subqueries from sqlglot AST.

    Example:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one("SELECT 1 AS x, 2 AS y UNION ALL SELECT 1 AS x, 2 AS y")
        >>> eliminate_subqueries(expression).sql()
        'WITH _e_0 AS (SELECT 1 AS x, 2 AS y) SELECT * FROM _e_0 UNION ALL SELECT * FROM _e_0'

    Args:
        expression (sqlglot.Expression): expression to qualify
        schema (dict|sqlglot.optimizer.Schema): Database schema
    Returns:
        sqlglot.Expression: qualified expression
    """
    expression = simplify(expression)

    # Bucket every scope's query expression by value, so that identical
    # queries end up grouped under one key.
    grouped = {}
    for scope in traverse_scope(expression):
        grouped.setdefault(scope.expression, []).append(scope.expression)

    counter = itertools.count()

    for query, occurrences in grouped.items():
        # a query that appears only once needs no CTE
        if len(occurrences) < 2:
            continue

        # hoist one copy of the duplicated query into a WITH clause ...
        cte_name = f"_e_{next(counter)}"
        expression.with_(cte_name, as_=query.copy(), copy=False)

        # ... and replace every occurrence with a reference to the CTE
        for occurrence in occurrences:
            container = occurrence.parent
            if isinstance(container, exp.Subquery):
                container.replace(exp.alias_(cte_name, container.alias_or_name))
            elif isinstance(container, exp.Union):
                occurrence.replace(sqlglot.select("*").from_(cte_name))

    return expression
| 31.375 | 99 | 0.648738 | import itertools
import sqlglot
import sqlglot.expressions as exp
from sqlglot.optimizer.simplify import simplify
from sqlglot.optimizer.scope import traverse_scope
def eliminate_subqueries(expression):
expression = simplify(expression)
queries = {}
for scope in traverse_scope(expression):
query = scope.expression
queries[query] = queries.get(query, []) + [query]
sequence = itertools.count()
for query, duplicates in queries.items():
if len(duplicates) == 1:
continue
alias = f"_e_{next(sequence)}"
expression.with_(alias, as_=query.copy(), copy=False)
for dup in duplicates:
parent = dup.parent
if isinstance(parent, exp.Subquery):
parent.replace(exp.alias_(alias, parent.alias_or_name))
elif isinstance(parent, exp.Union):
dup.replace(sqlglot.select("*").from_(alias))
return expression
| true | true |
f7f5b2f89b9ec9f4dc2fed70b8e8dd147565c709 | 4,283 | py | Python | tracks/Lars_Loop.py | dp770/aws_deepracer_worksheet | 162b8f1c643c5b8ece33b9f9a35b7b4a513f905f | [
"MIT"
] | 1 | 2022-02-28T23:13:02.000Z | 2022-02-28T23:13:02.000Z | tracks/Lars_Loop.py | dp770/aws_deepracer_worksheet | 162b8f1c643c5b8ece33b9f9a35b7b4a513f905f | [
"MIT"
] | 1 | 2022-01-10T17:22:33.000Z | 2022-03-14T23:44:03.000Z | tracks/Lars_Loop.py | dp770/aws_deepracer_worksheet | 162b8f1c643c5b8ece33b9f9a35b7b4a513f905f | [
"MIT"
] | 1 | 2022-01-16T10:45:30.000Z | 2022-01-16T10:45:30.000Z | track_width = 1.0668000500506656
track_original = [(1.5632755160331726, -1.4320791363716125), (2.0709489583969116, -1.4242927730083466),
(2.578621983528137, -1.4164928197860718), (3.086295962333679, -1.408689171075821),
(3.593969464302063, -1.4008894562721252), (4.097454071044922, -1.3479962646961212),
(4.563138484954834, -1.1505840718746185), (4.948174476623535, -0.8225848972797394),
(5.260157108306885, -0.42210645973682404), (5.547274827957153, -0.003481656312942505),
(5.792325496673584, 0.4407728388905525), (5.980076551437378, 0.9123452305793762),
(6.138317346572876, 1.3947489857673645), (6.2796430587768555, 1.8824004530906677),
(6.408977508544922, 2.3733750581741333), (6.528566360473633, 2.8668160438537598),
(6.596945524215698, 3.3684329986572266), (6.527557134628296, 3.8692755699157715),
(6.250279903411865, 4.285546541213989), (5.793073892593384, 4.501644611358643),
(5.296926021575928, 4.602712392807007), (4.798203945159912, 4.532587051391602),
(4.410511612892151, 4.2167640924453735), (4.21301007270813, 3.750952959060669),
(4.059242129325867, 3.2670775651931763), (3.9155349731445312, 2.7801125049591064),
(3.779893398284912, 2.2908374071121216), (3.5723224878311157, 1.8361100554466248),
(3.1921374797821045, 1.503767967224121), (2.7076685428619434, 1.3743309378623976),
(2.221081018447876, 1.5036619901657104), (1.8219599723815918, 1.8136704564094543),
(1.5148040056228638, 2.2138710618019104), (1.04414701461792, 2.3895634412765503),
(0.5572960525751114, 2.277476489543915), (0.16184110566974014, 1.9604640603065517),
(-0.29436104744672775, 1.7458440065383911), (-0.7919313609600067, 1.7488374710083008),
(-1.039209634065628, 1.311969518661499), (-1.348665475845337, 0.9352428764104843),
(-1.8498529791832004, 0.9114399254322063), (-2.3272935152053833, 1.0742363631725311),
(-2.728696107864382, 1.3836716413497951), (-3.0530240535736084, 1.7730545401573181),
(-3.308995485305785, 2.2108450531959516), (-3.7223395109176636, 2.4906630516052246),
(-4.225600481033325, 2.5149030089378357), (-4.6698198318481445, 2.2873809337615967),
(-5.048052072525024, 1.9490769505500793), (-5.381816387176514, 1.5671890377998352),
(-5.666332006454468, 1.1468691527843475), (-5.9167890548706055, 0.7053375393152237),
(-6.134303331375122, 0.24667788669466972), (-6.315459966659546, -0.22749889083206654),
(-6.45617938041687, -0.7151507139205933), (-6.545801639556885, -1.2146444916725159),
(-6.578178405761719, -1.7211750149726868), (-6.591742992401123, -2.228720545768738),
(-6.596933603286743, -2.736422538757324), (-6.59478235244751, -3.244147539138794),
(-6.585933446884155, -3.751800537109375), (-6.569818496704102, -4.259271860122681),
(-6.222153902053833, -4.578674554824829), (-5.716580390930176, -4.59562349319458),
(-5.2089478969573975, -4.589447021484375), (-4.701385021209717, -4.60211443901062),
(-4.701385021209717, -4.60211443901062), (-4.233348488807678, -4.43516993522644),
(-3.949254870414734, -4.020798563957214), (-3.7455984354019165, -3.555709481239319),
(-3.5504571199417114, -3.0869795083999634), (-3.3622305393218994, -2.6154290437698364),
(-3.1803709268569946, -2.141385495662689), (-2.967740058898926, -1.6886879801750183),
(-2.497014045715332, -1.5133024752140045), (-1.9904460310935974, -1.4861254394054413),
(-1.4827749729156494, -1.478088229894638), (-0.9750929772853851, -1.4709674715995789),
(-0.4674128443002701, -1.4637114703655243), (0.040260784327984744, -1.4559287428855896),
(0.5479313433170319, -1.4479000866413116), (1.055602490901947, -1.4399209320545197),
(1.5632755160331726, -1.4320791363716125)]
| 97.340909 | 106 | 0.651179 | track_width = 1.0668000500506656
track_original = [(1.5632755160331726, -1.4320791363716125), (2.0709489583969116, -1.4242927730083466),
(2.578621983528137, -1.4164928197860718), (3.086295962333679, -1.408689171075821),
(3.593969464302063, -1.4008894562721252), (4.097454071044922, -1.3479962646961212),
(4.563138484954834, -1.1505840718746185), (4.948174476623535, -0.8225848972797394),
(5.260157108306885, -0.42210645973682404), (5.547274827957153, -0.003481656312942505),
(5.792325496673584, 0.4407728388905525), (5.980076551437378, 0.9123452305793762),
(6.138317346572876, 1.3947489857673645), (6.2796430587768555, 1.8824004530906677),
(6.408977508544922, 2.3733750581741333), (6.528566360473633, 2.8668160438537598),
(6.596945524215698, 3.3684329986572266), (6.527557134628296, 3.8692755699157715),
(6.250279903411865, 4.285546541213989), (5.793073892593384, 4.501644611358643),
(5.296926021575928, 4.602712392807007), (4.798203945159912, 4.532587051391602),
(4.410511612892151, 4.2167640924453735), (4.21301007270813, 3.750952959060669),
(4.059242129325867, 3.2670775651931763), (3.9155349731445312, 2.7801125049591064),
(3.779893398284912, 2.2908374071121216), (3.5723224878311157, 1.8361100554466248),
(3.1921374797821045, 1.503767967224121), (2.7076685428619434, 1.3743309378623976),
(2.221081018447876, 1.5036619901657104), (1.8219599723815918, 1.8136704564094543),
(1.5148040056228638, 2.2138710618019104), (1.04414701461792, 2.3895634412765503),
(0.5572960525751114, 2.277476489543915), (0.16184110566974014, 1.9604640603065517),
(-0.29436104744672775, 1.7458440065383911), (-0.7919313609600067, 1.7488374710083008),
(-1.039209634065628, 1.311969518661499), (-1.348665475845337, 0.9352428764104843),
(-1.8498529791832004, 0.9114399254322063), (-2.3272935152053833, 1.0742363631725311),
(-2.728696107864382, 1.3836716413497951), (-3.0530240535736084, 1.7730545401573181),
(-3.308995485305785, 2.2108450531959516), (-3.7223395109176636, 2.4906630516052246),
(-4.225600481033325, 2.5149030089378357), (-4.6698198318481445, 2.2873809337615967),
(-5.048052072525024, 1.9490769505500793), (-5.381816387176514, 1.5671890377998352),
(-5.666332006454468, 1.1468691527843475), (-5.9167890548706055, 0.7053375393152237),
(-6.134303331375122, 0.24667788669466972), (-6.315459966659546, -0.22749889083206654),
(-6.45617938041687, -0.7151507139205933), (-6.545801639556885, -1.2146444916725159),
(-6.578178405761719, -1.7211750149726868), (-6.591742992401123, -2.228720545768738),
(-6.596933603286743, -2.736422538757324), (-6.59478235244751, -3.244147539138794),
(-6.585933446884155, -3.751800537109375), (-6.569818496704102, -4.259271860122681),
(-6.222153902053833, -4.578674554824829), (-5.716580390930176, -4.59562349319458),
(-5.2089478969573975, -4.589447021484375), (-4.701385021209717, -4.60211443901062),
(-4.701385021209717, -4.60211443901062), (-4.233348488807678, -4.43516993522644),
(-3.949254870414734, -4.020798563957214), (-3.7455984354019165, -3.555709481239319),
(-3.5504571199417114, -3.0869795083999634), (-3.3622305393218994, -2.6154290437698364),
(-3.1803709268569946, -2.141385495662689), (-2.967740058898926, -1.6886879801750183),
(-2.497014045715332, -1.5133024752140045), (-1.9904460310935974, -1.4861254394054413),
(-1.4827749729156494, -1.478088229894638), (-0.9750929772853851, -1.4709674715995789),
(-0.4674128443002701, -1.4637114703655243), (0.040260784327984744, -1.4559287428855896),
(0.5479313433170319, -1.4479000866413116), (1.055602490901947, -1.4399209320545197),
(1.5632755160331726, -1.4320791363716125)]
| true | true |
f7f5b3a4dfb8e6b7e2b8a25169ca1eb8e16fa85c | 1,696 | py | Python | MetaHeuristics/TSA.py | roycek7/operation_research | 37f01b7fcd93494a7de38459c324132516724b99 | [
"MIT"
] | 1 | 2021-04-17T17:33:30.000Z | 2021-04-17T17:33:30.000Z | MetaHeuristics/TSA.py | roycek7/operation_research | 37f01b7fcd93494a7de38459c324132516724b99 | [
"MIT"
] | null | null | null | MetaHeuristics/TSA.py | roycek7/operation_research | 37f01b7fcd93494a7de38459c324132516724b99 | [
"MIT"
] | null | null | null | import math
import random
import pylab
def Distance(p1, p2):
    """Euclidean distance between two 2-D points given as (x, y) pairs."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return math.hypot(dx, dy)
# Problem instance: nLoc random locations on a Square x Square grid.
# The RNG is seeded with nLoc so every run uses the same instance.
nLoc = 150
N = range(nLoc)
Square = 1000
random.seed(nLoc)
# Random coordinates for every location.
Pos = [(random.randint(0, Square), random.randint(0, Square)) for i in N]
# Full pairwise distance matrix, precomputed once.
D = [[Distance(Pos[i], Pos[j]) for j in N] for i in N]
def Cost(Path):
    """Total length of the closed tour described by ``Path``.

    ``Path[-1] -> Path[0]`` wraps around, so the tour is a closed loop.
    """
    total = 0
    for idx in N:
        total += D[Path[idx - 1]][Path[idx]]
    return total
# Initial solution: a random tour visiting every location once.
Path = list(N)
random.shuffle(Path)
def ChooseNeigh(Path):
    """Pick a random 2-opt neighbour of ``Path``.

    Returns ``(delta, (i, j))`` where reversing ``Path[i..j]`` changes the
    tour length by ``delta`` (negative = improvement).  ``Path`` itself is
    not modified; the move is only evaluated.
    """
    while True:
        i = random.choice(N)
        j = random.choice(N)
        if j < i:
            i, j = j, i
        # Reject degenerate moves: identical endpoints, or a segment covering
        # (almost) the whole tour, which would be equivalent to no move.
        if i != j and j - i < nLoc - 1:
            break
    # Edges removed: (a1, a2) and (b1, b2); edges added: (a1, b1) and (a2, b2).
    a1 = Path[i - 1]
    a2 = Path[i]
    b1 = Path[j]
    b2 = Path[(j + 1) % nLoc]
    return D[a1][b1] + D[a2][b2] - (D[a1][a2] + D[b1][b2]), (i, j)
def MoveToNeigh(Path, neigh):
    """Apply the 2-opt move ``neigh`` = (i, j): reverse ``Path[i..j]`` in place.

    Reversing the segment re-connects the tour with exactly the two new
    edges whose length delta was computed in ``ChooseNeigh``.
    """
    i, j = neigh
    # Slice reversal replaces the manual half-segment swap loop, which used
    # float division (`int((j - i + 1) / 2)`) to derive an index bound.
    Path[i:j + 1] = Path[i:j + 1][::-1]
def RunSA(Solution, Cost, ChooseNeigh, MoveToNeigh, N, T, alpha):
    """Simulated-annealing driver: improve ``Solution`` in place.

    N      -- number of iterations (shadows the module-level range ``N``)
    T      -- initial temperature
    alpha  -- geometric cooling factor applied each iteration

    Plots the per-iteration cost and the best-so-far cost when done.
    """
    E = Cost(Solution)
    Best = E
    CostArr = [E]
    BestArr = [Best]
    for i in range(N):
        delta, neighbour = ChooseNeigh(Solution)
        # Metropolis criterion: always accept improvements; accept a
        # worsening move with probability exp(-delta / T).
        if delta < 0 or random.random() < math.exp(-delta / T):
            MoveToNeigh(Solution, neighbour)
            E += delta
            if E < Best:
                Best = E
        CostArr.append(E)
        BestArr.append(Best)
        # Geometric cooling schedule.
        T *= alpha
    print(E)
    pylab.plot(range(N + 1), CostArr)
    pylab.plot(range(N + 1), BestArr)
    pylab.show()
# Anneal for 1M iterations starting at temperature == Square with very slow
# cooling, then report the final tour length and draw the closed tour
# (range(-1, nLoc) repeats the first point to close the polygon).
RunSA(Path, Cost, ChooseNeigh, MoveToNeigh, 1000000, Square, 0.999994)
print(Cost(Path))
pylab.plot([Pos[Path[i]][0] for i in range(-1, nLoc)], [Pos[Path[i]][1] for i in range(-1, nLoc)])
pylab.show()
| 22.918919 | 98 | 0.544811 | import math
import random
import pylab
def Distance(p1, p2):
return math.hypot(p1[0] - p2[0], p1[1] - p2[1])
nLoc = 150
N = range(nLoc)
Square = 1000
random.seed(nLoc)
Pos = [(random.randint(0, Square), random.randint(0, Square)) for i in N]
D = [[Distance(Pos[i], Pos[j]) for j in N] for i in N]
def Cost(Path):
return sum(D[Path[i - 1]][Path[i]] for i in N)
Path = list(N)
random.shuffle(Path)
def ChooseNeigh(Path):
while True:
i = random.choice(N)
j = random.choice(N)
if j < i:
i, j = j, i
if i != j and j - i < nLoc - 1:
break
a1 = Path[i - 1]
a2 = Path[i]
b1 = Path[j]
b2 = Path[(j + 1) % nLoc]
return D[a1][b1] + D[a2][b2] - (D[a1][a2] + D[b1][b2]), (i, j)
def MoveToNeigh(Path, neigh):
i, j = neigh
for k in range(int((j - i + 1) / 2)):
Path[i + k], Path[j - k] = Path[j - k], Path[i + k]
def RunSA(Solution, Cost, ChooseNeigh, MoveToNeigh, N, T, alpha):
E = Cost(Solution)
Best = E
CostArr = [E]
BestArr = [Best]
for i in range(N):
delta, neighbour = ChooseNeigh(Solution)
if delta < 0 or random.random() < math.exp(-delta / T):
MoveToNeigh(Solution, neighbour)
E += delta
if E < Best:
Best = E
CostArr.append(E)
BestArr.append(Best)
T *= alpha
print(E)
pylab.plot(range(N + 1), CostArr)
pylab.plot(range(N + 1), BestArr)
pylab.show()
RunSA(Path, Cost, ChooseNeigh, MoveToNeigh, 1000000, Square, 0.999994)
print(Cost(Path))
pylab.plot([Pos[Path[i]][0] for i in range(-1, nLoc)], [Pos[Path[i]][1] for i in range(-1, nLoc)])
pylab.show()
| true | true |
f7f5b3cabe73aac5f136dc82fb6bbd942125c592 | 9,414 | py | Python | libraries/botbuilder-dialogs/botbuilder/dialogs/choices/find.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 388 | 2019-05-07T15:53:21.000Z | 2022-03-28T20:29:46.000Z | libraries/botbuilder-dialogs/botbuilder/dialogs/choices/find.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 1,286 | 2019-05-07T23:38:19.000Z | 2022-03-31T10:44:16.000Z | libraries/botbuilder-dialogs/botbuilder/dialogs/choices/find.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 168 | 2019-05-14T20:23:25.000Z | 2022-03-16T06:49:14.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Callable, List, Union
from .choice import Choice
from .find_choices_options import FindChoicesOptions, FindValuesOptions
from .found_choice import FoundChoice
from .found_value import FoundValue
from .model_result import ModelResult
from .sorted_value import SortedValue
from .token import Token
from .tokenizer import Tokenizer
class Find:
    """Static helpers for matching user input against a list of choices."""
    @staticmethod
    def find_choices(
        utterance: str,
        choices: List[Union[str, Choice]],
        options: FindChoicesOptions = None,
    ) -> List[ModelResult]:
        """Match ``utterance`` against ``choices``.

        :param utterance: Raw user input to search.
        :param choices: Choices (or plain strings) to match against.
        :param options: Optional matching configuration.
        :return: ModelResults whose resolutions are ``FoundChoice`` objects.
        :raises TypeError: If ``choices`` is None or empty.
        """
        if not choices:
            raise TypeError(
                "Find: choices cannot be None. Must be a [str] or [Choice]."
            )
        opt = options if options else FindChoicesOptions()
        # Normalize list of choices: plain strings become Choice objects.
        choices_list = [
            Choice(value=choice) if isinstance(choice, str) else choice
            for choice in choices
        ]
        # Build up full list of synonyms to search over.
        # - Each entry carries the index of the choice it belongs to, which is
        #   later used to map the search results back to their choice.
        synonyms: List[SortedValue] = []
        for index, choice in enumerate(choices_list):
            if not opt.no_value:
                synonyms.append(SortedValue(value=choice.value, index=index))
            # NOTE(review): the action-title synonym is also gated on
            # `no_value`; it looks like this should use a dedicated option
            # (e.g. `no_action`) -- confirm against FindChoicesOptions.
            if (
                getattr(choice, "action", False)
                and getattr(choice.action, "title", False)
                and not opt.no_value
            ):
                synonyms.append(SortedValue(value=choice.action.title, index=index))
            if choice.synonyms is not None:
                for synonym in choice.synonyms:
                    synonyms.append(SortedValue(value=synonym, index=index))
        def found_choice_constructor(value_model: ModelResult) -> ModelResult:
            # Map a raw value match back onto the Choice it came from.
            choice = choices_list[value_model.resolution.index]
            return ModelResult(
                start=value_model.start,
                end=value_model.end,
                type_name="choice",
                text=value_model.text,
                resolution=FoundChoice(
                    value=choice.value,
                    index=value_model.resolution.index,
                    score=value_model.resolution.score,
                    synonym=value_model.resolution.value,
                ),
            )
        # Find synonyms in utterance and map back to their choices_list
        return list(
            map(
                found_choice_constructor, Find.find_values(utterance, synonyms, options)
            )
        )
    @staticmethod
    def find_values(
        utterance: str, values: List[SortedValue], options: FindValuesOptions = None
    ) -> List[ModelResult]:
        """Find every occurrence of ``values`` within ``utterance``.

        Returns non-overlapping matches (at most one per choice index),
        sorted by their character position in the utterance.
        """
        # Sort values in descending order by length, so that the longest value is searched over first.
        sorted_values = sorted(
            values, key=lambda sorted_val: len(sorted_val.value), reverse=True
        )
        # Search for each value within the utterance.
        matches: List[ModelResult] = []
        opt = options if options else FindValuesOptions()
        tokenizer: Callable[
            [str, str], List[Token]
        ] = opt.tokenizer if opt.tokenizer else Tokenizer.default_tokenizer
        tokens = tokenizer(utterance, opt.locale)
        max_distance = (
            opt.max_token_distance if opt.max_token_distance is not None else 2
        )
        for entry in sorted_values:
            # Find all matches for a value:
            # - To match "last one" in "the last time I chose the last one" we need
            #   to re-search the string starting from the end of the previous match.
            # - The start & end positions returned for a match are token positions.
            start_pos = 0
            searched_tokens = tokenizer(entry.value.strip(), opt.locale)
            while start_pos < len(tokens):
                match: Union[ModelResult, None] = Find._match_value(
                    tokens,
                    max_distance,
                    opt,
                    entry.index,
                    entry.value,
                    searched_tokens,
                    start_pos,
                )
                if match is not None:
                    start_pos = match.end + 1
                    matches.append(match)
                else:
                    break
        # Sort matches by score descending
        sorted_matches = sorted(
            matches,
            key=lambda model_result: model_result.resolution.score,
            reverse=True,
        )
        # Filter out duplicate matching indexes and overlapping tokens.
        # - The start & end positions are token positions and need to be translated
        #   to character positions before returning. The "text" field is populated
        #   here as well.
        results: List[ModelResult] = []
        found_indexes = set()
        used_tokens = set()
        for match in sorted_matches:
            # Apply filters: skip if this choice was already matched ...
            add = match.resolution.index not in found_indexes
            # ... or if any token of the span is already claimed by a better match.
            for i in range(match.start, match.end + 1):
                if i in used_tokens:
                    add = False
                    break
            # Add to results
            if add:
                # Update filter info
                found_indexes.add(match.resolution.index)
                for i in range(match.start, match.end + 1):
                    used_tokens.add(i)
                # Translate start & end to character offsets and populate text field
                match.start = tokens[match.start].start
                match.end = tokens[match.end].end
                match.text = utterance[match.start : match.end + 1]
                results.append(match)
        # Return the results sorted by position in the utterance
        return sorted(results, key=lambda model_result: model_result.start)
    @staticmethod
    def _match_value(
        source_tokens: List[Token],
        max_distance: int,
        options: FindValuesOptions,
        index: int,
        value: str,
        searched_tokens: List[Token],
        start_pos: int,
    ) -> Union[ModelResult, None]:
        """Try to match ``searched_tokens`` within ``source_tokens`` from
        ``start_pos`` onward; return a token-positioned ModelResult or None.
        """
        # Match value to utterance and calculate total deviation.
        # - The tokens are matched in order so "second last" will match in
        #   "the second from last one" but not in "the last from the second one".
        # - The total deviation is a count of the number of tokens skipped in the
        #   match so for the example above the number of tokens matched would be
        #   2 and the total deviation would be 1.
        matched = 0
        total_deviation = 0
        start = -1
        end = -1
        for token in searched_tokens:
            # Find the position of the token in the utterance.
            pos = Find._index_of_token(source_tokens, token, start_pos)
            if pos >= 0:
                # Number of utterance tokens skipped since the previous match.
                distance = pos - start_pos if matched > 0 else 0
                if distance <= max_distance:
                    # Update count of tokens matched and move start pointer to
                    # search for the next token after the current one.
                    matched += 1
                    total_deviation += distance
                    start_pos = pos + 1
                    # Update start & end positions tracking the matched span.
                    if start < 0:
                        start = pos
                    end = pos
        # Calculate score and format result
        # - The start & end positions and the result's text field will be corrected by the caller.
        result: Union[ModelResult, None] = None
        if matched > 0 and (
            matched == len(searched_tokens) or options.allow_partial_matches
        ):
            # Percentage of tokens matched. If matching "second last" in
            # "the second form last one" the completeness would be 1.0 since
            # all tokens were found.
            completeness = matched / len(searched_tokens)
            # Accuracy of the match. The accuracy is reduced by additional tokens
            # occurring in the value that weren't in the utterance. So an utterance
            # of "second last" matched against a value of "second from last" would
            # result in an accuracy of 0.5.
            accuracy = float(matched) / (matched + total_deviation)
            # The final score is simply the completeness multiplied by the accuracy.
            score = completeness * accuracy
            # Format result
            result = ModelResult(
                text="",
                start=start,
                end=end,
                type_name="value",
                resolution=FoundValue(value=value, index=index, score=score),
            )
        return result
    @staticmethod
    def _index_of_token(tokens: List[Token], token: Token, start_pos: int) -> int:
        """Index of the first token at/after ``start_pos`` whose normalized
        form equals ``token``'s, or -1 when not found."""
        for i in range(start_pos, len(tokens)):
            if tokens[i].normalized == token.normalized:
                return i
        return -1
| 37.807229 | 112 | 0.578606 |
from typing import Callable, List, Union
from .choice import Choice
from .find_choices_options import FindChoicesOptions, FindValuesOptions
from .found_choice import FoundChoice
from .found_value import FoundValue
from .model_result import ModelResult
from .sorted_value import SortedValue
from .token import Token
from .tokenizer import Tokenizer
class Find:
@staticmethod
def find_choices(
utterance: str,
choices: [Union[str, Choice]],
options: FindChoicesOptions = None,
):
if not choices:
raise TypeError(
"Find: choices cannot be None. Must be a [str] or [Choice]."
)
opt = options if options else FindChoicesOptions()
choices_list = [
Choice(value=choice) if isinstance(choice, str) else choice
for choice in choices
]
synonyms: [SortedValue] = []
for index, choice in enumerate(choices_list):
if not opt.no_value:
synonyms.append(SortedValue(value=choice.value, index=index))
if (
getattr(choice, "action", False)
and getattr(choice.action, "title", False)
and not opt.no_value
):
synonyms.append(SortedValue(value=choice.action.title, index=index))
if choice.synonyms is not None:
for synonym in choice.synonyms:
synonyms.append(SortedValue(value=synonym, index=index))
def found_choice_constructor(value_model: ModelResult) -> ModelResult:
choice = choices_list[value_model.resolution.index]
return ModelResult(
start=value_model.start,
end=value_model.end,
type_name="choice",
text=value_model.text,
resolution=FoundChoice(
value=choice.value,
index=value_model.resolution.index,
score=value_model.resolution.score,
synonym=value_model.resolution.value,
),
)
return list(
map(
found_choice_constructor, Find.find_values(utterance, synonyms, options)
)
)
@staticmethod
def find_values(
utterance: str, values: List[SortedValue], options: FindValuesOptions = None
) -> List[ModelResult]:
sorted_values = sorted(
values, key=lambda sorted_val: len(sorted_val.value), reverse=True
)
matches: [ModelResult] = []
opt = options if options else FindValuesOptions()
tokenizer: Callable[
[str, str], List[Token]
] = opt.tokenizer if opt.tokenizer else Tokenizer.default_tokenizer
tokens = tokenizer(utterance, opt.locale)
max_distance = (
opt.max_token_distance if opt.max_token_distance is not None else 2
)
for entry in sorted_values:
start_pos = 0
searched_tokens = tokenizer(entry.value.strip(), opt.locale)
while start_pos < len(tokens):
match: Union[ModelResult, None] = Find._match_value(
tokens,
max_distance,
opt,
entry.index,
entry.value,
searched_tokens,
start_pos,
)
if match is not None:
start_pos = match.end + 1
matches.append(match)
else:
break
sorted_matches = sorted(
matches,
key=lambda model_result: model_result.resolution.score,
reverse=True,
)
results: List[ModelResult] = []
found_indexes = set()
used_tokens = set()
for match in sorted_matches:
add = match.resolution.index not in found_indexes
for i in range(match.start, match.end + 1):
if i in used_tokens:
add = False
break
if add:
found_indexes.add(match.resolution.index)
for i in range(match.start, match.end + 1):
used_tokens.add(i)
match.start = tokens[match.start].start
match.end = tokens[match.end].end
match.text = utterance[match.start : match.end + 1]
results.append(match)
return sorted(results, key=lambda model_result: model_result.start)
@staticmethod
def _match_value(
source_tokens: List[Token],
max_distance: int,
options: FindValuesOptions,
index: int,
value: str,
searched_tokens: List[Token],
start_pos: int,
) -> Union[ModelResult, None]:
matched = 0
total_deviation = 0
start = -1
end = -1
for token in searched_tokens:
pos = Find._index_of_token(source_tokens, token, start_pos)
if pos >= 0:
distance = pos - start_pos if matched > 0 else 0
if distance <= max_distance:
matched += 1
total_deviation += distance
start_pos = pos + 1
if start < 0:
start = pos
end = pos
# Calculate score and format result
# - The start & end positions and the results text field will be corrected by the caller.
result: ModelResult = None
if matched > 0 and (
matched == len(searched_tokens) or options.allow_partial_matches
):
# Percentage of tokens matched. If matching "second last" in
# "the second form last one" the completeness would be 1.0 since
# all tokens were found.
completeness = matched / len(searched_tokens)
# Accuracy of the match. The accuracy is reduced by additional tokens
# occuring in the value that weren't in the utterance. So an utterance
accuracy = float(matched) / (matched + total_deviation)
score = completeness * accuracy
result = ModelResult(
text="",
start=start,
end=end,
type_name="value",
resolution=FoundValue(value=value, index=index, score=score),
)
return result
@staticmethod
def _index_of_token(tokens: List[Token], token: Token, start_pos: int) -> int:
for i in range(start_pos, len(tokens)):
if tokens[i].normalized == token.normalized:
return i
return -1
| true | true |
f7f5b453307c130d4a81b92e6af87c95a74db22f | 2,176 | py | Python | plugins/tts/google.py | ollmer/jasper-client | 49b76c248c9550ff50e37f549915de2034d22050 | [
"MIT"
] | null | null | null | plugins/tts/google.py | ollmer/jasper-client | 49b76c248c9550ff50e37f549915de2034d22050 | [
"MIT"
] | null | null | null | plugins/tts/google.py | ollmer/jasper-client | 49b76c248c9550ff50e37f549915de2034d22050 | [
"MIT"
] | null | null | null | import os
import yaml
import tempfile
from abstract_tts import AbstractMp3TTSEngine
from src import diagnose
from src import paths
try:
import gtts
except ImportError:
pass
class GoogleTTS(AbstractMp3TTSEngine):
    """
    Speaks phrases via the Google TTS online service (gTTS).

    Requires a network connection plus the ``gtts`` package and MP3
    playback support (pymad) to be available.
    """
    SLUG = "google-tts"

    def __init__(self, language='en'):
        # super(self.__class__, ...) recurses forever when this class is
        # subclassed, so anchor the MRO lookup at GoogleTTS explicitly.
        super(GoogleTTS, self).__init__()
        self.language = language

    @classmethod
    def is_available(cls):
        """True if MP3 playback, gTTS and a network connection all exist."""
        # super(cls, cls) loops infinitely when called on a subclass;
        # anchor at GoogleTTS instead.
        return (super(GoogleTTS, cls).is_available() and
                diagnose.check_python_import('gtts') and
                diagnose.check_network_connection())

    @classmethod
    def get_config(cls):
        """Read engine settings (currently only ``language``) from profile.yml."""
        # FIXME: Replace this as soon as we have a config module
        config = {}
        # Try to get the language from the user's profile, if present.
        profile_path = paths.config('profile.yml')
        if os.path.exists(profile_path):
            with open(profile_path, 'r') as f:
                profile = yaml.safe_load(f)
                if ('google-tts' in profile and
                        'language' in profile['google-tts']):
                    config['language'] = profile['google-tts']['language']
        return config

    @property
    def languages(self):
        """Language codes accepted by the Google TTS endpoint."""
        langs = ['af', 'sq', 'ar', 'hy', 'ca', 'zh-CN', 'zh-TW', 'hr', 'cs',
                 'da', 'nl', 'en', 'eo', 'fi', 'fr', 'de', 'el', 'ht', 'hi',
                 'hu', 'is', 'id', 'it', 'ja', 'ko', 'la', 'lv', 'mk', 'no',
                 'pl', 'pt', 'ro', 'ru', 'sr', 'sk', 'es', 'sw', 'sv', 'ta',
                 'th', 'tr', 'vi', 'cy']
        return langs

    def say(self, phrase):
        """Synthesize *phrase* to a temporary MP3 file and play it."""
        self._logger.debug("Saying '%s' with '%s'", phrase, self.SLUG)
        if self.language not in self.languages:
            # Interpolate the message: previously the %s placeholders were
            # passed as extra ValueError args and never formatted.
            raise ValueError("Language '%s' not supported by '%s'"
                             % (self.language, self.SLUG))
        tts = gtts.gTTS(text=phrase, lang=self.language)
        # Create the file handle just to reserve a unique name; gTTS writes
        # to the path itself.
        with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
            tmpfile = f.name
        tts.save(tmpfile)
        try:
            self.play_mp3(tmpfile)
        finally:
            # Always remove the temp file, even if playback fails.
            os.remove(tmpfile)
| 31.536232 | 76 | 0.551471 | import os
import yaml
import tempfile
from abstract_tts import AbstractMp3TTSEngine
from src import diagnose
from src import paths
try:
import gtts
except ImportError:
pass
class GoogleTTS(AbstractMp3TTSEngine):
SLUG = "google-tts"
def __init__(self, language='en'):
super(self.__class__, self).__init__()
self.language = language
@classmethod
def is_available(cls):
return (super(cls, cls).is_available() and
diagnose.check_python_import('gtts') and
diagnose.check_network_connection())
@classmethod
def get_config(cls):
config = {}
profile_path = paths.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if ('google-tts' in profile and
'language' in profile['google-tts']):
config['language'] = profile['google-tts']['language']
return config
@property
def languages(self):
langs = ['af', 'sq', 'ar', 'hy', 'ca', 'zh-CN', 'zh-TW', 'hr', 'cs',
'da', 'nl', 'en', 'eo', 'fi', 'fr', 'de', 'el', 'ht', 'hi',
'hu', 'is', 'id', 'it', 'ja', 'ko', 'la', 'lv', 'mk', 'no',
'pl', 'pt', 'ro', 'ru', 'sr', 'sk', 'es', 'sw', 'sv', 'ta',
'th', 'tr', 'vi', 'cy']
return langs
def say(self, phrase):
self._logger.debug("Saying '%s' with '%s'", phrase, self.SLUG)
if self.language not in self.languages:
raise ValueError("Language '%s' not supported by '%s'",
self.language, self.SLUG)
tts = gtts.gTTS(text=phrase, lang=self.language)
with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
tmpfile = f.name
tts.save(tmpfile)
self.play_mp3(tmpfile)
os.remove(tmpfile)
| true | true |
f7f5b5b75ec025fdb789f099439d5eb0efc3090c | 666 | py | Python | src/garage/tf/baselines/__init__.py | researchai/unsupervised_meta_rl | 9ca4b41438277ef6cfea047482b98de9da07815a | [
"MIT"
] | null | null | null | src/garage/tf/baselines/__init__.py | researchai/unsupervised_meta_rl | 9ca4b41438277ef6cfea047482b98de9da07815a | [
"MIT"
] | null | null | null | src/garage/tf/baselines/__init__.py | researchai/unsupervised_meta_rl | 9ca4b41438277ef6cfea047482b98de9da07815a | [
"MIT"
] | null | null | null | """Baseline estimators for TensorFlow-based algorithms."""
from garage.tf.baselines.continuous_mlp_baseline import ContinuousMLPBaseline
from garage.tf.baselines.continuous_mlp_baseline_with_model import (
ContinuousMLPBaselineWithModel)
from garage.tf.baselines.gaussian_cnn_baseline_with_model import (
GaussianCNNBaselineWithModel)
from garage.tf.baselines.gaussian_conv_baseline import GaussianConvBaseline
from garage.tf.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
# Public API of garage.tf.baselines; keep in sync with the imports above.
__all__ = [
    'ContinuousMLPBaseline',
    'ContinuousMLPBaselineWithModel',
    'GaussianConvBaseline',
    'GaussianCNNBaselineWithModel',
    'GaussianMLPBaseline',
]
| 39.176471 | 77 | 0.834835 | from garage.tf.baselines.continuous_mlp_baseline import ContinuousMLPBaseline
from garage.tf.baselines.continuous_mlp_baseline_with_model import (
ContinuousMLPBaselineWithModel)
from garage.tf.baselines.gaussian_cnn_baseline_with_model import (
GaussianCNNBaselineWithModel)
from garage.tf.baselines.gaussian_conv_baseline import GaussianConvBaseline
from garage.tf.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
__all__ = [
'ContinuousMLPBaseline',
'ContinuousMLPBaselineWithModel',
'GaussianConvBaseline',
'GaussianCNNBaselineWithModel',
'GaussianMLPBaseline',
]
| true | true |
f7f5b5d1a9627c743637d92860a3322a2c8de443 | 460 | py | Python | src/config.py | chander/lambda-to-slack | 973060bccc6c32036ea2178845c566974111d178 | [
"MIT"
] | null | null | null | src/config.py | chander/lambda-to-slack | 973060bccc6c32036ea2178845c566974111d178 | [
"MIT"
] | null | null | null | src/config.py | chander/lambda-to-slack | 973060bccc6c32036ea2178845c566974111d178 | [
"MIT"
] | null | null | null | """Environment configuration values used by lambda functions."""
import os
LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO')
SLACK_URL = os.getenv('SLACK_URL')
MESSAGE_CONFIGS = []
for I in range(1, 21):
slack_url = os.getenv(f'SLACK_URL{str(I).zfill(2)}', None)
contains_string = os.getenv(f'CONTAINS_STRING{str(I).zfill(2)}', None)
if slack_url and contains_string:
MESSAGE_CONFIGS.append((slack_url, contains_string))
else:
break
| 30.666667 | 74 | 0.697826 |
import os
LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO')
SLACK_URL = os.getenv('SLACK_URL')
MESSAGE_CONFIGS = []
for I in range(1, 21):
slack_url = os.getenv(f'SLACK_URL{str(I).zfill(2)}', None)
contains_string = os.getenv(f'CONTAINS_STRING{str(I).zfill(2)}', None)
if slack_url and contains_string:
MESSAGE_CONFIGS.append((slack_url, contains_string))
else:
break
| true | true |
f7f5b5fca5ebb7c8b6b4c3166040bf2f78f518e2 | 6,692 | py | Python | django/links/views.py | kehanlu/shorten-url | 57b97a3dc1ac973273bda7109199ad3d80c2b061 | [
"MIT"
] | 1 | 2018-07-10T02:31:10.000Z | 2018-07-10T02:31:10.000Z | django/links/views.py | kehanlu/shorten_url | 57b97a3dc1ac973273bda7109199ad3d80c2b061 | [
"MIT"
] | 2 | 2018-07-13T02:06:00.000Z | 2018-07-13T15:40:08.000Z | django/links/views.py | kehanlu/shorten_url | 57b97a3dc1ac973273bda7109199ad3d80c2b061 | [
"MIT"
] | null | null | null | import base64
import datetime
import hashlib
import json
from django.conf import settings
from django.http import JsonResponse, HttpResponsePermanentRedirect
from django.shortcuts import render, redirect
from django.utils import timezone
from .models import ShortURL, Viewer
# Absolute URL prefixes used when rendering links in templates/responses.
BASE_URL = 'http://localhost:8000/links/'
BASE_SHORT_URL = 'http://localhost:8000/u/'
# Salt mixed into the md5 hash when generating a permanent short code.
# NOTE(review): hard-coded secret in source -- consider moving it to
# settings or an environment variable.
HASH_SALT = 'MEOW'
# Fallback values applied by create_302 when the form fields are empty.
DEFAULT_NAME = ''
DEFAULT_TITLE = ''
DEFAULT_DESCRIPTION = ''
def index(request):
    """Render the landing page listing the most recently created short URLs.

    Only GET is handled; any other method falls through and returns None.
    """
    if request.method == 'GET':
        recent_links = ShortURL.objects.all().order_by('-id')
        return render(request, 'links/links.html', {
            'BASE_URL': BASE_URL,
            'BASE_SHORT_URL': BASE_SHORT_URL,
            'status': 'normal',
            'recent': recent_links,
        })
def create_301(request):
    """Create a permanent (301) redirect entry from an AJAX POST.

    Returns a JSON payload with the generated short code on success, or a
    400-style payload when the target lacks an http(s) scheme. Non-POST
    requests fall through and return None.
    """
    if request.method != "POST":
        return None

    target = request.POST.get('target')
    # Reject targets without an explicit scheme.
    if not target.startswith(('http://', 'https://')):
        return JsonResponse({
            'status': 400,
            'msg': 'Target url must to starts with "http://" or "https://"',
        })

    shortcut = ShortURL.objects.create(mode=301, target=target)
    # Short code: md5(id + salt) -> url-safe base64 -> first 6 characters.
    # BE CAREFUL! collisions are possible, though unlikely.
    digest = hashlib.md5((str(shortcut.id) + HASH_SALT).encode('utf-8')).digest()
    shortcut.permanent_url = base64.b64encode(digest, altchars=b"-_")[:6].decode("utf-8")
    shortcut.save()

    return JsonResponse({'status': 200, 'permanent_url': shortcut.permanent_url})
def create_302(request):
    """Create a temporary (302) redirect with social-preview metadata.

    Reads name/target/title/description (and an optional thumbnail upload)
    from an AJAX POST. Returns a JSON payload echoing the saved fields, or
    a 400-style payload when the target lacks an http(s) scheme. Non-POST
    requests fall through and return None.
    """
    if request.method != "POST":
        return None

    # Fall back to module defaults for any blank/missing optional field.
    name = request.POST.get('name') or DEFAULT_NAME
    target = request.POST.get('target')
    title = request.POST.get('title') or DEFAULT_TITLE
    description = request.POST.get('description') or DEFAULT_DESCRIPTION

    if not target.startswith(('http://', 'https://')):
        return JsonResponse({
            'status': 400,
            'msg': 'Target url must to starts with "http://" or "https://"',
        })

    shortcut = ShortURL.objects.create(
        mode=302,
        name=name,
        target=target,
        title=title,
        description=description,
    )
    # Short code: md5(id + salt) -> url-safe base64 -> first 6 characters.
    # BE CAREFUL! collisions are possible, though unlikely.
    digest = hashlib.md5((str(shortcut.id) + HASH_SALT).encode('utf-8')).digest()
    permanent_url = base64.b64encode(digest, altchars=b"-_")[:6].decode("utf-8")
    shortcut.permanent_url = permanent_url
    shortcut.save()

    # Persist the uploaded thumbnail, when one was sent with the form.
    if request.FILES:
        upload = request.FILES['file']
        shortcut.thumbnail.save(str(upload), upload, save=True)

    if shortcut.thumbnail:
        thumbnail_url = settings.MEDIA_URL + str(shortcut.thumbnail)
    else:
        thumbnail_url = settings.STATIC_URL + '/images/default_thumbnail.png'

    return JsonResponse({
        'status': 200,
        'permanent_url': permanent_url,
        'name': name,
        'target': target,
        'title': title,
        'description': description,
        'thumbnail': thumbnail_url,
    })
def custom_url(request, custom_url):
    """Resolve a named or generated short code, log the hit, and redirect.

    Lookup order: newest ShortURL with a matching name, then one with a
    matching permanent code; unknown codes bounce back to the index.
    """
    by_name = ShortURL.objects.filter(name=custom_url)
    if by_name.exists():
        shortcut = by_name.order_by('-id')[0]
    elif ShortURL.objects.filter(permanent_url=custom_url).exists():
        shortcut = ShortURL.objects.get(permanent_url=custom_url)
    else:
        return redirect('/')

    # Record the visit with the caller's address.
    Viewer.objects.create(
        short_url=shortcut,
        ip=request.META.get('REMOTE_ADDR'),
    )

    if shortcut.mode == 301:
        return redirect(shortcut.target)

    # 302 links render an interstitial page carrying preview metadata.
    host_prefix = 'https://' + request.get_host()
    if shortcut.thumbnail:
        image_url = host_prefix + str(shortcut.thumbnail.url)
    else:
        image_url = host_prefix + settings.STATIC_URL + '/images/default_thumbnail.png'
    return render(request, 'links/redirect.html', {
        'url': shortcut.target,
        'title': shortcut.title,
        'description': shortcut.description,
        'image': image_url,
    })
def perm_url(request, permanent_url):
    """Resolve a generated permanent short code, log the hit, and redirect.

    Unknown codes bounce back to the index page.
    """
    if not ShortURL.objects.filter(permanent_url=permanent_url).exists():
        return redirect('/')
    shortcut = ShortURL.objects.get(permanent_url=permanent_url)

    # Record the visit with the caller's address.
    Viewer.objects.create(
        short_url=shortcut,
        ip=request.META.get('REMOTE_ADDR'),
    )

    if shortcut.mode == 301:
        return redirect(shortcut.target)

    # 302 links render an interstitial page carrying preview metadata.
    host_prefix = 'https://' + request.get_host()
    if shortcut.thumbnail:
        image_url = host_prefix + str(shortcut.thumbnail.url)
    else:
        image_url = host_prefix + settings.STATIC_URL + '/images/default_thumbnail.png'
    return render(request, 'links/redirect.html', {
        'url': shortcut.target,
        'title': shortcut.title,
        'description': shortcut.description,
        'image': image_url,
    })
def chart(request, permanent_url):
    """Render a 10-day view-count chart for the short URL ending on the
    day of its most recent hit.

    NOTE(review): assumes the shortcut has at least one Viewer row --
    ``[0]`` on an empty queryset would raise; confirm callers only link
    here for visited URLs.
    """
    shortcut = ShortURL.objects.get(permanent_url=permanent_url)
    viewers = shortcut.viewers.all()
    last_view = viewers.order_by('-id')[0].timestamp

    # Start 10 days before the day of the last view (the hours component of
    # a timedelta is ignored in date arithmetic) and walk forward one day at
    # a time, collecting a label and a daily hit count for each step.
    cursor = datetime.date(
        year=last_view.year, month=last_view.month, day=last_view.day,
    ) - datetime.timedelta(days=10, hours=16)
    labels = []
    data = []
    for _ in range(10):
        cursor += datetime.timedelta(days=1)
        labels.append(cursor.strftime("%b %d"))
        data.append(
            viewers.filter(
                timestamp__year=cursor.year,
                timestamp__month=cursor.month,
                timestamp__day=cursor.day,
            ).count(),
        )

    return render(request, 'links/chart.html', {
        'BASE_URL': BASE_URL,
        'BASE_SHORT_URL': BASE_SHORT_URL,
        'data': json.dumps(data),
        'labels': json.dumps(labels),
        'permanent_url': shortcut.permanent_url,
        'total_views': viewers.count(),
        'last_view': viewers.order_by('-id')[0].timestamp,
    })
| 33.128713 | 155 | 0.610281 | import base64
import datetime
import hashlib
import json
from django.conf import settings
from django.http import JsonResponse, HttpResponsePermanentRedirect
from django.shortcuts import render, redirect
from django.utils import timezone
from .models import ShortURL, Viewer
BASE_URL = 'http://localhost:8000/links/'
BASE_SHORT_URL = 'http://localhost:8000/u/'
HASH_SALT = 'MEOW'
DEFAULT_NAME = ''
DEFAULT_TITLE = ''
DEFAULT_DESCRIPTION = ''
def index(request):
if request.method == 'GET':
context = {
'BASE_URL': BASE_URL,
'BASE_SHORT_URL': BASE_SHORT_URL,
'status': 'normal',
'recent': ShortURL.objects.all().order_by('-id'),
}
return render(request, 'links/links.html', context)
def create_301(request):
if request.method == "POST":
target = request.POST.get('target')
if not target.startswith('http://') and not target.startswith('https://'):
context = {
'status': 400, 'msg': 'Target url must to starts with "http://" or "https://"'}
return JsonResponse(context)
shortcut = ShortURL.objects.create(
mode=301,
target=target
)
permanent_url = base64.b64encode(
hashlib.md5((str(shortcut.id) + HASH_SALT).encode('utf-8')).digest(), altchars=b"-_")[:6].decode("utf-8")
shortcut.permanent_url = permanent_url
shortcut.save()
context = {'status': 200, 'permanent_url': permanent_url}
return JsonResponse(context)
def create_302(request):
if request.method == "POST":
name = request.POST.get('name') if request.POST.get(
'name') else DEFAULT_NAME
target = request.POST.get('target')
title = request.POST.get('title') if request.POST.get(
'title') else DEFAULT_TITLE
description = request.POST.get('description') if request.POST.get(
'description') else DEFAULT_DESCRIPTION
if not target.startswith('http://') and not target.startswith('https://'):
context = {
'status': 400, 'msg': 'Target url must to starts with "http://" or "https://"'}
return JsonResponse(context)
shortcut = ShortURL.objects.create(
mode=302,
name=name,
target=target,
title=title,
description=description,
)
permanent_url = base64.b64encode(
hashlib.md5((str(shortcut.id) + HASH_SALT).encode('utf-8')).digest(), altchars=b"-_")[:6].decode("utf-8")
shortcut.permanent_url = permanent_url
shortcut.save()
if request.FILES:
shortcut.thumbnail.save(
str(request.FILES['file']), request.FILES['file'], save=True)
context = {
'status': 200,
'permanent_url': permanent_url,
'name': name,
'target': target,
'title': title,
'description': description,
'thumbnail': (settings.MEDIA_URL + str(shortcut.thumbnail) if shortcut.thumbnail else (settings.STATIC_URL + '/images/default_thumbnail.png')),
}
return JsonResponse(context)
def custom_url(request, custom_url):
if ShortURL.objects.filter(name=custom_url).exists():
shortcut = ShortURL.objects.filter(name=custom_url).order_by('-id')[0]
elif ShortURL.objects.filter(permanent_url=custom_url).exists():
shortcut = ShortURL.objects.get(permanent_url=custom_url)
else:
return redirect('/')
Viewer.objects.create(
short_url=shortcut,
ip=request.META.get('REMOTE_ADDR')
)
if shortcut.mode == 301:
return redirect(shortcut.target)
else:
image_url = 'https://' + request.get_host()
image_url += str(shortcut.thumbnail.url) if shortcut.thumbnail else (
settings.STATIC_URL + '/images/default_thumbnail.png')
context = {
'url': shortcut.target,
'title': shortcut.title,
'description': shortcut.description,
'image': image_url
}
return render(request, 'links/redirect.html', context)
def perm_url(request, permanent_url):
if not ShortURL.objects.filter(permanent_url=permanent_url).exists():
return redirect('/')
shortcut = ShortURL.objects.get(permanent_url=permanent_url)
Viewer.objects.create(
short_url=shortcut,
ip=request.META.get('REMOTE_ADDR')
)
if shortcut.mode == 301:
return redirect(shortcut.target)
else:
image_url = 'https://' + request.get_host()
image_url += str(shortcut.thumbnail.url) if shortcut.thumbnail else (
settings.STATIC_URL + '/images/default_thumbnail.png')
context = {
'url': shortcut.target,
'title': shortcut.title,
'description': shortcut.description,
'image': image_url
}
return render(request, 'links/redirect.html', context)
def chart(request, permanent_url):
shortcut = ShortURL.objects.get(permanent_url=permanent_url)
last_view = shortcut.viewers.all().order_by('-id')[0].timestamp
time_pointer = datetime.date(
year=last_view.year, month=last_view.month, day=last_view.day) - datetime.timedelta(days=10, hours=16)
labels = list()
data = list()
for day in range(10):
time_pointer += datetime.timedelta(days=1)
labels.append(time_pointer.strftime("%b %d"))
data.append(shortcut.viewers.filter(
timestamp__year=time_pointer.year,
timestamp__month=time_pointer.month,
timestamp__day=time_pointer.day,
).count())
context = {
'BASE_URL': BASE_URL,
'BASE_SHORT_URL': BASE_SHORT_URL,
'data': json.dumps(data),
'labels': json.dumps(labels),
'permanent_url': shortcut.permanent_url,
'total_views': shortcut.viewers.all().count(),
'last_view': shortcut.viewers.all().order_by(
'-id')[0].timestamp
}
return render(request, 'links/chart.html', context)
| true | true |
f7f5b6a9f5578c5540db5539d1d7268292c197fd | 9,721 | py | Python | tests/text_extraction_tests.py | campagnucci/querido-diario-toolbox | 3ec99564ae92f1b5456f351f34e7745b4385c79e | [
"MIT"
] | 20 | 2020-10-30T19:52:12.000Z | 2021-11-12T12:51:58.000Z | tests/text_extraction_tests.py | campagnucci/querido-diario-toolbox | 3ec99564ae92f1b5456f351f34e7745b4385c79e | [
"MIT"
] | 26 | 2020-10-30T19:58:44.000Z | 2022-03-31T01:41:55.000Z | tests/text_extraction_tests.py | campagnucci/querido-diario-toolbox | 3ec99564ae92f1b5456f351f34e7745b4385c79e | [
"MIT"
] | 9 | 2020-10-30T20:15:33.000Z | 2022-02-18T16:44:15.000Z | import os
from unittest import TestCase
from querido_diario_toolbox import Gazette, Page
class TextExtractionTests(TestCase):
    """End-to-end tests for Gazette/Page content, metadata and table extraction.

    The tests drive the Apache Tika and Tabula jars under tests/bin against
    fixture files under tests/data, and remove generated artifacts in
    tearDown.
    """

    def setUp(self):
        # Paths to the external jars used by the toolbox under test.
        ROOT = "tests/bin"
        self.TIKA_PATH = ROOT + "/tika-app-1.24.1.jar"
        self.TABULA_PATH = ROOT + "/tabula-1.0.4-jar-with-dependencies.jar"

    def tearDown(self):
        self.clean_txt_file_generated_during_tests()

    # definition of helper functions
    def clean_txt_file_generated_during_tests(self):
        """Remove every known generated artifact found under tests/data/."""
        for root, dirs, files in os.walk("tests/data/"):
            for generated_file in self.get_files_generated_during_tests(root, files):
                os.remove(generated_file)

    def get_files_generated_during_tests(self, root, files):
        """Yield full paths of generated test artifacts located in *root*.

        Bug fix: the original yielded ``f"{root}{f}"``, concatenating the
        directory and filename without a separator. ``os.walk`` only keeps a
        trailing separator on the top-level argument, so for any
        subdirectory of tests/data/ the old code built a wrong path and the
        cleanup in tearDown would fail or miss the file. ``os.path.join``
        builds the path correctly in all cases.
        """
        # NOTE(review): the metadata tests also generate
        # tests/data/fake_gazette.json, which is absent from this list --
        # confirm whether it should be cleaned up as well.
        for f in files:
            if f in [
                "fake_gazette.txt",
                "fake_cpf_cnpj.txt",
                "multiple_columns.txt",
                "multiple_columns.json",
            ]:
                yield os.path.join(root, f)

    def process_gazette_text(self, filepath):
        """Extract, load and post-process the text of the gazette at *filepath*."""
        gazette = Gazette(filepath=filepath, apache_tika_jar=self.TIKA_PATH)
        gazette.extract_content()
        gazette.load_content()
        text = gazette.process_text()
        return text

    def validate_basic_extract_content(self, gazette, metadata=False):
        """Run extraction on *gazette* and assert the expected output.

        With metadata=True the expected artifact is a JSON file loaded as a
        dict; otherwise it is a text file whose content must mention
        "Querido".
        """
        if metadata:
            target = "tests/data/fake_gazette.json"
        else:
            target = "tests/data/fake_gazette.txt"
        gazette.extract_content(metadata=metadata)
        self.assertEqual(gazette.filepath, target)
        gazette.load_content()
        self.assertNotEqual(0, len(gazette.content))
        if metadata:
            self.assertIsInstance(gazette.content, dict)
            self.assertNotEqual(gazette.content.items(), None)
        else:
            self.assertIn("Querido", gazette.content, "Extraction Failed")

    # filetype tests
    def test_extract_text_from_invalid_file(self):
        with self.assertRaisesRegex(Exception, "No such file"):
            gazette = Gazette("file/does/not/exist", self.TIKA_PATH)
            gazette.extract_content()

    def test_extract_metadata_from_invalid_file(self):
        with self.assertRaisesRegex(Exception, "No such file"):
            gazette = Gazette("file/does/not/exist", self.TIKA_PATH)
            gazette.extract_content(metadata=True)

    def test_extract_text_using_invalid_apache_tika_jar_path(self):
        with self.assertRaisesRegex(Exception, "File does not exist"):
            gazette = Gazette("tests/data/fake_gazette.pdf", "/tika/path")
            gazette.extract_content()

    def test_extract_metadata_using_invalid_apache_tika_jar_path(self):
        with self.assertRaisesRegex(Exception, "File does not exist"):
            gazette = Gazette("tests/data/fake_gazette.pdf", "/tika/path")
            gazette.extract_content(metadata=True)

    def test_extract_text_using_invalid_file_type_apache_tika(self):
        # NOTE(review): this "text" variant calls extract_content(metadata=True),
        # identical to the metadata variant below -- likely meant
        # extract_content(); both raise the same error, so behavior is kept.
        with self.assertRaisesRegex(Exception, "Expected Apache Tika jar"):
            gazette = Gazette(
                "tests/data/fake_gazette.pdf", "tests/data/fake_gazette.pdf"
            )
            gazette.extract_content(metadata=True)

    def test_extract_metadata_using_invalid_file_type_apache_tika(self):
        with self.assertRaisesRegex(Exception, "Expected Apache Tika jar"):
            gazette = Gazette(
                "tests/data/fake_gazette.pdf",
                "tests/data/fake_gazette.pdf",
            )
            gazette.extract_content(metadata=True)

    def test_extract_text_from_invalid_file_type_should_fail(self):
        with self.assertRaisesRegex(Exception, "Unsupported file type"):
            gazette = Gazette("tests/data/fake_gazette.m4a", self.TIKA_PATH)
            gazette.extract_content()

    def test_extract_metadata_from_invalid_file_type_should_fail(self):
        with self.assertRaisesRegex(Exception, "Unsupported file type"):
            gazette = Gazette("tests/data/fake_gazette.m4a", self.TIKA_PATH)
            gazette.extract_content(metadata=True)

    # class instantiation tests
    def test_empty_class_instantiation_should_fail(self):
        with self.assertRaises(Exception):
            Gazette()

    def test_class_instantiation_with_tika_path_but_no_filepath(self):
        with self.assertRaises(Exception):
            Gazette(apache_tika_jar=self.TIKA_PATH)

    def test_class_instantiation_with_content(self):
        gazette = Gazette(content="tests/data/fake_content.txt")
        self.assertNotEqual(gazette.content, None)

    def test_class_instantiation_with_no_content(self):
        gazette = Gazette(
            filepath="tests/data/fake_gazette.pdf",
            apache_tika_jar=self.TIKA_PATH,
        )
        self.assertNotEqual(gazette.filepath, None)
        self.assertNotEqual(gazette.tika_jar, None)
        self.assertEqual(gazette.content, None)

    def test_class_instantiation_with_no_filepath(self):
        gazette = Gazette(
            apache_tika_jar=self.TIKA_PATH,
            content="tests/data/fake_content.txt",
        )
        self.assertEqual(gazette.filepath, None)
        self.assertNotEqual(gazette.tika_jar, None)
        self.assertNotEqual(gazette.content, None)

    def test_class_instantiation_with_all_arguments(self):
        gazette = Gazette(
            filepath="tests/data/fake_gazette.pdf",
            apache_tika_jar=self.TIKA_PATH,
            content="tests/data/fake_content.txt",
        )
        self.assertNotEqual(gazette.filepath, None)
        self.assertNotEqual(gazette.tika_jar, None)
        self.assertNotEqual(gazette.content, None)

    # content extraction tests
    def test_extract_text_from_doc_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.doc", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette)

    def test_extract_text_from_docx_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.docx", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette)

    def test_extract_text_from_odt_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.odt", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette)

    def test_extract_text_from_html_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.html", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette)

    def test_extract_text_from_pdf_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.pdf", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette)

    def test_extract_text_from_jpeg_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.jpeg", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette)

    def test_extract_text_from_png_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.png", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette)

    def test_extract_text_from_tiff_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.tiff", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette)

    # metadata extraction tests
    def test_extract_metadata_from_doc_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.doc", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette, metadata=True)

    def test_extract_metadata_from_docx_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.docx", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette, metadata=True)

    def test_extract_metadata_from_odt_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.odt", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette, metadata=True)

    def test_extract_metadata_from_html_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.html", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette, metadata=True)

    def test_extract_metadata_from_pdf_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.pdf", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette, metadata=True)

    def test_extract_metadata_from_jpeg_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.jpeg", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette, metadata=True)

    def test_extract_metadata_from_png_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.png", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette, metadata=True)

    def test_extract_metadata_from_tiff_should_return_content(self):
        gazette = Gazette("tests/data/fake_gazette.tiff", self.TIKA_PATH)
        self.validate_basic_extract_content(gazette, metadata=True)

    # text linearization tests
    def test_gazette_text_is_linearized(self):
        gazette = Gazette("tests/data/multiple_columns.pdf", self.TIKA_PATH)
        gazette.extract_content()
        gazette.load_content()
        text = gazette.process_text()
        self.assertNotIn("-\n", text, "Text Processing Failed")

    def test_page_table_has_been_extracted(self):
        page = Page(
            filepath="tests/data/fake_table.pdf",
            apache_tika_jar=self.TIKA_PATH,
            tabula_jar=self.TABULA_PATH,
        )
        content = page.extract_table()
        table = content.split("\r\n")
        table = filter(None, table)
        matrix = [row.split(",") for row in table]
        matrix_size = [len(element) for element in matrix]
        self.assertEqual(matrix_size, [2, 2])
| 41.900862 | 85 | 0.701265 | import os
from unittest import TestCase
from querido_diario_toolbox import Gazette, Page
class TextExtractionTests(TestCase):
def setUp(self):
ROOT = "tests/bin"
self.TIKA_PATH = ROOT + "/tika-app-1.24.1.jar"
self.TABULA_PATH = ROOT + "/tabula-1.0.4-jar-with-dependencies.jar"
def tearDown(self):
self.clean_txt_file_generated_during_tests()
def clean_txt_file_generated_during_tests(self):
for root, dirs, files in os.walk("tests/data/"):
for generated_file in self.get_files_generated_during_tests(root, files):
os.remove(generated_file)
def get_files_generated_during_tests(self, root, files):
for f in files:
if f in [
"fake_gazette.txt",
"fake_cpf_cnpj.txt",
"multiple_columns.txt",
"multiple_columns.json",
]:
yield f"{root}{f}"
def process_gazette_text(self, filepath):
gazette = Gazette(filepath=filepath, apache_tika_jar=self.TIKA_PATH)
gazette.extract_content()
gazette.load_content()
text = gazette.process_text()
return text
def validate_basic_extract_content(self, gazette, metadata=False):
if metadata:
target = "tests/data/fake_gazette.json"
else:
target = "tests/data/fake_gazette.txt"
gazette.extract_content(metadata=metadata)
self.assertEqual(gazette.filepath, target)
gazette.load_content()
self.assertNotEqual(0, len(gazette.content))
if metadata:
self.assertIsInstance(gazette.content, dict)
self.assertNotEqual(gazette.content.items(), None)
else:
self.assertIn("Querido", gazette.content, "Extraction Failed")
def test_extract_text_from_invalid_file(self):
with self.assertRaisesRegex(Exception, "No such file"):
gazette = Gazette("file/does/not/exist", self.TIKA_PATH)
gazette.extract_content()
def test_extract_metadata_from_invalid_file(self):
with self.assertRaisesRegex(Exception, "No such file"):
gazette = Gazette("file/does/not/exist", self.TIKA_PATH)
gazette.extract_content(metadata=True)
def test_extract_text_using_invalid_apache_tika_jar_path(self):
with self.assertRaisesRegex(Exception, "File does not exist"):
gazette = Gazette("tests/data/fake_gazette.pdf", "/tika/path")
gazette.extract_content()
def test_extract_metadata_using_invalid_apache_tika_jar_path(self):
with self.assertRaisesRegex(Exception, "File does not exist"):
gazette = Gazette("tests/data/fake_gazette.pdf", "/tika/path")
gazette.extract_content(metadata=True)
def test_extract_text_using_invalid_file_type_apache_tika(self):
with self.assertRaisesRegex(Exception, "Expected Apache Tika jar"):
gazette = Gazette(
"tests/data/fake_gazette.pdf", "tests/data/fake_gazette.pdf"
)
gazette.extract_content(metadata=True)
def test_extract_metadata_using_invalid_file_type_apache_tika(self):
with self.assertRaisesRegex(Exception, "Expected Apache Tika jar"):
gazette = Gazette(
"tests/data/fake_gazette.pdf",
"tests/data/fake_gazette.pdf",
)
gazette.extract_content(metadata=True)
def test_extract_text_from_invalid_file_type_should_fail(self):
with self.assertRaisesRegex(Exception, "Unsupported file type"):
gazette = Gazette("tests/data/fake_gazette.m4a", self.TIKA_PATH)
gazette.extract_content()
def test_extract_metadata_from_invalid_file_type_should_fail(self):
with self.assertRaisesRegex(Exception, "Unsupported file type"):
gazette = Gazette("tests/data/fake_gazette.m4a", self.TIKA_PATH)
gazette.extract_content(metadata=True)
def test_empty_class_instantiation_should_fail(self):
with self.assertRaises(Exception):
Gazette()
def test_class_instantiation_with_tika_path_but_no_filepath(self):
with self.assertRaises(Exception):
Gazette(apache_tika_jar=self.TIKA_PATH)
def test_class_instantiation_with_content(self):
gazette = Gazette(content="tests/data/fake_content.txt")
self.assertNotEqual(gazette.content, None)
def test_class_instantiation_with_no_content(self):
gazette = Gazette(
filepath="tests/data/fake_gazette.pdf",
apache_tika_jar=self.TIKA_PATH,
)
self.assertNotEqual(gazette.filepath, None)
self.assertNotEqual(gazette.tika_jar, None)
self.assertEqual(gazette.content, None)
def test_class_instantiation_with_no_filepath(self):
gazette = Gazette(
apache_tika_jar=self.TIKA_PATH,
content="tests/data/fake_content.txt",
)
self.assertEqual(gazette.filepath, None)
self.assertNotEqual(gazette.tika_jar, None)
self.assertNotEqual(gazette.content, None)
def test_class_instantiation_with_all_arguments(self):
gazette = Gazette(
filepath="tests/data/fake_gazette.pdf",
apache_tika_jar=self.TIKA_PATH,
content="tests/data/fake_content.txt",
)
self.assertNotEqual(gazette.filepath, None)
self.assertNotEqual(gazette.tika_jar, None)
self.assertNotEqual(gazette.content, None)
def test_extract_text_from_doc_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.doc", self.TIKA_PATH)
self.validate_basic_extract_content(gazette)
def test_extract_text_from_docx_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.docx", self.TIKA_PATH)
self.validate_basic_extract_content(gazette)
def test_extract_text_from_odt_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.odt", self.TIKA_PATH)
self.validate_basic_extract_content(gazette)
def test_extract_text_from_html_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.html", self.TIKA_PATH)
self.validate_basic_extract_content(gazette)
def test_extract_text_from_pdf_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.pdf", self.TIKA_PATH)
self.validate_basic_extract_content(gazette)
def test_extract_text_from_jpeg_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.jpeg", self.TIKA_PATH)
self.validate_basic_extract_content(gazette)
def test_extract_text_from_png_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.png", self.TIKA_PATH)
self.validate_basic_extract_content(gazette)
def test_extract_text_from_tiff_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.tiff", self.TIKA_PATH)
self.validate_basic_extract_content(gazette)
def test_extract_metadata_from_doc_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.doc", self.TIKA_PATH)
self.validate_basic_extract_content(gazette, metadata=True)
def test_extract_metadata_from_docx_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.docx", self.TIKA_PATH)
self.validate_basic_extract_content(gazette, metadata=True)
def test_extract_metadata_from_odt_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.odt", self.TIKA_PATH)
self.validate_basic_extract_content(gazette, metadata=True)
def test_extract_metadata_from_html_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.html", self.TIKA_PATH)
self.validate_basic_extract_content(gazette, metadata=True)
def test_extract_metadata_from_pdf_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.pdf", self.TIKA_PATH)
self.validate_basic_extract_content(gazette, metadata=True)
def test_extract_metadata_from_jpeg_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.jpeg", self.TIKA_PATH)
self.validate_basic_extract_content(gazette, metadata=True)
def test_extract_metadata_from_png_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.png", self.TIKA_PATH)
self.validate_basic_extract_content(gazette, metadata=True)
def test_extract_metadata_from_tiff_should_return_content(self):
gazette = Gazette("tests/data/fake_gazette.tiff", self.TIKA_PATH)
self.validate_basic_extract_content(gazette, metadata=True)
def test_gazette_text_is_linearized(self):
gazette = Gazette("tests/data/multiple_columns.pdf", self.TIKA_PATH)
gazette.extract_content()
gazette.load_content()
text = gazette.process_text()
self.assertNotIn("-\n", text, "Text Processing Failed")
def test_page_table_has_been_extracted(self):
page = Page(
filepath="tests/data/fake_table.pdf",
apache_tika_jar=self.TIKA_PATH,
tabula_jar=self.TABULA_PATH,
)
content = page.extract_table()
table = content.split("\r\n")
table = filter(None, table)
matrix = [row.split(",") for row in table]
matrix_size = [len(element) for element in matrix]
self.assertEqual(matrix_size, [2, 2])
| true | true |
f7f5b6f2fa8645346df530bb577d2011be042e99 | 20,563 | py | Python | enodeb_status.py | aweimeow/enodebd | e1cd20693153e6b85e5d1bf9d21af2501c358601 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | enodeb_status.py | aweimeow/enodebd | e1cd20693153e6b85e5d1bf9d21af2501c358601 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | enodeb_status.py | aweimeow/enodebd | e1cd20693153e6b85e5d1bf9d21af2501c358601 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # SPDX-FileCopyrightText: 2020 The Magma Authors.
# SPDX-FileCopyrightText: 2022 Open Networking Foundation <support@opennetworking.org>
#
# SPDX-License-Identifier: BSD-3-Clause
import json
import os
from collections import namedtuple
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
from lte.protos.enodebd_pb2 import SingleEnodebStatus
from lte.protos.mconfig import mconfigs_pb2
from common import serialization_utils
import metrics
from data_models.data_model_parameters import ParameterName
from device_config.configuration_util import (
find_enb_by_cell_id,
get_enb_rf_tx_desired,
)
from exceptions import ConfigurationError
from logger import EnodebdLogger as logger
from s1ap_client import get_all_enb_state
from state_machines.enb_acs import EnodebAcsStateMachine
from state_machines.enb_acs_manager import StateMachineManager
from orc8r.protos.service303_pb2 import State
# There are 2 levels of caching for GPS coordinates reported by the eNodeB:
# module variables (in-memory) and a file on disk. If the eNodeB stops
# reporting GPS, we keep reporting the in-memory cached coordinates. If
# enodebd restarts, the in-memory cache is re-seeded from this file.
CACHED_GPS_COORD_FILE_PATH = os.path.join(
    '/var/opt/magma/enodebd',
    'gps_coords.txt',
)
# In-memory copy of the last-known coordinates. Written through to the file
# cache only when the values change; read directly when the eNodeB goes
# down, unless still uninitialized (None).
_gps_lat_cached = None
_gps_lon_cached = None
class EnodebStatus(NamedTuple):
    """Snapshot of a single eNodeB's connectivity/configuration flags."""

    enodeb_configured: bool
    gps_latitude: str
    gps_longitude: str
    enodeb_connected: bool
    opstate_enabled: bool
    rf_tx_on: bool
    rf_tx_desired: bool
    gps_connected: bool
    ptp_connected: bool
    mme_connected: bool
    fsm_state: str
    cell_id: int
# TODO: Remove after checkins support multiple eNB status
# Legacy single-eNB status record kept for older controller checkins.
MagmaOldEnodebdStatus = namedtuple(
    'MagmaOldEnodebdStatus',
    'enodeb_serial enodeb_configured gps_latitude gps_longitude '
    'enodeb_connected opstate_enabled rf_tx_on rf_tx_desired '
    'gps_connected ptp_connected mme_connected enodeb_state',
)
class MagmaEnodebdStatus(NamedTuple):
    """Aggregated enodebd status across every tracked eNodeB."""

    n_enodeb_connected: str
    all_enodeb_configured: str
    all_enodeb_opstate_enabled: str
    all_enodeb_rf_tx_configured: str
    any_enodeb_gps_connected: str
    all_enodeb_ptp_connected: str
    all_enodeb_mme_connected: str
    gateway_gps_longitude: str
    gateway_gps_latitude: str
def update_status_metrics(status: EnodebStatus) -> None:
    """Push per-eNodeB boolean status flags into their gauge metrics.

    Called every second. Metrics are "sticky" when synced to the cloud, so
    any flag that is missing or not a bool is explicitly reported as 0
    rather than leaving the previous value in place.
    """
    flag_to_metric = {
        'enodeb_connected': metrics.STAT_ENODEB_CONNECTED,
        'enodeb_configured': metrics.STAT_ENODEB_CONFIGURED,
        'opstate_enabled': metrics.STAT_OPSTATE_ENABLED,
        'rf_tx_on': metrics.STAT_RF_TX_ENABLED,
        'rf_tx_desired': metrics.STAT_RF_TX_DESIRED,
        'gps_connected': metrics.STAT_GPS_CONNECTED,
        'ptp_connected': metrics.STAT_PTP_CONNECTED,
        'mme_connected': metrics.STAT_MME_CONNECTED,
    }
    status_dict = status._asdict()
    for flag, gauge in flag_to_metric.items():
        value = status_dict.get(flag, None)
        if isinstance(value, bool):
            gauge.set(int(value))
            continue
        # Missing flag -> silently report 0; wrong type -> log, report 0.
        if value is not None:
            logger.error('Could not cast metric value %s to int', value)
        gauge.set(0)
# TODO: Remove after checkins support multiple eNB status
def get_service_status_old(
    enb_acs_manager: StateMachineManager,
) -> Dict[str, Any]:
    """Build legacy single-eNB service status for older controllers.

    Since only a single eNB is expected, the status is generated from the
    first connected one; when none is connected, an all-default record is
    returned.
    """
    statuses = get_all_enb_status(enb_acs_manager)
    for serial, status in statuses.items():
        if not status.enodeb_connected:
            continue
        return MagmaOldEnodebdStatus(
            enodeb_serial=serial,
            enodeb_configured=_bool_to_str(status.enodeb_configured),
            gps_latitude=status.gps_latitude,
            gps_longitude=status.gps_longitude,
            enodeb_connected=_bool_to_str(status.enodeb_connected),
            opstate_enabled=_bool_to_str(status.opstate_enabled),
            rf_tx_on=_bool_to_str(status.rf_tx_on),
            rf_tx_desired=_bool_to_str(status.rf_tx_desired),
            gps_connected=_bool_to_str(status.gps_connected),
            ptp_connected=_bool_to_str(status.ptp_connected),
            mme_connected=_bool_to_str(status.mme_connected),
            enodeb_state=status.fsm_state,
        )._asdict()
    # No connected eNB: report the default/unknown status record.
    return MagmaOldEnodebdStatus(
        enodeb_serial='N/A',
        enodeb_configured='0',
        gps_latitude='0.0',
        gps_longitude='0.0',
        enodeb_connected='0',
        opstate_enabled='0',
        rf_tx_on='0',
        rf_tx_desired='N/A',
        gps_connected='0',
        ptp_connected='0',
        mme_connected='0',
        enodeb_state='N/A',
    )._asdict()
def get_service_status(enb_acs_manager: StateMachineManager) -> Dict[str, Any]:
    """Return the aggregated enodebd service status as a plain dict."""
    return _get_enodebd_status(enb_acs_manager)._asdict()
def _get_enodebd_status(
    enb_acs_manager: StateMachineManager,
) -> MagmaEnodebdStatus:
    """
    Aggregate per-eNB status into a single gateway-wide MagmaEnodebdStatus.

    All output fields are serialized as strings. Boolean fields are
    all()/any() aggregates over the known eNBs; the gateway GPS coordinates
    are taken from the first eNB in the map when at least one is connected.
    """
    enb_status_by_serial = get_all_enb_status(enb_acs_manager)
    # Start from default values for enodebd status
    n_enodeb_connected = 0
    all_enodeb_configured = False
    all_enodeb_opstate_enabled = False
    all_enodeb_rf_tx_configured = False
    any_enodeb_gps_connected = False
    all_enodeb_ptp_connected = False
    all_enodeb_mme_connected = False
    gateway_gps_longitude = '0.0'
    gateway_gps_latitude = '0.0'
    def _is_rf_tx_configured(enb_status: EnodebStatus) -> bool:
        # RF TX is "configured" when the actual state matches the desired one.
        return enb_status.rf_tx_on == enb_status.rf_tx_desired
    if enb_status_by_serial:
        enb_status_list = list(enb_status_by_serial.values())
        # Aggregate all eNB status for enodebd status, repetitive but
        # clearer for output purposes.
        n_enodeb_connected = sum(
            enb_status.enodeb_connected for enb_status in enb_status_list
        )
        all_enodeb_configured = all(
            enb_status.enodeb_configured for enb_status in enb_status_list
        )
        all_enodeb_mme_connected = all(
            enb_status.mme_connected for enb_status in enb_status_list
        )
        all_enodeb_opstate_enabled = all(
            enb_status.opstate_enabled for enb_status in enb_status_list
        )
        all_enodeb_ptp_connected = all(
            enb_status.ptp_connected for enb_status in enb_status_list
        )
        any_enodeb_gps_connected = any(
            enb_status.gps_connected for enb_status in enb_status_list
        )
        all_enodeb_rf_tx_configured = all(
            _is_rf_tx_configured(enb_status) for enb_status in enb_status_list
        )
        if n_enodeb_connected:
            # Arbitrarily report the first eNB's coordinates for the gateway.
            gateway_gps_longitude = enb_status_list[0].gps_longitude
            gateway_gps_latitude = enb_status_list[0].gps_latitude
    return MagmaEnodebdStatus(
        n_enodeb_connected=str(n_enodeb_connected),
        all_enodeb_configured=str(all_enodeb_configured),
        all_enodeb_opstate_enabled=str(all_enodeb_opstate_enabled),
        all_enodeb_rf_tx_configured=str(all_enodeb_rf_tx_configured),
        any_enodeb_gps_connected=str(any_enodeb_gps_connected),
        all_enodeb_ptp_connected=str(all_enodeb_ptp_connected),
        all_enodeb_mme_connected=str(all_enodeb_mme_connected),
        gateway_gps_longitude=str(gateway_gps_longitude),
        gateway_gps_latitude=str(gateway_gps_latitude),
    )
def get_all_enb_status(
    enb_acs_manager: StateMachineManager,
) -> Dict[str, EnodebStatus]:
    """Collect the status of every currently-connected eNodeB.

    Returns:
        Mapping from eNB serial number to its EnodebStatus.
    """
    return {
        serial: get_enb_status(enb_acs_manager.get_handler_by_serial(serial))
        for serial in enb_acs_manager.get_connected_serial_id_list()
    }
def get_enb_status(enodeb: EnodebAcsStateMachine) -> EnodebStatus:
    """
    Return an EnodebStatus NamedTuple summarizing the state of one eNodeB.

    Covers connectivity (TR-069 session, MME, PTP, GPS), operational/RF
    state, cached GPS coordinates, the state machine's current FSM state
    and the cell id. Fields that cannot be read from the device default to
    False / 0 rather than being omitted.

    Args:
        enodeb: the ACS state machine for the device being queried

    Returns:
        EnodebStatus for the given device
    """
    enodeb_configured = enodeb.is_enodeb_configured()
    # We cache GPS coordinates so try to read them before the early return
    # if the enB is not connected
    gps_lat, gps_lon = _get_and_cache_gps_coords(enodeb)
    enodeb_connected = enodeb.is_enodeb_connected()
    opstate_enabled = _parse_param_as_bool(enodeb, ParameterName.OP_STATE)
    rf_tx_on = _parse_param_as_bool(enodeb, ParameterName.RF_TX_STATUS)
    # Never report RF TX as on when the session is gone (stale value).
    rf_tx_on = rf_tx_on and enodeb_connected
    try:
        enb_serial = \
            enodeb.device_cfg.get_parameter(ParameterName.SERIAL_NUMBER)
        enb_cell_id = int(
            enodeb.device_cfg.get_parameter(ParameterName.CELL_ID),
        )
        rf_tx_desired = get_enb_rf_tx_desired(enodeb.mconfig, enb_serial)
    except (KeyError, ConfigurationError):
        # Serial / cell id not available yet (e.g. still provisioning).
        rf_tx_desired = False
        enb_cell_id = 0
    mme_connected = _parse_param_as_bool(enodeb, ParameterName.MME_STATUS)
    gps_connected = _get_gps_status_as_bool(enodeb)
    try:
        ptp_connected = _parse_param_as_bool(enodeb, ParameterName.PTP_STATUS)
    except ConfigurationError:
        # Device model does not expose a PTP status parameter.
        ptp_connected = False
    return EnodebStatus(
        enodeb_configured=enodeb_configured,
        gps_latitude=gps_lat,
        gps_longitude=gps_lon,
        enodeb_connected=enodeb_connected,
        opstate_enabled=opstate_enabled,
        rf_tx_on=rf_tx_on,
        rf_tx_desired=rf_tx_desired,
        gps_connected=gps_connected,
        ptp_connected=ptp_connected,
        mme_connected=mme_connected,
        fsm_state=enodeb.get_state(),
        cell_id=enb_cell_id,
    )
def get_single_enb_status(
    device_serial: str,
    state_machine_manager: StateMachineManager,
) -> SingleEnodebStatus:
    """
    Build the gRPC SingleEnodebStatus message for one eNB serial.

    Returns a placeholder ('N/A'/'0') message when no state machine exists
    for the given serial.
    """
    try:
        handler = state_machine_manager.get_handler_by_serial(device_serial)
    except KeyError:
        return _empty_enb_status()
    # This namedtuple is missing IP and serial info
    status = get_enb_status(handler)
    # Get IP info
    ip = state_machine_manager.get_ip_of_serial(device_serial)
    def get_status_property(status: bool) -> SingleEnodebStatus.StatusProperty:
        # Map a Python bool onto the proto's ON/OFF enum values.
        if status:
            return SingleEnodebStatus.StatusProperty.Value('ON')
        return SingleEnodebStatus.StatusProperty.Value('OFF')
    # Build the message to return through gRPC
    enb_status = SingleEnodebStatus()
    enb_status.device_serial = device_serial
    enb_status.ip_address = ip
    enb_status.connected = get_status_property(status.enodeb_connected)
    enb_status.configured = get_status_property(status.enodeb_configured)
    enb_status.opstate_enabled = get_status_property(status.opstate_enabled)
    enb_status.rf_tx_on = get_status_property(status.rf_tx_on)
    enb_status.rf_tx_desired = get_status_property(status.rf_tx_desired)
    enb_status.gps_connected = get_status_property(status.gps_connected)
    enb_status.ptp_connected = get_status_property(status.ptp_connected)
    enb_status.mme_connected = get_status_property(status.mme_connected)
    enb_status.gps_longitude = status.gps_longitude
    enb_status.gps_latitude = status.gps_latitude
    enb_status.fsm_state = status.fsm_state
    return enb_status
def get_operational_states(
    enb_acs_manager: StateMachineManager,
    mconfig: mconfigs_pb2.EnodebD,
) -> List[State]:
    """
    Report operational state for all known eNBs: both TR-069 managed ones
    and externally configured (S1-only) ones from the mconfig.

    Returns: A list of State with EnodebStatus encoded as JSON
    """
    states = []
    configured_serial_ids = []
    enb_status_by_serial = get_all_enb_status(enb_acs_manager)
    # Get S1 connected eNBs
    enb_statuses = get_all_enb_state()
    for serial_id in enb_status_by_serial:
        enb_status_dict = enb_status_by_serial[serial_id]._asdict()
        # Add IP address to state
        enb_status_dict['ip_address'] = enb_acs_manager.get_ip_of_serial(
            serial_id,
        )
        # Add num of UEs connected
        num_ue_connected = enb_statuses.get(enb_status_dict['cell_id'], 0)
        enb_status_dict['ues_connected'] = num_ue_connected
        serialized = json.dumps(enb_status_dict)
        state = State(
            type="single_enodeb",
            deviceID=serial_id,
            value=serialized.encode('utf-8'),
        )
        # Track TR-069 managed serials so they are not double-reported below.
        configured_serial_ids.append(serial_id)
        states.append(state)
    # Get state for externally configured enodebs
    s1_states = get_enb_s1_connected_states(
        enb_statuses,
        configured_serial_ids,
        mconfig,
    )
    states.extend(s1_states)
    return states
def get_enb_s1_connected_states(
    enb_s1_state_map, configured_serial_ids,
    mconfig,
) -> List[State]:
    """
    Build State entries for eNBs that are S1-connected but not managed
    over TR-069 (i.e. externally configured).

    Args:
        enb_s1_state_map: mapping of cell id -> number of connected UEs
        configured_serial_ids: serials already reported via TR-069; skipped here
        mconfig: the EnodebD mconfig used to resolve cell id -> eNB config
    """
    states = []
    for enb_id in enb_s1_state_map:
        enb = find_enb_by_cell_id(mconfig, enb_id)
        if enb and enb.serial_num not in configured_serial_ids:
            # TR-069 state is unknown for external eNBs; only S1/MME
            # connectivity can be asserted here.
            status = EnodebStatus(
                enodeb_configured=False,
                gps_latitude='N/A',
                gps_longitude='N/A',
                enodeb_connected=True,
                opstate_enabled=False,
                rf_tx_on=False,
                rf_tx_desired=False,
                gps_connected=False,
                ptp_connected=False,
                mme_connected=True,
                fsm_state='N/A',
                cell_id=enb_id,
            )
            status_dict = status._asdict()
            # Add IP address to state
            status_dict['ip_address'] = enb.config.ip_address
            # Add num of UEs connected to state, use cellID from mconfig
            status_dict['ues_connected'] = enb_s1_state_map.get(enb_id, 0)
            serialized = json.dumps(status_dict)
            state = State(
                type="single_enodeb",
                deviceID=enb.serial_num,
                value=serialized.encode('utf-8'),
            )
            states.append(state)
    return states
def _empty_enb_status() -> SingleEnodebStatus:
    """Return a placeholder SingleEnodebStatus for an unknown serial."""
    enb_status = SingleEnodebStatus()
    enb_status.device_serial = 'N/A'
    enb_status.ip_address = 'N/A'
    enb_status.connected = '0'
    enb_status.configured = '0'
    enb_status.opstate_enabled = '0'
    enb_status.rf_tx_on = '0'
    enb_status.rf_tx_desired = 'N/A'
    enb_status.gps_connected = '0'
    enb_status.ptp_connected = '0'
    enb_status.mme_connected = '0'
    enb_status.gps_longitude = '0.0'
    enb_status.gps_latitude = '0.0'
    enb_status.fsm_state = 'N/A'
    return enb_status
def _parse_param_as_bool(
    enodeb: EnodebAcsStateMachine,
    param_name: ParameterName,
) -> bool:
    """Read a device parameter and coerce it to bool; False if unavailable."""
    try:
        return _format_as_bool(enodeb.get_parameter(param_name), param_name)
    except (KeyError, ConfigurationError):
        # Parameter not (yet) reported by the device.
        return False
def _format_as_bool(
    param_value: Union[bool, str, int],
    param_name: Optional[Union[ParameterName, str]] = None,
) -> bool:
    """Coerce a raw device parameter value to a boolean.

    Accepts bools, ints and strings (case-insensitive, surrounding
    whitespace ignored): 'true'/'1'/'enabled' map to True;
    'false'/'0'/'disabled'/'inprogress' map to False. Unrecognized values
    are logged and treated as False.

    Args:
        param_value: raw value reported by the device
        param_name: optional parameter name, used only for log messages

    Returns:
        The boolean interpretation of ``param_value``.
    """
    stripped_value = str(param_value).lower().strip()
    if stripped_value in {'true', '1', 'enabled'}:
        return True
    # The value is lower-cased above, so set members must be lower-case;
    # the previous 'InProgress' spelling could never match and such values
    # fell through to the warning branch instead.
    elif stripped_value in {'false', '0', 'disabled', 'inprogress'}:
        return False
    else:
        logger.warning(
            '%s parameter not understood (%s)', param_name, param_value,
        )
        return False
def _get_gps_status_as_bool(enodeb: EnodebAcsStateMachine) -> bool:
    """Return True only when the eNB reports a GPS fix ('1' or True)."""
    try:
        if not enodeb.has_parameter(ParameterName.GPS_STATUS):
            return False
        else:
            param = enodeb.get_parameter(ParameterName.GPS_STATUS)
            if isinstance(param, bool):
                # No translation to do.
                return param
            stripped_value = param.lower().strip()
            if stripped_value in ['0', '2', 'inprogress']:
                # 2 = GPS locking
                return False
            elif stripped_value == '1':
                return True
            else:
                logger.warning(
                    'GPS status parameter not understood (%s)', param,
                )
                return False
    except (KeyError, ConfigurationError, AttributeError):
        # Missing parameter or non-string value: treat as no GPS fix.
        return False
def _get_and_cache_gps_coords(enodeb: EnodebAcsStateMachine) -> Tuple[
    str, str,
]:
    """
    Read the GPS coordinates of the enB from its configuration or the
    cached coordinate file if the preceding read fails. If reading from
    enB configuration succeeds, this method will cache the new coordinates.
    Returns:
        (str, str): GPS latitude, GPS longitude
    """
    lat, lon = '', ''
    try:
        lat = enodeb.get_parameter(ParameterName.GPS_LAT)
        lon = enodeb.get_parameter(ParameterName.GPS_LONG)
        # Refresh the cache only when the coordinates actually changed.
        if lat != _gps_lat_cached or lon != _gps_lon_cached:
            _cache_new_gps_coords(lat, lon)
        return lat, lon
    except (KeyError, ConfigurationError):
        # Device not reporting coordinates: fall back to the cached pair.
        return _get_cached_gps_coords()
    except ValueError:
        logger.warning('GPS lat/long not understood (%s/%s)', lat, lon)
        return '0', '0'
def _get_cached_gps_coords() -> Tuple[str, str]:
    """
    Returns cached GPS coordinates if enB is disconnected or otherwise not
    reporting coordinates.
    Returns:
        (str, str): (GPS lat, GPS lon)
    """
    # pylint: disable=global-statement
    global _gps_lat_cached, _gps_lon_cached
    if _gps_lat_cached is None or _gps_lon_cached is None:
        # Lazily hydrate the in-memory cache from the on-disk file.
        _gps_lat_cached, _gps_lon_cached = _read_gps_coords_from_file()
    return _gps_lat_cached, _gps_lon_cached
def _read_gps_coords_from_file():
    """Read cached (lat, lon) strings from disk; ('0', '0') on any failure."""
    try:
        with open(CACHED_GPS_COORD_FILE_PATH, encoding="utf-8") as f:
            lines = f.readlines()
            if len(lines) != 2:
                logger.warning(
                    'Expected to find 2 lines in GPS '
                    'coordinate file but only found %d',
                    len(lines),
                )
                return '0', '0'
            # File format: latitude on line 1, longitude on line 2.
            return tuple(map(lambda l: l.strip(), lines))
    except OSError:
        logger.warning('Could not open cached GPS coordinate file')
        return '0', '0'
def _cache_new_gps_coords(gps_lat, gps_lon):
    """
    Cache GPS coordinates in the module-level variables here and write them
    to a managed file on disk.
    Args:
        gps_lat (str): latitude as a string
        gps_lon (str): longitude as a string
    """
    # pylint: disable=global-statement
    global _gps_lat_cached, _gps_lon_cached
    _gps_lat_cached, _gps_lon_cached = gps_lat, gps_lon
    # Persist so the coordinates survive restarts / eNB disconnects.
    _write_gps_coords_to_file(gps_lat, gps_lon)
def _write_gps_coords_to_file(gps_lat, gps_lon):
    """Persist GPS coordinates (lat on line 1, lon on line 2) to the cache file.

    Write failures are swallowed: the on-disk cache is best-effort only.
    """
    contents = f'{gps_lat}\n{gps_lon}'
    try:
        serialization_utils.write_to_file_atomically(
            CACHED_GPS_COORD_FILE_PATH,
            contents,
        )
    except OSError:
        pass
def _bool_to_str(b: bool) -> str:
if b is True:
return "1"
return "0"
| 34.559664 | 86 | 0.674123 |
import json
import os
from collections import namedtuple
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
from lte.protos.enodebd_pb2 import SingleEnodebStatus
from lte.protos.mconfig import mconfigs_pb2
from common import serialization_utils
import metrics
from data_models.data_model_parameters import ParameterName
from device_config.configuration_util import (
find_enb_by_cell_id,
get_enb_rf_tx_desired,
)
from exceptions import ConfigurationError
from logger import EnodebdLogger as logger
from s1ap_client import get_all_enb_state
from state_machines.enb_acs import EnodebAcsStateMachine
from state_machines.enb_acs_manager import StateMachineManager
from orc8r.protos.service303_pb2 import State
CACHED_GPS_COORD_FILE_PATH = os.path.join(
'/var/opt/magma/enodebd',
'gps_coords.txt',
)
# coordinates have not changed. We can read directly from here instead of the
# file cache when the enodeB goes down unless these are unintialized.
_gps_lat_cached = None
_gps_lon_cached = None
EnodebStatus = NamedTuple(
'EnodebStatus',
[
('enodeb_configured', bool),
('gps_latitude', str),
('gps_longitude', str),
('enodeb_connected', bool),
('opstate_enabled', bool),
('rf_tx_on', bool),
('rf_tx_desired', bool),
('gps_connected', bool),
('ptp_connected', bool),
('mme_connected', bool),
('fsm_state', str),
('cell_id', int),
],
)
# TODO: Remove after checkins support multiple eNB status
MagmaOldEnodebdStatus = namedtuple(
'MagmaOldEnodebdStatus',
[
'enodeb_serial',
'enodeb_configured',
'gps_latitude',
'gps_longitude',
'enodeb_connected',
'opstate_enabled',
'rf_tx_on',
'rf_tx_desired',
'gps_connected',
'ptp_connected',
'mme_connected',
'enodeb_state',
],
)
MagmaEnodebdStatus = NamedTuple(
'MagmaEnodebdStatus',
[
('n_enodeb_connected', str),
('all_enodeb_configured', str),
('all_enodeb_opstate_enabled', str),
('all_enodeb_rf_tx_configured', str),
('any_enodeb_gps_connected', str),
('all_enodeb_ptp_connected', str),
('all_enodeb_mme_connected', str),
('gateway_gps_longitude', str),
('gateway_gps_latitude', str),
],
)
def update_status_metrics(status: EnodebStatus) -> None:
# Call every second
metrics_by_stat_key = {
'enodeb_connected': metrics.STAT_ENODEB_CONNECTED,
'enodeb_configured': metrics.STAT_ENODEB_CONFIGURED,
'opstate_enabled': metrics.STAT_OPSTATE_ENABLED,
'rf_tx_on': metrics.STAT_RF_TX_ENABLED,
'rf_tx_desired': metrics.STAT_RF_TX_DESIRED,
'gps_connected': metrics.STAT_GPS_CONNECTED,
'ptp_connected': metrics.STAT_PTP_CONNECTED,
'mme_connected': metrics.STAT_MME_CONNECTED,
}
def get_metric_value(enodeb_status: Dict[str, str], key: str):
# Metrics are "sticky" when synced to the cloud - if we don't
val = enodeb_status.get(key, None)
if val is None:
return 0
if type(val) is not bool:
logger.error('Could not cast metric value %s to int', val)
return 0
return int(val)
for stat_key, metric in metrics_by_stat_key.items():
metric.set(get_metric_value(status._asdict(), stat_key))
def get_service_status_old(
enb_acs_manager: StateMachineManager,
) -> Dict[str, Any]:
enb_status_by_serial = get_all_enb_status(enb_acs_manager)
for enb_serial, enb_status in enb_status_by_serial.items():
if enb_status.enodeb_connected:
return MagmaOldEnodebdStatus(
enodeb_serial=enb_serial,
enodeb_configured=_bool_to_str(enb_status.enodeb_configured),
gps_latitude=enb_status.gps_latitude,
gps_longitude=enb_status.gps_longitude,
enodeb_connected=_bool_to_str(enb_status.enodeb_connected),
opstate_enabled=_bool_to_str(enb_status.opstate_enabled),
rf_tx_on=_bool_to_str(enb_status.rf_tx_on),
rf_tx_desired=_bool_to_str(enb_status.rf_tx_desired),
gps_connected=_bool_to_str(enb_status.gps_connected),
ptp_connected=_bool_to_str(enb_status.ptp_connected),
mme_connected=_bool_to_str(enb_status.mme_connected),
enodeb_state=enb_status.fsm_state,
)._asdict()
return MagmaOldEnodebdStatus(
enodeb_serial='N/A',
enodeb_configured='0',
gps_latitude='0.0',
gps_longitude='0.0',
enodeb_connected='0',
opstate_enabled='0',
rf_tx_on='0',
rf_tx_desired='N/A',
gps_connected='0',
ptp_connected='0',
mme_connected='0',
enodeb_state='N/A',
)._asdict()
def get_service_status(enb_acs_manager: StateMachineManager) -> Dict[str, Any]:
enodebd_status = _get_enodebd_status(enb_acs_manager)
return enodebd_status._asdict()
def _get_enodebd_status(
enb_acs_manager: StateMachineManager,
) -> MagmaEnodebdStatus:
enb_status_by_serial = get_all_enb_status(enb_acs_manager)
n_enodeb_connected = 0
all_enodeb_configured = False
all_enodeb_opstate_enabled = False
all_enodeb_rf_tx_configured = False
any_enodeb_gps_connected = False
all_enodeb_ptp_connected = False
all_enodeb_mme_connected = False
gateway_gps_longitude = '0.0'
gateway_gps_latitude = '0.0'
def _is_rf_tx_configured(enb_status: EnodebStatus) -> bool:
return enb_status.rf_tx_on == enb_status.rf_tx_desired
if enb_status_by_serial:
enb_status_list = list(enb_status_by_serial.values())
n_enodeb_connected = sum(
enb_status.enodeb_connected for enb_status in enb_status_list
)
all_enodeb_configured = all(
enb_status.enodeb_configured for enb_status in enb_status_list
)
all_enodeb_mme_connected = all(
enb_status.mme_connected for enb_status in enb_status_list
)
all_enodeb_opstate_enabled = all(
enb_status.opstate_enabled for enb_status in enb_status_list
)
all_enodeb_ptp_connected = all(
enb_status.ptp_connected for enb_status in enb_status_list
)
any_enodeb_gps_connected = any(
enb_status.gps_connected for enb_status in enb_status_list
)
all_enodeb_rf_tx_configured = all(
_is_rf_tx_configured(enb_status) for enb_status in enb_status_list
)
if n_enodeb_connected:
gateway_gps_longitude = enb_status_list[0].gps_longitude
gateway_gps_latitude = enb_status_list[0].gps_latitude
return MagmaEnodebdStatus(
n_enodeb_connected=str(n_enodeb_connected),
all_enodeb_configured=str(all_enodeb_configured),
all_enodeb_opstate_enabled=str(all_enodeb_opstate_enabled),
all_enodeb_rf_tx_configured=str(all_enodeb_rf_tx_configured),
any_enodeb_gps_connected=str(any_enodeb_gps_connected),
all_enodeb_ptp_connected=str(all_enodeb_ptp_connected),
all_enodeb_mme_connected=str(all_enodeb_mme_connected),
gateway_gps_longitude=str(gateway_gps_longitude),
gateway_gps_latitude=str(gateway_gps_latitude),
)
def get_all_enb_status(
enb_acs_manager: StateMachineManager,
) -> Dict[str, EnodebStatus]:
enb_status_by_serial = {}
serial_list = enb_acs_manager.get_connected_serial_id_list()
for enb_serial in serial_list:
handler = enb_acs_manager.get_handler_by_serial(enb_serial)
status = get_enb_status(handler)
enb_status_by_serial[enb_serial] = status
return enb_status_by_serial
def get_enb_status(enodeb: EnodebAcsStateMachine) -> EnodebStatus:
enodeb_configured = enodeb.is_enodeb_configured()
gps_lat, gps_lon = _get_and_cache_gps_coords(enodeb)
enodeb_connected = enodeb.is_enodeb_connected()
opstate_enabled = _parse_param_as_bool(enodeb, ParameterName.OP_STATE)
rf_tx_on = _parse_param_as_bool(enodeb, ParameterName.RF_TX_STATUS)
rf_tx_on = rf_tx_on and enodeb_connected
try:
enb_serial = \
enodeb.device_cfg.get_parameter(ParameterName.SERIAL_NUMBER)
enb_cell_id = int(
enodeb.device_cfg.get_parameter(ParameterName.CELL_ID),
)
rf_tx_desired = get_enb_rf_tx_desired(enodeb.mconfig, enb_serial)
except (KeyError, ConfigurationError):
rf_tx_desired = False
enb_cell_id = 0
mme_connected = _parse_param_as_bool(enodeb, ParameterName.MME_STATUS)
gps_connected = _get_gps_status_as_bool(enodeb)
try:
ptp_connected = _parse_param_as_bool(enodeb, ParameterName.PTP_STATUS)
except ConfigurationError:
ptp_connected = False
return EnodebStatus(
enodeb_configured=enodeb_configured,
gps_latitude=gps_lat,
gps_longitude=gps_lon,
enodeb_connected=enodeb_connected,
opstate_enabled=opstate_enabled,
rf_tx_on=rf_tx_on,
rf_tx_desired=rf_tx_desired,
gps_connected=gps_connected,
ptp_connected=ptp_connected,
mme_connected=mme_connected,
fsm_state=enodeb.get_state(),
cell_id=enb_cell_id,
)
def get_single_enb_status(
device_serial: str,
state_machine_manager: StateMachineManager,
) -> SingleEnodebStatus:
try:
handler = state_machine_manager.get_handler_by_serial(device_serial)
except KeyError:
return _empty_enb_status()
status = get_enb_status(handler)
ip = state_machine_manager.get_ip_of_serial(device_serial)
def get_status_property(status: bool) -> SingleEnodebStatus.StatusProperty:
if status:
return SingleEnodebStatus.StatusProperty.Value('ON')
return SingleEnodebStatus.StatusProperty.Value('OFF')
enb_status = SingleEnodebStatus()
enb_status.device_serial = device_serial
enb_status.ip_address = ip
enb_status.connected = get_status_property(status.enodeb_connected)
enb_status.configured = get_status_property(status.enodeb_configured)
enb_status.opstate_enabled = get_status_property(status.opstate_enabled)
enb_status.rf_tx_on = get_status_property(status.rf_tx_on)
enb_status.rf_tx_desired = get_status_property(status.rf_tx_desired)
enb_status.gps_connected = get_status_property(status.gps_connected)
enb_status.ptp_connected = get_status_property(status.ptp_connected)
enb_status.mme_connected = get_status_property(status.mme_connected)
enb_status.gps_longitude = status.gps_longitude
enb_status.gps_latitude = status.gps_latitude
enb_status.fsm_state = status.fsm_state
return enb_status
def get_operational_states(
enb_acs_manager: StateMachineManager,
mconfig: mconfigs_pb2.EnodebD,
) -> List[State]:
states = []
configured_serial_ids = []
enb_status_by_serial = get_all_enb_status(enb_acs_manager)
enb_statuses = get_all_enb_state()
for serial_id in enb_status_by_serial:
enb_status_dict = enb_status_by_serial[serial_id]._asdict()
enb_status_dict['ip_address'] = enb_acs_manager.get_ip_of_serial(
serial_id,
)
num_ue_connected = enb_statuses.get(enb_status_dict['cell_id'], 0)
enb_status_dict['ues_connected'] = num_ue_connected
serialized = json.dumps(enb_status_dict)
state = State(
type="single_enodeb",
deviceID=serial_id,
value=serialized.encode('utf-8'),
)
configured_serial_ids.append(serial_id)
states.append(state)
s1_states = get_enb_s1_connected_states(
enb_statuses,
configured_serial_ids,
mconfig,
)
states.extend(s1_states)
return states
def get_enb_s1_connected_states(
enb_s1_state_map, configured_serial_ids,
mconfig,
) -> List[State]:
states = []
for enb_id in enb_s1_state_map:
enb = find_enb_by_cell_id(mconfig, enb_id)
if enb and enb.serial_num not in configured_serial_ids:
status = EnodebStatus(
enodeb_configured=False,
gps_latitude='N/A',
gps_longitude='N/A',
enodeb_connected=True,
opstate_enabled=False,
rf_tx_on=False,
rf_tx_desired=False,
gps_connected=False,
ptp_connected=False,
mme_connected=True,
fsm_state='N/A',
cell_id=enb_id,
)
status_dict = status._asdict()
status_dict['ip_address'] = enb.config.ip_address
status_dict['ues_connected'] = enb_s1_state_map.get(enb_id, 0)
serialized = json.dumps(status_dict)
state = State(
type="single_enodeb",
deviceID=enb.serial_num,
value=serialized.encode('utf-8'),
)
states.append(state)
return states
def _empty_enb_status() -> SingleEnodebStatus:
enb_status = SingleEnodebStatus()
enb_status.device_serial = 'N/A'
enb_status.ip_address = 'N/A'
enb_status.connected = '0'
enb_status.configured = '0'
enb_status.opstate_enabled = '0'
enb_status.rf_tx_on = '0'
enb_status.rf_tx_desired = 'N/A'
enb_status.gps_connected = '0'
enb_status.ptp_connected = '0'
enb_status.mme_connected = '0'
enb_status.gps_longitude = '0.0'
enb_status.gps_latitude = '0.0'
enb_status.fsm_state = 'N/A'
return enb_status
def _parse_param_as_bool(
enodeb: EnodebAcsStateMachine,
param_name: ParameterName,
) -> bool:
try:
return _format_as_bool(enodeb.get_parameter(param_name), param_name)
except (KeyError, ConfigurationError):
return False
def _format_as_bool(
param_value: Union[bool, str, int],
param_name: Optional[Union[ParameterName, str]] = None,
) -> bool:
stripped_value = str(param_value).lower().strip()
if stripped_value in {'true', '1', 'enabled'}:
return True
elif stripped_value in {'false', '0', 'disabled', 'InProgress'}:
return False
else:
logger.warning(
'%s parameter not understood (%s)', param_name, param_value,
)
return False
def _get_gps_status_as_bool(enodeb: EnodebAcsStateMachine) -> bool:
try:
if not enodeb.has_parameter(ParameterName.GPS_STATUS):
return False
else:
param = enodeb.get_parameter(ParameterName.GPS_STATUS)
if isinstance(param, bool):
return param
stripped_value = param.lower().strip()
if stripped_value in ['0', '2', 'inprogress']:
return False
elif stripped_value == '1':
return True
else:
logger.warning(
'GPS status parameter not understood (%s)', param,
)
return False
except (KeyError, ConfigurationError, AttributeError):
return False
def _get_and_cache_gps_coords(enodeb: EnodebAcsStateMachine) -> Tuple[
str, str,
]:
lat, lon = '', ''
try:
lat = enodeb.get_parameter(ParameterName.GPS_LAT)
lon = enodeb.get_parameter(ParameterName.GPS_LONG)
if lat != _gps_lat_cached or lon != _gps_lon_cached:
_cache_new_gps_coords(lat, lon)
return lat, lon
except (KeyError, ConfigurationError):
return _get_cached_gps_coords()
except ValueError:
logger.warning('GPS lat/long not understood (%s/%s)', lat, lon)
return '0', '0'
def _get_cached_gps_coords() -> Tuple[str, str]:
global _gps_lat_cached, _gps_lon_cached
if _gps_lat_cached is None or _gps_lon_cached is None:
_gps_lat_cached, _gps_lon_cached = _read_gps_coords_from_file()
return _gps_lat_cached, _gps_lon_cached
def _read_gps_coords_from_file():
try:
with open(CACHED_GPS_COORD_FILE_PATH, encoding="utf-8") as f:
lines = f.readlines()
if len(lines) != 2:
logger.warning(
'Expected to find 2 lines in GPS '
'coordinate file but only found %d',
len(lines),
)
return '0', '0'
return tuple(map(lambda l: l.strip(), lines))
except OSError:
logger.warning('Could not open cached GPS coordinate file')
return '0', '0'
def _cache_new_gps_coords(gps_lat, gps_lon):
global _gps_lat_cached, _gps_lon_cached
_gps_lat_cached, _gps_lon_cached = gps_lat, gps_lon
_write_gps_coords_to_file(gps_lat, gps_lon)
def _write_gps_coords_to_file(gps_lat, gps_lon):
lines = '{lat}\n{lon}'.format(lat=gps_lat, lon=gps_lon)
try:
serialization_utils.write_to_file_atomically(
CACHED_GPS_COORD_FILE_PATH,
lines,
)
except OSError:
pass
def _bool_to_str(b: bool) -> str:
if b is True:
return "1"
return "0"
| true | true |
f7f5b70acdaabea9aaa0d2f5fe59f3eb051b6bfa | 503 | py | Python | home/migrations/0004_auto_20201214_2150.py | Vlad-404/phot-portfolio | ff3fba512645c0781755bf5b6f7cc455f09b3c5b | [
"BSD-Source-Code"
] | 1 | 2021-01-05T15:52:19.000Z | 2021-01-05T15:52:19.000Z | home/migrations/0004_auto_20201214_2150.py | Vlad-404/phot-portfolio | ff3fba512645c0781755bf5b6f7cc455f09b3c5b | [
"BSD-Source-Code"
] | null | null | null | home/migrations/0004_auto_20201214_2150.py | Vlad-404/phot-portfolio | ff3fba512645c0781755bf5b6f7cc455f09b3c5b | [
"BSD-Source-Code"
] | 1 | 2021-01-05T18:44:47.000Z | 2021-01-05T18:44:47.000Z | # Generated by Django 3.1.3 on 2020-12-14 21:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0003_auto_20201207_1420'),
]
operations = [
migrations.AlterModelOptions(
name='categories',
options={'verbose_name_plural': 'Categories'},
),
migrations.AlterModelOptions(
name='socialmedia',
options={'verbose_name_plural': 'Social Media'},
),
]
| 22.863636 | 60 | 0.596421 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0003_auto_20201207_1420'),
]
operations = [
migrations.AlterModelOptions(
name='categories',
options={'verbose_name_plural': 'Categories'},
),
migrations.AlterModelOptions(
name='socialmedia',
options={'verbose_name_plural': 'Social Media'},
),
]
| true | true |
f7f5b7e4ab065bc50dec055f0514aeef65e45998 | 504 | py | Python | tests/dataset_merge/dataset_merge_test.py | nya3jp/rules_contest | e74a9892785912b11bbd975068641e558aa4a623 | [
"MIT"
] | 6 | 2020-09-03T13:10:49.000Z | 2021-03-10T01:13:49.000Z | tests/dataset_merge/dataset_merge_test.py | nya3jp/rules_contest | e74a9892785912b11bbd975068641e558aa4a623 | [
"MIT"
] | 11 | 2020-05-22T09:43:29.000Z | 2021-03-24T10:55:49.000Z | tests/dataset_merge/dataset_merge_test.py | nya3jp/rules_contest | e74a9892785912b11bbd975068641e558aa4a623 | [
"MIT"
] | null | null | null | import unittest
import zipfile
class DatasetMergeTest(unittest.TestCase):
def test_dataset(self):
with zipfile.ZipFile('tests/dataset_merge/dataset.zip') as zf:
self.assertEqual(
sorted(zf.namelist()),
['data1.in', 'data2.in', 'data3.ans', 'data3.in'])
def test_empty(self):
with zipfile.ZipFile('tests/dataset_merge/empty.zip') as zf:
self.assertEqual(zf.namelist(), [])
if __name__ == '__main__':
unittest.main()
| 26.526316 | 70 | 0.621032 | import unittest
import zipfile
class DatasetMergeTest(unittest.TestCase):
def test_dataset(self):
with zipfile.ZipFile('tests/dataset_merge/dataset.zip') as zf:
self.assertEqual(
sorted(zf.namelist()),
['data1.in', 'data2.in', 'data3.ans', 'data3.in'])
def test_empty(self):
with zipfile.ZipFile('tests/dataset_merge/empty.zip') as zf:
self.assertEqual(zf.namelist(), [])
if __name__ == '__main__':
unittest.main()
| true | true |
f7f5bbd8b2efc772a612d7c71766823f2686d797 | 45,421 | py | Python | readthedocs/projects/tests/test_build_tasks.py | joaovitor3/readthedocs.org | ff9938cc58ded092e01ae601fca22fc012c764f4 | [
"MIT"
] | null | null | null | readthedocs/projects/tests/test_build_tasks.py | joaovitor3/readthedocs.org | ff9938cc58ded092e01ae601fca22fc012c764f4 | [
"MIT"
] | null | null | null | readthedocs/projects/tests/test_build_tasks.py | joaovitor3/readthedocs.org | ff9938cc58ded092e01ae601fca22fc012c764f4 | [
"MIT"
] | null | null | null | import os
from unittest import mock
from django.conf import settings
from django.test import TestCase
from django.utils import timezone
import django_dynamic_fixture as fixture
import pytest
from readthedocs.builds.constants import (
EXTERNAL,
BUILD_STATUS_FAILURE,
BUILD_STATE_FINISHED,
BUILD_STATUS_SUCCESS,
)
from readthedocs.builds.models import Build
from readthedocs.config import ConfigError, ALL
from readthedocs.config.config import BuildConfigV2
from readthedocs.doc_builder.exceptions import BuildAppError
from readthedocs.projects.exceptions import RepositoryError
from readthedocs.projects.models import EnvironmentVariable, Project, WebHookEvent
from readthedocs.projects.tasks.builds import UpdateDocsTask, update_docs_task, sync_repository_task
from .mockers import BuildEnvironmentMocker
@pytest.mark.django_db
class BuildEnvironmentBase:
    """Shared fixtures/helpers for build-task tests: a project, a version,
    a build record, and a fully mocked build environment."""
    # NOTE: `load_yaml_config` may be moved to the setup and assigned to self.
    @pytest.fixture(autouse=True)
    def setup(self, requests_mock):
        # Save the reference to query it from inside the test
        self.requests_mock = requests_mock
        self.project = fixture.get(
            Project,
            slug='project',
            enable_epub_build=True,
            enable_pdf_build=True,
        )
        self.version = self.project.versions.get(slug='latest')
        self.build = fixture.get(
            Build,
            version=self.version,
            commit='a1b2c3',
        )
        # Mocks out the Docker/API/VCS layers around the build.
        self.mocker = BuildEnvironmentMocker(
            self.project,
            self.version,
            self.build,
            self.requests_mock,
        )
        self.mocker.start()
        yield
        # tearDown
        self.mocker.stop()
    def _trigger_update_docs_task(self):
        # NOTE: is it possible to replace calling this directly by `trigger_build` instead? :)
        return update_docs_task.delay(
            self.version.pk,
            self.build.pk,
            build_commit=self.build.commit,
        )
    def _config_file(self, config):
        # Wrap a raw dict in a validated v2 build config, as the task expects.
        config = BuildConfigV2(
            {},
            config,
            source_file='readthedocs.yaml',
        )
        config.validate()
        return config
class TestBuildTask(BuildEnvironmentBase):
@pytest.mark.parametrize(
'formats,builders',
(
(['pdf'], ['latex']),
(['htmlzip'], ['readthedocssinglehtmllocalmedia']),
(['epub'], ['epub']),
(['pdf', 'htmlzip', 'epub'], ['latex', 'readthedocssinglehtmllocalmedia', 'epub']),
('all', ['latex', 'readthedocssinglehtmllocalmedia', 'nepub']),
)
)
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
@pytest.mark.skip
def test_build_sphinx_formats(self, load_yaml_config, formats, builders):
load_yaml_config.return_value = self._config_file({
'version': 2,
'formats': formats,
'sphinx': {
'configuration': 'docs/conf.py',
},
})
self._trigger_update_docs_task()
self.mocker.mocks['environment.run'].assert_any_call(
mock.call(
mock.ANY,
'-m',
'sphinx',
'-T',
'-E',
'-b',
'readthedocs',
'-d',
'_build/doctrees',
'-D',
'language=en',
'.',
'_build/html',
cwd=mock.ANY,
bin_path=mock.ANY,
)
)
for builder in builders:
self.mocker.mocks['environment.run'].assert_any_call(
mock.call(
mock.ANY,
'-m',
'sphinx',
'-T',
'-E',
'-b',
builder,
'-d',
'_build/doctrees',
'-D',
'language=en',
'.',
'_build/html',
cwd=mock.ANY,
bin_path=mock.ANY,
)
)
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.build_docs_html')
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.build_docs_class')
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_build_formats_only_html_for_external_versions(self, build_docs_html, build_docs_class, load_yaml_config):
load_yaml_config.return_value = self._config_file({
'version': 2,
'formats': 'all',
})
# Make the version external
self.version.type = EXTERNAL
self.version.save()
self._trigger_update_docs_task()
build_docs_html.assert_called_once() # HTML builder
build_docs_class.assert_not_called() # all the other builders
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.build_docs_html')
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.build_docs_class')
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_build_respects_formats_mkdocs(self, build_docs_html, build_docs_class, load_yaml_config):
load_yaml_config.return_value = self._config_file({
'version': 2,
'mkdocs': {
'configuration': 'mkdocs.yml',
},
'formats': ['epub', 'pdf'],
})
self._trigger_update_docs_task()
build_docs_html.assert_called_once()
build_docs_class.assert_not_called()
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    @pytest.mark.skip()
    # NOTE: find a way to test we are passing all the environment variables to all the commands
    def test_get_env_vars_default(self, load_yaml_config):
        """
        Skipped work-in-progress: intended to check the env vars passed to build commands.

        NOTE(review): the expected ``env`` dict is built (and later updated
        for the conda case) but never asserted against anything -- the test
        currently verifies nothing beyond the task not raising.
        """
        load_yaml_config.return_value = self._config_file({
            'version': 2,
        })
        # A project-level environment variable that should be exposed to commands.
        fixture.get(
            EnvironmentVariable,
            name='TOKEN',
            value='a1b2c3',
            project=self.project,
        )
        env = {
            'NO_COLOR': '1',
            'READTHEDOCS': 'True',
            'READTHEDOCS_VERSION': self.version.slug,
            'READTHEDOCS_PROJECT': self.project.slug,
            'READTHEDOCS_LANGUAGE': self.project.language,
            'BIN_PATH': os.path.join(
                self.project.doc_path,
                'envs',
                self.version.slug,
                'bin',
            ),
            'TOKEN': 'a1b2c3',
        }
        self._trigger_update_docs_task()
        # mock this object to make sure that we are in a conda env
        env.update({
            'CONDA_ENVS_PATH': os.path.join(self.project.doc_path, 'conda'),
            'CONDA_DEFAULT_ENV': self.version.slug,
            'BIN_PATH': os.path.join(
                self.project.doc_path,
                'conda',
                self.version.slug,
                'bin',
            ),
        })
    @mock.patch('readthedocs.projects.tasks.builds.fileify')
    @mock.patch('readthedocs.projects.tasks.builds.build_complete')
    @mock.patch('readthedocs.projects.tasks.builds.send_external_build_status')
    @mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.send_notifications')
    @mock.patch('readthedocs.projects.tasks.builds.clean_build')
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_successful_build(self, load_yaml_config, clean_build, send_notifications, send_external_build_status, build_complete, fileify):
        """
        End-to-end happy path: notifications, signals, API state updates and uploads.

        The request_history assertions are positional: indexes 3..10 must
        match the exact order of API calls the task makes (state transitions,
        config save, version/project PATCHes, final build PATCH).
        """
        load_yaml_config.return_value = self._config_file({
            'version': 2,
            'formats': 'all',
            'sphinx': {
                'configuration': 'docs/conf.py',
            },
        })
        self._trigger_update_docs_task()
        # It has to be called twice, ``before_start`` and ``after_return``
        clean_build.assert_has_calls([
            mock.call(mock.ANY),  # the argument is an APIVersion
            mock.call(mock.ANY)
        ])
        # TODO: mock `build_tasks.send_build_notifications` instead and add
        # another tests to check that they are not sent for EXTERNAL versions
        send_notifications.assert_called_once_with(
            self.version.pk,
            self.build.pk,
            event=WebHookEvent.BUILD_PASSED,
        )
        send_external_build_status.assert_called_once_with(
            version_type=self.version.type,
            build_pk=self.build.pk,
            commit=self.build.commit,
            status=BUILD_STATUS_SUCCESS,
        )
        build_complete.send.assert_called_once_with(
            sender=Build,
            build=mock.ANY,
        )
        # Search indexing is kicked off asynchronously after a passing build.
        fileify.delay.assert_called_once_with(
            version_pk=self.version.pk,
            commit=self.build.commit,
            build=self.build.pk,
            search_ranking=mock.ANY,
            search_ignore=mock.ANY,
        )
        # TODO: assert the verb and the path for each API call as well
        # Update build state: clonning
        assert self.requests_mock.request_history[3].json() == {
            'id': 1,
            'state': 'cloning',
            'commit': 'a1b2c3',
            'error': '',
            'builder': mock.ANY,
        }
        # Save config object data (using default values)
        assert self.requests_mock.request_history[4].json() == {
            'config': {
                'version': '2',
                'formats': ['htmlzip', 'pdf', 'epub'],
                'python': {
                    'version': '3',
                    'install': [],
                    'use_system_site_packages': False,
                },
                'conda': None,
                'build': {
                    'image': 'readthedocs/build:latest',
                    'apt_packages': [],
                },
                'doctype': 'sphinx',
                'sphinx': {
                    'builder': 'sphinx',
                    'configuration': 'docs/conf.py',
                    'fail_on_warning': False,
                },
                'mkdocs': None,
                'submodules': {
                    'include': [],
                    'exclude': 'all',
                    'recursive': False,
                },
                'search': {
                    'ranking': {},
                    'ignore': [
                        'search.html',
                        'search/index.html',
                        '404.html',
                        '404/index.html',
                    ],
                },
            },
        }
        # Update build state: installing
        assert self.requests_mock.request_history[5].json() == {
            'id': 1,
            'state': 'installing',
            'commit': 'a1b2c3',
            'config': mock.ANY,
            'builder': mock.ANY,
            'error': '',
        }
        # Update build state: building
        assert self.requests_mock.request_history[6].json() == {
            'id': 1,
            'state': 'building',
            'commit': 'a1b2c3',
            'config': mock.ANY,
            'builder': mock.ANY,
            'error': '',
        }
        # Update build state: uploading
        assert self.requests_mock.request_history[7].json() == {
            'id': 1,
            'state': 'uploading',
            'commit': 'a1b2c3',
            'config': mock.ANY,
            'builder': mock.ANY,
            'error': '',
        }
        # Update version state
        assert self.requests_mock.request_history[8]._request.method == 'PATCH'
        assert self.requests_mock.request_history[8].path == '/api/v2/version/1/'
        assert self.requests_mock.request_history[8].json() == {
            'built': True,
            'documentation_type': 'sphinx',
            'has_pdf': True,
            'has_epub': True,
            'has_htmlzip': True,
        }
        # Set project has valid clone
        assert self.requests_mock.request_history[9]._request.method == 'PATCH'
        assert self.requests_mock.request_history[9].path == '/api/v2/project/1/'
        assert self.requests_mock.request_history[9].json() == {'has_valid_clone': True}
        # Update build state: finished, success and builder
        assert self.requests_mock.request_history[10].json() == {
            'id': 1,
            'state': 'finished',
            'commit': 'a1b2c3',
            'config': mock.ANY,
            'builder': mock.ANY,
            'length': mock.ANY,
            'success': True,
            'error': '',
        }
        # Every produced artifact is synced to media storage.
        self.mocker.mocks['build_media_storage'].sync_directory.assert_has_calls([
            mock.call(mock.ANY, 'html/project/latest'),
            mock.call(mock.ANY, 'json/project/latest'),
            mock.call(mock.ANY, 'htmlzip/project/latest'),
            mock.call(mock.ANY, 'pdf/project/latest'),
            mock.call(mock.ANY, 'epub/project/latest'),
        ])
        # TODO: find a directory to remove here :)
        # build_media_storage.delete_directory
    @mock.patch('readthedocs.projects.tasks.builds.build_complete')
    @mock.patch('readthedocs.projects.tasks.builds.send_external_build_status')
    @mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.execute')
    @mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.send_notifications')
    @mock.patch('readthedocs.projects.tasks.builds.clean_build')
    def test_failed_build(self, clean_build, send_notifications, execute, send_external_build_status, build_complete):
        """
        An unexpected exception still sends failure notifications and marks the build failed.

        Mocks ``UpdateDocsTask.execute`` to raise, then checks the failure
        path: cleanup, BUILD_FAILED webhook, external status, signal, and a
        final PATCH with the generic ``BuildAppError`` message.
        """
        # Force an exception from the execution of the task. We don't really
        # care "where" it was raised: setup, build, syncing directories, etc
        execute.side_effect = Exception('Force and exception here.')
        self._trigger_update_docs_task()
        # It has to be called twice, ``before_start`` and ``after_return``
        clean_build.assert_has_calls([
            mock.call(mock.ANY),  # the argument is an APIVersion
            mock.call(mock.ANY)
        ])
        send_notifications.assert_called_once_with(
            self.version.pk,
            self.build.pk,
            event=WebHookEvent.BUILD_FAILED,
        )
        send_external_build_status.assert_called_once_with(
            version_type=self.version.type,
            build_pk=self.build.pk,
            commit=self.build.commit,
            status=BUILD_STATUS_FAILURE,
        )
        build_complete.send.assert_called_once_with(
            sender=Build,
            build=mock.ANY,
        )
        # Test we are updating the DB by calling the API with the updated build object
        api_request = self.requests_mock.request_history[-1]  # the last one should be the PATCH for the build
        assert api_request._request.method == 'PATCH'
        assert api_request.json() == {
            'builder': mock.ANY,
            'commit': self.build.commit,
            'error': BuildAppError.GENERIC_WITH_BUILD_ID.format(build_id=self.build.pk),
            'id': self.build.pk,
            'length': mock.ANY,
            'state': 'finished',
            'success': False,
        }
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_build_commands_executed(self, load_yaml_config):
        """
        Golden-path command trace: git checkout, virtualenv setup, and all Sphinx builders.

        Asserts the exact ordered sequence of shell commands a default Sphinx
        project with ``formats: all`` produces -- any change to the build
        pipeline's commands must update this list.
        """
        load_yaml_config.return_value = self._config_file({
            'version': 2,
            'formats': 'all',
            'sphinx': {
                'configuration': 'docs/conf.py',
            },
        })
        self._trigger_update_docs_task()
        # VCS phase: shallow clone, checkout the build commit, clean the tree.
        self.mocker.mocks['git.Backend.run'].assert_has_calls([
            mock.call('git', 'clone', '--no-single-branch', '--depth', '50', '', '.'),
            mock.call('git', 'checkout', '--force', 'a1b2c3'),
            mock.call('git', 'clean', '-d', '-f', '-f'),
        ])
        self.mocker.mocks['environment.run'].assert_has_calls([
            mock.call(
                'python3.7',
                '-mvirtualenv',
                mock.ANY,
                bin_path=None,
                cwd=None,
            ),
            mock.call(
                mock.ANY,
                '-m',
                'pip',
                'install',
                '--upgrade',
                '--no-cache-dir',
                'pip',
                'setuptools<58.3.0',
                bin_path=mock.ANY,
                cwd=mock.ANY,
            ),
            mock.call(
                mock.ANY,
                '-m',
                'pip',
                'install',
                '--upgrade',
                '--no-cache-dir',
                'mock==1.0.1',
                'pillow==5.4.1',
                'alabaster>=0.7,<0.8,!=0.7.5',
                'commonmark==0.8.1',
                'recommonmark==0.5.0',
                'sphinx<2',
                'sphinx-rtd-theme<0.5',
                'readthedocs-sphinx-ext<2.2',
                bin_path=mock.ANY,
                cwd=mock.ANY,
            ),
            # FIXME: shouldn't this one be present here? It's not now because
            # we are mocking `append_conf` which is the one that triggers this
            # command.
            #
            # mock.call(
            #     'cat',
            #     'docs/conf.py',
            #     cwd=mock.ANY,
            # ),
            mock.call(
                mock.ANY,
                '-m',
                'sphinx',
                '-T',
                '-E',
                '-b',
                'readthedocs',
                '-d',
                '_build/doctrees',
                '-D',
                'language=en',
                '.',
                '_build/html',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            ),
            mock.call(
                mock.ANY,
                '-m',
                'sphinx',
                '-T',
                '-E',
                '-b',
                'readthedocssinglehtmllocalmedia',
                '-d',
                '_build/doctrees',
                '-D',
                'language=en',
                '.',
                '_build/localmedia',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            ),
            mock.call(
                mock.ANY,
                '-m',
                'sphinx',
                '-b',
                'latex',
                '-D',
                'language=en',
                '-d',
                '_build/doctrees',
                '.',
                '_build/latex',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            ),
            # Version probe used to decide how to drive the LaTeX -> PDF step.
            mock.call(
                mock.ANY,
                '-c',
                '"import sys; import sphinx; sys.exit(0 if sphinx.version_info >= (1, 6, 1) else 1)"',
                bin_path=mock.ANY,
                cwd=mock.ANY,
                escape_command=False,
                shell=True,
                record=False,
            ),
            mock.call(
                'mv',
                '-f',
                'output.file',
                # TODO: take a look at
                # https://callee.readthedocs.io/en/latest/reference/strings.html#callee.strings.EndsWith
                # to match `project.pdf`
                mock.ANY,
                cwd=mock.ANY,
            ),
            mock.call(
                mock.ANY,
                '-m',
                'sphinx',
                '-T',
                '-E',
                '-b',
                'epub',
                '-d',
                '_build/doctrees',
                '-D',
                'language=en',
                '.',
                '_build/epub',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            ),
            mock.call(
                'mv',
                '-f',
                'output.file',
                # TODO: take a look at
                # https://callee.readthedocs.io/en/latest/reference/strings.html#callee.strings.EndsWith
                # to match `project.epub`
                mock.ANY,
                cwd=mock.ANY,
            ),
            # FIXME: I think we are hitting this issue here:
            # https://github.com/pytest-dev/pytest-mock/issues/234
        ])
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_use_config_file(self, load_yaml_config):
        """A build reads the project's YAML config file exactly once."""
        self._trigger_update_docs_task()
        load_yaml_config.assert_called_once()
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_install_apt_packages(self, load_yaml_config):
config = BuildConfigV2(
{},
{
'version': 2,
'build': {
'apt_packages': [
'clangd',
'cmatrix',
],
},
},
source_file='readthedocs.yml',
)
config.validate()
load_yaml_config.return_value = config
self._trigger_update_docs_task()
self.mocker.mocks['environment.run'].assert_has_calls([
mock.call(
'apt-get',
'update',
'--assume-yes',
'--quiet',
user='root:root',
),
mock.call(
'apt-get',
'install',
'--assume-yes',
'--quiet',
'--',
'clangd',
'cmatrix',
user='root:root',
)
])
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_build_tools(self, load_yaml_config):
        """
        ``build.tools`` installs each requested tool via asdf (install/global/reshim).

        The exact patch versions are resolved through
        ``settings.RTD_DOCKER_BUILD_SETTINGS``, so the expectations below
        track the settings rather than hard-coding versions.
        """
        config = BuildConfigV2(
            {},
            {
                'version': 2,
                'build': {
                    'os': 'ubuntu-20.04',
                    'tools': {
                        'python': '3.10',
                        'nodejs': '16',
                        'rust': '1.55',
                        'golang': '1.17',
                    },
                },
            },
            source_file='readthedocs.yml',
        )
        config.validate()
        load_yaml_config.return_value = config
        self._trigger_update_docs_task()
        python_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['python']['3.10']
        nodejs_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['nodejs']['16']
        rust_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['rust']['1.55']
        golang_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['golang']['1.17']
        self.mocker.mocks['environment.run'].assert_has_calls([
            mock.call('asdf', 'install', 'python', python_version),
            mock.call('asdf', 'global', 'python', python_version),
            mock.call('asdf', 'reshim', 'python', record=False),
            # Python additionally bootstraps virtualenv/setuptools.
            mock.call('python', '-mpip', 'install', '-U', 'virtualenv', 'setuptools<58.3.0'),
            mock.call('asdf', 'install', 'nodejs', nodejs_version),
            mock.call('asdf', 'global', 'nodejs', nodejs_version),
            mock.call('asdf', 'reshim', 'nodejs', record=False),
            mock.call('asdf', 'install', 'rust', rust_version),
            mock.call('asdf', 'global', 'rust', rust_version),
            mock.call('asdf', 'reshim', 'rust', record=False),
            mock.call('asdf', 'install', 'golang', golang_version),
            mock.call('asdf', 'global', 'golang', golang_version),
            mock.call('asdf', 'reshim', 'golang', record=False),
            mock.ANY,
        ])
@mock.patch('readthedocs.doc_builder.python_environments.tarfile')
@mock.patch('readthedocs.doc_builder.python_environments.build_tools_storage')
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_build_tools_cached(self, load_yaml_config, build_tools_storage, tarfile):
config = BuildConfigV2(
{},
{
'version': 2,
'build': {
'os': 'ubuntu-20.04',
'tools': {
'python': '3.10',
'nodejs': '16',
'rust': '1.55',
'golang': '1.17',
},
},
},
source_file='readthedocs.yml',
)
config.validate()
load_yaml_config.return_value = config
build_tools_storage.open.return_value = b''
build_tools_storage.exists.return_value = True
tarfile.open.return_value.__enter__.return_value.extract_all.return_value = None
self._trigger_update_docs_task()
python_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['python']['3.10']
nodejs_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['nodejs']['16']
rust_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['rust']['1.55']
golang_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['golang']['1.17']
self.mocker.mocks['environment.run'].assert_has_calls([
mock.call(
'mv',
# Use mock.ANY here because path differs when ran locally
# and on CircleCI
mock.ANY,
f'/home/docs/.asdf/installs/python/{python_version}',
record=False,
),
mock.call('asdf', 'global', 'python', python_version),
mock.call('asdf', 'reshim', 'python', record=False),
mock.call(
'mv',
mock.ANY,
f'/home/docs/.asdf/installs/nodejs/{nodejs_version}',
record=False,
),
mock.call('asdf', 'global', 'nodejs', nodejs_version),
mock.call('asdf', 'reshim', 'nodejs', record=False),
mock.call(
'mv',
mock.ANY,
f'/home/docs/.asdf/installs/rust/{rust_version}',
record=False,
),
mock.call('asdf', 'global', 'rust', rust_version),
mock.call('asdf', 'reshim', 'rust', record=False),
mock.call(
'mv',
mock.ANY,
f'/home/docs/.asdf/installs/golang/{golang_version}',
record=False,
),
mock.call('asdf', 'global', 'golang', golang_version),
mock.call('asdf', 'reshim', 'golang', record=False),
mock.ANY,
])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_requirements_from_config_file_installed(self, load_yaml_config):
load_yaml_config.return_value = self._config_file(
{
'version': 2,
'python': {
'install': [{
'requirements': 'requirements.txt',
}],
},
},
)
self._trigger_update_docs_task()
self.mocker.mocks['environment.run'].assert_has_calls([
mock.call(
mock.ANY,
'-m',
'pip',
'install',
'--exists-action=w',
'--no-cache-dir',
'-r',
'requirements.txt',
cwd=mock.ANY,
bin_path=mock.ANY,
),
])
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_conda_config_calls_conda_command(self, load_yaml_config):
        """
        A ``conda.environment`` config creates the env, installs core packages, then pip extras.
        """
        load_yaml_config.return_value = self._config_file(
            {
                'version': 2,
                'conda': {
                    'environment': 'environment.yaml',
                },
            },
        )
        self._trigger_update_docs_task()
        # TODO: check we are saving the `conda.environment` in the config file
        # via the API call
        self.mocker.mocks['environment.run'].assert_has_calls([
            mock.call(
                'conda',
                'env',
                'create',
                '--quiet',
                '--name',
                self.version.slug,
                '--file',
                'environment.yaml',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            ),
            # Core doc-building packages go in through conda...
            mock.call(
                'conda',
                'install',
                '--yes',
                '--quiet',
                '--name',
                self.version.slug,
                'mock',
                'pillow',
                'sphinx',
                'sphinx_rtd_theme',
                cwd=mock.ANY,
            ),
            # ...while the RTD-specific extensions are pip-installed.
            mock.call(
                mock.ANY,
                '-m',
                'pip',
                'install',
                '-U',
                '--no-cache-dir',
                'recommonmark',
                'readthedocs-sphinx-ext',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            ),
        ])
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_python_mamba_commands(self, load_yaml_config):
        """
        Selecting the ``mambaforge`` Python tool drives env creation with ``mamba`` not ``conda``.
        """
        load_yaml_config.return_value = self._config_file(
            {
                'version': 2,
                'build': {
                    'os': 'ubuntu-20.04',
                    'tools': {
                        'python': 'mambaforge-4.10',
                    },
                },
                'conda': {
                    'environment': 'environment.yaml',
                },
            },
        )
        self._trigger_update_docs_task()
        # NOTE(review): 'mambaforge-4.10.3-10' is presumably the resolved
        # version from settings.RTD_DOCKER_BUILD_SETTINGS -- confirm.
        self.mocker.mocks['environment.run'].assert_has_calls([
            mock.call('asdf', 'install', 'python', 'mambaforge-4.10.3-10'),
            mock.call('asdf', 'global', 'python', 'mambaforge-4.10.3-10'),
            mock.call('asdf', 'reshim', 'python', record=False),
            mock.call('mamba', 'env', 'create', '--quiet', '--name', 'latest', '--file', 'environment.yaml', bin_path=None, cwd=mock.ANY),
            mock.call('mamba', 'install', '--yes', '--quiet', '--name', 'latest', 'mock', 'pillow', 'sphinx', 'sphinx_rtd_theme', cwd=mock.ANY),
        ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_sphinx_fail_on_warning(self, load_yaml_config):
load_yaml_config.return_value = self._config_file(
{
'version': 2,
'sphinx': {
'configuration': 'docs/conf.py',
'fail_on_warning': True,
},
},
)
self._trigger_update_docs_task()
self.mocker.mocks['environment.run'].assert_has_calls([
mock.call(
mock.ANY,
'-m',
'sphinx',
'-T',
'-E',
'-W', # fail on warning flag
'--keep-going', # fail on warning flag
'-b',
'readthedocs',
'-d',
'_build/doctrees',
'-D',
'language=en',
'.',
'_build/html',
cwd=mock.ANY,
bin_path=mock.ANY,
),
])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_mkdocs_fail_on_warning(self, load_yaml_config):
load_yaml_config.return_value = self._config_file(
{
'version': 2,
'mkdocs': {
'configuration': 'docs/mkdocs.yaml',
'fail_on_warning': True,
},
},
)
self._trigger_update_docs_task()
self.mocker.mocks['environment.run'].assert_has_calls([
mock.call(
mock.ANY,
'-m',
'mkdocs',
'build',
'--clean',
'--site-dir',
'_build/html',
'--config-file',
'docs/mkdocs.yaml',
'--strict', # fail on warning flag
cwd=mock.ANY,
bin_path=mock.ANY,
)
])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_system_site_packages(self, load_yaml_config):
load_yaml_config.return_value = self._config_file(
{
'version': 2,
'python': {
'system_packages': True,
},
},
)
self._trigger_update_docs_task()
self.mocker.mocks['environment.run'].assert_has_calls([
mock.call(
'python3.7',
'-mvirtualenv',
'--system-site-packages', # expected flag
mock.ANY,
bin_path=None,
cwd=None,
),
])
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_system_site_packages_project_overrides(self, load_yaml_config):
        """
        ``Project.use_system_packages`` alone does not add ``--system-site-packages``.

        The config file wins: without ``system_packages: true`` in the YAML,
        the DB-level flag is ignored when creating the virtualenv.
        """
        load_yaml_config.return_value = self._config_file(
            {
                'version': 2,
                # Do not define `system_packages: True` in the config file.
                'python': {},
            },
        )
        # Override the setting in the Project object
        self.project.use_system_packages = True
        self.project.save()
        self._trigger_update_docs_task()
        self.mocker.mocks['environment.run'].assert_has_calls([
            mock.call(
                'python3.7',
                '-mvirtualenv',
                # we don't expect this flag to be here
                # '--system-site-packages'
                mock.ANY,
                bin_path=None,
                cwd=None,
            ),
        ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_python_install_setuptools(self, load_yaml_config):
load_yaml_config.return_value = self._config_file(
{
'version': 2,
'python': {
'install': [{
'path': '.',
'method': 'setuptools',
}],
},
},
)
self._trigger_update_docs_task()
self.mocker.mocks['environment.run'].assert_has_calls([
mock.call(
mock.ANY,
'./setup.py',
'install',
'--force',
cwd=mock.ANY,
bin_path=mock.ANY,
)
])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_python_install_pip(self, load_yaml_config):
load_yaml_config.return_value = self._config_file(
{
'version': 2,
'python': {
'install': [{
'path': '.',
'method': 'pip',
}],
},
},
)
self._trigger_update_docs_task()
self.mocker.mocks['environment.run'].assert_has_calls([
mock.call(
mock.ANY,
'-m',
'pip',
'install',
'--upgrade',
'--upgrade-strategy',
'eager',
'--no-cache-dir',
'.',
cwd=mock.ANY,
bin_path=mock.ANY,
)
])
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_python_install_pip_extras(self, load_yaml_config):
        """
        ``extra_requirements`` are appended to the pip target as ``.[extras]``.
        """
        # FIXME: the test passes but in the logs there is an error related to
        # `backends/sphinx.py` not finding a file.
        #
        # TypeError('expected str, bytes or os.PathLike object, not NoneType')
        load_yaml_config.return_value = self._config_file(
            {
                'version': 2,
                'python': {
                    'install': [{
                        'path': '.',
                        'method': 'pip',
                        'extra_requirements': ['docs'],
                    }],
                },
            },
        )
        self._trigger_update_docs_task()
        self.mocker.mocks['environment.run'].assert_has_calls([
            mock.call(
                mock.ANY,
                '-m',
                'pip',
                'install',
                '--upgrade',
                '--upgrade-strategy',
                'eager',
                '--no-cache-dir',
                '.[docs]',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            )
        ])
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_python_install_pip_several_options(self, load_yaml_config):
        """
        Multiple ``python.install`` entries run in declaration order, each with its method.
        """
        load_yaml_config.return_value = self._config_file(
            {
                'version': 2,
                'python': {
                    'install': [
                        {
                            'path': '.',
                            'method': 'pip',
                            'extra_requirements': ['docs'],
                        },
                        {
                            'path': 'two',
                            'method': 'setuptools',
                        },
                        {
                            'requirements': 'three.txt',
                        },
                    ],
                },
            },
        )
        self._trigger_update_docs_task()
        self.mocker.mocks['environment.run'].assert_has_calls([
            # 1) pip install of `.` with the `docs` extra
            mock.call(
                mock.ANY,
                '-m',
                'pip',
                'install',
                '--upgrade',
                '--upgrade-strategy',
                'eager',
                '--no-cache-dir',
                '.[docs]',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            ),
            # 2) setuptools install of `two/`
            mock.call(
                mock.ANY,
                'two/setup.py',
                'install',
                '--force',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            ),
            # 3) requirements file install
            mock.call(
                mock.ANY,
                '-m',
                'pip',
                'install',
                '--exists-action=w',
                '--no-cache-dir',
                '-r',
                'three.txt',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            ),
        ])
    @pytest.mark.parametrize(
        'value,expected', [
            (ALL, ['one', 'two', 'three']),
            (['one', 'two'], ['one', 'two']),
        ],
    )
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_submodules_include(self, load_yaml_config, value, expected):
        """
        ``submodules.include`` (explicit list or ``all``) updates exactly those submodules.

        NOTE(review): the names 'one'/'two'/'three' presumably come from the
        submodules defined by BuildEnvironmentMocker's test repo -- confirm.
        """
        load_yaml_config.return_value = self._config_file(
            {
                'version': 2,
                'submodules': {
                    'include': value,
                },
            },
        )
        self._trigger_update_docs_task()
        self.mocker.mocks['git.Backend.run'].assert_has_calls([
            mock.call('git', 'submodule', 'sync'),
            mock.call('git', 'submodule', 'update', '--init', '--force', *expected),
        ])
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_submodules_exclude(self, load_yaml_config):
        """
        ``submodules.exclude`` updates the remaining submodules, here recursively.

        NOTE(review): 'two' and 'three' are presumably the non-excluded
        submodules of the mocked test repo -- confirm.
        """
        load_yaml_config.return_value = self._config_file(
            {
                'version': 2,
                'submodules': {
                    'exclude': ['one'],
                    'recursive': True
                },
            },
        )
        self._trigger_update_docs_task()
        self.mocker.mocks['git.Backend.run'].assert_has_calls([
            mock.call('git', 'submodule', 'sync'),
            mock.call('git', 'submodule', 'update', '--init', '--force', '--recursive', 'two', 'three'),
        ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_submodules_exclude_all(self, load_yaml_config):
load_yaml_config.return_value = self._config_file(
{
'version': 2,
'submodules': {
'exclude': ALL,
'recursive': True
},
},
)
self._trigger_update_docs_task()
# TODO: how do we do a assert_not_has_calls?
# mock.call('git', 'submodule', 'sync'),
# mock.call('git', 'submodule', 'update', '--init', '--force', 'one', 'two', 'three'),
for call in self.mocker.mocks['git.Backend.run'].mock_calls:
if 'submodule' in call.args:
assert False, 'git submodule command found'
    @pytest.mark.parametrize(
        'value,command',
        [
            ('html', 'readthedocs'),
            ('htmldir', 'readthedocsdirhtml'),
            ('dirhtml', 'readthedocsdirhtml'),
            ('singlehtml', 'readthedocssinglehtml'),
        ],
    )
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_sphinx_builder(self, load_yaml_config, value, command):
        """
        Each ``sphinx.builder`` value maps to its RTD-specific Sphinx builder name.
        """
        load_yaml_config.return_value = self._config_file(
            {
                'version': 2,
                'sphinx': {
                    'builder': value,
                    'configuration': 'docs/conf.py',
                },
            },
        )
        self._trigger_update_docs_task()
        self.mocker.mocks['environment.run'].assert_has_calls([
            mock.call(
                mock.ANY,
                '-m',
                'sphinx',
                '-T',
                '-E',
                '-b',
                command,
                '-d',
                '_build/doctrees',
                '-D',
                'language=en',
                '.',
                '_build/html',
                cwd=mock.ANY,
                bin_path=mock.ANY,
            ),
        ])
class TestBuildTaskExceptionHandler(BuildEnvironmentBase):
    """Known, user-facing build errors end up in the Build object via the API."""
    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_config_file_exception(self, load_yaml_config):
        """A ConfigError surfaces its message to the user in the final build PATCH."""
        load_yaml_config.side_effect = ConfigError(
            code='invalid',
            message='Invalid version in config file.'
        )
        self._trigger_update_docs_task()
        # This is a known exception. We hit the API saving the correct error
        # in the Build object. In this case, the "error message" coming from
        # the exception will be shown to the user
        assert self.requests_mock.request_history[-1]._request.method == 'PATCH'
        assert self.requests_mock.request_history[-1].path == '/api/v2/build/1/'
        assert self.requests_mock.request_history[-1].json() == {
            'id': 1,
            'state': 'finished',
            'commit': 'a1b2c3',
            'error': "Problem in your project's configuration. Invalid version in config file.",
            'success': False,
            'builder': mock.ANY,
            'length': 0,
        }
class TestSyncRepositoryTask(BuildEnvironmentBase):
    """Tests for the lightweight repository-sync Celery task."""
    def _trigger_sync_repository_task(self):
        """Run ``sync_repository_task`` for the fixture version."""
        sync_repository_task.delay(self.version.pk)
    @mock.patch('readthedocs.projects.tasks.builds.clean_build')
    def test_clean_build_after_sync_repository(self, clean_build):
        """The build directory is cleaned after a successful sync."""
        self._trigger_sync_repository_task()
        clean_build.assert_called_once()
    @mock.patch('readthedocs.projects.tasks.builds.SyncRepositoryTask.execute')
    @mock.patch('readthedocs.projects.tasks.builds.clean_build')
    def test_clean_build_after_failure_in_sync_repository(self, clean_build, execute):
        """The build directory is cleaned even when the sync raises."""
        execute.side_effect = Exception('Something weird happen')
        self._trigger_sync_repository_task()
        clean_build.assert_called_once()
    @pytest.mark.parametrize(
        'verbose_name',
        [
            'stable',
            'latest',
        ],
    )
    @mock.patch('readthedocs.projects.tasks.builds.SyncRepositoryTask.on_failure')
    def test_check_duplicate_reserved_version_latest(self, on_failure, verbose_name):
        """
        A user branch/tag named like a reserved version fails the sync with RepositoryError.
        """
        # `repository.tags` and `repository.branch` both will return a tag/branch named `latest/stable`
        with mock.patch(
            'readthedocs.vcs_support.backends.git.Backend.branches',
            new_callable=mock.PropertyMock,
            return_value=[
                mock.MagicMock(identifier='a1b2c3', verbose_name=verbose_name),
            ],
        ):
            with mock.patch(
                'readthedocs.vcs_support.backends.git.Backend.tags',
                new_callable=mock.PropertyMock,
                return_value=[
                    mock.MagicMock(identifier='a1b2c3', verbose_name=verbose_name),
                ],
            ):
                self._trigger_sync_repository_task()
        on_failure.assert_called_once_with(
            # This argument is the exception we are interested in, but I don't
            # know how to assert it here. It's checked in the following assert.
            mock.ANY,
            mock.ANY,
            [self.version.pk],
            {},
            mock.ANY,
        )
        exception = on_failure.call_args[0][0]
        # IDIOM FIX: assert the boolean directly instead of comparing `== True`.
        assert isinstance(exception, RepositoryError)
        assert exception.message == RepositoryError.DUPLICATED_RESERVED_VERSIONS
| 33.54579 | 144 | 0.487946 | import os
from unittest import mock
from django.conf import settings
from django.test import TestCase
from django.utils import timezone
import django_dynamic_fixture as fixture
import pytest
from readthedocs.builds.constants import (
EXTERNAL,
BUILD_STATUS_FAILURE,
BUILD_STATE_FINISHED,
BUILD_STATUS_SUCCESS,
)
from readthedocs.builds.models import Build
from readthedocs.config import ConfigError, ALL
from readthedocs.config.config import BuildConfigV2
from readthedocs.doc_builder.exceptions import BuildAppError
from readthedocs.projects.exceptions import RepositoryError
from readthedocs.projects.models import EnvironmentVariable, Project, WebHookEvent
from readthedocs.projects.tasks.builds import UpdateDocsTask, update_docs_task, sync_repository_task
from .mockers import BuildEnvironmentMocker
@pytest.mark.django_db
class BuildEnvironmentBase:
@pytest.fixture(autouse=True)
def setup(self, requests_mock):
self.requests_mock = requests_mock
self.project = fixture.get(
Project,
slug='project',
enable_epub_build=True,
enable_pdf_build=True,
)
self.version = self.project.versions.get(slug='latest')
self.build = fixture.get(
Build,
version=self.version,
commit='a1b2c3',
)
self.mocker = BuildEnvironmentMocker(
self.project,
self.version,
self.build,
self.requests_mock,
)
self.mocker.start()
yield
self.mocker.stop()
def _trigger_update_docs_task(self):
return update_docs_task.delay(
self.version.pk,
self.build.pk,
build_commit=self.build.commit,
)
def _config_file(self, config):
config = BuildConfigV2(
{},
config,
source_file='readthedocs.yaml',
)
config.validate()
return config
class TestBuildTask(BuildEnvironmentBase):
@pytest.mark.parametrize(
'formats,builders',
(
(['pdf'], ['latex']),
(['htmlzip'], ['readthedocssinglehtmllocalmedia']),
(['epub'], ['epub']),
(['pdf', 'htmlzip', 'epub'], ['latex', 'readthedocssinglehtmllocalmedia', 'epub']),
('all', ['latex', 'readthedocssinglehtmllocalmedia', 'nepub']),
)
)
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
@pytest.mark.skip
def test_build_sphinx_formats(self, load_yaml_config, formats, builders):
load_yaml_config.return_value = self._config_file({
'version': 2,
'formats': formats,
'sphinx': {
'configuration': 'docs/conf.py',
},
})
self._trigger_update_docs_task()
self.mocker.mocks['environment.run'].assert_any_call(
mock.call(
mock.ANY,
'-m',
'sphinx',
'-T',
'-E',
'-b',
'readthedocs',
'-d',
'_build/doctrees',
'-D',
'language=en',
'.',
'_build/html',
cwd=mock.ANY,
bin_path=mock.ANY,
)
)
for builder in builders:
self.mocker.mocks['environment.run'].assert_any_call(
mock.call(
mock.ANY,
'-m',
'sphinx',
'-T',
'-E',
'-b',
builder,
'-d',
'_build/doctrees',
'-D',
'language=en',
'.',
'_build/html',
cwd=mock.ANY,
bin_path=mock.ANY,
)
)
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.build_docs_html')
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.build_docs_class')
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_build_formats_only_html_for_external_versions(self, build_docs_html, build_docs_class, load_yaml_config):
load_yaml_config.return_value = self._config_file({
'version': 2,
'formats': 'all',
})
self.version.type = EXTERNAL
self.version.save()
self._trigger_update_docs_task()
build_docs_html.assert_called_once()
build_docs_class.assert_not_called()
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.build_docs_html')
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.build_docs_class')
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_build_respects_formats_mkdocs(self, build_docs_html, build_docs_class, load_yaml_config):
load_yaml_config.return_value = self._config_file({
'version': 2,
'mkdocs': {
'configuration': 'mkdocs.yml',
},
'formats': ['epub', 'pdf'],
})
self._trigger_update_docs_task()
build_docs_html.assert_called_once()
build_docs_class.assert_not_called()
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
@pytest.mark.skip()
def test_get_env_vars_default(self, load_yaml_config):
    """Build the environment variables expected for a default build.

    NOTE(review): this test is skipped and currently contains no
    assertions -- ``env`` is built and then updated for the conda
    layout, but never compared against anything.  Needs assertions
    before un-skipping.
    """
    load_yaml_config.return_value = self._config_file({
        'version': 2,
    })

    # Project-level environment variable that must be exposed to builds.
    fixture.get(
        EnvironmentVariable,
        name='TOKEN',
        value='a1b2c3',
        project=self.project,
    )

    # Expected environment for a virtualenv-based build.
    env = {
        'NO_COLOR': '1',
        'READTHEDOCS': 'True',
        'READTHEDOCS_VERSION': self.version.slug,
        'READTHEDOCS_PROJECT': self.project.slug,
        'READTHEDOCS_LANGUAGE': self.project.language,
        'BIN_PATH': os.path.join(
            self.project.doc_path,
            'envs',
            self.version.slug,
            'bin',
        ),
        'TOKEN': 'a1b2c3',
    }

    self._trigger_update_docs_task()

    # Conda-based builds use a different env layout (overrides BIN_PATH).
    env.update({
        'CONDA_ENVS_PATH': os.path.join(self.project.doc_path, 'conda'),
        'CONDA_DEFAULT_ENV': self.version.slug,
        'BIN_PATH': os.path.join(
            self.project.doc_path,
            'conda',
            self.version.slug,
            'bin',
        ),
    })
@mock.patch('readthedocs.projects.tasks.builds.fileify')
@mock.patch('readthedocs.projects.tasks.builds.build_complete')
@mock.patch('readthedocs.projects.tasks.builds.send_external_build_status')
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.send_notifications')
@mock.patch('readthedocs.projects.tasks.builds.clean_build')
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_successful_build(self, load_yaml_config, clean_build, send_notifications, send_external_build_status, build_complete, fileify):
    """Happy path: a successful Sphinx build of all formats.

    Checks notifications, signals, every build-state API update and the
    syncing of all produced artifacts to storage.
    """
    load_yaml_config.return_value = self._config_file({
        'version': 2,
        'formats': 'all',
        'sphinx': {
            'configuration': 'docs/conf.py',
        },
    })

    self._trigger_update_docs_task()

    # Called twice: once in ``before_start`` and once in ``after_return``.
    clean_build.assert_has_calls([
        mock.call(mock.ANY),
        mock.call(mock.ANY)
    ])

    send_notifications.assert_called_once_with(
        self.version.pk,
        self.build.pk,
        event=WebHookEvent.BUILD_PASSED,
    )

    send_external_build_status.assert_called_once_with(
        version_type=self.version.type,
        build_pk=self.build.pk,
        commit=self.build.commit,
        status=BUILD_STATUS_SUCCESS,
    )

    build_complete.send.assert_called_once_with(
        sender=Build,
        build=mock.ANY,
    )

    fileify.delay.assert_called_once_with(
        version_pk=self.version.pk,
        commit=self.build.commit,
        build=self.build.pk,
        search_ranking=mock.ANY,
        search_ignore=mock.ANY,
    )

    # Build state transitions reported to the API, in order.
    assert self.requests_mock.request_history[3].json() == {
        'id': 1,
        'state': 'cloning',
        'commit': 'a1b2c3',
        'error': '',
        'builder': mock.ANY,
    }
    # The fully-expanded config is reported after cloning.
    assert self.requests_mock.request_history[4].json() == {
        'config': {
            'version': '2',
            'formats': ['htmlzip', 'pdf', 'epub'],
            'python': {
                'version': '3',
                'install': [],
                'use_system_site_packages': False,
            },
            'conda': None,
            'build': {
                'image': 'readthedocs/build:latest',
                'apt_packages': [],
            },
            'doctype': 'sphinx',
            'sphinx': {
                'builder': 'sphinx',
                'configuration': 'docs/conf.py',
                'fail_on_warning': False,
            },
            'mkdocs': None,
            'submodules': {
                'include': [],
                'exclude': 'all',
                'recursive': False,
            },
            'search': {
                'ranking': {},
                'ignore': [
                    'search.html',
                    'search/index.html',
                    '404.html',
                    '404/index.html',
                ],
            },
        },
    }
    assert self.requests_mock.request_history[5].json() == {
        'id': 1,
        'state': 'installing',
        'commit': 'a1b2c3',
        'config': mock.ANY,
        'builder': mock.ANY,
        'error': '',
    }
    assert self.requests_mock.request_history[6].json() == {
        'id': 1,
        'state': 'building',
        'commit': 'a1b2c3',
        'config': mock.ANY,
        'builder': mock.ANY,
        'error': '',
    }
    assert self.requests_mock.request_history[7].json() == {
        'id': 1,
        'state': 'uploading',
        'commit': 'a1b2c3',
        'config': mock.ANY,
        'builder': mock.ANY,
        'error': '',
    }
    # Version is marked as built with every produced artifact type.
    assert self.requests_mock.request_history[8]._request.method == 'PATCH'
    assert self.requests_mock.request_history[8].path == '/api/v2/version/1/'
    assert self.requests_mock.request_history[8].json() == {
        'built': True,
        'documentation_type': 'sphinx',
        'has_pdf': True,
        'has_epub': True,
        'has_htmlzip': True,
    }
    # A successful clone marks the project as having a valid clone.
    assert self.requests_mock.request_history[9]._request.method == 'PATCH'
    assert self.requests_mock.request_history[9].path == '/api/v2/project/1/'
    assert self.requests_mock.request_history[9].json() == {'has_valid_clone': True}
    assert self.requests_mock.request_history[10].json() == {
        'id': 1,
        'state': 'finished',
        'commit': 'a1b2c3',
        'config': mock.ANY,
        'builder': mock.ANY,
        'length': mock.ANY,
        'success': True,
        'error': '',
    }

    # Every artifact type is synced to the media storage.
    self.mocker.mocks['build_media_storage'].sync_directory.assert_has_calls([
        mock.call(mock.ANY, 'html/project/latest'),
        mock.call(mock.ANY, 'json/project/latest'),
        mock.call(mock.ANY, 'htmlzip/project/latest'),
        mock.call(mock.ANY, 'pdf/project/latest'),
        mock.call(mock.ANY, 'epub/project/latest'),
    ])
@mock.patch('readthedocs.projects.tasks.builds.build_complete')
@mock.patch('readthedocs.projects.tasks.builds.send_external_build_status')
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.execute')
@mock.patch('readthedocs.projects.tasks.builds.UpdateDocsTask.send_notifications')
@mock.patch('readthedocs.projects.tasks.builds.clean_build')
def test_failed_build(self, clean_build, send_notifications, execute, send_external_build_status, build_complete):
    """Any exception during the build marks it as failed.

    It doesn't matter "where" it was raised: setup, build, syncing
    directories, etc.
    """
    execute.side_effect = Exception('Force and exception here.')

    self._trigger_update_docs_task()

    # It has to be called twice, ``before_start`` and ``after_return``
    clean_build.assert_has_calls([
        mock.call(mock.ANY),  # the argument is an APIVersion
        mock.call(mock.ANY)
    ])

    send_notifications.assert_called_once_with(
        self.version.pk,
        self.build.pk,
        event=WebHookEvent.BUILD_FAILED,
    )

    send_external_build_status.assert_called_once_with(
        version_type=self.version.type,
        build_pk=self.build.pk,
        commit=self.build.commit,
        status=BUILD_STATUS_FAILURE,
    )

    build_complete.send.assert_called_once_with(
        sender=Build,
        build=mock.ANY,
    )

    # Test we are updating the DB by calling the API with the updated build object
    api_request = self.requests_mock.request_history[-1]  # the last one should be the PATCH for the build
    assert api_request._request.method == 'PATCH'
    assert api_request.json() == {
        'builder': mock.ANY,
        'commit': self.build.commit,
        'error': BuildAppError.GENERIC_WITH_BUILD_ID.format(build_id=self.build.pk),
        'id': self.build.pk,
        'length': mock.ANY,
        'state': 'finished',
        'success': False,
    }
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_build_commands_executed(self, load_yaml_config):
    """Verify the exact sequence of git and build commands for a full
    Sphinx build of all formats (clone, env setup, each builder, and the
    mv of the PDF/ePub outputs)."""
    load_yaml_config.return_value = self._config_file({
        'version': 2,
        'formats': 'all',
        'sphinx': {
            'configuration': 'docs/conf.py',
        },
    })

    self._trigger_update_docs_task()

    # Clone, checkout the exact commit, and clean the working tree.
    self.mocker.mocks['git.Backend.run'].assert_has_calls([
        mock.call('git', 'clone', '--no-single-branch', '--depth', '50', '', '.'),
        mock.call('git', 'checkout', '--force', 'a1b2c3'),
        mock.call('git', 'clean', '-d', '-f', '-f'),
    ])

    self.mocker.mocks['environment.run'].assert_has_calls([
        # Virtualenv creation and core tooling upgrade.
        mock.call(
            'python3.7',
            '-mvirtualenv',
            mock.ANY,
            bin_path=None,
            cwd=None,
        ),
        mock.call(
            mock.ANY,
            '-m',
            'pip',
            'install',
            '--upgrade',
            '--no-cache-dir',
            'pip',
            'setuptools<58.3.0',
            bin_path=mock.ANY,
            cwd=mock.ANY,
        ),
        # Default doc-building dependencies.
        mock.call(
            mock.ANY,
            '-m',
            'pip',
            'install',
            '--upgrade',
            '--no-cache-dir',
            'mock==1.0.1',
            'pillow==5.4.1',
            'alabaster>=0.7,<0.8,!=0.7.5',
            'commonmark==0.8.1',
            'recommonmark==0.5.0',
            'sphinx<2',
            'sphinx-rtd-theme<0.5',
            'readthedocs-sphinx-ext<2.2',
            bin_path=mock.ANY,
            cwd=mock.ANY,
        ),
        # FIXME: shouldn't this one be present here? It's not now because
        # we are mocking `append_conf` which is the one that triggers this
        # command.
        #
        # mock.call(
        #     'cat',
        #     'docs/conf.py',
        #     cwd=mock.ANY,
        # ),
        # HTML build.
        mock.call(
            mock.ANY,
            '-m',
            'sphinx',
            '-T',
            '-E',
            '-b',
            'readthedocs',
            '-d',
            '_build/doctrees',
            '-D',
            'language=en',
            '.',
            '_build/html',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
        # Localmedia (single-page HTML zip) build.
        mock.call(
            mock.ANY,
            '-m',
            'sphinx',
            '-T',
            '-E',
            '-b',
            'readthedocssinglehtmllocalmedia',
            '-d',
            '_build/doctrees',
            '-D',
            'language=en',
            '.',
            '_build/localmedia',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
        # PDF build (via latex).
        mock.call(
            mock.ANY,
            '-m',
            'sphinx',
            '-b',
            'latex',
            '-D',
            'language=en',
            '-d',
            '_build/doctrees',
            '.',
            '_build/latex',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
        # Sphinx version check used to pick the latex toolchain.
        mock.call(
            mock.ANY,
            '-c',
            '"import sys; import sphinx; sys.exit(0 if sphinx.version_info >= (1, 6, 1) else 1)"',
            bin_path=mock.ANY,
            cwd=mock.ANY,
            escape_command=False,
            shell=True,
            record=False,
        ),
        mock.call(
            'mv',
            '-f',
            'output.file',
            # TODO: take a look at
            # https://callee.readthedocs.io/en/latest/reference/strings.html#callee.strings.EndsWith
            # to match `project.pdf`
            mock.ANY,
            cwd=mock.ANY,
        ),
        # ePub build.
        mock.call(
            mock.ANY,
            '-m',
            'sphinx',
            '-T',
            '-E',
            '-b',
            'epub',
            '-d',
            '_build/doctrees',
            '-D',
            'language=en',
            '.',
            '_build/epub',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
        mock.call(
            'mv',
            '-f',
            'output.file',
            # TODO: take a look at
            # https://callee.readthedocs.io/en/latest/reference/strings.html#callee.strings.EndsWith
            # to match `project.epub`
            mock.ANY,
            cwd=mock.ANY,
        ),
        # FIXME: I think we are hitting this issue here:
        # https://github.com/pytest-dev/pytest-mock/issues/234
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_use_config_file(self, load_yaml_config):
    """The build task always tries to load the YAML config file."""
    self._trigger_update_docs_task()
    load_yaml_config.assert_called_once()
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_install_apt_packages(self, load_yaml_config):
    """``build.apt_packages`` installs the listed packages as root."""
    config = BuildConfigV2(
        {},
        {
            'version': 2,
            'build': {
                'apt_packages': [
                    'clangd',
                    'cmatrix',
                ],
            },
        },
        source_file='readthedocs.yml',
    )
    config.validate()
    load_yaml_config.return_value = config

    self._trigger_update_docs_task()

    # apt-get update must run before install; both run as root.
    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            'apt-get',
            'update',
            '--assume-yes',
            '--quiet',
            user='root:root',
        ),
        mock.call(
            'apt-get',
            'install',
            '--assume-yes',
            '--quiet',
            '--',  # end-of-options marker so package names can't be flags
            'clangd',
            'cmatrix',
            user='root:root',
        )
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_build_tools(self, load_yaml_config):
    """``build.tools`` installs each requested tool via asdf.

    Each tool goes through install / global / reshim; python
    additionally bootstraps virtualenv and setuptools.
    """
    config = BuildConfigV2(
        {},
        {
            'version': 2,
            'build': {
                'os': 'ubuntu-20.04',
                'tools': {
                    'python': '3.10',
                    'nodejs': '16',
                    'rust': '1.55',
                    'golang': '1.17',
                },
            },
        },
        source_file='readthedocs.yml',
    )
    config.validate()
    load_yaml_config.return_value = config

    self._trigger_update_docs_task()

    # User-facing versions map to full versions via settings.
    python_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['python']['3.10']
    nodejs_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['nodejs']['16']
    rust_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['rust']['1.55']
    golang_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['golang']['1.17']
    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call('asdf', 'install', 'python', python_version),
        mock.call('asdf', 'global', 'python', python_version),
        mock.call('asdf', 'reshim', 'python', record=False),
        mock.call('python', '-mpip', 'install', '-U', 'virtualenv', 'setuptools<58.3.0'),
        mock.call('asdf', 'install', 'nodejs', nodejs_version),
        mock.call('asdf', 'global', 'nodejs', nodejs_version),
        mock.call('asdf', 'reshim', 'nodejs', record=False),
        mock.call('asdf', 'install', 'rust', rust_version),
        mock.call('asdf', 'global', 'rust', rust_version),
        mock.call('asdf', 'reshim', 'rust', record=False),
        mock.call('asdf', 'install', 'golang', golang_version),
        mock.call('asdf', 'global', 'golang', golang_version),
        mock.call('asdf', 'reshim', 'golang', record=False),
        mock.ANY,
    ])
@mock.patch('readthedocs.doc_builder.python_environments.tarfile')
@mock.patch('readthedocs.doc_builder.python_environments.build_tools_storage')
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_build_tools_cached(self, load_yaml_config, build_tools_storage, tarfile):
    """When a cached tool tarball exists, it is extracted and moved into
    place instead of running ``asdf install``."""
    config = BuildConfigV2(
        {},
        {
            'version': 2,
            'build': {
                'os': 'ubuntu-20.04',
                'tools': {
                    'python': '3.10',
                    'nodejs': '16',
                    'rust': '1.55',
                    'golang': '1.17',
                },
            },
        },
        source_file='readthedocs.yml',
    )
    config.validate()
    load_yaml_config.return_value = config

    # Pretend a cached tarball exists for every tool.
    build_tools_storage.open.return_value = b''
    build_tools_storage.exists.return_value = True
    # NOTE(review): `extract_all` looks like a typo for tarfile's
    # `extractall`; harmless here since the attribute is a MagicMock,
    # but confirm which method production code actually calls.
    tarfile.open.return_value.__enter__.return_value.extract_all.return_value = None

    self._trigger_update_docs_task()

    python_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['python']['3.10']
    nodejs_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['nodejs']['16']
    rust_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['rust']['1.55']
    golang_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['golang']['1.17']
    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            'mv',
            # Use mock.ANY here because path differs when ran locally
            # and on CircleCI
            mock.ANY,
            f'/home/docs/.asdf/installs/python/{python_version}',
            record=False,
        ),
        mock.call('asdf', 'global', 'python', python_version),
        mock.call('asdf', 'reshim', 'python', record=False),
        mock.call(
            'mv',
            mock.ANY,
            f'/home/docs/.asdf/installs/nodejs/{nodejs_version}',
            record=False,
        ),
        mock.call('asdf', 'global', 'nodejs', nodejs_version),
        mock.call('asdf', 'reshim', 'nodejs', record=False),
        mock.call(
            'mv',
            mock.ANY,
            f'/home/docs/.asdf/installs/rust/{rust_version}',
            record=False,
        ),
        mock.call('asdf', 'global', 'rust', rust_version),
        mock.call('asdf', 'reshim', 'rust', record=False),
        mock.call(
            'mv',
            mock.ANY,
            f'/home/docs/.asdf/installs/golang/{golang_version}',
            record=False,
        ),
        mock.call('asdf', 'global', 'golang', golang_version),
        mock.call('asdf', 'reshim', 'golang', record=False),
        mock.ANY,
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_requirements_from_config_file_installed(self, load_yaml_config):
    """``python.install: requirements`` pip-installs the listed file."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'python': {
                'install': [{
                    'requirements': 'requirements.txt',
                }],
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            mock.ANY,
            '-m',
            'pip',
            'install',
            '--exists-action=w',
            '--no-cache-dir',
            '-r',
            'requirements.txt',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_conda_config_calls_conda_command(self, load_yaml_config):
    """``conda.environment`` creates the env with conda and installs the
    core doc dependencies via conda + pip."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'conda': {
                'environment': 'environment.yaml',
            },
        },
    )

    self._trigger_update_docs_task()

    # TODO: check we are saving the `conda.environment` in the config file
    # via the API call
    self.mocker.mocks['environment.run'].assert_has_calls([
        # Create the env from the user's file.
        mock.call(
            'conda',
            'env',
            'create',
            '--quiet',
            '--name',
            self.version.slug,
            '--file',
            'environment.yaml',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
        # Core doc dependencies available as conda packages.
        mock.call(
            'conda',
            'install',
            '--yes',
            '--quiet',
            '--name',
            self.version.slug,
            'mock',
            'pillow',
            'sphinx',
            'sphinx_rtd_theme',
            cwd=mock.ANY,
        ),
        # Remaining dependencies are pip-only.
        mock.call(
            mock.ANY,
            '-m',
            'pip',
            'install',
            '-U',
            '--no-cache-dir',
            'recommonmark',
            'readthedocs-sphinx-ext',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_python_mamba_commands(self, load_yaml_config):
    """A mambaforge python tool uses ``mamba`` instead of ``conda``."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'build': {
                'os': 'ubuntu-20.04',
                'tools': {
                    'python': 'mambaforge-4.10',
                },
            },
            'conda': {
                'environment': 'environment.yaml',
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        # mambaforge is installed through asdf like any python tool.
        mock.call('asdf', 'install', 'python', 'mambaforge-4.10.3-10'),
        mock.call('asdf', 'global', 'python', 'mambaforge-4.10.3-10'),
        mock.call('asdf', 'reshim', 'python', record=False),
        mock.call('mamba', 'env', 'create', '--quiet', '--name', 'latest', '--file', 'environment.yaml', bin_path=None, cwd=mock.ANY),
        mock.call('mamba', 'install', '--yes', '--quiet', '--name', 'latest', 'mock', 'pillow', 'sphinx', 'sphinx_rtd_theme', cwd=mock.ANY),
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_sphinx_fail_on_warning(self, load_yaml_config):
    """``sphinx.fail_on_warning`` adds ``-W --keep-going`` to sphinx."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'sphinx': {
                'configuration': 'docs/conf.py',
                'fail_on_warning': True,
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            mock.ANY,
            '-m',
            'sphinx',
            '-T',
            '-E',
            '-W',  # fail on warning flag
            '--keep-going',  # fail on warning flag
            '-b',
            'readthedocs',
            '-d',
            '_build/doctrees',
            '-D',
            'language=en',
            '.',
            '_build/html',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_mkdocs_fail_on_warning(self, load_yaml_config):
    """``mkdocs.fail_on_warning`` adds ``--strict`` to mkdocs build."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'mkdocs': {
                'configuration': 'docs/mkdocs.yaml',
                'fail_on_warning': True,
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            mock.ANY,
            '-m',
            'mkdocs',
            'build',
            '--clean',
            '--site-dir',
            '_build/html',
            '--config-file',
            'docs/mkdocs.yaml',
            '--strict',  # fail on warning flag
            cwd=mock.ANY,
            bin_path=mock.ANY,
        )
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_system_site_packages(self, load_yaml_config):
    """``python.system_packages: true`` passes --system-site-packages."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'python': {
                'system_packages': True,
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            'python3.7',
            '-mvirtualenv',
            '--system-site-packages',  # expected flag
            mock.ANY,
            bin_path=None,
            cwd=None,
        ),
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_system_site_packages_project_overrides(self, load_yaml_config):
    """The project-level setting must not leak into a v2 config build.

    The expected call has only three positional args, so the presence
    of a ``--system-site-packages`` flag would fail the match.
    """
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            # Do not define `system_packages: True` in the config file.
            'python': {},
        },
    )

    # Override the setting in the Project object
    self.project.use_system_packages = True
    self.project.save()

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            'python3.7',
            '-mvirtualenv',
            # we don't expect the --system-site-packages flag to be here
            mock.ANY,
            bin_path=None,
            cwd=None,
        ),
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_python_install_setuptools(self, load_yaml_config):
    """``method: setuptools`` runs ``setup.py install --force``."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'python': {
                'install': [{
                    'path': '.',
                    'method': 'setuptools',
                }],
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            mock.ANY,
            './setup.py',
            'install',
            '--force',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        )
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_python_install_pip(self, load_yaml_config):
    """``method: pip`` pip-installs the project path eagerly."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'python': {
                'install': [{
                    'path': '.',
                    'method': 'pip',
                }],
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            mock.ANY,
            '-m',
            'pip',
            'install',
            '--upgrade',
            '--upgrade-strategy',
            'eager',
            '--no-cache-dir',
            '.',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        )
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_python_install_pip_extras(self, load_yaml_config):
    """``extra_requirements`` are appended as pip extras (``.[docs]``)."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'python': {
                'install': [{
                    'path': '.',
                    'method': 'pip',
                    'extra_requirements': ['docs'],
                }],
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            mock.ANY,
            '-m',
            'pip',
            'install',
            '--upgrade',
            '--upgrade-strategy',
            'eager',
            '--no-cache-dir',
            '.[docs]',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        )
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_python_install_pip_several_options(self, load_yaml_config):
    """Multiple install entries run in order, each with its own method."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'python': {
                'install': [
                    {
                        'path': '.',
                        'method': 'pip',
                        'extra_requirements': ['docs'],
                    },
                    {
                        'path': 'two',
                        'method': 'setuptools',
                    },
                    {
                        'requirements': 'three.txt',
                    },
                ],
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        # pip install with extras
        mock.call(
            mock.ANY,
            '-m',
            'pip',
            'install',
            '--upgrade',
            '--upgrade-strategy',
            'eager',
            '--no-cache-dir',
            '.[docs]',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
        # setuptools install from the `two` path
        mock.call(
            mock.ANY,
            'two/setup.py',
            'install',
            '--force',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
        # requirements file install
        mock.call(
            mock.ANY,
            '-m',
            'pip',
            'install',
            '--exists-action=w',
            '--no-cache-dir',
            '-r',
            'three.txt',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
    ])
@pytest.mark.parametrize(
    'value,expected', [
        # ALL expands to every submodule of the test repository.
        (ALL, ['one', 'two', 'three']),
        (['one', 'two'], ['one', 'two']),
    ],
)
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_submodules_include(self, load_yaml_config, value, expected):
    """``submodules.include`` initializes exactly the listed submodules."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'submodules': {
                'include': value,
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['git.Backend.run'].assert_has_calls([
        mock.call('git', 'submodule', 'sync'),
        mock.call('git', 'submodule', 'update', '--init', '--force', *expected),
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_submodules_exclude(self, load_yaml_config):
    """``submodules.exclude`` updates all submodules except the listed ones."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'submodules': {
                'exclude': ['one'],
                'recursive': True
            },
        },
    )

    self._trigger_update_docs_task()

    # `one` is excluded; `two` and `three` remain, updated recursively.
    self.mocker.mocks['git.Backend.run'].assert_has_calls([
        mock.call('git', 'submodule', 'sync'),
        mock.call('git', 'submodule', 'update', '--init', '--force', '--recursive', 'two', 'three'),
    ])
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_submodules_exclude_all(self, load_yaml_config):
    """Excluding ALL submodules must skip every git submodule command."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'submodules': {
                'exclude': ALL,
                'recursive': True
            },
        },
    )

    self._trigger_update_docs_task()

    # No recorded git invocation may contain a `submodule` argument.
    git_calls = self.mocker.mocks['git.Backend.run'].mock_calls
    assert all(
        'submodule' not in recorded.args for recorded in git_calls
    ), 'git submodule command found'
@pytest.mark.parametrize(
    'value,command',
    [
        # config value -> RTD's custom sphinx builder name
        ('html', 'readthedocs'),
        ('htmldir', 'readthedocsdirhtml'),
        ('dirhtml', 'readthedocsdirhtml'),
        ('singlehtml', 'readthedocssinglehtml'),
    ],
)
@mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
def test_sphinx_builder(self, load_yaml_config, value, command):
    """Each ``sphinx.builder`` value maps to the matching RTD builder."""
    load_yaml_config.return_value = self._config_file(
        {
            'version': 2,
            'sphinx': {
                'builder': value,
                'configuration': 'docs/conf.py',
            },
        },
    )

    self._trigger_update_docs_task()

    self.mocker.mocks['environment.run'].assert_has_calls([
        mock.call(
            mock.ANY,
            '-m',
            'sphinx',
            '-T',
            '-E',
            '-b',
            command,
            '-d',
            '_build/doctrees',
            '-D',
            'language=en',
            '.',
            '_build/html',
            cwd=mock.ANY,
            bin_path=mock.ANY,
        ),
    ])
class TestBuildTaskExceptionHandler(BuildEnvironmentBase):
    """Tests covering how build task exceptions are reported via the API."""

    @mock.patch('readthedocs.projects.tasks.builds.load_yaml_config')
    def test_config_file_exception(self, load_yaml_config):
        # A ConfigError while loading the YAML file must finish the build
        # as failed with a user-facing message.
        load_yaml_config.side_effect = ConfigError(
            code='invalid',
            message='Invalid version in config file.'
        )

        self._trigger_update_docs_task()

        # The final API call PATCHes the build with the error details.
        assert self.requests_mock.request_history[-1]._request.method == 'PATCH'
        assert self.requests_mock.request_history[-1].path == '/api/v2/build/1/'
        assert self.requests_mock.request_history[-1].json() == {
            'id': 1,
            'state': 'finished',
            'commit': 'a1b2c3',
            'error': "Problem in your project's configuration. Invalid version in config file.",
            'success': False,
            'builder': mock.ANY,
            'length': 0,
        }
class TestSyncRepositoryTask(BuildEnvironmentBase):
    """Tests for the ``sync_repository_task`` Celery task.

    FIX: replaced the ``assert isinstance(...) == True`` anti-idiom with a
    plain ``assert isinstance(...)`` (comparing a boolean to ``True`` is
    redundant and hides the intent from pytest's assertion rewriting).
    """

    def _trigger_sync_repository_task(self):
        # Runs synchronously under the tests' eager Celery configuration.
        sync_repository_task.delay(self.version.pk)

    @mock.patch('readthedocs.projects.tasks.builds.clean_build')
    def test_clean_build_after_sync_repository(self, clean_build):
        self._trigger_sync_repository_task()
        clean_build.assert_called_once()

    @mock.patch('readthedocs.projects.tasks.builds.SyncRepositoryTask.execute')
    @mock.patch('readthedocs.projects.tasks.builds.clean_build')
    def test_clean_build_after_failure_in_sync_repository(self, clean_build, execute):
        # Even a failing sync must still clean up the build directory.
        execute.side_effect = Exception('Something weird happen')

        self._trigger_sync_repository_task()
        clean_build.assert_called_once()

    @pytest.mark.parametrize(
        'verbose_name',
        [
            'stable',
            'latest',
        ],
    )
    @mock.patch('readthedocs.projects.tasks.builds.SyncRepositoryTask.on_failure')
    def test_check_duplicate_reserved_version_latest(self, on_failure, verbose_name):
        # `repository.tags` and `repository.branches` will both return a
        # tag/branch named `latest`/`stable`, colliding with RTD's
        # reserved version names.
        with mock.patch(
            'readthedocs.vcs_support.backends.git.Backend.branches',
            new_callable=mock.PropertyMock,
            return_value=[
                mock.MagicMock(identifier='a1b2c3', verbose_name=verbose_name),
            ],
        ):
            with mock.patch(
                'readthedocs.vcs_support.backends.git.Backend.tags',
                new_callable=mock.PropertyMock,
                return_value=[
                    mock.MagicMock(identifier='a1b2c3', verbose_name=verbose_name),
                ],
            ):
                self._trigger_sync_repository_task()

        on_failure.assert_called_once_with(
            # The first argument is the raised exception; it is inspected
            # in detail below.
            mock.ANY,
            mock.ANY,
            [self.version.pk],
            {},
            mock.ANY,
        )

        exception = on_failure.call_args[0][0]
        assert isinstance(exception, RepositoryError)
        assert exception.message == RepositoryError.DUPLICATED_RESERVED_VERSIONS
| true | true |
f7f5bc496f5a86a65d9930b6ea8d4d59610ecb17 | 12,282 | py | Python | var/spack/repos/builtin/packages/cudnn/package.py | lcnzg/spack | 5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-06-28T02:20:15.000Z | 2021-06-28T02:20:15.000Z | var/spack/repos/builtin/packages/cudnn/package.py | lcnzg/spack | 5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2021-03-05T22:27:10.000Z | 2021-09-28T20:28:06.000Z | var/spack/repos/builtin/packages/cudnn/package.py | lcnzg/spack | 5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-06-28T04:48:37.000Z | 2021-06-28T04:48:37.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import platform
from spack import *
_versions = {
# cuDNN 8.2.4
'8.2.4.15-11.4': {
'Linux-x86_64': '0e5d2df890b9967efa6619da421310d97323565a79f05a1a8cb9b7165baad0d7',
'Linux-ppc64le': 'af8749ca83fd6bba117c8bee31b787b7f204946e864294030ee0091eb7d3577e',
'Linux-aarch64': '48b11f19e9cd3414ec3c6c357ad228aebbd43282aae372d42cab2af67c32a08b'},
# cuDNN 8.2.0
'8.2.0.53-11.3': {
'Linux-x86_64': '7a195dc93a7cda2bdd4d9b73958d259c784be422cd941a9a625aab75309f19dc',
'Linux-ppc64le': 'cfe06735671a41a5e25fc7542d740177ac8eab1ab146bd30f19e0fa836895611',
'Linux-aarch64': '0f44af94eef7826dc7b41f92aade3d5210891cdb10858bc0a28ba7167909ab7c'},
'8.2.0.53-10.2': {
'Linux-x86_64': '6ecbc98b3795e940ce0831ffb7cd2c0781830fdd6b1911f950bcaf6d569f807c'},
# cuDNN 8.1.1
'8.1.1.33-11.2': {
'Linux-x86_64': '98a8784e92862f20018d20c281b30d4a0cd951f93694f6433ccf4ae9c502ba6a',
'Linux-ppc64le': 'c3e535a5d633ad8f4d50be0b6f8efd084c6c6ed3525c07cbd89fc508b1d76c7a',
'Linux-aarch64': '4f7e4f5698539659d51f28dff0da11e5445a5ae58439af1d8a8e9f2d93535245'},
'8.1.1.33-10.2': {
'Linux-x86_64': '2a4a7b99a6e9bfa690eb19bb41e49553f2a7a491a5b3abfcae900e166c5b6ebd'},
# cuDNN 8.1.0
'8.1.0.77-11.2': {
'Linux-x86_64': 'dbe82faf071d91ba9bcf00480146ad33f462482dfee56caf4479c1b8dabe3ecb',
'Linux-ppc64le': '0d3f8fa21959e9f94889841cc8445aecf41d2f3c557091b447313afb43034037',
'Linux-aarch64': 'ba16ff486b68a8b50b69b32702612634954de529f39cfff68c12b8bfc1958499'},
'8.1.0.77-10.2': {
'Linux-x86_64': 'c5bc617d89198b0fbe485156446be15a08aee37f7aff41c797b120912f2b14b4'},
# cuDNN 8.0.5
'8.0.5.39-11.1': {
'Linux-x86_64': '1d046bfa79399dabcc6f6cb1507918754439442ea0ca9e0fbecdd446f9b00cce',
'Linux-aarch64': '0c3542c51b42131247cd9f839d0ebefe4e02bb46d1716be1682cb2919278085a'},
'8.0.5.39-11.0': {
'Linux-x86_64': '4e16ee7895deb4a8b1c194b812ba49586ef7d26902051401d3717511898a9b73',
'Linux-ppc64le': '05207a02c0b4f22464dbb0ee646693df4a70ae557640ba576ba8678c26393004'},
'8.0.5.39-10.2': {
'Linux-x86_64': '21f84c05c67bf1ec859e77c38ccd5bf154964fa1c308f449959be4c356e382f3',
'Linux-ppc64le': 'ce128ea090b05e36d00ffe921e45982ca10e8207e40cfc2e0067d0f62d9b36f9'},
'8.0.5.39-10.1': {
'Linux-x86_64': '90908495298896b33aa95063a3471f93c36627d7ac01c17dc36d75c65eea4a00',
'Linux-ppc64le': 'e43b10bb3932d5e7a598dcc726d16dc9938dd99dd319cd74b3420f3ed65fe5e0'},
# cuDNN 8.0.4
'8.0.4.30-11.1': {
'Linux-x86_64': '8f4c662343afce5998ce963500fe3bb167e9a508c1a1a949d821a4b80fa9beab',
'Linux-ppc64le': 'b4ddb51610cbae806017616698635a9914c3e1eb14259f3a39ee5c84e7106712'},
'8.0.4.30-11.0': {
'Linux-x86_64': '38a81a28952e314e21577432b0bab68357ef9de7f6c8858f721f78df9ee60c35',
'Linux-ppc64le': '8da8ed689b1a348182ddd3f59b6758a502e11dc6708c33f96e3b4a40e033d2e1'},
'8.0.4.30-10.2': {
'Linux-x86_64': 'c12c69eb16698eacac40aa46b9ce399d4cd86efb6ff0c105142f8a28fcfb980e',
'Linux-ppc64le': '32a5b92f9e1ef2be90e10f220c4ab144ca59d215eb6a386e93597f447aa6507e'},
'8.0.4.30-10.1': {
'Linux-x86_64': 'eb4b888e61715168f57a0a0a21c281ada6856b728e5112618ed15f8637487715',
'Linux-ppc64le': '690811bbf04adef635f4a6f480575fc2a558c4a2c98c85c7090a3a8c60dacea9'},
# cuDNN 8.0.3
'8.0.3.33-11.0': {
'Linux-x86_64': '8924bcc4f833734bdd0009050d110ad0c8419d3796010cf7bc515df654f6065a',
'Linux-ppc64le': 'c2d0519831137b43d0eebe07522edb4ef5d62320e65e5d5fa840a9856f25923d'},
'8.0.3.33-10.2': {
'Linux-x86_64': 'b3d487c621e24b5711983b89bb8ad34f0378bdbf8a1a4b86eefaa23b19956dcc',
'Linux-ppc64le': 'ff22c9c37af191c9104989d784427cde744cdde879bfebf3e4e55ca6a9634a11'},
'8.0.3.33-10.1': {
'Linux-x86_64': '4752ac6aea4e4d2226061610d6843da6338ef75a93518aa9ce50d0f58df5fb07',
'Linux-ppc64le': 'c546175f6ec86a11ee8fb9ab5526fa8d854322545769a87d35b1a505992f89c3'},
# cuDNN 8.0.2
'8.0.2.39-11.0': {
'Linux-x86_64': '672f46288b8edd98f8d156a4f1ff518201ca6de0cff67915ceaa37f6d6d86345',
'Linux-ppc64le': 'b7c1ce5b1191eb007ba3455ea5f497fdce293a646545d8a6ed93e9bb06d7f057'},
'8.0.2.39-10.2': {
'Linux-x86_64': 'c9cbe5c211360f3cfbc0fb104f0e9096b37e53f89392525679f049276b2f701f',
'Linux-ppc64le': 'c32325ff84a8123491f2e58b3694885a9a672005bc21764b38874688c0e43262'},
'8.0.2.39-10.1': {
'Linux-x86_64': '82148a68bd6bdaab93af5e05bb1842b8ccb3ab7de7bed41f609a7616c102213d',
'Linux-ppc64le': '8196ec4f031356317baeccefbc4f61c8fccb2cf0bdef0a6431438918ddf68fb9'},
# cuDNN 8.0
'8.0.0.180-11.0': {
'Linux-x86_64': '9e75ea70280a77de815e0bdc85d08b67e081bc99a708b574092142344d2ba07e',
'Linux-ppc64le': '1229e94731bbca63ee7f5a239f4e1838a51a301d896f3097fbf7377d74704060'},
'8.0.0.180-10.2': {
'Linux-x86_64': '0c87c12358ee2b99d57c2a8c7560e3bb93e54bb929f5f8bec4964a72a2bb261d',
'Linux-ppc64le': '59e4ad6db15fcc374976e8052fe39e3f30f34079710fb3c7751a64c853d9243f'},
# cuDNN 7.6.5
'7.6.5.32-10.2': {
'Linux-x86_64': '600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20',
'Linux-ppc64le': '7dc08b6ab9331bfd12207d4802c61db1ad7cace7395b67a6e7b16efa0335668b'},
'7.6.5.32-10.1': {
'Linux-x86_64': '7eaec8039a2c30ab0bc758d303588767693def6bf49b22485a2c00bf2e136cb3',
'Darwin-x86_64': '8ecce28a5ed388a2b9b2d239e08d7c550f53b79288e6d9e5eb4c152bfc711aff',
'Linux-ppc64le': '97b2faf73eedfc128f2f5762784d21467a95b2d5ba719825419c058f427cbf56'},
'7.6.5.32-10.0': {
'Linux-x86_64': '28355e395f0b2b93ac2c83b61360b35ba6cd0377e44e78be197b6b61b4b492ba',
'Darwin-x86_64': '6fa0b819374da49102e285ecf7fcb8879df4d0b3cc430cc8b781cdeb41009b47',
'Linux-ppc64le': 'b1717f4570083bbfc6b8b59f280bae4e4197cc1cb50e9d873c05adf670084c5b'},
'7.6.5.32-9.2': {
'Linux-x86_64': 'a2a2c7a8ba7b16d323b651766ee37dcfdbc2b50d920f73f8fde85005424960e4',
'Linux-ppc64le': 'a11f44f9a827b7e69f527a9d260f1637694ff7c1674a3e46bd9ec054a08f9a76'},
'7.6.5.32-9.0': {
'Linux-x86_64': 'bd0a4c0090d5b02feec3f195738968690cc2470b9bc6026e6fe8ff245cd261c8'},
# cuDNN 7.6.4
'7.6.4.38-10.1': {
'Linux-x86_64': '32091d115c0373027418620a09ebec3658a6bc467d011de7cdd0eb07d644b099',
'Darwin-x86_64': 'bfced062c3689ced2c1fb49c7d5052e6bc3da6974c1eb707e4dcf8cd209d4236',
'Linux-ppc64le': 'f3615fea50986a4dfd05d7a0cf83396dfdceefa9c209e8bf9691e20a48e420ce'},
'7.6.4.38-10.0': {
'Linux-x86_64': '417bb5daf51377037eb2f5c87649000ca1b9cec0acb16cfe07cb1d3e9a961dbf',
'Darwin-x86_64': 'af01ab841caec25087776a6b8fc7782883da12e590e24825ad1031f9ae0ed4b1',
'Linux-ppc64le': 'c1725ad6bd7d7741e080a1e6da4b62eac027a94ac55c606cce261e3f829400bb'},
'7.6.4.38-9.2': {
'Linux-x86_64': 'c79156531e641289b6a6952888b9637059ef30defd43c3cf82acf38d67f60a27',
'Linux-ppc64le': '98d8aae2dcd851558397a9a30b73242f257e1556be17c83650e63a0685969884'},
'7.6.4.38-9.0': {
'Linux-x86_64': '8db78c3623c192d4f03f3087b41c32cb0baac95e13408b5d9dabe626cb4aab5d'},
# cuDNN 7.6.3
'7.6.3.30-10.1': {
'Linux-x86_64': '352557346d8111e2f954c494be1a90207103d316b8777c33e62b3a7f7b708961',
'Linux-ppc64le': 'f274735a8fc31923d3623b1c3d2b1d0d35bb176687077c6a4d4353c6b900d8ee'},
# cuDNN 7.5.1
'7.5.1.10-10.1': {
'Linux-x86_64': '2c833f43c9147d9a25a20947a4c5a5f5c33b2443240fd767f63b330c482e68e0',
'Linux-ppc64le': 'a9e23bc83c970daec20874ccd1d8d80b648adf15440ecd0164818b330b1e2663'},
'7.5.1.10-10.0': {
'Linux-x86_64': 'c0a4ec438920aa581dd567117b9c316745b4a451ac739b1e04939a3d8b229985',
'Linux-ppc64le': 'd9205718da5fbab85433476f9ff61fcf4b889d216d6eea26753bbc24d115dd70'},
# cuDNN 7.5.0
'7.5.0.56-10.1': {
'Linux-x86_64': 'c31697d6b71afe62838ad2e57da3c3c9419c4e9f5635d14b683ebe63f904fbc8',
'Linux-ppc64le': '15415eb714ab86ab6c7531f2cac6474b5dafd989479b062776c670b190e43638'},
'7.5.0.56-10.0': {
'Linux-x86_64': '701097882cb745d4683bb7ff6c33b8a35c7c81be31bac78f05bad130e7e0b781',
'Linux-ppc64le': 'f0c1cbd9de553c8e2a3893915bd5fff57b30e368ef4c964d783b6a877869e93a'},
# cuDNN 7.3.0
'7.3.0.29-9.0': {
'Linux-x86_64': '403f9043ff2c7b2c5967454872275d07bca11fd41dfc7b21995eadcad6dbe49b'},
# cuDNN 7.2.1
'7.2.1.38-9.0': {
'Linux-x86_64': 'cf007437b9ac6250ec63b89c25f248d2597fdd01369c80146567f78e75ce4e37'},
# cuDNN 7.1.3
'7.1.3-9.1': {
'Linux-x86_64': 'dd616d3794167ceb923d706bf73e8d6acdda770751492b921ee6827cdf190228',
'Linux-ppc64le': 'e3b4837f711b98a52faacc872a68b332c833917ef3cf87c0108f1d01af9b2931'},
# cuDNN 6.0
'6.0-8.0': {
'Linux-x86_64': '9b09110af48c9a4d7b6344eb4b3e344daa84987ed6177d5c44319732f3bb7f9c'},
# cuDNN 5.1
'5.1-8.0': {
'Linux-x86_64': 'c10719b36f2dd6e9ddc63e3189affaa1a94d7d027e63b71c3f64d449ab0645ce'},
}
class Cudnn(Package):
"""NVIDIA cuDNN is a GPU-accelerated library of primitives for deep
neural networks"""
homepage = "https://developer.nvidia.com/cudnn"
# Latest versions available at:
# https://developer.nvidia.com/rdp/cudnn-download
# Archived versions available at:
# https://developer.nvidia.com/rdp/cudnn-archive
# Note that download links don't work from command line,
# need to use modified URLs like in url_for_version.
maintainers = ['adamjstewart', 'bvanessen']
for ver, packages in _versions.items():
key = "{0}-{1}".format(platform.system(), platform.machine())
pkg = packages.get(key)
cudnn_ver, cuda_ver = ver.split('-')
long_ver = "{0}-{1}".format(cudnn_ver, cuda_ver)
if pkg:
version(long_ver, sha256=pkg)
# Add constraints matching CUDA version to cuDNN version
cuda_req = 'cuda@{0}.0:{0}.999'.format(cuda_ver)
cudnn_ver_req = '@{0}'.format(long_ver)
depends_on(cuda_req, when=cudnn_ver_req)
def url_for_version(self, version):
url = 'https://developer.download.nvidia.com/compute/redist/cudnn/v{0}/cudnn-{1}-{2}-v{3}.tgz'
# Get the system and machine arch for building the file path
sys = "{0}-{1}".format(platform.system(), platform.machine())
# Munge it to match Nvidia's naming scheme
sys_key = sys.lower().replace('x86_64', 'x64').replace('darwin', 'osx') \
.replace('aarch64', 'aarch64sbsa')
if version >= Version('7.2'):
directory = version[:3]
ver = version[:4]
cuda = version[4:]
elif version >= Version('7.1'):
directory = version[:3]
ver = version[:2]
cuda = version[3:]
elif version >= Version('7.0'):
directory = version[:3]
ver = version[0]
cuda = version[3:]
else:
directory = version[:2]
ver = version[:2]
cuda = version[2:]
return url.format(directory, cuda, sys_key, ver)
def setup_run_environment(self, env):
if 'target=ppc64le: platform=linux' in self.spec:
env.set('cuDNN_ROOT', os.path.join(
self.prefix, 'targets', 'ppc64le-linux'))
def install(self, spec, prefix):
install_tree('.', prefix)
if 'target=ppc64le: platform=linux' in spec:
target_lib = os.path.join(prefix, 'targets',
'ppc64le-linux', 'lib')
if os.path.isdir(target_lib) and not os.path.isdir(prefix.lib):
symlink(target_lib, prefix.lib)
target_include = os.path.join(prefix, 'targets',
'ppc64le-linux', 'include')
if os.path.isdir(target_include) \
and not os.path.isdir(prefix.include):
symlink(target_include, prefix.include)
| 47.789883 | 102 | 0.703306 |
import os
import platform
from spack import *
_versions = {
'8.2.4.15-11.4': {
'Linux-x86_64': '0e5d2df890b9967efa6619da421310d97323565a79f05a1a8cb9b7165baad0d7',
'Linux-ppc64le': 'af8749ca83fd6bba117c8bee31b787b7f204946e864294030ee0091eb7d3577e',
'Linux-aarch64': '48b11f19e9cd3414ec3c6c357ad228aebbd43282aae372d42cab2af67c32a08b'},
'8.2.0.53-11.3': {
'Linux-x86_64': '7a195dc93a7cda2bdd4d9b73958d259c784be422cd941a9a625aab75309f19dc',
'Linux-ppc64le': 'cfe06735671a41a5e25fc7542d740177ac8eab1ab146bd30f19e0fa836895611',
'Linux-aarch64': '0f44af94eef7826dc7b41f92aade3d5210891cdb10858bc0a28ba7167909ab7c'},
'8.2.0.53-10.2': {
'Linux-x86_64': '6ecbc98b3795e940ce0831ffb7cd2c0781830fdd6b1911f950bcaf6d569f807c'},
'8.1.1.33-11.2': {
'Linux-x86_64': '98a8784e92862f20018d20c281b30d4a0cd951f93694f6433ccf4ae9c502ba6a',
'Linux-ppc64le': 'c3e535a5d633ad8f4d50be0b6f8efd084c6c6ed3525c07cbd89fc508b1d76c7a',
'Linux-aarch64': '4f7e4f5698539659d51f28dff0da11e5445a5ae58439af1d8a8e9f2d93535245'},
'8.1.1.33-10.2': {
'Linux-x86_64': '2a4a7b99a6e9bfa690eb19bb41e49553f2a7a491a5b3abfcae900e166c5b6ebd'},
'8.1.0.77-11.2': {
'Linux-x86_64': 'dbe82faf071d91ba9bcf00480146ad33f462482dfee56caf4479c1b8dabe3ecb',
'Linux-ppc64le': '0d3f8fa21959e9f94889841cc8445aecf41d2f3c557091b447313afb43034037',
'Linux-aarch64': 'ba16ff486b68a8b50b69b32702612634954de529f39cfff68c12b8bfc1958499'},
'8.1.0.77-10.2': {
'Linux-x86_64': 'c5bc617d89198b0fbe485156446be15a08aee37f7aff41c797b120912f2b14b4'},
'8.0.5.39-11.1': {
'Linux-x86_64': '1d046bfa79399dabcc6f6cb1507918754439442ea0ca9e0fbecdd446f9b00cce',
'Linux-aarch64': '0c3542c51b42131247cd9f839d0ebefe4e02bb46d1716be1682cb2919278085a'},
'8.0.5.39-11.0': {
'Linux-x86_64': '4e16ee7895deb4a8b1c194b812ba49586ef7d26902051401d3717511898a9b73',
'Linux-ppc64le': '05207a02c0b4f22464dbb0ee646693df4a70ae557640ba576ba8678c26393004'},
'8.0.5.39-10.2': {
'Linux-x86_64': '21f84c05c67bf1ec859e77c38ccd5bf154964fa1c308f449959be4c356e382f3',
'Linux-ppc64le': 'ce128ea090b05e36d00ffe921e45982ca10e8207e40cfc2e0067d0f62d9b36f9'},
'8.0.5.39-10.1': {
'Linux-x86_64': '90908495298896b33aa95063a3471f93c36627d7ac01c17dc36d75c65eea4a00',
'Linux-ppc64le': 'e43b10bb3932d5e7a598dcc726d16dc9938dd99dd319cd74b3420f3ed65fe5e0'},
'8.0.4.30-11.1': {
'Linux-x86_64': '8f4c662343afce5998ce963500fe3bb167e9a508c1a1a949d821a4b80fa9beab',
'Linux-ppc64le': 'b4ddb51610cbae806017616698635a9914c3e1eb14259f3a39ee5c84e7106712'},
'8.0.4.30-11.0': {
'Linux-x86_64': '38a81a28952e314e21577432b0bab68357ef9de7f6c8858f721f78df9ee60c35',
'Linux-ppc64le': '8da8ed689b1a348182ddd3f59b6758a502e11dc6708c33f96e3b4a40e033d2e1'},
'8.0.4.30-10.2': {
'Linux-x86_64': 'c12c69eb16698eacac40aa46b9ce399d4cd86efb6ff0c105142f8a28fcfb980e',
'Linux-ppc64le': '32a5b92f9e1ef2be90e10f220c4ab144ca59d215eb6a386e93597f447aa6507e'},
'8.0.4.30-10.1': {
'Linux-x86_64': 'eb4b888e61715168f57a0a0a21c281ada6856b728e5112618ed15f8637487715',
'Linux-ppc64le': '690811bbf04adef635f4a6f480575fc2a558c4a2c98c85c7090a3a8c60dacea9'},
'8.0.3.33-11.0': {
'Linux-x86_64': '8924bcc4f833734bdd0009050d110ad0c8419d3796010cf7bc515df654f6065a',
'Linux-ppc64le': 'c2d0519831137b43d0eebe07522edb4ef5d62320e65e5d5fa840a9856f25923d'},
'8.0.3.33-10.2': {
'Linux-x86_64': 'b3d487c621e24b5711983b89bb8ad34f0378bdbf8a1a4b86eefaa23b19956dcc',
'Linux-ppc64le': 'ff22c9c37af191c9104989d784427cde744cdde879bfebf3e4e55ca6a9634a11'},
'8.0.3.33-10.1': {
'Linux-x86_64': '4752ac6aea4e4d2226061610d6843da6338ef75a93518aa9ce50d0f58df5fb07',
'Linux-ppc64le': 'c546175f6ec86a11ee8fb9ab5526fa8d854322545769a87d35b1a505992f89c3'},
'8.0.2.39-11.0': {
'Linux-x86_64': '672f46288b8edd98f8d156a4f1ff518201ca6de0cff67915ceaa37f6d6d86345',
'Linux-ppc64le': 'b7c1ce5b1191eb007ba3455ea5f497fdce293a646545d8a6ed93e9bb06d7f057'},
'8.0.2.39-10.2': {
'Linux-x86_64': 'c9cbe5c211360f3cfbc0fb104f0e9096b37e53f89392525679f049276b2f701f',
'Linux-ppc64le': 'c32325ff84a8123491f2e58b3694885a9a672005bc21764b38874688c0e43262'},
'8.0.2.39-10.1': {
'Linux-x86_64': '82148a68bd6bdaab93af5e05bb1842b8ccb3ab7de7bed41f609a7616c102213d',
'Linux-ppc64le': '8196ec4f031356317baeccefbc4f61c8fccb2cf0bdef0a6431438918ddf68fb9'},
'8.0.0.180-11.0': {
'Linux-x86_64': '9e75ea70280a77de815e0bdc85d08b67e081bc99a708b574092142344d2ba07e',
'Linux-ppc64le': '1229e94731bbca63ee7f5a239f4e1838a51a301d896f3097fbf7377d74704060'},
'8.0.0.180-10.2': {
'Linux-x86_64': '0c87c12358ee2b99d57c2a8c7560e3bb93e54bb929f5f8bec4964a72a2bb261d',
'Linux-ppc64le': '59e4ad6db15fcc374976e8052fe39e3f30f34079710fb3c7751a64c853d9243f'},
'7.6.5.32-10.2': {
'Linux-x86_64': '600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20',
'Linux-ppc64le': '7dc08b6ab9331bfd12207d4802c61db1ad7cace7395b67a6e7b16efa0335668b'},
'7.6.5.32-10.1': {
'Linux-x86_64': '7eaec8039a2c30ab0bc758d303588767693def6bf49b22485a2c00bf2e136cb3',
'Darwin-x86_64': '8ecce28a5ed388a2b9b2d239e08d7c550f53b79288e6d9e5eb4c152bfc711aff',
'Linux-ppc64le': '97b2faf73eedfc128f2f5762784d21467a95b2d5ba719825419c058f427cbf56'},
'7.6.5.32-10.0': {
'Linux-x86_64': '28355e395f0b2b93ac2c83b61360b35ba6cd0377e44e78be197b6b61b4b492ba',
'Darwin-x86_64': '6fa0b819374da49102e285ecf7fcb8879df4d0b3cc430cc8b781cdeb41009b47',
'Linux-ppc64le': 'b1717f4570083bbfc6b8b59f280bae4e4197cc1cb50e9d873c05adf670084c5b'},
'7.6.5.32-9.2': {
'Linux-x86_64': 'a2a2c7a8ba7b16d323b651766ee37dcfdbc2b50d920f73f8fde85005424960e4',
'Linux-ppc64le': 'a11f44f9a827b7e69f527a9d260f1637694ff7c1674a3e46bd9ec054a08f9a76'},
'7.6.5.32-9.0': {
'Linux-x86_64': 'bd0a4c0090d5b02feec3f195738968690cc2470b9bc6026e6fe8ff245cd261c8'},
'7.6.4.38-10.1': {
'Linux-x86_64': '32091d115c0373027418620a09ebec3658a6bc467d011de7cdd0eb07d644b099',
'Darwin-x86_64': 'bfced062c3689ced2c1fb49c7d5052e6bc3da6974c1eb707e4dcf8cd209d4236',
'Linux-ppc64le': 'f3615fea50986a4dfd05d7a0cf83396dfdceefa9c209e8bf9691e20a48e420ce'},
'7.6.4.38-10.0': {
'Linux-x86_64': '417bb5daf51377037eb2f5c87649000ca1b9cec0acb16cfe07cb1d3e9a961dbf',
'Darwin-x86_64': 'af01ab841caec25087776a6b8fc7782883da12e590e24825ad1031f9ae0ed4b1',
'Linux-ppc64le': 'c1725ad6bd7d7741e080a1e6da4b62eac027a94ac55c606cce261e3f829400bb'},
'7.6.4.38-9.2': {
'Linux-x86_64': 'c79156531e641289b6a6952888b9637059ef30defd43c3cf82acf38d67f60a27',
'Linux-ppc64le': '98d8aae2dcd851558397a9a30b73242f257e1556be17c83650e63a0685969884'},
'7.6.4.38-9.0': {
'Linux-x86_64': '8db78c3623c192d4f03f3087b41c32cb0baac95e13408b5d9dabe626cb4aab5d'},
'7.6.3.30-10.1': {
'Linux-x86_64': '352557346d8111e2f954c494be1a90207103d316b8777c33e62b3a7f7b708961',
'Linux-ppc64le': 'f274735a8fc31923d3623b1c3d2b1d0d35bb176687077c6a4d4353c6b900d8ee'},
'7.5.1.10-10.1': {
'Linux-x86_64': '2c833f43c9147d9a25a20947a4c5a5f5c33b2443240fd767f63b330c482e68e0',
'Linux-ppc64le': 'a9e23bc83c970daec20874ccd1d8d80b648adf15440ecd0164818b330b1e2663'},
'7.5.1.10-10.0': {
'Linux-x86_64': 'c0a4ec438920aa581dd567117b9c316745b4a451ac739b1e04939a3d8b229985',
'Linux-ppc64le': 'd9205718da5fbab85433476f9ff61fcf4b889d216d6eea26753bbc24d115dd70'},
'7.5.0.56-10.1': {
'Linux-x86_64': 'c31697d6b71afe62838ad2e57da3c3c9419c4e9f5635d14b683ebe63f904fbc8',
'Linux-ppc64le': '15415eb714ab86ab6c7531f2cac6474b5dafd989479b062776c670b190e43638'},
'7.5.0.56-10.0': {
'Linux-x86_64': '701097882cb745d4683bb7ff6c33b8a35c7c81be31bac78f05bad130e7e0b781',
'Linux-ppc64le': 'f0c1cbd9de553c8e2a3893915bd5fff57b30e368ef4c964d783b6a877869e93a'},
'7.3.0.29-9.0': {
'Linux-x86_64': '403f9043ff2c7b2c5967454872275d07bca11fd41dfc7b21995eadcad6dbe49b'},
'7.2.1.38-9.0': {
'Linux-x86_64': 'cf007437b9ac6250ec63b89c25f248d2597fdd01369c80146567f78e75ce4e37'},
'7.1.3-9.1': {
'Linux-x86_64': 'dd616d3794167ceb923d706bf73e8d6acdda770751492b921ee6827cdf190228',
'Linux-ppc64le': 'e3b4837f711b98a52faacc872a68b332c833917ef3cf87c0108f1d01af9b2931'},
'6.0-8.0': {
'Linux-x86_64': '9b09110af48c9a4d7b6344eb4b3e344daa84987ed6177d5c44319732f3bb7f9c'},
'5.1-8.0': {
'Linux-x86_64': 'c10719b36f2dd6e9ddc63e3189affaa1a94d7d027e63b71c3f64d449ab0645ce'},
}
class Cudnn(Package):
homepage = "https://developer.nvidia.com/cudnn"
# need to use modified URLs like in url_for_version.
maintainers = ['adamjstewart', 'bvanessen']
for ver, packages in _versions.items():
key = "{0}-{1}".format(platform.system(), platform.machine())
pkg = packages.get(key)
cudnn_ver, cuda_ver = ver.split('-')
long_ver = "{0}-{1}".format(cudnn_ver, cuda_ver)
if pkg:
version(long_ver, sha256=pkg)
# Add constraints matching CUDA version to cuDNN version
cuda_req = 'cuda@{0}.0:{0}.999'.format(cuda_ver)
cudnn_ver_req = '@{0}'.format(long_ver)
depends_on(cuda_req, when=cudnn_ver_req)
def url_for_version(self, version):
url = 'https://developer.download.nvidia.com/compute/redist/cudnn/v{0}/cudnn-{1}-{2}-v{3}.tgz'
# Get the system and machine arch for building the file path
sys = "{0}-{1}".format(platform.system(), platform.machine())
# Munge it to match Nvidia's naming scheme
sys_key = sys.lower().replace('x86_64', 'x64').replace('darwin', 'osx') \
.replace('aarch64', 'aarch64sbsa')
if version >= Version('7.2'):
directory = version[:3]
ver = version[:4]
cuda = version[4:]
elif version >= Version('7.1'):
directory = version[:3]
ver = version[:2]
cuda = version[3:]
elif version >= Version('7.0'):
directory = version[:3]
ver = version[0]
cuda = version[3:]
else:
directory = version[:2]
ver = version[:2]
cuda = version[2:]
return url.format(directory, cuda, sys_key, ver)
def setup_run_environment(self, env):
if 'target=ppc64le: platform=linux' in self.spec:
env.set('cuDNN_ROOT', os.path.join(
self.prefix, 'targets', 'ppc64le-linux'))
def install(self, spec, prefix):
install_tree('.', prefix)
if 'target=ppc64le: platform=linux' in spec:
target_lib = os.path.join(prefix, 'targets',
'ppc64le-linux', 'lib')
if os.path.isdir(target_lib) and not os.path.isdir(prefix.lib):
symlink(target_lib, prefix.lib)
target_include = os.path.join(prefix, 'targets',
'ppc64le-linux', 'include')
if os.path.isdir(target_include) \
and not os.path.isdir(prefix.include):
symlink(target_include, prefix.include)
| true | true |
f7f5bd20d407393cdf4da3a5333fb860782150b3 | 175 | py | Python | examples/DeepBlueAI/AutoCV2/kakaobrain2/skeleton2/optim/__init__.py | zichuan-scott-xu/automl-workflow | d108e55da943775953b9f1801311a86ac07e58a0 | [
"Apache-2.0"
] | 3 | 2020-12-15T02:40:43.000Z | 2021-01-14T02:32:13.000Z | examples/DeepBlueAI/AutoCV2/kakaobrain2/skeleton2/optim/__init__.py | zichuan-scott-xu/automl-workflow | d108e55da943775953b9f1801311a86ac07e58a0 | [
"Apache-2.0"
] | null | null | null | examples/DeepBlueAI/AutoCV2/kakaobrain2/skeleton2/optim/__init__.py | zichuan-scott-xu/automl-workflow | d108e55da943775953b9f1801311a86ac07e58a0 | [
"Apache-2.0"
] | 4 | 2021-01-07T05:41:38.000Z | 2021-04-07T08:02:22.000Z | # -*- coding: utf-8 -*-
# pylint: disable=wildcard-import
from __future__ import absolute_import
from .optimizers2 import *
from .scheduler2 import *
from .sgdw2 import SGDW
| 21.875 | 38 | 0.754286 |
from __future__ import absolute_import
from .optimizers2 import *
from .scheduler2 import *
from .sgdw2 import SGDW
| true | true |
f7f5bd3af4717698726f9b6f33f0f5576bb7b9d8 | 49,814 | py | Python | autotest/osr/osr_esri.py | kartverket/gdal | 5ae18e641a3dedd9bd83f072ffcd8008d1d3a868 | [
"MIT"
] | 3 | 2016-07-25T16:30:13.000Z | 2022-02-11T11:09:08.000Z | autotest/osr/osr_esri.py | kartverket/gdal | 5ae18e641a3dedd9bd83f072ffcd8008d1d3a868 | [
"MIT"
] | null | null | null | autotest/osr/osr_esri.py | kartverket/gdal | 5ae18e641a3dedd9bd83f072ffcd8008d1d3a868 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test some ESRI specific translation issues.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import string
import csv
import gzip
sys.path.append( '../pymod' )
import gdaltest
from osgeo import gdal, osr
###############################################################################
# This test verifies that morphToESRI() translates ideosyncratic datum names
# from "EPSG" form to ESRI from when the exception list comes from the
# gdal_datum.csv file.
def osr_esri_1():
    # Round-trip the AGD66 datum name: EPSG import gives the EPSG-style name,
    # morphToESRI() must map it to the ESRI exception-list form, and
    # morphFromESRI() must map it back.
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4202)

    datum = srs.GetAttrValue('DATUM')
    if datum != 'Australian_Geodetic_Datum_1966':
        gdaltest.post_reason('Got wrong DATUM name (%s) after EPSG import.' % datum)
        return 'fail'

    srs.MorphToESRI()
    datum = srs.GetAttrValue('DATUM')
    if datum != 'D_Australian_1966':
        gdaltest.post_reason('Got wrong DATUM name (%s) after ESRI morph.' % datum)
        return 'fail'

    srs.MorphFromESRI()
    datum = srs.GetAttrValue('DATUM')
    if datum != 'Australian_Geodetic_Datum_1966':
        gdaltest.post_reason('Got wrong DATUM name (%s) after ESRI unmorph.' % datum)
        return 'fail'

    return 'success'
###############################################################################
# Verify that exact correct form of UTM names is established when
# translating certain GEOGCSes to ESRI format.
def osr_esri_2():
    # A WGS84 UTM zone 11 South definition must receive the exact ESRI
    # GEOGCS and PROJCS names after morphToESRI().
    srs = osr.SpatialReference()
    srs.SetFromUserInput('+proj=utm +zone=11 +south +datum=WGS84')
    srs.MorphToESRI()

    geogcs = srs.GetAttrValue('GEOGCS')
    if geogcs != 'GCS_WGS_1984':
        gdaltest.post_reason('Got wrong GEOGCS name (%s) after ESRI morph.' % geogcs)
        return 'fail'

    projcs = srs.GetAttrValue('PROJCS')
    if projcs != 'WGS_1984_UTM_Zone_11S':
        gdaltest.post_reason('Got wrong PROJCS name (%s) after ESRI morph.' % projcs)
        return 'fail'

    return 'success'
###############################################################################
# Verify that Unnamed is changed to Unknown in morphToESRI().
def osr_esri_3():
    # An unnamed Miller Cylindrical definition must pick up the projection
    # name (Unnamed -> Unknown handling) during morphToESRI().
    srs = osr.SpatialReference()
    srs.SetFromUserInput('+proj=mill +datum=WGS84')
    srs.MorphToESRI()

    projcs = srs.GetAttrValue('PROJCS')
    if projcs == 'Miller_Cylindrical':
        return 'success'

    gdaltest.post_reason('Got wrong PROJCS name (%s) after ESRI morph.' % projcs)
    return 'fail'
###############################################################################
# Verify Polar Stereographic translations work properly OGR to ESRI.
def osr_esri_4():
    # OGC Polar_Stereographic (south) must morph to ESRI's
    # Stereographic_South_Pole, with latitude_of_origin carried into
    # standard_parallel_1.
    ogc_wkt = 'PROJCS["PS Test",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",-80.2333],PARAMETER["central_meridian",171],PARAMETER["scale_factor",0.9999],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1]]'

    srs = osr.SpatialReference()
    srs.SetFromUserInput(ogc_wkt)
    srs.MorphToESRI()

    projection = srs.GetAttrValue('PROJECTION')
    if projection != 'Stereographic_South_Pole':
        gdaltest.post_reason('Got wrong PROJECTION name (%s) after ESRI morph.' % projection)
        return 'fail'

    sp1 = srs.GetProjParm('standard_parallel_1')
    if sp1 != -80.2333:
        gdaltest.post_reason('Got wrong parameter value (%g) after ESRI morph.' % sp1)
        return 'fail'

    return 'success'
###############################################################################
# Verify Polar Stereographic translations work properly ESRI to OGR.
def osr_esri_5():
    # ESRI's Stereographic_South_Pole must morph back to OGC
    # Polar_Stereographic, with standard_parallel_1 becoming
    # latitude_of_origin.
    esri_wkt = 'PROJCS["PS Test",GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Stereographic_South_Pole"],PARAMETER["standard_parallel_1",-80.2333],PARAMETER["central_meridian",171],PARAMETER["scale_factor",0.9999],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]'

    srs = osr.SpatialReference()
    srs.SetFromUserInput(esri_wkt)
    srs.MorphFromESRI()

    projection = srs.GetAttrValue('PROJECTION')
    if projection != 'Polar_Stereographic':
        gdaltest.post_reason('Got wrong PROJECTION name (%s) after ESRI morph.' % projection)
        return 'fail'

    lat_origin = srs.GetProjParm('latitude_of_origin')
    if lat_origin != -80.2333:
        gdaltest.post_reason('Got wrong parameter value (%g) after ESRI morph.' % lat_origin)
        return 'fail'

    return 'success'
###############################################################################
# Verify Lambert 2SP with a 1.0 scale factor still gets translated to 2SP
# per bug 187.
def osr_esri_6():
    # A Lambert 2SP definition with Scale_Factor of exactly 1.0 must still
    # come out as 2SP (not be collapsed to 1SP) per bug 187.
    esri_wkt = 'PROJCS["Texas Centric Mapping System/Lambert Conformal",GEOGCS["GCS_North_American_1983",DATUM["D_North_American_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Conformal_Conic"],PARAMETER["False_Easting",1500000.0],PARAMETER["False_Northing",5000000.0],PARAMETER["Central_Meridian",-100.0],PARAMETER["Standard_Parallel_1",27.5],PARAMETER["Standard_Parallel_2",35.0],PARAMETER["Scale_Factor",1.0],PARAMETER["Latitude_Of_Origin",18.0],UNIT["Meter",1.0]]'

    srs = osr.SpatialReference()
    srs.SetFromUserInput(esri_wkt)
    srs.MorphFromESRI()

    projection = srs.GetAttrValue('PROJECTION')
    if projection != 'Lambert_Conformal_Conic_2SP':
        gdaltest.post_reason(
            'Got wrong PROJECTION name (%s) after ESRI morph, expected 2SP' % projection)
        return 'fail'

    return 'success'
###############################################################################
# Verify that FEET is treated as US survey feet per bug #1533.
def osr_esri_7():
    """Verify that FEET is treated as US survey feet per bug #1533."""

    # Old Arc/Info style .prj definition: Florida North state plane
    # (FIPS zone 903) with the unit given symbolically as FEET.
    prj = [ 'Projection STATEPLANE',
            'Fipszone 903',
            'Datum NAD83',
            'Spheroid GRS80',
            'Units FEET',
            'Zunits NO',
            'Xshift 0.0',
            'Yshift 0.0',
            'Parameters ',
            '' ]

    srs_prj = osr.SpatialReference()
    srs_prj.ImportFromESRI( prj )

    # Reference WKT: note the US survey foot conversion factor
    # 0.3048006096012192, not the international foot (0.3048).
    wkt = """PROJCS["NAD83 / Florida North",
    GEOGCS["NAD83",
        DATUM["North_American_Datum_1983",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            AUTHORITY["EPSG","6269"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.01745329251994328,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4269"]],
    UNIT["Foot_US",0.3048006096012192],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",30.75],
    PARAMETER["standard_parallel_2",29.58333333333333],
    PARAMETER["latitude_of_origin",29],
    PARAMETER["central_meridian",-84.5],
    PARAMETER["false_easting",1968500],
    PARAMETER["false_northing",0],
    AXIS["X",EAST],
    AXIS["Y",NORTH]]"""

    srs_wkt = osr.SpatialReference(wkt = wkt)

    # IsSame() compares the full definitions, including linear units.
    if not srs_prj.IsSame( srs_wkt ):
        print('got: ', srs_prj.ExportToPrettyWkt())
        gdaltest.post_reason( 'old style ESRI projection imported wrong, perhaps linear units?' )
        return 'fail'

    return 'success'
###############################################################################
# Verify that handling of numerically specified units (see bug #1533)
def osr_esri_8():
    """Verify handling of numerically specified units (see bug #1533)."""

    # Same Florida North definition as osr_esri_7, but the unit is given as
    # a numeric feet-per-meter factor rather than the keyword FEET.
    prj = [ 'Projection STATEPLANE',
            'Fipszone 903',
            'Datum NAD83',
            'Spheroid GRS80',
            'Units 3.280839895013123',
            'Zunits NO',
            'Xshift 0.0',
            'Yshift 0.0',
            'Parameters ',
            '' ]

    srs_prj = osr.SpatialReference()
    srs_prj.ImportFromESRI( prj )

    # Reference WKT: the numeric factor maps to a user-defined unit of
    # 0.3048 m (international foot), with the false easting re-expressed
    # in those units.
    wkt = """PROJCS["NAD83 / Florida North",
    GEOGCS["NAD83",
        DATUM["North_American_Datum_1983",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            AUTHORITY["EPSG","6269"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.01745329251994328,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4269"]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",30.75],
    PARAMETER["standard_parallel_2",29.58333333333333],
    PARAMETER["latitude_of_origin",29],
    PARAMETER["central_meridian",-84.5],
    PARAMETER["false_easting",1968503.937007874],
    PARAMETER["false_northing",0],
    UNIT["user-defined",0.3048],
    AUTHORITY["EPSG","26960"]]"""

    srs_wkt = osr.SpatialReference(wkt = wkt)

    if not srs_prj.IsSame( srs_wkt ):
        gdaltest.post_reason( 'old style ESRI projection imported wrong, perhaps linear units?' )
        return 'fail'

    return 'success'
###############################################################################
# Verify Equidistant Conic handling.
def osr_esri_9():
    """Verify Equidistant Conic handling.

    ESRI writes Central_Meridian / Latitude_Of_Origin where OGC WKT uses
    longitude_of_center / latitude_of_center for this projection, so both
    morph directions must rename the parameters.
    """
    srs = osr.SpatialReference()
    srs.SetFromUserInput( 'PROJCS["edc",GEOGCS["GCS_North_American_1983",DATUM["D_North_American_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Equidistant_Conic"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",-96.0],PARAMETER["Standard_Parallel_1",29.5],PARAMETER["Standard_Parallel_2",45.5],PARAMETER["Latitude_Of_Origin",37.5],UNIT["Meter",1.0]]' )

    # After morphFromESRI() the OGC parameter names must appear.
    expected = 'PROJCS["edc",GEOGCS["GCS_North_American_1983",DATUM["North_American_Datum_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Equidistant_Conic"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["longitude_of_center",-96.0],PARAMETER["Standard_Parallel_1",29.5],PARAMETER["Standard_Parallel_2",45.5],PARAMETER["latitude_of_center",37.5],UNIT["Meter",1.0]]'

    srs.MorphFromESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        gdaltest.post_reason( 'Did not get expected Equidistant Conic SRS after morphFromESRI' )
        return 'fail'

    # Morphing back must restore the ESRI parameter names.
    expected = 'PROJCS["edc",GEOGCS["GCS_North_American_1983",DATUM["D_North_American_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.017453292519943295]],PROJECTION["Equidistant_Conic"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["central_meridian",-96.0],PARAMETER["Standard_Parallel_1",29.5],PARAMETER["Standard_Parallel_2",45.5],PARAMETER["latitude_of_origin",37.5],UNIT["Meter",1.0]]'

    srs.MorphToESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        gdaltest.post_reason( 'Did not get expected Equidistant Conic SRS after morphToESRI' )
        return 'fail'

    return 'success'
###############################################################################
# Verify Plate_Carree handling.
def osr_esri_10():
    """Verify Plate_Carree handling.

    ESRI Plate_Carree becomes OGC Equirectangular on morphFromESRI(), and
    the reverse morph writes Equidistant_Cylindrical (ESRI's preferred
    name) rather than restoring Plate_Carree.
    """
    srs = osr.SpatialReference()
    srs.SetFromUserInput( 'PROJCS["Sphere_Plate_Carree",GEOGCS["GCS_Sphere",DATUM["D_Sphere",SPHEROID["Sphere",6371000.0,0.0]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Plate_Carree"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",0.0],UNIT["Meter",1.0]]' )

    # Expected result of morphFromESRI(): Equirectangular projection, and
    # the sphere datum renamed to its EPSG-style equivalent.
    expected = 'PROJCS["Sphere_Plate_Carree",GEOGCS["GCS_Sphere",DATUM["Not_specified_based_on_Authalic_Sphere",SPHEROID["Sphere",6371000.0,0.0]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Equirectangular"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",0.0],UNIT["Meter",1.0]]'

    srs.MorphFromESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        gdaltest.post_reason( 'Did not get expected Equirectangular SRS after morphFromESRI' )
        return 'fail'

    # Expected result of morphToESRI(): Equidistant_Cylindrical.
    expected = 'PROJCS["Sphere_Plate_Carree",GEOGCS["GCS_Sphere",DATUM["D_Sphere",SPHEROID["Sphere",6371000.0,0.0]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.017453292519943295]],PROJECTION["Equidistant_Cylindrical"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",0.0],UNIT["Meter",1.0]]'

    srs.MorphToESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        gdaltest.post_reason( 'Did not get expected Equidistant_Cylindrical SRS after morphToESRI' )
        return 'fail'

    return 'success'
###############################################################################
# Verify arc/info style TM handling.
def osr_esri_11():
    """Verify arc/info style Transverse Mercator handling.

    The old Arc/Info format gives angular parameters in DMS form with
    trailing '/*' comments; ImportFromESRI() must parse them into decimal
    degrees.
    """
    srs = osr.SpatialReference()
    srs.ImportFromESRI( [ 'Projection TRANSVERSE',
                          'Datum NAD27',
                          'Spheroid CLARKE1866',
                          'Units METERS',
                          'Zunits NO',
                          'Xshift 0.0',
                          'Yshift 0.0',
                          'Parameters ',
                          '1.0 /* scale factor at central meridian',
                          '-106 56 0.5 /* longitude of central meridian',
                          ' 39 33 30 /* latitude of origin',
                          '0.0 /* false easting (meters)',
                          '0.0 /* false northing (meters)' ] )

    # Note the DMS values converted to decimal degrees in the expected WKT
    # (e.g. -106 56 0.5 -> -106.9334722222222).
    expected = 'PROJCS["unnamed",GEOGCS["NAD27",DATUM["North_American_Datum_1927",SPHEROID["Clarke 1866",6378206.4,294.978698213898,AUTHORITY["EPSG","7008"]],AUTHORITY["EPSG","6267"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AUTHORITY["EPSG","4267"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",39.55833333333333],PARAMETER["central_meridian",-106.9334722222222],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["METERS",1]]'

    srs.MorphFromESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        gdaltest.post_reason( 'Did not get expected TM SRS after morphFromESRI' )
        return 'fail'

    return 'success'
###############################################################################
# Test automatic morphing of ESRI-style LCC WKT prefixed with 'ESRI::'
def osr_esri_12():
    """Test automatic morphing of ESRI-style LCC WKT prefixed with 'ESRI::'.

    SetFromUserInput() must detect the 'ESRI::' prefix and morph from ESRI
    automatically, so no explicit MorphFromESRI() call is needed.

    Bug fix: the DATUM check previously logged a failure reason but fell
    through without returning 'fail', so a wrong datum name could not fail
    the test.
    """
    srs = osr.SpatialReference()
    srs.SetFromUserInput( 'ESRI::PROJCS["Lambert Conformal Conic",GEOGCS["grs80",DATUM["D_North_American_1983",SPHEROID["Geodetic_Reference_System_1980",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Lambert_Conformal_Conic"],PARAMETER["standard_parallel_1",34.33333333333334],PARAMETER["standard_parallel_2",36.16666666666666],PARAMETER["latitude_of_origin",33.75],PARAMETER["central_meridian",-79],PARAMETER["false_easting",609601.22],PARAMETER["false_northing",0],UNIT["Meter",1]]' )

    # No MorphFromESRI() is required

    if srs.GetAttrValue( 'PROJECTION' ) != 'Lambert_Conformal_Conic_2SP':
        gdaltest.post_reason( 'Got wrong PROJECTION name (%s) after ESRI morph.' % \
                              srs.GetAttrValue( 'PROJECTION' ) )
        return 'fail'

    if abs( srs.GetProjParm('standard_parallel_1') - 34.333333333 ) > 0.00001:
        gdaltest.post_reason( 'Got wrong parameter value (%g) after ESRI morph.' % \
                              srs.GetProjParm('standard_parallel_1') )
        return 'fail'

    if srs.GetAttrValue( 'DATUM' ) != 'North_American_Datum_1983':
        gdaltest.post_reason( 'Got wrong DATUM name (%s) after ESRI morph.' % \
                              srs.GetAttrValue( 'DATUM' ) )
        # Previously missing: actually fail on a wrong datum name.
        return 'fail'

    if srs.GetAttrValue( 'UNIT' ) != 'Meter':
        gdaltest.post_reason( 'Got wrong UNIT name (%s) after ESRI morph.' % \
                              srs.GetAttrValue( 'UNIT' ) )
        return 'fail'

    return 'success'
###############################################################################
# Test automatic morphing of ESRI-style LCC WKT prefixed with 'ESRI::'
# but read directly from file.
def osr_esri_13():
    """Test automatic morphing of 'ESRI::'-prefixed LCC WKT read from a file.

    Same checks as osr_esri_12, but the definition comes from
    data/lcc_esri.prj via SetFromUserInput()'s filename handling.

    Bug fix: the DATUM check previously logged a failure reason but fell
    through without returning 'fail', so a wrong datum name could not fail
    the test.
    """
    srs = osr.SpatialReference()
    srs.SetFromUserInput( 'data/lcc_esri.prj' )

    # No MorphFromESRI() is required

    if srs.GetAttrValue( 'PROJECTION' ) != 'Lambert_Conformal_Conic_2SP':
        gdaltest.post_reason( 'Got wrong PROJECTION name (%s) after ESRI morph.' % \
                              srs.GetAttrValue( 'PROJECTION' ) )
        return 'fail'

    if abs( srs.GetProjParm('standard_parallel_1') - 34.333333333 ) > 0.00001:
        gdaltest.post_reason( 'Got wrong parameter value (%g) after ESRI morph.' % \
                              srs.GetProjParm('standard_parallel_1') )
        return 'fail'

    if srs.GetAttrValue( 'DATUM' ) != 'North_American_Datum_1983':
        gdaltest.post_reason( 'Got wrong DATUM name (%s) after ESRI morph.' % \
                              srs.GetAttrValue( 'DATUM' ) )
        # Previously missing: actually fail on a wrong datum name.
        return 'fail'

    if srs.GetAttrValue( 'UNIT' ) != 'Meter':
        gdaltest.post_reason( 'Got wrong UNIT name (%s) after ESRI morph.' % \
                              srs.GetAttrValue( 'UNIT' ) )
        return 'fail'

    return 'success'
###############################################################################
# Verify that state plane epsg authority values are not applied if the
# linear units are changed for old style .prj files (bug #1697)
def osr_esri_14():
    # State plane EPSG authority codes may only be attached when the linear
    # unit is the EPSG-native meter; a feet-based old-style .prj must carry
    # no authority code (bug #1697).
    feet_defn = ['PROJECTION STATEPLANE',
                 'UNITS feet',
                 'FIPSZONE 2600',
                 'DATUM NAD83',
                 'PARAMETERS']

    srs = osr.SpatialReference()
    srs.ImportFromESRI(feet_defn)
    code = srs.GetAuthorityCode('PROJCS')
    if code is not None:
        print(code)
        gdaltest.post_reason( 'Get epsg authority code inappropriately.' )
        return 'fail'

    meter_defn = ['PROJECTION STATEPLANE',
                  'UNITS meter',
                  'FIPSZONE 2600',
                  'DATUM NAD83',
                  'PARAMETERS']

    srs = osr.SpatialReference()
    srs.ImportFromESRI(meter_defn)
    code = srs.GetAuthorityCode('PROJCS')
    if code != '32104':
        print(code)
        gdaltest.post_reason( 'Did not get epsg authority code when expected.')
        return 'fail'

    return 'success'
###############################################################################
# Verify hotine oblique mercator handling, particularly handling
# of the rectified_grid_angle parameter.
def osr_esri_15():
    """Round-trip a Hotine Oblique Mercator (Azimuth Center) definition.

    MorphFromESRI() must synthesize a rectified_grid_angle parameter (ESRI WKT
    omits it) and MorphToESRI() must strip it again, reproducing the expected
    ESRI WKT exactly.  Returns 'success' or 'fail' (gdaltest convention).
    """
    srs = osr.SpatialReference()
    srs.SetFromUserInput('PROJCS["Bern_1898_Bern_LV03C",GEOGCS["GCS_Bern_1898_Bern",DATUM["D_Bern_1898",SPHEROID["Bessel_1841",6377397.155,299.1528128]],PRIMEM["Bern",7.439583333333333],UNIT["Degree",0.0174532925199433]],PROJECTION["Hotine_Oblique_Mercator_Azimuth_Center"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Scale_Factor",1.0],PARAMETER["Azimuth",90.0],PARAMETER["Longitude_Of_Center",0.0],PARAMETER["Latitude_Of_Center",46.95240555555556],UNIT["Meter",1.0]]' )
    expected = 'PROJCS["Bern_1898_Bern_LV03C",GEOGCS["GCS_Bern_1898_Bern",DATUM["D_Bern_1898",SPHEROID["Bessel_1841",6377397.155,299.1528128]],PRIMEM["Bern",7.439583333333333],UNIT["Degree",0.017453292519943295]],PROJECTION["Hotine_Oblique_Mercator_Azimuth_Center"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Scale_Factor",1.0],PARAMETER["Azimuth",90.0],PARAMETER["Longitude_Of_Center",0.0],PARAMETER["Latitude_Of_Center",46.95240555555556],UNIT["Meter",1.0]]'
    # ESRI -> OGC: the parameter must appear in the morphed WKT.
    srs.MorphFromESRI()
    wkt = srs.ExportToWkt()
    if wkt.find('rectified_grid_angle') == -1:
        print(wkt)
        gdaltest.post_reason( 'Did not get rectified_grid_angle as expected.')
        return 'fail'
    # OGC -> ESRI: the parameter must be stripped again.
    srs.MorphToESRI()
    wkt = srs.ExportToWkt()
    if wkt.find('rectified_grid_angle') != -1:
        gdaltest.post_reason('did not get rectified_grid_angle removed as expected.' )
        return 'fail'
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        gdaltest.post_reason( 'Did not get expected HOM projection after morphing' )
        return 'fail'
    return 'success'
###############################################################################
# Verify translation of equirectngular to equidistant cylindrical with
# cleanup of parameters.
def osr_esri_16():
    """A PROJ.4 eqc definition must morph to ESRI Equidistant_Cylindrical."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput('+proj=eqc +lat_0=0 +lat_ts=-10 +lon_0=2 +x=100000 +y_0=200000 +ellps=sphere')
    srs.MorphToESRI()
    got = srs.ExportToWkt()
    want = 'PROJCS["Equidistant_Cylindrical",GEOGCS["GCS_Normal Sphere (r=6370997)",DATUM["D_unknown",SPHEROID["sphere",6370997,0]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Equidistant_Cylindrical"],PARAMETER["central_meridian",2],PARAMETER["standard_parallel_1",-10],PARAMETER["false_easting",0],PARAMETER["false_northing",200000],UNIT["Meter",1]]'
    if got != want:
        print(got)
        gdaltest.post_reason( 'Did not get expected equidistant cylindrical.' )
        return 'fail'
    return 'success'
###############################################################################
# Test LAEA support (#3017)
def osr_esri_17():
    """Round-trip a Lambert Azimuthal Equal Area definition (#3017).

    MorphToESRI() must rename latitude/longitude_of_center to
    latitude_of_origin/central_meridian; MorphFromESRI() must map them back.
    Returns 'success' or 'fail' (gdaltest convention).
    """
    original = 'PROJCS["ETRS89 / ETRS-LAEA",GEOGCS["ETRS89",DATUM["European_Terrestrial_Reference_System_1989",SPHEROID["GRS 1980",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["degree",0.01745329251994328]],UNIT["metre",1],PROJECTION["Lambert_Azimuthal_Equal_Area"],PARAMETER["latitude_of_center",52],PARAMETER["longitude_of_center",10],PARAMETER["false_easting",4321000],PARAMETER["false_northing",3210000]]'
    srs = osr.SpatialReference()
    srs.SetFromUserInput( original )
    # OGC -> ESRI
    expected = 'PROJCS["ETRS89_ETRS_LAEA",GEOGCS["GCS_ETRS_1989",DATUM["D_ETRS_1989",SPHEROID["GRS_1980",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Lambert_Azimuthal_Equal_Area"],PARAMETER["latitude_of_origin",52],PARAMETER["central_meridian",10],PARAMETER["false_easting",4321000],PARAMETER["false_northing",3210000],UNIT["Meter",1]]'
    srs.MorphToESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        gdaltest.post_reason( 'Did not get expected LAEA SRS after morphToESRI' )
        return 'fail'
    # ESRI -> OGC (datum name is restored, center parameter names return)
    expected = 'PROJCS["ETRS89_ETRS_LAEA",GEOGCS["GCS_ETRS_1989",DATUM["European_Terrestrial_Reference_System_1989",SPHEROID["GRS_1980",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Lambert_Azimuthal_Equal_Area"],PARAMETER["latitude_of_center",52],PARAMETER["longitude_of_center",10],PARAMETER["false_easting",4321000],PARAMETER["false_northing",3210000],UNIT["Meter",1]]'
    srs.MorphFromESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        gdaltest.post_reason( 'Did not get expected LAEA SRS after morphFromESRI' )
        return 'fail'
    return 'success'
###############################################################################
# Test EC morphing.
def osr_esri_18():
    """Round-trip Equidistant_Cylindrical <-> Equirectangular morphing.

    Comparison uses IsSame() rather than exact WKT so formatting differences
    do not matter.  Returns 'success' or 'fail' (gdaltest convention).
    """
    original = """PROJCS["World_Equidistant_Cylindrical",
        GEOGCS["GCS_WGS_1984",
            DATUM["D_WGS_1984",
                SPHEROID["WGS_1984",6378137,298.257223563]],
            PRIMEM["Greenwich",0],
            UNIT["Degree",0.017453292519943295]],
        PROJECTION["Equidistant_Cylindrical"],
        PARAMETER["False_Easting",0],
        PARAMETER["False_Northing",0],
        PARAMETER["Central_Meridian",0],
        PARAMETER["Standard_Parallel_1",60],
        UNIT["Meter",1]]"""
    srs = osr.SpatialReference()
    srs.SetFromUserInput( original )
    # ESRI -> OGC: Equidistant_Cylindrical becomes Equirectangular.
    expected = 'PROJCS["World_Equidistant_Cylindrical",GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Equirectangular"],PARAMETER["False_Easting",0],PARAMETER["False_Northing",0],PARAMETER["Central_Meridian",0],PARAMETER["standard_parallel_1",60],UNIT["Meter",1]]'
    srs.MorphFromESRI()
    srs_expected = osr.SpatialReference( wkt = expected )
    if not srs.IsSame(srs_expected):
        print('')
        print('Got: ', srs.ExportToPrettyWkt())
        print('Expected: ', srs_expected.ExportToPrettyWkt())
        gdaltest.post_reason( 'Did not get expected EC SRS after morphFromESRI' )
        return 'fail'
    # OGC -> ESRI: must match the original definition again.
    srs.MorphToESRI()
    srs_expected = osr.SpatialReference( wkt = original )
    if not srs.IsSame(srs_expected):
        print('')
        print('Got: ', srs.ExportToPrettyWkt())
        print('Expected: ', srs_expected.ExportToPrettyWkt())
        gdaltest.post_reason( 'Did not get expected EC SRS after morphToESRI' )
        return 'fail'
    return 'success'
###############################################################################
# Test spheroid remaping (per #3904)
def osr_esri_19():
    """Spheroid names remap GRS_1967_Truncated <-> GRS_1967_Modified (#3904)."""
    esri_wkt = """GEOGCS["GCS_South_American_1969",DATUM["D_South_American_1969",SPHEROID["GRS_1967_Truncated",6378160.0,298.25]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]"""
    srs = osr.SpatialReference()
    srs.SetFromUserInput(esri_wkt)
    # ESRI -> OGC: the truncated GRS 1967 spheroid becomes the 'Modified' one.
    srs.MorphFromESRI()
    if srs.GetAttrValue('SPHEROID') != 'GRS_1967_Modified':
        print('')
        print('Got: ', srs.ExportToPrettyWkt())
        gdaltest.post_reason( 'Did not get expected spheroid name after morphFromESRI' )
        return 'fail'
    # OGC -> ESRI: round-trips back to the truncated name.
    srs.MorphToESRI()
    if srs.GetAttrValue('SPHEROID') != 'GRS_1967_Truncated':
        print('')
        print('Got: ', srs.ExportToPrettyWkt())
        gdaltest.post_reason( 'Did not get expected spheroid name after morphToESRI' )
        return 'fail'
    return 'success'
###############################################################################
# Test esri->ogc, esri->proj / ogc->esri, ogc->proj / proj->esri, proj->ogc
def osr_ersi_test( wkt_esri, wkt_ogc, proj4 ):
    """Cross-check one SRS expressed as ESRI WKT, OGC WKT and a PROJ.4 string.

    All six conversion directions between the three representations are
    tested.  DATUM nodes are neutralised to 'unknown' before comparison so
    only the projection machinery is exercised.  Returns 'success' or 'fail'.

    NOTE(review): the function name carries a historic 'ersi' typo; callers
    (osr_esri_20) use it as-is, so it is kept for compatibility.
    """
    silent = True
    #silent = False
    result = 'success'
    srs_esri = osr.SpatialReference()
    srs_ogc = osr.SpatialReference()
    if not silent:
        print( 'osr_esri_test( ) \nwkt_esri='+wkt_esri+'\nwkt_ogc= '+wkt_ogc+'\nproj4='+proj4 )
    #esri->ogc, esri->proj
    if not silent:
        print( '\nesri->ogc, esri->proj\n' )
    srs_esri.SetFromUserInput( wkt_esri )
    srs_esri.MorphFromESRI()
    # Neutralise the datum names so IsSame() compares projections only.
    srs_esri.SetAttrValue( 'PROJCS|GEOGCS|DATUM','unknown' )
    srs_ogc.SetFromUserInput( wkt_ogc )
    srs_ogc.SetAttrValue( 'PROJCS|GEOGCS|DATUM','unknown' )
    wkt_esri_to_ogc = srs_esri.ExportToWkt()
    wkt_esri_to_proj4 = srs_esri.ExportToProj4()
    if not silent:
        print( 'wkt_ogc: '+srs_ogc.ExportToWkt() )
        print( 'wkt_esri_to_ogc: '+wkt_esri_to_ogc )
        print( 'wkt_esri_to_proj4: '+wkt_esri_to_proj4 )
    if not srs_esri.IsSame(srs_ogc):
        print( 'wkt_esri_to_ogc failed for '+proj4 )
        result = 'fail'
    if wkt_esri_to_proj4 != proj4:
        print( 'wkt_esri_to_proj4 failed for '+proj4 )
        result = 'fail'
    #ogc->esri, ogc->proj
    if not silent:
        print( '\nogc->esri, ogc->proj\n' )
    srs_esri.SetFromUserInput( wkt_esri )
    srs_esri.SetAttrValue( 'PROJCS|GEOGCS|DATUM','unknown' )
    srs_ogc.SetFromUserInput( wkt_ogc )
    srs_ogc.SetAttrValue( 'PROJCS|GEOGCS|DATUM','unknown' )
    wkt_ogc_to_proj4 = srs_ogc.ExportToProj4()
    srs_ogc.MorphToESRI()
    srs_ogc.SetAttrValue( 'PROJCS|GEOGCS|DATUM','unknown' )
    wkt_ogc_to_esri = srs_ogc.ExportToWkt()
    if not silent:
        print( 'wkt_ogc_to_esri: '+wkt_ogc_to_esri )
        print( 'wkt_ogc_to_proj4: '+wkt_ogc_to_proj4 )
    if not srs_esri.IsSame(srs_ogc):
        print( 'wkt_ogc_to_esri failed for '+proj4 )
        result = 'fail'
    if wkt_ogc_to_proj4 != proj4:
        print( 'wkt_ogc_to_proj4 failed for '+proj4 )
        result = 'fail'
    #proj->esri, proj->ogc
    if not silent:
        print( '\nproj->esri, proj->ogc\n' )
    srs_esri.SetFromUserInput( proj4 )
    srs_esri.MorphFromESRI()
    srs_esri.SetAttrValue( 'PROJCS|GEOGCS|DATUM','unknown' )
    proj4_to_esri = srs_esri.ExportToProj4()
    srs_ogc.SetFromUserInput( proj4 )
    srs_ogc.SetAttrValue( 'PROJCS|GEOGCS|DATUM','unknown' )
    proj4_to_ogc = srs_ogc.ExportToProj4()
    if proj4_to_ogc != proj4:
        print( 'proj4_to_ogc failed: proj4='+proj4+', proj4_to_ogc='+proj4_to_ogc )
        result = 'fail'
    if proj4_to_esri != proj4:
        print( 'proj4_to_esri failed: proj4='+proj4+', proj4_to_esri='+proj4_to_esri )
        result = 'fail'
    return result
###############################################################################
# Test for various stereographic projection remappings (ESRI / OGC / PROJ.4)
# Stereographic
# Double_Stereographic / Oblique_Stereographic (#1428 and #4267)
# Stereographic_North_Pole / Polar_Stereographic
# Orthographics (#4249)
def osr_esri_20():
    """Exercise stereographic/orthographic remappings via osr_ersi_test().

    Each sub-case supplies matched ESRI WKT, OGC WKT and PROJ.4 definitions
    and requires all six conversion directions to agree.  Returns 'success'
    or 'fail' (gdaltest convention).
    """
    result = 'success'
    # Stereographic / Stereographic / +proj=stere +lat_0=0 +lon_0=0 ...
    #modified definitions from ESRI 'Stereographic (world).prj'
    stere_esri='PROJCS["World_Stereographic",GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Stereographic"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",0.0],PARAMETER["Scale_Factor",1.0],PARAMETER["Latitude_Of_Origin",0.0],UNIT["Meter",1.0]]'
    stere_ogc='PROJCS["World_Stereographic",GEOGCS["GCS_WGS_1984",DATUM["WGS_84",SPHEROID["WGS_84",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Stereographic"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",0.0],PARAMETER["Scale_Factor",1.0],PARAMETER["Latitude_Of_Origin",0.0],UNIT["Meter",1.0]]'
    stere_proj4='+proj=stere +lat_0=0 +lon_0=0 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs '
    result1 = osr_ersi_test(stere_esri, stere_ogc, stere_proj4)
    # Double_Stereographic / Oblique_Stereographic / +proj=sterea +lat_0=46 +lon_0=25 ...
    #modified definitions from ESRI 'Stereo 1970.prj'
    sterea_esri='PROJCS["Stereo_70",GEOGCS["GCS_Dealul_Piscului_1970",DATUM["D_Dealul_Piscului_1970",SPHEROID["Krasovsky_1940",6378245.0,298.3]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Double_Stereographic"],PARAMETER["False_Easting",500000.0],PARAMETER["False_Northing",500000.0],PARAMETER["Central_Meridian",25.0],PARAMETER["Scale_Factor",0.99975],PARAMETER["Latitude_Of_Origin",46.0],UNIT["Meter",1.0]]'
    sterea_ogc='PROJCS["Stereo_70",GEOGCS["GCS_Dealul_Piscului_1970",DATUM["Dealul_Piscului_1970",SPHEROID["Krasovsky_1940",6378245.0,298.3]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Oblique_Stereographic"],PARAMETER["False_Easting",500000.0],PARAMETER["False_Northing",500000.0],PARAMETER["Central_Meridian",25.0],PARAMETER["Scale_Factor",0.99975],PARAMETER["Latitude_Of_Origin",46.0],UNIT["Meter",1.0]]'
    sterea_proj4='+proj=sterea +lat_0=46 +lon_0=25 +k=0.99975 +x_0=500000 +y_0=500000 +ellps=krass +units=m +no_defs '
    result2 = osr_ersi_test(sterea_esri, sterea_ogc, sterea_proj4)
    # Stereographic_North_Pole / Polar_Stereographic / +proj=stere +lat_0=90 +lat_ts=70 ...
    #modified definitions from ESRI 'WGS 1984 NSIDC Sea Ice Polar Stereographic North.prj'
    sterep_esri='PROJCS["WGS_1984_NSIDC_Sea_Ice_Polar_Stereographic_North",GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Stereographic_North_Pole"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",-45.0],PARAMETER["Standard_Parallel_1",70.0],UNIT["Meter",1.0]]'
    sterep_ogc='PROJCS["WGS_1984_NSIDC_Sea_Ice_Polar_Stereographic_North",GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_84",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Polar_Stereographic"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",-45.0],PARAMETER["latitude_of_origin",70.0],UNIT["Meter",1.0]]'
    sterep_proj4='+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs '
    result3 = osr_ersi_test(sterep_esri, sterep_ogc, sterep_proj4)
    # Orthographic (#4249)
    ortho_esri='PROJCS["unnamed",GEOGCS["GCS_WGS_1984",DATUM["unknown",SPHEROID["WGS84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Orthographic"],PARAMETER["Latitude_Of_Center",-37],PARAMETER["Longitude_Of_Center",145],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]'
    ortho_ogc='PROJCS["unnamed",GEOGCS["WGS 84",DATUM["unknown",SPHEROID["WGS84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Orthographic"],PARAMETER["latitude_of_origin",-37],PARAMETER["central_meridian",145],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]'
    ortho_proj4='+proj=ortho +lat_0=-37 +lon_0=145 +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs '
    result4 = osr_ersi_test(ortho_esri, ortho_ogc, ortho_proj4)
    if ( result1 != 'success' or result2 != 'success' or result3 != 'success' or result4 != 'success'):
        result = 'fail'
    return result
###############################################################################
# Test round-trip WKT ESRI->OGC->ESRI
#
# data from bug #4345 and ESRI pages below
# ifile must be in csv format (; seperator) with the following header:
# COORD_REF_SYS_CODE;ESRI_DATUM_NAME;WKT
# http://help.arcgis.com/en/arcims/10.0/mainhelp/mergedProjects/ArcXMLGuide/elements/gcs.htm
# http://help.arcgis.com/en/arcims/10.0/mainhelp/mergedProjects/ArcXMLGuide/elements/pcs.htm
# http://help.arcgis.com/en/arcims/10.0/mainhelp/mergedProjects/ArcXMLGuide/elements/dattrans.htm
def osr_esri_test_esri_ogc_esri( ifile, ofile_base, fix_config='NO', check_epsg=False ):
    """Round-trip every ESRI WKT in *ifile* to OGC and back to ESRI.

    ifile       -- ';'-separated csv (optionally gzipped) whose header is
                   COORD_REF_SYS_CODE;ESRI_DATUM_NAME;WKT.
    ofile_base  -- basename for failure reports written under tmp/.
    fix_config  -- value for the GDAL_FIX_ESRI_WKT config option while running.
    check_epsg  -- also verify MorphFromESRI() recovers the GEOGCS EPSG code.

    Returns 'success' or 'fail' (gdaltest convention).  Report files for
    passing categories are removed; failing ones are left for inspection.
    """
    if not os.path.exists( ifile ):
        print('input file '+ifile+' does not exist')
        return 'fail'
    result = 'success'
    check_srs = True
    check_wkt = False
    failed_epsg_count = 0
    failed_srs_count = 0
    failed_wkt_count = 0
    ofile_epsg = 'tmp/'+ofile_base+'_epsg.txt'
    ofile_srs = 'tmp/'+ofile_base+'_srs.txt'
    ofile_wkt = 'tmp/'+ofile_base+'_wkt.txt'
    # initialise output files
    if not os.path.exists('tmp'):
        os.mkdir('tmp')
    if os.path.exists(ofile_epsg):
        os.unlink(ofile_epsg)
    if check_epsg:
        epsg_ne = ''
        epsg_none = ''
        epsg_other = ''
        of_epsg = open(ofile_epsg,'w')
    if os.path.exists(ofile_srs):
        os.unlink(ofile_srs)
    if check_srs:
        of_srs = open(ofile_srs,'w')
    if os.path.exists(ofile_wkt):
        os.unlink(ofile_wkt)
    if check_wkt:
        of_wkt = open(ofile_wkt,'w')
    # open input file (transparently handle gzipped csv)
    if os.path.splitext(ifile)[1] == '.gz':
        f = gzip.open(ifile, 'rb')
    else:
        f = open(ifile,'rt')
    csv_reader = csv.DictReader(f,delimiter=';')
    # BUG FIX: removed an unused, malformed csv.DictReader() call that passed
    # a file *name* (not a file object) and misused the positional arguments.
    # PROJECTION names with multiple valid ESRI mappings; loop-invariant,
    # so build the table once instead of per csv row.
    remap_proj = dict([ ['Transverse_Mercator','Gauss_Kruger'], ['Equidistant_Cylindrical', 'Plate_Carree'], \
                        ['Hotine_Oblique_Mercator_Azimuth_Natural_Origin','Hotine_Oblique_Mercator_Azimuth_Center'] ] )
    # set GDAL_FIX_ESRI_WKT option
    fix_config_bak = gdal.GetConfigOption('GDAL_FIX_ESRI_WKT')
    gdal.SetConfigOption('GDAL_FIX_ESRI_WKT', fix_config)
    # need to be quiet because some codes raise errors
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    # parse all lines
    for iline in csv_reader:
        epsg_code = int(iline['COORD_REF_SYS_CODE'])
        if iline['WKT'] is None or iline['WKT']=='':
            continue
        # read wkt and morph from ESRI
        srs1 = osr.SpatialReference()
        srs1.ImportFromWkt( iline['WKT'] )
        wkt1 = srs1.ExportToWkt()
        srs2 = srs1.Clone()
        srs2.MorphFromESRI()
        # morph back to ESRI
        srs3 = srs2.Clone()
        srs3.MorphToESRI()
        wkt3 = srs3.ExportToWkt()
        # manage special cases of PROJECTION parameters that have multiple mappings
        proj1 = srs1.GetAttrValue( 'PROJCS|PROJECTION' )
        proj3 = srs3.GetAttrValue( 'PROJCS|PROJECTION' )
        if proj3 in remap_proj and proj1 == remap_proj[proj3]:
            srs3.SetAttrValue( 'PROJCS|PROJECTION', remap_proj[proj3] )
            wkt3 = srs3.ExportToWkt()
        # check epsg
        if check_epsg:
            epsg2 = srs2.GetAuthorityCode('GEOGCS')
            if epsg2 is None or int(epsg2) != epsg_code:
                # check why epsg codes conflict
                srs4 = osr.SpatialReference()
                # codes that EPSG itself cannot import are not counted as failures
                if srs4.ImportFromEPSG( epsg_code ) != 0:
                    epsg_ne = epsg_ne+' '+str(epsg_code)
                else:
                    if epsg2 is None:
                        of_epsg.write( 'ERROR: #'+str(epsg_code)+', did not get EPSG code\n' )
                        epsg_none = epsg_none+' '+str(epsg_code)
                    else:
                        of_epsg.write( 'ERROR: EPSG not matching for # '+str(epsg_code)+', got EPSG:'+str(epsg2)+'\n' )
                        epsg_other = epsg_other+' '+str(epsg_code)
                    failed_epsg_count = failed_epsg_count + 1
                    of_epsg.write( 'wkt1: '+wkt1+'\n'+'wkt3: '+wkt3+'\n' )
        # check srs
        if check_srs and not srs1.IsSame(srs3):
            failed_srs_count = failed_srs_count + 1
            of_srs.write( 'ERROR: SRS not matching for # '+iline['COORD_REF_SYS_CODE']+'\n' )
            of_srs.write( 'wkt1: '+wkt1+'\n'+'wkt3: '+wkt3+'\n' )
        # check wkt
        if check_wkt and wkt1 != wkt3:
            failed_wkt_count = failed_wkt_count + 1
            of_wkt.write( 'WARNING: WKT not matching for # '+iline['COORD_REF_SYS_CODE']+'\n' )
            of_wkt.write( 'wkt1: '+wkt1+'\n'+'wkt3: '+wkt3+'\n' )
    # BUG FIX: the input file was never closed.
    f.close()
    # revert the config option
    # BUG FIX: previously restored fix_config (a no-op) instead of the backup,
    # leaking the test's GDAL_FIX_ESRI_WKT setting into subsequent tests.
    gdal.SetConfigOption('GDAL_FIX_ESRI_WKT', fix_config_bak)
    gdal.PopErrorHandler()
    # close files and report
    if check_epsg:
        of_epsg.close()
        if failed_epsg_count > 0:
            print('ERROR: Failed %d EPSG tests, see file %s' % (failed_epsg_count,ofile_epsg) )
            result = 'fail'
        else:
            os.unlink(ofile_epsg)
    if check_srs:
        of_srs.close()
        if failed_srs_count > 0:
            print('ERROR: Failed %d SRS tests, see file %s' % (failed_srs_count,ofile_srs) )
            result = 'fail'
        else:
            os.unlink(ofile_srs)
    if check_wkt:
        of_wkt.close()
        if failed_wkt_count > 0 :
            print('WARNING: Failed %d WKT tests, see file %s' % (failed_wkt_count,ofile_wkt) )
        else:
            os.unlink(ofile_wkt)
    return result
def osr_esri_21():
    """Round-trip ESRI->OGC->ESRI over the bundled CSV definition sets."""
    result = 'success'
    # Plain round-trips: GCS, PCS and extra definitions must all pass.
    plain_checks = (
        ('data/esri_gcs.csv.gz', 'esri_gcs'),
        ('data/esri_pcs.csv.gz', 'esri_pcs'),
        ('data/esri_extra.csv', 'esri_extra'),
    )
    for ifile, obase in plain_checks:
        if osr_esri_test_esri_ogc_esri(ifile, obase) == 'fail':
            result = 'fail'
    # GCS round-trip with EPSG code checking is known to fail at the moment.
    if osr_esri_test_esri_ogc_esri('data/esri_gcs.csv.gz', 'esri_gcs2',
                                   'GEOGCS', True) == 'fail':
        result = 'expected_fail'
    return result
###############################################################################
# Test round-trip WKT OGC->ESRI->OGC from EPSG code
#
# ifile must be in csv format and contain a COORD_REF_SYS_CODE
# which will be used in ImportFromEPSG()
def osr_esri_test_ogc_esri_ogc( ifile, ofile_base, fix_config='NO', check_epsg=False ):
    """Round-trip EPSG-defined SRS through ESRI WKT and back to OGC.

    ifile       -- ','-separated csv (optionally gzipped) containing a
                   COORD_REF_SYS_CODE column, fed to ImportFromEPSG().
    ofile_base  -- basename for failure reports written under tmp/.
    fix_config  -- value for the GDAL_FIX_ESRI_WKT config option while running.
    check_epsg  -- also verify MorphFromESRI() recovers the GEOGCS EPSG code.

    Returns 'success' or 'fail' (gdaltest convention).  Report files for
    passing categories are removed; failing ones are left for inspection.
    """
    if not os.path.exists( ifile ):
        print('input file '+ifile+' does not exist')
        return 'fail'
    result = 'success'
    check_srs = True
    check_wkt = False
    failed_epsg_count = 0
    failed_srs_count = 0
    failed_wkt_count = 0
    ofile_epsg = 'tmp/'+ofile_base+'_epsg.txt'
    ofile_srs = 'tmp/'+ofile_base+'_srs.txt'
    ofile_wkt = 'tmp/'+ofile_base+'_wkt.txt'
    # initialise output files
    if not os.path.exists('tmp'):
        os.mkdir('tmp')
    if os.path.exists(ofile_epsg):
        os.unlink(ofile_epsg)
    if check_epsg:
        of_epsg = open(ofile_epsg,'w')
    if os.path.exists(ofile_srs):
        os.unlink(ofile_srs)
    if check_srs:
        of_srs = open(ofile_srs,'w')
    if os.path.exists(ofile_wkt):
        os.unlink(ofile_wkt)
    if check_wkt:
        of_wkt = open(ofile_wkt,'w')
    # open input file (transparently handle gzipped csv)
    if os.path.splitext(ifile)[1] == '.gz':
        f = gzip.open(ifile, 'rb')
    else:
        f = open(ifile,'rt')
    csv_reader = csv.DictReader(f,delimiter=',')
    # set GDAL_FIX_ESRI_WKT option
    fix_config_bak = gdal.GetConfigOption('GDAL_FIX_ESRI_WKT')
    gdal.SetConfigOption('GDAL_FIX_ESRI_WKT', fix_config)
    # need to be quiet because some codes raise errors
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    # parse all lines
    for iline in csv_reader:
        epsg_code = int(iline['COORD_REF_SYS_CODE'])
        # import from EPSG code; skip codes the EPSG tables cannot resolve
        srs1 = osr.SpatialReference()
        if srs1.ImportFromEPSG( epsg_code ) != 0:
            continue
        wkt1 = srs1.ExportToWkt()
        # morph to ESRI
        srs2 = srs1.Clone()
        srs2.MorphToESRI()
        # morph back from ESRI
        srs3 = srs2.Clone()
        srs3.MorphFromESRI()
        wkt3 = srs3.ExportToWkt()
        # manage special cases
        # missing rectified_grid_angle (e.g. EPSG:2057)
        if srs1.GetProjParm( 'rectified_grid_angle' ) != 0:
            srs3.SetProjParm( 'rectified_grid_angle', srs1.GetProjParm( 'rectified_grid_angle' ) )
            wkt3 = srs3.ExportToWkt()
        # missing scale_factor for Mercator_1SP (e.g. EPSG:2934) and Polar_Stereographic (e.g. EPSG:3031)
        proj1 = srs1.GetAttrValue( 'PROJCS|PROJECTION' )
        if proj1 == 'Mercator_1SP' or proj1 == 'Polar_Stereographic':
            if srs1.GetProjParm( 'scale_factor' ) != 0:
                srs3.SetProjParm( 'scale_factor', srs1.GetProjParm( 'scale_factor' ) )
                wkt3 = srs3.ExportToWkt()
        # check epsg
        if check_epsg:
            epsg3 = srs3.GetAuthorityCode('GEOGCS')
            if epsg3 is None or int(epsg3) != epsg_code:
                failed_epsg_count = failed_epsg_count + 1
                of_epsg.write( 'ERROR: EPSG not matching for EPSG:'+str(epsg_code)+', got EPSG:'+str(epsg3)+'\n' )
                of_epsg.write( 'wkt1: '+wkt1+'\n'+'wkt3: '+wkt3+'\n' )
        # strip CT - add option for this and make more tests
        srs1.StripCTParms()
        wkt1 = srs1.ExportToWkt()
        srs3.StripCTParms()
        wkt3 = srs3.ExportToWkt()
        # check srs
        if check_srs and not srs1.IsSame(srs3):
            failed_srs_count = failed_srs_count + 1
            of_srs.write( 'ERROR: SRS not matching for EPSG:'+str(epsg_code)+'\n' )
            of_srs.write( 'wkt1: '+wkt1+'\n'+'wkt3: '+wkt3+'\n' )
        # check wkt
        if check_wkt and wkt1 != wkt3:
            failed_wkt_count = failed_wkt_count + 1
            of_wkt.write( 'WARNING: WKT not matching for EPSG:'+str(epsg_code)+'\n' )
            of_wkt.write( 'wkt1: '+wkt1+'\n'+'wkt3: '+wkt3+'\n' )
    # BUG FIX: the input file was never closed.
    f.close()
    # revert the config option
    # BUG FIX: previously restored fix_config (a no-op) instead of the backup,
    # leaking the test's GDAL_FIX_ESRI_WKT setting into subsequent tests.
    gdal.SetConfigOption('GDAL_FIX_ESRI_WKT', fix_config_bak)
    gdal.PopErrorHandler()
    # close files and report
    if check_epsg:
        of_epsg.close()
        if failed_epsg_count > 0 :
            print('ERROR: Failed %d EPSG tests, see file %s' % (failed_epsg_count,ofile_epsg) )
            result = 'fail'
        else:
            os.unlink(ofile_epsg)
    if check_srs:
        of_srs.close()
        if failed_srs_count > 0 :
            print('ERROR: Failed %d SRS tests, see file %s' % (failed_srs_count,ofile_srs) )
            result = 'fail'
        else:
            os.unlink(ofile_srs)
    if check_wkt:
        of_wkt.close()
        if failed_wkt_count > 0 :
            print('WARNING: Failed %d WKT tests, see file %s' % (failed_wkt_count,ofile_wkt) )
        else:
            os.unlink(ofile_wkt)
    return result
###############################################################################
# Test EPSG->OGC->ESRI->OGC
def osr_esri_22():
    """EPSG round-trip over the GCS and PCS tables; both halves may fail."""
    result = 'success'
    gcs_csv = gdal.FindFile('gdal', 'gcs.csv')
    if osr_esri_test_ogc_esri_ogc(gcs_csv, 'epsg_gcs') == 'fail':
        result = 'expected_fail'
    pcs_csv = gdal.FindFile('gdal', 'pcs.csv')
    if osr_esri_test_ogc_esri_ogc(pcs_csv, 'epsg_pcs') == 'fail':
        result = 'expected_fail'
    return result
###############################################################################
# Test EPSG->OGC->ESRI->OGC
# set GDAL_FIX_ESRI_WKT=DATUM (bugs #4378 and #4345), don't expect to fail
def osr_esri_23():
    """EPSG round-trip with the GDAL_FIX_ESRI_WKT workarounds enabled."""
    result = 'success'
    # GCS definitions, fixing at the GEOGCS level: known to fail currently.
    if osr_esri_test_ogc_esri_ogc(gdal.FindFile('gdal','gcs.csv'),
                                  'epsg_gcs2', 'GEOGCS', True) == 'fail':
        result = 'expected_fail'
    # PCS definitions, fixing at the DATUM level: these must pass.
    if osr_esri_test_ogc_esri_ogc(gdal.FindFile('gdal','pcs.csv'),
                                  'epsg_pcs2', 'DATUM', False) == 'fail':
        result = 'fail'
    return result
###############################################################################
#
# Registry of test functions executed by gdaltest.run_tests() below;
# the trailing None marks the end of the list.
gdaltest_list = [
    osr_esri_1,
    osr_esri_2,
    osr_esri_3,
    osr_esri_4,
    osr_esri_5,
    osr_esri_6,
    osr_esri_7,
    osr_esri_8,
    osr_esri_9,
    osr_esri_10,
    osr_esri_11,
    osr_esri_12,
    osr_esri_13,
    osr_esri_14,
    osr_esri_15,
    osr_esri_16,
    osr_esri_17,
    osr_esri_18,
    osr_esri_19,
    osr_esri_20,
    osr_esri_21,
    osr_esri_22,
    osr_esri_23,
    None ]
if __name__ == '__main__':
    # Stand-alone entry point: run all registered tests under gdaltest.
    gdaltest.setup_run( 'osr_esri' )
    #make sure GDAL_FIX_ESRI_WKT does not interfere with tests
    gdal.SetConfigOption('GDAL_FIX_ESRI_WKT', 'NO')
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
| 42.906115 | 565 | 0.618922 | true | true | |
f7f5bda15c469db8f225aad16ad3d8871fdef33c | 394 | py | Python | bindings/pydeck/tests/fixtures.py | StijnAmeloot/deck.gl | d67688e3f71a37e2f021dde6681bb1516bebac2b | [
"MIT"
] | 7,702 | 2016-04-19T15:56:09.000Z | 2020-04-14T19:03:13.000Z | bindings/pydeck/tests/fixtures.py | StijnAmeloot/deck.gl | d67688e3f71a37e2f021dde6681bb1516bebac2b | [
"MIT"
] | 3,126 | 2016-04-20T23:04:42.000Z | 2020-04-14T22:46:02.000Z | bindings/pydeck/tests/fixtures.py | StijnAmeloot/deck.gl | d67688e3f71a37e2f021dde6681bb1516bebac2b | [
"MIT"
] | 1,526 | 2016-05-07T06:55:07.000Z | 2020-04-14T18:52:19.000Z | import glob
import json
import os
import logging
here = os.path.dirname(os.path.abspath(__file__))
fixture_path = os.path.join(here, "./fixtures/")
json_glob = os.path.join(fixture_path, "*.json")
fixtures = {}
for fname in glob.glob(json_glob):
fixture_text = open(fname).read()
fixture_name = os.path.basename(fname).replace(".json", "")
fixtures[fixture_name] = fixture_text
| 23.176471 | 63 | 0.715736 | import glob
import json
import os
import logging
here = os.path.dirname(os.path.abspath(__file__))
fixture_path = os.path.join(here, "./fixtures/")
json_glob = os.path.join(fixture_path, "*.json")
fixtures = {}
for fname in glob.glob(json_glob):
fixture_text = open(fname).read()
fixture_name = os.path.basename(fname).replace(".json", "")
fixtures[fixture_name] = fixture_text
| true | true |
f7f5bdadbd38e935ad2f80602d3dbf8de084c816 | 35,200 | py | Python | python3-alpha/python3-src/Lib/test/test_tempfile.py | stormtheh4ck3r/python-for-android | b9ea9161392f60566b81482b1e25cd77004d5c45 | [
"Apache-2.0"
] | 4 | 2016-05-04T07:05:22.000Z | 2020-09-24T00:21:05.000Z | python3-alpha/python3-src/Lib/test/test_tempfile.py | stormtheh4ck3r/python-for-android | b9ea9161392f60566b81482b1e25cd77004d5c45 | [
"Apache-2.0"
] | null | null | null | python3-alpha/python3-src/Lib/test/test_tempfile.py | stormtheh4ck3r/python-for-android | b9ea9161392f60566b81482b1e25cd77004d5c45 | [
"Apache-2.0"
] | 1 | 2018-12-12T03:06:17.000Z | 2018-12-12T03:06:17.000Z | # tempfile.py unit tests.
import tempfile
import os
import sys
import re
import warnings
import unittest
from test import support
# Capability probes: some (historic) platforms lack os.stat / os.spawnl,
# so the tests that depend on them skip themselves via these flags.
if hasattr(os, 'stat'):
    import stat
    has_stat = 1
else:
    has_stat = 0
# True when text-mode and binary-mode open flags differ on this platform.
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform in ('openbsd3', 'openbsd4'):
    TEST_FILES = 48
else:
    TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
class TC(unittest.TestCase):
    """Base class for the tempfile tests: warning filtering plus helpers
    for validating generated temporary-file names."""
    # Matches the six-character random suffix produced by _RandomNameSequence.
    str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
    def setUp(self):
        # Silence the RuntimeWarning emitted by tempfile.mktemp(); several
        # tests call mktemp() deliberately.  The warnings context is entered
        # manually here and exited in tearDown().
        self._warnings_manager = support.check_warnings()
        self._warnings_manager.__enter__()
        warnings.filterwarnings("ignore", category=RuntimeWarning,
                                message="mktemp", module=__name__)
    def tearDown(self):
        self._warnings_manager.__exit__(None, None, None)
    def failOnException(self, what, ei=None):
        """Fail the test, reporting the exception currently being handled."""
        if ei is None:
            ei = sys.exc_info()
        self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))
    def nameCheck(self, name, dir, pre, suf):
        """Assert *name* lives in *dir* with prefix *pre*, suffix *suf* and a
        six-character random middle part."""
        (ndir, nbase) = os.path.split(name)
        npre = nbase[:len(pre)]
        nsuf = nbase[len(nbase)-len(suf):]
        # check for equality of the absolute paths!
        self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
                         "file '%s' not in directory '%s'" % (name, dir))
        self.assertEqual(npre, pre,
                         "file '%s' does not begin with '%s'" % (nbase, pre))
        self.assertEqual(nsuf, suf,
                         "file '%s' does not end with '%s'" % (nbase, suf))
        nbase = nbase[len(pre):len(nbase)-len(suf)]
        self.assertTrue(self.str_check.match(nbase),
                        "random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
                        % nbase)
# Collected TestCase classes; each section below appends its class here.
test_classes = []
class test_exports(TC):
    """The tempfile module exposes exactly the documented public names."""
    def test_exports(self):
        # There are no surprising symbols in the tempfile module
        expected = {
            "NamedTemporaryFile",
            "TemporaryFile",
            "mkstemp",
            "mkdtemp",
            "mktemp",
            "TMP_MAX",
            "gettempprefix",
            "gettempdir",
            "tempdir",
            "template",
            "SpooledTemporaryFile",
            "TemporaryDirectory",
        }
        unexp = [key for key in tempfile.__dict__
                 if key[0] != '_' and key not in expected]
        self.assertTrue(len(unexp) == 0,
                        "unexpected keys: %s" % unexp)
test_classes.append(test_exports)
class test__RandomNameSequence(TC):
    """Test the internal iterator object _RandomNameSequence."""
    def setUp(self):
        self.r = tempfile._RandomNameSequence()
        super().setUp()
    def test_get_six_char_str(self):
        # _RandomNameSequence returns a six-character string
        s = next(self.r)
        self.nameCheck(s, '', '', '')
    def test_many(self):
        # _RandomNameSequence returns no duplicate strings (stochastic)
        dict = {}
        r = self.r
        for i in range(TEST_FILES):
            s = next(r)
            self.nameCheck(s, '', '', '')
            self.assertNotIn(s, dict)
            dict[s] = 1
    def supports_iter(self):
        # _RandomNameSequence supports the iterator protocol
        # NOTE(review): this method is not named test_*, so unittest never
        # collects or runs it — confirm whether it should be renamed
        # test_supports_iter (kept as-is here to avoid an interface change).
        i = 0
        r = self.r
        try:
            for s in r:
                i += 1
                if i == 20:
                    break
        except:
            self.failOnException("iteration")
test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
    """Test the internal function _candidate_tempdir_list."""
    def test_nonempty_list(self):
        # _candidate_tempdir_list returns a nonempty list of strings
        cand = tempfile._candidate_tempdir_list()
        self.assertFalse(len(cand) == 0)
        for c in cand:
            self.assertIsInstance(c, str)
    def test_wanted_dirs(self):
        # _candidate_tempdir_list contains the expected directories
        # Make sure the interesting environment variables are all set.
        with support.EnvironmentVarGuard() as env:
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname:
                    # Ensure each variable has a value for the check below.
                    env[envname] = os.path.abspath(envname)
            cand = tempfile._candidate_tempdir_list()
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname: raise ValueError
                self.assertIn(dirname, cand)
            # The current directory (or os.curdir fallback) is a candidate too.
            try:
                dirname = os.getcwd()
            except (AttributeError, os.error):
                dirname = os.curdir
            self.assertIn(dirname, cand)
            # Not practical to try to verify the presence of OS-specific
            # paths in this list.
test_classes.append(test__candidate_tempdir_list)
# We test _get_default_tempdir by testing gettempdir.
class test__get_candidate_names(TC):
    """Test the internal function _get_candidate_names."""
    def test_retval(self):
        # _get_candidate_names returns a _RandomNameSequence object
        names = tempfile._get_candidate_names()
        self.assertIsInstance(names, tempfile._RandomNameSequence)
    def test_same_thing(self):
        # _get_candidate_names always returns the same object
        first = tempfile._get_candidate_names()
        second = tempfile._get_candidate_names()
        self.assertTrue(first is second)
test_classes.append(test__get_candidate_names)
class test__mkstemp_inner(TC):
    """Test the internal function _mkstemp_inner."""
    class mkstemped:
        # Test helper: wraps the (fd, name) pair returned by
        # _mkstemp_inner and removes the file when the wrapper is
        # collected.  The os-level callables are bound at class creation
        # time so __del__ still works during interpreter shutdown, when
        # module globals may already have been cleared.
        _bflags = tempfile._bin_openflags
        _tflags = tempfile._text_openflags
        _close = os.close
        _unlink = os.unlink
        def __init__(self, dir, pre, suf, bin):
            if bin: flags = self._bflags
            else: flags = self._tflags
            (self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
        def write(self, str):
            os.write(self.fd, str)
        def __del__(self):
            self._close(self.fd)
            self._unlink(self.name)
    def do_create(self, dir=None, pre="", suf="", bin=1):
        # Create one temp file via the wrapper and validate its name
        # components; returns the wrapper so callers can write to it.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = self.mkstemped(dir, pre, suf, bin)
        except:
            self.failOnException("_mkstemp_inner")
        self.nameCheck(file.name, dir, pre, suf)
        return file
    def test_basic(self):
        # _mkstemp_inner can create files
        self.do_create().write(b"blat")
        self.do_create(pre="a").write(b"blat")
        self.do_create(suf="b").write(b"blat")
        self.do_create(pre="a", suf="b").write(b"blat")
        self.do_create(pre="aa", suf=".txt").write(b"blat")
    def test_basic_many(self):
        # _mkstemp_inner can create many files (stochastic)
        extant = list(range(TEST_FILES))
        for i in extant:
            extant[i] = self.do_create(pre="aa")
    def test_choose_directory(self):
        # _mkstemp_inner can create files in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            self.do_create(dir=dir).write(b"blat")
        finally:
            os.rmdir(dir)
    def test_file_mode(self):
        # _mkstemp_inner creates files with the proper mode
        if not has_stat:
            return # ugh, can't use SkipTest.
        file = self.do_create()
        mode = stat.S_IMODE(os.stat(file.name).st_mode)
        expected = 0o600
        if sys.platform in ('win32', 'os2emx'):
            # There's no distinction among 'user', 'group' and 'world';
            # replicate the 'user' bits.
            user = expected >> 6
            expected = user * (1 + 8 + 64)
        self.assertEqual(mode, expected)
    def test_noinherit(self):
        # _mkstemp_inner file handles are not inherited by child processes
        if not has_spawnl:
            return # ugh, can't use SkipTest.
        if support.verbose:
            v="v"
        else:
            v="q"
        file = self.do_create()
        fd = "%d" % file.fd
        try:
            me = __file__
        except NameError:
            me = sys.argv[0]
        # We have to exec something, so that FD_CLOEXEC will take
        # effect. The core of this test is therefore in
        # tf_inherit_check.py, which see.
        tester = os.path.join(os.path.dirname(os.path.abspath(me)),
                              "tf_inherit_check.py")
        # On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
        # but an arg with embedded spaces should be decorated with double
        # quotes on each end
        if sys.platform in ('win32',):
            decorated = '"%s"' % sys.executable
            tester = '"%s"' % tester
        else:
            decorated = sys.executable
        retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
        self.assertFalse(retval < 0,
                    "child process caught fatal signal %d" % -retval)
        self.assertFalse(retval > 0, "child process reports failure %d"%retval)
    def test_textmode(self):
        # _mkstemp_inner can create files in text mode
        if not has_textmode:
            return # ugh, can't use SkipTest.
        # A text file is truncated at the first Ctrl+Z byte
        f = self.do_create(bin=0)
        f.write(b"blat\x1a")
        f.write(b"extra\n")
        os.lseek(f.fd, 0, os.SEEK_SET)
        self.assertEqual(os.read(f.fd, 20), b"blat")
test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
    """Exercise gettempprefix()."""
    def test_sane_template(self):
        # The default prefix must be a non-empty string.
        prefix = tempfile.gettempprefix()
        self.assertIsInstance(prefix, str)
        self.assertTrue(len(prefix) > 0)
    def test_usable_template(self):
        # The prefix must be usable in a real file name.  Make a scratch
        # directory (created without the prefix), then create a file
        # named prefix + "xxxxxx.xxx" inside it.
        candidate = tempfile.gettempprefix() + "xxxxxx.xxx"
        scratch = tempfile.mkdtemp(prefix="")
        try:
            candidate = os.path.join(scratch, candidate)
            try:
                fd = os.open(candidate, os.O_RDWR | os.O_CREAT)
            except:
                self.failOnException("os.open")
            os.close(fd)
            os.unlink(candidate)
        finally:
            os.rmdir(scratch)
test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
    """Exercise gettempdir()."""
    def test_directory_exists(self):
        # The returned path must exist and actually be a directory.
        tmpdir = tempfile.gettempdir()
        self.assertTrue(os.path.isabs(tmpdir) or tmpdir == os.curdir,
                        "%s is not an absolute path" % tmpdir)
        self.assertTrue(os.path.isdir(tmpdir),
                        "%s is not a directory" % tmpdir)
    def test_directory_writable(self):
        # The returned directory must be writable by the current user.
        # Sneaky: NamedTemporaryFile defaults to writing into the
        # directory returned by gettempdir, so creating one proves it.
        try:
            handle = tempfile.NamedTemporaryFile()
            handle.write(b"blat")
            handle.close()
        except:
            self.failOnException("create file in %s" % tempfile.gettempdir())
    def test_same_thing(self):
        # Repeated calls return the very same cached object.
        first = tempfile.gettempdir()
        second = tempfile.gettempdir()
        self.assertIs(first, second)
test_classes.append(test_gettempdir)
class test_mkstemp(TC):
    """Exercise mkstemp()."""
    def do_create(self, dir=None, pre="", suf=""):
        # Create one file through mkstemp, check where it landed and how
        # it is named, and remove it afterwards.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            (fd, path) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
            parent = os.path.split(path)[0]
            expected = os.path.abspath(dir)
            self.assertEqual(expected, parent,
                "Directory '%s' incorrectly returned as '%s'" % (expected, parent))
        except:
            self.failOnException("mkstemp")
        try:
            self.nameCheck(path, dir, pre, suf)
        finally:
            os.close(fd)
            os.unlink(path)
    def test_basic(self):
        # mkstemp works with every combination of name components.
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
        self.do_create(dir=".")
    def test_choose_directory(self):
        # mkstemp honours an explicitly chosen parent directory.
        parent = tempfile.mkdtemp()
        try:
            self.do_create(dir=parent)
        finally:
            os.rmdir(parent)
test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
    """Test mkdtemp()."""
    def do_create(self, dir=None, pre="", suf=""):
        # Create one directory via mkdtemp and validate its name; the
        # caller is responsible for removing the returned directory.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("mkdtemp")
        try:
            self.nameCheck(name, dir, pre, suf)
            return name
        except:
            # Don't leak the directory if the name check itself fails.
            os.rmdir(name)
            raise
    def test_basic(self):
        # mkdtemp can create directories
        os.rmdir(self.do_create())
        os.rmdir(self.do_create(pre="a"))
        os.rmdir(self.do_create(suf="b"))
        os.rmdir(self.do_create(pre="a", suf="b"))
        os.rmdir(self.do_create(pre="aa", suf=".txt"))
    def test_basic_many(self):
        # mkdtemp can create many directories (stochastic)
        extant = list(range(TEST_FILES))
        try:
            for i in extant:
                extant[i] = self.do_create(pre="aa")
        finally:
            for i in extant:
                # Entries never replaced are still ints; only remove the
                # ones that were turned into directory names.
                # (Was "if(isinstance(i, str)):" -- redundant parentheses
                # removed per PEP 8; behavior unchanged.)
                if isinstance(i, str):
                    os.rmdir(i)
    def test_choose_directory(self):
        # mkdtemp can create directories in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            os.rmdir(self.do_create(dir=dir))
        finally:
            os.rmdir(dir)
    def test_mode(self):
        # mkdtemp creates directories with the proper mode
        if not has_stat:
            return # ugh, can't use SkipTest.
        dir = self.do_create()
        try:
            mode = stat.S_IMODE(os.stat(dir).st_mode)
            mode &= 0o777 # Mask off sticky bits inherited from /tmp
            expected = 0o700
            if sys.platform in ('win32', 'os2emx'):
                # There's no distinction among 'user', 'group' and 'world';
                # replicate the 'user' bits.
                user = expected >> 6
                expected = user * (1 + 8 + 64)
            self.assertEqual(mode, expected)
        finally:
            os.rmdir(dir)
test_classes.append(test_mkdtemp)
class test_mktemp(TC):
    """Test mktemp()."""
    # For safety, all use of mktemp must occur in a private directory.
    # We must also suppress the RuntimeWarning it generates.
    def setUp(self):
        self.dir = tempfile.mkdtemp()
        super().setUp()
    def tearDown(self):
        if self.dir:
            os.rmdir(self.dir)
            self.dir = None
        super().tearDown()
    class mktemped:
        # Test helper: reserve a name with mktemp and immediately create
        # the file; unlink it again when the wrapper is collected.  The
        # unlink callable is bound here so __del__ works at shutdown.
        _unlink = os.unlink
        _bflags = tempfile._bin_openflags
        def __init__(self, dir, pre, suf):
            self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
            # Create the file. This will raise an exception if it's
            # mysteriously appeared in the meanwhile.
            os.close(os.open(self.name, self._bflags, 0o600))
        def __del__(self):
            self._unlink(self.name)
    def do_create(self, pre="", suf=""):
        # Reserve-and-create one file in the private directory, then
        # validate its name components.
        try:
            file = self.mktemped(self.dir, pre, suf)
        except:
            self.failOnException("mktemp")
        self.nameCheck(file.name, self.dir, pre, suf)
        return file
    def test_basic(self):
        # mktemp can choose usable file names
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
    def test_many(self):
        # mktemp can choose many usable file names (stochastic)
        extant = list(range(TEST_FILES))
        for i in extant:
            extant[i] = self.do_create(pre="aa")
##     def test_warning(self):
##         # mktemp issues a warning when used
##         warnings.filterwarnings("error",
##                                 category=RuntimeWarning,
##                                 message="mktemp")
##         self.assertRaises(RuntimeWarning,
##                           tempfile.mktemp, dir=self.dir)
test_classes.append(test_mktemp)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class test_NamedTemporaryFile(TC):
    """Test NamedTemporaryFile()."""
    def do_create(self, dir=None, pre="", suf="", delete=True):
        # Create one named temp file and validate its name components.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
                                               delete=delete)
        except:
            self.failOnException("NamedTemporaryFile")
        self.nameCheck(file.name, dir, pre, suf)
        return file
    def test_basic(self):
        # NamedTemporaryFile can create files
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
    def test_creates_named(self):
        # NamedTemporaryFile creates files with names
        f = tempfile.NamedTemporaryFile()
        self.assertTrue(os.path.exists(f.name),
                        "NamedTemporaryFile %s does not exist" % f.name)
    def test_del_on_close(self):
        # A NamedTemporaryFile is deleted when closed
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.NamedTemporaryFile(dir=dir)
            f.write(b'blat')
            f.close()
            self.assertFalse(os.path.exists(f.name),
                        "NamedTemporaryFile %s exists after close" % f.name)
        finally:
            os.rmdir(dir)
    def test_dis_del_on_close(self):
        # Tests that delete-on-close can be disabled
        dir = tempfile.mkdtemp()
        tmp = None
        try:
            f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
            tmp = f.name
            f.write(b'blat')
            f.close()
            self.assertTrue(os.path.exists(f.name),
                        "NamedTemporaryFile %s missing after close" % f.name)
        finally:
            # Remove the file manually since delete=False left it behind.
            if tmp is not None:
                os.unlink(tmp)
            os.rmdir(dir)
    def test_multiple_close(self):
        # A NamedTemporaryFile can be closed many times without error
        f = tempfile.NamedTemporaryFile()
        f.write(b'abc\n')
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_context_manager(self):
        # A NamedTemporaryFile can be used as a context manager
        with tempfile.NamedTemporaryFile() as f:
            self.assertTrue(os.path.exists(f.name))
        self.assertFalse(os.path.exists(f.name))
        # Re-entering a closed file must raise ValueError.
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    # How to test the mode and bufsize parameters?
test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
    """Test SpooledTemporaryFile()."""
    # NOTE: these tests inspect the private _rolled flag, which records
    # whether the in-memory buffer has spilled over to a real file.
    def do_create(self, max_size=0, dir=None, pre="", suf=""):
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("SpooledTemporaryFile")
        return file
    def test_basic(self):
        # SpooledTemporaryFile can create files
        f = self.do_create()
        self.assertFalse(f._rolled)
        f = self.do_create(max_size=100, pre="a", suf=".txt")
        self.assertFalse(f._rolled)
    def test_del_on_close(self):
        # A SpooledTemporaryFile is deleted when closed
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
            self.assertFalse(f._rolled)
            f.write(b'blat ' * 5)
            self.assertTrue(f._rolled)
            filename = f.name
            f.close()
            self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
                             "SpooledTemporaryFile %s exists after close" % filename)
        finally:
            os.rmdir(dir)
    def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple within the max_size
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        for i in range(5):
            f.seek(0, 0)
            f.write(b'x' * 20)
        self.assertFalse(f._rolled)
    def test_write_sequential(self):
        # A SpooledTemporaryFile should hold exactly max_size bytes, and roll
        # over afterward
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.write(b'x' * 20)
        self.assertFalse(f._rolled)
        f.write(b'x' * 10)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_writelines(self):
        # Verify writelines with a SpooledTemporaryFile
        f = self.do_create()
        f.writelines((b'x', b'y', b'z'))
        f.seek(0)
        buf = f.read()
        self.assertEqual(buf, b'xyz')
    def test_writelines_sequential(self):
        # A SpooledTemporaryFile should hold exactly max_size bytes, and roll
        # over afterward
        f = self.do_create(max_size=35)
        f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_sparse(self):
        # A SpooledTemporaryFile that is written late in the file will extend
        # when that occurs
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.seek(100, 0)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_fileno(self):
        # A SpooledTemporaryFile should roll over to a real file on fileno()
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        self.assertTrue(f.fileno() > 0)
        self.assertTrue(f._rolled)
    def test_multiple_close_before_rollover(self):
        # A SpooledTemporaryFile can be closed many times without error
        f = tempfile.SpooledTemporaryFile()
        f.write(b'abc\n')
        self.assertFalse(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_multiple_close_after_rollover(self):
        # A SpooledTemporaryFile can be closed many times without error
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        self.assertTrue(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_bound_methods(self):
        # It should be OK to steal a bound method from a SpooledTemporaryFile
        # and use it independently; when the file rolls over, those bound
        # methods should continue to function
        f = self.do_create(max_size=30)
        read = f.read
        write = f.write
        seek = f.seek
        write(b"a" * 35)
        write(b"b" * 35)
        seek(0, 0)
        self.assertEqual(read(70), b'a'*35 + b'b'*35)
    def test_text_mode(self):
        # Creating a SpooledTemporaryFile with a text mode should produce
        # a file object reading and writing (Unicode) text strings.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
        f.write("abc\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\n")
        f.write("def\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\n")
        f.write("xyzzy\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
        # Check that Ctrl+Z doesn't truncate the file
        f.write("foo\x1abar\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
    def test_text_newline_and_encoding(self):
        # newline='' disables newline translation; the encoding argument
        # must survive the rollover to a real file.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
                                          newline='', encoding='utf-8')
        f.write("\u039B\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n")
        self.assertFalse(f._rolled)
        f.write("\u039B" * 20 + "\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
        self.assertTrue(f._rolled)
    def test_context_manager_before_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_context_manager_during_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            f.write(b'abc\n')
            f.flush()
            self.assertTrue(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_context_manager_after_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        f.flush()
        self.assertTrue(f._rolled)
        with f:
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
test_classes.append(test_SpooledTemporaryFile)
class test_TemporaryFile(TC):
    """Test TemporaryFile()."""
    def test_basic(self):
        # TemporaryFile can create files
        # No point in testing the name params - the file has no name.
        try:
            tempfile.TemporaryFile()
        except:
            self.failOnException("TemporaryFile")
    def test_has_no_name(self):
        # TemporaryFile creates files with no names (on this system)
        dir = tempfile.mkdtemp()
        f = tempfile.TemporaryFile(dir=dir)
        f.write(b'blat')
        # Sneaky: because this file has no name, it should not prevent
        # us from removing the directory it was created in.
        try:
            os.rmdir(dir)
        except:
            ei = sys.exc_info()
            # cleanup
            f.close()
            os.rmdir(dir)
            self.failOnException("rmdir", ei)
    def test_multiple_close(self):
        # A TemporaryFile can be closed many times without error
        f = tempfile.TemporaryFile()
        f.write(b'abc\n')
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    # How to test the mode and bufsize parameters?
    def test_mode_and_encoding(self):
        # Round-trip data through TemporaryFile in several mode/encoding
        # combinations and check it comes back unchanged.
        def roundtrip(input, *args, **kwargs):
            with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
                fileobj.write(input)
                fileobj.seek(0)
                self.assertEqual(input, fileobj.read())
        roundtrip(b"1234", "w+b")
        roundtrip("abdc\n", "w+")
        roundtrip("\u039B", "w+", encoding="utf-16")
        roundtrip("foo\r\n", "w+", newline="")
# On POSIX, TemporaryFile and NamedTemporaryFile are distinct; skip the
# class where they are aliases of each other.
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
    test_classes.append(test_TemporaryFile)
# Helper for test_del_on_shutdown
class NulledModules:
    """Context manager that temporarily nulls out module namespaces.

    On entry every global in each given module is set to None (mimicking
    interpreter shutdown); on exit the saved contents are restored.
    """
    def __init__(self, *modules):
        self.refs = [mod.__dict__ for mod in modules]
        self.contents = [ref.copy() for ref in self.refs]
    def __enter__(self):
        for namespace in self.refs:
            for name in namespace:
                namespace[name] = None
    def __exit__(self, *exc_info):
        for namespace, saved in zip(self.refs, self.contents):
            namespace.clear()
            namespace.update(saved)
class test_TemporaryDirectory(TC):
    """Test TemporaryDirectory()."""
    def do_create(self, dir=None, pre="", suf="", recurse=1):
        # Create a TemporaryDirectory, validate its name, and populate it
        # (optionally recursively) so cleanup has real content to remove.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("TemporaryDirectory")
        self.nameCheck(tmp.name, dir, pre, suf)
        # Create a subdirectory and some files
        if recurse:
            self.do_create(tmp.name, pre, suf, recurse-1)
        with open(os.path.join(tmp.name, "test.txt"), "wb") as f:
            f.write(b"Hello world!")
        return tmp
    def test_mkdtemp_failure(self):
        # Check no additional exception if mkdtemp fails
        # Previously would raise AttributeError instead
        # (noted as part of Issue #10188)
        with tempfile.TemporaryDirectory() as nonexistent:
            pass
        with self.assertRaises(os.error):
            tempfile.TemporaryDirectory(dir=nonexistent)
    def test_explicit_cleanup(self):
        # A TemporaryDirectory is deleted when cleaned up
        dir = tempfile.mkdtemp()
        try:
            d = self.do_create(dir=dir)
            self.assertTrue(os.path.exists(d.name),
                            "TemporaryDirectory %s does not exist" % d.name)
            d.cleanup()
            self.assertFalse(os.path.exists(d.name),
                             "TemporaryDirectory %s exists after cleanup" % d.name)
        finally:
            os.rmdir(dir)
    @support.skip_unless_symlink
    def test_cleanup_with_symlink_to_a_directory(self):
        # cleanup() should not follow symlinks to directories (issue #12464)
        d1 = self.do_create()
        d2 = self.do_create()
        # Symlink d1/foo -> d2
        os.symlink(d2.name, os.path.join(d1.name, "foo"))
        # This call to cleanup() should not follow the "foo" symlink
        d1.cleanup()
        self.assertFalse(os.path.exists(d1.name),
                         "TemporaryDirectory %s exists after cleanup" % d1.name)
        self.assertTrue(os.path.exists(d2.name),
                        "Directory pointed to by a symlink was deleted")
        self.assertEqual(os.listdir(d2.name), ['test.txt'],
                         "Contents of the directory pointed to by a symlink "
                         "were deleted")
        d2.cleanup()
    @support.cpython_only
    def test_del_on_collection(self):
        # A TemporaryDirectory is deleted when garbage collected
        dir = tempfile.mkdtemp()
        try:
            d = self.do_create(dir=dir)
            name = d.name
            del d # Rely on refcounting to invoke __del__
            self.assertFalse(os.path.exists(name),
                        "TemporaryDirectory %s exists after __del__" % name)
        finally:
            os.rmdir(dir)
    @unittest.expectedFailure # See issue #10188
    def test_del_on_shutdown(self):
        # A TemporaryDirectory may be cleaned up during shutdown
        # Make sure it works with the relevant modules nulled out
        with self.do_create() as dir:
            d = self.do_create(dir=dir)
            # Mimic the nulling out of modules that
            # occurs during system shutdown
            modules = [os, os.path]
            if has_stat:
                modules.append(stat)
            # Currently broken, so suppress the warning
            # that is otherwise emitted on stdout
            with support.captured_stderr() as err:
                with NulledModules(*modules):
                    d.cleanup()
            # Currently broken, so stop spurious exception by
            # indicating the object has already been closed
            d._closed = True
            # And this assert will fail, as expected by the
            # unittest decorator...
            self.assertFalse(os.path.exists(d.name),
                        "TemporaryDirectory %s exists after cleanup" % d.name)
    def test_warnings_on_cleanup(self):
        # Two kinds of warning on shutdown
        #   Issue 10888: may write to stderr if modules are nulled out
        #   ResourceWarning will be triggered by __del__
        with self.do_create() as dir:
            if os.sep != '\\':
                # Embed a backslash in order to make sure string escaping
                # in the displayed error message is dealt with correctly
                suffix = '\\check_backslash_handling'
            else:
                suffix = ''
            d = self.do_create(dir=dir, suf=suffix)
            #Check for the Issue 10888 message
            modules = [os, os.path]
            if has_stat:
                modules.append(stat)
            with support.captured_stderr() as err:
                with NulledModules(*modules):
                    d.cleanup()
            message = err.getvalue().replace('\\\\', '\\')
            self.assertIn("while cleaning up", message)
            self.assertIn(d.name, message)
            # Check for the resource warning
            with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
                warnings.filterwarnings("always", category=ResourceWarning)
                d.__del__()
            self.assertFalse(os.path.exists(d.name),
                        "TemporaryDirectory %s exists after __del__" % d.name)
    def test_multiple_close(self):
        # Can be cleaned-up many times without error
        d = self.do_create()
        d.cleanup()
        try:
            d.cleanup()
            d.cleanup()
        except:
            self.failOnException("cleanup")
    def test_context_manager(self):
        # Can be used as a context manager
        d = self.do_create()
        with d as name:
            self.assertTrue(os.path.exists(name))
            self.assertEqual(name, d.name)
        self.assertFalse(os.path.exists(name))
test_classes.append(test_TemporaryDirectory)
def test_main():
    # Run every TestCase registered in test_classes above.
    support.run_unittest(*test_classes)
if __name__ == "__main__":
    test_main()
| 32.532348 | 100 | 0.5775 |
import tempfile
import os
import sys
import re
import warnings
import unittest
from test import support
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
if sys.platform in ('openbsd3', 'openbsd4'):
TEST_FILES = 48
else:
TEST_FILES = 100
class TC(unittest.TestCase):
str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
def setUp(self):
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings("ignore", category=RuntimeWarning,
message="mktemp", module=__name__)
def tearDown(self):
self._warnings_manager.__exit__(None, None, None)
def failOnException(self, what, ei=None):
if ei is None:
ei = sys.exc_info()
self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file '%s' not in directory '%s'" % (name, dir))
self.assertEqual(npre, pre,
"file '%s' does not begin with '%s'" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file '%s' does not end with '%s'" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
self.assertTrue(self.str_check.match(nbase),
"random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
% nbase)
test_classes = []
class test_exports(TC):
def test_exports(self):
dict = tempfile.__dict__
expected = {
"NamedTemporaryFile" : 1,
"TemporaryFile" : 1,
"mkstemp" : 1,
"mkdtemp" : 1,
"mktemp" : 1,
"TMP_MAX" : 1,
"gettempprefix" : 1,
"gettempdir" : 1,
"tempdir" : 1,
"template" : 1,
"SpooledTemporaryFile" : 1,
"TemporaryDirectory" : 1,
}
unexp = []
for key in dict:
if key[0] != '_' and key not in expected:
unexp.append(key)
self.assertTrue(len(unexp) == 0,
"unexpected keys: %s" % unexp)
test_classes.append(test_exports)
class test__RandomNameSequence(TC):
def setUp(self):
self.r = tempfile._RandomNameSequence()
super().setUp()
def test_get_six_char_str(self):
s = next(self.r)
self.nameCheck(s, '', '', '')
def test_many(self):
dict = {}
r = self.r
for i in range(TEST_FILES):
s = next(r)
self.nameCheck(s, '', '', '')
self.assertNotIn(s, dict)
dict[s] = 1
def supports_iter(self):
i = 0
r = self.r
try:
for s in r:
i += 1
if i == 20:
break
except:
self.failOnException("iteration")
test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
def test_nonempty_list(self):
cand = tempfile._candidate_tempdir_list()
self.assertFalse(len(cand) == 0)
for c in cand:
self.assertIsInstance(c, str)
def test_wanted_dirs(self):
with support.EnvironmentVarGuard() as env:
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname:
env[envname] = os.path.abspath(envname)
cand = tempfile._candidate_tempdir_list()
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname: raise ValueError
self.assertIn(dirname, cand)
try:
dirname = os.getcwd()
except (AttributeError, os.error):
dirname = os.curdir
self.assertIn(dirname, cand)
test_classes.append(test__candidate_tempdir_list)
class test__get_candidate_names(TC):
def test_retval(self):
obj = tempfile._get_candidate_names()
self.assertIsInstance(obj, tempfile._RandomNameSequence)
def test_same_thing(self):
a = tempfile._get_candidate_names()
b = tempfile._get_candidate_names()
self.assertTrue(a is b)
test_classes.append(test__get_candidate_names)
class test__mkstemp_inner(TC):
class mkstemped:
_bflags = tempfile._bin_openflags
_tflags = tempfile._text_openflags
_close = os.close
_unlink = os.unlink
def __init__(self, dir, pre, suf, bin):
if bin: flags = self._bflags
else: flags = self._tflags
(self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
def write(self, str):
os.write(self.fd, str)
def __del__(self):
self._close(self.fd)
self._unlink(self.name)
def do_create(self, dir=None, pre="", suf="", bin=1):
if dir is None:
dir = tempfile.gettempdir()
try:
file = self.mkstemped(dir, pre, suf, bin)
except:
self.failOnException("_mkstemp_inner")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
self.do_create().write(b"blat")
self.do_create(pre="a").write(b"blat")
self.do_create(suf="b").write(b"blat")
self.do_create(pre="a", suf="b").write(b"blat")
self.do_create(pre="aa", suf=".txt").write(b"blat")
def test_basic_many(self):
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
def test_choose_directory(self):
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir).write(b"blat")
finally:
os.rmdir(dir)
def test_file_mode(self):
if not has_stat:
return
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
expected = 0o600
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
def test_noinherit(self):
if not has_spawnl:
return
if support.verbose:
v="v"
else:
v="q"
file = self.do_create()
fd = "%d" % file.fd
try:
me = __file__
except NameError:
me = sys.argv[0]
# We have to exec something, so that FD_CLOEXEC will take
# effect. The core of this test is therefore in
# tf_inherit_check.py, which see.
tester = os.path.join(os.path.dirname(os.path.abspath(me)),
"tf_inherit_check.py")
# On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
if sys.platform in ('win32',):
decorated = '"%s"' % sys.executable
tester = '"%s"' % tester
else:
decorated = sys.executable
retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
self.assertFalse(retval < 0,
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
def test_textmode(self):
if not has_textmode:
return
# A text file is truncated at the first Ctrl+Z byte
f = self.do_create(bin=0)
f.write(b"blat\x1a")
f.write(b"extra\n")
os.lseek(f.fd, 0, os.SEEK_SET)
self.assertEqual(os.read(f.fd, 20), b"blat")
test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
def test_sane_template(self):
# gettempprefix returns a nonempty prefix string
p = tempfile.gettempprefix()
self.assertIsInstance(p, str)
self.assertTrue(len(p) > 0)
def test_usable_template(self):
# gettempprefix returns a usable prefix string
# Create a temp directory, avoiding use of the prefix.
# Then attempt to create a file whose name is
# prefix + 'xxxxxx.xxx' in that directory.
p = tempfile.gettempprefix() + "xxxxxx.xxx"
d = tempfile.mkdtemp(prefix="")
try:
p = os.path.join(d, p)
try:
fd = os.open(p, os.O_RDWR | os.O_CREAT)
except:
self.failOnException("os.open")
os.close(fd)
os.unlink(p)
finally:
os.rmdir(d)
test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
def test_directory_exists(self):
# gettempdir returns a directory which exists
dir = tempfile.gettempdir()
self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
"%s is not an absolute path" % dir)
self.assertTrue(os.path.isdir(dir),
"%s is not a directory" % dir)
def test_directory_writable(self):
# gettempdir returns a directory writable by the user
# sneaky: just instantiate a NamedTemporaryFile, which
# defaults to writing into the directory returned by
# gettempdir.
try:
file = tempfile.NamedTemporaryFile()
file.write(b"blat")
file.close()
except:
self.failOnException("create file in %s" % tempfile.gettempdir())
def test_same_thing(self):
# gettempdir always returns the same object
a = tempfile.gettempdir()
b = tempfile.gettempdir()
self.assertTrue(a is b)
test_classes.append(test_gettempdir)
class test_mkstemp(TC):
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
(fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
(ndir, nbase) = os.path.split(name)
adir = os.path.abspath(dir)
self.assertEqual(adir, ndir,
"Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
except:
self.failOnException("mkstemp")
try:
self.nameCheck(name, dir, pre, suf)
finally:
os.close(fd)
os.unlink(name)
def test_basic(self):
# mkstemp can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
self.do_create(dir=".")
def test_choose_directory(self):
# mkstemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir)
finally:
os.rmdir(dir)
test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("mkdtemp")
try:
self.nameCheck(name, dir, pre, suf)
return name
except:
os.rmdir(name)
raise
def test_basic(self):
# mkdtemp can create directories
os.rmdir(self.do_create())
os.rmdir(self.do_create(pre="a"))
os.rmdir(self.do_create(suf="b"))
os.rmdir(self.do_create(pre="a", suf="b"))
os.rmdir(self.do_create(pre="aa", suf=".txt"))
def test_basic_many(self):
# mkdtemp can create many directories (stochastic)
extant = list(range(TEST_FILES))
try:
for i in extant:
extant[i] = self.do_create(pre="aa")
finally:
for i in extant:
if(isinstance(i, str)):
os.rmdir(i)
def test_choose_directory(self):
# mkdtemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
os.rmdir(self.do_create(dir=dir))
finally:
os.rmdir(dir)
def test_mode(self):
# mkdtemp creates directories with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
dir = self.do_create()
try:
mode = stat.S_IMODE(os.stat(dir).st_mode)
mode &= 0o777
expected = 0o700
if sys.platform in ('win32', 'os2emx'):
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
finally:
os.rmdir(dir)
test_classes.append(test_mkdtemp)
class test_mktemp(TC):
    """Tests for the deprecated tempfile.mktemp()."""
    # For safety, all use of mktemp must occur in a private directory.
    # We must also suppress the RuntimeWarning it generates.
    def setUp(self):
        self.dir = tempfile.mkdtemp()
        super().setUp()
    def tearDown(self):
        if self.dir:
            os.rmdir(self.dir)
            self.dir = None
        super().tearDown()
    class mktemped:
        """Wrapper that creates the file named by mktemp() and unlinks it on GC."""
        # Bound at class-definition time so __del__ still works during shutdown.
        _unlink = os.unlink
        _bflags = tempfile._bin_openflags
        def __init__(self, dir, pre, suf):
            self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
            # Create the file.  This will raise an exception if it's
            os.close(os.open(self.name, self._bflags, 0o600))
        def __del__(self):
            self._unlink(self.name)
    def do_create(self, pre="", suf=""):
        """Create a file via mktemp, validate its name, and return the wrapper."""
        try:
            file = self.mktemped(self.dir, pre, suf)
        except:
            self.failOnException("mktemp")
        self.nameCheck(file.name, self.dir, pre, suf)
        return file
    def test_basic(self):
        # mktemp names can be turned into real files
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
    def test_many(self):
        # mktemp keeps producing usable names (stochastic)
        extant = list(range(TEST_FILES))
        for i in extant:
            extant[i] = self.do_create(pre="aa")
te)
except:
self.failOnException("NamedTemporaryFile")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_creates_named(self):
f = tempfile.NamedTemporaryFile()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s does not exist" % f.name)
def test_del_on_close(self):
dir = tempfile.mkdtemp()
try:
f = tempfile.NamedTemporaryFile(dir=dir)
f.write(b'blat')
f.close()
self.assertFalse(os.path.exists(f.name),
"NamedTemporaryFile %s exists after close" % f.name)
finally:
os.rmdir(dir)
def test_dis_del_on_close(self):
dir = tempfile.mkdtemp()
tmp = None
try:
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
tmp = f.name
f.write(b'blat')
f.close()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s missing after close" % f.name)
finally:
if tmp is not None:
os.unlink(tmp)
os.rmdir(dir)
def test_multiple_close(self):
f = tempfile.NamedTemporaryFile()
f.write(b'abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_context_manager(self):
with tempfile.NamedTemporaryFile() as f:
self.assertTrue(os.path.exists(f.name))
self.assertFalse(os.path.exists(f.name))
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
# Register this TestCase with the suite runner.
test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
    """Tests for tempfile.SpooledTemporaryFile.

    The private `_rolled` flag tells whether the in-memory buffer has been
    rolled over to a real on-disk file.
    """
    def do_create(self, max_size=0, dir=None, pre="", suf=""):
        """Create a SpooledTemporaryFile and return it (fails the test on error)."""
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("SpooledTemporaryFile")
        return file
    def test_basic(self):
        # Creation alone never rolls over to disk.
        f = self.do_create()
        self.assertFalse(f._rolled)
        f = self.do_create(max_size=100, pre="a", suf=".txt")
        self.assertFalse(f._rolled)
    def test_del_on_close(self):
        # Once rolled over, the backing file is removed on close.
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
            self.assertFalse(f._rolled)
            f.write(b'blat ' * 5)
            self.assertTrue(f._rolled)
            filename = f.name
            f.close()
            self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
                        "SpooledTemporaryFile %s exists after close" % filename)
        finally:
            os.rmdir(dir)
    def test_rewrite_small(self):
        # Overwriting within max_size repeatedly must not trigger rollover.
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        for i in range(5):
            f.seek(0, 0)
            f.write(b'x' * 20)
        self.assertFalse(f._rolled)
    def test_write_sequential(self):
        # Rollover happens only once the size strictly exceeds max_size.
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.write(b'x' * 20)
        self.assertFalse(f._rolled)
        f.write(b'x' * 10)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_writelines(self):
        # writelines accepts an iterable of byte strings.
        f = self.do_create()
        f.writelines((b'x', b'y', b'z'))
        f.seek(0)
        buf = f.read()
        self.assertEqual(buf, b'xyz')
    def test_writelines_sequential(self):
        # writelines counts toward the rollover threshold like write.
        f = self.do_create(max_size=35)
        f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_sparse(self):
        # Seeking past max_size alone does not roll over; a write there does.
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.seek(100, 0)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_fileno(self):
        # Asking for a real file descriptor forces rollover to disk.
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        self.assertTrue(f.fileno() > 0)
        self.assertTrue(f._rolled)
    def test_multiple_close_before_rollover(self):
        # close() is idempotent while still in memory.
        f = tempfile.SpooledTemporaryFile()
        f.write(b'abc\n')
        self.assertFalse(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_multiple_close_after_rollover(self):
        # close() is idempotent after rollover as well.
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        self.assertTrue(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_bound_methods(self):
        # Bound methods captured before rollover keep working afterwards.
        f = self.do_create(max_size=30)
        read = f.read
        write = f.write
        seek = f.seek
        write(b"a" * 35)
        write(b"b" * 35)
        seek(0, 0)
        self.assertEqual(read(70), b'a'*35 + b'b'*35)
    def test_text_mode(self):
        # Text-mode spooled files round-trip str data.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
        f.write("abc\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\n")
        f.write("def\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\n")
        f.write("xyzzy\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
        f.write("foo\x1abar\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
    def test_text_newline_and_encoding(self):
        # newline/encoding options survive the rollover to disk.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
                                          newline='', encoding='utf-8')
        f.write("\u039B\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n")
        self.assertFalse(f._rolled)
        f.write("\u039B" * 20 + "\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
        self.assertTrue(f._rolled)
    def test_context_manager_before_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_context_manager_during_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            f.write(b'abc\n')
            f.flush()
            self.assertTrue(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_context_manager_after_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        f.flush()
        self.assertTrue(f._rolled)
        with f:
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
# Register this TestCase with the suite runner.
test_classes.append(test_SpooledTemporaryFile)
class test_TemporaryFile(TC):
    """Tests for tempfile.TemporaryFile (anonymous temporary files)."""
    def test_basic(self):
        # TemporaryFile can create files
        # No point in testing the name params - the file has no name.
        try:
            tempfile.TemporaryFile()
        except:
            self.failOnException("TemporaryFile")
    def test_has_no_name(self):
        # TemporaryFile creates files with no names (on this system)
        dir = tempfile.mkdtemp()
        f = tempfile.TemporaryFile(dir=dir)
        f.write(b'blat')
        # Sneaky: because this file has no name, it should not prevent
        # us from removing the directory it was created in.
        try:
            os.rmdir(dir)
        except:
            ei = sys.exc_info()
            # cleanup
            f.close()
            os.rmdir(dir)
            self.failOnException("rmdir", ei)
    def test_multiple_close(self):
        # A TemporaryFile can be closed many times without error
        f = tempfile.TemporaryFile()
        f.write(b'abc\n')
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    # How to test the mode and bufsize parameters?
    def test_mode_and_encoding(self):
        # Data written in a given mode/encoding reads back identically.
        def roundtrip(input, *args, **kwargs):
            with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
                fileobj.write(input)
                fileobj.seek(0)
                self.assertEqual(input, fileobj.read())
        roundtrip(b"1234", "w+b")
        roundtrip("abdc\n", "w+")
        roundtrip("\u039B", "w+", encoding="utf-16")
        roundtrip("foo\r\n", "w+", newline="")
# On platforms where TemporaryFile is just NamedTemporaryFile these tests
# would be redundant, so only register the class when they differ.
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
    test_classes.append(test_TemporaryFile)
# Helper for test_del_on_shutdown
class NulledModules:
    """Context manager that nulls out module globals and restores them on exit.

    Mimics the way CPython clears module namespaces during interpreter
    shutdown: every global of the given modules is set to None on entry and
    the original contents are put back on exit.
    """
    def __init__(self, *modules):
        namespaces = [module.__dict__ for module in modules]
        self.refs = namespaces
        # Snapshot the original namespaces so __exit__ can restore them.
        self.contents = [namespace.copy() for namespace in namespaces]
    def __enter__(self):
        for namespace in self.refs:
            for name in namespace:
                namespace[name] = None
    def __exit__(self, *exc_info):
        for namespace, saved in zip(self.refs, self.contents):
            namespace.clear()
            namespace.update(saved)
class test_TemporaryDirectory(TC):
    """Tests for tempfile.TemporaryDirectory."""
    def do_create(self, dir=None, pre="", suf="", recurse=1):
        """Create a TemporaryDirectory populated with a file (and, when
        `recurse` > 0, a nested temporary directory) and return it."""
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("TemporaryDirectory")
        self.nameCheck(tmp.name, dir, pre, suf)
        # Create a subdirectory and some files
        if recurse:
            self.do_create(tmp.name, pre, suf, recurse-1)
        with open(os.path.join(tmp.name, "test.txt"), "wb") as f:
            f.write(b"Hello world!")
        return tmp
    def test_mkdtemp_failure(self):
        # Check no additional exception if mkdtemp fails
        # Previously would raise AttributeError instead
        # (noted as part of Issue #10188)
        with tempfile.TemporaryDirectory() as nonexistent:
            pass
        with self.assertRaises(os.error):
            tempfile.TemporaryDirectory(dir=nonexistent)
    def test_explicit_cleanup(self):
        # A TemporaryDirectory is deleted when cleaned up
        dir = tempfile.mkdtemp()
        try:
            d = self.do_create(dir=dir)
            self.assertTrue(os.path.exists(d.name),
                            "TemporaryDirectory %s does not exist" % d.name)
            d.cleanup()
            self.assertFalse(os.path.exists(d.name),
                        "TemporaryDirectory %s exists after cleanup" % d.name)
        finally:
            os.rmdir(dir)
    @support.skip_unless_symlink
    def test_cleanup_with_symlink_to_a_directory(self):
        # cleanup() should not follow symlinks to directories (issue #12464)
        d1 = self.do_create()
        d2 = self.do_create()
        # Symlink d1/foo -> d2
        os.symlink(d2.name, os.path.join(d1.name, "foo"))
        # This call to cleanup() should not follow the "foo" symlink
        d1.cleanup()
        self.assertFalse(os.path.exists(d1.name),
                    "TemporaryDirectory %s exists after cleanup" % d1.name)
        self.assertTrue(os.path.exists(d2.name),
                        "Directory pointed to by a symlink was deleted")
        self.assertEqual(os.listdir(d2.name), ['test.txt'],
                         "Contents of the directory pointed to by a symlink "
                         "were deleted")
        d2.cleanup()
    @support.cpython_only
    def test_del_on_collection(self):
        # A TemporaryDirectory is deleted when garbage collected
        dir = tempfile.mkdtemp()
        try:
            d = self.do_create(dir=dir)
            name = d.name
            del d # Rely on refcounting to invoke __del__
            self.assertFalse(os.path.exists(name),
                        "TemporaryDirectory %s exists after __del__" % name)
        finally:
            os.rmdir(dir)
    @unittest.expectedFailure # See issue #10188
    def test_del_on_shutdown(self):
        # A TemporaryDirectory may be cleaned up during shutdown
        # Make sure it works with the relevant modules nulled out
        with self.do_create() as dir:
            d = self.do_create(dir=dir)
            # Mimic the nulling out of modules that
            # occurs during system shutdown
            modules = [os, os.path]
            if has_stat:
                modules.append(stat)
            # Currently broken, so suppress the warning
            # that is otherwise emitted on stdout
            with support.captured_stderr() as err:
                with NulledModules(*modules):
                    d.cleanup()
            # Currently broken, so stop spurious exception by
            # indicating the object has already been closed
            d._closed = True
            # And this assert will fail, as expected by the
            # unittest decorator...
            self.assertFalse(os.path.exists(d.name),
                        "TemporaryDirectory %s exists after cleanup" % d.name)
    def test_warnings_on_cleanup(self):
        # Two kinds of warning on shutdown
        # Issue 10888: may write to stderr if modules are nulled out
        # ResourceWarning will be triggered by __del__
        with self.do_create() as dir:
            if os.sep != '\\':
                # Embed a backslash in order to make sure string escaping
                # in the displayed error message is dealt with correctly
                suffix = '\\check_backslash_handling'
            else:
                suffix = ''
            d = self.do_create(dir=dir, suf=suffix)
            #Check for the Issue 10888 message
            modules = [os, os.path]
            if has_stat:
                modules.append(stat)
            with support.captured_stderr() as err:
                with NulledModules(*modules):
                    d.cleanup()
            message = err.getvalue().replace('\\\\', '\\')
            self.assertIn("while cleaning up", message)
            self.assertIn(d.name, message)
            # Check for the resource warning
            with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
                warnings.filterwarnings("always", category=ResourceWarning)
                d.__del__()
            self.assertFalse(os.path.exists(d.name),
                        "TemporaryDirectory %s exists after __del__" % d.name)
    def test_multiple_close(self):
        # Can be cleaned-up many times without error
        d = self.do_create()
        d.cleanup()
        try:
            d.cleanup()
            d.cleanup()
        except:
            self.failOnException("cleanup")
    def test_context_manager(self):
        # Can be used as a context manager
        d = self.do_create()
        with d as name:
            self.assertTrue(os.path.exists(name))
            self.assertEqual(name, d.name)
        self.assertFalse(os.path.exists(name))
# Register this TestCase with the suite runner.
test_classes.append(test_TemporaryDirectory)
def test_main():
    """Run every TestCase registered in test_classes."""
    support.run_unittest(*test_classes)
if __name__ == "__main__":
    test_main()
| true | true |
f7f5be4efb1389a6b77a671f867be48566e8a37e | 131 | py | Python | config.py | safeers/Glimpses-journal | 65a71a8be5b0b0812170a684b85cd39ac2f4a1fe | [
"MIT"
] | 1 | 2021-03-04T09:30:34.000Z | 2021-03-04T09:30:34.000Z | config.py | safeers/Glimpses-journal | 65a71a8be5b0b0812170a684b85cd39ac2f4a1fe | [
"MIT"
] | null | null | null | config.py | safeers/Glimpses-journal | 65a71a8be5b0b0812170a684b85cd39ac2f4a1fe | [
"MIT"
] | null | null | null | DEBUG = False
SECRET_KEY = 'ENTER YOU KEY HERE'
SQLALCHEMY_DATABASE_URI = 'sqlite:///db/glimpses.db'
SESSION_PERMANENT = False
| 26.2 | 53 | 0.748092 | DEBUG = False
SECRET_KEY = 'ENTER YOU KEY HERE'
SQLALCHEMY_DATABASE_URI = 'sqlite:///db/glimpses.db'
SESSION_PERMANENT = False
| true | true |
f7f5be7417959f74dffbd6184c2997f823c1523a | 1,224 | py | Python | code-experiments/build/python/python/solvers.py | ysakanaka/coco | ebd5c30ccc83910755e379322b23d60a4b72ef38 | [
"BSD-3-Clause"
] | null | null | null | code-experiments/build/python/python/solvers.py | ysakanaka/coco | ebd5c30ccc83910755e379322b23d60a4b72ef38 | [
"BSD-3-Clause"
] | null | null | null | code-experiments/build/python/python/solvers.py | ysakanaka/coco | ebd5c30ccc83910755e379322b23d60a4b72ef38 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
import numpy as np
# ===============================================
# the most basic example solver
# ===============================================
def random_search(fun, lbounds, ubounds, budget):
    """Efficient implementation of uniform random search between
    `lbounds` and `ubounds`.

    Candidate solutions are drawn uniformly at random in the box
    ``[lbounds, ubounds]`` and evaluated in chunks.

    Parameters
    ----------
    fun : callable
        The objective function; must expose ``number_of_constraints`` and
        ``number_of_objectives`` attributes, and a ``constraint`` method
        when ``number_of_constraints > 0``.
    lbounds, ubounds : array-like
        Lower and upper box bounds, one entry per dimension.
    budget : int
        Maximal number of candidate solutions to sample.

    Returns
    -------
    The best feasible solution found for single-objective problems, or
    ``None`` (e.g. multi-objective problems, or no feasible sample).
    """
    lbounds, ubounds = np.array(lbounds), np.array(ubounds)
    dim, x_min, f_min = len(lbounds), None, None
    max_chunk_size = 1 + 4e4 / dim  # keep the sample matrix reasonably small
    while budget > 0:
        chunk = int(max([1, min([budget, max_chunk_size])]))
        # about five times faster than "for k in range(budget):..."
        X = lbounds + (ubounds - lbounds) * np.random.rand(chunk, dim)
        if fun.number_of_constraints > 0:
            C = [fun.constraint(x) for x in X]  # call constraints
            # BUGFIX: keep solutions and objective values index-aligned by
            # filtering X down to the feasible samples before evaluating;
            # previously np.argmin(F) indexed the *unfiltered* X, returning
            # an arbitrary (possibly infeasible) sample.
            X = [x for i, x in enumerate(X) if np.all(C[i] <= 0)]
            F = [fun(x) for x in X]
        else:
            F = [fun(x) for x in X]
        if fun.number_of_objectives == 1:
            index = np.argmin(F) if len(F) else None
            if index is not None and (f_min is None or F[index] < f_min):
                # X and F are aligned, so X[index] is the actual minimizer.
                x_min, f_min = X[index], F[index]
        budget -= chunk
    return x_min
| 42.206897 | 73 | 0.556373 | from __future__ import absolute_import, division, print_function
import numpy as np
def random_search(fun, lbounds, ubounds, budget):
lbounds, ubounds = np.array(lbounds), np.array(ubounds)
dim, x_min, f_min = len(lbounds), None, None
max_chunk_size = 1 + 4e4 / dim
while budget > 0:
chunk = int(max([1, min([budget, max_chunk_size])]))
X = lbounds + (ubounds - lbounds) * np.random.rand(chunk, dim)
if fun.number_of_constraints > 0:
C = [fun.constraint(x) for x in X]
F = [fun(x) for i, x in enumerate(X) if np.all(C[i] <= 0)]
else:
F = [fun(x) for x in X]
if fun.number_of_objectives == 1:
index = np.argmin(F) if len(F) else None
if index is not None and (f_min is None or F[index] < f_min):
x_min, f_min = X[index], F[index]
budget -= chunk
return x_min
| true | true |
f7f5be8655097ceab1f46f42527ed396366cf2c0 | 1,674 | py | Python | richtext_blog/admin.py | timmygee/django-richtext-blog | 88bf111039f8ed65212d6bfd45d72a0948094b71 | [
"BSD-3-Clause"
] | 1 | 2021-04-07T13:29:26.000Z | 2021-04-07T13:29:26.000Z | richtext_blog/admin.py | timmygee/django-richtext-blog | 88bf111039f8ed65212d6bfd45d72a0948094b71 | [
"BSD-3-Clause"
] | 3 | 2020-02-11T21:06:37.000Z | 2021-06-10T17:22:41.000Z | richtext_blog/admin.py | timmygee/django-richtext-blog | 88bf111039f8ed65212d6bfd45d72a0948094b71 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from models import Post, Comment, Tag
from forms import PostFormAdmin
class CommentInline(admin.TabularInline):
    """
    Inline definition for comments
    """
    model = Comment
    # Do not render blank extra comment forms by default.
    extra = 0
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for blog posts, with inline comment editing."""
    form = PostFormAdmin
    fields = ('title', 'slug', 'tags', 'content', 'comments_closed')
    search_fields = ('title',)
    list_display = ('title', 'author', 'created', 'modified',
        'number_of_comments', 'comments_closed', 'tag_list_str')
    list_editable = ('comments_closed',)
    list_filter = ('author__username', 'tags')
    inlines = (CommentInline,)
    def save_model(self, request, obj, form, change):
        """
        Override save_model method to allow automatic population of the author
        field with the current user
        """
        obj.author = request.user
        obj.save()
    def save_formset(self, request, form, formset, change):
        """
        Save changed inline objects (ie Comments)

        This is so if the logged in user saves a comment in the admin interface
        the user can be logged against the Comment object.
        """
        instances = formset.save(commit=False)
        # BUGFIX: with commit=False, Django leaves deletions and m2m saves to
        # us; previously inlines marked for deletion were silently kept.
        for deleted in formset.deleted_objects:
            deleted.delete()
        # Instances is a list of new or changed objects. We're expecting just
        # Comment objects in the list but we will check the type in case of
        # future code modifications
        for instance in instances:
            if isinstance(instance, Comment):
                instance.auth_user = request.user
            # BUGFIX: save every instance; previously non-Comment inlines
            # (if ever added) would have been dropped unsaved.
            instance.save()
        formset.save_m2m()
admin.site.register(Post, PostAdmin)
class TagAdmin(admin.ModelAdmin):
    """Default admin for tags; no customisation needed."""
    pass
admin.site.register(Tag, TagAdmin)
| 32.192308 | 79 | 0.654719 | from django.contrib import admin
from models import Post, Comment, Tag
from forms import PostFormAdmin
class CommentInline(admin.TabularInline):
model = Comment
extra = 0
class PostAdmin(admin.ModelAdmin):
form = PostFormAdmin
fields = ('title', 'slug', 'tags', 'content', 'comments_closed')
search_fields = ('title',)
list_display = ('title', 'author', 'created', 'modified',
'number_of_comments', 'comments_closed', 'tag_list_str')
list_editable = ('comments_closed',)
list_filter = ('author__username', 'tags')
inlines = (CommentInline,)
def save_model(self, request, obj, form, change):
obj.author = request.user
obj.save()
def save_formset(self, request, form, formset, change):
instances = formset.save(commit=False)
# Comment objects in the list but we will check the type in case of
# future code modifications
for instance in instances:
if isinstance(instance, Comment):
instance.auth_user = request.user
instance.save()
admin.site.register(Post, PostAdmin)
class TagAdmin(admin.ModelAdmin):
pass
admin.site.register(Tag, TagAdmin)
| true | true |
f7f5bfc9ccef1d53f581db598903ab2e20a2c931 | 1,000 | py | Python | Fontes_Databricks/tools/registrarModelos_pkl.py | marciodelima/case2_santander_engml_stream | 9df333b38b59b6459b7654d49d834159572e583c | [
"Apache-2.0"
] | null | null | null | Fontes_Databricks/tools/registrarModelos_pkl.py | marciodelima/case2_santander_engml_stream | 9df333b38b59b6459b7654d49d834159572e583c | [
"Apache-2.0"
] | null | null | null | Fontes_Databricks/tools/registrarModelos_pkl.py | marciodelima/case2_santander_engml_stream | 9df333b38b59b6459b7654d49d834159572e583c | [
"Apache-2.0"
] | null | null | null | # Databricks notebook source
try:
import mlflow
import mlflow.sklearn
import pickle
import sklearn.ensemble.forest
except:
%pip install mlflow
%pip install -U scikit-learn==0.21.3
# COMMAND ----------
import mlflow
import mlflow.sklearn
import pickle
import sklearn.ensemble.forest
from mlflow.tracking import MlflowClient
def registrarModelo(caminho, nome):
    """Load a pickled sklearn model from `caminho` and register it in the
    MLflow model registry under the name `nome`.

    Best-effort: any failure (missing file, unpicklable payload, MLflow
    error) is logged and swallowed so the notebook keeps running.
    """
    try:
        # BUGFIX: close the file handle (was `pickle.load(open(...))`, which
        # leaked the descriptor).
        with open(caminho, 'rb') as arquivo:
            loadModel = pickle.load(arquivo)
        mlflow.sklearn.log_model(loadModel, "", serialization_format="cloudpickle", registered_model_name=nome)
    except Exception:
        # BUGFIX: was a bare `except: pass` that hid every failure; keep the
        # best-effort behaviour but surface the cause.
        import traceback
        traceback.print_exc()
registrarModelo('/dbfs/FileStore/modelo/original_model.pkl', 'Original')
registrarModelo('/dbfs/FileStore/modelo/tunning_model.pkl', 'Tunning')
#Stage
def stagingML(nome, versao=1, stage="Production"):
    """Transition version `versao` of registered model `nome` to `stage`
    in the MLflow model registry.

    Best-effort: failures are logged and swallowed so the notebook keeps
    running.
    """
    try:
        client = MlflowClient()
        client.transition_model_version_stage(name=nome, version=versao, stage=stage)
    except Exception:
        # BUGFIX: was a bare `except: pass` that hid every failure; keep the
        # best-effort behaviour but surface the cause.
        import traceback
        traceback.print_exc()
stagingML('Original')
stagingML('Tunning')
| 23.809524 | 111 | 0.709 |
try:
import mlflow
import mlflow.sklearn
import pickle
import sklearn.ensemble.forest
except:
%pip install mlflow
%pip install -U scikit-learn==0.21.3
import mlflow
import mlflow.sklearn
import pickle
import sklearn.ensemble.forest
from mlflow.tracking import MlflowClient
def registrarModelo(caminho, nome):
try:
loadModel = pickle.load(open(caminho, 'rb'))
mlflow.sklearn.log_model(loadModel, "", serialization_format="cloudpickle", registered_model_name=nome)
except:
pass
registrarModelo('/dbfs/FileStore/modelo/original_model.pkl', 'Original')
registrarModelo('/dbfs/FileStore/modelo/tunning_model.pkl', 'Tunning')
def stagingML(nome, versao=1, stage="Production"):
try:
client = MlflowClient()
client.transition_model_version_stage(name=nome, version=versao, stage=stage)
except:
pass
stagingML('Original')
stagingML('Tunning')
| false | true |
f7f5bfe7441456eea5e19c5239e14688e607d2f6 | 347 | py | Python | __init__.py | krisgesling/example-prompts-skill | 6c7af1c04c2e98ac3a1684a31bd38d1ec34773c6 | [
"Apache-2.0"
] | null | null | null | __init__.py | krisgesling/example-prompts-skill | 6c7af1c04c2e98ac3a1684a31bd38d1ec34773c6 | [
"Apache-2.0"
] | null | null | null | __init__.py | krisgesling/example-prompts-skill | 6c7af1c04c2e98ac3a1684a31bd38d1ec34773c6 | [
"Apache-2.0"
] | null | null | null | from mycroft import MycroftSkill, intent_file_handler
class ExamplePrompts(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('prompts.example.intent')
def handle_prompts_example(self, message):
self.speak_dialog('prompts.example')
def create_skill():
    """Entry point used by Mycroft core to instantiate the skill."""
    return ExamplePrompts()
| 21.6875 | 53 | 0.743516 | from mycroft import MycroftSkill, intent_file_handler
class ExamplePrompts(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('prompts.example.intent')
def handle_prompts_example(self, message):
self.speak_dialog('prompts.example')
def create_skill():
return ExamplePrompts()
| true | true |
f7f5bff8edbd5af7441e254a709676c225d5b99c | 563 | py | Python | packages/python/plotly/plotly/validators/isosurface/lighting/_vertexnormalsepsilon.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/isosurface/lighting/_vertexnormalsepsilon.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/isosurface/lighting/_vertexnormalsepsilon.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class VertexnormalsepsilonValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `isosurface.lighting.vertexnormalsepsilon` property:
    a number clamped to [0, 1], edited with edit_type 'calc' by default."""
    def __init__(
        self,
        plotly_name="vertexnormalsepsilon",
        parent_name="isosurface.lighting",
        **kwargs,
    ):
        super(VertexnormalsepsilonValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            max=kwargs.pop("max", 1),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
| 29.631579 | 82 | 0.619893 | import _plotly_utils.basevalidators
class VertexnormalsepsilonValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="vertexnormalsepsilon",
parent_name="isosurface.lighting",
**kwargs,
):
super(VertexnormalsepsilonValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
**kwargs,
)
| true | true |
f7f5c009b199bc7ef9a5027e3b0529619dd07115 | 310 | py | Python | tests/tensortrade/unit/stochastic/processes/test_ornstein_uhlenbeck.py | bwcknr/tensortrade | 376f5e4cc4ad7df271774088884fbe88f8feb7d8 | [
"Apache-2.0"
] | 34 | 2020-06-05T22:39:53.000Z | 2022-01-09T03:09:12.000Z | tests/tensortrade/unit/stochastic/processes/test_ornstein_uhlenbeck.py | bwcknr/tensortrade | 376f5e4cc4ad7df271774088884fbe88f8feb7d8 | [
"Apache-2.0"
] | 1 | 2022-01-17T06:38:27.000Z | 2022-01-17T06:38:27.000Z | tests/tensortrade/unit/stochastic/processes/test_ornstein_uhlenbeck.py | bwcknr/tensortrade | 376f5e4cc4ad7df271774088884fbe88f8feb7d8 | [
"Apache-2.0"
] | 8 | 2020-06-01T12:09:53.000Z | 2022-01-18T14:45:29.000Z |
from tensortrade.stochastic import ornstein
def test_shape():
    """ornstein() yields one row per requested time step, with 5 columns."""
    params = dict(
        base_price=7000,
        base_volume=15000,
        start_date='2018-01-01',
        start_date_format='%Y-%m-%d',
        times_to_generate=1500,
        time_frame='1d',
    )
    frame = ornstein(**params)
    assert frame.shape == (1500, 5)
from tensortrade.stochastic import ornstein
def test_shape():
frame = ornstein(
base_price=7000,
base_volume=15000,
start_date='2018-01-01',
start_date_format='%Y-%m-%d',
times_to_generate=1500,
time_frame='1d'
)
assert frame.shape == (1500, 5)
| true | true |
f7f5c146a419f052cf8ece01dbb0fdbc582900c1 | 10,646 | py | Python | test/utils/spark_common.py | kamalsharma2/horovod | 69c33290f8cc43073fade45619e80d9ffb3b9653 | [
"Apache-2.0"
] | 2 | 2019-03-12T09:13:50.000Z | 2019-04-01T04:40:45.000Z | test/utils/spark_common.py | kamalsharma2/horovod | 69c33290f8cc43073fade45619e80d9ffb3b9653 | [
"Apache-2.0"
] | 6 | 2021-10-05T21:23:43.000Z | 2021-12-13T23:19:10.000Z | test/utils/spark_common.py | kamalsharma2/horovod | 69c33290f8cc43073fade45619e80d9ffb3b9653 | [
"Apache-2.0"
] | 3 | 2021-05-29T06:12:49.000Z | 2022-02-24T06:34:59.000Z | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import contextlib
import os
import platform
import pytest
import stat
import sys
import threading
import time
from tempfile import TemporaryDirectory
import numpy as np
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.linalg import DenseVector, VectorUDT
from pyspark.sql.types import FloatType, IntegerType, StructField, StructType
from horovod.runner.common.util import secret
from horovod.spark.common.store import LocalStore
from horovod.spark.common.util import _wait_file_available_on_dbfs, _get_spark_df_saved_file_list
from horovod.spark.driver.driver_service import SparkDriverService, SparkDriverClient
from horovod.spark.task.task_service import SparkTaskService, SparkTaskClient
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'utils'))
from common import tempdir, temppath
# Spark will fail to initialize correctly locally on Mac OS without this
if platform.system() == 'Darwin':
os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
class CallbackBackend(object):
    """Test backend that executes `fn` inline, once, in the current process."""
    def run(self, fn, args=(), kwargs={}, env={}):
        # Execute once and replicate the result for each (single) process.
        result = fn(*args, **kwargs)
        return [result] * self.num_processes()
    def num_processes(self):
        return 1
@contextlib.contextmanager
def local_store():
    """Yield a LocalStore rooted in a temporary directory (removed on exit)."""
    with tempdir() as tmp:
        store = LocalStore(tmp)
        yield store
@contextlib.contextmanager
def spark_session(app, cores=2, gpus=0, max_failures=1, *args):
    """Yield a local SparkSession named `app`; the session is stopped and
    all temporary state (metastore, warehouse, scratch dirs) is removed on
    exit. With gpus > 0 a local-cluster master with a fake GPU discovery
    script is used instead of a plain local master."""
    from pyspark import SparkConf
    from pyspark.sql import SparkSession
    with TemporaryDirectory() as tmpdir:
        metastore_path = os.path.join(tmpdir, 'metastore')
        # start a single worker with given cores when gpus are present
        # max failures are ignored when gpus in that case
        master = 'local-cluster[1,{},1024]'.format(cores) if gpus > 0 \
            else 'local[{},{}]'.format(cores, max_failures)
        conf = SparkConf().setAppName(app).setMaster(master)
        conf = conf.setAll([
            ('spark.ui.showConsoleProgress', 'false'),
            ('spark.test.home', os.environ.get('SPARK_HOME')),
            ('spark.locality.wait', '0'),
            ('spark.unsafe.exceptionOnMemoryLeak', 'true'),
            ('spark.ui.enabled', 'false'),
            ('spark.local.dir', os.path.join(tmpdir, 'tmp')),
            ('spark.sql.warehouse.dir', os.path.join(tmpdir, 'warehouse')),
            ('javax.jdo.option.ConnectionURL',
             f'jdbc:derby:;databaseName={metastore_path};create=true'),
        ])
        with temppath() as temp_filename:
            if gpus > 0:
                # Write an executable discovery script that echoes `gpus`
                # fake GPU addresses in Spark's expected JSON format.
                with open(temp_filename, 'wb') as temp_file:
                    addresses = ', '.join('\\"{}\\"'.format(i) for i in range(gpus))
                    temp_file.write(b'echo {\\"name\\": \\"gpu\\", \\"addresses\\": [' +
                                    addresses.encode('ascii') + b']}')
                os.chmod(temp_file.name, stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP |
                         stat.S_IROTH | stat.S_IXOTH)
                # the single worker takes all gpus discovered, and a single executor will get them
                # each task on that executor will get a single gpu
                conf = conf.setAll([
                    ('spark.worker.resource.gpu.discoveryScript', temp_filename),
                    ('spark.worker.resource.gpu.amount', str(gpus)),
                    ('spark.task.resource.gpu.amount', '1'),
                    ('spark.executor.resource.gpu.amount', str(gpus)),
                ])
            session = SparkSession \
                .builder \
                .config(conf=conf) \
                .getOrCreate()
            try:
                yield session
            finally:
                session.stop()
def fn():
    """Trivial default task function used by spark_driver_service."""
    return 0
@contextlib.contextmanager
def spark_driver_service(num_proc, initial_np=None, fn=fn, args=(), kwargs={},
                         key=None, nics=None, verbose=2):
    """Yield a (driver, client, key) triple for a SparkDriverService;
    a secret key is generated when none is given and the driver is shut
    down on exit."""
    initial_np = initial_np or num_proc
    key = key or secret.make_secret_key()
    driver = SparkDriverService(initial_np, num_proc, fn, args, kwargs, key, nics)
    client = SparkDriverClient(driver.addresses(), key, verbose)
    try:
        yield driver, client, key
    finally:
        driver.shutdown()
@contextlib.contextmanager
def spark_task_service(index, key=None, nics=None, match_intf=False,
                       minimum_command_lifetime_s=0, verbose=2):
    """Yield a (task, client, key) triple for a SparkTaskService with the
    given task index; a secret key is generated when none is given and the
    task service is shut down on exit."""
    key = key or secret.make_secret_key()
    task = SparkTaskService(index, key, nics, minimum_command_lifetime_s, verbose)
    client = SparkTaskClient(index, task.addresses(), key, verbose, match_intf)
    try:
        yield task, client, key
    finally:
        task.shutdown()
def with_features(raw_df, feature_cols):
    """Return `raw_df` with a 'features' vector column assembled from
    `feature_cols` via a VectorAssembler pipeline."""
    vector_assembler = VectorAssembler().setInputCols(feature_cols).setOutputCol('features')
    pipeline = Pipeline().setStages([vector_assembler])
    df = pipeline.fit(raw_df).transform(raw_df)
    return df
def create_xor_data(spark):
    """Return the 4-row XOR truth table (x1, x2 -> y, plus a per-row weight)
    as a Spark DataFrame with an assembled 'features' column."""
    data = [[0, 0, 0.0, 0.1], [0, 1, 1.0, 0.2], [1, 0, 1.0, 0.3], [1, 1, 0.0, 0.4]]
    schema = StructType([StructField('x1', IntegerType()),
                         StructField('x2', IntegerType()),
                         StructField('y', FloatType()),
                         StructField('weight', FloatType())])
    raw_df = create_test_data_from_schema(spark, data, schema)
    df = with_features(raw_df, ['x1', 'x2'])
    return df
def create_xor_data_with_val(spark):
    """Return the XOR truth table (twice over) with a 0/1 'val' column
    (presumably a train/validation split marker — confirm against callers)
    as a Spark DataFrame with an assembled 'features' column."""
    data = [[0, 0, 0.0, 0.1, 1], [0, 1, 1.0, 0.2, 0], [1, 0, 1.0, 0.3, 1], [1, 1, 0.0, 0.4, 0],
            [0, 0, 0.0, 0.1, 1], [0, 1, 1.0, 0.2, 0], [1, 0, 1.0, 0.3, 1], [1, 1, 0.0, 0.4, 0]]
    schema = StructType([StructField('x1', IntegerType()),
                         StructField('x2', IntegerType()),
                         StructField('y', FloatType()),
                         StructField('weight', FloatType()),
                         StructField('val', IntegerType())])
    raw_df = create_test_data_from_schema(spark, data, schema)
    df = with_features(raw_df, ['x1', 'x2'])
    return df
def create_noisy_xor_data(spark):
    """Build 1024 noisy XOR samples: inputs get N(0, 0.1) noise, weights U(0, 1)."""
    schema = StructType([StructField('x1', FloatType()),
                         StructField('x2', FloatType()),
                         StructField('y', FloatType()),
                         StructField('weight', FloatType())])
    data = [[0.0, 0.0, 0.0], [0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]]
    n = 1024
    weights = np.random.uniform(0, 1, n)
    samples = []
    noise = np.random.normal(0, 0.1, [n, 2])
    for i, eps in enumerate(noise):
        # Cycle through the 4 XOR rows; list + ndarray broadcasts to an ndarray.
        original = data[i % len(data)]
        sample = original[0:2] + eps
        samples.append(sample.tolist() + [original[2]] + [float(weights[i])])
    raw_df = create_test_data_from_schema(spark, samples, schema)
    df = with_features(raw_df, ['x1', 'x2'])
    return df
def create_noisy_xor_data_with_val(spark):
    """Build 1024 noisy XOR samples like :func:`create_noisy_xor_data`, plus a
    0/1 ``val`` column carried over from the base row."""
    schema = StructType([StructField('x1', FloatType()),
                         StructField('x2', FloatType()),
                         StructField('y', FloatType()),
                         StructField('weight', FloatType()),
                         StructField('val', IntegerType())])
    data = [[0.0, 0.0, 0.0, 0], [0.0, 1.0, 1.0, 1], [1.0, 0.0, 1.0, 0], [1.0, 1.0, 0.0, 1]]
    n = 1024
    weights = np.random.uniform(0, 1, n)
    samples = []
    noise = np.random.normal(0, 0.1, [n, 2])
    for i, eps in enumerate(noise):
        # Cycle through the 4 base rows; list + ndarray broadcasts to an ndarray.
        original = data[i % len(data)]
        sample = original[0:2] + eps
        samples.append(sample.tolist() + [original[2]] + [float(weights[i])] + [original[3]])
    raw_df = create_test_data_from_schema(spark, samples, schema)
    df = with_features(raw_df, ['x1', 'x2'])
    return df
def create_mnist_data(spark):
    """Build 10 identical MNIST-shaped rows: 64-dim all-ones features and a
    10-class one-hot label vector for class 2."""
    features = DenseVector([1.0] * 64)
    label_vec = DenseVector([0.0, 0.0, 1.0] + [0.0] * 7)
    label = 2.0
    data = [[features, label_vec, label]] * 10
    schema = StructType([StructField('features', VectorUDT()),
                         StructField('label_vec', VectorUDT()),
                         StructField('label', FloatType())])
    df = create_test_data_from_schema(spark, data, schema)
    return df
def create_test_data_from_schema(spark, data, schema):
    """Create a DataFrame from row data and an explicit schema."""
    return spark.createDataFrame(data, schema=schema)
def test_wait_file_available_on_dbfs():
    """Exercise _wait_file_available_on_dbfs against local file:// URLs."""
    with tempdir() as d:
        pq_dir = os.path.join(d, 'test_ev')
        os.makedirs(pq_dir)
        file1_path = os.path.join(pq_dir, 'file1')
        file2_path = os.path.join(pq_dir, 'file2')
        url1 = 'file://' + file1_path.replace(os.sep, '/')
        url2 = 'file://' + file2_path.replace(os.sep, '/')
        url_list = [url1, url2]

        def create_file(p):
            # Touch an empty file.
            with open(p, 'w'):
                pass

        # 1. All files exist: must return without raising.
        create_file(file1_path)
        create_file(file2_path)
        _wait_file_available_on_dbfs(url_list)

        # 2. One file missing: must time out with RuntimeError.
        os.remove(file2_path)
        with pytest.raises(
            RuntimeError,
            match='Timeout while waiting for all parquet-store files to appear'
        ):
            _wait_file_available_on_dbfs(url_list)

        # 3. File appears ~1 second later on a background thread; the wait
        # should then succeed.
        def delay_create_file2():
            time.sleep(1)
            create_file(file2_path)

        # BUG FIX: pass the callable itself.  The previous code used
        # ``target=delay_create_file2()``, which invoked the function
        # synchronously (sleeping and creating the file up front), so the
        # delayed-appearance path was never actually exercised.
        threading.Thread(target=delay_create_file2).start()
        _wait_file_available_on_dbfs(url_list)
def test_get_spark_df_input_files(spark):
    """Write a 4-partition parquet dataset and verify the saved-file listing.

    NOTE(review): the ``spark`` parameter is immediately shadowed by the
    locally created session below and appears unused — confirm whether the
    fixture is still required.
    """
    with tempdir() as d:
        pq_dir = os.path.join(d, 'test_spark_df_output')
        with spark_session('test_get_spark_df_input_files') as spark:
            spark.range(100).repartition(4).write.parquet(pq_dir)

            pq_files = _get_spark_df_saved_file_list(pq_dir)
            pq_files = sorted(pq_files)
            assert len(pq_files) == 4

            for i in range(4):
                # Spark names partition files part-0000<i>-<uuid>...
                assert pq_files[i].startswith('part-0000' + str(i))
| 36.710345 | 98 | 0.604358 |
import contextlib
import os
import platform
import pytest
import stat
import sys
import threading
import time
from tempfile import TemporaryDirectory
import numpy as np
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.linalg import DenseVector, VectorUDT
from pyspark.sql.types import FloatType, IntegerType, StructField, StructType
from horovod.runner.common.util import secret
from horovod.spark.common.store import LocalStore
from horovod.spark.common.util import _wait_file_available_on_dbfs, _get_spark_df_saved_file_list
from horovod.spark.driver.driver_service import SparkDriverService, SparkDriverClient
from horovod.spark.task.task_service import SparkTaskService, SparkTaskClient
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'utils'))
from common import tempdir, temppath
if platform.system() == 'Darwin':
os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
class CallbackBackend(object):
    """Single-process stand-in backend: runs the callable inline."""

    def num_processes(self):
        # Always exactly one local "process".
        return 1

    def run(self, fn, args=(), kwargs={}, env={}):
        # Execute once, then replicate the result per (single) process.
        result = fn(*args, **kwargs)
        return [result] * self.num_processes()
@contextlib.contextmanager
def local_store():
    """Yield a LocalStore rooted at a temporary directory (removed on exit)."""
    with tempdir() as tmp:
        store = LocalStore(tmp)
        yield store
@contextlib.contextmanager
def spark_session(app, cores=2, gpus=0, max_failures=1, *args):
    """Yield a throw-away local SparkSession named *app*.

    Uses ``local-cluster`` mode when GPUs are requested (executor resource
    settings only apply there) and plain ``local`` mode otherwise.  All
    scratch, warehouse and metastore state lives in a temporary directory
    that is deleted on exit, and the session is always stopped.
    """
    from pyspark import SparkConf
    from pyspark.sql import SparkSession
    with TemporaryDirectory() as tmpdir:
        metastore_path = os.path.join(tmpdir, 'metastore')
        # local-cluster[1,<cores>,1024]: one worker so GPU resources can be
        # assigned; otherwise local[<cores>,<max_failures>].
        master = 'local-cluster[1,{},1024]'.format(cores) if gpus > 0 \
            else 'local[{},{}]'.format(cores, max_failures)
        conf = SparkConf().setAppName(app).setMaster(master)
        conf = conf.setAll([
            ('spark.ui.showConsoleProgress', 'false'),
            ('spark.test.home', os.environ.get('SPARK_HOME')),
            ('spark.locality.wait', '0'),
            ('spark.unsafe.exceptionOnMemoryLeak', 'true'),
            ('spark.ui.enabled', 'false'),
            ('spark.local.dir', os.path.join(tmpdir, 'tmp')),
            ('spark.sql.warehouse.dir', os.path.join(tmpdir, 'warehouse')),
            ('javax.jdo.option.ConnectionURL',
             f'jdbc:derby:;databaseName={metastore_path};create=true'),
        ])
        with temppath() as temp_filename:
            if gpus > 0:
                # Write a tiny GPU "discovery script" that echoes <gpus> fake
                # addresses, and mark it executable for the Spark worker.
                with open(temp_filename, 'wb') as temp_file:
                    addresses = ', '.join('\\"{}\\"'.format(i) for i in range(gpus))
                    temp_file.write(b'echo {\\"name\\": \\"gpu\\", \\"addresses\\": [' +
                                    addresses.encode('ascii') + b']}')
                os.chmod(temp_file.name, stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP |
                         stat.S_IROTH | stat.S_IXOTH)
                conf = conf.setAll([
                    ('spark.worker.resource.gpu.discoveryScript', temp_filename),
                    ('spark.worker.resource.gpu.amount', str(gpus)),
                    ('spark.task.resource.gpu.amount', '1'),
                    ('spark.executor.resource.gpu.amount', str(gpus)),
                ])
            session = SparkSession \
                .builder \
                .config(conf=conf) \
                .getOrCreate()
            try:
                yield session
            finally:
                session.stop()
def fn():
return 0
@contextlib.contextmanager
def spark_driver_service(num_proc, initial_np=None, fn=fn, args=(), kwargs={},
key=None, nics=None, verbose=2):
initial_np = initial_np or num_proc
key = key or secret.make_secret_key()
driver = SparkDriverService(initial_np, num_proc, fn, args, kwargs, key, nics)
client = SparkDriverClient(driver.addresses(), key, verbose)
try:
yield driver, client, key
finally:
driver.shutdown()
@contextlib.contextmanager
def spark_task_service(index, key=None, nics=None, match_intf=False,
minimum_command_lifetime_s=0, verbose=2):
key = key or secret.make_secret_key()
task = SparkTaskService(index, key, nics, minimum_command_lifetime_s, verbose)
client = SparkTaskClient(index, task.addresses(), key, verbose, match_intf)
try:
yield task, client, key
finally:
task.shutdown()
def with_features(raw_df, feature_cols):
vector_assembler = VectorAssembler().setInputCols(feature_cols).setOutputCol('features')
pipeline = Pipeline().setStages([vector_assembler])
df = pipeline.fit(raw_df).transform(raw_df)
return df
def create_xor_data(spark):
data = [[0, 0, 0.0, 0.1], [0, 1, 1.0, 0.2], [1, 0, 1.0, 0.3], [1, 1, 0.0, 0.4]]
schema = StructType([StructField('x1', IntegerType()),
StructField('x2', IntegerType()),
StructField('y', FloatType()),
StructField('weight', FloatType())])
raw_df = create_test_data_from_schema(spark, data, schema)
df = with_features(raw_df, ['x1', 'x2'])
return df
def create_xor_data_with_val(spark):
data = [[0, 0, 0.0, 0.1, 1], [0, 1, 1.0, 0.2, 0], [1, 0, 1.0, 0.3, 1], [1, 1, 0.0, 0.4, 0],
[0, 0, 0.0, 0.1, 1], [0, 1, 1.0, 0.2, 0], [1, 0, 1.0, 0.3, 1], [1, 1, 0.0, 0.4, 0]]
schema = StructType([StructField('x1', IntegerType()),
StructField('x2', IntegerType()),
StructField('y', FloatType()),
StructField('weight', FloatType()),
StructField('val', IntegerType())])
raw_df = create_test_data_from_schema(spark, data, schema)
df = with_features(raw_df, ['x1', 'x2'])
return df
def create_noisy_xor_data(spark):
schema = StructType([StructField('x1', FloatType()),
StructField('x2', FloatType()),
StructField('y', FloatType()),
StructField('weight', FloatType())])
data = [[0.0, 0.0, 0.0], [0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]]
n = 1024
weights = np.random.uniform(0, 1, n)
samples = []
noise = np.random.normal(0, 0.1, [n, 2])
for i, eps in enumerate(noise):
original = data[i % len(data)]
sample = original[0:2] + eps
samples.append(sample.tolist() + [original[2]] + [float(weights[i])])
raw_df = create_test_data_from_schema(spark, samples, schema)
df = with_features(raw_df, ['x1', 'x2'])
return df
def create_noisy_xor_data_with_val(spark):
schema = StructType([StructField('x1', FloatType()),
StructField('x2', FloatType()),
StructField('y', FloatType()),
StructField('weight', FloatType()),
StructField('val', IntegerType())])
data = [[0.0, 0.0, 0.0, 0], [0.0, 1.0, 1.0, 1], [1.0, 0.0, 1.0, 0], [1.0, 1.0, 0.0, 1]]
n = 1024
weights = np.random.uniform(0, 1, n)
samples = []
noise = np.random.normal(0, 0.1, [n, 2])
for i, eps in enumerate(noise):
original = data[i % len(data)]
sample = original[0:2] + eps
samples.append(sample.tolist() + [original[2]] + [float(weights[i])] + [original[3]])
raw_df = create_test_data_from_schema(spark, samples, schema)
df = with_features(raw_df, ['x1', 'x2'])
return df
def create_mnist_data(spark):
features = DenseVector([1.0] * 64)
label_vec = DenseVector([0.0, 0.0, 1.0] + [0.0] * 7)
label = 2.0
data = [[features, label_vec, label]] * 10
schema = StructType([StructField('features', VectorUDT()),
StructField('label_vec', VectorUDT()),
StructField('label', FloatType())])
df = create_test_data_from_schema(spark, data, schema)
return df
def create_test_data_from_schema(spark, data, schema):
return spark.createDataFrame(data, schema=schema)
def test_wait_file_available_on_dbfs():
    """Exercise _wait_file_available_on_dbfs against local file:// URLs."""
    with tempdir() as d:
        pq_dir = os.path.join(d, 'test_ev')
        os.makedirs(pq_dir)
        file1_path = os.path.join(pq_dir, 'file1')
        file2_path = os.path.join(pq_dir, 'file2')
        url1 = 'file://' + file1_path.replace(os.sep, '/')
        url2 = 'file://' + file2_path.replace(os.sep, '/')
        url_list = [url1, url2]

        def create_file(p):
            with open(p, 'w'):
                pass

        # All files exist: must return without raising.
        create_file(file1_path)
        create_file(file2_path)
        _wait_file_available_on_dbfs(url_list)

        # One file missing: must time out with RuntimeError.
        os.remove(file2_path)
        with pytest.raises(
            RuntimeError,
            match='Timeout while waiting for all parquet-store files to appear'
        ):
            _wait_file_available_on_dbfs(url_list)

        # File appears ~1s later on a background thread; the wait succeeds.
        def delay_create_file2():
            time.sleep(1)
            create_file(file2_path)

        # BUG FIX: pass the callable, not its return value.  The previous
        # ``target=delay_create_file2()`` ran the function synchronously, so
        # the file existed before the wait even started.
        threading.Thread(target=delay_create_file2).start()
        _wait_file_available_on_dbfs(url_list)
def test_get_spark_df_input_files(spark):
with tempdir() as d:
pq_dir = os.path.join(d, 'test_spark_df_output')
with spark_session('test_get_spark_df_input_files') as spark:
spark.range(100).repartition(4).write.parquet(pq_dir)
pq_files = _get_spark_df_saved_file_list(pq_dir)
pq_files = sorted(pq_files)
assert len(pq_files) == 4
for i in range(4):
assert pq_files[i].startswith('part-0000' + str(i))
| true | true |
f7f5c19c7d6083ee486243895480d339cb88424c | 994 | py | Python | isi_sdk_7_2/test/test_storagepool_tier_create_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_7_2/test/test_storagepool_tier_create_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_7_2/test/test_storagepool_tier_create_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 2
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_7_2
from isi_sdk_7_2.models.storagepool_tier_create_params import StoragepoolTierCreateParams # noqa: E501
from isi_sdk_7_2.rest import ApiException
class TestStoragepoolTierCreateParams(unittest.TestCase):
    """Generated unit-test stubs for the StoragepoolTierCreateParams model."""

    def setUp(self):
        # No fixtures needed for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testStoragepoolTierCreateParams(self):
        """Placeholder test for StoragepoolTierCreateParams."""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_7_2.models.storagepool_tier_create_params.StoragepoolTierCreateParams()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 24.243902 | 111 | 0.732394 |
from __future__ import absolute_import
import unittest
import isi_sdk_7_2
from isi_sdk_7_2.models.storagepool_tier_create_params import StoragepoolTierCreateParams
from isi_sdk_7_2.rest import ApiException
class TestStoragepoolTierCreateParams(unittest.TestCase):
    """Generated unit-test stubs for the StoragepoolTierCreateParams model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testStoragepoolTierCreateParams(self):
        # BUG FIX: a stray bare ``s`` expression (residue of comment
        # stripping) raised NameError whenever this test ran; the stub
        # body should simply be ``pass``.
        # TODO: construct the model with mandatory attributes once example
        # values are known.
        pass
if __name__ == '__main__':
unittest.main()
| true | true |
f7f5c26c667a211d33e76e25789ac36c66c98844 | 88 | py | Python | src/wsgi.py | notalab/api | 7afc0f9896c0d4b5f81605f671f13a9168c78380 | [
"MIT"
] | null | null | null | src/wsgi.py | notalab/api | 7afc0f9896c0d4b5f81605f671f13a9168c78380 | [
"MIT"
] | null | null | null | src/wsgi.py | notalab/api | 7afc0f9896c0d4b5f81605f671f13a9168c78380 | [
"MIT"
] | null | null | null | from app import server as application
if __name__ == "__main__":
application.run()
| 17.6 | 37 | 0.727273 | from app import server as application
if __name__ == "__main__":
application.run()
| true | true |
f7f5c2c7a18f0b161b88356a9d716a4b3735882f | 3,587 | py | Python | nikola/plugins/command_install_theme.py | servalproject/nikola | 4d78504d93597894f3da4a434dfafdec907601a7 | [
"MIT"
] | 1 | 2015-12-14T21:38:33.000Z | 2015-12-14T21:38:33.000Z | nikola/plugins/command_install_theme.py | servalproject/nikola | 4d78504d93597894f3da4a434dfafdec907601a7 | [
"MIT"
] | null | null | null | nikola/plugins/command_install_theme.py | servalproject/nikola | 4d78504d93597894f3da4a434dfafdec907601a7 | [
"MIT"
] | null | null | null | # Copyright (c) 2012 Roberto Alsina y otros.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import os
import json
from io import BytesIO
try:
import requests
except ImportError:
requests = None # NOQA
from nikola.plugin_categories import Command
from nikola import utils
class CommandInstallTheme(Command):
    """Install a theme from the theme repository into the current site."""

    name = "install_theme"
    doc_usage = "[[-u] theme_name] | [[-u] -l]"
    doc_purpose = "Install theme into current site."
    cmd_options = [
        {
            'name': 'list',
            'short': 'l',
            'long': 'list',
            'type': bool,
            'default': False,
            'help': 'Show list of available themes.'
        },
        {
            'name': 'url',
            'short': 'u',
            'long': 'url',
            'type': str,
            'help': "URL for the theme repository (default: "
                    "http://themes.nikola.ralsina.com.ar/themes.json)",
            'default': 'http://themes.nikola.ralsina.com.ar/themes.json'
        },
    ]

    def _execute(self, options, args):
        """Install theme into current site.

        Returns False on failure, True after listing themes; installation
        itself returns None on success (unchanged behavior).
        """
        if requests is None:
            print('This command requires the requests package be installed.')
            return False
        listing = options['list']
        url = options['url']
        name = args[0] if args else None
        if name is None and not listing:
            print("This command needs either a theme name or the -l option.")
            return False
        # Fetch the repository index: a mapping of theme name -> zip URL.
        data = json.loads(requests.get(url).text)
        if listing:
            print("Themes:")
            print("-------")
            for theme in sorted(data.keys()):
                print(theme)
            return True
        if name not in data:
            print("Can't find theme " + name)
            return False
        if os.path.isfile("themes"):
            raise IOError("'themes' isn't a directory!")
        elif not os.path.isdir("themes"):
            try:
                os.makedirs("themes")
            except OSError:
                # BUG FIX: narrowed from a bare ``except`` so that
                # KeyboardInterrupt/SystemExit are no longer swallowed and
                # re-labelled as an OSError.
                raise OSError("mkdir 'themes' error!")
        print('Downloading: ' + data[name])
        zip_file = BytesIO()
        zip_file.write(requests.get(data[name]).content)
        print('Extracting: {0} into themes'.format(name))
        utils.extract_all(zip_file)
| 33.839623 | 77 | 0.583775 |
from __future__ import print_function
import os
import json
from io import BytesIO
try:
import requests
except ImportError:
requests = None
from nikola.plugin_categories import Command
from nikola import utils
class CommandInstallTheme(Command):
    """Install a theme from the theme repository into the current site."""

    name = "install_theme"
    doc_usage = "[[-u] theme_name] | [[-u] -l]"
    doc_purpose = "Install theme into current site."
    cmd_options = [
        {
            'name': 'list',
            'short': 'l',
            'long': 'list',
            'type': bool,
            'default': False,
            'help': 'Show list of available themes.'
        },
        {
            'name': 'url',
            'short': 'u',
            'long': 'url',
            'type': str,
            'help': "URL for the theme repository (default: "
                    "http://themes.nikola.ralsina.com.ar/themes.json)",
            'default': 'http://themes.nikola.ralsina.com.ar/themes.json'
        },
    ]

    def _execute(self, options, args):
        """Install theme into current site."""
        if requests is None:
            print('This command requires the requests package be installed.')
            return False
        listing = options['list']
        url = options['url']
        name = args[0] if args else None
        if name is None and not listing:
            print("This command needs either a theme name or the -l option.")
            return False
        # Fetch the repository index: a mapping of theme name -> zip URL.
        data = json.loads(requests.get(url).text)
        if listing:
            print("Themes:")
            print("-------")
            for theme in sorted(data.keys()):
                print(theme)
            return True
        if name not in data:
            print("Can't find theme " + name)
            return False
        if os.path.isfile("themes"):
            raise IOError("'themes' isn't a directory!")
        elif not os.path.isdir("themes"):
            try:
                os.makedirs("themes")
            except OSError:
                # BUG FIX: narrowed from a bare ``except`` so that
                # KeyboardInterrupt/SystemExit are no longer swallowed.
                raise OSError("mkdir 'themes' error!")
        print('Downloading: ' + data[name])
        zip_file = BytesIO()
        zip_file.write(requests.get(data[name]).content)
        print('Extracting: {0} into themes'.format(name))
        utils.extract_all(zip_file)
| true | true |
f7f5c2d63dba77ef9936bd159c67774e2fd84d48 | 18,208 | py | Python | pythonProject1/venv/Lib/site-packages/asyncgui/_multierror.py | mjtomlinson/CNE330_Python_1_Final_Project | 05020806860937ef37b9a0ad2e27de4897a606de | [
"CC0-1.0"
] | null | null | null | pythonProject1/venv/Lib/site-packages/asyncgui/_multierror.py | mjtomlinson/CNE330_Python_1_Final_Project | 05020806860937ef37b9a0ad2e27de4897a606de | [
"CC0-1.0"
] | null | null | null | pythonProject1/venv/Lib/site-packages/asyncgui/_multierror.py | mjtomlinson/CNE330_Python_1_Final_Project | 05020806860937ef37b9a0ad2e27de4897a606de | [
"CC0-1.0"
] | null | null | null | import sys
import traceback
import textwrap
import warnings
exc_key = id
################################################################
# MultiError
################################################################
def _filter_impl(handler, root_exc):
    """Apply *handler* to every leaf exception under *root_exc*.

    Returns the simplified result: a new exception tree, the original object
    (if nothing changed), or None (if the handler consumed everything).
    """
    # We have a tree of MultiError's, like:
    #
    #  MultiError([
    #      ValueError,
    #      MultiError([
    #          KeyError,
    #          ValueError,
    #      ]),
    #  ])
    #
    # or similar.
    #
    # We want to
    # 1) apply the filter to each of the leaf exceptions -- each leaf
    #    might stay the same, be replaced (with the original exception
    #    potentially sticking around as __context__ or __cause__), or
    #    disappear altogether.
    # 2) simplify the resulting tree -- remove empty nodes, and replace
    #    singleton MultiError's with their contents, e.g.:
    #        MultiError([KeyError]) -> KeyError
    #    (This can happen recursively, e.g. if the two ValueErrors above
    #    get caught then we'll just be left with a bare KeyError.)
    # 3) preserve sensible tracebacks
    #
    # It's the tracebacks that are most confusing. As a MultiError
    # propagates through the stack, it accumulates traceback frames, but
    # the exceptions inside it don't. Semantically, the traceback for a
    # leaf exception is the concatenation the tracebacks of all the
    # exceptions you see when traversing the exception tree from the root
    # to that leaf. Our correctness invariant is that this concatenated
    # traceback should be the same before and after.
    #
    # The easy way to do that would be to, at the beginning of this
    # function, "push" all tracebacks down to the leafs, so all the
    # MultiErrors have __traceback__=None, and all the leafs have complete
    # tracebacks. But whenever possible, we'd actually prefer to keep
    # tracebacks as high up in the tree as possible, because this lets us
    # keep only a single copy of the common parts of these exception's
    # tracebacks. This is cheaper (in memory + time -- tracebacks are
    # unpleasantly quadratic-ish to work with, and this might matter if
    # you have thousands of exceptions, which can happen e.g. after
    # cancelling a large task pool, and no-one will ever look at their
    # tracebacks!), and more importantly, factoring out redundant parts of
    # the tracebacks makes them more readable if/when users do see them.
    #
    # So instead our strategy is:
    # - first go through and construct the new tree, preserving any
    #   unchanged subtrees
    # - then go through the original tree (!) and push tracebacks down
    #   until either we hit a leaf, or we hit a subtree which was
    #   preserved in the new tree.

    # This used to also support async handler functions. But that runs into:
    #   https://bugs.python.org/issue29600
    # which is difficult to fix on our end.

    # Filters a subtree, ignoring tracebacks, while keeping a record of
    # which MultiErrors were preserved unchanged
    def filter_tree(exc, preserved):
        if isinstance(exc, MultiError):
            new_exceptions = []
            changed = False
            for child_exc in exc.exceptions:
                new_child_exc = filter_tree(child_exc, preserved)
                if new_child_exc is not child_exc:
                    changed = True
                if new_child_exc is not None:
                    new_exceptions.append(new_child_exc)
            if not new_exceptions:
                return None
            elif changed:
                return MultiError(new_exceptions)
            else:
                preserved.add(id(exc))
                return exc
        else:
            new_exc = handler(exc)
            # Our version of implicit exception chaining
            if new_exc is not None and new_exc is not exc:
                new_exc.__context__ = exc
            return new_exc

    # Pushes accumulated traceback frames down toward the leaves, stopping at
    # subtrees that were preserved unchanged by filter_tree.
    def push_tb_down(tb, exc, preserved):
        if id(exc) in preserved:
            return
        new_tb = concat_tb(tb, exc.__traceback__)
        if isinstance(exc, MultiError):
            for child_exc in exc.exceptions:
                push_tb_down(new_tb, child_exc, preserved)
            exc.__traceback__ = None
        else:
            exc.__traceback__ = new_tb

    preserved = set()
    new_root_exc = filter_tree(root_exc, preserved)
    push_tb_down(None, root_exc, preserved)
    # Delete the local functions to avoid a reference cycle (see
    # test_simple_cancel_scope_usage_doesnt_create_cyclic_garbage)
    del filter_tree, push_tb_down
    return new_root_exc
# Normally I'm a big fan of (a)contextmanager, but in this case I found it
# easier to use the raw context manager protocol, because it makes it a lot
# easier to reason about how we're mutating the traceback as we go. (End
# result: if the exception gets modified, then the 'raise' here makes this
# frame show up in the traceback; otherwise, we leave no trace.)
class MultiErrorCatcher:
    """Context manager that runs :meth:`MultiError.filter` with *handler* on
    any exception raised in its body, re-raising the filtered result (if any).

    Implemented with the raw context-manager protocol (rather than
    ``@contextmanager``) so the traceback mutation in ``__exit__`` is easy to
    reason about: if the exception is modified, the ``raise`` here makes this
    frame show up in the traceback; otherwise we leave no trace.
    """

    def __init__(self, handler):
        # BUG FIX: ``MultiError.catch`` instantiates this class as
        # ``MultiErrorCatcher(handler)``, but no ``__init__`` accepted the
        # handler (only a ``_handler = None`` class attribute existed, a
        # leftover from the stripped attrs decorator), so ``catch()`` raised
        # TypeError.  Store the handler on the instance.
        self._handler = handler

    def __enter__(self):
        pass

    def __exit__(self, etype, exc, tb):
        if exc is not None:
            filtered_exc = MultiError.filter(self._handler, exc)
            if filtered_exc is exc:
                # Let the interpreter re-raise it
                return False
            if filtered_exc is None:
                # Swallow the exception
                return True
            # When we raise filtered_exc, Python will unconditionally blow
            # away its __context__ attribute and replace it with the original
            # exc we caught. So after we raise it, we have to pause it while
            # it's in flight to put the correct __context__ back.
            old_context = filtered_exc.__context__
            try:
                raise filtered_exc
            finally:
                _, value, _ = sys.exc_info()
                assert value is filtered_exc
                value.__context__ = old_context
class MultiError(Exception):
    """An exception that contains other exceptions; also known as an
    "inception".

    It's main use is to represent the situation when multiple child tasks all
    raise errors "in parallel".

    Args:
      exceptions (list): The exceptions

    Returns:
      If ``len(exceptions) == 1``, returns that exception. This means that a
      call to ``MultiError(...)`` is not guaranteed to return a
      :exc:`MultiError` object!

      Otherwise, returns a new :exc:`MultiError` object.

    Raises:
      TypeError: if any of the passed in objects are not instances of
          :exc:`Exception`.
    """

    def __init__(self, exceptions):
        # Avoid recursion when exceptions[0] returned by __new__() happens
        # to be a MultiError and subsequently __init__() is called.
        if hasattr(self, "exceptions"):
            # __init__ was already called on this object
            assert len(exceptions) == 1 and exceptions[0] is self
            return
        self.exceptions = exceptions

    def __new__(cls, exceptions):
        """Validate the children and collapse singleton lists to the lone child."""
        exceptions = list(exceptions)
        for exc in exceptions:
            if not isinstance(exc, Exception):
                raise TypeError("Expected an exception object, not {!r}".format(exc))
        if len(exceptions) == 1:
            # If this lone object happens to itself be a MultiError, then
            # Python will implicitly call our __init__ on it again. See
            # special handling in __init__.
            return exceptions[0]
        else:
            # The base class __new__() implicitly invokes our __init__, which
            # is what we want.
            #
            # In an earlier version of the code, we didn't define __init__ and
            # simply set the `exceptions` attribute directly on the new object.
            # However, linters expect attributes to be initialized in __init__.
            return Exception.__new__(cls, exceptions)

    def __str__(self):
        return ", ".join(repr(exc) for exc in self.exceptions)

    def __repr__(self):
        return "<MultiError: {}>".format(self)

    @classmethod
    def filter(cls, handler, root_exc):
        """Apply the given ``handler`` to all the exceptions in ``root_exc``.

        Args:
          handler: A callable that takes an atomic (non-MultiError) exception
              as input, and returns either a new exception object or None.
          root_exc: An exception, often (though not necessarily) a
              :exc:`MultiError`.

        Returns:
          A new exception object in which each component exception ``exc`` has
          been replaced by the result of running ``handler(exc)`` – or, if
          ``handler`` returned None for all the inputs, returns None.

        """
        return _filter_impl(handler, root_exc)

    @classmethod
    def catch(cls, handler):
        """Return a context manager that catches and re-throws exceptions
        after running :meth:`filter` on them.

        Args:
          handler: as for :meth:`filter`

        """
        return MultiErrorCatcher(handler)
# Clean up exception printing:
MultiError.__module__ = "asyncgui"
################################################################
# concat_tb
################################################################
# We need to compute a new traceback that is the concatenation of two existing
# tracebacks. This requires copying the entries in 'head' and then pointing
# the final tb_next to 'tail'.
#
# NB: 'tail' might be None, which requires some special handling in the ctypes
# version.
#
# The complication here is that Python doesn't actually support copying or
# modifying traceback objects, so we have to get creative...
#
# On CPython, we use ctypes. On PyPy, we use "transparent proxies".
#
# Jinja2 is a useful source of inspiration:
# https://github.com/pallets/jinja/blob/master/jinja2/debug.py
try:
import tputil
except ImportError:
have_tproxy = False
else:
have_tproxy = True
if have_tproxy:
# http://doc.pypy.org/en/latest/objspace-proxies.html
def copy_tb(base_tb, tb_next):
def controller(operation):
# Rationale for pragma: I looked fairly carefully and tried a few
# things, and AFAICT it's not actually possible to get any
# 'opname' that isn't __getattr__ or __getattribute__. So there's
# no missing test we could add, and no value in coverage nagging
# us about adding one.
if operation.opname in [
"__getattribute__",
"__getattr__",
]: # pragma: no cover
if operation.args[0] == "tb_next":
return tb_next
return operation.delegate()
return tputil.make_proxy(controller, type(base_tb), base_tb)
else:
# ctypes it is
import ctypes
# How to handle refcounting? I don't want to use ctypes.py_object because
# I don't understand or trust it, and I don't want to use
# ctypes.pythonapi.Py_{Inc,Dec}Ref because we might clash with user code
# that also tries to use them but with different types. So private _ctypes
# APIs it is!
import _ctypes
class CTraceback(ctypes.Structure):
_fields_ = [
("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
("tb_next", ctypes.c_void_p),
("tb_frame", ctypes.c_void_p),
("tb_lasti", ctypes.c_int),
("tb_lineno", ctypes.c_int),
]
def copy_tb(base_tb, tb_next):
# TracebackType has no public constructor, so allocate one the hard way
try:
raise ValueError
except ValueError as exc:
new_tb = exc.__traceback__
c_new_tb = CTraceback.from_address(id(new_tb))
# At the C level, tb_next either pointer to the next traceback or is
# NULL. c_void_p and the .tb_next accessor both convert NULL to None,
# but we shouldn't DECREF None just because we assigned to a NULL
# pointer! Here we know that our new traceback has only 1 frame in it,
# so we can assume the tb_next field is NULL.
assert c_new_tb.tb_next is None
# If tb_next is None, then we want to set c_new_tb.tb_next to NULL,
# which it already is, so we're done. Otherwise, we have to actually
# do some work:
if tb_next is not None:
_ctypes.Py_INCREF(tb_next)
c_new_tb.tb_next = id(tb_next)
assert c_new_tb.tb_frame is not None
_ctypes.Py_INCREF(base_tb.tb_frame)
old_tb_frame = new_tb.tb_frame
c_new_tb.tb_frame = id(base_tb.tb_frame)
_ctypes.Py_DECREF(old_tb_frame)
c_new_tb.tb_lasti = base_tb.tb_lasti
c_new_tb.tb_lineno = base_tb.tb_lineno
return new_tb
def concat_tb(head, tail):
    """Return a traceback equivalent to *head* followed by *tail*.

    Deliberately iterative: in the worst case *head* is a RecursionError
    traceback that is, by definition, too deep to walk recursively.
    """
    # Collect head's frames front-to-back...
    frames = []
    node = head
    while node is not None:
        frames.append(node)
        node = node.tb_next
    # ...then rebuild the chain back-to-front on top of tail.
    result = tail
    for frame_tb in reversed(frames):
        result = copy_tb(frame_tb, tb_next=result)
    return result
################################################################
# MultiError traceback formatting
#
# What follows is terrible, terrible monkey patching of
# traceback.TracebackException to add support for handling
# MultiErrors
################################################################
traceback_exception_original_init = traceback.TracebackException.__init__
def traceback_exception_init(
    self,
    exc_type,
    exc_value,
    exc_traceback,
    *,
    limit=None,
    lookup_lines=True,
    capture_locals=False,
    _seen=None,
):
    """Monkeypatched TracebackException.__init__ that additionally records the
    children of a MultiError in ``self.embedded``."""
    if _seen is None:
        _seen = set()

    # Capture the original exception and its cause and context as TracebackExceptions
    traceback_exception_original_init(
        self,
        exc_type,
        exc_value,
        exc_traceback,
        limit=limit,
        lookup_lines=lookup_lines,
        capture_locals=capture_locals,
        _seen=_seen,
    )

    # Capture each of the exceptions in the MultiError along with each of their causes and contexts
    if isinstance(exc_value, MultiError):
        embedded = []
        for exc in exc_value.exceptions:
            if exc_key(exc) not in _seen:
                embedded.append(
                    traceback.TracebackException.from_exception(
                        exc,
                        limit=limit,
                        lookup_lines=lookup_lines,
                        capture_locals=capture_locals,
                        # copy the set of _seen exceptions so that duplicates
                        # shared between sub-exceptions are not omitted
                        _seen=set(_seen),
                    )
                )
        self.embedded = embedded
    else:
        self.embedded = []
traceback.TracebackException.__init__ = traceback_exception_init # type: ignore
traceback_exception_original_format = traceback.TracebackException.format
def traceback_exception_format(self, *, chain=True):
    """Monkeypatched TracebackException.format that appends each embedded
    (MultiError child) exception, indented by two spaces, after the normal
    output."""
    yield from traceback_exception_original_format(self, chain=chain)

    for i, exc in enumerate(self.embedded):
        yield "\nDetails of embedded exception {}:\n\n".format(i + 1)
        yield from (textwrap.indent(line, " " * 2) for line in exc.format(chain=chain))
traceback.TracebackException.format = traceback_exception_format # type: ignore
def trio_excepthook(etype, value, tb):
for chunk in traceback.format_exception(etype, value, tb):
sys.stderr.write(chunk)
monkeypatched_or_warned = False
if "IPython" in sys.modules:
import IPython
ip = IPython.get_ipython()
if ip is not None:
if ip.custom_exceptions != ():
warnings.warn(
"IPython detected, but you already have a custom exception "
"handler installed. I'll skip installing Trio's custom "
"handler, but this means MultiErrors will not show full "
"tracebacks.",
category=RuntimeWarning,
)
monkeypatched_or_warned = True
else:
def trio_show_traceback(self, etype, value, tb, tb_offset=None):
# XX it would be better to integrate with IPython's fancy
# exception formatting stuff (and not ignore tb_offset)
trio_excepthook(etype, value, tb)
ip.set_custom_exc((MultiError,), trio_show_traceback)
monkeypatched_or_warned = True
if sys.excepthook is sys.__excepthook__:
sys.excepthook = trio_excepthook
monkeypatched_or_warned = True
# Ubuntu's system Python has a sitecustomize.py file that import
# apport_python_hook and replaces sys.excepthook.
#
# The custom hook captures the error for crash reporting, and then calls
# sys.__excepthook__ to actually print the error.
#
# We don't mind it capturing the error for crash reporting, but we want to
# take over printing the error. So we monkeypatch the apport_python_hook
# module so that instead of calling sys.__excepthook__, it calls our custom
# hook.
#
# More details: https://github.com/python-trio/trio/issues/1065
if getattr(sys.excepthook, "__name__", None) == "apport_excepthook":
import apport_python_hook
assert sys.excepthook is apport_python_hook.apport_excepthook
# Give it a descriptive name as a hint for anyone who's stuck trying to
# debug this mess later.
class TrioFakeSysModuleForApport:
pass
fake_sys = TrioFakeSysModuleForApport()
fake_sys.__dict__.update(sys.__dict__)
fake_sys.__excepthook__ = trio_excepthook # type: ignore
apport_python_hook.sys = fake_sys
monkeypatched_or_warned = True
if not monkeypatched_or_warned:
warnings.warn(
"You seem to already have a custom sys.excepthook handler "
"installed. I'll skip installing Trio's custom handler, but this "
"means MultiErrors will not show full tracebacks.",
category=RuntimeWarning,
)
| 36.416 | 99 | 0.634172 | import sys
import traceback
import textwrap
import warnings
exc_key = id
modified, then the 'raise' here makes this
# frame show up in the traceback; otherwise, we leave no trace.)
class MultiErrorCatcher:
_handler = None
def __enter__(self):
pass
def __exit__(self, etype, exc, tb):
if exc is not None:
filtered_exc = MultiError.filter(self._handler, exc)
if filtered_exc is exc:
# Let the interpreter re-raise it
return False
if filtered_exc is None:
# Swallow the exception
return True
# When we raise filtered_exc, Python will unconditionally blow
# away its __context__ attribute and replace it with the original
# exc we caught. So after we raise it, we have to pause it while
# it's in flight to put the correct __context__ back.
old_context = filtered_exc.__context__
try:
raise filtered_exc
finally:
_, value, _ = sys.exc_info()
assert value is filtered_exc
value.__context__ = old_context
class MultiError(Exception):
def __init__(self, exceptions):
if hasattr(self, "exceptions"):
assert len(exceptions) == 1 and exceptions[0] is self
return
self.exceptions = exceptions
def __new__(cls, exceptions):
exceptions = list(exceptions)
for exc in exceptions:
if not isinstance(exc, Exception):
raise TypeError("Expected an exception object, not {!r}".format(exc))
if len(exceptions) == 1:
return exceptions[0]
else:
# simply set the `exceptions` attribute directly on the new object.
# However, linters expect attributes to be initialized in __init__.
return Exception.__new__(cls, exceptions)
def __str__(self):
return ", ".join(repr(exc) for exc in self.exceptions)
def __repr__(self):
return "<MultiError: {}>".format(self)
@classmethod
def filter(cls, handler, root_exc):
return _filter_impl(handler, root_exc)
@classmethod
def catch(cls, handler):
return MultiErrorCatcher(handler)
# Clean up exception printing:
MultiError.__module__ = "asyncgui"
################################################################
# concat_tb
################################################################
# We need to compute a new traceback that is the concatenation of two existing
# tracebacks. This requires copying the entries in 'head' and then pointing
# the final tb_next to 'tail'.
#
# NB: 'tail' might be None, which requires some special handling in the ctypes
# version.
#
# The complication here is that Python doesn't actually support copying or
try:
import tputil
except ImportError:
have_tproxy = False
else:
have_tproxy = True
if have_tproxy:
def copy_tb(base_tb, tb_next):
def controller(operation):
# 'opname' that isn't __getattr__ or __getattribute__. So there's
# no missing test we could add, and no value in coverage nagging
# us about adding one.
if operation.opname in [
"__getattribute__",
"__getattr__",
]: # pragma: no cover
if operation.args[0] == "tb_next":
return tb_next
return operation.delegate()
return tputil.make_proxy(controller, type(base_tb), base_tb)
else:
# ctypes it is
import ctypes
# How to handle refcounting? I don't want to use ctypes.py_object because
import _ctypes
class CTraceback(ctypes.Structure):
_fields_ = [
("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
("tb_next", ctypes.c_void_p),
("tb_frame", ctypes.c_void_p),
("tb_lasti", ctypes.c_int),
("tb_lineno", ctypes.c_int),
]
def copy_tb(base_tb, tb_next):
try:
raise ValueError
except ValueError as exc:
new_tb = exc.__traceback__
c_new_tb = CTraceback.from_address(id(new_tb))
# pointer! Here we know that our new traceback has only 1 frame in it,
# so we can assume the tb_next field is NULL.
assert c_new_tb.tb_next is None
# If tb_next is None, then we want to set c_new_tb.tb_next to NULL,
# which it already is, so we're done. Otherwise, we have to actually
if tb_next is not None:
_ctypes.Py_INCREF(tb_next)
c_new_tb.tb_next = id(tb_next)
assert c_new_tb.tb_frame is not None
_ctypes.Py_INCREF(base_tb.tb_frame)
old_tb_frame = new_tb.tb_frame
c_new_tb.tb_frame = id(base_tb.tb_frame)
_ctypes.Py_DECREF(old_tb_frame)
c_new_tb.tb_lasti = base_tb.tb_lasti
c_new_tb.tb_lineno = base_tb.tb_lineno
return new_tb
def concat_tb(head, tail):
head_tbs = []
pointer = head
while pointer is not None:
head_tbs.append(pointer)
pointer = pointer.tb_next
current_head = tail
for head_tb in reversed(head_tbs):
current_head = copy_tb(head_tb, tb_next=current_head)
return current_head
| true | true |
f7f5c3805e224faa0e6f4135b79f272a7f215658 | 694 | py | Python | examples/cxx/exceptions/example_test.py | cablelabs/esp-idf | a8f9a65f251a33cd5a5deaf14e95ca417abad41b | [
"Apache-2.0"
] | 8,747 | 2016-08-18T14:58:24.000Z | 2022-03-31T20:58:55.000Z | examples/cxx/exceptions/example_test.py | cablelabs/esp-idf | a8f9a65f251a33cd5a5deaf14e95ca417abad41b | [
"Apache-2.0"
] | 8,603 | 2016-08-20T08:55:56.000Z | 2022-03-31T23:04:01.000Z | examples/cxx/exceptions/example_test.py | cablelabs/esp-idf | a8f9a65f251a33cd5a5deaf14e95ca417abad41b | [
"Apache-2.0"
] | 6,380 | 2016-08-18T18:17:00.000Z | 2022-03-31T22:25:57.000Z | from __future__ import print_function
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32', 'esp32c3'])
def test_examples_system_cpp_exceptions(env, extra_data):
dut = env.get_dut('cpp_exceptions_example', 'examples/cxx/exceptions')
# start test
dut.start_app()
lines = ['app_main starting',
'In constructor, arg=42',
'In constructor, arg=0',
'In destructor, m_arg=42',
'Exception caught: Exception in constructor',
'app_main done'
]
for line in lines:
dut.expect(line, timeout=2)
if __name__ == '__main__':
test_examples_system_cpp_exceptions()
| 28.916667 | 82 | 0.651297 | from __future__ import print_function
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32', 'esp32c3'])
def test_examples_system_cpp_exceptions(env, extra_data):
dut = env.get_dut('cpp_exceptions_example', 'examples/cxx/exceptions')
dut.start_app()
lines = ['app_main starting',
'In constructor, arg=42',
'In constructor, arg=0',
'In destructor, m_arg=42',
'Exception caught: Exception in constructor',
'app_main done'
]
for line in lines:
dut.expect(line, timeout=2)
if __name__ == '__main__':
test_examples_system_cpp_exceptions()
| true | true |
f7f5c3a340fc76671335f919b083f33982664084 | 37,744 | py | Python | src/v5.3/resources/swagger_client/api/grading_periods_api.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | 2 | 2021-04-27T17:18:17.000Z | 2021-04-27T19:14:39.000Z | src/v5.3/resources/swagger_client/api/grading_periods_api.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | null | null | null | src/v5.3/resources/swagger_client/api/grading_periods_api.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | 1 | 2022-01-06T09:43:11.000Z | 2022-01-06T09:43:11.000Z | # coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class GradingPeriodsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_grading_period_by_id(self, id, **kwargs): # noqa: E501
"""Deletes an existing resource using the resource identifier. # noqa: E501
The DELETE operation is used to delete an existing resource by identifier. If the resource doesn't exist, an error will result (the resource will not be found). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_grading_period_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_match: The ETag header value used to prevent the DELETE from removing a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_grading_period_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_grading_period_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_grading_period_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Deletes an existing resource using the resource identifier. # noqa: E501
The DELETE operation is used to delete an existing resource by identifier. If the resource doesn't exist, an error will result (the resource will not be found). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_grading_period_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_match: The ETag header value used to prevent the DELETE from removing a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'if_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_grading_period_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `delete_grading_period_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_match' in params:
header_params['If-Match'] = params['if_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/gradingPeriods/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def deletes_grading_periods(self, **kwargs): # noqa: E501
"""Retrieves deleted resources based on change version. # noqa: E501
The DELETES operation is used to retrieve deleted resources. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deletes_grading_periods(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[DeletedResource]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deletes_grading_periods_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.deletes_grading_periods_with_http_info(**kwargs) # noqa: E501
return data
def deletes_grading_periods_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves deleted resources based on change version. # noqa: E501
The DELETES operation is used to retrieve deleted resources. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deletes_grading_periods_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[DeletedResource]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'min_change_version', 'max_change_version', 'snapshot_identifier'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deletes_grading_periods" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `deletes_grading_periods`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `deletes_grading_periods`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'min_change_version' in params:
query_params.append(('minChangeVersion', params['min_change_version'])) # noqa: E501
if 'max_change_version' in params:
query_params.append(('maxChangeVersion', params['max_change_version'])) # noqa: E501
header_params = {}
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/gradingPeriods/deletes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DeletedResource]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_grading_periods(self, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_grading_periods(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:param bool total_count: Indicates if the total number of items available should be returned in the 'Total-Count' header of the response. If set to false, 'Total-Count' header will not be provided.
:param str grading_period_descriptor: The name of the period for which grades are reported.
:param int period_sequence: The sequential order of this period relative to other periods.
:param int school_id: The identifier assigned to a school.
:param int school_year: The identifier for the grading period school year.
:param date begin_date: Month, day, and year of the first day of the GradingPeriod.
:param date end_date: Month, day, and year of the last day of the GradingPeriod.
:param str id:
:param int total_instructional_days: Total days available for educational instruction during the GradingPeriod.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[EdFiGradingPeriod]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_grading_periods_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_grading_periods_with_http_info(**kwargs) # noqa: E501
return data
def get_grading_periods_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_grading_periods_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:param bool total_count: Indicates if the total number of items available should be returned in the 'Total-Count' header of the response. If set to false, 'Total-Count' header will not be provided.
:param str grading_period_descriptor: The name of the period for which grades are reported.
:param int period_sequence: The sequential order of this period relative to other periods.
:param int school_id: The identifier assigned to a school.
:param int school_year: The identifier for the grading period school year.
:param date begin_date: Month, day, and year of the first day of the GradingPeriod.
:param date end_date: Month, day, and year of the last day of the GradingPeriod.
:param str id:
:param int total_instructional_days: Total days available for educational instruction during the GradingPeriod.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[EdFiGradingPeriod]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'min_change_version', 'max_change_version', 'total_count', 'grading_period_descriptor', 'period_sequence', 'school_id', 'school_year', 'begin_date', 'end_date', 'id', 'total_instructional_days', 'snapshot_identifier'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_grading_periods" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_grading_periods`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_grading_periods`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and ('grading_period_descriptor' in params and
len(params['grading_period_descriptor']) > 306):
raise ValueError("Invalid value for parameter `grading_period_descriptor` when calling `get_grading_periods`, length must be less than or equal to `306`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'min_change_version' in params:
query_params.append(('minChangeVersion', params['min_change_version'])) # noqa: E501
if 'max_change_version' in params:
query_params.append(('maxChangeVersion', params['max_change_version'])) # noqa: E501
if 'total_count' in params:
query_params.append(('totalCount', params['total_count'])) # noqa: E501
if 'grading_period_descriptor' in params:
query_params.append(('gradingPeriodDescriptor', params['grading_period_descriptor'])) # noqa: E501
if 'period_sequence' in params:
query_params.append(('periodSequence', params['period_sequence'])) # noqa: E501
if 'school_id' in params:
query_params.append(('schoolId', params['school_id'])) # noqa: E501
if 'school_year' in params:
query_params.append(('schoolYear', params['school_year'])) # noqa: E501
if 'begin_date' in params:
query_params.append(('beginDate', params['begin_date'])) # noqa: E501
if 'end_date' in params:
query_params.append(('endDate', params['end_date'])) # noqa: E501
if 'id' in params:
query_params.append(('id', params['id'])) # noqa: E501
if 'total_instructional_days' in params:
query_params.append(('totalInstructionalDays', params['total_instructional_days'])) # noqa: E501
header_params = {}
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/gradingPeriods', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EdFiGradingPeriod]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_grading_periods_by_id(self, id, **kwargs): # noqa: E501
"""Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501
This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_grading_periods_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: EdFiGradingPeriod
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_grading_periods_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_grading_periods_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_grading_periods_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501
This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_grading_periods_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: EdFiGradingPeriod
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'if_none_match', 'snapshot_identifier'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_grading_periods_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `get_grading_periods_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_none_match' in params:
header_params['If-None-Match'] = params['if_none_match'] # noqa: E501
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/gradingPeriods/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EdFiGradingPeriod', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_grading_period(self, grading_period, **kwargs): # noqa: E501
"""Creates or updates resources based on the natural key values of the supplied resource. # noqa: E501
The POST operation can be used to create or update resources. In database terms, this is often referred to as an \"upsert\" operation (insert + update). Clients should NOT include the resource \"id\" in the JSON body because it will result in an error. The web service will identify whether the resource already exists based on the natural key values provided, and update or create the resource appropriately. It is recommended to use POST for both create and update except while updating natural key of a resource in which case PUT operation must be used. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_grading_period(grading_period, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EdFiGradingPeriod grading_period: The JSON representation of the \"gradingPeriod\" resource to be created or updated. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_grading_period_with_http_info(grading_period, **kwargs) # noqa: E501
else:
(data) = self.post_grading_period_with_http_info(grading_period, **kwargs) # noqa: E501
return data
def post_grading_period_with_http_info(self, grading_period, **kwargs): # noqa: E501
"""Creates or updates resources based on the natural key values of the supplied resource. # noqa: E501
The POST operation can be used to create or update resources. In database terms, this is often referred to as an \"upsert\" operation (insert + update). Clients should NOT include the resource \"id\" in the JSON body because it will result in an error. The web service will identify whether the resource already exists based on the natural key values provided, and update or create the resource appropriately. It is recommended to use POST for both create and update except while updating natural key of a resource in which case PUT operation must be used. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_grading_period_with_http_info(grading_period, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EdFiGradingPeriod grading_period: The JSON representation of the \"gradingPeriod\" resource to be created or updated. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['grading_period'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_grading_period" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'grading_period' is set
if self.api_client.client_side_validation and ('grading_period' not in params or
params['grading_period'] is None): # noqa: E501
raise ValueError("Missing the required parameter `grading_period` when calling `post_grading_period`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'grading_period' in params:
body_params = params['grading_period']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/gradingPeriods', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def put_grading_period(self, id, grading_period, **kwargs): # noqa: E501
"""Updates a resource based on the resource identifier. # noqa: E501
The PUT operation is used to update a resource by identifier. If the resource identifier (\"id\") is provided in the JSON body, it will be ignored. Additionally, this API resource is not configured for cascading natural key updates. Natural key values for this resource cannot be changed using PUT operation and will not be modified in the database, and so recommendation is to use POST as that supports upsert behavior. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_grading_period(id, grading_period, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param EdFiGradingPeriod grading_period: The JSON representation of the \"gradingPeriod\" resource to be created or updated. (required)
:param str if_match: The ETag header value used to prevent the PUT from updating a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.put_grading_period_with_http_info(id, grading_period, **kwargs) # noqa: E501
else:
(data) = self.put_grading_period_with_http_info(id, grading_period, **kwargs) # noqa: E501
return data
def put_grading_period_with_http_info(self, id, grading_period, **kwargs): # noqa: E501
"""Updates a resource based on the resource identifier. # noqa: E501
The PUT operation is used to update a resource by identifier. If the resource identifier (\"id\") is provided in the JSON body, it will be ignored. Additionally, this API resource is not configured for cascading natural key updates. Natural key values for this resource cannot be changed using PUT operation and will not be modified in the database, and so recommendation is to use POST as that supports upsert behavior. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_grading_period_with_http_info(id, grading_period, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param EdFiGradingPeriod grading_period: The JSON representation of the \"gradingPeriod\" resource to be created or updated. (required)
:param str if_match: The ETag header value used to prevent the PUT from updating a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'grading_period', 'if_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_grading_period" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `put_grading_period`") # noqa: E501
# verify the required parameter 'grading_period' is set
if self.api_client.client_side_validation and ('grading_period' not in params or
params['grading_period'] is None): # noqa: E501
raise ValueError("Missing the required parameter `grading_period` when calling `put_grading_period`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_match' in params:
header_params['If-Match'] = params['if_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'grading_period' in params:
body_params = params['grading_period']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/gradingPeriods/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 52.132597 | 578 | 0.653905 |
from __future__ import absolute_import
import re
import six
from swagger_client.api_client import ApiClient
class GradingPeriodsApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_grading_period_by_id(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_grading_period_by_id_with_http_info(id, **kwargs)
else:
(data) = self.delete_grading_period_by_id_with_http_info(id, **kwargs)
return data
def delete_grading_period_by_id_with_http_info(self, id, **kwargs):
all_params = ['id', 'if_match']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_grading_period_by_id" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_grading_period_by_id`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
if 'if_match' in params:
header_params['If-Match'] = params['if_match']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['oauth2_client_credentials']
return self.api_client.call_api(
'/ed-fi/gradingPeriods/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def deletes_grading_periods(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deletes_grading_periods_with_http_info(**kwargs)
else:
(data) = self.deletes_grading_periods_with_http_info(**kwargs)
return data
def deletes_grading_periods_with_http_info(self, **kwargs):
all_params = ['offset', 'limit', 'min_change_version', 'max_change_version', 'snapshot_identifier']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deletes_grading_periods" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500):
raise ValueError("Invalid value for parameter `limit` when calling `deletes_grading_periods`, must be a value less than or equal to `500`")
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0):
raise ValueError("Invalid value for parameter `limit` when calling `deletes_grading_periods`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'min_change_version' in params:
query_params.append(('minChangeVersion', params['min_change_version']))
if 'max_change_version' in params:
query_params.append(('maxChangeVersion', params['max_change_version']))
header_params = {}
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['oauth2_client_credentials']
return self.api_client.call_api(
'/ed-fi/gradingPeriods/deletes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DeletedResource]',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_grading_periods(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_grading_periods_with_http_info(**kwargs)
else:
(data) = self.get_grading_periods_with_http_info(**kwargs)
return data
def get_grading_periods_with_http_info(self, **kwargs):
all_params = ['offset', 'limit', 'min_change_version', 'max_change_version', 'total_count', 'grading_period_descriptor', 'period_sequence', 'school_id', 'school_year', 'begin_date', 'end_date', 'id', 'total_instructional_days', 'snapshot_identifier']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_grading_periods" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500):
raise ValueError("Invalid value for parameter `limit` when calling `get_grading_periods`, must be a value less than or equal to `500`")
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0):
raise ValueError("Invalid value for parameter `limit` when calling `get_grading_periods`, must be a value greater than or equal to `0`")
if self.api_client.client_side_validation and ('grading_period_descriptor' in params and
len(params['grading_period_descriptor']) > 306):
raise ValueError("Invalid value for parameter `grading_period_descriptor` when calling `get_grading_periods`, length must be less than or equal to `306`")
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'min_change_version' in params:
query_params.append(('minChangeVersion', params['min_change_version']))
if 'max_change_version' in params:
query_params.append(('maxChangeVersion', params['max_change_version']))
if 'total_count' in params:
query_params.append(('totalCount', params['total_count']))
if 'grading_period_descriptor' in params:
query_params.append(('gradingPeriodDescriptor', params['grading_period_descriptor']))
if 'period_sequence' in params:
query_params.append(('periodSequence', params['period_sequence']))
if 'school_id' in params:
query_params.append(('schoolId', params['school_id']))
if 'school_year' in params:
query_params.append(('schoolYear', params['school_year']))
if 'begin_date' in params:
query_params.append(('beginDate', params['begin_date']))
if 'end_date' in params:
query_params.append(('endDate', params['end_date']))
if 'id' in params:
query_params.append(('id', params['id']))
if 'total_instructional_days' in params:
query_params.append(('totalInstructionalDays', params['total_instructional_days']))
header_params = {}
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['oauth2_client_credentials']
return self.api_client.call_api(
'/ed-fi/gradingPeriods', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EdFiGradingPeriod]',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_grading_periods_by_id(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_grading_periods_by_id_with_http_info(id, **kwargs)
else:
(data) = self.get_grading_periods_by_id_with_http_info(id, **kwargs)
return data
def get_grading_periods_by_id_with_http_info(self, id, **kwargs):
all_params = ['id', 'if_none_match', 'snapshot_identifier']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_grading_periods_by_id" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_grading_periods_by_id`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
if 'if_none_match' in params:
header_params['If-None-Match'] = params['if_none_match']
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['oauth2_client_credentials']
return self.api_client.call_api(
'/ed-fi/gradingPeriods/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EdFiGradingPeriod',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_grading_period(self, grading_period, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_grading_period_with_http_info(grading_period, **kwargs)
else:
(data) = self.post_grading_period_with_http_info(grading_period, **kwargs)
return data
def post_grading_period_with_http_info(self, grading_period, **kwargs):
all_params = ['grading_period']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_grading_period" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('grading_period' not in params or
params['grading_period'] is None):
raise ValueError("Missing the required parameter `grading_period` when calling `post_grading_period`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'grading_period' in params:
body_params = params['grading_period']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['oauth2_client_credentials']
return self.api_client.call_api(
'/ed-fi/gradingPeriods', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def put_grading_period(self, id, grading_period, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.put_grading_period_with_http_info(id, grading_period, **kwargs)
else:
(data) = self.put_grading_period_with_http_info(id, grading_period, **kwargs)
return data
def put_grading_period_with_http_info(self, id, grading_period, **kwargs):
all_params = ['id', 'grading_period', 'if_match']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_grading_period" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `put_grading_period`")
if self.api_client.client_side_validation and ('grading_period' not in params or
params['grading_period'] is None):
raise ValueError("Missing the required parameter `grading_period` when calling `put_grading_period`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
if 'if_match' in params:
header_params['If-Match'] = params['if_match']
form_params = []
local_var_files = {}
body_params = None
if 'grading_period' in params:
body_params = params['grading_period']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['oauth2_client_credentials']
return self.api_client.call_api(
'/ed-fi/gradingPeriods/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| true | true |
f7f5c443f2c5959b87c2e5bd81cb08ddad82aedb | 6,625 | py | Python | venv/Lib/site-packages/pybrain/optimization/distributionbased/cmaes.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pybrain/optimization/distributionbased/cmaes.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pybrain/optimization/distributionbased/cmaes.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | __author__ = 'Tom Schaul, tom@idsia.ch; Sun Yi, yi@idsia.ch'
from numpy import floor, log, eye, zeros, array, sqrt, sum, dot, tile, outer, real
from numpy import exp, diag, power, ravel
from numpy.linalg import eig, norm
from numpy.random import randn
from pybrain.optimization.optimizer import ContinuousOptimizer
class CMAES(ContinuousOptimizer):
""" CMA-ES: Evolution Strategy with Covariance Matrix Adaptation for
nonlinear function minimization.
This code is a close transcription of the provided matlab code.
"""
mustMinimize = True
stopPrecision = 1e-6
storeAllCenters = False
def _additionalInit(self):
self.center = self._initEvaluable
self.stepSize = 0.5 # coordinate wise standard deviation (sigma)
if self.storeAllCenters:
self._allCenters = []
# Strategy parameter setting: Selection
# population size, offspring number
self.mu = int(floor(self.batchSize / 2)) # number of parents/points for recombination
self.weights = log(self.mu + 1) - log(array(xrange(1, self.mu + 1))) # use array
self.weights /= sum(self.weights) # normalize recombination weights array
self.muEff = sum(self.weights) ** 2 / sum(power(self.weights, 2)) # variance-effective size of mu
# Strategy parameter setting: Adaptation
self.cumCov = 4 / float(self.numParameters + 4) # time constant for cumulation for covariance matrix
self.cumStep = (self.muEff + 2) / (self.numParameters + self.muEff + 3)# t-const for cumulation for Size control
self.muCov = self.muEff # size of mu used for calculating learning rate covLearningRate
self.covLearningRate = ((1 / self.muCov) * 2 / (self.numParameters + 1.4) ** 2 + (1 - 1 / self.muCov) * # learning rate for
((2 * self.muEff - 1) / ((self.numParameters + 2) ** 2 + 2 * self.muEff))) # covariance matrix
self.dampings = 1 + 2 * max(0, sqrt((self.muEff - 1) / (self.numParameters + 1)) - 1) + self.cumStep
# damping for stepSize usually close to 1 former damp == self.dampings/self.cumStep
# Initialize dynamic (internal) strategy parameters and constants
self.covPath = zeros(self.numParameters)
self.stepPath = zeros(self.numParameters) # evolution paths for C and stepSize
self.B = eye(self.numParameters, self.numParameters) # B defines the coordinate system
self.D = eye(self.numParameters, self.numParameters) # diagonal matrix D defines the scaling
self.C = dot(dot(self.B, self.D), dot(self.B, self.D).T) # covariance matrix
self.chiN = self.numParameters ** 0.5 * (1 - 1. / (4. * self.numParameters) + 1 / (21. * self.numParameters ** 2))
# expectation of ||numParameters(0,I)|| == norm(randn(numParameters,1))
def _learnStep(self):
# Generate and evaluate lambda offspring
arz = randn(self.numParameters, self.batchSize)
arx = tile(self.center.reshape(self.numParameters, 1), (1, self.batchSize))\
+ self.stepSize * dot(dot(self.B, self.D), arz)
arfitness = zeros(self.batchSize)
for k in xrange(self.batchSize):
arfitness[k] = self._oneEvaluation(arx[:, k])
# Sort by fitness and compute weighted mean into center
arfitness, arindex = sorti(arfitness) # minimization
arz = arz[:, arindex]
arx = arx[:, arindex]
arzsel = arz[:, xrange(self.mu)]
arxsel = arx[:, xrange(self.mu)]
arxmut = arxsel - tile(self.center.reshape(self.numParameters, 1), (1, self.mu))
zmean = dot(arzsel, self.weights)
self.center = dot(arxsel, self.weights)
if self.storeAllCenters:
self.allCenters.append(self.center)
# Cumulation: Update evolution paths
self.stepPath = (1 - self.cumStep) * self.stepPath \
+ sqrt(self.cumStep * (2 - self.cumStep) * self.muEff) * dot(self.B, zmean) # Eq. (4)
hsig = norm(self.stepPath) / sqrt(1 - (1 - self.cumStep) ** (2 * self.numEvaluations / float(self.batchSize))) / self.chiN \
< 1.4 + 2. / (self.numParameters + 1)
self.covPath = (1 - self.cumCov) * self.covPath + hsig * \
sqrt(self.cumCov * (2 - self.cumCov) * self.muEff) * dot(dot(self.B, self.D), zmean) # Eq. (2)
# Adapt covariance matrix C
self.C = ((1 - self.covLearningRate) * self.C # regard old matrix % Eq. (3)
+ self.covLearningRate * (1 / self.muCov) * (outer(self.covPath, self.covPath) # plus rank one update
+ (1 - hsig) * self.cumCov * (2 - self.cumCov) * self.C)
+ self.covLearningRate * (1 - 1 / self.muCov) # plus rank mu update
* dot(dot(arxmut, diag(self.weights)), arxmut.T)
)
# Adapt step size self.stepSize
self.stepSize *= exp((self.cumStep / self.dampings) * (norm(self.stepPath) / self.chiN - 1)) # Eq. (5)
# Update B and D from C
# This is O(n^3). When strategy internal CPU-time is critical, the
# next three lines should be executed only every (alpha/covLearningRate/N)-th
# iteration, where alpha is e.g. between 0.1 and 10
self.C = (self.C + self.C.T) / 2 # enforce symmetry
Ev, self.B = eig(self.C) # eigen decomposition, B==normalized eigenvectors
Ev = real(Ev) # enforce real value
self.D = diag(sqrt(Ev)) #diag(ravel(sqrt(Ev))) # D contains standard deviations now
self.B = real(self.B)
# convergence is reached
if abs((arfitness[0] - arfitness[-1]) / arfitness[0] + arfitness[-1]) <= self.stopPrecision:
if self.verbose:
print "Converged."
self.maxLearningSteps = self.numLearningSteps
# or diverged, unfortunately
if min(Ev) > 1e5:
if self.verbose:
print "Diverged."
self.maxLearningSteps = self.numLearningSteps
@property
def batchSize(self):
return int(4 + floor(3 * log(self.numParameters)))
def sorti(vect):
""" sort, but also return the indices-changes """
tmp = sorted(map(lambda (x, y): (y, x), enumerate(ravel(vect))))
res1 = array(map(lambda x: x[0], tmp))
res2 = array(map(lambda x: int(x[1]), tmp))
return res1, res2
| 51.356589 | 133 | 0.594868 | __author__ = 'Tom Schaul, tom@idsia.ch; Sun Yi, yi@idsia.ch'
from numpy import floor, log, eye, zeros, array, sqrt, sum, dot, tile, outer, real
from numpy import exp, diag, power, ravel
from numpy.linalg import eig, norm
from numpy.random import randn
from pybrain.optimization.optimizer import ContinuousOptimizer
class CMAES(ContinuousOptimizer):
""" CMA-ES: Evolution Strategy with Covariance Matrix Adaptation for
nonlinear function minimization.
This code is a close transcription of the provided matlab code.
"""
mustMinimize = True
stopPrecision = 1e-6
storeAllCenters = False
def _additionalInit(self):
self.center = self._initEvaluable
self.stepSize = 0.5
if self.storeAllCenters:
self._allCenters = []
self.mu = int(floor(self.batchSize / 2))
self.weights = log(self.mu + 1) - log(array(xrange(1, self.mu + 1)))
self.weights /= sum(self.weights)
self.muEff = sum(self.weights) ** 2 / sum(power(self.weights, 2))
self.cumCov = 4 / float(self.numParameters + 4)
self.cumStep = (self.muEff + 2) / (self.numParameters + self.muEff + 3)
self.muCov = self.muEff
self.covLearningRate = ((1 / self.muCov) * 2 / (self.numParameters + 1.4) ** 2 + (1 - 1 / self.muCov) *
((2 * self.muEff - 1) / ((self.numParameters + 2) ** 2 + 2 * self.muEff)))
self.dampings = 1 + 2 * max(0, sqrt((self.muEff - 1) / (self.numParameters + 1)) - 1) + self.cumStep
self.covPath = zeros(self.numParameters)
self.stepPath = zeros(self.numParameters)
self.B = eye(self.numParameters, self.numParameters)
self.D = eye(self.numParameters, self.numParameters)
self.C = dot(dot(self.B, self.D), dot(self.B, self.D).T)
self.chiN = self.numParameters ** 0.5 * (1 - 1. / (4. * self.numParameters) + 1 / (21. * self.numParameters ** 2))
def _learnStep(self):
arz = randn(self.numParameters, self.batchSize)
arx = tile(self.center.reshape(self.numParameters, 1), (1, self.batchSize))\
+ self.stepSize * dot(dot(self.B, self.D), arz)
arfitness = zeros(self.batchSize)
for k in xrange(self.batchSize):
arfitness[k] = self._oneEvaluation(arx[:, k])
arfitness, arindex = sorti(arfitness)
arz = arz[:, arindex]
arx = arx[:, arindex]
arzsel = arz[:, xrange(self.mu)]
arxsel = arx[:, xrange(self.mu)]
arxmut = arxsel - tile(self.center.reshape(self.numParameters, 1), (1, self.mu))
zmean = dot(arzsel, self.weights)
self.center = dot(arxsel, self.weights)
if self.storeAllCenters:
self.allCenters.append(self.center)
self.stepPath = (1 - self.cumStep) * self.stepPath \
+ sqrt(self.cumStep * (2 - self.cumStep) * self.muEff) * dot(self.B, zmean)
hsig = norm(self.stepPath) / sqrt(1 - (1 - self.cumStep) ** (2 * self.numEvaluations / float(self.batchSize))) / self.chiN \
< 1.4 + 2. / (self.numParameters + 1)
self.covPath = (1 - self.cumCov) * self.covPath + hsig * \
sqrt(self.cumCov * (2 - self.cumCov) * self.muEff) * dot(dot(self.B, self.D), zmean)
self.C = ((1 - self.covLearningRate) * self.C
+ self.covLearningRate * (1 / self.muCov) * (outer(self.covPath, self.covPath)
+ (1 - hsig) * self.cumCov * (2 - self.cumCov) * self.C)
+ self.covLearningRate * (1 - 1 / self.muCov)
* dot(dot(arxmut, diag(self.weights)), arxmut.T)
)
self.stepSize *= exp((self.cumStep / self.dampings) * (norm(self.stepPath) / self.chiN - 1))
self.C = (self.C + self.C.T) / 2
Ev, self.B = eig(self.C)
Ev = real(Ev)
self.D = diag(sqrt(Ev))
if abs((arfitness[0] - arfitness[-1]) / arfitness[0] + arfitness[-1]) <= self.stopPrecision:
if self.verbose:
print "Converged."
self.maxLearningSteps = self.numLearningSteps
if min(Ev) > 1e5:
if self.verbose:
print "Diverged."
self.maxLearningSteps = self.numLearningSteps
@property
def batchSize(self):
return int(4 + floor(3 * log(self.numParameters)))
def sorti(vect):
""" sort, but also return the indices-changes """
tmp = sorted(map(lambda (x, y): (y, x), enumerate(ravel(vect))))
res1 = array(map(lambda x: x[0], tmp))
res2 = array(map(lambda x: int(x[1]), tmp))
return res1, res2
| false | true |
f7f5c450ee28aaa23a1f58b9fcf6f616c0b08313 | 3,225 | py | Python | tests/test_latent.py | kecsap/cleanlab | e54adb5f7ec7537c02dd9f3eff473765a087c707 | [
"MIT"
] | 2 | 2020-04-21T11:54:10.000Z | 2020-07-03T02:59:36.000Z | tests/test_latent.py | Royzon/cleanlab | e592e2ae2278018c8fdac33f20fd58659a825c3d | [
"MIT"
] | null | null | null | tests/test_latent.py | Royzon/cleanlab | e592e2ae2278018c8fdac33f20fd58659a825c3d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
from __future__ import print_function, absolute_import, division, unicode_literals, with_statement
from cleanlab import latent_algebra, latent_estimation
import numpy as np
import pytest
s = [0] * 10 + [1] * 5 + [2] * 15
nm = np.array([
[1.0, 0.0, 0.2],
[0.0, 0.7, 0.2],
[0.0, 0.3, 0.6]
])
def test_latent_py_ps_inv():
    """ps, py and the inverse noise matrix must be mutually consistent:
    inv maps ps back to py, and nm maps py forward to ps."""
    ps, py, inv = latent_algebra.compute_ps_py_inv_noise_matrix(s, nm)
    recovered_py = np.dot(inv, ps)
    recovered_ps = np.dot(nm, py)
    assert all(abs(recovered_py - py) < 1e-3)
    assert all(abs(recovered_ps - ps) < 1e-3)
    return ps, py, inv
def test_latent_inv():
    """compute_inv_noise_matrix(py, nm) must match the jointly computed inverse."""
    ps, py, inv = test_latent_py_ps_inv()
    direct_inv = latent_algebra.compute_inv_noise_matrix(py, nm)
    assert np.all(abs(inv - direct_inv) < 1e-3)
def test_latent_nm():
    """Recovering the noise matrix from its inverse must reproduce nm."""
    ps, py, inv = test_latent_py_ps_inv()
    recovered_nm = latent_algebra.compute_noise_matrix_from_inverse(ps, inv, py)
    assert np.all(abs(nm - recovered_nm) < 1e-3)
def test_latent_py():
    """compute_py from (ps, nm, inv) must agree with the jointly computed py."""
    ps, py, inv = test_latent_py_ps_inv()
    recomputed_py = latent_algebra.compute_py(ps, nm, inv)
    assert np.all(abs(py - recomputed_py) < 1e-3)
def test_latent_py_warning():
    """compute_py should reject mis-shaped ps inputs with a TypeError.

    NOTE(review): only the first compute_py call can execute — if it raises
    TypeError as expected, the second call (2-D column ps) is never reached.
    Confirm whether the second shape was meant to be tested separately.
    """
    ps, py, inv = test_latent_py_ps_inv()
    with pytest.raises(TypeError) as e:
        with pytest.warns(UserWarning) as w:
            # 3-D ps: expected to raise (presumably after warning) — TODO confirm
            py2 = latent_algebra.compute_py(
                ps = np.array([[[0.1, 0.3, 0.6]]]),
                noise_matrix = nm,
                inverse_noise_matrix = inv,
            )
            py2 = latent_algebra.compute_py(
                ps = np.array([[0.1], [0.2], [0.7]]),
                noise_matrix = nm,
                inverse_noise_matrix = inv,
            )
    assert(True)
def test_compute_py_err():
    """compute_py with py_method='marginal_ps' but no y_count must raise ValueError.

    The error is checked twice on purpose: once via try/except to inspect the
    message, and once via pytest.raises to assert the exception type.
    """
    ps, py, inv = test_latent_py_ps_inv()
    try:
        py = latent_algebra.compute_py(
            ps = ps,
            noise_matrix = nm,
            inverse_noise_matrix = inv,
            py_method = 'marginal_ps',
        )
    except ValueError as e:
        # The message should point the user at the missing y_count argument.
        assert('y_count' in str(e))
    with pytest.raises(ValueError) as e:
        py = latent_algebra.compute_py(
            ps = ps,
            noise_matrix = nm,
            inverse_noise_matrix = inv,
            py_method = 'marginal_ps',
        )
def test_compute_py_marginal_ps():
    """py estimated via the 'marginal_ps' method should agree with the direct py."""
    ps, py, inv = test_latent_py_ps_inv()
    # Scale the joint distribution back up to absolute per-label counts.
    joint_counts = nm * ps * len(s)
    label_counts = joint_counts.sum(axis=0)
    estimated_py = latent_algebra.compute_py(
        ps=ps,
        noise_matrix=nm,
        inverse_noise_matrix=inv,
        py_method='marginal_ps',
        y_count=label_counts,
    )
    assert all(abs(py - estimated_py) < 1e-2)
def test_pyx():
    """Every row of P(y|x) must sum to one for a valid P(s|x) input."""
    psx = np.array([
        [0.1, 0.3, 0.6],
        [0.1, 0.0, 0.9],
        [0.1, 0.0, 0.9],
        [1.0, 0.0, 0.0],
        [0.1, 0.8, 0.1],
    ])
    ps, py, inv = test_latent_py_ps_inv()
    pyx = latent_algebra.compute_pyx(psx, nm, inv)
    row_sums = np.sum(pyx, axis=1)
    assert np.all(row_sums - 1 < 1e-4)
def test_pyx_error():
    """compute_pyx must reject 1-D psx input with a shape-related ValueError."""
    bad_psx = np.array([0.1, 0.3, 0.6])
    ps, py, inv = test_latent_py_ps_inv()
    # First check the error message, then assert the exception type.
    try:
        pyx = latent_algebra.compute_pyx(bad_psx, nm, inv)
    except ValueError as err:
        assert 'should be (N, K)' in str(err)
    with pytest.raises(ValueError) as e:
        pyx = latent_algebra.compute_pyx(bad_psx, nm, inv)
from __future__ import print_function, absolute_import, division, unicode_literals, with_statement
from cleanlab import latent_algebra, latent_estimation
import numpy as np
import pytest
s = [0] * 10 + [1] * 5 + [2] * 15
nm = np.array([
[1.0, 0.0, 0.2],
[0.0, 0.7, 0.2],
[0.0, 0.3, 0.6]
])
def test_latent_py_ps_inv():
ps, py, inv = latent_algebra.compute_ps_py_inv_noise_matrix(s, nm)
assert(all(abs(np.dot(inv, ps) - py) < 1e-3))
assert(all(abs(np.dot(nm, py) - ps) < 1e-3))
return ps, py, inv
def test_latent_inv():
ps, py, inv = test_latent_py_ps_inv()
inv2 = latent_algebra.compute_inv_noise_matrix(py, nm)
assert(np.all(abs(inv - inv2) < 1e-3))
def test_latent_nm():
ps, py, inv = test_latent_py_ps_inv()
nm2 = latent_algebra.compute_noise_matrix_from_inverse(ps, inv, py)
assert(np.all(abs(nm - nm2) < 1e-3))
def test_latent_py():
ps, py, inv = test_latent_py_ps_inv()
py2 = latent_algebra.compute_py(ps, nm, inv)
assert(np.all(abs(py - py2) < 1e-3))
def test_latent_py_warning():
ps, py, inv = test_latent_py_ps_inv()
with pytest.raises(TypeError) as e:
with pytest.warns(UserWarning) as w:
py2 = latent_algebra.compute_py(
ps = np.array([[[0.1, 0.3, 0.6]]]),
noise_matrix = nm,
inverse_noise_matrix = inv,
)
py2 = latent_algebra.compute_py(
ps = np.array([[0.1], [0.2], [0.7]]),
noise_matrix = nm,
inverse_noise_matrix = inv,
)
assert(True)
def test_compute_py_err():
ps, py, inv = test_latent_py_ps_inv()
try:
py = latent_algebra.compute_py(
ps = ps,
noise_matrix = nm,
inverse_noise_matrix = inv,
py_method = 'marginal_ps',
)
except ValueError as e:
assert('y_count' in str(e))
with pytest.raises(ValueError) as e:
py = latent_algebra.compute_py(
ps = ps,
noise_matrix = nm,
inverse_noise_matrix = inv,
py_method = 'marginal_ps',
)
def test_compute_py_marginal_ps():
ps, py, inv = test_latent_py_ps_inv()
cj = nm * ps * len(s)
y_count = cj.sum(axis = 0)
py2 = latent_algebra.compute_py(
ps = ps,
noise_matrix = nm,
inverse_noise_matrix = inv,
py_method = 'marginal_ps',
y_count = y_count
)
assert(all(abs(py - py2) < 1e-2))
def test_pyx():
psx = np.array([
[0.1, 0.3, 0.6],
[0.1, 0.0, 0.9],
[0.1, 0.0, 0.9],
[1.0, 0.0, 0.0],
[0.1, 0.8, 0.1],
])
ps, py, inv = test_latent_py_ps_inv()
pyx = latent_algebra.compute_pyx(psx, nm, inv)
assert(np.all(np.sum(pyx, axis = 1) - 1 < 1e-4))
def test_pyx_error():
psx = np.array([0.1, 0.3, 0.6])
ps, py, inv = test_latent_py_ps_inv()
try:
pyx = latent_algebra.compute_pyx(psx, nm, inv)
except ValueError as e:
assert('should be (N, K)' in str(e))
with pytest.raises(ValueError) as e:
pyx = latent_algebra.compute_pyx(psx, nm, inv) | true | true |
f7f5c4640348689f7e2fc71a23715f9bc217fc03 | 4,981 | py | Python | tequila_fab/ansible.py | caktus/tequila-fab | 8b216db76fe959d010b7bf286e76ad9d8c68c7b8 | [
"BSD-3-Clause"
] | null | null | null | tequila_fab/ansible.py | caktus/tequila-fab | 8b216db76fe959d010b7bf286e76ad9d8c68c7b8 | [
"BSD-3-Clause"
] | 2 | 2019-04-01T13:35:04.000Z | 2019-09-06T15:10:36.000Z | tequila_fab/ansible.py | caktus/tequila-fab | 8b216db76fe959d010b7bf286e76ad9d8c68c7b8 | [
"BSD-3-Clause"
] | null | null | null | import configparser
import functools
import os
import os.path
# FIXME: when we drop Python 2 support, change the comment-style type annotations to Python 3 style.
import yaml
from fabric.api import env, local
from fabric.colors import red, green, yellow
from fabric.decorators import task
from fabric.tasks import execute
from fabric.utils import abort
@functools.lru_cache(maxsize=1)
def find_ansible_config_file():  # type: () -> Optional[str]
    """
    Locate the ansible config file the same way ansible itself does.

    Candidates are checked in order: the ANSIBLE_CONFIG environment
    variable, ./ansible.cfg, ~/.ansible.cfg, /etc/ansible/ansible.cfg.
    Returns the absolute path of the first existing candidate, or None
    when no config file is found.
    """
    candidates = (
        os.environ.get('ANSIBLE_CONFIG', False),
        'ansible.cfg',
        os.path.join(os.environ['HOME'], '.ansible.cfg'),
        '/etc/ansible/ansible.cfg',
    )
    for candidate in candidates:
        # The env-var entry may be False/empty; skip falsy candidates.
        if candidate and os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
@functools.lru_cache(maxsize=1)
def get_ansible_configuration():  # type: () -> configparser.ConfigParser
    """
    Parse and return ansible's configuration.

    Pre-populates the ``[defaults]`` section with ansible's stock
    ``roles_path`` so that lookups work even when no config file exists or
    the file omits the key; values read from the config file override it.
    """
    config = configparser.ConfigParser()
    # Bug fix: ConfigParser section names are case-sensitive, so this
    # section must be 'defaults' to match the config['defaults'] lookup in
    # get_roles_path(); it was previously 'DEFAULTS' and never consulted.
    # Bug fix: os.path.join discards components preceding an absolute path,
    # so the HOME suffix must be relative ('.ansible/roles'), not
    # '/.ansible/roles'.
    config['defaults'] = {
        'roles_path': os.path.join(os.environ['HOME'], '.ansible/roles')
        + ':/usr/share/ansible/roles:/etc/ansible/roles'
    }
    path = find_ansible_config_file()
    if path is not None:
        config.read(path)
    return config
@functools.lru_cache(maxsize=1)
def get_roles_path():  # type: () -> List[str]
    """
    Return the list of directories where Ansible looks for installed roles.
    """
    roles_path = get_ansible_configuration()['defaults']['roles_path']
    return roles_path.split(':')
@task
def install_roles():
    """
    Usage: fab install_roles

    Install (and with -i, re-install/overwrite) the ansible-galaxy roles
    listed in deployment/requirements.yml on the local machine.
    """
    local('ansible-galaxy install -i -r deployment/requirements.yml')
def find_install_role(rolename):  # type: (str) -> Optional[str]
    """
    Return the directory where the named role is installed, or None if the
    role is not found on any entry of the roles path.
    """
    for base in get_roles_path():
        candidate = os.path.join(base, rolename)
        if os.path.isdir(candidate):
            return candidate
    return None
def req_name(req):  # type: (Dict) -> str
    """
    Return the name of the role in the given requirements entry: its 'name'
    field when present, otherwise its 'src' field.

    Bug fix: ``req.get('name', req['src'])`` evaluated the fallback eagerly,
    raising KeyError for entries that have a 'name' but no 'src'.
    """
    if 'name' in req:
        return req['name']
    return req['src']
@task
def check_role_versions():  # type: () -> None
    """
    Usage: fab check_role_versions

    Verify that locally installed roles match the versions pinned in
    deployment/requirements.yml.

    - Missing roles are installed automatically (via install_roles).
    - Roles installed at the wrong version are a fatal error.
    - Roles without galaxy metadata (locally installed) are skipped with a
      warning when env.devflag is true, and fatal otherwise. See the `dev`
      task to set env.devflag.
    """
    okay = True  # False once any problem has been spotted
    bad = []  # Install dirs of roles pinned at the wrong version
    # safe_load avoids executing arbitrary YAML tags and the yaml.load()
    # "no Loader" deprecation warning; requirements.yml is plain YAML.
    # Using `with` ensures the file handles are closed promptly.
    with open("deployment/requirements.yml") as req_file:
        requirements = yaml.safe_load(req_file)
    requirements = sorted(requirements, key=req_name)
    requirements_to_install = False
    for req in requirements:
        name = req_name(req)
        install_dir = find_install_role(name)
        if not install_dir:
            print(yellow("WARNING: role %s not installed" % (name,)))
            requirements_to_install = True
            continue
        meta_path = os.path.join(install_dir, 'meta/.galaxy_install_info')
        if os.path.exists(meta_path):
            with open(meta_path) as meta_file:
                meta = yaml.safe_load(meta_file)
            if meta['version'] != req['version']:
                print(red("ERROR: role %s at %s is version %s, should be version %s" % (
                    name,
                    install_dir,
                    meta['version'],
                    req['version']
                )))
                okay = False
                bad.append(install_dir)
            else:
                print(green("GOOD: role %s %s at %s" % (name, meta['version'], install_dir)))
        else:
            # No galaxy metadata: the role was installed by hand, so its
            # version cannot be checked.
            if env.devflag:
                print(yellow("SKIP: role %s at %s appears to have been locally installed" % (name, install_dir)))
            else:
                okay = False
                print(red("ERROR: role %s at %s appears to have been locally installed, will not continue" % (name, install_dir)))
                print(red("To ignore this error, add 'dev' argument to fab command before this"))
    if requirements_to_install and okay:
        execute(install_roles)
    if not okay:
        print(red("Ansible galaxy role requirements are not satisfied, quitting. The simplest fix is to delete "
                  "the roles that have wrong versions, then run ``fab install_roles`` again."))
        if bad:
            print("E.g.")
            print("$ rm -r %s" % " ".join(badname for badname in bad))
        abort('check_installed_roles failed')
| 33.655405 | 131 | 0.62899 | import configparser
import functools
import os
import os.path
import yaml
from fabric.api import env, local
from fabric.colors import red, green, yellow
from fabric.decorators import task
from fabric.tasks import execute
from fabric.utils import abort
@functools.lru_cache(maxsize=1)
def find_ansible_config_file():
possible_paths = [
os.environ.get('ANSIBLE_CONFIG', False),
'ansible.cfg',
os.path.join(os.environ['HOME'], '.ansible.cfg'),
'/etc/ansible/ansible.cfg',
]
for path in possible_paths:
if path and os.path.exists(path):
return os.path.abspath(path)
return None
@functools.lru_cache(maxsize=1)
def get_ansible_configuration():
config = configparser.ConfigParser()
config['DEFAULTS'] = {
'roles_path': os.path.join(os.environ['HOME'], '/.ansible/roles') + ':/usr/share/ansible/roles:/etc/ansible/roles'
}
path = find_ansible_config_file()
if path is not None:
config.read(path)
return config
@functools.lru_cache(maxsize=1)
def get_roles_path():
return get_ansible_configuration()['defaults']['roles_path'].split(':')
@task
def install_roles():
local('ansible-galaxy install -i -r deployment/requirements.yml')
def find_install_role(rolename):
for path in get_roles_path():
dir = os.path.join(path, rolename)
if os.path.isdir(dir):
return dir
def req_name(req):
return req.get('name', req['src'])
@task
def check_role_versions():
okay = True
bad = [] # Paths to where missing roles should be installed, or where bad version roles are installed
requirements = yaml.load(open("deployment/requirements.yml"))
requirements = sorted(requirements, key=req_name)
requirements_to_install = False
for req in requirements:
name = req_name(req)
install_dir = find_install_role(name)
if not install_dir:
print(yellow("WARNING: role %s not installed" % (name,)))
requirements_to_install = True
continue
meta_path = os.path.join(install_dir, 'meta/.galaxy_install_info')
if os.path.exists(meta_path):
meta = yaml.load(open(meta_path))
if meta['version'] != req['version']:
print(red("ERROR: role %s at %s is version %s, should be version %s" % (
name,
install_dir,
meta['version'],
req['version']
)))
okay = False
bad.append(install_dir)
else:
print(green("GOOD: role %s %s at %s" % (name, meta['version'], install_dir)))
else:
# User must have installed this locally, don't check version
if env.devflag:
print(yellow("SKIP: role %s at %s appears to have been locally installed" % (name, install_dir)))
else:
okay = False
print(red("ERROR: role %s at %s appears to have been locally installed, will not continue" % (name, install_dir)))
print(red("To ignore this error, add 'dev' argument to fab command before this"))
if requirements_to_install and okay:
execute(install_roles)
if not okay:
print(red("Ansible galaxy role requirements are not satisfied, quitting. The simplest fix is to delete "
"the roles that have wrong versions, then run ``fab install_roles`` again."))
if bad:
print("E.g.")
print("$ rm -r %s" % " ".join(badname for badname in bad))
abort('check_installed_roles failed')
| true | true |
f7f5c4be51e1f45e1c8a496f9680be2b6dda8007 | 14,909 | py | Python | scripts/site_generator.py | jina-ai/benchmark | d9ade6464c1508c057299d9c6391872a7a70ba4b | [
"Apache-2.0"
] | 11 | 2021-08-06T07:33:44.000Z | 2022-03-15T02:24:37.000Z | scripts/site_generator.py | jina-ai/benchmarking | d9ade6464c1508c057299d9c6391872a7a70ba4b | [
"Apache-2.0"
] | 23 | 2021-08-01T20:43:40.000Z | 2021-11-22T20:46:54.000Z | scripts/site_generator.py | jina-ai/benchmarking | d9ade6464c1508c057299d9c6391872a7a70ba4b | [
"Apache-2.0"
] | 3 | 2021-09-12T07:14:41.000Z | 2022-03-21T19:47:23.000Z | #!/usr/bin/python3
import json
import os
import copy
from collections import defaultdict
from distutils.version import LooseVersion
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union, Optional
COLOR_VALUES = [
'#10a100',
'#7ead14',
'#bab73c',
'#e8c268',
'#e59838',
'#e36717',
'#de1414',
]
COLOR_NAN = '#9b00a1'
NOT_A_NUMBER = 'N/A'
STD_MEAN_THRESHOLD = 0.5
COLOR_LEGEND = ' | '.join(
[
f'<span style="color:{color};">{i*10} - {(i+1)*10}%</span>'
for i, color in enumerate(COLOR_VALUES)
]
)
LEGEND = f"""
The following data should be read as follows:
- Colors of cells display the percentage of the minimum value in the column:\n
{COLOR_LEGEND}
- <s>1337</s>: unstable tests with "standard deviation / mean > {STD_MEAN_THRESHOLD}"
"""
def _format(data: Union[int, float]) -> Any:
if isinstance(data, bool):
return str(data)
elif isinstance(data, int) or isinstance(data, float):
if data >= 1000:
_data = data
i = 0
while abs(_data) >= 1000:
i += 1
_data /= 1000
if isinstance(data, int):
return '%d%s' % (_data, ['', 'K', 'M', 'G', 'T', 'P'][i])
else:
return '%.2f%s' % (_data, ['', 'K', 'M', 'G', 'T', 'P'][i])
else:
i = 1
_data = round(data, i)
while _data == 0 and i <= 5:
i += 1
_data = round(data, i)
return _data
else:
return data
def _get_color(mean_time, master_mean_time):
    """Pick the legend color for a measurement relative to the column minimum."""
    # Unknown values — and a zero baseline, which would divide by zero —
    # get the dedicated "N/A" color.
    if mean_time is None or mean_time == NOT_A_NUMBER or master_mean_time == 0:
        return COLOR_NAN
    slowdown = float(mean_time) / float(master_mean_time) - 1
    bucket = int(slowdown * 10)
    clamped_bucket = min(6, max(0, bucket))
    return COLOR_VALUES[clamped_bucket]
def _get_cleaned_mean_time(time: Optional[int], scaling: int) -> str:
    """Scale a raw timing down by ``scaling`` and render it as a whole
    number, or 'N/A' when the measurement is missing."""
    if time is None:
        return NOT_A_NUMBER
    return str(int(int(time) / scaling))
def _cleaned_title(raw_heading: str) -> str:
"""Return cleaned title of artifact name."""
return raw_heading.replace('test_', '').replace('_', ' ').title()
def is_test_unstable(run_stats):
    """A run is unstable when its relative standard deviation
    (std_time / mean_time) exceeds STD_MEAN_THRESHOLD."""
    mean = run_stats.get('mean_time', 1e20)
    if mean == 0:
        return False
    return run_stats.get('std_time', 0.0) / mean > STD_MEAN_THRESHOLD
def _get_table_header(raw_data: List[Dict[str, Any]]) -> Tuple[str, str]:
"""Return metadata table title and table separator."""
titles = {}
for test_run in raw_data:
for name in test_run['metadata']:
titles[name] = []
break
separators = []
for result in raw_data:
separators.append('---:')
for field in titles:
if 'metadata' in result:
value = result['metadata'].get(field, 'N/A')
titles[field].append(f'**{value}**')
else:
titles[field].append('**N/A**')
final = []
for title, values in titles.items():
final.append(f'| **{title}** | {" | ".join(values)} |\n')
header = f'{final[0]}| :---: | {" | ".join(separators)} |\n{"".join(final[1:])}'
return header
def _get_version_list(artifacts_dir: str) -> List[str]:
    """Generates sorted list of all versions found in reports.

    Only version folders that contain a ``report.json`` are considered.
    The result is ordered newest-first; adjacent release/devN entries of
    the same version are swapped so a release outranks its own dev builds.

    Args:
        artifacts_dir: Absolute path to artifact directory.

    Return: List of versions found in reports.
    """
    lv = []
    for folder in os.listdir(artifacts_dir):
        if os.path.isfile(os.path.join(artifacts_dir, folder, 'report.json')):
            lv.append(LooseVersion(folder))
    # Ascending version order; LooseVersion places e.g. '1.0' before '1.0.dev1'.
    lv.sort()
    sorted_dev = [v.vstring for v in lv]
    import re
    p = re.compile('dev\\d+$')
    i = 0
    # Swap a release with the devN build that immediately follows it so that,
    # after the final reversal, the release appears before its dev builds.
    while i + 1 < len(sorted_dev):
        tmp = sorted_dev[i]
        m = p.search(sorted_dev[i + 1])
        if m and sorted_dev[i + 1].startswith(tmp):
            sorted_dev[i] = sorted_dev[i + 1]
            sorted_dev[i + 1] = tmp
        i += 1
    # Reverse the whole list to get newest-first ordering.
    version_list = [sorted_dev[i - 1] for i in range(len(sorted_dev), 0, -1)]
    return version_list
def _get_cum_data(version_list: List[str], artifacts_dir: str) -> Dict[Any, Any]:
    """Collect all test results, keyed by page / test name / version /
    metadata hash.

    Args:
        version_list: List of versions found in reports.
        artifacts_dir: Absolute path to artifact directory.

    Return: Dict of cumulative data
    """
    data: Dict[Any, Any] = defaultdict(
        lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
    )
    for version in version_list:
        version_dir = os.path.join(artifacts_dir, version)
        report_file = os.path.join(version_dir, 'report.json')
        if not os.path.isfile(report_file):
            continue
        with open(report_file) as fp:
            test_runs = json.load(fp)
        # Searcher-comparison results live in a separate, optional report.
        compare_file = os.path.join(version_dir, 'searchers_compare.json')
        if os.path.isfile(compare_file):
            with open(compare_file) as fp:
                test_runs.extend(json.load(fp))
        for run in test_runs:
            page = run.get('page', 'unsorted_tests')
            data[page][run['name']][version][_hash_run(run)] = run
    return data
def generate_homepage(output_dir: str) -> None:
    """Copy the repository README into the Hugo content dir as the homepage.

    Args:
        output_dir: Absolute path to Hugo content directory.
    """
    readme_path = os.path.join(os.getcwd(), 'README.md')
    index_path = os.path.join(output_dir, '_index.md')
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    # Nothing to do when the README is missing.
    if not os.path.isfile(readme_path):
        return
    with open(readme_path) as readme:
        body = readme.read()
    with open(index_path, 'w') as index:
        # Hugo front matter, followed by the README verbatim.
        index.write('---\n')
        index.write('title: Benchmark Jina\n')
        index.write('type: docs\n')
        index.write('---\n')
        index.write(body)
def _hash_run(d):
tmp_dict = copy.deepcopy(d)
tmp_dict.pop('mean_time', None)
tmp_dict.pop('std_time', None)
tmp_dict.pop('iterations', None)
tmp_dict.pop('results', None)
return json.dumps(tmp_dict, sort_keys=True)
def _get_stats(test_data, latest_version):
    """Aggregate per-parameter-set min/max mean times across all versions.

    Each distinct metadata hash becomes one entry holding its metadata, the
    min/max mean_time over all versions, the hash itself, and — when present
    — the latest version's mean. Timings are rescaled in place by
    _add_scaling before returning.
    """
    results = defaultdict(dict)
    for version, test_results in test_data.items():
        for test_result in test_results.values():
            key = _hash_run(test_result)
            entry = results[key]
            # Fall back to the test name when no metadata was recorded.
            metadata = test_result.get('metadata', {}) or {'name': test_result['name']}
            entry['metadata'] = metadata
            entry['min'] = min(entry.get('min', 1e20), test_result['mean_time'])
            entry['max'] = max(entry.get('max', 0), test_result['mean_time'])
            entry['parameter_hash'] = key
            if version == latest_version:
                entry['last_version_mean'] = test_result['mean_time']
    stats = list(results.values())
    _add_scaling(stats)
    return stats
def _get_one_version_stats(test_results):
results = defaultdict(lambda x: 1e20)
results['min_mean_docs_per_sec'] = 0
for test in test_results:
results['min_time'] = min(results['min_time'], test['mean_time'])
results['min_memory'] = min(results['min_memory'], test['mean_memory'])
results['min_indexer_memory'] = min(
results['min_indexer_memory'], test['mean_indexer_memory']
)
results['min_mean_docs_per_sec'] = max(
results['min_mean_docs_per_sec'], test['mean_mean_docs_per_sec']
)
results['min_latency'] = min(results['min_latency'], test['mean_latency'])
return results
def _add_scaling(stats):
for run_stats in stats:
if run_stats['min'] > 10_000_000_000:
run_stats['scaling'] = 1_000_000_000
run_stats['metadata']['unit'] = 's'
if run_stats['min'] > 10_000_000:
run_stats['scaling'] = 1_000_000
run_stats['metadata']['unit'] = 'ms'
elif run_stats['min'] > 10_000:
run_stats['scaling'] = 1_000
run_stats['metadata']['unit'] = 'μs'
else:
run_stats['scaling'] = 1
run_stats['metadata']['unit'] = 'ns'
run_stats['min'] = int(run_stats['min'] / run_stats['scaling'])
run_stats['max'] = int(run_stats['max'] / run_stats['scaling'])
def generate_docs(
    version_list: List[str], cum_data: Dict[Any, Any], output_dir: str
) -> None:
    """Write one markdown page per benchmark page into the Hugo docs dir.

    Args:
        version_list: List of versions found in reports.
        cum_data: Cumulative data in Dict.
        output_dir: Absolute path to Hugo docs directory.
    """
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    for page, page_data in cum_data.items():
        output_file = os.path.join(output_dir, f'{page}.md')
        title = _cleaned_title(page)
        # Indexer comparisons are rendered as a single-version table;
        # everything else as a per-version history.
        if page == 'indexer_comparison':
            generate_comparison_test(page_data, output_file, title)
        else:
            generate_versioned_test(page_data, output_file, title)
def _get_last_version(single_test_data):
versions = list(single_test_data.keys())
if versions:
return max(versions)
else:
return None
def generate_versioned_test(page_data, output_file, title):
    """Write a page with one table per test: columns are parameter sets,
    rows are versions, cells are color-coded mean times (struck through
    when the run is unstable).

    NOTE(review): the early ``return`` inside the loop aborts the *whole*
    page as soon as one test has no versions — confirm whether ``continue``
    was intended.
    """
    with open(output_file, 'w') as fp:
        # Hugo front matter, page heading and the color legend.
        fp.write('---\n')
        fp.write(f'title: {title}\n')
        fp.write('---\n')
        fp.write(f'# {title}\n\n')
        fp.write(f'{LEGEND}\n')
        for test_name, single_test_data in page_data.items():
            latest_version = _get_last_version(single_test_data)
            if latest_version is None:
                return
            # Per-parameter-set aggregates (min/max, scaling) across versions.
            stats = _get_stats(single_test_data, latest_version)
            header = _get_table_header(stats)
            fp.write(f'## {_cleaned_title(test_name)}\n')
            fp.write(header)
            for version, data_dict in single_test_data.items():
                fp.write(f'| {version} |')
                for run in stats:
                    run_data = data_dict[run['parameter_hash']]
                    mean_time = _get_cleaned_mean_time(
                        run_data.get('mean_time', None), run['scaling']
                    )
                    # Color relative to the column minimum; unstable runs
                    # are additionally struck through.
                    color = _get_color(mean_time, run['min'])
                    if is_test_unstable(run_data):
                        mean_time = f'<s>{mean_time}</s>'
                    fp.write(f' <span style="color:{color};">{mean_time}</span> |')
                fp.write('\n')
            fp.write('\n')
def generate_comparison_test(page_data, output_file, title):
    """Write a single-version comparison page (one column per indexer run).

    Bug fix: the 'index memory' and 'search memory' rows had their values
    swapped relative to their labels; values are now emitted in label order.
    """
    with open(output_file, 'w') as fp:
        fp.write('---\n')
        fp.write(f'title: {title}\n')
        fp.write('---\n')
        fp.write(f'# {title}\n\n')
        for test_name, single_test_data in page_data.items():
            latest_version = _get_last_version(single_test_data)
            if latest_version is None:
                continue
            table = []
            test_data = single_test_data[latest_version]
            header = _get_table_header(list(test_data.values()))
            fp.write(f'## {_cleaned_title(test_name)}\n')
            fp.write(f'Tests were performed against Jina {latest_version}.\n\n')
            fp.write(header)
            # First column of the (to-be-transposed) table: the row labels.
            table.append(
                [
                    'index time in ms',
                    'search time in ms',
                    'index memory',
                    'search memory',
                    'p90 in ms',
                    'p99 in ms',
                    'RPS',
                    'Documents per second',
                ]
            )
            for run in test_data.values():
                table.append(
                    [
                        _get_cleaned_mean_time(run['results']['mean_index_time'], 1e6),
                        _get_cleaned_mean_time(run['results']['mean_search_time'], 1e6),
                        get_readable_size(run['results']['mean_index_memory']),
                        get_readable_size(run['results']['mean_search_memory']),
                        _get_cleaned_mean_time(run['results']['p90'], 1e6),
                        _get_cleaned_mean_time(run['results']['p99'], 1e6),
                        get_rps(run),
                        get_dps(run),
                    ]
                )
            # Transpose so each metric becomes one markdown table row.
            transposed = list(map(list, zip(*table)))
            fp.write('|\n|'.join(' | '.join(row) for row in transposed))
            fp.write('\n\n')
def get_dps(run):
    """Documents per second over the whole search, formatted to 2 decimals."""
    metadata = run['metadata']
    total_docs = metadata['docs_per_request'] * metadata['num_requests']
    # mean_search_time is in nanoseconds.
    seconds = run['results']['mean_search_time'] / 1e9
    return f'{total_docs / seconds:.2f}'
def get_rps(run):
    """Requests per second over the whole search, formatted to 2 decimals."""
    # mean_search_time is in nanoseconds.
    seconds = run['results']['mean_search_time'] / 1e9
    return f"{run['metadata']['num_requests'] / seconds:.2f}"
def get_readable_size(num_bytes: Union[int, float]) -> str:
    """
    Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB).

    :param num_bytes: Number of bytes.
    :return: Human readable string representation.
    """
    amount = int(num_bytes)
    kib, mib, gib = 1024, 1024 ** 2, 1024 ** 3
    if amount < kib:
        return f'{amount} Bytes'
    if amount < mib:
        return f'{amount / kib:.1f} KB'
    if amount < gib:
        return f'{amount / mib:.1f} MB'
    return f'{amount / gib:.1f} GB'
def generate_menus(cum_data: Dict[Any, Any], output_dir: str) -> None:
    """Write the Hugo sidebar menu listing every generated docs page.

    Args:
        cum_data: Cumulative data in Dict.
        output_dir: Absolute path to Hugo menus directory.
    """
    menu_dir = os.path.join(output_dir, 'menu')
    Path(menu_dir).mkdir(parents=True, exist_ok=True)
    with open(os.path.join(menu_dir, 'index.md'), 'w') as fp:
        # Headless bundle front matter, then one bullet per docs page.
        fp.write('---\n')
        fp.write('headless: true\n')
        fp.write('---\n\n')
        for page in cum_data:
            fp.write(
                '- [%s]({{< relref "/docs/%s.md" >}})\n' % (_cleaned_title(page), page)
            )
def main():
    """Entry point: read artifacts and regenerate the whole Hugo site."""
    base_dir = os.path.join(os.getcwd(), 'docs')
    content_dir = os.path.join(base_dir, 'content')
    docs_dir = os.path.join(content_dir, 'docs')
    artifacts_dir = os.path.join(base_dir, 'static/artifacts')
    versions = _get_version_list(artifacts_dir)
    cumulative = _get_cum_data(versions, artifacts_dir)
    generate_homepage(content_dir)
    generate_docs(versions, cumulative, docs_dir)
    generate_menus(cumulative, content_dir)
generate_menus(cum_data, content_dir)
if __name__ == '__main__':
main()
| 30.995842 | 93 | 0.574821 |
import json
import os
import copy
from collections import defaultdict
from distutils.version import LooseVersion
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union, Optional
COLOR_VALUES = [
'#10a100',
'#7ead14',
'#bab73c',
'#e8c268',
'#e59838',
'#e36717',
'#de1414',
]
COLOR_NAN = '#9b00a1'
NOT_A_NUMBER = 'N/A'
STD_MEAN_THRESHOLD = 0.5
COLOR_LEGEND = ' | '.join(
[
f'<span style="color:{color};">{i*10} - {(i+1)*10}%</span>'
for i, color in enumerate(COLOR_VALUES)
]
)
LEGEND = f"""
The following data should be read as follows:
- Colors of cells display the percentage of the minimum value in the column:\n
{COLOR_LEGEND}
- <s>1337</s>: unstable tests with "standard deviation / mean > {STD_MEAN_THRESHOLD}"
"""
def _format(data: Union[int, float]) -> Any:
if isinstance(data, bool):
return str(data)
elif isinstance(data, int) or isinstance(data, float):
if data >= 1000:
_data = data
i = 0
while abs(_data) >= 1000:
i += 1
_data /= 1000
if isinstance(data, int):
return '%d%s' % (_data, ['', 'K', 'M', 'G', 'T', 'P'][i])
else:
return '%.2f%s' % (_data, ['', 'K', 'M', 'G', 'T', 'P'][i])
else:
i = 1
_data = round(data, i)
while _data == 0 and i <= 5:
i += 1
_data = round(data, i)
return _data
else:
return data
def _get_color(mean_time, master_mean_time):
if mean_time is None or mean_time == NOT_A_NUMBER or master_mean_time == 0:
return COLOR_NAN
raw_bucket = int((float(mean_time) / float(master_mean_time) - 1) * 10)
bucket = max(0, min(6, raw_bucket))
return COLOR_VALUES[bucket]
def _get_cleaned_mean_time(time: Optional[int], scaling: int) -> str:
if time is not None:
return str(int(int(time) / scaling))
else:
return NOT_A_NUMBER
def _cleaned_title(raw_heading: str) -> str:
return raw_heading.replace('test_', '').replace('_', ' ').title()
def is_test_unstable(run_stats):
mean = run_stats.get('mean_time', 1e20)
return mean != 0 and run_stats.get('std_time', 0.0) / mean > STD_MEAN_THRESHOLD
def _get_table_header(raw_data: List[Dict[str, Any]]) -> Tuple[str, str]:
titles = {}
for test_run in raw_data:
for name in test_run['metadata']:
titles[name] = []
break
separators = []
for result in raw_data:
separators.append('---:')
for field in titles:
if 'metadata' in result:
value = result['metadata'].get(field, 'N/A')
titles[field].append(f'**{value}**')
else:
titles[field].append('**N/A**')
final = []
for title, values in titles.items():
final.append(f'| **{title}** | {" | ".join(values)} |\n')
header = f'{final[0]}| :---: | {" | ".join(separators)} |\n{"".join(final[1:])}'
return header
def _get_version_list(artifacts_dir: str) -> List[str]:
lv = []
for folder in os.listdir(artifacts_dir):
if os.path.isfile(os.path.join(artifacts_dir, folder, 'report.json')):
lv.append(LooseVersion(folder))
lv.sort()
sorted_dev = [v.vstring for v in lv]
import re
p = re.compile('dev\\d+$')
i = 0
while i + 1 < len(sorted_dev):
tmp = sorted_dev[i]
m = p.search(sorted_dev[i + 1])
if m and sorted_dev[i + 1].startswith(tmp):
sorted_dev[i] = sorted_dev[i + 1]
sorted_dev[i + 1] = tmp
i += 1
version_list = [sorted_dev[i - 1] for i in range(len(sorted_dev), 0, -1)]
return version_list
def _get_cum_data(version_list: List[str], artifacts_dir: str) -> Dict[Any, Any]:
data: Dict[Any, Any] = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
)
for version in version_list:
report_file = os.path.join(artifacts_dir, version, 'report.json')
searchers_compare_file = os.path.join(
artifacts_dir, version, 'searchers_compare.json'
)
if os.path.isfile(report_file):
with open(report_file) as fp:
_raw_data = json.load(fp)
if os.path.isfile(searchers_compare_file):
with open(searchers_compare_file) as fp:
_raw_data.extend(json.load(fp))
for i in _raw_data:
page = i.get('page', 'unsorted_tests')
test_name = i['name']
metadata_hash = _hash_run(i)
data[page][test_name][version][metadata_hash] = i
return data
def generate_homepage(output_dir: str) -> None:
    """Render the Hugo homepage (_index.md) from the repository README.

    BUG FIX: when README.md was missing, the old code raised NameError
    because `data` was never bound; the front matter is now still written
    with an empty body.
    """
    src = os.path.join(os.getcwd(), 'README.md')
    dst = os.path.join(output_dir, '_index.md')
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    data = ''
    if os.path.isfile(src):
        with open(src) as f:
            data = f.read()
    with open(dst, 'w') as fp:
        fp.write('---\n')
        fp.write('title: Benchmark Jina\n')
        fp.write('type: docs\n')
        fp.write('---\n')
        fp.write(data)
def _hash_run(d):
tmp_dict = copy.deepcopy(d)
tmp_dict.pop('mean_time', None)
tmp_dict.pop('std_time', None)
tmp_dict.pop('iterations', None)
tmp_dict.pop('results', None)
return json.dumps(tmp_dict, sort_keys=True)
def _get_stats(test_data, latest_version):
    """Collapse per-version results into one entry per parameter
    combination.

    Each entry tracks the min/max mean_time across all versions, the
    mean_time of the latest version, and the run metadata; display
    scaling/units are attached in place by _add_scaling.
    """
    results = defaultdict(dict)
    for version, test_results in test_data.items():
        for test_result in test_results.values():
            parameter_hash = _hash_run(test_result)
            metadata = test_result.get('metadata', {})
            if not metadata:
                # fall back to the bare test name so the table header
                # still has something to show
                metadata = {'name': test_result['name']}
            results[parameter_hash]['metadata'] = metadata
            results[parameter_hash]['min'] = min(
                results[parameter_hash].get('min', 1e20), test_result['mean_time']
            )
            results[parameter_hash]['max'] = max(
                results[parameter_hash].get('max', 0), test_result['mean_time']
            )
            results[parameter_hash]['parameter_hash'] = parameter_hash
            if version == latest_version:
                results[parameter_hash]['last_version_mean'] = test_result['mean_time']
    stats = list(results.values())
    _add_scaling(stats)
    return stats
def _get_one_version_stats(test_results):
results = defaultdict(lambda x: 1e20)
results['min_mean_docs_per_sec'] = 0
for test in test_results:
results['min_time'] = min(results['min_time'], test['mean_time'])
results['min_memory'] = min(results['min_memory'], test['mean_memory'])
results['min_indexer_memory'] = min(
results['min_indexer_memory'], test['mean_indexer_memory']
)
results['min_mean_docs_per_sec'] = max(
results['min_mean_docs_per_sec'], test['mean_mean_docs_per_sec']
)
results['min_latency'] = min(results['min_latency'], test['mean_latency'])
return results
def _add_scaling(stats):
for run_stats in stats:
if run_stats['min'] > 10_000_000_000:
run_stats['scaling'] = 1_000_000_000
run_stats['metadata']['unit'] = 's'
if run_stats['min'] > 10_000_000:
run_stats['scaling'] = 1_000_000
run_stats['metadata']['unit'] = 'ms'
elif run_stats['min'] > 10_000:
run_stats['scaling'] = 1_000
run_stats['metadata']['unit'] = 'μs'
else:
run_stats['scaling'] = 1
run_stats['metadata']['unit'] = 'ns'
run_stats['min'] = int(run_stats['min'] / run_stats['scaling'])
run_stats['max'] = int(run_stats['max'] / run_stats['scaling'])
def generate_docs(
    version_list: List[str], cum_data: Dict[Any, Any], output_dir: str
) -> None:
    """Write one markdown file per page into output_dir.

    The 'indexer_comparison' page gets the single-version comparison
    layout; every other page gets the version-by-version table layout.
    (version_list is currently unused here.)
    """
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    for page, page_data in cum_data.items():
        output_file = os.path.join(output_dir, f'{page}.md')
        if page == 'indexer_comparison':
            generate_comparison_test(page_data, output_file, _cleaned_title(page))
        else:
            generate_versioned_test(page_data, output_file, _cleaned_title(page))
def _get_last_version(single_test_data):
versions = list(single_test_data.keys())
if versions:
return max(versions)
else:
return None
def generate_versioned_test(page_data, output_file, title):
    """Write one markdown page: one table per test, rows are Jina
    versions, columns are parameter combinations, cells are mean times.

    Unstable runs are rendered struck-through; cells are colour-coded
    relative to the best (minimum) time via _get_color.
    """
    with open(output_file, 'w') as fp:
        fp.write('---\n')
        fp.write(f'title: {title}\n')
        fp.write('---\n')
        fp.write(f'# {title}\n\n')
        fp.write(f'{LEGEND}\n')
        for test_name, single_test_data in page_data.items():
            latest_version = _get_last_version(single_test_data)
            if latest_version is None:
                # BUG FIX: this was `return`, which aborted the whole page
                # and silently dropped every remaining test; skip only this
                # test (consistent with generate_comparison_test).
                continue
            stats = _get_stats(single_test_data, latest_version)
            header = _get_table_header(stats)
            fp.write(f'## {_cleaned_title(test_name)}\n')
            fp.write(header)
            for version, data_dict in single_test_data.items():
                fp.write(f'| {version} |')
                for run in stats:
                    run_data = data_dict[run['parameter_hash']]
                    mean_time = _get_cleaned_mean_time(
                        run_data.get('mean_time', None), run['scaling']
                    )
                    color = _get_color(mean_time, run['min'])
                    if is_test_unstable(run_data):
                        # strike through unstable measurements
                        mean_time = f'<s>{mean_time}</s>'
                    fp.write(f' <span style="color:{color};">{mean_time}</span> |')
                fp.write('\n')
            fp.write('\n')
def generate_comparison_test(page_data, output_file, title):
    """Write a markdown page comparing indexers against the latest Jina
    version only: one table per test, metrics as rows, runs as columns."""
    with open(output_file, 'w') as fp:
        fp.write('---\n')
        fp.write(f'title: {title}\n')
        fp.write('---\n')
        fp.write(f'# {title}\n\n')
        for test_name, single_test_data in page_data.items():
            latest_version = _get_last_version(single_test_data)
            if latest_version is None:
                continue
            table = []
            test_data = single_test_data[latest_version]
            header = _get_table_header(list(test_data.values()))
            fp.write(f'## {_cleaned_title(test_name)}\n')
            fp.write(f'Tests were performed against Jina {latest_version}.\n\n')
            fp.write(header)
            # first column: metric labels (one table row per metric after
            # the transpose below)
            table.append(
                [
                    'index time in ms',
                    'search time in ms',
                    'index memory',
                    'search memory',
                    'p90 in ms',
                    'p99 in ms',
                    'RPS',
                    'Documents per second',
                ]
            )
            for run in test_data.values():
                table.append(
                    [
                        _get_cleaned_mean_time(run['results']['mean_index_time'], 1e6),
                        _get_cleaned_mean_time(run['results']['mean_search_time'], 1e6),
                        # BUG FIX: index/search memory were swapped relative
                        # to the labels above
                        get_readable_size(run['results']['mean_index_memory']),
                        get_readable_size(run['results']['mean_search_memory']),
                        _get_cleaned_mean_time(run['results']['p90'], 1e6),
                        _get_cleaned_mean_time(run['results']['p99'], 1e6),
                        get_rps(run),
                        get_dps(run),
                    ]
                )
            # transpose so metrics become rows and runs become columns
            transposed = list(map(list, zip(*table)))
            fp.write('|\n|'.join(' | '.join(row) for row in transposed))
            fp.write('\n\n')
def get_dps(run):
    """Documents processed per second over the mean search time, formatted
    with two decimals. mean_search_time is in nanoseconds."""
    doc_count = run['metadata']['docs_per_request'] * run['metadata']['num_requests']
    seconds = run['results']['mean_search_time'] / 1e9
    return '{:.2f}'.format(doc_count / seconds)
def get_rps(run):
    """Requests per second over the mean search time (ns), two decimals."""
    seconds = run['results']['mean_search_time'] / 1e9
    return '{:.2f}'.format(run['metadata']['num_requests'] / seconds)
def get_readable_size(num_bytes: Union[int, float]) -> str:
    """Format a byte count as a human-readable string.

    Below 1 KiB the exact integer is shown ('512 Bytes'); otherwise one
    decimal place in the largest unit not exceeding the value ('1.5 KB').
    """
    size = int(num_bytes)
    if size < 1024:
        return '{} Bytes'.format(size)
    for unit, divisor in (('KB', 1024), ('MB', 1024 ** 2), ('GB', 1024 ** 3)):
        if size < divisor * 1024 or unit == 'GB':
            return '{:.1f} {}'.format(size / divisor, unit)
def generate_menus(cum_data: Dict[Any, Any], output_dir: str) -> None:
    """Write the Hugo sidebar menu (menu/index.md) with one relref link
    per generated docs page."""
    menu_dir = os.path.join(output_dir, 'menu')
    menu_index = os.path.join(menu_dir, 'index.md')
    Path(menu_dir).mkdir(parents=True, exist_ok=True)
    with open(menu_index, 'w') as fp:
        fp.write('---\n')
        fp.write('headless: true\n')
        fp.write('---\n\n')
        for page in cum_data:
            # %-formatting kept deliberately: the Hugo {{< relref >}}
            # braces would clash with str.format/f-string syntax
            fp.write(
                '- [%s]({{< relref "/docs/%s.md" >}})\n' % (_cleaned_title(page), page)
            )
def main():
    """Entry point: collect benchmark artifacts from docs/static/artifacts
    and regenerate the Hugo site content (homepage, docs pages, menu)."""
    base_dir = os.path.join(os.getcwd(), 'docs')
    content_dir = os.path.join(base_dir, 'content')
    docs_dir = os.path.join(content_dir, 'docs')
    artifacts_dir = os.path.join(base_dir, 'static/artifacts')
    version_list = _get_version_list(artifacts_dir)
    cum_data = _get_cum_data(version_list, artifacts_dir)
    generate_homepage(content_dir)
    generate_docs(version_list, cum_data, docs_dir)
    generate_menus(cum_data, content_dir)
if __name__ == '__main__':
    main()
| true | true |
f7f5c4e017dceec8833a671cf102daf3b759b3da | 2,700 | py | Python | spiders/pitchfork.py | sabbirahm3d/ds5k-capstone-dataset | d6d5ed5a1043de87b90e3e4b1737e6ffc563eeaf | [
"MIT"
] | null | null | null | spiders/pitchfork.py | sabbirahm3d/ds5k-capstone-dataset | d6d5ed5a1043de87b90e3e4b1737e6ffc563eeaf | [
"MIT"
] | 1 | 2021-06-01T22:50:17.000Z | 2021-06-01T22:50:17.000Z | spiders/pitchfork.py | ribbas/ds5k-capstone-dataset | d6d5ed5a1043de87b90e3e4b1737e6ffc563eeaf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from dateutil import parser as date_parser
from util import logs
class PitchFork(object):
    """Scraper for pitchfork.com album reviews.

    State (page range and collected review URLs) is resumed from the log
    files configured in util.logs.
    """

    def __init__(self):
        """Load the page range and previously collected review URLs.

        The range log's first line is either the literal ``init`` (second
        line holds "<start> ... <end>") or a marker meaning the second
        line holds an explicit list of page numbers.
        """
        self.base_url = "https://pitchfork.com/"
        self.index_endp = "reviews/albums/?page={}"
        self.range_log = logs.PITCHFORK_RANGE
        self.urls_log = logs.PITCHFORK_URLS
        self.index_only = False
        self.pages_range = []
        with open(self.range_log) as log_file:
            line = log_file.readline()
            if line == "init\n":
                line = log_file.readline().split()
                self.pages_range = range(
                    int(line[0]),
                    int(line[-1]) + 1
                )
            else:
                line = log_file.readline().split()
                self.pages_range = [int(i) for i in line]
        with open(self.urls_log) as log_file:
            self.urls = log_file.read().split()

    def scrape_urls(self, html):
        """Collect review links from an index page into self.urls."""
        for tag in html.find_all("a", {"class": "review__link"}):
            self.urls.append(tag.get("href"))

    def _first_tag(self, html, tag_name, css_class):
        # Return the first tag matching the CSS class, or None.
        for tag in html.find_all(tag_name, {"class": css_class}):
            return tag
        return None

    def _first_text(self, html, tag_name, css_class):
        # Text of the first matching tag, or None when absent.
        tag = self._first_tag(html, tag_name, css_class)
        return tag.text if tag is not None else None

    def scrape_album_data(self, html):
        """Extract one review's fields from a parsed review page.

        Returns (album, artist, timestamp, genre, score, reviewer); each
        element is None if the corresponding tag is absent. Previously this
        repeated the same find-first-tag/flag boilerplate six times; it is
        now factored into _first_tag/_first_text with identical results.
        """
        album = self._first_text(
            html, "h1", "single-album-tombstone__review-title")
        artist = self._first_text(
            html, "ul",
            "artist-links artist-list single-album-tombstone__artist-links")
        time_tag = self._first_tag(html, "time", "pub-date")
        timestamp = (
            date_parser.parse(time_tag.get("datetime"))
            if time_tag is not None else None
        )
        genre = self._first_text(html, "a", "genre-list__link")
        score = self._first_text(html, "span", "score")
        reviewer = self._first_text(html, "a", "authors-detail__display-name")
        return (album, artist, timestamp, genre, score, reviewer)
| 27.55102 | 115 | 0.487037 |
from dateutil import parser as date_parser
from util import logs
class PitchFork(object):
def __init__(self):
self.base_url = "https://pitchfork.com/"
self.index_endp = "reviews/albums/?page={}"
self.range_log = logs.PITCHFORK_RANGE
self.urls_log = logs.PITCHFORK_URLS
self.index_only = False
self.pages_range = []
with open(self.range_log) as log_file:
line = log_file.readline()
if line == "init\n":
line = log_file.readline().split()
self.pages_range = range(
int(line[0]),
int(line[-1]) + 1
)
else:
line = log_file.readline().split()
self.pages_range = [int(i) for i in line]
with open(self.urls_log) as log_file:
self.urls = log_file.read().split()
def scrape_urls(self, html):
for tag in html.find_all("a", {"class": "review__link"}):
self.urls.append(tag.get("href"))
def scrape_album_data(self, html):
data = ()
found = 0
for tag in html.find_all("h1", {"class": "single-album-tombstone__review-title"}):
data = (tag.text, )
found = 1
break
if not found:
data = (None,)
found = 0
for tag in html.find_all("ul", {"class": "artist-links artist-list single-album-tombstone__artist-links"}):
data = data + (tag.text, )
found = 1
break
if not found:
data = data + (None,)
found = 0
for tag in html.find_all("time", {"class": "pub-date"}):
data = data + (date_parser.parse(tag.get("datetime")), )
found = 1
break
if not found:
data = data + (None,)
found = 0
for tag in html.find_all("a", {"class": "genre-list__link"}):
data = data + (tag.text, )
found = 1
break
if not found:
data = data + (None,)
found = 0
for tag in html.find_all("span", {"class": "score"}):
data = data + (tag.text, )
found = 1
break
if not found:
data = data + (None,)
found = 0
for tag in html.find_all("a", {"class": "authors-detail__display-name"}):
data = data + (tag.text, )
found = 1
break
if not found:
data = data + (None,)
return data
| true | true |
f7f5c5b2cd5e39f335163ef59760d9ce0b1611b0 | 266 | py | Python | problems/unique-paths.py | sailikhithk/tech-interview-prep | e833764cf98915d56118bddfa0e01871c58de75e | [
"Apache-2.0"
] | null | null | null | problems/unique-paths.py | sailikhithk/tech-interview-prep | e833764cf98915d56118bddfa0e01871c58de75e | [
"Apache-2.0"
] | null | null | null | problems/unique-paths.py | sailikhithk/tech-interview-prep | e833764cf98915d56118bddfa0e01871c58de75e | [
"Apache-2.0"
] | null | null | null | class Solution(object):
def uniquePaths(self, m, n):
def factorial(n):
ans = 1
for i in xrange(1, n+1):
ans *= i
return ans
return factorial((m-1)+(n-1))/(factorial(n-1)*factorial(m-1)) | 29.555556 | 69 | 0.469925 | class Solution(object):
def uniquePaths(self, m, n):
def factorial(n):
ans = 1
for i in xrange(1, n+1):
ans *= i
return ans
return factorial((m-1)+(n-1))/(factorial(n-1)*factorial(m-1)) | true | true |
f7f5c5d940bd41d998996ecf200d16201e70d9fd | 2,679 | py | Python | m2cgen/assemblers/__init__.py | bcampbell-prosper/m2cgen | cc049fe5cd7060c2f0cd5a0331e3aa85fac2a336 | [
"MIT"
] | 3 | 2021-06-29T02:43:40.000Z | 2022-03-28T07:41:59.000Z | m2cgen/assemblers/__init__.py | bcampbell-prosper/m2cgen | cc049fe5cd7060c2f0cd5a0331e3aa85fac2a336 | [
"MIT"
] | null | null | null | m2cgen/assemblers/__init__.py | bcampbell-prosper/m2cgen | cc049fe5cd7060c2f0cd5a0331e3aa85fac2a336 | [
"MIT"
] | 3 | 2021-08-06T07:51:37.000Z | 2022-03-28T07:41:42.000Z | from .linear import LinearModelAssembler
from .tree import TreeModelAssembler
from .ensemble import RandomForestModelAssembler
from .boosting import XGBoostModelAssembler, LightGBMModelAssembler
from .svm import SVMModelAssembler
__all__ = [
LinearModelAssembler,
TreeModelAssembler,
RandomForestModelAssembler,
XGBoostModelAssembler,
]
# Registry mapping a model's class name (type(model).__name__) to the
# assembler that can translate it; consumed by get_assembler_cls().
SUPPORTED_MODELS = {
    # LightGBM
    "LGBMRegressor": LightGBMModelAssembler,
    "LGBMClassifier": LightGBMModelAssembler,
    # XGBoost
    "XGBClassifier": XGBoostModelAssembler,
    "XGBRegressor": XGBoostModelAssembler,
    # SVM (linear SVMs use the plain linear assembler)
    "LinearSVC": LinearModelAssembler,
    "LinearSVR": LinearModelAssembler,
    "SVR": SVMModelAssembler,
    "NuSVR": SVMModelAssembler,
    "SVC": SVMModelAssembler,
    "NuSVC": SVMModelAssembler,
    # Linear Regressors
    "LinearRegression": LinearModelAssembler,
    "HuberRegressor": LinearModelAssembler,
    "ElasticNet": LinearModelAssembler,
    "ElasticNetCV": LinearModelAssembler,
    "TheilSenRegressor": LinearModelAssembler,
    "Lars": LinearModelAssembler,
    "LarsCV": LinearModelAssembler,
    "Lasso": LinearModelAssembler,
    "LassoCV": LinearModelAssembler,
    "LassoLars": LinearModelAssembler,
    "LassoLarsIC": LinearModelAssembler,
    "OrthogonalMatchingPursuit": LinearModelAssembler,
    "OrthogonalMatchingPursuitCV": LinearModelAssembler,
    "Ridge": LinearModelAssembler,
    "RidgeCV": LinearModelAssembler,
    "BayesianRidge": LinearModelAssembler,
    "ARDRegression": LinearModelAssembler,
    "SGDRegressor": LinearModelAssembler,
    "PassiveAggressiveRegressor": LinearModelAssembler,
    # Logistic Regressors / linear classifiers
    "LogisticRegression": LinearModelAssembler,
    "LogisticRegressionCV": LinearModelAssembler,
    "RidgeClassifier": LinearModelAssembler,
    "RidgeClassifierCV": LinearModelAssembler,
    "SGDClassifier": LinearModelAssembler,
    "PassiveAggressiveClassifier": LinearModelAssembler,
    # Decision trees
    "DecisionTreeRegressor": TreeModelAssembler,
    "DecisionTreeClassifier": TreeModelAssembler,
    "ExtraTreeRegressor": TreeModelAssembler,
    "ExtraTreeClassifier": TreeModelAssembler,
    # Ensembles
    "RandomForestRegressor": RandomForestModelAssembler,
    "RandomForestClassifier": RandomForestModelAssembler,
    "ExtraTreesRegressor": RandomForestModelAssembler,
    "ExtraTreesClassifier": RandomForestModelAssembler,
}
def get_assembler_cls(model):
    """Look up the assembler class registered for *model*'s type.

    Raises NotImplementedError if the model type is not supported.
    """
    model_name = type(model).__name__
    try:
        return SUPPORTED_MODELS[model_name]
    except KeyError:
        raise NotImplementedError(
            "Model {} is not supported".format(model_name))
| 31.892857 | 67 | 0.762225 | from .linear import LinearModelAssembler
from .tree import TreeModelAssembler
from .ensemble import RandomForestModelAssembler
from .boosting import XGBoostModelAssembler, LightGBMModelAssembler
from .svm import SVMModelAssembler
__all__ = [
LinearModelAssembler,
TreeModelAssembler,
RandomForestModelAssembler,
XGBoostModelAssembler,
]
SUPPORTED_MODELS = {
"LGBMRegressor": LightGBMModelAssembler,
"LGBMClassifier": LightGBMModelAssembler,
"XGBClassifier": XGBoostModelAssembler,
"XGBRegressor": XGBoostModelAssembler,
"LinearSVC": LinearModelAssembler,
"LinearSVR": LinearModelAssembler,
"SVR": SVMModelAssembler,
"NuSVR": SVMModelAssembler,
"SVC": SVMModelAssembler,
"NuSVC": SVMModelAssembler,
"LinearRegression": LinearModelAssembler,
"HuberRegressor": LinearModelAssembler,
"ElasticNet": LinearModelAssembler,
"ElasticNetCV": LinearModelAssembler,
"TheilSenRegressor": LinearModelAssembler,
"Lars": LinearModelAssembler,
"LarsCV": LinearModelAssembler,
"Lasso": LinearModelAssembler,
"LassoCV": LinearModelAssembler,
"LassoLars": LinearModelAssembler,
"LassoLarsIC": LinearModelAssembler,
"OrthogonalMatchingPursuit": LinearModelAssembler,
"OrthogonalMatchingPursuitCV": LinearModelAssembler,
"Ridge": LinearModelAssembler,
"RidgeCV": LinearModelAssembler,
"BayesianRidge": LinearModelAssembler,
"ARDRegression": LinearModelAssembler,
"SGDRegressor": LinearModelAssembler,
"PassiveAggressiveRegressor": LinearModelAssembler,
"LogisticRegression": LinearModelAssembler,
"LogisticRegressionCV": LinearModelAssembler,
"RidgeClassifier": LinearModelAssembler,
"RidgeClassifierCV": LinearModelAssembler,
"SGDClassifier": LinearModelAssembler,
"PassiveAggressiveClassifier": LinearModelAssembler,
"DecisionTreeRegressor": TreeModelAssembler,
"DecisionTreeClassifier": TreeModelAssembler,
"ExtraTreeRegressor": TreeModelAssembler,
"ExtraTreeClassifier": TreeModelAssembler,
"RandomForestRegressor": RandomForestModelAssembler,
"RandomForestClassifier": RandomForestModelAssembler,
"ExtraTreesRegressor": RandomForestModelAssembler,
"ExtraTreesClassifier": RandomForestModelAssembler,
}
def get_assembler_cls(model):
model_name = type(model).__name__
assembler_cls = SUPPORTED_MODELS.get(model_name)
if not assembler_cls:
raise NotImplementedError(
"Model {} is not supported".format(model_name))
return assembler_cls
| true | true |
f7f5c609cb611dd737f3e5740e40e83377fe583c | 534 | py | Python | py2.7/Agent.py | kamrulhasan1203/four-in-a-row | f02b6c26c22689d7637295e64a631ab80f75bc7e | [
"MIT"
] | null | null | null | py2.7/Agent.py | kamrulhasan1203/four-in-a-row | f02b6c26c22689d7637295e64a631ab80f75bc7e | [
"MIT"
] | null | null | null | py2.7/Agent.py | kamrulhasan1203/four-in-a-row | f02b6c26c22689d7637295e64a631ab80f75bc7e | [
"MIT"
] | null | null | null |
class Agent(object):
def __init__(self,name):
self.name = name # Name of the agent
"""
* Sets the role of this agent. Typlically will be called by your extended Game class (The class which extends the Game Class).
@param role
"""
def setRole(self,role):
self.role = role;
"""
* Implement this method to select a move, and change the game state according to the chosen move.
* @param game
* """
def makeMove(game):
raise NotImplementedError('subclasses must override makeMove()!')
| 25.428571 | 130 | 0.662921 |
class Agent(object):
def __init__(self,name):
self.name = name
def setRole(self,role):
self.role = role;
def makeMove(game):
raise NotImplementedError('subclasses must override makeMove()!')
| true | true |
f7f5c65b3da19cbde999d5ab56fbed2f741b9489 | 13,939 | py | Python | ml/pipelinecomponents.py | deepakkumar1984/ml-api | 8d09e9ef99c39838cd2f2db1e70226b8d6cbc77e | [
"MIT"
] | 60 | 2017-08-17T11:24:16.000Z | 2019-02-13T02:09:24.000Z | ml/pipelinecomponents.py | deepakkumar1984/ml-api | 8d09e9ef99c39838cd2f2db1e70226b8d6cbc77e | [
"MIT"
] | null | null | null | ml/pipelinecomponents.py | deepakkumar1984/ml-api | 8d09e9ef99c39838cd2f2db1e70226b8d6cbc77e | [
"MIT"
] | 35 | 2017-09-27T03:02:23.000Z | 2019-04-25T20:50:43.000Z | import simplejson as json
import os
import pickle
import jsonpickle
import numpy
import pandas
from keras import datasets
from keras.models import model_from_json
from pandas import read_csv
from sklearn.model_selection import cross_validate, train_test_split, cross_val_predict
from sklearn.preprocessing import Imputer
from keras.utils import np_utils
from ml import scikitlearn, mxnetfactory
from Interface import projectmgr
from sklearn import preprocessing, feature_selection
# Module-level state, populated via init() with the module object itself
# passed as `self` (the same pattern the code uses for
# mxnetfactory.init(mxnetfactory, ...) below).
projectfolder = ""  # root folder of the active project under ./data/
model_type = ""  # e.g. "mlp" selects the Keras/MXNet code paths
name = ""  # project name
optionslist = {}  # free-form options accumulated via addOption()
jobid = ""  # id of the currently executing job (for result persistence)
def init(self, name, modeltype, jobid=None):
    """Initialise module-level state.

    NOTE(review): `self` is expected to be this module object itself (the
    caller passes the module), so the assignments below effectively set
    the module globals — confirm against callers.
    """
    self.projectfolder = "./data/" + name
    self.name = name
    self.jobid = jobid
    self.model_type = modeltype
def addOption(options):
    """Merge the given mapping into the module-level optionslist."""
    optionslist.update(options)
def data_loadcsv(pipeline):
    """Load a CSV from the project's dataset folder into a DataFrame.

    options.column_header decides whether the first row is a header;
    options.delim_whitespace switches to whitespace-delimited parsing.

    NOTE(review): dtype={'a': numpy.float32} only affects a column
    literally named 'a' — confirm this is intentional.
    """
    try:
        filename = projectfolder + "/dataset/" + pipeline["options"]["filename"]
        if pipeline['options']['column_header'] == True:
            dataframe = read_csv(filename, delim_whitespace=pipeline['options']['delim_whitespace'], dtype={'a': numpy.float32})
        else:
            dataframe = read_csv(filename, delim_whitespace=pipeline['options']['delim_whitespace'], header=None, dtype={'a': numpy.float32})
        return dataframe
    except Exception as e:
        raise Exception("data_loadcsv: " + str(e))
def data_loadsample(pipeline):
    """Load one of the Keras sample datasets named in options.dataset_name.

    Returns ((X_train, Y_train), (X_test, Y_test)).

    NOTE(review): an unknown dataset_name leaves the variables unbound and
    the return raises NameError — confirm callers validate the name.
    """
    dataset_name = pipeline["options"]["dataset_name"]
    if dataset_name == "cifar10":
        (X_train, Y_train), (X_test, Y_test) = datasets.cifar10.load_data()
    elif dataset_name == "cifar100":
        (X_train, Y_train), (X_test, Y_test) = datasets.cifar100.load_data()
    elif dataset_name == "imdb":
        (X_train, Y_train), (X_test, Y_test) = datasets.imdb.load_data(path="imdb.npz",
                                                                       num_words=None,
                                                                       skip_top=0,
                                                                       maxlen=None,
                                                                       seed=113,
                                                                       start_char=1,
                                                                       oov_char=2,
                                                                       index_from=3)
    elif dataset_name == "reuters":
        (X_train, Y_train), (X_test, Y_test) = datasets.reuters.load_data(path="reuters.npz",
                                                                          num_words=None,
                                                                          skip_top=0,
                                                                          maxlen=None,
                                                                          test_split=0.2,
                                                                          seed=113,
                                                                          start_char=1,
                                                                          oov_char=2,
                                                                          index_from=3)
    elif dataset_name == "mnist":
        (X_train, Y_train), (X_test, Y_test) = datasets.mnist.load_data()
    elif dataset_name == "boston_housing":
        (X_train, Y_train), (X_test, Y_test) = datasets.boston_housing.load_data()
    return (X_train, Y_train), (X_test, Y_test)
def data_testtrainsplit(X, Y, pipeline):
    """Split X/Y into train and test sets.

    options.test_size (default 0.25) and options.random_state (default 42)
    are forwarded to sklearn.

    Returns (X_train, Y_train, X_test, Y_test).

    BUG FIXES: (1) the option presence check used the wrong key
    ("test_split") and then read "test_size", raising KeyError whenever a
    caller set "test_split"; (2) sklearn's train_test_split returns
    (X_train, X_test, y_train, y_test) — the old unpacking order silently
    swapped Y_train and X_test.
    """
    test_size = 0.25
    random_state = 42
    if "test_size" in pipeline["options"]:
        test_size = pipeline["options"]["test_size"]
    if "random_state" in pipeline["options"]:
        random_state = pipeline["options"]["random_state"]
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=test_size, random_state=random_state)
    return X_train, Y_train, X_test, Y_test
def data_getxy(dataframe, pipeline):
    """Split a dataframe into feature (X) and target (Y) frames using the
    column lists in options.xcols / options.ycols."""
    try:
        options = pipeline['options']
        return dataframe[options['xcols']], dataframe[options['ycols']]
    except Exception as e:
        raise Exception("data_getxy: " + str(e))
def data_getx(dataframe, pipeline):
    """Select only the feature columns (options.xcols).

    Returns (X_frame, 0); the 0 is a Y placeholder so callers can unpack
    the same (X, Y) shape as data_getxy.
    """
    try:
        X_frame = dataframe[pipeline['options']['xcols']]
        return (X_frame, 0)
    except Exception as e:
        # BUG FIX: the error was mislabelled "data_getxy", hiding which
        # step actually failed.
        raise Exception("data_getx: " + str(e))
def data_handlemissing(dataframe, pipeline):
    """Handle missing values per options.type.

    - "dropcolumns"/"droprows": thresh -1 drops only all-NaN axes, 0 drops
      any axis containing NaN, >0 keeps axes with at least `thresh`
      non-NaN values.
    - "fillmissing": impute with the sklearn Imputer strategy in
      options.strategy (NOTE(review): sklearn's Imputer is deprecated in
      modern releases — confirm the pinned sklearn version).

    BUG FIX: the droprows thresh > 0 branch called dropna without
    inplace=True and discarded the result, so the rows were never dropped.
    """
    try:
        if pipeline['options']['type'] == "dropcolumns":
            thresh = pipeline['options']['thresh']
            if thresh == -1:
                dataframe.dropna(axis=1, how="all", inplace=True)
            elif thresh == 0:
                dataframe.dropna(axis=1, how="any", inplace=True)
            elif thresh > 0:
                dataframe.dropna(axis=1, thresh=thresh, inplace=True)
        elif pipeline['options']['type'] == "droprows":
            thresh = pipeline['options']['thresh']
            if thresh == -1:
                dataframe.dropna(axis=0, how="all", inplace=True)
            elif thresh == 0:
                dataframe.dropna(axis=0, how="any", inplace=True)
            elif thresh > 0:
                dataframe.dropna(axis=0, thresh=thresh, inplace=True)
        elif pipeline['options']['type'] == "fillmissing":
            strategy = pipeline['options']['strategy']
            imp = Imputer(missing_values='NaN', strategy=strategy, axis=0)
            array = imp.fit_transform(dataframe.values)
            dataframe = pandas.DataFrame(array, columns = dataframe.columns)
        return dataframe
    except Exception as e:
        raise Exception("data_handlemissing: " + str(e))
def data_preprocess(dataframe, pipeline):
    """Apply an sklearn.preprocessing transformer (named in options.method)
    to the whole dataframe and return a new DataFrame with the original
    column names.

    NOTE(review): the method name is eval'd into sklearn.preprocessing —
    only safe while pipeline configurations come from trusted users.
    """
    try:
        method = pipeline['options']['method']
        data = dataframe.values
        module = eval("preprocessing." + method)()
        m = getattr(module, "fit_transform")
        data = m(data)
        return pandas.DataFrame(data, columns = dataframe.columns)
    except Exception as e:
        raise Exception("data_preprocess: " + str(e))
def image_preprocess(X, Y, pipeline):
    """Prepare image arrays for training.

    Options: reshape {pixels,width,height} reshapes X to
    (n, pixels, width, height); normalize divides pixel values by 255;
    encode one-hot encodes Y.

    Returns (X, Y, num_classes); num_classes is None when encode is False.

    BUG FIX: num_classes was only bound inside the encode branch, so the
    return raised NameError whenever encode was False.
    """
    try:
        normalize = pipeline["options"]["normalize"]
        encode = pipeline["options"]["encode"]
        reshape = False
        if "reshape" in pipeline["options"]:
            reshape = True
            pixels = pipeline["options"]["reshape"]["pixels"]
            width = pipeline["options"]["reshape"]["width"]
            height = pipeline["options"]["reshape"]["height"]
        if reshape is True:
            X = X.reshape(X.shape[0], pixels, width, height).astype('float32')
        else:
            X = X.astype('float32')
        if normalize is True:
            X = X/255
        num_classes = None
        if encode is True:
            Y = np_utils.to_categorical(Y)
            num_classes = Y.shape[1]
        return X,Y,num_classes
    except Exception as e:
        raise Exception("image_preprocess: " + str(e))
def data_featureselection(X, Y, pipeline):
    """Run an sklearn.feature_selection method named in options.method.

    All remaining options are forwarded to the selector; a score_func
    option is resolved to a callable inside sklearn.feature_selection.
    If options.transform is True, X is reduced to the selected columns.

    Returns (X, Y, result) where result holds the selected feature names
    plus per-feature variances (VarianceThreshold) or scores/p-values.
    """
    try:
        method = pipeline["options"]['method']
        transform = pipeline["options"]['transform']
        args = {}
        # forward every other option verbatim to the selector constructor
        for p in pipeline["options"]:
            if "method" in p:
                continue
            if "transform" in p:
                continue
            if "score_func" in p:
                # resolve the scoring-function name to the sklearn callable
                scorefunc = eval("feature_selection." + pipeline["options"][p])
                args[p] = scorefunc
                continue
            args[p] = pipeline["options"][p]
        module = eval("feature_selection." + method)(**args)
        fit = getattr(module, "fit")
        mtransform = getattr(module, "fit_transform")
        f = fit(X.values, Y.values)
        names = X.columns
        result = {}
        if transform is True:
            data = mtransform(X.values, Y.values)
            # map the boolean support mask back to column names
            selected_columns = []
            fcount = 0
            for fs in f.get_support():
                if fs == True:
                    selected_columns.append(names[fcount])
                fcount = fcount + 1
            X = pandas.DataFrame(data, columns=selected_columns)
        else:
            selected_columns = names
        if method == "VarianceThreshold":
            result['variances'] = sorted(zip(map(lambda x: round(x, 4), f.variances_), names), reverse=True)
        else:
            result['scores'] = sorted(zip(map(lambda x: round(x, 4), f.scores_), names), reverse=True)
            result['pvalues'] = sorted(zip(map(lambda x: round(x, 4), f.pvalues_), names), reverse=True)
        result["features"] = selected_columns
        return X, Y, result
    except Exception as e:
        raise Exception("data_featureselection: " + str(e))
def data_getfeatures(X, Y, result, pipeline):
    """Restrict X to a previously selected feature set.

    Args:
        X: feature DataFrame.
        Y: target, passed through untouched.
        result: JSON string produced by data_featureselection containing a
            "features" list.
        pipeline: step dict; options.transform decides whether X is
            actually filtered.

    Returns (X, Y, parsed_result).

    Cleanup: removed the dead local `method` and stopped rebinding the
    `result` parameter, which obscured that the return value is the parsed
    dict rather than the input string.
    """
    try:
        transform = pipeline["options"]['transform']
        parsed = json.loads(result)
        selected = parsed["features"]
        if transform is True:
            X = X[selected]
        return X, Y, parsed
    except Exception as e:
        raise Exception("data_getfeatures: " + str(e))
def data_featureselection_withestimator(estimator, X, Y, pipeline):
    """Run an estimator-based sklearn.feature_selection method (e.g. RFE,
    SelectFromModel) named in options.method.

    All remaining options are forwarded to the selector; if
    options.transform is True, X is replaced by the reduced array.

    Returns (X, Y, result) where result["features"] lists the selected
    column names.
    """
    try:
        method = pipeline["options"]['method']
        transform = pipeline["options"]['transform']
        args = {}
        # forward every other option verbatim to the selector constructor
        for p in pipeline["options"]:
            if "method" in p:
                continue
            if "transform" in p:
                continue
            args[p] = pipeline["options"][p]
        module = eval("feature_selection." + method)(estimator = estimator, **args)
        fit = getattr(module, "fit")
        mtransform = getattr(module, "fit_transform")
        f = fit(X, Y)
        names = X.columns
        if transform is True:
            data = mtransform(X, Y)
            # NOTE(review): unlike data_featureselection, X becomes a bare
            # array here rather than a DataFrame — confirm downstream steps
            # accept that.
            X = data
            selected_columns = []
            fcount = 0
            for fs in f.get_support():
                if fs == True:
                    selected_columns.append(names[fcount])
                fcount = fcount + 1
        else:
            selected_columns = names
        result = {}
        result["features"] = selected_columns
        return (X, Y, result)
    except Exception as e:
        raise Exception("data_featureselection_withestimator: " + str(e))
def model_evaluate(X, Y, pipeline):
    """Cross-validate and fit an sklearn model named in options.model_name.

    options.scoring (default "neg_mean_squared_error") and options.kfold
    (default 10) control the cross-validation. The CV metrics are stored
    on the job record, the fitted model is pickled to
    <projectfolder>/model.out, and the metrics JSON is returned.

    BUG FIX: the per-metric "min"/"max" fields referenced the numpy
    .min/.max methods without calling them, so bound methods (not numbers)
    were serialised into the results.
    """
    try:
        results = []
        if "scoring" in pipeline["options"]:
            if len(pipeline['options']['scoring']) > 0:
                scoring = pipeline['options']['scoring']
            else:
                scoring = "neg_mean_squared_error"
        else:
            scoring = "neg_mean_squared_error"
        kfold = 10
        if "kfold" in pipeline['options']:
            kfold = int(pipeline["options"]["kfold"])
        model = scikitlearn.getSKLearnModel(pipeline['options']['model_name'])
        valresult = cross_validate(model, X, Y, cv=kfold, scoring=scoring, return_train_score=True)
        model.fit(X, Y)
        for p in valresult:
            results.append({"param": p, "values": valresult[p].tolist(),
                            "min": valresult[p].min(), "max": valresult[p].max()})
        output = jsonpickle.encode(results, unpicklable=False)
        projectmgr.UpdateExecuteResult(jobid, output)
        picklefile = projectfolder + "/model.out"
        with open(picklefile, "wb") as f:
            pickle.dump(model, f)
        return output
    except Exception as e:
        raise Exception("model_evaluate: " + str(e))
def model_train(X, Y, pipeline, X_test=None, Y_test=None, more = False):
    """Train a deep model (currently only model_type == "mlp").

    Loads the model definition from the project store, trains it via
    mxnetfactory, persists the training metrics on the job record and the
    model architecture to <projectfolder>/model.json, then returns the
    training result (None for non-mlp model types).

    NOTE(review): the X_test/Y_test parameters are accepted but hard-coded
    to None in the mxnetfactory.Train call — confirm whether the caller's
    test split should be forwarded.
    """
    try:
        result = None
        if model_type == "mlp":
            deepmodel = projectmgr.GetDeepModel(name, "ml", pipeline['options']['model_name'])
            if deepmodel is None:
                raise Exception(pipeline['options']['model_name'] + ": Model not found!")
            modeljson = json.loads(deepmodel.modeldata)
            modelObj = mxnetfactory.createModel(modeljson)
            #modelObj.compile(loss=pipeline['options']['loss'], optimizer=pipeline['options']['optimizer'],
            #                 metrics=pipeline['options']['scoring'])
            epoches = pipeline["options"]["epoches"]
            batch_size = pipeline["options"]["batch_size"]
            # the module itself acts as `self` here (see init())
            mxnetfactory.init(mxnetfactory, name, jobid)
            result = mxnetfactory.Train(modelObj, X, Y, projectfolder, pipeline["options"], epoches, batch_size, X_test=None, Y_test=None, more=more)
            projectmgr.UpdateExecuteResult(jobid, json.dumps(result))
            picklefile = projectfolder + "/model.json"
            model_json = modelObj.to_json()
            with open(picklefile, "w") as json_file:
                json_file.write(model_json)
        return result
    except Exception as e:
        raise Exception("model_train: " + str(e))
def model_predict(X, pipeline):
    """Predict with the persisted model.

    For model_type "mlp": rebuild the Keras model from
    <projectfolder>/model.json, load weights.hdf5, and compile with the
    pipeline's loss/optimizer/scoring before predicting (DataFrame input
    is converted to a raw array). Otherwise: unpickle the sklearn model
    from model.out and predict directly.
    """
    if model_type == "mlp":
        json_file = open(projectfolder + '/model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        model.load_weights(projectfolder + "/weights.hdf5")
        model.compile(loss=pipeline['options']['loss'], optimizer=pipeline['options']['optimizer'],
                      metrics=pipeline['options']['scoring'])
        if type(X) is pandas.DataFrame:
            X = X.values
        Y = model.predict(X)
    else:
        picklefile = projectfolder + "/model.out"
        with open(picklefile, "rb") as f:
            model = pickle.load(f)
        Y = model.predict(X)
    return Y
def return_result(outputname, num = None):
    """Load the pickled pipeline output and return the entry (or entries)
    whose key contains 'output-><outputname>' (with '-><num>' appended when
    num is given).

    A single match returns the bare value; multiple matches return a dict
    indexed 0..count-1.

    NOTE(review): with three or more matches, `result` is never advanced in
    the count > 0 branch, so resultDict[count-1] is repeatedly overwritten
    with the *first* match — looks like a bug; confirm expected output for
    >2 matches before relying on this.
    """
    pickleFile = projectfolder + '/pipeline.out'
    with open(pickleFile, 'rb') as f:
        resultset = pickle.load(f)
    result = None
    if num is None:
        outputname = "output->" + outputname
    else:
        outputname = "output->" + outputname + "->" + str(num)
    count = 0
    resultDict = {}
    for r in resultset:
        if outputname in r:
            if count > 0:
                resultDict[count - 1] = result
                resultDict[count] = resultset[r]
            else:
                result = resultset[r]
            count = count+1
    if count > 1:
        return resultDict
    return result
| 37.170667 | 149 | 0.560944 | import simplejson as json
import os
import pickle
import jsonpickle
import numpy
import pandas
from keras import datasets
from keras.models import model_from_json
from pandas import read_csv
from sklearn.model_selection import cross_validate, train_test_split, cross_val_predict
from sklearn.preprocessing import Imputer
from keras.utils import np_utils
from ml import scikitlearn, mxnetfactory
from Interface import projectmgr
from sklearn import preprocessing, feature_selection
projectfolder = ""
model_type = ""
name = ""
optionslist = {}
jobid = ""
def init(self, name, modeltype, jobid=None):
self.projectfolder = "./data/" + name
self.name = name
self.jobid = jobid
self.model_type = modeltype
def addOption(options):
for op in options:
optionslist[op] = options[op]
def data_loadcsv(pipeline):
try:
filename = projectfolder + "/dataset/" + pipeline["options"]["filename"]
if pipeline['options']['column_header'] == True:
dataframe = read_csv(filename, delim_whitespace=pipeline['options']['delim_whitespace'], dtype={'a': numpy.float32})
else:
dataframe = read_csv(filename, delim_whitespace=pipeline['options']['delim_whitespace'], header=None, dtype={'a': numpy.float32})
return dataframe
except Exception as e:
raise Exception("data_loadcsv: " + str(e))
def data_loadsample(pipeline):
dataset_name = pipeline["options"]["dataset_name"]
if dataset_name == "cifar10":
(X_train, Y_train), (X_test, Y_test) = datasets.cifar10.load_data()
elif dataset_name == "cifar100":
(X_train, Y_train), (X_test, Y_test) = datasets.cifar100.load_data()
elif dataset_name == "imdb":
(X_train, Y_train), (X_test, Y_test) = datasets.imdb.load_data(path="imdb.npz",
num_words=None,
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
elif dataset_name == "reuters":
(X_train, Y_train), (X_test, Y_test) = datasets.reuters.load_data(path="reuters.npz",
num_words=None,
skip_top=0,
maxlen=None,
test_split=0.2,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
elif dataset_name == "mnist":
(X_train, Y_train), (X_test, Y_test) = datasets.mnist.load_data()
elif dataset_name == "boston_housing":
(X_train, Y_train), (X_test, Y_test) = datasets.boston_housing.load_data()
return (X_train, Y_train), (X_test, Y_test)
def data_testtrainsplit(X, Y, pipeline):
test_size = 0.25
random_state = 42
if "test_split" in pipeline["options"]:
test_size = pipeline["options"]["test_size"]
if "random_state" in pipeline["options"]:
random_state = pipeline["options"]["random_state"]
X_train, Y_train, X_test, Y_test = train_test_split(X, Y, test_size=test_size, random_state=random_state)
return X_train, Y_train, X_test, Y_test
def data_getxy(dataframe, pipeline):
    """Select the feature (X) and target (Y) column sets from *dataframe*.

    ``pipeline['options']`` must provide ``xcols`` and ``ycols`` column lists.
    Returns the tuple ``(X_frame, Y_frame)``.
    """
    try:
        opts = pipeline['options']
        return dataframe[opts['xcols']], dataframe[opts['ycols']]
    except Exception as e:
        raise Exception("data_getxy: " + str(e))
def data_getx(dataframe, pipeline):
    """Select only the feature (X) columns from *dataframe*.

    Returns ``(X_frame, 0)`` -- the 0 stands in for the missing Y so callers
    can unpack the same shape as :func:`data_getxy`.
    """
    try:
        X_frame = dataframe[pipeline['options']['xcols']]
        return (X_frame, 0)
    except Exception as e:
        # BUG FIX: the error was mislabeled "data_getxy" (copy-paste), which
        # pointed debugging at the wrong function.
        raise Exception("data_getx: " + str(e))
def data_handlemissing(dataframe, pipeline):
    """Handle missing values according to ``pipeline['options']['type']``.

    * ``dropcolumns`` / ``droprows`` -- drop columns/rows; ``thresh`` of -1
      means drop only all-NaN, 0 means drop on any NaN, >0 requires at least
      that many non-NaN values.  The drops mutate *dataframe* in place.
    * ``fillmissing`` -- impute with the configured sklearn strategy and
      return a fresh DataFrame.
    """
    try:
        if pipeline['options']['type'] == "dropcolumns":
            thresh = pipeline['options']['thresh']
            if thresh == -1:
                dataframe.dropna(axis=1, how="all", inplace=True)
            elif thresh == 0:
                dataframe.dropna(axis=1, how="any", inplace=True)
            elif thresh > 0:
                dataframe.dropna(axis=1, thresh=thresh, inplace=True)
        elif pipeline['options']['type'] == "droprows":
            thresh = pipeline['options']['thresh']
            if thresh == -1:
                dataframe.dropna(axis=0, how="all", inplace=True)
            elif thresh == 0:
                dataframe.dropna(axis=0, how="any", inplace=True)
            elif thresh > 0:
                # BUG FIX: inplace=True was missing here, so the thresholded
                # drop was computed and then silently discarded.
                dataframe.dropna(axis=0, thresh=thresh, inplace=True)
        elif pipeline['options']['type'] == "fillmissing":
            strategy = pipeline['options']['strategy']
            imp = Imputer(missing_values='NaN', strategy=strategy, axis=0)
            array = imp.fit_transform(dataframe.values)
            dataframe = pandas.DataFrame(array, columns = dataframe.columns)
        return dataframe
    except Exception as e:
        raise Exception("data_handlemissing: " + str(e))
def data_preprocess(dataframe, pipeline):
    """Apply an sklearn ``preprocessing`` transformer named in the pipeline.

    ``pipeline['options']['method']`` is e.g. ``"StandardScaler"``; the
    transformed values are returned as a new DataFrame with the original
    column labels.

    NOTE(review): the transformer is resolved with ``eval`` on config-supplied
    text -- only safe if pipeline definitions are fully trusted.
    """
    try:
        method = pipeline['options']['method']
        data = dataframe.values
        # e.g. method == "StandardScaler" -> preprocessing.StandardScaler()
        module = eval("preprocessing." + method)()
        m = getattr(module, "fit_transform")
        data = m(data)
        return pandas.DataFrame(data, columns = dataframe.columns)
    except Exception as e:
        raise Exception("data_preprocess: " + str(e))
def image_preprocess(X, Y, pipeline):
    """Prepare image arrays for training.

    Options: ``normalize`` scales X by 1/255, ``encode`` one-hot encodes Y,
    and an optional ``reshape`` dict (``pixels``/``width``/``height``)
    reshapes X per sample.  Returns ``(X, Y, num_classes)`` where
    ``num_classes`` is None when no encoding was requested.
    """
    try:
        normalize = pipeline["options"]["normalize"]
        encode = pipeline["options"]["encode"]
        reshape = False
        if "reshape" in pipeline["options"]:
            reshape = True
            pixels = pipeline["options"]["reshape"]["pixels"]
            width = pipeline["options"]["reshape"]["width"]
            height = pipeline["options"]["reshape"]["height"]
        if reshape is True:
            X = X.reshape(X.shape[0], pixels, width, height).astype('float32')
        else:
            X = X.astype('float32')
        if normalize is True:
            X = X/255
        # BUG FIX: num_classes was unbound whenever encode was False, so every
        # non-encoding call died with "name 'num_classes' is not defined".
        num_classes = None
        if encode is True:
            Y = np_utils.to_categorical(Y)
            num_classes = Y.shape[1]
        return X, Y, num_classes
    except Exception as e:
        raise Exception("image_preprocess: " + str(e))
def data_featureselection(X, Y, pipeline):
    """Run an sklearn ``feature_selection`` method configured in the pipeline.

    Every option other than ``method``/``transform`` is forwarded as a keyword
    argument; a ``score_func`` option is resolved to the named function in
    ``feature_selection``.  When ``transform`` is True, X is reduced to the
    selected columns.  Returns ``(X, Y, result)`` where ``result`` carries the
    selected feature names plus scores/p-values (or variances for
    VarianceThreshold).

    NOTE(review): method and score_func are resolved with ``eval`` on
    config-supplied strings -- trusted input only.
    """
    try:
        method = pipeline["options"]['method']
        transform = pipeline["options"]['transform']
        args = {}
        # Forward all remaining options as constructor kwargs.
        for p in pipeline["options"]:
            if "method" in p:
                continue
            if "transform" in p:
                continue
            if "score_func" in p:
                scorefunc = eval("feature_selection." + pipeline["options"][p])
                args[p] = scorefunc
                continue
            args[p] = pipeline["options"][p]
        module = eval("feature_selection." + method)(**args)
        fit = getattr(module, "fit")
        mtransform = getattr(module, "fit_transform")
        f = fit(X.values, Y.values)
        names = X.columns
        result = {}
        if transform is True:
            data = mtransform(X.values, Y.values)
            # Map the boolean support mask back to the surviving column names.
            selected_columns = []
            fcount = 0
            for fs in f.get_support():
                if fs == True:
                    selected_columns.append(names[fcount])
                fcount = fcount + 1
            X = pandas.DataFrame(data, columns=selected_columns)
        else:
            selected_columns = names
        if method == "VarianceThreshold":
            result['variances'] = sorted(zip(map(lambda x: round(x, 4), f.variances_), names), reverse=True)
        else:
            result['scores'] = sorted(zip(map(lambda x: round(x, 4), f.scores_), names), reverse=True)
            result['pvalues'] = sorted(zip(map(lambda x: round(x, 4), f.pvalues_), names), reverse=True)
        result["features"] = selected_columns
        return X, Y, result
    except Exception as e:
        raise Exception("data_featureselection: " + str(e))
def data_getfeatures(X, Y, result, pipeline):
    """Re-apply a stored feature-selection result to X.

    *result* is the JSON-encoded dict produced by data_featureselection; when
    ``transform`` is True, X is restricted to the recorded feature columns.
    Returns ``(X, Y, parsed_result)``.
    """
    try:
        opts = pipeline["options"]
        method = opts['method']        # read for parity with the config schema
        transform = opts['transform']
        parsed = json.loads(result)
        names = parsed["features"]
        if transform is True:
            X = X[names]
        return X, Y, parsed
    except Exception as e:
        raise Exception("data_getfeatures: " + str(e))
def data_featureselection_withestimator(estimator, X, Y, pipeline):
    """Run an estimator-based feature selector (e.g. RFE, SelectFromModel).

    Like :func:`data_featureselection`, but the configured method receives the
    supplied *estimator*.  When ``transform`` is True, X is replaced by the
    transformed array and the surviving column names are recorded.  Returns
    ``(X, Y, result)`` with ``result["features"]`` listing the kept columns.

    NOTE(review): the method name is resolved with ``eval`` on config-supplied
    text -- trusted input only.  Also note X becomes a bare ndarray (not a
    DataFrame) on the transform path.
    """
    try:
        method = pipeline["options"]['method']
        transform = pipeline["options"]['transform']
        args = {}
        # Forward all remaining options as constructor kwargs.
        for p in pipeline["options"]:
            if "method" in p:
                continue
            if "transform" in p:
                continue
            args[p] = pipeline["options"][p]
        module = eval("feature_selection." + method)(estimator = estimator, **args)
        fit = getattr(module, "fit")
        mtransform = getattr(module, "fit_transform")
        f = fit(X, Y)
        names = X.columns
        if transform is True:
            data = mtransform(X, Y)
            X = data
            # Map the boolean support mask back to the surviving column names.
            selected_columns = []
            fcount = 0
            for fs in f.get_support():
                if fs == True:
                    selected_columns.append(names[fcount])
                fcount = fcount + 1
        else:
            selected_columns = names
        result = {}
        result["features"] = selected_columns
        return (X, Y, result)
    except Exception as e:
        raise Exception("data_featureselection_withestimator: " + str(e))
def model_evaluate(X, Y, pipeline):
    """Cross-validate the configured sklearn model, fit it, and persist it.

    Runs ``cross_validate`` with the configured scoring (default
    "neg_mean_squared_error") and k-fold count (default 10), stores the JSON
    result via projectmgr, pickles the fitted model to
    ``<projectfolder>/model.out`` and returns the JSON string.
    """
    try:
        results = []
        if "scoring" in pipeline["options"]:
            if len(pipeline['options']['scoring']) > 0:
                scoring = pipeline['options']['scoring']
            else:
                scoring = "neg_mean_squared_error"
        else:
            scoring = "neg_mean_squared_error"
        kfold = 10
        if "kfold" in pipeline['options']:
            kfold = int(pipeline["options"]["kfold"])
        model = scikitlearn.getSKLearnModel(pipeline['options']['model_name'])
        valresult = cross_validate(model, X, Y, cv=kfold, scoring=scoring, return_train_score=True)
        model.fit(X, Y)
        for p in valresult:
            # BUG FIX: .min/.max were referenced without being called, so the
            # bound method objects (not the values) were serialized.
            results.append({"param": p, "values": valresult[p].tolist(), "min": valresult[p].min(), "max": valresult[p].max()})
        output = jsonpickle.encode(results, unpicklable=False)
        projectmgr.UpdateExecuteResult(jobid, output)
        picklefile = projectfolder + "/model.out"
        with open(picklefile, "wb") as f:
            pickle.dump(model, f)
        return output
    except Exception as e:
        raise Exception("model_evaluate: " + str(e))
def model_train(X, Y, pipeline, X_test=None, Y_test=None, more = False):
    """Train the configured deep model (currently only ``model_type == "mlp"``).

    Looks up the stored model definition, trains it via mxnetfactory, records
    the result through projectmgr and writes the model architecture to
    ``<projectfolder>/model.json``.  Returns the training result (None when
    the model type is not handled).
    """
    try:
        result = None
        if model_type == "mlp":
            deepmodel = projectmgr.GetDeepModel(name, "ml", pipeline['options']['model_name'])
            if deepmodel is None:
                raise Exception(pipeline['options']['model_name'] + ": Model not found!")
            modeljson = json.loads(deepmodel.modeldata)
            modelObj = mxnetfactory.createModel(modeljson)
            epoches = pipeline["options"]["epoches"]
            batch_size = pipeline["options"]["batch_size"]
            mxnetfactory.init(mxnetfactory, name, jobid)
            # BUG FIX: the caller-supplied X_test/Y_test were ignored -- the
            # original hard-coded X_test=None, Y_test=None here, so validation
            # data never reached the trainer.
            result = mxnetfactory.Train(modelObj, X, Y, projectfolder, pipeline["options"], epoches, batch_size, X_test=X_test, Y_test=Y_test, more=more)
            projectmgr.UpdateExecuteResult(jobid, json.dumps(result))
            picklefile = projectfolder + "/model.json"
            model_json = modelObj.to_json()
            with open(picklefile, "w") as json_file:
                json_file.write(model_json)
        return result
    except Exception as e:
        raise Exception("model_train: " + str(e))
def model_predict(X, pipeline):
    """Predict with the previously trained model stored in the project folder.

    For ``model_type == "mlp"`` the Keras network is rebuilt from
    ``model.json``, its weights reloaded from ``weights.hdf5`` and recompiled
    with the pipeline's loss/optimizer/metrics; otherwise the pickled
    scikit-learn estimator in ``model.out`` is used.  Returns the predictions.
    """
    if model_type == "mlp":
        # Keras path: rebuild the architecture, then restore trained weights.
        json_file = open(projectfolder + '/model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        model.load_weights(projectfolder + "/weights.hdf5")
        model.compile(loss=pipeline['options']['loss'], optimizer=pipeline['options']['optimizer'],
                      metrics=pipeline['options']['scoring'])
        if type(X) is pandas.DataFrame:
            X = X.values
        Y = model.predict(X)
    else:
        # scikit-learn path: unpickle the fitted estimator.
        picklefile = projectfolder + "/model.out"
        with open(picklefile, "rb") as f:
            model = pickle.load(f)
        Y = model.predict(X)
    return Y
def return_result(outputname, num = None):
    """Fetch a stored pipeline output from ``<projectfolder>/pipeline.out``.

    Keys in the pickled result set look like ``output-><name>`` or
    ``output-><name>-><num>``.  With a single match the bare value is
    returned; with several matches a dict of them (indexed by match order)
    is returned.
    """
    pickleFile = projectfolder + '/pipeline.out'
    with open(pickleFile, 'rb') as f:
        resultset = pickle.load(f)
    result = None
    if num is None:
        outputname = "output->" + outputname
    else:
        outputname = "output->" + outputname + "->" + str(num)
    count = 0
    resultDict = {}
    for r in resultset:
        if outputname in r:
            if count > 0:
                # NOTE(review): the first match is back-filled only once a
                # second match appears, so with >1 matches resultDict holds
                # all of them -- confirm this indexing is intentional.
                resultDict[count - 1] = result
                resultDict[count] = resultset[r]
            else:
                result = resultset[r]
            count = count+1
    if count > 1:
        return resultDict
    return result
| true | true |
f7f5c6b1cf962ad2edd0d920253400e4a153fe61 | 1,154 | py | Python | src/utils/torchvision_utils.py | likojack/bnv_fusion | 76b7354c6f3bf8c7f7e1ff4d958de0e73ec3e614 | [
"MIT"
] | null | null | null | src/utils/torchvision_utils.py | likojack/bnv_fusion | 76b7354c6f3bf8c7f7e1ff4d958de0e73ec3e614 | [
"MIT"
] | null | null | null | src/utils/torchvision_utils.py | likojack/bnv_fusion | 76b7354c6f3bf8c7f7e1ff4d958de0e73ec3e614 | [
"MIT"
] | null | null | null | import torch
import torchvision.transforms as T
import numpy as np
import cv2
from PIL import Image
import matplotlib as mpl
import matplotlib.cm as cm
def visualize_depth(depth, cmap=cv2.COLORMAP_JET):
    """
    Colorize a depth map for display.

    depth: (H, W) array.  It is cast straight to uint8, so values are assumed
    to already lie in 0-255 (anything else truncates/wraps) -- TODO confirm
    with callers.

    Returns a (3, H, W) float tensor in [0, 1].
    """
    x = depth.astype(np.uint8)
    x_ = Image.fromarray(cv2.applyColorMap(x, cmap))
    x_ = T.ToTensor()(x_) # (3, H, W)
    return x_
def visualize_prob(prob, cmap=cv2.COLORMAP_BONE):
    """
    Colorize a probability map for display.

    prob: (H, W) array with values in 0~1; scaled to 0-255 before the
    colormap is applied.

    Returns a (3, H, W) float tensor in [0, 1].
    """
    x = (255*prob).astype(np.uint8)
    x_ = Image.fromarray(cv2.applyColorMap(x, cmap))
    x_ = T.ToTensor()(x_) # (3, H, W)
    return x_
def depth_visualizer(data, min_depth, max_depth):
"""
Args:
data (HxW): depth data
Returns:
vis_data (HxWx3): depth visualization (RGB)
"""
mask = np.logical_and(data > min_depth, data < max_depth)
inv_depth = 1 / (data + 1e-6)
vmax = np.percentile(1/(data[mask]+1e-6), 90)
normalizer = mpl.colors.Normalize(vmin=inv_depth.min(), vmax=vmax)
mapper = cm.ScalarMappable(norm=normalizer, cmap='magma')
vis_data = (mapper.to_rgba(inv_depth)[:, :, :3] * 255).astype(np.uint8)
return vis_data | 26.227273 | 75 | 0.637782 | import torch
import torchvision.transforms as T
import numpy as np
import cv2
from PIL import Image
import matplotlib as mpl
import matplotlib.cm as cm
def visualize_depth(depth, cmap=cv2.COLORMAP_JET):
x = depth.astype(np.uint8)
x_ = Image.fromarray(cv2.applyColorMap(x, cmap))
x_ = T.ToTensor()(x_)
return x_
def visualize_prob(prob, cmap=cv2.COLORMAP_BONE):
x = (255*prob).astype(np.uint8)
x_ = Image.fromarray(cv2.applyColorMap(x, cmap))
x_ = T.ToTensor()(x_)
return x_
def depth_visualizer(data, min_depth, max_depth):
mask = np.logical_and(data > min_depth, data < max_depth)
inv_depth = 1 / (data + 1e-6)
vmax = np.percentile(1/(data[mask]+1e-6), 90)
normalizer = mpl.colors.Normalize(vmin=inv_depth.min(), vmax=vmax)
mapper = cm.ScalarMappable(norm=normalizer, cmap='magma')
vis_data = (mapper.to_rgba(inv_depth)[:, :, :3] * 255).astype(np.uint8)
return vis_data | true | true |
f7f5c6c9c7dfcf5547abd7f417e1f6fb0f620375 | 668 | py | Python | lib/queries.py | dcsan/mviz-oss | 8577cd45457add8a0b4c1d3e2f307fd66278ba39 | [
"MIT"
] | 1 | 2015-10-14T09:20:27.000Z | 2015-10-14T09:20:27.000Z | lib/queries.py | dcsan/mviz-oss | 8577cd45457add8a0b4c1d3e2f307fd66278ba39 | [
"MIT"
] | null | null | null | lib/queries.py | dcsan/mviz-oss | 8577cd45457add8a0b4c1d3e2f307fd66278ba39 | [
"MIT"
] | null | null | null | # example queries
# you will need to edit these to relate to your own DB and tables
from datetime import datetime, timedelta
# Mongo-style timestamp filters: documents whose "ts" falls in the last day /
# last week.  Evaluated once at import time, not per query.
tstoday = {"$gte": datetime.utcnow()-timedelta(days=1) }
tsweek = {"$gte": datetime.utcnow()-timedelta(days=7) }
# Named query catalogue.  Plain entries carry a filter ('q') and optional
# projection ('proj'); entries with 'type': 'aggregate' carry $match/$group
# stages instead.
query_list = {
    'test': {
        'desc': "test query",
        'q': {'ts': tstoday },
        'proj': {"player:uid":1, "ts":1 }
    },
    'test-ag': {
        'desc': 'aggregation query test',
        'type': 'aggregate',
        'q': {
            'match': {"event": "cashflow", 'ts': tstoday },
            'group': {"_id": "$player:name", "total": {"$sum": "$amount"}, "count": {"$sum": 1} }
        }
    }
}
| 24.740741 | 97 | 0.508982 |
from datetime import datetime, timedelta
tstoday = {"$gte": datetime.utcnow()-timedelta(days=1) }
tsweek = {"$gte": datetime.utcnow()-timedelta(days=7) }
query_list = {
'test': {
'desc': "test query",
'q': {'ts': tstoday },
'proj': {"player:uid":1, "ts":1 }
},
'test-ag': {
'desc': 'aggregation query test',
'type': 'aggregate',
'q': {
'match': {"event": "cashflow", 'ts': tstoday },
'group': {"_id": "$player:name", "total": {"$sum": "$amount"}, "count": {"$sum": 1} }
}
}
}
| true | true |
f7f5c78ed708300fde6c7f3762ea17e5585d4a7f | 504 | py | Python | flod_booking/alembic/versions/20140322-1550-449c8b35b869_removed_request_uri_for_slots_and_.py | Trondheim-kommune/Bookingbasen | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2017-10-17T12:15:28.000Z | 2017-10-17T12:15:28.000Z | flod_booking/alembic/versions/20140322-1550-449c8b35b869_removed_request_uri_for_slots_and_.py | Trondheim-kommune/Bookingbasen | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | [
"BSD-2-Clause-FreeBSD"
] | 6 | 2021-03-22T17:15:52.000Z | 2022-01-13T00:39:58.000Z | flod_booking/alembic/versions/20140322-1550-449c8b35b869_removed_request_uri_for_slots_and_.py | Trondheim-kommune/Bookingbasen | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-09-09T13:35:03.000Z | 2019-09-09T13:35:03.000Z | """Removed request_uri for slots and repeating slots
Revision ID: 449c8b35b869
Revises: 1b81c4cf5a5a
Create Date: 2014-03-22 15:50:29.543673
"""
# revision identifiers, used by Alembic.
revision = '449c8b35b869'
down_revision = '1b81c4cf5a5a'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Drop the obsolete request_uri column from both slot tables.
    op.drop_column("slots", "request_uri")
    op.drop_column("repeating_slots", "request_uri")
def downgrade():
    # The dropped column data cannot be restored, so downgrading is refused.
    raise NotImplementedError('This application does not support downgrades.')
| 21.913043 | 78 | 0.759921 |
revision = '449c8b35b869'
down_revision = '1b81c4cf5a5a'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column("slots", "request_uri")
op.drop_column("repeating_slots", "request_uri")
def downgrade():
raise NotImplementedError('This application does not support downgrades.')
| true | true |
f7f5c7f565ff0d3a1ff7872f491a25ed1e9e2f7d | 9,775 | py | Python | src/pretix/base/services/cancelevent.py | gvarela1981/pretix | 5cc0bd5d367a8cf46c9635d2e2e474c81210cb80 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/base/services/cancelevent.py | gvarela1981/pretix | 5cc0bd5d367a8cf46c9635d2e2e474c81210cb80 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/base/services/cancelevent.py | gvarela1981/pretix | 5cc0bd5d367a8cf46c9635d2e2e474c81210cb80 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import logging
from decimal import Decimal
from django.db import transaction
from django.db.models import Count, Exists, IntegerField, OuterRef, Subquery
from i18nfield.strings import LazyI18nString
from pretix.base.decimal import round_decimal
from pretix.base.email import get_email_context
from pretix.base.i18n import language
from pretix.base.models import (
Event, InvoiceAddress, Order, OrderFee, OrderPosition, OrderRefund,
SubEvent, User, WaitingListEntry,
)
from pretix.base.services.locking import LockTimeoutException
from pretix.base.services.mail import SendMailException, TolerantDict, mail
from pretix.base.services.orders import (
OrderChangeManager, OrderError, _cancel_order, _try_auto_refund,
)
from pretix.base.services.tasks import ProfiledEventTask
from pretix.celery_app import app
logger = logging.getLogger(__name__)
def _send_wle_mail(wle: WaitingListEntry, subject: LazyI18nString, message: LazyI18nString, subevent: SubEvent):
    """Notify one waiting-list entry that the (sub)event was canceled.

    The mail is rendered in the entry's locale; subject placeholders are
    filled leniently via TolerantDict so missing keys don't raise.
    """
    with language(wle.locale):
        email_context = get_email_context(event_or_subevent=subevent or wle.event, event=wle.event)
        try:
            mail(
                wle.email,
                str(subject).format_map(TolerantDict(email_context)),
                message,
                email_context,
                wle.event,
                locale=wle.locale
            )
        except SendMailException:
            # Best effort: a failed notification must not abort the cancellation.
            logger.exception('Waiting list canceled email could not be sent')
def _send_mail(order: Order, subject: LazyI18nString, message: LazyI18nString, subevent: SubEvent,
               refund_amount: Decimal, user: User, positions: list):
    """Send the cancellation mail to the order contact, then to each attendee
    with a distinct email address (skipping positions of other subevents and
    add-on positions)."""
    with language(order.locale):
        try:
            ia = order.invoice_address
        except InvoiceAddress.DoesNotExist:
            ia = InvoiceAddress()
        email_context = get_email_context(event_or_subevent=subevent or order.event, refund_amount=refund_amount,
                                          order=order, position_or_address=ia, event=order.event)
        real_subject = str(subject).format_map(TolerantDict(email_context))
        try:
            order.send_mail(
                real_subject, message, email_context,
                'pretix.event.order.email.event_canceled',
                user,
            )
        except SendMailException:
            logger.exception('Order canceled email could not be sent')
        for p in positions:
            if subevent and p.subevent_id != subevent.id:
                continue
            if p.addon_to_id is None and p.attendee_email and p.attendee_email != order.email:
                # BUG FIX: build the per-position context *before* rendering
                # the subject; previously the subject was formatted from the
                # stale order-level (or previous position's) context.
                email_context = get_email_context(event_or_subevent=subevent or order.event,
                                                  event=order.event,
                                                  refund_amount=refund_amount,
                                                  position_or_address=p,
                                                  order=order, position=p)
                real_subject = str(subject).format_map(TolerantDict(email_context))
                try:
                    order.send_mail(
                        real_subject, message, email_context,
                        'pretix.event.order.email.event_canceled',
                        position=p,
                        user=user
                    )
                except SendMailException:
                    logger.exception('Order canceled email could not be sent to attendee')
@app.task(base=ProfiledEventTask, bind=True, max_retries=5, default_retry_delay=1, throws=(OrderError,),
          acks_late=True)
def cancel_event(self, event: Event, subevent: int, auto_refund: bool, keep_fee_fixed: str,
                 keep_fee_percentage: str, keep_fees: list=None, manual_refund: bool=False,
                 send: bool=False, send_subject: dict=None, send_message: dict=None,
                 send_waitinglist: bool=False, send_waitinglist_subject: dict={}, send_waitinglist_message: dict={},
                 user: int=None, refund_as_giftcard: bool=False, giftcard_expires=None, giftcard_conditions=None):
    """Cancel a whole event or a single date (subevent) of an event series.

    Fully cancels every pending/paid/expired order with positions (for a
    subevent: orders touching only that date), partially cancels mixed
    orders, optionally retains cancellation fees, auto-refunds, and notifies
    buyers/attendees and the waiting list.  Returns the number of orders
    that could not be canceled.

    NOTE(review): the mutable dict defaults on send_waitinglist_* are shared
    across calls; harmless while only read, but fragile.
    """
    send_subject = LazyI18nString(send_subject)
    send_message = LazyI18nString(send_message)
    send_waitinglist_subject = LazyI18nString(send_waitinglist_subject)
    send_waitinglist_message = LazyI18nString(send_waitinglist_message)
    if user:
        user = User.objects.get(pk=user)
    # Orders in a cancelable state that actually contain at least one position.
    s = OrderPosition.objects.filter(
        order=OuterRef('pk')
    ).order_by().values('order').annotate(k=Count('id')).values('k')
    orders_to_cancel = event.orders.annotate(pcnt=Subquery(s, output_field=IntegerField())).filter(
        status__in=[Order.STATUS_PAID, Order.STATUS_PENDING, Order.STATUS_EXPIRED],
        pcnt__gt=0
    ).all()
    if subevent:
        subevent = event.subevents.get(pk=subevent)
        # Split into orders touching only this date (full cancel) and orders
        # that also contain other dates (partial cancel).
        has_subevent = OrderPosition.objects.filter(order_id=OuterRef('pk')).filter(
            subevent=subevent
        )
        has_other_subevent = OrderPosition.objects.filter(order_id=OuterRef('pk')).exclude(
            subevent=subevent
        )
        orders_to_change = orders_to_cancel.annotate(
            has_subevent=Exists(has_subevent),
            has_other_subevent=Exists(has_other_subevent),
        ).filter(
            has_subevent=True, has_other_subevent=True
        )
        orders_to_cancel = orders_to_cancel.annotate(
            has_subevent=Exists(has_subevent),
            has_other_subevent=Exists(has_other_subevent),
        ).filter(
            has_subevent=True, has_other_subevent=False
        )
        subevent.log_action(
            'pretix.subevent.canceled', user=user,
        )
        subevent.active = False
        subevent.save(update_fields=['active'])
        subevent.log_action(
            'pretix.subevent.changed', user=user, data={'active': False, '_source': 'cancel_event'}
        )
    else:
        orders_to_change = event.orders.none()
        event.log_action(
            'pretix.event.canceled', user=user,
        )
        # Deactivate all products so nothing can be sold any more.
        for i in event.items.filter(active=True):
            i.active = False
            i.save(update_fields=['active'])
            i.log_action(
                'pretix.event.item.changed', user=user, data={'active': False, '_source': 'cancel_event'}
            )
    failed = 0
    # Fully cancel the affected orders, keeping configured fees (capped at
    # what was actually paid).
    for o in orders_to_cancel.only('id', 'total'):
        try:
            fee = Decimal('0.00')
            fee_sum = Decimal('0.00')
            keep_fee_objects = []
            if keep_fees:
                for f in o.fees.all():
                    if f.fee_type in keep_fees:
                        fee += f.value
                        keep_fee_objects.append(f)
                    fee_sum += f.value
            if keep_fee_percentage:
                fee += Decimal(keep_fee_percentage) / Decimal('100.00') * (o.total - fee_sum)
            if keep_fee_fixed:
                fee += Decimal(keep_fee_fixed)
            fee = round_decimal(min(fee, o.payment_refund_sum), event.currency)
            _cancel_order(o.pk, user, send_mail=False, cancellation_fee=fee, keep_fees=keep_fee_objects)
            refund_amount = o.payment_refund_sum
            try:
                if auto_refund:
                    _try_auto_refund(o.pk, manual_refund=manual_refund, allow_partial=True,
                                     source=OrderRefund.REFUND_SOURCE_ADMIN, refund_as_giftcard=refund_as_giftcard,
                                     giftcard_expires=giftcard_expires, giftcard_conditions=giftcard_conditions)
            finally:
                # The notification goes out even when the auto-refund fails.
                if send:
                    _send_mail(o, send_subject, send_message, subevent, refund_amount, user, o.positions.all())
        except LockTimeoutException:
            logger.exception("Could not cancel order")
            failed += 1
        except OrderError:
            logger.exception("Could not cancel order")
            failed += 1
    # Partially cancel mixed orders: remove only this date's positions and
    # optionally add a cancellation fee.
    for o in orders_to_change.values_list('id', flat=True):
        with transaction.atomic():
            o = event.orders.select_for_update().get(pk=o)
            total = Decimal('0.00')
            positions = []
            ocm = OrderChangeManager(o, user=user, notify=False)
            for p in o.positions.all():
                if p.subevent == subevent:
                    total += p.price
                    ocm.cancel(p)
                    positions.append(p)
            fee = Decimal('0.00')
            if keep_fee_fixed:
                fee += Decimal(keep_fee_fixed)
            if keep_fee_percentage:
                fee += Decimal(keep_fee_percentage) / Decimal('100.00') * total
            fee = round_decimal(min(fee, o.payment_refund_sum), event.currency)
            if fee:
                f = OrderFee(
                    fee_type=OrderFee.FEE_TYPE_CANCELLATION,
                    value=fee,
                    order=o,
                    tax_rule=o.event.settings.tax_rate_default,
                )
                f._calculate_tax()
                ocm.add_fee(f)
            ocm.commit()
            refund_amount = o.payment_refund_sum - o.total
            if auto_refund:
                _try_auto_refund(o.pk, manual_refund=manual_refund, allow_partial=True,
                                 source=OrderRefund.REFUND_SOURCE_ADMIN, refund_as_giftcard=refund_as_giftcard,
                                 giftcard_expires=giftcard_expires, giftcard_conditions=giftcard_conditions)
            if send:
                _send_mail(o, send_subject, send_message, subevent, refund_amount, user, positions)
    # Waiting-list entries without a voucher are informed as well.
    for wle in event.waitinglistentries.filter(subevent=subevent, voucher__isnull=True):
        _send_wle_mail(wle, send_waitinglist_subject, send_waitinglist_message, subevent)
    return failed
| 42.68559 | 116 | 0.606343 | import logging
from decimal import Decimal
from django.db import transaction
from django.db.models import Count, Exists, IntegerField, OuterRef, Subquery
from i18nfield.strings import LazyI18nString
from pretix.base.decimal import round_decimal
from pretix.base.email import get_email_context
from pretix.base.i18n import language
from pretix.base.models import (
Event, InvoiceAddress, Order, OrderFee, OrderPosition, OrderRefund,
SubEvent, User, WaitingListEntry,
)
from pretix.base.services.locking import LockTimeoutException
from pretix.base.services.mail import SendMailException, TolerantDict, mail
from pretix.base.services.orders import (
OrderChangeManager, OrderError, _cancel_order, _try_auto_refund,
)
from pretix.base.services.tasks import ProfiledEventTask
from pretix.celery_app import app
logger = logging.getLogger(__name__)
def _send_wle_mail(wle: WaitingListEntry, subject: LazyI18nString, message: LazyI18nString, subevent: SubEvent):
with language(wle.locale):
email_context = get_email_context(event_or_subevent=subevent or wle.event, event=wle.event)
try:
mail(
wle.email,
str(subject).format_map(TolerantDict(email_context)),
message,
email_context,
wle.event,
locale=wle.locale
)
except SendMailException:
logger.exception('Waiting list canceled email could not be sent')
def _send_mail(order: Order, subject: LazyI18nString, message: LazyI18nString, subevent: SubEvent,
refund_amount: Decimal, user: User, positions: list):
with language(order.locale):
try:
ia = order.invoice_address
except InvoiceAddress.DoesNotExist:
ia = InvoiceAddress()
email_context = get_email_context(event_or_subevent=subevent or order.event, refund_amount=refund_amount,
order=order, position_or_address=ia, event=order.event)
real_subject = str(subject).format_map(TolerantDict(email_context))
try:
order.send_mail(
real_subject, message, email_context,
'pretix.event.order.email.event_canceled',
user,
)
except SendMailException:
logger.exception('Order canceled email could not be sent')
for p in positions:
if subevent and p.subevent_id != subevent.id:
continue
if p.addon_to_id is None and p.attendee_email and p.attendee_email != order.email:
real_subject = str(subject).format_map(TolerantDict(email_context))
email_context = get_email_context(event_or_subevent=subevent or order.event,
event=order.event,
refund_amount=refund_amount,
position_or_address=p,
order=order, position=p)
try:
order.send_mail(
real_subject, message, email_context,
'pretix.event.order.email.event_canceled',
position=p,
user=user
)
except SendMailException:
logger.exception('Order canceled email could not be sent to attendee')
@app.task(base=ProfiledEventTask, bind=True, max_retries=5, default_retry_delay=1, throws=(OrderError,),
acks_late=True)
def cancel_event(self, event: Event, subevent: int, auto_refund: bool, keep_fee_fixed: str,
keep_fee_percentage: str, keep_fees: list=None, manual_refund: bool=False,
send: bool=False, send_subject: dict=None, send_message: dict=None,
send_waitinglist: bool=False, send_waitinglist_subject: dict={}, send_waitinglist_message: dict={},
user: int=None, refund_as_giftcard: bool=False, giftcard_expires=None, giftcard_conditions=None):
send_subject = LazyI18nString(send_subject)
send_message = LazyI18nString(send_message)
send_waitinglist_subject = LazyI18nString(send_waitinglist_subject)
send_waitinglist_message = LazyI18nString(send_waitinglist_message)
if user:
user = User.objects.get(pk=user)
s = OrderPosition.objects.filter(
order=OuterRef('pk')
).order_by().values('order').annotate(k=Count('id')).values('k')
orders_to_cancel = event.orders.annotate(pcnt=Subquery(s, output_field=IntegerField())).filter(
status__in=[Order.STATUS_PAID, Order.STATUS_PENDING, Order.STATUS_EXPIRED],
pcnt__gt=0
).all()
if subevent:
subevent = event.subevents.get(pk=subevent)
has_subevent = OrderPosition.objects.filter(order_id=OuterRef('pk')).filter(
subevent=subevent
)
has_other_subevent = OrderPosition.objects.filter(order_id=OuterRef('pk')).exclude(
subevent=subevent
)
orders_to_change = orders_to_cancel.annotate(
has_subevent=Exists(has_subevent),
has_other_subevent=Exists(has_other_subevent),
).filter(
has_subevent=True, has_other_subevent=True
)
orders_to_cancel = orders_to_cancel.annotate(
has_subevent=Exists(has_subevent),
has_other_subevent=Exists(has_other_subevent),
).filter(
has_subevent=True, has_other_subevent=False
)
subevent.log_action(
'pretix.subevent.canceled', user=user,
)
subevent.active = False
subevent.save(update_fields=['active'])
subevent.log_action(
'pretix.subevent.changed', user=user, data={'active': False, '_source': 'cancel_event'}
)
else:
orders_to_change = event.orders.none()
event.log_action(
'pretix.event.canceled', user=user,
)
for i in event.items.filter(active=True):
i.active = False
i.save(update_fields=['active'])
i.log_action(
'pretix.event.item.changed', user=user, data={'active': False, '_source': 'cancel_event'}
)
failed = 0
for o in orders_to_cancel.only('id', 'total'):
try:
fee = Decimal('0.00')
fee_sum = Decimal('0.00')
keep_fee_objects = []
if keep_fees:
for f in o.fees.all():
if f.fee_type in keep_fees:
fee += f.value
keep_fee_objects.append(f)
fee_sum += f.value
if keep_fee_percentage:
fee += Decimal(keep_fee_percentage) / Decimal('100.00') * (o.total - fee_sum)
if keep_fee_fixed:
fee += Decimal(keep_fee_fixed)
fee = round_decimal(min(fee, o.payment_refund_sum), event.currency)
_cancel_order(o.pk, user, send_mail=False, cancellation_fee=fee, keep_fees=keep_fee_objects)
refund_amount = o.payment_refund_sum
try:
if auto_refund:
_try_auto_refund(o.pk, manual_refund=manual_refund, allow_partial=True,
source=OrderRefund.REFUND_SOURCE_ADMIN, refund_as_giftcard=refund_as_giftcard,
giftcard_expires=giftcard_expires, giftcard_conditions=giftcard_conditions)
finally:
if send:
_send_mail(o, send_subject, send_message, subevent, refund_amount, user, o.positions.all())
except LockTimeoutException:
logger.exception("Could not cancel order")
failed += 1
except OrderError:
logger.exception("Could not cancel order")
failed += 1
for o in orders_to_change.values_list('id', flat=True):
with transaction.atomic():
o = event.orders.select_for_update().get(pk=o)
total = Decimal('0.00')
positions = []
ocm = OrderChangeManager(o, user=user, notify=False)
for p in o.positions.all():
if p.subevent == subevent:
total += p.price
ocm.cancel(p)
positions.append(p)
fee = Decimal('0.00')
if keep_fee_fixed:
fee += Decimal(keep_fee_fixed)
if keep_fee_percentage:
fee += Decimal(keep_fee_percentage) / Decimal('100.00') * total
fee = round_decimal(min(fee, o.payment_refund_sum), event.currency)
if fee:
f = OrderFee(
fee_type=OrderFee.FEE_TYPE_CANCELLATION,
value=fee,
order=o,
tax_rule=o.event.settings.tax_rate_default,
)
f._calculate_tax()
ocm.add_fee(f)
ocm.commit()
refund_amount = o.payment_refund_sum - o.total
if auto_refund:
_try_auto_refund(o.pk, manual_refund=manual_refund, allow_partial=True,
source=OrderRefund.REFUND_SOURCE_ADMIN, refund_as_giftcard=refund_as_giftcard,
giftcard_expires=giftcard_expires, giftcard_conditions=giftcard_conditions)
if send:
_send_mail(o, send_subject, send_message, subevent, refund_amount, user, positions)
for wle in event.waitinglistentries.filter(subevent=subevent, voucher__isnull=True):
_send_wle_mail(wle, send_waitinglist_subject, send_waitinglist_message, subevent)
return failed
| true | true |
f7f5c8ac06118d9f67dc1d935d4f8a7d066d278d | 5,185 | py | Python | test/functional/test_framework/netutil.py | chx381/platopia | 563c616db768f813aa4482d39d8ed1d8aacaad4f | [
"MIT"
] | 5 | 2018-07-21T15:58:30.000Z | 2019-04-25T01:45:36.000Z | test/functional/test_framework/netutil.py | chx381/platopia | 563c616db768f813aa4482d39d8ed1d8aacaad4f | [
"MIT"
] | null | null | null | test/functional/test_framework/netutil.py | chx381/platopia | 563c616db768f813aa4482d39d8ed1d8aacaad4f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import unhexlify, hexlify
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
# TCP connection states as two-digit hex strings, matching the "st" column of
# /proc/net/tcp (Linux kernel enum in include/net/tcp_states.h).
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
    """Return the socket inode numbers held open by process *pid*.

    Reads /proc/<pid>/fd; socket descriptors read back as "socket:[<inode>]".
    """
    fd_dir = '/proc/%i/fd' % pid
    links = (os.readlink(os.path.join(fd_dir, entry)) for entry in os.listdir(fd_dir))
    # Slice out the inode number between "socket:[" (8 chars) and the "]".
    return [int(link[8:-1]) for link in links if link.startswith('socket:')]
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
    '''
    Function to return a list with status of tcp connections at linux systems
    To get pid of all network process running on system, you must run this script
    as superuser

    Each entry is [slot_id, (local_host, local_port), (remote_host, remote_port),
    state_hex, inode], parsed from /proc/net/<typ> ("tcp" or "tcp6").
    '''
    with open('/proc/net/'+typ,'r',encoding='utf8') as f:
        content = f.readlines()
        # First line is the column-header row.
        content.pop(0)
    result = []
    for line in content:
        line_array = _remove_empty(line.split(' '))     # Split lines and remove empty spaces.
        tcp_id = line_array[0]
        l_addr = _convert_ip_port(line_array[1])
        r_addr = _convert_ip_port(line_array[2])
        state = line_array[3]
        inode = int(line_array[9])  # Need the inode to match with process pid.
        nline = [tcp_id, l_addr, r_addr, state, inode]
        result.append(nline)
    return result
def get_bind_addrs(pid):
    """Return the (host, port) tuples on which process *pid* is listening."""
    inodes = set(get_socket_inodes(pid))
    # A connection belongs to the process when its inode matches one of the
    # process's socket fds; only LISTEN-state entries are bind addresses.
    return [conn[1]
            for conn in netstat('tcp') + netstat('tcp6')
            if conn[3] == STATE_LISTEN and conn[4] in inodes]
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
    '''
    Return all interfaces that are up, as (name_bytes, ipv4_string) tuples.

    Uses the Linux SIOCGIFCONF ioctl; the buffer is grown until the kernel
    reports it did not fill it completely.
    '''
    is_64bits = sys.maxsize > 2**32
    struct_size = 40 if is_64bits else 32
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    max_possible = 8  # initial value
    while True:
        buf_len = max_possible * struct_size
        names = array.array('B', b'\0' * buf_len)
        outbytes = struct.unpack('iL', fcntl.ioctl(
            s.fileno(),
            0x8912,  # SIOCGIFCONF
            struct.pack('iL', buf_len, names.buffer_info()[0])
        ))[0]
        if outbytes == buf_len:
            # Buffer may have been too small; double it and retry.
            max_possible *= 2
        else:
            break
    # BUG FIX: the probe socket was never closed (fd leak).
    s.close()
    # BUG FIX: array.tostring() was removed in Python 3.9; tobytes() is the
    # exact replacement.
    namestr = names.tobytes()
    return [(namestr[i:i+16].split(b'\0', 1)[0],
             socket.inet_ntoa(namestr[i+20:i+24]))
            for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
    '''
    Convert a string IPv4 or IPv6 address to the binary hex form returned by
    get_bind_addrs.
    Deliberately naive: not every IPv6 spelling is handled.
    '''
    if '.' in addr:  # dotted-quad IPv4
        octets = [int(part) for part in addr.split('.')]
    elif ':' in addr:  # colon-separated IPv6
        halves = [[], []]  # bytes before and after a '::' gap
        side = 0
        groups = addr.split(':')
        for idx, group in enumerate(groups):
            if group == '':
                if idx in (0, len(groups) - 1):
                    continue  # empty component from a leading/trailing '::'
                side += 1  # '::' switches from prefix to suffix
                assert(side < 2)
            else:
                word = int(group, 16)  # each group encodes two bytes
                halves[side].append(word >> 8)
                halves[side].append(word & 0xff)
        pad = 16 - len(halves[0]) - len(halves[1])
        assert((side == 0 and pad == 0) or (side == 1 and pad > 0))
        octets = halves[0] + ([0] * pad) + halves[1]
    else:
        raise ValueError('Could not parse address %s' % addr)
    return hexlify(bytearray(octets)).decode('ascii')
def test_ipv6_local():
    '''
    Check for (local) IPv6 support.
    '''
    import socket
    # A datagram socket never performs a handshake; connect() merely verifies
    # that a route to the IPv6 loopback exists.
    try:
        probe = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        probe.connect(('::1', 0))
    except socket.error:
        return False
    return True
| 33.237179 | 114 | 0.584957 |
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import unhexlify, hexlify
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
with open('/proc/net/'+typ,'r',encoding='utf8') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' '))
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9])
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
def all_interfaces():
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8
while True:
bytes = max_possible * struct_size
names = array.array('B', b'\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912,
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split(b'\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
if '.' in addr:
addr = [int(x) for x in addr.split('.')]
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return hexlify(bytearray(addr)).decode('ascii')
def test_ipv6_local():
import socket
have_ipv6 = True
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 0))
except socket.error:
have_ipv6 = False
return have_ipv6
| true | true |
f7f5c95be93321c513cc50e7770d445c3df08d6e | 13,292 | py | Python | VirtualBox-5.0.0/src/VBox/ValidationKit/testboxscript/testboxcommand.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | 1 | 2015-04-30T14:18:45.000Z | 2015-04-30T14:18:45.000Z | VirtualBox-5.0.0/src/VBox/ValidationKit/testboxscript/testboxcommand.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | null | null | null | VirtualBox-5.0.0/src/VBox/ValidationKit/testboxscript/testboxcommand.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# $Id: testboxcommand.py $
"""
TestBox Script - Command Processor.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Standard python imports.
import os;
import sys;
import threading;
# Validation Kit imports.
from common import constants;
from common import utils, webutils;
import testboxcommons;
from testboxcommons import TestBoxException;
from testboxscript import TBS_EXITCODE_NEED_UPGRADE;
from testboxupgrade import upgradeFromZip;
from testboxtasks import TestBoxExecTask, TestBoxCleanupTask, TestBoxTestDriverTask;
# Figure where we are.
# __file__ is undefined in some execution environments (e.g. when the script
# is exec()'d), so fall back to the script path from argv.
try: __file__
except: __file__ = sys.argv[0];
g_ksTestScriptDir = os.path.dirname(os.path.abspath(__file__));
class TestBoxCommand(object):
    """
    Implementation of Test Box command.
    """

    ## The time to wait on the current task to abort.
    kcSecStopTimeout = 360
    ## The time to wait on the current task to abort before rebooting.
    kcSecStopBeforeRebootTimeout = 360

    def __init__(self, oTestBoxScript):
        """
        Class instance init
        """
        self._oTestBoxScript = oTestBoxScript;
        self._oCurTaskLock = threading.RLock();
        self._oCurTask = None;

        # List of available commands and their handlers
        self._dfnCommands = \
        {
            constants.tbresp.CMD_IDLE: self._cmdIdle,
            constants.tbresp.CMD_WAIT: self._cmdWait,
            constants.tbresp.CMD_EXEC: self._cmdExec,
            constants.tbresp.CMD_ABORT: self._cmdAbort,
            constants.tbresp.CMD_REBOOT: self._cmdReboot,
            constants.tbresp.CMD_UPGRADE: self._cmdUpgrade,
            constants.tbresp.CMD_UPGRADE_AND_REBOOT: self._cmdUpgradeAndReboot,
            constants.tbresp.CMD_SPECIAL: self._cmdSpecial,
        }

    def _cmdIdle(self, oResponse, oConnection):
        """
        Idle response, no ACK.
        """
        oResponse.checkParameterCount(1);

        # The dispatch loop will delay for us, so nothing to do here.
        _ = oConnection; # Leave the connection open.
        return True;

    def _cmdWait(self, oResponse, oConnection):
        """
        Gang scheduling wait response, no ACK.
        """
        oResponse.checkParameterCount(1);

        # The dispatch loop will delay for us, so nothing to do here.
        _ = oConnection; # Leave the connection open.
        return True;

    def _cmdExec(self, oResponse, oConnection):
        """
        Execute incoming command
        """

        # Check if required parameters given and make a little sense.
        idResult       = oResponse.getIntChecked(   constants.tbresp.EXEC_PARAM_RESULT_ID, 1);
        sScriptZips    = oResponse.getStringChecked(constants.tbresp.EXEC_PARAM_SCRIPT_ZIPS);
        sScriptCmdLine = oResponse.getStringChecked(constants.tbresp.EXEC_PARAM_SCRIPT_CMD_LINE);
        cSecTimeout    = oResponse.getIntChecked(   constants.tbresp.EXEC_PARAM_TIMEOUT, 30);
        oResponse.checkParameterCount(5);

        sScriptFile = utils.argsGetFirst(sScriptCmdLine);
        if sScriptFile is None:
            raise TestBoxException('Bad script command line: "%s"' % (sScriptCmdLine,));
        if len(os.path.basename(sScriptFile)) < len('t.py'):
            raise TestBoxException('Script file name too short: "%s"' % (sScriptFile,));
        if len(sScriptZips) < len('x.zip'):
            raise TestBoxException('Script zip name too short: "%s"' % (sScriptFile,));

        # One task at the time.
        if self.isRunning():
            raise TestBoxException('Already running other command');

        # Don't bother running the task without the shares mounted.
        self._oTestBoxScript.mountShares(); # Raises exception on failure.

        # Kick off the task and ACK the command.
        self._oCurTaskLock.acquire();
        try:
            self._oCurTask = TestBoxExecTask(self._oTestBoxScript, idResult = idResult, sScriptZips = sScriptZips,
                                             sScriptCmdLine = sScriptCmdLine, cSecTimeout = cSecTimeout);
        finally:
            self._oCurTaskLock.release();
        oConnection.sendAckAndClose(constants.tbresp.CMD_EXEC);
        return True;

    def _cmdAbort(self, oResponse, oConnection):
        """
        Abort background task
        """
        oResponse.checkParameterCount(1);
        oConnection.sendAck(constants.tbresp.CMD_ABORT);

        oCurTask = self._getCurTask();
        if oCurTask is not None:
            oCurTask.terminate();
            oCurTask.flushLogOnConnection(oConnection);
            oConnection.close();
            oCurTask.wait(self.kcSecStopTimeout);

        return True;

    def doReboot(self):
        """
        Worker common to _cmdReboot and _doUpgrade that performs a system reboot.
        """
        # !! Not more exceptions beyond this point !!
        testboxcommons.log('Rebooting');

        # Stop anything that might be executing at this point.
        oCurTask = self._getCurTask();
        if oCurTask is not None:
            oCurTask.terminate();
            oCurTask.wait(self.kcSecStopBeforeRebootTimeout);

        # Invoke shutdown command line utility.
        sOs = utils.getHostOs();
        asCmd2 = None;
        if sOs == 'win':
            asCmd = ['shutdown', '/r', '/t', '0'];
        elif sOs == 'os2':
            asCmd = ['setboot', '/B'];
        elif sOs in ('solaris',):
            asCmd = ['/usr/sbin/reboot', '-p'];
            asCmd2 = ['/usr/sbin/reboot']; # Hack! S10 doesn't have -p, but don't know how to reliably detect S10.
        else:
            asCmd = ['/sbin/shutdown', '-r', 'now'];
        try:
            utils.sudoProcessOutputChecked(asCmd);
        except Exception as oXcpt:
            if asCmd2 is not None:
                try:
                    utils.sudoProcessOutputChecked(asCmd2);
                except Exception as oXcpt2:
                    testboxcommons.log('Error executing reboot command "%s" as well as "%s": %s' % (asCmd, asCmd2, oXcpt2));
                    return False;
            testboxcommons.log('Error executing reboot command "%s": %s' % (asCmd, oXcpt));
            return False;

        # Quit the script.
        while True:
            sys.exit(32);
        return True;

    def _cmdReboot(self, oResponse, oConnection):
        """
        Reboot Test Box
        """
        oResponse.checkParameterCount(1);
        oConnection.sendAckAndClose(constants.tbresp.CMD_REBOOT);
        return self.doReboot();

    def _doUpgrade(self, oResponse, oConnection, fReboot):
        """
        Common worker for _cmdUpgrade and _cmdUpgradeAndReboot.
        Will sys.exit on success!
        """
        #
        # The server specifies a ZIP archive with the new scripts. It's ASSUMED
        # that the zip is of selected files at g_ksValidationKitDir in SVN. It's
        # further ASSUMED that we're executing from
        #
        sZipUrl = oResponse.getStringChecked(constants.tbresp.UPGRADE_PARAM_URL)
        oResponse.checkParameterCount(2);

        if utils.isRunningFromCheckout():
            raise TestBoxException('Cannot upgrade when running from the tree!');
        oConnection.sendAckAndClose(constants.tbresp.CMD_UPGRADE_AND_REBOOT if fReboot else constants.tbresp.CMD_UPGRADE);

        testboxcommons.log('Upgrading...');

        #
        # Download the file and install it.
        #
        sDstFile = os.path.join(g_ksTestScriptDir, 'VBoxTestBoxScript.zip');
        if os.path.exists(sDstFile):
            os.unlink(sDstFile);
        fRc = webutils.downloadFile(sZipUrl, sDstFile, self._oTestBoxScript.getPathBuilds(), testboxcommons.log);
        if fRc is not True:
            return False;

        if upgradeFromZip(sDstFile) is not True:
            return False;

        #
        # Restart the system or the script (we have a parent script which
        # respawns us when we quit).
        #
        if fReboot:
            self.doReboot();
        sys.exit(TBS_EXITCODE_NEED_UPGRADE);

    def _cmdUpgrade(self, oResponse, oConnection):
        """
        Upgrade Test Box Script
        """
        return self._doUpgrade(oResponse, oConnection, False);

    def _cmdUpgradeAndReboot(self, oResponse, oConnection):
        """
        Upgrade Test Box Script
        """
        return self._doUpgrade(oResponse, oConnection, True);

    def _cmdSpecial(self, oResponse, oConnection):
        """
        Reserved for future fun.
        """
        oConnection.sendReplyAndClose(constants.tbreq.COMMAND_NOTSUP, constants.tbresp.CMD_SPECIAL);
        testboxcommons.log('Special command %s not supported...' % (oResponse,));
        return False;

    def handleCommand(self, oResponse, oConnection):
        """
        Handles a command from the test manager.

        Some commands will close the connection, others (generally the simple
        ones) wont, leaving the caller the option to use it for log flushing.

        Returns success indicator.
        Raises no exception.
        """
        try:
            sCmdName = oResponse.getStringChecked(constants.tbresp.ALL_PARAM_RESULT);
        except Exception:
            oConnection.close();
            return False;

        # Do we know the command?
        fRc = False;
        if sCmdName in self._dfnCommands:
            testboxcommons.log(sCmdName);
            try:
                # Execute the handler.
                fRc = self._dfnCommands[sCmdName](oResponse, oConnection)
            except Exception as oXcpt:
                # NACK the command if an exception is raised during parameter validation.
                testboxcommons.log1Xcpt('Exception executing "%s": %s' % (sCmdName, oXcpt));
                if oConnection.isConnected():
                    try:
                        oConnection.sendReplyAndClose(constants.tbreq.COMMAND_NACK, sCmdName);
                    except Exception as oXcpt2:
                        testboxcommons.log('Failed to NACK "%s": %s' % (sCmdName, oXcpt2));
        elif sCmdName in [constants.tbresp.STATUS_DEAD, constants.tbresp.STATUS_NACK]:
            testboxcommons.log('Received status in stead of command: %s' % (sCmdName, ));
        else:
            # NOTSUP the unknown command.
            testboxcommons.log('Received unknown command: %s' % (sCmdName, ));
            try:
                oConnection.sendReplyAndClose(constants.tbreq.COMMAND_NOTSUP, sCmdName);
            except Exception as oXcpt:
                testboxcommons.log('Failed to NOTSUP "%s": %s' % (sCmdName, oXcpt));
        return fRc;

    def resumeIncompleteCommand(self):
        """
        Resumes an incomplete command at startup.

        The EXEC commands saves essential state information in the scratch area
        so we can resume them in case the testbox panics or is rebooted.
        Current "resume" means doing cleanups, but we may need to implement
        test scenarios involving rebooting the testbox later.

        Returns (idTestBox, sTestBoxName, True) if a command was resumed,
        otherwise (-1, '', False). Raises no exceptions.
        """
        # Deliberate best-effort: any failure (including a missing/corrupt
        # scratch state) simply means there is nothing to resume.
        try:
            oTask = TestBoxCleanupTask(self._oTestBoxScript);
        except:
            return (-1, '', False);

        self._oCurTaskLock.acquire();
        self._oCurTask = oTask;
        self._oCurTaskLock.release();

        return (oTask.idTestBox, oTask.sTestBoxName, True);

    def isRunning(self):
        """
        Check if we're running a task or not.
        """
        oCurTask = self._getCurTask();
        return oCurTask is not None and oCurTask.isRunning();

    def flushLogOnConnection(self, oGivenConnection):
        """
        Flushes the log of any running task with a log buffer.
        """
        oCurTask = self._getCurTask();
        if oCurTask is not None and isinstance(oCurTask, TestBoxTestDriverTask):
            return oCurTask.flushLogOnConnection(oGivenConnection);
        return None;

    def _getCurTask(self):
        """ Gets the current task in a paranoidly safe manny. """
        self._oCurTaskLock.acquire();
        oCurTask = self._oCurTask;
        self._oCurTaskLock.release();
        return oCurTask;
| 37.232493 | 123 | 0.623834 |
"""
TestBox Script - Command Processor.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
import os;
import sys;
import threading;
from common import constants;
from common import utils, webutils;
import testboxcommons;
from testboxcommons import TestBoxException;
from testboxscript import TBS_EXITCODE_NEED_UPGRADE;
from testboxupgrade import upgradeFromZip;
from testboxtasks import TestBoxExecTask, TestBoxCleanupTask, TestBoxTestDriverTask;
try: __file__
except: __file__ = sys.argv[0];
g_ksTestScriptDir = os.path.dirname(os.path.abspath(__file__));
class TestBoxCommand(object):
"""
Implementation of Test Box command.
"""
"""
Class instance init
"""
self._oTestBoxScript = oTestBoxScript;
self._oCurTaskLock = threading.RLock();
self._oCurTask = None;
self._dfnCommands = \
{
constants.tbresp.CMD_IDLE: self._cmdIdle,
constants.tbresp.CMD_WAIT: self._cmdWait,
constants.tbresp.CMD_EXEC: self._cmdExec,
constants.tbresp.CMD_ABORT: self._cmdAbort,
constants.tbresp.CMD_REBOOT: self._cmdReboot,
constants.tbresp.CMD_UPGRADE: self._cmdUpgrade,
constants.tbresp.CMD_UPGRADE_AND_REBOOT: self._cmdUpgradeAndReboot,
constants.tbresp.CMD_SPECIAL: self._cmdSpecial,
}
def _cmdIdle(self, oResponse, oConnection):
"""
Idle response, no ACK.
"""
oResponse.checkParameterCount(1);
_ = oConnection;
return True;
def _cmdWait(self, oResponse, oConnection):
"""
Gang scheduling wait response, no ACK.
"""
oResponse.checkParameterCount(1);
_ = oConnection;
return True;
def _cmdExec(self, oResponse, oConnection):
"""
Execute incoming command
"""
idResult = oResponse.getIntChecked( constants.tbresp.EXEC_PARAM_RESULT_ID, 1);
sScriptZips = oResponse.getStringChecked(constants.tbresp.EXEC_PARAM_SCRIPT_ZIPS);
sScriptCmdLine = oResponse.getStringChecked(constants.tbresp.EXEC_PARAM_SCRIPT_CMD_LINE);
cSecTimeout = oResponse.getIntChecked( constants.tbresp.EXEC_PARAM_TIMEOUT, 30);
oResponse.checkParameterCount(5);
sScriptFile = utils.argsGetFirst(sScriptCmdLine);
if sScriptFile is None:
raise TestBoxException('Bad script command line: "%s"' % (sScriptCmdLine,));
if len(os.path.basename(sScriptFile)) < len('t.py'):
raise TestBoxException('Script file name too short: "%s"' % (sScriptFile,));
if len(sScriptZips) < len('x.zip'):
raise TestBoxException('Script zip name too short: "%s"' % (sScriptFile,));
if self.isRunning():
raise TestBoxException('Already running other command');
self._oTestBoxScript.mountShares(); # Raises exception on failure.
# Kick off the task and ACK the command.
self._oCurTaskLock.acquire();
try:
self._oCurTask = TestBoxExecTask(self._oTestBoxScript, idResult = idResult, sScriptZips = sScriptZips,
sScriptCmdLine = sScriptCmdLine, cSecTimeout = cSecTimeout);
finally:
self._oCurTaskLock.release();
oConnection.sendAckAndClose(constants.tbresp.CMD_EXEC);
return True;
def _cmdAbort(self, oResponse, oConnection):
"""
Abort background task
"""
oResponse.checkParameterCount(1);
oConnection.sendAck(constants.tbresp.CMD_ABORT);
oCurTask = self._getCurTask();
if oCurTask is not None:
oCurTask.terminate();
oCurTask.flushLogOnConnection(oConnection);
oConnection.close();
oCurTask.wait(self.kcSecStopTimeout);
return True;
def doReboot(self):
"""
Worker common to _cmdReboot and _doUpgrade that performs a system reboot.
"""
# !! Not more exceptions beyond this point !!
testboxcommons.log('Rebooting');
# Stop anything that might be executing at this point.
oCurTask = self._getCurTask();
if oCurTask is not None:
oCurTask.terminate();
oCurTask.wait(self.kcSecStopBeforeRebootTimeout);
# Invoke shutdown command line utility.
sOs = utils.getHostOs();
asCmd2 = None;
if sOs == 'win':
asCmd = ['shutdown', '/r', '/t', '0'];
elif sOs == 'os2':
asCmd = ['setboot', '/B'];
elif sOs in ('solaris',):
asCmd = ['/usr/sbin/reboot', '-p'];
asCmd2 = ['/usr/sbin/reboot']; # Hack! S10 doesn't have -p, but don't know how to reliably detect S10.
else:
asCmd = ['/sbin/shutdown', '-r', 'now'];
try:
utils.sudoProcessOutputChecked(asCmd);
except Exception, oXcpt:
if asCmd2 is not None:
try:
utils.sudoProcessOutputChecked(asCmd2);
except Exception, oXcpt:
testboxcommons.log('Error executing reboot command "%s" as well as "%s": %s' % (asCmd, asCmd2, oXcpt));
return False;
testboxcommons.log('Error executing reboot command "%s": %s' % (asCmd, oXcpt));
return False;
# Quit the script.
while True:
sys.exit(32);
return True;
def _cmdReboot(self, oResponse, oConnection):
"""
Reboot Test Box
"""
oResponse.checkParameterCount(1);
oConnection.sendAckAndClose(constants.tbresp.CMD_REBOOT);
return self.doReboot();
def _doUpgrade(self, oResponse, oConnection, fReboot):
"""
Common worker for _cmdUpgrade and _cmdUpgradeAndReboot.
Will sys.exit on success!
"""
#
# The server specifies a ZIP archive with the new scripts. It's ASSUMED
# further ASSUMED that we're executing from
sZipUrl = oResponse.getStringChecked(constants.tbresp.UPGRADE_PARAM_URL)
oResponse.checkParameterCount(2);
if utils.isRunningFromCheckout():
raise TestBoxException('Cannot upgrade when running from the tree!');
oConnection.sendAckAndClose(constants.tbresp.CMD_UPGRADE_AND_REBOOT if fReboot else constants.tbresp.CMD_UPGRADE);
testboxcommons.log('Upgrading...');
sDstFile = os.path.join(g_ksTestScriptDir, 'VBoxTestBoxScript.zip');
if os.path.exists(sDstFile):
os.unlink(sDstFile);
fRc = webutils.downloadFile(sZipUrl, sDstFile, self._oTestBoxScript.getPathBuilds(), testboxcommons.log);
if fRc is not True:
return False;
if upgradeFromZip(sDstFile) is not True:
return False;
if fReboot:
self.doReboot();
sys.exit(TBS_EXITCODE_NEED_UPGRADE);
def _cmdUpgrade(self, oResponse, oConnection):
"""
Upgrade Test Box Script
"""
return self._doUpgrade(oResponse, oConnection, False);
def _cmdUpgradeAndReboot(self, oResponse, oConnection):
"""
Upgrade Test Box Script
"""
return self._doUpgrade(oResponse, oConnection, True);
def _cmdSpecial(self, oResponse, oConnection):
"""
Reserved for future fun.
"""
oConnection.sendReplyAndClose(constants.tbreq.COMMAND_NOTSUP, constants.tbresp.CMD_SPECIAL);
testboxcommons.log('Special command %s not supported...' % (oResponse,));
return False;
def handleCommand(self, oResponse, oConnection):
"""
Handles a command from the test manager.
Some commands will close the connection, others (generally the simple
ones) wont, leaving the caller the option to use it for log flushing.
Returns success indicator.
Raises no exception.
"""
try:
sCmdName = oResponse.getStringChecked(constants.tbresp.ALL_PARAM_RESULT);
except Exception, oXcpt:
oConnection.close();
return False;
fRc = False;
if sCmdName in self._dfnCommands:
testboxcommons.log(sCmdName);
try:
fRc = self._dfnCommands[sCmdName](oResponse, oConnection)
except Exception, oXcpt:
testboxcommons.log1Xcpt('Exception executing "%s": %s' % (sCmdName, oXcpt));
if oConnection.isConnected():
try:
oConnection.sendReplyAndClose(constants.tbreq.COMMAND_NACK, sCmdName);
except Exception, oXcpt2:
testboxcommons.log('Failed to NACK "%s": %s' % (sCmdName, oXcpt2));
elif sCmdName in [constants.tbresp.STATUS_DEAD, constants.tbresp.STATUS_NACK]:
testboxcommons.log('Received status in stead of command: %s' % (sCmdName, ));
else:
testboxcommons.log('Received unknown command: %s' % (sCmdName, ));
try:
oConnection.sendReplyAndClose(constants.tbreq.COMMAND_NOTSUP, sCmdName);
except Exception, oXcpt:
testboxcommons.log('Failed to NOTSUP "%s": %s' % (sCmdName, oXcpt));
return fRc;
def resumeIncompleteCommand(self):
"""
Resumes an incomplete command at startup.
The EXEC commands saves essential state information in the scratch area
so we can resume them in case the testbox panics or is rebooted.
Current "resume" means doing cleanups, but we may need to implement
test scenarios involving rebooting the testbox later.
Returns (idTestBox, sTestBoxName, True) if a command was resumed,
otherwise (-1, '', False). Raises no exceptions.
"""
try:
oTask = TestBoxCleanupTask(self._oTestBoxScript);
except:
return (-1, '', False);
self._oCurTaskLock.acquire();
self._oCurTask = oTask;
self._oCurTaskLock.release();
return (oTask.idTestBox, oTask.sTestBoxName, True);
def isRunning(self):
"""
Check if we're running a task or not.
"""
oCurTask = self._getCurTask();
return oCurTask is not None and oCurTask.isRunning();
def flushLogOnConnection(self, oGivenConnection):
"""
Flushes the log of any running task with a log buffer.
"""
oCurTask = self._getCurTask();
if oCurTask is not None and isinstance(oCurTask, TestBoxTestDriverTask):
return oCurTask.flushLogOnConnection(oGivenConnection);
return None;
def _getCurTask(self):
""" Gets the current task in a paranoidly safe manny. """
self._oCurTaskLock.acquire();
oCurTask = self._oCurTask;
self._oCurTaskLock.release();
return oCurTask;
| false | true |
f7f5ca0a0eb98019868528185866f28259e0d733 | 6,116 | py | Python | src/transformers/models/splinter/configuration_splinter.py | liminghao1630/transformers | 207594be81b8e5a8589c8b11c3b236924555d806 | [
"Apache-2.0"
] | 3 | 2021-12-27T20:13:38.000Z | 2021-12-28T14:11:20.000Z | src/transformers/models/splinter/configuration_splinter.py | liminghao1630/transformers | 207594be81b8e5a8589c8b11c3b236924555d806 | [
"Apache-2.0"
] | 2 | 2022-01-06T05:40:05.000Z | 2022-01-06T15:12:29.000Z | src/transformers/models/splinter/configuration_splinter.py | liminghao1630/transformers | 207594be81b8e5a8589c8b11c3b236924555d806 | [
"Apache-2.0"
] | 3 | 2022-01-06T04:44:13.000Z | 2022-02-18T23:35:21.000Z | # coding=utf-8
# Copyright 2021 Tel AViv University, AllenAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Splinter model configuration """
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
logger = logging.get_logger(__name__)
# Maps released Splinter checkpoint identifiers to the URL of their hosted config.json.
SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tau/splinter-base": "https://huggingface.co/tau/splinter-base/resolve/main/config.json",
    "tau/splinter-base-qass": "https://huggingface.co/tau/splinter-base-qass/resolve/main/config.json",
    "tau/splinter-large": "https://huggingface.co/tau/splinter-large/resolve/main/config.json",
    "tau/splinter-large-qass": "https://huggingface.co/tau/splinter-large-qass/resolve/main/config.json",
    # See all Splinter models at https://huggingface.co/models?filter=splinter
}
class SplinterConfig(PretrainedConfig):
    r"""
    Configuration class for [`SplinterModel`].

    Holds the hyper-parameters that define a Splinter architecture; instantiating it
    with no arguments yields a configuration similar to the
    [tau/splinter-base](https://huggingface.co/tau/splinter-base) checkpoint.
    Inherits from [`PretrainedConfig`], whose documentation covers the generic options
    and how configurations control model outputs.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Number of distinct tokens representable by the `inputs_ids` passed when
            calling [`SplinterModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads per encoder attention layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            Non-linear activation in the encoder and pooler; as strings, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            Dropout probability for all fully connected layers in the embeddings,
            encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            Dropout ratio applied to the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            Maximum sequence length the model might ever be used with; typically set
            large just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            Vocabulary size of the `token_type_ids` passed when calling
            [`SplinterModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated_normal_initializer for all weight
            matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            Epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (only
            relevant if `config.is_decoder=True`).
        question_token_id (`int`, *optional*, defaults to 104):
            The id of the `[QUESTION]` token.

    Example:

    ```python
    >>> from transformers import SplinterModel, SplinterConfig

    >>> # Build a tau/splinter-base style configuration and a model from it
    >>> configuration = SplinterConfig()
    >>> model = SplinterModel(configuration)

    >>> # Read the configuration back from the model
    >>> configuration = model.config
    ```"""

    model_type = "splinter"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        question_token_id=104,
        **kwargs
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # Architecture dimensions.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        # Training / regularization knobs.
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Runtime behavior and special tokens.
        self.use_cache = use_cache
        self.question_token_id = question_token_id
| 48.15748 | 172 | 0.699477 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tau/splinter-base": "https://huggingface.co/tau/splinter-base/resolve/main/config.json",
"tau/splinter-base-qass": "https://huggingface.co/tau/splinter-base-qass/resolve/main/config.json",
"tau/splinter-large": "https://huggingface.co/tau/splinter-large/resolve/main/config.json",
"tau/splinter-large-qass": "https://huggingface.co/tau/splinter-large-qass/resolve/main/config.json",
}
class SplinterConfig(PretrainedConfig):
    """Configuration class for Splinter models.

    Stores the hyperparameters that define a Splinter encoder; the defaults
    correspond to the ``tau/splinter-base`` checkpoint. Additional keyword
    arguments are forwarded to :class:`PretrainedConfig`.
    """

    model_type = "splinter"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        question_token_id=104,
        **kwargs
    ):
        # pad_token_id is consumed by the base class; everything else is stored here.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # Id of Splinter's special question token — presumably [QUESTION]; TODO confirm.
        self.question_token_id = question_token_id
f7f5cd52b886a2eb7892ff45872ad88984c8d603 | 42,384 | py | Python | mbcd/models/bnn.py | Valerio-Colombo/mbcd | 8bb8adce78d303e991d8afdb3fbc045970297f83 | [
"MIT"
] | null | null | null | mbcd/models/bnn.py | Valerio-Colombo/mbcd | 8bb8adce78d303e991d8afdb3fbc045970297f83 | [
"MIT"
] | null | null | null | mbcd/models/bnn.py | Valerio-Colombo/mbcd | 8bb8adce78d303e991d8afdb3fbc045970297f83 | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import time
import pdb
import itertools
from collections import OrderedDict
import tensorflow as tf
import numpy as np
from tqdm import trange
from scipy.io import savemat, loadmat
from mbcd.models.utils import get_required_argument, TensorStandardScaler
from mbcd.models.fc import FC
from mbcd.utils.logger import Progress, Silent
from sklearn.preprocessing import PolynomialFeatures
# Keep printed numpy arrays (e.g. the sorted holdout losses below) short in console logs.
np.set_printoptions(precision=4)
class BNN:
"""Neural network models which model aleatoric uncertainty (and possibly epistemic uncertainty
with ensembling).
Code adapted from https://github.com/JannerM/mbpo/blob/master/mbpo/models/bnn.py
"""
    def __init__(self, params):
        """Initializes a class instance.

        Arguments:
            params (DotMap): A dotmap of model parameters.
                .name (str): Model name, used for logging/use in variable scopes.
                    Warning: Models with the same name will overwrite each other.
                .num_networks (int): (optional) The number of networks in the ensemble. Defaults to 1.
                    Ignored if model is being loaded.
                .num_elites (int): Number of best-performing networks used for predictions.
                .model_dir (str/None): (optional) Path to directory from which model will be loaded, and
                    saved by default. Defaults to None.
                .load_model (bool): (optional) If True, model will be loaded from the model directory,
                    assuming that the files are generated by a model of the same name. Defaults to False.
                .sess (tf.Session/None): The session that this model will use.
                    If None, creates a session with its own associated graph. Defaults to None.
        """
        self.name = get_required_argument(params, 'name', 'Must provide name.')
        self.model_dir = params.get('model_dir', None)

        print('[ BNN ] Initializing model: {} | {} networks | {} elites'.format(params['name'], params['num_networks'],
                                                                                params['num_elites']))
        if params.get('sess', None) is None:
            # config = tf.ConfigProto(device_count = {'GPU': 0})
            config = tf.ConfigProto()
            # Grow GPU memory allocation on demand instead of grabbing it all up front.
            config.gpu_options.allow_growth = True
            self._sess = tf.Session(config=config)
        else:
            self._sess = params.get('sess')

        # Instance variables
        self.finalized = False
        self.layers, self.max_logvar, self.min_logvar = [], None, None
        self.decays, self.optvars, self.nonoptvars = [], [], []
        self.end_act, self.end_act_name = None, None
        self.scaler = None

        # Training objects (graph nodes are created later, in finalize()).
        self.train_loss = None
        self.accum_grads = None
        self.zero_ops = None
        self.accum_ops = None
        self.scale_coeff = None
        self.learning_rate = None
        self.optimizer = None
        self.sy_train_in, self.sy_train_targ = None, None
        self.grads, self.graph_vars = None, None
        self.train_op, self.train_op_rescaled, self.mse_loss = None, None, None

        # Prediction objects (also created in finalize()).
        self.sy_pred_in2d, self.sy_pred_mean2d_fac, self.sy_pred_var2d_fac = None, None, None
        self.sy_pred_mean2d, self.sy_pred_var2d = None, None
        self.sy_pred_in3d, self.sy_pred_mean3d_fac, self.sy_pred_var3d_fac = None, None, None

        if params.get('load_model', False):
            if self.model_dir is None:
                raise ValueError("Cannot load model without providing model directory.")
            self._load_structure()
            # Ensemble size is recovered from the loaded layers, not from params.
            self.num_nets, self.model_loaded = self.layers[0].get_ensemble_size(), True
            print("Model loaded from %s." % self.model_dir)
            self.num_elites = params['num_elites']
        else:
            self.num_nets = params.get('num_networks', 1)
            self.num_elites = params['num_elites']  # params.get('num_elites', 1)
            self.model_loaded = False
        if self.num_nets == 1:
            print("Created a neural network with variance predictions.")
        else:
            print(
                "Created an ensemble of {} neural networks with variance predictions | Elites: {}".format(self.num_nets,
                                                                                                          self.num_elites))
        # All networks count as elites until _end_train() ranks them by holdout loss.
        self._model_inds = [i for i in range(self.num_nets)]
    @property
    def is_probabilistic(self):
        # Outputs are Gaussians (mean + variance), not point predictions.
        return True
    @property
    def is_tf_model(self):
        # This model is implemented on top of TensorFlow.
        return True
    @property
    def sess(self):
        # The tf.Session this model runs in (created or injected in __init__).
        return self._sess
###################################
# Network Structure Setup Methods #
###################################
def add(self, layer):
"""Adds a new layer to the network.
Arguments:
layer: (layer) The new layer to be added to the network.
If this is the first layer, the input dimension of the layer must be set.
Returns: None.
"""
if self.finalized:
raise RuntimeError("Cannot modify network structure after finalizing.")
if len(self.layers) == 0 and layer.get_input_dim() is None:
raise ValueError("Must set input dimension for the first layer.")
if self.model_loaded:
raise RuntimeError("Cannot add layers to a loaded model.")
layer.set_ensemble_size(self.num_nets)
if len(self.layers) > 0:
layer.set_input_dim(self.layers[-1].get_output_dim())
self.layers.append(layer.copy())
def pop(self):
"""Removes and returns the most recently added layer to the network.
Returns: (layer) The removed layer.
"""
if len(self.layers) == 0:
raise RuntimeError("Network is empty.")
if self.finalized:
raise RuntimeError("Cannot modify network structure after finalizing.")
if self.model_loaded:
raise RuntimeError("Cannot remove layers from a loaded model.")
return self.layers.pop()
    def finalize(self, optimizer, optimizer_args=None, *args, **kwargs):
        """Finalizes the network.

        Builds the whole TF graph: doubles the last layer's output so it emits
        both mean and log-variance, creates training/gradient-accumulation ops,
        moment-matched prediction tensors, and (if loading) restores variables.

        Arguments:
            optimizer: (tf.train.Optimizer) An optimizer class from those available at tf.train.Optimizer.
            optimizer_args: (dict) A dictionary of arguments for the __init__ method of the chosen optimizer.

        Returns: None
        """
        if len(self.layers) == 0:
            raise RuntimeError("Cannot finalize an empty network.")
        if self.finalized:
            raise RuntimeError("Can only finalize a network once.")

        optimizer_args = {} if optimizer_args is None else optimizer_args
        self.learning_rate = optimizer_args.get("learning_rate")
        # NOTE(review): the optimizer is instantiated again below inside the
        # variable scope; this first instance appears redundant — confirm.
        self.optimizer = optimizer(**optimizer_args)

        # Add variance output.
        self.layers[-1].set_output_dim(2 * self.layers[-1].get_output_dim())

        # Remove last activation to isolate variance from activation function.
        self.end_act = self.layers[-1].get_activation()
        self.end_act_name = self.layers[-1].get_activation(as_func=False)
        self.layers[-1].unset_activation()

        # Construct all variables.
        with self.sess.as_default():
            with tf.variable_scope(self.name):
                self.scaler = TensorStandardScaler(self.name, self.layers[0].get_input_dim())
                # Learned soft bounds on the predicted log-variance (see _compile_outputs).
                self.max_logvar = tf.Variable(np.ones([1, self.layers[-1].get_output_dim() // 2]) / 2.,
                                              dtype=tf.float32,
                                              name="max_log_var")
                self.min_logvar = tf.Variable(-np.ones([1, self.layers[-1].get_output_dim() // 2]) * 10.,
                                              dtype=tf.float32,
                                              name="min_log_var")
                for i, layer in enumerate(self.layers):
                    with tf.variable_scope(self.name + "Layer%i" % i):
                        layer.construct_vars()
                        self.decays.extend(layer.get_decays())
                        self.optvars.extend(layer.get_vars())
        self.optvars.extend([self.max_logvar, self.min_logvar])
        self.nonoptvars.extend(self.scaler.get_vars())

        # Set up training
        with tf.variable_scope(self.name):
            self.optimizer = optimizer(**optimizer_args)
            self.sy_train_in = tf.placeholder(dtype=tf.float32,
                                              shape=[self.num_nets, None, self.layers[0].get_input_dim()],
                                              name="training_inputs")
            self.sy_train_targ = tf.placeholder(dtype=tf.float32,
                                                shape=[self.num_nets, None, self.layers[-1].get_output_dim() // 2],
                                                name="training_targets")
            # NLL-style loss plus weight decay and a regularizer that pulls the
            # learned log-variance bounds together.
            self.train_loss = tf.reduce_sum(
                self._compile_losses(self.sy_train_in, self.sy_train_targ, inc_var_loss=True))
            self.train_loss += tf.add_n(self.decays)
            self.train_loss += 0.01 * tf.reduce_sum(self.max_logvar) - 0.01 * tf.reduce_sum(self.min_logvar)
            self.mse_loss = self._compile_losses(self.sy_train_in, self.sy_train_targ, inc_var_loss=False)

            self.train_op = self.optimizer.minimize(self.train_loss, var_list=self.optvars)
            self.grads, self.graph_vars = zip(*self.optimizer.compute_gradients(self.train_loss, var_list=self.optvars))
            self.scale_coeff = tf.placeholder(tf.float32)

            # Accumulation ops and variables
            # create a copy of all trainable variables with `0` as initial values
            self.accum_grads = [tf.Variable(tf.zeros_like(t_var.initialized_value()), trainable=False) for t_var in
                                self.optvars]
            print("OPTVARS: {}".format(self.optvars))
            print("TRAINABLEVARS: {}".format(tf.trainable_variables()))
            # create an op to zero all accumulated vars
            self.zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in self.accum_grads]
            # Create ops for accumulating the gradient
            self.accum_ops = [accum_grad.assign_add(grad * self.scale_coeff) for (accum_grad, grad) in
                              zip(self.accum_grads, self.grads)]
            self.train_op_rescaled = self.optimizer.apply_gradients(zip(self.accum_grads, self.graph_vars))

        # Initialize all variables
        self.sess.run(tf.global_variables_initializer())
        # self.sess.run(tf.variables_initializer(self.optvars + self.nonoptvars + self.optimizer.variables()))

        # Set up prediction
        with tf.variable_scope(self.name):
            self.sy_pred_in2d = tf.placeholder(dtype=tf.float32,
                                               shape=[None, self.layers[0].get_input_dim()],
                                               name="2D_training_inputs")
            self.sy_pred_mean2d_fac, self.sy_pred_var2d_fac = self.create_prediction_tensors(self.sy_pred_in2d,
                                                                                             factored=True)
            # Aggregate the ensemble: mean of means, within- plus between-network variance.
            self.sy_pred_mean2d = tf.reduce_mean(self.sy_pred_mean2d_fac, axis=0)
            self.sy_pred_var2d = tf.reduce_mean(self.sy_pred_var2d_fac, axis=0) + tf.reduce_mean(
                tf.square(self.sy_pred_mean2d_fac - self.sy_pred_mean2d), axis=0)

            self.sy_pred_in3d = tf.placeholder(dtype=tf.float32,
                                               shape=[self.num_nets, None, self.layers[0].get_input_dim()],
                                               name="3D_training_inputs")
            self.sy_pred_mean3d_fac, self.sy_pred_var3d_fac = self.create_prediction_tensors(self.sy_pred_in3d,
                                                                                             factored=True)

        # Load model if needed
        if self.model_loaded:
            with self.sess.as_default():
                params_dict = loadmat(os.path.join(self.model_dir, "%s.mat" % self.name))
                all_vars = self.nonoptvars + self.optvars
                for i, var in enumerate(all_vars):
                    var.load(params_dict[str(i)])

        self.finalized = True
##################
# Custom Methods #
##################
def get_weights(self):
return {idx: [layer.get_model_vars(idx, self.sess) for layer in self.layers] for idx in range(self.num_nets)}
def set_weights(self, weights):
keys = ['weights', 'biases']
ops = []
num_layers = len(self.layers)
for layer in range(num_layers):
# net_state = self._state[i]
params = {key: np.stack([weights[net][layer][key] for net in range(self.num_nets)]) for key in keys}
ops.extend(self.layers[layer].set_model_vars(params))
self.sess.run(ops)
    def _save_state(self, idx):
        # Snapshot network `idx`'s current layer parameters (used by _save_best
        # for early stopping; restored later via _set_state).
        self._state[idx] = [layer.get_model_vars(idx, self.sess) for layer in self.layers]
    def _set_state(self):
        """Restores every network's best snapshot (saved by _save_state) into the graph."""
        keys = ['weights', 'biases']
        # ops = []
        num_layers = len(self.layers)
        for layer in range(num_layers):
            # net_state = self._state[i]
            # Stack each network's saved values for this layer into ensemble-shaped arrays.
            params = {key: np.stack([self._state[net][layer][key] for net in range(self.num_nets)]) for key in keys}
            # NOTE(review): set_model_vars is called with (params, sess) here but with
            # a single argument in set_weights() — confirm FC supports both signatures.
            self.layers[layer].set_model_vars(params, self.sess)
            # ops.extend()
        # self.sess.run(ops)
def _save_best(self, epoch, holdout_losses):
updated = False
for i in range(len(holdout_losses)):
current = holdout_losses[i]
_, best = self._snapshots[i]
improvement = (best - current) / best
if improvement > 0.01:
self._snapshots[i] = (epoch, current)
self._save_state(i)
updated = True
improvement = (best - current) / best
# print('epoch {} | updated {} | improvement: {:.4f} | best: {:.4f} | current: {:.4f}'.format(epoch, i, improvement, best, current))
if updated:
self._epochs_since_update = 0
else:
self._epochs_since_update += 1
if self._epochs_since_update > self._max_epochs_since_update:
# print('[ BNN ] Breaking at epoch {}: {} epochs since update ({} max)'.format(epoch, self._epochs_since_update, self._max_epochs_since_update))
return True
else:
return False
def _start_train(self):
self._state = {}
self._snapshots = {i: (None, 1e10) for i in range(self.num_nets)}
self._epochs_since_update = 0
def _end_train(self, holdout_losses):
sorted_inds = np.argsort(holdout_losses)
self._model_inds = sorted_inds[:self.num_elites].tolist()
print('Using {} / {} models: {}'.format(self.num_elites, self.num_nets, self._model_inds))
def random_inds(self, batch_size):
inds = np.random.choice(self._model_inds, size=batch_size)
return inds
def reset(self):
print('[ BNN ] Resetting model')
[layer.reset(self.sess) for layer in self.layers]
    def validate(self, inputs, targets):
        """Returns the mean MSE of the elite networks on (inputs, targets)."""
        # Replicate the data across the ensemble so every network is scored on it.
        inputs = np.tile(inputs[None], [self.num_nets, 1, 1])
        targets = np.tile(targets[None], [self.num_nets, 1, 1])
        losses = self.sess.run(
            self.mse_loss,
            feed_dict={
                self.sy_train_in: inputs,
                self.sy_train_targ: targets
            }
        )
        # Score only the best `num_elites` networks.
        mean_elite_loss = np.sort(losses)[:self.num_elites].mean()
        return mean_elite_loss
#################
# Model Methods #
#################
# @profile
    def train(self, inputs, targets,
              batch_size=32, max_epochs=None, max_epochs_since_update=5,
              hide_progress=False, holdout_ratio=0.0, max_logging=5000, max_grad_updates=None, timer=None,
              max_t=None):
        """Trains/Continues network training

        Arguments:
            inputs (np.ndarray): Network inputs in the training dataset in rows.
            targets (np.ndarray): Network target outputs in the training dataset in rows corresponding
                to the rows in inputs.
            batch_size (int): The minibatch size to be used for training.
            max_epochs (int/None): Maximum number of epochs; trains until early stopping if None.
            hide_progress (bool): If True, hides the progress bar shown at the beginning of training.

        Returns: (OrderedDict) {'val_loss': ...} when holdout_ratio > 0, otherwise None.
        """
        self._max_epochs_since_update = max_epochs_since_update
        self._start_train()
        break_train = False

        def shuffle_rows(arr):
            # Shuffle each row of `arr` independently (re-orders each net's bootstrap indices).
            idxs = np.argsort(np.random.uniform(size=arr.shape), axis=-1)
            return arr[np.arange(arr.shape[0])[:, None], idxs]

        with self.sess.as_default():
            self.scaler.fit(inputs)

            # Split into training and holdout sets
            num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)
            permutation = np.random.permutation(inputs.shape[0])
            inputs, holdout_inputs = inputs[permutation[num_holdout:]], inputs[permutation[:num_holdout]]
            targets, holdout_targets = targets[permutation[num_holdout:]], targets[permutation[:num_holdout]]
            holdout_inputs = np.tile(holdout_inputs[None], [self.num_nets, 1, 1])
            holdout_targets = np.tile(holdout_targets[None], [self.num_nets, 1, 1])

            print('\n[ BNN ] Training {} | Holdout: {}'.format(inputs.shape, holdout_inputs.shape))

            # Bootstrap: each network trains on its own indices sampled with replacement.
            idxs = np.random.randint(inputs.shape[0], size=[self.num_nets, inputs.shape[0]])
            if hide_progress:
                progress = Silent()
            else:
                progress = Progress(max_epochs)

            if max_epochs:
                epoch_iter = range(max_epochs)
            else:
                epoch_iter = itertools.count()

            # else:
            #     epoch_range = trange(epochs, unit="epoch(s)", desc="Network training")

            t0 = time.time()
            grad_updates = 0
            for epoch in epoch_iter:
                # print("Normal - Epoch: {}".format(epoch))
                for batch_num in range(int(np.ceil(idxs.shape[-1] / batch_size))):
                    batch_idxs = idxs[:, batch_num * batch_size:(batch_num + 1) * batch_size]
                    self.sess.run(
                        self.train_op,
                        feed_dict={self.sy_train_in: inputs[batch_idxs],
                                   self.sy_train_targ: targets[batch_idxs],
                                   self.learning_rate: 0.001}
                    )
                    # print("Classic loss: {}".format(loss))
                    grad_updates += 1

                idxs = shuffle_rows(idxs)
                if not hide_progress:
                    if holdout_ratio < 1e-12:
                        losses = self.sess.run(
                            self.mse_loss,
                            feed_dict={
                                self.sy_train_in: inputs[idxs[:, :max_logging]],
                                self.sy_train_targ: targets[idxs[:, :max_logging]]
                            }
                        )
                        named_losses = [['M{}'.format(i), losses[i]] for i in range(len(losses))]
                        progress.set_description(named_losses)
                    else:
                        losses = self.sess.run(
                            self.mse_loss,
                            feed_dict={
                                self.sy_train_in: inputs[idxs[:, :max_logging]],
                                self.sy_train_targ: targets[idxs[:, :max_logging]]
                            }
                        )
                        holdout_losses = self.sess.run(
                            self.mse_loss,
                            feed_dict={
                                self.sy_train_in: holdout_inputs,
                                self.sy_train_targ: holdout_targets
                            }
                        )
                        named_losses = [['M{}'.format(i), losses[i]] for i in range(len(losses))]
                        named_holdout_losses = [['V{}'.format(i), holdout_losses[i]] for i in
                                                range(len(holdout_losses))]
                        named_losses = named_losses + named_holdout_losses + [['T', time.time() - t0]]
                        progress.set_description(named_losses)
                        # Early stopping: snapshot improved nets; stop after patience runs out.
                        break_train = self._save_best(epoch, holdout_losses)

                progress.update()
                t = time.time() - t0
                if break_train or (max_grad_updates and grad_updates > max_grad_updates):
                    break
                if max_t and t > max_t:
                    descr = 'Breaking because of timeout: {}! (max: {})'.format(t, max_t)
                    progress.append_description(descr)
                    # print('Breaking because of timeout: {}! | (max: {})\n'.format(t, max_t))
                    # time.sleep(5)
                    break
            print("Trained with normal for {} epochs".format(epoch + 1))
            if holdout_ratio > 0:
                progress.stamp()
                if timer: timer.stamp('bnn_train')

                # Restore each network's best snapshot before final scoring.
                self._set_state()
                if timer: timer.stamp('bnn_set_state')

                holdout_losses = self.sess.run(
                    self.mse_loss,
                    feed_dict={
                        self.sy_train_in: holdout_inputs,
                        self.sy_train_targ: holdout_targets
                    }
                )

                if timer: timer.stamp('bnn_holdout')

                self._end_train(holdout_losses)
                if timer: timer.stamp('bnn_end')
                val_loss = (np.sort(holdout_losses)[:self.num_elites]).mean()
                model_metrics = {'val_loss': val_loss}
                print('[ BNN ] Holdout', np.sort(holdout_losses), model_metrics, '\n')
                return OrderedDict(model_metrics)
                # return np.sort(holdout_losses)[]
                # pdb.set_trace()
            else:
                self._model_inds = [0, 1, 2, 3, 4]
    def generate_grad_coeff_poly(self, num_batch=40, poly_grade=4):
        """Computes per-batch weighting coefficients via polynomial least squares.

        Fits a degree-`poly_grade` polynomial design over `num_batch` positions
        and returns the row of the least-squares projection that extrapolates to
        the step just past the last batch — presumably so that the most recent
        batches receive the largest weight (TODO confirm intended interpretation).
        """
        poly = PolynomialFeatures(poly_grade)
        scale = 1
        x = np.linspace(0, num_batch * scale, num=num_batch)
        phi = poly.fit_transform(x[:, np.newaxis])
        # Projection matrix of the normal equations: (phi^T phi)^-1 phi^T.
        proto_H = np.matmul(np.linalg.inv(np.matmul(phi.transpose(), phi)), phi.transpose())
        fut_step = np.array([num_batch])[None]
        grad = np.matmul(poly.fit_transform(fut_step), proto_H)
        # grad = np.flip(grad)
        return grad
def exp_basis(self, x):
return np.array([1, np.power(1.1, x)])
def generate_grad_coeff_exp(self, num_batch=40): # low->high
x = np.linspace(0, num_batch * 1.15, num=num_batch)
basis_dim = self.exp_basis(1).shape[0]
phi = np.ones([num_batch, basis_dim])
for idx in range(num_batch):
phi[idx] = self.exp_basis(x[idx])
proto_H = np.matmul(np.linalg.inv(np.matmul(phi.transpose(), phi)), phi.transpose())
fut_step_t = self.exp_basis(num_batch)
grad = np.matmul(fut_step_t, proto_H)
# grad = np.flip(grad)
return grad
# @profile
    def train_modified_holdout(self, inputs, targets,
                               batch_size=32, max_epochs=None, max_epochs_since_update=5,
                               hide_progress=False, holdout_ratio=0.0, max_logging=5000,
                               max_grad_updates=None, timer=None, max_t=None, lr=0.001):
        """Trains/Continues network training

        Variant of train() that scales each minibatch's accumulated gradient by a
        recency-derived coefficient (from generate_grad_coeff_poly) and draws the
        holdout set with the same recency-weighted sampling probabilities.

        Arguments:
            inputs (np.ndarray): Network inputs in the training dataset in rows.
            targets (np.ndarray): Network target outputs in the training dataset in rows corresponding
                to the rows in inputs.
            batch_size (int): The minibatch size to be used for training.
            max_epochs (int/None): Maximum number of epochs; trains until early stopping if None.
            hide_progress (bool): If True, hides the progress bar shown at the beginning of training.
            lr (float): Learning rate used when applying the accumulated, rescaled gradients.

        Returns: (OrderedDict) {'val_loss': ...} when holdout_ratio > 0, otherwise None.
        """
        self._max_epochs_since_update = max_epochs_since_update
        self._start_train()
        break_train = False

        def shuffle_rows(arr):
            # Shuffles each row of `arr` independently (unused in this variant).
            idxs = np.argsort(np.random.uniform(size=arr.shape), axis=-1)
            return arr[np.arange(arr.shape[0])[:, None], idxs]

        with self.sess.as_default():
            self.scaler.fit(inputs)

            num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)

            ############
            # 1) Calculate gradient and sampling coefficients
            total_num_batch = int(np.floor(inputs.shape[0] / batch_size))
            # scale_coeff = self.generate_grad_coeff_exp(num_batch=total_num_batch)
            # scale_coeff = np.full((total_num_batch), 1/total_num_batch)
            scale_coeff = self.generate_grad_coeff_poly(num_batch=total_num_batch, poly_grade=1)
            sampling_coeff = np.repeat(scale_coeff, batch_size)
            # Clamp negative extrapolation weights to zero before normalizing to a distribution.
            for i in range(sampling_coeff.shape[0]):
                if sampling_coeff[i] < 0:
                    sampling_coeff[i] = 0
            sampling_coeff = sampling_coeff / np.sum(sampling_coeff)  # normalize
            print("Sampling coeff sum: {}".format(np.sum(sampling_coeff)))

            # 2) Sample holdout set
            idx_holdout = np.empty([num_holdout], dtype=int)
            idxs_i = np.arange(inputs.shape[0], dtype=int)[-total_num_batch * batch_size:]
            # NOTE(review): drawn with replacement, so idx_holdout may contain duplicates.
            for s in range(num_holdout):
                idx_holdout[s] = np.random.choice(a=idxs_i, p=sampling_coeff)
            holdout_inputs = inputs[idx_holdout]
            holdout_inputs = np.tile(holdout_inputs[None], [self.num_nets, 1, 1])
            holdout_targets = targets[idx_holdout]
            holdout_targets = np.tile(holdout_targets[None], [self.num_nets, 1, 1])

            # 3) Delete holdout data from inputs
            train_inputs = np.delete(inputs, idx_holdout, axis=0)
            train_targets = np.delete(targets, idx_holdout, axis=0)
            train_num_batch = int(np.floor(train_inputs.shape[0] / batch_size))
            train_inputs = train_inputs[-train_num_batch * batch_size:]
            train_targets = train_targets[-train_num_batch * batch_size:]

            # 4) Divide in batches
            train_inputs_b = np.array(np.split(train_inputs, train_num_batch))
            train_targets_b = np.array(np.split(train_targets, train_num_batch))
            ############

            # permutation = np.random.permutation(inputs.shape[0])
            # inputs, holdout_inputs = inputs[num_holdout:], inputs[:num_holdout]
            # targets, holdout_targets = targets[num_holdout:], targets[:num_holdout]
            # holdout_inputs = np.tile(holdout_inputs[None], [self.num_nets, 1, 1])
            # holdout_targets = np.tile(holdout_targets[None], [self.num_nets, 1, 1])

            print('[ BNN ] Training {} | Holdout: {}'.format(train_inputs.shape, holdout_inputs.shape))

            # idxs = np.random.randint(inputs.shape[0], size=[self.num_nets, inputs.shape[0]])
            if hide_progress:
                progress = Silent()
            else:
                progress = Progress(max_epochs)

            if max_epochs:
                epoch_iter = range(max_epochs)
            else:
                epoch_iter = itertools.count()

            # else:
            #     epoch_range = trange(epochs, unit="epoch(s)", desc="Network training")

            # total_num_batch = int(np.ceil(idxs.shape[-1] / batch_size))
            #
            # # scale_coeff = self.generate_grad_coeff_poly(num_batch=total_num_batch, poly_grade=2)
            # scale_coeff = self.generate_grad_coeff_exp(num_batch=total_num_batch)

            t0 = time.time()
            grad_updates = 0
            for epoch in epoch_iter:
                print("Modified - Epoch: {}".format(epoch))
                for batch_num in range(train_num_batch):
                    # batch_idxs = np.arange(inputs.shape[0] - batch_size * (batch_num + 1),
                    #                        inputs.shape[0] - batch_size * batch_num)
                    # batch_idxs_arr = [batch_idxs for _ in range(self.num_nets)]
                    # arrays_stack = np.stack(batch_idxs_arr, axis=0)
                    # Walk batches newest-to-oldest; every network sees the same batch here.
                    curr_train_inputs = train_inputs_b[train_num_batch - batch_num - 1]
                    curr_train_targets = train_targets_b[train_num_batch - batch_num - 1]
                    curr_idxs = np.arange(batch_size)
                    np.random.shuffle(curr_idxs)
                    curr_train_inputs = curr_train_inputs[curr_idxs]
                    curr_train_targets = curr_train_targets[curr_idxs]
                    curr_train_inputs = [curr_train_inputs for _ in range(self.num_nets)]
                    curr_train_targets = [curr_train_targets for _ in range(self.num_nets)]
                    # Accumulate this batch's gradient, scaled by its recency coefficient.
                    _, _loss = self.sess.run(
                        (self.accum_ops, self.train_loss),
                        feed_dict={self.sy_train_in: curr_train_inputs,
                                   self.sy_train_targ: curr_train_targets,
                                   self.scale_coeff: scale_coeff.item(total_num_batch - batch_num - 1)}
                        # TODO invert. No more flip!!!! - ?
                    )
                    losses = self.sess.run(
                        self.mse_loss,
                        feed_dict={
                            self.sy_train_in: curr_train_inputs,
                            self.sy_train_targ: curr_train_targets
                        }
                    )
                    # print("Epoch: {} - Batch: {} - Train Loss: {} - MSE loss: {} - Coeff: {}".format(epoch, batch_num,
                    #                                                                                  _loss, losses,
                    #                                                                                  scale_coeff.item(
                    #                                                                                      total_num_batch - batch_num - 1)))
                    grad_updates += 1
                # Apply the summed, rescaled gradients once per epoch, then clear them.
                self.sess.run(self.train_op_rescaled, feed_dict={self.learning_rate: lr})  # apply gradient
                self.sess.run(self.zero_ops)  # reset for next epoch

                # idxs = shuffle_rows(idxs)
                if not hide_progress:
                    if holdout_ratio < 1e-12:
                        if train_inputs.shape[0] > max_logging:
                            train_inputs_loss = train_inputs[-max_logging:]
                            train_targets_loss = train_targets[-max_logging:]
                        else:
                            train_inputs_loss = train_inputs
                            train_targets_loss = train_targets
                        train_inputs_loss = [train_inputs_loss for _ in range(self.num_nets)]
                        train_targets_loss = [train_targets_loss for _ in range(self.num_nets)]
                        losses = self.sess.run(
                            self.mse_loss,
                            feed_dict={
                                self.sy_train_in: train_inputs_loss,
                                self.sy_train_targ: train_targets_loss
                            }
                        )
                        named_losses = [['M{}'.format(i), losses[i]] for i in range(len(losses))]
                        progress.set_description(named_losses)
                    else:
                        if train_inputs.shape[0] > max_logging:
                            train_inputs_loss = train_inputs[-max_logging:]
                            train_targets_loss = train_targets[-max_logging:]
                        else:
                            train_inputs_loss = train_inputs
                            train_targets_loss = train_targets
                        train_inputs_loss = [train_inputs_loss for _ in range(self.num_nets)]
                        train_targets_loss = [train_targets_loss for _ in range(self.num_nets)]
                        losses = self.sess.run(
                            self.mse_loss,
                            feed_dict={
                                self.sy_train_in: train_inputs_loss,
                                self.sy_train_targ: train_targets_loss
                            }
                        )
                        holdout_losses = self.sess.run(
                            self.mse_loss,
                            feed_dict={
                                self.sy_train_in: holdout_inputs,
                                self.sy_train_targ: holdout_targets
                            }
                        )
                        named_losses = [['M{}'.format(i), losses[i]] for i in range(len(losses))]
                        named_holdout_losses = [['V{}'.format(i), holdout_losses[i]] for i in
                                                range(len(holdout_losses))]
                        named_losses = named_losses + named_holdout_losses + [['T', time.time() - t0]]
                        progress.set_description(named_losses)

                        break_train = self._save_best(epoch, holdout_losses)

                progress.update()
                t = time.time() - t0
                if break_train or (max_grad_updates and grad_updates > max_grad_updates):
                    break
                if max_t and t > max_t:
                    descr = 'Breaking because of timeout: {}! (max: {})'.format(t, max_t)
                    progress.append_description(descr)
                    # print('Breaking because of timeout: {}! | (max: {})\n'.format(t, max_t))
                    # time.sleep(5)
                    break
            print("Trained with modified for {} epochs".format(epoch+1))
            if holdout_ratio > 0:
                progress.stamp()
                if timer: timer.stamp('bnn_train')

                # Restore each network's best snapshot before final scoring.
                self._set_state()
                if timer: timer.stamp('bnn_set_state')

                holdout_losses = self.sess.run(
                    self.mse_loss,
                    feed_dict={
                        self.sy_train_in: holdout_inputs,
                        self.sy_train_targ: holdout_targets
                    }
                )

                if timer: timer.stamp('bnn_holdout')

                self._end_train(holdout_losses)
                if timer: timer.stamp('bnn_end')
                val_loss = (np.sort(holdout_losses)[:self.num_elites]).mean()
                model_metrics = {'val_loss': val_loss}
                print('[ BNN ] Holdout', np.sort(holdout_losses), model_metrics)
                return OrderedDict(model_metrics)
                # return np.sort(holdout_losses)[]
                # pdb.set_trace()
            else:
                self._model_inds = [0, 1, 2, 3, 4]
def predict(self, inputs, factored=False, *args, **kwargs):
"""Returns the distribution predicted by the model for each input vector in inputs.
Behavior is affected by the dimensionality of inputs and factored as follows:
inputs is 2D, factored=True: Each row is treated as an input vector.
Returns a mean of shape [ensemble_size, batch_size, output_dim] and variance of shape
[ensemble_size, batch_size, output_dim], where N(mean[i, j, :], diag([i, j, :])) is the
predicted output distribution by the ith model in the ensemble on input vector j.
inputs is 2D, factored=False: Each row is treated as an input vector.
Returns a mean of shape [batch_size, output_dim] and variance of shape
[batch_size, output_dim], where aggregation is performed as described in the paper.
inputs is 3D, factored=True/False: Each row in the last dimension is treated as an input vector.
Returns a mean of shape [ensemble_size, batch_size, output_dim] and variance of sha
[ensemble_size, batch_size, output_dim], where N(mean[i, j, :], diag([i, j, :])) is the
predicted output distribution by the ith model in the ensemble on input vector [i, j].
Arguments:
inputs (np.ndarray): An array of input vectors in rows. See above for behavior.
factored (bool): See above for behavior.
"""
if len(inputs.shape) == 2:
if factored:
return self.sess.run(
[self.sy_pred_mean2d_fac, self.sy_pred_var2d_fac],
feed_dict={self.sy_pred_in2d: inputs}
)
else:
return self.sess.run(
[self.sy_pred_mean2d, self.sy_pred_var2d],
feed_dict={self.sy_pred_in2d: inputs}
)
else:
return self.sess.run(
[self.sy_pred_mean3d_fac, self.sy_pred_var3d_fac],
feed_dict={self.sy_pred_in3d: inputs}
)
    def create_prediction_tensors(self, inputs, factored=False, *args, **kwargs):
        """See predict() above for documentation.
        """
        factored_mean, factored_variance = self._compile_outputs(inputs)
        if inputs.shape.ndims == 2 and not factored:
            # Moment-match the ensemble: mean of means, plus between-network
            # spread and average within-network variance (law of total variance).
            mean = tf.reduce_mean(factored_mean, axis=0)
            variance = tf.reduce_mean(tf.square(factored_mean - mean), axis=0) + \
                       tf.reduce_mean(factored_variance, axis=0)
            return mean, variance
        return factored_mean, factored_variance
    def save_weights(self):
        """Writes current variable values (scaler included) to weights/<name>.mat."""
        # Save network parameters (including scalers) in a .mat file
        var_vals = {}
        # Variables are keyed by enumeration index; load_weights relies on this order.
        for i, var_val in enumerate(self.sess.run(self.nonoptvars + self.optvars)):
            var_vals[str(i)] = var_val
        savemat('weights/' + self.name + '.mat', var_vals)
    def load_weights(self):
        """Restores variable values previously written by save_weights()."""
        with self.sess.as_default():
            params_dict = loadmat('weights/' + self.name + '.mat')
            all_vars = self.nonoptvars + self.optvars
            # Ordering must match save_weights(), which keyed variables by index.
            for i, var in enumerate(all_vars):
                var.load(params_dict[str(i)])
def save(self, savedir, timestep):
"""Saves all information required to recreate this model in two files in savedir
(or self.model_dir if savedir is None), one containing the model structuure and the other
containing all variables in the network.
savedir (str): (Optional) Path to which files will be saved. If not provided, self.model_dir
(the directory provided at initialization) will be used.
"""
if not self.finalized:
raise RuntimeError()
model_dir = self.model_dir if savedir is None else savedir
# Write structure to file
with open(os.path.join(model_dir, '{}_{}.nns'.format(self.name, timestep)), "w+") as f:
for layer in self.layers[:-1]:
f.write("%s\n" % repr(layer))
last_layer_copy = self.layers[-1].copy()
last_layer_copy.set_activation(self.end_act_name)
last_layer_copy.set_output_dim(last_layer_copy.get_output_dim() // 2)
f.write("%s\n" % repr(last_layer_copy))
# Save network parameters (including scalers) in a .mat file
var_vals = {}
for i, var_val in enumerate(self.sess.run(self.nonoptvars + self.optvars)):
var_vals[str(i)] = var_val
savemat(os.path.join(model_dir, '{}_{}.mat'.format(self.name, timestep)), var_vals)
    def _load_structure(self):
        """Uses the saved structure in self.model_dir with the name of this network to initialize
        the structure of this network.
        """
        structure = []
        with open(os.path.join(self.model_dir, "%s.nns" % self.name), "r") as f:
            for line in f:
                # Each line is the repr() of a layer written by save(); strip the
                # "FC(" prefix and ")\n" suffix and parse the "key=value" pairs.
                kwargs = {
                    key: val for (key, val) in
                    [argval.split("=") for argval in line[3:-2].split(", ")]
                }
                # All values come back as strings; coerce to their real types.
                kwargs["input_dim"] = int(kwargs["input_dim"])
                kwargs["output_dim"] = int(kwargs["output_dim"])
                kwargs["weight_decay"] = None if kwargs["weight_decay"] == "None" else float(kwargs["weight_decay"])
                kwargs["activation"] = None if kwargs["activation"] == "None" else kwargs["activation"][1:-1]
                kwargs["ensemble_size"] = int(kwargs["ensemble_size"])
                structure.append(FC(**kwargs))
        self.layers = structure
#######################
# Compilation methods #
#######################
def _compile_outputs(self, inputs, ret_log_var=False):
    """Compiles the output of the network at the given inputs.

    If inputs is 2D, returns a 3D tensor where output[i] is the output of the ith network in the ensemble.
    If inputs is 3D, returns a 3D tensor where output[i] is the output of the ith network on the ith input matrix.

    Arguments:
        inputs: (tf.Tensor) A tensor representing the inputs to the network
        ret_log_var: (bool) If True, returns the log variance instead of the variance.

    Returns: (tf.Tensors) The mean and variance/log variance predictions at inputs for each network
        in the ensemble.
    """
    dim_output = self.layers[-1].get_output_dim()
    # Standardize inputs with the fitted scaler before the first layer.
    cur_out = self.scaler.transform(inputs)
    for layer in self.layers:
        cur_out = layer.compute_output_tensor(cur_out)
    # The final layer emits [mean, log-variance] concatenated on the last axis.
    mean = cur_out[:, :, :dim_output // 2]
    if self.end_act is not None:
        mean = self.end_act(mean)
    # Softly bound the predicted log-variance into [min_logvar, max_logvar]
    # using softplus instead of hard clipping, so gradients keep flowing to
    # the (trainable) bound variables.
    logvar = self.max_logvar - tf.nn.softplus(self.max_logvar - cur_out[:, :, dim_output // 2:])
    logvar = self.min_logvar + tf.nn.softplus(logvar - self.min_logvar)
    if ret_log_var:
        return mean, logvar
    else:
        return mean, tf.exp(logvar)
def _compile_losses(self, inputs, targets, inc_var_loss=True):
    """Helper method for compiling the loss function.

    The loss function is obtained from the log likelihood, assuming the output
    distribution is Gaussian with both mean and (diagonal) covariance produced
    by the network.

    Arguments:
        inputs: (tf.Tensor) A tensor representing the input batch
        targets: (tf.Tensor) The desired targets for each input vector in inputs.
        inc_var_loss: (bool) If True, includes log variance loss.

    Returns: (tf.Tensor) A tensor representing the loss on the input arguments.
    """
    pred_mean, pred_log_var = self._compile_outputs(inputs, ret_log_var=True)
    precision = tf.exp(-pred_log_var)
    squared_error = tf.square(pred_mean - targets)

    def _per_network_mean(t):
        # Average over the output dimension, then over the batch dimension,
        # leaving one scalar per ensemble member.
        return tf.reduce_mean(tf.reduce_mean(t, axis=-1), axis=-1)

    if not inc_var_loss:
        return _per_network_mean(squared_error)
    # Gaussian NLL (up to constants): precision-weighted error + log-variance.
    return _per_network_mean(squared_error * precision) + _per_network_mean(pred_log_var)
from __future__ import print_function
from __future__ import absolute_import
import os
import time
import pdb
import itertools
from collections import OrderedDict
import tensorflow as tf
import numpy as np
from tqdm import trange
from scipy.io import savemat, loadmat
from mbcd.models.utils import get_required_argument, TensorStandardScaler
from mbcd.models.fc import FC
from mbcd.utils.logger import Progress, Silent
from sklearn.preprocessing import PolynomialFeatures
np.set_printoptions(precision=4)
class BNN:
def __init__(self, params):
    """Initializes the ensemble model from a parameter dictionary.

    Arguments:
        params (dict): Requires 'name' and 'num_elites'; optional keys are
            'model_dir', 'sess' (an existing tf.Session), 'num_networks'
            (defaults to 1), and 'load_model' (restore structure from
            model_dir when True).
    """
    self.name = get_required_argument(params, 'name', 'Must provide name.')
    self.model_dir = params.get('model_dir', None)
    print('[ BNN ] Initializing model: {} | {} networks | {} elites'.format(params['name'], params['num_networks'],
    params['num_elites']))
    # Reuse a caller-supplied session if given; otherwise create one that
    # grows GPU memory on demand.
    if params.get('sess', None) is None:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self._sess = tf.Session(config=config)
    else:
        self._sess = params.get('sess')
    # Placeholders/graph tensors are all None until finalize() builds the graph.
    self.finalized = False
    self.layers, self.max_logvar, self.min_logvar = [], None, None
    self.decays, self.optvars, self.nonoptvars = [], [], []
    self.end_act, self.end_act_name = None, None
    self.scaler = None
    self.train_loss = None
    self.accum_grads = None
    self.zero_ops = None
    self.accum_ops = None
    self.scale_coeff = None
    self.learning_rate = None
    self.optimizer = None
    self.sy_train_in, self.sy_train_targ = None, None
    self.grads, self.graph_vars = None, None
    self.train_op, self.train_op_rescaled, self.mse_loss = None, None, None
    self.sy_pred_in2d, self.sy_pred_mean2d_fac, self.sy_pred_var2d_fac = None, None, None
    self.sy_pred_mean2d, self.sy_pred_var2d = None, None
    self.sy_pred_in3d, self.sy_pred_mean3d_fac, self.sy_pred_var3d_fac = None, None, None
    if params.get('load_model', False):
        if self.model_dir is None:
            raise ValueError("Cannot load model without providing model directory.")
        self._load_structure()
        # Ensemble size is recovered from the first restored layer.
        self.num_nets, self.model_loaded = self.layers[0].get_ensemble_size(), True
        print("Model loaded from %s." % self.model_dir)
        self.num_elites = params['num_elites']
    else:
        self.num_nets = params.get('num_networks', 1)
        self.num_elites = params['num_elites']
        self.model_loaded = False
    if self.num_nets == 1:
        print("Created a neural network with variance predictions.")
    else:
        print(
            "Created an ensemble of {} neural networks with variance predictions | Elites: {}".format(self.num_nets,
            self.num_elites))
    # Until training selects elites, every network is eligible.
    self._model_inds = [i for i in range(self.num_nets)]
@property
def is_probabilistic(self):
    """True: this model predicts a distribution (mean and variance), not a point."""
    return True
@property
def is_tf_model(self):
    """True: this model is TensorFlow-based (it owns/uses a tf.Session)."""
    return True
@property
def sess(self):
    """The tf.Session this model runs in (created or injected in __init__)."""
    return self._sess
ze a network once.")
optimizer_args = {} if optimizer_args is None else optimizer_args
self.learning_rate = optimizer_args.get("learning_rate")
self.optimizer = optimizer(**optimizer_args)
self.layers[-1].set_output_dim(2 * self.layers[-1].get_output_dim())
self.end_act = self.layers[-1].get_activation()
self.end_act_name = self.layers[-1].get_activation(as_func=False)
self.layers[-1].unset_activation()
with self.sess.as_default():
with tf.variable_scope(self.name):
self.scaler = TensorStandardScaler(self.name, self.layers[0].get_input_dim())
self.max_logvar = tf.Variable(np.ones([1, self.layers[-1].get_output_dim() // 2]) / 2.,
dtype=tf.float32,
name="max_log_var")
self.min_logvar = tf.Variable(-np.ones([1, self.layers[-1].get_output_dim() // 2]) * 10.,
dtype=tf.float32,
name="min_log_var")
for i, layer in enumerate(self.layers):
with tf.variable_scope(self.name + "Layer%i" % i):
layer.construct_vars()
self.decays.extend(layer.get_decays())
self.optvars.extend(layer.get_vars())
self.optvars.extend([self.max_logvar, self.min_logvar])
self.nonoptvars.extend(self.scaler.get_vars())
with tf.variable_scope(self.name):
self.optimizer = optimizer(**optimizer_args)
self.sy_train_in = tf.placeholder(dtype=tf.float32,
shape=[self.num_nets, None, self.layers[0].get_input_dim()],
name="training_inputs")
self.sy_train_targ = tf.placeholder(dtype=tf.float32,
shape=[self.num_nets, None, self.layers[-1].get_output_dim() // 2],
name="training_targets")
self.train_loss = tf.reduce_sum(
self._compile_losses(self.sy_train_in, self.sy_train_targ, inc_var_loss=True))
self.train_loss += tf.add_n(self.decays)
self.train_loss += 0.01 * tf.reduce_sum(self.max_logvar) - 0.01 * tf.reduce_sum(self.min_logvar)
self.mse_loss = self._compile_losses(self.sy_train_in, self.sy_train_targ, inc_var_loss=False)
self.train_op = self.optimizer.minimize(self.train_loss, var_list=self.optvars)
self.grads, self.graph_vars = zip(*self.optimizer.compute_gradients(self.train_loss, var_list=self.optvars))
self.scale_coeff = tf.placeholder(tf.float32)
self.accum_grads = [tf.Variable(tf.zeros_like(t_var.initialized_value()), trainable=False) for t_var in
self.optvars]
print("OPTVARS: {}".format(self.optvars))
print("TRAINABLEVARS: {}".format(tf.trainable_variables()))
self.zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in self.accum_grads]
self.accum_ops = [accum_grad.assign_add(grad * self.scale_coeff) for (accum_grad, grad) in
zip(self.accum_grads, self.grads)]
self.train_op_rescaled = self.optimizer.apply_gradients(zip(self.accum_grads, self.graph_vars))
self.sess.run(tf.global_variables_initializer())
with tf.variable_scope(self.name):
self.sy_pred_in2d = tf.placeholder(dtype=tf.float32,
shape=[None, self.layers[0].get_input_dim()],
name="2D_training_inputs")
self.sy_pred_mean2d_fac, self.sy_pred_var2d_fac = self.create_prediction_tensors(self.sy_pred_in2d,
factored=True)
self.sy_pred_mean2d = tf.reduce_mean(self.sy_pred_mean2d_fac, axis=0)
self.sy_pred_var2d = tf.reduce_mean(self.sy_pred_var2d_fac, axis=0) + tf.reduce_mean(
tf.square(self.sy_pred_mean2d_fac - self.sy_pred_mean2d), axis=0)
self.sy_pred_in3d = tf.placeholder(dtype=tf.float32,
shape=[self.num_nets, None, self.layers[0].get_input_dim()],
name="3D_training_inputs")
self.sy_pred_mean3d_fac, self.sy_pred_var3d_fac = self.create_prediction_tensors(self.sy_pred_in3d,
factored=True)
if self.model_loaded:
with self.sess.as_default():
params_dict = loadmat(os.path.join(self.model_dir, "%s.mat" % self.name))
all_vars = self.nonoptvars + self.optvars
for i, var in enumerate(all_vars):
var.load(params_dict[str(i)])
self.finalized = True
range(num_layers):
params = {key: np.stack([weights[net][layer][key] for net in range(self.num_nets)]) for key in keys}
ops.extend(self.layers[layer].set_model_vars(params))
self.sess.run(ops)
def _save_state(self, idx):
    """Snapshots the current variable values of network `idx` into self._state."""
    self._state[idx] = [layer.get_model_vars(idx, self.sess) for layer in self.layers]
def _set_state(self):
    """Restores every network's variables from the snapshots in self._state."""
    keys = ['weights', 'biases']
    num_layers = len(self.layers)
    for layer in range(num_layers):
        # Re-stack per-network snapshots into one (num_nets, ...) array per key.
        params = {key: np.stack([self._state[net][layer][key] for net in range(self.num_nets)]) for key in keys}
        self.layers[layer].set_model_vars(params, self.sess)
def _save_best(self, epoch, holdout_losses):
    """Snapshots every network whose holdout loss improved by more than 1%.

    Arguments:
        epoch (int): Current training epoch, recorded with each snapshot.
        holdout_losses: Per-network holdout losses for this epoch.

    Returns:
        bool: True when no network has improved for more than
        self._max_epochs_since_update consecutive epochs (early-stop signal).
    """
    updated = False
    for i in range(len(holdout_losses)):
        current = holdout_losses[i]
        _, best = self._snapshots[i]
        # Relative improvement against the best loss seen so far; the initial
        # 1e10 sentinel makes the first epoch always count as an improvement.
        improvement = (best - current) / best
        if improvement > 0.01:
            self._snapshots[i] = (epoch, current)
            self._save_state(i)
            updated = True
    if updated:
        self._epochs_since_update = 0
    else:
        self._epochs_since_update += 1
    return self._epochs_since_update > self._max_epochs_since_update
def _start_train(self):
    """Resets snapshot bookkeeping at the beginning of a training run."""
    self._state = {}
    # (epoch, best holdout loss) per network; 1e10 acts as a +inf sentinel.
    self._snapshots = {i: (None, 1e10) for i in range(self.num_nets)}
    self._epochs_since_update = 0
def _end_train(self, holdout_losses):
    """Selects the `num_elites` networks with the lowest holdout losses."""
    sorted_inds = np.argsort(holdout_losses)
    self._model_inds = sorted_inds[:self.num_elites].tolist()
    print('Using {} / {} models: {}'.format(self.num_elites, self.num_nets, self._model_inds))
def random_inds(self, batch_size):
    """Samples `batch_size` elite-network indices (with replacement)."""
    inds = np.random.choice(self._model_inds, size=batch_size)
    return inds
def reset(self):
    """Re-initializes the variables of every layer in the network."""
    print('[ BNN ] Resetting model')
    # Plain loop: the previous list comprehension was used only for its side
    # effects and allocated a throwaway list of None values.
    for layer in self.layers:
        layer.reset(self.sess)
def validate(self, inputs, targets):
    """Returns the mean MSE of the elite networks on the given data.

    The same (inputs, targets) batch is tiled across all `num_nets` networks;
    only the `num_elites` lowest per-network losses enter the mean.
    """
    inputs = np.tile(inputs[None], [self.num_nets, 1, 1])
    targets = np.tile(targets[None], [self.num_nets, 1, 1])
    losses = self.sess.run(
        self.mse_loss,
        feed_dict={
            self.sy_train_in: inputs,
            self.sy_train_targ: targets
        }
    )
    mean_elite_loss = np.sort(losses)[:self.num_elites].mean()
    return mean_elite_loss
._max_epochs_since_update = max_epochs_since_update
self._start_train()
break_train = False
def shuffle_rows(arr):
idxs = np.argsort(np.random.uniform(size=arr.shape), axis=-1)
return arr[np.arange(arr.shape[0])[:, None], idxs]
with self.sess.as_default():
self.scaler.fit(inputs)
num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)
permutation = np.random.permutation(inputs.shape[0])
inputs, holdout_inputs = inputs[permutation[num_holdout:]], inputs[permutation[:num_holdout]]
targets, holdout_targets = targets[permutation[num_holdout:]], targets[permutation[:num_holdout]]
holdout_inputs = np.tile(holdout_inputs[None], [self.num_nets, 1, 1])
holdout_targets = np.tile(holdout_targets[None], [self.num_nets, 1, 1])
print('\n[ BNN ] Training {} | Holdout: {}'.format(inputs.shape, holdout_inputs.shape))
idxs = np.random.randint(inputs.shape[0], size=[self.num_nets, inputs.shape[0]])
if hide_progress:
progress = Silent()
else:
progress = Progress(max_epochs)
if max_epochs:
epoch_iter = range(max_epochs)
else:
epoch_iter = itertools.count()
t0 = time.time()
grad_updates = 0
for epoch in epoch_iter:
for batch_num in range(int(np.ceil(idxs.shape[-1] / batch_size))):
batch_idxs = idxs[:, batch_num * batch_size:(batch_num + 1) * batch_size]
self.sess.run(
self.train_op,
feed_dict={self.sy_train_in: inputs[batch_idxs],
self.sy_train_targ: targets[batch_idxs],
self.learning_rate: 0.001}
)
grad_updates += 1
idxs = shuffle_rows(idxs)
if not hide_progress:
if holdout_ratio < 1e-12:
losses = self.sess.run(
self.mse_loss,
feed_dict={
self.sy_train_in: inputs[idxs[:, :max_logging]],
self.sy_train_targ: targets[idxs[:, :max_logging]]
}
)
named_losses = [['M{}'.format(i), losses[i]] for i in range(len(losses))]
progress.set_description(named_losses)
else:
losses = self.sess.run(
self.mse_loss,
feed_dict={
self.sy_train_in: inputs[idxs[:, :max_logging]],
self.sy_train_targ: targets[idxs[:, :max_logging]]
}
)
holdout_losses = self.sess.run(
self.mse_loss,
feed_dict={
self.sy_train_in: holdout_inputs,
self.sy_train_targ: holdout_targets
}
)
named_losses = [['M{}'.format(i), losses[i]] for i in range(len(losses))]
named_holdout_losses = [['V{}'.format(i), holdout_losses[i]] for i in
range(len(holdout_losses))]
named_losses = named_losses + named_holdout_losses + [['T', time.time() - t0]]
progress.set_description(named_losses)
break_train = self._save_best(epoch, holdout_losses)
progress.update()
t = time.time() - t0
if break_train or (max_grad_updates and grad_updates > max_grad_updates):
break
if max_t and t > max_t:
descr = 'Breaking because of timeout: {}! (max: {})'.format(t, max_t)
progress.append_description(descr)
break
print("Trained with normal for {} epochs".format(epoch + 1))
if holdout_ratio > 0:
progress.stamp()
if timer: timer.stamp('bnn_train')
self._set_state()
if timer: timer.stamp('bnn_set_state')
holdout_losses = self.sess.run(
self.mse_loss,
feed_dict={
self.sy_train_in: holdout_inputs,
self.sy_train_targ: holdout_targets
}
)
if timer: timer.stamp('bnn_holdout')
self._end_train(holdout_losses)
if timer: timer.stamp('bnn_end')
val_loss = (np.sort(holdout_losses)[:self.num_elites]).mean()
model_metrics = {'val_loss': val_loss}
print('[ BNN ] Holdout', np.sort(holdout_losses), model_metrics, '\n')
return OrderedDict(model_metrics)
else:
self._model_inds = [0, 1, 2, 3, 4]
def generate_grad_coeff_poly(self, num_batch=40, poly_grade=4):
    """Least-squares weights for a degree-`poly_grade` polynomial fit over
    `num_batch` grid points, evaluated at x = num_batch.

    Returns a (1, num_batch) array: a weighted sum of per-batch values with
    these coefficients equals the fitted polynomial's value at num_batch.
    """
    poly = PolynomialFeatures(poly_grade)
    scale = 1
    x = np.linspace(0, num_batch * scale, num=num_batch)
    phi = poly.fit_transform(x[:, np.newaxis])
    # (Phi^T Phi)^-1 Phi^T: least-squares pseudo-inverse of the design matrix.
    proto_H = np.matmul(np.linalg.inv(np.matmul(phi.transpose(), phi)), phi.transpose())
    fut_step = np.array([num_batch])[None]
    grad = np.matmul(poly.fit_transform(fut_step), proto_H)
    return grad
def exp_basis(self, x):
    """Basis features [1, 1.1**x] used by generate_grad_coeff_exp."""
    return np.array([1, np.power(1.1, x)])
def generate_grad_coeff_exp(self, num_batch=40):
    """Least-squares weights for an exponential-basis fit over `num_batch`
    points on [0, num_batch * 1.15], evaluated at x = num_batch.
    """
    x = np.linspace(0, num_batch * 1.15, num=num_batch)
    basis_dim = self.exp_basis(1).shape[0]
    # Design matrix: one basis-feature row per grid point.
    phi = np.ones([num_batch, basis_dim])
    for idx in range(num_batch):
        phi[idx] = self.exp_basis(x[idx])
    # (Phi^T Phi)^-1 Phi^T: least-squares pseudo-inverse of the design matrix.
    proto_H = np.matmul(np.linalg.inv(np.matmul(phi.transpose(), phi)), phi.transpose())
    fut_step_t = self.exp_basis(num_batch)
    grad = np.matmul(fut_step_t, proto_H)
    return grad
def train_modified_holdout(self, inputs, targets,
batch_size=32, max_epochs=None, max_epochs_since_update=5,
hide_progress=False, holdout_ratio=0.0, max_logging=5000,
max_grad_updates=None, timer=None, max_t=None, lr=0.001):
self._max_epochs_since_update = max_epochs_since_update
self._start_train()
break_train = False
def shuffle_rows(arr):
idxs = np.argsort(np.random.uniform(size=arr.shape), axis=-1)
return arr[np.arange(arr.shape[0])[:, None], idxs]
with self.sess.as_default():
self.scaler.fit(inputs)
num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)
batch_size))
scale_coeff = self.generate_grad_coeff_poly(num_batch=total_num_batch, poly_grade=1)
sampling_coeff = np.repeat(scale_coeff, batch_size)
for i in range(sampling_coeff.shape[0]):
if sampling_coeff[i] < 0:
sampling_coeff[i] = 0
sampling_coeff = sampling_coeff / np.sum(sampling_coeff)
print("Sampling coeff sum: {}".format(np.sum(sampling_coeff)))
idx_holdout = np.empty([num_holdout], dtype=int)
idxs_i = np.arange(inputs.shape[0], dtype=int)[-total_num_batch * batch_size:]
for s in range(num_holdout):
idx_holdout[s] = np.random.choice(a=idxs_i, p=sampling_coeff)
holdout_inputs = inputs[idx_holdout]
holdout_inputs = np.tile(holdout_inputs[None], [self.num_nets, 1, 1])
holdout_targets = targets[idx_holdout]
holdout_targets = np.tile(holdout_targets[None], [self.num_nets, 1, 1])
train_inputs = np.delete(inputs, idx_holdout, axis=0)
train_targets = np.delete(targets, idx_holdout, axis=0)
train_num_batch = int(np.floor(train_inputs.shape[0] / batch_size))
train_inputs = train_inputs[-train_num_batch * batch_size:]
train_targets = train_targets[-train_num_batch * batch_size:]
train_inputs_b = np.array(np.split(train_inputs, train_num_batch))
train_targets_b = np.array(np.split(train_targets, train_num_batch))
NN ] Training {} | Holdout: {}'.format(train_inputs.shape, holdout_inputs.shape))
if hide_progress:
progress = Silent()
else:
progress = Progress(max_epochs)
if max_epochs:
epoch_iter = range(max_epochs)
else:
epoch_iter = itertools.count()
h_iter:
print("Modified - Epoch: {}".format(epoch))
for batch_num in range(train_num_batch):
curr_train_inputs = train_inputs_b[train_num_batch - batch_num - 1]
curr_train_targets = train_targets_b[train_num_batch - batch_num - 1]
curr_idxs = np.arange(batch_size)
np.random.shuffle(curr_idxs)
curr_train_inputs = curr_train_inputs[curr_idxs]
curr_train_targets = curr_train_targets[curr_idxs]
curr_train_inputs = [curr_train_inputs for _ in range(self.num_nets)]
curr_train_targets = [curr_train_targets for _ in range(self.num_nets)]
_, _loss = self.sess.run(
(self.accum_ops, self.train_loss),
feed_dict={self.sy_train_in: curr_train_inputs,
self.sy_train_targ: curr_train_targets,
self.scale_coeff: scale_coeff.item(total_num_batch - batch_num - 1)}
)
losses = self.sess.run(
self.mse_loss,
feed_dict={
self.sy_train_in: curr_train_inputs,
self.sy_train_targ: curr_train_targets
}
)
grad_updates += 1
self.sess.run(self.train_op_rescaled, feed_dict={self.learning_rate: lr})
self.sess.run(self.zero_ops)
if not hide_progress:
if holdout_ratio < 1e-12:
if train_inputs.shape[0] > max_logging:
train_inputs_loss = train_inputs[-max_logging:]
train_targets_loss = train_targets[-max_logging:]
else:
train_inputs_loss = train_inputs
train_targets_loss = train_targets
train_inputs_loss = [train_inputs_loss for _ in range(self.num_nets)]
train_targets_loss = [train_targets_loss for _ in range(self.num_nets)]
losses = self.sess.run(
self.mse_loss,
feed_dict={
self.sy_train_in: train_inputs_loss,
self.sy_train_targ: train_targets_loss
}
)
named_losses = [['M{}'.format(i), losses[i]] for i in range(len(losses))]
progress.set_description(named_losses)
else:
if train_inputs.shape[0] > max_logging:
train_inputs_loss = train_inputs[-max_logging:]
train_targets_loss = train_targets[-max_logging:]
else:
train_inputs_loss = train_inputs
train_targets_loss = train_targets
train_inputs_loss = [train_inputs_loss for _ in range(self.num_nets)]
train_targets_loss = [train_targets_loss for _ in range(self.num_nets)]
losses = self.sess.run(
self.mse_loss,
feed_dict={
self.sy_train_in: train_inputs_loss,
self.sy_train_targ: train_targets_loss
}
)
holdout_losses = self.sess.run(
self.mse_loss,
feed_dict={
self.sy_train_in: holdout_inputs,
self.sy_train_targ: holdout_targets
}
)
named_losses = [['M{}'.format(i), losses[i]] for i in range(len(losses))]
named_holdout_losses = [['V{}'.format(i), holdout_losses[i]] for i in
range(len(holdout_losses))]
named_losses = named_losses + named_holdout_losses + [['T', time.time() - t0]]
progress.set_description(named_losses)
break_train = self._save_best(epoch, holdout_losses)
progress.update()
t = time.time() - t0
if break_train or (max_grad_updates and grad_updates > max_grad_updates):
break
if max_t and t > max_t:
descr = 'Breaking because of timeout: {}! (max: {})'.format(t, max_t)
progress.append_description(descr)
break
print("Trained with modified for {} epochs".format(epoch+1))
if holdout_ratio > 0:
progress.stamp()
if timer: timer.stamp('bnn_train')
self._set_state()
if timer: timer.stamp('bnn_set_state')
holdout_losses = self.sess.run(
self.mse_loss,
feed_dict={
self.sy_train_in: holdout_inputs,
self.sy_train_targ: holdout_targets
}
)
if timer: timer.stamp('bnn_holdout')
self._end_train(holdout_losses)
if timer: timer.stamp('bnn_end')
val_loss = (np.sort(holdout_losses)[:self.num_elites]).mean()
model_metrics = {'val_loss': val_loss}
print('[ BNN ] Holdout', np.sort(holdout_losses), model_metrics)
return OrderedDict(model_metrics)
else:
self._model_inds = [0, 1, 2, 3, 4]
def predict(self, inputs, factored=False, *args, **kwargs):
    """Returns mean/variance predictions for a 2D or 3D input batch.

    2D inputs run through the 2D placeholders (factored = per-network outputs,
    otherwise ensemble-aggregated); 3D inputs always return factored outputs.
    """
    if len(inputs.shape) != 2:
        # 3D input: one input matrix per network; always factored.
        return self.sess.run(
            [self.sy_pred_mean3d_fac, self.sy_pred_var3d_fac],
            feed_dict={self.sy_pred_in3d: inputs}
        )
    if factored:
        fetches = [self.sy_pred_mean2d_fac, self.sy_pred_var2d_fac]
    else:
        fetches = [self.sy_pred_mean2d, self.sy_pred_var2d]
    return self.sess.run(fetches, feed_dict={self.sy_pred_in2d: inputs})
def create_prediction_tensors(self, inputs, factored=False, *args, **kwargs):
    """Builds mean/variance prediction tensors for `inputs`.

    For 2D inputs with factored=False, per-network predictions are merged into
    a single Gaussian: the mean of the means, plus a variance combining the
    spread of the means across the ensemble and the mean predicted variance.
    """
    factored_mean, factored_variance = self._compile_outputs(inputs)
    if inputs.shape.ndims == 2 and not factored:
        mean = tf.reduce_mean(factored_mean, axis=0)
        variance = tf.reduce_mean(tf.square(factored_mean - mean), axis=0) + \
        tf.reduce_mean(factored_variance, axis=0)
        return mean, variance
    return factored_mean, factored_variance
def save_weights(self):
    """Saves all network variables (including scaler state) to weights/<name>.mat."""
    # Key variables by their position so load_weights can restore them in order.
    var_vals = {str(i): var_val
                for i, var_val in enumerate(self.sess.run(self.nonoptvars + self.optvars))}
    # os.path.join for consistency with save()/_load_structure().
    savemat(os.path.join('weights', self.name + '.mat'), var_vals)
def load_weights(self):
    """Restores all network variables previously written by save_weights()."""
    with self.sess.as_default():
        # os.path.join for consistency with save()/_load_structure().
        params_dict = loadmat(os.path.join('weights', self.name + '.mat'))
        all_vars = self.nonoptvars + self.optvars
        # Variables were saved keyed by their position in this same ordering.
        for i, var in enumerate(all_vars):
            var.load(params_dict[str(i)])
def save(self, savedir, timestep):
    """Saves the model structure (.nns) and all variables (.mat) to savedir,
    or self.model_dir when savedir is None; `timestep` is appended to both
    file names. Raises RuntimeError if the network is not finalized.
    """
    if not self.finalized:
        raise RuntimeError()
    model_dir = self.model_dir if savedir is None else savedir
    # Structure file: one layer repr() per line.
    with open(os.path.join(model_dir, '{}_{}.nns'.format(self.name, timestep)), "w+") as f:
        for layer in self.layers[:-1]:
            f.write("%s\n" % repr(layer))
        # The last layer is serialized with its original activation and half
        # its current output dim (finalization doubled it for mean + logvar).
        last_layer_copy = self.layers[-1].copy()
        last_layer_copy.set_activation(self.end_act_name)
        last_layer_copy.set_output_dim(last_layer_copy.get_output_dim() // 2)
        f.write("%s\n" % repr(last_layer_copy))
    # Variables keyed by position in (nonoptvars + optvars).
    var_vals = {}
    for i, var_val in enumerate(self.sess.run(self.nonoptvars + self.optvars)):
        var_vals[str(i)] = var_val
    savemat(os.path.join(model_dir, '{}_{}.mat'.format(self.name, timestep)), var_vals)
def _load_structure(self):
    """Rebuilds self.layers from the "<name>.nns" file in self.model_dir.

    Each line is the repr() of an FC layer; ``line[3:-2]`` strips the
    constructor-name prefix and trailing ")\n" before parsing "key=value"
    pairs back into constructor kwargs.
    """
    structure = []
    with open(os.path.join(self.model_dir, "%s.nns" % self.name), "r") as f:
        for line in f:
            kwargs = {
                key: val for (key, val) in
                [argval.split("=") for argval in line[3:-2].split(", ")]
            }
            # Everything was serialized as a string; coerce back to real types.
            kwargs["input_dim"] = int(kwargs["input_dim"])
            kwargs["output_dim"] = int(kwargs["output_dim"])
            kwargs["weight_decay"] = None if kwargs["weight_decay"] == "None" else float(kwargs["weight_decay"])
            # Activation names carry surrounding quotes in the repr; strip them.
            kwargs["activation"] = None if kwargs["activation"] == "None" else kwargs["activation"][1:-1]
            kwargs["ensemble_size"] = int(kwargs["ensemble_size"])
            structure.append(FC(**kwargs))
    self.layers = structure
self.min_logvar + tf.nn.softplus(logvar - self.min_logvar)
if ret_log_var:
return mean, logvar
else:
return mean, tf.exp(logvar)
def _compile_losses(self, inputs, targets, inc_var_loss=True):
    """Compiles the per-network training loss (Gaussian negative log likelihood).

    With inc_var_loss=True the loss is the precision-weighted squared error
    plus the log-variance term; otherwise it is the plain MSE. Returns one
    scalar loss per network in the ensemble.
    """
    mean, log_var = self._compile_outputs(inputs, ret_log_var=True)
    inv_var = tf.exp(-log_var)
    if inc_var_loss:
        # Average over output dims, then over the batch, per network.
        mse_losses = tf.reduce_mean(tf.reduce_mean(tf.square(mean - targets) * inv_var, axis=-1), axis=-1)
        var_losses = tf.reduce_mean(tf.reduce_mean(log_var, axis=-1), axis=-1)
        total_losses = mse_losses + var_losses
    else:
        total_losses = tf.reduce_mean(tf.reduce_mean(tf.square(mean - targets), axis=-1), axis=-1)
    return total_losses
f7f5cd78a8cb2d9936f4fdce573d1f7f372206f9 | 108 | py | Python | health-insurance-lead-prediction/src/utils/__init__.py | sudhirln92/analytics-vidhya-competitions | 2e284f1ae7e3ddd4d810714db0b8eca5770756ee | [
"MIT"
] | null | null | null | health-insurance-lead-prediction/src/utils/__init__.py | sudhirln92/analytics-vidhya-competitions | 2e284f1ae7e3ddd4d810714db0b8eca5770756ee | [
"MIT"
] | null | null | null | health-insurance-lead-prediction/src/utils/__init__.py | sudhirln92/analytics-vidhya-competitions | 2e284f1ae7e3ddd4d810714db0b8eca5770756ee | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 28 09:13:14 2021
@author: sudhir
""" | 13.5 | 35 | 0.601852 | true | true | |
f7f5ce55521d23880ed708056861d4c9b06247ba | 1,990 | py | Python | indicators/support_resistance.py | LucienZuber/AlgoTrader | cc6088a525ed6311c9a6969880c739b91bdebf5c | [
"MIT"
] | null | null | null | indicators/support_resistance.py | LucienZuber/AlgoTrader | cc6088a525ed6311c9a6969880c739b91bdebf5c | [
"MIT"
] | null | null | null | indicators/support_resistance.py | LucienZuber/AlgoTrader | cc6088a525ed6311c9a6969880c739b91bdebf5c | [
"MIT"
] | null | null | null | import pandas as pd
import datetime
def detect_support_resistances(df: pd.DataFrame, initial_state: int = 20, precision: int = 3, expiration_time: datetime.timedelta = datetime.timedelta(days=7)):
    """Detects support and resistance price levels from a 'Close' series.

    Seeds the first support/resistance with the min/max close of the first
    `initial_state` rows, then scans forward. A level is refreshed when the
    previous one expires (older than `expiration_time`) or when price breaks
    through it (window max below the last support, or window min above the
    last resistance). Extrema are taken from the `precision` rows preceding
    the current one.

    Arguments:
        df: DataFrame with a 'Close' column and a DatetimeIndex.
        initial_state: Number of leading rows used to seed the first levels.
        precision: Size of the trailing window used to pick local extrema.
        expiration_time: Age after which a level is considered stale.

    Returns:
        DataFrame indexed like `df` with 'Support' and 'Resistance' columns;
        rows where no level was registered hold NaN.
    """
    initial_min = df['Close'].iloc[:initial_state].idxmin()
    initial_max = df['Close'].iloc[:initial_state].idxmax()
    supports = pd.DataFrame({'Support': df['Close'].loc[initial_min]}, index=[initial_min])
    resistances = pd.DataFrame({'Resistance': df['Close'].loc[initial_max]}, index=[initial_max])
    expiration_support = initial_min + expiration_time
    expiration_resistance = initial_max + expiration_time
    for index in range(initial_state, len(df)):
        index_datetime = df.index[index]
        window = df['Close'].iloc[index - precision:index]
        latest_min_index = window.idxmin()
        latest_min_value = window.min()
        latest_max_index = window.idxmax()
        latest_max_value = window.max()
        if expiration_support <= index_datetime:
            # Previous support is stale: take the latest local minimum.
            supports.loc[latest_min_index] = latest_min_value
            expiration_support = latest_min_index + expiration_time
        elif expiration_resistance <= index_datetime:
            # Previous resistance is stale: take the latest local maximum.
            resistances.loc[latest_max_index] = latest_max_value
            # Bug fix: this expiry previously advanced from latest_min_index,
            # so the resistance's lifetime tracked the wrong timestamp.
            expiration_resistance = latest_max_index + expiration_time
        elif latest_max_value < supports['Support'].iloc[-1]:
            # Price broke below the last support: register a new, lower one.
            supports.loc[latest_min_index] = latest_min_value
            expiration_support = latest_min_index + expiration_time
        elif latest_min_value > resistances['Resistance'].iloc[-1]:
            # Price broke above the last resistance: register a new, higher one.
            resistances.loc[latest_max_index] = latest_max_value
            # Bug fix: same latest_min_index -> latest_max_index correction.
            expiration_resistance = latest_max_index + expiration_time
    supports = supports.reindex(df.index.values)
    resistances = resistances.reindex(df.index.values)
    result = supports.join(resistances)
    return result
import datetime
def detect_support_resistances(df: pd.DataFrame, initial_state: int = 20, precision: int = 3, expiration_time: datetime.timedelta = datetime.timedelta(days=7)):
initial_min = df['Close'].iloc[:initial_state].idxmin()
initial_max = df['Close'].iloc[:initial_state].idxmax()
supports=pd.DataFrame({'Support': df['Close'].loc[initial_min]}, index=[initial_min])
resistances=pd.DataFrame({'Resistance': df['Close'].loc[initial_max]}, index=[initial_max])
expiration_support = initial_min + expiration_time
expiration_resistance = initial_max + expiration_time
for (index, close_price) in enumerate(df['Close'].iloc[initial_state:], initial_state):
index_datetime = df.index[index]
latest_min_index = df['Close'].iloc[index-precision:index].idxmin()
latest_min_value = df['Close'].iloc[index-precision:index].min()
latest_max_index = df['Close'].iloc[index-precision:index].idxmax()
latest_max_value = df['Close'].iloc[index-precision:index].max()
if (expiration_support <= index_datetime):
supports.loc[latest_min_index] = latest_min_value
expiration_support = latest_min_index + expiration_time
elif (expiration_resistance <= index_datetime):
resistances.loc[latest_max_index] =latest_max_value
expiration_resistance = latest_min_index + expiration_time
elif (latest_max_value < supports['Support'].iloc[-1]):
supports.loc[latest_min_index] = latest_min_value
expiration_support = latest_min_index + expiration_time
elif (latest_min_value > resistances['Resistance'].iloc[-1]):
resistances.loc[latest_max_index] = latest_max_value
expiration_resistance = latest_min_index + expiration_time
supports = supports.reindex(df.index.values)
resistances = resistances.reindex(df.index.values)
result = supports.join(resistances)
return result | true | true |
f7f5ce7b86e61819247ebe7b6e7895b847a6dc64 | 7,329 | py | Python | doc/conf.py | akornatskyy/wheezy.web | 417834db697cf1f78f3a60cc880b9fd25d40c6de | [
"MIT"
] | 17 | 2020-08-29T18:45:51.000Z | 2022-03-02T19:37:13.000Z | doc/conf.py | akornatskyy/wheezy.web | 417834db697cf1f78f3a60cc880b9fd25d40c6de | [
"MIT"
] | 29 | 2020-07-18T04:34:03.000Z | 2021-07-06T09:42:36.000Z | doc/conf.py | akornatskyy/wheezy.web | 417834db697cf1f78f3a60cc880b9fd25d40c6de | [
"MIT"
] | 1 | 2022-03-14T08:41:42.000Z | 2022-03-14T08:41:42.000Z | # -*- coding: utf-8 -*-
#
# wheezy.web documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 9 20:36:50 2011.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Make the package under ../src importable so sphinx.ext.autodoc can find it.
sys.path.extend([
    os.path.abspath(os.path.join('..', 'src'))
])

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.doctest',
    'sphinx.ext.coverage', 'sphinx.ext.viewcode',
    'sphinx_rtd_theme'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'wheezy.web'
copyright = '2011, Andriy Kornatskyy'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'latest'
# The full version, including alpha/beta/rc tags.
release = 'latest'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'colorful'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# The style sheet to use for HTML pages.
# html_style = 'style.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# '<project> v<release> documentation'.
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named 'default.css' will overwrite the builtin 'default.css'.
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, 'Created using Sphinx' is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, '(C) Copyright ...' is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. '.xhtml').
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wheezy.webdoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'wheezy.web.tex', 'wheezy.web documentation',
'Andriy Kornatskyy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top
# of
# the title page.
# latex_logo = None
# For 'manual' documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wheezy.web', 'wheezy.web documentation',
['Andriy Kornatskyy'], 1)
]
| 31.454936 | 79 | 0.713331 |
import os
import sys
sys.path.extend([
os.path.abspath(os.path.join('..', 'src'))
])
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.coverage', 'sphinx.ext.viewcode',
'sphinx_rtd_theme'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'wheezy.web'
copyright = '2011, Andriy Kornatskyy'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'latest'
# The full version, including alpha/beta/rc tags.
release = 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'colorful'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# The style sheet to use for HTML pages.
# html_style = 'style.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# '<project> v<release> documentation'.
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named 'default.css' will overwrite the builtin 'default.css'.
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, 'Created using Sphinx' is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, '(C) Copyright ...' is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. '.xhtml').
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wheezy.webdoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'wheezy.web.tex', 'wheezy.web documentation',
'Andriy Kornatskyy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top
# of
# the title page.
# latex_logo = None
# For 'manual' documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wheezy.web', 'wheezy.web documentation',
['Andriy Kornatskyy'], 1)
]
| true | true |
f7f5d07ba2509e0dc988d96fc69ac203ba9eecd9 | 5,394 | py | Python | tests/operator/test_augmented_arithmetic_assignment.py | maggyero/desugar | fd283d4e93d4dbae385577627051094460b74fc8 | [
"MIT"
] | 109 | 2020-07-19T20:18:51.000Z | 2022-03-14T19:03:48.000Z | tests/operator/test_augmented_arithmetic_assignment.py | maggyero/desugar | fd283d4e93d4dbae385577627051094460b74fc8 | [
"MIT"
] | 7 | 2020-08-18T14:20:36.000Z | 2022-02-08T18:16:42.000Z | tests/operator/test_augmented_arithmetic_assignment.py | maggyero/desugar | fd283d4e93d4dbae385577627051094460b74fc8 | [
"MIT"
] | 4 | 2020-10-05T20:27:21.000Z | 2021-09-02T19:27:16.000Z | import operator
import sys
import pytest
import desugar.operator
from . import common
class AugmentedAssignmentTests:
    """Tests for augmented arithmetic assignment.

    Subclasses are expected to provide the actual assignment to test via
    the parametrized ``op`` fixture, plus three class attributes naming the
    dunder methods involved: ``lvalue_method`` (``__i*__``), ``lhs_method``
    (``__*__``) and ``rhs_method`` (``__r*__``).
    """

    def test_inplace(self, op):
        """Providing __i*__ should work."""
        assert op(common.Lvalue(), object()) == self.lvalue_method

    def test_lhs_fallback(self, op):
        """If __i*__ is not defined, fallback to __*__."""
        assert op(common.LHS(), object()) == self.lhs_method

    def test_lhs_fallback_from_not_implemented(self, op):
        """If __i*__ returned NotImplemented fall back to __*__."""
        # https://bugs.python.org/issue38302
        if sys.version_info[:2] < (3, 10) and op.__name__ == "ipow":
            pytest.skip("CPython's **= implementation does not call __pow__")
        lvalue = common.LvalueNotImplementedLHS()
        result = op(lvalue, object())
        assert result == self.lhs_method
        assert lvalue.icalled == 1

    def test_rhs_fallback(self, op):
        """If __i*__ and __*__ are not defined, fallback to __r*__."""
        assert op(object(), common.RHS()) == self.rhs_method

    def test_no_methods(self, op):
        """TypeError is raised if no appropriate methods are available."""
        with pytest.raises(TypeError):
            op(object(), object())

    def test_all_not_implemented(self, op):
        """TypeError is raised if all appropriate methods return NotImplemented.

        When the LHS and RHS are the same type then only __i*__ and __*__ are
        called.
        """
        lvalue = common.LvalueLHSRHSNotImplemented()
        rvalue = common.LvalueLHSRHSNotImplemented()
        with pytest.raises(TypeError):
            op(lvalue, rvalue)
        assert lvalue.icalled == 1
        # https://bugs.python.org/issue38302
        if sys.version_info[:2] < (3, 10) and op.__name__ == "ipow":
            return
        assert lvalue.called == 1
        assert not lvalue.rcalled
        assert not rvalue.icalled
        assert not rvalue.called
        assert not rvalue.rcalled

    def test_inplace_when_others_not_implemented(self, op):
        """__i*__ used when __*__ and __r*__ return NotImplemented."""
        # BUG FIX: this comparison previously lacked ``assert``, so the
        # expression result was silently discarded and the test could
        # never fail.
        assert op(common.LHSRHSNotImplementedLvalue(), object()) == self.lvalue_method

    def test_function_name(self, op):
        """The operator function's name should contain the dunder's short name."""
        short_name = self.lhs_method[2:-2]
        assert short_name in op.__name__
        assert short_name in op.__qualname__
@pytest.mark.parametrize("op", [operator.iadd, desugar.operator.iadd])
class TestAdditionInPlace(AugmentedAssignmentTests):
    """``+=``: run the shared suite against operator.iadd and desugar's iadd."""
    lvalue_method = "__iadd__"
    lhs_method = "__add__"
    rhs_method = "__radd__"
@pytest.mark.parametrize("op", [operator.isub, desugar.operator.isub])
class TestSubtractionInPlace(AugmentedAssignmentTests):
    """``-=``: run the shared suite against operator.isub and desugar's isub."""
    lvalue_method = "__isub__"
    lhs_method = "__sub__"
    rhs_method = "__rsub__"
@pytest.mark.parametrize("op", [operator.imul, desugar.operator.imul])
class TestMultiplicationInPlace(AugmentedAssignmentTests):
    """``*=``: run the shared suite against operator.imul and desugar's imul."""
    lvalue_method = "__imul__"
    lhs_method = "__mul__"
    rhs_method = "__rmul__"
@pytest.mark.parametrize("op", [operator.imatmul, desugar.operator.imatmul])
class TestMatrixMultiplicationInPlace(AugmentedAssignmentTests):
    """``@=``: run the shared suite against operator.imatmul and desugar's imatmul."""
    lvalue_method = "__imatmul__"
    lhs_method = "__matmul__"
    rhs_method = "__rmatmul__"
@pytest.mark.parametrize("op", [operator.itruediv, desugar.operator.itruediv])
class TestTrueDivisionInPlace(AugmentedAssignmentTests):
    """``/=``: run the shared suite against operator.itruediv and desugar's itruediv."""
    lvalue_method = "__itruediv__"
    lhs_method = "__truediv__"
    rhs_method = "__rtruediv__"
@pytest.mark.parametrize("op", [operator.ifloordiv, desugar.operator.ifloordiv])
class TestFloorDivisionInPlace(AugmentedAssignmentTests):
    """``//=``: run the shared suite against operator.ifloordiv and desugar's ifloordiv."""
    lvalue_method = "__ifloordiv__"
    lhs_method = "__floordiv__"
    rhs_method = "__rfloordiv__"
@pytest.mark.parametrize("op", [operator.imod, desugar.operator.imod])
class TestModuloInPlace(AugmentedAssignmentTests):
    """``%=``: run the shared suite against operator.imod and desugar's imod."""
    lvalue_method = "__imod__"
    lhs_method = "__mod__"
    rhs_method = "__rmod__"
@pytest.mark.parametrize("op", [operator.ipow, desugar.operator.ipow])
class TestPowerInPlace(AugmentedAssignmentTests):
    """``**=``: run the shared suite against operator.ipow and desugar's ipow."""
    lvalue_method = "__ipow__"
    lhs_method = "__pow__"
    rhs_method = "__rpow__"
@pytest.mark.parametrize("op", [operator.ilshift, desugar.operator.ilshift])
class TestLeftShiftInPlace(AugmentedAssignmentTests):
    """``<<=``: run the shared suite against operator.ilshift and desugar's ilshift."""
    lvalue_method = "__ilshift__"
    lhs_method = "__lshift__"
    rhs_method = "__rlshift__"
@pytest.mark.parametrize("op", [operator.irshift, desugar.operator.irshift])
class TestRightShiftInPlace(AugmentedAssignmentTests):
    """``>>=``: run the shared suite against operator.irshift and desugar's irshift."""
    lvalue_method = "__irshift__"
    lhs_method = "__rshift__"
    rhs_method = "__rrshift__"
@pytest.mark.parametrize("op", [operator.iand, desugar.operator.iand])
class TestAndInPlace(AugmentedAssignmentTests):
    """``&=``: run the shared suite against operator.iand and desugar's iand."""
    lvalue_method = "__iand__"
    lhs_method = "__and__"
    rhs_method = "__rand__"
@pytest.mark.parametrize("op", [operator.ixor, desugar.operator.ixor])
class TestExclusiveOrInPlace(AugmentedAssignmentTests):
    """``^=``: run the shared suite against operator.ixor and desugar's ixor."""
    lvalue_method = "__ixor__"
    lhs_method = "__xor__"
    rhs_method = "__rxor__"
@pytest.mark.parametrize("op", [operator.ior, desugar.operator.ior])
class TestOrInPlace(AugmentedAssignmentTests):
    """``|=``: run the shared suite against operator.ior and desugar's ior."""
    lvalue_method = "__ior__"
    lhs_method = "__or__"
    rhs_method = "__ror__"
| 30.303371 | 80 | 0.702818 | import operator
import sys
import pytest
import desugar.operator
from . import common
class AugmentedAssignmentTests:
def test_inplace(self, op):
assert op(common.Lvalue(), object()) == self.lvalue_method
def test_lhs_fallback(self, op):
assert op(common.LHS(), object()) == self.lhs_method
def test_lhs_fallback_from_not_implemented(self, op):
if sys.version_info[:2] < (3, 10) and op.__name__ == "ipow":
pytest.skip("CPython's **= implementation does not call __pow__")
lvalue = common.LvalueNotImplementedLHS()
result = op(lvalue, object())
assert result == self.lhs_method
assert lvalue.icalled == 1
def test_rhs_fallback(self, op):
assert op(object(), common.RHS()) == self.rhs_method
def test_no_methods(self, op):
with pytest.raises(TypeError):
op(object(), object())
def test_all_not_implemented(self, op):
lvalue = common.LvalueLHSRHSNotImplemented()
rvalue = common.LvalueLHSRHSNotImplemented()
with pytest.raises(TypeError):
op(lvalue, rvalue)
assert lvalue.icalled == 1
# https://bugs.python.org/issue38302
if sys.version_info[:2] < (3, 10) and op.__name__ == "ipow":
return
assert lvalue.called == 1
assert not lvalue.rcalled
assert not rvalue.icalled
assert not rvalue.called
assert not rvalue.rcalled
def test_inplace_when_others_not_implemented(self, op):
op(common.LHSRHSNotImplementedLvalue(), object()) == self.lvalue_method
def test_function_name(self, op):
short_name = self.lhs_method[2:-2]
assert short_name in op.__name__
assert short_name in op.__qualname__
@pytest.mark.parametrize("op", [operator.iadd, desugar.operator.iadd])
class TestAdditionInPlace(AugmentedAssignmentTests):
lvalue_method = "__iadd__"
lhs_method = "__add__"
rhs_method = "__radd__"
@pytest.mark.parametrize("op", [operator.isub, desugar.operator.isub])
class TestSubtractionInPlace(AugmentedAssignmentTests):
lvalue_method = "__isub__"
lhs_method = "__sub__"
rhs_method = "__rsub__"
@pytest.mark.parametrize("op", [operator.imul, desugar.operator.imul])
class TestMultiplicationInPlace(AugmentedAssignmentTests):
lvalue_method = "__imul__"
lhs_method = "__mul__"
rhs_method = "__rmul__"
@pytest.mark.parametrize("op", [operator.imatmul, desugar.operator.imatmul])
class TestMatrixMultiplicationInPlace(AugmentedAssignmentTests):
lvalue_method = "__imatmul__"
lhs_method = "__matmul__"
rhs_method = "__rmatmul__"
@pytest.mark.parametrize("op", [operator.itruediv, desugar.operator.itruediv])
class TestTrueDivisionInPlace(AugmentedAssignmentTests):
lvalue_method = "__itruediv__"
lhs_method = "__truediv__"
rhs_method = "__rtruediv__"
@pytest.mark.parametrize("op", [operator.ifloordiv, desugar.operator.ifloordiv])
class TestFloorDivisionInPlace(AugmentedAssignmentTests):
lvalue_method = "__ifloordiv__"
lhs_method = "__floordiv__"
rhs_method = "__rfloordiv__"
@pytest.mark.parametrize("op", [operator.imod, desugar.operator.imod])
class TestModuloInPlace(AugmentedAssignmentTests):
lvalue_method = "__imod__"
lhs_method = "__mod__"
rhs_method = "__rmod__"
@pytest.mark.parametrize("op", [operator.ipow, desugar.operator.ipow])
class TestPowerInPlace(AugmentedAssignmentTests):
lvalue_method = "__ipow__"
lhs_method = "__pow__"
rhs_method = "__rpow__"
@pytest.mark.parametrize("op", [operator.ilshift, desugar.operator.ilshift])
class TestLeftShiftInPlace(AugmentedAssignmentTests):
lvalue_method = "__ilshift__"
lhs_method = "__lshift__"
rhs_method = "__rlshift__"
@pytest.mark.parametrize("op", [operator.irshift, desugar.operator.irshift])
class TestRightShiftInPlace(AugmentedAssignmentTests):
lvalue_method = "__irshift__"
lhs_method = "__rshift__"
rhs_method = "__rrshift__"
@pytest.mark.parametrize("op", [operator.iand, desugar.operator.iand])
class TestAndInPlace(AugmentedAssignmentTests):
lvalue_method = "__iand__"
lhs_method = "__and__"
rhs_method = "__rand__"
@pytest.mark.parametrize("op", [operator.ixor, desugar.operator.ixor])
class TestExclusiveOrInPlace(AugmentedAssignmentTests):
lvalue_method = "__ixor__"
lhs_method = "__xor__"
rhs_method = "__rxor__"
@pytest.mark.parametrize("op", [operator.ior, desugar.operator.ior])
class TestOrInPlace(AugmentedAssignmentTests):
lvalue_method = "__ior__"
lhs_method = "__or__"
rhs_method = "__ror__"
| true | true |
f7f5d0e61d891440989de0ef4bbda8ae9a7aa950 | 9,467 | py | Python | hyperlinker/app/Historical Backup Scripts/TRCExternalPrep.py | JAckleyLSNYC/GitPython | 93fa78346e85ec6b8d00c5b362d6b498598a0444 | [
"bzip2-1.0.6"
] | null | null | null | hyperlinker/app/Historical Backup Scripts/TRCExternalPrep.py | JAckleyLSNYC/GitPython | 93fa78346e85ec6b8d00c5b362d6b498598a0444 | [
"bzip2-1.0.6"
] | null | null | null | hyperlinker/app/Historical Backup Scripts/TRCExternalPrep.py | JAckleyLSNYC/GitPython | 93fa78346e85ec6b8d00c5b362d6b498598a0444 | [
"bzip2-1.0.6"
] | null | null | null | from flask import request, send_from_directory
import os

import pandas as pd

from app import app, DataWizardTools, HousingToolBox
@app.route("/TRCExternalPrep", methods=['GET', 'POST'])
def TRCExternalPrep():
    """Reformat a LegalServer 'TRC External Report' excel export for HRA.

    GET renders the upload form.  POST accepts the uploaded workbook,
    renames/derives the columns HRA expects, highlights cells needing
    manual review, and returns the reformatted workbook as a download.
    """
    # upload file from computer via browser
    if request.method == 'POST':
        print(request.files['file'])
        f = request.files['file']

        # turn the excel file into a dataframe, but skip the top 2 rows if they are blank
        test = pd.read_excel(f)
        test.fillna('', inplace=True)
        if test.iloc[0][0] == '':
            df = pd.read_excel(f, skiprows=2)
        else:
            df = pd.read_excel(f)

        # Remove rows without Case ID values
        df.fillna('', inplace=True)
        df['Matter/Case ID#'] = df.apply(lambda x: DataWizardTools.RemoveNoCaseID(x['Matter/Case ID#']), axis=1)
        df = df[df['Matter/Case ID#'] != 'No Case ID']

        # Create hyperlinks back to the LegalServer case pages
        df['Hyperlinked CaseID#'] = df.apply(lambda x: DataWizardTools.Hyperlinker(x['Matter/Case ID#']), axis=1)

        ### This is where all the functions happen: ###

        # Just direct mapping for new column names
        df['first_name'] = df['Client First Name']
        df['last_name'] = df['Client Last Name']
        df['SSN'] = df['Social Security #']
        df['PA_number'] = df['Gen Pub Assist Case Number']
        df['DOB'] = df['Date of Birth']
        df['num_adults'] = df['Number of People 18 and Over']
        df['num_children'] = df['Number of People under 18']
        df['Unit'] = df['Apt#/Suite#']
        df['zip'] = df['Zip Code']
        df['waiver_approval_date'] = df['Housing Date Of Waiver Approval']
        df['waiver'] = df['Housing TRC HRA Waiver Categories']
        df['rent'] = df['Housing Total Monthly Rent']
        df['LT_index'] = df['Gen Case Index Number']
        df['language'] = df['Language']
        # NOTE: the trailing space in 'Total Annual Income ' matches the
        # LegalServer export column name exactly - do not "fix" it.
        df['income'] = df['Total Annual Income ']
        df['eligibility_date'] = df['HAL Eligibility Date']
        df['DHCI'] = df['Housing Signed DHCI Form']
        df['units_in_bldg'] = df['Housing Number Of Units In Building']
        df['outcome_date'] = df['Housing Outcome Date']

        # Append the 'LSNYC' prefix to the caseIDs we submit
        df['id'] = 'LSNYC' + df['Matter/Case ID#']

        # Turn our funding codes into HRA Program Names
        # *for trc (3018 and 3011) everything is AHTP - more complicated for UA etc.
        df['program_name'] = 'AHTP'

        # Separate out street number from street name (based on first space)
        df['street_number'] = df['Street Address'].str.split(' ').str[0]
        df['Street'] = df['Street Address'].str.split(' ', 1).str[1]

        # If it is a case in Queens it will have neighborhood - change it to say Queens
        df['city'] = df.apply(lambda x: DataWizardTools.QueensConsolidater(x['City']), axis=1)

        # Translation based on HRA Specs
        df['proceeding'] = df.apply(lambda x: HousingToolBox.ProceedingType(x['Housing Type Of Case']), axis=1)

        # if it's a multi-tenant/group case, change Yes/No to Group/Individual;
        # eviction proceedings default to Individual, anything else is flagged
        # for manual review
        def ProceedingLevel(GroupCase, TypeOfCase, EvictionProceedings):
            """Map LegalServer's building-case flag to HRA's proceeding level."""
            if GroupCase == "Yes":
                return "Group"
            elif GroupCase == "No":
                return "Individual"
            elif TypeOfCase in EvictionProceedings:
                return "Individual"
            else:
                return "Needs Review"

        df['proceeding_level'] = df.apply(lambda x: ProceedingLevel(x['Housing Building Case?'], x['proceeding'], HousingToolBox.evictionproceedings), axis=1)

        # For years in apartment, negative 1 or less = 0.5
        df['years_in_apt'] = df['Housing Years Living In Apartment'].apply(lambda x: .5 if x <= -1 else x)

        # Case posture on eligibility date (on trial, no stipulation etc.) -> HRA initials
        df['posture'] = df.apply(lambda x: HousingToolBox.PostureOnEligibility(x['Housing Posture of Case on Eligibility Date']), axis=1)

        # Level of Service becomes Service type
        df['service_type'] = df.apply(lambda x: HousingToolBox.TRCServiceType(x['Housing Level of Service']), axis=1)

        # if below 201, = 'Yes' otherwise 'No'
        df['below_200_FPL'] = df['Percentage of Poverty'].apply(lambda x: "Yes" if x < 200 else "No")

        # Subsidy type - must be one of HRA's shorter list ('other' is not valid)
        df['subsidy_type'] = df.apply(lambda x: HousingToolBox.SubsidyType(x['Housing Subsidy Type']), axis=1)

        # Housing Regulation Type: map our categories down to HRA's (cannot be blank)
        df['housing_type'] = df.apply(lambda x: HousingToolBox.HousingType(x['Housing Form Of Regulation']), axis=1)

        # Referrals need to be one of HRA's specific categories
        df['referral_source'] = df.apply(lambda x: HousingToolBox.ReferralMap(x['Referral Source']), axis=1)

        # Housing Outcomes needs mapping for HRA
        df['outcome'] = df.apply(lambda x: HousingToolBox.Outcome(x['Housing Outcome']), axis=1)

        # Outcome related things that need mapping
        df['services_rendered'] = df.apply(lambda x: HousingToolBox.ServicesRendered(x['Housing Services Rendered to Client']), axis=1)

        # Mapped to what HRA wants - some of the options are in LegalServer
        df['activities'] = df.apply(lambda x: HousingToolBox.Activities(x['Housing Activity Indicators']), axis=1)

        # Differentiate pre- and post- 3/1/20 eligibility date cases
        df['DateConstruct'] = df.apply(lambda x: DataWizardTools.DateMaker(x['HAL Eligibility Date']), axis=1)
        df['Pre-3/1/20 Elig Date?'] = df.apply(lambda x: HousingToolBox.PreThreeOne(x['DateConstruct']), axis=1)

        ### Finalizing Report ###

        # put columns in correct order
        df = df[['id',
                 'program_name',
                 'first_name',
                 'last_name',
                 'SSN',
                 'PA_number',
                 'DOB',
                 'num_adults',
                 'num_children',
                 'street_number',
                 'Street',
                 'Unit',
                 'city',
                 'zip',
                 'waiver_approval_date',
                 'waiver',
                 'rent',
                 'proceeding',
                 'LT_index',
                 'proceeding_level',
                 'years_in_apt',
                 'language',
                 'referral_source',
                 'income',
                 'eligibility_date',
                 'DHCI',
                 'posture',
                 'service_type',
                 'below_200_FPL',
                 'units_in_bldg',
                 'subsidy_type',
                 'housing_type',
                 'outcome_date',
                 'outcome',
                 'services_rendered',
                 'activities',
                 'HRA Release?',
                 'Percentage of Poverty',
                 'Primary Advocate',
                 'Hyperlinked CaseID#',
                 'Pre-3/1/20 Elig Date?'
                 ]]

        # bounce worksheets back to excel
        # SECURITY FIX: sanitize the user-supplied filename so a crafted name
        # (e.g. containing '..' or path separators) cannot write outside the
        # app\sheets directory.
        output_filename = os.path.basename(f.filename)
        writer = pd.ExcelWriter("app\\sheets\\" + output_filename, engine='xlsxwriter')
        df.to_excel(writer, sheet_name='Sheet1', index=False)
        workbook = writer.book
        worksheet = writer.sheets['Sheet1']

        # formatting: freeze the header row, widen columns, blue underlined
        # hyperlink column, and highlight yellow any cell flagged 'Needs ...'
        link_format = workbook.add_format({'font_color': 'blue', 'bold': True, 'underline': True})
        problem_format = workbook.add_format({'bg_color': 'yellow'})
        worksheet.freeze_panes(1, 0)
        worksheet.set_column('A:BL', 20)
        worksheet.set_column('AN:AN', 30, link_format)
        worksheet.conditional_format('C2:BO100000', {'type': 'text',
                                                     'criteria': 'containing',
                                                     'value': 'Needs',
                                                     'format': problem_format})
        writer.save()

        # send file back to user (download name keeps the original filename)
        return send_from_directory('sheets', output_filename, as_attachment=True, attachment_filename="Formatted " + f.filename)

    # what the user-facing site looks like (GET request)
    return '''
    <!doctype html>
    <title>TRC Report Prep</title>
    <link rel="stylesheet" href="/static/css/main.css">
    <link rel="stylesheet" href="/static/css/main.css">
    <h1>Prep Cases for TRC External Report:</h1>
    <form action="" method=post enctype=multipart/form-data>
    <p><input type=file name=file><input type=submit value=TRC-ify!>
    </form>
    <h3>Instructions:</h3>
    <ul type="disc">
    <li>This tool is meant to be used in conjunction with the LegalServer report called <a href="https://lsnyc.legalserver.org/report/dynamic?load=1969" target="_blank">TRC External Report</a>.</li>
    </ul>
    </br>
    <a href="/">Home</a>
    '''
| 43.228311 | 199 | 0.572304 | from flask import request, send_from_directory
from app import app, DataWizardTools, HousingToolBox
import pandas as pd
@app.route("/TRCExternalPrep", methods=['GET', 'POST'])
def TRCExternalPrep():
if request.method == 'POST':
print(request.files['file'])
f = request.files['file']
test = pd.read_excel(f)
test.fillna('',inplace=True)
if test.iloc[0][0] == '':
df = pd.read_excel(f,skiprows=2)
else:
df = pd.read_excel(f)
df.fillna('',inplace = True)
df['Matter/Case ID#'] = df.apply(lambda x : DataWizardTools.RemoveNoCaseID(x['Matter/Case ID#']),axis=1)
df = df[df['Matter/Case ID#'] != 'No Case ID']
df['Hyperlinked CaseID#'] = df.apply(lambda x : DataWizardTools.Hyperlinker(x['Matter/Case ID#']),axis=1)
df['Client Last Name']
df['SSN'] = df['Social Security #']
df['PA_number'] = df['Gen Pub Assist Case Number']
df['DOB'] = df['Date of Birth']
df['num_adults'] = df['Number of People 18 and Over']
df['num_children'] = df['Number of People under 18']
df['Unit'] = df['Apt#/Suite#']
df['zip'] = df['Zip Code']
df['waiver_approval_date'] = df['Housing Date Of Waiver Approval']
df['waiver'] = df['Housing TRC HRA Waiver Categories']
df['rent'] = df['Housing Total Monthly Rent']
df['LT_index'] = df['Gen Case Index Number']
df['language'] = df['Language']
df['income'] = df['Total Annual Income ']
df['eligibility_date'] = df['HAL Eligibility Date']
df['DHCI'] = df['Housing Signed DHCI Form']
df['units_in_bldg'] = df['Housing Number Of Units In Building']
df['outcome_date'] = df['Housing Outcome Date']
df['id'] = 'LSNYC' + df['Matter/Case ID#']
df['program_name'] = 'AHTP'
df['street_number'] = df['Street Address'].str.split(' ').str[0]
df['Street'] = df['Street Address'].str.split(' ',1).str[1]
df['city'] = df.apply(lambda x: DataWizardTools.QueensConsolidater(x['City']), axis=1)
df['proceeding'] = df.apply(lambda x: HousingToolBox.ProceedingType(x['Housing Type Of Case']), axis=1)
#Also, if it's an eviction case, it's individual, otherwise make it "needs review"
def ProceedingLevel(GroupCase,TypeOfCase,EvictionProceedings):
if GroupCase == "Yes":
return "Group"
elif GroupCase == "No":
return "Individual"
elif TypeOfCase in EvictionProceedings:
return "Individual"
else:
return "Needs Review"
# --- Derived / HRA-mapped fields (helpers live in HousingToolBox) ---
df['proceeding_level'] = df.apply(lambda x: ProceedingLevel(x['Housing Building Case?'], x['proceeding'], HousingToolBox.evictionproceedings), axis=1)
#For years in apartment, negative 1 or less = 0.5
df['years_in_apt'] = df['Housing Years Living In Apartment'].apply(lambda x: .5 if x <= -1 else x)
#Case posture on eligibility date (on trial, no stipulation etc.) - transform them into the HRA initials
df['posture'] = df.apply(lambda x: HousingToolBox.PostureOnEligibility(x['Housing Posture of Case on Eligibility Date']), axis=1)
#Level of Service becomes Service type
df['service_type'] = df.apply(lambda x: HousingToolBox.TRCServiceType(x['Housing Level of Service']), axis=1)
#if below 201, = 'Yes' otherwise 'No'
df['below_200_FPL'] = df['Percentage of Poverty'].apply(lambda x: "Yes" if x < 200 else "No")
#Subsidy type - if it's not in the HRA list, it has to be 'none' (other is not valid) - they want a smaller list than we record. (mapping to be confirmed)
df['subsidy_type'] = df.apply(lambda x: HousingToolBox.SubsidyType(x['Housing Subsidy Type']), axis=1)
df['housing_type'] = df.apply(lambda x: HousingToolBox.HousingType(x['Housing Form Of Regulation']), axis=1)
#Referrals need to be one of their specific categories
df['referral_source'] = df.apply(lambda x: HousingToolBox.ReferralMap(x['Referral Source']), axis = 1)
#Housing Outcomes needs mapping for HRA
df['outcome'] = df.apply(lambda x: HousingToolBox.Outcome(x['Housing Outcome']), axis=1)
#Outcome related things that need mapping
df['services_rendered'] = df.apply(lambda x: HousingToolBox.ServicesRendered(x['Housing Services Rendered to Client']), axis=1)
#Mapped to what HRA wants - some of the options are in LegalServer,
df['activities'] = df.apply(lambda x: HousingToolBox.Activities(x['Housing Activity Indicators']), axis=1)
#Differentiate pre- and post- 3/1/20 eligibility date cases
df['DateConstruct'] = df.apply(lambda x: DataWizardTools.DateMaker(x['HAL Eligibility Date']), axis=1)
df['Pre-3/1/20 Elig Date?'] = df.apply(lambda x: HousingToolBox.PreThreeOne(x['DateConstruct']), axis=1)
###Finalizing Report###

# Select and order columns exactly as the HRA/TRC external report expects.
# FIX: the 'Hyperlinked CaseID' entry was missing its closing quote,
# trailing '#' and comma (a syntax error); the column built earlier in
# this routine is named 'Hyperlinked CaseID#'.
df = df[['id',
         'program_name',
         'first_name',
         'last_name',
         'SSN',
         'PA_number',
         'DOB',
         'num_adults',
         'num_children',
         'street_number',
         'Street',
         'Unit',
         'city',
         'zip',
         'waiver_approval_date',
         'waiver',
         'rent',
         'proceeding',
         'LT_index',
         'proceeding_level',
         'years_in_apt',
         'language',
         'referral_source',
         'income',
         'eligibility_date',
         'DHCI',
         'posture',
         'service_type',
         'below_200_FPL',
         'units_in_bldg',
         'subsidy_type',
         'housing_type',
         'outcome_date',
         'outcome',
         'services_rendered',
         'activities',
         'HRA Release?',
         'Percentage of Poverty',
         'Primary Advocate',
         'Hyperlinked CaseID#',
         'Pre-3/1/20 Elig Date?',
         ]]
# --- Write the finished frame back out as a formatted Excel workbook ---
#bounce worksheets back to excel
output_filename = f.filename
writer = pd.ExcelWriter("app\\sheets\\"+output_filename, engine = 'xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1',index=False)
workbook = writer.book
worksheet = writer.sheets['Sheet1']
#highlight yellow if needs review
#make columns wider
#give the hyperlink format
link_format = workbook.add_format({'font_color':'blue', 'bold':True, 'underline':True})
problem_format = workbook.add_format({'bg_color':'yellow'})
# Keep the header row visible while scrolling.
worksheet.freeze_panes(1,0)
worksheet.set_column('A:BL',20)
# Column AN holds the case-ID hyperlink formulas, so it gets the link format.
worksheet.set_column ('AN:AN',30,link_format)
# Flag any cell containing 'Needs' (e.g. 'Needs Review') in yellow.
worksheet.conditional_format('C2:BO100000',{'type': 'text',
                                            'criteria': 'containing',
                                            'value': 'Needs',
                                            'format': problem_format})
writer.save()
#send file back to user
# NOTE(review): 'attachment_filename' is the pre-Flask-2.0 parameter name
# (renamed to 'download_name' in Flask 2.0) -- confirm the pinned version.
return send_from_directory('sheets',output_filename, as_attachment = True, attachment_filename = "Formatted " + f.filename)
# GET request (or fall-through): render the simple upload page.
# NOTE(review): the stylesheet <link> appears twice in this markup --
# looks like an accidental duplicate; confirm before removing.
return '''
<!doctype html>
<title>TRC Report Prep</title>
<link rel="stylesheet" href="/static/css/main.css">
<link rel="stylesheet" href="/static/css/main.css">
<h1>Prep Cases for TRC External Report:</h1>
<form action="" method=post enctype=multipart/form-data>
<p><input type=file name=file><input type=submit value=TRC-ify!>
</form>
<h3>Instructions:</h3>
<ul type="disc">
<li>This tool is meant to be used in conjunction with the LegalServer report called <a href="https://lsnyc.legalserver.org/report/dynamic?load=1969" target="_blank">TRC External Report</a>.</li>
</ul>
</br>
<a href="/">Home</a>
'''
| true | true |
f7f5d17b33b79cb5cd0246b3b9b74d231c1ca656 | 83,231 | py | Python | include/ClientFiles.py | sorashi/hydrus | 0544a75d2117904b42e935d264ae35ded5cbf36a | [
"WTFPL"
] | null | null | null | include/ClientFiles.py | sorashi/hydrus | 0544a75d2117904b42e935d264ae35ded5cbf36a | [
"WTFPL"
] | null | null | null | include/ClientFiles.py | sorashi/hydrus | 0544a75d2117904b42e935d264ae35ded5cbf36a | [
"WTFPL"
] | null | null | null | from . import ClientConstants as CC
from . import ClientImageHandling
from . import ClientPaths
from . import ClientThreading
import collections
import gc
from . import HydrusConstants as HC
from . import HydrusData
from . import HydrusExceptions
from . import HydrusFileHandling
from . import HydrusGlobals as HG
from . import HydrusImageHandling
from . import HydrusNetworking
from . import HydrusPaths
from . import HydrusThreading
import os
import random
import threading
import time
from qtpy import QtWidgets as QW
from . import QtPorting as QP
# Enum of file-maintenance job types the client can queue against a file.
REGENERATE_FILE_DATA_JOB_FILE_METADATA = 0
REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL = 1
REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL = 2
REGENERATE_FILE_DATA_JOB_OTHER_HASHES = 3
REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES = 4
REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE = 5
REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA = 6
REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS = 7
REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP = 8
REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA = 9
REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP = 10
REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL = 11
REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL = 12
REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE = 13

# Short human-readable label per job type (for UI lists).
regen_file_enum_to_str_lookup = {}

regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_FILE_METADATA ] = 'regenerate file metadata'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL ] = 'regenerate thumbnail'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL ] = 'regenerate thumbnail if incorrect size'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_OTHER_HASHES ] = 'regenerate non-standard hashes'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES ] = 'delete duplicate neighbours with incorrect file extension'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE ] = 'if file is missing, remove record'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL ] = 'if file is missing and has url, try to redownload'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA ] = 'if file is missing/incorrect, move file out and remove record'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL ] = 'if file is missing/incorrect and has url, move file out and try to redownload'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ] = 'if file is incorrect, move file out'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS ] = 'fix file read/write permissions'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP ] = 'check for membership in the similar files search system'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA ] = 'regenerate similar files metadata'
regen_file_enum_to_str_lookup[ REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP ] = 'regenerate file modified date'

# Longer tooltip/description text per job type.
regen_file_enum_to_description_lookup = {}

regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_METADATA ] = 'This regenerates file metadata like resolution and duration, or even filetype (such as mkv->webm), which may have been misparsed in a previous version.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL ] = 'This forces a complete regeneration of the thumbnail from the source file.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL ] = 'This looks for the existing thumbnail, and if it is not the correct resolution or is missing, will regenerate a new one for the source file.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_OTHER_HASHES ] = 'This regenerates hydrus\'s store of md5, sha1, and sha512 supplementary hashes, which it can use for various external (usually website) lookups.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES ] = 'Sometimes, a file metadata regeneration will mean a new filetype and thus a new file extension. If the existing, incorrectly named file is in use, it must be copied rather than renamed, and so there is a spare duplicate left over after the operation. This jobs cleans up the duplicate at a later time.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE ] = 'This checks to see if the file is present in the file system as expected. If it is not, the internal file record in the database is removed, just as if the file were deleted. Use this if you have manually deleted or otherwise lost a number of files from your file structure and need hydrus to re-sync with what it actually has. Missing files will have their known URLs exported to your database directory.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL ] = 'This checks to see if the file is present in the file system as expected. If it is not, and it has known post/file urls, the URLs will be automatically added to a new URL downloader. Missing files will also have their known URLs exported to your database directory.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA ] = 'This does the same check as the \'file is missing\' job, and if the file is where it is expected, it ensures its file content, byte-for-byte, is correct. This is a heavy job, so be wary. If the file is incorrect, it will be exported to your database directory along with their known URLs, and the file record deleted.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL ] = 'This does the same check as the \'file is missing\' job, and if the file is where it is expected, it ensures its file content, byte-for-byte, is correct. This is a heavy job, so be wary. If the file is incorrect _and_ is has known post/file urls, the URLs will be automatically added to a new URL downloader. Incorrect files will also have their known URLs exported to your database directory.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ] = 'If the file is where it is expected, this ensures its file content, byte-for-byte, is correct. This is a heavy job, so be wary. If the file is incorrect, it will be exported to your database directory along with its known URLs. The client\'s file record will not be deleted. This is useful if you have a valid backup and need to clear out invalid files from your live db so you can fill in gaps from your backup with a program like FreeFileSync.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS ] = 'This ensures that files in the file system are readable and writeable. For Linux/macOS users, it specifically sets 644. If you wish to run this job on Linux/macOS, ensure you are first the file owner of all your files.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP ] = 'This checks to see if files should be in the similar files system, and if they are falsely in or falsely out, it will remove their record or queue them up for a search as appropriate. It is useful to repair database damage.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA ] = 'This forces a regeneration of the file\'s similar-files \'phashes\'. It is not useful unless you know there is missing data to repair.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP ] = 'This rechecks the file\'s modified timestamp and saves it to the database.'

# Reference weight for a 'big' job; individual weights below are relative to it.
NORMALISED_BIG_JOB_WEIGHT = 100

# Approximate cost weight per job type, used for scheduling/progress.
regen_file_enum_to_job_weight_lookup = {}

regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_FILE_METADATA ] = 100
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL ] = 50
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL ] = 25
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_OTHER_HASHES ] = 100
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES ] = 25
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE ] = 5
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL ] = 50
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA ] = 100
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL ] = 100
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ] = 100
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS ] = 25
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP ] = 50
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA ] = 100
regen_file_enum_to_job_weight_lookup[ REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP ] = 10

# Jobs that become redundant when the keyed job is already queued.
regen_file_enum_to_overruled_jobs = {}

regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_FILE_METADATA ] = []
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL ] = [ REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL ]
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL ] = []
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_OTHER_HASHES ] = []
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES ] = []
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE ] = []
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL ] = []
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA ] = [ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE ]
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL ] = [ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL ]
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ] = []
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS ] = []
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP ] = []
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA ] = [ REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP ]
regen_file_enum_to_overruled_jobs[ REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP ] = []

# Preferred execution order when multiple job types are queued for a file.
ALL_REGEN_JOBS_IN_PREFERRED_ORDER = [ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE, REGENERATE_FILE_DATA_JOB_FILE_METADATA, REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL, REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL, REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA, REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP, REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS, REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP, REGENERATE_FILE_DATA_JOB_OTHER_HASHES, REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES ]
def GetAllFilePaths( raw_paths, do_human_sort = True ):
    """Expand a mix of file and directory paths into a flat list of file paths.

    Directories are walked one level per pass (breadth-first). Raises
    ShutdownException if the client starts shutting down mid-walk.
    """
    file_paths = []
    pending = list( raw_paths )
    while len( pending ) > 0:
        found_this_pass = []
        for path in pending:
            if HG.view_shutdown:
                raise HydrusExceptions.ShutdownException()
            if os.path.isdir( path ):
                found_this_pass.extend( os.path.join( path, filename ) for filename in os.listdir( path ) )
            else:
                file_paths.append( path )
        pending = found_this_pass
    if do_human_sort:
        HydrusData.HumanTextSort( file_paths )
    return file_paths
class ClientFilesManager( object ):
def __init__( self, controller ):
    # Manages the client's on-disk 'fxx'/'txx' prefix directory structure.
    self._controller = controller
    self._rwlock = ClientThreading.FileRWLock()
    # prefix ('f'/'t' + two hex chars) -> base location directory
    self._prefixes_to_locations = {}
    self._bad_error_occurred = False
    # set of ( location, prefix ) pairs found missing on disk
    self._missing_locations = set()
    self._Reinit()
def _AddFile( self, hash, mime, source_path ):
    """Copy source_path into the managed file structure for ( hash, mime )."""
    dest_path = self._GenerateExpectedFilePath( hash, mime )
    if HG.file_report_mode or HG.file_import_report_mode:
        HydrusData.ShowText( 'Adding file to client file structure: from {} to {}'.format( source_path, dest_path ) )
    if not HydrusPaths.MirrorFile( source_path, dest_path ):
        raise Exception( 'There was a problem copying the file from ' + source_path + ' to ' + dest_path + '!' )
def _AddThumbnailFromBytes( self, hash, thumbnail_bytes, silent = False ):
    """Write thumbnail_bytes to the expected thumbnail path for hash.

    Raises DirectoryMissingException if the whole prefix directory is gone,
    or FileMissingException if only the write failed (likely permissions).
    Publishes thumbnail refresh events unless silent.
    """
    dest_path = self._GenerateExpectedThumbnailPath( hash )
    if HG.file_report_mode:
        HydrusData.ShowText( 'Adding thumbnail: ' + str( ( len( thumbnail_bytes ), dest_path ) ) )
    try:
        HydrusPaths.MakeFileWritable( dest_path )
        with open( dest_path, 'wb' ) as f:
            f.write( thumbnail_bytes )
    except Exception as e:
        # Distinguish 'the prefix folder is missing' from 'could not write'.
        hash_encoded = hash.hex()
        prefix = 't' + hash_encoded[:2]
        location = self._prefixes_to_locations[ prefix ]
        thumb_dir = os.path.join( location, prefix )
        if not os.path.exists( thumb_dir ):
            raise HydrusExceptions.DirectoryMissingException( 'The directory {} was not found! Reconnect the missing location or shut down the client immediately!'.format( thumb_dir ) )
        raise HydrusExceptions.FileMissingException( 'The thumbnail for file "{}" failed to write to path "{}". This event suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.'.format( hash.hex(), dest_path ) )
    if not silent:
        # Tell the UI to drop any cached thumbnail and redraw.
        self._controller.pub( 'clear_thumbnails', { hash } )
        self._controller.pub( 'new_thumbnails', { hash } )
def _AttemptToHealMissingLocations( self ):
    """Try to remap missing prefix folders that turn up in other known locations.

    A missing ( location, prefix ) is healable only when exactly one other
    known location contains that prefix directory; ambiguous or absent
    prefixes are left for manual repair.
    """
    # if a missing prefix folder seems to be in another location, lets update to that other location
    correct_rows = []
    some_are_unhealable = False
    fixes_counter = collections.Counter()
    # Candidate locations: everything currently mapped plus the ideal set.
    known_locations = set()
    known_locations.update( self._prefixes_to_locations.values() )
    ( locations_to_ideal_weights, thumbnail_override ) = self._controller.Read( 'ideal_client_files_locations' )
    known_locations.update( locations_to_ideal_weights.keys() )
    if thumbnail_override is not None:
        known_locations.add( thumbnail_override )
    for ( missing_location, prefix ) in self._missing_locations:
        potential_correct_locations = []
        for known_location in known_locations:
            if known_location == missing_location:
                continue
            dir_path = os.path.join( known_location, prefix )
            if os.path.exists( dir_path ) and os.path.isdir( dir_path ):
                potential_correct_locations.append( known_location )
        if len( potential_correct_locations ) == 1:
            # Unambiguous: exactly one other location holds this prefix.
            correct_location = potential_correct_locations[0]
            correct_rows.append( ( missing_location, prefix, correct_location ) )
            fixes_counter[ ( missing_location, correct_location ) ] += 1
        else:
            some_are_unhealable = True
    if len( correct_rows ) > 0 and some_are_unhealable:
        message = 'Hydrus found multiple missing locations in your file storage. Some of these locations seemed to be fixable, others did not. The client will now inform you about both problems.'
        self._controller.SafeShowCriticalMessage( 'Multiple file location problems.', message )
    if len( correct_rows ) > 0:
        # Summarise the planned remaps for the user, then write them to the db.
        summaries = [ '{} moved from {} to {}'.format( HydrusData.ToHumanInt( count ), missing_location, correct_location ) for ( ( missing_location, correct_location ), count ) in fixes_counter.items() ]
        summaries.sort()
        summary_message = 'Some client file folders were missing, but they seem to be in other known locations! The folders are:'
        summary_message += os.linesep * 2
        summary_message += os.linesep.join( summaries )
        summary_message += os.linesep * 2
        summary_message += 'Assuming you did this on purpose, Hydrus is ready to update its internal knowledge to reflect these new mappings as soon as this dialog closes. If you know these proposed fixes are incorrect, terminate the program now.'
        HydrusData.Print( summary_message )
        self._controller.SafeShowCriticalMessage( 'About to auto-heal client file folders.', summary_message )
        HG.client_controller.WriteSynchronous( 'repair_client_files', correct_rows )
def _ChangeFileExt( self, hash, old_mime, mime ):
    """Rename (or copy) a stored file so its extension matches its new mime.

    Returns True if the old file could not be moved and had to be copied
    (leaving a duplicate neighbour behind for later cleanup), False if no
    copy was needed.
    """
    old_path = self._GenerateExpectedFilePath( hash, old_mime )
    new_path = self._GenerateExpectedFilePath( hash, mime )
    if old_path == new_path:
        # some diff mimes have the same ext
        # FIX: previously returned None implicitly here while every other
        # path returns a bool; return an explicit False (no duplicate made).
        return False
    if HG.file_report_mode:
        HydrusData.ShowText( 'Changing file ext: ' + str( ( old_path, new_path ) ) )
    if HydrusPaths.PathIsFree( old_path ):
        try:
            HydrusPaths.MergeFile( old_path, new_path )
            needed_to_copy_file = False
        except Exception:
            # FIX: narrowed the bare 'except:' so interrupt/shutdown
            # exceptions are not swallowed; any ordinary move failure
            # still falls back to copying.
            HydrusPaths.MirrorFile( old_path, new_path )
            needed_to_copy_file = True
    else:
        # The file is in use, so it cannot be renamed; copy instead.
        HydrusPaths.MirrorFile( old_path, new_path )
        needed_to_copy_file = True
    return needed_to_copy_file
def _GenerateExpectedFilePath( self, hash, mime ):
    """Return the canonical storage path for ( hash, mime )."""
    self._WaitOnWakeup()
    hash_encoded = hash.hex()
    prefix = 'f' + hash_encoded[:2]
    filename = hash_encoded + HC.mime_ext_lookup[ mime ]
    return os.path.join( self._prefixes_to_locations[ prefix ], prefix, filename )
def _GenerateExpectedThumbnailPath( self, hash ):
self._WaitOnWakeup()
hash_encoded = hash.hex()
prefix = 't' + hash_encoded[:2]
location = self._prefixes_to_locations[ prefix ]
path = os.path.join( location, prefix, hash_encoded ) + '.thumbnail'
return path
def _GenerateThumbnailBytes( self, file_path, media ):
    """Render a fresh thumbnail for media's file and return its bytes.

    Raises FileMissingException if generation fails, which usually means
    the source file itself is damaged or unreadable.
    """
    hash = media.GetHash()
    mime = media.GetMime()
    ( width, height ) = media.GetResolution()
    duration = media.GetDuration()
    num_frames = media.GetNumFrames()
    # Scale the source resolution into the user's configured bounding box.
    bounding_dimensions = HG.client_controller.options[ 'thumbnail_dimensions' ]
    target_resolution = HydrusImageHandling.GetThumbnailResolution( ( width, height ), bounding_dimensions )
    # How far into a video to grab the representative frame.
    percentage_in = self._controller.new_options.GetInteger( 'video_thumbnail_percentage_in' )
    try:
        thumbnail_bytes = HydrusFileHandling.GenerateThumbnailBytes( file_path, target_resolution, mime, duration, num_frames, percentage_in = percentage_in )
    except Exception as e:
        raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.hex() + ' could not be regenerated from the original file for the above reason. This event could indicate hard drive corruption. Please check everything is ok.' )
    return thumbnail_bytes
def _GetRecoverTuple( self ):
all_locations = { location for location in list(self._prefixes_to_locations.values()) }
all_prefixes = list(self._prefixes_to_locations.keys())
for possible_location in all_locations:
for prefix in all_prefixes:
correct_location = self._prefixes_to_locations[ prefix ]
if possible_location != correct_location and os.path.exists( os.path.join( possible_location, prefix ) ):
recoverable_location = possible_location
return ( prefix, recoverable_location, correct_location )
return None
def _GetRebalanceTuple( self ):
    """Pick one prefix folder to migrate towards the ideal storage weights.

    Returns ( prefix, current_location, better_location ) for a single
    move, or None when the structure already matches the ideal. File
    prefixes are balanced first; once balanced, thumbnail prefixes are
    moved to sit beside their files (or to the thumbnail override).
    """
    ( locations_to_ideal_weights, thumbnail_override ) = self._controller.Read( 'ideal_client_files_locations' )
    total_weight = sum( locations_to_ideal_weights.values() )
    ideal_locations_to_normalised_weights = { location : weight / total_weight for ( location, weight ) in list(locations_to_ideal_weights.items()) }
    current_locations_to_normalised_weights = collections.defaultdict( lambda: 0 )
    file_prefixes = [ prefix for prefix in self._prefixes_to_locations if prefix.startswith( 'f' ) ]
    # Each of the 256 'f' prefixes contributes 1/256 of the total weight.
    for file_prefix in file_prefixes:
        location = self._prefixes_to_locations[ file_prefix ]
        current_locations_to_normalised_weights[ location ] += 1.0 / 256
    # Locations in use but no longer wanted get an ideal weight of zero.
    for location in list(current_locations_to_normalised_weights.keys()):
        if location not in ideal_locations_to_normalised_weights:
            ideal_locations_to_normalised_weights[ location ] = 0.0
    #
    overweight_locations = []
    underweight_locations = []
    for ( location, ideal_weight ) in list(ideal_locations_to_normalised_weights.items()):
        if location in current_locations_to_normalised_weights:
            current_weight = current_locations_to_normalised_weights[ location ]
            if current_weight < ideal_weight:
                underweight_locations.append( location )
            # one-prefix tolerance, so we do not thrash over rounding
            elif current_weight >= ideal_weight + 1.0 / 256:
                overweight_locations.append( location )
        else:
            underweight_locations.append( location )
    #
    if len( underweight_locations ) > 0 and len( overweight_locations ) > 0:
        overweight_location = overweight_locations.pop( 0 )
        underweight_location = underweight_locations.pop( 0 )
        # shuffle so repeated calls do not always pick the same prefix
        random.shuffle( file_prefixes )
        for file_prefix in file_prefixes:
            location = self._prefixes_to_locations[ file_prefix ]
            if location == overweight_location:
                return ( file_prefix, overweight_location, underweight_location )
    else:
        # Files are balanced; now check every thumbnail prefix.
        for hex_prefix in HydrusData.IterateHexPrefixes():
            thumbnail_prefix = 't' + hex_prefix
            if thumbnail_override is None:
                file_prefix = 'f' + hex_prefix
                correct_location = self._prefixes_to_locations[ file_prefix ]
            else:
                correct_location = thumbnail_override
            current_thumbnails_location = self._prefixes_to_locations[ thumbnail_prefix ]
            if current_thumbnails_location != correct_location:
                return ( thumbnail_prefix, current_thumbnails_location, correct_location )
    return None
def _IterateAllFilePaths( self ):
for ( prefix, location ) in list(self._prefixes_to_locations.items()):
if prefix.startswith( 'f' ):
dir = os.path.join( location, prefix )
filenames = os.listdir( dir )
for filename in filenames:
yield os.path.join( dir, filename )
def _IterateAllThumbnailPaths( self ):
for ( prefix, location ) in list(self._prefixes_to_locations.items()):
if prefix.startswith( 't' ):
dir = os.path.join( location, prefix )
filenames = os.listdir( dir )
for filename in filenames:
yield os.path.join( dir, filename )
def _LookForFilePath( self, hash ):
    """Search every allowed mime extension for hash's file on disk.

    Returns ( path, mime ) on success. Raises DirectoryMissingException
    if the whole prefix directory is gone, else FileMissingException.
    """
    for potential_mime in HC.ALLOWED_MIMES:
        potential_path = self._GenerateExpectedFilePath( hash, potential_mime )
        if os.path.exists( potential_path ):
            return ( potential_path, potential_mime )
    # Not found under any extension -- work out which error to raise.
    hash_encoded = hash.hex()
    prefix = 'f' + hash_encoded[:2]
    location = self._prefixes_to_locations[ prefix ]
    subdir = os.path.join( location, prefix )
    if not os.path.exists( subdir ):
        raise HydrusExceptions.DirectoryMissingException( 'The directory {} was not found! Reconnect the missing location or shut down the client immediately!'.format( subdir ) )
    raise HydrusExceptions.FileMissingException( 'File for ' + hash.hex() + ' not found!' )
def _Reinit( self ):
    """(Re)load the prefix->location map and verify the structure on disk.

    On first start the directories are created; otherwise missing folders
    are detected, auto-healed where possible, and any remainder reported
    loudly to the user.
    """
    self._prefixes_to_locations = self._controller.Read( 'client_files_locations' )
    if HG.client_controller.IsFirstStart():
        # Fresh database: build the whole fxx/txx structure.
        try:
            for ( prefix, location ) in list( self._prefixes_to_locations.items() ):
                HydrusPaths.MakeSureDirectoryExists( location )
                subdir = os.path.join( location, prefix )
                HydrusPaths.MakeSureDirectoryExists( subdir )
        except:
            text = 'Attempting to create the database\'s client_files folder structure in {} failed!'.format( location )
            self._controller.SafeShowCriticalMessage( 'unable to create file structure', text )
            raise
    else:
        self._ReinitMissingLocations()
        if len( self._missing_locations ) > 0:
            # Try the auto-heal, then re-read the (possibly repaired) map.
            self._AttemptToHealMissingLocations()
            self._prefixes_to_locations = self._controller.Read( 'client_files_locations' )
            self._ReinitMissingLocations()
        if len( self._missing_locations ) > 0:
            self._bad_error_occurred = True
            #
            # Build a per-location report of which prefixes are missing.
            missing_dict = HydrusData.BuildKeyToListDict( self._missing_locations )
            missing_locations = list( missing_dict.keys() )
            missing_locations.sort()
            missing_string = ''
            for missing_location in missing_locations:
                missing_prefixes = list( missing_dict[ missing_location ] )
                missing_prefixes.sort()
                missing_prefixes_string = ' ' + os.linesep.join( ( ', '.join( block ) for block in HydrusData.SplitListIntoChunks( missing_prefixes, 32 ) ) )
                missing_string += os.linesep
                missing_string += missing_location
                missing_string += os.linesep
                missing_string += missing_prefixes_string
            #
            # Many missing folders -> summary dialog + details in the log;
            # a few -> full details in the dialog itself.
            if len( self._missing_locations ) > 4:
                text = 'When initialising the client files manager, some file locations did not exist! They have all been written to the log!'
                text += os.linesep * 2
                text += 'If this is happening on client boot, you should now be presented with a dialog to correct this manually!'
                self._controller.SafeShowCriticalMessage( 'missing locations', text )
                HydrusData.DebugPrint( 'Missing locations follow:' )
                HydrusData.DebugPrint( missing_string )
            else:
                text = 'When initialising the client files manager, these file locations did not exist:'
                text += os.linesep * 2
                text += missing_string
                text += os.linesep * 2
                text += 'If this is happening on client boot, you should now be presented with a dialog to correct this manually!'
                self._controller.SafeShowCriticalMessage( 'missing locations', text )
def _ReinitMissingLocations( self ):
self._missing_locations = set()
for ( prefix, location ) in list(self._prefixes_to_locations.items()):
if os.path.exists( location ):
subdir = os.path.join( location, prefix )
if not os.path.exists( subdir ):
self._missing_locations.add( ( location, prefix ) )
else:
self._missing_locations.add( ( location, prefix ) )
def _WaitOnWakeup( self ):
    """Optionally block path generation until the OS has settled after sleep.

    Only active when the 'file_system_waits_on_wakeup' option is on; polls
    once a second while the controller reports it just woke from sleep.
    """
    if HG.client_controller.new_options.GetBoolean( 'file_system_waits_on_wakeup' ):
        while HG.client_controller.JustWokeFromSleep():
            HydrusThreading.CheckIfThreadShuttingDown()
            time.sleep( 1.0 )
def AllLocationsAreDefault( self ):
    """Return True if every storage location lives under db_dir/client_files."""
    with self._rwlock.read:
        client_files_default = os.path.join( self._controller.GetDBDir(), 'client_files' )
        all_locations = set( self._prefixes_to_locations.values() )
        return all( location.startswith( client_files_default ) for location in all_locations )
def LocklessAddFileFromBytes( self, hash, mime, file_bytes ):
    """Write file_bytes straight into the file structure.

    Caller is responsible for holding the write lock.
    """
    dest_path = self._GenerateExpectedFilePath( hash, mime )
    if HG.file_report_mode:
        HydrusData.ShowText( 'Adding file from string: ' + str( ( len( file_bytes ), dest_path ) ) )
    HydrusPaths.MakeFileWritable( dest_path )
    with open( dest_path, 'wb' ) as f:
        f.write( file_bytes )
def AddFile( self, hash, mime, source_path, thumbnail_bytes = None ):
    """Import a file (and optionally its thumbnail) under the write lock."""
    with self._rwlock.write:
        self._AddFile( hash, mime, source_path )
        if thumbnail_bytes is not None:
            self._AddThumbnailFromBytes( hash, thumbnail_bytes )
def AddThumbnailFromBytes( self, hash, thumbnail_bytes, silent = False ):
    """Write a thumbnail for hash under the write lock."""
    with self._rwlock.write:
        self._AddThumbnailFromBytes( hash, thumbnail_bytes, silent = silent )
def ChangeFileExt( self, hash, old_mime, mime ):
    """Rename/copy hash's file to its new mime's extension, under the write lock."""
    with self._rwlock.write:
        return self._ChangeFileExt( hash, old_mime, mime )
def ClearOrphans( self, move_location = None ):
    """Scan all file and thumbnail storage for orphans, then move or delete them.

    An orphan is a stored file whose hash the db does not recognise (checked via
    the 'is_an_orphan' read). If move_location is given, orphan files are moved
    there instead of deleted; orphan thumbnails are always deleted. Progress is
    reported through a cancellable popup job_key.
    """
    with self._rwlock.write:
        job_key = ClientThreading.JobKey( cancellable = True )
        job_key.SetVariable( 'popup_title', 'clearing orphans' )
        job_key.SetVariable( 'popup_text_1', 'preparing' )
        self._controller.pub( 'message', job_key )
        orphan_paths = []
        orphan_thumbnails = []
        for ( i, path ) in enumerate( self._IterateAllFilePaths() ):
            ( i_paused, should_quit ) = job_key.WaitIfNeeded()
            if should_quit:
                return
            if i % 100 == 0:
                status = 'reviewed ' + HydrusData.ToHumanInt( i ) + ' files, found ' + HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphans'
                job_key.SetVariable( 'popup_text_1', status )
            try:
                is_an_orphan = False
                ( directory, filename ) = os.path.split( path )
                # storage filenames begin with the 64-char hex hash
                should_be_a_hex_hash = filename[:64]
                hash = bytes.fromhex( should_be_a_hex_hash )
                is_an_orphan = HG.client_controller.Read( 'is_an_orphan', 'file', hash )
            except:
                # an unparseable filename is itself an orphan
                is_an_orphan = True
            if is_an_orphan:
                if move_location is not None:
                    ( source_dir, filename ) = os.path.split( path )
                    dest = os.path.join( move_location, filename )
                    dest = HydrusPaths.AppendPathUntilNoConflicts( dest )
                    HydrusData.Print( 'Moving the orphan ' + path + ' to ' + dest )
                    HydrusPaths.MergeFile( path, dest )
                orphan_paths.append( path )
        time.sleep( 2 )
        for ( i, path ) in enumerate( self._IterateAllThumbnailPaths() ):
            ( i_paused, should_quit ) = job_key.WaitIfNeeded()
            if should_quit:
                return
            if i % 100 == 0:
                status = 'reviewed ' + HydrusData.ToHumanInt( i ) + ' thumbnails, found ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphans'
                job_key.SetVariable( 'popup_text_1', status )
            try:
                is_an_orphan = False
                ( directory, filename ) = os.path.split( path )
                should_be_a_hex_hash = filename[:64]
                hash = bytes.fromhex( should_be_a_hex_hash )
                is_an_orphan = HG.client_controller.Read( 'is_an_orphan', 'thumbnail', hash )
            except:
                is_an_orphan = True
            if is_an_orphan:
                orphan_thumbnails.append( path )
        time.sleep( 2 )
        if move_location is None and len( orphan_paths ) > 0:
            status = 'found ' + HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphans, now deleting'
            job_key.SetVariable( 'popup_text_1', status )
            time.sleep( 5 )
            # BUGFIX: this loop previously iterated 'for path in orphan_paths:' and
            # reused the stale 'i' left over from the thumbnail scan above, so the
            # progress text showed a wrong, frozen count. enumerate fixes it.
            for ( i, path ) in enumerate( orphan_paths ):
                ( i_paused, should_quit ) = job_key.WaitIfNeeded()
                if should_quit:
                    return
                HydrusData.Print( 'Deleting the orphan ' + path )
                status = 'deleting orphan files: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_paths ) )
                job_key.SetVariable( 'popup_text_1', status )
                ClientPaths.DeletePath( path )
        if len( orphan_thumbnails ) > 0:
            status = 'found ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphan thumbnails, now deleting'
            job_key.SetVariable( 'popup_text_1', status )
            time.sleep( 5 )
            for ( i, path ) in enumerate( orphan_thumbnails ):
                ( i_paused, should_quit ) = job_key.WaitIfNeeded()
                if should_quit:
                    return
                status = 'deleting orphan thumbnails: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_thumbnails ) )
                job_key.SetVariable( 'popup_text_1', status )
                HydrusData.Print( 'Deleting the orphan ' + path )
                # thumbnails bypass the recycle bin
                ClientPaths.DeletePath( path, always_delete_fully = True )
        if len( orphan_paths ) == 0 and len( orphan_thumbnails ) == 0:
            final_text = 'no orphans found!'
        else:
            final_text = HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphan files and ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphan thumbnails cleared!'
        job_key.SetVariable( 'popup_text_1', final_text )
        HydrusData.Print( job_key.ToString() )
        job_key.Finish()
def DelayedDeleteFiles( self, hashes ):
    """Delete the given files from storage after a short grace period, in small locked chunks."""
    if HG.file_report_mode:
        HydrusData.ShowText( 'Delayed delete files call: ' + str( len( hashes ) ) )
    time.sleep( 2 )
    pauser = HydrusData.BigJobPauser( period = 1 )
    for hashes_chunk in HydrusData.SplitIteratorIntoChunks( hashes, 10 ):
        with self._rwlock.write:
            for hash in hashes_chunk:
                # EAFP: the file may already be gone
                try:
                    ( path, mime ) = self._LookForFilePath( hash )
                except HydrusExceptions.FileMissingException:
                    continue
                ClientPaths.DeletePath( path )
        pauser.Pause()
def DelayedDeleteThumbnails( self, hashes ):
    """Delete the given thumbnails from storage after a short grace period, in small locked chunks."""
    if HG.file_report_mode:
        HydrusData.ShowText( 'Delayed delete thumbs call: ' + str( len( hashes ) ) )
    time.sleep( 2 )
    pauser = HydrusData.BigJobPauser( period = 1 )
    for hashes_chunk in HydrusData.SplitIteratorIntoChunks( hashes, 20 ):
        with self._rwlock.write:
            for hash in hashes_chunk:
                thumb_path = self._GenerateExpectedThumbnailPath( hash )
                # thumbnails bypass the recycle bin
                ClientPaths.DeletePath( thumb_path, always_delete_fully = True )
        pauser.Pause()
def DeleteNeighbourDupes( self, hash, true_mime ):
    """Remove sibling copies of the file that were stored under a wrong extension."""
    with self._rwlock.write:
        correct_path = self._GenerateExpectedFilePath( hash, true_mime )
        if not os.path.exists( correct_path ):
            return # misfire, let's not actually delete the right one
        for mime in HC.ALLOWED_MIMES:
            if mime == true_mime:
                continue
            incorrect_path = self._GenerateExpectedFilePath( hash, mime )
            # some diff mimes share the same ext, so the paths can collide
            if incorrect_path != correct_path and os.path.exists( incorrect_path ):
                HydrusPaths.DeletePath( incorrect_path )
def GetCurrentFileLocations( self ):
    """Return the set of directories currently backing file ( 'f' ) prefixes."""
    with self._rwlock.read:
        return { location for ( prefix, location ) in self._prefixes_to_locations.items() if prefix.startswith( 'f' ) }
def GetFilePath( self, hash, mime = None, check_file_exists = True ):
    """Thread-safe lookup of the storage path for hash; see LocklessGetFilePath."""
    with self._rwlock.read:
        result = self.LocklessGetFilePath( hash, mime = mime, check_file_exists = check_file_exists )
    return result
def GetMissing( self ):
    """Return the set of ( location, prefix ) pairs found missing at the last reinit scan."""
    return self._missing_locations
def LocklessGetFilePath( self, hash, mime = None, check_file_exists = True ):
    """Resolve the storage path for hash, repairing a wrong extension on the way.

    Raises HydrusExceptions.FileMissingException if check_file_exists is True and
    no file can be found under any extension.
    """
    if HG.file_report_mode:
        HydrusData.ShowText( 'File path request: ' + str( ( hash, mime ) ) )
    if mime is None:
        ( path, mime ) = self._LookForFilePath( hash )
    else:
        path = self._GenerateExpectedFilePath( hash, mime )
    if check_file_exists and not os.path.exists( path ):
        # maybe the file is on disk but under a different extension
        try:
            ( found_path, found_mime ) = self._LookForFilePath( hash )
        except HydrusExceptions.FileMissingException:
            raise HydrusExceptions.FileMissingException( 'No file found at path {}!'.format( path ) )
        # rename it to the expected ext so the expected path becomes valid
        self._ChangeFileExt( hash, found_mime, mime )
    return path
def GetThumbnailPath( self, media ):
    """Return the thumbnail path for media, regenerating the thumbnail first if it is missing."""
    hash = media.GetHash()
    mime = media.GetMime()
    if HG.file_report_mode:
        HydrusData.ShowText( 'Thumbnail path request: ' + str( ( hash, mime ) ) )
    with self._rwlock.read:
        path = self._GenerateExpectedThumbnailPath( hash )
        needs_regen = not os.path.exists( path )
    # regenerate outside the read lock -- RegenerateThumbnail takes its own locks
    if needs_regen:
        self.RegenerateThumbnail( media )
    return path
def LocklessHasThumbnail( self, hash ):
    """Report whether a thumbnail file currently exists on disk for this hash."""
    expected_path = self._GenerateExpectedThumbnailPath( hash )
    if HG.file_report_mode:
        HydrusData.ShowText( 'Thumbnail path test: ' + expected_path )
    return os.path.exists( expected_path )
def Rebalance( self, job_key ):
    """Move storage prefixes between locations until weights match, then recover misplaced prefixes.

    job_key: a cancellable job whose popup text is updated with progress.
    Refuses to run if a serious file error happened earlier this session.
    """
    try:
        if self._bad_error_occurred:
            QW.QMessageBox.warning( None, 'Warning', 'A serious file error has previously occurred during this session, so further file moving will not be reattempted. Please restart the client before trying again.' )
            return
        with self._rwlock.write:
            rebalance_tuple = self._GetRebalanceTuple()
            while rebalance_tuple is not None:
                if job_key.IsCancelled():
                    break
                ( prefix, overweight_location, underweight_location ) = rebalance_tuple
                text = 'Moving \'' + prefix + '\' from ' + overweight_location + ' to ' + underweight_location
                HydrusData.Print( text )
                job_key.SetVariable( 'popup_text_1', text )
                # these two lines can cause a deadlock because the db sometimes calls stuff in here.
                self._controller.Write( 'relocate_client_files', prefix, overweight_location, underweight_location )
                self._Reinit()
                rebalance_tuple = self._GetRebalanceTuple()
            recover_tuple = self._GetRecoverTuple()
            while recover_tuple is not None:
                if job_key.IsCancelled():
                    break
                ( prefix, recoverable_location, correct_location ) = recover_tuple
                text = 'Recovering \'' + prefix + '\' from ' + recoverable_location + ' to ' + correct_location
                HydrusData.Print( text )
                job_key.SetVariable( 'popup_text_1', text )
                recoverable_path = os.path.join( recoverable_location, prefix )
                correct_path = os.path.join( correct_location, prefix )
                HydrusPaths.MergeTree( recoverable_path, correct_path )
                recover_tuple = self._GetRecoverTuple()
    finally:
        # always close out the popup, even on cancel/error
        job_key.SetVariable( 'popup_text_1', 'done!' )
        job_key.Finish()
        job_key.Delete()
def RebalanceWorkToDo( self ):
    """True if any storage prefix currently needs moving between locations."""
    with self._rwlock.read:
        pending = self._GetRebalanceTuple()
    return pending is not None
def RegenerateThumbnail( self, media ):
    """Rebuild and store the thumbnail for media from its original file.

    Raises HydrusExceptions.FileMissingException if the original file is absent.
    """
    hash = media.GetHash()
    mime = media.GetMime()
    if mime not in HC.MIMES_WITH_THUMBNAILS:
        return
    # generate under the read lock...
    with self._rwlock.read:
        source_path = self._GenerateExpectedFilePath( hash, mime )
        if not os.path.exists( source_path ):
            raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.hex() + ' could not be regenerated from the original file because the original file is missing! This event could indicate hard drive corruption. Please check everything is ok.')
        thumbnail_bytes = self._GenerateThumbnailBytes( source_path, media )
    # ...then store under the write lock
    with self._rwlock.write:
        self._AddThumbnailFromBytes( hash, thumbnail_bytes )
def RegenerateThumbnailIfWrongSize( self, media ):
    """Regenerate media's thumbnail when the one on disk has a stale resolution.

    Returns True if a regen happened, False if not; returns None early for mimes
    without thumbnails (preserving the original contract).
    """
    needs_regen = False
    try:
        hash = media.GetHash()
        mime = media.GetMime()
        if mime not in HC.MIMES_WITH_THUMBNAILS:
            return
        ( media_width, media_height ) = media.GetResolution()
        thumb_path = self._GenerateExpectedThumbnailPath( hash )
        numpy_image = ClientImageHandling.GenerateNumPyImage( thumb_path, mime )
        ( current_width, current_height ) = HydrusImageHandling.GetResolutionNumPy( numpy_image )
        bounding_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
        ( expected_width, expected_height ) = HydrusImageHandling.GetThumbnailResolution( ( media_width, media_height ), bounding_dimensions )
        if ( current_width, current_height ) != ( expected_width, expected_height ):
            needs_regen = True
    except:
        # any failure to load or measure the existing thumb means we should remake it
        needs_regen = True
    if needs_regen:
        self.RegenerateThumbnail( media )
    return needs_regen
class FilesMaintenanceManager( object ):
    """Runs deferred per-file maintenance jobs in the background.

    Job types are the module-level REGENERATE_FILE_DATA_JOB_* constants (metadata
    regen, integrity checks, thumbnail regen, and so on). Jobs are queued in the
    db and pulled via controller.Read( 'file_maintenance_get_job' ). Work is
    throttled with bandwidth-style rules, with separate idle/active budgets.
    """
    def __init__( self, controller ):
        """Set up trackers, locks, events, and subscribe to controller pubsubs."""
        self._controller = controller
        # one-shot flags so each warning popup appears at most once per boot
        self._pubbed_message_about_bad_file_record_delete = False
        self._pubbed_message_about_invalid_file_export = False
        # counts completed work so the throttle rules can meter it
        self._work_tracker = HydrusNetworking.BandwidthTracker()
        self._idle_work_rules = HydrusNetworking.BandwidthRules()
        self._active_work_rules = HydrusNetworking.BandwidthRules()
        self._jobs_since_last_gc_collect = 0
        self._ReInitialiseWorkRules()
        # _maintenance_lock serialises whole maintenance passes; _lock guards individual job runs
        self._maintenance_lock = threading.Lock()
        self._lock = threading.Lock()
        # wakes the background loop early / asks it to drop its current batch
        self._wake_background_event = threading.Event()
        self._reset_background_event = threading.Event()
        self._shutdown = False
        self._controller.sub( self, 'NotifyNewOptions', 'notify_new_options' )
        self._controller.sub( self, 'Shutdown', 'shutdown' )
    def _AbleToDoBackgroundMaintenance( self ):
        """Return True if options and throttle rules currently permit background work."""
        if HG.client_controller.CurrentlyIdle():
            if not self._controller.new_options.GetBoolean( 'file_maintenance_during_idle' ):
                return False
            if not self._controller.GoodTimeToStartBackgroundWork():
                return False
            return self._idle_work_rules.CanStartRequest( self._work_tracker )
        else:
            if not self._controller.new_options.GetBoolean( 'file_maintenance_during_active' ):
                return False
            return self._active_work_rules.CanStartRequest( self._work_tracker )
    def _CheckFileIntegrity( self, media_result, job_type ):
        """Check the file for media_result is present (and, for DATA job types, has the right hash).

        On a bad file: exports known URLs (and, for some job types, the bad file
        itself) to a 'missing_and_invalid_files' dir, optionally deletes the db
        record, and optionally queues redownloads via the gui. Returns True if
        the file was missing or invalid.
        """
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        error_dir = os.path.join( self._controller.GetDBDir(), 'missing_and_invalid_files' )
        file_is_missing = False
        file_is_invalid = False
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, mime )
        except HydrusExceptions.FileMissingException:
            file_is_missing = True
            HydrusData.DebugPrint( 'Missing file: {}!'.format( hash.hex() ) )
        # the DATA job types additionally verify the file content hashes correctly
        if not file_is_missing and job_type in ( REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ):
            actual_hash = HydrusFileHandling.GetHashFromPath( path )
            if hash != actual_hash:
                file_is_invalid = True
                HydrusData.DebugPrint( 'Invalid file: {} actually had hash {}!'.format( hash.hex(), actual_hash.hex() ) )
        file_was_bad = file_is_missing or file_is_invalid
        if file_was_bad:
            urls = media_result.GetLocationsManager().GetURLs()
            if len( urls ) > 0:
                # save the known urls both per-file and in a combined log
                HydrusPaths.MakeSureDirectoryExists( error_dir )
                with open( os.path.join( error_dir, hash.hex() + '.urls.txt' ), 'w', encoding = 'utf-8' ) as f:
                    for url in urls:
                        f.write( url )
                        f.write( os.linesep )
                with open( os.path.join( error_dir, 'all_urls.txt' ), 'a', encoding = 'utf-8' ) as f:
                    for url in urls:
                        f.write( url )
                        f.write( os.linesep )
            # only file/post urls (or unrecognised ones) are worth redownloading from
            useful_urls = []
            for url in urls:
                add_it = False
                url_class = HG.client_controller.network_engine.domain_manager.GetURLClass( url )
                if url_class is None:
                    add_it = True
                else:
                    if url_class.GetURLType() in ( HC.URL_TYPE_FILE, HC.URL_TYPE_POST ):
                        add_it = True
                if add_it:
                    useful_urls.append( url )
            delete_record = job_type in ( REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA )
            try_redownload = job_type in ( REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL ) and len( useful_urls ) > 0
            do_export = file_is_invalid and ( job_type in ( REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ) or ( job_type == REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL and try_redownload ) )
            if do_export:
                HydrusPaths.MakeSureDirectoryExists( error_dir )
                dest_path = os.path.join( error_dir, os.path.basename( path ) )
                HydrusPaths.MergeFile( path, dest_path )
                if not self._pubbed_message_about_invalid_file_export:
                    self._pubbed_message_about_invalid_file_export = True
                    message = 'During file maintenance, a file was found to be invalid. It and any known URLs have been moved to "{}".'.format( error_dir )
                    message += os.linesep * 2
                    message += 'More files may be invalid, but this message will not appear again during this boot.'
                    HydrusData.ShowText( message )
            if delete_record:
                # remove the file record from every local service, then scrub the
                # deletion record so a clean reimport is possible
                content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, ( hash, ), reason = 'Record deleted during File Integrity check.' )
                for service_key in [ CC.LOCAL_FILE_SERVICE_KEY, CC.LOCAL_UPDATE_SERVICE_KEY, CC.TRASH_SERVICE_KEY, CC.COMBINED_LOCAL_FILE_SERVICE_KEY ]:
                    service_keys_to_content_updates = { service_key : [ content_update ] }
                    self._controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
                content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ADVANCED, ( 'delete_deleted', ( hash, ) ) )
                service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ content_update ] }
                self._controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
                if not self._pubbed_message_about_bad_file_record_delete:
                    self._pubbed_message_about_bad_file_record_delete = True
                    message = 'During file maintenance, a file was found to be missing or invalid. Its file record has been removed from the database without leaving a deletion record (so it can be easily reimported). Any known URLs for the file have been written to "{}".'.format( error_dir )
                    message += os.linesep * 2
                    message += 'More file records may have been removed, but this message will not appear again during this boot.'
                    HydrusData.ShowText( message )
            if try_redownload:
                # gui work must happen on the qt thread
                def qt_add_url( url ):
                    if QP.isValid( HG.client_controller.gui ):
                        HG.client_controller.gui.ImportURL( url, 'missing files redownloader' )
                for url in useful_urls:
                    QP.CallAfter( qt_add_url, url )
        return file_was_bad
    def _CheckSimilarFilesMembership( self, media_result ):
        """Return whether this file's mime should be in the similar-files (phash) system."""
        mime = media_result.GetMime()
        return mime in HC.MIMES_WE_CAN_PHASH
    def _ClearJobs( self, hashes, job_type ):
        """Mark the given hashes' jobs of job_type as done (with no additional data)."""
        if len( hashes ) > 0:
            cleared_jobs = [ ( hash, job_type, None ) for hash in hashes ]
            self._controller.WriteSynchronous( 'file_maintenance_clear_jobs', cleared_jobs )
    def _DeleteNeighbourDupes( self, media_result ):
        """Delete sibling copies of this file stored under wrong extensions."""
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        self._controller.client_files_manager.DeleteNeighbourDupes( hash, mime )
    def _FixFilePermissions( self, media_result ):
        """Make the file writable on disk; silently skips a missing file."""
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, mime )
            HydrusPaths.MakeFileWritable( path )
        except HydrusExceptions.FileMissingException:
            return None
    def _RegenFileMetadata( self, media_result ):
        """Re-derive file metadata from disk; returns the new metadata tuple or None.

        If the true mime differs from the recorded one the file ext is corrected,
        and a deferred neighbour-dupe cleanup job is queued when a dupe was made.
        A MimeException triggers an integrity check instead.
        """
        hash = media_result.GetHash()
        original_mime = media_result.GetMime()
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, original_mime )
            ( size, mime, width, height, duration, num_frames, has_audio, num_words ) = HydrusFileHandling.GetFileInfo( path, ok_to_look_for_hydrus_updates = True )
            additional_data = ( size, mime, width, height, duration, num_frames, has_audio, num_words )
            if mime != original_mime:
                needed_to_dupe_the_file = self._controller.client_files_manager.ChangeFileExt( hash, original_mime, mime )
                if needed_to_dupe_the_file:
                    # clean the leftover dupe up in a week's time
                    self._controller.WriteSynchronous( 'file_maintenance_add_jobs_hashes', { hash }, REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES, HydrusData.GetNow() + ( 7 * 86400 ) )
            return additional_data
        except HydrusExceptions.MimeException:
            self._CheckFileIntegrity( media_result, REGENERATE_FILE_DDATA_JOB_FILE_INTEGRITY_DATA_URL if False else REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL )
            return None
        except HydrusExceptions.FileMissingException:
            return None
    def _RegenFileModifiedTimestamp( self, media_result ):
        """Return the file's on-disk modified timestamp, or None if missing/not applicable."""
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        if mime in HC.HYDRUS_UPDATE_FILES:
            return None
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, mime )
            file_modified_timestamp = HydrusFileHandling.GetFileModifiedTimestamp( path )
            additional_data = file_modified_timestamp
            return additional_data
        except HydrusExceptions.FileMissingException:
            return None
    def _RegenFileOtherHashes( self, media_result ):
        """Return ( md5, sha1, sha512 ) for the file, or None if missing/not applicable."""
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        if mime in HC.HYDRUS_UPDATE_FILES:
            return None
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, mime )
            ( md5, sha1, sha512 ) = HydrusFileHandling.GetExtraHashesFromPath( path )
            additional_data = ( md5, sha1, sha512 )
            return additional_data
        except HydrusExceptions.FileMissingException:
            return None
    def _RegenSimilarFilesMetadata( self, media_result ):
        """Return the file's shape perceptual hashes, or None (queuing a membership re-check for un-phashable mimes)."""
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        if mime not in HC.MIMES_WE_CAN_PHASH:
            self._controller.WriteSynchronous( 'file_maintenance_add_jobs_hashes', { hash }, REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP )
            return None
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, mime )
        except HydrusExceptions.FileMissingException:
            return None
        phashes = ClientImageHandling.GenerateShapePerceptualHashes( path, mime )
        return phashes
    def _RegenFileThumbnailForce( self, media_result ):
        """Unconditionally regenerate the thumbnail; silently skips a missing file."""
        mime = media_result.GetMime()
        if mime not in HC.MIMES_WITH_THUMBNAILS:
            return
        try:
            self._controller.client_files_manager.RegenerateThumbnail( media_result )
        except HydrusExceptions.FileMissingException:
            pass
    def _RegenFileThumbnailRefit( self, media_result ):
        """Regenerate the thumbnail only if its size is wrong; returns whether a regen happened."""
        mime = media_result.GetMime()
        if mime not in HC.MIMES_WITH_THUMBNAILS:
            return
        try:
            was_regenerated = self._controller.client_files_manager.RegenerateThumbnailIfWrongSize( media_result )
            return was_regenerated
        except HydrusExceptions.FileMissingException:
            pass
    def _ReInitialiseWorkRules( self ):
        """Rebuild the idle/active throttle rules from the current options."""
        file_maintenance_idle_throttle_files = self._controller.new_options.GetInteger( 'file_maintenance_idle_throttle_files' )
        file_maintenance_idle_throttle_time_delta = self._controller.new_options.GetInteger( 'file_maintenance_idle_throttle_time_delta' )
        self._idle_work_rules = HydrusNetworking.BandwidthRules()
        self._idle_work_rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, file_maintenance_idle_throttle_time_delta, file_maintenance_idle_throttle_files * NORMALISED_BIG_JOB_WEIGHT )
        file_maintenance_active_throttle_files = self._controller.new_options.GetInteger( 'file_maintenance_active_throttle_files' )
        file_maintenance_active_throttle_time_delta = self._controller.new_options.GetInteger( 'file_maintenance_active_throttle_time_delta' )
        self._active_work_rules = HydrusNetworking.BandwidthRules()
        self._active_work_rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, file_maintenance_active_throttle_time_delta, file_maintenance_active_throttle_files * NORMALISED_BIG_JOB_WEIGHT )
    def _RunJob( self, media_results, job_type, job_key ):
        """Dispatch job_type over media_results, reporting progress on job_key.

        Completed jobs are batched and written back to the db as cleared; work is
        metered on the work tracker and gc is collected periodically.
        """
        num_bad_files = 0
        num_thumb_refits = 0
        next_gc_collect = HydrusData.GetNow() + 10
        try:
            cleared_jobs = []
            num_to_do = len( media_results )
            if HG.file_report_mode:
                HydrusData.ShowText( 'file maintenance: {} for {} files'.format( regen_file_enum_to_str_lookup[ job_type ], HydrusData.ToHumanInt( num_to_do ) ) )
            for ( i, media_result ) in enumerate( media_results ):
                hash = media_result.GetHash()
                if job_key.IsCancelled():
                    return
                status_text = '{}: {}'.format( regen_file_enum_to_str_lookup[ job_type ], HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
                job_key.SetVariable( 'popup_text_1', status_text )
                job_key.SetVariable( 'popup_gauge_1', ( i + 1, num_to_do ) )
                additional_data = None
                try:
                    if job_type == REGENERATE_FILE_DATA_JOB_FILE_METADATA:
                        additional_data = self._RegenFileMetadata( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP:
                        additional_data = self._RegenFileModifiedTimestamp( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_OTHER_HASHES:
                        additional_data = self._RegenFileOtherHashes( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL:
                        self._RegenFileThumbnailForce( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL:
                        was_regenerated = self._RegenFileThumbnailRefit( media_result )
                        if was_regenerated:
                            num_thumb_refits += 1
                            job_key.SetVariable( 'popup_text_2', 'thumbs needing regen: {}'.format( HydrusData.ToHumanInt( num_thumb_refits ) ) )
                    elif job_type == REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES:
                        self._DeleteNeighbourDupes( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP:
                        additional_data = self._CheckSimilarFilesMembership( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA:
                        additional_data = self._RegenSimilarFilesMetadata( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS:
                        self._FixFilePermissions( media_result )
                    elif job_type in ( REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ):
                        file_was_bad = self._CheckFileIntegrity( media_result, job_type )
                        if file_was_bad:
                            num_bad_files += 1
                            job_key.SetVariable( 'popup_text_2', 'missing or invalid files: {}'.format( HydrusData.ToHumanInt( num_bad_files ) ) )
                except Exception as e:
                    # a failed job is reported but not retried
                    HydrusData.PrintException( e )
                    message = 'There was a problem performing maintenance task {} on file {}! The job will not be reattempted. A full traceback of this error should be written to the log.'.format( regen_file_enum_to_str_lookup[ job_type ], hash.hex() )
                    message += os.linesep * 2
                    message += str( e )
                    HydrusData.ShowText( message )
                finally:
                    # meter the work and mark the job cleared whatever happened
                    self._work_tracker.ReportRequestUsed( num_requests = regen_file_enum_to_job_weight_lookup[ job_type ] )
                    cleared_jobs.append( ( hash, job_type, additional_data ) )
                self._jobs_since_last_gc_collect += 1
                if self._jobs_since_last_gc_collect > 100:
                    gc.collect()
                    self._jobs_since_last_gc_collect = 0
                if len( cleared_jobs ) > 100:
                    self._controller.WriteSynchronous( 'file_maintenance_clear_jobs', cleared_jobs )
                    cleared_jobs = []
        finally:
            if len( cleared_jobs ) > 0:
                self._controller.Write( 'file_maintenance_clear_jobs', cleared_jobs )
    def CancelJobs( self, job_type ):
        """Cancel all queued jobs of job_type and interrupt the background loop."""
        with self._lock:
            self._controller.WriteSynchronous( 'file_maintenance_cancel_jobs', job_type )
            self._reset_background_event.set()
    def ClearJobs( self, hashes, job_type ):
        """Mark the given jobs as done and interrupt the background loop."""
        with self._lock:
            self._ClearJobs( hashes, job_type )
            self._reset_background_event.set()
    def ForceMaintenance( self, mandated_job_types = None ):
        """Synchronously drain the maintenance queue (optionally only certain job types), ignoring throttles."""
        self._reset_background_event.set()
        job_key = ClientThreading.JobKey( cancellable = True )
        job_key.SetVariable( 'popup_title', 'regenerating file data' )
        message_pubbed = False
        work_done = False
        with self._maintenance_lock:
            try:
                while True:
                    job = self._controller.Read( 'file_maintenance_get_job', mandated_job_types )
                    if job is None:
                        break
                    work_done = True
                    # only show the popup once there is actually work
                    if not message_pubbed:
                        self._controller.pub( 'message', job_key )
                        message_pubbed = True
                    if job_key.IsCancelled():
                        return
                    ( hashes, job_type ) = job
                    media_results = self._controller.Read( 'media_results', hashes )
                    hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
                    # hashes with no media result cannot be worked on; just clear them
                    missing_hashes = [ hash for hash in hashes if hash not in hashes_to_media_results ]
                    self._ClearJobs( missing_hashes, job_type )
                    with self._lock:
                        self._RunJob( media_results, job_type, job_key )
                    time.sleep( 0.0001 )
            finally:
                job_key.SetVariable( 'popup_text_1', 'done!' )
                job_key.DeleteVariable( 'popup_gauge_1' )
                job_key.Finish()
                job_key.Delete( 5 )
                if not work_done:
                    HydrusData.ShowText( 'No file maintenance due!' )
                self._controller.pub( 'notify_files_maintenance_done' )
    def MainLoopBackgroundWork( self ):
        """Long-running background thread: pull jobs and run them one file at a time, respecting throttles."""
        def check_shutdown():
            # raising ShutdownException unwinds the whole loop below
            if HydrusThreading.IsThreadShuttingDown() or self._shutdown:
                raise HydrusExceptions.ShutdownException()
        def wait_on_maintenance():
            # block until work is permitted, a reset is requested, or we shut down
            while True:
                check_shutdown()
                if self._AbleToDoBackgroundMaintenance() or self._reset_background_event.is_set():
                    break
                time.sleep( 1 )
        def should_reset():
            # consume the reset flag if set
            if self._reset_background_event.is_set():
                self._reset_background_event.clear()
                return True
            else:
                return False
        try:
            # give the client a little time to finish booting
            time_to_start = HydrusData.GetNow() + 15
            while not HydrusData.TimeHasPassed( time_to_start ):
                check_shutdown()
                time.sleep( 1 )
            while True:
                check_shutdown()
                did_work = False
                with self._maintenance_lock:
                    job = self._controller.Read( 'file_maintenance_get_job' )
                    if job is not None:
                        did_work = True
                        job_key = ClientThreading.JobKey()
                        i = 0
                        try:
                            ( hashes, job_type ) = job
                            media_results = self._controller.Read( 'media_results', hashes )
                            hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
                            # hashes with no media result cannot be worked on; just clear them
                            missing_hashes = [ hash for hash in hashes if hash not in hashes_to_media_results ]
                            self._ClearJobs( missing_hashes, job_type )
                            for media_result in media_results:
                                wait_on_maintenance()
                                if should_reset():
                                    break
                                with self._lock:
                                    self._RunJob( ( media_result, ), job_type, job_key )
                                time.sleep( 0.0001 )
                                i += 1
                                if i % 100 == 0:
                                    self._controller.pub( 'notify_files_maintenance_done' )
                        finally:
                            self._controller.pub( 'notify_files_maintenance_done' )
                if not did_work:
                    # nothing queued: doze until woken or ten minutes pass
                    self._wake_background_event.wait( 600 )
                    self._wake_background_event.clear()
                time.sleep( 2 )
        except HydrusExceptions.ShutdownException:
            pass
    def NotifyNewOptions( self ):
        """Pubsub hook: options changed, so rebuild the throttle rules."""
        with self._lock:
            self._ReInitialiseWorkRules()
    def RunJobImmediately( self, media_results, job_type, pub_job_key = True ):
        """Synchronously run job_type over the given media_results, with an optional popup."""
        job_key = ClientThreading.JobKey( cancellable = True )
        job_key.SetVariable( 'popup_title', 'regenerating file data' )
        if pub_job_key:
            self._controller.pub( 'message', job_key )
        self._reset_background_event.set()
        with self._lock:
            try:
                self._RunJob( media_results, job_type, job_key )
            finally:
                job_key.SetVariable( 'popup_text_1', 'done!' )
                job_key.DeleteVariable( 'popup_gauge_1' )
                job_key.Finish()
                job_key.Delete( 5 )
                self._controller.pub( 'notify_files_maintenance_done' )
    def ScheduleJob( self, hashes, job_type, time_can_start = 0 ):
        """Queue job_type for the given hashes and wake the background loop."""
        with self._lock:
            self._controller.Write( 'file_maintenance_add_jobs_hashes', hashes, job_type, time_can_start )
            self._wake_background_event.set()
    def ScheduleJobHashIds( self, hash_ids, job_type, time_can_start = 0 ):
        """Queue job_type for the given db hash_ids and wake the background loop."""
        with self._lock:
            self._controller.Write( 'file_maintenance_add_jobs', hash_ids, job_type, time_can_start )
            self._wake_background_event.set()
    def Shutdown( self ):
        """Pubsub hook: flag shutdown and wake the background loop so it can exit."""
        self._shutdown = True
        self._wake_background_event.set()
    def Start( self ):
        """Kick off the background maintenance thread."""
        self._controller.CallToThreadLongRunning( self.MainLoopBackgroundWork )
from . import ClientConstants as CC
from . import ClientImageHandling
from . import ClientPaths
from . import ClientThreading
import collections
import gc
from . import HydrusConstants as HC
from . import HydrusData
from . import HydrusExceptions
from . import HydrusFileHandling
from . import HydrusGlobals as HG
from . import HydrusImageHandling
from . import HydrusNetworking
from . import HydrusPaths
from . import HydrusThreading
import os
import random
import threading
import time
from qtpy import QtWidgets as QW
from . import QtPorting as QP
# File maintenance job type ids. ScheduleJob writes these values to the db, so
# existing numbers must never be reassigned--only append new ones.
REGENERATE_FILE_DATA_JOB_FILE_METADATA = 0
REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL = 1
REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL = 2
REGENERATE_FILE_DATA_JOB_OTHER_HASHES = 3
REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES = 4
REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE = 5
REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA = 6
REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS = 7
REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP = 8
REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA = 9
REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP = 10
REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL = 11
REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL = 12
REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE = 13
# Short human-readable labels for each file maintenance job type.
regen_file_enum_to_str_lookup = {
    REGENERATE_FILE_DATA_JOB_FILE_METADATA : 'regenerate file metadata',
    REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL : 'regenerate thumbnail',
    REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL : 'regenerate thumbnail if incorrect size',
    REGENERATE_FILE_DATA_JOB_OTHER_HASHES : 'regenerate non-standard hashes',
    REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES : 'delete duplicate neighbours with incorrect file extension',
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE : 'if file is missing, remove record',
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL : 'if file is missing and has url, try to redownload',
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA : 'if file is missing/incorrect, move file out and remove record',
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL : 'if file is missing/incorrect and has url, move file out and try to redownload',
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE : 'if file is incorrect, move file out',
    REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS : 'fix file read/write permissions',
    REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP : 'check for membership in the similar files search system',
    REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA : 'regenerate similar files metadata',
    REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP : 'regenerate file modified date'
}
# Longer explanatory text for each file maintenance job type, shown to the user.
regen_file_enum_to_description_lookup = {}
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_METADATA ] = 'This regenerates file metadata like resolution and duration, or even filetype (such as mkv->webm), which may have been misparsed in a previous version.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL ] = 'This forces a complete regeneration of the thumbnail from the source file.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL ] = 'This looks for the existing thumbnail, and if it is not the correct resolution or is missing, will regenerate a new one for the source file.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_OTHER_HASHES ] = 'This regenerates hydrus\'s store of md5, sha1, and sha512 supplementary hashes, which it can use for various external (usually website) lookups.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES ] = 'Sometimes, a file metadata regeneration will mean a new filetype and thus a new file extension. If the existing, incorrectly named file is in use, it must be copied rather than renamed, and so there is a spare duplicate left over after the operation. This jobs cleans up the duplicate at a later time.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE ] = 'This checks to see if the file is present in the file system as expected. If it is not, the internal file record in the database is removed, just as if the file were deleted. Use this if you have manually deleted or otherwise lost a number of files from your file structure and need hydrus to re-sync with what it actually has. Missing files will have their known URLs exported to your database directory.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL ] = 'This checks to see if the file is present in the file system as expected. If it is not, and it has known post/file urls, the URLs will be automatically added to a new URL downloader. Missing files will also have their known URLs exported to your database directory.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA ] = 'This does the same check as the \'file is missing\' job, and if the file is where it is expected, it ensures its file content, byte-for-byte, is correct. This is a heavy job, so be wary. If the file is incorrect, it will be exported to your database directory along with their known URLs, and the file record deleted.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL ] = 'This does the same check as the \'file is missing\' job, and if the file is where it is expected, it ensures its file content, byte-for-byte, is correct. This is a heavy job, so be wary. If the file is incorrect _and_ is has known post/file urls, the URLs will be automatically added to a new URL downloader. Incorrect files will also have their known URLs exported to your database directory.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ] = 'If the file is where it is expected, this ensures its file content, byte-for-byte, is correct. This is a heavy job, so be wary. If the file is incorrect, it will be exported to your database directory along with its known URLs. The client\'s file record will not be deleted. This is useful if you have a valid backup and need to clear out invalid files from your live db so you can fill in gaps from your backup with a program like FreeFileSync.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS ] = 'This ensures that files in the file system are readable and writeable. For Linux/macOS users, it specifically sets 644. If you wish to run this job on Linux/macOS, ensure you are first the file owner of all your files.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP ] = 'This checks to see if files should be in the similar files system, and if they are falsely in or falsely out, it will remove their record or queue them up for a search as appropriate. It is useful to repair database damage.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA ] = 'This forces a regeneration of the file\'s similar-files \'phashes\'. It is not useful unless you know there is missing data to repair.'
regen_file_enum_to_description_lookup[ REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP ] = 'This rechecks the file\'s modified timestamp and saves it to the database.'
# Budget unit for background work: a 'big' job is normalised to 100.
NORMALISED_BIG_JOB_WEIGHT = 100

# Per-job-type weights, in the same units as NORMALISED_BIG_JOB_WEIGHT.
regen_file_enum_to_job_weight_lookup = {
    REGENERATE_FILE_DATA_JOB_FILE_METADATA : 100,
    REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL : 50,
    REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL : 25,
    REGENERATE_FILE_DATA_JOB_OTHER_HASHES : 100,
    REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES : 25,
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE : 5,
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL : 50,
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA : 100,
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL : 100,
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE : 100,
    REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS : 25,
    REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP : 50,
    REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA : 100,
    REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP : 10
}
# NOTE(review): name suggests these are job types made redundant by the keyed
# job (e.g. a forced thumbnail regen overrules a mere refit) -- confirm against
# the scheduling consumer.
regen_file_enum_to_overruled_jobs = {
    REGENERATE_FILE_DATA_JOB_FILE_METADATA : [],
    REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL : [ REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL ],
    REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL : [],
    REGENERATE_FILE_DATA_JOB_OTHER_HASHES : [],
    REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES : [],
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE : [],
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL : [],
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA : [ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE ],
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL : [ REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL ],
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE : [],
    REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS : [],
    REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP : [],
    REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA : [ REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP ],
    REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP : []
}
# All job types, integrity checks first, cheap cleanup jobs last.
ALL_REGEN_JOBS_IN_PREFERRED_ORDER = [
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL,
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL,
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE,
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA,
    REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE,
    REGENERATE_FILE_DATA_JOB_FILE_METADATA,
    REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL,
    REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL,
    REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA,
    REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP,
    REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS,
    REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP,
    REGENERATE_FILE_DATA_JOB_OTHER_HASHES,
    REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES
]
def GetAllFilePaths( raw_paths, do_human_sort = True ):
    """Expand a mix of file and directory paths into a flat list of file paths.
    
    Directories are walked breadth-first via os.listdir. Raises
    ShutdownException if the client starts shutting down mid-walk.
    """
    
    file_paths = []
    
    pending = list( raw_paths )
    
    while len( pending ) > 0:
        
        next_pending = []
        
        for path in pending:
            
            if HG.view_shutdown:
                
                raise HydrusExceptions.ShutdownException()
                
            
            if os.path.isdir( path ):
                
                # defer the directory's children to the next pass
                next_pending.extend( os.path.join( path, filename ) for filename in os.listdir( path ) )
                
            else:
                
                file_paths.append( path )
                
            
        
        pending = next_pending
        
    
    if do_human_sort:
        
        HydrusData.HumanTextSort( file_paths )
        
    
    return file_paths
class ClientFilesManager( object ):
def __init__( self, controller ):
self._controller = controller
self._rwlock = ClientThreading.FileRWLock()
self._prefixes_to_locations = {}
self._bad_error_occurred = False
self._missing_locations = set()
self._Reinit()
def _AddFile( self, hash, mime, source_path ):
dest_path = self._GenerateExpectedFilePath( hash, mime )
if HG.file_report_mode or HG.file_import_report_mode:
HydrusData.ShowText( 'Adding file to client file structure: from {} to {}'.format( source_path, dest_path ) )
successful = HydrusPaths.MirrorFile( source_path, dest_path )
if not successful:
raise Exception( 'There was a problem copying the file from ' + source_path + ' to ' + dest_path + '!' )
    def _AddThumbnailFromBytes( self, hash, thumbnail_bytes, silent = False ):
        """Write thumbnail bytes to the expected path and notify the UI.
        
        Raises DirectoryMissingException if the whole thumbnail bucket directory
        is gone, or FileMissingException for any other write failure.
        """
        
        dest_path = self._GenerateExpectedThumbnailPath( hash )
        
        if HG.file_report_mode:
            
            HydrusData.ShowText( 'Adding thumbnail: ' + str( ( len( thumbnail_bytes ), dest_path ) ) )
            
        
        try:
            
            HydrusPaths.MakeFileWritable( dest_path )
            
            with open( dest_path, 'wb' ) as f:
                
                f.write( thumbnail_bytes )
                
            
        except Exception as e:
            
            # diagnose: was the whole bucket directory missing, or just this write?
            hash_encoded = hash.hex()
            
            prefix = 't' + hash_encoded[:2]
            
            location = self._prefixes_to_locations[ prefix ]
            
            thumb_dir = os.path.join( location, prefix )
            
            if not os.path.exists( thumb_dir ):
                
                raise HydrusExceptions.DirectoryMissingException( 'The directory {} was not found! Reconnect the missing location or shut down the client immediately!'.format( thumb_dir ) )
                
            
            raise HydrusExceptions.FileMissingException( 'The thumbnail for file "{}" failed to write to path "{}". This event suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.'.format( hash.hex(), dest_path ) )
            
        
        if not silent:
            
            # tell the UI to drop any cached thumbnail and pick up the new one
            self._controller.pub( 'clear_thumbnails', { hash } )
            self._controller.pub( 'new_thumbnails', { hash } )
            
    def _AttemptToHealMissingLocations( self ):
        """Try to find missing prefix directories in other known storage locations.
        
        A prefix directory found in exactly one other known location is assumed
        to have been moved there by hand, and the db mapping is repaired to
        match (after informing the user). Ambiguous or unfound prefixes are
        left for the user to fix manually.
        """
        
        correct_rows = []
        
        some_are_unhealable = False
        
        fixes_counter = collections.Counter()
        
        # every location we know about: currently mapped, ideal, and thumb override
        known_locations = set()
        
        known_locations.update( self._prefixes_to_locations.values() )
        
        ( locations_to_ideal_weights, thumbnail_override ) = self._controller.Read( 'ideal_client_files_locations' )
        
        known_locations.update( locations_to_ideal_weights.keys() )
        
        if thumbnail_override is not None:
            
            known_locations.add( thumbnail_override )
            
        
        for ( missing_location, prefix ) in self._missing_locations:
            
            potential_correct_locations = []
            
            for known_location in known_locations:
                
                if known_location == missing_location:
                    
                    continue
                    
                
                dir_path = os.path.join( known_location, prefix )
                
                if os.path.exists( dir_path ) and os.path.isdir( dir_path ):
                    
                    potential_correct_locations.append( known_location )
                    
                
            
            # only heal when the answer is unambiguous
            if len( potential_correct_locations ) == 1:
                
                correct_location = potential_correct_locations[0]
                
                correct_rows.append( ( missing_location, prefix, correct_location ) )
                
                fixes_counter[ ( missing_location, correct_location ) ] += 1
                
            else:
                
                some_are_unhealable = True
                
            
        
        if len( correct_rows ) > 0 and some_are_unhealable:
            
            message = 'Hydrus found multiple missing locations in your file storage. Some of these locations seemed to be fixable, others did not. The client will now inform you about both problems.'
            
            self._controller.SafeShowCriticalMessage( 'Multiple file location problems.', message )
            
        
        if len( correct_rows ) > 0:
            
            summaries = [ '{} moved from {} to {}'.format( HydrusData.ToHumanInt( count ), missing_location, correct_location ) for ( ( missing_location, correct_location ), count ) in fixes_counter.items() ]
            
            summaries.sort()
            
            summary_message = 'Some client file folders were missing, but they seem to be in other known locations! The folders are:'
            
            summary_message += os.linesep * 2
            summary_message += os.linesep.join( summaries )
            summary_message += os.linesep * 2
            
            summary_message += 'Assuming you did this on purpose, Hydrus is ready to update its internal knowledge to reflect these new mappings as soon as this dialog closes. If you know these proposed fixes are incorrect, terminate the program now.'
            
            HydrusData.Print( summary_message )
            
            self._controller.SafeShowCriticalMessage( 'About to auto-heal client file folders.', summary_message )
            
            # synchronous so the mapping is fixed before we re-read it
            HG.client_controller.WriteSynchronous( 'repair_client_files', correct_rows )
            
def _ChangeFileExt( self, hash, old_mime, mime ):
old_path = self._GenerateExpectedFilePath( hash, old_mime )
new_path = self._GenerateExpectedFilePath( hash, mime )
if old_path == new_path:
return
if HG.file_report_mode:
HydrusData.ShowText( 'Changing file ext: ' + str( ( old_path, new_path ) ) )
if HydrusPaths.PathIsFree( old_path ):
try:
HydrusPaths.MergeFile( old_path, new_path )
needed_to_copy_file = False
except:
HydrusPaths.MirrorFile( old_path, new_path )
needed_to_copy_file = True
else:
HydrusPaths.MirrorFile( old_path, new_path )
needed_to_copy_file = True
return needed_to_copy_file
def _GenerateExpectedFilePath( self, hash, mime ):
self._WaitOnWakeup()
hash_encoded = hash.hex()
prefix = 'f' + hash_encoded[:2]
location = self._prefixes_to_locations[ prefix ]
path = os.path.join( location, prefix, hash_encoded + HC.mime_ext_lookup[ mime ] )
return path
def _GenerateExpectedThumbnailPath( self, hash ):
self._WaitOnWakeup()
hash_encoded = hash.hex()
prefix = 't' + hash_encoded[:2]
location = self._prefixes_to_locations[ prefix ]
path = os.path.join( location, prefix, hash_encoded ) + '.thumbnail'
return path
def _GenerateThumbnailBytes( self, file_path, media ):
hash = media.GetHash()
mime = media.GetMime()
( width, height ) = media.GetResolution()
duration = media.GetDuration()
num_frames = media.GetNumFrames()
bounding_dimensions = HG.client_controller.options[ 'thumbnail_dimensions' ]
target_resolution = HydrusImageHandling.GetThumbnailResolution( ( width, height ), bounding_dimensions )
percentage_in = self._controller.new_options.GetInteger( 'video_thumbnail_percentage_in' )
try:
thumbnail_bytes = HydrusFileHandling.GenerateThumbnailBytes( file_path, target_resolution, mime, duration, num_frames, percentage_in = percentage_in )
except Exception as e:
raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.hex() + ' could not be regenerated from the original file for the above reason. This event could indicate hard drive corruption. Please check everything is ok.' )
return thumbnail_bytes
def _GetRecoverTuple( self ):
all_locations = { location for location in list(self._prefixes_to_locations.values()) }
all_prefixes = list(self._prefixes_to_locations.keys())
for possible_location in all_locations:
for prefix in all_prefixes:
correct_location = self._prefixes_to_locations[ prefix ]
if possible_location != correct_location and os.path.exists( os.path.join( possible_location, prefix ) ):
recoverable_location = possible_location
return ( prefix, recoverable_location, correct_location )
return None
    def _GetRebalanceTuple( self ):
        """Compare current bucket placement against the user's ideal weights.
        
        Returns ( file_prefix, overweight_location, underweight_location ) for a
        file bucket that should move, or ( thumbnail_prefix, current, correct )
        for a thumbnail bucket in the wrong place, or None when the layout
        already matches the ideal.
        """
        
        ( locations_to_ideal_weights, thumbnail_override ) = self._controller.Read( 'ideal_client_files_locations' )
        
        total_weight = sum( locations_to_ideal_weights.values() )
        
        # normalise the user's weights so they sum to 1.0
        ideal_locations_to_normalised_weights = { location : weight / total_weight for ( location, weight ) in list(locations_to_ideal_weights.items()) }
        
        current_locations_to_normalised_weights = collections.defaultdict( lambda: 0 )
        
        file_prefixes = [ prefix for prefix in self._prefixes_to_locations if prefix.startswith( 'f' ) ]
        
        # each of the 256 'f' buckets contributes 1/256 of the current weight
        for file_prefix in file_prefixes:
            
            location = self._prefixes_to_locations[ file_prefix ]
            
            current_locations_to_normalised_weights[ location ] += 1.0 / 256
            
        
        # locations still in use but no longer wanted get an ideal weight of zero
        for location in list(current_locations_to_normalised_weights.keys()):
            
            if location not in ideal_locations_to_normalised_weights:
                
                ideal_locations_to_normalised_weights[ location ] = 0.0
                
            
        
        overweight_locations = []
        underweight_locations = []
        
        for ( location, ideal_weight ) in list(ideal_locations_to_normalised_weights.items()):
            
            if location in current_locations_to_normalised_weights:
                
                current_weight = current_locations_to_normalised_weights[ location ]
                
                if current_weight < ideal_weight:
                    
                    underweight_locations.append( location )
                    
                elif current_weight >= ideal_weight + 1.0 / 256:
                    
                    # at least a whole bucket over ideal--a candidate to give one up
                    overweight_locations.append( location )
                    
                
            else:
                
                # wanted location currently holds nothing
                underweight_locations.append( location )
                
            
        
        if len( underweight_locations ) > 0 and len( overweight_locations ) > 0:
            
            overweight_location = overweight_locations.pop( 0 )
            underweight_location = underweight_locations.pop( 0 )
            
            # shuffle so repeated calls do not always pick the same bucket
            random.shuffle( file_prefixes )
            
            for file_prefix in file_prefixes:
                
                location = self._prefixes_to_locations[ file_prefix ]
                
                if location == overweight_location:
                    
                    return ( file_prefix, overweight_location, underweight_location )
                    
                
            
        else:
            
            # file buckets are balanced; now check the thumbnail buckets
            for hex_prefix in HydrusData.IterateHexPrefixes():
                
                thumbnail_prefix = 't' + hex_prefix
                
                if thumbnail_override is None:
                    
                    # by default, thumbs should sit beside their files
                    file_prefix = 'f' + hex_prefix
                    
                    correct_location = self._prefixes_to_locations[ file_prefix ]
                    
                else:
                    
                    correct_location = thumbnail_override
                    
                
                current_thumbnails_location = self._prefixes_to_locations[ thumbnail_prefix ]
                
                if current_thumbnails_location != correct_location:
                    
                    return ( thumbnail_prefix, current_thumbnails_location, correct_location )
                    
                
            
        
        return None
        
def _IterateAllFilePaths( self ):
for ( prefix, location ) in list(self._prefixes_to_locations.items()):
if prefix.startswith( 'f' ):
dir = os.path.join( location, prefix )
filenames = os.listdir( dir )
for filename in filenames:
yield os.path.join( dir, filename )
def _IterateAllThumbnailPaths( self ):
for ( prefix, location ) in list(self._prefixes_to_locations.items()):
if prefix.startswith( 't' ):
dir = os.path.join( location, prefix )
filenames = os.listdir( dir )
for filename in filenames:
yield os.path.join( dir, filename )
def _LookForFilePath( self, hash ):
for potential_mime in HC.ALLOWED_MIMES:
potential_path = self._GenerateExpectedFilePath( hash, potential_mime )
if os.path.exists( potential_path ):
return ( potential_path, potential_mime )
hash_encoded = hash.hex()
prefix = 'f' + hash_encoded[:2]
location = self._prefixes_to_locations[ prefix ]
subdir = os.path.join( location, prefix )
if not os.path.exists( subdir ):
raise HydrusExceptions.DirectoryMissingException( 'The directory {} was not found! Reconnect the missing location or shut down the client immediately!'.format( subdir ) )
raise HydrusExceptions.FileMissingException( 'File for ' + hash.hex() + ' not found!' )
    def _Reinit( self ):
        """(Re)load the prefix->location map from the db and validate it on disk.
        
        On first boot this creates the full directory structure. Otherwise any
        missing directories are auto-healed where possible; whatever remains
        missing is reported to the user and marks the manager as in a bad state.
        """
        
        self._prefixes_to_locations = self._controller.Read( 'client_files_locations' )
        
        if HG.client_controller.IsFirstStart():
            
            # fresh database: just create everything
            try:
                
                for ( prefix, location ) in list( self._prefixes_to_locations.items() ):
                    
                    HydrusPaths.MakeSureDirectoryExists( location )
                    
                    subdir = os.path.join( location, prefix )
                    
                    HydrusPaths.MakeSureDirectoryExists( subdir )
                    
                
            except:
                
                text = 'Attempting to create the database\'s client_files folder structure in {} failed!'.format( location )
                
                self._controller.SafeShowCriticalMessage( 'unable to create file structure', text )
                
                raise
                
            
        else:
            
            self._ReinitMissingLocations()
            
            if len( self._missing_locations ) > 0:
                
                # some folders may have been moved by hand--try to find them,
                # then re-read the (possibly repaired) mapping and re-check
                self._AttemptToHealMissingLocations()
                
                self._prefixes_to_locations = self._controller.Read( 'client_files_locations' )
                
                self._ReinitMissingLocations()
                
            
            if len( self._missing_locations ) > 0:
                
                self._bad_error_occurred = True
                
                #
                
                # build a readable report, grouped by missing base location
                missing_dict = HydrusData.BuildKeyToListDict( self._missing_locations )
                
                missing_locations = list( missing_dict.keys() )
                
                missing_locations.sort()
                
                missing_string = ''
                
                for missing_location in missing_locations:
                    
                    missing_prefixes = list( missing_dict[ missing_location ] )
                    
                    missing_prefixes.sort()
                    
                    missing_prefixes_string = ' ' + os.linesep.join( ( ', '.join( block ) for block in HydrusData.SplitListIntoChunks( missing_prefixes, 32 ) ) )
                    
                    missing_string += os.linesep
                    missing_string += missing_location
                    missing_string += os.linesep
                    missing_string += missing_prefixes_string
                    
                
                #
                
                if len( self._missing_locations ) > 4:
                    
                    # too many to show in a dialog--send the detail to the log
                    text = 'When initialising the client files manager, some file locations did not exist! They have all been written to the log!'
                    text += os.linesep * 2
                    text += 'If this is happening on client boot, you should now be presented with a dialog to correct this manually!'
                    
                    self._controller.SafeShowCriticalMessage( 'missing locations', text )
                    
                    HydrusData.DebugPrint( 'Missing locations follow:' )
                    HydrusData.DebugPrint( missing_string )
                    
                else:
                    
                    text = 'When initialising the client files manager, these file locations did not exist:'
                    text += os.linesep * 2
                    text += missing_string
                    text += os.linesep * 2
                    text += 'If this is happening on client boot, you should now be presented with a dialog to correct this manually!'
                    
                    self._controller.SafeShowCriticalMessage( 'missing locations', text )
                    
def _ReinitMissingLocations( self ):
self._missing_locations = set()
for ( prefix, location ) in list(self._prefixes_to_locations.items()):
if os.path.exists( location ):
subdir = os.path.join( location, prefix )
if not os.path.exists( subdir ):
self._missing_locations.add( ( location, prefix ) )
else:
self._missing_locations.add( ( location, prefix ) )
def _WaitOnWakeup( self ):
if HG.client_controller.new_options.GetBoolean( 'file_system_waits_on_wakeup' ):
while HG.client_controller.JustWokeFromSleep():
HydrusThreading.CheckIfThreadShuttingDown()
time.sleep( 1.0 )
def AllLocationsAreDefault( self ):
with self._rwlock.read:
db_dir = self._controller.GetDBDir()
client_files_default = os.path.join( db_dir, 'client_files' )
all_locations = set( self._prefixes_to_locations.values() )
return False not in ( location.startswith( client_files_default ) for location in all_locations )
def LocklessAddFileFromBytes( self, hash, mime, file_bytes ):
dest_path = self._GenerateExpectedFilePath( hash, mime )
if HG.file_report_mode:
HydrusData.ShowText( 'Adding file from string: ' + str( ( len( file_bytes ), dest_path ) ) )
HydrusPaths.MakeFileWritable( dest_path )
with open( dest_path, 'wb' ) as f:
f.write( file_bytes )
def AddFile( self, hash, mime, source_path, thumbnail_bytes = None ):
with self._rwlock.write:
self._AddFile( hash, mime, source_path )
if thumbnail_bytes is not None:
self._AddThumbnailFromBytes( hash, thumbnail_bytes )
def AddThumbnailFromBytes( self, hash, thumbnail_bytes, silent = False ):
with self._rwlock.write:
self._AddThumbnailFromBytes( hash, thumbnail_bytes, silent = silent )
def ChangeFileExt( self, hash, old_mime, mime ):
with self._rwlock.write:
return self._ChangeFileExt( hash, old_mime, mime )
def ClearOrphans( self, move_location = None ):
with self._rwlock.write:
job_key = ClientThreading.JobKey( cancellable = True )
job_key.SetVariable( 'popup_title', 'clearing orphans' )
job_key.SetVariable( 'popup_text_1', 'preparing' )
self._controller.pub( 'message', job_key )
orphan_paths = []
orphan_thumbnails = []
for ( i, path ) in enumerate( self._IterateAllFilePaths() ):
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
if i % 100 == 0:
status = 'reviewed ' + HydrusData.ToHumanInt( i ) + ' files, found ' + HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphans'
job_key.SetVariable( 'popup_text_1', status )
try:
is_an_orphan = False
( directory, filename ) = os.path.split( path )
should_be_a_hex_hash = filename[:64]
hash = bytes.fromhex( should_be_a_hex_hash )
is_an_orphan = HG.client_controller.Read( 'is_an_orphan', 'file', hash )
except:
is_an_orphan = True
if is_an_orphan:
if move_location is not None:
( source_dir, filename ) = os.path.split( path )
dest = os.path.join( move_location, filename )
dest = HydrusPaths.AppendPathUntilNoConflicts( dest )
HydrusData.Print( 'Moving the orphan ' + path + ' to ' + dest )
HydrusPaths.MergeFile( path, dest )
orphan_paths.append( path )
time.sleep( 2 )
for ( i, path ) in enumerate( self._IterateAllThumbnailPaths() ):
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
if i % 100 == 0:
status = 'reviewed ' + HydrusData.ToHumanInt( i ) + ' thumbnails, found ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphans'
job_key.SetVariable( 'popup_text_1', status )
try:
is_an_orphan = False
( directory, filename ) = os.path.split( path )
should_be_a_hex_hash = filename[:64]
hash = bytes.fromhex( should_be_a_hex_hash )
is_an_orphan = HG.client_controller.Read( 'is_an_orphan', 'thumbnail', hash )
except:
is_an_orphan = True
if is_an_orphan:
orphan_thumbnails.append( path )
time.sleep( 2 )
if move_location is None and len( orphan_paths ) > 0:
status = 'found ' + HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphans, now deleting'
job_key.SetVariable( 'popup_text_1', status )
time.sleep( 5 )
for path in orphan_paths:
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
HydrusData.Print( 'Deleting the orphan ' + path )
status = 'deleting orphan files: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_paths ) )
job_key.SetVariable( 'popup_text_1', status )
ClientPaths.DeletePath( path )
if len( orphan_thumbnails ) > 0:
status = 'found ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphan thumbnails, now deleting'
job_key.SetVariable( 'popup_text_1', status )
time.sleep( 5 )
for ( i, path ) in enumerate( orphan_thumbnails ):
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
status = 'deleting orphan thumbnails: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_thumbnails ) )
job_key.SetVariable( 'popup_text_1', status )
HydrusData.Print( 'Deleting the orphan ' + path )
ClientPaths.DeletePath( path, always_delete_fully = True )
if len( orphan_paths ) == 0 and len( orphan_thumbnails ) == 0:
final_text = 'no orphans found!'
else:
final_text = HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphan files and ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphan thumbnails cleared!'
job_key.SetVariable( 'popup_text_1', final_text )
HydrusData.Print( job_key.ToString() )
job_key.Finish()
def DelayedDeleteFiles( self, hashes ):
if HG.file_report_mode:
HydrusData.ShowText( 'Delayed delete files call: ' + str( len( hashes ) ) )
time.sleep( 2 )
big_pauser = HydrusData.BigJobPauser( period = 1 )
for hashes_chunk in HydrusData.SplitIteratorIntoChunks( hashes, 10 ):
with self._rwlock.write:
for hash in hashes_chunk:
try:
( path, mime ) = self._LookForFilePath( hash )
except HydrusExceptions.FileMissingException:
continue
ClientPaths.DeletePath( path )
big_pauser.Pause()
def DelayedDeleteThumbnails( self, hashes ):
if HG.file_report_mode:
HydrusData.ShowText( 'Delayed delete thumbs call: ' + str( len( hashes ) ) )
time.sleep( 2 )
big_pauser = HydrusData.BigJobPauser( period = 1 )
for hashes_chunk in HydrusData.SplitIteratorIntoChunks( hashes, 20 ):
with self._rwlock.write:
for hash in hashes_chunk:
path = self._GenerateExpectedThumbnailPath( hash )
ClientPaths.DeletePath( path, always_delete_fully = True )
big_pauser.Pause()
def DeleteNeighbourDupes( self, hash, true_mime ):
with self._rwlock.write:
correct_path = self._GenerateExpectedFilePath( hash, true_mime )
if not os.path.exists( correct_path ):
return # misfire, let's not actually delete the right one
for mime in HC.ALLOWED_MIMES:
if mime == true_mime:
continue
incorrect_path = self._GenerateExpectedFilePath( hash, mime )
if incorrect_path == correct_path:
continue
if os.path.exists( incorrect_path ):
HydrusPaths.DeletePath( incorrect_path )
def GetCurrentFileLocations( self ):
with self._rwlock.read:
locations = set()
for ( prefix, location ) in self._prefixes_to_locations.items():
if prefix.startswith( 'f' ):
locations.add( location )
return locations
def GetFilePath( self, hash, mime = None, check_file_exists = True ):
with self._rwlock.read:
return self.LocklessGetFilePath( hash, mime = mime, check_file_exists = check_file_exists )
def GetMissing( self ):
return self._missing_locations
def LocklessGetFilePath( self, hash, mime = None, check_file_exists = True ):
if HG.file_report_mode:
HydrusData.ShowText( 'File path request: ' + str( ( hash, mime ) ) )
if mime is None:
( path, mime ) = self._LookForFilePath( hash )
else:
path = self._GenerateExpectedFilePath( hash, mime )
if check_file_exists and not os.path.exists( path ):
try:
( actual_path, old_mime ) = self._LookForFilePath( hash )
except HydrusExceptions.FileMissingException:
raise HydrusExceptions.FileMissingException( 'No file found at path {}!'.format( path ) )
self._ChangeFileExt( hash, old_mime, mime )
# we have now fixed the path, it is good to return
return path
def GetThumbnailPath( self, media ):
hash = media.GetHash()
mime = media.GetMime()
if HG.file_report_mode:
HydrusData.ShowText( 'Thumbnail path request: ' + str( ( hash, mime ) ) )
with self._rwlock.read:
path = self._GenerateExpectedThumbnailPath( hash )
thumb_missing = not os.path.exists( path )
if thumb_missing:
self.RegenerateThumbnail( media )
return path
def LocklessHasThumbnail( self, hash ):
path = self._GenerateExpectedThumbnailPath( hash )
if HG.file_report_mode:
HydrusData.ShowText( 'Thumbnail path test: ' + path )
return os.path.exists( path )
def Rebalance( self, job_key ):
    """Move prefix directories between storage locations until configured weights
    are satisfied, then recover stray prefix directories from old locations.

    Progress and status text are reported through job_key; the job can be
    cancelled between individual moves.
    """
    try:
        if self._bad_error_occurred:
            # a previous serious file error makes further moves unsafe this session
            QW.QMessageBox.warning( None, 'Warning', 'A serious file error has previously occurred during this session, so further file moving will not be reattempted. Please restart the client before trying again.' )
            return
        with self._rwlock.write:
            rebalance_tuple = self._GetRebalanceTuple()
            while rebalance_tuple is not None:
                if job_key.IsCancelled():
                    break
                ( prefix, overweight_location, underweight_location ) = rebalance_tuple
                text = 'Moving \'' + prefix + '\' from ' + overweight_location + ' to ' + underweight_location
                HydrusData.Print( text )
                job_key.SetVariable( 'popup_text_1', text )
                # these two lines can cause a deadlock because the db sometimes calls stuff in here.
                self._controller.Write( 'relocate_client_files', prefix, overweight_location, underweight_location )
                self._Reinit()
                rebalance_tuple = self._GetRebalanceTuple()
            recover_tuple = self._GetRecoverTuple()
            while recover_tuple is not None:
                if job_key.IsCancelled():
                    break
                ( prefix, recoverable_location, correct_location ) = recover_tuple
                text = 'Recovering \'' + prefix + '\' from ' + recoverable_location + ' to ' + correct_location
                HydrusData.Print( text )
                job_key.SetVariable( 'popup_text_1', text )
                recoverable_path = os.path.join( recoverable_location, prefix )
                correct_path = os.path.join( correct_location, prefix )
                HydrusPaths.MergeTree( recoverable_path, correct_path )
                recover_tuple = self._GetRecoverTuple()
    finally:
        job_key.SetVariable( 'popup_text_1', 'done!' )
        job_key.Finish()
        job_key.Delete()
def RebalanceWorkToDo( self ):
    """Return True if any prefix directory still needs moving to satisfy location weights."""
    with self._rwlock.read:
        return self._GetRebalanceTuple() is not None
def RegenerateThumbnail( self, media ):
    """Rebuild media's thumbnail from its original file.

    Raises FileMissingException if the original file is gone. Thumbnail bytes
    are generated under the read lock and saved under the write lock --
    presumably the rwlock is not reentrant, so callers must hold neither.
    """
    hash = media.GetHash()
    mime = media.GetMime()
    if mime not in HC.MIMES_WITH_THUMBNAILS:
        return
    with self._rwlock.read:
        file_path = self._GenerateExpectedFilePath( hash, mime )
        if not os.path.exists( file_path ):
            raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.hex() + ' could not be regenerated from the original file because the original file is missing! This event could indicate hard drive corruption. Please check everything is ok.')
        thumbnail_bytes = self._GenerateThumbnailBytes( file_path, media )
    with self._rwlock.write:
        self._AddThumbnailFromBytes( hash, thumbnail_bytes )
def RegenerateThumbnailIfWrongSize( self, media ):
    """Regenerate media's thumbnail if its on-disk resolution differs from expected.

    Any problem reading or parsing the existing thumbnail also triggers a
    regeneration. Returns True if a regeneration happened, False if not, and
    None (early out) if the mime cannot have a thumbnail at all.
    """
    do_it = False
    try:
        hash = media.GetHash()
        mime = media.GetMime()
        if mime not in HC.MIMES_WITH_THUMBNAILS:
            return
        ( media_width, media_height ) = media.GetResolution()
        path = self._GenerateExpectedThumbnailPath( hash )
        numpy_image = ClientImageHandling.GenerateNumPyImage( path, mime )
        ( current_width, current_height ) = HydrusImageHandling.GetResolutionNumPy( numpy_image )
        bounding_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
        ( expected_width, expected_height ) = HydrusImageHandling.GetThumbnailResolution( ( media_width, media_height ), bounding_dimensions )
        if current_width != expected_width or current_height != expected_height:
            do_it = True
    except Exception:
        # an unreadable/corrupt thumbnail should simply be remade; was a bare
        # 'except:', which also swallowed KeyboardInterrupt/SystemExit
        do_it = True
    if do_it:
        self.RegenerateThumbnail( media )
    return do_it
class FilesMaintenanceManager( object ):
    """Schedules and runs deferred per-file maintenance jobs.

    Jobs (metadata regeneration, integrity checks, thumbnail fixes, etc.) are
    queued in the database and worked through either by a background thread
    (throttled by idle/active bandwidth-style rules) or on demand via
    ForceMaintenance/RunJobImmediately.
    """

    def __init__( self, controller ):
        self._controller = controller
        # each of these warnings is only shown once per boot
        self._pubbed_message_about_bad_file_record_delete = False
        self._pubbed_message_about_invalid_file_export = False
        # measures recent work against the throttle rules below
        self._work_tracker = HydrusNetworking.BandwidthTracker()
        self._idle_work_rules = HydrusNetworking.BandwidthRules()
        self._active_work_rules = HydrusNetworking.BandwidthRules()
        self._jobs_since_last_gc_collect = 0
        self._ReInitialiseWorkRules()
        # _maintenance_lock serialises whole maintenance runs; _lock serialises individual jobs
        self._maintenance_lock = threading.Lock()
        self._lock = threading.Lock()
        self._wake_background_event = threading.Event()
        self._reset_background_event = threading.Event()
        self._shutdown = False
        self._controller.sub( self, 'NotifyNewOptions', 'notify_new_options' )
        self._controller.sub( self, 'Shutdown', 'shutdown' )

    def _AbleToDoBackgroundMaintenance( self ):
        """Return True if user settings and throttle rules permit doing a job right now."""
        if HG.client_controller.CurrentlyIdle():
            if not self._controller.new_options.GetBoolean( 'file_maintenance_during_idle' ):
                return False
            if not self._controller.GoodTimeToStartBackgroundWork():
                return False
            return self._idle_work_rules.CanStartRequest( self._work_tracker )
        else:
            if not self._controller.new_options.GetBoolean( 'file_maintenance_during_active' ):
                return False
            return self._active_work_rules.CanStartRequest( self._work_tracker )

    def _CheckFileIntegrity( self, media_result, job_type ):
        """Check the file for media_result is present (and, for 'data' job types, that
        its content still matches its hash). Depending on job_type, may export the
        bad file, delete its file record, and/or queue its URLs for redownload.
        Returns True if the file was missing or invalid.
        """
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        error_dir = os.path.join( self._controller.GetDBDir(), 'missing_and_invalid_files' )
        file_is_missing = False
        file_is_invalid = False
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, mime )
        except HydrusExceptions.FileMissingException:
            file_is_missing = True
            HydrusData.DebugPrint( 'Missing file: {}!'.format( hash.hex() ) )
        # 'data' job types also verify the file content hashes to what we expect
        if not file_is_missing and job_type in ( REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ):
            actual_hash = HydrusFileHandling.GetHashFromPath( path )
            if hash != actual_hash:
                file_is_invalid = True
                HydrusData.DebugPrint( 'Invalid file: {} actually had hash {}!'.format( hash.hex(), actual_hash.hex() ) )
        file_was_bad = file_is_missing or file_is_invalid
        if file_was_bad:
            urls = media_result.GetLocationsManager().GetURLs()
            if len( urls ) > 0:
                # save the known urls beside the db so the user can recover the file later
                HydrusPaths.MakeSureDirectoryExists( error_dir )
                with open( os.path.join( error_dir, hash.hex() + '.urls.txt' ), 'w', encoding = 'utf-8' ) as f:
                    for url in urls:
                        f.write( url )
                        f.write( os.linesep )
                with open( os.path.join( error_dir, 'all_urls.txt' ), 'a', encoding = 'utf-8' ) as f:
                    for url in urls:
                        f.write( url )
                        f.write( os.linesep )
            # only file/post urls (or unrecognised ones) are worth feeding back into the downloader
            useful_urls = []
            for url in urls:
                add_it = False
                url_class = HG.client_controller.network_engine.domain_manager.GetURLClass( url )
                if url_class is None:
                    add_it = True
                else:
                    if url_class.GetURLType() in ( HC.URL_TYPE_FILE, HC.URL_TYPE_POST ):
                        add_it = True
                if add_it:
                    useful_urls.append( url )
            delete_record = job_type in ( REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA )
            try_redownload = job_type in ( REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL ) and len( useful_urls ) > 0
            do_export = file_is_invalid and ( job_type in ( REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ) or ( job_type == REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL and try_redownload ) )
            if do_export:
                # move the invalid file out of client storage for the user to inspect
                HydrusPaths.MakeSureDirectoryExists( error_dir )
                dest_path = os.path.join( error_dir, os.path.basename( path ) )
                HydrusPaths.MergeFile( path, dest_path )
                if not self._pubbed_message_about_invalid_file_export:
                    self._pubbed_message_about_invalid_file_export = True
                    message = 'During file maintenance, a file was found to be invalid. It and any known URLs have been moved to "{}".'.format( error_dir )
                    message += os.linesep * 2
                    message += 'More files may be invalid, but this message will not appear again during this boot.'
                    HydrusData.ShowText( message )
            if delete_record:
                # remove the file record without leaving a deletion record, so a clean reimport is possible
                content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, ( hash, ), reason = 'Record deleted during File Integrity check.' )
                for service_key in [ CC.LOCAL_FILE_SERVICE_KEY, CC.LOCAL_UPDATE_SERVICE_KEY, CC.TRASH_SERVICE_KEY, CC.COMBINED_LOCAL_FILE_SERVICE_KEY ]:
                    service_keys_to_content_updates = { service_key : [ content_update ] }
                    self._controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
                content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ADVANCED, ( 'delete_deleted', ( hash, ) ) )
                service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ content_update ] }
                self._controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
                if not self._pubbed_message_about_bad_file_record_delete:
                    self._pubbed_message_about_bad_file_record_delete = True
                    message = 'During file maintenance, a file was found to be missing or invalid. Its file record has been removed from the database without leaving a deletion record (so it can be easily reimported). Any known URLs for the file have been written to "{}".'.format( error_dir )
                    message += os.linesep * 2
                    message += 'More file records may have been removed, but this message will not appear again during this boot.'
                    HydrusData.ShowText( message )
            if try_redownload:
                def qt_add_url( url ):
                    if QP.isValid( HG.client_controller.gui ):
                        HG.client_controller.gui.ImportURL( url, 'missing files redownloader' )
                # ImportURL touches the gui, so it must happen on the Qt thread
                for url in useful_urls:
                    QP.CallAfter( qt_add_url, url )
        return file_was_bad

    def _CheckSimilarFilesMembership( self, media_result ):
        """Return True if this file's mime can be in the similar-files (phash) system."""
        mime = media_result.GetMime()
        return mime in HC.MIMES_WE_CAN_PHASH

    def _ClearJobs( self, hashes, job_type ):
        """Mark the given (hash, job_type) jobs as done in the db, with no result data."""
        if len( hashes ) > 0:
            cleared_jobs = [ ( hash, job_type, None ) for hash in hashes ]
            self._controller.WriteSynchronous( 'file_maintenance_clear_jobs', cleared_jobs )

    def _DeleteNeighbourDupes( self, media_result ):
        """Delete same-hash copies of the file sitting beside it under wrong extensions."""
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        self._controller.client_files_manager.DeleteNeighbourDupes( hash, mime )

    def _FixFilePermissions( self, media_result ):
        """Make the file writable on disk; a missing file is silently skipped."""
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, mime )
            HydrusPaths.MakeFileWritable( path )
        except HydrusExceptions.FileMissingException:
            return None

    def _RegenFileMetadata( self, media_result ):
        """Re-parse the file and return its fresh metadata tuple, or None on failure.

        If the parsed mime differs from the recorded one, the file is renamed on
        disk and a delayed neighbour-dupe cleanup job is queued.
        """
        hash = media_result.GetHash()
        original_mime = media_result.GetMime()
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, original_mime )
            ( size, mime, width, height, duration, num_frames, has_audio, num_words ) = HydrusFileHandling.GetFileInfo( path, ok_to_look_for_hydrus_updates = True )
            additional_data = ( size, mime, width, height, duration, num_frames, has_audio, num_words )
            if mime != original_mime:
                needed_to_dupe_the_file = self._controller.client_files_manager.ChangeFileExt( hash, original_mime, mime )
                if needed_to_dupe_the_file:
                    # wait a week before deleting the duplicated neighbour, in case this was a mistake
                    self._controller.WriteSynchronous( 'file_maintenance_add_jobs_hashes', { hash }, REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES, HydrusData.GetNow() + ( 7 * 86400 ) )
            return additional_data
        except HydrusExceptions.MimeException:
            # the file could not be parsed at all--check whether it is actually intact
            self._CheckFileIntegrity( media_result, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL )
            return None
        except HydrusExceptions.FileMissingException:
            return None

    def _RegenFileModifiedTimestamp( self, media_result ):
        """Return the file's on-disk modified timestamp, or None if unavailable."""
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        if mime in HC.HYDRUS_UPDATE_FILES:
            return None
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, mime )
            file_modified_timestamp = HydrusFileHandling.GetFileModifiedTimestamp( path )
            additional_data = file_modified_timestamp
            return additional_data
        except HydrusExceptions.FileMissingException:
            return None

    def _RegenFileOtherHashes( self, media_result ):
        """Return ( md5, sha1, sha512 ) for the file, or None if unavailable."""
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        if mime in HC.HYDRUS_UPDATE_FILES:
            return None
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, mime )
            ( md5, sha1, sha512 ) = HydrusFileHandling.GetExtraHashesFromPath( path )
            additional_data = ( md5, sha1, sha512 )
            return additional_data
        except HydrusExceptions.FileMissingException:
            return None

    def _RegenSimilarFilesMetadata( self, media_result ):
        """Return fresh perceptual hashes for the file, or None if it cannot have any."""
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        if mime not in HC.MIMES_WE_CAN_PHASH:
            # not phashable--queue a membership check so the db can drop it from the system
            self._controller.WriteSynchronous( 'file_maintenance_add_jobs_hashes', { hash }, REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP )
            return None
        try:
            path = self._controller.client_files_manager.GetFilePath( hash, mime )
        except HydrusExceptions.FileMissingException:
            return None
        phashes = ClientImageHandling.GenerateShapePerceptualHashes( path, mime )
        return phashes

    def _RegenFileThumbnailForce( self, media_result ):
        """Unconditionally rebuild the thumbnail; a missing original is silently skipped."""
        mime = media_result.GetMime()
        if mime not in HC.MIMES_WITH_THUMBNAILS:
            return
        try:
            self._controller.client_files_manager.RegenerateThumbnail( media_result )
        except HydrusExceptions.FileMissingException:
            pass

    def _RegenFileThumbnailRefit( self, media_result ):
        """Rebuild the thumbnail only if it is the wrong size; returns True if it was."""
        mime = media_result.GetMime()
        if mime not in HC.MIMES_WITH_THUMBNAILS:
            return
        try:
            was_regenerated = self._controller.client_files_manager.RegenerateThumbnailIfWrongSize( media_result )
            return was_regenerated
        except HydrusExceptions.FileMissingException:
            pass

    def _ReInitialiseWorkRules( self ):
        """Rebuild the idle/active throttle rules from the current options."""
        file_maintenance_idle_throttle_files = self._controller.new_options.GetInteger( 'file_maintenance_idle_throttle_files' )
        file_maintenance_idle_throttle_time_delta = self._controller.new_options.GetInteger( 'file_maintenance_idle_throttle_time_delta' )
        self._idle_work_rules = HydrusNetworking.BandwidthRules()
        self._idle_work_rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, file_maintenance_idle_throttle_time_delta, file_maintenance_idle_throttle_files * NORMALISED_BIG_JOB_WEIGHT )
        file_maintenance_active_throttle_files = self._controller.new_options.GetInteger( 'file_maintenance_active_throttle_files' )
        file_maintenance_active_throttle_time_delta = self._controller.new_options.GetInteger( 'file_maintenance_active_throttle_time_delta' )
        self._active_work_rules = HydrusNetworking.BandwidthRules()
        self._active_work_rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, file_maintenance_active_throttle_time_delta, file_maintenance_active_throttle_files * NORMALISED_BIG_JOB_WEIGHT )

    def _RunJob( self, media_results, job_type, job_key ):
        """Run job_type over the given media_results, reporting progress to job_key
        and recording each finished job (with its result data) back to the db.
        Caller is expected to hold self._lock.
        """
        num_bad_files = 0
        num_thumb_refits = 0
        next_gc_collect = HydrusData.GetNow() + 10  # NOTE(review): appears unused in this method
        try:
            cleared_jobs = []
            num_to_do = len( media_results )
            if HG.file_report_mode:
                HydrusData.ShowText( 'file maintenance: {} for {} files'.format( regen_file_enum_to_str_lookup[ job_type ], HydrusData.ToHumanInt( num_to_do ) ) )
            for ( i, media_result ) in enumerate( media_results ):
                hash = media_result.GetHash()
                if job_key.IsCancelled():
                    return
                status_text = '{}: {}'.format( regen_file_enum_to_str_lookup[ job_type ], HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
                job_key.SetVariable( 'popup_text_1', status_text )
                job_key.SetVariable( 'popup_gauge_1', ( i + 1, num_to_do ) )
                additional_data = None
                try:
                    # dispatch on job type; 'additional_data' is whatever the db stores as the result
                    if job_type == REGENERATE_FILE_DATA_JOB_FILE_METADATA:
                        additional_data = self._RegenFileMetadata( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_FILE_MODIFIED_TIMESTAMP:
                        additional_data = self._RegenFileModifiedTimestamp( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_OTHER_HASHES:
                        additional_data = self._RegenFileOtherHashes( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL:
                        self._RegenFileThumbnailForce( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL:
                        was_regenerated = self._RegenFileThumbnailRefit( media_result )
                        if was_regenerated:
                            num_thumb_refits += 1
                        job_key.SetVariable( 'popup_text_2', 'thumbs needing regen: {}'.format( HydrusData.ToHumanInt( num_thumb_refits ) ) )
                    elif job_type == REGENERATE_FILE_DATA_JOB_DELETE_NEIGHBOUR_DUPES:
                        self._DeleteNeighbourDupes( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_CHECK_SIMILAR_FILES_MEMBERSHIP:
                        additional_data = self._CheckSimilarFilesMembership( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_SIMILAR_FILES_METADATA:
                        additional_data = self._RegenSimilarFilesMetadata( media_result )
                    elif job_type == REGENERATE_FILE_DATA_JOB_FIX_PERMISSIONS:
                        self._FixFilePermissions( media_result )
                    elif job_type in ( REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_PRESENCE_URL, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_URL, REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA_SILENT_DELETE ):
                        file_was_bad = self._CheckFileIntegrity( media_result, job_type )
                        if file_was_bad:
                            num_bad_files += 1
                        job_key.SetVariable( 'popup_text_2', 'missing or invalid files: {}'.format( HydrusData.ToHumanInt( num_bad_files ) ) )
                except Exception as e:
                    HydrusData.PrintException( e )
                    message = 'There was a problem performing maintenance task {} on file {}! The job will not be reattempted. A full traceback of this error should be written to the log.'.format( regen_file_enum_to_str_lookup[ job_type ], hash.hex() )
                    message += os.linesep * 2
                    message += str( e )
                    HydrusData.ShowText( message )
                finally:
                    # whether it worked or not, the job is consumed: report the work and queue the db clear
                    self._work_tracker.ReportRequestUsed( num_requests = regen_file_enum_to_job_weight_lookup[ job_type ] )
                    cleared_jobs.append( ( hash, job_type, additional_data ) )
                    self._jobs_since_last_gc_collect += 1
                    if self._jobs_since_last_gc_collect > 100:
                        gc.collect()
                        self._jobs_since_last_gc_collect = 0
                if len( cleared_jobs ) > 100:
                    # flush finished jobs in batches so a long run does not hold them all in memory
                    self._controller.WriteSynchronous( 'file_maintenance_clear_jobs', cleared_jobs )
                    cleared_jobs = []
        finally:
            if len( cleared_jobs ) > 0:
                self._controller.Write( 'file_maintenance_clear_jobs', cleared_jobs )

    def CancelJobs( self, job_type ):
        """Remove all pending jobs of the given type and interrupt the background loop."""
        with self._lock:
            self._controller.WriteSynchronous( 'file_maintenance_cancel_jobs', job_type )
            self._reset_background_event.set()

    def ClearJobs( self, hashes, job_type ):
        """Mark the given jobs done (no result data) and interrupt the background loop."""
        with self._lock:
            self._ClearJobs( hashes, job_type )
            self._reset_background_event.set()

    def ForceMaintenance( self, mandated_job_types = None ):
        """Run all outstanding jobs right now (optionally only the given job types),
        ignoring idle/active throttles, with a cancellable progress popup."""
        self._reset_background_event.set()
        job_key = ClientThreading.JobKey( cancellable = True )
        job_key.SetVariable( 'popup_title', 'regenerating file data' )
        message_pubbed = False
        work_done = False
        with self._maintenance_lock:
            try:
                while True:
                    job = self._controller.Read( 'file_maintenance_get_job', mandated_job_types )
                    if job is None:
                        break
                    work_done = True
                    if not message_pubbed:
                        # only show the popup once we know there is something to do
                        self._controller.pub( 'message', job_key )
                        message_pubbed = True
                    if job_key.IsCancelled():
                        return
                    ( hashes, job_type ) = job
                    media_results = self._controller.Read( 'media_results', hashes )
                    # hashes with no media result cannot be worked on--just clear them
                    hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
                    missing_hashes = [ hash for hash in hashes if hash not in hashes_to_media_results ]
                    self._ClearJobs( missing_hashes, job_type )
                    with self._lock:
                        self._RunJob( media_results, job_type, job_key )
                    time.sleep( 0.0001 )
            finally:
                job_key.SetVariable( 'popup_text_1', 'done!' )
                job_key.DeleteVariable( 'popup_gauge_1' )
                job_key.Finish()
                job_key.Delete( 5 )
                if not work_done:
                    HydrusData.ShowText( 'No file maintenance due!' )
                self._controller.pub( 'notify_files_maintenance_done' )

    def MainLoopBackgroundWork( self ):
        """Background thread: pull job batches from the db and work through them one
        file at a time, pausing whenever the throttle rules say to stop."""
        def check_shutdown():
            if HydrusThreading.IsThreadShuttingDown() or self._shutdown:
                raise HydrusExceptions.ShutdownException()
        def wait_on_maintenance():
            # block until we may work again, or a reset/shutdown interrupts the wait
            while True:
                check_shutdown()
                if self._AbleToDoBackgroundMaintenance() or self._reset_background_event.is_set():
                    break
                time.sleep( 1 )
        def should_reset():
            if self._reset_background_event.is_set():
                self._reset_background_event.clear()
                return True
            else:
                return False
        try:
            # give the client a few seconds to finish booting before starting work
            time_to_start = HydrusData.GetNow() + 15
            while not HydrusData.TimeHasPassed( time_to_start ):
                check_shutdown()
                time.sleep( 1 )
            while True:
                check_shutdown()
                did_work = False
                with self._maintenance_lock:
                    job = self._controller.Read( 'file_maintenance_get_job' )
                    if job is not None:
                        did_work = True
                        job_key = ClientThreading.JobKey()
                        i = 0
                        try:
                            ( hashes, job_type ) = job
                            media_results = self._controller.Read( 'media_results', hashes )
                            # clear jobs for hashes that no longer have media results
                            hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
                            missing_hashes = [ hash for hash in hashes if hash not in hashes_to_media_results ]
                            self._ClearJobs( missing_hashes, job_type )
                            for media_result in media_results:
                                wait_on_maintenance()
                                if should_reset():
                                    break
                                with self._lock:
                                    self._RunJob( ( media_result, ), job_type, job_key )
                                time.sleep( 0.0001 )
                                i += 1
                                if i % 100 == 0:
                                    self._controller.pub( 'notify_files_maintenance_done' )
                        finally:
                            self._controller.pub( 'notify_files_maintenance_done' )
                if not did_work:
                    # nothing queued--sleep until woken or ten minutes pass
                    self._wake_background_event.wait( 600 )
                    self._wake_background_event.clear()
                time.sleep( 2 )
        except HydrusExceptions.ShutdownException:
            pass

    def NotifyNewOptions( self ):
        """Options changed: rebuild the throttle rules."""
        with self._lock:
            self._ReInitialiseWorkRules()

    def RunJobImmediately( self, media_results, job_type, pub_job_key = True ):
        """Run job_type over the given media results right now, with an optional popup."""
        job_key = ClientThreading.JobKey( cancellable = True )
        job_key.SetVariable( 'popup_title', 'regenerating file data' )
        if pub_job_key:
            self._controller.pub( 'message', job_key )
        self._reset_background_event.set()
        with self._lock:
            try:
                self._RunJob( media_results, job_type, job_key )
            finally:
                job_key.SetVariable( 'popup_text_1', 'done!' )
                job_key.DeleteVariable( 'popup_gauge_1' )
                job_key.Finish()
                job_key.Delete( 5 )
                self._controller.pub( 'notify_files_maintenance_done' )

    def ScheduleJob( self, hashes, job_type, time_can_start = 0 ):
        """Queue job_type for the given hashes; it will run no earlier than time_can_start."""
        with self._lock:
            self._controller.Write( 'file_maintenance_add_jobs_hashes', hashes, job_type, time_can_start )
            self._wake_background_event.set()

    def ScheduleJobHashIds( self, hash_ids, job_type, time_can_start = 0 ):
        """As ScheduleJob, but takes db hash_ids instead of hash bytes."""
        with self._lock:
            self._controller.Write( 'file_maintenance_add_jobs', hash_ids, job_type, time_can_start )
            self._wake_background_event.set()

    def Shutdown( self ):
        """Flag the background thread to exit and wake it so it notices promptly."""
        self._shutdown = True
        self._wake_background_event.set()

    def Start( self ):
        """Spawn the background maintenance thread."""
        self._controller.CallToThreadLongRunning( self.MainLoopBackgroundWork )
| true | true |
f7f5d24400cc77a154e73aae8bfd54bf527c7723 | 2,608 | py | Python | experimenting/buttons.py | aidanchandra/PiPod | 615c0c4476ac1b00518b722b7235ed3348337956 | [
"MIT"
] | null | null | null | experimenting/buttons.py | aidanchandra/PiPod | 615c0c4476ac1b00518b722b7235ed3348337956 | [
"MIT"
] | null | null | null | experimenting/buttons.py | aidanchandra/PiPod | 615c0c4476ac1b00518b722b7235ed3348337956 | [
"MIT"
] | null | null | null | # Copyright (c) 2017 Adafruit Industries
# Author: James DeVito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#import RPi.GPIO as GPIO
import time

#import Adafruit_GPIO.SPI as SPI
#import Adafruit_SSD1306

from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont

# GPIO input pins for the joystick hat and the two push buttons.
L_pin = 27
R_pin = 23
C_pin = 4
U_pin = 17
D_pin = 22
A_pin = 5
B_pin = 6

# Raspberry Pi pin configuration:
RST = 24

# Note the following are only used with SPI:
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0

# 128x64 display with hardware I2C:
#disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)

# Initialize library.
#disp.begin()

# Clear display.
#disp.clear()
#disp.display()

# Draw on an in-memory 1-bit image of the display's size instead of real hardware.
width = 128   #disp.width
height = 64   #disp.height
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)

# Black out the whole canvas first.
draw.rectangle((0, 0, width, height), outline=0, fill=0)

# Every control of the bonnet, drawn filled in a single pass.
controls = [
    (draw.polygon, [(20, 20), (30, 2), (40, 20)]),    # up arrow
    (draw.polygon, [(0, 30), (18, 21), (18, 41)]),    # left arrow
    (draw.polygon, [(60, 30), (42, 21), (42, 41)]),   # right arrow
    (draw.polygon, [(30, 60), (40, 42), (20, 42)]),   # down arrow
    (draw.rectangle, (20, 22, 40, 40)),               # centre press
    (draw.ellipse, (70, 40, 90, 60)),                 # A button
    (draw.ellipse, (100, 20, 120, 40)),               # B button
]

for draw_shape, coords in controls:
    draw_shape(coords, outline=255, fill=1)

# No display attached: pop the image up on screen instead of disp.display().
image.show()
#disp.display()
time.sleep(.01)
| 30.325581 | 79 | 0.729294 |
# Experiment script: draw the bonnet's joystick/button layout onto a PIL image.
import time
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont

# GPIO input pins for the joystick hat and the two push buttons.
L_pin = 27
R_pin = 23
C_pin = 4
U_pin = 17
D_pin = 22
A_pin = 5
B_pin = 6

# Display reset pin (I2C); DC/SPI values are only used with SPI displays.
RST = 24
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0

# 128x64 1-bit canvas matching the SSD1306 display.
width = 128
height = 64
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)

# Clear to black, then draw each control filled white.
draw.rectangle((0,0,width,height), outline=0, fill=0)
draw.polygon([(20, 20), (30, 2), (40, 20)], outline=255, fill=1)   # up arrow
draw.polygon([(0, 30), (18, 21), (18, 41)], outline=255, fill=1)   # left arrow
draw.polygon([(60, 30), (42, 21), (42, 41)], outline=255, fill=1)  # right arrow
draw.polygon([(30, 60), (40, 42), (20, 42)], outline=255, fill=1)  # down arrow
draw.rectangle((20, 22,40,40), outline=255, fill=1)                # centre press
draw.ellipse((70,40,90,60), outline=255, fill=1)                   # A button
draw.ellipse((100,20,120,40), outline=255, fill=1)                 # B button
image.show()
time.sleep(.01)
| true | true |
f7f5d31879898e8d9e0f37333d1db472f160987f | 150 | py | Python | actions/get_servers.py | StackStorm-Exchange/powerdns | 13879e0e66b29a466d82c1077a1d4abde69c0d3e | [
"Apache-2.0"
] | null | null | null | actions/get_servers.py | StackStorm-Exchange/powerdns | 13879e0e66b29a466d82c1077a1d4abde69c0d3e | [
"Apache-2.0"
] | null | null | null | actions/get_servers.py | StackStorm-Exchange/powerdns | 13879e0e66b29a466d82c1077a1d4abde69c0d3e | [
"Apache-2.0"
] | null | null | null | from lib.base import PowerDNSClient
class Servers(PowerDNSClient):
def _run(self):
return [str(server) for server in self._api.servers]
| 21.428571 | 60 | 0.726667 | from lib.base import PowerDNSClient
class Servers(PowerDNSClient):
def _run(self):
return [str(server) for server in self._api.servers]
| true | true |
f7f5d3be2504f1ae43721558b750ac5ecdd8b85d | 5,412 | py | Python | cynetworkx/algorithms/approximation/clique.py | Viech/cynetworkx | 01a37859c67b752392e9e783c949084964eef2cf | [
"BSD-3-Clause"
] | 12 | 2019-07-23T08:07:53.000Z | 2022-03-09T06:13:16.000Z | cynetworkx/algorithms/approximation/clique.py | Viech/cynetworkx | 01a37859c67b752392e9e783c949084964eef2cf | [
"BSD-3-Clause"
] | 7 | 2019-08-30T07:00:00.000Z | 2021-12-30T08:02:56.000Z | cynetworkx/algorithms/approximation/clique.py | Viech/cynetworkx | 01a37859c67b752392e9e783c949084964eef2cf | [
"BSD-3-Clause"
] | 5 | 2020-10-10T03:40:32.000Z | 2021-11-23T12:28:53.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2011-2018 by
# Nicholas Mancuso <nick.mancuso@gmail.com>
# All rights reserved.
# BSD license.
# Copyright 2016-2018 NetworkX developers.
# NetworkX is distributed under a BSD license
#
# Authors: Nicholas Mancuso (nick.mancuso@gmail.com)
# Jeffery Finkelstein <jeffrey.finkelstein@gmail.com>
# Dan Schult <dschult@colgate.edu>
"""Functions for computing large cliques."""
from operator import itemgetter
import cynetworkx as nx
from cynetworkx.utils import not_implemented_for
from cynetworkx.algorithms.approximation import ramsey
__all__ = ["clique_removal", "max_clique", "large_clique_size"]
def max_clique(G):
    r"""Find an approximate maximum clique.

    Achieves the $O(|V|/(log|V|)^2)$ approximation of maximum clique /
    independent set in the worst case.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    Returns
    -------
    clique : set
        The approximate maximum clique of the graph.

    Notes
    -----
    A clique in an undirected graph G = (V, E) is a subset of the vertex set
    `C \subseteq V` such that every two vertices in C are adjacent, i.e. the
    subgraph induced by C is complete. A maximum clique is a clique of the
    largest possible size in the graph, and `\omega(G)` denotes its size.

    https://en.wikipedia.org/wiki/Maximum_clique

    References
    ----------
    .. [1] Boppana, R., & Halldórsson, M. M. (1992).
        Approximating maximum independent sets by excluding subgraphs.
        BIT Numerical Mathematics, 32(2), 180–196. Springer.
        doi:10.1007/BF01994876
    """
    if G is None:
        raise ValueError("Expected NetworkX graph!")
    # A maximum clique of G is exactly a maximum independent set of the
    # complement graph, so approximate the independent set there.
    complement_graph = nx.complement(G)
    independent_set, _cliques = clique_removal(complement_graph)
    return independent_set
def clique_removal(G):
    # NOTE: docstring is raw -- the original plain string contained '\log',
    # an invalid escape sequence (SyntaxWarning on modern Python).
    r"""Repeatedly remove cliques from the graph.

    Results in a $O(|V|/(\log |V|)^2)$ approximation of maximum clique
    and independent set. Returns the largest independent set found, along
    with found maximal cliques.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    Returns
    -------
    max_ind_cliques : (set, list) tuple
        2-tuple of Maximal Independent Set and list of maximal cliques (sets).

    References
    ----------
    .. [1] Boppana, R., & Halldórsson, M. M. (1992).
        Approximating maximum independent sets by excluding subgraphs.
        BIT Numerical Mathematics, 32(2), 180–196. Springer.
    """
    graph = G.copy()
    c_i, i_i = ramsey.ramsey_R2(graph)
    cliques = [c_i]
    isets = [i_i]
    while graph:
        graph.remove_nodes_from(c_i)
        c_i, i_i = ramsey.ramsey_R2(graph)
        if c_i:
            cliques.append(c_i)
        if i_i:
            isets.append(i_i)
    # Determine the largest independent set as measured by cardinality.
    maxiset = max(isets, key=len)
    return maxiset, cliques
# @not_implemented_for('directed')
# @not_implemented_for('multigraph')
def large_clique_size(G):
    """Find the size of a large clique in a graph.

    A *clique* is a subset of nodes in which each pair of nodes is adjacent.
    This function is a heuristic for finding the size of a large clique in
    the graph: it may work well in practice, but there is no rigorous
    guarantee on the ratio between the returned number and the actual
    largest clique size. Worst case time is O(n d^2) for n nodes and
    maximum degree d [1]_.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    int
        The size of a large clique in the graph.

    References
    ----------
    .. [1] Pattabiraman, Bharath, et al.
       "Fast Algorithms for the Maximum Clique Problem on Massive Graphs
       with Applications to Overlapping Community Detection."
       *Internet Mathematics* 11.4-5 (2015): 421--448.
       <https://doi.org/10.1080/15427951.2014.986778>

    See also
    --------
    :func:`cynetworkx.algorithms.approximation.clique.max_clique`
    :mod:`cynetworkx.algorithms.clique`
    """
    degrees = G.degree

    def _expand(candidates, size, best):
        # Greedily grow a clique: always absorb the highest-degree candidate,
        # then keep only candidates adjacent to it that could still beat 'best'.
        if not candidates:
            return max(best, size)
        pivot = max(candidates, key=degrees)
        candidates.remove(pivot)
        survivors = {w for w in G[pivot] if degrees[w] >= best}
        return _expand(candidates & survivors, size + 1, best)

    best = 0
    for node in G:
        # re-checked against the current best each iteration, exactly like the
        # lazy generator filter this replaces
        if degrees[node] < best:
            continue
        neighborhood = {w for w in G[node] if degrees[w] >= best}
        best = _expand(neighborhood, 1, best)
    return best
| 31.283237 | 79 | 0.653732 |
from operator import itemgetter
import cynetworkx as nx
from cynetworkx.utils import not_implemented_for
from cynetworkx.algorithms.approximation import ramsey
__all__ = ["clique_removal", "max_clique", "large_clique_size"]
def max_clique(G):
    """Return an approximate maximum clique of the undirected graph G.

    Raises ValueError if G is None.
    """
    if G is None:
        raise ValueError("Expected NetworkX graph!")
    # a maximum clique of G is a maximum independent set of the complement graph
    cgraph = nx.complement(G)
    iset, _ = clique_removal(cgraph)
    return iset
def clique_removal(G):
    """Repeatedly find and remove a clique from a copy of G via Ramsey's algorithm.

    Returns a 2-tuple of (largest independent set found, list of maximal cliques).
    """
    graph = G.copy()
    c_i, i_i = ramsey.ramsey_R2(graph)
    cliques = [c_i]
    isets = [i_i]
    while graph:
        graph.remove_nodes_from(c_i)
        c_i, i_i = ramsey.ramsey_R2(graph)
        if c_i:
            cliques.append(c_i)
        if i_i:
            isets.append(i_i)
    # largest independent set as measured by cardinality
    maxiset = max(isets, key=len)
    return maxiset, cliques
def large_clique_size(G):
    """Heuristically find the size of a large clique in G (no approximation guarantee)."""
    degrees = G.degree

    def _clique_heuristic(G, U, size, best_size):
        # greedily grow a clique from candidate set U, always taking the
        # highest-degree candidate and pruning those that cannot beat best_size
        if not U:
            return max(best_size, size)
        u = max(U, key=degrees)
        U.remove(u)
        N_prime = {v for v in G[u] if degrees[v] >= best_size}
        return _clique_heuristic(G, U & N_prime, size + 1, best_size)

    best_size = 0
    # lazy generator: the degree filter re-checks against the growing best_size
    nodes = (u for u in G if degrees[u] >= best_size)
    for u in nodes:
        neighbors = {v for v in G[u] if degrees[v] >= best_size}
        best_size = _clique_heuristic(G, neighbors, 1, best_size)
    return best_size
| true | true |
f7f5d43fdde621d1c66eb65dcf08d7c8ba56ca85 | 1,516 | py | Python | hard-gists/2da57d5b039aab4da7ce/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/2da57d5b039aab4da7ce/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/2da57d5b039aab4da7ce/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | #!/usr/bin/env python
"""
Sniff a specific port for Bit Torrent DHT traffic and print
requests/responses in human readable form.
Reference: http://www.bittorrent.org/beps/bep_0005.html
"""
from pcapy import open_live
from bencode import bdecode
from socket import inet_aton, inet_ntoa
import dpkt
import sys
# Defaults to 51413 (transmission's default port)
filter_port = 51413
# pcap callback: invoked once per captured frame.
def parse_udp(hdr, data):
    """Decode one captured Ethernet frame; if it carries UDP traffic on
    *filter_port* (either direction), bdecode the payload and print the
    bencoded DHT message.

    Frames that are not IP, not UDP on the filtered port, or whose payload
    fails bdecoding are silently ignored.
    NOTE(review): this module uses Python 2 ``print`` statements.
    """
    global filter_port
    try:
        eth = dpkt.ethernet.Ethernet(data)
    except Exception:
        return
    if eth.type != dpkt.ethernet.ETH_TYPE_IP:
        return
    ip = eth.data
    # Match the port on either source or destination side.
    if ip.p == dpkt.ip.IP_PROTO_UDP and filter_port in (ip.data.dport, ip.data.sport):
        payload = ip.data.data
    else:
        return
    # Print plain text bencoded request.
    try:
        data = bdecode(payload)
        print "%s:%d -> %s:%d (%d bytes): %s\n" % (inet_ntoa(ip.src), ip.data.sport,
            inet_ntoa(ip.dst), ip.data.dport, len(payload), data)
    except Exception:
        return
def main(argv):
    """Entry point: optionally override the module-level *filter_port*
    from ``argv[0]``, then sniff on "eth0" until interrupted.

    NOTE(review): interface name "eth0" and snaplen 65536 are hard-coded;
    open_live's last two args are presumably promiscuous-mode/timeout
    flags of pcapy — confirm against the pcapy API.
    """
    global filter_port
    if len(argv) == 1:
        try:
            filter_port = int(argv[0])
        except ValueError:
            print "Invalid port number"
            sys.exit(1)
    print "[+] Starting sniffer"
    pcap_obj = open_live("eth0", 65536, False, True)
    try:
        # -1: loop forever, handing each packet to parse_udp.
        pcap_obj.loop(-1, parse_udp)
    except KeyboardInterrupt:
        print "[!] Exiting"
        sys.exit(0)
if __name__ == '__main__':
main(sys.argv[1:])
| 24.451613 | 86 | 0.637863 |
"""
Sniff a specific port for Bit Torrent DHT traffic and print
requests/responses in human readable form.
Reference: http://www.bittorrent.org/beps/bep_0005.html
"""
from pcapy import open_live
from bencode import bdecode
from socket import inet_aton, inet_ntoa
import dpkt
import sys
filter_port = 51413
# Callback function for parsing packets
def parse_udp(hdr, data):
global filter_port
try:
eth = dpkt.ethernet.Ethernet(data)
except Exception:
return
if eth.type != dpkt.ethernet.ETH_TYPE_IP:
return
ip = eth.data
if ip.p == dpkt.ip.IP_PROTO_UDP and filter_port in (ip.data.dport, ip.data.sport):
payload = ip.data.data
else:
return
# Print plain text bencoded request.
try:
data = bdecode(payload)
print "%s:%d -> %s:%d (%d bytes): %s\n" % (inet_ntoa(ip.src), ip.data.sport,
inet_ntoa(ip.dst), ip.data.dport, len(payload), data)
except Exception:
return
def main(argv):
global filter_port
if len(argv) == 1:
try:
filter_port = int(argv[0])
except ValueError:
print "Invalid port number"
sys.exit(1)
print "[+] Starting sniffer"
pcap_obj = open_live("eth0", 65536, False, True)
try:
pcap_obj.loop(-1, parse_udp)
except KeyboardInterrupt:
print "[!] Exiting"
sys.exit(0)
if __name__ == '__main__':
main(sys.argv[1:])
| false | true |
f7f5d5f2abba71c323e7799f5033fd8c94e9215a | 177 | py | Python | Codefights/arcade/intro/level-6/28.alphabeticShift/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codefights/arcade/intro/level-6/28.alphabeticShift/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codefights/arcade/intro/level-6/28.alphabeticShift/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python3
def alphabeticShift(inputString):
    """Replace every lowercase letter with the next one in the alphabet,
    wrapping 'z' around to 'a'.  Characters outside a-z pass through
    unchanged.
    """
    def step(ch):
        if 'a' <= ch <= 'z':
            return chr(ord('a') + (ord(ch) - ord('a') + 1) % 26)
        return ch
    return ''.join(step(ch) for ch in inputString)
| 29.5 | 67 | 0.632768 |
def alphabeticShift(inputString):
    """Shift each lowercase letter to its alphabetic successor ('z' -> 'a').

    Characters outside a-z are left unchanged, because str.translate keeps
    any character that has no entry in the translation table.
    """
    la = ''.join(chr(i) for i in range(ord('a'), ord('z') + 1))  # "ab...z"
    return inputString.translate(str.maketrans(la, la[1:] + la[0]))
| true | true |
f7f5d62860d7e3f120c2e90cab80011842d58dd6 | 2,707 | py | Python | juanzeng.py | zh981008/zh-farm | e8cdb977579eb29417be000331c342427c4daf54 | [
"MIT"
] | null | null | null | juanzeng.py | zh981008/zh-farm | e8cdb977579eb29417be000331c342427c4daf54 | [
"MIT"
] | null | null | null | juanzeng.py | zh981008/zh-farm | e8cdb977579eb29417be000331c342427c4daf54 | [
"MIT"
] | null | null | null | import uiautomator2 as u2
import time
from utils import *
from cv import *
from Automator import *
import matplotlib.pylab as plt
plt.ion()
fig, ax = plt.subplots(1)
plt.show()
a = Automator()
a.start()
def login_auth(ac,pwd):
    """Log in with account *ac* / password *pwd*; if the login flow asks
    for identity verification, submit a randomly generated name and ID
    number (via the module-level Automator instance ``a``).
    """
    need_auth = a.login(ac=ac,pwd=pwd)
    if need_auth:
        auth_name,auth_id = random_name(), CreatIDnum()
        a.auth(auth_name =auth_name ,auth_id = auth_id)
def init_home():
    """Drive the UI back to the game's home screen.

    Keeps tapping a neutral corner pixel to dismiss popups until the gift
    icon ('img/liwu.jpg'), which marks the home screen, is visible.  The
    wait is done twice (with a short pause in between) so the screen has
    settled, then the home tab at (100, 505) is tapped.

    Fix: the two identical polling loops were duplicated inline; they are
    now a single helper parameterised by the retry delay.
    """
    def _tap_until_home(delay):
        # Poll the screen; tap (1, 1) until the home marker appears.
        while True:
            screen = a.d.screenshot(format="opencv")
            if a.is_there_img(screen, 'img/liwu.jpg'):
                break
            a.d.click(1, 1)
            time.sleep(delay)
    _tap_until_home(0.5)  # first pass: coarse polling
    time.sleep(0.5)
    _tap_until_home(0.2)  # second pass: confirm we are really at home
    a.d.click(100, 505)
def change_acc():
    """Log out of the current account via the settings menu so the next
    account can sign in.  Taps the same three screen positions as before,
    pausing one second before each tap and once more at the end.
    """
    for x, y in ((871, 513), (165, 411), (591, 369)):
        time.sleep(1)
        a.d.click(x, y)
    time.sleep(1)
def hanghui():  # automatic guild donation
    """Open the guild screen and perform the donation sequence, then
    return to the home screen.

    The taps are driven by template images: 'juanzeng' / 'max' /
    'hanghui_ok' — presumably donate / MAX amount / confirm buttons
    (inferred from the image names; verify against the assets).
    """
    # First make sure we are on the home screen (gift icon marks home).
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(100,505)
        a.d.click(1,1)
        time.sleep(1)  # keep tapping until locked onto the home screen
    time.sleep(1)
    # Tap the guild entry — fixed home-screen coordinates.
    a.d.click(693, 436)
    time.sleep(1)
    # Wait until the guild screen is recognised, then run the taps.
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        state_flag = a.get_screen_state(screen_shot_)
        if state_flag == 'hanghui':
            screen_shot = a.d.screenshot(format="opencv")
            a.guochang(screen_shot, ['img/juanzeng.jpg'],suiji=0)
            time.sleep(1)
            screen_shot = a.d.screenshot(format="opencv")
            a.guochang(screen_shot, ['img/max.jpg'],suiji=0)
            time.sleep(1)
            screen_shot = a.d.screenshot(format="opencv")
            a.guochang(screen_shot, ['img/hanghui_ok.jpg'],suiji=0)
            time.sleep(1)
            break
    a.d.click(100, 505)
    time.sleep(1)
    # Return to the home screen before handing control back.
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(100,505)
        a.d.click(1,1)
        time.sleep(1)  # keep tapping until locked onto the home screen
#%%
#==============================================================================
#主程序
account_dic = {}
with open('zhanghao.txt','r') as f:
for i,line in enumerate(f):
account,password = line.split('\t')[0:2]
account_dic[account]=password.strip()
for account in account_dic:
print(account, account_dic[account])
login_auth(account, account_dic[account])
init_home()#初始化,确保进入首页
hanghui()#行会捐赠
change_acc()#退出当前账号,切换下一个 | 26.539216 | 79 | 0.583303 | import uiautomator2 as u2
import time
from utils import *
from cv import *
from Automator import *
import matplotlib.pylab as plt
plt.ion()
fig, ax = plt.subplots(1)
plt.show()
a = Automator()
a.start()
def login_auth(ac,pwd):
need_auth = a.login(ac=ac,pwd=pwd)
if need_auth:
auth_name,auth_id = random_name(), CreatIDnum()
a.auth(auth_name =auth_name ,auth_id = auth_id)
def init_home():
while True:
screen_shot_ = a.d.screenshot(format="opencv")
if a.is_there_img(screen_shot_,'img/liwu.jpg'):
break
a.d.click(1,1)
time.sleep(0.5)
time.sleep(0.5)
while True:
screen_shot_ = a.d.screenshot(format="opencv")
if a.is_there_img(screen_shot_,'img/liwu.jpg'):
break
a.d.click(1,1)
time.sleep(0.2)
a.d.click(100,505)
def change_acc():
time.sleep(1)
a.d.click(871, 513)
time.sleep(1)
a.d.click(165, 411)
time.sleep(1)
a.d.click(591, 369)
time.sleep(1)
def hanghui():
while True:
screen_shot_ = a.d.screenshot(format="opencv")
if a.is_there_img(screen_shot_,'img/liwu.jpg'):
break
a.d.click(100,505)
a.d.click(1,1)
time.sleep(1)
time.sleep(1)
a.d.click(693, 436)
time.sleep(1)
while True:
screen_shot_ = a.d.screenshot(format="opencv")
state_flag = a.get_screen_state(screen_shot_)
if state_flag == 'hanghui':
screen_shot = a.d.screenshot(format="opencv")
a.guochang(screen_shot, ['img/juanzeng.jpg'],suiji=0)
time.sleep(1)
screen_shot = a.d.screenshot(format="opencv")
a.guochang(screen_shot, ['img/max.jpg'],suiji=0)
time.sleep(1)
screen_shot = a.d.screenshot(format="opencv")
a.guochang(screen_shot, ['img/hanghui_ok.jpg'],suiji=0)
time.sleep(1)
break
a.d.click(100, 505)
time.sleep(1)
while True:
screen_shot_ = a.d.screenshot(format="opencv")
if a.is_there_img(screen_shot_,'img/liwu.jpg'):
break
a.d.click(100,505)
a.d.click(1,1)
time.sleep(1)
account_dic = {}
with open('zhanghao.txt','r') as f:
for i,line in enumerate(f):
account,password = line.split('\t')[0:2]
account_dic[account]=password.strip()
for account in account_dic:
print(account, account_dic[account])
login_auth(account, account_dic[account])
init_home()
hanghui()
change_acc() | true | true |
f7f5d63b33bee31b805f7f20fb39eeec887ad8d8 | 22,072 | py | Python | numba/core/compiler.py | svrakitin/numba | 830a2c7ccc410f270677b0b241f9b8acc2598101 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | numba/core/compiler.py | svrakitin/numba | 830a2c7ccc410f270677b0b241f9b8acc2598101 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2019-08-29T21:03:09.000Z | 2019-08-29T21:04:26.000Z | numba/core/compiler.py | svrakitin/numba | 830a2c7ccc410f270677b0b241f9b8acc2598101 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | from collections import namedtuple
import copy
import warnings
from numba.core.tracing import event
from numba.core import (utils, errors, typing, interpreter, bytecode, postproc,
config, callconv, cpu)
from numba.parfors.parfor import ParforDiagnostics
from numba.core.inline_closurecall import InlineClosureCallPass
from numba.core.errors import CompilerError
from numba.core.compiler_machinery import PassManager
from numba.core.untyped_passes import (ExtractByteCode, TranslateByteCode,
FixupArgs, IRProcessing, DeadBranchPrune,
RewriteSemanticConstants,
InlineClosureLikes, GenericRewrites,
WithLifting, InlineInlinables,
FindLiterallyCalls,
MakeFunctionToJitFunction,
CanonicalizeLoopExit,
CanonicalizeLoopEntry, LiteralUnroll,)
from numba.core.typed_passes import (NopythonTypeInference, AnnotateTypes,
NopythonRewrites, PreParforPass,
ParforPass, DumpParforDiagnostics,
IRLegalization, NoPythonBackend,
InlineOverloads)
from numba.core.object_mode_passes import (ObjectModeFrontEnd,
ObjectModeBackEnd, CompileInterpMode)
class Flags(utils.ConfigOptions):
    """Compiler option names and their baseline default values.

    Backed by ``utils.ConfigOptions``; ``DEFAULT_FLAGS`` (defined below,
    with 'nrt' set) is the baseline used by ``compile_isolated``.
    """
    # These options are all false by default, but the defaults are
    # different with the @jit decorator (see targets.options.TargetOptions).
    OPTIONS = {
        # Enable loop-lifting
        'enable_looplift': False,
        # Enable pyobject mode (in general)
        'enable_pyobject': False,
        # Enable pyobject mode inside lifted loops
        'enable_pyobject_looplift': False,
        # Force pyobject mode inside the whole function
        'force_pyobject': False,
        # Release GIL inside the native function
        'release_gil': False,
        'no_compile': False,
        'debuginfo': False,
        'boundscheck': False,
        'forceinline': False,
        'no_cpython_wrapper': False,
        # Enable automatic parallel optimization, can be fine-tuned by taking
        # a dictionary of sub-options instead of a boolean, see parfor.py for
        # detail.
        'auto_parallel': cpu.ParallelOptions(False),
        'nrt': False,
        'no_rewrites': False,
        'error_model': 'python',
        'fastmath': cpu.FastMathOptions(False),
        'noalias': False,
        'inline': cpu.InlineOptions('never'),
    }
DEFAULT_FLAGS = Flags()
DEFAULT_FLAGS.set('nrt')
CR_FIELDS = ["typing_context",
"target_context",
"entry_point",
"typing_error",
"type_annotation",
"signature",
"objectmode",
"lifted",
"fndesc",
"interpmode",
"library",
"call_helper",
"environment",
"metadata",
# List of functions to call to initialize on unserialization
# (i.e cache load).
"reload_init",
]
class CompileResult(namedtuple("_CompileResult", CR_FIELDS)):
    """
    A structure holding results from the compilation of a function.
    """
    __slots__ = ()
    def _reduce(self):
        """
        Reduce a CompileResult to picklable components.

        Returns the tuple consumed by ``_rebuild``.  The function
        descriptor's ``typemap``/``calltypes`` are nulled out first:
        they are not needed for rebuilding and may fail to pickle.
        """
        libdata = self.library.serialize_using_object_code()
        # Make it (un)picklable efficiently
        typeann = str(self.type_annotation)
        fndesc = self.fndesc
        # Those don't need to be pickled and may fail
        fndesc.typemap = fndesc.calltypes = None
        return (libdata, self.fndesc, self.environment, self.signature,
                self.objectmode, self.interpmode, self.lifted, typeann,
                self.reload_init)
    @classmethod
    def _rebuild(cls, target_context, libdata, fndesc, env,
                 signature, objectmode, interpmode, lifted, typeann,
                 reload_init):
        """
        Reconstruct a CompileResult from the components produced by
        ``_reduce`` (i.e. on cache load), re-running any registered
        ``reload_init`` callables first.
        """
        if reload_init:
            # Re-run all
            for fn in reload_init:
                fn()
        library = target_context.codegen().unserialize_library(libdata)
        cfunc = target_context.get_executable(library, fndesc, env)
        cr = cls(target_context=target_context,
                 typing_context=target_context.typing_context,
                 library=library,
                 environment=env,
                 entry_point=cfunc,
                 fndesc=fndesc,
                 type_annotation=typeann,
                 signature=signature,
                 objectmode=objectmode,
                 interpmode=interpmode,
                 lifted=lifted,
                 typing_error=None,
                 call_helper=None,
                 metadata=None,  # Do not store, arbitrary & potentially large!
                 reload_init=reload_init,
                 )
        return cr
_LowerResult = namedtuple("_LowerResult", [
"fndesc",
"call_helper",
"cfunc",
"env",
])
def compile_result(**kws):
    """Build a CompileResult from keyword arguments.

    Unknown field names raise ``NameError``; fields not supplied are
    filled with ``None``.  A typing error, if present, is stripped of its
    traceback so the result does not keep frame objects alive.
    """
    unknown = set(kws) - set(CR_FIELDS)
    if unknown:
        raise NameError(*unknown)
    for field in CR_FIELDS:
        kws.setdefault(field, None)
    # Avoid keeping alive traceback variables
    err = kws['typing_error']
    if err is not None:
        kws['typing_error'] = err.with_traceback(None)
    return CompileResult(**kws)
def compile_isolated(func, args, return_type=None, flags=DEFAULT_FLAGS,
                     locals={}):
    """
    Compile the function in an isolated environment (typing and target
    context).
    Good for testing.

    NOTE(review): *locals* has a shared mutable default dict; it is only
    passed through here, but confirm nothing downstream mutates it.
    """
    from numba.core.registry import cpu_target
    typingctx = typing.Context()
    targetctx = cpu.CPUContext(typingctx)
    # Register the contexts in case for nested @jit or @overload calls
    with cpu_target.nested_context(typingctx, targetctx):
        return compile_extra(typingctx, targetctx, func, args, return_type,
                             flags, locals)
def run_frontend(func, inline_closures=False):
    """
    Run the compiler frontend over the given Python function, and return
    the function's canonical Numba IR.

    If *inline_closures* is truthy, closure inlining is run (with parallel
    transforms disabled — note the ``ParallelOptions(False)``).
    """
    # XXX make this a dedicated Pipeline?
    func_id = bytecode.FunctionIdentity.from_function(func)
    interp = interpreter.Interpreter(func_id)
    bc = bytecode.ByteCode(func_id=func_id)
    func_ir = interp.interpret(bc)
    if inline_closures:
        inline_pass = InlineClosureCallPass(func_ir, cpu.ParallelOptions(False),
                                            {}, False)
        inline_pass.run()
    post_proc = postproc.PostProcessor(func_ir)
    post_proc.run()
    return func_ir
class _CompileStatus(object):
"""
Describes the state of compilation. Used like a C record.
"""
__slots__ = ['fail_reason', 'can_fallback', 'can_giveup']
def __init__(self, can_fallback, can_giveup):
self.fail_reason = None
self.can_fallback = can_fallback
self.can_giveup = can_giveup
def __repr__(self):
vals = []
for k in self.__slots__:
vals.append("{k}={v}".format(k=k, v=getattr(self, k)))
return ', '.join(vals)
class _EarlyPipelineCompletion(Exception):
    """
    Raised to indicate that a pipeline has completed early.

    The finished compilation result is carried in ``self.result`` and is
    unpacked by ``CompilerBase._compile_core``.
    """
    def __init__(self, result):
        self.result = result
class StateDict(dict):
    """A dict whose keys can also be read and written as attributes
    (``state.flags`` is ``state['flags']``).  Missing keys surface as
    AttributeError so ``getattr``/``hasattr`` behave naturally.
    """
    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails, so plain
        # attributes (none are defined) are unaffected.
        if attr in self:
            return self[attr]
        raise AttributeError(attr)
    def __setattr__(self, attr, value):
        self[attr] = value
def _make_subtarget(targetctx, flags):
    """
    Make a new target context from the given target context and flags.

    Each truthy compiler flag is mapped to the corresponding subtarget
    option; an error-model object built from ``flags.error_model`` is
    always attached.
    """
    subtargetoptions = {}
    if flags.debuginfo:
        subtargetoptions['enable_debuginfo'] = True
    if flags.boundscheck:
        subtargetoptions['enable_boundscheck'] = True
    if flags.nrt:
        subtargetoptions['enable_nrt'] = True
    if flags.auto_parallel:
        subtargetoptions['auto_parallel'] = flags.auto_parallel
    if flags.fastmath:
        subtargetoptions['fastmath'] = flags.fastmath
    error_model = callconv.create_error_model(flags.error_model, targetctx)
    subtargetoptions['error_model'] = error_model
    return targetctx.subtarget(**subtargetoptions)
class CompilerBase(object):
    """
    Stores and manages states for the compiler.

    All inter-pass state lives in ``self.state`` (a StateDict); concrete
    subclasses supply the pipelines via ``define_pipelines``.
    """
    def __init__(self, typingctx, targetctx, library, args, return_type, flags,
                 locals):
        # Make sure the environment is reloaded
        config.reload_config()
        typingctx.refresh()
        targetctx.refresh()
        self.state = StateDict()
        self.state.typingctx = typingctx
        self.state.targetctx = _make_subtarget(targetctx, flags)
        self.state.library = library
        self.state.args = args
        self.state.return_type = return_type
        self.state.flags = flags
        self.state.locals = locals
        # Results of various steps of the compilation pipeline
        self.state.bc = None
        self.state.func_id = None
        self.state.func_ir = None
        self.state.lifted = None
        self.state.lifted_from = None
        self.state.typemap = None
        self.state.calltypes = None
        self.state.type_annotation = None
        # holds arbitrary inter-pipeline stage meta data
        self.state.metadata = {}
        self.state.reload_init = []
        # hold this for e.g. with_lifting, null out on exit
        self.state.pipeline = self
        # parfor diagnostics info, add to metadata
        self.state.parfor_diagnostics = ParforDiagnostics()
        self.state.metadata['parfor_diagnostics'] = \
            self.state.parfor_diagnostics
        self.state.status = _CompileStatus(
            can_fallback=self.state.flags.enable_pyobject,
            can_giveup=config.COMPATIBILITY_MODE
        )
    def compile_extra(self, func):
        """Compile the Python function *func* from its bytecode.

        If bytecode extraction fails and giving up is allowed, falls
        back to interpreter mode and returns that result directly.
        """
        self.state.func_id = bytecode.FunctionIdentity.from_function(func)
        try:
            ExtractByteCode().run_pass(self.state)
        except Exception as e:
            if self.state.status.can_giveup:
                CompileInterpMode().run_pass(self.state)
                return self.state.cr
            else:
                raise e
        self.state.lifted = ()
        self.state.lifted_from = None
        return self._compile_bytecode()
    def compile_ir(self, func_ir, lifted=(), lifted_from=None):
        """Compile from an already-built Numba IR (*func_ir*), recording
        loop-lifting provenance in *lifted*/*lifted_from*.
        """
        self.state.func_id = func_ir.func_id
        self.state.lifted = lifted
        self.state.lifted_from = lifted_from
        self.state.func_ir = func_ir
        self.state.nargs = self.state.func_ir.arg_count
        FixupArgs().run_pass(self.state)
        return self._compile_ir()
    def define_pipelines(self):
        """Child classes override this to customize the pipelines in use.
        """
        raise NotImplementedError()
    def _compile_core(self):
        """
        Populate and run compiler pipeline.

        Pipelines are attempted in order; the first one that produces a
        result (or raises _EarlyPipelineCompletion) wins.  If the final
        pipeline fails, its exception propagates.
        """
        pms = self.define_pipelines()
        for pm in pms:
            pipeline_name = pm.pipeline_name
            func_name = "%s.%s" % (self.state.func_id.modname,
                                   self.state.func_id.func_qualname)
            event("Pipeline: %s for %s" % (pipeline_name, func_name))
            self.state.metadata['pipeline_times'] = {pipeline_name:
                                                     pm.exec_times}
            is_final_pipeline = pm == pms[-1]
            res = None
            try:
                pm.run(self.state)
                if self.state.cr is not None:
                    break
            except _EarlyPipelineCompletion as e:
                res = e.result
                break
            except Exception as e:
                self.state.status.fail_reason = e
                if is_final_pipeline:
                    raise e
        else:
            # for-else: the loop never hit a `break`, i.e. no pipeline
            # produced a result.
            raise CompilerError("All available pipelines exhausted")
        # Pipeline is done, remove self reference to release refs to user code
        self.state.pipeline = None
        # organise a return
        if res is not None:
            # Early pipeline completion
            return res
        else:
            assert self.state.cr is not None
            return self.state.cr
    def _compile_bytecode(self):
        """
        Populate and run pipeline for bytecode input
        """
        assert self.state.func_ir is None
        return self._compile_core()
    def _compile_ir(self):
        """
        Populate and run pipeline for IR input
        """
        assert self.state.func_ir is not None
        return self._compile_core()
class Compiler(CompilerBase):
    """The default compiler
    """
    def define_pipelines(self):
        # this maintains the objmode fallback behaviour
        # Order matters: nopython first (unless objectmode is forced),
        # then objectmode when fallback is permitted, and finally the
        # interpreted pipeline when giving up entirely is allowed.
        pms = []
        if not self.state.flags.force_pyobject:
            pms.append(DefaultPassBuilder.define_nopython_pipeline(self.state))
        if self.state.status.can_fallback or self.state.flags.force_pyobject:
            pms.append(
                DefaultPassBuilder.define_objectmode_pipeline(self.state)
            )
        if self.state.status.can_giveup:
            pms.append(
                DefaultPassBuilder.define_interpreted_pipeline(self.state)
            )
        return pms
class DefaultPassBuilder(object):
    """
    This is the default pass builder, it contains the "classic" default
    pipelines as pre-canned PassManager instances:
    - nopython
    - objectmode
    - interpreted

    NOTE: pass ordering within each pipeline is semantic; see the inline
    comments before reordering anything.
    """
    @staticmethod
    def define_nopython_pipeline(state, name='nopython'):
        """Returns a nopython mode pipeline based PassManager
        """
        pm = PassManager(name)
        if state.func_ir is None:
            # Starting from bytecode, not pre-built IR.
            pm.add_pass(TranslateByteCode, "analyzing bytecode")
            pm.add_pass(FixupArgs, "fix up args")
        pm.add_pass(IRProcessing, "processing IR")
        pm.add_pass(WithLifting, "Handle with contexts")
        # pre typing
        if not state.flags.no_rewrites:
            pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants")
            pm.add_pass(DeadBranchPrune, "dead branch pruning")
            pm.add_pass(GenericRewrites, "nopython rewrites")
        pm.add_pass(InlineClosureLikes,
                    "inline calls to locally defined closures")
        # convert any remaining closures into functions
        pm.add_pass(MakeFunctionToJitFunction,
                    "convert make_function into JIT functions")
        # inline functions that have been determined as inlinable and rerun
        # branch pruning, this needs to be run after closures are inlined as
        # the IR repr of a closure masks call sites if an inlinable is called
        # inside a closure
        pm.add_pass(InlineInlinables, "inline inlinable functions")
        if not state.flags.no_rewrites:
            pm.add_pass(DeadBranchPrune, "dead branch pruning")
        pm.add_pass(FindLiterallyCalls, "find literally calls")
        pm.add_pass(LiteralUnroll, "handles literal_unroll")
        # typing
        pm.add_pass(NopythonTypeInference, "nopython frontend")
        pm.add_pass(AnnotateTypes, "annotate types")
        # optimisation
        pm.add_pass(InlineOverloads, "inline overloaded functions")
        if state.flags.auto_parallel.enabled:
            pm.add_pass(PreParforPass, "Preprocessing for parfors")
        if not state.flags.no_rewrites:
            pm.add_pass(NopythonRewrites, "nopython rewrites")
        if state.flags.auto_parallel.enabled:
            pm.add_pass(ParforPass, "convert to parfors")
        # legalise
        pm.add_pass(IRLegalization,
                    "ensure IR is legal prior to lowering")
        # lower
        pm.add_pass(NoPythonBackend, "nopython mode backend")
        pm.add_pass(DumpParforDiagnostics, "dump parfor diagnostics")
        pm.finalize()
        return pm
    @staticmethod
    def define_objectmode_pipeline(state, name='object'):
        """Returns an object-mode pipeline based PassManager
        """
        pm = PassManager(name)
        if state.func_ir is None:
            # Starting from bytecode, not pre-built IR.
            pm.add_pass(TranslateByteCode, "analyzing bytecode")
            pm.add_pass(FixupArgs, "fix up args")
        pm.add_pass(IRProcessing, "processing IR")
        if utils.PYVERSION >= (3, 7):
            # The following passes are needed to adjust for looplifting
            pm.add_pass(CanonicalizeLoopEntry, "canonicalize loop entry")
            pm.add_pass(CanonicalizeLoopExit, "canonicalize loop exit")
        pm.add_pass(ObjectModeFrontEnd, "object mode frontend")
        pm.add_pass(InlineClosureLikes,
                    "inline calls to locally defined closures")
        # convert any remaining closures into functions
        pm.add_pass(MakeFunctionToJitFunction,
                    "convert make_function into JIT functions")
        pm.add_pass(AnnotateTypes, "annotate types")
        pm.add_pass(IRLegalization, "ensure IR is legal prior to lowering")
        pm.add_pass(ObjectModeBackEnd, "object mode backend")
        pm.finalize()
        return pm
    @staticmethod
    def define_interpreted_pipeline(state, name="interpreted"):
        """Returns an interpreted mode pipeline based PassManager
        """
        pm = PassManager(name)
        pm.add_pass(CompileInterpMode,
                    "compiling with interpreter mode")
        pm.finalize()
        return pm
def compile_extra(typingctx, targetctx, func, args, return_type, flags,
                  locals, library=None, pipeline_class=Compiler):
    """Compiler entry point

    Parameters
    ----------
    typingctx :
        typing context
    targetctx :
        target context
    func : function
        the python function to be compiled
    args : tuple, list
        argument types
    return_type :
        Use ``None`` to indicate void return
    flags : numba.compiler.Flags
        compiler flags
    locals : dict
        passed through to the pipeline unchanged
    library : numba.codegen.CodeLibrary
        Used to store the compiled code.
        If it is ``None``, a new CodeLibrary is used.
    pipeline_class : type like numba.compiler.CompilerBase
        compiler pipeline
    """
    pipeline = pipeline_class(typingctx, targetctx, library,
                              args, return_type, flags, locals)
    return pipeline.compile_extra(func)
def compile_ir(typingctx, targetctx, func_ir, args, return_type, flags,
               locals, lifted=(), lifted_from=None, is_lifted_loop=False,
               library=None, pipeline_class=Compiler):
    """
    Compile a function with the given IR.

    For internal use only.

    Parameters mirror ``compile_extra`` except that *func_ir* is a
    pre-built Numba IR and *lifted*/*lifted_from* carry loop-lifting
    provenance.  When *is_lifted_loop* is True a double-compilation
    strategy is used; see the comment block below.
    """
    # This is a special branch that should only run on IR from a lifted loop
    if is_lifted_loop:
        # This code is pessimistic and costly, but it is a not often trodden
        # path and it will go away once IR is made immutable. The problem is
        # that the rewrite passes can mutate the IR into a state that makes
        # it possible for invalid tokens to be transmitted to lowering which
        # then trickle through into LLVM IR and causes RuntimeErrors as LLVM
        # cannot compile it. As a result the following approach is taken:
        # 1. Create some new flags that copy the original ones but switch
        #    off rewrites.
        # 2. Compile with 1. to get a compile result
        # 3. Try and compile another compile result but this time with the
        #    original flags (and IR being rewritten).
        # 4. If 3 was successful, use the result, else use 2.

        # create flags with no rewrites
        norw_flags = copy.deepcopy(flags)
        norw_flags.no_rewrites = True

        def compile_local(the_ir, the_flags):
            # One full pipeline run over a private copy of the IR.
            pipeline = pipeline_class(typingctx, targetctx, library,
                                      args, return_type, the_flags, locals)
            return pipeline.compile_ir(func_ir=the_ir, lifted=lifted,
                                       lifted_from=lifted_from)

        # compile with rewrites off, IR shouldn't be mutated irreparably
        norw_cres = compile_local(func_ir.copy(), norw_flags)

        # try and compile with rewrites on if no_rewrites was not set in the
        # original flags, IR might get broken but we've got a CompileResult
        # that's usable from above.
        rw_cres = None
        if not flags.no_rewrites:
            # Suppress warnings in compilation retry
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", errors.NumbaWarning)
                try:
                    rw_cres = compile_local(func_ir.copy(), flags)
                except Exception:
                    pass
        # if the rewrite variant of compilation worked, use it, else use
        # the norewrites backup
        if rw_cres is not None:
            cres = rw_cres
        else:
            cres = norw_cres
        return cres
    else:
        pipeline = pipeline_class(typingctx, targetctx, library,
                                  args, return_type, flags, locals)
        return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
                                   lifted_from=lifted_from)
def compile_internal(typingctx, targetctx, library,
                     func, args, return_type, flags, locals):
    """
    For internal use only.

    Like ``compile_extra`` but always uses the default ``Compiler``
    pipeline and the caller-supplied code *library*.
    """
    return Compiler(typingctx, targetctx, library,
                    args, return_type, flags, locals).compile_extra(func)
| 35.428571 | 80 | 0.612269 | from collections import namedtuple
import copy
import warnings
from numba.core.tracing import event
from numba.core import (utils, errors, typing, interpreter, bytecode, postproc,
config, callconv, cpu)
from numba.parfors.parfor import ParforDiagnostics
from numba.core.inline_closurecall import InlineClosureCallPass
from numba.core.errors import CompilerError
from numba.core.compiler_machinery import PassManager
from numba.core.untyped_passes import (ExtractByteCode, TranslateByteCode,
FixupArgs, IRProcessing, DeadBranchPrune,
RewriteSemanticConstants,
InlineClosureLikes, GenericRewrites,
WithLifting, InlineInlinables,
FindLiterallyCalls,
MakeFunctionToJitFunction,
CanonicalizeLoopExit,
CanonicalizeLoopEntry, LiteralUnroll,)
from numba.core.typed_passes import (NopythonTypeInference, AnnotateTypes,
NopythonRewrites, PreParforPass,
ParforPass, DumpParforDiagnostics,
IRLegalization, NoPythonBackend,
InlineOverloads)
from numba.core.object_mode_passes import (ObjectModeFrontEnd,
ObjectModeBackEnd, CompileInterpMode)
class Flags(utils.ConfigOptions):
OPTIONS = {
'enable_looplift': False,
'enable_pyobject': False,
'enable_pyobject_looplift': False,
'force_pyobject': False,
'release_gil': False,
'no_compile': False,
'debuginfo': False,
'boundscheck': False,
'forceinline': False,
'no_cpython_wrapper': False,
'auto_parallel': cpu.ParallelOptions(False),
'nrt': False,
'no_rewrites': False,
'error_model': 'python',
'fastmath': cpu.FastMathOptions(False),
'noalias': False,
'inline': cpu.InlineOptions('never'),
}
DEFAULT_FLAGS = Flags()
DEFAULT_FLAGS.set('nrt')
CR_FIELDS = ["typing_context",
"target_context",
"entry_point",
"typing_error",
"type_annotation",
"signature",
"objectmode",
"lifted",
"fndesc",
"interpmode",
"library",
"call_helper",
"environment",
"metadata",
"reload_init",
]
class CompileResult(namedtuple("_CompileResult", CR_FIELDS)):
__slots__ = ()
def _reduce(self):
libdata = self.library.serialize_using_object_code()
typeann = str(self.type_annotation)
fndesc = self.fndesc
fndesc.typemap = fndesc.calltypes = None
return (libdata, self.fndesc, self.environment, self.signature,
self.objectmode, self.interpmode, self.lifted, typeann,
self.reload_init)
@classmethod
def _rebuild(cls, target_context, libdata, fndesc, env,
signature, objectmode, interpmode, lifted, typeann,
reload_init):
if reload_init:
# Re-run all
for fn in reload_init:
fn()
library = target_context.codegen().unserialize_library(libdata)
cfunc = target_context.get_executable(library, fndesc, env)
cr = cls(target_context=target_context,
typing_context=target_context.typing_context,
library=library,
environment=env,
entry_point=cfunc,
fndesc=fndesc,
type_annotation=typeann,
signature=signature,
objectmode=objectmode,
interpmode=interpmode,
lifted=lifted,
typing_error=None,
call_helper=None,
metadata=None, # Do not store, arbitrary & potentially large!
reload_init=reload_init,
)
return cr
_LowerResult = namedtuple("_LowerResult", [
"fndesc",
"call_helper",
"cfunc",
"env",
])
def compile_result(**kws):
keys = set(kws.keys())
fieldset = set(CR_FIELDS)
badnames = keys - fieldset
if badnames:
raise NameError(*badnames)
missing = fieldset - keys
for k in missing:
kws[k] = None
# Avoid keeping alive traceback variables
err = kws['typing_error']
if err is not None:
kws['typing_error'] = err.with_traceback(None)
return CompileResult(**kws)
def compile_isolated(func, args, return_type=None, flags=DEFAULT_FLAGS,
locals={}):
from numba.core.registry import cpu_target
typingctx = typing.Context()
targetctx = cpu.CPUContext(typingctx)
# Register the contexts in case for nested @jit or @overload calls
with cpu_target.nested_context(typingctx, targetctx):
return compile_extra(typingctx, targetctx, func, args, return_type,
flags, locals)
def run_frontend(func, inline_closures=False):
# XXX make this a dedicated Pipeline?
func_id = bytecode.FunctionIdentity.from_function(func)
interp = interpreter.Interpreter(func_id)
bc = bytecode.ByteCode(func_id=func_id)
func_ir = interp.interpret(bc)
if inline_closures:
inline_pass = InlineClosureCallPass(func_ir, cpu.ParallelOptions(False),
{}, False)
inline_pass.run()
post_proc = postproc.PostProcessor(func_ir)
post_proc.run()
return func_ir
class _CompileStatus(object):
__slots__ = ['fail_reason', 'can_fallback', 'can_giveup']
def __init__(self, can_fallback, can_giveup):
self.fail_reason = None
self.can_fallback = can_fallback
self.can_giveup = can_giveup
def __repr__(self):
vals = []
for k in self.__slots__:
vals.append("{k}={v}".format(k=k, v=getattr(self, k)))
return ', '.join(vals)
class _EarlyPipelineCompletion(Exception):
def __init__(self, result):
self.result = result
class StateDict(dict):
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
def __setattr__(self, attr, value):
self[attr] = value
def _make_subtarget(targetctx, flags):
subtargetoptions = {}
if flags.debuginfo:
subtargetoptions['enable_debuginfo'] = True
if flags.boundscheck:
subtargetoptions['enable_boundscheck'] = True
if flags.nrt:
subtargetoptions['enable_nrt'] = True
if flags.auto_parallel:
subtargetoptions['auto_parallel'] = flags.auto_parallel
if flags.fastmath:
subtargetoptions['fastmath'] = flags.fastmath
error_model = callconv.create_error_model(flags.error_model, targetctx)
subtargetoptions['error_model'] = error_model
return targetctx.subtarget(**subtargetoptions)
class CompilerBase(object):
    """
    Base class driving the compilation of a function.

    All inter-pass state is held in ``self.state`` (a ``StateDict``).
    Subclasses must implement :meth:`define_pipelines`, returning the
    ordered list of pass managers to attempt.
    """

    def __init__(self, typingctx, targetctx, library, args, return_type, flags,
                 locals):
        # Make sure the environment is reloaded
        config.reload_config()
        typingctx.refresh()
        targetctx.refresh()

        self.state = StateDict()

        self.state.typingctx = typingctx
        # Specialise the target context according to the flags
        self.state.targetctx = _make_subtarget(targetctx, flags)
        self.state.library = library
        self.state.args = args
        self.state.return_type = return_type
        self.state.flags = flags
        self.state.locals = locals

        # Results of various steps of the compilation pipeline
        self.state.bc = None
        self.state.func_id = None
        self.state.func_ir = None
        self.state.lifted = None
        self.state.lifted_from = None
        self.state.typemap = None
        self.state.calltypes = None
        self.state.type_annotation = None
        # holds arbitrary inter-pipeline stage meta data
        self.state.metadata = {}
        self.state.reload_init = []
        # hold this for e.g. with_lifting, null out on exit
        self.state.pipeline = self

        # parfor diagnostics info, add to metadata
        self.state.parfor_diagnostics = ParforDiagnostics()
        self.state.metadata['parfor_diagnostics'] = \
            self.state.parfor_diagnostics

        self.state.status = _CompileStatus(
            can_fallback=self.state.flags.enable_pyobject,
            can_giveup=config.COMPATIBILITY_MODE
        )

    def compile_extra(self, func):
        """
        Compile ``func`` from scratch, starting from its bytecode.

        If bytecode extraction fails and giving up is allowed, fall back
        to interpreter mode and return that compile result instead.
        """
        self.state.func_id = bytecode.FunctionIdentity.from_function(func)
        try:
            ExtractByteCode().run_pass(self.state)
        except Exception as e:
            if self.state.status.can_giveup:
                CompileInterpMode().run_pass(self.state)
                return self.state.cr
            else:
                raise e
        self.state.lifted = ()
        self.state.lifted_from = None
        return self._compile_bytecode()

    def compile_ir(self, func_ir, lifted=(), lifted_from=None):
        """
        Compile from pre-built Numba IR (e.g. from a lifted loop),
        bypassing bytecode translation entirely.
        """
        self.state.func_id = func_ir.func_id
        self.state.lifted = lifted
        self.state.lifted_from = lifted_from
        self.state.func_ir = func_ir
        self.state.nargs = self.state.func_ir.arg_count

        FixupArgs().run_pass(self.state)
        return self._compile_ir()

    def define_pipelines(self):
        """Child classes override this to customize the pipelines in use."""
        raise NotImplementedError()

    def _compile_core(self):
        """
        Run the defined pipelines in order until one produces a compile
        result; raise ``CompilerError`` if all of them are exhausted.
        """
        pms = self.define_pipelines()
        for pm in pms:
            pipeline_name = pm.pipeline_name
            func_name = "%s.%s" % (self.state.func_id.modname,
                                   self.state.func_id.func_qualname)

            event("Pipeline: %s for %s" % (pipeline_name, func_name))
            self.state.metadata['pipeline_times'] = {pipeline_name:
                                                     pm.exec_times}
            is_final_pipeline = pm == pms[-1]
            res = None
            try:
                pm.run(self.state)
                if self.state.cr is not None:
                    break
            except _EarlyPipelineCompletion as e:
                # A pass finished compilation early; use its result
                res = e.result
                break
            except Exception as e:
                # Record the failure; only re-raise if no pipeline is left
                self.state.status.fail_reason = e
                if is_final_pipeline:
                    raise e
        else:
            # for-else: no pipeline broke out of the loop with a result
            raise CompilerError("All available pipelines exhausted")

        # Pipeline is done, remove self reference to release refs to user code
        self.state.pipeline = None

        # organise a return
        if res is not None:
            # Early pipeline completion
            return res
        else:
            assert self.state.cr is not None
            return self.state.cr

    def _compile_bytecode(self):
        """Run the pipelines for bytecode input."""
        assert self.state.func_ir is None
        return self._compile_core()

    def _compile_ir(self):
        """Run the pipelines for pre-built IR input."""
        assert self.state.func_ir is not None
        return self._compile_core()
class Compiler(CompilerBase):
    """The default compiler: nopython first, then object-mode and
    interpreted fallbacks as permitted."""

    def define_pipelines(self):
        """Build the ordered list of pass managers to attempt."""
        # this maintains the objmode fallback behaviour
        state = self.state
        flags = state.flags
        pipelines = []
        if not flags.force_pyobject:
            pipelines.append(
                DefaultPassBuilder.define_nopython_pipeline(state))
        if state.status.can_fallback or flags.force_pyobject:
            pipelines.append(
                DefaultPassBuilder.define_objectmode_pipeline(state))
        if state.status.can_giveup:
            pipelines.append(
                DefaultPassBuilder.define_interpreted_pipeline(state))
        return pipelines
class DefaultPassBuilder(object):
    """
    This is the default pass builder. It provides the "classic" compilation
    pipelines as pre-canned ``PassManager`` instances:
      - nopython mode
      - object mode
      - interpreted mode
    The order in which passes are added is significant and must be
    preserved.
    """

    @staticmethod
    def define_nopython_pipeline(state, name='nopython'):
        """Returns a nopython-mode pipeline based PassManager."""
        pm = PassManager(name)
        if state.func_ir is None:
            # Starting from a plain function: translate bytecode first
            pm.add_pass(TranslateByteCode, "analyzing bytecode")
            pm.add_pass(FixupArgs, "fix up args")
        pm.add_pass(IRProcessing, "processing IR")
        pm.add_pass(WithLifting, "Handle with contexts")

        # pre typing
        if not state.flags.no_rewrites:
            pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants")
            pm.add_pass(DeadBranchPrune, "dead branch pruning")
            pm.add_pass(GenericRewrites, "nopython rewrites")

        pm.add_pass(InlineClosureLikes,
                    "inline calls to locally defined closures")
        # convert any remaining closures into functions
        pm.add_pass(MakeFunctionToJitFunction,
                    "convert make_function into JIT functions")
        # inline functions that have been determined as inlinable and rerun
        # branch pruning, this needs to be run after closures are inlined as
        # the IR repr of a closure masks call sites if an inlinable is called
        # inside a closure
        pm.add_pass(InlineInlinables, "inline inlinable functions")
        if not state.flags.no_rewrites:
            pm.add_pass(DeadBranchPrune, "dead branch pruning")

        pm.add_pass(FindLiterallyCalls, "find literally calls")
        pm.add_pass(LiteralUnroll, "handles literal_unroll")

        # typing
        pm.add_pass(NopythonTypeInference, "nopython frontend")
        pm.add_pass(AnnotateTypes, "annotate types")

        # optimisation
        pm.add_pass(InlineOverloads, "inline overloaded functions")
        if state.flags.auto_parallel.enabled:
            pm.add_pass(PreParforPass, "Preprocessing for parfors")
        if not state.flags.no_rewrites:
            pm.add_pass(NopythonRewrites, "nopython rewrites")
        if state.flags.auto_parallel.enabled:
            pm.add_pass(ParforPass, "convert to parfors")

        # legalise
        pm.add_pass(IRLegalization,
                    "ensure IR is legal prior to lowering")

        # lower
        pm.add_pass(NoPythonBackend, "nopython mode backend")
        pm.add_pass(DumpParforDiagnostics, "dump parfor diagnostics")
        pm.finalize()
        return pm

    @staticmethod
    def define_objectmode_pipeline(state, name='object'):
        """Returns an object-mode pipeline based PassManager."""
        pm = PassManager(name)
        if state.func_ir is None:
            # Starting from a plain function: translate bytecode first
            pm.add_pass(TranslateByteCode, "analyzing bytecode")
            pm.add_pass(FixupArgs, "fix up args")
        pm.add_pass(IRProcessing, "processing IR")

        if utils.PYVERSION >= (3, 7):
            # The following passes are needed to adjust for looplifting
            pm.add_pass(CanonicalizeLoopEntry, "canonicalize loop entry")
            pm.add_pass(CanonicalizeLoopExit, "canonicalize loop exit")

        pm.add_pass(ObjectModeFrontEnd, "object mode frontend")
        pm.add_pass(InlineClosureLikes,
                    "inline calls to locally defined closures")
        # convert any remaining closures into functions
        pm.add_pass(MakeFunctionToJitFunction,
                    "convert make_function into JIT functions")
        pm.add_pass(AnnotateTypes, "annotate types")
        pm.add_pass(IRLegalization, "ensure IR is legal prior to lowering")
        pm.add_pass(ObjectModeBackEnd, "object mode backend")
        pm.finalize()
        return pm

    @staticmethod
    def define_interpreted_pipeline(state, name="interpreted"):
        """Returns an interpreted-mode pipeline based PassManager."""
        pm = PassManager(name)
        pm.add_pass(CompileInterpMode,
                    "compiling with interpreter mode")
        pm.finalize()
        return pm
def compile_extra(typingctx, targetctx, func, args, return_type, flags,
                  locals, library=None, pipeline_class=Compiler):
    """
    Compile ``func`` from scratch by instantiating a fresh
    ``pipeline_class`` with the given contexts, flags and (optional)
    code library, and delegating the work to it.
    """
    return pipeline_class(typingctx, targetctx, library, args, return_type,
                          flags, locals).compile_extra(func)
def compile_ir(typingctx, targetctx, func_ir, args, return_type, flags,
               locals, lifted=(), lifted_from=None, is_lifted_loop=False,
               library=None, pipeline_class=Compiler):
    """
    Compile a function whose body is already expressed as Numba IR.

    Parameters mirror :func:`compile_extra`, except that ``func_ir``
    replaces the Python function. ``is_lifted_loop`` enables a special
    double-compile strategy (explained inline) used for IR that originates
    from a lifted loop. Returns the selected compile result.
    """
    # This is a special branch that should only run on IR from a lifted loop
    if is_lifted_loop:
        # This code is pessimistic and costly, but it is a not often trodden
        # path and it will go away once IR is made immutable. The problem is
        # that the rewrite passes can mutate the IR into a state that makes
        # it possible for invalid tokens to be transmitted to lowering which
        # then trickle through into LLVM IR and causes RuntimeErrors as LLVM
        # cannot compile it. As a result the following approach is taken:
        # 1. Create some new flags that copy the original ones but switch
        #    off rewrites.
        # 2. Compile with 1. to get a compile result
        # 3. Try and compile another compile result but this time with the
        #    original flags (and IR being rewritten).
        # 4. If 3 was successful, use the result, else use 2.

        # create flags with no rewrites
        norw_flags = copy.deepcopy(flags)
        norw_flags.no_rewrites = True

        def compile_local(the_ir, the_flags):
            # Build a fresh pipeline instance for each compilation attempt
            pipeline = pipeline_class(typingctx, targetctx, library,
                                      args, return_type, the_flags, locals)
            return pipeline.compile_ir(func_ir=the_ir, lifted=lifted,
                                       lifted_from=lifted_from)

        # compile with rewrites off, IR shouldn't be mutated irreparably
        norw_cres = compile_local(func_ir.copy(), norw_flags)

        # try and compile with rewrites on if no_rewrites was not set on the
        # original flags; the IR might get broken, but we have a compile result
        # that's usable from above.
        rw_cres = None
        if not flags.no_rewrites:
            # Suppress warnings: this is a best-effort second attempt
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", errors.NumbaWarning)
                try:
                    rw_cres = compile_local(func_ir.copy(), flags)
                except Exception:
                    pass
        # Prefer the rewritten result when it compiled successfully
        if rw_cres is not None:
            cres = rw_cres
        else:
            cres = norw_cres
        return cres

    else:
        pipeline = pipeline_class(typingctx, targetctx, library,
                                  args, return_type, flags, locals)
        return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
                                   lifted_from=lifted_from)
def compile_internal(typingctx, targetctx, library,
                     func, args, return_type, flags, locals):
    """
    For internal use only: compile ``func`` with the stock ``Compiler``
    pipeline in the given contexts.
    """
    return Compiler(typingctx, targetctx, library, args, return_type,
                    flags, locals).compile_extra(func)
| true | true |
f7f5d68fade0667b27df7cc25182e2c33488b2f0 | 76,864 | py | Python | tests/test_operator.py | tbenthompson/devito | 7ced4ba4ceca1680c68412172870b7a3c6e6d09a | [
"MIT"
] | 1 | 2022-01-02T17:34:39.000Z | 2022-01-02T17:34:39.000Z | tests/test_operator.py | tbenthompson/devito | 7ced4ba4ceca1680c68412172870b7a3c6e6d09a | [
"MIT"
] | null | null | null | tests/test_operator.py | tbenthompson/devito | 7ced4ba4ceca1680c68412172870b7a3c6e6d09a | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from itertools import permutations
from conftest import skipif
from devito import (Grid, Eq, Operator, Constant, Function, TimeFunction,
SparseFunction, SparseTimeFunction, Dimension, error, SpaceDimension,
NODE, CELL, dimensions, configuration, TensorFunction,
TensorTimeFunction, VectorFunction, VectorTimeFunction, switchconfig)
from devito import Le, Lt, Ge, Gt # noqa
from devito.exceptions import InvalidOperator
from devito.finite_differences.differentiable import diff2sympy
from devito.ir.equations import ClusterizedEq
from devito.ir.equations.algorithms import lower_exprs
from devito.ir.iet import (Callable, Conditional, Expression, Iteration, TimedList,
FindNodes, IsPerfectIteration, retrieve_iteration_tree)
from devito.ir.support import Any, Backward, Forward
from devito.passes.iet import DataManager
from devito.symbolics import ListInitializer, indexify, retrieve_indexed
from devito.tools import flatten, powerset, timed_region
from devito.types import Array, Scalar # noqa
def dimify(dimensions):
    """Turn a whitespace-separated string of names into SpaceDimensions."""
    assert isinstance(dimensions, str)
    names = dimensions.split()
    return tuple([SpaceDimension(name=name) for name in names])
def symbol(name, dimensions, value=0., shape=(3, 5), mode='function'):
    """Short-cut for symbol creation to test "function"
    and "indexed" API."""
    assert mode in ('function', 'indexed')
    func = Function(name=name, dimensions=dimensions, shape=shape)
    # Fill the entire data region, halo included
    func.data_with_halo[:] = value
    if mode == 'indexed':
        return func.indexify()
    return func
class TestOperatorSetup(object):

    """Tests for the construction-time configuration of an Operator
    (platform, compiler, language, optimization options)."""

    def test_platform_compiler_language(self):
        """
        Test code generation when ``platform``, ``compiler`` and ``language``
        are explicitly supplied to an Operator, thus bypassing the global values
        stored in ``configuration``.
        """
        grid = Grid(shape=(3, 3, 3))
        u = TimeFunction(name='u', grid=grid)

        # Unrecognised platform name -> exception
        try:
            Operator(Eq(u, u + 1), platform='asga')
            assert False
        except InvalidOperator:
            assert True

        # Operator with auto-detected CPU platform (ie, `configuration['platform']`)
        op1 = Operator(Eq(u, u + 1))
        # Operator with preset platform
        op2 = Operator(Eq(u, u + 1), platform='nvidiaX')
        # Definitely should be
        assert str(op1) != str(op2)
        # `op2` should have OpenMP offloading code
        assert '#pragma omp target' in str(op2)
        # `op2` uses a user-supplied `platform`, so the Compiler gets rebuilt
        # to make sure it can JIT for the target platform
        assert op1._compiler is not op2._compiler

        # The compiler itself can also be passed explicitly ...
        Operator(Eq(u, u + 1), platform='nvidiaX', compiler='gcc')
        # ... but it will raise an exception if an unknown one
        try:
            Operator(Eq(u, u + 1), platform='nvidiaX', compiler='asf')
            assert False
        except InvalidOperator:
            assert True

        # Now with explicit platform *and* language
        op3 = Operator(Eq(u, u + 1), platform='nvidiaX', language='openacc')
        assert '#pragma acc parallel' in str(op3)
        # A fresh compiler of the same kind as the global one is created
        assert op3._compiler is not configuration['compiler']
        assert (op3._compiler.__class__.__name__ ==
                configuration['compiler'].__class__.__name__)

        # Unsupported combination of `platform` and `language` should throw an error
        try:
            Operator(Eq(u, u + 1), platform='bdw', language='openacc')
            assert False
        except InvalidOperator:
            assert True

        # Check that local config takes precedence over global config
        op4 = switchconfig(language='openmp')(Operator)(Eq(u, u + 1), language='C')
        assert '#pragma omp for' not in str(op4)

    def test_opt_options(self):
        """Invalid `opt` passes/options must raise InvalidOperator."""
        grid = Grid(shape=(3, 3, 3))
        u = TimeFunction(name='u', grid=grid)

        # Unknown pass
        # NOTE(review): `('aaa')` is just the string 'aaa', not a 1-tuple;
        # presumably Operator accepts both forms — confirm
        try:
            Operator(Eq(u, u + 1), opt=('aaa'))
            assert False
        except InvalidOperator:
            assert True
        # Unknown optimization option
        try:
            Operator(Eq(u, u + 1), opt=('advanced', {'aaa': 1}))
            assert False
        except InvalidOperator:
            assert True

    def test_compiler_uniqueness(self):
        """Each Operator must own a distinct compiler instance."""
        grid = Grid(shape=(3, 3, 3))
        u = TimeFunction(name='u', grid=grid)

        eqns = [Eq(u.forward, u + 1)]

        op0 = Operator(eqns)
        op1 = Operator(eqns)
        op2 = Operator(eqns, compiler='gcc')

        assert op0._compiler is not op1._compiler
        assert op0._compiler is not op2._compiler
        assert op1._compiler is not op2._compiler
class TestCodeGen(object):

    """Tests for structural properties of the code generated by Operators
    (parameters, index shifting, time steppers, lowering)."""

    def test_parameters(self):
        """Tests code generation for Operator parameters."""
        grid = Grid(shape=(3,))
        a_dense = Function(name='a_dense', grid=grid)
        const = Constant(name='constant')
        eqn = Eq(a_dense, a_dense + 2.*const)
        op = Operator(eqn, openmp=False)
        assert len(op.parameters) == 5
        assert op.parameters[0].name == 'a_dense'
        assert op.parameters[0].is_Tensor
        assert op.parameters[1].name == 'constant'
        assert op.parameters[1].is_Scalar
        assert op.parameters[2].name == 'x_M'
        assert op.parameters[2].is_Scalar
        assert op.parameters[3].name == 'x_m'
        assert op.parameters[3].is_Scalar
        assert op.parameters[4].name == 'timers'
        assert op.parameters[4].is_Object
        assert 'a_dense[x + 1] = 2.0F*constant + a_dense[x + 1]' in str(op)

    @pytest.mark.parametrize('expr, so, to, expected', [
        ('Eq(u.forward,u+1)', 0, 1, 'Eq(u[t+1,x,y,z],u[t,x,y,z]+1)'),
        ('Eq(u.forward,u+1)', 1, 1, 'Eq(u[t+1,x+1,y+1,z+1],u[t,x+1,y+1,z+1]+1)'),
        ('Eq(u.forward,u+1)', 1, 2, 'Eq(u[t+1,x+1,y+1,z+1],u[t,x+1,y+1,z+1]+1)'),
        ('Eq(u.forward,u+u.backward + m)', 8, 2,
         'Eq(u[t+1,x+8,y+8,z+8],m[x,y,z]+u[t,x+8,y+8,z+8]+u[t-1,x+8,y+8,z+8])')
    ])
    def test_index_shifting(self, expr, so, to, expected):
        """Tests that array accesses get properly shifted based on the halo and
        padding regions extent."""
        grid = Grid(shape=(4, 4, 4))
        x, y, z = grid.dimensions
        t = grid.stepping_dim  # noqa

        u = TimeFunction(name='u', grid=grid, space_order=so, time_order=to)  # noqa
        m = Function(name='m', grid=grid, space_order=0)  # noqa

        expr = eval(expr)

        with timed_region('x'):
            expr = Operator._lower_exprs([expr])[0]

        assert str(expr).replace(' ', '') == expected

    @pytest.mark.parametrize('expr, so, expected', [
        ('Lt(0.1*(g1 + g2), 0.2*(g1 + g2))', 0,
         '0.1*g1[x,y]+0.1*g2[x,y]<0.2*g1[x,y]+0.2*g2[x,y]'),
        ('Le(0.1*(g1 + g2), 0.2*(g1 + g2))', 1,
         '0.1*g1[x+1,y+1]+0.1*g2[x+1,y+1]<=0.2*g1[x+1,y+1]+0.2*g2[x+1,y+1]'),
        ('Ge(0.1*(g1 + g2), 0.2*(g1 + g2))', 2,
         '0.1*g1[x+2,y+2]+0.1*g2[x+2,y+2]>=0.2*g1[x+2,y+2]+0.2*g2[x+2,y+2]'),
        ('Gt(0.1*(g1 + g2), 0.2*(g1 + g2))', 4,
         '0.1*g1[x+4,y+4]+0.1*g2[x+4,y+4]>0.2*g1[x+4,y+4]+0.2*g2[x+4,y+4]'),
    ])
    def test_relationals_index_shifting(self, expr, so, expected):
        """Tests that relational expressions get index-shifted like any
        other expression."""
        grid = Grid(shape=(3, 3))
        g1 = Function(name='g1', grid=grid, space_order=so)  # noqa
        g2 = Function(name='g2', grid=grid, space_order=so)  # noqa
        expr = eval(expr)
        expr = lower_exprs(expr)

        assert str(expr).replace(' ', '') == expected

    @pytest.mark.parametrize('expr,exp_uindices,exp_mods', [
        ('Eq(v.forward, u[0, x, y, z] + v + 1)', [(0, 5), (2, 5)], {'v': 5}),
        ('Eq(v.forward, u + v + 1)', [(0, 5), (2, 5), (0, 2)], {'v': 5, 'u': 2}),
    ])
    def test_multiple_steppers(self, expr, exp_uindices, exp_mods):
        """Tests generation of multiple, mixed time stepping indices."""
        grid = Grid(shape=(3, 3, 3))
        x, y, z = grid.dimensions
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)  # noqa
        v = TimeFunction(name='v', grid=grid, time_order=4)  # noqa

        op = Operator(eval(expr), opt='noop')

        iters = FindNodes(Iteration).visit(op)
        time_iter = [i for i in iters if i.dim.is_Time]
        assert len(time_iter) == 1
        time_iter = time_iter[0]

        # Check uindices in Iteration header
        signatures = [(i._offset, i._modulo) for i in time_iter.uindices]
        assert len(signatures) == len(exp_uindices)
        exp_uindices = [(time + i, j) for i, j in exp_uindices]
        assert all(i in signatures for i in exp_uindices)

        # Check uindices within each TimeFunction access. NOTE: this used to
        # `assert` a bare generator expression, which is always truthy (a
        # no-op); each modulo-indexed access is now checked explicitly.
        # Constant time indices (e.g. `u[0, x, y, z]`) carry no modulo and
        # are skipped.
        exprs = [i.expr for i in FindNodes(Expression).visit(op)]
        for access in flatten(retrieve_indexed(e) for e in exprs):
            idx = access.indices[access.function._time_position]
            if getattr(idx, 'is_Modulo', False):
                assert idx.modulo == exp_mods[access.function.name]

    def test_lower_stepping_dims_with_mutiple_iterations(self):
        """
        Test lowering SteppingDimensions for a time dimension with
        more than one iteration loop with different ModuloDimensions.

        MFE for issue #1486
        """
        grid = Grid(shape=(4, 4))
        f = Function(name="f", grid=grid, space_order=4)
        g = Function(name="g", grid=grid, space_order=4)
        h = TimeFunction(name="h", grid=grid, space_order=4, time_order=2)
        f.data[:] = 0.0
        h.data[:] = 0.0
        eqn = [Eq(f, h + 1), Eq(g, f),
               Eq(h.forward, h + g + 1)]
        op = Operator(eqn)

        # NOTE: loop variable renamed from `iter` to avoid shadowing the builtin
        for it in [i for i in FindNodes(Iteration).visit(op) if i.dim.is_Time]:
            exprtimeindices = set([a.indices[a.function._time_position] for
                                   expr in FindNodes(Expression).visit(it) for
                                   a in retrieve_indexed(expr.expr) if
                                   isinstance(a.function, TimeFunction)])
            # Check if iteration time indices match with expressions time indices
            assert (exprtimeindices == set(it.uindices))
            # Check if expressions time indices are modulo dimensions
            assert(all([i.is_Modulo for i in exprtimeindices]))

        op.apply(time_M=10)

        assert np.all(h.data[0, :] == 18)
        assert np.all(h.data[1, :] == 20)
        assert np.all(h.data[2, :] == 22)

    @skipif('device')
    def test_timedlist_wraps_time_if_parallel(self):
        """
        Test that if the time loop is parallel, then it must be wrapped by a
        Section (and consequently by a TimedList).
        """
        grid = Grid(shape=(3, 3, 3))

        u = TimeFunction(name='u', grid=grid, save=3)

        op = Operator(Eq(u, u + 1))

        assert op.body.body[1].body[0].is_Section
        assert isinstance(op.body.body[1].body[0].body[0], TimedList)
        timedlist = op.body.body[1].body[0].body[0]
        if configuration['language'] == 'openmp':
            ompreg = timedlist.body[0]
            assert ompreg.body[0].dim is grid.time_dim
        else:
            # NOTE: the `assert` keyword was missing here, making this branch
            # a no-op expression statement
            assert timedlist.body[0].dim is grid.time_dim

    def test_nested_lowering(self):
        """
        Tests that deeply nested (depth > 2) functions over subdomains are lowered.
        """
        grid = Grid(shape=(4, 4), dtype=np.int32)
        x, y = grid.dimensions
        x0, y0 = dimensions('x0 y0')

        u0 = Function(name="u0", grid=grid)
        u1 = Function(name="u1", shape=grid.shape, dimensions=(x0, y0), dtype=np.int32)
        u2 = Function(name="u2", grid=grid)
        u0.data[:2, :2] = 1
        u0.data[2:, 2:] = 2
        u1.data[:, :] = 1
        u2.data[:, :] = 1

        eq0 = Eq(u0, u0[u1[x0+1, y0+2], u2[x, u2]], subdomain=grid.interior)
        eq1 = Eq(u0, u0[u1[x0+1, y0+2], u2[x, u2[x, y]]], subdomain=grid.interior)
        op0 = Operator(eq0)
        op1 = Operator(eq1)
        op0.apply()

        # Check they indeed produced the same code
        assert str(op0.ccode) == str(op1.ccode)

        # Also check for numerical correctness
        assert np.all(u0.data[0, 3] == 0) and np.all(u0.data[3, 0] == 0)
        assert np.all(u0.data[:2, :2] == 1) and np.all(u0.data[1:3, 1:3] == 1)
        assert np.all(u0.data[2:3, 3] == 2) and np.all(u0.data[3, 2:3] == 2)

    def test_nested_lowering_indexify(self):
        """
        Tests that nested function are lowered if only used as index.
        """
        grid = Grid(shape=(4, 4), dtype=np.int32)
        x, y = grid.dimensions

        u0 = Function(name="u0", grid=grid)
        u1 = Function(name="u1", grid=grid)
        u2 = Function(name="u2", grid=grid)
        u0.data[:, :] = 2
        u1.data[:, :] = 1
        u2.data[:, :] = 1

        # Function as index only
        eq0 = Eq(u0._subs(x, u1), 2*u0)
        # Function as part of expression as index only
        eq1 = Eq(u0._subs(x, u1._subs(y, u2) + 1), 4*u0)

        op0 = Operator(eq0)
        op0.apply()
        op1 = Operator(eq1)
        op1.apply()
        # Rows 0 and 3 are never written and must retain the initial value.
        # NOTE: this used to be `np.all(<generator>)`, which is vacuously
        # true; the outer reduction must be the builtin `all`.
        assert all(np.all(u0.data[i, :] == 2) for i in [0, 3])
        assert np.all(u0.data[1, :] == 4)
        assert np.all(u0.data[2, :] == 8)
class TestArithmetic(object):

    """Numerical tests for point-wise arithmetic on Functions, sparse
    functions and tensor functions."""

    @pytest.mark.parametrize('expr, result', [
        ('Eq(a, a + b + 5.)', 10.),
        ('Eq(a, b - a)', 1.),
        ('Eq(a, 4 * (b * a))', 24.),
        ('Eq(a, (6. / b) + (8. * a))', 18.),
    ])
    @pytest.mark.parametrize('mode', ['function'])
    def test_flat(self, expr, result, mode):
        """Tests basic point-wise arithmetic on two-dimensional data"""
        i, j = dimify('i j')
        # a starts at 2., b at 3. (see `symbol`); `result` is the expected
        # constant value of a after applying `expr`
        a = symbol(name='a', dimensions=(i, j), value=2., mode=mode)
        b = symbol(name='b', dimensions=(i, j), value=3., mode=mode)
        fa = a.base.function if mode == 'indexed' else a
        fb = b.base.function if mode == 'indexed' else b

        eqn = eval(expr)
        Operator(eqn)(a=fa, b=fb)
        assert np.allclose(fa.data, result, rtol=1e-12)

    @pytest.mark.parametrize('expr, result', [
        ('Eq(a, a + b + 5.)', 10.),
        ('Eq(a, b - a)', 1.),
        ('Eq(a, 4 * (b * a))', 24.),
        ('Eq(a, (6. / b) + (8. * a))', 18.),
    ])
    @pytest.mark.parametrize('mode', ['function', 'indexed'])
    def test_deep(self, expr, result, mode):
        """Tests basic point-wise arithmetic on multi-dimensional data"""
        i, j, k, l = dimify('i j k l')
        # b has fewer dimensions than a; broadcasting over (j, k) applies
        a = symbol(name='a', dimensions=(i, j, k, l), shape=(3, 5, 7, 6),
                   value=2., mode=mode)
        b = symbol(name='b', dimensions=(j, k), shape=(5, 7),
                   value=3., mode=mode)
        fa = a.base.function if mode == 'indexed' else a
        fb = b.base.function if mode == 'indexed' else b

        eqn = eval(expr)
        Operator(eqn)(a=fa, b=fb)
        assert np.allclose(fa.data, result, rtol=1e-12)

    @pytest.mark.parametrize('expr, result', [
        ('Eq(a[j, l], a[j - 1 , l] + 1.)',
         np.meshgrid(np.arange(2., 8.), np.arange(2., 7.))[1]),
        ('Eq(a[j, l], a[j, l - 1] + 1.)',
         np.meshgrid(np.arange(2., 8.), np.arange(2., 7.))[0]),
    ])
    def test_indexed_increment(self, expr, result):
        """Tests point-wise increments with stencil offsets in one dimension"""
        j, l = dimify('j l')
        a = symbol(name='a', dimensions=(j, l), value=1., shape=(5, 6),
                   mode='indexed').base
        fa = a.function
        # Zero the domain; the halo keeps the value set by `symbol`
        fa.data[:] = 0.

        eqn = eval(expr)
        Operator(eqn)(a=fa)
        assert np.allclose(fa.data, result, rtol=1e-12)

    @pytest.mark.parametrize('expr, result', [
        ('Eq(a[j, l], b[j - 1 , l] + 1.)', np.zeros((5, 6)) + 3.),
        ('Eq(a[j, l], b[j , l - 1] + 1.)', np.zeros((5, 6)) + 3.),
        ('Eq(a[j, l], b[j - 1, l - 1] + 1.)', np.zeros((5, 6)) + 3.),
        ('Eq(a[j, l], b[j + 1, l + 1] + 1.)', np.zeros((5, 6)) + 3.),
    ])
    def test_indexed_stencil(self, expr, result):
        """Test point-wise arithmetic with stencil offsets across two
        functions in indexed expression format"""
        j, l = dimify('j l')
        a = symbol(name='a', dimensions=(j, l), value=0., shape=(5, 6),
                   mode='indexed').base
        fa = a.function
        b = symbol(name='b', dimensions=(j, l), value=2., shape=(5, 6),
                   mode='indexed').base
        fb = b.function

        eqn = eval(expr)
        Operator(eqn)(a=fa, b=fb)
        # Only the interior is checked; boundary points read from the halo
        assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)

    @pytest.mark.parametrize('expr, result', [
        ('Eq(a[1, j, l], a[0, j - 1 , l] + 1.)', np.zeros((5, 6)) + 3.),
        ('Eq(a[1, j, l], a[0, j , l - 1] + 1.)', np.zeros((5, 6)) + 3.),
        ('Eq(a[1, j, l], a[0, j - 1, l - 1] + 1.)', np.zeros((5, 6)) + 3.),
        ('Eq(a[1, j, l], a[0, j + 1, l + 1] + 1.)', np.zeros((5, 6)) + 3.),
    ])
    def test_indexed_buffered(self, expr, result):
        """Test point-wise arithmetic with stencil offsets across a single
        functions with buffering dimension in indexed expression format"""
        i, j, l = dimify('i j l')
        a = symbol(name='a', dimensions=(i, j, l), value=2., shape=(3, 5, 6),
                   mode='indexed').base
        fa = a.function

        eqn = eval(expr)
        Operator(eqn)(a=fa)
        assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)

    @pytest.mark.parametrize('expr, result', [
        ('Eq(a[1, j, l], a[0, j - 1 , l] + 1.)', np.zeros((5, 6)) + 3.),
    ])
    def test_indexed_open_loops(self, expr, result):
        """Test point-wise arithmetic with stencil offsets and open loop
        boundaries in indexed expression format"""
        i, j, l = dimify('i j l')
        a = Function(name='a', dimensions=(i, j, l), shape=(3, 5, 6))
        fa = a.function
        # Only slab 0 is initialised; the equation writes into slab 1
        fa.data[0, :, :] = 2.

        eqn = eval(expr)
        Operator(eqn)(a=fa)
        assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)

    def test_indexed_w_indirections(self):
        """Test point-wise arithmetic with indirectly indexed Functions."""
        grid = Grid(shape=(10, 10))
        x, y = grid.dimensions

        p_poke = Dimension('p_src')
        d = Dimension('d')

        npoke = 1

        u = Function(name='u', grid=grid, space_order=0)
        # `coordinates` stores the (x, y) indices of the point to poke
        coordinates = Function(name='coordinates', dimensions=(p_poke, d),
                               shape=(npoke, grid.dim), space_order=0, dtype=np.int32)
        coordinates.data[0, 0] = 4
        coordinates.data[0, 1] = 3

        poke_eq = Eq(u[coordinates[p_poke, 0], coordinates[p_poke, 1]], 1.0)
        op = Operator(poke_eq)
        op.apply()

        # Exactly one point, (4, 3), must have been set to 1
        ix, iy = np.where(u.data == 1.)
        assert len(ix) == len(iy) == 1
        assert ix[0] == 4 and iy[0] == 3
        assert np.all(u.data[0:3] == 0.) and np.all(u.data[5:] == 0.)
        assert np.all(u.data[:, 0:3] == 0.) and np.all(u.data[:, 5:] == 0.)

    def test_constant_time_dense(self):
        """Test arithmetic between different data objects, namely Constant
        and Function."""
        i, j = dimify('i j')
        const = Constant(name='truc', value=2.)
        a = Function(name='a', shape=(20, 20), dimensions=(i, j))
        a.data[:] = 2.
        eqn = Eq(a, a + 2.*const)
        op = Operator(eqn)

        op.apply(a=a, truc=const)
        assert(np.allclose(a.data, 6.))

        # Applying a different constant still works
        op.apply(a=a, truc=Constant(name='truc2', value=3.))
        assert(np.allclose(a.data, 12.))

    def test_incs_same_lhs(self):
        """Test point-wise arithmetic with multiple increments expressed
        as different equations."""
        grid = Grid(shape=(10, 10))
        u = Function(name='u', grid=grid, space_order=0)
        op = Operator([Eq(u, u+1.0), Eq(u, u+2.0)])

        u.data[:] = 0.0
        op.apply()
        # Both equations must have been applied in sequence: 0 + 1 + 2
        assert np.all(u.data[:] == 3)

    def test_sparsefunction_inject(self):
        """
        Test injection of a SparseFunction into a Function
        """
        grid = Grid(shape=(11, 11))
        u = Function(name='u', grid=grid, space_order=0)

        sf1 = SparseFunction(name='s', grid=grid, npoint=1)
        op = Operator(sf1.inject(u, expr=sf1))

        assert sf1.data.shape == (1, )
        # (0.6, 0.6) falls exactly on grid point (6, 6)
        sf1.coordinates.data[0, :] = (0.6, 0.6)
        sf1.data[0] = 5.0
        u.data[:] = 0.0
        op.apply()

        # This should be exactly on a point, all others 0
        assert u.data[6, 6] == pytest.approx(5.0)
        assert np.sum(u.data) == pytest.approx(5.0)

    def test_sparsefunction_interp(self):
        """
        Test interpolation of a SparseFunction from a Function
        """
        grid = Grid(shape=(11, 11))
        u = Function(name='u', grid=grid, space_order=0)

        sf1 = SparseFunction(name='s', grid=grid, npoint=1)
        op = Operator(sf1.interpolate(u))

        assert sf1.data.shape == (1, )
        # (0.45, 0.45) is equidistant from the 4 grid points around (4, 4)
        sf1.coordinates.data[0, :] = (0.45, 0.45)
        sf1.data[:] = 0.0
        u.data[:] = 0.0
        u.data[4, 4] = 4.0
        op.apply()

        # Exactly in the middle of 4 points, only 1 nonzero is 4
        assert sf1.data[0] == pytest.approx(1.0)

    def test_sparsetimefunction_interp(self):
        """
        Test interpolation of a SparseTimeFunction from a TimeFunction
        """
        grid = Grid(shape=(11, 11))
        u = TimeFunction(name='u', grid=grid, time_order=2, save=5, space_order=0)

        sf1 = SparseTimeFunction(name='s', grid=grid, npoint=1, nt=5)
        op = Operator(sf1.interpolate(u))

        assert sf1.data.shape == (5, 1)
        sf1.coordinates.data[0, :] = (0.45, 0.45)
        sf1.data[:] = 0.0
        u.data[:] = 0.0
        u.data[:, 4, 4] = 8*np.arange(5)+4

        # Because of time_order=2 this is probably the range we get anyway, but
        # to be sure...
        op.apply(time_m=1, time_M=3)

        # The point sits in the middle of 4 grid points, of which only (4, 4)
        # is nonzero, so each timestep interpolates a quarter of its value;
        # timesteps outside [1, 3] remain zero
        assert np.all(sf1.data[:, 0] == pytest.approx([0.0, 3.0, 5.0, 7.0, 0.0]))

    def test_sparsetimefunction_inject(self):
        """
        Test injection of a SparseTimeFunction into a TimeFunction
        """
        grid = Grid(shape=(11, 11))
        u = TimeFunction(name='u', grid=grid, time_order=2, save=5, space_order=0)

        sf1 = SparseTimeFunction(name='s', grid=grid, npoint=1, nt=5)
        op = Operator(sf1.inject(u, expr=3*sf1))

        assert sf1.data.shape == (5, 1)
        sf1.coordinates.data[0, :] = (0.45, 0.45)
        sf1.data[:, 0] = np.arange(5)
        u.data[:] = 0.0

        # Because of time_order=2 this is probably the range we get anyway, but
        # to be sure...
        op.apply(time_m=1, time_M=3)

        # The point sits in the middle of 4 grid points, so each of the 4
        # receives a quarter of the injected value 3*sf1
        assert np.all(u.data[1, 4:6, 4:6] == pytest.approx(0.75))
        assert np.all(u.data[2, 4:6, 4:6] == pytest.approx(1.5))
        assert np.all(u.data[3, 4:6, 4:6] == pytest.approx(2.25))
        assert np.sum(u.data[:]) == pytest.approx(4*0.75+4*1.5+4*2.25)

    def test_sparsetimefunction_inject_dt(self):
        """
        Test injection of the time derivative of a SparseTimeFunction into
        a TimeFunction
        """
        grid = Grid(shape=(11, 11))
        u = TimeFunction(name='u', grid=grid, time_order=2, save=5, space_order=0)

        sf1 = SparseTimeFunction(name='s', grid=grid, npoint=1, nt=5, time_order=2)

        # This should end up as a central difference operator
        op = Operator(sf1.inject(u, expr=3*sf1.dt))

        assert sf1.data.shape == (5, 1)
        sf1.coordinates.data[0, :] = (0.45, 0.45)
        sf1.data[:, 0] = np.arange(5)
        u.data[:] = 0.0

        # Because of time_order=2 this is probably the range we get anyway, but
        # to be sure...
        op.apply(time_m=1, time_M=3, dt=1)

        # sf1.dt == 1 at every injected timestep; scaled by 3 and spread
        # evenly over the 4 grid points surrounding the coordinate
        assert np.all(u.data[1:4, 4:6, 4:6] == pytest.approx(0.75))
        assert np.sum(u.data[:]) == pytest.approx(12*0.75)

    @pytest.mark.parametrize('func1', [TensorFunction, TensorTimeFunction,
                                       VectorFunction, VectorTimeFunction])
    def test_tensor(self, func1):
        """A tensor equation must generate the same code as the equivalent
        list of component-wise equations."""
        grid = Grid(tuple([5]*3))
        f1 = func1(name="f1", grid=grid)
        op1 = Operator(Eq(f1, f1.dx))
        op2 = Operator([Eq(f, f.dx) for f in f1.values()])
        assert str(op1.ccode) == str(op2.ccode)
class TestAllocation(object):

    """Tests for data allocation schemes (first-touch, staggered)."""

    @pytest.mark.parametrize('shape', [(20, 20),
                                       (20, 20, 20),
                                       (20, 20, 20, 20)])
    def test_first_touch(self, shape):
        """First-touch and default allocation must both produce
        zero-initialised, identical data."""
        dimensions = dimify('i j k l')[:len(shape)]
        grid = Grid(shape=shape, dimensions=dimensions)
        m = Function(name='m', grid=grid, first_touch=True)
        assert(np.allclose(m.data, 0))
        m2 = Function(name='m2', grid=grid, first_touch=False)
        assert(np.allclose(m2.data, 0))
        assert(np.array_equal(m.data, m2.data))

    @pytest.mark.parametrize('ndim', [2, 3])
    def test_staggered(self, ndim):
        """
        Test the "deformed" allocation for staggered functions
        """
        grid = Grid(shape=tuple([11]*ndim))
        # Try every non-empty staggering combination, plus NODE and CELL
        for stagg in tuple(powerset(grid.dimensions))[1::] + (NODE, CELL):
            f = Function(name='f', grid=grid, staggered=stagg)
            assert f.data.shape == tuple([11]*ndim)
            # Add a non-staggered field to ensure that the auto-derived
            # dimension size arguments are at maximum
            g = Function(name='g', grid=grid)
            # Test insertion into a central point
            index = tuple(5 for _ in f.dimensions)
            set_f = Eq(f[index], 2.)
            set_g = Eq(g[index], 3.)

            Operator([set_f, set_g])()
            assert f.data[index] == 2.

    @pytest.mark.parametrize('ndim', [2, 3])
    def test_staggered_time(self, ndim):
        """
        Test the "deformed" allocation for staggered functions
        """
        grid = Grid(shape=tuple([11]*ndim))
        # Try every non-empty staggering combination, plus NODE
        for stagg in tuple(powerset(grid.dimensions))[1::] + (NODE,):
            f = TimeFunction(name='f', grid=grid, staggered=stagg)
            # The leading (time) dimension is excluded from the shape check
            assert f.data.shape[1:] == tuple([11]*ndim)
            # Add a non-staggered field to ensure that the auto-derived
            # dimension size arguments are at maximum
            g = TimeFunction(name='g', grid=grid)
            # Test insertion into a central point
            index = tuple([0] + [5 for _ in f.dimensions[1:]])
            set_f = Eq(f[index], 2.)
            set_g = Eq(g[index], 3.)

            Operator([set_f, set_g])()
            assert f.data[index] == 2.
class TestApplyArguments(object):
    def verify_arguments(self, arguments, expected):
        """
        Utility function to verify an argument dictionary against
        expected values.

        ``expected`` maps argument names to either plain values (compared
        with ``==``) or (Sparse)Functions, whose runtime data buffer is
        compared element-wise against the function's data.
        """
        for name, v in expected.items():
            if isinstance(v, (Function, SparseFunction)):
                # Reinterpret the raw argument as an ndarray and compare
                # the domain region against the function's data
                condition = v._C_as_ndarray(arguments[name])[v._mask_domain] == v.data
                condition = condition.all()
            else:
                condition = arguments[name] == v

            if not condition:
                # Log the mismatch before failing for easier debugging
                error('Wrong argument %s: expected %s, got %s' %
                      (name, v, arguments[name]))
            assert condition
def verify_parameters(self, parameters, expected):
"""
Utility function to verify a parameter set against expected
values.
"""
boilerplate = ['timers']
parameters = [p.name for p in parameters]
for exp in expected:
if exp not in parameters + boilerplate:
error("Missing parameter: %s" % exp)
assert exp in parameters + boilerplate
extra = [p for p in parameters if p not in expected and p not in boilerplate]
if len(extra) > 0:
error("Redundant parameters: %s" % str(extra))
assert len(extra) == 0
def test_default_functions(self):
"""
Test the default argument derivation for functions.
"""
grid = Grid(shape=(5, 6, 7))
f = TimeFunction(name='f', grid=grid)
g = Function(name='g', grid=grid)
op = Operator(Eq(f.forward, g + f), openmp=False)
expected = {
'x_m': 0, 'x_M': 4,
'y_m': 0, 'y_M': 5,
'z_m': 0, 'z_M': 6,
'f': f, 'g': g,
}
self.verify_arguments(op.arguments(time=4), expected)
exp_parameters = ['f', 'g', 'x_m', 'x_M', 'y_m', 'y_M', 'z_m', 'z_M',
'x0_blk0_size', 'y0_blk0_size', 'time_m', 'time_M']
self.verify_parameters(op.parameters, exp_parameters)
def test_default_sparse_functions(self):
"""
Test the default argument derivation for composite functions.
"""
grid = Grid(shape=(5, 6, 7))
f = TimeFunction(name='f', grid=grid)
s = SparseTimeFunction(name='s', grid=grid, npoint=3, nt=4)
s.coordinates.data[:, 0] = np.arange(0., 3.)
s.coordinates.data[:, 1] = np.arange(1., 4.)
s.coordinates.data[:, 2] = np.arange(2., 5.)
op = Operator(s.interpolate(f))
expected = {
's': s, 's_coords': s.coordinates,
# Default dimensions of the sparse data
'p_s_size': 3, 'p_s_m': 0, 'p_s_M': 2,
'd_size': 3, 'd_m': 0, 'd_M': 2,
'time_size': 4, 'time_m': 0, 'time_M': 3,
}
self.verify_arguments(op.arguments(), expected)
def test_override_function_size(self):
"""
Test runtime size overrides for Function dimensions.
Note: The current behaviour for size-only arguments seems
ambiguous (eg. op(x=3, y=4), as it sets `dim_size` as well as
`dim_end`. Since `dim_size` is used for the cast, we can get
garbage results if it does not agree with the shape of the
provided data. This should error out, or potentially we could
set the corresponding size, while aliasing `dim` to `dim_e`?
The same should be tested for TimeFunction once fixed.
"""
grid = Grid(shape=(5, 6, 7))
g = Function(name='g', grid=grid)
op = Operator(Eq(g, 1.))
args = {'x': 3, 'y': 4, 'z': 5}
arguments = op.arguments(**args)
expected = {
'x_m': 0, 'x_M': 3,
'y_m': 0, 'y_M': 4,
'z_m': 0, 'z_M': 5,
'g': g
}
self.verify_arguments(arguments, expected)
# Verify execution
op(**args)
assert (g.data[4:] == 0.).all()
assert (g.data[:, 5:] == 0.).all()
assert (g.data[:, :, 6:] == 0.).all()
assert (g.data[:4, :5, :6] == 1.).all()
def test_override_function_subrange(self):
"""
Test runtime start/end override for Function dimensions.
"""
grid = Grid(shape=(5, 6, 7))
g = Function(name='g', grid=grid)
op = Operator(Eq(g, 1.))
args = {'x_m': 1, 'x_M': 3, 'y_m': 2, 'y_M': 4, 'z_m': 3, 'z_M': 5}
arguments = op.arguments(**args)
expected = {
'x_m': 1, 'x_M': 3,
'y_m': 2, 'y_M': 4,
'z_m': 3, 'z_M': 5,
'g': g
}
self.verify_arguments(arguments, expected)
# Verify execution
op(**args)
mask = np.ones((5, 6, 7), dtype=np.bool)
mask[1:4, 2:5, 3:6] = False
assert (g.data[mask] == 0.).all()
assert (g.data[1:4, 2:5, 3:6] == 1.).all()
def test_override_timefunction_subrange(self):
"""
Test runtime start/end overrides for TimeFunction dimensions.
"""
grid = Grid(shape=(5, 6, 7))
f = TimeFunction(name='f', grid=grid, time_order=0)
# Suppress opts to work around a know bug with GCC and OpenMP:
# https://github.com/devitocodes/devito/issues/320
op = Operator(Eq(f, 1.), opt=None)
# TODO: Currently we require the `time` subrange to be set
# explicitly. Ideally `t` would directly alias with `time`,
# but this seems broken currently.
args = {'x_m': 1, 'x_M': 3, 'y_m': 2, 'y_M': 4,
'z_m': 3, 'z_M': 5, 't_m': 1, 't_M': 4}
arguments = op.arguments(**args)
expected = {
'x_m': 1, 'x_M': 3,
'y_m': 2, 'y_M': 4,
'z_m': 3, 'z_M': 5,
'time_m': 1, 'time_M': 4,
'f': f
}
self.verify_arguments(arguments, expected)
# Verify execution
op(**args)
mask = np.ones((1, 5, 6, 7), dtype=np.bool)
mask[:, 1:4, 2:5, 3:6] = False
assert (f.data[mask] == 0.).all()
assert (f.data[:, 1:4, 2:5, 3:6] == 1.).all()
def test_override_function_data(self):
"""
Test runtime data overrides for Function symbols.
"""
grid = Grid(shape=(5, 6, 7))
a = Function(name='a', grid=grid)
op = Operator(Eq(a, a + 3))
# Run with default value
a.data[:] = 1.
op()
assert (a.data[:] == 4.).all()
# Override with symbol (different name)
a1 = Function(name='a1', grid=grid)
a1.data[:] = 2.
op(a=a1)
assert (a1.data[:] == 5.).all()
# Override with symbol (same name as original)
a2 = Function(name='a', grid=grid)
a2.data[:] = 3.
op(a=a2)
assert (a2.data[:] == 6.).all()
# Override with user-allocated numpy data
a3 = np.zeros_like(a._data_allocated)
a3[:] = 4.
op(a=a3)
assert (a3[a._mask_domain] == 7.).all()
def test_override_timefunction_data(self):
"""
Test runtime data overrides for TimeFunction symbols.
"""
grid = Grid(shape=(5, 6, 7))
a = TimeFunction(name='a', grid=grid, save=2)
# Suppress opts to work around a know bug with GCC and OpenMP:
# https://github.com/devitocodes/devito/issues/320
op = Operator(Eq(a, a + 3), opt=None)
# Run with default value
a.data[:] = 1.
op(time_m=0, time=1)
assert (a.data[:] == 4.).all()
# Override with symbol (different name)
a1 = TimeFunction(name='a1', grid=grid, save=2)
a1.data[:] = 2.
op(time_m=0, time=1, a=a1)
assert (a1.data[:] == 5.).all()
# Override with symbol (same name as original)
a2 = TimeFunction(name='a', grid=grid, save=2)
a2.data[:] = 3.
op(time_m=0, time=1, a=a2)
assert (a2.data[:] == 6.).all()
# Override with user-allocated numpy data
a3 = np.zeros_like(a._data_allocated)
a3[:] = 4.
op(time_m=0, time=1, a=a3)
assert (a3[a._mask_domain] == 7.).all()
def test_dimension_size_infer(self, nt=100):
"""Test that the dimension sizes are being inferred correctly"""
grid = Grid(shape=(3, 5, 7))
a = Function(name='a', grid=grid)
b = TimeFunction(name='b', grid=grid, save=nt)
op = Operator(Eq(b, a))
time = b.indices[0]
op_arguments = op.arguments()
assert(op_arguments[time.min_name] == 0)
assert(op_arguments[time.max_name] == nt-1)
def test_dimension_offset_adjust(self, nt=100):
"""Test that the dimension sizes are being inferred correctly"""
i, j, k = dimify('i j k')
shape = (10, 10, 10)
grid = Grid(shape=shape, dimensions=(i, j, k))
a = Function(name='a', grid=grid)
b = TimeFunction(name='b', grid=grid, save=nt)
time = b.indices[0]
eqn = Eq(b[time + 1, i, j, k], b[time - 1, i, j, k]
+ b[time, i, j, k] + a[i, j, k])
op = Operator(eqn)
op_arguments = op.arguments(time=nt-10)
assert(op_arguments[time.min_name] == 1)
assert(op_arguments[time.max_name] == nt - 10)
def test_dimension_size_override(self):
"""Test explicit overrides for the leading time dimension"""
grid = Grid(shape=(3, 5, 7))
a = TimeFunction(name='a', grid=grid)
one = Function(name='one', grid=grid)
one.data[:] = 1.
op = Operator(Eq(a.forward, a + one))
# Test dimension override via the buffered dimenions
a.data[0] = 0.
op(a=a, t=5)
assert(np.allclose(a.data[1], 5.))
# Test dimension override via the parent dimenions
a.data[0] = 0.
op(a=a, time=4)
assert(np.allclose(a.data[0], 4.))
def test_override_sparse_data_fix_dim(self):
"""
Ensure the arguments are derived correctly for an input SparseFunction.
The dimensions are forced to be the same in this case to verify
the aliasing on the SparseFunction name.
"""
grid = Grid(shape=(10, 10))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2)
original_coords = (1., 1.)
new_coords = (2., 2.)
p_dim = Dimension(name='p_src')
src1 = SparseTimeFunction(name='src1', grid=grid, dimensions=(time, p_dim), nt=10,
npoint=1, coordinates=original_coords, time_order=2)
src2 = SparseTimeFunction(name='src2', grid=grid, dimensions=(time, p_dim),
npoint=1, nt=10, coordinates=new_coords, time_order=2)
op = Operator(src1.inject(u, src1))
# Move the source from the location where the setup put it so we can test
# whether the override picks up the original coordinates or the changed ones
args = op.arguments(src1=src2, time=0)
arg_name = src1.coordinates._arg_names[0]
assert(np.array_equal(src2.coordinates._C_as_ndarray(args[arg_name]),
np.asarray((new_coords,))))
def test_override_sparse_data_default_dim(self):
"""
Ensure the arguments are derived correctly for an input SparseFunction.
The dimensions are the defaults (name dependant 'p_name') in this case to verify
the aliasing on the SparseFunction coordinates and dimensions.
"""
grid = Grid(shape=(10, 10))
original_coords = (1., 1.)
new_coords = (2., 2.)
u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2)
src1 = SparseTimeFunction(name='src1', grid=grid, npoint=1, nt=10,
coordinates=original_coords, time_order=2)
src2 = SparseTimeFunction(name='src2', grid=grid, npoint=1, nt=10,
coordinates=new_coords, time_order=2)
op = Operator(src1.inject(u, src1))
# Move the source from the location where the setup put it so we can test
# whether the override picks up the original coordinates or the changed ones
args = op.arguments(src1=src2, t=0)
arg_name = src1.coordinates._arg_names[0]
assert(np.array_equal(src2.coordinates._C_as_ndarray(args[arg_name]),
np.asarray((new_coords,))))
def test_argument_derivation_order(self, nt=100):
""" Ensure the precedence order of arguments is respected
Defaults < (overriden by) Tensor Arguments < Dimensions < Scalar Arguments
"""
i, j, k = dimify('i j k')
shape = (10, 10, 10)
grid = Grid(shape=shape, dimensions=(i, j, k))
a = Function(name='a', grid=grid)
b = TimeFunction(name='b', grid=grid, save=nt)
time = b.indices[0]
op = Operator(Eq(b, a))
# Simple case, same as that tested above.
# Repeated here for clarity of further tests.
op_arguments = op.arguments()
assert(op_arguments[time.min_name] == 0)
assert(op_arguments[time.max_name] == nt-1)
# Providing a tensor argument should infer the dimension size from its shape
b1 = TimeFunction(name='b1', grid=grid, save=nt+1)
op_arguments = op.arguments(b=b1)
assert(op_arguments[time.min_name] == 0)
assert(op_arguments[time.max_name] == nt)
# Providing a dimension size explicitly should override the automatically inferred
op_arguments = op.arguments(b=b1, time=nt - 1)
assert(op_arguments[time.min_name] == 0)
assert(op_arguments[time.max_name] == nt - 1)
# Providing a scalar argument explicitly should override the automatically
# inferred
op_arguments = op.arguments(b=b1, time_M=nt - 2)
assert(op_arguments[time.min_name] == 0)
assert(op_arguments[time.max_name] == nt - 2)
def test_derive_constant_value(self):
"""Ensure that values for Constant symbols are derived correctly."""
grid = Grid(shape=(5, 6))
f = Function(name='f', grid=grid)
a = Constant(name='a', value=3.)
Operator(Eq(f, a))()
assert np.allclose(f.data, 3.)
g = Function(name='g', grid=grid)
b = Constant(name='b')
op = Operator(Eq(g, b))
b.data = 4.
op()
assert np.allclose(g.data, 4.)
def test_argument_from_index_constant(self):
nx, ny = 30, 30
grid = Grid(shape=(nx, ny))
x, y = grid.dimensions
arbdim = Dimension('arb')
u = TimeFunction(name='u', grid=grid, save=None, time_order=2, space_order=0)
snap = Function(name='snap', dimensions=(arbdim, x, y), shape=(5, nx, ny),
space_order=0)
save_t = Constant(name='save_t', dtype=np.int32)
save_slot = Constant(name='save_slot', dtype=np.int32)
expr = Eq(snap.subs(arbdim, save_slot), u.subs(grid.stepping_dim, save_t))
op = Operator(expr)
u.data[:] = 0.0
snap.data[:] = 0.0
u.data[0, 10, 10] = 1.0
op.apply(save_t=0, save_slot=1)
assert snap.data[1, 10, 10] == 1.0
def test_argument_no_shifting(self):
"""Tests that there's no shifting in the written-to region when
iteration bounds are prescribed."""
grid = Grid(shape=(11, 11))
x, y = grid.dimensions
a = Function(name='a', grid=grid)
a.data[:] = 1.
# Try with an operator w/o stencil offsets
op = Operator(Eq(a, a + a))
op(x_m=3, x_M=7)
assert (a.data[:3, :] == 1.).all()
assert (a.data[3:7, :] == 2.).all()
assert (a.data[8:, :] == 1.).all()
# Try with an operator w/ stencil offsets
a.data[:] = 1.
op = Operator(Eq(a, a + (a[x-1, y] + a[x+1, y]) / 2.))
op(x_m=3, x_M=7)
assert (a.data[:3, :] == 1.).all()
assert (a.data[3:7, :] >= 2.).all()
assert (a.data[8:, :] == 1.).all()
def test_argument_unknown(self):
"""Check that Operators deal with unknown runtime arguments."""
grid = Grid(shape=(11, 11))
a = Function(name='a', grid=grid)
op = Operator(Eq(a, a + a))
try:
op.apply(b=3)
assert False
except ValueError:
# `b` means nothing to `op`, so we end up here
assert True
try:
configuration['ignore-unknowns'] = True
op.apply(b=3)
assert True
except ValueError:
# we should not end up here as we're now ignoring unknown arguments
assert False
finally:
configuration['ignore-unknowns'] = configuration._defaults['ignore-unknowns']
@pytest.mark.parametrize('so,to,pad,expected', [
(0, 1, 0, (2, 4, 4, 4)),
(2, 1, 0, (2, 8, 8, 8)),
(4, 1, 0, (2, 12, 12, 12)),
(4, 3, 0, (4, 12, 12, 12)),
(4, 1, 3, (2, 15, 15, 15)),
((2, 5, 2), 1, 0, (2, 11, 11, 11)),
((2, 5, 4), 1, 3, (2, 16, 16, 16)),
])
def test_function_dataobj(self, so, to, pad, expected):
"""
Tests that the C-level structs from DiscreteFunctions are properly
populated upon application of an Operator.
"""
grid = Grid(shape=(4, 4, 4))
u = TimeFunction(name='u', grid=grid, space_order=so, time_order=to, padding=pad)
op = Operator(Eq(u, 1), opt='noop')
u_arg = op.arguments(time=0)['u']
u_arg_shape = tuple(u_arg._obj.size[i] for i in range(u.ndim))
assert u_arg_shape == expected
def test_illegal_override(self):
grid0 = Grid(shape=(11, 11))
grid1 = Grid(shape=(13, 13))
a0 = Function(name='a', grid=grid0)
b0 = Function(name='b', grid=grid0)
a1 = Function(name='a', grid=grid1)
op = Operator(Eq(a0, a0 + b0 + 1))
op.apply()
try:
op.apply(a=a1, b=b0)
assert False
except ValueError as e:
assert 'Override' in e.args[0] # Check it's hitting the right error msg
except:
assert False
def test_incomplete_override(self):
"""
Simulate a typical user error when one has to supply replacements for lots
of Functions (a complex Operator) but at least one is forgotten.
"""
grid0 = Grid(shape=(11, 11))
grid1 = Grid(shape=(13, 13))
a0 = Function(name='a', grid=grid0)
a1 = Function(name='a', grid=grid1)
b = Function(name='b', grid=grid0)
op = Operator(Eq(a0, a0 + b + 1))
op.apply()
try:
op.apply(a=a1)
assert False
except ValueError as e:
assert 'Default' in e.args[0] # Check it's hitting the right error msg
except:
assert False
@skipif('nompi')
@pytest.mark.parallel(mode=1)
def test_new_distributor(self):
"""
Test that `comm` and `nb` are correctly updated when a different distributor
from that it was originally built with is required by an operator.
Note that MPI is required to ensure `comm` and `nb` are included in op.objects.
"""
from devito.mpi import MPI
grid = Grid(shape=(10, 10), comm=MPI.COMM_SELF)
grid2 = Grid(shape=(10, 10), comm=MPI.COMM_WORLD)
u = TimeFunction(name='u', grid=grid, space_order=2)
u2 = TimeFunction(name='u2', grid=grid2, space_order=2)
# Create some operator that requires MPI communication
eqn = Eq(u.forward, u + u.laplace)
op = Operator(eqn)
assert op.arguments(u=u, time_M=0)['comm'] is grid.distributor._obj_comm.value
assert (op.arguments(u=u, time_M=0)['nb'] is
grid.distributor._obj_neighborhood.value)
assert op.arguments(u=u2, time_M=0)['comm'] is grid2.distributor._obj_comm.value
assert (op.arguments(u=u2, time_M=0)['nb'] is
grid2.distributor._obj_neighborhood.value)
def test_spacing_from_new_grid(self):
"""
MFE for issue #1518.
"""
grid = Grid(shape=(10, 10), extent=(9, 9))
u = Function(name='u', grid=grid, space_order=1)
# A bogus operator that just assigns the x spacing into the array
# Note, grid.dimensions[0].spacing here is not a number, it's the symbol h_x
op = Operator(Eq(u, grid.dimensions[0].spacing))
# Create a new grid with different spacing, and a function defined on it
grid2 = Grid(shape=(5, 5), extent=(9, 9))
u2 = Function(name='u', grid=grid2, space_order=1)
op(u=u2)
# The h_x that was passed to the C code must be the one `grid2`, not `grid`
assert u2.data[2, 2] == grid2.spacing[0]
@skipif('device')
class TestDeclarator(object):

    """
    Tests that the expected C-level declarations, casts, allocations and
    deallocations are emitted for heap- and stack-scoped Arrays.
    """

    def test_heap_1D(self):
        """Heap-allocated 1D Array: check cast, posix_memalign alloc, free."""
        i, j = dimensions('i j')
        a = Array(name='a', dimensions=(i,))
        b = Array(name='b', dimensions=(i,))
        f = Function(name='f', shape=(3,), dimensions=(j,))
        op = Operator([Eq(a[i], a[i] + b[i] + 5.),
                       Eq(f[j], a[j])])
        # The user-provided Function is cast from the C struct, not allocated
        assert op.body.casts[2].is_PointerCast
        assert str(op.body.casts[2]) == ('float (*restrict f) __attribute__ '
                                         '((aligned (64))) = (float (*)) f_vec->data;')
        # Arrays are heap-allocated with 64-byte alignment...
        assert str(op.body.allocs[0]) == 'float *a_vec;'
        assert str(op.body.allocs[1]) == ('posix_memalign((void**)&a_vec, 64, '
                                          'sizeof(float[i_size]));')
        # ...and released at the end of the Operator body
        assert str(op.body.frees[0]) == 'free(a_vec);'

    def test_heap_perfect_2D(self):
        """Heap allocation with 1D and 2D Arrays in a perfect loop nest."""
        i, j, k = dimensions('i j k')
        a = Array(name='a', dimensions=(i,))
        c = Array(name='c', dimensions=(i, j))
        f = Function(name='f', shape=(3, 3), dimensions=(j, k))
        op = Operator([Eq(a[i], c[i, j]),
                       Eq(c[i, j], c[i, j]*a[i]),
                       Eq(f[j, k], a[j] + c[j, k])])
        assert op.body.casts[2].is_PointerCast
        assert str(op.body.casts[2]) ==\
            ('float (*restrict f)[f_vec->size[1]] __attribute__ '
             '((aligned (64))) = (float (*)[f_vec->size[1]]) f_vec->data;')
        assert str(op.body.allocs[0]) == 'float *a_vec;'
        assert str(op.body.allocs[1]) == ('posix_memalign((void**)&a_vec, 64, '
                                          'sizeof(float[i_size]));')
        assert str(op.body.allocs[2]) == 'float *c_vec;'
        assert str(op.body.allocs[3]) == ('posix_memalign((void**)&c_vec, 64, '
                                          'sizeof(float[i_size][j_size]));')
        assert str(op.body.frees[0]) == 'free(a_vec);'
        assert str(op.body.frees[1]) == 'free(c_vec);'

    def test_heap_imperfect_2D(self):
        """As `test_heap_perfect_2D`, but with an imperfect loop nest."""
        i, j, k = dimensions('i j k')
        a = Array(name='a', dimensions=(i,))
        c = Array(name='c', dimensions=(i, j))
        f = Function(name='f', shape=(3, 3), dimensions=(j, k))
        op = Operator([Eq(a[i], 0),
                       Eq(c[i, j], c[i, j]*a[i]),
                       Eq(f[j, k], a[j] + c[j, k])])
        assert op.body.casts[2].is_PointerCast
        assert str(op.body.casts[2]) ==\
            ('float (*restrict f)[f_vec->size[1]] __attribute__ '
             '((aligned (64))) = (float (*)[f_vec->size[1]]) f_vec->data;')
        assert str(op.body.allocs[0]) == 'float *a_vec;'
        assert str(op.body.allocs[1]) == ('posix_memalign((void**)&a_vec, 64, '
                                          'sizeof(float[i_size]));')
        assert str(op.body.allocs[2]) == 'float *c_vec;'
        assert str(op.body.allocs[3]) == ('posix_memalign((void**)&c_vec, 64, '
                                          'sizeof(float[i_size][j_size]));')
        assert str(op.body.frees[0]) == 'free(a_vec);'
        assert str(op.body.frees[1]) == 'free(c_vec);'

    def test_stack_scalars(self):
        """Scalar temporaries are declared inline, at their first assignment."""
        i, j = dimensions('i j')
        a = Array(name='a', dimensions=(i,))
        f = Function(name='f', shape=(3,), dimensions=(j,))
        t0 = Scalar(name='t0')
        t1 = Scalar(name='t1')
        op = Operator([Eq(t0, 1.),
                       Eq(t1, 2.),
                       Eq(a[i], t0*t1*3.),
                       Eq(f, a[j])])
        assert op.body.casts[1].is_PointerCast
        assert str(op.body.casts[1]) ==\
            ('float (*restrict f) __attribute__ '
             '((aligned (64))) = (float (*)) f_vec->data;')
        assert str(op.body.allocs[0]) == 'float *a_vec;'
        assert str(op.body.allocs[1]) == ('posix_memalign((void**)&a_vec, 64, '
                                          'sizeof(float[i_size]));')
        assert str(op.body.frees[0]) == 'free(a_vec);'
        # Scalars are declared-and-initialized where they are first computed
        assert op.body.body[1].body[0].is_ExpressionBundle
        assert str(op.body.body[1].body[0].body[0]) == 'float t0 = 1.00000000000000F;'
        assert str(op.body.body[1].body[0].body[1]) == 'float t1 = 2.00000000000000F;'

    def test_stack_arrays(self):
        """Stack-scoped Arrays become aligned automatic arrays (no malloc/free)."""
        i, j, k, s, q = dimensions('i j k s q')
        c = Array(name='c', dimensions=(i, j), scope='stack')
        e = Array(name='e', dimensions=(k, s, q, i, j))
        f = Function(name='f', shape=(3, 3), dimensions=(s, q))
        op = Operator([Eq(c[i, j], e[k, s, q, i, j]*1.),
                       Eq(f, c[s, q])])
        assert op.body.casts[0].is_PointerCast
        assert str(op.body.casts[0]) ==\
            ('float (*restrict c)[j_size] __attribute__ ((aligned (64))) = '
             '(float (*)[j_size]) c_vec;')
        assert op.body.casts[2].is_PointerCast
        assert str(op.body.casts[2]) ==\
            ('float (*restrict f)[f_vec->size[1]] __attribute__ '
             '((aligned (64))) = (float (*)[f_vec->size[1]]) f_vec->data;')
        assert str(op.body.allocs[0]) ==\
            'float c_vec[i_size][j_size] __attribute__((aligned(64)));'

    def test_conditional_declarations(self):
        """List-initialized Arrays are (re)declared in each Conditional branch."""
        x = Dimension(name="x")
        a = Array(name='a', dimensions=(x,), dtype=np.int32, scope='stack')
        init_value = ListInitializer([0, 0])
        list_initialize = Expression(ClusterizedEq(Eq(a[x], init_value)))
        iet = Conditional(x < 3, list_initialize, list_initialize)
        iet = Callable('test', iet, 'void')
        iet = DataManager.place_definitions.__wrapped__(DataManager(None, None), iet)[0]
        # Both the then- and else-branch must carry the initialization
        for i in iet.body.body[0].children:
            assert len(i) == 1
            assert i[0].is_Expression
            assert i[0].expr.rhs is init_value
class TestLoopScheduling(object):
def test_permutations_without_deps(self):
"""
Test that if none of the Function accesses in the equations use
offsets, implying that there are no carried dependences, then no
matter the order in which the equations are provided to an Operator
the resulting loop nest is the same, and the input ordering of the
equations is honored.
"""
grid = Grid(shape=(4, 4, 4))
ti0 = Function(name='ti0', grid=grid)
ti1 = Function(name='ti1', grid=grid)
tu = TimeFunction(name='tu', grid=grid)
tv = TimeFunction(name='tv', grid=grid)
eq1 = Eq(tu, tv*ti0 + ti0)
eq2 = Eq(ti0, tu + 3.)
eq3 = Eq(tv, ti0*ti1)
op1 = Operator([eq1, eq2, eq3], opt='noop')
op2 = Operator([eq2, eq1, eq3], opt='noop')
op3 = Operator([eq3, eq2, eq1], opt='noop')
trees = [retrieve_iteration_tree(i) for i in [op1, op2, op3]]
assert all(len(i) == 1 for i in trees)
trees = [i[0] for i in trees]
for tree in trees:
assert IsPerfectIteration().visit(tree[1])
exprs = FindNodes(Expression).visit(tree[-1])
assert len(exprs) == 3
    @pytest.mark.parametrize('exprs,fissioned,shared', [
        # 0) Trivial case
        (('Eq(u, 1)', 'Eq(v, u.dxl)'), '(1,x)', [0]),
        # 1) Anti-dependence along x
        (('Eq(u, 1)', 'Eq(v, u.dxr)'), '(1,x)', [0]),
        # 2, 3) As above, but with an additional Dimension-independent dependence
        (('Eq(u, v)', 'Eq(v, u.dxl)'), '(1,x)', [0]),
        (('Eq(u, v)', 'Eq(v, u.dxr)'), '(1,x)', [0]),
        # 4) Slightly more convoluted than above, as the additional dependence is
        # now carried along x
        (('Eq(u, v)', 'Eq(v, u.dxr)'), '(1,x)', [0]),
        # 5) No backward carried dependences, no storage related dependences
        (('Eq(us.forward, vs)', 'Eq(vs, us.dxl)'), '(0,time)', []),
        # 6) No backward carried dependences, no storage related dependences
        (('Eq(us.forward, vs)', 'Eq(vs, us.dxr)'), '(0,time)', []),
        # 7) Three fissionable Eqs
        (('Eq(u, u.dxl + v.dxr)', 'Eq(v, w.dxr)', 'Eq(w, u*w.dxl)'), '(1,x)', [0]),
        # 8) There are carried backward dependences, but not in the Dimension
        # that gets fissioned
        (('Eq(u.forward, u + v.dx)', 'Eq(v.forward, v + u.forward.dx)'), '(1,x)', [0])
    ])
    def test_fission_for_parallelism(self, exprs, fissioned, shared):
        """
        Test that expressions are scheduled to separate loops if this can
        turn one sequential loop into two parallel loops ("loop fission").
        """
        grid = Grid(shape=(3, 3))
        t = grid.stepping_dim  # noqa
        time = grid.time_dim  # noqa
        x, y = grid.dimensions  # noqa

        # NOTE: the `noqa`-marked locals below are referenced by name inside the
        # eval'd `exprs` strings, so they must keep exactly these names
        u = TimeFunction(name='u', grid=grid)  # noqa
        v = TimeFunction(name='v', grid=grid)  # noqa
        w = TimeFunction(name='w', grid=grid)  # noqa
        us = TimeFunction(name='u', grid=grid, save=5)  # noqa
        vs = TimeFunction(name='v', grid=grid, save=5)  # noqa

        # List comprehension would need explicit locals/globals mappings to eval
        eqns = []
        for e in exprs:
            eqns.append(eval(e))

        # `opt='noop'` is only to avoid loop blocking, hence making the asserts
        # below much simpler to write and understand
        op = Operator(eqns, opt='noop')

        # Fission expected: one loop nest per equation
        trees = retrieve_iteration_tree(op)
        assert len(trees) == len(eqns)
        # `fissioned` encodes (depth, Dimension) at which fission must occur
        exp_depth, exp_dim = eval(fissioned)
        for i in trees:
            # Some outer loops may still be shared
            for j in shared:
                assert i[j] is trees[0][j]
            # Fission happened
            assert i[exp_depth].dim is exp_dim
    @pytest.mark.parametrize('exprs', [
        # 0) Storage related dependence
        ('Eq(u.forward, v)', 'Eq(v, u.dxl)'),
        # 1) Backward carried flow-dependence through `v`
        ('Eq(u, v.forward)', 'Eq(v, u)'),
        # 2) Backward carried flow-dependence through `vs`
        ('Eq(us.forward, vs)', 'Eq(vs.forward, us.dxl)'),
        # 3) Classic coupled forward-marching equations
        ('Eq(u.forward, u + u.backward + v)', 'Eq(v.forward, v + v.backward + u)'),
        # 4) Three non-fissionable Eqs
        ('Eq(u, v.dxl)', 'Eq(v, w.dxl)', 'Eq(w, u*w.dxl)')
    ])
    def test_no_fission_as_illegal(self, exprs):
        """
        Antithesis of `test_fission_for_parallelism`.
        """
        grid = Grid(shape=(3, 3))
        x, y = grid.dimensions

        # NOTE: the `noqa`-marked locals below are referenced by name inside the
        # eval'd `exprs` strings, so they must keep exactly these names
        u = TimeFunction(name='u', grid=grid)  # noqa
        v = TimeFunction(name='v', grid=grid)  # noqa
        w = TimeFunction(name='w', grid=grid)  # noqa
        us = TimeFunction(name='u', grid=grid, save=5)  # noqa
        vs = TimeFunction(name='v', grid=grid, save=5)  # noqa

        # List comprehension would need explicit locals/globals mappings to eval
        eqns = []
        for e in exprs:
            eqns.append(eval(e))

        op = Operator(eqns)

        # No fission expected: all equations in a single loop nest
        trees = retrieve_iteration_tree(op)
        assert len(trees) == 1
    @pytest.mark.parametrize('exprs,directions,expected,visit', [
        # 0) WAR 2->3, 3 fissioned to maximize parallelism
        (('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
          'Eq(ti1[x,y,z], ti3[x,y,z])',
          'Eq(ti3[x,y,z], ti1[x,y,z+1] + 1.)'),
         '+++++', ['xyz', 'xyz', 'xyz'], 'xyzzz'),
        # 1) WAR 1->2, 2->3
        (('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
          'Eq(ti1[x,y,z], ti0[x,y,z+1])',
          'Eq(ti3[x,y,z], ti1[x,y,z-2] + 1.)'),
         '+++++', ['xyz', 'xyz', 'xyz'], 'xyzzz'),
        # 2) WAR 1->2, 2->3, RAW 2->3
        (('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
          'Eq(ti1[x,y,z], ti0[x,y,z+1])',
          'Eq(ti3[x,y,z], ti1[x,y,z-2] + ti1[x,y,z+2])'),
         '+++++', ['xyz', 'xyz', 'xyz'], 'xyzzz'),
        # 3) WAR 1->3
        (('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
          'Eq(ti1[x,y,z], ti3[x,y,z])',
          'Eq(ti3[x,y,z], ti0[x,y,z+1] + 1.)'),
         '++++', ['xyz', 'xyz'], 'xyzz'),
        # 4) WAR 1->3
        # Like before, but the WAR is along `y`, an inner Dimension
        (('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
          'Eq(ti1[x,y,z], ti3[x,y,z])',
          'Eq(ti3[x,y,z], ti0[x,y+1,z] + 1.)'),
         '+++++', ['xyz', 'xyz'], 'xyzyz'),
        # 5) WAR 1->2, 2->3; WAW 1->3
        # Similar to the cases above, but the last equation does not iterate over `z`
        (('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
          'Eq(ti1[x,y,z], ti0[x,y,z+2])',
          'Eq(ti0[x,y,0], ti0[x,y,0] + 1.)'),
         '++++', ['xyz', 'xyz', 'xy'], 'xyzz'),
        # 6) WAR 1->2; WAW 1->3
        # Basically like above, but with the time dimension. This should have no impact
        (('Eq(tu[t,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
          'Eq(tv[t,x,y,z], tu[t,x,y,z+2])',
          'Eq(tu[t,x,y,0], tu[t,x,y,0] + 1.)'),
         '+++++', ['txyz', 'txyz', 'txy'], 'txyzz'),
        # 7) WAR 1->2, 2->3
        (('Eq(tu[t,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
          'Eq(tv[t,x,y,z], tu[t,x,y,z+2])',
          'Eq(tw[t,x,y,z], tv[t,x,y,z-1] + 1.)'),
         '++++++', ['txyz', 'txyz', 'txyz'], 'txyzzz'),
        # 8) WAR 1->2; WAW 1->3
        (('Eq(tu[t,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
          'Eq(tv[t,x,y,z], tu[t,x+2,y,z])',
          'Eq(tu[t,3,y,0], tu[t,3,y,0] + 1.)'),
         '++++++++', ['txyz', 'txyz', 'ty'], 'txyzxyzy'),
        # 9) RAW 1->2, WAR 2->3
        (('Eq(tu[t,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
          'Eq(tv[t,x,y,z], tu[t,x,y,z-2])',
          'Eq(tw[t,x,y,z], tv[t,x,y+1,z] + 1.)'),
         '++++++++', ['txyz', 'txyz', 'txyz'], 'txyzyzyz'),
        # 10) WAR 1->2; WAW 1->3
        (('Eq(tu[t-1,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
          'Eq(tv[t,x,y,z], tu[t,x,y,z+2])',
          'Eq(tu[t-1,x,y,0], tu[t,x,y,0] + 1.)'),
         '-+++', ['txyz', 'txy'], 'txyz'),
        # 11) WAR 1->2
        (('Eq(tu[t-1,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
          'Eq(tv[t,x,y,z], tu[t,x,y,z+2] + tu[t,x,y,z-2])',
          'Eq(tw[t,x,y,z], tv[t,x,y,z] + 2)'),
         '-+++', ['txyz'], 'txyz'),
        # 12) Time goes backward so that information flows in time
        (('Eq(tu[t-1,x,y,z], tu[t,x+3,y,z] + tv[t,x,y,z])',
          'Eq(tv[t-1,x,y,z], tu[t,x,y,z+2])',
          'Eq(tw[t-1,x,y,z], tu[t,x,y+1,z] + tv[t,x,y-1,z])'),
         '-+++', ['txyz'], 'txyz'),
        # 13) Time goes backward so that information flows in time, but the
        # first and last Eqs are interleaved by a completely independent
        # Eq. This results in three disjoint sets of loops
        (('Eq(tu[t-1,x,y,z], tu[t,x+3,y,z] + tv[t,x,y,z])',
          'Eq(ti0[x,y,z], ti1[x,y,z+2])',
          'Eq(tw[t-1,x,y,z], tu[t,x,y+1,z] + tv[t,x,y-1,z])'),
         '-++++++++++', ['txyz', 'xyz', 'txyz'], 'txyzxyztxyz'),
        # 14) Time goes backward so that information flows in time
        (('Eq(ti0[x,y,z], ti1[x,y,z+2])',
          'Eq(tu[t-1,x,y,z], tu[t,x+3,y,z] + tv[t,x,y,z])',
          'Eq(tw[t-1,x,y,z], tu[t,x,y+1,z] + ti0[x,y-1,z])'),
         '+++-+++', ['xyz', 'txyz'], 'xyztxyz'),
        # 15) WAR 2->1
        # Here the difference is that we're using SubDimensions
        (('Eq(tv[t,xi,yi,zi], tu[t,xi-1,yi,zi] + tu[t,xi+1,yi,zi])',
          'Eq(tu[t+1,xi,yi,zi], tu[t,xi,yi,zi] + tv[t,xi-1,yi,zi] + tv[t,xi+1,yi,zi])'),
         '+++++++', ['ti0xi0yi0z', 'ti0xi0yi0z'], 'ti0xi0yi0zi0xi0yi0z'),
        # 16) RAW 3->1; expected=2
        # Time goes backward, but the third equation should get fused with
        # the first one, as the time dependence is loop-carried
        (('Eq(tv[t-1,x,y,z], tv[t,x-1,y,z] + tv[t,x+1,y,z])',
          'Eq(tv[t-1,z,z,z], tv[t-1,z,z,z] + 1)',
          'Eq(f[x,y,z], tu[t-1,x,y,z] + tu[t,x,y,z] + tu[t+1,x,y,z] + tv[t,x,y,z])'),
         '-++++', ['txyz', 'tz'], 'txyzz'),
        # 17) WAR 2->3, 2->4; expected=4
        (('Eq(tu[t+1,x,y,z], tu[t,x,y,z] + 1.)',
          'Eq(tu[t+1,y,y,y], tu[t+1,y,y,y] + tw[t+1,y,y,y])',
          'Eq(tw[t+1,z,z,z], tw[t+1,z,z,z] + 1.)',
          'Eq(tv[t+1,x,y,z], tu[t+1,x,y,z] + 1.)'),
         '+++++++++', ['txyz', 'ty', 'tz', 'txyz'], 'txyzyzxyz'),
        # 18) WAR 1->3; expected=3
        # 5 is expected to be moved before 4 but after 3, to be merged with 3
        (('Eq(tu[t+1,x,y,z], tv[t,x,y,z] + 1.)',
          'Eq(tv[t+1,x,y,z], tu[t,x,y,z] + 1.)',
          'Eq(tw[t+1,x,y,z], tu[t+1,x+1,y,z] + tu[t+1,x-1,y,z])',
          'Eq(f[x,x,z], tu[t,x,x,z] + tw[t,x,x,z])',
          'Eq(ti0[x,y,z], tw[t+1,x,y,z] + 1.)'),
         '++++++++', ['txyz', 'txyz', 'txz'], 'txyzxyzz'),
        # 19) WAR 1->3; expected=3
        # Cannot merge 1 with 3 otherwise we would break an anti-dependence
        (('Eq(tv[t+1,x,y,z], tu[t,x,y,z] + tu[t,x+1,y,z])',
          'Eq(tu[t+1,xi,yi,zi], tv[t+1,xi,yi,zi] + tv[t+1,xi+1,yi,zi])',
          'Eq(tw[t+1,x,y,z], tv[t+1,x,y,z] + tv[t+1,x+1,y,z])'),
         '++++++++++', ['txyz', 'ti0xi0yi0z', 'txyz'], 'txyzi0xi0yi0zxyz'),
    ])
    def test_consistency_anti_dependences(self, exprs, directions, expected, visit):
        """
        Test that anti dependences end up generating multi loop nests, rather
        than a single loop nest enclosing all of the equations.

        `directions` encodes, one character per Iteration ('+'/'-'/'*'), the
        expected iteration direction; `expected` lists the Dimension names of
        each loop nest; `visit` is the concatenated Dimension names of all
        Iterations in visiting order.
        """
        grid = Grid(shape=(4, 4, 4))
        x, y, z = grid.dimensions  # noqa
        xi, yi, zi = grid.interior.dimensions  # noqa
        t = grid.stepping_dim  # noqa

        # NOTE: the `noqa`-marked locals below are referenced by name inside
        # the eval'd `exprs` strings, so they must keep exactly these names
        ti0 = Array(name='ti0', shape=grid.shape, dimensions=grid.dimensions)  # noqa
        ti1 = Array(name='ti1', shape=grid.shape, dimensions=grid.dimensions)  # noqa
        ti3 = Array(name='ti3', shape=grid.shape, dimensions=grid.dimensions)  # noqa
        f = Function(name='f', grid=grid)  # noqa
        tu = TimeFunction(name='tu', grid=grid)  # noqa
        tv = TimeFunction(name='tv', grid=grid)  # noqa
        tw = TimeFunction(name='tw', grid=grid)  # noqa

        # List comprehension would need explicit locals/globals mappings to eval
        eqns = []
        for e in exprs:
            eqns.append(eval(e))

        # Note: `topofuse` is a subset of `advanced` mode. We use it merely to
        # bypass 'blocking', which would complicate the asserts below
        op = Operator(eqns, opt=('topofuse', {'openmp': False}))

        trees = retrieve_iteration_tree(op)
        iters = FindNodes(Iteration).visit(op)
        assert len(trees) == len(expected)
        assert len(iters) == len(directions)
        # mapper just makes it quicker to write out the test parametrization
        mapper = {'time': 't'}
        assert ["".join(mapper.get(i.dim.name, i.dim.name) for i in j)
                for j in trees] == expected
        assert "".join(mapper.get(i.dim.name, i.dim.name) for i in iters) == visit
        # mapper just makes it quicker to write out the test parametrization
        mapper = {'+': Forward, '-': Backward, '*': Any}
        assert all(i.direction == mapper[j] for i, j in zip(iters, directions))
def test_expressions_imperfect_loops(self):
"""
Test that equations depending only on a subset of all indices
appearing across all equations are placed within earlier loops
in the loop nest tree.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t0 = Constant(name='t0')
t1 = Scalar(name='t1')
e = Function(name='e', shape=(3,), dimensions=(x,), space_order=0)
f = Function(name='f', shape=(3, 3), dimensions=(x, y), space_order=0)
g = Function(name='g', grid=grid, space_order=0)
h = Function(name='h', grid=grid, space_order=0)
eq0 = Eq(t1, e*1.)
eq1 = Eq(f, t0*3. + t1)
eq2 = Eq(h, g + 4. + f*5.)
op = Operator([eq0, eq1, eq2], opt='noop')
trees = retrieve_iteration_tree(op)
assert len(trees) == 3
outer, middle, inner = trees
assert len(outer) == 1 and len(middle) == 2 and len(inner) == 3
assert outer[0] == middle[0] == inner[0]
assert middle[1] == inner[1]
assert outer[-1].nodes[0].exprs[0].expr.rhs == diff2sympy(indexify(eq0.rhs))
assert middle[-1].nodes[0].exprs[0].expr.rhs == diff2sympy(indexify(eq1.rhs))
assert inner[-1].nodes[0].exprs[0].expr.rhs == diff2sympy(indexify(eq2.rhs))
def test_equations_emulate_bc(self):
    """
    Test that bc-like equations get inserted into the same loop nest
    as the "main" equations.
    """
    grid = Grid(shape=(3, 3, 3))
    x, y, z = grid.dimensions
    time = grid.time_dim
    t0 = Scalar(name='t0')
    a = Function(name='a', grid=grid)
    b = TimeFunction(name='b', grid=grid, save=6)
    main = Eq(b[time + 1, x, y, z], b[time - 1, x, y, z] + a[x, y, z] + 3.*t0)
    # Boundary-condition-like equations, written in low-level indexed notation,
    # each pinning one spatial index to 0
    bcs = [Eq(b[time, 0, y, z], 0.),
           Eq(b[time, x, 0, z], 0.),
           Eq(b[time, x, y, 0], 0.)]
    op = Operator([main] + bcs, opt='noop')
    trees = retrieve_iteration_tree(op)
    assert len(trees) == 4
    # All four trees (main + 3 bcs) share the very same outermost Iteration
    assert all(id(trees[0][0]) == id(i[0]) for i in trees)
def test_different_section_nests(self):
    """
    Test that two equations writing to unrelated objects (a heap Array and
    a TimeFunction) end up in two separate loop nests.
    """
    grid = Grid((3, 3, 3))
    tu = TimeFunction(name='tu', grid=grid, space_order=4)
    t0 = Scalar(name='t0')
    t1 = Scalar(name='t1')
    ti0 = Array(name='ti0', shape=(3, 5, 7), dimensions=grid.dimensions,
                scope='heap').indexify()
    eq1 = Eq(ti0, t0*3.)
    eq2 = Eq(tu, ti0 + t1*3.)
    op = Operator([eq1, eq2], opt='noop')
    trees = retrieve_iteration_tree(op)
    assert len(trees) == 2
    # Each equation's right-hand side lives in its own tree
    assert trees[0][-1].nodes[0].exprs[0].expr.rhs == eq1.rhs
    assert trees[1][-1].nodes[0].exprs[0].expr.rhs == eq2.rhs
@pytest.mark.parametrize('exprs', [
['Eq(ti0[x,y,z], ti0[x,y,z] + t0*2.)', 'Eq(ti0[0,0,z], 0.)'],
['Eq(ti0[x,y,z], ti0[x,y,z-1] + t0*2.)', 'Eq(ti0[0,0,z], 0.)'],
['Eq(ti0[x,y,z], ti0[x,y,z] + t0*2.)', 'Eq(ti0[0,y,0], 0.)'],
['Eq(ti0[x,y,z], ti0[x,y,z] + t0*2.)', 'Eq(ti0[0,y,z], 0.)'],
])
def test_directly_indexed_expression(self, exprs):
"""
Test that equations using integer indices are inserted in the right
loop nest, at the right loop nest depth.
"""
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions # noqa
ti0 = Function(name='ti0', grid=grid, space_order=0) # noqa
t0 = Scalar(name='t0') # noqa
eqs = [eval(exprs[0]), eval(exprs[1])]
op = Operator(eqs, opt='noop')
trees = retrieve_iteration_tree(op)
assert len(trees) == 2
assert trees[0][-1].nodes[0].exprs[0].expr.rhs == eqs[0].rhs
assert trees[1][-1].nodes[0].exprs[0].expr.rhs == eqs[1].rhs
@pytest.mark.parametrize('shape', [(11, 11), (11, 11, 11)])
def test_equations_mixed_functions(self, shape):
"""
Test that equations using a mixture of Function and TimeFunction objects
are embedded within the same time loop.
"""
dims0 = Grid(shape).dimensions
for dims in permutations(dims0):
grid = Grid(shape=shape, dimensions=dims, dtype=np.float64)
time = grid.time_dim
a = TimeFunction(name='a', grid=grid, time_order=2, space_order=2)
p_aux = Dimension(name='p_aux')
b = Function(name='b', shape=shape + (10,), dimensions=dims + (p_aux,),
space_order=2, dtype=np.float64)
b.data_with_halo[:] = 1.0
b2 = Function(name='b2', shape=(10,) + shape, dimensions=(p_aux,) + dims,
space_order=2, dtype=np.float64)
b2.data_with_halo[:] = 1.0
eqns = [Eq(a.forward, a.laplace + 1.),
Eq(b, time*b*a + b)]
eqns2 = [Eq(a.forward, a.laplace + 1.),
Eq(b2, time*b2*a + b2)]
subs = {d.spacing: v for d, v in zip(dims0, [2.5, 1.5, 2.0][:grid.dim])}
op = Operator(eqns, subs=subs, opt='noop')
trees = retrieve_iteration_tree(op)
assert len(trees) == 2
assert all(trees[0][i] is trees[1][i] for i in range(3))
op2 = Operator(eqns2, subs=subs, opt='noop')
trees = retrieve_iteration_tree(op2)
assert len(trees) == 2
# Verify both operators produce the same result
op(time=10)
a.data_with_halo[:] = 0.
op2(time=10)
for i in range(10):
assert(np.allclose(b2.data[i, ...].reshape(-1),
b.data[..., i].reshape(-1),
rtol=1e-9))
def test_equations_mixed_timedim_stepdim(self):
    """
    Test that two equations, one using a TimeDimension, the other a derived
    SteppingDimension, end up in the same loop nest.
    """
    grid = Grid(shape=(3, 3, 3))
    x, y, z = grid.dimensions
    time = grid.time_dim
    t = grid.stepping_dim
    u1 = TimeFunction(name='u1', grid=grid)
    u2 = TimeFunction(name='u2', grid=grid, save=2)
    # `u1` is indexed via the SteppingDimension `t`; `u2` via the
    # TimeDimension `time`
    eqn_1 = Eq(u1[t+1, x, y, z], u1[t, x, y, z] + 1.)
    eqn_2 = Eq(u2[time+1, x, y, z], u2[time, x, y, z] + 1.)
    op = Operator([eqn_1, eqn_2], opt='topofuse')
    trees = retrieve_iteration_tree(op)
    # A single loop nest containing both expressions is expected
    assert len(trees) == 1
    assert len(trees[0][-1].nodes[0].exprs) == 2
    assert trees[0][-1].nodes[0].exprs[0].write == u1
    assert trees[0][-1].nodes[0].exprs[1].write == u2
def test_flow_detection(self):
    """
    Test detection of spatial flow directions inside a time loop.

    Stencil uses values at new timestep as well as those at previous ones.
    This forces an evaluation order onto x.

    Weights are:

          x=0     x=1     x=2     x=3
    t=n       2 ---3
                v /
    t=n+1    o--+----4

    Flow dependency should traverse x in the negative direction

          x=2     x=3     x=4     x=5      x=6
    t=0    0 --- 0 --  1 -- 0
            v  /    v /    v /
    t=1    44 -+--- 11 -+--- 2--+ -- 0
    """
    grid = Grid(shape=(10, 10))
    x, y = grid.dimensions
    u = TimeFunction(name='u', grid=grid, save=2, time_order=1, space_order=0)
    # The access to `u.forward` at `x + x.spacing` creates a flow dependence
    # that forces a backward sweep along x
    step = Eq(u.forward, 2*u
              + 3*u.subs(x, x+x.spacing)
              + 4*u.forward.subs(x, x+x.spacing))
    op = Operator(step)
    # Single point source at (5, 5) at the first timestep
    u.data[:] = 0.0
    u.data[0, 5, 5] = 1.0
    op.apply(time_M=0)
    # The source propagates towards decreasing x, amplified by 4 per step
    assert u.data[1, 5, 5] == 2
    assert u.data[1, 4, 5] == 11
    assert u.data[1, 3, 5] == 44
    assert u.data[1, 2, 5] == 4*44
    assert u.data[1, 1, 5] == 4*4*44
    assert u.data[1, 0, 5] == 4*4*4*44
    # Nothing leaks outside the expected region
    assert np.all(u.data[1, 6:, :] == 0)
    assert np.all(u.data[1, :, 0:5] == 0)
    assert np.all(u.data[1, :, 6:] == 0)
def test_scheduling_sparse_functions(self):
"""Tests loop scheduling in presence of sparse functions."""
grid = Grid((10, 10))
time = grid.time_dim
u1 = TimeFunction(name="u1", grid=grid, save=10, time_order=2)
u2 = TimeFunction(name="u2", grid=grid, time_order=2)
sf1 = SparseTimeFunction(name='sf1', grid=grid, npoint=1, nt=10)
sf2 = SparseTimeFunction(name='sf2', grid=grid, npoint=1, nt=10)
# Deliberately inject into u1, rather than u1.forward, to create a WAR w/ eqn3
eqn1 = Eq(u1.forward, u1 + 2.0 - u1.backward)
eqn2 = sf1.inject(u1, expr=sf1)
eqn3 = Eq(u2.forward, u2 + 2*u2.backward - u1.dt2)
eqn4 = sf2.interpolate(u2)
# Note: opts disabled only because with OpenMP otherwise there might be more
# `trees` than 4
op = Operator([eqn1] + eqn2 + [eqn3] + eqn4, opt=('noop', {'openmp': False}))
trees = retrieve_iteration_tree(op)
assert len(trees) == 4
# Time loop not shared due to the WAR
assert trees[0][0].dim is time and trees[0][0] is trees[1][0] # this IS shared
assert trees[1][0] is not trees[2][0]
assert trees[2][0].dim is time and trees[2][0] is trees[3][0] # this IS shared
# Now single, shared time loop expected
eqn2 = sf1.inject(u1.forward, expr=sf1)
op = Operator([eqn1] + eqn2 + [eqn3] + eqn4, opt=('noop', {'openmp': False}))
trees = retrieve_iteration_tree(op)
assert len(trees) == 4
assert all(trees[0][0] is i[0] for i in trees)
def test_scheduling_with_free_dims(self):
"""Tests loop scheduling in presence of free dimensions."""
grid = Grid((4, 4))
time = grid.time_dim
x, y = grid.dimensions
u = TimeFunction(name="u", grid=grid)
f = Function(name="f", grid=grid)
eq0 = Eq(u.forward, u + 1)
eq1 = Eq(f, time*2)
# Note that `eq1` doesn't impose any constraint on the ordering of
# the `time` Dimension w.r.t. the `grid` Dimensions, as `time` appears
# as a free Dimension and not within an array access such as [time, x, y]
op = Operator([eq0, eq1], opt='topofuse')
trees = retrieve_iteration_tree(op)
assert len(trees) == 1
tree = trees[0]
assert len(tree) == 3
assert tree[0].dim is time
assert tree[1].dim is x
assert tree[2].dim is y
import numpy as np
import pytest
from itertools import permutations
from conftest import skipif
from devito import (Grid, Eq, Operator, Constant, Function, TimeFunction,
SparseFunction, SparseTimeFunction, Dimension, error, SpaceDimension,
NODE, CELL, dimensions, configuration, TensorFunction,
TensorTimeFunction, VectorFunction, VectorTimeFunction, switchconfig)
from devito import Le, Lt, Ge, Gt
from devito.exceptions import InvalidOperator
from devito.finite_differences.differentiable import diff2sympy
from devito.ir.equations import ClusterizedEq
from devito.ir.equations.algorithms import lower_exprs
from devito.ir.iet import (Callable, Conditional, Expression, Iteration, TimedList,
FindNodes, IsPerfectIteration, retrieve_iteration_tree)
from devito.ir.support import Any, Backward, Forward
from devito.passes.iet import DataManager
from devito.symbolics import ListInitializer, indexify, retrieve_indexed
from devito.tools import flatten, powerset, timed_region
from devito.types import Array, Scalar
def dimify(dimensions):
    """Create a tuple of SpaceDimensions out of a space-separated string of names."""
    assert isinstance(dimensions, str)
    names = dimensions.split()
    return tuple(SpaceDimension(name=name) for name in names)
def symbol(name, dimensions, value=0., shape=(3, 5), mode='function'):
    """
    Create a Function with its data (halo included) initialised to `value`;
    return either the Function itself or, with ``mode='indexed'``, its
    indexified counterpart.
    """
    assert mode in ('function', 'indexed')
    f = Function(name=name, dimensions=dimensions, shape=shape)
    f.data_with_halo[:] = value
    if mode == 'indexed':
        return f.indexify()
    return f
class TestOperatorSetup(object):

    """Tests for construction-time Operator configuration."""

    def test_platform_compiler_language(self):
        """
        Test Operator construction when `platform`, `compiler` and `language`
        are supplied explicitly, overriding the global `configuration`.
        """
        grid = Grid(shape=(3, 3, 3))
        u = TimeFunction(name='u', grid=grid)

        # Unknown platform name -> InvalidOperator
        try:
            Operator(Eq(u, u + 1), platform='asga')
            assert False
        except InvalidOperator:
            assert True

        op1 = Operator(Eq(u, u + 1))
        op2 = Operator(Eq(u, u + 1), platform='nvidiaX')
        # The device Operator generates different code (offloading pragmas)
        assert str(op1) != str(op2)
        assert '#pragma omp target' in str(op2)
        # ... and carries its own compiler instance
        assert op1._compiler is not op2._compiler

        # Explicit platform + compiler is accepted
        Operator(Eq(u, u + 1), platform='nvidiaX', compiler='gcc')
        # Unknown compiler name -> InvalidOperator
        try:
            Operator(Eq(u, u + 1), platform='nvidiaX', compiler='asf')
            assert False
        except InvalidOperator:
            assert True

        # Explicit language selection
        op3 = Operator(Eq(u, u + 1), platform='nvidiaX', language='openacc')
        assert '#pragma acc parallel' in str(op3)
        # The Operator's compiler is a distinct object of the same class as
        # the globally configured one
        assert op3._compiler is not configuration['compiler']
        assert (op3._compiler.__class__.__name__ ==
                configuration['compiler'].__class__.__name__)

        # Incompatible platform/language combination -> InvalidOperator
        try:
            Operator(Eq(u, u + 1), platform='bdw', language='openacc')
            assert False
        except InvalidOperator:
            assert True

        # The `language` argument takes precedence over `switchconfig`
        op4 = switchconfig(language='openmp')(Operator)(Eq(u, u + 1), language='C')
        assert '#pragma omp for' not in str(op4)

    def test_opt_options(self):
        """Test that malformed `opt` arguments raise InvalidOperator."""
        grid = Grid(shape=(3, 3, 3))
        u = TimeFunction(name='u', grid=grid)

        # NOTE: `('aaa')` is just the string 'aaa', not a 1-tuple
        try:
            Operator(Eq(u, u + 1), opt=('aaa'))
            assert False
        except InvalidOperator:
            assert True

        # Unknown optimization option within a known mode
        try:
            Operator(Eq(u, u + 1), opt=('advanced', {'aaa': 1}))
            assert False
        except InvalidOperator:
            assert True

    def test_compiler_uniqueness(self):
        """Test that each Operator owns a distinct Compiler instance."""
        grid = Grid(shape=(3, 3, 3))
        u = TimeFunction(name='u', grid=grid)

        eqns = [Eq(u.forward, u + 1)]

        op0 = Operator(eqns)
        op1 = Operator(eqns)
        op2 = Operator(eqns, compiler='gcc')

        assert op0._compiler is not op1._compiler
        assert op0._compiler is not op2._compiler
        assert op1._compiler is not op2._compiler
class TestCodeGen(object):
def test_parameters(self):
grid = Grid(shape=(3,))
a_dense = Function(name='a_dense', grid=grid)
const = Constant(name='constant')
eqn = Eq(a_dense, a_dense + 2.*const)
op = Operator(eqn, openmp=False)
assert len(op.parameters) == 5
assert op.parameters[0].name == 'a_dense'
assert op.parameters[0].is_Tensor
assert op.parameters[1].name == 'constant'
assert op.parameters[1].is_Scalar
assert op.parameters[2].name == 'x_M'
assert op.parameters[2].is_Scalar
assert op.parameters[3].name == 'x_m'
assert op.parameters[3].is_Scalar
assert op.parameters[4].name == 'timers'
assert op.parameters[4].is_Object
assert 'a_dense[x + 1] = 2.0F*constant + a_dense[x + 1]' in str(op)
@pytest.mark.parametrize('expr, so, to, expected', [
('Eq(u.forward,u+1)', 0, 1, 'Eq(u[t+1,x,y,z],u[t,x,y,z]+1)'),
('Eq(u.forward,u+1)', 1, 1, 'Eq(u[t+1,x+1,y+1,z+1],u[t,x+1,y+1,z+1]+1)'),
('Eq(u.forward,u+1)', 1, 2, 'Eq(u[t+1,x+1,y+1,z+1],u[t,x+1,y+1,z+1]+1)'),
('Eq(u.forward,u+u.backward + m)', 8, 2,
'Eq(u[t+1,x+8,y+8,z+8],m[x,y,z]+u[t,x+8,y+8,z+8]+u[t-1,x+8,y+8,z+8])')
])
def test_index_shifting(self, expr, so, to, expected):
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions
t = grid.stepping_dim
u = TimeFunction(name='u', grid=grid, space_order=so, time_order=to)
m = Function(name='m', grid=grid, space_order=0)
expr = eval(expr)
with timed_region('x'):
expr = Operator._lower_exprs([expr])[0]
assert str(expr).replace(' ', '') == expected
@pytest.mark.parametrize('expr, so, expected', [
('Lt(0.1*(g1 + g2), 0.2*(g1 + g2))', 0,
'0.1*g1[x,y]+0.1*g2[x,y]<0.2*g1[x,y]+0.2*g2[x,y]'),
('Le(0.1*(g1 + g2), 0.2*(g1 + g2))', 1,
'0.1*g1[x+1,y+1]+0.1*g2[x+1,y+1]<=0.2*g1[x+1,y+1]+0.2*g2[x+1,y+1]'),
('Ge(0.1*(g1 + g2), 0.2*(g1 + g2))', 2,
'0.1*g1[x+2,y+2]+0.1*g2[x+2,y+2]>=0.2*g1[x+2,y+2]+0.2*g2[x+2,y+2]'),
('Gt(0.1*(g1 + g2), 0.2*(g1 + g2))', 4,
'0.1*g1[x+4,y+4]+0.1*g2[x+4,y+4]>0.2*g1[x+4,y+4]+0.2*g2[x+4,y+4]'),
])
def test_relationals_index_shifting(self, expr, so, expected):
grid = Grid(shape=(3, 3))
g1 = Function(name='g1', grid=grid, space_order=so)
g2 = Function(name='g2', grid=grid, space_order=so)
expr = eval(expr)
expr = lower_exprs(expr)
assert str(expr).replace(' ', '') == expected
@pytest.mark.parametrize('expr,exp_uindices,exp_mods', [
    ('Eq(v.forward, u[0, x, y, z] + v + 1)', [(0, 5), (2, 5)], {'v': 5}),
    ('Eq(v.forward, u + v + 1)', [(0, 5), (2, 5), (0, 2)], {'v': 5, 'u': 2}),
])
def test_multiple_steppers(self, expr, exp_uindices, exp_mods):
    """Test generation of multiple, mixed time-stepping (modulo) indices."""
    grid = Grid(shape=(3, 3, 3))
    x, y, z = grid.dimensions  # noqa: needed by `eval` below
    time = grid.time_dim

    u = TimeFunction(name='u', grid=grid)  # noqa: used via `eval`
    v = TimeFunction(name='v', grid=grid, time_order=4)  # noqa: used via `eval`

    op = Operator(eval(expr), opt='noop')

    iters = FindNodes(Iteration).visit(op)
    time_iter = [i for i in iters if i.dim.is_Time]
    assert len(time_iter) == 1
    time_iter = time_iter[0]

    # The expected (offset, modulo) signatures must all appear among the
    # time Iteration's unbounded indices
    signatures = [(i._offset, i._modulo) for i in time_iter.uindices]
    assert len(signatures) == len(exp_uindices)
    exp_uindices = [(time + i, j) for i, j in exp_uindices]
    assert all(i in signatures for i in exp_uindices)

    # Check the modulo of each time access in the generated expressions.
    # FIX: the original `assert(<genexpr> for i in ...)` asserted a generator
    # object -- always truthy -- so the check never ran. It is now executed
    # for real. Accesses with a constant time index (e.g. `u[0, x, y, z]`)
    # carry no modulo, hence the `getattr` guard.
    exprs = [i.expr for i in FindNodes(Expression).visit(op)]
    checked = set()
    for idx in flatten(retrieve_indexed(e) for e in exprs):
        name = idx.function.name
        if name not in exp_mods:
            continue
        stepper = idx.indices[idx.function._time_position]
        if getattr(stepper, 'is_Modulo', False):
            assert stepper.modulo == exp_mods[name]
            checked.add(name)
    # Every function listed in `exp_mods` must have been verified at least once
    assert checked == set(exp_mods)
def test_lower_stepping_dims_with_mutiple_iterations(self):
grid = Grid(shape=(4, 4))
f = Function(name="f", grid=grid, space_order=4)
g = Function(name="g", grid=grid, space_order=4)
h = TimeFunction(name="h", grid=grid, space_order=4, time_order=2)
f.data[:] = 0.0
h.data[:] = 0.0
eqn = [Eq(f, h + 1), Eq(g, f),
Eq(h.forward, h + g + 1)]
op = Operator(eqn)
for iter in [i for i in FindNodes(Iteration).visit(op) if i.dim.is_Time]:
exprtimeindices = set([a.indices[a.function._time_position] for
expr in FindNodes(Expression).visit(iter) for
a in retrieve_indexed(expr.expr) if
isinstance(a.function, TimeFunction)])
assert (exprtimeindices == set(iter.uindices))
assert(all([i.is_Modulo for i in exprtimeindices]))
op.apply(time_M=10)
assert np.all(h.data[0, :] == 18)
assert np.all(h.data[1, :] == 20)
assert np.all(h.data[2, :] == 22)
@skipif('device')
def test_timedlist_wraps_time_if_parallel(self):
    """
    Test that the Section/TimedList IET nodes wrap the time loop (and, with
    OpenMP, the enclosing parallel region).
    """
    grid = Grid(shape=(3, 3, 3))

    u = TimeFunction(name='u', grid=grid, save=3)

    op = Operator(Eq(u, u + 1))

    assert op.body.body[1].body[0].is_Section
    assert isinstance(op.body.body[1].body[0].body[0], TimedList)
    timedlist = op.body.body[1].body[0].body[0]
    if configuration['language'] == 'openmp':
        ompreg = timedlist.body[0]
        assert ompreg.body[0].dim is grid.time_dim
    else:
        # FIX: this branch previously contained a bare comparison expression
        # (no `assert`), so it silently checked nothing
        assert timedlist.body[0].dim is grid.time_dim
def test_nested_lowering(self):
grid = Grid(shape=(4, 4), dtype=np.int32)
x, y = grid.dimensions
x0, y0 = dimensions('x0 y0')
u0 = Function(name="u0", grid=grid)
u1 = Function(name="u1", shape=grid.shape, dimensions=(x0, y0), dtype=np.int32)
u2 = Function(name="u2", grid=grid)
u0.data[:2, :2] = 1
u0.data[2:, 2:] = 2
u1.data[:, :] = 1
u2.data[:, :] = 1
eq0 = Eq(u0, u0[u1[x0+1, y0+2], u2[x, u2]], subdomain=grid.interior)
eq1 = Eq(u0, u0[u1[x0+1, y0+2], u2[x, u2[x, y]]], subdomain=grid.interior)
op0 = Operator(eq0)
op1 = Operator(eq1)
op0.apply()
assert str(op0.ccode) == str(op1.ccode)
assert np.all(u0.data[0, 3] == 0) and np.all(u0.data[3, 0] == 0)
assert np.all(u0.data[:2, :2] == 1) and np.all(u0.data[1:3, 1:3] == 1)
assert np.all(u0.data[2:3, 3] == 2) and np.all(u0.data[3, 2:3] == 2)
def test_nested_lowering_indexify(self):
grid = Grid(shape=(4, 4), dtype=np.int32)
x, y = grid.dimensions
u0 = Function(name="u0", grid=grid)
u1 = Function(name="u1", grid=grid)
u2 = Function(name="u2", grid=grid)
u0.data[:, :] = 2
u1.data[:, :] = 1
u2.data[:, :] = 1
eq0 = Eq(u0._subs(x, u1), 2*u0)
eq1 = Eq(u0._subs(x, u1._subs(y, u2) + 1), 4*u0)
op0 = Operator(eq0)
op0.apply()
op1 = Operator(eq1)
op1.apply()
assert np.all(np.all(u0.data[i, :] == 2) for i in [0, 3])
assert np.all(u0.data[1, :] == 4)
assert np.all(u0.data[2, :] == 8)
class TestArithmetic(object):
@pytest.mark.parametrize('expr, result', [
('Eq(a, a + b + 5.)', 10.),
('Eq(a, b - a)', 1.),
('Eq(a, 4 * (b * a))', 24.),
('Eq(a, (6. / b) + (8. * a))', 18.),
])
@pytest.mark.parametrize('mode', ['function'])
def test_flat(self, expr, result, mode):
i, j = dimify('i j')
a = symbol(name='a', dimensions=(i, j), value=2., mode=mode)
b = symbol(name='b', dimensions=(i, j), value=3., mode=mode)
fa = a.base.function if mode == 'indexed' else a
fb = b.base.function if mode == 'indexed' else b
eqn = eval(expr)
Operator(eqn)(a=fa, b=fb)
assert np.allclose(fa.data, result, rtol=1e-12)
@pytest.mark.parametrize('expr, result', [
('Eq(a, a + b + 5.)', 10.),
('Eq(a, b - a)', 1.),
('Eq(a, 4 * (b * a))', 24.),
('Eq(a, (6. / b) + (8. * a))', 18.),
])
@pytest.mark.parametrize('mode', ['function', 'indexed'])
def test_deep(self, expr, result, mode):
i, j, k, l = dimify('i j k l')
a = symbol(name='a', dimensions=(i, j, k, l), shape=(3, 5, 7, 6),
value=2., mode=mode)
b = symbol(name='b', dimensions=(j, k), shape=(5, 7),
value=3., mode=mode)
fa = a.base.function if mode == 'indexed' else a
fb = b.base.function if mode == 'indexed' else b
eqn = eval(expr)
Operator(eqn)(a=fa, b=fb)
assert np.allclose(fa.data, result, rtol=1e-12)
@pytest.mark.parametrize('expr, result', [
('Eq(a[j, l], a[j - 1 , l] + 1.)',
np.meshgrid(np.arange(2., 8.), np.arange(2., 7.))[1]),
('Eq(a[j, l], a[j, l - 1] + 1.)',
np.meshgrid(np.arange(2., 8.), np.arange(2., 7.))[0]),
])
def test_indexed_increment(self, expr, result):
j, l = dimify('j l')
a = symbol(name='a', dimensions=(j, l), value=1., shape=(5, 6),
mode='indexed').base
fa = a.function
fa.data[:] = 0.
eqn = eval(expr)
Operator(eqn)(a=fa)
assert np.allclose(fa.data, result, rtol=1e-12)
@pytest.mark.parametrize('expr, result', [
('Eq(a[j, l], b[j - 1 , l] + 1.)', np.zeros((5, 6)) + 3.),
('Eq(a[j, l], b[j , l - 1] + 1.)', np.zeros((5, 6)) + 3.),
('Eq(a[j, l], b[j - 1, l - 1] + 1.)', np.zeros((5, 6)) + 3.),
('Eq(a[j, l], b[j + 1, l + 1] + 1.)', np.zeros((5, 6)) + 3.),
])
def test_indexed_stencil(self, expr, result):
j, l = dimify('j l')
a = symbol(name='a', dimensions=(j, l), value=0., shape=(5, 6),
mode='indexed').base
fa = a.function
b = symbol(name='b', dimensions=(j, l), value=2., shape=(5, 6),
mode='indexed').base
fb = b.function
eqn = eval(expr)
Operator(eqn)(a=fa, b=fb)
assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
@pytest.mark.parametrize('expr, result', [
('Eq(a[1, j, l], a[0, j - 1 , l] + 1.)', np.zeros((5, 6)) + 3.),
('Eq(a[1, j, l], a[0, j , l - 1] + 1.)', np.zeros((5, 6)) + 3.),
('Eq(a[1, j, l], a[0, j - 1, l - 1] + 1.)', np.zeros((5, 6)) + 3.),
('Eq(a[1, j, l], a[0, j + 1, l + 1] + 1.)', np.zeros((5, 6)) + 3.),
])
def test_indexed_buffered(self, expr, result):
i, j, l = dimify('i j l')
a = symbol(name='a', dimensions=(i, j, l), value=2., shape=(3, 5, 6),
mode='indexed').base
fa = a.function
eqn = eval(expr)
Operator(eqn)(a=fa)
assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
@pytest.mark.parametrize('expr, result', [
('Eq(a[1, j, l], a[0, j - 1 , l] + 1.)', np.zeros((5, 6)) + 3.),
])
def test_indexed_open_loops(self, expr, result):
i, j, l = dimify('i j l')
a = Function(name='a', dimensions=(i, j, l), shape=(3, 5, 6))
fa = a.function
fa.data[0, :, :] = 2.
eqn = eval(expr)
Operator(eqn)(a=fa)
assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
def test_indexed_w_indirections(self):
grid = Grid(shape=(10, 10))
x, y = grid.dimensions
p_poke = Dimension('p_src')
d = Dimension('d')
npoke = 1
u = Function(name='u', grid=grid, space_order=0)
coordinates = Function(name='coordinates', dimensions=(p_poke, d),
shape=(npoke, grid.dim), space_order=0, dtype=np.int32)
coordinates.data[0, 0] = 4
coordinates.data[0, 1] = 3
poke_eq = Eq(u[coordinates[p_poke, 0], coordinates[p_poke, 1]], 1.0)
op = Operator(poke_eq)
op.apply()
ix, iy = np.where(u.data == 1.)
assert len(ix) == len(iy) == 1
assert ix[0] == 4 and iy[0] == 3
assert np.all(u.data[0:3] == 0.) and np.all(u.data[5:] == 0.)
assert np.all(u.data[:, 0:3] == 0.) and np.all(u.data[:, 5:] == 0.)
def test_constant_time_dense(self):
i, j = dimify('i j')
const = Constant(name='truc', value=2.)
a = Function(name='a', shape=(20, 20), dimensions=(i, j))
a.data[:] = 2.
eqn = Eq(a, a + 2.*const)
op = Operator(eqn)
op.apply(a=a, truc=const)
assert(np.allclose(a.data, 6.))
op.apply(a=a, truc=Constant(name='truc2', value=3.))
assert(np.allclose(a.data, 12.))
def test_incs_same_lhs(self):
    """Two consecutive updates to the same Function must both take effect."""
    grid = Grid(shape=(10, 10))
    u = Function(name='u', grid=grid, space_order=0)

    eqns = [Eq(u, u+1.0), Eq(u, u+2.0)]
    op = Operator(eqns)

    u.data[:] = 0.0
    op.apply()
    # 0 + 1 + 2 == 3 everywhere
    assert np.all(u.data[:] == 3)
def test_sparsefunction_inject(self):
grid = Grid(shape=(11, 11))
u = Function(name='u', grid=grid, space_order=0)
sf1 = SparseFunction(name='s', grid=grid, npoint=1)
op = Operator(sf1.inject(u, expr=sf1))
assert sf1.data.shape == (1, )
sf1.coordinates.data[0, :] = (0.6, 0.6)
sf1.data[0] = 5.0
u.data[:] = 0.0
op.apply()
assert u.data[6, 6] == pytest.approx(5.0)
assert np.sum(u.data) == pytest.approx(5.0)
def test_sparsefunction_interp(self):
grid = Grid(shape=(11, 11))
u = Function(name='u', grid=grid, space_order=0)
sf1 = SparseFunction(name='s', grid=grid, npoint=1)
op = Operator(sf1.interpolate(u))
assert sf1.data.shape == (1, )
sf1.coordinates.data[0, :] = (0.45, 0.45)
sf1.data[:] = 0.0
u.data[:] = 0.0
u.data[4, 4] = 4.0
op.apply()
assert sf1.data[0] == pytest.approx(1.0)
def test_sparsetimefunction_interp(self):
grid = Grid(shape=(11, 11))
u = TimeFunction(name='u', grid=grid, time_order=2, save=5, space_order=0)
sf1 = SparseTimeFunction(name='s', grid=grid, npoint=1, nt=5)
op = Operator(sf1.interpolate(u))
assert sf1.data.shape == (5, 1)
sf1.coordinates.data[0, :] = (0.45, 0.45)
sf1.data[:] = 0.0
u.data[:] = 0.0
u.data[:, 4, 4] = 8*np.arange(5)+4
op.apply(time_m=1, time_M=3)
assert np.all(sf1.data[:, 0] == pytest.approx([0.0, 3.0, 5.0, 7.0, 0.0]))
def test_sparsetimefunction_inject(self):
grid = Grid(shape=(11, 11))
u = TimeFunction(name='u', grid=grid, time_order=2, save=5, space_order=0)
sf1 = SparseTimeFunction(name='s', grid=grid, npoint=1, nt=5)
op = Operator(sf1.inject(u, expr=3*sf1))
assert sf1.data.shape == (5, 1)
sf1.coordinates.data[0, :] = (0.45, 0.45)
sf1.data[:, 0] = np.arange(5)
u.data[:] = 0.0
op.apply(time_m=1, time_M=3)
assert np.all(u.data[1, 4:6, 4:6] == pytest.approx(0.75))
assert np.all(u.data[2, 4:6, 4:6] == pytest.approx(1.5))
assert np.all(u.data[3, 4:6, 4:6] == pytest.approx(2.25))
assert np.sum(u.data[:]) == pytest.approx(4*0.75+4*1.5+4*2.25)
def test_sparsetimefunction_inject_dt(self):
grid = Grid(shape=(11, 11))
u = TimeFunction(name='u', grid=grid, time_order=2, save=5, space_order=0)
sf1 = SparseTimeFunction(name='s', grid=grid, npoint=1, nt=5, time_order=2)
op = Operator(sf1.inject(u, expr=3*sf1.dt))
assert sf1.data.shape == (5, 1)
sf1.coordinates.data[0, :] = (0.45, 0.45)
sf1.data[:, 0] = np.arange(5)
u.data[:] = 0.0
op.apply(time_m=1, time_M=3, dt=1)
assert np.all(u.data[1:4, 4:6, 4:6] == pytest.approx(0.75))
assert np.sum(u.data[:]) == pytest.approx(12*0.75)
@pytest.mark.parametrize('func1', [TensorFunction, TensorTimeFunction,
VectorFunction, VectorTimeFunction])
def test_tensor(self, func1):
grid = Grid(tuple([5]*3))
f1 = func1(name="f1", grid=grid)
op1 = Operator(Eq(f1, f1.dx))
op2 = Operator([Eq(f, f.dx) for f in f1.values()])
assert str(op1.ccode) == str(op2.ccode)
class TestAllocation(object):
@pytest.mark.parametrize('shape', [(20, 20),
(20, 20, 20),
(20, 20, 20, 20)])
def test_first_touch(self, shape):
dimensions = dimify('i j k l')[:len(shape)]
grid = Grid(shape=shape, dimensions=dimensions)
m = Function(name='m', grid=grid, first_touch=True)
assert(np.allclose(m.data, 0))
m2 = Function(name='m2', grid=grid, first_touch=False)
assert(np.allclose(m2.data, 0))
assert(np.array_equal(m.data, m2.data))
@pytest.mark.parametrize('ndim', [2, 3])
def test_staggered(self, ndim):
grid = Grid(shape=tuple([11]*ndim))
for stagg in tuple(powerset(grid.dimensions))[1::] + (NODE, CELL):
f = Function(name='f', grid=grid, staggered=stagg)
assert f.data.shape == tuple([11]*ndim)
g = Function(name='g', grid=grid)
index = tuple(5 for _ in f.dimensions)
set_f = Eq(f[index], 2.)
set_g = Eq(g[index], 3.)
Operator([set_f, set_g])()
assert f.data[index] == 2.
@pytest.mark.parametrize('ndim', [2, 3])
def test_staggered_time(self, ndim):
grid = Grid(shape=tuple([11]*ndim))
for stagg in tuple(powerset(grid.dimensions))[1::] + (NODE,):
f = TimeFunction(name='f', grid=grid, staggered=stagg)
assert f.data.shape[1:] == tuple([11]*ndim)
g = TimeFunction(name='g', grid=grid)
index = tuple([0] + [5 for _ in f.dimensions[1:]])
set_f = Eq(f[index], 2.)
set_g = Eq(g[index], 3.)
Operator([set_f, set_g])()
assert f.data[index] == 2.
class TestApplyArguments(object):
def verify_arguments(self, arguments, expected):
    """
    Check that every entry in `expected` matches the corresponding runtime
    value in `arguments`. Function-like values are compared through their
    data (domain region only); everything else via plain equality.
    """
    for name, v in expected.items():
        if isinstance(v, (Function, SparseFunction)):
            # View the C-level argument as an ndarray and compare the
            # domain region against the Function's data
            condition = v._C_as_ndarray(arguments[name])[v._mask_domain] == v.data
            condition = condition.all()
        else:
            condition = arguments[name] == v
        if not condition:
            # Emit a readable diagnostic before failing the assertion
            error('Wrong argument %s: expected %s, got %s' %
                  (name, v, arguments[name]))
        assert condition
def verify_parameters(self, parameters, expected):
    """
    Check that the names of `parameters` coincide with `expected`, modulo
    boilerplate parameters (e.g. the profiling timers).
    """
    boilerplate = ['timers']
    parameters = [p.name for p in parameters]
    # No expected parameter may be missing...
    for exp in expected:
        if exp not in parameters + boilerplate:
            error("Missing parameter: %s" % exp)
        assert exp in parameters + boilerplate
    # ... and no unexpected parameter may appear
    extra = [p for p in parameters if p not in expected and p not in boilerplate]
    if len(extra) > 0:
        error("Redundant parameters: %s" % str(extra))
    assert len(extra) == 0
def test_default_functions(self):
grid = Grid(shape=(5, 6, 7))
f = TimeFunction(name='f', grid=grid)
g = Function(name='g', grid=grid)
op = Operator(Eq(f.forward, g + f), openmp=False)
expected = {
'x_m': 0, 'x_M': 4,
'y_m': 0, 'y_M': 5,
'z_m': 0, 'z_M': 6,
'f': f, 'g': g,
}
self.verify_arguments(op.arguments(time=4), expected)
exp_parameters = ['f', 'g', 'x_m', 'x_M', 'y_m', 'y_M', 'z_m', 'z_M',
'x0_blk0_size', 'y0_blk0_size', 'time_m', 'time_M']
self.verify_parameters(op.parameters, exp_parameters)
def test_default_sparse_functions(self):
grid = Grid(shape=(5, 6, 7))
f = TimeFunction(name='f', grid=grid)
s = SparseTimeFunction(name='s', grid=grid, npoint=3, nt=4)
s.coordinates.data[:, 0] = np.arange(0., 3.)
s.coordinates.data[:, 1] = np.arange(1., 4.)
s.coordinates.data[:, 2] = np.arange(2., 5.)
op = Operator(s.interpolate(f))
expected = {
's': s, 's_coords': s.coordinates,
'p_s_size': 3, 'p_s_m': 0, 'p_s_M': 2,
'd_size': 3, 'd_m': 0, 'd_M': 2,
'time_size': 4, 'time_m': 0, 'time_M': 3,
}
self.verify_arguments(op.arguments(), expected)
def test_override_function_size(self):
grid = Grid(shape=(5, 6, 7))
g = Function(name='g', grid=grid)
op = Operator(Eq(g, 1.))
args = {'x': 3, 'y': 4, 'z': 5}
arguments = op.arguments(**args)
expected = {
'x_m': 0, 'x_M': 3,
'y_m': 0, 'y_M': 4,
'z_m': 0, 'z_M': 5,
'g': g
}
self.verify_arguments(arguments, expected)
op(**args)
assert (g.data[4:] == 0.).all()
assert (g.data[:, 5:] == 0.).all()
assert (g.data[:, :, 6:] == 0.).all()
assert (g.data[:4, :5, :6] == 1.).all()
def test_override_function_subrange(self):
    """
    Test restricting, at `apply` time, the iteration space of a Function
    via `*_m`/`*_M` overrides.
    """
    grid = Grid(shape=(5, 6, 7))
    g = Function(name='g', grid=grid)

    op = Operator(Eq(g, 1.))

    args = {'x_m': 1, 'x_M': 3, 'y_m': 2, 'y_M': 4, 'z_m': 3, 'z_M': 5}
    arguments = op.arguments(**args)
    expected = {
        'x_m': 1, 'x_M': 3,
        'y_m': 2, 'y_M': 4,
        'z_m': 3, 'z_M': 5,
        'g': g
    }
    self.verify_arguments(arguments, expected)
    # Only the requested subrange must have been written
    op(**args)
    # FIX: `np.bool` was a deprecated alias of the builtin `bool`,
    # removed in NumPy 1.24 -- use the builtin instead
    mask = np.ones((5, 6, 7), dtype=bool)
    mask[1:4, 2:5, 3:6] = False
    assert (g.data[mask] == 0.).all()
    assert (g.data[1:4, 2:5, 3:6] == 1.).all()
def test_override_timefunction_subrange(self):
    """
    Test restricting, at `apply` time, the iteration space of a TimeFunction
    via `*_m`/`*_M` overrides (time included).
    """
    grid = Grid(shape=(5, 6, 7))
    f = TimeFunction(name='f', grid=grid, time_order=0)

    op = Operator(Eq(f, 1.), opt=None)

    args = {'x_m': 1, 'x_M': 3, 'y_m': 2, 'y_M': 4,
            'z_m': 3, 'z_M': 5, 't_m': 1, 't_M': 4}
    arguments = op.arguments(**args)
    expected = {
        'x_m': 1, 'x_M': 3,
        'y_m': 2, 'y_M': 4,
        'z_m': 3, 'z_M': 5,
        'time_m': 1, 'time_M': 4,
        'f': f
    }
    self.verify_arguments(arguments, expected)
    # Only the requested subrange must have been written
    op(**args)
    # FIX: `np.bool` was a deprecated alias of the builtin `bool`,
    # removed in NumPy 1.24 -- use the builtin instead
    mask = np.ones((1, 5, 6, 7), dtype=bool)
    mask[:, 1:4, 2:5, 3:6] = False
    assert (f.data[mask] == 0.).all()
    assert (f.data[:, 1:4, 2:5, 3:6] == 1.).all()
def test_override_function_data(self):
grid = Grid(shape=(5, 6, 7))
a = Function(name='a', grid=grid)
op = Operator(Eq(a, a + 3))
a.data[:] = 1.
op()
assert (a.data[:] == 4.).all()
a1 = Function(name='a1', grid=grid)
a1.data[:] = 2.
op(a=a1)
assert (a1.data[:] == 5.).all()
a2 = Function(name='a', grid=grid)
a2.data[:] = 3.
op(a=a2)
assert (a2.data[:] == 6.).all()
a3 = np.zeros_like(a._data_allocated)
a3[:] = 4.
op(a=a3)
assert (a3[a._mask_domain] == 7.).all()
def test_override_timefunction_data(self):
grid = Grid(shape=(5, 6, 7))
a = TimeFunction(name='a', grid=grid, save=2)
op = Operator(Eq(a, a + 3), opt=None)
a.data[:] = 1.
op(time_m=0, time=1)
assert (a.data[:] == 4.).all()
a1 = TimeFunction(name='a1', grid=grid, save=2)
a1.data[:] = 2.
op(time_m=0, time=1, a=a1)
assert (a1.data[:] == 5.).all()
a2 = TimeFunction(name='a', grid=grid, save=2)
a2.data[:] = 3.
op(time_m=0, time=1, a=a2)
assert (a2.data[:] == 6.).all()
a3 = np.zeros_like(a._data_allocated)
a3[:] = 4.
op(time_m=0, time=1, a=a3)
assert (a3[a._mask_domain] == 7.).all()
def test_dimension_size_infer(self, nt=100):
grid = Grid(shape=(3, 5, 7))
a = Function(name='a', grid=grid)
b = TimeFunction(name='b', grid=grid, save=nt)
op = Operator(Eq(b, a))
time = b.indices[0]
op_arguments = op.arguments()
assert(op_arguments[time.min_name] == 0)
assert(op_arguments[time.max_name] == nt-1)
def test_dimension_offset_adjust(self, nt=100):
i, j, k = dimify('i j k')
shape = (10, 10, 10)
grid = Grid(shape=shape, dimensions=(i, j, k))
a = Function(name='a', grid=grid)
b = TimeFunction(name='b', grid=grid, save=nt)
time = b.indices[0]
eqn = Eq(b[time + 1, i, j, k], b[time - 1, i, j, k]
+ b[time, i, j, k] + a[i, j, k])
op = Operator(eqn)
op_arguments = op.arguments(time=nt-10)
assert(op_arguments[time.min_name] == 1)
assert(op_arguments[time.max_name] == nt - 10)
def test_dimension_size_override(self):
grid = Grid(shape=(3, 5, 7))
a = TimeFunction(name='a', grid=grid)
one = Function(name='one', grid=grid)
one.data[:] = 1.
op = Operator(Eq(a.forward, a + one))
a.data[0] = 0.
op(a=a, t=5)
assert(np.allclose(a.data[1], 5.))
a.data[0] = 0.
op(a=a, time=4)
assert(np.allclose(a.data[0], 4.))
def test_override_sparse_data_fix_dim(self):
grid = Grid(shape=(10, 10))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2)
original_coords = (1., 1.)
new_coords = (2., 2.)
p_dim = Dimension(name='p_src')
src1 = SparseTimeFunction(name='src1', grid=grid, dimensions=(time, p_dim), nt=10,
npoint=1, coordinates=original_coords, time_order=2)
src2 = SparseTimeFunction(name='src2', grid=grid, dimensions=(time, p_dim),
npoint=1, nt=10, coordinates=new_coords, time_order=2)
op = Operator(src1.inject(u, src1))
args = op.arguments(src1=src2, time=0)
arg_name = src1.coordinates._arg_names[0]
assert(np.array_equal(src2.coordinates._C_as_ndarray(args[arg_name]),
np.asarray((new_coords,))))
def test_override_sparse_data_default_dim(self):
grid = Grid(shape=(10, 10))
original_coords = (1., 1.)
new_coords = (2., 2.)
u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2)
src1 = SparseTimeFunction(name='src1', grid=grid, npoint=1, nt=10,
coordinates=original_coords, time_order=2)
src2 = SparseTimeFunction(name='src2', grid=grid, npoint=1, nt=10,
coordinates=new_coords, time_order=2)
op = Operator(src1.inject(u, src1))
args = op.arguments(src1=src2, t=0)
arg_name = src1.coordinates._arg_names[0]
assert(np.array_equal(src2.coordinates._C_as_ndarray(args[arg_name]),
np.asarray((new_coords,))))
def test_argument_derivation_order(self, nt=100):
    """Runtime arguments are derived with increasing precedence:
    defaults < tensor overrides < explicit dimension overrides."""
    i, j, k = dimify('i j k')
    grid = Grid(shape=(10, 10, 10), dimensions=(i, j, k))
    a = Function(name='a', grid=grid)
    b = TimeFunction(name='b', grid=grid, save=nt)
    time = b.indices[0]
    op = Operator(Eq(b, a))

    # Defaults are derived from the symbolic data
    args = op.arguments()
    assert args[time.min_name] == 0
    assert args[time.max_name] == nt - 1

    # A Function override changes the derived time extent
    b1 = TimeFunction(name='b1', grid=grid, save=nt + 1)
    args = op.arguments(b=b1)
    assert args[time.min_name] == 0
    assert args[time.max_name] == nt

    # An explicit dimension override beats the Function override...
    args = op.arguments(b=b1, time=nt - 1)
    assert args[time.min_name] == 0
    assert args[time.max_name] == nt - 1

    # ... and so does the fully-qualified `time_M` form
    args = op.arguments(b=b1, time_M=nt - 2)
    assert args[time.min_name] == 0
    assert args[time.max_name] == nt - 2
def test_derive_constant_value(self):
    """Constants resolve at apply-time, whether the value was given at
    construction or assigned after the Operator was built."""
    grid = Grid(shape=(5, 6))

    # Value supplied at construction time
    f = Function(name='f', grid=grid)
    a = Constant(name='a', value=3.)
    Operator(Eq(f, a))()
    assert np.allclose(f.data, 3.)

    # Value assigned only after the Operator exists
    g = Function(name='g', grid=grid)
    b = Constant(name='b')
    op = Operator(Eq(g, b))
    b.data = 4.
    op()
    assert np.allclose(g.data, 4.)
def test_argument_from_index_constant(self):
    """Integer Constants may appear as array indices and be supplied at
    apply-time to select source/destination slots."""
    nx, ny = 30, 30
    grid = Grid(shape=(nx, ny))
    x, y = grid.dimensions
    arbdim = Dimension('arb')

    u = TimeFunction(name='u', grid=grid, save=None, time_order=2,
                     space_order=0)
    snap = Function(name='snap', dimensions=(arbdim, x, y), shape=(5, nx, ny),
                    space_order=0)
    save_t = Constant(name='save_t', dtype=np.int32)
    save_slot = Constant(name='save_slot', dtype=np.int32)

    op = Operator(Eq(snap.subs(arbdim, save_slot),
                     u.subs(grid.stepping_dim, save_t)))

    u.data[:] = 0.0
    snap.data[:] = 0.0
    u.data[0, 10, 10] = 1.0

    # Copy time slot 0 of `u` into slot 1 of `snap`
    op.apply(save_t=0, save_slot=1)
    assert snap.data[1, 10, 10] == 1.0
def test_argument_no_shifting(self):
    """User-provided `x_m`/`x_M` bounds are honoured as-is: the iteration
    space is restricted, never shifted."""
    grid = Grid(shape=(11, 11))
    x, y = grid.dimensions
    a = Function(name='a', grid=grid)

    # Stencil-free operator: only rows [3, 7] get updated
    a.data[:] = 1.
    op = Operator(Eq(a, a + a))
    op(x_m=3, x_M=7)
    assert (a.data[:3, :] == 1.).all()
    assert (a.data[3:7, :] == 2.).all()
    assert (a.data[8:, :] == 1.).all()

    # Operator with an x-stencil: same restriction must apply
    a.data[:] = 1.
    op = Operator(Eq(a, a + (a[x-1, y] + a[x+1, y]) / 2.))
    op(x_m=3, x_M=7)
    assert (a.data[:3, :] == 1.).all()
    assert (a.data[3:7, :] >= 2.).all()
    assert (a.data[8:, :] == 1.).all()
def test_argument_unknown(self):
    """Unknown runtime kwargs raise ValueError, unless the user explicitly
    opts in via the `ignore-unknowns` configuration switch.

    Rewritten from the try/except/assert-False anti-pattern to the
    idiomatic ``pytest.raises``, which also fails loudly on unexpected
    exception types instead of masking them.
    """
    grid = Grid(shape=(11, 11))
    a = Function(name='a', grid=grid)
    op = Operator(Eq(a, a + a))

    # By default an unknown kwarg must be rejected
    with pytest.raises(ValueError):
        op.apply(b=3)

    # With `ignore-unknowns` enabled, the same call must succeed
    try:
        configuration['ignore-unknowns'] = True
        op.apply(b=3)  # must not raise
    finally:
        # Always restore the global configuration for subsequent tests
        configuration['ignore-unknowns'] = configuration._defaults['ignore-unknowns']
@pytest.mark.parametrize('so,to,pad,expected', [
(0, 1, 0, (2, 4, 4, 4)),
(2, 1, 0, (2, 8, 8, 8)),
(4, 1, 0, (2, 12, 12, 12)),
(4, 3, 0, (4, 12, 12, 12)),
(4, 1, 3, (2, 15, 15, 15)),
((2, 5, 2), 1, 0, (2, 11, 11, 11)),
((2, 5, 4), 1, 3, (2, 16, 16, 16)),
])
def test_function_dataobj(self, so, to, pad, expected):
    """The C-level data object passed as a runtime argument carries the
    full (domain + halo + padding) shape of the TimeFunction."""
    grid = Grid(shape=(4, 4, 4))
    u = TimeFunction(name='u', grid=grid, space_order=so, time_order=to,
                     padding=pad)
    op = Operator(Eq(u, 1), opt='noop')

    dataobj = op.arguments(time=0)['u']
    observed = tuple(dataobj._obj.size[d] for d in range(u.ndim))
    assert observed == expected
def test_illegal_override(self):
    """Overriding a Function with one defined on a different Grid must be
    rejected with the 'Override' error message.

    Rewritten from try/except/assert-False to ``pytest.raises(...,
    match='Override')``: the original's bare ``except:`` would have turned
    any unexpected exception type into a plain ``assert False`` with no
    diagnostic.
    """
    grid0 = Grid(shape=(11, 11))
    grid1 = Grid(shape=(13, 13))
    a0 = Function(name='a', grid=grid0)
    b0 = Function(name='b', grid=grid0)
    a1 = Function(name='a', grid=grid1)
    op = Operator(Eq(a0, a0 + b0 + 1))
    op.apply()

    # `a1` lives on an incompatible Grid -> must hit the Override error
    with pytest.raises(ValueError, match='Override'):
        op.apply(a=a1, b=b0)
def test_incomplete_override(self):
    """An override that leaves other arguments inconsistent (here, `b`
    still defaults to the old Grid) must fail with the 'Default' error.

    Rewritten from try/except/assert-False to ``pytest.raises(...,
    match='Default')`` for the same reasons as test_illegal_override.
    """
    grid0 = Grid(shape=(11, 11))
    grid1 = Grid(shape=(13, 13))
    a0 = Function(name='a', grid=grid0)
    a1 = Function(name='a', grid=grid1)
    b = Function(name='b', grid=grid0)
    op = Operator(Eq(a0, a0 + b + 1))
    op.apply()

    # Only `a` is overridden; `b`'s default is now inconsistent
    with pytest.raises(ValueError, match='Default'):
        op.apply(a=a1)
@skipif('nompi')
@pytest.mark.parallel(mode=1)
def test_new_distributor(self):
    """Overriding an MPI-distributed Function rebinds the communicator and
    neighborhood runtime arguments to the override's distributor."""
    from devito.mpi import MPI
    grid = Grid(shape=(10, 10), comm=MPI.COMM_SELF)
    grid2 = Grid(shape=(10, 10), comm=MPI.COMM_WORLD)
    u = TimeFunction(name='u', grid=grid, space_order=2)
    u2 = TimeFunction(name='u2', grid=grid2, space_order=2)

    # The Laplacian forces halo exchanges, hence MPI objects in the arguments
    op = Operator(Eq(u.forward, u + u.laplace))

    for func, g in ((u, grid), (u2, grid2)):
        args = op.arguments(u=func, time_M=0)
        assert args['comm'] is g.distributor._obj_comm.value
        assert args['nb'] is g.distributor._obj_neighborhood.value
def test_spacing_from_new_grid(self):
    """The grid-spacing symbol (h_x) is re-derived from the Grid of the
    overriding Function, not from the Grid seen at compile time."""
    grid = Grid(shape=(10, 10), extent=(9, 9))
    u = Function(name='u', grid=grid, space_order=1)
    # Bogus operator: assigns the *symbolic* x spacing h_x into the array
    # (grid.dimensions[0].spacing is a symbol, not a number)
    op = Operator(Eq(u, grid.dimensions[0].spacing))

    grid2 = Grid(shape=(5, 5), extent=(9, 9))
    u2 = Function(name='u', grid=grid2, space_order=1)
    op(u=u2)
    assert u2.data[2, 2] == grid2.spacing[0]
@skipif('device')
class TestDeclarator(object):
def test_heap_1D(self):
    """A 1D temporary Array is heap-allocated (posix_memalign/free) while
    the user-level Function is accessed via a pointer cast."""
    i, j = dimensions('i j')
    a = Array(name='a', dimensions=(i,))
    b = Array(name='b', dimensions=(i,))
    f = Function(name='f', shape=(3,), dimensions=(j,))
    op = Operator([Eq(a[i], a[i] + b[i] + 5.),
                   Eq(f[j], a[j])])

    cast = op.body.casts[2]
    assert cast.is_PointerCast
    assert str(cast) == ('float (*restrict f) __attribute__ '
                         '((aligned (64))) = (float (*)) f_vec->data;')
    assert str(op.body.allocs[0]) == 'float *a_vec;'
    assert str(op.body.allocs[1]) == ('posix_memalign((void**)&a_vec, 64, '
                                      'sizeof(float[i_size]));')
    assert str(op.body.frees[0]) == 'free(a_vec);'
def test_heap_perfect_2D(self):
    """Temporaries spanning a perfect 2D nest are heap-allocated, with one
    alloc/free pair per Array."""
    i, j, k = dimensions('i j k')
    a = Array(name='a', dimensions=(i,))
    c = Array(name='c', dimensions=(i, j))
    f = Function(name='f', shape=(3, 3), dimensions=(j, k))
    op = Operator([Eq(a[i], c[i, j]),
                   Eq(c[i, j], c[i, j]*a[i]),
                   Eq(f[j, k], a[j] + c[j, k])])

    cast = op.body.casts[2]
    assert cast.is_PointerCast
    assert str(cast) == ('float (*restrict f)[f_vec->size[1]] __attribute__ '
                         '((aligned (64))) = (float (*)[f_vec->size[1]]) f_vec->data;')
    # One heap allocation per temporary, in declaration order
    assert [str(x) for x in op.body.allocs[:4]] == [
        'float *a_vec;',
        'posix_memalign((void**)&a_vec, 64, sizeof(float[i_size]));',
        'float *c_vec;',
        'posix_memalign((void**)&c_vec, 64, sizeof(float[i_size][j_size]));',
    ]
    assert str(op.body.frees[0]) == 'free(a_vec);'
    assert str(op.body.frees[1]) == 'free(c_vec);'
def test_heap_imperfect_2D(self):
    """Same declaration scheme as the perfect-nest case, but with an
    imperfect nest (the first equation only spans `i`)."""
    i, j, k = dimensions('i j k')
    a = Array(name='a', dimensions=(i,))
    c = Array(name='c', dimensions=(i, j))
    f = Function(name='f', shape=(3, 3), dimensions=(j, k))
    op = Operator([Eq(a[i], 0),
                   Eq(c[i, j], c[i, j]*a[i]),
                   Eq(f[j, k], a[j] + c[j, k])])

    cast = op.body.casts[2]
    assert cast.is_PointerCast
    assert str(cast) == ('float (*restrict f)[f_vec->size[1]] __attribute__ '
                         '((aligned (64))) = (float (*)[f_vec->size[1]]) f_vec->data;')
    # One heap allocation per temporary, in declaration order
    assert [str(x) for x in op.body.allocs[:4]] == [
        'float *a_vec;',
        'posix_memalign((void**)&a_vec, 64, sizeof(float[i_size]));',
        'float *c_vec;',
        'posix_memalign((void**)&c_vec, 64, sizeof(float[i_size][j_size]));',
    ]
    assert str(op.body.frees[0]) == 'free(a_vec);'
    assert str(op.body.frees[1]) == 'free(c_vec);'
def test_stack_scalars(self):
    """Scalar temporaries are declared inline inside the expression
    bundle rather than heap-allocated."""
    i, j = dimensions('i j')
    a = Array(name='a', dimensions=(i,))
    f = Function(name='f', shape=(3,), dimensions=(j,))
    t0 = Scalar(name='t0')
    t1 = Scalar(name='t1')
    op = Operator([Eq(t0, 1.),
                   Eq(t1, 2.),
                   Eq(a[i], t0*t1*3.),
                   Eq(f, a[j])])

    cast = op.body.casts[1]
    assert cast.is_PointerCast
    assert str(cast) == ('float (*restrict f) __attribute__ '
                         '((aligned (64))) = (float (*)) f_vec->data;')
    assert str(op.body.allocs[0]) == 'float *a_vec;'
    assert str(op.body.allocs[1]) == ('posix_memalign((void**)&a_vec, 64, '
                                      'sizeof(float[i_size]));')
    assert str(op.body.frees[0]) == 'free(a_vec);'

    # The scalars are declared-and-initialised right in the bundle
    bundle = op.body.body[1].body[0]
    assert bundle.is_ExpressionBundle
    assert str(bundle.body[0]) == 'float t0 = 1.00000000000000F;'
    assert str(bundle.body[1]) == 'float t1 = 2.00000000000000F;'
def test_stack_arrays(self):
    """An Array with scope='stack' becomes a fixed-size, aligned local
    declaration instead of a heap allocation."""
    i, j, k, s, q = dimensions('i j k s q')
    c = Array(name='c', dimensions=(i, j), scope='stack')
    e = Array(name='e', dimensions=(k, s, q, i, j))
    f = Function(name='f', shape=(3, 3), dimensions=(s, q))
    op = Operator([Eq(c[i, j], e[k, s, q, i, j]*1.),
                   Eq(f, c[s, q])])

    stack_cast = op.body.casts[0]
    assert stack_cast.is_PointerCast
    assert str(stack_cast) == (
        'float (*restrict c)[j_size] __attribute__ ((aligned (64))) = '
        '(float (*)[j_size]) c_vec;')

    data_cast = op.body.casts[2]
    assert data_cast.is_PointerCast
    assert str(data_cast) == (
        'float (*restrict f)[f_vec->size[1]] __attribute__ '
        '((aligned (64))) = (float (*)[f_vec->size[1]]) f_vec->data;')

    assert str(op.body.allocs[0]) == \
        'float c_vec[i_size][j_size] __attribute__((aligned(64)));'
def test_conditional_declarations(self):
    """Declarations needed by both branches of a Conditional must be
    placed so that each branch still sees the list initializer."""
    x = Dimension(name="x")
    a = Array(name='a', dimensions=(x,), dtype=np.int32, scope='stack')
    init_value = ListInitializer([0, 0])
    list_initialize = Expression(ClusterizedEq(Eq(a[x], init_value)))
    iet = Conditional(x < 3, list_initialize, list_initialize)
    iet = Callable('test', iet, 'void')
    # Call the undecorated pass directly, bypassing the state machinery
    iet = DataManager.place_definitions.__wrapped__(DataManager(None, None), iet)[0]

    for branch in iet.body.body[0].children:
        assert len(branch) == 1
        assert branch[0].is_Expression
        assert branch[0].expr.rhs is init_value
class TestLoopScheduling(object):
def test_permutations_without_deps(self):
    """Expressions without blocking data dependences fuse into a single
    perfect loop nest, regardless of the order they are supplied in."""
    grid = Grid(shape=(4, 4, 4))
    ti0 = Function(name='ti0', grid=grid)
    ti1 = Function(name='ti1', grid=grid)
    tu = TimeFunction(name='tu', grid=grid)
    tv = TimeFunction(name='tv', grid=grid)
    eq1 = Eq(tu, tv*ti0 + ti0)
    eq2 = Eq(ti0, tu + 3.)
    eq3 = Eq(tv, ti0*ti1)

    for eqns in ([eq1, eq2, eq3], [eq2, eq1, eq3], [eq3, eq2, eq1]):
        op = Operator(eqns, opt='noop')
        trees = retrieve_iteration_tree(op)
        assert len(trees) == 1
        tree = trees[0]
        assert IsPerfectIteration().visit(tree[1])
        # All three expressions live in the innermost loop
        assert len(FindNodes(Expression).visit(tree[-1])) == 3
@pytest.mark.parametrize('exprs,fissioned,shared', [
(('Eq(u, 1)', 'Eq(v, u.dxl)'), '(1,x)', [0]),
(('Eq(u, 1)', 'Eq(v, u.dxr)'), '(1,x)', [0]),
(('Eq(u, v)', 'Eq(v, u.dxl)'), '(1,x)', [0]),
(('Eq(u, v)', 'Eq(v, u.dxr)'), '(1,x)', [0]),
(('Eq(u, v)', 'Eq(v, u.dxr)'), '(1,x)', [0]),
(('Eq(us.forward, vs)', 'Eq(vs, us.dxl)'), '(0,time)', []),
(('Eq(us.forward, vs)', 'Eq(vs, us.dxr)'), '(0,time)', []),
(('Eq(u, u.dxl + v.dxr)', 'Eq(v, w.dxr)', 'Eq(w, u*w.dxl)'), '(1,x)', [0]),
(('Eq(u.forward, u + v.dx)', 'Eq(v.forward, v + u.forward.dx)'), '(1,x)', [0])
])
def test_fission_for_parallelism(self, exprs, fissioned, shared):
    """Loops are fissioned at the expected depth/dimension to recover
    parallelism, while the `shared` outer loops stay common."""
    grid = Grid(shape=(3, 3))
    t = grid.stepping_dim  # noqa -- referenced via eval
    time = grid.time_dim  # noqa -- referenced via eval
    x, y = grid.dimensions
    u = TimeFunction(name='u', grid=grid)  # noqa
    v = TimeFunction(name='v', grid=grid)  # noqa
    w = TimeFunction(name='w', grid=grid)  # noqa
    us = TimeFunction(name='u', grid=grid, save=5)  # noqa
    vs = TimeFunction(name='v', grid=grid, save=5)  # noqa

    # Plain loop (not a comprehension) so `eval` can see the locals above
    eqns = []
    for expr in exprs:
        eqns.append(eval(expr))

    op = Operator(eqns, opt='noop')
    trees = retrieve_iteration_tree(op)
    assert len(trees) == len(eqns)

    exp_depth, exp_dim = eval(fissioned)
    for tree in trees:
        # The declared `shared` outer Iterations are literally shared objects
        for d in shared:
            assert tree[d] is trees[0][d]
        # ... while fission occurred at the expected depth and dimension
        assert tree[exp_depth].dim is exp_dim
@pytest.mark.parametrize('exprs', [
('Eq(u.forward, v)', 'Eq(v, u.dxl)'),
('Eq(u, v.forward)', 'Eq(v, u)'),
('Eq(us.forward, vs)', 'Eq(vs.forward, us.dxl)'),
('Eq(u.forward, u + u.backward + v)', 'Eq(v.forward, v + v.backward + u)'),
('Eq(u, v.dxl)', 'Eq(v, w.dxl)', 'Eq(w, u*w.dxl)')
])
def test_no_fission_as_illegal(self, exprs):
    """Fission must not be applied when it would break a dependence:
    everything stays within a single loop nest."""
    grid = Grid(shape=(3, 3))
    x, y = grid.dimensions  # noqa -- referenced via eval
    u = TimeFunction(name='u', grid=grid)  # noqa
    v = TimeFunction(name='v', grid=grid)  # noqa
    w = TimeFunction(name='w', grid=grid)  # noqa
    us = TimeFunction(name='u', grid=grid, save=5)  # noqa
    vs = TimeFunction(name='v', grid=grid, save=5)  # noqa

    # Plain loop (not a comprehension) so `eval` can see the locals above
    eqns = []
    for expr in exprs:
        eqns.append(eval(expr))

    op = Operator(eqns)
    assert len(retrieve_iteration_tree(op)) == 1
@pytest.mark.parametrize('exprs,directions,expected,visit', [
(('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
'Eq(ti1[x,y,z], ti3[x,y,z])',
'Eq(ti3[x,y,z], ti1[x,y,z+1] + 1.)'),
'+++++', ['xyz', 'xyz', 'xyz'], 'xyzzz'),
(('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
'Eq(ti1[x,y,z], ti0[x,y,z+1])',
'Eq(ti3[x,y,z], ti1[x,y,z-2] + 1.)'),
'+++++', ['xyz', 'xyz', 'xyz'], 'xyzzz'),
(('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
'Eq(ti1[x,y,z], ti0[x,y,z+1])',
'Eq(ti3[x,y,z], ti1[x,y,z-2] + ti1[x,y,z+2])'),
'+++++', ['xyz', 'xyz', 'xyz'], 'xyzzz'),
(('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
'Eq(ti1[x,y,z], ti3[x,y,z])',
'Eq(ti3[x,y,z], ti0[x,y,z+1] + 1.)'),
'++++', ['xyz', 'xyz'], 'xyzz'),
(('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
'Eq(ti1[x,y,z], ti3[x,y,z])',
'Eq(ti3[x,y,z], ti0[x,y+1,z] + 1.)'),
'+++++', ['xyz', 'xyz'], 'xyzyz'),
(('Eq(ti0[x,y,z], ti0[x,y,z] + ti1[x,y,z])',
'Eq(ti1[x,y,z], ti0[x,y,z+2])',
'Eq(ti0[x,y,0], ti0[x,y,0] + 1.)'),
'++++', ['xyz', 'xyz', 'xy'], 'xyzz'),
(('Eq(tu[t,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
'Eq(tv[t,x,y,z], tu[t,x,y,z+2])',
'Eq(tu[t,x,y,0], tu[t,x,y,0] + 1.)'),
'+++++', ['txyz', 'txyz', 'txy'], 'txyzz'),
(('Eq(tu[t,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
'Eq(tv[t,x,y,z], tu[t,x,y,z+2])',
'Eq(tw[t,x,y,z], tv[t,x,y,z-1] + 1.)'),
'++++++', ['txyz', 'txyz', 'txyz'], 'txyzzz'),
(('Eq(tu[t,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
'Eq(tv[t,x,y,z], tu[t,x+2,y,z])',
'Eq(tu[t,3,y,0], tu[t,3,y,0] + 1.)'),
'++++++++', ['txyz', 'txyz', 'ty'], 'txyzxyzy'),
(('Eq(tu[t,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
'Eq(tv[t,x,y,z], tu[t,x,y,z-2])',
'Eq(tw[t,x,y,z], tv[t,x,y+1,z] + 1.)'),
'++++++++', ['txyz', 'txyz', 'txyz'], 'txyzyzyz'),
(('Eq(tu[t-1,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
'Eq(tv[t,x,y,z], tu[t,x,y,z+2])',
'Eq(tu[t-1,x,y,0], tu[t,x,y,0] + 1.)'),
'-+++', ['txyz', 'txy'], 'txyz'),
(('Eq(tu[t-1,x,y,z], tu[t,x,y,z] + tv[t,x,y,z])',
'Eq(tv[t,x,y,z], tu[t,x,y,z+2] + tu[t,x,y,z-2])',
'Eq(tw[t,x,y,z], tv[t,x,y,z] + 2)'),
'-+++', ['txyz'], 'txyz'),
(('Eq(tu[t-1,x,y,z], tu[t,x+3,y,z] + tv[t,x,y,z])',
'Eq(tv[t-1,x,y,z], tu[t,x,y,z+2])',
'Eq(tw[t-1,x,y,z], tu[t,x,y+1,z] + tv[t,x,y-1,z])'),
'-+++', ['txyz'], 'txyz'),
(('Eq(tu[t-1,x,y,z], tu[t,x+3,y,z] + tv[t,x,y,z])',
'Eq(ti0[x,y,z], ti1[x,y,z+2])',
'Eq(tw[t-1,x,y,z], tu[t,x,y+1,z] + tv[t,x,y-1,z])'),
'-++++++++++', ['txyz', 'xyz', 'txyz'], 'txyzxyztxyz'),
(('Eq(ti0[x,y,z], ti1[x,y,z+2])',
'Eq(tu[t-1,x,y,z], tu[t,x+3,y,z] + tv[t,x,y,z])',
'Eq(tw[t-1,x,y,z], tu[t,x,y+1,z] + ti0[x,y-1,z])'),
'+++-+++', ['xyz', 'txyz'], 'xyztxyz'),
(('Eq(tv[t,xi,yi,zi], tu[t,xi-1,yi,zi] + tu[t,xi+1,yi,zi])',
'Eq(tu[t+1,xi,yi,zi], tu[t,xi,yi,zi] + tv[t,xi-1,yi,zi] + tv[t,xi+1,yi,zi])'),
'+++++++', ['ti0xi0yi0z', 'ti0xi0yi0z'], 'ti0xi0yi0zi0xi0yi0z'),
# 16) RAW 3->1; expected=2
# Time goes backward, but the third equation should get fused with
# the first one, as the time dependence is loop-carried
(('Eq(tv[t-1,x,y,z], tv[t,x-1,y,z] + tv[t,x+1,y,z])',
'Eq(tv[t-1,z,z,z], tv[t-1,z,z,z] + 1)',
'Eq(f[x,y,z], tu[t-1,x,y,z] + tu[t,x,y,z] + tu[t+1,x,y,z] + tv[t,x,y,z])'),
'-++++', ['txyz', 'tz'], 'txyzz'),
# 17) WAR 2->3, 2->4; expected=4
(('Eq(tu[t+1,x,y,z], tu[t,x,y,z] + 1.)',
'Eq(tu[t+1,y,y,y], tu[t+1,y,y,y] + tw[t+1,y,y,y])',
'Eq(tw[t+1,z,z,z], tw[t+1,z,z,z] + 1.)',
'Eq(tv[t+1,x,y,z], tu[t+1,x,y,z] + 1.)'),
'+++++++++', ['txyz', 'ty', 'tz', 'txyz'], 'txyzyzxyz'),
# 18) WAR 1->3; expected=3
# 5 is expected to be moved before 4 but after 3, to be merged with 3
(('Eq(tu[t+1,x,y,z], tv[t,x,y,z] + 1.)',
'Eq(tv[t+1,x,y,z], tu[t,x,y,z] + 1.)',
'Eq(tw[t+1,x,y,z], tu[t+1,x+1,y,z] + tu[t+1,x-1,y,z])',
'Eq(f[x,x,z], tu[t,x,x,z] + tw[t,x,x,z])',
'Eq(ti0[x,y,z], tw[t+1,x,y,z] + 1.)'),
'++++++++', ['txyz', 'txyz', 'txz'], 'txyzxyzz'),
# 19) WAR 1->3; expected=3
# Cannot merge 1 with 3 otherwise we would break an anti-dependence
(('Eq(tv[t+1,x,y,z], tu[t,x,y,z] + tu[t,x+1,y,z])',
'Eq(tu[t+1,xi,yi,zi], tv[t+1,xi,yi,zi] + tv[t+1,xi+1,yi,zi])',
'Eq(tw[t+1,x,y,z], tv[t+1,x,y,z] + tv[t+1,x+1,y,z])'),
'++++++++++', ['txyz', 'ti0xi0yi0z', 'txyz'], 'txyzi0xi0yi0zxyz'),
])
def test_consistency_anti_dependences(self, exprs, directions, expected, visit):
    """Check tree structure, Iteration visit order and directions produced
    in the presence of flow/anti dependences."""
    grid = Grid(shape=(4, 4, 4))
    x, y, z = grid.dimensions  # noqa
    xi, yi, zi = grid.interior.dimensions  # noqa
    t = grid.stepping_dim  # noqa
    ti0 = Array(name='ti0', shape=grid.shape, dimensions=grid.dimensions)  # noqa
    ti1 = Array(name='ti1', shape=grid.shape, dimensions=grid.dimensions)  # noqa
    ti3 = Array(name='ti3', shape=grid.shape, dimensions=grid.dimensions)  # noqa
    f = Function(name='f', grid=grid)  # noqa
    tu = TimeFunction(name='tu', grid=grid)  # noqa
    tv = TimeFunction(name='tv', grid=grid)  # noqa
    tw = TimeFunction(name='tw', grid=grid)  # noqa

    # Plain loop: a list comprehension would need explicit locals/globals
    # mappings for `eval` to resolve the names above
    eqns = []
    for expr in exprs:
        eqns.append(eval(expr))

    # `topofuse` is a subset of `advanced`; it merely bypasses 'blocking',
    # which would otherwise complicate the structural asserts below
    op = Operator(eqns, opt=('topofuse', {'openmp': False}))
    trees = retrieve_iteration_tree(op)
    iters = FindNodes(Iteration).visit(op)
    assert len(trees) == len(expected)
    assert len(iters) == len(directions)

    # Abbreviated dimension names ('time' -> 't') keep the parametrization short
    abbrev = {'time': 't'}
    assert ["".join(abbrev.get(i.dim.name, i.dim.name) for i in j)
            for j in trees] == expected
    assert "".join(abbrev.get(i.dim.name, i.dim.name) for i in iters) == visit

    # Same shorthand trick for the iteration directions
    dir_map = {'+': Forward, '-': Backward, '*': Any}
    assert all(i.direction == dir_map[d] for i, d in zip(iters, directions))
def test_expressions_imperfect_loops(self):
    """Expressions over nested subsets of dimensions build an imperfect
    nest, with each expression scheduled at its natural depth."""
    grid = Grid(shape=(3, 3, 3))
    x, y, z = grid.dimensions
    t0 = Constant(name='t0')
    t1 = Scalar(name='t1')
    e = Function(name='e', shape=(3,), dimensions=(x,), space_order=0)
    f = Function(name='f', shape=(3, 3), dimensions=(x, y), space_order=0)
    g = Function(name='g', grid=grid, space_order=0)
    h = Function(name='h', grid=grid, space_order=0)
    eq0 = Eq(t1, e*1.)          # spans x only
    eq1 = Eq(f, t0*3. + t1)     # spans x, y
    eq2 = Eq(h, g + 4. + f*5.)  # spans x, y, z
    op = Operator([eq0, eq1, eq2], opt='noop')

    trees = retrieve_iteration_tree(op)
    assert len(trees) == 3
    outer, middle, inner = trees
    assert len(outer) == 1 and len(middle) == 2 and len(inner) == 3
    # The x loop is shared by all; middle and inner also share the y loop
    assert outer[0] == middle[0] == inner[0]
    assert middle[1] == inner[1]
    # Each expression ends up in the innermost loop of its own tree
    assert outer[-1].nodes[0].exprs[0].expr.rhs == diff2sympy(indexify(eq0.rhs))
    assert middle[-1].nodes[0].exprs[0].expr.rhs == diff2sympy(indexify(eq1.rhs))
    assert inner[-1].nodes[0].exprs[0].expr.rhs == diff2sympy(indexify(eq2.rhs))
def test_equations_emulate_bc(self):
    """Boundary-condition-like equations over reduced index spaces share
    the very same time loop as the main update."""
    grid = Grid(shape=(3, 3, 3))
    x, y, z = grid.dimensions
    time = grid.time_dim
    t0 = Scalar(name='t0')
    a = Function(name='a', grid=grid)
    b = TimeFunction(name='b', grid=grid, save=6)

    main = Eq(b[time + 1, x, y, z], b[time - 1, x, y, z] + a[x, y, z] + 3.*t0)
    bcs = [Eq(b[time, 0, y, z], 0.),
           Eq(b[time, x, 0, z], 0.),
           Eq(b[time, x, y, 0], 0.)]
    op = Operator([main] + bcs, opt='noop')

    trees = retrieve_iteration_tree(op)
    assert len(trees) == 4
    # One tree per equation, but all hang off the identical time Iteration
    assert all(id(trees[0][0]) == id(tree[0]) for tree in trees)
def test_different_section_nests(self):
    """A heap Array temporary and the TimeFunction consuming it end up in
    two separate loop nests, in program order."""
    grid = Grid((3, 3, 3))
    tu = TimeFunction(name='tu', grid=grid, space_order=4)
    t0 = Scalar(name='t0')
    t1 = Scalar(name='t1')
    ti0 = Array(name='ti0', shape=(3, 5, 7), dimensions=grid.dimensions,
                scope='heap').indexify()
    eq1 = Eq(ti0, t0*3.)
    eq2 = Eq(tu, ti0 + t1*3.)
    op = Operator([eq1, eq2], opt='noop')

    trees = retrieve_iteration_tree(op)
    assert len(trees) == 2
    for tree, eq in zip(trees, (eq1, eq2)):
        assert tree[-1].nodes[0].exprs[0].expr.rhs == eq.rhs
@pytest.mark.parametrize('exprs', [
['Eq(ti0[x,y,z], ti0[x,y,z] + t0*2.)', 'Eq(ti0[0,0,z], 0.)'],
['Eq(ti0[x,y,z], ti0[x,y,z-1] + t0*2.)', 'Eq(ti0[0,0,z], 0.)'],
['Eq(ti0[x,y,z], ti0[x,y,z] + t0*2.)', 'Eq(ti0[0,y,0], 0.)'],
['Eq(ti0[x,y,z], ti0[x,y,z] + t0*2.)', 'Eq(ti0[0,y,z], 0.)'],
])
def test_directly_indexed_expression(self, exprs):
    """Directly-indexed expressions are scheduled into their own loop
    nests, in program order."""
    grid = Grid(shape=(4, 4, 4))
    x, y, z = grid.dimensions  # noqa -- referenced via eval
    ti0 = Function(name='ti0', grid=grid, space_order=0)  # noqa
    t0 = Scalar(name='t0')  # noqa
    eqs = [eval(exprs[0]), eval(exprs[1])]
    op = Operator(eqs, opt='noop')

    trees = retrieve_iteration_tree(op)
    assert len(trees) == 2
    for tree, eq in zip(trees, eqs):
        assert tree[-1].nodes[0].exprs[0].expr.rhs == eq.rhs
@pytest.mark.parametrize('shape', [(11, 11), (11, 11, 11)])
def test_equations_mixed_functions(self, shape):
    """Grid and non-grid Functions can be mixed; the numerical result must
    not depend on where the extra (non-grid) dimension sits."""
    dims0 = Grid(shape).dimensions
    for dims in permutations(dims0):
        grid = Grid(shape=shape, dimensions=dims, dtype=np.float64)
        time = grid.time_dim
        a = TimeFunction(name='a', grid=grid, time_order=2, space_order=2)
        p_aux = Dimension(name='p_aux')
        # `b` carries the auxiliary dimension innermost, `b2` outermost
        b = Function(name='b', shape=shape + (10,), dimensions=dims + (p_aux,),
                     space_order=2, dtype=np.float64)
        b.data_with_halo[:] = 1.0
        b2 = Function(name='b2', shape=(10,) + shape, dimensions=(p_aux,) + dims,
                      space_order=2, dtype=np.float64)
        b2.data_with_halo[:] = 1.0

        eqns = [Eq(a.forward, a.laplace + 1.),
                Eq(b, time*b*a + b)]
        eqns2 = [Eq(a.forward, a.laplace + 1.),
                 Eq(b2, time*b2*a + b2)]
        subs = {d.spacing: v for d, v in zip(dims0, [2.5, 1.5, 2.0][:grid.dim])}

        op = Operator(eqns, subs=subs, opt='noop')
        trees = retrieve_iteration_tree(op)
        assert len(trees) == 2
        # Time and space loops are shared between the two expressions
        assert all(trees[0][i] is trees[1][i] for i in range(3))

        op2 = Operator(eqns2, subs=subs, opt='noop')
        trees = retrieve_iteration_tree(op2)
        assert len(trees) == 2

        # Both data layouts must yield identical numerical results
        op(time=10)
        a.data_with_halo[:] = 0.
        op2(time=10)
        for i in range(10):
            assert np.allclose(b2.data[i, ...].reshape(-1),
                               b.data[..., i].reshape(-1),
                               rtol=1e-9)
def test_equations_mixed_timedim_stepdim(self):
    """Equations written against `time` and against the stepping dimension
    `t` fuse into the same loop nest."""
    grid = Grid(shape=(3, 3, 3))
    x, y, z = grid.dimensions
    time = grid.time_dim
    t = grid.stepping_dim
    u1 = TimeFunction(name='u1', grid=grid)
    u2 = TimeFunction(name='u2', grid=grid, save=2)
    eqn_1 = Eq(u1[t+1, x, y, z], u1[t, x, y, z] + 1.)
    eqn_2 = Eq(u2[time+1, x, y, z], u2[time, x, y, z] + 1.)
    op = Operator([eqn_1, eqn_2], opt='topofuse')

    trees = retrieve_iteration_tree(op)
    assert len(trees) == 1
    exprs = trees[0][-1].nodes[0].exprs
    assert len(exprs) == 2
    assert exprs[0].write == u1
    assert exprs[1].write == u2
def test_flow_detection(self):
    """A dependence on `u.forward` at x+h must be detected so that updated
    values propagate along x within a single timestep."""
    grid = Grid(shape=(10, 10))
    x, y = grid.dimensions
    u = TimeFunction(name='u', grid=grid, save=2, time_order=1, space_order=0)
    step = Eq(u.forward, 2*u
              + 3*u.subs(x, x+x.spacing)
              + 4*u.forward.subs(x, x+x.spacing))
    op = Operator(step)

    u.data[:] = 0.0
    u.data[0, 5, 5] = 1.0
    op.apply(time_M=0)

    # Direct contributions from the seed point at (5, 5)...
    assert u.data[1, 5, 5] == 2
    assert u.data[1, 4, 5] == 11
    assert u.data[1, 3, 5] == 44
    # ... then geometric growth through the `u.forward` term
    assert u.data[1, 2, 5] == 4*44
    assert u.data[1, 1, 5] == 4*4*44
    assert u.data[1, 0, 5] == 4*4*4*44
    # Nothing may leak outside the dependence cone
    assert np.all(u.data[1, 6:, :] == 0)
    assert np.all(u.data[1, :, 0:5] == 0)
    assert np.all(u.data[1, :, 6:] == 0)
def test_scheduling_sparse_functions(self):
    """Sparse injections/interpolations constrain which dense updates may
    share the time loop."""
    grid = Grid((10, 10))
    time = grid.time_dim
    u1 = TimeFunction(name="u1", grid=grid, save=10, time_order=2)
    u2 = TimeFunction(name="u2", grid=grid, time_order=2)
    sf1 = SparseTimeFunction(name='sf1', grid=grid, npoint=1, nt=10)
    sf2 = SparseTimeFunction(name='sf2', grid=grid, npoint=1, nt=10)

    # Deliberately inject into `u1` rather than `u1.forward`, creating a
    # WAR with eqn3
    eqn1 = Eq(u1.forward, u1 + 2.0 - u1.backward)
    eqn2 = sf1.inject(u1, expr=sf1)
    eqn3 = Eq(u2.forward, u2 + 2*u2.backward - u1.dt2)
    eqn4 = sf2.interpolate(u2)

    # OpenMP disabled: with it enabled there might be more than 4 trees
    opt = ('noop', {'openmp': False})
    op = Operator([eqn1] + eqn2 + [eqn3] + eqn4, opt=opt)
    trees = retrieve_iteration_tree(op)
    assert len(trees) == 4
    # The WAR splits the time loop into two shared halves
    assert trees[0][0].dim is time and trees[0][0] is trees[1][0]  # shared
    assert trees[1][0] is not trees[2][0]
    assert trees[2][0].dim is time and trees[2][0] is trees[3][0]  # shared

    # Injecting into `u1.forward` removes the WAR: one shared time loop
    eqn2 = sf1.inject(u1.forward, expr=sf1)
    op = Operator([eqn1] + eqn2 + [eqn3] + eqn4, opt=opt)
    trees = retrieve_iteration_tree(op)
    assert len(trees) == 4
    assert all(trees[0][0] is tree[0] for tree in trees)
def test_scheduling_with_free_dims(self):
    """An equation that only reads the free `time` symbol imposes no
    ordering constraint and fuses under the shared time loop."""
    grid = Grid((4, 4))
    time = grid.time_dim
    x, y = grid.dimensions
    u = TimeFunction(name="u", grid=grid)
    f = Function(name="f", grid=grid)
    eq0 = Eq(u.forward, u + 1)
    eq1 = Eq(f, time*2)  # reads only the free dimension `time`
    op = Operator([eq0, eq1], opt='topofuse')

    trees = retrieve_iteration_tree(op)
    assert len(trees) == 1
    tree = trees[0]
    assert len(tree) == 3
    assert tree[0].dim is time
    assert tree[1].dim is x
    assert tree[2].dim is y
| true | true |
f7f5d74d896bc94ab74cf5854acf8c14ae8d3e9d | 870 | py | Python | aspen/testing/pytest_fixtures.py | Acidburn0zzz/aspen-python | 3b25a3ca041bd0bf4d313f7fc3a15cf092903de7 | [
"MIT"
] | null | null | null | aspen/testing/pytest_fixtures.py | Acidburn0zzz/aspen-python | 3b25a3ca041bd0bf4d313f7fc3a15cf092903de7 | [
"MIT"
] | null | null | null | aspen/testing/pytest_fixtures.py | Acidburn0zzz/aspen-python | 3b25a3ca041bd0bf4d313f7fc3a15cf092903de7 | [
"MIT"
] | null | null | null | """
aspen.testing.pytest_fixtures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import pytest
from aspen.testing.harness import Harness
from filesystem_tree import FilesystemTree
@pytest.fixture
def fs():
    """A scratch FilesystemTree, removed on teardown.

    ``pytest.yield_fixture`` was deprecated in pytest 3.0 and removed in
    pytest 6.2; plain ``pytest.fixture`` supports yield fixtures.
    """
    fs = FilesystemTree()
    yield fs
    fs.remove()
@pytest.fixture
def sys_path_scrubber():
    """Remove any sys.path entries added during the test.

    ``pytest.yield_fixture`` was deprecated in pytest 3.0 and removed in
    pytest 6.2; plain ``pytest.fixture`` supports yield fixtures.
    """
    before = set(sys.path)
    yield
    after = set(sys.path)
    for name in after - before:
        sys.path.remove(name)
@pytest.fixture
def sys_path(fs):
    """A FilesystemTree whose root is prepended to sys.path.

    Fixes two issues: ``pytest.yield_fixture`` is deprecated (removed in
    pytest 6.2), and the original never removed ``fs.root`` from
    ``sys.path``, leaking the entry into later tests unless the
    ``sys_path_scrubber`` fixture also happened to be active.
    """
    sys.path.insert(0, fs.root)
    yield fs
    try:
        sys.path.remove(fs.root)
    except ValueError:
        pass  # already removed (e.g. by sys_path_scrubber)
@pytest.fixture
def harness(sys_path_scrubber):
    """A Harness, torn down on exit; sys.path changes are scrubbed.

    ``pytest.yield_fixture`` was deprecated in pytest 3.0 and removed in
    pytest 6.2; plain ``pytest.fixture`` supports yield fixtures.
    """
    harness = Harness()
    yield harness
    harness.teardown()
@pytest.fixture
def client(harness):
    """The test client attached to the harness fixture.

    ``pytest.yield_fixture`` was deprecated in pytest 3.0 and removed in
    pytest 6.2; plain ``pytest.fixture`` supports yield fixtures.
    """
    yield harness.client
| 17.755102 | 42 | 0.714943 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import pytest
from aspen.testing.harness import Harness
from filesystem_tree import FilesystemTree
@pytest.yield_fixture
def fs():
fs = FilesystemTree()
yield fs
fs.remove()
@pytest.yield_fixture
def sys_path_scrubber():
before = set(sys.path)
yield
after = set(sys.path)
for name in after - before:
sys.path.remove(name)
@pytest.yield_fixture
def sys_path(fs):
sys.path.insert(0, fs.root)
yield fs
@pytest.yield_fixture
def harness(sys_path_scrubber):
harness = Harness()
yield harness
harness.teardown()
@pytest.yield_fixture
def client(harness):
yield harness.client
| true | true |
f7f5d767ffa516d1f4d559a6abe08690f6030622 | 2,951 | py | Python | test/transform/test_token.py | marrow/schema | e2b16ec45329a646156388936c2e779ddcd8fa77 | [
"MIT"
] | 3 | 2016-09-03T07:00:50.000Z | 2021-06-19T18:52:56.000Z | test/transform/test_token.py | marrow/schema | e2b16ec45329a646156388936c2e779ddcd8fa77 | [
"MIT"
] | 6 | 2015-01-23T19:32:04.000Z | 2019-10-23T15:36:48.000Z | test/transform/test_token.py | marrow/schema | e2b16ec45329a646156388936c2e779ddcd8fa77 | [
"MIT"
] | 2 | 2015-11-13T20:02:17.000Z | 2018-01-30T12:01:47.000Z | from marrow.schema.testing import TransformTest
from marrow.schema.transform.complex import TokenPatternAttribute, Token, tags, terms
class TestTokenGeneral(object):
    """Pattern construction and caching behaviour of the Token transform."""

    def test_token_pattern_cache(self):
        # The first access computes the pattern and memoizes it in __data__
        assert 'pattern' not in tags.__data__
        pattern, regex = tags.pattern
        assert 'pattern' in tags.__data__

    def test_tag_pattern(self):
        pattern, _ = tags.pattern
        assert pattern == '[\\s \t,]*("[^"]+"|\'[^\']+\'|[^ \t,]+)[ \t,]*'

    def test_term_pattern(self):
        # Terms additionally allow a +/- prefix on quoted tokens
        pattern, _ = terms.pattern
        assert pattern == '[\\s \t]*([+-]?"[^"]+"|\'[^\']+\'|[^ \t]+)[ \t]*'

    def test_direct_access(self):
        # Class-level access yields the descriptor itself
        assert isinstance(Token.pattern, TokenPatternAttribute)
class TestTagNative(TransformTest):
    """tags.native: separator-delimited input -> set of normalized tags."""

    transform = tags.native

    valid = (
        (None, None),
        ('', set()),
        ('high altitude melting pandas', set(('high', 'altitude', 'melting', 'pandas'))),
        ('"high altitude" "melting panda"', set(('high altitude', 'melting panda'))),
        ('Melting PANDAS', set(('melting', 'pandas'))),
    )
class TestTagForeign(TransformTest):
    """tags.foreign: iterable of tags -> space-separated, quoted string."""

    transform = tags.foreign

    valid = (
        (None, None),
        (('high', 'altitude'), "high altitude"),
        (('high', 'altitude', 'melting pandas'), 'high altitude "melting pandas"'),
    )

    # A bare string is not an acceptable foreign value
    invalid = (
        '',
    )
class TestTermsNative(TransformTest):
    """terms.native: search string -> dict keyed by the +/- group prefix."""

    transform = terms.native

    valid = (
        ('animals +cat -dog +"medical treatment"',
         {None: ['animals'], '+': ['cat', '"medical treatment"'], '-': ['dog']}),
    )
class TestTermLikeTuple(TransformTest):
    """group=tuple: one positional slot per declared group prefix."""

    transform = Token(groups=[None, '+', '-'], group=tuple).native

    valid = (
        ('animal medicine +cat +"kitty death"',
         (['animal', 'medicine'], ['cat', '"kitty death"'], [])),
    )
class TestTermLikeUngrouped(TransformTest):
    """group=None: flat list of (prefix, token) pairs."""

    transform = Token(groups=[None, '+', '-'], group=None).native

    valid = (
        ('cat dog -leather', [(None, 'cat'), (None, 'dog'), ('-', 'leather')]),
    )
class TestTokenSorted(TransformTest):
    """sort=True: normalized tokens come back sorted."""

    transform = Token(separators=' \t,', normalize=lambda s: s.lower().strip('"'), sort=True).native

    valid = (
        ('foo bar baz', ['bar', 'baz', 'foo']),
    )
class TestTokenNoQuote(TransformTest):
    """quotes=None: multi-word tokens are emitted without quoting."""

    transform = Token(quotes=None).foreign

    valid = (
        (("foo", "bar", "baz diz"), "foo bar baz diz"),
    )
class TestTokenDictionaryForeign(TransformTest):
    """Dict-grouped foreign transform: {prefix: keywords} -> sorted string."""

    transform = Token(group=dict, sort=True).foreign

    valid = (
        ({'+': ('foo', 'bar'), '-': ('baz', )}, "+bar +foo -baz"),
    )

    # Bug fix: the original `invalid = ('foo')` is just the string 'foo'
    # (parentheses alone do not make a tuple), so the harness would iterate
    # the characters 'f', 'o', 'o' instead of testing the single value.
    invalid = (
        'foo',
    )
'''
if self.group is dict:
if not isinstance(value, dict):
raise Concern("Dictionary grouped values must be passed as a dictionary.")
return self.separators[0].join([(prefix + sanatize(keyword)) for prefix, keywords in value for keyword in value[prefix]])
if not isinstance(value, (list, tuple, set)):
raise Concern("Ungrouped values must be passed as a list, tuple, or set.")
value = [sanatize(keyword) for keyword in value]
return self.separators[0].join(sorted(value) if self.sort else value)
'''
| 28.375 | 124 | 0.635717 | from marrow.schema.testing import TransformTest
from marrow.schema.transform.complex import TokenPatternAttribute, Token, tags, terms
class TestTokenGeneral(object):
def test_token_pattern_cache(self):
assert 'pattern' not in tags.__data__
pattern, regex = tags.pattern
assert 'pattern' in tags.__data__
def test_tag_pattern(self):
pattern, regex = tags.pattern
assert pattern == '[\\s \t,]*("[^"]+"|\'[^\']+\'|[^ \t,]+)[ \t,]*'
def test_term_pattern(self):
pattern, regex = terms.pattern
assert pattern == '[\\s \t]*([+-]?"[^"]+"|\'[^\']+\'|[^ \t]+)[ \t]*'
def test_direct_access(self):
assert isinstance(Token.pattern, TokenPatternAttribute)
class TestTagNative(TransformTest):
transform = tags.native
valid = (
(None, None),
('', set()),
('high altitude melting pandas', set(('high', 'altitude', 'melting', 'pandas'))),
('"high altitude" "melting panda"', set(('high altitude', 'melting panda'))),
('Melting PANDAS', set(('melting', 'pandas')))
)
class TestTagForeign(TransformTest):
transform = tags.foreign
valid = (
(None, None),
(('high', 'altitude'), "high altitude"),
(('high', 'altitude', 'melting pandas'), 'high altitude "melting pandas"')
)
invalid = (
'',
)
class TestTermsNative(TransformTest):
transform = terms.native
valid = (
('animals +cat -dog +"medical treatment"', {None: ['animals'], '+': ['cat', '"medical treatment"'], '-': ['dog']}),
)
class TestTermLikeTuple(TransformTest):
transform = Token(groups=[None, '+', '-'], group=tuple).native
valid = (
('animal medicine +cat +"kitty death"', (['animal', 'medicine'], ['cat', '"kitty death"'], [])),
)
class TestTermLikeUngrouped(TransformTest):
	"""With ``group=None`` the native value is a flat list of
	(prefix, token) pairs in input order."""
	transform = Token(groups=[None, '+', '-'], group=None).native
	valid = (
		('cat dog -leather', [(None, 'cat'), (None, 'dog'), ('-', 'leather')]),
	)
class TestTokenSorted(TransformTest):
	"""``sort=True`` orders parsed tokens after the ``normalize`` callable
	(lower-casing and double-quote stripping) has been applied."""
	transform = Token(separators=' \t,', normalize=lambda s: s.lower().strip('"'), sort=True).native
	valid = (
		('foo bar baz', ['bar', 'baz', 'foo']),
	)
class TestTokenNoQuote(TransformTest):
	"""``quotes=None`` disables quoting, so a token containing the
	separator is emitted verbatim when serialising."""
	transform = Token(quotes=None).foreign
	valid = (
		(("foo", "bar", "baz diz"), "foo bar baz diz"),
	)
class TestTokenDictionaryForeign(TransformTest):
	"""Foreign-side (serialising) checks for dict-grouped tokens with
	``sort=True``: prefixed keywords come out in sorted order."""
	transform = Token(group=dict, sort=True).foreign
	valid = (
		({'+': ('foo', 'bar'), '-': ('baz', )}, "+bar +foo -baz"),
	)
	# A bare string is not a dictionary of grouped values, so it must be
	# rejected.  BUG FIX: the original read ``invalid = ('foo')`` which is
	# just the string 'foo' (no trailing comma) — TransformTest would then
	# iterate its characters as three separate cases instead of one.
	invalid = (
		'foo',
	)
| true | true |
f7f5d77d97cfa7bbc797a3fe5d68c07b300729e5 | 8,090 | py | Python | preprocess/normalization.py | Maggedelle/Project-6-Pose-Estimation | 473019c236bf4b44918ee65959722705a95644e7 | [
"Apache-2.0"
] | null | null | null | preprocess/normalization.py | Maggedelle/Project-6-Pose-Estimation | 473019c236bf4b44918ee65959722705a95644e7 | [
"Apache-2.0"
] | null | null | null | preprocess/normalization.py | Maggedelle/Project-6-Pose-Estimation | 473019c236bf4b44918ee65959722705a95644e7 | [
"Apache-2.0"
] | null | null | null |
import json
def normalizer():
armcurls_f1 = []
armcurls_f2 = []
armcurls_f3 = []
armcurls_f4 = []
armcurls_f5 = []
armraises_f1 = []
armraises_f2 = []
armraises_f3 = []
armraises_f4 = []
armraises_f5 = []
pushups_f1 = []
pushups_f2 = []
pushups_f3 = []
pushups_f4 = []
pushups_f5 = []
correct_armcurls_f1 = []
correct_armcurls_f2 = []
correct_armcurls_f3 = []
correct_armraises_f1 = []
correct_armraises_f2 = []
correct_armraises_f3 = []
correct_pushups_f1 = []
correct_pushups_f4 = []
correct_pushups_f5 = []
with open('preprocess/labels.json', 'r+') as f:
data = json.load(f)
for exercise in data:
if(exercise["exercise"] == "armcurl"):
armcurls_f1.append(exercise["feature_1"])
armcurls_f2.append(exercise["feature_2"])
armcurls_f3.append(exercise["feature_3"])
armcurls_f4.append(exercise["feature_4"])
armcurls_f5.append(exercise["feature_5"])
if exercise['correct'] == 1:
correct_armcurls_f1.append(exercise["feature_1"])
correct_armcurls_f2.append(exercise["feature_2"])
correct_armcurls_f3.append(exercise["feature_3"])
elif(exercise["exercise"] == "armraise"):
armraises_f1.append(exercise["feature_1"])
armraises_f2.append(exercise["feature_2"])
armraises_f3.append(exercise["feature_3"])
armraises_f4.append(exercise["feature_4"])
armraises_f5.append(exercise["feature_5"])
if exercise['correct'] == 1:
correct_armraises_f1.append(exercise["feature_1"])
correct_armraises_f2.append(exercise["feature_2"])
correct_armraises_f3.append(exercise["feature_3"])
elif(exercise["exercise"] == "pushup"):
pushups_f1.append(exercise["feature_1"])
pushups_f2.append(exercise["feature_2"])
pushups_f3.append(exercise["feature_3"])
pushups_f4.append(exercise["feature_4"])
pushups_f5.append(exercise["feature_5"])
if exercise['correct'] == 1:
correct_pushups_f1.append(exercise["feature_1"])
correct_pushups_f4.append(exercise["feature_4"])
correct_pushups_f5.append(exercise["feature_5"])
print("arm curl feature_1 average",
sum(correct_armcurls_f1)/len(correct_armcurls_f1))
print("armcurl feature_2 average",
sum(correct_armcurls_f2)/len(correct_armcurls_f2))
print("arm curl feature 2 max: ", max(
correct_armcurls_f2), ", min: ", min(correct_armcurls_f2))
print("armcurl feature_3 average",
sum(correct_armcurls_f3)/len(correct_armcurls_f3))
print("arm curl feature 3 max: ", max(
correct_armcurls_f3), ", min: ", min(correct_armcurls_f3))
print("armraise feature_1 average",
sum(correct_armraises_f1)/len(correct_armraises_f1))
print("armraise feature_2 average",
sum(correct_armraises_f2)/len(correct_armraises_f2))
print("armraise feature_3 average",
sum(correct_armraises_f3)/len(correct_armraises_f3))
print("armraise feature 1 max: ", max(
correct_armraises_f1), ", min: ", min(correct_armraises_f1))
print("armraise feature 2 max: ", max(
correct_armraises_f2), ", min: ", min(correct_armraises_f2))
print("armraise feature 3 max: ", max(
correct_armraises_f3), ", min: ", min(correct_armraises_f3))
print("pushup feature_1 average",
sum(correct_pushups_f1)/len(correct_pushups_f1))
print("pushup feature_4 average",
sum(correct_pushups_f4)/len(correct_pushups_f4))
print("pushup feature_5 average",
sum(correct_pushups_f5)/len(correct_pushups_f5))
print("push up feature 1 max: ", max(
correct_pushups_f1), ", min: ", min(correct_pushups_f1))
print("push up feature 4 max: ", max(
correct_pushups_f4), ", min: ", min(correct_pushups_f4))
print("push up feature 5 max: ", max(
correct_pushups_f5), ", min: ", min(correct_pushups_f5))
for exercise in data:
if(exercise["exercise"] == "armcurl"):
if(exercise["feature_1"] != 0):
exercise["feature_1"] = (
exercise["feature_1"] - min(armcurls_f1)) / (max(armcurls_f1) - min(armcurls_f1))
if(exercise["feature_2"] != 0):
exercise["feature_2"] = (
exercise["feature_2"] - min(armcurls_f2)) / (max(armcurls_f2) - min(armcurls_f2))
if(exercise["feature_3"] != 0):
exercise["feature_3"] = (
exercise["feature_3"] - min(armcurls_f3)) / (max(armcurls_f3) - min(armcurls_f3))
if(exercise["feature_4"] != 0):
exercise["feature_4"] = (
exercise["feature_4"] - min(armcurls_f4)) / (max(armcurls_f4) - min(armcurls_f4))
if(exercise["feature_5"] != 0):
exercise["feature_5"] = (
exercise["feature_5"] - min(armcurls_f5)) / (max(armcurls_f5) - min(armcurls_f5))
if(exercise["exercise"] == "armraise"):
if(exercise["feature_1"] != 0):
exercise["feature_1"] = (
exercise["feature_1"] - min(armraises_f1)) / (max(armraises_f1) - min(armraises_f1))
if(exercise["feature_2"] != 0):
exercise["feature_2"] = (
exercise["feature_2"] - min(armraises_f2)) / (max(armraises_f2) - min(armraises_f2))
if(exercise["feature_3"] != 0):
exercise["feature_3"] = (
exercise["feature_3"] - min(armraises_f3)) / (max(armraises_f3) - min(armraises_f3))
if(exercise["feature_4"] != 0):
exercise["feature_4"] = (
exercise["feature_4"] - min(armraises_f4)) / (max(armraises_f4) - min(armraises_f4))
if(exercise["feature_5"] != 0):
exercise["feature_5"] = (
exercise["feature_5"] - min(armraises_f5)) / (max(armraises_f5) - min(armraises_f5))
if(exercise["exercise"] == "pushup"):
if(exercise["feature_1"] != 0):
exercise["feature_1"] = (
exercise["feature_1"] - min(pushups_f1)) / (max(pushups_f1) - min(pushups_f1))
if(exercise["feature_2"] != 0):
exercise["feature_2"] = (
exercise["feature_2"] - min(pushups_f2)) / (max(pushups_f2) - min(pushups_f2))
if(exercise["feature_3"] != 0):
exercise["feature_3"] = (
exercise["feature_3"] - min(pushups_f3)) / (max(pushups_f3) - min(pushups_f3))
if(exercise["feature_4"] != 0):
exercise["feature_4"] = (
exercise["feature_4"] - min(pushups_f4)) / (max(pushups_f4) - min(pushups_f4))
if(exercise["feature_5"] != 0):
exercise["feature_5"] = (
exercise["feature_5"] - min(pushups_f5)) / (max(pushups_f5) - min(pushups_f5))
with open('preprocess/labels.json', 'w') as f:
json.dump(data, f)
armcurls_f1.clear()
armcurls_f2.clear()
armcurls_f3.clear()
armcurls_f4.clear()
armcurls_f5.clear()
armraises_f1.clear()
armraises_f2.clear()
armraises_f3.clear()
armraises_f4.clear()
armraises_f5.clear()
pushups_f1.clear()
pushups_f2.clear()
pushups_f3.clear()
pushups_f4.clear()
pushups_f5.clear()
print("Data normalized")
| 45.965909 | 108 | 0.554512 |
import json
def normalizer():
armcurls_f1 = []
armcurls_f2 = []
armcurls_f3 = []
armcurls_f4 = []
armcurls_f5 = []
armraises_f1 = []
armraises_f2 = []
armraises_f3 = []
armraises_f4 = []
armraises_f5 = []
pushups_f1 = []
pushups_f2 = []
pushups_f3 = []
pushups_f4 = []
pushups_f5 = []
correct_armcurls_f1 = []
correct_armcurls_f2 = []
correct_armcurls_f3 = []
correct_armraises_f1 = []
correct_armraises_f2 = []
correct_armraises_f3 = []
correct_pushups_f1 = []
correct_pushups_f4 = []
correct_pushups_f5 = []
with open('preprocess/labels.json', 'r+') as f:
data = json.load(f)
for exercise in data:
if(exercise["exercise"] == "armcurl"):
armcurls_f1.append(exercise["feature_1"])
armcurls_f2.append(exercise["feature_2"])
armcurls_f3.append(exercise["feature_3"])
armcurls_f4.append(exercise["feature_4"])
armcurls_f5.append(exercise["feature_5"])
if exercise['correct'] == 1:
correct_armcurls_f1.append(exercise["feature_1"])
correct_armcurls_f2.append(exercise["feature_2"])
correct_armcurls_f3.append(exercise["feature_3"])
elif(exercise["exercise"] == "armraise"):
armraises_f1.append(exercise["feature_1"])
armraises_f2.append(exercise["feature_2"])
armraises_f3.append(exercise["feature_3"])
armraises_f4.append(exercise["feature_4"])
armraises_f5.append(exercise["feature_5"])
if exercise['correct'] == 1:
correct_armraises_f1.append(exercise["feature_1"])
correct_armraises_f2.append(exercise["feature_2"])
correct_armraises_f3.append(exercise["feature_3"])
elif(exercise["exercise"] == "pushup"):
pushups_f1.append(exercise["feature_1"])
pushups_f2.append(exercise["feature_2"])
pushups_f3.append(exercise["feature_3"])
pushups_f4.append(exercise["feature_4"])
pushups_f5.append(exercise["feature_5"])
if exercise['correct'] == 1:
correct_pushups_f1.append(exercise["feature_1"])
correct_pushups_f4.append(exercise["feature_4"])
correct_pushups_f5.append(exercise["feature_5"])
print("arm curl feature_1 average",
sum(correct_armcurls_f1)/len(correct_armcurls_f1))
print("armcurl feature_2 average",
sum(correct_armcurls_f2)/len(correct_armcurls_f2))
print("arm curl feature 2 max: ", max(
correct_armcurls_f2), ", min: ", min(correct_armcurls_f2))
print("armcurl feature_3 average",
sum(correct_armcurls_f3)/len(correct_armcurls_f3))
print("arm curl feature 3 max: ", max(
correct_armcurls_f3), ", min: ", min(correct_armcurls_f3))
print("armraise feature_1 average",
sum(correct_armraises_f1)/len(correct_armraises_f1))
print("armraise feature_2 average",
sum(correct_armraises_f2)/len(correct_armraises_f2))
print("armraise feature_3 average",
sum(correct_armraises_f3)/len(correct_armraises_f3))
print("armraise feature 1 max: ", max(
correct_armraises_f1), ", min: ", min(correct_armraises_f1))
print("armraise feature 2 max: ", max(
correct_armraises_f2), ", min: ", min(correct_armraises_f2))
print("armraise feature 3 max: ", max(
correct_armraises_f3), ", min: ", min(correct_armraises_f3))
print("pushup feature_1 average",
sum(correct_pushups_f1)/len(correct_pushups_f1))
print("pushup feature_4 average",
sum(correct_pushups_f4)/len(correct_pushups_f4))
print("pushup feature_5 average",
sum(correct_pushups_f5)/len(correct_pushups_f5))
print("push up feature 1 max: ", max(
correct_pushups_f1), ", min: ", min(correct_pushups_f1))
print("push up feature 4 max: ", max(
correct_pushups_f4), ", min: ", min(correct_pushups_f4))
print("push up feature 5 max: ", max(
correct_pushups_f5), ", min: ", min(correct_pushups_f5))
for exercise in data:
if(exercise["exercise"] == "armcurl"):
if(exercise["feature_1"] != 0):
exercise["feature_1"] = (
exercise["feature_1"] - min(armcurls_f1)) / (max(armcurls_f1) - min(armcurls_f1))
if(exercise["feature_2"] != 0):
exercise["feature_2"] = (
exercise["feature_2"] - min(armcurls_f2)) / (max(armcurls_f2) - min(armcurls_f2))
if(exercise["feature_3"] != 0):
exercise["feature_3"] = (
exercise["feature_3"] - min(armcurls_f3)) / (max(armcurls_f3) - min(armcurls_f3))
if(exercise["feature_4"] != 0):
exercise["feature_4"] = (
exercise["feature_4"] - min(armcurls_f4)) / (max(armcurls_f4) - min(armcurls_f4))
if(exercise["feature_5"] != 0):
exercise["feature_5"] = (
exercise["feature_5"] - min(armcurls_f5)) / (max(armcurls_f5) - min(armcurls_f5))
if(exercise["exercise"] == "armraise"):
if(exercise["feature_1"] != 0):
exercise["feature_1"] = (
exercise["feature_1"] - min(armraises_f1)) / (max(armraises_f1) - min(armraises_f1))
if(exercise["feature_2"] != 0):
exercise["feature_2"] = (
exercise["feature_2"] - min(armraises_f2)) / (max(armraises_f2) - min(armraises_f2))
if(exercise["feature_3"] != 0):
exercise["feature_3"] = (
exercise["feature_3"] - min(armraises_f3)) / (max(armraises_f3) - min(armraises_f3))
if(exercise["feature_4"] != 0):
exercise["feature_4"] = (
exercise["feature_4"] - min(armraises_f4)) / (max(armraises_f4) - min(armraises_f4))
if(exercise["feature_5"] != 0):
exercise["feature_5"] = (
exercise["feature_5"] - min(armraises_f5)) / (max(armraises_f5) - min(armraises_f5))
if(exercise["exercise"] == "pushup"):
if(exercise["feature_1"] != 0):
exercise["feature_1"] = (
exercise["feature_1"] - min(pushups_f1)) / (max(pushups_f1) - min(pushups_f1))
if(exercise["feature_2"] != 0):
exercise["feature_2"] = (
exercise["feature_2"] - min(pushups_f2)) / (max(pushups_f2) - min(pushups_f2))
if(exercise["feature_3"] != 0):
exercise["feature_3"] = (
exercise["feature_3"] - min(pushups_f3)) / (max(pushups_f3) - min(pushups_f3))
if(exercise["feature_4"] != 0):
exercise["feature_4"] = (
exercise["feature_4"] - min(pushups_f4)) / (max(pushups_f4) - min(pushups_f4))
if(exercise["feature_5"] != 0):
exercise["feature_5"] = (
exercise["feature_5"] - min(pushups_f5)) / (max(pushups_f5) - min(pushups_f5))
with open('preprocess/labels.json', 'w') as f:
json.dump(data, f)
armcurls_f1.clear()
armcurls_f2.clear()
armcurls_f3.clear()
armcurls_f4.clear()
armcurls_f5.clear()
armraises_f1.clear()
armraises_f2.clear()
armraises_f3.clear()
armraises_f4.clear()
armraises_f5.clear()
pushups_f1.clear()
pushups_f2.clear()
pushups_f3.clear()
pushups_f4.clear()
pushups_f5.clear()
print("Data normalized")
| true | true |
f7f5d7c01e9a3caafb03e155388140156ba58d82 | 51,366 | py | Python | Lib/test/test_fstring.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | 1 | 2020-10-25T16:33:22.000Z | 2020-10-25T16:33:22.000Z | Lib/test/test_fstring.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | null | null | null | Lib/test/test_fstring.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
# There are tests here with unicode string literals and
# identifiers. There's a code in ast.c that was added because of a
# failure with a non-ascii-only expression. So, I have tests for
# that. There are workarounds that would let me run tests for that
# code without unicode identifiers and strings, but just using them
# directly seems like the easiest and therefore safest thing to do.
# Unicode identifiers in tests is allowed by PEP 3131.
import ast
import os
import re
import types
import decimal
import unittest
# Merge conflict resolved in favour of the current (HEAD) layout:
# temp_cwd now lives in test.support.os_helper, and use_old_parser is
# gone along with the old parser it referred to.
from test.support.os_helper import temp_cwd
from test.support.script_helper import assert_python_failure
# Module-level name looked up from inside f-string expressions by the
# scoping tests below.
a_global = 'global variable'
# You could argue that I'm too strict in looking for specific error
# values with assertRaisesRegex, but without it it's way too easy to
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
# worthwhile tradeoff. When I switched to this method, I found many
# examples where I wasn't testing what I thought I was.
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
for str in error_strings:
with self.subTest(str=str):
with self.assertRaisesRegex(exception_type, regex):
eval(str)
    def test__format__lookup(self):
        """Formatting must use type(obj).__format__, ignoring any
        __format__ bound directly on the instance."""
        # Make sure __format__ is looked up on the type, not the instance.
        class X:
            def __format__(self, spec):
                return 'class'

        x = X()

        # Add a bound __format__ method to the 'y' instance, but not
        # the 'x' instance.
        y = X()
        y.__format__ = types.MethodType(lambda self, spec: 'instance', y)

        self.assertEqual(f'{y}', format(y))
        self.assertEqual(f'{y}', 'class')
        self.assertEqual(format(x), format(y))

        # __format__ is not called this way, but still make sure it
        # returns what we expect (so we can make sure we're bypassing
        # it).
        self.assertEqual(x.__format__(''), 'class')
        self.assertEqual(y.__format__(''), 'instance')

        # This is how __format__ is actually called.
        self.assertEqual(type(x).__format__(x, ''), 'class')
        self.assertEqual(type(y).__format__(y, ''), 'class')
    def test_ast(self):
        """Compiling an f-string via ast.parse yields code that really
        evaluates the embedded expression (and only on exec)."""
        # Inspired by http://bugs.python.org/issue24975
        class X:
            def __init__(self):
                self.called = False
            def __call__(self):
                self.called = True
                return 4
        x = X()
        expr = """
a = 10
f'{a * x()}'"""
        t = ast.parse(expr)
        c = compile(t, '', 'exec')

        # Make sure x was not called.
        self.assertFalse(x.called)

        # Actually run the code.
        exec(c)

        # Make sure x was called.
        self.assertTrue(x.called)
    def test_ast_line_numbers(self):
        """The expression inside an f-string carries correct lineno and
        col_offset information."""
        expr = """
a = 10
f'{a * x()}'"""
        t = ast.parse(expr)
        self.assertEqual(type(t), ast.Module)
        self.assertEqual(len(t.body), 2)
        # check `a = 10`
        self.assertEqual(type(t.body[0]), ast.Assign)
        self.assertEqual(t.body[0].lineno, 2)
        # check `f'...'`
        self.assertEqual(type(t.body[1]), ast.Expr)
        self.assertEqual(type(t.body[1].value), ast.JoinedStr)
        self.assertEqual(len(t.body[1].value.values), 1)
        self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
        self.assertEqual(t.body[1].lineno, 3)
        self.assertEqual(t.body[1].value.lineno, 3)
        self.assertEqual(t.body[1].value.values[0].lineno, 3)
        # check the binop location
        binop = t.body[1].value.values[0].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.Call)
        self.assertEqual(binop.lineno, 3)
        self.assertEqual(binop.left.lineno, 3)
        self.assertEqual(binop.right.lineno, 3)
        self.assertEqual(binop.col_offset, 3)
        self.assertEqual(binop.left.col_offset, 3)
        self.assertEqual(binop.right.col_offset, 7)
    def test_ast_line_numbers_multiple_formattedvalues(self):
        """Location info is correct for each of several replacement
        fields within a single f-string."""
        expr = """
f'no formatted values'
f'eggs {a * x()} spam {b + y()}'"""
        t = ast.parse(expr)
        self.assertEqual(type(t), ast.Module)
        self.assertEqual(len(t.body), 2)
        # check `f'no formatted value'`
        self.assertEqual(type(t.body[0]), ast.Expr)
        self.assertEqual(type(t.body[0].value), ast.JoinedStr)
        self.assertEqual(t.body[0].lineno, 2)
        # check `f'...'`
        self.assertEqual(type(t.body[1]), ast.Expr)
        self.assertEqual(type(t.body[1].value), ast.JoinedStr)
        self.assertEqual(len(t.body[1].value.values), 4)
        self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
        self.assertEqual(type(t.body[1].value.values[0].value), str)
        self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
        self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
        self.assertEqual(type(t.body[1].value.values[2].value), str)
        self.assertEqual(type(t.body[1].value.values[3]), ast.FormattedValue)
        self.assertEqual(t.body[1].lineno, 3)
        self.assertEqual(t.body[1].value.lineno, 3)
        self.assertEqual(t.body[1].value.values[0].lineno, 3)
        self.assertEqual(t.body[1].value.values[1].lineno, 3)
        self.assertEqual(t.body[1].value.values[2].lineno, 3)
        self.assertEqual(t.body[1].value.values[3].lineno, 3)
        # check the first binop location
        binop1 = t.body[1].value.values[1].value
        self.assertEqual(type(binop1), ast.BinOp)
        self.assertEqual(type(binop1.left), ast.Name)
        self.assertEqual(type(binop1.op), ast.Mult)
        self.assertEqual(type(binop1.right), ast.Call)
        self.assertEqual(binop1.lineno, 3)
        self.assertEqual(binop1.left.lineno, 3)
        self.assertEqual(binop1.right.lineno, 3)
        self.assertEqual(binop1.col_offset, 8)
        self.assertEqual(binop1.left.col_offset, 8)
        self.assertEqual(binop1.right.col_offset, 12)
        # check the second binop location
        binop2 = t.body[1].value.values[3].value
        self.assertEqual(type(binop2), ast.BinOp)
        self.assertEqual(type(binop2.left), ast.Name)
        self.assertEqual(type(binop2.op), ast.Add)
        self.assertEqual(type(binop2.right), ast.Call)
        self.assertEqual(binop2.lineno, 3)
        self.assertEqual(binop2.left.lineno, 3)
        self.assertEqual(binop2.right.lineno, 3)
        self.assertEqual(binop2.col_offset, 23)
        self.assertEqual(binop2.left.col_offset, 23)
        self.assertEqual(binop2.right.col_offset, 27)
    def test_ast_line_numbers_nested(self):
        """Location info is correct for an f-string nested inside another
        f-string's replacement expression."""
        expr = """
a = 10
f'{a * f"-{x()}-"}'"""
        t = ast.parse(expr)
        self.assertEqual(type(t), ast.Module)
        self.assertEqual(len(t.body), 2)
        # check `a = 10`
        self.assertEqual(type(t.body[0]), ast.Assign)
        self.assertEqual(t.body[0].lineno, 2)
        # check `f'...'`
        self.assertEqual(type(t.body[1]), ast.Expr)
        self.assertEqual(type(t.body[1].value), ast.JoinedStr)
        self.assertEqual(len(t.body[1].value.values), 1)
        self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
        self.assertEqual(t.body[1].lineno, 3)
        self.assertEqual(t.body[1].value.lineno, 3)
        self.assertEqual(t.body[1].value.values[0].lineno, 3)
        # check the binop location
        binop = t.body[1].value.values[0].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.JoinedStr)
        self.assertEqual(binop.lineno, 3)
        self.assertEqual(binop.left.lineno, 3)
        self.assertEqual(binop.right.lineno, 3)
        self.assertEqual(binop.col_offset, 3)
        self.assertEqual(binop.left.col_offset, 3)
        self.assertEqual(binop.right.col_offset, 7)
        # check the nested call location
        self.assertEqual(len(binop.right.values), 3)
        self.assertEqual(type(binop.right.values[0]), ast.Constant)
        self.assertEqual(type(binop.right.values[0].value), str)
        self.assertEqual(type(binop.right.values[1]), ast.FormattedValue)
        self.assertEqual(type(binop.right.values[2]), ast.Constant)
        self.assertEqual(type(binop.right.values[2].value), str)
        self.assertEqual(binop.right.values[0].lineno, 3)
        self.assertEqual(binop.right.values[1].lineno, 3)
        self.assertEqual(binop.right.values[2].lineno, 3)
        call = binop.right.values[1].value
        self.assertEqual(type(call), ast.Call)
        self.assertEqual(call.lineno, 3)
        self.assertEqual(call.col_offset, 11)
    def test_ast_line_numbers_duplicate_expression(self):
        """Duplicate expression

        NOTE: this is currently broken, always sets location of the first
        expression.  The second and third occurrences of `a * x()` get the
        first occurrence's column offsets (see the FIXME notes below).
        """
        expr = """
a = 10
f'{a * x()} {a * x()} {a * x()}'
"""
        t = ast.parse(expr)
        self.assertEqual(type(t), ast.Module)
        self.assertEqual(len(t.body), 2)
        # check `a = 10`
        self.assertEqual(type(t.body[0]), ast.Assign)
        self.assertEqual(t.body[0].lineno, 2)
        # check `f'...'`
        self.assertEqual(type(t.body[1]), ast.Expr)
        self.assertEqual(type(t.body[1].value), ast.JoinedStr)
        self.assertEqual(len(t.body[1].value.values), 5)
        self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
        self.assertEqual(type(t.body[1].value.values[1]), ast.Constant)
        self.assertEqual(type(t.body[1].value.values[1].value), str)
        self.assertEqual(type(t.body[1].value.values[2]), ast.FormattedValue)
        self.assertEqual(type(t.body[1].value.values[3]), ast.Constant)
        self.assertEqual(type(t.body[1].value.values[3].value), str)
        self.assertEqual(type(t.body[1].value.values[4]), ast.FormattedValue)
        self.assertEqual(t.body[1].lineno, 3)
        self.assertEqual(t.body[1].value.lineno, 3)
        self.assertEqual(t.body[1].value.values[0].lineno, 3)
        self.assertEqual(t.body[1].value.values[1].lineno, 3)
        self.assertEqual(t.body[1].value.values[2].lineno, 3)
        self.assertEqual(t.body[1].value.values[3].lineno, 3)
        self.assertEqual(t.body[1].value.values[4].lineno, 3)
        # check the first binop location
        binop = t.body[1].value.values[0].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.Call)
        self.assertEqual(binop.lineno, 3)
        self.assertEqual(binop.left.lineno, 3)
        self.assertEqual(binop.right.lineno, 3)
        self.assertEqual(binop.col_offset, 3)
        self.assertEqual(binop.left.col_offset, 3)
        self.assertEqual(binop.right.col_offset, 7)
        # check the second binop location
        binop = t.body[1].value.values[2].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.Call)
        self.assertEqual(binop.lineno, 3)
        self.assertEqual(binop.left.lineno, 3)
        self.assertEqual(binop.right.lineno, 3)
        self.assertEqual(binop.col_offset, 3)  # FIXME: this is wrong
        self.assertEqual(binop.left.col_offset, 3)  # FIXME: this is wrong
        self.assertEqual(binop.right.col_offset, 7)  # FIXME: this is wrong
        # check the third binop location
        binop = t.body[1].value.values[4].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.Call)
        self.assertEqual(binop.lineno, 3)
        self.assertEqual(binop.left.lineno, 3)
        self.assertEqual(binop.right.lineno, 3)
        self.assertEqual(binop.col_offset, 3)  # FIXME: this is wrong
        self.assertEqual(binop.left.col_offset, 3)  # FIXME: this is wrong
        self.assertEqual(binop.right.col_offset, 7)  # FIXME: this is wrong
    def test_ast_line_numbers_multiline_fstring(self):
        """Location info for an expression that spans several lines of a
        triple-quoted f-string."""
        # See bpo-30465 for details.
        # NOTE(review): the leading spaces inside the triple-quoted
        # f-string below are significant — they determine the asserted
        # col_offset values (4, 4, 7).  Confirm against upstream if the
        # source was whitespace-mangled.
        expr = """
a = 10
f'''
   {a
     *
       x()}
non-important content
'''
"""
        t = ast.parse(expr)
        self.assertEqual(type(t), ast.Module)
        self.assertEqual(len(t.body), 2)
        # check `a = 10`
        self.assertEqual(type(t.body[0]), ast.Assign)
        self.assertEqual(t.body[0].lineno, 2)
        # check `f'...'`
        self.assertEqual(type(t.body[1]), ast.Expr)
        self.assertEqual(type(t.body[1].value), ast.JoinedStr)
        self.assertEqual(len(t.body[1].value.values), 3)
        self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
        self.assertEqual(type(t.body[1].value.values[0].value), str)
        self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
        self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
        self.assertEqual(type(t.body[1].value.values[2].value), str)
        self.assertEqual(t.body[1].lineno, 3)
        self.assertEqual(t.body[1].value.lineno, 3)
        self.assertEqual(t.body[1].value.values[0].lineno, 3)
        self.assertEqual(t.body[1].value.values[1].lineno, 3)
        self.assertEqual(t.body[1].value.values[2].lineno, 3)
        self.assertEqual(t.body[1].col_offset, 0)
        self.assertEqual(t.body[1].value.col_offset, 0)
        self.assertEqual(t.body[1].value.values[0].col_offset, 0)
        self.assertEqual(t.body[1].value.values[1].col_offset, 0)
        self.assertEqual(t.body[1].value.values[2].col_offset, 0)
        # NOTE: the following lineno information and col_offset is correct for
        # expressions within FormattedValues.
        binop = t.body[1].value.values[1].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.Call)
        self.assertEqual(binop.lineno, 4)
        self.assertEqual(binop.left.lineno, 4)
        self.assertEqual(binop.right.lineno, 6)
        self.assertEqual(binop.col_offset, 4)
        self.assertEqual(binop.left.col_offset, 4)
        self.assertEqual(binop.right.col_offset, 7)
    def test_docstring(self):
        """A lone f-string — even one implicitly concatenated with a
        plain string — never becomes a function docstring."""
        def f():
            f'''Not a docstring'''
        self.assertIsNone(f.__doc__)
        def g():
            '''Not a docstring''' \
f''
        self.assertIsNone(g.__doc__)
def test_literal_eval(self):
with self.assertRaisesRegex(ValueError, 'malformed node or string'):
ast.literal_eval("f'x'")
    def test_ast_compile_time_concat(self):
        """Implicit concatenation of a plain string with an f-string is
        folded correctly when compiled from an AST."""
        x = ['']

        expr = """x[0] = 'foo' f'{3}'"""
        t = ast.parse(expr)
        c = compile(t, '', 'exec')
        exec(c)
        self.assertEqual(x[0], 'foo3')
    def test_compile_time_concat_errors(self):
        """Mixing bytes and (f-)string literals in implicit concatenation
        is a syntax error, in either order."""
        self.assertAllRaise(SyntaxError,
                            'cannot mix bytes and nonbytes literals',
                            [r"""f'' b''""",
                             r"""b'' f''""",
                            ])
    def test_literal(self):
        """An f-string with no replacement fields is just its literal text."""
        self.assertEqual(f'', '')
        self.assertEqual(f'a', 'a')
        self.assertEqual(f' ', ' ')
    def test_unterminated_string(self):
        """An unterminated string inside a replacement field is reported
        as such, with or without a closing brace/paren."""
        self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
                            [r"""f'{"x'""",
                             r"""f'{"x}'""",
                             r"""f'{("x'""",
                             r"""f'{("x}'""",
                            ])
    def test_mismatched_parens(self):
        """A mismatched bracket inside a replacement field names both the
        offending closer and the opener it fails to match."""
        self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
                            r"does not match opening parenthesis '\('",
                            ["f'{((}'",
                            ])
        self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\)' "
                            r"does not match opening parenthesis '\['",
                            ["f'{a[4)}'",
                            ])
        self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\]' "
                            r"does not match opening parenthesis '\('",
                            ["f'{a(4]}'",
                            ])
        self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
                            r"does not match opening parenthesis '\['",
                            ["f'{a[4}'",
                            ])
        self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
                            r"does not match opening parenthesis '\('",
                            ["f'{a(4}'",
                            ])
        # A deeply nested run of openers must fail cleanly, not crash.
        self.assertRaises(SyntaxError, eval, "f'{" + "("*500 + "}'")
    def test_double_braces(self):
        """Doubled braces escape literal '{' and '}' — except inside
        string literals within an expression, where they are real braces."""
        self.assertEqual(f'{{', '{')
        self.assertEqual(f'a{{', 'a{')
        self.assertEqual(f'{{b', '{b')
        self.assertEqual(f'a{{b', 'a{b')
        self.assertEqual(f'}}', '}')
        self.assertEqual(f'a}}', 'a}')
        self.assertEqual(f'}}b', '}b')
        self.assertEqual(f'a}}b', 'a}b')
        self.assertEqual(f'{{}}', '{}')
        self.assertEqual(f'a{{}}', 'a{}')
        self.assertEqual(f'{{b}}', '{b}')
        self.assertEqual(f'{{}}c', '{}c')
        self.assertEqual(f'a{{b}}', 'a{b}')
        self.assertEqual(f'a{{}}c', 'a{}c')
        self.assertEqual(f'{{b}}c', '{b}c')
        self.assertEqual(f'a{{b}}c', 'a{b}c')

        self.assertEqual(f'{{{10}', '{10')
        self.assertEqual(f'}}{10}', '}10')
        self.assertEqual(f'}}{{{10}', '}{10')
        self.assertEqual(f'}}a{{{10}', '}a{10')

        self.assertEqual(f'{10}{{', '10{')
        self.assertEqual(f'{10}}}', '10}')
        self.assertEqual(f'{10}}}{{', '10}{')
        self.assertEqual(f'{10}}}a{{' '}', '10}a{}')

        # Inside of strings, don't interpret doubled brackets.
        self.assertEqual(f'{"{{}}"}', '{{}}')

        self.assertAllRaise(TypeError, 'unhashable type',
                            ["f'{ {{}} }'", # dict in a set
                            ])
    def test_compile_time_concat(self):
        """Implicit (compile-time) concatenation mixes plain strings and
        f-strings freely; only the f-string parts are interpolated."""
        x = 'def'
        self.assertEqual('abc' f'## {x}ghi', 'abc## defghi')
        self.assertEqual('abc' f'{x}' 'ghi', 'abcdefghi')
        self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
        self.assertEqual('{x}' f'{x}', '{x}def')
        self.assertEqual('{x' f'{x}', '{xdef')
        self.assertEqual('{x}' f'{x}', '{x}def')
        self.assertEqual('{{x}}' f'{x}', '{{x}}def')
        self.assertEqual('{{x' f'{x}', '{{xdef')
        self.assertEqual('x}}' f'{x}', 'x}}def')
        self.assertEqual(f'{x}' 'x}}', 'defx}}')
        self.assertEqual(f'{x}' '', 'def')
        self.assertEqual('' f'{x}' '', 'def')
        self.assertEqual('' f'{x}', 'def')
        self.assertEqual(f'{x}' '2', 'def2')
        self.assertEqual('1' f'{x}' '2', '1def2')
        self.assertEqual('1' f'{x}', '1def')
        self.assertEqual(f'{x}' f'-{x}', 'def-def')
        self.assertEqual('' f'', '')
        self.assertEqual('' f'' '', '')
        self.assertEqual('' f'' '' f'', '')
        self.assertEqual(f'', '')
        self.assertEqual(f'' '', '')
        self.assertEqual(f'' '' f'', '')
        self.assertEqual(f'' '' f'' '', '')

        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
                            ["f'{3' f'}'",  # can't concat to get a valid f-string
                            ])
    def test_comments(self):
        """'#' is fine inside string literals within an f-string, but a
        comment character in the expression itself is an error."""
        # These aren't comments, since they're in strings.
        d = {'#': 'hash'}
        self.assertEqual(f'{"#"}', '#')
        self.assertEqual(f'{d["#"]}', 'hash')

        self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
                            ["f'{1#}'",   # error because the expression becomes "(1#)"
                             "f'{3(#)}'",
                             "f'{#}'",
                            ])
        self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
                            ["f'{)#}'",   # When wrapped in parens, this becomes
                                          # '()#)'. Make sure that doesn't compile.
                            ])
    def test_many_expressions(self):
        """Stress f-strings with hundreds of expressions, probing the
        boundaries around 255/256 elements."""
        # Create a string with many expressions in it. Note that
        # because we have a space in here as a literal, we're actually
        # going to use twice as many ast nodes: one for each literal
        # plus one for each expression.
        def build_fstr(n, extra=''):
            return "f'" + ('{x} ' * n) + extra + "'"

        x = 'X'
        width = 1

        # Test around 256.
        for i in range(250, 260):
            self.assertEqual(eval(build_fstr(i)), (x+' ')*i)

        # Test concatenating 2 largs fstrings.
        self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))

        s = build_fstr(253, '{x:{width}} ')
        self.assertEqual(eval(s), (x+' ')*254)

        # Test lots of expressions and constants, concatenated.
        s = "f'{1}' 'x' 'y'" * 1024
        self.assertEqual(eval(s), '1xy' * 1024)
def test_format_specifier_expressions(self):
width = 10
precision = 4
value = decimal.Decimal('12.34567')
self.assertEqual(f'result: {value:{width}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result: 12.35')
self.assertEqual(f'{10:#{1}0x}', ' 0xa')
self.assertEqual(f'{10:{"#"}1{0}{"x"}}', ' 0xa')
self.assertEqual(f'{-10:-{"#"}1{0}x}', ' -0xa')
self.assertEqual(f'{-10:{"-"}#{1}0{"x"}}', ' -0xa')
self.assertEqual(f'{10:#{3 != {4:5} and width}x}', ' 0xa')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["""f'{"s"!r{":10"}}'""",
# This looks like a nested format spec.
])
<<<<<<< HEAD
self.assertAllRaise(SyntaxError, "f-string: invalid syntax",
=======
err_msg = "invalid syntax" if use_old_parser() else "f-string: invalid syntax"
self.assertAllRaise(SyntaxError, err_msg,
>>>>>>> 3.9
[# Invalid syntax inside a nested spec.
"f'{4:{/5}}'",
])
self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
[# Can't nest format specifiers.
"f'result: {value:{width:{0}}.{precision:1}}'",
])
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
[# No expansion inside conversion or for
# the : or ! itself.
"""f'{"s"!{"r"}}'""",
])
    def test_side_effect_order(self):
        """Expressions in an f-string are evaluated left to right."""
        class X:
            def __init__(self):
                self.i = 0
            def __format__(self, spec):
                # Each formatting of this object bumps the counter, so the
                # produced digits reveal the evaluation order.
                self.i += 1
                return str(self.i)
        x = X()
        self.assertEqual(f'{x} {x}', '1 2')
    def test_missing_expression(self):
        """An empty expression part ('{}', '{ }', '{!r}', ...) is a syntax error."""
        self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
                            ["f'{}'",
                             "f'{ }'"
                             "f' {} '",
                             "f'{!r}'",
                             "f'{ !r}'",
                             "f'{10:{ }}'",
                             "f' { } '",
                             # The Python parser ignores also the following
                             # whitespace characters in additional to a space.
                             "f'''{\t\f\r\n}'''",
                             # Catch the empty expression before the
                             # invalid conversion.
                             "f'{!x}'",
                             "f'{ !xr}'",
                             "f'{!x:}'",
                             "f'{!x:a}'",
                             "f'{ !xr:}'",
                             "f'{ !xr:a}'",
                             "f'{!}'",
                             "f'{:}'",
                             # We find the empty expression before the
                             # missing closing brace.
                             "f'{!'",
                             "f'{!s:'",
                             "f'{:'",
                             "f'{:x'",
                             ])
        # Different error message is raised for other whitespace characters.
        self.assertAllRaise(SyntaxError, r"invalid non-printable character U\+00A0",
                            ["f'''{\xa0}'''",
                             "\xa0",
                             ])
def test_parens_in_expressions(self):
self.assertEqual(f'{3,}', '(3,)')
# Add these because when an expression is evaluated, parens
# are added around it. But we shouldn't go from an invalid
# expression to a valid one. The added parens are just
# supposed to allow whitespace (including newlines).
<<<<<<< HEAD
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
=======
err_msg = "invalid syntax" if use_old_parser() else "f-string: invalid syntax"
self.assertAllRaise(SyntaxError, err_msg,
>>>>>>> 3.9
["f'{,}'",
"f'{,}'", # this is (,), which is an error
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{3)+(4}'",
])
self.assertAllRaise(SyntaxError, 'EOL while scanning string literal',
["f'{\n}'",
])
    def test_backslashes_in_string_part(self):
        """Backslash escapes are allowed in the literal (non-expression)
        part of an f-string, with raw-string prefixes disabling them."""
        self.assertEqual(f'\t', '\t')
        self.assertEqual(r'\t', '\\t')
        self.assertEqual(rf'\t', '\\t')
        self.assertEqual(f'{2}\t', '2\t')
        self.assertEqual(f'{2}\t{3}', '2\t3')
        self.assertEqual(f'\t{3}', '\t3')
        self.assertEqual(f'\u0394', '\u0394')
        self.assertEqual(r'\u0394', '\\u0394')
        self.assertEqual(rf'\u0394', '\\u0394')
        self.assertEqual(f'{2}\u0394', '2\u0394')
        self.assertEqual(f'{2}\u0394{3}', '2\u03943')
        self.assertEqual(f'\u0394{3}', '\u03943')
        self.assertEqual(f'\U00000394', '\u0394')
        self.assertEqual(r'\U00000394', '\\U00000394')
        self.assertEqual(rf'\U00000394', '\\U00000394')
        self.assertEqual(f'{2}\U00000394', '2\u0394')
        self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
        self.assertEqual(f'\U00000394{3}', '\u03943')
        self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
        self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
        self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
        self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
        self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
        self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
        self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')
        self.assertEqual(f'\x20', ' ')
        self.assertEqual(r'\x20', '\\x20')
        self.assertEqual(rf'\x20', '\\x20')
        self.assertEqual(f'{2}\x20', '2 ')
        self.assertEqual(f'{2}\x20{3}', '2 3')
        self.assertEqual(f'\x20{3}', ' 3')
        self.assertEqual(f'2\x20', '2 ')
        self.assertEqual(f'2\x203', '2 3')
        self.assertEqual(f'\x203', ' 3')
        with self.assertWarns(DeprecationWarning):  # invalid escape sequence
            value = eval(r"f'\{6*7}'")
        self.assertEqual(value, '\\42')
        self.assertEqual(f'\\{6*7}', '\\42')
        self.assertEqual(fr'\{6*7}', '\\42')
        AMPERSAND = 'spam'
        # Get the right unicode character (&), or pick up local variable
        # depending on the number of backslashes.
        self.assertEqual(f'\N{AMPERSAND}', '&')
        self.assertEqual(f'\\N{AMPERSAND}', '\\Nspam')
        self.assertEqual(fr'\N{AMPERSAND}', '\\Nspam')
        self.assertEqual(f'\\\N{AMPERSAND}', '\\&')
    def test_misformed_unicode_character_name(self):
        """Malformed \\N{...} escapes raise the same error in f-strings as
        in plain strings."""
        # These test are needed because unicode names are parsed
        # differently inside f-strings.
        self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
                            [r"f'\N'",
                             r"f'\N{'",
                             r"f'\N{GREEK CAPITAL LETTER DELTA'",
                             # Here are the non-f-string versions,
                             # which should give the same errors.
                             r"'\N'",
                             r"'\N{'",
                             r"'\N{GREEK CAPITAL LETTER DELTA'",
                             ])
    def test_no_backslashes_in_expression_part(self):
        """A backslash anywhere inside the expression part is a syntax error."""
        self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
                            [r"f'{\'a\'}'",
                             r"f'{\t3}'",
                             r"f'{\}'",
                             r"rf'{\'a\'}'",
                             r"rf'{\t3}'",
                             r"rf'{\}'",
                             r"""rf'{"\N{LEFT CURLY BRACKET}"}'""",
                             r"f'{\n}'",
                             ])
    def test_no_escapes_for_braces(self):
        """
        Only literal curly braces begin an expression.
        """
        # \x7b is '{'.  An escaped brace is plain text, never a field start.
        self.assertEqual(f'\x7b1+1}}', '{1+1}')
        self.assertEqual(f'\x7b1+1', '{1+1')
        self.assertEqual(f'\u007b1+1', '{1+1')
        self.assertEqual(f'\N{LEFT CURLY BRACKET}1+1\N{RIGHT CURLY BRACKET}', '{1+1}')
    def test_newlines_in_expressions(self):
        """Expressions inside a triple-quoted f-string may span lines."""
        self.assertEqual(f'{0}', '0')
        self.assertEqual(rf'''{3+
4}''', '7')
def test_lambda(self):
x = 5
self.assertEqual(f'{(lambda y:x*y)("8")!r}', "'88888'")
self.assertEqual(f'{(lambda y:x*y)("8")!r:10}', "'88888' ")
self.assertEqual(f'{(lambda y:x*y)("8"):10}', "88888 ")
# lambda doesn't work without parens, because the colon
# makes the parser think it's a format_spec
<<<<<<< HEAD
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
=======
err_msg = "invalid syntax" if use_old_parser() else "f-string: invalid syntax"
self.assertAllRaise(SyntaxError, err_msg,
>>>>>>> 3.9
["f'{lambda x:x}'",
])
    def test_yield(self):
        """A yield inside an f-string expression turns the function into a
        generator."""
        # Not terribly useful, but make sure the yield turns
        # a function into a generator
        def fn(y):
            f'y:{yield y*2}'
            f'{yield}'
        g = fn(4)
        self.assertEqual(next(g), 8)
        self.assertEqual(next(g), None)
    def test_yield_send(self):
        """Values sent into a generator flow through a yield embedded in an
        f-string expression."""
        def fn(x):
            yield f'x:{yield (lambda i: x * i)}'
        g = fn(10)
        the_lambda = next(g)
        self.assertEqual(the_lambda(4), 40)
        self.assertEqual(g.send('string'), 'x:string')
    def test_expressions_with_triple_quoted_strings(self):
        """Triple-quoted strings (and implicit concatenation of them) are
        allowed inside the expression part."""
        self.assertEqual(f"{'''x'''}", 'x')
        self.assertEqual(f"{'''eric's'''}", "eric's")
        # Test concatenation within an expression
        self.assertEqual(f'{"x" """eric"s""" "y"}', 'xeric"sy')
        self.assertEqual(f'{"x" """eric"s"""}', 'xeric"s')
        self.assertEqual(f'{"""eric"s""" "y"}', 'eric"sy')
        self.assertEqual(f'{"""x""" """eric"s""" "y"}', 'xeric"sy')
        self.assertEqual(f'{"""x""" """eric"s""" """y"""}', 'xeric"sy')
        self.assertEqual(f'{r"""x""" """eric"s""" """y"""}', 'xeric"sy')
    def test_multiple_vars(self):
        """Several expressions mixed with literal text in every position."""
        x = 98
        y = 'abc'
        self.assertEqual(f'{x}{y}', '98abc')
        self.assertEqual(f'X{x}{y}', 'X98abc')
        self.assertEqual(f'{x}X{y}', '98Xabc')
        self.assertEqual(f'{x}{y}X', '98abcX')
        self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
        self.assertEqual(f'X{x}{y}Y', 'X98abcY')
        self.assertEqual(f'{x}X{y}Y', '98XabcY')
        self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')
    def test_closure(self):
        """An f-string expression can reference a closed-over variable."""
        def outer(x):
            def inner():
                return f'x:{x}'
            return inner
        self.assertEqual(outer('987')(), 'x:987')
        self.assertEqual(outer(7)(), 'x:7')
    def test_arguments(self):
        """Function parameters (and enclosing-scope names) are visible to
        f-string expressions, including inside the format spec."""
        y = 2
        def f(x, width):
            return f'x={x*y:{width}}'
        self.assertEqual(f('foo', 10), 'x=foofoo ')
        x = 'bar'
        self.assertEqual(f(10, 10), 'x= 20')
    def test_locals(self):
        """Local variables are visible to f-string expressions."""
        value = 123
        self.assertEqual(f'v:{value}', 'v:123')
    def test_missing_variable(self):
        """Referencing an undefined name raises NameError at evaluation time."""
        with self.assertRaises(NameError):
            f'v:{value}'
    def test_missing_format_spec(self):
        """'{x}' and '{x:}' both pass an empty format spec to __format__."""
        class O:
            def __format__(self, spec):
                # Distinguish "no spec" from a non-empty one.
                if not spec:
                    return '*'
                return spec
        self.assertEqual(f'{O():x}', 'x')
        self.assertEqual(f'{O()}', '*')
        self.assertEqual(f'{O():}', '*')
        self.assertEqual(f'{3:}', '3')
        self.assertEqual(f'{3!s:}', '3')
    def test_global(self):
        """Module-level globals are visible to f-string expressions."""
        self.assertEqual(f'g:{a_global}', 'g:global variable')
        self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
        a_local = 'local variable'
        self.assertEqual(f'g:{a_global} l:{a_local}',
                         'g:global variable l:local variable')
        self.assertEqual(f'g:{a_global!r}',
                         "g:'global variable'")
        self.assertEqual(f'g:{a_global} l:{a_local!r}',
                         "g:global variable l:'local variable'")
        self.assertIn("module 'unittest' from", f'{unittest}')
    def test_shadowed_global(self):
        """A local that shadows a global wins inside f-string expressions."""
        a_global = 'really a local'
        self.assertEqual(f'g:{a_global}', 'g:really a local')
        self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
        a_local = 'local variable'
        self.assertEqual(f'g:{a_global} l:{a_local}',
                         'g:really a local l:local variable')
        self.assertEqual(f'g:{a_global!r}',
                         "g:'really a local'")
        self.assertEqual(f'g:{a_global} l:{a_local!r}',
                         "g:really a local l:'local variable'")
    def test_call(self):
        """Function calls are allowed inside the expression part."""
        def foo(x):
            return 'x=' + str(x)
        self.assertEqual(f'{foo(10)}', 'x=10')
    def test_nested_fstrings(self):
        """An f-string may appear inside another f-string's expression."""
        y = 5
        self.assertEqual(f'{f"{0}"*3}', '000')
        self.assertEqual(f'{f"{y}"*3}', '555')
def test_invalid_string_prefixes(self):
single_quote_cases = ["fu''",
"uf''",
"Fu''",
"fU''",
"Uf''",
"uF''",
"ufr''",
"urf''",
"fur''",
"fru''",
"rfu''",
"ruf''",
"FUR''",
"Fur''",
"fb''",
"fB''",
"Fb''",
"FB''",
"bf''",
"bF''",
"Bf''",
"BF''",]
double_quote_cases = [case.replace("'", '"') for case in single_quote_cases]
error_msg = (
'invalid syntax'
if use_old_parser()
else 'unexpected EOF while parsing'
)
self.assertAllRaise(SyntaxError, error_msg,
single_quote_cases + double_quote_cases)
    def test_leading_trailing_spaces(self):
        """Whitespace around the expression is ignored; it also lets a dict
        or set display follow the opening brace."""
        self.assertEqual(f'{ 3}', '3')
        self.assertEqual(f'{ 3}', '3')
        self.assertEqual(f'{3 }', '3')
        self.assertEqual(f'{3 }', '3')
        self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}',
                         'expr={1: 2}')
        self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }',
                         'expr={1: 2}')
    def test_not_equal(self):
        """'!=' must not be mistaken for the start of a '!s'/'!r' conversion."""
        # There's a special test for this because there's a special
        # case in the f-string parser to look for != as not ending an
        # expression. Normally it would, while looking for !s or !r.
        self.assertEqual(f'{3!=4}', 'True')
        self.assertEqual(f'{3!=4:}', 'True')
        self.assertEqual(f'{3!=4!s}', 'True')
        self.assertEqual(f'{3!=4!s:.3}', 'Tru')
    def test_equal_equal(self):
        """'==' still works even though a trailing '=' has debug semantics."""
        # Because an expression ending in = has special meaning,
        # there's a special test for ==. Make sure it works.
        self.assertEqual(f'{0==1}', 'False')
    def test_conversions(self):
        """Only '!s', '!r' and '!a' are valid conversions; anything else is
        a syntax error."""
        self.assertEqual(f'{3.14:10.10}', ' 3.14')
        self.assertEqual(f'{3.14!s:10.10}', '3.14 ')
        self.assertEqual(f'{3.14!r:10.10}', '3.14 ')
        self.assertEqual(f'{3.14!a:10.10}', '3.14 ')
        self.assertEqual(f'{"a"}', 'a')
        self.assertEqual(f'{"a"!r}', "'a'")
        self.assertEqual(f'{"a"!a}', "'a'")
        # Not a conversion.
        self.assertEqual(f'{"a!r"}', "a!r")
        # Not a conversion, but show that ! is allowed in a format spec.
        self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!')
        self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
                            ["f'{3!g}'",
                             "f'{3!A}'",
                             "f'{3!3}'",
                             "f'{3!G}'",
                             "f'{3!!}'",
                             "f'{3!:}'",
                             "f'{3! s}'",  # no space before conversion char
                             ])
        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
                            ["f'{x!s{y}}'",
                             "f'{3!ss}'",
                             "f'{3!ss:}'",
                             "f'{3!ss:s}'",
                             ])
    def test_assignment(self):
        """An f-string is never a valid assignment target."""
        self.assertAllRaise(SyntaxError, 'invalid syntax',
                            ["f'' = 3",
                             "f'{0}' = x",
                             "f'{x}' = x",
                             ])
    def test_del(self):
        """An f-string is never a valid 'del' target."""
        self.assertAllRaise(SyntaxError, 'invalid syntax',
                            ["del f''",
                             "del '' f''",
                             ])
    def test_mismatched_braces(self):
        """Unbalanced '{'/'}' in the literal part or format spec is rejected;
        braces inside quoted expression strings are fine."""
        self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed",
                            ["f'{{}'",
                             "f'{{}}}'",
                             "f'}'",
                             "f'x}'",
                             "f'x}x'",
                             r"f'\u007b}'",
                             # Can't have { or } in a format spec.
                             "f'{3:}>10}'",
                             "f'{3:}}>10}'",
                             ])
        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
                            ["f'{3:{{>10}'",
                             "f'{3'",
                             "f'{3!'",
                             "f'{3:'",
                             "f'{3!s'",
                             "f'{3!s:'",
                             "f'{3!s:3'",
                             "f'x{'",
                             "f'x{x'",
                             "f'{x'",
                             "f'{3:s'",
                             "f'{{{'",
                             "f'{{}}{'",
                             "f'{'",
                             ])
        # But these are just normal strings.
        self.assertEqual(f'{"{"}', '{')
        self.assertEqual(f'{"}"}', '}')
        self.assertEqual(f'{3:{"}"}>10}', '}}}}}}}}}3')
        self.assertEqual(f'{2:{"{"}>10}', '{{{{{{{{{2')
    def test_if_conditional(self):
        """f-strings used as 'if' conditions exercise the compiler's
        constant-condition logic."""
        # There's special logic in compile.c to test if the
        # conditional for an if (and while) are constants. Exercise
        # that code.
        def test_fstring(x, expected):
            # Truthiness of a plain f-string.
            flag = 0
            if f'{x}':
                flag = 1
            else:
                flag = 2
            self.assertEqual(flag, expected)
        def test_concat_empty(x, expected):
            # Truthiness of '' concatenated with an f-string.
            flag = 0
            if '' f'{x}':
                flag = 1
            else:
                flag = 2
            self.assertEqual(flag, expected)
        def test_concat_non_empty(x, expected):
            # Truthiness of ' ' concatenated with an f-string.
            flag = 0
            if ' ' f'{x}':
                flag = 1
            else:
                flag = 2
            self.assertEqual(flag, expected)
        test_fstring('', 2)
        test_fstring(' ', 1)
        test_concat_empty('', 2)
        test_concat_empty(' ', 1)
        test_concat_non_empty('', 1)
        test_concat_non_empty(' ', 1)
    def test_empty_format_specifier(self):
        """An empty format spec after ':' behaves like no spec at all."""
        x = 'test'
        self.assertEqual(f'{x}', 'test')
        self.assertEqual(f'{x:}', 'test')
        self.assertEqual(f'{x!s:}', 'test')
        self.assertEqual(f'{x!r:}', "'test'")
    def test_str_format_differences(self):
        """Unlike str.format's field syntax, f-string subscripts are real
        Python expressions (so d[a] uses the variable a, and d[0] is an int)."""
        d = {'a': 'string',
             0: 'integer',
             }
        a = 0
        self.assertEqual(f'{d[0]}', 'integer')
        self.assertEqual(f'{d["a"]}', 'string')
        self.assertEqual(f'{d[a]}', 'integer')
        # str.format treats both index forms as string keys.
        self.assertEqual('{d[a]}'.format(d=d), 'string')
        self.assertEqual('{d[0]}'.format(d=d), 'integer')
    def test_errors(self):
        """Runtime (not syntax) errors from formatting unsupported values."""
        # see issue 26287
        self.assertAllRaise(TypeError, 'unsupported',
                            [r"f'{(lambda: 0):x}'",
                             r"f'{(0,):x}'",
                             ])
        self.assertAllRaise(ValueError, 'Unknown format code',
                            [r"f'{1000:j}'",
                             r"f'{1000:j}'",
                             ])
<<<<<<< HEAD
=======
@unittest.skipIf(use_old_parser(), "The old parser only supports <fstring> as the filename")
>>>>>>> 3.9
def test_filename_in_syntaxerror(self):
# see issue 38964
with temp_cwd() as cwd:
file_path = os.path.join(cwd, 't.py')
with open(file_path, 'w') as f:
f.write('f"{a b}"') # This generates a SyntaxError
_, _, stderr = assert_python_failure(file_path,
PYTHONIOENCODING='ascii')
self.assertIn(file_path.encode('ascii', 'backslashreplace'), stderr)
    def test_loop(self):
        """Re-evaluating the same f-string many times stays correct."""
        for i in range(1000):
            self.assertEqual(f'i:{i}', 'i:' + str(i))
    def test_dict(self):
        """Dict subscripts with quote characters work when the f-string's
        own quoting differs."""
        d = {'"': 'dquote',
             "'": 'squote',
             'foo': 'bar',
             }
        self.assertEqual(f'''{d["'"]}''', 'squote')
        self.assertEqual(f"""{d['"']}""", 'dquote')
        self.assertEqual(f'{d["foo"]}', 'bar')
        self.assertEqual(f"{d['foo']}", 'bar')
    def test_backslash_char(self):
        """A line-continuation backslash at the end of an f-string compiles
        to the empty string."""
        # Check eval of a backslash followed by a control char.
        # See bpo-30682: this used to raise an assert in pydebug mode.
        self.assertEqual(eval('f"\\\n"'), '')
        self.assertEqual(eval('f"\\\r"'), '')
    def test_debug_conversion(self):
        """The '=' debug specifier: f'{expr=}' expands to the expression's
        source text followed by its repr (or the given conversion/spec)."""
        x = 'A string'
        self.assertEqual(f'{x=}', 'x=' + repr(x))
        self.assertEqual(f'{x =}', 'x =' + repr(x))
        self.assertEqual(f'{x=!s}', 'x=' + str(x))
        self.assertEqual(f'{x=!r}', 'x=' + repr(x))
        self.assertEqual(f'{x=!a}', 'x=' + ascii(x))
        x = 2.71828
        self.assertEqual(f'{x=:.2f}', 'x=' + format(x, '.2f'))
        self.assertEqual(f'{x=:}', 'x=' + format(x, ''))
        self.assertEqual(f'{x=!r:^20}', 'x=' + format(repr(x), '^20'))
        self.assertEqual(f'{x=!s:^20}', 'x=' + format(str(x), '^20'))
        self.assertEqual(f'{x=!a:^20}', 'x=' + format(ascii(x), '^20'))
        x = 9
        self.assertEqual(f'{3*x+15=}', '3*x+15=42')
        # There is code in ast.c that deals with non-ascii expression values. So,
        # use a unicode identifier to trigger that.
        tenπ = 31.4
        self.assertEqual(f'{tenπ=:.2f}', 'tenπ=31.40')
        # Also test with Unicode in non-identifiers.
        self.assertEqual(f'{"Σ"=}', '"Σ"=\'Σ\'')
        # Make sure nested fstrings still work.
        self.assertEqual(f'{f"{3.1415=:.1f}":*^20}', '*****3.1415=3.1*****')
        # Make sure text before and after an expression with = works
        # correctly.
        pi = 'π'
        self.assertEqual(f'alpha α {pi=} ω omega', "alpha α pi='π' ω omega")
        # Check multi-line expressions.
        self.assertEqual(f'''{
3
=}''', '\n3\n=3')
        # Since = is handled specially, make sure all existing uses of
        # it still work.
        self.assertEqual(f'{0==1}', 'False')
        self.assertEqual(f'{0!=1}', 'True')
        self.assertEqual(f'{0<=1}', 'True')
        self.assertEqual(f'{0>=1}', 'False')
        self.assertEqual(f'{(x:="5")}', '5')
        self.assertEqual(x, '5')
        self.assertEqual(f'{(x:=5)}', '5')
        self.assertEqual(x, 5)
        self.assertEqual(f'{"="}', '=')
        x = 20
        # This isn't an assignment expression, it's 'x', with a format
        # spec of '=10'. See test_walrus: you need to use parens.
        self.assertEqual(f'{x:=10}', ' 20')
        # Test named function parameters, to make sure '=' parsing works
        # there.
        def f(a):
            nonlocal x
            oldx = x
            x = a
            return oldx
        x = 0
        self.assertEqual(f'{f(a="3=")}', '0')
        self.assertEqual(x, '3=')
        self.assertEqual(f'{f(a=4)}', '3=')
        self.assertEqual(x, 4)
        # Make sure __format__ is being called.
        class C:
            def __format__(self, s):
                return f'FORMAT-{s}'
            def __repr__(self):
                return 'REPR'
        self.assertEqual(f'{C()=}', 'C()=REPR')
        self.assertEqual(f'{C()=!r}', 'C()=REPR')
        self.assertEqual(f'{C()=:}', 'C()=FORMAT-')
        self.assertEqual(f'{C()=: }', 'C()=FORMAT- ')
        self.assertEqual(f'{C()=:x}', 'C()=FORMAT-x')
        self.assertEqual(f'{C()=!r:*^20}', 'C()=********REPR********')
        self.assertRaises(SyntaxError, eval, "f'{C=]'")
        # Make sure leading and following text works.
        x = 'foo'
        self.assertEqual(f'X{x=}Y', 'Xx='+repr(x)+'Y')
        # Make sure whitespace around the = works.
        self.assertEqual(f'X{x =}Y', 'Xx ='+repr(x)+'Y')
        self.assertEqual(f'X{x= }Y', 'Xx= '+repr(x)+'Y')
        self.assertEqual(f'X{x = }Y', 'Xx = '+repr(x)+'Y')
        # These next lines contains tabs. Backslash escapes don't
        # work in f-strings.
        # patchcheck doesn't like these tabs. So the only way to test
        # this will be to dynamically created and exec the f-strings. But
        # that's such a hassle I'll save it for another day. For now, convert
        # the tabs to spaces just to shut up patchcheck.
        #self.assertEqual(f'X{x =}Y', 'Xx\t='+repr(x)+'Y')
        #self.assertEqual(f'X{x = }Y', 'Xx\t=\t'+repr(x)+'Y')
    def test_walrus(self):
        """':=' without parens is a format spec; with parens it's a real
        assignment expression."""
        x = 20
        # This isn't an assignment expression, it's 'x', with a format
        # spec of '=10'.
        self.assertEqual(f'{x:=10}', ' 20')
        # This is an assignment expression, which requires parens.
        self.assertEqual(f'{(x:=10)}', '10')
        self.assertEqual(x, 10)
def test_invalid_syntax_error_message(self):
<<<<<<< HEAD
with self.assertRaisesRegex(SyntaxError, "f-string: invalid syntax"):
=======
err_msg = "invalid syntax" if use_old_parser() else "f-string: invalid syntax"
with self.assertRaisesRegex(SyntaxError, err_msg):
>>>>>>> 3.9
compile("f'{a $ b}'", "?", "exec")
    def test_with_two_commas_in_format_specifier(self):
        """A format spec may not repeat the ',' grouping option."""
        error_msg = re.escape("Cannot specify ',' with ','.")
        with self.assertRaisesRegex(ValueError, error_msg):
            f'{1:,,}'
    def test_with_two_underscore_in_format_specifier(self):
        """A format spec may not repeat the '_' grouping option."""
        error_msg = re.escape("Cannot specify '_' with '_'.")
        with self.assertRaisesRegex(ValueError, error_msg):
            f'{1:__}'
    def test_with_a_commas_and_an_underscore_in_format_specifier(self):
        """',' and '_' grouping options are mutually exclusive (',' first)."""
        error_msg = re.escape("Cannot specify both ',' and '_'.")
        with self.assertRaisesRegex(ValueError, error_msg):
            f'{1:,_}'
    def test_with_an_underscore_and_a_comma_in_format_specifier(self):
        """',' and '_' grouping options are mutually exclusive ('_' first)."""
        error_msg = re.escape("Cannot specify both ',' and '_'.")
        with self.assertRaisesRegex(ValueError, error_msg):
            f'{1:_,}'
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
# failure with a non-ascii-only expression. So, I have tests for
# that. There are workarounds that would let me run tests for that
# code without unicode identifiers and strings, but just using them
# directly seems like the easiest and therefore safest thing to do.
# Unicode identifiers in tests is allowed by PEP 3131.

import ast
import os
import re
import types
import decimal
import unittest
# Merge conflict resolved in favor of HEAD: temp_cwd moved to
# test.support.os_helper, and use_old_parser no longer exists because the
# legacy parser was removed.
from test.support.os_helper import temp_cwd
from test.support.script_helper import assert_python_failure

a_global = 'global variable'

# You could argue that I'm too strict in looking for specific error
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
for str in error_strings:
with self.subTest(str=str):
with self.assertRaisesRegex(exception_type, regex):
eval(str)
def test__format__lookup(self):
# Make sure __format__ is looked up on the type, not the instance.
class X:
def __format__(self, spec):
return 'class'
x = X()
# Add a bound __format__ method to the 'y' instance, but not
# the 'x' instance.
y = X()
y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
self.assertEqual(f'{y}', format(y))
self.assertEqual(f'{y}', 'class')
self.assertEqual(format(x), format(y))
# __format__ is not called this way, but still make sure it
# returns what we expect (so we can make sure we're bypassing
self.assertEqual(x.__format__(''), 'class')
self.assertEqual(y.__format__(''), 'instance')
self.assertEqual(type(x).__format__(x, ''), 'class')
self.assertEqual(type(y).__format__(y, ''), 'class')
def test_ast(self):
class X:
def __init__(self):
self.called = False
def __call__(self):
self.called = True
return 4
x = X()
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
self.assertFalse(x.called)
exec(c)
self.assertTrue(x.called)
def test_ast_line_numbers(self):
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_multiple_formattedvalues(self):
expr = """
f'no formatted values'
f'eggs {a * x()} spam {b + y()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
self.assertEqual(type(t.body[0]), ast.Expr)
self.assertEqual(type(t.body[0].value), ast.JoinedStr)
self.assertEqual(t.body[0].lineno, 2)
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 4)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(type(t.body[1].value.values[3]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
binop1 = t.body[1].value.values[1].value
self.assertEqual(type(binop1), ast.BinOp)
self.assertEqual(type(binop1.left), ast.Name)
self.assertEqual(type(binop1.op), ast.Mult)
self.assertEqual(type(binop1.right), ast.Call)
self.assertEqual(binop1.lineno, 3)
self.assertEqual(binop1.left.lineno, 3)
self.assertEqual(binop1.right.lineno, 3)
self.assertEqual(binop1.col_offset, 8)
self.assertEqual(binop1.left.col_offset, 8)
self.assertEqual(binop1.right.col_offset, 12)
binop2 = t.body[1].value.values[3].value
self.assertEqual(type(binop2), ast.BinOp)
self.assertEqual(type(binop2.left), ast.Name)
self.assertEqual(type(binop2.op), ast.Add)
self.assertEqual(type(binop2.right), ast.Call)
self.assertEqual(binop2.lineno, 3)
self.assertEqual(binop2.left.lineno, 3)
self.assertEqual(binop2.right.lineno, 3)
self.assertEqual(binop2.col_offset, 23)
self.assertEqual(binop2.left.col_offset, 23)
self.assertEqual(binop2.right.col_offset, 27)
def test_ast_line_numbers_nested(self):
expr = """
a = 10
f'{a * f"-{x()}-"}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.JoinedStr)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
self.assertEqual(len(binop.right.values), 3)
self.assertEqual(type(binop.right.values[0]), ast.Constant)
self.assertEqual(type(binop.right.values[0].value), str)
self.assertEqual(type(binop.right.values[1]), ast.FormattedValue)
self.assertEqual(type(binop.right.values[2]), ast.Constant)
self.assertEqual(type(binop.right.values[2].value), str)
self.assertEqual(binop.right.values[0].lineno, 3)
self.assertEqual(binop.right.values[1].lineno, 3)
self.assertEqual(binop.right.values[2].lineno, 3)
call = binop.right.values[1].value
self.assertEqual(type(call), ast.Call)
self.assertEqual(call.lineno, 3)
self.assertEqual(call.col_offset, 11)
def test_ast_line_numbers_duplicate_expression(self):
"""Duplicate expression
NOTE: this is currently broken, always sets location of the first
expression.
"""
expr = """
a = 10
f'{a * x()} {a * x()} {a * x()}'
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 5)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[1]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[1].value), str)
self.assertEqual(type(t.body[1].value.values[2]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[3]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[3].value), str)
self.assertEqual(type(t.body[1].value.values[4]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
self.assertEqual(t.body[1].value.values[4].lineno, 3)
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
binop = t.body[1].value.values[2].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
binop = t.body[1].value.values[4].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_multiline_fstring(self):
expr = """
a = 10
f'''
{a
*
x()}
non-important content
'''
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 3)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].col_offset, 0)
self.assertEqual(t.body[1].value.col_offset, 0)
self.assertEqual(t.body[1].value.values[0].col_offset, 0)
self.assertEqual(t.body[1].value.values[1].col_offset, 0)
self.assertEqual(t.body[1].value.values[2].col_offset, 0)
binop = t.body[1].value.values[1].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 4)
self.assertEqual(binop.left.lineno, 4)
self.assertEqual(binop.right.lineno, 6)
self.assertEqual(binop.col_offset, 4)
self.assertEqual(binop.left.col_offset, 4)
self.assertEqual(binop.right.col_offset, 7)
def test_docstring(self):
def f():
f'''Not a docstring'''
self.assertIsNone(f.__doc__)
def g():
'''Not a docstring''' \
f''
self.assertIsNone(g.__doc__)
def test_literal_eval(self):
with self.assertRaisesRegex(ValueError, 'malformed node or string'):
ast.literal_eval("f'x'")
def test_ast_compile_time_concat(self):
x = ['']
expr = """x[0] = 'foo' f'{3}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
exec(c)
self.assertEqual(x[0], 'foo3')
def test_compile_time_concat_errors(self):
self.assertAllRaise(SyntaxError,
'cannot mix bytes and nonbytes literals',
[r"""f'' b''""",
r"""b'' f''""",
])
def test_literal(self):
self.assertEqual(f'', '')
self.assertEqual(f'a', 'a')
self.assertEqual(f' ', ' ')
def test_unterminated_string(self):
self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
[r"""f'{"x'""",
r"""f'{"x}'""",
r"""f'{("x'""",
r"""f'{("x}'""",
])
def test_mismatched_parens(self):
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{((}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\)' "
r"does not match opening parenthesis '\['",
["f'{a[4)}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\]' "
r"does not match opening parenthesis '\('",
["f'{a(4]}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\['",
["f'{a[4}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{a(4}'",
])
self.assertRaises(SyntaxError, eval, "f'{" + "("*500 + "}'")
def test_double_braces(self):
self.assertEqual(f'{{', '{')
self.assertEqual(f'a{{', 'a{')
self.assertEqual(f'{{b', '{b')
self.assertEqual(f'a{{b', 'a{b')
self.assertEqual(f'}}', '}')
self.assertEqual(f'a}}', 'a}')
self.assertEqual(f'}}b', '}b')
self.assertEqual(f'a}}b', 'a}b')
self.assertEqual(f'{{}}', '{}')
self.assertEqual(f'a{{}}', 'a{}')
self.assertEqual(f'{{b}}', '{b}')
self.assertEqual(f'{{}}c', '{}c')
self.assertEqual(f'a{{b}}', 'a{b}')
self.assertEqual(f'a{{}}c', 'a{}c')
self.assertEqual(f'{{b}}c', '{b}c')
self.assertEqual(f'a{{b}}c', 'a{b}c')
self.assertEqual(f'{{{10}', '{10')
self.assertEqual(f'}}{10}', '}10')
self.assertEqual(f'}}{{{10}', '}{10')
self.assertEqual(f'}}a{{{10}', '}a{10')
self.assertEqual(f'{10}{{', '10{')
self.assertEqual(f'{10}}}', '10}')
self.assertEqual(f'{10}}}{{', '10}{')
self.assertEqual(f'{10}}}a{{' '}', '10}a{}')
self.assertEqual(f'{"{{}}"}', '{{}}')
self.assertAllRaise(TypeError, 'unhashable type',
["f'{ {{}} }'", # dict in a set
])
def test_compile_time_concat(self):
x = 'def'
self.assertEqual('abc' f'defghi')
self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{x' f'{x}', '{xdef')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{{x}}' f'{x}', '{{x}}def')
self.assertEqual('{{x' f'{x}', '{{xdef')
self.assertEqual('x}}' f'{x}', 'x}}def')
self.assertEqual(f'{x}' 'x}}', 'defx}}')
self.assertEqual(f'{x}' '', 'def')
self.assertEqual('' f'{x}' '', 'def')
self.assertEqual('' f'{x}', 'def')
self.assertEqual(f'{x}' '2', 'def2')
self.assertEqual('1' f'{x}' '2', '1def2')
self.assertEqual('1' f'{x}', '1def')
self.assertEqual(f'{x}' f'-{x}', 'def-def')
self.assertEqual('' f'', '')
self.assertEqual('' f'' '', '')
self.assertEqual('' f'' '' f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'' '', '')
self.assertEqual(f'' '' f'', '')
self.assertEqual(f'' '' f'' '', '')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3' f'}'", # can't concat to get a valid f-string
])
def test_comments(self):
d = {'#': 'hash'}
self.assertEqual(f'{"#"}', '#')
self.assertEqual(f'{d["#"]}', 'hash')
self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
["f'{1#}'",
"f'{3(#)}'",
"f'{#}'",
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{)#}'",
])
def test_many_expressions(self):
# Create a string with many expressions in it. Note that
# because we have a space in here as a literal, we're actually
def build_fstr(n, extra=''):
return "f'" + ('{x} ' * n) + extra + "'"
x = 'X'
width = 1
for i in range(250, 260):
self.assertEqual(eval(build_fstr(i)), (x+' ')*i)
self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))
s = build_fstr(253, '{x:{width}} ')
self.assertEqual(eval(s), (x+' ')*254)
s = "f'{1}' 'x' 'y'" * 1024
self.assertEqual(eval(s), '1xy' * 1024)
def test_format_specifier_expressions(self):
width = 10
precision = 4
value = decimal.Decimal('12.34567')
self.assertEqual(f'result: {value:{width}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result: 12.35')
self.assertEqual(f'{10:#{1}0x}', ' 0xa')
self.assertEqual(f'{10:{"#"}1{0}{"x"}}', ' 0xa')
self.assertEqual(f'{-10:-{"#"}1{0}x}', ' -0xa')
self.assertEqual(f'{-10:{"-"}#{1}0{"x"}}', ' -0xa')
self.assertEqual(f'{10:#{3 != {4:5} and width}x}', ' 0xa')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["""f'{"s"!r{":10"}}'""",
])
<<<<<<< HEAD
self.assertAllRaise(SyntaxError, "f-string: invalid syntax",
=======
err_msg = "invalid syntax" if use_old_parser() else "f-string: invalid syntax"
self.assertAllRaise(SyntaxError, err_msg,
>>>>>>> 3.9
[
"f'{4:{/5}}'",
])
self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
[
"f'result: {value:{width:{0}}.{precision:1}}'",
])
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
[# No expansion inside conversion or for
# the : or ! itself.
"""f'{"s"!{"r"}}'""",
])
    def test_side_effect_order(self):
        # Replacement fields are evaluated and formatted left to right:
        # formatting the same mutable object twice must observe the
        # counter incremented between the two fields.
        class X:
            def __init__(self):
                self.i = 0
            def __format__(self, spec):
                # Each format call bumps the counter and reports it.
                self.i += 1
                return str(self.i)
        x = X()
        self.assertEqual(f'{x} {x}', '1 2')
def test_missing_expression(self):
self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
["f'{}'",
"f'{ }'"
"f' {} '",
"f'{!r}'",
"f'{ !r}'",
"f'{10:{ }}'",
"f' { } '",
# The Python parser ignores also the following
# whitespace characters in additional to a space.
"f'''{\t\f\r\n}'''",
# Catch the empty expression before the
# invalid conversion.
"f'{!x}'",
"f'{ !xr}'",
"f'{!x:}'",
"f'{!x:a}'",
"f'{ !xr:}'",
"f'{ !xr:a}'",
"f'{!}'",
"f'{:}'",
# We find the empty expression before the
# missing closing brace.
"f'{!'",
"f'{!s:'",
"f'{:'",
"f'{:x'",
])
# Different error message is raised for other whitespace characters.
self.assertAllRaise(SyntaxError, r"invalid non-printable character U\+00A0",
["f'''{\xa0}'''",
"\xa0",
])
def test_parens_in_expressions(self):
self.assertEqual(f'{3,}', '(3,)')
# Add these because when an expression is evaluated, parens
# are added around it. But we shouldn't go from an invalid
<<<<<<< HEAD
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
=======
err_msg = "invalid syntax" if use_old_parser() else "f-string: invalid syntax"
self.assertAllRaise(SyntaxError, err_msg,
>>>>>>> 3.9
["f'{,}'",
"f'{,}'",
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{3)+(4}'",
])
self.assertAllRaise(SyntaxError, 'EOL while scanning string literal',
["f'{\n}'",
])
def test_backslashes_in_string_part(self):
self.assertEqual(f'\t', '\t')
self.assertEqual(r'\t', '\\t')
self.assertEqual(rf'\t', '\\t')
self.assertEqual(f'{2}\t', '2\t')
self.assertEqual(f'{2}\t{3}', '2\t3')
self.assertEqual(f'\t{3}', '\t3')
self.assertEqual(f'\u0394', '\u0394')
self.assertEqual(r'\u0394', '\\u0394')
self.assertEqual(rf'\u0394', '\\u0394')
self.assertEqual(f'{2}\u0394', '2\u0394')
self.assertEqual(f'{2}\u0394{3}', '2\u03943')
self.assertEqual(f'\u0394{3}', '\u03943')
self.assertEqual(f'\U00000394', '\u0394')
self.assertEqual(r'\U00000394', '\\U00000394')
self.assertEqual(rf'\U00000394', '\\U00000394')
self.assertEqual(f'{2}\U00000394', '2\u0394')
self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
self.assertEqual(f'\U00000394{3}', '\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')
self.assertEqual(f'\x20', ' ')
self.assertEqual(r'\x20', '\\x20')
self.assertEqual(rf'\x20', '\\x20')
self.assertEqual(f'{2}\x20', '2 ')
self.assertEqual(f'{2}\x20{3}', '2 3')
self.assertEqual(f'\x20{3}', ' 3')
self.assertEqual(f'2\x20', '2 ')
self.assertEqual(f'2\x203', '2 3')
self.assertEqual(f'\x203', ' 3')
with self.assertWarns(DeprecationWarning):
value = eval(r"f'\{6*7}'")
self.assertEqual(value, '\\42')
self.assertEqual(f'\\{6*7}', '\\42')
self.assertEqual(fr'\{6*7}', '\\42')
AMPERSAND = 'spam'
self.assertEqual(f'\N{AMPERSAND}', '&')
self.assertEqual(f'\\N{AMPERSAND}', '\\Nspam')
self.assertEqual(fr'\N{AMPERSAND}', '\\Nspam')
self.assertEqual(f'\\\N{AMPERSAND}', '\\&')
def test_misformed_unicode_character_name(self):
self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
[r"f'\N'",
r"f'\N{'",
r"f'\N{GREEK CAPITAL LETTER DELTA'",
# Here are the non-f-string versions,
# which should give the same errors.
r"'\N'",
r"'\N{'",
r"'\N{GREEK CAPITAL LETTER DELTA'",
])
def test_no_backslashes_in_expression_part(self):
self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
[r"f'{\'a\'}'",
r"f'{\t3}'",
r"f'{\}'",
r"rf'{\'a\'}'",
r"rf'{\t3}'",
r"rf'{\}'",
r"""rf'{"\N{LEFT CURLY BRACKET}"}'""",
r"f'{\n}'",
])
def test_no_escapes_for_braces(self):
"""
Only literal curly braces begin an expression.
"""
# \x7b is '{'.
self.assertEqual(f'\x7b1+1}}', '{1+1}')
self.assertEqual(f'\x7b1+1', '{1+1')
self.assertEqual(f'\u007b1+1', '{1+1')
self.assertEqual(f'\N{LEFT CURLY BRACKET}1+1\N{RIGHT CURLY BRACKET}', '{1+1}')
def test_newlines_in_expressions(self):
self.assertEqual(f'{0}', '0')
self.assertEqual(rf'''{3+
4}''', '7')
def test_lambda(self):
x = 5
self.assertEqual(f'{(lambda y:x*y)("8")!r}', "'88888'")
self.assertEqual(f'{(lambda y:x*y)("8")!r:10}', "'88888' ")
self.assertEqual(f'{(lambda y:x*y)("8"):10}', "88888 ")
# lambda doesn't work without parens, because the colon
<<<<<<< HEAD
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
=======
err_msg = "invalid syntax" if use_old_parser() else "f-string: invalid syntax"
self.assertAllRaise(SyntaxError, err_msg,
>>>>>>> 3.9
["f'{lambda x:x}'",
])
def test_yield(self):
# Not terribly useful, but make sure the yield turns
# a function into a generator
def fn(y):
f'y:{yield y*2}'
f'{yield}'
g = fn(4)
self.assertEqual(next(g), 8)
self.assertEqual(next(g), None)
def test_yield_send(self):
def fn(x):
yield f'x:{yield (lambda i: x * i)}'
g = fn(10)
the_lambda = next(g)
self.assertEqual(the_lambda(4), 40)
self.assertEqual(g.send('string'), 'x:string')
def test_expressions_with_triple_quoted_strings(self):
self.assertEqual(f"{'''x'''}", 'x')
self.assertEqual(f"{'''eric's'''}", "eric's")
# Test concatenation within an expression
self.assertEqual(f'{"x" """eric"s""" "y"}', 'xeric"sy')
self.assertEqual(f'{"x" """eric"s"""}', 'xeric"s')
self.assertEqual(f'{"""eric"s""" "y"}', 'eric"sy')
self.assertEqual(f'{"""x""" """eric"s""" "y"}', 'xeric"sy')
self.assertEqual(f'{"""x""" """eric"s""" """y"""}', 'xeric"sy')
self.assertEqual(f'{r"""x""" """eric"s""" """y"""}', 'xeric"sy')
def test_multiple_vars(self):
x = 98
y = 'abc'
self.assertEqual(f'{x}{y}', '98abc')
self.assertEqual(f'X{x}{y}', 'X98abc')
self.assertEqual(f'{x}X{y}', '98Xabc')
self.assertEqual(f'{x}{y}X', '98abcX')
self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
self.assertEqual(f'X{x}{y}Y', 'X98abcY')
self.assertEqual(f'{x}X{y}Y', '98XabcY')
self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')
def test_closure(self):
def outer(x):
def inner():
return f'x:{x}'
return inner
self.assertEqual(outer('987')(), 'x:987')
self.assertEqual(outer(7)(), 'x:7')
def test_arguments(self):
y = 2
def f(x, width):
return f'x={x*y:{width}}'
self.assertEqual(f('foo', 10), 'x=foofoo ')
x = 'bar'
self.assertEqual(f(10, 10), 'x= 20')
def test_locals(self):
value = 123
self.assertEqual(f'v:{value}', 'v:123')
def test_missing_variable(self):
with self.assertRaises(NameError):
f'v:{value}'
def test_missing_format_spec(self):
class O:
def __format__(self, spec):
if not spec:
return '*'
return spec
self.assertEqual(f'{O():x}', 'x')
self.assertEqual(f'{O()}', '*')
self.assertEqual(f'{O():}', '*')
self.assertEqual(f'{3:}', '3')
self.assertEqual(f'{3!s:}', '3')
def test_global(self):
self.assertEqual(f'g:{a_global}', 'g:global variable')
self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:global variable l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'global variable'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:global variable l:'local variable'")
self.assertIn("module 'unittest' from", f'{unittest}')
def test_shadowed_global(self):
a_global = 'really a local'
self.assertEqual(f'g:{a_global}', 'g:really a local')
self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:really a local l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'really a local'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:really a local l:'local variable'")
def test_call(self):
def foo(x):
return 'x=' + str(x)
self.assertEqual(f'{foo(10)}', 'x=10')
def test_nested_fstrings(self):
y = 5
self.assertEqual(f'{f"{0}"*3}', '000')
self.assertEqual(f'{f"{y}"*3}', '555')
def test_invalid_string_prefixes(self):
single_quote_cases = ["fu''",
"uf''",
"Fu''",
"fU''",
"Uf''",
"uF''",
"ufr''",
"urf''",
"fur''",
"fru''",
"rfu''",
"ruf''",
"FUR''",
"Fur''",
"fb''",
"fB''",
"Fb''",
"FB''",
"bf''",
"bF''",
"Bf''",
"BF''",]
double_quote_cases = [case.replace("'", '"') for case in single_quote_cases]
error_msg = (
'invalid syntax'
if use_old_parser()
else 'unexpected EOF while parsing'
)
self.assertAllRaise(SyntaxError, error_msg,
single_quote_cases + double_quote_cases)
def test_leading_trailing_spaces(self):
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}',
'expr={1: 2}')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }',
'expr={1: 2}')
def test_not_equal(self):
# There's a special test for this because there's a special
# case in the f-string parser to look for != as not ending an
# expression. Normally it would, while looking for !s or !r.
self.assertEqual(f'{3!=4}', 'True')
self.assertEqual(f'{3!=4:}', 'True')
self.assertEqual(f'{3!=4!s}', 'True')
self.assertEqual(f'{3!=4!s:.3}', 'Tru')
def test_equal_equal(self):
# Because an expression ending in = has special meaning,
# there's a special test for ==. Make sure it works.
self.assertEqual(f'{0==1}', 'False')
def test_conversions(self):
self.assertEqual(f'{3.14:10.10}', ' 3.14')
self.assertEqual(f'{3.14!s:10.10}', '3.14 ')
self.assertEqual(f'{3.14!r:10.10}', '3.14 ')
self.assertEqual(f'{3.14!a:10.10}', '3.14 ')
self.assertEqual(f'{"a"}', 'a')
self.assertEqual(f'{"a"!r}', "'a'")
self.assertEqual(f'{"a"!a}', "'a'")
# Not a conversion.
self.assertEqual(f'{"a!r"}', "a!r")
# Not a conversion, but show that ! is allowed in a format spec.
self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!')
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
["f'{3!g}'",
"f'{3!A}'",
"f'{3!3}'",
"f'{3!G}'",
"f'{3!!}'",
"f'{3!:}'",
"f'{3! s}'", # no space before conversion char
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{x!s{y}}'",
"f'{3!ss}'",
"f'{3!ss:}'",
"f'{3!ss:s}'",
])
def test_assignment(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["f'' = 3",
"f'{0}' = x",
"f'{x}' = x",
])
def test_del(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["del f''",
"del '' f''",
])
def test_mismatched_braces(self):
self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed",
["f'{{}'",
"f'{{}}}'",
"f'}'",
"f'x}'",
"f'x}x'",
r"f'\u007b}'",
# Can't have { or } in a format spec.
"f'{3:}>10}'",
"f'{3:}}>10}'",
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3:{{>10}'",
"f'{3'",
"f'{3!'",
"f'{3:'",
"f'{3!s'",
"f'{3!s:'",
"f'{3!s:3'",
"f'x{'",
"f'x{x'",
"f'{x'",
"f'{3:s'",
"f'{{{'",
"f'{{}}{'",
"f'{'",
])
# But these are just normal strings.
self.assertEqual(f'{"{"}', '{')
self.assertEqual(f'{"}"}', '}')
self.assertEqual(f'{3:{"}"}>10}', '}}}}}}}}}3')
self.assertEqual(f'{2:{"{"}>10}', '{{{{{{{{{2')
def test_if_conditional(self):
# There's special logic in compile.c to test if the
# conditional for an if (and while) are constants. Exercise
# that code.
def test_fstring(x, expected):
flag = 0
if f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_empty(x, expected):
flag = 0
if '' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_non_empty(x, expected):
flag = 0
if ' ' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
test_fstring('', 2)
test_fstring(' ', 1)
test_concat_empty('', 2)
test_concat_empty(' ', 1)
test_concat_non_empty('', 1)
test_concat_non_empty(' ', 1)
def test_empty_format_specifier(self):
x = 'test'
self.assertEqual(f'{x}', 'test')
self.assertEqual(f'{x:}', 'test')
self.assertEqual(f'{x!s:}', 'test')
self.assertEqual(f'{x!r:}', "'test'")
def test_str_format_differences(self):
d = {'a': 'string',
0: 'integer',
}
a = 0
self.assertEqual(f'{d[0]}', 'integer')
self.assertEqual(f'{d["a"]}', 'string')
self.assertEqual(f'{d[a]}', 'integer')
self.assertEqual('{d[a]}'.format(d=d), 'string')
self.assertEqual('{d[0]}'.format(d=d), 'integer')
    def test_errors(self):
        # see issue 26287
        # TypeError: these objects reject the 'x' format spec outright.
        self.assertAllRaise(TypeError, 'unsupported',
                            [r"f'{(lambda: 0):x}'",
                             r"f'{(0,):x}'",
                            ])
        # ValueError comes from the format machinery itself
        # ('j' is not a valid format code for ints).
        self.assertAllRaise(ValueError, 'Unknown format code',
                            [r"f'{1000:j}'",
                             r"f'{1000:j}'",
                            ])
<<<<<<< HEAD
=======
@unittest.skipIf(use_old_parser(), "The old parser only supports <fstring> as the filename")
>>>>>>> 3.9
def test_filename_in_syntaxerror(self):
# see issue 38964
with temp_cwd() as cwd:
file_path = os.path.join(cwd, 't.py')
with open(file_path, 'w') as f:
f.write('f"{a b}"') # This generates a SyntaxError
_, _, stderr = assert_python_failure(file_path,
PYTHONIOENCODING='ascii')
self.assertIn(file_path.encode('ascii', 'backslashreplace'), stderr)
def test_loop(self):
for i in range(1000):
self.assertEqual(f'i:{i}', 'i:' + str(i))
def test_dict(self):
d = {'"': 'dquote',
"'": 'squote',
'foo': 'bar',
}
self.assertEqual(f'''{d["'"]}''', 'squote')
self.assertEqual(f"""{d['"']}""", 'dquote')
self.assertEqual(f'{d["foo"]}', 'bar')
self.assertEqual(f"{d['foo']}", 'bar')
def test_backslash_char(self):
# Check eval of a backslash followed by a control char.
# See bpo-30682: this used to raise an assert in pydebug mode.
self.assertEqual(eval('f"\\\n"'), '')
self.assertEqual(eval('f"\\\r"'), '')
def test_debug_conversion(self):
x = 'A string'
self.assertEqual(f'{x=}', 'x=' + repr(x))
self.assertEqual(f'{x =}', 'x =' + repr(x))
self.assertEqual(f'{x=!s}', 'x=' + str(x))
self.assertEqual(f'{x=!r}', 'x=' + repr(x))
self.assertEqual(f'{x=!a}', 'x=' + ascii(x))
x = 2.71828
self.assertEqual(f'{x=:.2f}', 'x=' + format(x, '.2f'))
self.assertEqual(f'{x=:}', 'x=' + format(x, ''))
self.assertEqual(f'{x=!r:^20}', 'x=' + format(repr(x), '^20'))
self.assertEqual(f'{x=!s:^20}', 'x=' + format(str(x), '^20'))
self.assertEqual(f'{x=!a:^20}', 'x=' + format(ascii(x), '^20'))
x = 9
self.assertEqual(f'{3*x+15=}', '3*x+15=42')
# There is code in ast.c that deals with non-ascii expression values. So,
# use a unicode identifier to trigger that.
tenπ = 31.4
self.assertEqual(f'{tenπ=:.2f}', 'tenπ=31.40')
# Also test with Unicode in non-identifiers.
self.assertEqual(f'{"Σ"=}', '"Σ"=\'Σ\'')
# Make sure nested fstrings still work.
self.assertEqual(f'{f"{3.1415=:.1f}":*^20}', '*****3.1415=3.1*****')
# Make sure text before and after an expression with = works
# correctly.
pi = 'π'
self.assertEqual(f'alpha α {pi=} ω omega', "alpha α pi='π' ω omega")
# Check multi-line expressions.
self.assertEqual(f'''{
3
=}''', '\n3\n=3')
# Since = is handled specially, make sure all existing uses of
# it still work.
self.assertEqual(f'{0==1}', 'False')
self.assertEqual(f'{0!=1}', 'True')
self.assertEqual(f'{0<=1}', 'True')
self.assertEqual(f'{0>=1}', 'False')
self.assertEqual(f'{(x:="5")}', '5')
self.assertEqual(x, '5')
self.assertEqual(f'{(x:=5)}', '5')
self.assertEqual(x, 5)
self.assertEqual(f'{"="}', '=')
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'. See test_walrus: you need to use parens.
self.assertEqual(f'{x:=10}', ' 20')
# Test named function parameters, to make sure '=' parsing works
# there.
def f(a):
nonlocal x
oldx = x
x = a
return oldx
x = 0
self.assertEqual(f'{f(a="3=")}', '0')
self.assertEqual(x, '3=')
self.assertEqual(f'{f(a=4)}', '3=')
self.assertEqual(x, 4)
# Make sure __format__ is being called.
class C:
def __format__(self, s):
return f'FORMAT-{s}'
def __repr__(self):
return 'REPR'
self.assertEqual(f'{C()=}', 'C()=REPR')
self.assertEqual(f'{C()=!r}', 'C()=REPR')
self.assertEqual(f'{C()=:}', 'C()=FORMAT-')
self.assertEqual(f'{C()=: }', 'C()=FORMAT- ')
self.assertEqual(f'{C()=:x}', 'C()=FORMAT-x')
self.assertEqual(f'{C()=!r:*^20}', 'C()=********REPR********')
self.assertRaises(SyntaxError, eval, "f'{C=]'")
# Make sure leading and following text works.
x = 'foo'
self.assertEqual(f'X{x=}Y', 'Xx='+repr(x)+'Y')
# Make sure whitespace around the = works.
self.assertEqual(f'X{x =}Y', 'Xx ='+repr(x)+'Y')
self.assertEqual(f'X{x= }Y', 'Xx= '+repr(x)+'Y')
self.assertEqual(f'X{x = }Y', 'Xx = '+repr(x)+'Y')
# These next lines contains tabs. Backslash escapes don't
# work in f-strings.
# patchcheck doesn't like these tabs. So the only way to test
# this will be to dynamically created and exec the f-strings. But
# that's such a hassle I'll save it for another day. For now, convert
# the tabs to spaces just to shut up patchcheck.
#self.assertEqual(f'X{x =}Y', 'Xx\t='+repr(x)+'Y')
#self.assertEqual(f'X{x = }Y', 'Xx\t=\t'+repr(x)+'Y')
    def test_walrus(self):
        x = 20
        # This isn't an assignment expression, it's 'x', with a format
        # spec of '=10' ('=' alignment, width 10).
        self.assertEqual(f'{x:=10}', ' 20')

        # This is an assignment expression, which requires parens;
        # it rebinds x as a side effect of formatting.
        self.assertEqual(f'{(x:=10)}', '10')
        self.assertEqual(x, 10)
def test_invalid_syntax_error_message(self):
<<<<<<< HEAD
with self.assertRaisesRegex(SyntaxError, "f-string: invalid syntax"):
=======
err_msg = "invalid syntax" if use_old_parser() else "f-string: invalid syntax"
with self.assertRaisesRegex(SyntaxError, err_msg):
>>>>>>> 3.9
compile("f'{a $ b}'", "?", "exec")
def test_with_two_commas_in_format_specifier(self):
error_msg = re.escape("Cannot specify ',' with ','.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:,,}'
def test_with_two_underscore_in_format_specifier(self):
error_msg = re.escape("Cannot specify '_' with '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:__}'
def test_with_a_commas_and_an_underscore_in_format_specifier(self):
error_msg = re.escape("Cannot specify both ',' and '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:,_}'
def test_with_an_underscore_and_a_comma_in_format_specifier(self):
error_msg = re.escape("Cannot specify both ',' and '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:_,}'
if __name__ == '__main__':
unittest.main()
| false | true |
f7f5d90f582a08243e52ff2ab98c3b6b941e7eaf | 1,387 | py | Python | Embeddings-preparation/word2vec/word2vec.py | danovia/Hebrew-punctuator2 | a3a4410eb951a7aa08da779487c928fcba6ee589 | [
"MIT"
] | 2 | 2018-09-11T18:26:48.000Z | 2018-09-16T06:15:15.000Z | Embeddings-preparation/word2vec/word2vec.py | danovia/Hebrew-punctuator2 | a3a4410eb951a7aa08da779487c928fcba6ee589 | [
"MIT"
] | null | null | null | Embeddings-preparation/word2vec/word2vec.py | danovia/Hebrew-punctuator2 | a3a4410eb951a7aa08da779487c928fcba6ee589 | [
"MIT"
] | null | null | null | import multiprocessing
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import time
def train():
    """Train the two Hebrew skip-gram word2vec models and save them.

    Model 1: morpheme-level embeddings (window=10).
    Model 2: word-level embeddings (window=5).
    """
    # Model 1 - create morpheme-embeddings
    _train_one("../wiki-he-morph-FULL.txt",
               "./wiki.he-morph.window10.word2vec.skipgram-model",
               window=10)

    # Model 2 - create word-embeddings
    _train_one("../wiki.he.text",
               "./wiki.he-regular.window5.word2vec.skipgram-model",
               window=5)


def _train_one(inp, out_model, window):
    """Train one skip-gram Word2Vec model on *inp* and save it.

    Writes the binary gensim model to *out_model* and a plain-text
    word2vec file to *out_model* + '.vec'.
    """
    start = time.time()
    # Previously only the first model printed its start time; print it
    # for every model so the two runs log consistently.
    print(start)
    model = Word2Vec(LineSentence(inp), sg=1,  # 0=CBOW , 1=SkipGram
                     size=100, window=window, min_count=5,
                     workers=multiprocessing.cpu_count())
    # trim unneeded model memory = use (much) less RAM
    model.init_sims(replace=True)
    print(time.time() - start)  # elapsed training seconds
    model.save(out_model)
    model.wv.save_word2vec_format(out_model + '.vec', binary=False)
def getModel(model = "wiki.he.word2vec.model"):
    """Load a previously saved gensim Word2Vec model from *model* path."""
    loaded = Word2Vec.load(model)
    return loaded
if __name__ == '__main__':
train()
| 33.02381 | 91 | 0.679164 | import multiprocessing
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import time
def train():
    # NOTE(review): this is a comment-stripped duplicate of the train()
    # definition earlier in this dump; kept byte-identical here.
    # Model 1 - morpheme-level embeddings (window=10).
    start = time.time()
    print(start)
    inp1 = "../wiki-he-morph-FULL.txt"
    out_model1 = "./wiki.he-morph.window10.word2vec.skipgram-model"
    # sg=1 selects skip-gram (0 would be CBOW).
    model1 = Word2Vec(LineSentence(inp1), sg = 1,
            size=100, window=10, min_count=5, workers=multiprocessing.cpu_count())
    # Trim unneeded model memory = use (much) less RAM.
    model1.init_sims(replace=True)
    print(time.time()-start)
    model1.save(out_model1)
    model1.wv.save_word2vec_format(out_model1+'.vec', binary=False)
    # Model 2 - word-level embeddings (window=5).
    start = time.time()
    inp2 = "../wiki.he.text"
    out_model2 = "./wiki.he-regular.window5.word2vec.skipgram-model"
    model2 = Word2Vec(LineSentence(inp2), sg = 1,
            size=100, window=5, min_count=5, workers=multiprocessing.cpu_count())
    # Trim unneeded model memory = use (much) less RAM.
    model2.init_sims(replace=True)
    print(time.time()-start)
    model2.save(out_model2)
    model2.wv.save_word2vec_format(out_model2+'.vec', binary=False)
def getModel(model = "wiki.he.word2vec.model"):
    """Load and return a saved gensim Word2Vec model from *model* path."""
    loaded_model = Word2Vec.load(model)
    return loaded_model
if __name__ == '__main__':
train()
| true | true |
f7f5d93d19378a819de943e05dc70bc0bc8aa570 | 19,164 | py | Python | src/twisted/conch/recvline.py | KentShikama/twisted | 65c933d19ab52175c1d7823bf5e054a266bfdc55 | [
"MIT",
"Unlicense"
] | 1 | 2022-01-14T05:50:29.000Z | 2022-01-14T05:50:29.000Z | src/twisted/conch/recvline.py | KentShikama/twisted | 65c933d19ab52175c1d7823bf5e054a266bfdc55 | [
"MIT",
"Unlicense"
] | 3 | 2015-08-20T09:05:57.000Z | 2015-11-10T08:30:51.000Z | src/twisted/conch/recvline.py | KentShikama/twisted | 65c933d19ab52175c1d7823bf5e054a266bfdc55 | [
"MIT",
"Unlicense"
] | 3 | 2021-08-21T04:09:17.000Z | 2021-08-25T01:00:41.000Z | # -*- test-case-name: twisted.conch.test.test_recvline -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Basic line editing support.
@author: Jp Calderone
"""
import string
from typing import Dict
from zope.interface import implementer
from twisted.conch.insults import insults, helper
from twisted.python import reflect
from twisted.python.compat import iterbytes
from twisted.logger import Logger
_counters: Dict[str, int] = {}
class Logging:
    """
    Wrapper which logs attribute lookups.

    This was useful in debugging something, I guess. I forget what.
    It can probably be deleted or moved somewhere more appropriate.
    Nothing special going on here, really.
    """

    def __init__(self, original):
        # The wrapped object whose attribute lookups get logged.
        self.original = original
        # One log file per (qualified class name, instance count),
        # e.g. "pkg.mod.Cls-0", "pkg.mod.Cls-1", ...
        key = reflect.qual(original.__class__)
        count = _counters.get(key, 0)
        _counters[key] = count + 1
        # NOTE(review): this handle is never closed and no encoding is
        # given -- tolerable for a debug-only helper, but each instance
        # leaks a file descriptor.
        self._logFile = open(key + "-" + str(count), "w")

    def __str__(self) -> str:
        # Go through super() so this lookup itself is not logged.
        return str(super().__getattribute__("original"))

    def __repr__(self) -> str:
        # Go through super() so this lookup itself is not logged.
        return repr(super().__getattribute__("original"))

    def __getattribute__(self, name):
        # Record every attribute name requested, then delegate the
        # actual lookup to the wrapped object.
        original = super().__getattribute__("original")
        logFile = super().__getattribute__("_logFile")
        logFile.write(name + "\n")
        return getattr(original, name)
@implementer(insults.ITerminalTransport)
class TransportSequence:
"""
An L{ITerminalTransport} implementation which forwards calls to
one or more other L{ITerminalTransport}s.
This is a cheap way for servers to keep track of the state they
expect the client to see, since all terminal manipulations can be
send to the real client and to a terminal emulator that lives in
the server process.
"""
for keyID in (
b"UP_ARROW",
b"DOWN_ARROW",
b"RIGHT_ARROW",
b"LEFT_ARROW",
b"HOME",
b"INSERT",
b"DELETE",
b"END",
b"PGUP",
b"PGDN",
b"F1",
b"F2",
b"F3",
b"F4",
b"F5",
b"F6",
b"F7",
b"F8",
b"F9",
b"F10",
b"F11",
b"F12",
):
execBytes = keyID + b" = object()"
execStr = execBytes.decode("ascii")
exec(execStr)
TAB = b"\t"
BACKSPACE = b"\x7f"
def __init__(self, *transports):
assert transports, "Cannot construct a TransportSequence with no transports"
self.transports = transports
for method in insults.ITerminalTransport:
exec(
"""\
def %s(self, *a, **kw):
for tpt in self.transports:
result = tpt.%s(*a, **kw)
return result
"""
% (method, method)
)
def getHost(self):
# ITransport.getHost
raise NotImplementedError("Unimplemented: TransportSequence.getHost")
def getPeer(self):
# ITransport.getPeer
raise NotImplementedError("Unimplemented: TransportSequence.getPeer")
def loseConnection(self):
# ITransport.loseConnection
raise NotImplementedError("Unimplemented: TransportSequence.loseConnection")
def write(self, data):
# ITransport.write
raise NotImplementedError("Unimplemented: TransportSequence.write")
def writeSequence(self, data):
# ITransport.writeSequence
raise NotImplementedError("Unimplemented: TransportSequence.writeSequence")
def cursorUp(self, n=1):
# ITerminalTransport.cursorUp
raise NotImplementedError("Unimplemented: TransportSequence.cursorUp")
def cursorDown(self, n=1):
# ITerminalTransport.cursorDown
raise NotImplementedError("Unimplemented: TransportSequence.cursorDown")
def cursorForward(self, n=1):
# ITerminalTransport.cursorForward
raise NotImplementedError("Unimplemented: TransportSequence.cursorForward")
def cursorBackward(self, n=1):
# ITerminalTransport.cursorBackward
raise NotImplementedError("Unimplemented: TransportSequence.cursorBackward")
def cursorPosition(self, column, line):
# ITerminalTransport.cursorPosition
raise NotImplementedError("Unimplemented: TransportSequence.cursorPosition")
def cursorHome(self):
# ITerminalTransport.cursorHome
raise NotImplementedError("Unimplemented: TransportSequence.cursorHome")
def index(self):
# ITerminalTransport.index
raise NotImplementedError("Unimplemented: TransportSequence.index")
def reverseIndex(self):
# ITerminalTransport.reverseIndex
raise NotImplementedError("Unimplemented: TransportSequence.reverseIndex")
def nextLine(self):
# ITerminalTransport.nextLine
raise NotImplementedError("Unimplemented: TransportSequence.nextLine")
def saveCursor(self):
# ITerminalTransport.saveCursor
raise NotImplementedError("Unimplemented: TransportSequence.saveCursor")
def restoreCursor(self):
# ITerminalTransport.restoreCursor
raise NotImplementedError("Unimplemented: TransportSequence.restoreCursor")
def setModes(self, modes):
# ITerminalTransport.setModes
raise NotImplementedError("Unimplemented: TransportSequence.setModes")
def resetModes(self, mode):
# ITerminalTransport.resetModes
raise NotImplementedError("Unimplemented: TransportSequence.resetModes")
def setPrivateModes(self, modes):
# ITerminalTransport.setPrivateModes
raise NotImplementedError("Unimplemented: TransportSequence.setPrivateModes")
def resetPrivateModes(self, modes):
# ITerminalTransport.resetPrivateModes
raise NotImplementedError("Unimplemented: TransportSequence.resetPrivateModes")
def applicationKeypadMode(self):
# ITerminalTransport.applicationKeypadMode
raise NotImplementedError(
"Unimplemented: TransportSequence.applicationKeypadMode"
)
def numericKeypadMode(self):
# ITerminalTransport.numericKeypadMode
raise NotImplementedError("Unimplemented: TransportSequence.numericKeypadMode")
def selectCharacterSet(self, charSet, which):
# ITerminalTransport.selectCharacterSet
raise NotImplementedError("Unimplemented: TransportSequence.selectCharacterSet")
def shiftIn(self):
# ITerminalTransport.shiftIn
raise NotImplementedError("Unimplemented: TransportSequence.shiftIn")
def shiftOut(self):
# ITerminalTransport.shiftOut
raise NotImplementedError("Unimplemented: TransportSequence.shiftOut")
def singleShift2(self):
# ITerminalTransport.singleShift2
raise NotImplementedError("Unimplemented: TransportSequence.singleShift2")
def singleShift3(self):
# ITerminalTransport.singleShift3
raise NotImplementedError("Unimplemented: TransportSequence.singleShift3")
def selectGraphicRendition(self, *attributes):
# ITerminalTransport.selectGraphicRendition
raise NotImplementedError(
"Unimplemented: TransportSequence.selectGraphicRendition"
)
def horizontalTabulationSet(self):
# ITerminalTransport.horizontalTabulationSet
raise NotImplementedError(
"Unimplemented: TransportSequence.horizontalTabulationSet"
)
def tabulationClear(self):
# ITerminalTransport.tabulationClear
raise NotImplementedError("Unimplemented: TransportSequence.tabulationClear")
def tabulationClearAll(self):
# ITerminalTransport.tabulationClearAll
raise NotImplementedError("Unimplemented: TransportSequence.tabulationClearAll")
def doubleHeightLine(self, top=True):
# ITerminalTransport.doubleHeightLine
raise NotImplementedError("Unimplemented: TransportSequence.doubleHeightLine")
def singleWidthLine(self):
# ITerminalTransport.singleWidthLine
raise NotImplementedError("Unimplemented: TransportSequence.singleWidthLine")
def doubleWidthLine(self):
# ITerminalTransport.doubleWidthLine
raise NotImplementedError("Unimplemented: TransportSequence.doubleWidthLine")
def eraseToLineEnd(self):
# ITerminalTransport.eraseToLineEnd
raise NotImplementedError("Unimplemented: TransportSequence.eraseToLineEnd")
def eraseToLineBeginning(self):
# ITerminalTransport.eraseToLineBeginning
raise NotImplementedError(
"Unimplemented: TransportSequence.eraseToLineBeginning"
)
def eraseLine(self):
# ITerminalTransport.eraseLine
raise NotImplementedError("Unimplemented: TransportSequence.eraseLine")
def eraseToDisplayEnd(self):
# ITerminalTransport.eraseToDisplayEnd
raise NotImplementedError("Unimplemented: TransportSequence.eraseToDisplayEnd")
def eraseToDisplayBeginning(self):
# ITerminalTransport.eraseToDisplayBeginning
raise NotImplementedError(
"Unimplemented: TransportSequence.eraseToDisplayBeginning"
)
def eraseDisplay(self):
# ITerminalTransport.eraseDisplay
raise NotImplementedError("Unimplemented: TransportSequence.eraseDisplay")
def deleteCharacter(self, n=1):
# ITerminalTransport.deleteCharacter
raise NotImplementedError("Unimplemented: TransportSequence.deleteCharacter")
def insertLine(self, n=1):
# ITerminalTransport.insertLine
raise NotImplementedError("Unimplemented: TransportSequence.insertLine")
def deleteLine(self, n=1):
# ITerminalTransport.deleteLine
raise NotImplementedError("Unimplemented: TransportSequence.deleteLine")
def reportCursorPosition(self):
# ITerminalTransport.reportCursorPosition
raise NotImplementedError(
"Unimplemented: TransportSequence.reportCursorPosition"
)
def reset(self):
# ITerminalTransport.reset
raise NotImplementedError("Unimplemented: TransportSequence.reset")
def unhandledControlSequence(self, seq):
# ITerminalTransport.unhandledControlSequence
raise NotImplementedError(
"Unimplemented: TransportSequence.unhandledControlSequence"
)
class LocalTerminalBufferMixin:
    """
    A mixin for RecvLine subclasses which records the state of the terminal.

    This is accomplished by performing all L{ITerminalTransport} operations on both
    the transport passed to makeConnection and an instance of helper.TerminalBuffer.

    @ivar terminalCopy: A L{helper.TerminalBuffer} instance which efforts
        will be made to keep up to date with the actual terminal
        associated with this protocol instance.
    """

    def makeConnection(self, transport):
        # Wrap the real transport in a TransportSequence so every operation
        # is mirrored into the local TerminalBuffer copy as well.
        self.terminalCopy = helper.TerminalBuffer()
        self.terminalCopy.connectionMade()
        return super().makeConnection(TransportSequence(transport, self.terminalCopy))

    def __str__(self) -> str:
        # Render the recorded terminal state, not the live transport.
        return str(self.terminalCopy)
class RecvLine(insults.TerminalProtocol):
    """
    L{TerminalProtocol} which adds line editing features.

    Clients will be prompted for lines of input with all the usual
    features: character echoing, left and right arrow support for
    moving the cursor to different areas of the line buffer, backspace
    and delete for removing characters, and insert for toggling
    between typeover and insert mode.  Tabs will be expanded to enough
    spaces to move the cursor to the next tabstop (every four
    characters by default).  Enter causes the line buffer to be
    cleared and the line to be passed to the lineReceived() method
    which, by default, does nothing.  Subclasses are responsible for
    redrawing the input prompt (this will probably change).
    """

    # Assumed terminal dimensions until terminalSize() reports real ones.
    width = 80
    height = 24

    # Tabstop interval, in characters.
    TABSTOP = 4

    # Prompt strings; ps[pn] is the currently active prompt.
    ps = (b">>> ", b"... ")
    pn = 0
    _printableChars = string.printable.encode("ascii")

    _log = Logger()

    def connectionMade(self):
        # A list containing the characters making up the current line
        self.lineBuffer = []

        # A zero-based index into self.lineBuffer.
        # Indicates the current cursor position.
        self.lineBufferIndex = 0

        t = self.terminal

        # A map of keyIDs to bound instance methods.
        self.keyHandlers = {
            t.LEFT_ARROW: self.handle_LEFT,
            t.RIGHT_ARROW: self.handle_RIGHT,
            t.TAB: self.handle_TAB,
            # Both of these should not be necessary, but figuring out
            # which is necessary is a huge hassle.
            b"\r": self.handle_RETURN,
            b"\n": self.handle_RETURN,
            t.BACKSPACE: self.handle_BACKSPACE,
            t.DELETE: self.handle_DELETE,
            t.INSERT: self.handle_INSERT,
            t.HOME: self.handle_HOME,
            t.END: self.handle_END,
        }

        self.initializeScreen()

    def initializeScreen(self):
        # Hmm, state sucks.  Oh well.
        # For now we will just take over the whole terminal.
        self.terminal.reset()
        self.terminal.write(self.ps[self.pn])
        # XXX Note: I would prefer to default to starting in insert
        # mode, however this does not seem to actually work!  I do not
        # know why.  This is probably of interest to implementors
        # subclassing RecvLine.

        # XXX XXX Note: But the unit tests all expect the initial mode
        # to be insert right now.  There really needs to be a way to
        # query the current mode or something.
        # self.setTypeoverMode()
        self.setInsertMode()

    def currentLineBuffer(self):
        """Return (bytes before the cursor, bytes at and after the cursor)."""
        s = b"".join(self.lineBuffer)
        return s[: self.lineBufferIndex], s[self.lineBufferIndex :]

    def setInsertMode(self):
        """Switch to insert mode: typed characters push later ones right."""
        self.mode = "insert"
        self.terminal.setModes([insults.modes.IRM])

    def setTypeoverMode(self):
        """Switch to typeover mode: typed characters replace existing ones."""
        self.mode = "typeover"
        self.terminal.resetModes([insults.modes.IRM])

    def drawInputLine(self):
        """
        Write a line containing the current input prompt and the current line
        buffer at the current cursor position.
        """
        self.terminal.write(self.ps[self.pn] + b"".join(self.lineBuffer))

    def terminalSize(self, width, height):
        # XXX - Clear the previous input line, redraw it at the new
        # cursor position
        self.terminal.eraseDisplay()
        self.terminal.cursorHome()
        self.width = width
        self.height = height
        self.drawInputLine()

    def unhandledControlSequence(self, seq):
        # Silently ignore control sequences we do not recognize.
        pass

    def keystrokeReceived(self, keyID, modifier):
        # Dispatch to a bound handler if one exists; echo printable
        # characters; log anything else.
        m = self.keyHandlers.get(keyID)
        if m is not None:
            m()
        elif keyID in self._printableChars:
            self.characterReceived(keyID, False)
        else:
            self._log.warn("Received unhandled keyID: {keyID!r}", keyID=keyID)

    def characterReceived(self, ch, moreCharactersComing):
        # Insert or overwrite depending on the current editing mode,
        # then advance the cursor and echo the character.
        if self.mode == "insert":
            self.lineBuffer.insert(self.lineBufferIndex, ch)
        else:
            self.lineBuffer[self.lineBufferIndex : self.lineBufferIndex + 1] = [ch]
        self.lineBufferIndex += 1
        self.terminal.write(ch)

    def handle_TAB(self):
        # Pad with spaces up to the next tabstop.
        n = self.TABSTOP - (len(self.lineBuffer) % self.TABSTOP)
        self.terminal.cursorForward(n)
        self.lineBufferIndex += n
        self.lineBuffer.extend(iterbytes(b" " * n))

    def handle_LEFT(self):
        if self.lineBufferIndex > 0:
            self.lineBufferIndex -= 1
            self.terminal.cursorBackward()

    def handle_RIGHT(self):
        if self.lineBufferIndex < len(self.lineBuffer):
            self.lineBufferIndex += 1
            self.terminal.cursorForward()

    def handle_HOME(self):
        # Move the cursor to the start of the line buffer.
        if self.lineBufferIndex:
            self.terminal.cursorBackward(self.lineBufferIndex)
            self.lineBufferIndex = 0

    def handle_END(self):
        # Move the cursor past the last character in the line buffer.
        offset = len(self.lineBuffer) - self.lineBufferIndex
        if offset:
            self.terminal.cursorForward(offset)
            self.lineBufferIndex = len(self.lineBuffer)

    def handle_BACKSPACE(self):
        # Delete the character to the left of the cursor, if any.
        if self.lineBufferIndex > 0:
            self.lineBufferIndex -= 1
            del self.lineBuffer[self.lineBufferIndex]
            self.terminal.cursorBackward()
            self.terminal.deleteCharacter()

    def handle_DELETE(self):
        # Delete the character under the cursor, if any.
        if self.lineBufferIndex < len(self.lineBuffer):
            del self.lineBuffer[self.lineBufferIndex]
            self.terminal.deleteCharacter()

    def handle_RETURN(self):
        # Deliver the completed line and reset the editing state.
        line = b"".join(self.lineBuffer)
        self.lineBuffer = []
        self.lineBufferIndex = 0
        self.terminal.nextLine()
        self.lineReceived(line)

    def handle_INSERT(self):
        # Toggle between insert and typeover editing modes.
        assert self.mode in ("typeover", "insert")
        if self.mode == "typeover":
            self.setInsertMode()
        else:
            self.setTypeoverMode()

    def lineReceived(self, line):
        """Called with each completed line; subclasses override this hook."""
        pass
class HistoricRecvLine(RecvLine):
    """
    L{TerminalProtocol} which adds both basic line-editing features and input history.

    Everything supported by L{RecvLine} is also supported by this class.  In addition, the
    up and down arrows traverse the input history.  Each received line is automatically
    added to the end of the input history.
    """

    def connectionMade(self):
        RecvLine.connectionMade(self)

        # All previously submitted lines, oldest first.
        self.historyLines = []
        # Index into historyLines; equal to len(historyLines) when the
        # user is editing a new (not-yet-submitted) line.
        self.historyPosition = 0

        t = self.terminal
        self.keyHandlers.update(
            {t.UP_ARROW: self.handle_UP, t.DOWN_ARROW: self.handle_DOWN}
        )

    def currentHistoryBuffer(self):
        """Return (lines before the history cursor, lines at and after it)."""
        b = tuple(self.historyLines)
        return b[: self.historyPosition], b[self.historyPosition :]

    def _deliverBuffer(self, buf):
        # Replay a history line through characterReceived(), flagging all
        # but the final character as having more input pending.
        if buf:
            for ch in iterbytes(buf[:-1]):
                self.characterReceived(ch, True)
            self.characterReceived(buf[-1:], False)

    def handle_UP(self):
        # Stash the in-progress line before walking back into history.
        if self.lineBuffer and self.historyPosition == len(self.historyLines):
            self.historyLines.append(b"".join(self.lineBuffer))
        if self.historyPosition > 0:
            self.handle_HOME()
            self.terminal.eraseToLineEnd()

            self.historyPosition -= 1
            self.lineBuffer = []

            self._deliverBuffer(self.historyLines[self.historyPosition])

    def handle_DOWN(self):
        if self.historyPosition < len(self.historyLines) - 1:
            self.handle_HOME()
            self.terminal.eraseToLineEnd()

            self.historyPosition += 1
            self.lineBuffer = []

            self._deliverBuffer(self.historyLines[self.historyPosition])
        else:
            # Already at the newest entry: clear back to an empty new line.
            self.handle_HOME()
            self.terminal.eraseToLineEnd()

            self.historyPosition = len(self.historyLines)
            self.lineBuffer = []
            self.lineBufferIndex = 0

    def handle_RETURN(self):
        # Record the submitted line in history before normal RETURN handling.
        if self.lineBuffer:
            self.historyLines.append(b"".join(self.lineBuffer))
        self.historyPosition = len(self.historyLines)
        return RecvLine.handle_RETURN(self)
| 33.562172 | 90 | 0.670685 |
import string
from typing import Dict
from zope.interface import implementer
from twisted.conch.insults import insults, helper
from twisted.python import reflect
from twisted.python.compat import iterbytes
from twisted.logger import Logger
# Per-class count of Logging wrappers created so far; used by Logging to
# derive a unique log-file name for each wrapped instance.
_counters: Dict[str, int] = {}
class Logging:
    """
    Transparent proxy that records every attribute access on a wrapped
    object to a per-instance log file named
    C{<qualified class name>-<instance number>}.
    """

    def __init__(self, original):
        self.original = original
        qualName = reflect.qual(original.__class__)
        serial = _counters.get(qualName, 0)
        _counters[qualName] = serial + 1
        # NOTE(review): this file is held open for the wrapper's lifetime
        # and never explicitly closed.
        self._logFile = open("{}-{}".format(qualName, serial), "w")

    def __str__(self) -> str:
        wrapped = super().__getattribute__("original")
        return str(wrapped)

    def __repr__(self) -> str:
        wrapped = super().__getattribute__("original")
        return repr(wrapped)

    def __getattribute__(self, name):
        # Bypass this very method when reading our own attributes, log the
        # requested name, then delegate the lookup to the wrapped object.
        rawGet = super().__getattribute__
        rawGet("_logFile").write(name + "\n")
        return getattr(rawGet("original"), name)
@implementer(insults.ITerminalTransport)
class TransportSequence:
    """
    An L{insults.ITerminalTransport} intended to forward each terminal
    operation to every transport in C{self.transports} (see the generated
    methods below).  The operations defined explicitly in the class body
    raise L{NotImplementedError}.
    """

    # Create one distinct sentinel object per special key, bound as a
    # class attribute with the key's name.
    for keyID in (
        b"UP_ARROW",
        b"DOWN_ARROW",
        b"RIGHT_ARROW",
        b"LEFT_ARROW",
        b"HOME",
        b"INSERT",
        b"DELETE",
        b"END",
        b"PGUP",
        b"PGDN",
        b"F1",
        b"F2",
        b"F3",
        b"F4",
        b"F5",
        b"F6",
        b"F7",
        b"F8",
        b"F9",
        b"F10",
        b"F11",
        b"F12",
    ):
        execBytes = keyID + b" = object()"
        execStr = execBytes.decode("ascii")
        exec(execStr)

    TAB = b"\t"
    BACKSPACE = b"\x7f"

    def __init__(self, *transports):
        assert transports, "Cannot construct a TransportSequence with no transports"
        self.transports = transports

    # Generate one forwarding method per ITerminalTransport operation.
    # NOTE(review): the explicit stubs defined after this loop shadow the
    # generated methods of the same name — confirm which set is meant to win.
    for method in insults.ITerminalTransport:
        exec(
            """\
def %s(self, *a, **kw):
    for tpt in self.transports:
        result = tpt.%s(*a, **kw)
    return result
"""
            % (method, method)
        )

    def getHost(self):
        raise NotImplementedError("Unimplemented: TransportSequence.getHost")

    def getPeer(self):
        raise NotImplementedError("Unimplemented: TransportSequence.getPeer")

    def loseConnection(self):
        raise NotImplementedError("Unimplemented: TransportSequence.loseConnection")

    def write(self, data):
        raise NotImplementedError("Unimplemented: TransportSequence.write")

    def writeSequence(self, data):
        raise NotImplementedError("Unimplemented: TransportSequence.writeSequence")

    def cursorUp(self, n=1):
        raise NotImplementedError("Unimplemented: TransportSequence.cursorUp")

    def cursorDown(self, n=1):
        raise NotImplementedError("Unimplemented: TransportSequence.cursorDown")

    def cursorForward(self, n=1):
        raise NotImplementedError("Unimplemented: TransportSequence.cursorForward")

    def cursorBackward(self, n=1):
        raise NotImplementedError("Unimplemented: TransportSequence.cursorBackward")

    def cursorPosition(self, column, line):
        raise NotImplementedError("Unimplemented: TransportSequence.cursorPosition")

    def cursorHome(self):
        raise NotImplementedError("Unimplemented: TransportSequence.cursorHome")

    def index(self):
        raise NotImplementedError("Unimplemented: TransportSequence.index")

    def reverseIndex(self):
        raise NotImplementedError("Unimplemented: TransportSequence.reverseIndex")

    def nextLine(self):
        raise NotImplementedError("Unimplemented: TransportSequence.nextLine")

    def saveCursor(self):
        raise NotImplementedError("Unimplemented: TransportSequence.saveCursor")

    def restoreCursor(self):
        raise NotImplementedError("Unimplemented: TransportSequence.restoreCursor")

    def setModes(self, modes):
        raise NotImplementedError("Unimplemented: TransportSequence.setModes")

    def resetModes(self, mode):
        raise NotImplementedError("Unimplemented: TransportSequence.resetModes")

    def setPrivateModes(self, modes):
        raise NotImplementedError("Unimplemented: TransportSequence.setPrivateModes")

    def resetPrivateModes(self, modes):
        raise NotImplementedError("Unimplemented: TransportSequence.resetPrivateModes")

    def applicationKeypadMode(self):
        raise NotImplementedError(
            "Unimplemented: TransportSequence.applicationKeypadMode"
        )

    def numericKeypadMode(self):
        raise NotImplementedError("Unimplemented: TransportSequence.numericKeypadMode")

    def selectCharacterSet(self, charSet, which):
        raise NotImplementedError("Unimplemented: TransportSequence.selectCharacterSet")

    def shiftIn(self):
        raise NotImplementedError("Unimplemented: TransportSequence.shiftIn")

    def shiftOut(self):
        raise NotImplementedError("Unimplemented: TransportSequence.shiftOut")

    def singleShift2(self):
        raise NotImplementedError("Unimplemented: TransportSequence.singleShift2")

    def singleShift3(self):
        raise NotImplementedError("Unimplemented: TransportSequence.singleShift3")

    def selectGraphicRendition(self, *attributes):
        raise NotImplementedError(
            "Unimplemented: TransportSequence.selectGraphicRendition"
        )

    def horizontalTabulationSet(self):
        raise NotImplementedError(
            "Unimplemented: TransportSequence.horizontalTabulationSet"
        )

    def tabulationClear(self):
        raise NotImplementedError("Unimplemented: TransportSequence.tabulationClear")

    def tabulationClearAll(self):
        raise NotImplementedError("Unimplemented: TransportSequence.tabulationClearAll")

    def doubleHeightLine(self, top=True):
        raise NotImplementedError("Unimplemented: TransportSequence.doubleHeightLine")

    def singleWidthLine(self):
        raise NotImplementedError("Unimplemented: TransportSequence.singleWidthLine")

    def doubleWidthLine(self):
        raise NotImplementedError("Unimplemented: TransportSequence.doubleWidthLine")

    def eraseToLineEnd(self):
        raise NotImplementedError("Unimplemented: TransportSequence.eraseToLineEnd")

    def eraseToLineBeginning(self):
        raise NotImplementedError(
            "Unimplemented: TransportSequence.eraseToLineBeginning"
        )

    def eraseLine(self):
        raise NotImplementedError("Unimplemented: TransportSequence.eraseLine")

    def eraseToDisplayEnd(self):
        raise NotImplementedError("Unimplemented: TransportSequence.eraseToDisplayEnd")

    def eraseToDisplayBeginning(self):
        raise NotImplementedError(
            "Unimplemented: TransportSequence.eraseToDisplayBeginning"
        )

    def eraseDisplay(self):
        raise NotImplementedError("Unimplemented: TransportSequence.eraseDisplay")

    def deleteCharacter(self, n=1):
        raise NotImplementedError("Unimplemented: TransportSequence.deleteCharacter")

    def insertLine(self, n=1):
        raise NotImplementedError("Unimplemented: TransportSequence.insertLine")

    def deleteLine(self, n=1):
        raise NotImplementedError("Unimplemented: TransportSequence.deleteLine")

    def reportCursorPosition(self):
        raise NotImplementedError(
            "Unimplemented: TransportSequence.reportCursorPosition"
        )

    def reset(self):
        raise NotImplementedError("Unimplemented: TransportSequence.reset")

    def unhandledControlSequence(self, seq):
        raise NotImplementedError(
            "Unimplemented: TransportSequence.unhandledControlSequence"
        )
class LocalTerminalBufferMixin:
    """
    A mixin for RecvLine subclasses which records the state of the terminal
    by mirroring every transport operation into a local
    L{helper.TerminalBuffer} kept in C{self.terminalCopy}.
    """

    def makeConnection(self, transport):
        # Wrap the real transport so operations also hit the local copy.
        self.terminalCopy = helper.TerminalBuffer()
        self.terminalCopy.connectionMade()
        return super().makeConnection(TransportSequence(transport, self.terminalCopy))

    def __str__(self) -> str:
        # Render the recorded terminal state.
        return str(self.terminalCopy)
class RecvLine(insults.TerminalProtocol):
    """
    L{TerminalProtocol} which adds line editing features: character
    echoing, cursor movement within the line buffer, backspace/delete,
    insert/typeover toggling, tab expansion, and delivery of completed
    lines to lineReceived().
    """

    # Assumed terminal dimensions until terminalSize() reports real ones.
    width = 80
    height = 24

    # Tabstop interval, in characters.
    TABSTOP = 4

    # Prompt strings; ps[pn] is the currently active prompt.
    ps = (b">>> ", b"... ")
    pn = 0
    _printableChars = string.printable.encode("ascii")

    _log = Logger()

    def connectionMade(self):
        # Characters making up the current line, and the zero-based cursor
        # position within them.
        self.lineBuffer = []
        self.lineBufferIndex = 0

        t = self.terminal

        # Map of keyIDs to bound handler methods.  Both b"\r" and b"\n" are
        # mapped to RETURN so either line terminator works.
        self.keyHandlers = {
            t.LEFT_ARROW: self.handle_LEFT,
            t.RIGHT_ARROW: self.handle_RIGHT,
            t.TAB: self.handle_TAB,
            b"\r": self.handle_RETURN,
            b"\n": self.handle_RETURN,
            t.BACKSPACE: self.handle_BACKSPACE,
            t.DELETE: self.handle_DELETE,
            t.INSERT: self.handle_INSERT,
            t.HOME: self.handle_HOME,
            t.END: self.handle_END,
        }

        self.initializeScreen()

    def initializeScreen(self):
        # Take over the whole terminal: reset it, show the prompt, and
        # start in insert mode.
        self.terminal.reset()
        self.terminal.write(self.ps[self.pn])
        self.setInsertMode()

    def currentLineBuffer(self):
        """Return (bytes before the cursor, bytes at and after the cursor)."""
        s = b"".join(self.lineBuffer)
        return s[: self.lineBufferIndex], s[self.lineBufferIndex :]

    def setInsertMode(self):
        """Switch to insert mode: typed characters push later ones right."""
        self.mode = "insert"
        self.terminal.setModes([insults.modes.IRM])

    def setTypeoverMode(self):
        """Switch to typeover mode: typed characters replace existing ones."""
        self.mode = "typeover"
        self.terminal.resetModes([insults.modes.IRM])

    def drawInputLine(self):
        """Write the current prompt followed by the current line buffer."""
        self.terminal.write(self.ps[self.pn] + b"".join(self.lineBuffer))

    def terminalSize(self, width, height):
        # Clear the display and redraw the input line at the new size.
        self.terminal.eraseDisplay()
        self.terminal.cursorHome()
        self.width = width
        self.height = height
        self.drawInputLine()

    def unhandledControlSequence(self, seq):
        # Silently ignore control sequences we do not recognize.
        pass

    def keystrokeReceived(self, keyID, modifier):
        # Dispatch to a bound handler if one exists; echo printable
        # characters; log anything else.
        m = self.keyHandlers.get(keyID)
        if m is not None:
            m()
        elif keyID in self._printableChars:
            self.characterReceived(keyID, False)
        else:
            self._log.warn("Received unhandled keyID: {keyID!r}", keyID=keyID)

    def characterReceived(self, ch, moreCharactersComing):
        # Insert or overwrite according to the current mode, then advance
        # the cursor and echo the character.
        if self.mode == "insert":
            self.lineBuffer.insert(self.lineBufferIndex, ch)
        else:
            self.lineBuffer[self.lineBufferIndex : self.lineBufferIndex + 1] = [ch]
        self.lineBufferIndex += 1
        self.terminal.write(ch)

    def handle_TAB(self):
        # Pad with spaces up to the next tabstop.
        n = self.TABSTOP - (len(self.lineBuffer) % self.TABSTOP)
        self.terminal.cursorForward(n)
        self.lineBufferIndex += n
        self.lineBuffer.extend(iterbytes(b" " * n))

    def handle_LEFT(self):
        if self.lineBufferIndex > 0:
            self.lineBufferIndex -= 1
            self.terminal.cursorBackward()

    def handle_RIGHT(self):
        if self.lineBufferIndex < len(self.lineBuffer):
            self.lineBufferIndex += 1
            self.terminal.cursorForward()

    def handle_HOME(self):
        # Move the cursor to the start of the line buffer.
        if self.lineBufferIndex:
            self.terminal.cursorBackward(self.lineBufferIndex)
            self.lineBufferIndex = 0

    def handle_END(self):
        # Move the cursor past the last character in the line buffer.
        offset = len(self.lineBuffer) - self.lineBufferIndex
        if offset:
            self.terminal.cursorForward(offset)
            self.lineBufferIndex = len(self.lineBuffer)

    def handle_BACKSPACE(self):
        # Delete the character to the left of the cursor, if any.
        if self.lineBufferIndex > 0:
            self.lineBufferIndex -= 1
            del self.lineBuffer[self.lineBufferIndex]
            self.terminal.cursorBackward()
            self.terminal.deleteCharacter()

    def handle_DELETE(self):
        # Delete the character under the cursor, if any.
        if self.lineBufferIndex < len(self.lineBuffer):
            del self.lineBuffer[self.lineBufferIndex]
            self.terminal.deleteCharacter()

    def handle_RETURN(self):
        # Deliver the completed line and reset the editing state.
        line = b"".join(self.lineBuffer)
        self.lineBuffer = []
        self.lineBufferIndex = 0
        self.terminal.nextLine()
        self.lineReceived(line)

    def handle_INSERT(self):
        # Toggle between insert and typeover editing modes.
        assert self.mode in ("typeover", "insert")
        if self.mode == "typeover":
            self.setInsertMode()
        else:
            self.setTypeoverMode()

    def lineReceived(self, line):
        """Called with each completed line; subclasses override this hook."""
        pass
class HistoricRecvLine(RecvLine):
    """
    L{RecvLine} subclass which adds input history: up and down arrows
    traverse previously received lines, and each received line is
    appended to the history.
    """

    def connectionMade(self):
        RecvLine.connectionMade(self)

        # All previously submitted lines, oldest first, and the current
        # position within them (== len(historyLines) means "new line").
        self.historyLines = []
        self.historyPosition = 0

        t = self.terminal
        self.keyHandlers.update(
            {t.UP_ARROW: self.handle_UP, t.DOWN_ARROW: self.handle_DOWN}
        )

    def currentHistoryBuffer(self):
        """Return (lines before the history cursor, lines at and after it)."""
        b = tuple(self.historyLines)
        return b[: self.historyPosition], b[self.historyPosition :]

    def _deliverBuffer(self, buf):
        # Replay a history line through characterReceived(), flagging all
        # but the final character as having more input pending.
        if buf:
            for ch in iterbytes(buf[:-1]):
                self.characterReceived(ch, True)
            self.characterReceived(buf[-1:], False)

    def handle_UP(self):
        # Stash the in-progress line before walking back into history.
        if self.lineBuffer and self.historyPosition == len(self.historyLines):
            self.historyLines.append(b"".join(self.lineBuffer))
        if self.historyPosition > 0:
            self.handle_HOME()
            self.terminal.eraseToLineEnd()

            self.historyPosition -= 1
            self.lineBuffer = []

            self._deliverBuffer(self.historyLines[self.historyPosition])

    def handle_DOWN(self):
        if self.historyPosition < len(self.historyLines) - 1:
            self.handle_HOME()
            self.terminal.eraseToLineEnd()

            self.historyPosition += 1
            self.lineBuffer = []

            self._deliverBuffer(self.historyLines[self.historyPosition])
        else:
            # Already at the newest entry: clear back to an empty new line.
            self.handle_HOME()
            self.terminal.eraseToLineEnd()

            self.historyPosition = len(self.historyLines)
            self.lineBuffer = []
            self.lineBufferIndex = 0

    def handle_RETURN(self):
        # Record the submitted line in history before normal RETURN handling.
        if self.lineBuffer:
            self.historyLines.append(b"".join(self.lineBuffer))
        self.historyPosition = len(self.historyLines)
        return RecvLine.handle_RETURN(self)
| true | true |
f7f5da2c16432c397b9ad0722a3dfd84365369fa | 405 | py | Python | efficient-eagles/early_internet/early_internet/wsgi.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | efficient-eagles/early_internet/early_internet/wsgi.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | efficient-eagles/early_internet/early_internet/wsgi.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | """
WSGI config for early_internet project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at this project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "early_internet.settings")

# Module-level WSGI callable used by application servers to serve this project.
application = get_wsgi_application()
| 23.823529 | 78 | 0.792593 |
import os

from django.core.wsgi import get_wsgi_application

# Point Django at this project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "early_internet.settings")

# Module-level WSGI callable used by application servers to serve this project.
application = get_wsgi_application()
| true | true |
f7f5da4dd988b88fa3806b8d114866fa74d20550 | 52,530 | py | Python | calendarserver/tap/util.py | eventable/CalendarServer | 384444edb1966b530bc391789afbe3fb9cd6fd3e | [
"Apache-2.0"
] | 1 | 2017-02-18T19:22:19.000Z | 2017-02-18T19:22:19.000Z | calendarserver/tap/util.py | eventable/CalendarServer | 384444edb1966b530bc391789afbe3fb9cd6fd3e | [
"Apache-2.0"
] | null | null | null | calendarserver/tap/util.py | eventable/CalendarServer | 384444edb1966b530bc391789afbe3fb9cd6fd3e | [
"Apache-2.0"
] | null | null | null | # -*- test-case-name: calendarserver.tap.test.test_caldav -*-
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Utilities for assembling the service and resource hierarchy.
"""
# Public names exported by this module.
__all__ = [
    "FakeRequest",
    "getDBPool",
    "getRootResource",
    "getSSLPassphrase",
    "MemoryLimitService",
    "postAlert",
    "preFlightChecks",
]
from calendarserver.accesslog import DirectoryLogWrapperResource
from calendarserver.provision.root import RootResource
from calendarserver.push.applepush import APNSubscriptionResource
from calendarserver.push.notifier import NotifierFactory
from calendarserver.push.util import getAPNTopicFromCertificate
from calendarserver.tools import diagnose
from calendarserver.tools.util import checkDirectory
from calendarserver.webadmin.landing import WebAdminLandingResource
from calendarserver.webcal.resource import WebCalendarResource
from socket import fromfd, AF_UNIX, SOCK_STREAM, socketpair
from subprocess import Popen, PIPE
from twext.enterprise.adbapi2 import ConnectionPool, ConnectionPoolConnection
from twext.enterprise.adbapi2 import ConnectionPoolClient
from twext.enterprise.ienterprise import ORACLE_DIALECT
from twext.enterprise.ienterprise import POSTGRES_DIALECT
from twext.internet.ssl import ChainingOpenSSLContextFactory
from twext.python.filepath import CachingFilePath
from twext.python.filepath import CachingFilePath as FilePath
from twext.python.log import Logger
from twext.who.checker import HTTPDigestCredentialChecker
from twext.who.checker import UsernamePasswordCredentialChecker
from twisted.application.service import Service
from twisted.cred.error import UnauthorizedLogin
from twisted.cred.portal import Portal
from twisted.internet import reactor as _reactor
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred, succeed
from twisted.internet.reactor import addSystemEventTrigger
from twisted.internet.tcp import Connection
from twisted.python.usage import UsageError
from twistedcaldav.bind import doBind
from twistedcaldav.cache import CacheStoreNotifierFactory
from twistedcaldav.config import ConfigurationError
from twistedcaldav.controlapi import ControlAPIResource
from twistedcaldav.directory.addressbook import DirectoryAddressBookHomeProvisioningResource
from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
from twistedcaldav.directory.digest import QopDigestCredentialFactory
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
from twistedcaldav.directorybackedaddressbook import DirectoryBackedAddressBookResource
from twistedcaldav.resource import AuthenticationWrapper
from twistedcaldav.simpleresource import SimpleResource, SimpleRedirectResource, \
SimpleUnavailableResource
from twistedcaldav.stdconfig import config
from twistedcaldav.timezones import TimezoneCache
from twistedcaldav.timezoneservice import TimezoneServiceResource
from twistedcaldav.timezonestdservice import TimezoneStdServiceResource
from twistedcaldav.util import getPasswordFromKeychain
from twistedcaldav.util import KeychainAccessError, KeychainPasswordNotFound
from txdav.base.datastore.dbapiclient import DBAPIConnector
from txdav.base.datastore.subpostgres import PostgresService
from txdav.caldav.datastore.scheduling.ischedule.dkim import DKIMUtils, DomainKeyResource
from txdav.caldav.datastore.scheduling.ischedule.localservers import buildServersDB
from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
from txdav.common.datastore.file import CommonDataStore as CommonFileDataStore
from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.sql import CommonDataStore as CommonSQLDataStore
from txdav.common.datastore.sql import current_sql_schema
from txdav.common.datastore.upgrade.sql.upgrade import NotAllowedToUpgrade
from txdav.dps.client import DirectoryService as DirectoryProxyClientService
from txdav.who.cache import CachingDirectoryService
from txdav.who.util import directoryFromConfig
from txweb2.auth.basic import BasicCredentialFactory
from txweb2.auth.tls import TLSCredentialsFactory, TLSCredentials
from txweb2.dav import auth
from txweb2.dav.auth import IPrincipalCredentials
from txweb2.dav.util import joinURL
from txweb2.http_headers import Headers
from txweb2.resource import Resource
from txweb2.static import File as FileResource
from urllib import quote
import OpenSSL
import errno
import os
import psutil
import sys
# Kerberos support is optional; if the import fails, Negotiate authentication
# is disabled by leaving NegotiateCredentialFactory set to None.
try:
    from twistedcaldav.authkerb import NegotiateCredentialFactory
    NegotiateCredentialFactory  # pacify pyflakes
except ImportError:
    NegotiateCredentialFactory = None

log = Logger()
def pgServiceFromConfig(config, subServiceFactory, uid=None, gid=None):
    """
    Construct a L{PostgresService} from a given configuration and subservice.

    @param config: the configuration to derive postgres configuration
        parameters from.

    @param subServiceFactory: A factory for the service to start once the
        L{PostgresService} has been initialized.

    @param uid: The user-ID to run the PostgreSQL server as.

    @param gid: The group-ID to run the PostgreSQL server as.

    @return: a service which can start postgres.

    @rtype: L{PostgresService}
    """
    dbRoot = CachingFilePath(config.DatabaseRoot)
    # Construct a PostgresService exactly as the parent would, so that we
    # can establish connection information.
    return PostgresService(
        dbRoot, subServiceFactory, current_sql_schema,
        databaseName=config.Postgres.DatabaseName,
        clusterName=config.Postgres.ClusterName,
        logFile=config.Postgres.LogFile,
        # Only place logs under LogRoot when log rotation is enabled.
        logDirectory=config.LogRoot if config.Postgres.LogRotation else "",
        socketDir=config.Postgres.SocketDirectory,
        socketName=config.Postgres.SocketName,
        listenAddresses=config.Postgres.ListenAddresses,
        sharedBuffers=config.Postgres.SharedBuffers,
        maxConnections=config.Postgres.MaxConnections,
        options=config.Postgres.Options,
        uid=uid, gid=gid,
        spawnedDBUser=config.SpawnedDBUser,
        pgCtl=config.Postgres.Ctl,
        initDB=config.Postgres.Init,
    )
class ConnectionWithPeer(Connection):
    """
    A L{Connection} which is always considered connected and which
    identifies its endpoints by socket file descriptor and object id.
    """

    connected = True

    def getPeer(self):
        """Return a debug string identifying the peer endpoint."""
        return "<peer: {!r} {!r}>".format(self.socket.fileno(), id(self))

    def getHost(self):
        """Return a debug string identifying the host endpoint."""
        return "<host: {!r} {!r}>".format(self.socket.fileno(), id(self))
def transactionFactoryFromFD(dbampfd, dialect, paramstyle):
    """
    Create a transaction factory from an inherited file descriptor, such as one
    created by L{ConnectionDispenser}.

    @param dbampfd: file descriptor of an already-connected UNIX socket
        speaking the connection-pool protocol.
    @param dialect: the SQL dialect in use (e.g. POSTGRES_DIALECT).
    @param paramstyle: the DB-API paramstyle in use.

    @return: the C{newTransaction} callable of the connected
        L{ConnectionPoolClient}.
    """
    skt = fromfd(dbampfd, AF_UNIX, SOCK_STREAM)
    # fromfd() duplicates the descriptor, so the inherited one can be closed.
    os.close(dbampfd)
    protocol = ConnectionPoolClient(dialect=dialect, paramstyle=paramstyle)
    transport = ConnectionWithPeer(skt, protocol)
    protocol.makeConnection(transport)
    transport.startReading()
    return protocol.newTransaction
class ConnectionDispenser(object):
    """
    A L{ConnectionDispenser} can dispense already-connected file descriptors,
    for use with subprocess spawning.
    """
    # Very long term FIXME: this mechanism should ideally be eliminated, by
    # making all subprocesses have a single stdio AMP connection that
    # multiplexes between multiple protocols.

    def __init__(self, connectionPool):
        """
        @param connectionPool: the L{ConnectionPool} that will serve the
            dispensed sockets.
        """
        self.pool = connectionPool

    def dispense(self):
        """
        Dispense a socket object, already connected to a server, for a client
        in a subprocess.
        """
        # FIXME: these sockets need to be re-dispensed when the process is
        # respawned, and they currently won't be.
        c, s = socketpair(AF_UNIX, SOCK_STREAM)
        # Serve the server end (s) from our pool; hand the client end (c)
        # back to the caller for the subprocess to inherit.
        protocol = ConnectionPoolConnection(self.pool)
        transport = ConnectionWithPeer(s, protocol)
        protocol.makeConnection(transport)
        transport.startReading()
        return c
def storeFromConfigWithoutDPS(config, txnFactory):
    """
    Create a data store from the given configuration, using an in-process
    directory service (no directory proxy sidecar).

    @return: the configured store with its directory service set.
    """
    store = storeFromConfig(config, txnFactory, None)
    directory = directoryFromConfig(config, store)
    # In-process caching of the directory is currently disabled here
    # (compare storeFromConfigWithDPSClient, where it is active):
    # if config.DirectoryProxy.InProcessCachingSeconds:
    #     directory = CachingDirectoryService(
    #         directory,
    #         expireSeconds=config.DirectoryProxy.InProcessCachingSeconds
    #     )
    store.setDirectoryService(directory)
    return store
def storeFromConfigWithDPSClient(config, txnFactory):
    """
    Build a data store that talks to the directory-proxy sidecar as a
    client, optionally fronted by an in-process cache.
    """
    dataStore = storeFromConfig(config, txnFactory, None)
    directoryService = DirectoryProxyClientService(config.DirectoryRealmName)
    if config.Servers.Enabled:
        directoryService.setServersDB(buildServersDB(config.Servers.MaxClients))
    cacheSeconds = config.DirectoryProxy.InProcessCachingSeconds
    if cacheSeconds:
        # Wrap lookups in a local cache to avoid a sidecar round-trip for
        # every directory query.
        directoryService = CachingDirectoryService(
            directoryService,
            expireSeconds=cacheSeconds
        )
    dataStore.setDirectoryService(directoryService)
    return dataStore
def storeFromConfigWithDPSServer(config, txnFactory):
    """
    Build the data store used by the directory-proxy sidecar process
    itself, with the real directory service attached.
    """
    dataStore = storeFromConfig(config, txnFactory, None)
    directoryService = directoryFromConfig(config, dataStore)
    # if config.DirectoryProxy.InSidecarCachingSeconds:
    #     directoryService = CachingDirectoryService(
    #         directoryService,
    #         expireSeconds=config.DirectoryProxy.InSidecarCachingSeconds
    #     )
    dataStore.setDirectoryService(directoryService)
    return dataStore
def storeFromConfig(config, txnFactory, directoryService):
    """
    Produce an L{IDataStore} from the given configuration, transaction factory,
    and notifier factory.

    If the transaction factory is C{None}, we will create a filesystem
    store.  Otherwise, a SQL store, using that connection information.

    @param config: the server configuration object
    @param txnFactory: SQL transaction factory, or C{None} for a file store
    @param directoryService: directory service to attach to the store (may
        be C{None}; callers typically set one later)
    @return: the constructed store
    """
    #
    # Configure NotifierFactory
    #
    notifierFactories = {}
    if config.Notifications.Enabled:
        notifierFactories["push"] = NotifierFactory(config.ServerHostName, config.Notifications.CoalesceSeconds)

    if config.EnableResponseCache and config.Memcached.Pools.Default.ClientEnabled:
        notifierFactories["cache"] = CacheStoreNotifierFactory()

    # A quota of 0 means "no quota".
    quota = config.UserQuota
    if quota == 0:
        quota = None

    if txnFactory is not None:
        # Build the base URI used in managed-attachment links.  The scheme
        # must agree with the port: previously the non-SSL branch paired
        # "https" with HTTPPort, producing broken attachment URIs.
        if config.EnableSSL:
            uri = "https://{config.ServerHostName}:{config.SSLPort}".format(config=config)
        else:
            uri = "http://{config.ServerHostName}:{config.HTTPPort}".format(config=config)
        attachments_uri = uri + "/calendars/__uids__/%(home)s/dropbox/%(dropbox_id)s/%(name)s"
        store = CommonSQLDataStore(
            txnFactory, notifierFactories,
            directoryService,
            FilePath(config.AttachmentsRoot), attachments_uri,
            config.EnableCalDAV, config.EnableCardDAV,
            config.EnableManagedAttachments,
            quota=quota,
            logLabels=config.LogDatabase.LabelsInSQL,
            logStats=config.LogDatabase.Statistics,
            logStatsLogFile=config.LogDatabase.StatisticsLogFile,
            logSQL=config.LogDatabase.SQLStatements,
            logTransactionWaits=config.LogDatabase.TransactionWaitSeconds,
            timeoutTransactions=config.TransactionTimeoutSeconds,
            cacheQueries=config.QueryCaching.Enabled,
            cachePool=config.QueryCaching.MemcachedPool,
            cacheExpireSeconds=config.QueryCaching.ExpireSeconds
        )
    else:
        store = CommonFileDataStore(
            FilePath(config.DocumentRoot),
            notifierFactories, directoryService,
            config.EnableCalDAV, config.EnableCardDAV,
            quota=quota
        )

    # FIXME: NotifierFactories need a reference to the store in order
    # to get a txn in order to possibly create a Work item
    for notifierFactory in notifierFactories.values():
        notifierFactory.store = store

    return store
# MOVE2WHO -- should we move this class somewhere else?
class PrincipalCredentialChecker(object):
    """
    A credential checker for L{IPrincipalCredentials} that authenticates a
    user against their directory record, with pass-through behavior for
    Kerberos and TLS client-certificate credentials (which were already
    verified upstream).
    """
    credentialInterfaces = (IPrincipalCredentials,)

    @inlineCallbacks
    def requestAvatarId(self, credentials):
        """
        Validate the given credentials.

        @return: a Deferred firing with the
            (authnPrincipal, authzPrincipal) pair on success.
        @raise UnauthorizedLogin: if the user is unknown, not allowed to
            log in, or the credentials do not verify.
        """
        credentials = IPrincipalCredentials(credentials)

        if credentials.authnPrincipal is None:
            raise UnauthorizedLogin(
                "No such user: {user}".format(
                    user=credentials.credentials.username
                )
            )

        # See if record is enabledForLogin
        if not credentials.authnPrincipal.record.isLoginEnabled():
            raise UnauthorizedLogin(
                "User not allowed to log in: {user}".format(
                    user=credentials.credentials.username
                )
            )

        # Handle Kerberos as a separate behavior
        # (imported lazily so Kerberos support remains optional)
        try:
            from twistedcaldav.authkerb import NegotiateCredentials
        except ImportError:
            NegotiateCredentials = None

        if NegotiateCredentials and isinstance(credentials.credentials, NegotiateCredentials):
            # If we get here with Kerberos, then authentication has already succeeded
            returnValue(
                (
                    credentials.authnPrincipal,
                    credentials.authzPrincipal,
                )
            )

        # Handle TLS Client Certificate
        elif isinstance(credentials.credentials, TLSCredentials):
            # If we get here with TLS, then authentication (certificate verification) has already succeeded
            returnValue(
                (
                    credentials.authnPrincipal,
                    credentials.authzPrincipal,
                )
            )

        else:
            # Ordinary credentials: defer to the directory record's own
            # verification (e.g. password check).
            if (yield credentials.authnPrincipal.record.verifyCredentials(credentials.credentials)):
                returnValue(
                    (
                        credentials.authnPrincipal,
                        credentials.authzPrincipal,
                    )
                )
            else:
                raise UnauthorizedLogin(
                    "Incorrect credentials for user: {user}".format(
                        user=credentials.credentials.username
                    )
                )
def getRootResource(config, newStore, resources=None):
    """
    Set up directory service and resource hierarchy based on config.
    Return root resource.

    Additional resources can be added to the hierarchy by passing a list of
    tuples containing: path, resource class, __init__ args list, and optional
    authentication schemes list ("basic", "digest").

    If the store is specified, then it has already been constructed, so use it.
    Otherwise build one with L{storeFromConfig}.
    """
    if newStore is None:
        raise RuntimeError("Internal error, 'newStore' must be specified.")

    if resources is None:
        resources = []

    # FIXME: this is only here to workaround circular imports
    doBind()

    #
    # Default resource classes
    #
    rootResourceClass = RootResource
    calendarResourceClass = DirectoryCalendarHomeProvisioningResource
    iScheduleResourceClass = IScheduleInboxResource
    conduitResourceClass = ConduitResource
    timezoneServiceResourceClass = TimezoneServiceResource
    timezoneStdServiceResourceClass = TimezoneStdServiceResource
    webCalendarResourceClass = WebCalendarResource
    webAdminResourceClass = WebAdminLandingResource
    addressBookResourceClass = DirectoryAddressBookHomeProvisioningResource
    directoryBackedAddressBookResourceClass = DirectoryBackedAddressBookResource
    apnSubscriptionResourceClass = APNSubscriptionResource
    principalResourceClass = DirectoryPrincipalProvisioningResource
    controlResourceClass = ControlAPIResource

    directory = newStore.directoryService()
    principalCollection = principalResourceClass("/principals/", directory)

    #
    # Configure the Site and Wrappers
    #
    # Factories are split into those allowed only over encrypted (TLS)
    # connections and those also allowed over the unencrypted wire.
    wireEncryptedCredentialFactories = []
    wireUnencryptedCredentialFactories = []

    portal = Portal(auth.DavRealm())

    portal.registerChecker(UsernamePasswordCredentialChecker(directory))
    portal.registerChecker(HTTPDigestCredentialChecker(directory))
    portal.registerChecker(PrincipalCredentialChecker())

    realm = directory.realmName.encode("utf-8") or ""

    log.info("Configuring authentication for realm: {realm}", realm=realm)

    # Build a credential factory for each enabled authentication scheme.
    for scheme, schemeConfig in config.Authentication.iteritems():
        scheme = scheme.lower()

        credFactory = None

        if schemeConfig["Enabled"]:
            log.info("Setting up scheme: {scheme}", scheme=scheme)

            if scheme == "kerberos":
                if not NegotiateCredentialFactory:
                    log.info("Kerberos support not available")
                    continue

                try:
                    principal = schemeConfig["ServicePrincipal"]
                    if not principal:
                        credFactory = NegotiateCredentialFactory(
                            serviceType="HTTP",
                            hostname=config.ServerHostName,
                        )
                    else:
                        credFactory = NegotiateCredentialFactory(
                            principal=principal,
                        )
                except ValueError:
                    log.info("Could not start Kerberos")
                    continue

            elif scheme == "digest":
                credFactory = QopDigestCredentialFactory(
                    schemeConfig["Algorithm"],
                    schemeConfig["Qop"],
                    realm,
                )

            elif scheme == "basic":
                credFactory = BasicCredentialFactory(realm)

            elif scheme == TLSCredentialsFactory.scheme:
                credFactory = TLSCredentialsFactory(realm)

            elif scheme == "wiki":
                # Wiki auth is handled elsewhere; no factory needed here.
                pass

            else:
                log.error("Unknown scheme: {scheme}", scheme=scheme)

        if credFactory:
            wireEncryptedCredentialFactories.append(credFactory)
            if schemeConfig.get("AllowedOverWireUnencrypted", False):
                wireUnencryptedCredentialFactories.append(credFactory)

    #
    # Setup Resource hierarchy
    #
    log.info("Setting up document root at: {root}", root=config.DocumentRoot)

    if config.EnableCalDAV:
        log.info("Setting up calendar collection: {cls}", cls=calendarResourceClass)
        calendarCollection = calendarResourceClass(
            directory,
            "/calendars/",
            newStore,
        )
        principalCollection.calendarCollection = calendarCollection

    if config.EnableCardDAV:
        log.info("Setting up address book collection: {cls}", cls=addressBookResourceClass)
        addressBookCollection = addressBookResourceClass(
            directory,
            "/addressbooks/",
            newStore,
        )
        principalCollection.addressBookCollection = addressBookCollection

        if config.DirectoryAddressBook.Enabled and config.EnableSearchAddressBook:
            log.info(
                "Setting up directory address book: {cls}",
                cls=directoryBackedAddressBookResourceClass)

            directoryBackedAddressBookCollection = directoryBackedAddressBookResourceClass(
                principalCollections=(principalCollection,),
                principalDirectory=directory,
                uri=joinURL("/", config.DirectoryAddressBook.name, "/")
            )
            # Provision immediately if the reactor is already running,
            # otherwise defer provisioning until after startup.
            if _reactor._started:
                directoryBackedAddressBookCollection.provisionDirectory()
            else:
                addSystemEventTrigger("after", "startup", directoryBackedAddressBookCollection.provisionDirectory)
        else:
            # remove /directory from previous runs that may have created it
            directoryPath = os.path.join(config.DocumentRoot, config.DirectoryAddressBook.name)
            try:
                FilePath(directoryPath).remove()
                log.info("Deleted: {path}", path=directoryPath)
            except (OSError, IOError), e:
                # ENOENT just means there was nothing to clean up.
                if e.errno != errno.ENOENT:
                    log.error("Could not delete: {path} : {error}", path=directoryPath, error=e)

    # In migration-only mode, user-facing collections are replaced with a
    # "service unavailable" placeholder resource.
    if config.MigrationOnly:
        unavailable = SimpleUnavailableResource((principalCollection,))
    else:
        unavailable = None

    log.info("Setting up root resource: {cls}", cls=rootResourceClass)

    root = rootResourceClass(
        config.DocumentRoot,
        principalCollections=(principalCollection,),
    )

    root.putChild("principals", principalCollection if unavailable is None else unavailable)
    if config.EnableCalDAV:
        root.putChild("calendars", calendarCollection if unavailable is None else unavailable)
    if config.EnableCardDAV:
        root.putChild('addressbooks', addressBookCollection if unavailable is None else unavailable)
        if config.DirectoryAddressBook.Enabled and config.EnableSearchAddressBook:
            root.putChild(config.DirectoryAddressBook.name, directoryBackedAddressBookCollection if unavailable is None else unavailable)

    # /.well-known
    if config.EnableWellKnown:
        log.info("Setting up .well-known collection resource")

        wellKnownResource = SimpleResource(
            principalCollections=(principalCollection,),
            isdir=True,
            defaultACL=SimpleResource.allReadACL
        )
        root.putChild(".well-known", wellKnownResource)
        # Advertise each enabled service via a .well-known redirect.
        for enabled, wellknown_name, redirected_to in (
            (config.EnableCalDAV, "caldav", "/principals/",),
            (config.EnableCardDAV, "carddav", "/principals/",),
            (config.TimezoneService.Enabled, "timezone", config.TimezoneService.URI,),
            (config.Scheduling.iSchedule.Enabled, "ischedule", "/ischedule"),
        ):
            if enabled:
                if config.EnableSSL:
                    scheme = "https"
                    port = config.SSLPort
                else:
                    scheme = "http"
                    port = config.HTTPPort
                wellKnownResource.putChild(
                    wellknown_name,
                    SimpleRedirectResource(
                        principalCollections=(principalCollection,),
                        isdir=False,
                        defaultACL=SimpleResource.allReadACL,
                        scheme=scheme, port=port, path=redirected_to)
                )

    # Attach configured URL aliases to static file resources, creating any
    # intermediate plain Resource nodes as needed.
    for alias in config.Aliases:
        url = alias.get("url", None)
        path = alias.get("path", None)
        if not url or not path or url[0] != "/":
            log.error("Invalid alias: URL: {url} Path: {path}", url=url, path=path)
            continue
        urlbits = url[1:].split("/")
        parent = root
        for urlpiece in urlbits[:-1]:
            child = parent.getChild(urlpiece)
            if child is None:
                child = Resource()
                parent.putChild(urlpiece, child)
            parent = child
        if parent.getChild(urlbits[-1]) is not None:
            log.error("Invalid alias: URL: {url} Path: {path} already exists", url=url, path=path)
            continue
        resource = FileResource(path)
        parent.putChild(urlbits[-1], resource)
        log.info("Added alias {url} -> {path}", url=url, path=path)

    # Need timezone cache before setting up any timezone service
    log.info("Setting up Timezone Cache")
    TimezoneCache.create()

    # Timezone service is optional
    if config.EnableTimezoneService:
        log.info(
            "Setting up time zone service resource: {cls}",
            cls=timezoneServiceResourceClass)

        timezoneService = timezoneServiceResourceClass(
            root,
        )
        root.putChild("timezones", timezoneService)

    # Standard Timezone service is optional
    if config.TimezoneService.Enabled:
        log.info(
            "Setting up standard time zone service resource: {cls}",
            cls=timezoneStdServiceResourceClass)

        timezoneStdService = timezoneStdServiceResourceClass(
            root,
        )
        root.putChild("stdtimezones", timezoneStdService)

        # TODO: we only want the master to do this
        if _reactor._started:
            _reactor.callLater(0, timezoneStdService.onStartup)
        else:
            addSystemEventTrigger("after", "startup", timezoneStdService.onStartup)

    #
    # iSchedule/cross-pod service for podding
    #
    if config.Servers.Enabled:
        log.info("Setting up iSchedule podding inbox resource: {cls}", cls=iScheduleResourceClass)
        ischedule = iScheduleResourceClass(
            root,
            newStore,
            podding=True
        )
        root.putChild(config.Servers.InboxName, ischedule if unavailable is None else unavailable)

        log.info("Setting up podding conduit resource: {cls}", cls=conduitResourceClass)
        conduit = conduitResourceClass(
            root,
            newStore,
        )
        root.putChild(config.Servers.ConduitName, conduit)

    #
    # iSchedule service (not used for podding)
    #
    if config.Scheduling.iSchedule.Enabled:
        log.info("Setting up iSchedule inbox resource: {cls}", cls=iScheduleResourceClass)
        ischedule = iScheduleResourceClass(
            root,
            newStore,
        )
        root.putChild("ischedule", ischedule if unavailable is None else unavailable)

        # Do DomainKey resources
        DKIMUtils.validConfiguration(config)
        if config.Scheduling.iSchedule.DKIM.Enabled:
            log.info("Setting up domainkey resource: {res}", res=DomainKeyResource)
            domain = config.Scheduling.iSchedule.DKIM.Domain if config.Scheduling.iSchedule.DKIM.Domain else config.ServerHostName
            dk = DomainKeyResource(
                domain,
                config.Scheduling.iSchedule.DKIM.KeySelector,
                config.Scheduling.iSchedule.DKIM.PublicKeyFile,
            )
            # NOTE(review): wellKnownResource is only created when
            # config.EnableWellKnown is true -- this assumes DKIM implies
            # .well-known is enabled; confirm config invariants.
            wellKnownResource.putChild("domainkey", dk)

    #
    # WebCal
    #
    if config.WebCalendarRoot:
        log.info(
            "Setting up WebCalendar resource: {res}",
            res=config.WebCalendarRoot)
        webCalendar = webCalendarResourceClass(
            config.WebCalendarRoot,
            principalCollections=(principalCollection,),
        )
        root.putChild("webcal", webCalendar if unavailable is None else unavailable)

    #
    # WebAdmin
    #
    if config.EnableWebAdmin:
        log.info("Setting up WebAdmin resource")
        webAdmin = webAdminResourceClass(
            config.WebCalendarRoot,
            root,
            directory,
            newStore,
            principalCollections=(principalCollection,),
        )
        root.putChild("admin", webAdmin)

    #
    # Control API
    #
    if config.EnableControlAPI:
        log.info("Setting up Control API resource")
        controlAPI = controlResourceClass(
            root,
            directory,
            newStore,
            principalCollections=(principalCollection,),
        )
        root.putChild("control", controlAPI)

    #
    # Apple Push Notification Subscriptions
    #
    apnConfig = config.Notifications.Services.APNS
    if apnConfig.Enabled:
        log.info(
            "Setting up APNS resource at /{url}",
            url=apnConfig["SubscriptionURL"])
        apnResource = apnSubscriptionResourceClass(root, newStore)
        root.putChild(apnConfig["SubscriptionURL"], apnResource)

    #
    # Configure ancillary data
    #

    # MOVE2WHO

    log.info("Configuring authentication wrapper")

    # Extra resources passed by the caller may carry their own per-path
    # credential-factory overrides.
    overrides = {}
    if resources:
        for path, cls, args, schemes in resources:

            # putChild doesn't want "/" starting the path
            root.putChild(path, cls(root, newStore, *args))

            # overrides requires "/" prepended
            path = "/" + path

            overrides[path] = []
            for scheme in schemes:
                if scheme == "basic":
                    overrides[path].append(BasicCredentialFactory(realm))

                elif scheme == "digest":
                    schemeConfig = config.Authentication.Digest
                    overrides[path].append(QopDigestCredentialFactory(
                        schemeConfig["Algorithm"],
                        schemeConfig["Qop"],
                        realm,
                    ))
            log.info(
                "Overriding {path} with {cls} ({schemes})",
                path=path, cls=cls, schemes=schemes)

    authWrapper = AuthenticationWrapper(
        root,
        portal,
        wireEncryptedCredentialFactories,
        wireUnencryptedCredentialFactories,
        (auth.IPrincipal,),
        overrides=overrides
    )

    logWrapper = DirectoryLogWrapperResource(
        authWrapper,
        directory,
    )

    # FIXME: Storing a reference to the root resource on the store
    # until scheduling no longer needs resource objects
    newStore.rootResource = root

    return logWrapper
def getDBPool(config):
    """
    Inspect configuration to determine what database connection pool
    to set up.

    return: (L{ConnectionPool}, transactionFactory)
    """
    isOracle = (config.DBType == 'oracle')
    dialect = ORACLE_DIALECT if isOracle else POSTGRES_DIALECT
    paramstyle = 'numeric' if isOracle else 'pyformat'

    if config.DBAMPFD:
        # A parent process handed us a pre-connected AMP file descriptor;
        # no local pool is needed.
        txnFactory = transactionFactoryFromFD(
            int(config.DBAMPFD), dialect, paramstyle
        )
        return (None, txnFactory)

    if not config.UseDatabase:
        # Filesystem store: no SQL transactions at all.
        return (None, None)

    if config.SharedConnectionPool:
        raise UsageError(
            "trying to use DB in slave, but no connection info from parent"
        )

    if config.DBType == '':
        # get a PostgresService to tell us what the local connection
        # info is, but *don't* start it (that would start one postgres
        # master per slave, resulting in all kinds of mayhem...)
        connectionFactory = pgServiceFromConfig(config, None).produceConnection
    else:
        connectionFactory = DBAPIConnector.connectorFor(config.DBType, **config.DatabaseConnection).connect
    pool = ConnectionPool(connectionFactory, dialect=dialect,
                          paramstyle=paramstyle,
                          maxConnections=config.MaxDBConnectionsPerPool)
    return (pool, pool.connection)
class FakeRequest(object):
    """
    A minimal stand-in for an HTTP request, sufficient for internal
    resource traversal (locateResource and friends) without a real client
    connection.
    """

    def __init__(self, rootResource, method, path, uri='/', transaction=None):
        """
        @param rootResource: root of the resource hierarchy to traverse
        @param method: HTTP method name to report
        @param path: request path
        @param uri: request URI (defaults to '/')
        @param transaction: optional store transaction to attach
        """
        self.rootResource = rootResource
        self.method = method
        self.path = path
        self.uri = uri
        # Two-way URL <-> resource caches, filled in during traversal.
        self._resourcesByURL = {}
        self._urlsByResource = {}
        self.headers = Headers()
        if transaction is not None:
            self._newStoreTransaction = transaction

    @inlineCallbacks
    def _getChild(self, resource, segments):
        # Recursively consume path segments via each resource's
        # locateChild() until none remain.
        if not segments:
            returnValue(resource)

        child, remaining = (yield resource.locateChild(self, segments))
        returnValue((yield self._getChild(child, remaining)))

    @inlineCallbacks
    def locateResource(self, url):
        """
        Find and cache the resource at the given URL path.
        """
        url = url.strip("/")
        segments = url.split("/")
        resource = (yield self._getChild(self.rootResource, segments))
        if resource:
            self._rememberResource(resource, url)
        returnValue(resource)

    @inlineCallbacks
    def locateChildResource(self, parent, childName):
        """
        Find and cache the named child of an already-located parent
        resource; returns None if either argument is None.
        """
        if parent is None or childName is None:
            returnValue(None)

        parentURL = self.urlForResource(parent)
        if not parentURL.endswith("/"):
            parentURL += "/"
        url = parentURL + quote(childName)
        segment = childName

        resource = (yield self._getChild(parent, [segment]))
        if resource:
            self._rememberResource(resource, url)
        returnValue(resource)

    def _rememberResource(self, resource, url):
        # Record the association in both caches.
        self._resourcesByURL[url] = resource
        self._urlsByResource[resource] = url
        return resource

    def _forgetResource(self, resource, url):
        # Drop any cached association for the given URL/resource pair.
        if url in self._resourcesByURL:
            del self._resourcesByURL[url]
        if resource in self._urlsByResource:
            del self._urlsByResource[resource]

    def urlForResource(self, resource):
        """
        Return the cached URL for a previously-located resource; raises
        NoURLForResourceError if the resource was never located through
        this request.
        """
        url = self._urlsByResource.get(resource, None)
        if url is None:
            class NoURLForResourceError(RuntimeError):
                pass
            raise NoURLForResourceError(resource)
        return url

    def addResponseFilter(self, *args, **kwds):
        # No-op: a fake request has no response pipeline.
        pass
def memoryForPID(pid, residentOnly=True):
    """
    Return the amount of memory in use for the given process.  If residentOnly is True,
    then RSS is returned; if False, then virtual memory is returned.

    @param pid: process id
    @type pid: C{int}
    @param residentOnly: Whether only resident memory should be included
    @type residentOnly: C{boolean}
    @return: Memory used by process in bytes
    @rtype: C{int}
    """
    process = psutil.Process(pid)
    # psutil renamed get_memory_info() to memory_info() in psutil 2.0 and
    # later removed the old name; support both so either version works.
    getInfo = getattr(process, "memory_info", None)
    if getInfo is None:
        getInfo = process.get_memory_info
    memoryInfo = getInfo()
    return memoryInfo.rss if residentOnly else memoryInfo.vms
class MemoryLimitService(Service, object):
    """
    A service which when paired with a DelayedStartupProcessMonitor will periodically
    examine the memory usage of the monitored processes and stop any which exceed
    a configured limit.  Memcached processes are ignored.
    """

    def __init__(self, processMonitor, intervalSeconds, limitBytes, residentOnly, reactor=None):
        """
        @param processMonitor: the DelayedStartupProcessMonitor
        @param intervalSeconds: how often to check
        @type intervalSeconds: C{int}
        @param limitBytes: any monitored process over this limit is stopped
        @type limitBytes: C{int}
        @param residentOnly: whether only resident memory should be included
        @type residentOnly: C{boolean}
        @param reactor: for testing
        """
        self._processMonitor = processMonitor
        self._seconds = intervalSeconds
        self._bytes = limitBytes
        self._residentOnly = residentOnly
        self._delayedCall = None

        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # Unit tests can swap out _memoryForPID
        self._memoryForPID = memoryForPID

    def startService(self):
        """
        Start scheduling the memory checks
        """
        super(MemoryLimitService, self).startService()
        self._delayedCall = self._reactor.callLater(self._seconds, self.checkMemory)

    def stopService(self):
        """
        Stop checking memory
        """
        super(MemoryLimitService, self).stopService()
        if self._delayedCall is not None and self._delayedCall.active():
            self._delayedCall.cancel()
            self._delayedCall = None

    def checkMemory(self):
        """
        Stop any processes monitored by our paired processMonitor whose resident
        memory exceeds our configured limitBytes.  Reschedule intervalSeconds in
        the future.
        """
        try:
            for name in self._processMonitor.processes:
                # Memcached is expected to hold a lot of memory; skip it.
                if name.startswith("memcached"):
                    continue
                proto = self._processMonitor.protocols.get(name, None)
                if proto is not None:
                    proc = proto.transport
                    pid = proc.pid
                    try:
                        memory = self._memoryForPID(pid, self._residentOnly)
                    except Exception, e:
                        # Process may have exited or be unreadable; move on.
                        log.error(
                            "Unable to determine memory usage of PID: {pid} ({err})",
                            pid=pid, err=e)
                        continue
                    if memory > self._bytes:
                        log.warn(
                            "Killing large process: {name} PID:{pid} {memtype}:{mem}",
                            name=name, pid=pid,
                            memtype=("Resident" if self._residentOnly else "Virtual"),
                            mem=memory)
                        self._processMonitor.stopProcess(name)
        finally:
            # Always reschedule the next check, even if this pass failed.
            self._delayedCall = self._reactor.callLater(self._seconds, self.checkMemory)
def checkDirectories(config):
    """
    Make sure that various key directories exist (and create if needed)
    """

    #
    # Verify that server root actually exists
    #
    checkDirectory(
        config.ServerRoot,
        "Server root",
        # Require write access because one might not allow editing on /
        access=os.W_OK,
        wait=True  # Wait in a loop until ServerRoot exists
    )

    #
    # Verify that other root paths are OK
    #
    # Roots configured outside their expected parent are assumed to be
    # managed externally and are not checked/created here.
    if config.DataRoot.startswith(config.ServerRoot + os.sep):
        checkDirectory(
            config.DataRoot,
            "Data root",
            access=os.W_OK,
            create=(0750, config.UserName, config.GroupName),
        )
    if config.DocumentRoot.startswith(config.DataRoot + os.sep):
        checkDirectory(
            config.DocumentRoot,
            "Document root",
            # Don't require write access because one might not allow editing on /
            access=os.R_OK,
            create=(0750, config.UserName, config.GroupName),
        )
    if config.ConfigRoot.startswith(config.ServerRoot + os.sep):
        checkDirectory(
            config.ConfigRoot,
            "Config root",
            access=os.W_OK,
            create=(0750, config.UserName, config.GroupName),
        )

    if config.SocketFiles.Enabled:
        checkDirectory(
            config.SocketRoot,
            "Socket file root",
            access=os.W_OK,
            create=(
                config.SocketFiles.Permissions,
                config.SocketFiles.Owner,
                config.SocketFiles.Group
            )
        )

    # Always create these:
    checkDirectory(
        config.LogRoot,
        "Log root",
        access=os.W_OK,
        create=(0750, config.UserName, config.GroupName),
    )
    checkDirectory(
        config.RunRoot,
        "Run root",
        access=os.W_OK,
        create=(0770, config.UserName, config.GroupName),
    )
class Stepper(object):
    """
    Runs a sequence of "step" objects via a single Deferred chain.

    A step may implement either or both of:

      - stepWithResult(result)
          @param result: the result returned from the previous step
          @returns: Deferred

      - stepWithFailure(failure)
          @param failure: a Failure encapsulating the exception from the
              previous step
          @returns: Failure to continue down the errback chain, or a
              Deferred returning a non-Failure to switch back to the
              callback chain

    Steps are registered in order with addStep(); start() fires the first
    step and chains the rest.  A step missing either method gets a default
    implementation: success values are passed through, and failures are
    logged (unless expected) and propagated.

    TODO: Create an IStep interface (?)
    """

    def __init__(self):
        self.steps = []
        self.failure = None
        self.result = None
        self.running = False

    def addStep(self, step):
        """
        Adds a step object to the ordered list of steps

        @param step: the object to add
        @type step: an object implementing stepWithResult()

        @return: the Stepper object itself so addStep() calls can be chained
        """
        if self.running:
            raise RuntimeError("Can't add step after start")
        self.steps.append(step)
        return self

    def defaultStepWithResult(self, result):
        # Pass-through success handler for steps without stepWithResult().
        return succeed(result)

    def defaultStepWithFailure(self, failure):
        # Log unexpected failures but keep propagating them down the chain;
        # the listed exception types are expected outcomes and not logged.
        expected = (
            NotAllowedToUpgrade, ConfigurationError,
            OpenSSL.SSL.Error
        )
        if failure.type not in expected:
            log.failure("Step failure", failure=failure)
        return failure

    def start(self, result=None):
        """
        Begin executing the added steps in sequence.  If a step object
        does not implement a stepWithResult/stepWithFailure method, a
        default implementation will be used.

        @param result: an optional value to pass to the first step
        @return: the Deferred that will fire when steps are done
        """
        self.running = True
        self.deferred = Deferred()

        for step in self.steps:
            # Fall back to the default handlers for any missing step method.
            callBack = getattr(step, "stepWithResult", self.defaultStepWithResult)
            errBack = getattr(step, "stepWithFailure", self.defaultStepWithFailure)
            self.deferred.addCallbacks(callBack, errBack)

        # Get things going
        self.deferred.callback(result)
        return self.deferred
def requestShutdown(programPath, reason):
    """
    Log the shutdown reason and call the shutdown-requesting program.

    In the case the service is spawned by launchd (or equivalent), if our
    service decides it needs to shut itself down, because of a misconfiguration,
    for example, we can't just exit.  We may need to go through the system
    machinery to unload our job, manage reverse proxies, update admin UI, etc.
    Therefore you can configure the ServiceDisablingProgram plist key to point
    to a program to run which will stop our service.

    @param programPath: the full path to a program to call (with no args)
    @type programPath: C{str}
    @param reason: a shutdown reason to log
    @type reason: C{str}
    """
    log.error("Shutting down Calendar and Contacts server")
    log.error(reason)
    # Run the program the caller asked for; previously this ignored the
    # programPath argument and read the global config instead.
    Popen(
        args=[programPath],
        stdout=PIPE,
        stderr=PIPE,
    ).communicate()
def preFlightChecks(config):
    """
    Perform checks prior to spawning any processes.  Returns True if the checks
    are ok, False if they don't and we have a ServiceDisablingProgram configured.
    Otherwise exits.
    """
    # Run the verifications in order, stopping at the first failure.
    success, reason = verifyConfig(config)

    if success:
        success, reason = verifyServerRoot(config)

    if success:
        success, reason = verifyTLSCertificate(config)

    if success:
        success, reason = verifyAPNSCertificate(config)

    if not success:
        if config.ServiceDisablingProgram:
            # If pre-flight checks fail, we don't want launchd to
            # repeatedly launch us, we want our job to get unloaded.
            # If the config.ServiceDisablingProgram is assigned and exists
            # we schedule it to run after startService finishes.
            # Its job is to carry out the platform-specific tasks of disabling
            # the service.
            if os.path.exists(config.ServiceDisablingProgram):
                addSystemEventTrigger(
                    "after", "startup",
                    requestShutdown, config.ServiceDisablingProgram, reason
                )
            return False
        else:
            # No disabling program: report and exit immediately.
            print(reason)
            sys.exit(1)

    return True
def verifyConfig(config):
    """
    At least one of EnableCalDAV or EnableCardDAV must be True
    """
    anyProtocolEnabled = config.EnableCalDAV or config.EnableCardDAV
    if not anyProtocolEnabled:
        return False, "Neither CalDAV nor CardDAV are enabled"
    return True, "A protocol is enabled"
def verifyServerRoot(config):
    """
    Ensure server root is not on a phantom volume
    """
    status = diagnose.detectPhantomVolume(config.ServerRoot)
    if status == diagnose.EXIT_CODE_SERVER_ROOT_MISSING:
        return False, "ServerRoot is missing"
    elif status == diagnose.EXIT_CODE_PHANTOM_DATA_VOLUME:
        return False, "ServerRoot is supposed to be on a non-boot-volume but it's not"
    else:
        return True, "ServerRoot is ok"
def verifyTLSCertificate(config):
    """
    If a TLS certificate is configured, make sure it exists, is non empty,
    and that it's valid.
    """
    if config.SSLCertificate:
        # Certificate configured: it must exist on disk.
        if not os.path.exists(config.SSLCertificate):
            message = (
                "The configured TLS certificate ({cert}) is missing".format(
                    cert=config.SSLCertificate
                )
            )
            postAlert("MissingCertificateAlert", ["path", config.SSLCertificate])
            return False, message
    else:
        # No certificate configured at all: nothing to verify.
        return True, "TLS disabled"

    # An empty file is just as unusable as a missing one.
    length = os.stat(config.SSLCertificate).st_size
    if length == 0:
        message = (
            "The configured TLS certificate ({cert}) is empty".format(
                cert=config.SSLCertificate
            )
        )
        return False, message

    # Finally, let OpenSSL attempt to build a context from the key,
    # certificate, and chain to prove they are actually usable.
    try:
        ChainingOpenSSLContextFactory(
            config.SSLPrivateKey,
            config.SSLCertificate,
            certificateChainFile=config.SSLAuthorityChain,
            passwdCallback=getSSLPassphrase,
            sslmethod=getattr(OpenSSL.SSL, config.SSLMethod),
            ciphers=config.SSLCiphers.strip()
        )
    except Exception as e:
        message = (
            "The configured TLS certificate ({cert}) cannot be used: {reason}".format(
                cert=config.SSLCertificate,
                reason=str(e)
            )
        )
        return False, message

    return True, "TLS enabled"
def verifyAPNSCertificate(config):
    """
    If APNS certificates are configured, make sure they're valid.
    """
    if config.Notifications.Services.APNS.Enabled:
        # Check the CalDAV and CardDAV push certificates independently;
        # the first problem found fails the whole verification.
        for protocol, accountName in (
            ("CalDAV", "apns:com.apple.calendar"),
            ("CardDAV", "apns:com.apple.contact"),
        ):
            protoConfig = config.Notifications.Services.APNS[protocol]

            # Verify the cert exists
            if not os.path.exists(protoConfig.CertificatePath):
                message = (
                    "The {proto} APNS certificate ({cert}) is missing".format(
                        proto=protocol,
                        cert=protoConfig.CertificatePath
                    )
                )
                postAlert("PushNotificationCertificateAlert", [])
                return False, message

            # Verify we can extract the topic
            if not protoConfig.Topic:
                topic = getAPNTopicFromCertificate(protoConfig.CertificatePath)
                protoConfig.Topic = topic
            if not protoConfig.Topic:
                postAlert("PushNotificationCertificateAlert", [])
                message = "Cannot extract APN topic"
                return False, message

            # Verify we can acquire the passphrase
            if not protoConfig.Passphrase:
                try:
                    passphrase = getPasswordFromKeychain(accountName)
                    protoConfig.Passphrase = passphrase
                except KeychainAccessError:
                    # The system doesn't support keychain
                    pass
                except KeychainPasswordNotFound:
                    # The password doesn't exist in the keychain.
                    postAlert("PushNotificationCertificateAlert", [])
                    message = "Cannot retrieve APN passphrase from keychain"
                    return False, message

            # Let OpenSSL try to use the cert
            try:
                if protoConfig.Passphrase:
                    passwdCallback = lambda *ignored: protoConfig.Passphrase
                else:
                    passwdCallback = None
                ChainingOpenSSLContextFactory(
                    protoConfig.PrivateKeyPath,
                    protoConfig.CertificatePath,
                    certificateChainFile=protoConfig.AuthorityChainPath,
                    passwdCallback=passwdCallback,
                    sslmethod=getattr(OpenSSL.SSL, "TLSv1_METHOD"),
                )
            except Exception as e:
                message = (
                    "The {proto} APNS certificate ({cert}) cannot be used: {reason}".format(
                        proto=protocol,
                        cert=protoConfig.CertificatePath,
                        reason=str(e)
                    )
                )
                postAlert("PushNotificationCertificateAlert", [])
                return False, message

        return True, "APNS enabled"

    else:
        return True, "APNS disabled"
def getSSLPassphrase(*ignored):
    """
    Obtain the passphrase for the configured TLS private key, trying the
    SSLCertAdmin helper (run via sudo) first, then the configured
    SSLPassPhraseDialog program.  Returns C{None} if neither yields one.
    """

    if not config.SSLPrivateKey:
        return None

    # First choice: ask the cert-admin helper tool for the passphrase.
    if config.SSLCertAdmin and os.path.isfile(config.SSLCertAdmin):
        child = Popen(
            args=[
                "sudo", config.SSLCertAdmin,
                "--get-private-key-passphrase", config.SSLPrivateKey,
            ],
            stdout=PIPE, stderr=PIPE,
        )
        output, error = child.communicate()

        if child.returncode:
            log.error(
                "Could not get passphrase for {key}: {error}",
                key=config.SSLPrivateKey, error=error
            )
        else:
            log.info(
                "Obtained passphrase for {key}", key=config.SSLPrivateKey
            )
            return output.strip()

    # Fallback: run the pass-phrase dialog program, which needs to know
    # the key type (determined by scanning the PEM file's header line).
    if (
        config.SSLPassPhraseDialog and
        os.path.isfile(config.SSLPassPhraseDialog)
    ):
        sslPrivKey = open(config.SSLPrivateKey)
        try:
            keyType = None
            for line in sslPrivKey.readlines():
                if "-----BEGIN RSA PRIVATE KEY-----" in line:
                    keyType = "RSA"
                    break
                elif "-----BEGIN DSA PRIVATE KEY-----" in line:
                    keyType = "DSA"
                    break
        finally:
            sslPrivKey.close()

        if keyType is None:
            log.error(
                "Could not get private key type for {key}",
                key=config.SSLPrivateKey
            )
        else:
            child = Popen(
                args=[
                    config.SSLPassPhraseDialog,
                    "{}:{}".format(config.ServerHostName, config.SSLPort),
                    keyType,
                ],
                stdout=PIPE, stderr=PIPE,
            )
            output, error = child.communicate()

            if child.returncode:
                log.error(
                    "Could not get passphrase for {key}: {error}",
                    key=config.SSLPrivateKey, error=error
                )
            else:
                return output.strip()

    return None
def postAlert(alertType, args):
    """
    Invoke the configured alert-posting program with the given alert type.

    Best-effort: any failure to run the program is logged, never raised.

    @param alertType: the alert identifier, passed as the first argument to
        config.AlertPostingProgram
    @type alertType: C{str}
    @param args: additional command line arguments for the program
    @type args: C{list} of C{str}
    """
    if (
        config.AlertPostingProgram and
        os.path.exists(config.AlertPostingProgram)
    ):
        try:
            commandLine = [config.AlertPostingProgram, alertType]
            commandLine.extend(args)
            Popen(
                commandLine,
                stdout=PIPE,
                stderr=PIPE,
            ).communicate()
        # "except ... as ..." is valid on Python 2.6+ and required on Python 3,
        # unlike the legacy "except Exception, e" form.
        except Exception as e:
            log.error(
                "Could not post alert: {alertType} {args} ({error})",
                alertType=alertType, args=args, error=e
            )
| 34.973369 | 137 | 0.636113 |
"""
Utilities for assembling the service and resource hierarchy.
"""
__all__ = [
"FakeRequest",
"getDBPool",
"getRootResource",
"getSSLPassphrase",
"MemoryLimitService",
"postAlert",
"preFlightChecks",
]
from calendarserver.accesslog import DirectoryLogWrapperResource
from calendarserver.provision.root import RootResource
from calendarserver.push.applepush import APNSubscriptionResource
from calendarserver.push.notifier import NotifierFactory
from calendarserver.push.util import getAPNTopicFromCertificate
from calendarserver.tools import diagnose
from calendarserver.tools.util import checkDirectory
from calendarserver.webadmin.landing import WebAdminLandingResource
from calendarserver.webcal.resource import WebCalendarResource
from socket import fromfd, AF_UNIX, SOCK_STREAM, socketpair
from subprocess import Popen, PIPE
from twext.enterprise.adbapi2 import ConnectionPool, ConnectionPoolConnection
from twext.enterprise.adbapi2 import ConnectionPoolClient
from twext.enterprise.ienterprise import ORACLE_DIALECT
from twext.enterprise.ienterprise import POSTGRES_DIALECT
from twext.internet.ssl import ChainingOpenSSLContextFactory
from twext.python.filepath import CachingFilePath
from twext.python.filepath import CachingFilePath as FilePath
from twext.python.log import Logger
from twext.who.checker import HTTPDigestCredentialChecker
from twext.who.checker import UsernamePasswordCredentialChecker
from twisted.application.service import Service
from twisted.cred.error import UnauthorizedLogin
from twisted.cred.portal import Portal
from twisted.internet import reactor as _reactor
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred, succeed
from twisted.internet.reactor import addSystemEventTrigger
from twisted.internet.tcp import Connection
from twisted.python.usage import UsageError
from twistedcaldav.bind import doBind
from twistedcaldav.cache import CacheStoreNotifierFactory
from twistedcaldav.config import ConfigurationError
from twistedcaldav.controlapi import ControlAPIResource
from twistedcaldav.directory.addressbook import DirectoryAddressBookHomeProvisioningResource
from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
from twistedcaldav.directory.digest import QopDigestCredentialFactory
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
from twistedcaldav.directorybackedaddressbook import DirectoryBackedAddressBookResource
from twistedcaldav.resource import AuthenticationWrapper
from twistedcaldav.simpleresource import SimpleResource, SimpleRedirectResource, \
SimpleUnavailableResource
from twistedcaldav.stdconfig import config
from twistedcaldav.timezones import TimezoneCache
from twistedcaldav.timezoneservice import TimezoneServiceResource
from twistedcaldav.timezonestdservice import TimezoneStdServiceResource
from twistedcaldav.util import getPasswordFromKeychain
from twistedcaldav.util import KeychainAccessError, KeychainPasswordNotFound
from txdav.base.datastore.dbapiclient import DBAPIConnector
from txdav.base.datastore.subpostgres import PostgresService
from txdav.caldav.datastore.scheduling.ischedule.dkim import DKIMUtils, DomainKeyResource
from txdav.caldav.datastore.scheduling.ischedule.localservers import buildServersDB
from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
from txdav.common.datastore.file import CommonDataStore as CommonFileDataStore
from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.sql import CommonDataStore as CommonSQLDataStore
from txdav.common.datastore.sql import current_sql_schema
from txdav.common.datastore.upgrade.sql.upgrade import NotAllowedToUpgrade
from txdav.dps.client import DirectoryService as DirectoryProxyClientService
from txdav.who.cache import CachingDirectoryService
from txdav.who.util import directoryFromConfig
from txweb2.auth.basic import BasicCredentialFactory
from txweb2.auth.tls import TLSCredentialsFactory, TLSCredentials
from txweb2.dav import auth
from txweb2.dav.auth import IPrincipalCredentials
from txweb2.dav.util import joinURL
from txweb2.http_headers import Headers
from txweb2.resource import Resource
from txweb2.static import File as FileResource
from urllib import quote
import OpenSSL
import errno
import os
import psutil
import sys
try:
from twistedcaldav.authkerb import NegotiateCredentialFactory
NegotiateCredentialFactory
except ImportError:
NegotiateCredentialFactory = None
log = Logger()
def pgServiceFromConfig(config, subServiceFactory, uid=None, gid=None):
    """
    Construct a L{PostgresService} from a given configuration and subservice.

    @param config: the configuration to derive postgres configuration
        parameters from.

    @param subServiceFactory: A factory for the service to start once the
        L{PostgresService} has been initialized.

    @param uid: The user-ID to run the PostgreSQL server as.

    @param gid: The group-ID to run the PostgreSQL server as.

    @return: a service which can start postgres.

    @rtype: L{PostgresService}
    """
    pgConfig = config.Postgres
    # Collect the keyword options separately for readability.
    serviceOptions = dict(
        databaseName=pgConfig.DatabaseName,
        clusterName=pgConfig.ClusterName,
        logFile=pgConfig.LogFile,
        logDirectory=config.LogRoot if pgConfig.LogRotation else "",
        socketDir=pgConfig.SocketDirectory,
        socketName=pgConfig.SocketName,
        listenAddresses=pgConfig.ListenAddresses,
        sharedBuffers=pgConfig.SharedBuffers,
        maxConnections=pgConfig.MaxConnections,
        options=pgConfig.Options,
        uid=uid, gid=gid,
        spawnedDBUser=config.SpawnedDBUser,
        pgCtl=pgConfig.Ctl,
        initDB=pgConfig.Init,
    )
    dbRoot = CachingFilePath(config.DatabaseRoot)
    return PostgresService(
        dbRoot, subServiceFactory, current_sql_schema, **serviceOptions
    )
class ConnectionWithPeer(Connection):
    """
    A L{Connection} that always reports itself as connected and can describe
    both endpoints of its underlying socket for logging purposes.
    """

    connected = True

    def getPeer(self):
        # Format must remain "<peer: fd id>" for existing log consumers.
        fd, uniq = self.socket.fileno(), id(self)
        return "<peer: %r %r>" % (fd, uniq)

    def getHost(self):
        fd, uniq = self.socket.fileno(), id(self)
        return "<host: %r %r>" % (fd, uniq)
def transactionFactoryFromFD(dbampfd, dialect, paramstyle):
    """
    Create a transaction factory from an inherited file descriptor, such as one
    created by L{ConnectionDispenser}.

    @param dbampfd: file descriptor of a socket already connected to a
        connection-pool server in the parent process
    @type dbampfd: C{int}
    @param dialect: the SQL dialect in use (e.g. POSTGRES_DIALECT)
    @param paramstyle: the DB-API parameter style in use

    @return: a callable that begins new transactions over the AMP connection
    """
    skt = fromfd(dbampfd, AF_UNIX, SOCK_STREAM)
    # fromfd() duplicated the descriptor, so the inherited one can be closed.
    os.close(dbampfd)
    protocol = ConnectionPoolClient(dialect=dialect, paramstyle=paramstyle)
    transport = ConnectionWithPeer(skt, protocol)
    protocol.makeConnection(transport)
    transport.startReading()
    return protocol.newTransaction
class ConnectionDispenser(object):
    """
    A L{ConnectionDispenser} can dispense already-connected file descriptors,
    for use with subprocess spawning.
    """

    def __init__(self, connectionPool):
        self.pool = connectionPool

    def dispense(self):
        """
        Dispense a socket object, already connected to a server, for a client
        in a subprocess.
        """
        clientSide, serverSide = socketpair(AF_UNIX, SOCK_STREAM)
        # The server side is wired into the pool in this process; the client
        # side is handed to the subprocess.
        protocol = ConnectionPoolConnection(self.pool)
        transport = ConnectionWithPeer(serverSide, protocol)
        protocol.makeConnection(transport)
        transport.startReading()
        return clientSide
def storeFromConfigWithoutDPS(config, txnFactory):
    """
    Create a store whose directory service is built directly from the
    configuration (no Directory Proxy Service sidecar involved).

    @param config: the configuration object
    @param txnFactory: transaction factory for a SQL store, or C{None} for a
        filesystem store
    @return: the data store with its directory service attached
    """
    store = storeFromConfig(config, txnFactory, None)
    directory = directoryFromConfig(config, store)
    # In-process caching of the directory deliberately disabled here:
    # if config.DirectoryProxy.InProcessCachingSeconds:
    #     directory = CachingDirectoryService(
    #         directory,
    #         expireSeconds=config.DirectoryProxy.InProcessCachingSeconds
    #     )
    store.setDirectoryService(directory)
    return store
def storeFromConfigWithDPSClient(config, txnFactory):
    """
    Create a store whose directory service is a client of the Directory
    Proxy Service sidecar, optionally wrapped in an in-process cache.

    @param config: the configuration object
    @param txnFactory: transaction factory for a SQL store, or C{None} for a
        filesystem store
    @return: the data store with its directory service attached
    """
    store = storeFromConfig(config, txnFactory, None)
    dpsDirectory = DirectoryProxyClientService(config.DirectoryRealmName)
    if config.Servers.Enabled:
        dpsDirectory.setServersDB(buildServersDB(config.Servers.MaxClients))
    cacheSeconds = config.DirectoryProxy.InProcessCachingSeconds
    if cacheSeconds:
        dpsDirectory = CachingDirectoryService(
            dpsDirectory,
            expireSeconds=cacheSeconds
        )
    store.setDirectoryService(dpsDirectory)
    return store
def storeFromConfigWithDPSServer(config, txnFactory):
    """
    Create the store used by the Directory Proxy Service sidecar itself,
    with its directory service built directly from the configuration.

    @param config: the configuration object
    @param txnFactory: transaction factory for a SQL store, or C{None} for a
        filesystem store
    @return: the data store with its directory service attached
    """
    store = storeFromConfig(config, txnFactory, None)
    directory = directoryFromConfig(config, store)
    # Sidecar-level caching deliberately disabled here:
    # if config.DirectoryProxy.InSidecarCachingSeconds:
    #     directory = CachingDirectoryService(
    #         directory,
    #         expireSeconds=config.DirectoryProxy.InSidecarCachingSeconds
    #     )
    store.setDirectoryService(directory)
    return store
def storeFromConfig(config, txnFactory, directoryService):
    """
    Produce an L{IDataStore} from the given configuration, transaction factory,
    and notifier factory.

    If the transaction factory is C{None}, we will create a filesystem
    store.  Otherwise, a SQL store, using that connection information.

    @param config: the configuration object
    @param txnFactory: transaction factory for a SQL store, or C{None} for a
        filesystem store
    @param directoryService: the directory service for the store, or C{None}
        to attach one later via setDirectoryService()
    """
    #
    # Configure NotifierFactory
    #
    notifierFactories = {}
    if config.Notifications.Enabled:
        notifierFactories["push"] = NotifierFactory(config.ServerHostName, config.Notifications.CoalesceSeconds)
    if config.EnableResponseCache and config.Memcached.Pools.Default.ClientEnabled:
        notifierFactories["cache"] = CacheStoreNotifierFactory()

    # A configured quota of zero means "no quota".
    quota = config.UserQuota
    if quota == 0:
        quota = None
    if txnFactory is not None:
        # NOTE(review): the scheme is "https" in both branches, even when
        # EnableSSL is off -- possibly intentional (e.g. TLS-terminating
        # proxy in front), but confirm before relying on it.
        if config.EnableSSL:
            uri = "https://{config.ServerHostName}:{config.SSLPort}".format(config=config)
        else:
            uri = "https://{config.ServerHostName}:{config.HTTPPort}".format(config=config)
        attachments_uri = uri + "/calendars/__uids__/%(home)s/dropbox/%(dropbox_id)s/%(name)s"
        store = CommonSQLDataStore(
            txnFactory, notifierFactories,
            directoryService,
            FilePath(config.AttachmentsRoot), attachments_uri,
            config.EnableCalDAV, config.EnableCardDAV,
            config.EnableManagedAttachments,
            quota=quota,
            logLabels=config.LogDatabase.LabelsInSQL,
            logStats=config.LogDatabase.Statistics,
            logStatsLogFile=config.LogDatabase.StatisticsLogFile,
            logSQL=config.LogDatabase.SQLStatements,
            logTransactionWaits=config.LogDatabase.TransactionWaitSeconds,
            timeoutTransactions=config.TransactionTimeoutSeconds,
            cacheQueries=config.QueryCaching.Enabled,
            cachePool=config.QueryCaching.MemcachedPool,
            cacheExpireSeconds=config.QueryCaching.ExpireSeconds
        )
    else:
        store = CommonFileDataStore(
            FilePath(config.DocumentRoot),
            notifierFactories, directoryService,
            config.EnableCalDAV, config.EnableCardDAV,
            quota=quota
        )

    # FIXME: NotifierFactories need a reference to the store in order
    # to get a txn in order to possibly create a Work item
    for notifierFactory in notifierFactories.values():
        notifierFactory.store = store
    return store
# MOVE2WHO -- should we move this class somewhere else?
class PrincipalCredentialChecker(object):
    """
    A cred checker which verifies the supplied credentials against the
    already-looked-up authentication principal and, on success, returns the
    (authentication, authorization) principal pair.
    """
    credentialInterfaces = (IPrincipalCredentials,)

    @inlineCallbacks
    def requestAvatarId(self, credentials):
        """
        @param credentials: an object adaptable to IPrincipalCredentials
        @return: Deferred firing (authnPrincipal, authzPrincipal)
        @raise UnauthorizedLogin: if the user is unknown, not allowed to log
            in, or the credentials fail verification
        """
        credentials = IPrincipalCredentials(credentials)

        if credentials.authnPrincipal is None:
            raise UnauthorizedLogin(
                "No such user: {user}".format(
                    user=credentials.credentials.username
                )
            )

        # See if record is enabledForLogin
        if not credentials.authnPrincipal.record.isLoginEnabled():
            raise UnauthorizedLogin(
                "User not allowed to log in: {user}".format(
                    user=credentials.credentials.username
                )
            )

        # Handle Kerberos as a separate behavior
        try:
            from twistedcaldav.authkerb import NegotiateCredentials
        except ImportError:
            NegotiateCredentials = None

        if NegotiateCredentials and isinstance(credentials.credentials, NegotiateCredentials):
            # If we get here with Kerberos, then authentication has already succeeded
            returnValue(
                (
                    credentials.authnPrincipal,
                    credentials.authzPrincipal,
                )
            )
        # Handle TLS Client Certificate
        elif isinstance(credentials.credentials, TLSCredentials):
            # If we get here with TLS, then authentication (certificate verification) has already succeeded
            returnValue(
                (
                    credentials.authnPrincipal,
                    credentials.authzPrincipal,
                )
            )
        else:
            if (yield credentials.authnPrincipal.record.verifyCredentials(credentials.credentials)):
                returnValue(
                    (
                        credentials.authnPrincipal,
                        credentials.authzPrincipal,
                    )
                )
            else:
                raise UnauthorizedLogin(
                    "Incorrect credentials for user: {user}".format(
                        user=credentials.credentials.username
                    )
                )
def getRootResource(config, newStore, resources=None):
    """
    Set up directory service and resource hierarchy based on config.
    Return root resource.

    Additional resources can be added to the hierarchy by passing a list of
    tuples containing: path, resource class, __init__ args list, and optional
    authentication schemes list ("basic", "digest").

    If the store is specified, then it has already been constructed, so use it.
    Otherwise build one with L{storeFromConfig}.

    @param config: the configuration object driving which resources exist
    @param newStore: an already-constructed data store; must not be C{None}
    @param resources: optional extra resources as (path, class, args, schemes)
        tuples
    @return: the root resource, wrapped in authentication and logging wrappers
    """
    if newStore is None:
        raise RuntimeError("Internal error, 'newStore' must be specified.")

    if resources is None:
        resources = []

    # FIXME: this is only here to workaround circular imports
    doBind()

    #
    # Default resource classes
    #
    rootResourceClass = RootResource
    calendarResourceClass = DirectoryCalendarHomeProvisioningResource
    iScheduleResourceClass = IScheduleInboxResource
    conduitResourceClass = ConduitResource
    timezoneServiceResourceClass = TimezoneServiceResource
    timezoneStdServiceResourceClass = TimezoneStdServiceResource
    webCalendarResourceClass = WebCalendarResource
    webAdminResourceClass = WebAdminLandingResource
    addressBookResourceClass = DirectoryAddressBookHomeProvisioningResource
    directoryBackedAddressBookResourceClass = DirectoryBackedAddressBookResource
    apnSubscriptionResourceClass = APNSubscriptionResource
    principalResourceClass = DirectoryPrincipalProvisioningResource
    controlResourceClass = ControlAPIResource

    directory = newStore.directoryService()
    principalCollection = principalResourceClass("/principals/", directory)

    #
    # Configure the Site and Wrappers
    #
    wireEncryptedCredentialFactories = []
    wireUnencryptedCredentialFactories = []

    portal = Portal(auth.DavRealm())

    portal.registerChecker(UsernamePasswordCredentialChecker(directory))
    portal.registerChecker(HTTPDigestCredentialChecker(directory))
    portal.registerChecker(PrincipalCredentialChecker())

    realm = directory.realmName.encode("utf-8") or ""

    log.info("Configuring authentication for realm: {realm}", realm=realm)

    # Build the credential factories for each enabled authentication scheme;
    # a factory goes in the "unencrypted" list too only if the scheme is
    # explicitly allowed over an unencrypted wire.
    for scheme, schemeConfig in config.Authentication.iteritems():
        scheme = scheme.lower()

        credFactory = None

        if schemeConfig["Enabled"]:
            log.info("Setting up scheme: {scheme}", scheme=scheme)

            if scheme == "kerberos":
                if not NegotiateCredentialFactory:
                    log.info("Kerberos support not available")
                    continue

                try:
                    principal = schemeConfig["ServicePrincipal"]
                    if not principal:
                        credFactory = NegotiateCredentialFactory(
                            serviceType="HTTP",
                            hostname=config.ServerHostName,
                        )
                    else:
                        credFactory = NegotiateCredentialFactory(
                            principal=principal,
                        )
                except ValueError:
                    log.info("Could not start Kerberos")
                    continue

            elif scheme == "digest":
                credFactory = QopDigestCredentialFactory(
                    schemeConfig["Algorithm"],
                    schemeConfig["Qop"],
                    realm,
                )

            elif scheme == "basic":
                credFactory = BasicCredentialFactory(realm)

            elif scheme == TLSCredentialsFactory.scheme:
                credFactory = TLSCredentialsFactory(realm)

            elif scheme == "wiki":
                # Wiki authentication is handled elsewhere; nothing to set up.
                pass

            else:
                log.error("Unknown scheme: {scheme}", scheme=scheme)

        if credFactory:
            wireEncryptedCredentialFactories.append(credFactory)
            if schemeConfig.get("AllowedOverWireUnencrypted", False):
                wireUnencryptedCredentialFactories.append(credFactory)

    #
    # Setup Resource hierarchy
    #
    log.info("Setting up document root at: {root}", root=config.DocumentRoot)

    if config.EnableCalDAV:
        log.info("Setting up calendar collection: {cls}", cls=calendarResourceClass)
        calendarCollection = calendarResourceClass(
            directory,
            "/calendars/",
            newStore,
        )
        principalCollection.calendarCollection = calendarCollection

    if config.EnableCardDAV:
        log.info("Setting up address book collection: {cls}", cls=addressBookResourceClass)
        addressBookCollection = addressBookResourceClass(
            directory,
            "/addressbooks/",
            newStore,
        )
        principalCollection.addressBookCollection = addressBookCollection

        if config.DirectoryAddressBook.Enabled and config.EnableSearchAddressBook:
            log.info(
                "Setting up directory address book: {cls}",
                cls=directoryBackedAddressBookResourceClass)

            directoryBackedAddressBookCollection = directoryBackedAddressBookResourceClass(
                principalCollections=(principalCollection,),
                principalDirectory=directory,
                uri=joinURL("/", config.DirectoryAddressBook.name, "/")
            )
            # Provision immediately if the reactor is already running,
            # otherwise defer until startup.
            if _reactor._started:
                directoryBackedAddressBookCollection.provisionDirectory()
            else:
                addSystemEventTrigger("after", "startup", directoryBackedAddressBookCollection.provisionDirectory)
        else:
            # remove /directory from previous runs that may have created it
            directoryPath = os.path.join(config.DocumentRoot, config.DirectoryAddressBook.name)
            try:
                FilePath(directoryPath).remove()
                log.info("Deleted: {path}", path=directoryPath)
            except (OSError, IOError), e:
                if e.errno != errno.ENOENT:
                    log.error("Could not delete: {path} : {error}", path=directoryPath, error=e)

    # In migration-only mode most collections are replaced by a
    # "service unavailable" resource.
    if config.MigrationOnly:
        unavailable = SimpleUnavailableResource((principalCollection,))
    else:
        unavailable = None

    log.info("Setting up root resource: {cls}", cls=rootResourceClass)

    root = rootResourceClass(
        config.DocumentRoot,
        principalCollections=(principalCollection,),
    )

    root.putChild("principals", principalCollection if unavailable is None else unavailable)
    if config.EnableCalDAV:
        root.putChild("calendars", calendarCollection if unavailable is None else unavailable)
    if config.EnableCardDAV:
        root.putChild('addressbooks', addressBookCollection if unavailable is None else unavailable)
        if config.DirectoryAddressBook.Enabled and config.EnableSearchAddressBook:
            root.putChild(config.DirectoryAddressBook.name, directoryBackedAddressBookCollection if unavailable is None else unavailable)

    # /.well-known
    if config.EnableWellKnown:
        log.info("Setting up .well-known collection resource")

        wellKnownResource = SimpleResource(
            principalCollections=(principalCollection,),
            isdir=True,
            defaultACL=SimpleResource.allReadACL
        )
        root.putChild(".well-known", wellKnownResource)
        for enabled, wellknown_name, redirected_to in (
            (config.EnableCalDAV, "caldav", "/principals/",),
            (config.EnableCardDAV, "carddav", "/principals/",),
            (config.TimezoneService.Enabled, "timezone", config.TimezoneService.URI,),
            (config.Scheduling.iSchedule.Enabled, "ischedule", "/ischedule"),
        ):
            if enabled:
                if config.EnableSSL:
                    scheme = "https"
                    port = config.SSLPort
                else:
                    scheme = "http"
                    port = config.HTTPPort
                wellKnownResource.putChild(
                    wellknown_name,
                    SimpleRedirectResource(
                        principalCollections=(principalCollection,),
                        isdir=False,
                        defaultACL=SimpleResource.allReadACL,
                        scheme=scheme, port=port, path=redirected_to)
                )

    # Arbitrary URL-to-filesystem aliases from the configuration.
    for alias in config.Aliases:
        url = alias.get("url", None)
        path = alias.get("path", None)
        if not url or not path or url[0] != "/":
            log.error("Invalid alias: URL: {url} Path: {path}", url=url, path=path)
            continue
        urlbits = url[1:].split("/")
        parent = root
        for urlpiece in urlbits[:-1]:
            child = parent.getChild(urlpiece)
            if child is None:
                child = Resource()
                parent.putChild(urlpiece, child)
            parent = child
        if parent.getChild(urlbits[-1]) is not None:
            log.error("Invalid alias: URL: {url} Path: {path} already exists", url=url, path=path)
            continue
        resource = FileResource(path)
        parent.putChild(urlbits[-1], resource)
        log.info("Added alias {url} -> {path}", url=url, path=path)

    # Need timezone cache before setting up any timezone service
    log.info("Setting up Timezone Cache")
    TimezoneCache.create()

    # Timezone service is optional
    if config.EnableTimezoneService:
        log.info(
            "Setting up time zone service resource: {cls}",
            cls=timezoneServiceResourceClass)

        timezoneService = timezoneServiceResourceClass(
            root,
        )
        root.putChild("timezones", timezoneService)

    # Standard Timezone service is optional
    if config.TimezoneService.Enabled:
        log.info(
            "Setting up standard time zone service resource: {cls}",
            cls=timezoneStdServiceResourceClass)

        timezoneStdService = timezoneStdServiceResourceClass(
            root,
        )
        root.putChild("stdtimezones", timezoneStdService)

        # TODO: we only want the master to do this
        if _reactor._started:
            _reactor.callLater(0, timezoneStdService.onStartup)
        else:
            addSystemEventTrigger("after", "startup", timezoneStdService.onStartup)

    #
    # iSchedule/cross-pod service for podding
    #
    if config.Servers.Enabled:
        log.info("Setting up iSchedule podding inbox resource: {cls}", cls=iScheduleResourceClass)
        ischedule = iScheduleResourceClass(
            root,
            newStore,
            podding=True
        )
        root.putChild(config.Servers.InboxName, ischedule if unavailable is None else unavailable)

        log.info("Setting up podding conduit resource: {cls}", cls=conduitResourceClass)
        conduit = conduitResourceClass(
            root,
            newStore,
        )
        root.putChild(config.Servers.ConduitName, conduit)

    #
    # iSchedule service (not used for podding)
    #
    if config.Scheduling.iSchedule.Enabled:
        log.info("Setting up iSchedule inbox resource: {cls}", cls=iScheduleResourceClass)
        ischedule = iScheduleResourceClass(
            root,
            newStore,
        )
        root.putChild("ischedule", ischedule if unavailable is None else unavailable)

        # Do DomainKey resources
        DKIMUtils.validConfiguration(config)
        if config.Scheduling.iSchedule.DKIM.Enabled:
            log.info("Setting up domainkey resource: {res}", res=DomainKeyResource)
            domain = config.Scheduling.iSchedule.DKIM.Domain if config.Scheduling.iSchedule.DKIM.Domain else config.ServerHostName
            dk = DomainKeyResource(
                domain,
                config.Scheduling.iSchedule.DKIM.KeySelector,
                config.Scheduling.iSchedule.DKIM.PublicKeyFile,
            )
            wellKnownResource.putChild("domainkey", dk)

    #
    # WebCal
    #
    if config.WebCalendarRoot:
        log.info(
            "Setting up WebCalendar resource: {res}",
            res=config.WebCalendarRoot)
        webCalendar = webCalendarResourceClass(
            config.WebCalendarRoot,
            principalCollections=(principalCollection,),
        )
        root.putChild("webcal", webCalendar if unavailable is None else unavailable)

    #
    # WebAdmin
    #
    if config.EnableWebAdmin:
        log.info("Setting up WebAdmin resource")
        webAdmin = webAdminResourceClass(
            config.WebCalendarRoot,
            root,
            directory,
            newStore,
            principalCollections=(principalCollection,),
        )
        root.putChild("admin", webAdmin)

    #
    # Control API
    #
    if config.EnableControlAPI:
        log.info("Setting up Control API resource")
        controlAPI = controlResourceClass(
            root,
            directory,
            newStore,
            principalCollections=(principalCollection,),
        )
        root.putChild("control", controlAPI)

    #
    # Apple Push Notification Subscriptions
    #
    apnConfig = config.Notifications.Services.APNS
    if apnConfig.Enabled:
        log.info(
            "Setting up APNS resource at /{url}",
            url=apnConfig["SubscriptionURL"])
        apnResource = apnSubscriptionResourceClass(root, newStore)
        root.putChild(apnConfig["SubscriptionURL"], apnResource)

    #
    # Configure ancillary data
    #

    # MOVE2WHO
    log.info("Configuring authentication wrapper")

    overrides = {}
    if resources:
        for path, cls, args, schemes in resources:

            # putChild doesn't want "/" starting the path
            root.putChild(path, cls(root, newStore, *args))

            # overrides requires "/" prepended
            path = "/" + path

            overrides[path] = []
            for scheme in schemes:
                if scheme == "basic":
                    overrides[path].append(BasicCredentialFactory(realm))

                elif scheme == "digest":
                    schemeConfig = config.Authentication.Digest
                    overrides[path].append(QopDigestCredentialFactory(
                        schemeConfig["Algorithm"],
                        schemeConfig["Qop"],
                        realm,
                    ))
            log.info(
                "Overriding {path} with {cls} ({schemes})",
                path=path, cls=cls, schemes=schemes)

    authWrapper = AuthenticationWrapper(
        root,
        portal,
        wireEncryptedCredentialFactories,
        wireUnencryptedCredentialFactories,
        (auth.IPrincipal,),
        overrides=overrides
    )

    logWrapper = DirectoryLogWrapperResource(
        authWrapper,
        directory,
    )

    # Keep a reference to the (unwrapped) root on the store.
    newStore.rootResource = root

    return logWrapper
def getDBPool(config):
    """
    Inspect configuration to determine what database connection pool
    to set up.

    @param config: the configuration object
    @return: (L{ConnectionPool}, transactionFactory); the pool element is
        C{None} when this process does not own a pool (AMP-inherited
        connection or filesystem store), and the factory is C{None} when
        no database is in use.
    """
    if config.DBType == 'oracle':
        dialect = ORACLE_DIALECT
        paramstyle = 'numeric'
    else:
        dialect = POSTGRES_DIALECT
        paramstyle = 'pyformat'
    pool = None
    if config.DBAMPFD:
        # A connection to the parent process's pool was inherited over a
        # file descriptor; build transactions on top of it.
        txnFactory = transactionFactoryFromFD(
            int(config.DBAMPFD), dialect, paramstyle
        )
    elif not config.UseDatabase:
        txnFactory = None
    elif not config.SharedConnectionPool:
        if config.DBType == '':
            # Empty DBType means the implicitly-managed postgres service;
            # use its connection factory directly.  (NOTE(review): original
            # comment here was truncated — it warned about one pool per
            # master per slave, resulting in all kinds of mayhem...)
            connectionFactory = pgServiceFromConfig(config, None).produceConnection
        else:
            connectionFactory = DBAPIConnector.connectorFor(config.DBType, **config.DatabaseConnection).connect
        pool = ConnectionPool(connectionFactory, dialect=dialect,
                              paramstyle=paramstyle,
                              maxConnections=config.MaxDBConnectionsPerPool)
        txnFactory = pool.connection
    else:
        raise UsageError(
            "trying to use DB in slave, but no connection info from parent"
        )
    return (pool, txnFactory)
class FakeRequest(object):
    """
    A minimal stand-in for a txweb2 request, sufficient for resource lookup
    (locateResource / locateChildResource) outside of a real HTTP request
    cycle.
    """
    def __init__(self, rootResource, method, path, uri='/', transaction=None):
        self.rootResource = rootResource
        self.method = method
        self.path = path
        self.uri = uri
        # Bidirectional caches between URLs and located resources.
        self._resourcesByURL = {}
        self._urlsByResource = {}
        self.headers = Headers()
        if transaction is not None:
            self._newStoreTransaction = transaction

    @inlineCallbacks
    def _getChild(self, resource, segments):
        # Recursively resolve the remaining path segments to a resource.
        if not segments:
            returnValue(resource)

        child, remaining = (yield resource.locateChild(self, segments))
        returnValue((yield self._getChild(child, remaining)))

    @inlineCallbacks
    def locateResource(self, url):
        """
        Resolve an absolute URL path to a resource, caching the result.
        """
        url = url.strip("/")
        segments = url.split("/")
        resource = (yield self._getChild(self.rootResource, segments))
        if resource:
            self._rememberResource(resource, url)
        returnValue(resource)

    @inlineCallbacks
    def locateChildResource(self, parent, childName):
        """
        Resolve a single child of an already-located parent resource.
        """
        if parent is None or childName is None:
            returnValue(None)

        parentURL = self.urlForResource(parent)
        if not parentURL.endswith("/"):
            parentURL += "/"
        url = parentURL + quote(childName)

        segment = childName

        resource = (yield self._getChild(parent, [segment]))
        if resource:
            self._rememberResource(resource, url)
        returnValue(resource)

    def _rememberResource(self, resource, url):
        # Populate both lookup caches.
        self._resourcesByURL[url] = resource
        self._urlsByResource[resource] = url
        return resource

    def _forgetResource(self, resource, url):
        if url in self._resourcesByURL:
            del self._resourcesByURL[url]
        if resource in self._urlsByResource:
            del self._urlsByResource[resource]

    def urlForResource(self, resource):
        """
        Look up the cached URL of a previously-located resource; raises a
        RuntimeError subclass if the resource was never located here.
        """
        url = self._urlsByResource.get(resource, None)
        if url is None:
            class NoURLForResourceError(RuntimeError):
                pass
            raise NoURLForResourceError(resource)
        return url

    def addResponseFilter(self, *args, **kwds):
        # Response filters are meaningless for a fake request; ignore them.
        pass
def memoryForPID(pid, residentOnly=True):
    """
    Return the amount of memory in use for the given process.  If residentOnly
    is True, then RSS is returned; if False, then virtual memory is returned.

    @param pid: process id
    @type pid: C{int}
    @param residentOnly: Whether only resident memory should be included
    @type residentOnly: C{boolean}
    @return: Memory used by process in bytes
    @rtype: C{int}
    """
    # get_memory_info() is the legacy psutil API used throughout this file.
    info = psutil.Process(pid).get_memory_info()
    if residentOnly:
        return info.rss
    return info.vms
class MemoryLimitService(Service, object):
    """
    A service which when paired with a DelayedStartupProcessMonitor will periodically
    examine the memory usage of the monitored processes and stop any which exceed
    a configured limit.  Memcached processes are ignored.
    """

    def __init__(self, processMonitor, intervalSeconds, limitBytes, residentOnly, reactor=None):
        """
        @param processMonitor: the DelayedStartupProcessMonitor
        @param intervalSeconds: how often to check
        @type intervalSeconds: C{int}
        @param limitBytes: any monitored process over this limit is stopped
        @type limitBytes: C{int}
        @param residentOnly: whether only resident memory should be included
        @type residentOnly: C{boolean}
        @param reactor: for testing
        """
        self._processMonitor = processMonitor
        self._seconds = intervalSeconds
        self._bytes = limitBytes
        self._residentOnly = residentOnly
        self._delayedCall = None

        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # Unit tests can swap out _memoryForPID
        self._memoryForPID = memoryForPID

    def startService(self):
        """
        Start scheduling the memory checks
        """
        super(MemoryLimitService, self).startService()
        self._delayedCall = self._reactor.callLater(self._seconds, self.checkMemory)

    def stopService(self):
        """
        Stop checking memory
        """
        super(MemoryLimitService, self).stopService()
        if self._delayedCall is not None and self._delayedCall.active():
            self._delayedCall.cancel()
            self._delayedCall = None

    def checkMemory(self):
        """
        Stop any processes monitored by our paired processMonitor whose resident
        memory exceeds our configured limitBytes.  Reschedule intervalSeconds in
        the future.
        """
        try:
            for name in self._processMonitor.processes:
                if name.startswith("memcached"):
                    continue
                proto = self._processMonitor.protocols.get(name, None)
                if proto is not None:
                    proc = proto.transport
                    pid = proc.pid
                    try:
                        memory = self._memoryForPID(pid, self._residentOnly)
                    # "as e" form is valid on Python 2.6+ and required on
                    # Python 3 (replaces legacy "except Exception, e").
                    except Exception as e:
                        log.error(
                            "Unable to determine memory usage of PID: {pid} ({err})",
                            pid=pid, err=e)
                        continue
                    if memory > self._bytes:
                        log.warn(
                            "Killing large process: {name} PID:{pid} {memtype}:{mem}",
                            name=name, pid=pid,
                            memtype=("Resident" if self._residentOnly else "Virtual"),
                            mem=memory)
                        self._processMonitor.stopProcess(name)
        finally:
            # Always reschedule, even if a check iteration blew up.
            self._delayedCall = self._reactor.callLater(self._seconds, self.checkMemory)
def checkDirectories(config):
    """
    Make sure that various key directories exist (and create if needed)

    @param config: the configuration object whose *Root paths are verified
    """
    #
    # Verify that server root actually exists
    #
    checkDirectory(
        config.ServerRoot,
        "Server root",
        # Require write access because one might not allow editing on /
        access=os.W_OK,
        wait=True  # Wait in a loop until ServerRoot exists
    )

    #
    # Verify that other root paths are OK
    #
    # 0o750/0o770 octal literals are valid on both Python 2.6+ and Python 3,
    # unlike the legacy 0750 form.
    if config.DataRoot.startswith(config.ServerRoot + os.sep):
        checkDirectory(
            config.DataRoot,
            "Data root",
            access=os.W_OK,
            create=(0o750, config.UserName, config.GroupName),
        )
    if config.DocumentRoot.startswith(config.DataRoot + os.sep):
        checkDirectory(
            config.DocumentRoot,
            "Document root",
            # Don't require write access because one might not allow editing on /
            access=os.R_OK,
            create=(0o750, config.UserName, config.GroupName),
        )
    if config.ConfigRoot.startswith(config.ServerRoot + os.sep):
        checkDirectory(
            config.ConfigRoot,
            "Config root",
            access=os.W_OK,
            create=(0o750, config.UserName, config.GroupName),
        )
    if config.SocketFiles.Enabled:
        checkDirectory(
            config.SocketRoot,
            "Socket file root",
            access=os.W_OK,
            create=(
                config.SocketFiles.Permissions,
                config.SocketFiles.Owner,
                config.SocketFiles.Group
            )
        )

    checkDirectory(
        config.LogRoot,
        "Log root",
        access=os.W_OK,
        create=(0o750, config.UserName, config.GroupName),
    )

    checkDirectory(
        config.RunRoot,
        "Run root",
        access=os.W_OK,
        # Run root is group-writable so child processes can create sockets.
        create=(0o770, config.UserName, config.GroupName),
    )
class Stepper(object):
    """
    Manages the sequential, deferred execution of "steps" which are objects
    implementing these methods:

        - stepWithResult(result)
            @param result: the result returned from the previous step
            @returns: Deferred

        - stepWithFailure(failure)
            @param failure: a Failure encapsulating the exception from the
                previous step
            @returns: Failure to continue down the errback chain, or a
                Deferred returning a non-Failure to switch back to the
                callback chain

    "Step" objects are added in order by calling addStep(), and when start()
    is called, the Stepper will call the stepWithResult() of the first step.
    If stepWithResult() doesn't raise an Exception, the Stepper will call the
    next step's stepWithResult().  If a stepWithResult() raises an Exception,
    the Stepper will call the next step's stepWithFailure() -- if it's
    implemented -- passing it a Failure object.  If the stepWithFailure()
    decides it can handle the Failure and proceed, it can return a non-Failure
    which is an indicator to the Stepper to call the next step's
    stepWithResult().

    TODO: Create an IStep interface (?)
    """

    def __init__(self):
        self.steps = []
        self.failure = None
        self.result = None
        self.running = False

    def addStep(self, step):
        """
        Adds a step object to the ordered list of steps

        @param step: the object to add
        @type step: an object implementing stepWithResult()

        @return: the Stepper object itself so addStep() calls can be chained
        """
        if self.running:
            raise RuntimeError("Can't add step after start")
        self.steps.append(step)
        return self

    def defaultStepWithResult(self, result):
        # Pass-through callback used for steps lacking stepWithResult().
        return succeed(result)

    def defaultStepWithFailure(self, failure):
        # Log unexpected failures, but let expected configuration/upgrade
        # errors propagate quietly down the errback chain.
        if failure.type not in (
            NotAllowedToUpgrade, ConfigurationError,
            OpenSSL.SSL.Error
        ):
            log.failure("Step failure", failure=failure)
        return failure

    # BUGFIX: this "def" line was corrupted in the source (it read "ne):");
    # reconstructed from the docstring's "@param result" and the
    # self.deferred.callback(result) call below.
    def start(self, result=None):
        """
        Begin executing the added steps in sequence.  If a step object
        does not implement a stepWithResult/stepWithFailure method, a
        default implementation will be used.

        @param result: an optional value to pass to the first step
        @return: the Deferred that will fire when steps are done
        """
        self.running = True
        self.deferred = Deferred()

        for step in self.steps:
            if hasattr(step, "stepWithResult"):
                callBack = step.stepWithResult
            else:
                callBack = self.defaultStepWithResult
            if hasattr(step, "stepWithFailure"):
                errBack = step.stepWithFailure
            else:
                errBack = self.defaultStepWithFailure
            self.deferred.addCallbacks(callBack, errBack)
        self.deferred.callback(result)
        return self.deferred
def requestShutdown(programPath, reason):
    """
    Log the shutdown reason and call the shutdown-requesting program.

    In the case the service is spawned by launchd (or equivalent), if our
    service decides it needs to shut itself down, because of a misconfiguration,
    for example, we can't just exit. We may need to go through the system
    machinery to unload our job, manage reverse proxies, update admin UI, etc.
    Therefore you can configure the ServiceDisablingProgram plist key to point
    to a program to run which will stop our service.

    @param programPath: the full path to a program to call (with no args)
    @type programPath: C{str}
    @param reason: a shutdown reason to log
    @type reason: C{str}
    """
    log.error("Shutting down Calendar and Contacts server")
    log.error(reason)
    # Use the programPath parameter (the documented contract) instead of
    # reaching back into the global config; the only caller passes
    # config.ServiceDisablingProgram here anyway.
    Popen(
        args=[programPath],
        stdout=PIPE,
        stderr=PIPE,
    ).communicate()
def preFlightChecks(config):
    """
    Perform checks prior to spawning any processes. Returns True if the checks
    are ok, False if they don't and we have a ServiceDisablingProgram configured.
    Otherwise exits.

    @param config: the service configuration object
    @return: C{True} when every verification passes; C{False} when one fails
        but a ServiceDisablingProgram is configured (a shutdown is then
        scheduled); on failure without that program, prints the reason and
        calls sys.exit(1) instead of returning
    """
    # run the verifications in order, stopping at the first failure;
    # each verifier returns a (success, reason) tuple
    success, reason = verifyConfig(config)
    if success:
        success, reason = verifyServerRoot(config)
    if success:
        success, reason = verifyTLSCertificate(config)
    if success:
        success, reason = verifyAPNSCertificate(config)
    if not success:
        if config.ServiceDisablingProgram:
            # We can't simply exit if a service manager would just
            # repeatedly launch us; we want our job to get unloaded.
            # If the config.ServiceDisablingProgram is assigned and exists
            # we schedule it to run after startService finishes.
            # Its job is to carry out the platform-specific tasks of disabling
            # the service.
            if os.path.exists(config.ServiceDisablingProgram):
                addSystemEventTrigger(
                    "after", "startup",
                    requestShutdown, config.ServiceDisablingProgram, reason
                )
            return False
        else:
            print(reason)
            sys.exit(1)
    return True
def verifyConfig(config):
    """
    At least one of EnableCalDAV or EnableCardDAV must be True

    @param config: the service configuration object
    @return: (success, reason) tuple
    """
    if not (config.EnableCalDAV or config.EnableCardDAV):
        return False, "Neither CalDAV nor CardDAV are enabled"
    return True, "A protocol is enabled"
def verifyServerRoot(config):
    """
    Ensure server root is not on a phantom volume

    @param config: the service configuration object
    @return: (success, reason) tuple
    """
    code = diagnose.detectPhantomVolume(config.ServerRoot)
    if code == diagnose.EXIT_CODE_SERVER_ROOT_MISSING:
        return False, "ServerRoot is missing"
    elif code == diagnose.EXIT_CODE_PHANTOM_DATA_VOLUME:
        return False, "ServerRoot is supposed to be on a non-boot-volume but it's not"
    return True, "ServerRoot is ok"
def verifyTLSCertificate(config):
    """
    If a TLS certificate is configured, make sure it exists, is non empty,
    and that it's valid.

    @param config: the service configuration object
    @return: (success, reason) tuple
    """
    if config.SSLCertificate:
        if not os.path.exists(config.SSLCertificate):
            message = (
                "The configured TLS certificate ({cert}) is missing".format(
                    cert=config.SSLCertificate
                )
            )
            # surface the problem through the alerting mechanism before failing
            postAlert("MissingCertificateAlert", ["path", config.SSLCertificate])
            return False, message
    else:
        # no certificate configured at all: running without TLS is valid
        return True, "TLS disabled"

    # certificate file exists; reject an empty file explicitly
    length = os.stat(config.SSLCertificate).st_size
    if length == 0:
        message = (
            "The configured TLS certificate ({cert}) is empty".format(
                cert=config.SSLCertificate
            )
        )
        return False, message

    # instantiating the context factory validates key, certificate and
    # chain together; any problem surfaces as an exception
    try:
        ChainingOpenSSLContextFactory(
            config.SSLPrivateKey,
            config.SSLCertificate,
            certificateChainFile=config.SSLAuthorityChain,
            passwdCallback=getSSLPassphrase,
            sslmethod=getattr(OpenSSL.SSL, config.SSLMethod),
            ciphers=config.SSLCiphers.strip()
        )
    except Exception as e:
        message = (
            "The configured TLS certificate ({cert}) cannot be used: {reason}".format(
                cert=config.SSLCertificate,
                reason=str(e)
            )
        )
        return False, message

    return True, "TLS enabled"
def verifyAPNSCertificate(config):
    """
    If APNS certificates are configured, make sure they're valid.

    @param config: the service configuration object
    @return: (success, reason) tuple
    """
    if config.Notifications.Services.APNS.Enabled:
        # check both push service flavours with their keychain account names
        for protocol, accountName in (
            ("CalDAV", "apns:com.apple.calendar"),
            ("CardDAV", "apns:com.apple.contact"),
        ):
            protoConfig = config.Notifications.Services.APNS[protocol]

            # the certificate file must exist
            if not os.path.exists(protoConfig.CertificatePath):
                message = (
                    "The {proto} APNS certificate ({cert}) is missing".format(
                        proto=protocol,
                        cert=protoConfig.CertificatePath
                    )
                )
                postAlert("PushNotificationCertificateAlert", [])
                return False, message

            # derive the push topic from the certificate when not configured
            if not protoConfig.Topic:
                topic = getAPNTopicFromCertificate(protoConfig.CertificatePath)
                protoConfig.Topic = topic
            if not protoConfig.Topic:
                postAlert("PushNotificationCertificateAlert", [])
                message = "Cannot extract APN topic"
                return False, message

            # fall back to the keychain for the private key passphrase
            if not protoConfig.Passphrase:
                try:
                    passphrase = getPasswordFromKeychain(accountName)
                    protoConfig.Passphrase = passphrase
                except KeychainAccessError:
                    # can't query the keychain at all; try without a passphrase
                    pass
                except KeychainPasswordNotFound:
                    # The password doesn't exist in the keychain.
                    postAlert("PushNotificationCertificateAlert", [])
                    message = "Cannot retrieve APN passphrase from keychain"
                    return False, message

            # instantiating the context factory validates the key/cert pair
            try:
                if protoConfig.Passphrase:
                    passwdCallback = lambda *ignored: protoConfig.Passphrase
                else:
                    passwdCallback = None
                ChainingOpenSSLContextFactory(
                    protoConfig.PrivateKeyPath,
                    protoConfig.CertificatePath,
                    certificateChainFile=protoConfig.AuthorityChainPath,
                    passwdCallback=passwdCallback,
                    sslmethod=getattr(OpenSSL.SSL, "TLSv1_METHOD"),
                )
            except Exception as e:
                message = (
                    "The {proto} APNS certificate ({cert}) cannot be used: {reason}".format(
                        proto=protocol,
                        cert=protoConfig.CertificatePath,
                        reason=str(e)
                    )
                )
                postAlert("PushNotificationCertificateAlert", [])
                return False, message
        return True, "APNS enabled"
    else:
        return True, "APNS disabled"
def getSSLPassphrase(*ignored):
    """
    Retrieve the passphrase protecting the configured TLS private key.

    Two mechanisms are tried in order:
      1. the SSLCertAdmin helper (run via sudo), if configured;
      2. the SSLPassPhraseDialog program, if configured, which is passed
         "host:port" and the key type (RSA or DSA) as arguments.

    @return: the passphrase string, or C{None} if it could not be obtained
    """
    if not config.SSLPrivateKey:
        return None

    # mechanism 1: the certadmin helper, invoked through sudo
    if config.SSLCertAdmin and os.path.isfile(config.SSLCertAdmin):
        child = Popen(
            args=[
                "sudo", config.SSLCertAdmin,
                "--get-private-key-passphrase", config.SSLPrivateKey,
            ],
            stdout=PIPE, stderr=PIPE,
        )
        output, error = child.communicate()

        if child.returncode:
            log.error(
                "Could not get passphrase for {key}: {error}",
                key=config.SSLPrivateKey, error=error
            )
        else:
            log.info(
                "Obtained passphrase for {key}", key=config.SSLPrivateKey
            )
            return output.strip()

    # mechanism 2: an external passphrase dialog program
    if (
        config.SSLPassPhraseDialog and
        os.path.isfile(config.SSLPassPhraseDialog)
    ):
        # determine whether the key is RSA or DSA by scanning the PEM header
        sslPrivKey = open(config.SSLPrivateKey)
        try:
            keyType = None
            for line in sslPrivKey.readlines():
                if "-----BEGIN RSA PRIVATE KEY-----" in line:
                    keyType = "RSA"
                    break
                elif "-----BEGIN DSA PRIVATE KEY-----" in line:
                    keyType = "DSA"
                    break
        finally:
            sslPrivKey.close()

        if keyType is None:
            log.error(
                "Could not get private key type for {key}",
                key=config.SSLPrivateKey
            )
        else:
            child = Popen(
                args=[
                    config.SSLPassPhraseDialog,
                    "{}:{}".format(config.ServerHostName, config.SSLPort),
                    keyType,
                ],
                stdout=PIPE, stderr=PIPE,
            )
            output, error = child.communicate()

            if child.returncode:
                log.error(
                    "Could not get passphrase for {key}: {error}",
                    key=config.SSLPrivateKey, error=error
                )
            else:
                return output.strip()

    return None
def postAlert(alertType, args):
    """
    Post an alert by invoking the configured AlertPostingProgram.

    Strictly best-effort: any failure to launch the program is logged and
    never propagated to the caller.

    @param alertType: the alert identifier passed as the first argument
    @type alertType: C{str}
    @param args: extra command line arguments for the alert program
    @type args: C{list} of C{str}
    """
    if (
        config.AlertPostingProgram and
        os.path.exists(config.AlertPostingProgram)
    ):
        try:
            commandLine = [config.AlertPostingProgram, alertType]
            commandLine.extend(args)
            Popen(
                commandLine,
                stdout=PIPE,
                stderr=PIPE,
            ).communicate()
        # "except Exception as e" (not the legacy "except Exception, e")
        # for consistency with the other handlers in this file and for
        # Python 3 compatibility
        except Exception as e:
            log.error(
                "Could not post alert: {alertType} {args} ({error})",
                alertType=alertType, args=args, error=e
            )
| false | true |
f7f5db70dae472604b535c2a10bb9f6d17e68374 | 22,902 | py | Python | ibllib/io/spikeglx.py | Yiman00/ibllib | 7fe5dcba1edd40ea05c974fe8b8584001c6c0c15 | [
"MIT"
] | 1 | 2020-11-21T07:02:21.000Z | 2020-11-21T07:02:21.000Z | ibllib/io/spikeglx.py | Yiman00/ibllib | 7fe5dcba1edd40ea05c974fe8b8584001c6c0c15 | [
"MIT"
] | null | null | null | ibllib/io/spikeglx.py | Yiman00/ibllib | 7fe5dcba1edd40ea05c974fe8b8584001c6c0c15 | [
"MIT"
] | null | null | null | import json
import logging
from pathlib import Path
import re
import numpy as np
import mtscomp
from brainbox.core import Bunch
from ibllib.ephys import neuropixel as neuropixel
from ibllib.io import hashfile
SAMPLE_SIZE = 2 # int16
DEFAULT_BATCH_SIZE = 1e6
_logger = logging.getLogger('ibllib')
class Reader:
    """
    Class for SpikeGLX reading purposes
    Some format description was found looking at the Matlab SDK here
    https://github.com/billkarsh/SpikeGLX/blob/master/MATLAB-SDK/DemoReadSGLXData.m
    """
    def __init__(self, sglx_file):
        """
        :param sglx_file: path to a SpikeGLX binary (.bin) or mtscomp
         compressed binary (.cbin); a sibling .meta file is expected
        """
        self.file_bin = Path(sglx_file)
        self.nbytes = self.file_bin.stat().st_size
        file_meta_data = Path(sglx_file).with_suffix('.meta')
        if not file_meta_data.exists():
            # degraded mode: without metadata we cannot scale samples to
            # Volts nor derive geometry; only raw access remains possible
            self.file_meta_data = None
            self.meta = None
            self.channel_conversion_sample2v = 1
            _logger.warning(str(sglx_file) + " : no metadata file found. Very limited support")
            return
        # normal case we continue reading and interpreting the metadata file
        self.file_meta_data = file_meta_data
        self.meta = read_meta_data(file_meta_data)
        self.channel_conversion_sample2v = _conversion_sample2v_from_meta(self.meta)
        # if we are not looking at a compressed file, use a memmap, otherwise instantiate mtscomp
        if self.is_mtscomp:
            self._raw = mtscomp.Reader()
            self._raw.open(self.file_bin, self.file_bin.with_suffix('.ch'))
        else:
            if self.nc * self.ns * 2 != self.nbytes:
                # metadata disagrees with the file size: trust the file and
                # recompute the duration so ns stays consistent with the data
                ftsec = self.file_bin.stat().st_size / 2 / self.nc / self.fs
                _logger.warning(f"{sglx_file} : meta data and filesize do not checkout\n"
                                f"File size: expected {self.meta['fileSizeBytes']},"
                                f" actual {self.file_bin.stat().st_size}\n"
                                f"File duration: expected {self.meta['fileTimeSecs']},"
                                f" actual {ftsec}\n"
                                f"Will attempt to fudge the meta-data information.")
                self.meta['fileTimeSecs'] = ftsec
            self._raw = np.memmap(sglx_file, dtype='int16', mode='r', shape=(self.ns, self.nc))

    def __getitem__(self, item):
        """Numpy-like slicing: sr[samples] or sr[samples, channels], no sync."""
        if isinstance(item, int) or isinstance(item, slice):
            return self.read(nsel=item, sync=False)
        elif len(item) == 2:
            return self.read(nsel=item[0], csel=item[1], sync=False)

    @property
    def shape(self):
        """ :return: (number of samples, number of channels) """
        return self.ns, self.nc

    @property
    def is_mtscomp(self):
        """ :return: True when the binary is mtscomp-compressed (.cbin) """
        return 'cbin' in self.file_bin.suffix

    @property
    def version(self):
        """:return: neuropixel version string ('3A', '3B1', '3B2'); None without metadata """
        if not self.meta:
            return None
        return _get_neuropixel_version_from_meta(self.meta)

    @property
    def type(self):
        """:return: ap, lf or nidq. Useful to index dictionaries """
        if not self.meta:
            return 0
        return _get_type_from_meta(self.meta)

    @property
    def fs(self):
        """ :return: sampling frequency (Hz) """
        if not self.meta:
            return 1
        return _get_fs_from_meta(self.meta)

    @property
    def nc(self):
        """ :return: number of channels """
        if not self.meta:
            return
        return _get_nchannels_from_meta(self.meta)

    @property
    def ns(self):
        """ :return: number of samples """
        if not self.meta:
            return
        return int(np.round(self.meta.get('fileTimeSecs') * self.fs))

    def read(self, nsel=slice(0, 10000), csel=slice(None), sync=True):
        """
        Read from slices or indexes and scale to Volts
        :param nsel: slice or sample indices
        :param csel: slice or channel indices
        :param sync: if True, also return the sync trace for those samples
        :return: float32 array (and int8 sync array when sync=True)
        """
        darray = np.float32(self._raw[nsel, csel])
        darray *= self.channel_conversion_sample2v[self.type][csel]
        if sync:
            return darray, self.read_sync(nsel)
        else:
            return darray

    def read_samples(self, first_sample=0, last_sample=10000, channels=None):
        """
        reads all channels from first_sample to last_sample, following numpy slicing convention
        sglx.read_samples(first=0, last=100) would be equivalent to slicing the array D
        D[:,0:100] where the last axis represent time and the first channels.

        :param first_sample: first sample to be read, python slice-wise
        :param last_sample:  last sample to be read, python slice-wise
        :param channels: slice or numpy array of indices
        :return: numpy array of int16
        """
        if channels is None:
            channels = slice(None)
        return self.read(slice(first_sample, last_sample), channels)

    def read_sync_digital(self, _slice=slice(0, 10000)):
        """
        Reads only the digital sync trace at specified samples using slicing syntax
        >>> sync_samples = sr.read_sync_digital(slice(0,10000))
        """
        if not self.meta:
            _logger.warning('Sync trace not labeled in metadata. Assuming last trace')
        return split_sync(self._raw[_slice, _get_sync_trace_indices_from_meta(self.meta)])

    def read_sync_analog(self, _slice=slice(0, 10000)):
        """
        Reads only the analog sync traces at specified samples using slicing syntax
        >>> sync_samples = sr.read_sync_analog(slice(0,10000))
        """
        if not self.meta:
            return
        csel = _get_analog_sync_trace_indices_from_meta(self.meta)
        if not csel:
            return
        else:
            return self.read(nsel=_slice, csel=csel, sync=False)

    def read_sync(self, _slice=slice(0, 10000), threshold=1.2):
        """
        Reads all sync trace. Convert analog to digital with selected threshold and append to array
        :param _slice: samples slice
        :param threshold: (V) threshold for front detection, defaults to 1.2 V
        :return: int8 array
        """
        digital = self.read_sync_digital(_slice)
        analog = self.read_sync_analog(_slice)
        if analog is None:
            return digital
        # binarize the analog traces in place before concatenation
        analog[np.where(analog < threshold)] = 0
        analog[np.where(analog >= threshold)] = 1
        return np.concatenate((digital, np.int8(analog)), axis=1)

    def compress_file(self, keep_original=True, **kwargs):
        """
        Compresses the binary file to mtscomp format (lossless)
        :param keep_original: defaults True. If False, the original uncompressed file is deleted
         and the current spikeglx.Reader object is modified in place
        :param kwargs: extra arguments passed through to mtscomp.compress
        :return: pathlib.Path of the compressed *.cbin file
        """
        file_tmp = self.file_bin.with_suffix('.cbin_tmp')
        assert not self.is_mtscomp
        mtscomp.compress(self.file_bin,
                         out=file_tmp,
                         outmeta=self.file_bin.with_suffix('.ch'),
                         sample_rate=self.fs,
                         n_channels=self.nc,
                         dtype=np.int16,
                         **kwargs)
        # write to a temporary name first, then rename, so an interrupted
        # compression never leaves a half-written .cbin behind
        file_out = file_tmp.with_suffix('.cbin')
        file_tmp.rename(file_out)
        if not keep_original:
            self.file_bin.unlink()
            self.file_bin = file_out
        return file_out

    def decompress_file(self, keep_original=True, **kwargs):
        """
        Decompresses a mtscomp file
        :param keep_original: defaults True. If False, the original compressed file (input)
        is deleted and the current spikeglx.Reader object is modified in place
        NB: This is not equivalent to overwrite (which replaces the output file)
        :return: pathlib.Path of the decompressed *.bin file
        """
        if 'out' not in kwargs:
            kwargs['out'] = self.file_bin.with_suffix('.bin')
        assert self.is_mtscomp
        mtscomp.decompress(self.file_bin, self.file_bin.with_suffix('.ch'), **kwargs)
        if not keep_original:
            self.file_bin.unlink()
            self.file_bin.with_suffix('.ch').unlink()
            self.file_bin = kwargs['out']
        return kwargs['out']

    def verify_hash(self):
        """
        Computes SHA-1 hash and returns True if it matches metadata, False otherwise
        :return: boolean
        """
        if self.is_mtscomp:
            # for compressed files, the expected hash lives in the .ch sidecar
            with open(self.file_bin.with_suffix('.ch')) as fid:
                mtscomp_params = json.load(fid)
            sm = mtscomp_params.get('sha1_compressed', None)
            if sm is None:
                _logger.warning("SHA1 hash is not implemented for compressed ephys. To check "
                                "the spikeglx acquisition hash, uncompress the file first !")
                return True
            sm = sm.upper()
        else:
            sm = self.meta.fileSHA1
        sc = hashfile.sha1(self.file_bin).upper()
        if sm == sc:
            log_func = _logger.info
        else:
            log_func = _logger.error
        log_func(f"SHA1 metadata: {sm}")
        log_func(f"SHA1 computed: {sc}")
        return sm == sc
def read(sglx_file, first_sample=0, last_sample=10000):
    """
    One-shot read from a spikeglx binary file, without keeping a Reader around.
    Also returns the parsed meta-data.

    >>> ibllib.io.spikeglx.read('/path/to/file.bin', first_sample=0, last_sample=1000)

    :param sglx_file: full path to the binary file to read
    :param first_sample: first sample to be read, python slice-wise
    :param last_sample: last sample to be read, python slice-wise
    :return: data array, sync trace, meta-data dictionary
    """
    reader = Reader(sglx_file)
    data, sync = reader.read_samples(first_sample=first_sample, last_sample=last_sample)
    return data, sync, reader.meta
def read_meta_data(md_file):
    """
    Reads the spikeglx metadata file and parses it into a dictionary
    Agnostic: does not make any assumption on the keys/content, it just parses key=values

    :param md_file: path to the .meta text file
    :return: Bunch (dict-like) of metadata values
    """
    with open(md_file) as fid:
        md = fid.read()
    d = {}
    for a in md.splitlines():
        # skip blank or malformed lines instead of raising on tuple unpack
        if '=' not in a:
            continue
        # split on the first '=' only: values may themselves contain '='
        k, v = a.split('=', 1)
        # if all numbers, try to interpret the string
        if v and re.fullmatch('[0-9,.]*', v) and v.count('.') < 2:
            v = [float(val) for val in v.split(',')]
            # scalars should not be nested
            if len(v) == 1:
                v = v[0]
        # tildes in keynames removed
        d[k.replace('~', '')] = v
    d['neuropixelVersion'] = _get_neuropixel_version_from_meta(d)
    d['serial'] = _get_serial_number_from_meta(d)
    return Bunch(d)
def _get_serial_number_from_meta(md):
    """
    Get neuropixel serial number from the metadata dictionary
    :return: integer serial number, or None when no serial field is present
    """
    # imProbeSN for 3A, imDatPrb_sn for 3B2, None for nidq 3B2
    for key in ('imProbeSN', 'imDatPrb_sn'):
        value = md.get(key)
        if value:
            return int(value)
    return None
def _get_neuropixel_version_from_meta(md):
    """
    Get neuropixel version tag (3A, 3B1, 3B2) from the metadata dictionary
    """
    if 'typeEnabled' in md:
        return '3A'
    if 'typeImEnabled' in md and 'typeNiEnabled' in md:
        # only 3B2 metadata carries the PXI slot/port fields
        is_3b2 = 'imDatPrb_port' in md and 'imDatPrb_slot' in md
        return '3B2' if is_3b2 else '3B1'
def _get_sync_trace_indices_from_meta(md):
    """
    Returns a list containing indices of the sync traces in the original array
    """
    # sync traces are always stored at the end of the channel list
    typ = _get_type_from_meta(md)
    n_traces = int(_get_nchannels_from_meta(md))
    if typ == 'nidq':
        n_sync = int(md.get('snsMnMaXaDw')[-1])
    elif typ in ('lf', 'ap'):
        n_sync = int(md.get('snsApLfSy')[2])
    return list(range(n_traces - n_sync, n_traces))
def _get_analog_sync_trace_indices_from_meta(md):
    """
    Returns a list containing indices of the analog sync traces in the original
    array (empty for anything other than nidq acquisitions)
    """
    if _get_type_from_meta(md) != 'nidq':
        return []
    tr = md.get('snsMnMaXaDw')
    # analog sync traces sit right after the MN and MA trace groups
    offset = int(sum(tr[0:2]))
    return list(range(offset, offset + int(tr[-2])))
def _get_nchannels_from_meta(md):
    """Total number of traces saved in the binary, including sync traces."""
    kind = _get_type_from_meta(md)
    if kind == 'nidq':
        return int(np.round(np.sum(md.get('snsMnMaXaDw'))))
    if kind in ('lf', 'ap'):
        return int(np.round(sum(md.get('snsApLfSy'))))
def _get_fs_from_meta(md):
    """Sampling rate (Hz) from the metadata, imec or nidq flavour."""
    key = 'imSampRate' if md.get('typeThis') == 'imec' else 'niSampRate'
    return md.get(key)
def _get_type_from_meta(md):
    """
    Get neuropixel data type (ap, lf or nidq) from metadata
    """
    ap_lf_sy = md.get('snsApLfSy', [-1, -1, -1])
    n_ap, n_lf = ap_lf_sy[0], ap_lf_sy[1]
    if n_ap == 0 and n_lf != 0:
        return 'lf'
    if n_ap != 0 and n_lf == 0:
        return 'ap'
    # nidq metadata has no snsApLfSy entry at all
    if ap_lf_sy == [-1, -1, -1] and md.get('typeThis', None) == 'nidq':
        return 'nidq'
def _map_channels_from_meta(meta_data):
    """
    Interpret the meta data string to extract an array of channel positions along the shank

    :param meta_data: dictionary output from spikeglx.read_meta_data
    :return: dict of arrays 'shank', 'col', 'row', 'flag' (one value per active site);
     all values None for digital nidq maps; None when no shank map key exists
    """
    if 'snsShankMap' not in meta_data.keys():
        return
    entries = re.findall(r'([0-9]*:[0-9]*:[0-9]*:[0-9]*)', meta_data['snsShankMap'])
    # for digital nidq types, the key exists but does not contain any information
    if not entries:
        return {'shank': None, 'col': None, 'row': None, 'flag': None}
    # shank#, col#, row#, drawflag
    # (nb: drawflag is one should be drawn and considered spatial average)
    table = np.array([np.float32(entry.split(':')) for entry in entries])
    columns = {'shank': 0, 'col': 1, 'row': 2, 'flag': 3}
    return {name: table[:, idx] for name, idx in columns.items()}
def _conversion_sample2v_from_meta(meta_data):
    """
    Interpret the meta data string to extract an array of conversion factors for each channel
    so the output data is in Volts
    Conversion factor is: int2volt / channelGain
    For Lf/Ap interpret the gain string from metadata
    For Nidq, repmat the gains from the trace counts in `snsMnMaXaDw`

    :param meta_data: dictionary output from spikeglx.read_meta_data
    :return: dictionary of numpy arrays with one gain value per channel,
     keyed by data type ('lf'/'ap' for probes, 'nidq' for the DAQ)
    """
    def int2volts(md):
        """ :return: Conversion scalar to Volts. Needs to be combined with channel gains """
        if md.get('typeThis', None) == 'imec':
            return md.get('imAiRangeMax') / 512
        else:
            return md.get('niAiRangeMax') / 32768

    int2volt = int2volts(meta_data)
    # interprets the gain value from the metadata header:
    if 'imroTbl' in meta_data.keys():  # binary from the probes: ap or lf
        sy_gain = np.ones(int(meta_data['snsApLfSy'][-1]), dtype=np.float32)
        # imroTbl has 384 entries regardless of no of channels saved, so need to index by n_ch
        n_chn = _get_nchannels_from_meta(meta_data) - 1
        # the sync traces are not included in the gain values, so are included for broadcast ops
        gain = re.findall(r'([0-9]* [0-9]* [0-9]* [0-9]* [0-9]*)', meta_data['imroTbl'])[:n_chn]
        # within each 5-field imro entry, the lf gain is the last field and
        # the ap gain the one before it
        out = {'lf': np.hstack((np.array([1 / np.float32(g.split(' ')[-1]) for g in gain]) *
                                int2volt, sy_gain)),
               'ap': np.hstack((np.array([1 / np.float32(g.split(' ')[-2]) for g in gain]) *
                                int2volt, sy_gain))}
    elif 'niMNGain' in meta_data.keys():  # binary from nidq
        # one gain value per trace, repeated according to the MN/MA/XA/DW counts
        gain = np.r_[
            np.ones(int(meta_data['snsMnMaXaDw'][0],)) / meta_data['niMNGain'] * int2volt,
            np.ones(int(meta_data['snsMnMaXaDw'][1],)) / meta_data['niMAGain'] * int2volt,
            np.ones(int(meta_data['snsMnMaXaDw'][2], )) * int2volt,  # no gain for analog sync
            np.ones(int(np.sum(meta_data['snsMnMaXaDw'][3]),))]  # no unit for digital sync
        out = {'nidq': gain}
    return out
def split_sync(sync_tr):
    """
    The synchronization channels are stored as single bits; this splits the int16 original
    channel into 16 single bit channels

    :param sync_tr: numpy vector: samples of synchronisation trace
    :return: int8 numpy array of shape (n_samples, 16), one column per sync line
    """
    samples = np.int16(np.copy(sync_tr))
    # little-endian bit order places bit k of each int16 sample in column k
    bits = np.unpackbits(samples.view(np.uint8), bitorder='little')
    return np.int8(bits.reshape(samples.size, 16))
def get_neuropixel_version_from_folder(session_path):
    """Probe generation ('3A' or '3B') inferred from the ephys files in a session folder."""
    return get_neuropixel_version_from_files(glob_ephys_files(session_path))
def get_neuropixel_version_from_files(ephys_files):
    """'3B' when any entry carries a nidq dataset, '3A' otherwise."""
    has_nidq = any(ef.get('nidq') for ef in ephys_files)
    return '3B' if has_nidq else '3A'
def glob_ephys_files(session_path, suffix='.meta', ext='bin', recursive=True, bin_exists=True):
    """
    From an arbitrary folder (usually session folder) gets the ap and lf files and labels
    Associated to the subfolders where they are
    the expected folder tree is:
    ├── 3A
    │   ├── imec0
    │   │   ├── sync_testing_g0_t0.imec0.ap.bin
    │   │   └── sync_testing_g0_t0.imec0.lf.bin
    │   └── imec1
    │       ├── sync_testing_g0_t0.imec1.ap.bin
    │       └── sync_testing_g0_t0.imec1.lf.bin
    └── 3B
        ├── sync_testing_g0_t0.nidq.bin
        ├── imec0
        │   ├── sync_testing_g0_t0.imec0.ap.bin
        │   └── sync_testing_g0_t0.imec0.lf.bin
        └── imec1
            ├── sync_testing_g0_t0.imec1.ap.bin
            └── sync_testing_g0_t0.imec1.lf.bin

    :param session_path: folder, string or pathlib.Path
    :param suffix: dataset suffix used for the initial glob (defaults to '.meta')
    :param ext: file extension to look for, default 'bin' but could also be 'meta' or 'ch'
    :param recursive: if True, also search subfolders
    :param bin_exists: if True (default), only return entries whose binary file exists
    :returns: a list of dictionaries with keys 'ap': apfile, 'lf': lffile and 'label'
    """
    def get_label(raw_ephys_apfile):
        # label is the probe subfolder name, or '' when the file sits bare
        # in raw_ephys_data
        if raw_ephys_apfile.parts[-2] != 'raw_ephys_data':
            return raw_ephys_apfile.parts[-2]
        else:
            return ''

    recurse = '**/' if recursive else ''
    ephys_files = []
    for raw_ephys_file in Path(session_path).glob(f'{recurse}*.ap{suffix}'):
        raw_ephys_apfile = next(raw_ephys_file.parent.glob(raw_ephys_file.stem + f'.*{ext}'), None)
        if not raw_ephys_apfile and bin_exists:
            continue
        elif not raw_ephys_apfile and ext != 'bin':
            continue
        elif not bin_exists and ext == 'bin':
            # binary not required to exist: synthesize the expected path
            raw_ephys_apfile = raw_ephys_file.with_suffix('.bin')
        # first get the ap file
        ephys_files.extend([Bunch({'label': None, 'ap': None, 'lf': None, 'path': None})])
        ephys_files[-1].ap = raw_ephys_apfile
        # then get the corresponding lf file if it exists
        lf_file = raw_ephys_apfile.parent / raw_ephys_apfile.name.replace('.ap.', '.lf.')
        ephys_files[-1].lf = next(lf_file.parent.glob(lf_file.stem + f'.*{ext}'), None)
        # finally, the label is the current directory except if it is bare in raw_ephys_data
        ephys_files[-1].label = get_label(raw_ephys_apfile)
        ephys_files[-1].path = raw_ephys_apfile.parent
    # for 3b probes, need also to get the nidq dataset type
    # NOTE(review): rglob is already recursive, so prefixing with `recurse`
    # makes this branch recursive even when recursive=False — confirm intended
    for raw_ephys_file in Path(session_path).rglob(f'{recurse}*.nidq{suffix}'):
        raw_ephys_nidqfile = next(raw_ephys_file.parent.glob(raw_ephys_file.stem + f'.*{ext}'),
                                  None)
        if not bin_exists and ext == 'bin':
            raw_ephys_nidqfile = raw_ephys_file.with_suffix('.bin')
        ephys_files.extend([Bunch({'label': get_label(raw_ephys_file),
                                   'nidq': raw_ephys_nidqfile,
                                   'path': raw_ephys_file.parent})])
    return ephys_files
def _mock_spikeglx_file(mock_bin_file, meta_file, ns, nc, sync_depth,
                        random=False, int2volts=0.6 / 32768, corrupt=False):
    """
    For testing purposes, create a binary file with sync pulses to test reading and extraction

    :param mock_bin_file: path of the binary file to create
    :param meta_file: template .meta file, copied (patched) next to the binary
    :param ns: number of samples
    :param nc: number of channels
    :param sync_depth: number of sync lines simulated on the last channel
    :param random: if True fill with random int16; otherwise channel i holds (i + 1) / int2volts
    :param int2volts: sample to Volts conversion factor used to build the ramp
    :param corrupt: if True, write an inconsistent fileTimeSecs in the meta file
    :return: dict with the binary path and the generation parameters
    """
    meta_file = Path(meta_file)
    mock_path_bin = Path(mock_bin_file)
    mock_path_meta = mock_path_bin.with_suffix('.meta')
    md = read_meta_data(meta_file)
    assert meta_file != mock_path_meta
    fs = _get_fs_from_meta(md)
    # copy the template meta file, patching the size/duration fields to match
    # the mock binary. The previous implementation primed the loop with a
    # readline() and read the next line before writing, which silently
    # dropped the first line of the template; iterating the handle fixes that
    # and the context managers guarantee both files are closed.
    with open(meta_file) as fid_source, open(mock_path_meta, 'w+') as fid_target:
        for line in fid_source:
            if line.startswith('fileSizeBytes'):
                line = f'fileSizeBytes={ns * nc * 2}\n'
            if line.startswith('fileTimeSecs'):
                if corrupt:
                    line = f'fileTimeSecs={ns / fs + 1.8324}\n'
                else:
                    line = f'fileTimeSecs={ns / fs}\n'
            fid_target.write(line)
    if random:
        D = np.random.randint(-32767, 32767, size=(ns, nc), dtype=np.int16)
    else:  # each channel as an int of chn + 1
        D = np.tile(np.int16((np.arange(nc) + 1) / int2volts), (ns, 1))
        D[0:16, :] = 0
    # the last channel is the sync that we fill with
    sync = np.int16(2 ** np.float32(np.arange(-1, sync_depth)))
    D[:, -1] = 0
    D[:sync.size, -1] = sync
    with open(mock_path_bin, 'w+') as fid:
        D.tofile(fid)
    return {'bin_file': mock_path_bin, 'ns': ns, 'nc': nc, 'sync_depth': sync_depth, 'D': D}
def get_hardware_config(config_file):
    """
    Reads the neuropixel_wirings.json file containing sync mapping and parameters

    :param config_file: folder or json file
    :return: dictionary or None
    """
    config_file = Path(config_file)
    if config_file.is_dir():
        # given a folder, take the first *.wiring.json found inside it
        candidates = list(config_file.glob('*.wiring.json'))
        config_file = candidates[0] if candidates else None
    if not config_file or not config_file.exists():
        return None
    with open(config_file) as fid:
        return json.loads(fid.read())
def _sync_map_from_hardware_config(hardware_config):
    """
    :param hardware_config: dictonary from json read of neuropixel_wirings.json
    :return: dictionary where key names refer to object and values to sync channel index
    """
    pin_out = neuropixel.SYNC_PIN_OUT[hardware_config['SYSTEM']]
    sync_map = {}
    # digital wiring: map each labelled pin to its system-dependent channel index
    for pin, label in hardware_config['SYNC_WIRING_DIGITAL'].items():
        if pin_out[pin] is not None:
            sync_map[label] = pin_out[pin]
    # analog wiring (optional): channel index is the pin number offset by 16
    analog = hardware_config.get('SYNC_WIRING_ANALOG')
    if analog:
        for pin, label in analog.items():
            sync_map[label] = int(pin[2:]) + 16
    return sync_map
def get_sync_map(folder_ephys):
    """Return the sync channel map for an ephys folder, or None when no wiring file exists."""
    hardware_config = get_hardware_config(folder_ephys)
    if hardware_config:
        return _sync_map_from_hardware_config(hardware_config)
    _logger.warning(f"No channel map for {str(folder_ephys)}")
    return None
| 39.015332 | 99 | 0.617937 | import json
import logging
from pathlib import Path
import re
import numpy as np
import mtscomp
from brainbox.core import Bunch
from ibllib.ephys import neuropixel as neuropixel
from ibllib.io import hashfile
SAMPLE_SIZE = 2
DEFAULT_BATCH_SIZE = 1e6
_logger = logging.getLogger('ibllib')
class Reader:
def __init__(self, sglx_file):
self.file_bin = Path(sglx_file)
self.nbytes = self.file_bin.stat().st_size
file_meta_data = Path(sglx_file).with_suffix('.meta')
if not file_meta_data.exists():
self.file_meta_data = None
self.meta = None
self.channel_conversion_sample2v = 1
_logger.warning(str(sglx_file) + " : no metadata file found. Very limited support")
return
self.file_meta_data = file_meta_data
self.meta = read_meta_data(file_meta_data)
self.channel_conversion_sample2v = _conversion_sample2v_from_meta(self.meta)
if self.is_mtscomp:
self._raw = mtscomp.Reader()
self._raw.open(self.file_bin, self.file_bin.with_suffix('.ch'))
else:
if self.nc * self.ns * 2 != self.nbytes:
ftsec = self.file_bin.stat().st_size / 2 / self.nc / self.fs
_logger.warning(f"{sglx_file} : meta data and filesize do not checkout\n"
f"File size: expected {self.meta['fileSizeBytes']},"
f" actual {self.file_bin.stat().st_size}\n"
f"File duration: expected {self.meta['fileTimeSecs']},"
f" actual {ftsec}\n"
f"Will attempt to fudge the meta-data information.")
self.meta['fileTimeSecs'] = ftsec
self._raw = np.memmap(sglx_file, dtype='int16', mode='r', shape=(self.ns, self.nc))
def __getitem__(self, item):
if isinstance(item, int) or isinstance(item, slice):
return self.read(nsel=item, sync=False)
elif len(item) == 2:
return self.read(nsel=item[0], csel=item[1], sync=False)
@property
def shape(self):
return self.ns, self.nc
@property
def is_mtscomp(self):
return 'cbin' in self.file_bin.suffix
@property
def version(self):
if not self.meta:
return None
return _get_neuropixel_version_from_meta(self.meta)
@property
def type(self):
if not self.meta:
return 0
return _get_type_from_meta(self.meta)
@property
def fs(self):
if not self.meta:
return 1
return _get_fs_from_meta(self.meta)
@property
def nc(self):
if not self.meta:
return
return _get_nchannels_from_meta(self.meta)
@property
def ns(self):
if not self.meta:
return
return int(np.round(self.meta.get('fileTimeSecs') * self.fs))
def read(self, nsel=slice(0, 10000), csel=slice(None), sync=True):
darray = np.float32(self._raw[nsel, csel])
darray *= self.channel_conversion_sample2v[self.type][csel]
if sync:
return darray, self.read_sync(nsel)
else:
return darray
def read_samples(self, first_sample=0, last_sample=10000, channels=None):
if channels is None:
channels = slice(None)
return self.read(slice(first_sample, last_sample), channels)
def read_sync_digital(self, _slice=slice(0, 10000)):
if not self.meta:
_logger.warning('Sync trace not labeled in metadata. Assuming last trace')
return split_sync(self._raw[_slice, _get_sync_trace_indices_from_meta(self.meta)])
def read_sync_analog(self, _slice=slice(0, 10000)):
if not self.meta:
return
csel = _get_analog_sync_trace_indices_from_meta(self.meta)
if not csel:
return
else:
return self.read(nsel=_slice, csel=csel, sync=False)
def read_sync(self, _slice=slice(0, 10000), threshold=1.2):
digital = self.read_sync_digital(_slice)
analog = self.read_sync_analog(_slice)
if analog is None:
return digital
analog[np.where(analog < threshold)] = 0
analog[np.where(analog >= threshold)] = 1
return np.concatenate((digital, np.int8(analog)), axis=1)
def compress_file(self, keep_original=True, **kwargs):
file_tmp = self.file_bin.with_suffix('.cbin_tmp')
assert not self.is_mtscomp
mtscomp.compress(self.file_bin,
out=file_tmp,
outmeta=self.file_bin.with_suffix('.ch'),
sample_rate=self.fs,
n_channels=self.nc,
dtype=np.int16,
**kwargs)
file_out = file_tmp.with_suffix('.cbin')
file_tmp.rename(file_out)
if not keep_original:
self.file_bin.unlink()
self.file_bin = file_out
return file_out
def decompress_file(self, keep_original=True, **kwargs):
if 'out' not in kwargs:
kwargs['out'] = self.file_bin.with_suffix('.bin')
assert self.is_mtscomp
mtscomp.decompress(self.file_bin, self.file_bin.with_suffix('.ch'), **kwargs)
if not keep_original:
self.file_bin.unlink()
self.file_bin.with_suffix('.ch').unlink()
self.file_bin = kwargs['out']
return kwargs['out']
def verify_hash(self):
if self.is_mtscomp:
with open(self.file_bin.with_suffix('.ch')) as fid:
mtscomp_params = json.load(fid)
sm = mtscomp_params.get('sha1_compressed', None)
if sm is None:
_logger.warning("SHA1 hash is not implemented for compressed ephys. To check "
"the spikeglx acquisition hash, uncompress the file first !")
return True
sm = sm.upper()
else:
sm = self.meta.fileSHA1
sc = hashfile.sha1(self.file_bin).upper()
if sm == sc:
log_func = _logger.info
else:
log_func = _logger.error
log_func(f"SHA1 metadata: {sm}")
log_func(f"SHA1 computed: {sc}")
return sm == sc
def read(sglx_file, first_sample=0, last_sample=10000):
    """Read a slice of samples from a spikeglx file along with its metadata.

    :param sglx_file: path of the spikeglx binary file
    :param first_sample: first sample index to read
    :param last_sample: last sample index to read
    :return: (data, sync, meta) tuple
    """
    reader = Reader(sglx_file)
    data, sync = reader.read_samples(first_sample=first_sample, last_sample=last_sample)
    return data, sync, reader.meta
def read_meta_data(md_file):
    """Parse a spikeglx ``.meta`` file into a Bunch of typed values.

    Numeric-looking values (digits, commas, at most one dot) are cast to a
    float or a list of floats; everything else stays a string.  Leading '~'
    is stripped from keys, and derived ``neuropixelVersion`` / ``serial``
    fields are added.

    :param md_file: path of the .meta file
    :return: Bunch of metadata key/values
    """
    with open(md_file) as fid:
        md = fid.read()
    d = {}
    for a in md.splitlines():
        # split on the first '=' only so values containing '=' survive intact
        k, v = a.split('=', 1)
        if v and re.fullmatch('[0-9,.]*', v) and v.count('.') < 2:
            v = [float(val) for val in v.split(',')]
            if len(v) == 1:
                v = v[0]
        d[k.replace('~', '')] = v
    d['neuropixelVersion'] = _get_neuropixel_version_from_meta(d)
    d['serial'] = _get_serial_number_from_meta(d)
    return Bunch(d)
def _get_serial_number_from_meta(md):
serial = md.get('imProbeSN') or md.get('imDatPrb_sn')
if serial:
return int(serial)
def _get_neuropixel_version_from_meta(md):
if 'typeEnabled' in md.keys():
return '3A'
elif 'typeImEnabled' in md.keys() and 'typeNiEnabled' in md.keys():
if 'imDatPrb_port' in md.keys() and 'imDatPrb_slot' in md.keys():
return '3B2'
else:
return '3B1'
def _get_sync_trace_indices_from_meta(md):
    """Return the column indices of the digital sync traces (last channels)."""
    acq_type = _get_type_from_meta(md)
    n_traces = int(_get_nchannels_from_meta(md))
    if acq_type == 'nidq':
        n_sync = int(md.get('snsMnMaXaDw')[-1])
    elif acq_type in ('lf', 'ap'):
        n_sync = int(md.get('snsApLfSy')[2])
    return list(range(n_traces - n_sync, n_traces))
def _get_analog_sync_trace_indices_from_meta(md):
    """Return the column indices of analog (XA) sync traces; nidq files only."""
    if _get_type_from_meta(md) != 'nidq':
        return []
    counts = md.get('snsMnMaXaDw')
    n_analog = int(counts[-2])
    # analog channels sit right after the MN and MA channel groups
    first = int(sum(counts[0:2]))
    return list(range(first, first + n_analog))
def _get_nchannels_from_meta(md):
    """Return the total number of channels recorded in the binary file."""
    acq_type = _get_type_from_meta(md)
    if acq_type == 'nidq':
        return int(np.round(np.sum(md.get('snsMnMaXaDw'))))
    if acq_type in ('lf', 'ap'):
        return int(np.round(sum(md.get('snsApLfSy'))))
    return None
def _get_fs_from_meta(md):
if md.get('typeThis') == 'imec':
return md.get('imSampRate')
else:
return md.get('niSampRate')
def _get_type_from_meta(md):
snsApLfSy = md.get('snsApLfSy', [-1, -1, -1])
if snsApLfSy[0] == 0 and snsApLfSy[1] != 0:
return 'lf'
elif snsApLfSy[0] != 0 and snsApLfSy[1] == 0:
return 'ap'
elif snsApLfSy == [-1, -1, -1] and md.get('typeThis', None) == 'nidq':
return 'nidq'
def _map_channels_from_meta(meta_data):
if 'snsShankMap' in meta_data.keys():
chmap = re.findall(r'([0-9]*:[0-9]*:[0-9]*:[0-9]*)', meta_data['snsShankMap'])
if not chmap:
return {'shank': None, 'col': None, 'row': None, 'flag': None}
plit(':')) for cm in chmap])
return {k: chmap[:, v] for (k, v) in {'shank': 0, 'col': 1, 'row': 2, 'flag': 3}.items()}
def _conversion_sample2v_from_meta(meta_data):
    """Compute per-channel int16 -> Volts conversion factors from metadata.

    :param meta_data: metadata Bunch as returned by :func:`read_meta_data`
    :return: dict keyed by acquisition type ('ap'/'lf' for imec, 'nidq' for
     NI) of gain vectors with one float per channel
    """
    def int2volts(md):
        # full-scale conversion: analog range max over 512 (imec) or 32768 (NI)
        if md.get('typeThis', None) == 'imec':
            return md.get('imAiRangeMax') / 512
        else:
            return md.get('niAiRangeMax') / 32768
    int2volt = int2volts(meta_data)
    # imec probe: per-channel gains come from the imro table; sync channels get gain 1
    if 'imroTbl' in meta_data.keys():
        sy_gain = np.ones(int(meta_data['snsApLfSy'][-1]), dtype=np.float32)
        n_chn = _get_nchannels_from_meta(meta_data) - 1
        gain = re.findall(r'([0-9]* [0-9]* [0-9]* [0-9]* [0-9]*)', meta_data['imroTbl'])[:n_chn]
        # last field of each imro entry feeds the lf gain, second-to-last the ap gain
        out = {'lf': np.hstack((np.array([1 / np.float32(g.split(' ')[-1]) for g in gain]) *
                                int2volt, sy_gain)),
               'ap': np.hstack((np.array([1 / np.float32(g.split(' ')[-2]) for g in gain]) *
                                int2volt, sy_gain))}
    # NI DAQ: MN / MA channel groups carry their own gains, XA and digital do not
    elif 'niMNGain' in meta_data.keys():
        gain = np.r_[
            np.ones(int(meta_data['snsMnMaXaDw'][0],)) / meta_data['niMNGain'] * int2volt,
            np.ones(int(meta_data['snsMnMaXaDw'][1],)) / meta_data['niMAGain'] * int2volt,
            np.ones(int(meta_data['snsMnMaXaDw'][2], )) * int2volt,
            np.ones(int(np.sum(meta_data['snsMnMaXaDw'][3]),))]
        out = {'nidq': gain}
    return out
def split_sync(sync_tr):
    """Unpack a 16-bit sync word trace into its 16 individual bit traces.

    :param sync_tr: vector of int16 sync words (one per sample)
    :return: (nsamples, 16) int8 array where column ``i`` holds bit ``i``
    """
    words = np.int16(np.copy(sync_tr))
    # unpack byte-wise (MSB first), then roll/flip so columns run bit 0..15
    bits = np.unpackbits(words.view(np.uint8)).reshape(words.size, 16)
    bits = np.flip(np.roll(bits, 8, axis=1), axis=1)
    return np.int8(bits)
def get_neuropixel_version_from_folder(session_path):
    """Return '3A' or '3B' depending on the ephys files found in a session folder."""
    return get_neuropixel_version_from_files(glob_ephys_files(session_path))
def get_neuropixel_version_from_files(ephys_files):
    """Return '3B' when any file entry has an nidq stream, else '3A'."""
    has_nidq = any(ef.get('nidq') for ef in ephys_files)
    return '3B' if has_nidq else '3A'
def glob_ephys_files(session_path, suffix='.meta', ext='bin', recursive=True, bin_exists=True):
    """Scan a session folder for spikeglx ephys file sets (ap / lf / nidq).

    Discovery is driven by ``*.ap.meta`` / ``*.nidq.meta`` files; the
    matching binary (``ext``) is looked up next to each metadata file.

    :param session_path: folder to scan
    :param suffix: metadata suffix used for discovery (default '.meta')
    :param ext: extension of the binary data file ('bin' or e.g. 'cbin')
    :param recursive: if True, scan sub-folders as well
    :param bin_exists: if True, skip entries whose binary file is missing
    :return: list of Bunches with keys label, path and ap/lf (or nidq)
    """
    def get_label(raw_ephys_apfile):
        # probe label is the parent folder name, unless the file sits
        # directly in 'raw_ephys_data'
        if raw_ephys_apfile.parts[-2] != 'raw_ephys_data':
            return raw_ephys_apfile.parts[-2]
        else:
            return ''
    recurse = '**/' if recursive else ''
    ephys_files = []
    for raw_ephys_file in Path(session_path).glob(f'{recurse}*.ap{suffix}'):
        raw_ephys_apfile = next(raw_ephys_file.parent.glob(raw_ephys_file.stem + f'.*{ext}'), None)
        if not raw_ephys_apfile and bin_exists:
            continue
        elif not raw_ephys_apfile and ext != 'bin':
            continue
        elif not bin_exists and ext == 'bin':
            # caller wants entries even without data: synthesize the .bin path
            raw_ephys_apfile = raw_ephys_file.with_suffix('.bin')
        ephys_files.extend([Bunch({'label': None, 'ap': None, 'lf': None, 'path': None})])
        ephys_files[-1].ap = raw_ephys_apfile
        # the lf file, if any, lives next to the ap file with an '.lf.' tag
        lf_file = raw_ephys_apfile.parent / raw_ephys_apfile.name.replace('.ap.', '.lf.')
        ephys_files[-1].lf = next(lf_file.parent.glob(lf_file.stem + f'.*{ext}'), None)
        ephys_files[-1].label = get_label(raw_ephys_apfile)
        ephys_files[-1].path = raw_ephys_apfile.parent
    for raw_ephys_file in Path(session_path).rglob(f'{recurse}*.nidq{suffix}'):
        raw_ephys_nidqfile = next(raw_ephys_file.parent.glob(raw_ephys_file.stem + f'.*{ext}'),
                                  None)
        if not bin_exists and ext == 'bin':
            raw_ephys_nidqfile = raw_ephys_file.with_suffix('.bin')
        ephys_files.extend([Bunch({'label': get_label(raw_ephys_file),
                                   'nidq': raw_ephys_nidqfile,
                                   'path': raw_ephys_file.parent})])
    return ephys_files
def _mock_spikeglx_file(mock_bin_file, meta_file, ns, nc, sync_depth,
                        random=False, int2volts=0.6 / 32768, corrupt=False):
    """Create a binary + meta file pair mimicking a spikeglx acquisition (for tests).

    :param mock_bin_file: output path of the fake binary file
    :param meta_file: template .meta file to copy (size fields are rewritten)
    :param ns: number of samples
    :param nc: number of channels
    :param sync_depth: number of sync bits encoded on the last channel
    :param random: if True fill with random int16, else a per-channel ramp
    :param int2volts: int-to-volts factor used to scale the ramp values
    :param corrupt: if True, write an inconsistent fileTimeSecs to the meta
    :return: dict with the bin file path, dimensions and the data array
    """
    meta_file = Path(meta_file)
    mock_path_bin = Path(mock_bin_file)
    mock_path_meta = mock_path_bin.with_suffix('.meta')
    md = read_meta_data(meta_file)
    assert meta_file != mock_path_meta
    fs = _get_fs_from_meta(md)
    # copy the template meta, patching size / duration fields to match ns
    fid_source = open(meta_file)
    fid_target = open(mock_path_meta, 'w+')
    line = fid_source.readline()
    while line:
        line = fid_source.readline()
        if line.startswith('fileSizeBytes'):
            line = f'fileSizeBytes={ns * nc * 2}\n'
        if line.startswith('fileTimeSecs'):
            if corrupt:
                line = f'fileTimeSecs={ns / fs + 1.8324}\n'
            else:
                line = f'fileTimeSecs={ns / fs}\n'
        fid_target.write(line)
    fid_source.close()
    fid_target.close()
    if random:
        D = np.random.randint(-32767, 32767, size=(ns, nc), dtype=np.int16)
    else:  # deterministic ramp: channel index + 1, scaled back to ints
        D = np.tile(np.int16((np.arange(nc) + 1) / int2volts), (ns, 1))
    D[0:16, :] = 0
    # encode a rising power-of-two pattern on the last (sync) channel
    sync = np.int16(2 ** np.float32(np.arange(-1, sync_depth)))
    D[:, -1] = 0
    D[:sync.size, -1] = sync
    with open(mock_path_bin, 'w+') as fid:
        D.tofile(fid)
    return {'bin_file': mock_path_bin, 'ns': ns, 'nc': nc, 'sync_depth': sync_depth, 'D': D}
def get_hardware_config(config_file):
    """Load a sync wiring configuration from a ``*.wiring.json`` file.

    :param config_file: path of the json file, or of a folder containing one
    :return: parsed configuration dict, or None when no file is found
    """
    config_file = Path(config_file)
    if config_file.is_dir():
        candidates = list(config_file.glob('*.wiring.json'))
        config_file = candidates[0] if candidates else candidates
    if not config_file or not config_file.exists():
        return None
    with open(config_file) as fid:
        return json.loads(fid.read())
def _sync_map_from_hardware_config(hardware_config):
    """Build a channel-index -> sync-pin map from the wiring configuration.

    Digital wiring entries are mapped through the system's pin-out table
    (entries with no pin-out are dropped); analog entries are offset by 16.
    """
    pin_out = neuropixel.SYNC_PIN_OUT[hardware_config['SYSTEM']]
    digital = hardware_config['SYNC_WIRING_DIGITAL']
    sync_map = {digital[pin]: pin_out[pin] for pin in digital
                if pin_out[pin] is not None}
    analog = hardware_config.get('SYNC_WIRING_ANALOG')
    if analog:
        sync_map.update({analog[pin]: int(pin[2:]) + 16 for pin in analog})
    return sync_map
def get_sync_map(folder_ephys):
    """Return the sync channel map for an ephys folder, or None if unconfigured."""
    hardware_config = get_hardware_config(folder_ephys)
    if hardware_config:
        return _sync_map_from_hardware_config(hardware_config)
    _logger.warning(f"No channel map for {str(folder_ephys)}")
    return None
| true | true |
f7f5dc1e65c4a16ce808b0adc7271fc0f364439f | 1,291 | py | Python | custom_components/steam_wishlist/util.py | hudsonbrendon/steam-wishlist | 01e77b60be4f938d6b60e91e4eecf4abf608565b | [
"MIT"
] | null | null | null | custom_components/steam_wishlist/util.py | hudsonbrendon/steam-wishlist | 01e77b60be4f938d6b60e91e4eecf4abf608565b | [
"MIT"
] | null | null | null | custom_components/steam_wishlist/util.py | hudsonbrendon/steam-wishlist | 01e77b60be4f938d6b60e91e4eecf4abf608565b | [
"MIT"
] | null | null | null | import logging
from typing import Any, Dict, Optional
from .types import SteamGame
_LOGGER = logging.getLogger(__name__)
def get_steam_game(game_id: int, game: Dict[str, Any]) -> SteamGame:
    """Get a SteamGame from a game dict.

    :param game_id: Steam app id of the game
    :param game: raw game data from the Steam wishlist API
    :return: normalized SteamGame mapping with pricing and review info
    """
    pricing: Optional[Dict[str, Any]] = None
    try:
        pricing = game["subs"][0]
        discount_pct = pricing["discount_pct"]
    except IndexError:
        # This typically means this game is not yet released so pricing is not known.
        pricing = None
        discount_pct = 0
    normal_price: Optional[float] = None
    # Guard against a 100% discount (free promotion), which would otherwise
    # divide by zero when backing out the pre-discount price.
    if pricing and discount_pct < 100:
        # "price" is the discounted price in cents; recover the list price.
        normal_price = round(pricing["price"] / (100 - discount_pct), 2)
    sale_price: Optional[float] = None
    if pricing and discount_pct:
        # Price is an integer so $6.00 is 600.
        sale_price = round(pricing["price"] * 0.01, 2)
    # Use a distinct name so the `game` parameter is not shadowed.
    steam_game: SteamGame = {
        "box_art_url": game["capsule"],
        "normal_price": normal_price,
        "percent_off": discount_pct,
        "review_desc": game.get("review_desc", "No user reviews"),
        "reviews_percent": game.get("reviews_percent", 0),
        "reviews_total": game.get("reviews_total", "0"),
        "sale_price": sale_price,
        "steam_id": game_id,
        "title": game["name"],
    }
    return steam_game
| 31.487805 | 85 | 0.627421 | import logging
from typing import Any, Dict, Optional
from .types import SteamGame
_LOGGER = logging.getLogger(__name__)
def get_steam_game(game_id: int, game: Dict[str, Any]) -> SteamGame:
pricing: Optional[Dict[str, Any]] = None
try:
pricing: Dict[str, Any] = game["subs"][0]
discount_pct = pricing["discount_pct"]
except IndexError:
pricing = None
discount_pct = 0
normal_price: Optional[float] = None
if pricing:
normal_price = round(pricing["price"] / (100 - discount_pct), 2)
sale_price: Optional[float] = None
if pricing and discount_pct:
sale_price = round(pricing["price"] * 0.01, 2)
game: SteamGame = {
"box_art_url": game["capsule"],
"normal_price": normal_price,
"percent_off": discount_pct,
"review_desc": game.get("review_desc", "No user reviews"),
"reviews_percent": game.get("reviews_percent", 0),
"reviews_total": game.get("reviews_total", "0"),
"sale_price": sale_price,
"steam_id": game_id,
"title": game["name"],
}
return game
| true | true |
f7f5de097c8fde0b3907e40b97e7b90b59e09795 | 424 | py | Python | packages/python/plotly/plotly/validators/scatterternary/textfont/_sizesrc.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/scatterternary/textfont/_sizesrc.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/scatterternary/textfont/_sizesrc.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``scatterternary.textfont.sizesrc`` property."""

    def __init__(
        self, plotly_name="sizesrc", parent_name="scatterternary.textfont", **kwargs
    ):
        # pop edit_type first so it is not forwarded twice via **kwargs
        edit_type = kwargs.pop("edit_type", "none")
        super(SizesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| 30.285714 | 84 | 0.658019 | import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="sizesrc", parent_name="scatterternary.textfont", **kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| true | true |
f7f5de50264e89fe2d527f1eec0a5bc7d9751835 | 446 | py | Python | tomriddle/cli.py | MatrixManAtYrService/tomriddle | d7d48850883edeb1238a166b3bb3cfd6679f934e | [
"MIT"
] | null | null | null | tomriddle/cli.py | MatrixManAtYrService/tomriddle | d7d48850883edeb1238a166b3bb3cfd6679f934e | [
"MIT"
] | null | null | null | tomriddle/cli.py | MatrixManAtYrService/tomriddle | d7d48850883edeb1238a166b3bb3cfd6679f934e | [
"MIT"
] | null | null | null | import argparse
import sys
from .tomriddle import main
def tomriddle(args=None, dry=False):
    """Parse CLI arguments and run the solver.

    :param args: argument list to parse; falls back to sys.argv when falsy
    :param dry: when True, return the parsed namespace instead of running
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("answer")
    parser.add_argument("-s", "--substr", nargs="+", default=[])
    namespace = parser.parse_args(args) if args else parser.parse_args()
    if dry:
        return namespace
    main(namespace)
if __name__ == "__main__":
    # NOTE(review): main() is invoked here with no arguments, while the
    # tomriddle() wrapper above always passes a parsed namespace -- confirm
    # that main's signature accepts zero arguments.
    sys.exit(main())
| 18.583333 | 64 | 0.618834 | import argparse
import sys
from .tomriddle import main
def tomriddle(args=None, dry=False):
parser = argparse.ArgumentParser()
parser.add_argument("answer")
parser.add_argument("-s", "--substr", nargs="+", default=[])
if args:
args = parser.parse_args(args)
else:
args = parser.parse_args()
if not dry:
main(args)
else:
return args
if __name__ == "__main__":
sys.exit(main())
| true | true |
f7f5e09452efb81b838ba55ac166be2fd2a7229f | 4,262 | py | Python | lib/urlwatch/cli.py | k-nut/urlwatch | 60b5c88e976e3f45f8fa206e58b52357007e8ef2 | [
"BSD-3-Clause"
] | null | null | null | lib/urlwatch/cli.py | k-nut/urlwatch | 60b5c88e976e3f45f8fa206e58b52357007e8ef2 | [
"BSD-3-Clause"
] | null | null | null | lib/urlwatch/cli.py | k-nut/urlwatch | 60b5c88e976e3f45f8fa206e58b52357007e8ef2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of urlwatch (https://thp.io/2008/urlwatch/).
# Copyright (c) 2008-2020 Thomas Perl <m@thp.io>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# File and folder paths
import logging
import os.path
import signal
import socket
import sys
from appdirs import AppDirs
pkgname = 'urlwatch'
urlwatch_dir = os.path.expanduser(os.path.join('~', '.' + pkgname))
urlwatch_cache_dir = AppDirs(pkgname).user_cache_dir
if not os.path.exists(urlwatch_dir):
urlwatch_dir = AppDirs(pkgname).user_config_dir
# Check if we are installed in the system already
(prefix, bindir) = os.path.split(os.path.dirname(os.path.abspath(sys.argv[0])))
if bindir != 'bin':
sys.path.insert(0, os.path.join(prefix, bindir, 'lib'))
from urlwatch.command import UrlwatchCommand
from urlwatch.config import CommandConfig
from urlwatch.main import Urlwatch
from urlwatch.storage import YamlConfigStorage, CacheMiniDBStorage, UrlsYaml
# One minute (=60 seconds) timeout for each request to avoid hanging
socket.setdefaulttimeout(60)
# Ignore SIGPIPE for stdout (see https://github.com/thp/urlwatch/issues/77)
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
# Windows does not have signal.SIGPIPE
...
logger = logging.getLogger(pkgname)
CONFIG_FILE = 'urlwatch.yaml'
URLS_FILE = 'urls.yaml'
CACHE_FILE = 'cache.db'
HOOKS_FILE = 'hooks.py'
def setup_logger(verbose):
    """Attach a DEBUG console handler to the root logger when verbose is set."""
    if not verbose:
        return
    root_logger = logging.getLogger('')
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter('%(asctime)s %(module)s %(levelname)s: %(message)s'))
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)
    root_logger.info('turning on verbose logging mode')
def main():
    """Entry point: assemble configuration, storage back-ends and run urlwatch."""
    config_file = os.path.join(urlwatch_dir, CONFIG_FILE)
    urls_file = os.path.join(urlwatch_dir, URLS_FILE)
    hooks_file = os.path.join(urlwatch_dir, HOOKS_FILE)
    # Prefer the XDG cache location, but keep using a legacy cache file in
    # the config directory when that is the only one present.
    new_cache_file = os.path.join(urlwatch_cache_dir, CACHE_FILE)
    old_cache_file = os.path.join(urlwatch_dir, CACHE_FILE)
    if os.path.exists(old_cache_file) and not os.path.exists(new_cache_file):
        cache_file = old_cache_file
    else:
        cache_file = new_cache_file
    command_config = CommandConfig(pkgname, urlwatch_dir, bindir, prefix,
                                   config_file, urls_file, hooks_file, cache_file, False)
    setup_logger(command_config.verbose)
    # storage API
    config_storage = YamlConfigStorage(command_config.config)
    cache_storage = CacheMiniDBStorage(command_config.cache)
    urls_storage = UrlsYaml(command_config.urls)
    # build and run the watcher
    watcher = Urlwatch(command_config, config_storage, cache_storage, urls_storage)
    UrlwatchCommand(watcher).run()
if __name__ == '__main__':
main()
| 36.741379 | 100 | 0.748944 |
import logging
import os.path
import signal
import socket
import sys
from appdirs import AppDirs
pkgname = 'urlwatch'
urlwatch_dir = os.path.expanduser(os.path.join('~', '.' + pkgname))
urlwatch_cache_dir = AppDirs(pkgname).user_cache_dir
if not os.path.exists(urlwatch_dir):
urlwatch_dir = AppDirs(pkgname).user_config_dir
(prefix, bindir) = os.path.split(os.path.dirname(os.path.abspath(sys.argv[0])))
if bindir != 'bin':
sys.path.insert(0, os.path.join(prefix, bindir, 'lib'))
from urlwatch.command import UrlwatchCommand
from urlwatch.config import CommandConfig
from urlwatch.main import Urlwatch
from urlwatch.storage import YamlConfigStorage, CacheMiniDBStorage, UrlsYaml
socket.setdefaulttimeout(60)
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
...
logger = logging.getLogger(pkgname)
CONFIG_FILE = 'urlwatch.yaml'
URLS_FILE = 'urls.yaml'
CACHE_FILE = 'cache.db'
HOOKS_FILE = 'hooks.py'
def setup_logger(verbose):
if verbose:
root_logger = logging.getLogger('')
console = logging.StreamHandler()
console.setFormatter(logging.Formatter('%(asctime)s %(module)s %(levelname)s: %(message)s'))
root_logger.addHandler(console)
root_logger.setLevel(logging.DEBUG)
root_logger.info('turning on verbose logging mode')
def main():
config_file = os.path.join(urlwatch_dir, CONFIG_FILE)
urls_file = os.path.join(urlwatch_dir, URLS_FILE)
hooks_file = os.path.join(urlwatch_dir, HOOKS_FILE)
new_cache_file = os.path.join(urlwatch_cache_dir, CACHE_FILE)
old_cache_file = os.path.join(urlwatch_dir, CACHE_FILE)
cache_file = new_cache_file
if os.path.exists(old_cache_file) and not os.path.exists(new_cache_file):
cache_file = old_cache_file
command_config = CommandConfig(pkgname, urlwatch_dir, bindir, prefix,
config_file, urls_file, hooks_file, cache_file, False)
setup_logger(command_config.verbose)
config_storage = YamlConfigStorage(command_config.config)
cache_storage = CacheMiniDBStorage(command_config.cache)
urls_storage = UrlsYaml(command_config.urls)
urlwatch = Urlwatch(command_config, config_storage, cache_storage, urls_storage)
urlwatch_command = UrlwatchCommand(urlwatch)
urlwatch_command.run()
if __name__ == '__main__':
main()
| true | true |
f7f5e1e3e8ab9a2942883d0d9335983047369038 | 953 | py | Python | src/deprecated/evaluate.py | whong92/3D_DL | 3c15bca3cc87c3197d38a785f6d1146911a82921 | [
"MIT"
] | 35 | 2019-03-04T00:06:20.000Z | 2022-02-04T22:34:17.000Z | src/deprecated/evaluate.py | 921kiyo/C530 | e64402575661a1e534cb8effe122d8fe8aed156e | [
"MIT"
] | 3 | 2021-03-12T13:12:26.000Z | 2022-01-17T08:47:49.000Z | src/deprecated/evaluate.py | 921kiyo/C530 | e64402575661a1e534cb8effe122d8fe8aed156e | [
"MIT"
] | 12 | 2019-09-26T08:35:18.000Z | 2021-12-09T05:39:59.000Z | from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
from time import *
# load model
from keras.models import load_model
# For Function to feed images to model and augment images at the same time
from keras.preprocessing.image import ImageDataGenerator
# For Tensorboard
from keras.callbacks import TensorBoard
test_dir = '/vol/project/2017/530/g1753002/keras_test_data/test'
input_dim = 150
model = load_model('my_model.h5')
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(input_dim, input_dim),
batch_size=16,
class_mode='categorical')
print("calculating ...")
score = model.evaluate_generator(test_generator)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| 25.756757 | 74 | 0.776495 | from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
from time import *
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard
test_dir = '/vol/project/2017/530/g1753002/keras_test_data/test'
input_dim = 150
model = load_model('my_model.h5')
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(input_dim, input_dim),
batch_size=16,
class_mode='categorical')
print("calculating ...")
score = model.evaluate_generator(test_generator)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| true | true |
f7f5e222eb40a661b2bb5c47f5adf2163618c601 | 639 | py | Python | setup.py | nesen2019/everyday | 8439d99b12a938cb8325cdaceef8861af062abc5 | [
"MIT"
] | null | null | null | setup.py | nesen2019/everyday | 8439d99b12a938cb8325cdaceef8861af062abc5 | [
"MIT"
] | null | null | null | setup.py | nesen2019/everyday | 8439d99b12a938cb8325cdaceef8861af062abc5 | [
"MIT"
] | null | null | null | import os
import re
from setuptools import setup, find_packages, find_namespace_packages
with open("clecode/__init__.py", "r") as f:
data = f.read()
_version = re.findall(r'__version__="([0-9.]+)"', data)[0]
packages = find_packages(
where="clecode*",
exclude=(),
include=("*",)
)
setup(
name="clecode",
version=_version,
description="This is a leetcode learn package",
author="pancras",
url="https://chenbangguo.com",
packages=packages,
# package_data={
# "lecode": [
# "untitled.md",
# "untitled.ipynb",
# ],
# }
)
# python setup.py develop
| 20.612903 | 68 | 0.593114 | import os
import re
from setuptools import setup, find_packages, find_namespace_packages
with open("clecode/__init__.py", "r") as f:
data = f.read()
_version = re.findall(r'__version__="([0-9.]+)"', data)[0]
packages = find_packages(
where="clecode*",
exclude=(),
include=("*",)
)
setup(
name="clecode",
version=_version,
description="This is a leetcode learn package",
author="pancras",
url="https://chenbangguo.com",
packages=packages,
)
| true | true |
f7f5e2722ff3291efbc1fdc8cd3ac1b3aa8c046f | 25,189 | py | Python | homeassistant/components/sonos/media_player.py | vanstinator/core | 7b2947bad7d41bc6ffc695929650dfcfd19998e1 | [
"Apache-2.0"
] | 1 | 2022-02-23T11:08:51.000Z | 2022-02-23T11:08:51.000Z | homeassistant/components/sonos/media_player.py | vanstinator/core | 7b2947bad7d41bc6ffc695929650dfcfd19998e1 | [
"Apache-2.0"
] | 18 | 2021-11-03T06:21:27.000Z | 2022-03-31T06:20:57.000Z | homeassistant/components/sonos/media_player.py | vanstinator/core | 7b2947bad7d41bc6ffc695929650dfcfd19998e1 | [
"Apache-2.0"
] | null | null | null | """Support to interface with Sonos players."""
from __future__ import annotations
from asyncio import run_coroutine_threadsafe
import datetime
import logging
from typing import Any
from soco import alarms
from soco.core import (
MUSIC_SRC_LINE_IN,
MUSIC_SRC_RADIO,
PLAY_MODE_BY_MEANING,
PLAY_MODES,
)
from soco.data_structures import DidlFavorite
import voluptuous as vol
from homeassistant.components import media_source, spotify
from homeassistant.components.media_player import (
MediaPlayerEntity,
MediaPlayerEntityFeature,
async_process_play_media_url,
)
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_ENQUEUE,
MEDIA_TYPE_ALBUM,
MEDIA_TYPE_ARTIST,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_TRACK,
REPEAT_MODE_ALL,
REPEAT_MODE_OFF,
REPEAT_MODE_ONE,
)
from homeassistant.components.plex.const import PLEX_URI_SCHEME
from homeassistant.components.plex.services import process_plex_payload
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TIME, STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entity_platform, service
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import media_browser
from .const import (
DATA_SONOS,
DOMAIN as SONOS_DOMAIN,
MEDIA_TYPES_TO_SONOS,
MODELS_LINEIN_AND_TV,
MODELS_LINEIN_ONLY,
MODELS_TV_ONLY,
PLAYABLE_MEDIA_TYPES,
SONOS_CREATE_MEDIA_PLAYER,
SONOS_MEDIA_UPDATED,
SONOS_STATE_PLAYING,
SONOS_STATE_TRANSITIONING,
SOURCE_LINEIN,
SOURCE_TV,
)
from .entity import SonosEntity
from .helpers import soco_error
from .speaker import SonosMedia, SonosSpeaker
_LOGGER = logging.getLogger(__name__)
VOLUME_INCREMENT = 2
REPEAT_TO_SONOS = {
REPEAT_MODE_OFF: False,
REPEAT_MODE_ALL: True,
REPEAT_MODE_ONE: "ONE",
}
SONOS_TO_REPEAT = {meaning: mode for mode, meaning in REPEAT_TO_SONOS.items()}
UPNP_ERRORS_TO_IGNORE = ["701", "711", "712"]
SERVICE_JOIN = "join"
SERVICE_UNJOIN = "unjoin"
SERVICE_SNAPSHOT = "snapshot"
SERVICE_RESTORE = "restore"
SERVICE_SET_TIMER = "set_sleep_timer"
SERVICE_CLEAR_TIMER = "clear_sleep_timer"
SERVICE_UPDATE_ALARM = "update_alarm"
SERVICE_PLAY_QUEUE = "play_queue"
SERVICE_REMOVE_FROM_QUEUE = "remove_from_queue"
ATTR_SLEEP_TIME = "sleep_time"
ATTR_ALARM_ID = "alarm_id"
ATTR_VOLUME = "volume"
ATTR_ENABLED = "enabled"
ATTR_INCLUDE_LINKED_ZONES = "include_linked_zones"
ATTR_MASTER = "master"
ATTR_WITH_GROUP = "with_group"
ATTR_QUEUE_POSITION = "queue_position"
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up Sonos from a config entry.

    Wires entity creation to speaker discovery and registers the
    multi-speaker domain services (join/unjoin/snapshot/restore) plus the
    per-entity services (sleep timer, alarms, queue handling).
    """
    platform = entity_platform.async_get_current_platform()
    @callback
    def async_create_entities(speaker: SonosSpeaker) -> None:
        """Handle device discovery and create entities."""
        _LOGGER.debug("Creating media_player on %s", speaker.zone_name)
        async_add_entities([SonosMediaPlayerEntity(speaker)])
    @service.verify_domain_control(hass, SONOS_DOMAIN)
    async def async_service_handle(service_call: ServiceCall) -> None:
        """Handle dispatched services."""
        assert platform is not None
        entities = await platform.async_extract_from_service(service_call)
        if not entities:
            return
        # collect the SonosSpeaker objects behind the targeted entities
        speakers = []
        for entity in entities:
            assert isinstance(entity, SonosMediaPlayerEntity)
            speakers.append(entity.speaker)
        # dispatch to the matching multi-speaker operation
        if service_call.service == SERVICE_JOIN:
            master = platform.entities.get(service_call.data[ATTR_MASTER])
            if master:
                await SonosSpeaker.join_multi(hass, master.speaker, speakers)  # type: ignore[arg-type]
            else:
                _LOGGER.error(
                    "Invalid master specified for join service: %s",
                    service_call.data[ATTR_MASTER],
                )
        elif service_call.service == SERVICE_UNJOIN:
            await SonosSpeaker.unjoin_multi(hass, speakers)  # type: ignore[arg-type]
        elif service_call.service == SERVICE_SNAPSHOT:
            await SonosSpeaker.snapshot_multi(
                hass, speakers, service_call.data[ATTR_WITH_GROUP]  # type: ignore[arg-type]
            )
        elif service_call.service == SERVICE_RESTORE:
            await SonosSpeaker.restore_multi(
                hass, speakers, service_call.data[ATTR_WITH_GROUP]  # type: ignore[arg-type]
            )
    # create media_player entities as speakers are discovered
    config_entry.async_on_unload(
        async_dispatcher_connect(hass, SONOS_CREATE_MEDIA_PLAYER, async_create_entities)
    )
    hass.services.async_register(
        SONOS_DOMAIN,
        SERVICE_JOIN,
        async_service_handle,
        cv.make_entity_service_schema({vol.Required(ATTR_MASTER): cv.entity_id}),
    )
    hass.services.async_register(
        SONOS_DOMAIN,
        SERVICE_UNJOIN,
        async_service_handle,
        cv.make_entity_service_schema({}),
    )
    # snapshot and restore share the same optional with_group flag
    join_unjoin_schema = cv.make_entity_service_schema(
        {vol.Optional(ATTR_WITH_GROUP, default=True): cv.boolean}
    )
    hass.services.async_register(
        SONOS_DOMAIN, SERVICE_SNAPSHOT, async_service_handle, join_unjoin_schema
    )
    hass.services.async_register(
        SONOS_DOMAIN, SERVICE_RESTORE, async_service_handle, join_unjoin_schema
    )
    # per-entity services handled by methods on SonosMediaPlayerEntity
    platform.async_register_entity_service(  # type: ignore
        SERVICE_SET_TIMER,
        {
            vol.Required(ATTR_SLEEP_TIME): vol.All(
                vol.Coerce(int), vol.Range(min=0, max=86399)
            )
        },
        "set_sleep_timer",
    )
    platform.async_register_entity_service(SERVICE_CLEAR_TIMER, {}, "clear_sleep_timer")  # type: ignore
    platform.async_register_entity_service(  # type: ignore
        SERVICE_UPDATE_ALARM,
        {
            vol.Required(ATTR_ALARM_ID): cv.positive_int,
            vol.Optional(ATTR_TIME): cv.time,
            vol.Optional(ATTR_VOLUME): cv.small_float,
            vol.Optional(ATTR_ENABLED): cv.boolean,
            vol.Optional(ATTR_INCLUDE_LINKED_ZONES): cv.boolean,
        },
        "set_alarm",
    )
    platform.async_register_entity_service(  # type: ignore
        SERVICE_PLAY_QUEUE,
        {vol.Optional(ATTR_QUEUE_POSITION): cv.positive_int},
        "play_queue",
    )
    platform.async_register_entity_service(  # type: ignore
        SERVICE_REMOVE_FROM_QUEUE,
        {vol.Optional(ATTR_QUEUE_POSITION): cv.positive_int},
        "remove_from_queue",
    )
class SonosMediaPlayerEntity(SonosEntity, MediaPlayerEntity):
"""Representation of a Sonos entity."""
_attr_supported_features = (
MediaPlayerEntityFeature.BROWSE_MEDIA
| MediaPlayerEntityFeature.CLEAR_PLAYLIST
| MediaPlayerEntityFeature.GROUPING
| MediaPlayerEntityFeature.NEXT_TRACK
| MediaPlayerEntityFeature.PAUSE
| MediaPlayerEntityFeature.PLAY
| MediaPlayerEntityFeature.PLAY_MEDIA
| MediaPlayerEntityFeature.PREVIOUS_TRACK
| MediaPlayerEntityFeature.REPEAT_SET
| MediaPlayerEntityFeature.SEEK
| MediaPlayerEntityFeature.SELECT_SOURCE
| MediaPlayerEntityFeature.SHUFFLE_SET
| MediaPlayerEntityFeature.STOP
| MediaPlayerEntityFeature.VOLUME_MUTE
| MediaPlayerEntityFeature.VOLUME_SET
)
_attr_media_content_type = MEDIA_TYPE_MUSIC
def __init__(self, speaker: SonosSpeaker) -> None:
"""Initialize the media player entity."""
super().__init__(speaker)
self._attr_unique_id = self.soco.uid
self._attr_name = self.speaker.zone_name
    async def async_added_to_hass(self) -> None:
        """Handle common setup when added to hass.

        Subscribes to SONOS_MEDIA_UPDATED so media changes reported for the
        group coordinator are reflected on this entity's state.
        """
        await super().async_added_to_hass()
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                SONOS_MEDIA_UPDATED,
                self.async_write_media_state,
            )
        )
@callback
def async_write_media_state(self, uid: str) -> None:
"""Write media state if the provided UID is coordinator of this speaker."""
if self.coordinator.uid == uid:
self.async_write_ha_state()
@property
def available(self) -> bool:
"""Return if the media_player is available."""
return (
self.speaker.available
and self.speaker.sonos_group_entities
and self.media.playback_status
)
@property
def coordinator(self) -> SonosSpeaker:
"""Return the current coordinator SonosSpeaker."""
return self.speaker.coordinator or self.speaker
    @property
    def group_members(self) -> list[str] | None:
        """List of entity_ids which are currently grouped together."""
        return self.speaker.sonos_group_entities
    def __hash__(self) -> int:
        """Return a hash of self, based on the entity's unique id."""
        return hash(self.unique_id)
@property # type: ignore[misc]
def state(self) -> str:
"""Return the state of the entity."""
if self.media.playback_status in (
"PAUSED_PLAYBACK",
"STOPPED",
):
# Sonos can consider itself "paused" but without having media loaded
# (happens if playing Spotify and via Spotify app you pick another device to play on)
if self.media.title is None:
return STATE_IDLE
return STATE_PAUSED
if self.media.playback_status in (
SONOS_STATE_PLAYING,
SONOS_STATE_TRANSITIONING,
):
return STATE_PLAYING
return STATE_IDLE
async def _async_fallback_poll(self) -> None:
"""Retrieve latest state by polling."""
await self.hass.data[DATA_SONOS].favorites[
self.speaker.household_id
].async_poll()
await self.hass.async_add_executor_job(self._update)
def _update(self) -> None:
"""Retrieve latest state by polling."""
self.speaker.update_groups()
self.speaker.update_volume()
if self.speaker.is_coordinator:
self.media.poll_media()
@property
def volume_level(self) -> float | None:
    """Volume level of the media player (0..1)."""
    # Falsy raw values (0 or missing) pass through unchanged.
    raw = self.speaker.volume
    return raw and raw / 100
@property
def is_volume_muted(self) -> bool | None:
"""Return true if volume is muted."""
return self.speaker.muted
@property # type: ignore[misc]
def shuffle(self) -> str | None:
"""Shuffling state."""
shuffle: str = PLAY_MODES[self.media.play_mode][0]
return shuffle
@property # type: ignore[misc]
def repeat(self) -> str | None:
"""Return current repeat mode."""
sonos_repeat = PLAY_MODES[self.media.play_mode][1]
return SONOS_TO_REPEAT[sonos_repeat]
@property
def media(self) -> SonosMedia:
"""Return the SonosMedia object from the coordinator speaker."""
return self.coordinator.media
@property # type: ignore[misc]
def media_content_id(self) -> str | None:
"""Content id of current playing media."""
return self.media.uri
@property # type: ignore[misc]
def media_duration(self) -> float | None:
"""Duration of current playing media in seconds."""
return self.media.duration
@property # type: ignore[misc]
def media_position(self) -> float | None:
"""Position of current playing media in seconds."""
return self.media.position
@property # type: ignore[misc]
def media_position_updated_at(self) -> datetime.datetime | None:
"""When was the position of the current playing media valid."""
return self.media.position_updated_at
@property # type: ignore[misc]
def media_image_url(self) -> str | None:
"""Image url of current playing media."""
return self.media.image_url or None
@property # type: ignore[misc]
def media_channel(self) -> str | None:
"""Channel currently playing."""
return self.media.channel or None
@property
def media_playlist(self) -> str | None:
"""Title of playlist currently playing."""
return self.media.playlist_name
@property # type: ignore[misc]
def media_artist(self) -> str | None:
"""Artist of current playing media, music track only."""
return self.media.artist or None
@property # type: ignore[misc]
def media_album_name(self) -> str | None:
"""Album name of current playing media, music track only."""
return self.media.album_name or None
@property  # type: ignore[misc]
def media_title(self) -> str | None:
    """Title of the current item; empty titles are normalized to ``None``."""
    title = self.media.title
    return title if title else None
@property # type: ignore[misc]
def source(self) -> str | None:
"""Name of the current input source."""
return self.media.source_name or None
@soco_error()
def volume_up(self) -> None:
"""Volume up media player."""
self.soco.volume += VOLUME_INCREMENT
@soco_error()
def volume_down(self) -> None:
"""Volume down media player."""
self.soco.volume -= VOLUME_INCREMENT
@soco_error()
def set_volume_level(self, volume: float) -> None:
    """Set volume level, range 0..1.

    The annotation previously said ``str``, but the value is multiplied by
    100 before conversion, so it must be numeric (HA passes a float).
    """
    # NOTE(review): the value is deliberately handed to SoCo as a string;
    # SoCo appears to coerce it — confirm before simplifying to int.
    self.soco.volume = str(int(volume * 100))
@soco_error(UPNP_ERRORS_TO_IGNORE)
def set_shuffle(self, shuffle: bool) -> None:
    """Enable/Disable shuffle mode.

    Only the shuffle half of the Sonos play mode changes; the current
    repeat component is preserved. The parameter was annotated ``str``,
    but callers pass a bool (see ``self.set_shuffle(True)`` in
    ``play_media``).
    """
    sonos_shuffle = shuffle
    # Keep whatever repeat mode is currently active.
    sonos_repeat = PLAY_MODES[self.media.play_mode][1]
    self.coordinator.soco.play_mode = PLAY_MODE_BY_MEANING[
        (sonos_shuffle, sonos_repeat)
    ]
@soco_error(UPNP_ERRORS_TO_IGNORE)
def set_repeat(self, repeat: str) -> None:
"""Set repeat mode."""
sonos_shuffle = PLAY_MODES[self.media.play_mode][0]
sonos_repeat = REPEAT_TO_SONOS[repeat]
self.coordinator.soco.play_mode = PLAY_MODE_BY_MEANING[
(sonos_shuffle, sonos_repeat)
]
@soco_error()
def mute_volume(self, mute: bool) -> None:
"""Mute (true) or unmute (false) media player."""
self.soco.mute = mute
@soco_error()
def select_source(self, source: str) -> None:
"""Select input source."""
soco = self.coordinator.soco
if source == SOURCE_LINEIN:
soco.switch_to_line_in()
return
if source == SOURCE_TV:
soco.switch_to_tv()
return
self._play_favorite_by_name(source)
def _play_favorite_by_name(self, name: str) -> None:
    """Find the favorite titled exactly ``name`` and start playing it."""
    matches = [item for item in self.speaker.favorites if item.title == name]
    # Only act on an unambiguous, single exact-title match.
    if len(matches) == 1:
        self._play_favorite(matches[0])
def _play_favorite(self, favorite: DidlFavorite) -> None:
    """Start playback of ``favorite`` on the group coordinator."""
    soco = self.coordinator.soco
    uri = favorite.reference.get_uri()
    if soco.music_source_from_uri(uri) not in [
        MUSIC_SRC_RADIO,
        MUSIC_SRC_LINE_IN,
    ]:
        # Library/service content goes through the queue.
        soco.clear_queue()
        soco.add_to_queue(favorite.reference)
        soco.play_from_queue(0)
    else:
        # Radio and line-in cannot be queued; play the URI directly and keep
        # the favorite's title for display.
        soco.play_uri(uri, title=favorite.title)
@property
def source_list(self) -> list[str]:
    """Input sources this hardware model offers (may be empty)."""
    # The last whitespace-separated token of the model name, uppercased,
    # keys into the per-model capability sets.
    model = self.coordinator.model_name.split()[-1].upper()
    sources: list[str] = []
    if model in MODELS_LINEIN_ONLY:
        sources = [SOURCE_LINEIN]
    elif model in MODELS_TV_ONLY:
        sources = [SOURCE_TV]
    elif model in MODELS_LINEIN_AND_TV:
        sources = [SOURCE_LINEIN, SOURCE_TV]
    return sources
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_play(self) -> None:
"""Send play command."""
self.coordinator.soco.play()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_stop(self) -> None:
"""Send stop command."""
self.coordinator.soco.stop()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_pause(self) -> None:
"""Send pause command."""
self.coordinator.soco.pause()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_next_track(self) -> None:
"""Send next track command."""
self.coordinator.soco.next()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_previous_track(self) -> None:
    """Send previous track command."""
    # Docstring previously said "next track" — copy-paste from the sibling
    # method; the call below skips backwards.
    self.coordinator.soco.previous()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_seek(self, position: str) -> None:
"""Send seek command."""
self.coordinator.soco.seek(str(datetime.timedelta(seconds=int(position))))
@soco_error()
def clear_playlist(self) -> None:
"""Clear players playlist."""
self.coordinator.soco.clear_queue()
@soco_error()
def play_media(self, media_type: str, media_id: str, **kwargs: Any) -> None:
"""
Send the play_media command to the media player.
If media_id is a Plex payload, attempt Plex->Sonos playback.
If media_id is an Apple Music, Deezer, Sonos, or Tidal share link,
attempt playback using the respective service.
If media_type is "playlist", media_id should be a Sonos
Playlist name. Otherwise, media_id should be a URI.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the queue.
"""
# Normalize Spotify media-browser URLs into plain Spotify URIs/types.
if spotify.is_spotify_media_type(media_type):
media_type = spotify.resolve_spotify_media_type(media_type)
media_id = spotify.spotify_uri_from_media_browser_url(media_id)
is_radio = False
# media-source ids must be resolved to a playable URL; resolution is a
# coroutine, so run_coroutine_threadsafe bridges from this sync method
# into the event loop and blocks on .result().
if media_source.is_media_source_id(media_id):
is_radio = media_id.startswith("media-source://radio_browser/")
media_type = MEDIA_TYPE_MUSIC
media_id = (
run_coroutine_threadsafe(
media_source.async_resolve_media(self.hass, media_id),
self.hass.loop,
)
.result()
.url
)
# Sonos favorites are addressed by item id and short-circuit everything else.
if media_type == "favorite_item_id":
favorite = self.speaker.favorites.lookup_by_item_id(media_id)
if favorite is None:
raise ValueError(f"Missing favorite for media_id: {media_id}")
self._play_favorite(favorite)
return
soco = self.coordinator.soco
# Plex payloads are pushed through the speaker's Plex plugin queue.
if media_id and media_id.startswith(PLEX_URI_SCHEME):
plex_plugin = self.speaker.plex_plugin
result = process_plex_payload(
self.hass, media_type, media_id, supports_playqueues=False
)
if result.shuffle:
self.set_shuffle(True)
if kwargs.get(ATTR_MEDIA_ENQUEUE):
plex_plugin.add_to_queue(result.media)
else:
soco.clear_queue()
plex_plugin.add_to_queue(result.media)
soco.play_from_queue(0)
return
# Music-service share links (Apple Music, Deezer, Sonos, Tidal).
share_link = self.coordinator.share_link
if share_link.is_share_link(media_id):
if kwargs.get(ATTR_MEDIA_ENQUEUE):
share_link.add_share_link_to_queue(media_id)
else:
soco.clear_queue()
share_link.add_share_link_to_queue(media_id)
soco.play_from_queue(0)
elif media_type in (MEDIA_TYPE_MUSIC, MEDIA_TYPE_TRACK):
# If media ID is a relative URL, we serve it from HA.
media_id = async_process_play_media_url(self.hass, media_id)
if kwargs.get(ATTR_MEDIA_ENQUEUE):
soco.add_uri_to_queue(media_id)
else:
soco.play_uri(media_id, force_radio=is_radio)
elif media_type == MEDIA_TYPE_PLAYLIST:
# Ids starting with "S:" come from the media browser; anything else is
# treated as the title of a Sonos playlist.
if media_id.startswith("S:"):
item = media_browser.get_media(self.media.library, media_id, media_type) # type: ignore[no-untyped-call]
soco.play_uri(item.get_uri())
return
try:
playlists = soco.get_sonos_playlists()
playlist = next(p for p in playlists if p.title == media_id)
except StopIteration:
_LOGGER.error('Could not find a Sonos playlist named "%s"', media_id)
else:
soco.clear_queue()
soco.add_to_queue(playlist)
soco.play_from_queue(0)
elif media_type in PLAYABLE_MEDIA_TYPES:
item = media_browser.get_media(self.media.library, media_id, media_type) # type: ignore[no-untyped-call]
if not item:
_LOGGER.error('Could not find "%s" in the library', media_id)
return
soco.play_uri(item.get_uri())
else:
_LOGGER.error('Sonos does not support a media type of "%s"', media_type)
@soco_error()
def set_sleep_timer(self, sleep_time: int) -> None:
"""Set the timer on the player."""
self.coordinator.soco.set_sleep_timer(sleep_time)
@soco_error()
def clear_sleep_timer(self) -> None:
"""Clear the timer on the player."""
self.coordinator.soco.set_sleep_timer(None)
@soco_error()
def set_alarm(
self,
alarm_id: int,
time: datetime.datetime | None = None,
volume: float | None = None,
enabled: bool | None = None,
include_linked_zones: bool | None = None,
) -> None:
"""Set the alarm clock on the player."""
alarm = None
for one_alarm in alarms.get_alarms(self.coordinator.soco):
if one_alarm.alarm_id == str(alarm_id):
alarm = one_alarm
if alarm is None:
_LOGGER.warning("Did not find alarm with id %s", alarm_id)
return
if time is not None:
alarm.start_time = time
if volume is not None:
alarm.volume = int(volume * 100)
if enabled is not None:
alarm.enabled = enabled
if include_linked_zones is not None:
alarm.include_linked_zones = include_linked_zones
alarm.save()
@soco_error()
def play_queue(self, queue_position: int = 0) -> None:
"""Start playing the queue."""
self.soco.play_from_queue(queue_position)
@soco_error()
def remove_from_queue(self, queue_position: int = 0) -> None:
"""Remove item from the queue."""
self.coordinator.soco.remove_from_queue(queue_position)
@property
def extra_state_attributes(self) -> dict[str, Any]:
    """Expose queue position/size and active source as state attributes."""
    extras: dict[str, Any] = {}
    queue_position = self.media.queue_position
    if queue_position is not None:
        extras[ATTR_QUEUE_POSITION] = queue_position
    if self.media.queue_size:
        extras["queue_size"] = self.media.queue_size
    if self.source:
        extras[ATTR_INPUT_SOURCE] = self.source
    return extras
async def async_get_browse_image(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> tuple[bytes | None, str | None]:
"""Fetch media browser image to serve via proxy."""
if (
media_content_type in [MEDIA_TYPE_ALBUM, MEDIA_TYPE_ARTIST]
and media_content_id
):
item = await self.hass.async_add_executor_job(
media_browser.get_media,
self.media.library,
media_content_id,
MEDIA_TYPES_TO_SONOS[media_content_type],
)
if image_url := getattr(item, "album_art_uri", None):
result = await self._async_fetch_image(image_url) # type: ignore[no-untyped-call]
return result # type: ignore
return (None, None)
async def async_browse_media(
self, media_content_type: str | None = None, media_content_id: str | None = None
) -> Any:
"""Implement the websocket media browsing helper."""
return await media_browser.async_browse_media(
self.hass,
self.speaker,
self.media,
self.get_browse_image_url,
media_content_id,
media_content_type,
)
def join_players(self, group_members):
    """Group the speakers behind ``group_members`` with this player."""
    mappings = self.hass.data[DATA_SONOS].entity_id_mappings
    speakers = []
    for entity_id in group_members:
        speaker = mappings.get(entity_id)
        # Unknown (or non-Sonos) entity ids are a hard error.
        if not speaker:
            raise HomeAssistantError(f"Not a known Sonos entity_id: {entity_id}")
        speakers.append(speaker)
    self.speaker.join(speakers)
def unjoin_player(self):
    """Detach this player from whatever speaker group it belongs to."""
    # Delegates entirely to the speaker abstraction.
    self.speaker.unjoin()
| 34.411202 | 121 | 0.640875 | from __future__ import annotations
from asyncio import run_coroutine_threadsafe
import datetime
import logging
from typing import Any
from soco import alarms
from soco.core import (
MUSIC_SRC_LINE_IN,
MUSIC_SRC_RADIO,
PLAY_MODE_BY_MEANING,
PLAY_MODES,
)
from soco.data_structures import DidlFavorite
import voluptuous as vol
from homeassistant.components import media_source, spotify
from homeassistant.components.media_player import (
MediaPlayerEntity,
MediaPlayerEntityFeature,
async_process_play_media_url,
)
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_ENQUEUE,
MEDIA_TYPE_ALBUM,
MEDIA_TYPE_ARTIST,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_TRACK,
REPEAT_MODE_ALL,
REPEAT_MODE_OFF,
REPEAT_MODE_ONE,
)
from homeassistant.components.plex.const import PLEX_URI_SCHEME
from homeassistant.components.plex.services import process_plex_payload
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TIME, STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entity_platform, service
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import media_browser
from .const import (
DATA_SONOS,
DOMAIN as SONOS_DOMAIN,
MEDIA_TYPES_TO_SONOS,
MODELS_LINEIN_AND_TV,
MODELS_LINEIN_ONLY,
MODELS_TV_ONLY,
PLAYABLE_MEDIA_TYPES,
SONOS_CREATE_MEDIA_PLAYER,
SONOS_MEDIA_UPDATED,
SONOS_STATE_PLAYING,
SONOS_STATE_TRANSITIONING,
SOURCE_LINEIN,
SOURCE_TV,
)
from .entity import SonosEntity
from .helpers import soco_error
from .speaker import SonosMedia, SonosSpeaker
_LOGGER = logging.getLogger(__name__)
VOLUME_INCREMENT = 2
REPEAT_TO_SONOS = {
REPEAT_MODE_OFF: False,
REPEAT_MODE_ALL: True,
REPEAT_MODE_ONE: "ONE",
}
SONOS_TO_REPEAT = {meaning: mode for mode, meaning in REPEAT_TO_SONOS.items()}
UPNP_ERRORS_TO_IGNORE = ["701", "711", "712"]
SERVICE_JOIN = "join"
SERVICE_UNJOIN = "unjoin"
SERVICE_SNAPSHOT = "snapshot"
SERVICE_RESTORE = "restore"
SERVICE_SET_TIMER = "set_sleep_timer"
SERVICE_CLEAR_TIMER = "clear_sleep_timer"
SERVICE_UPDATE_ALARM = "update_alarm"
SERVICE_PLAY_QUEUE = "play_queue"
SERVICE_REMOVE_FROM_QUEUE = "remove_from_queue"
ATTR_SLEEP_TIME = "sleep_time"
ATTR_ALARM_ID = "alarm_id"
ATTR_VOLUME = "volume"
ATTR_ENABLED = "enabled"
ATTR_INCLUDE_LINKED_ZONES = "include_linked_zones"
ATTR_MASTER = "master"
ATTR_WITH_GROUP = "with_group"
ATTR_QUEUE_POSITION = "queue_position"
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
platform = entity_platform.async_get_current_platform()
@callback
def async_create_entities(speaker: SonosSpeaker) -> None:
_LOGGER.debug("Creating media_player on %s", speaker.zone_name)
async_add_entities([SonosMediaPlayerEntity(speaker)])
@service.verify_domain_control(hass, SONOS_DOMAIN)
async def async_service_handle(service_call: ServiceCall) -> None:
assert platform is not None
entities = await platform.async_extract_from_service(service_call)
if not entities:
return
speakers = []
for entity in entities:
assert isinstance(entity, SonosMediaPlayerEntity)
speakers.append(entity.speaker)
if service_call.service == SERVICE_JOIN:
master = platform.entities.get(service_call.data[ATTR_MASTER])
if master:
await SonosSpeaker.join_multi(hass, master.speaker, speakers)
else:
_LOGGER.error(
"Invalid master specified for join service: %s",
service_call.data[ATTR_MASTER],
)
elif service_call.service == SERVICE_UNJOIN:
await SonosSpeaker.unjoin_multi(hass, speakers)
elif service_call.service == SERVICE_SNAPSHOT:
await SonosSpeaker.snapshot_multi(
hass, speakers, service_call.data[ATTR_WITH_GROUP]
)
elif service_call.service == SERVICE_RESTORE:
await SonosSpeaker.restore_multi(
hass, speakers, service_call.data[ATTR_WITH_GROUP]
)
config_entry.async_on_unload(
async_dispatcher_connect(hass, SONOS_CREATE_MEDIA_PLAYER, async_create_entities)
)
hass.services.async_register(
SONOS_DOMAIN,
SERVICE_JOIN,
async_service_handle,
cv.make_entity_service_schema({vol.Required(ATTR_MASTER): cv.entity_id}),
)
hass.services.async_register(
SONOS_DOMAIN,
SERVICE_UNJOIN,
async_service_handle,
cv.make_entity_service_schema({}),
)
join_unjoin_schema = cv.make_entity_service_schema(
{vol.Optional(ATTR_WITH_GROUP, default=True): cv.boolean}
)
hass.services.async_register(
SONOS_DOMAIN, SERVICE_SNAPSHOT, async_service_handle, join_unjoin_schema
)
hass.services.async_register(
SONOS_DOMAIN, SERVICE_RESTORE, async_service_handle, join_unjoin_schema
)
platform.async_register_entity_service(
SERVICE_SET_TIMER,
{
vol.Required(ATTR_SLEEP_TIME): vol.All(
vol.Coerce(int), vol.Range(min=0, max=86399)
)
},
"set_sleep_timer",
)
platform.async_register_entity_service(SERVICE_CLEAR_TIMER, {}, "clear_sleep_timer")
platform.async_register_entity_service(
SERVICE_UPDATE_ALARM,
{
vol.Required(ATTR_ALARM_ID): cv.positive_int,
vol.Optional(ATTR_TIME): cv.time,
vol.Optional(ATTR_VOLUME): cv.small_float,
vol.Optional(ATTR_ENABLED): cv.boolean,
vol.Optional(ATTR_INCLUDE_LINKED_ZONES): cv.boolean,
},
"set_alarm",
)
platform.async_register_entity_service(
SERVICE_PLAY_QUEUE,
{vol.Optional(ATTR_QUEUE_POSITION): cv.positive_int},
"play_queue",
)
platform.async_register_entity_service(
SERVICE_REMOVE_FROM_QUEUE,
{vol.Optional(ATTR_QUEUE_POSITION): cv.positive_int},
"remove_from_queue",
)
class SonosMediaPlayerEntity(SonosEntity, MediaPlayerEntity):
_attr_supported_features = (
MediaPlayerEntityFeature.BROWSE_MEDIA
| MediaPlayerEntityFeature.CLEAR_PLAYLIST
| MediaPlayerEntityFeature.GROUPING
| MediaPlayerEntityFeature.NEXT_TRACK
| MediaPlayerEntityFeature.PAUSE
| MediaPlayerEntityFeature.PLAY
| MediaPlayerEntityFeature.PLAY_MEDIA
| MediaPlayerEntityFeature.PREVIOUS_TRACK
| MediaPlayerEntityFeature.REPEAT_SET
| MediaPlayerEntityFeature.SEEK
| MediaPlayerEntityFeature.SELECT_SOURCE
| MediaPlayerEntityFeature.SHUFFLE_SET
| MediaPlayerEntityFeature.STOP
| MediaPlayerEntityFeature.VOLUME_MUTE
| MediaPlayerEntityFeature.VOLUME_SET
)
_attr_media_content_type = MEDIA_TYPE_MUSIC
def __init__(self, speaker: SonosSpeaker) -> None:
super().__init__(speaker)
self._attr_unique_id = self.soco.uid
self._attr_name = self.speaker.zone_name
async def async_added_to_hass(self) -> None:
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SONOS_MEDIA_UPDATED,
self.async_write_media_state,
)
)
@callback
def async_write_media_state(self, uid: str) -> None:
if self.coordinator.uid == uid:
self.async_write_ha_state()
@property
def available(self) -> bool:
return (
self.speaker.available
and self.speaker.sonos_group_entities
and self.media.playback_status
)
@property
def coordinator(self) -> SonosSpeaker:
return self.speaker.coordinator or self.speaker
@property
def group_members(self) -> list[str] | None:
return self.speaker.sonos_group_entities
def __hash__(self) -> int:
return hash(self.unique_id)
@property
def state(self) -> str:
if self.media.playback_status in (
"PAUSED_PLAYBACK",
"STOPPED",
):
if self.media.title is None:
return STATE_IDLE
return STATE_PAUSED
if self.media.playback_status in (
SONOS_STATE_PLAYING,
SONOS_STATE_TRANSITIONING,
):
return STATE_PLAYING
return STATE_IDLE
async def _async_fallback_poll(self) -> None:
await self.hass.data[DATA_SONOS].favorites[
self.speaker.household_id
].async_poll()
await self.hass.async_add_executor_job(self._update)
def _update(self) -> None:
self.speaker.update_groups()
self.speaker.update_volume()
if self.speaker.is_coordinator:
self.media.poll_media()
@property
def volume_level(self) -> float | None:
return self.speaker.volume and self.speaker.volume / 100
@property
def is_volume_muted(self) -> bool | None:
return self.speaker.muted
@property
def shuffle(self) -> str | None:
shuffle: str = PLAY_MODES[self.media.play_mode][0]
return shuffle
@property
def repeat(self) -> str | None:
sonos_repeat = PLAY_MODES[self.media.play_mode][1]
return SONOS_TO_REPEAT[sonos_repeat]
@property
def media(self) -> SonosMedia:
return self.coordinator.media
@property
def media_content_id(self) -> str | None:
return self.media.uri
@property
def media_duration(self) -> float | None:
return self.media.duration
@property
def media_position(self) -> float | None:
return self.media.position
@property
def media_position_updated_at(self) -> datetime.datetime | None:
return self.media.position_updated_at
@property
def media_image_url(self) -> str | None:
return self.media.image_url or None
@property
def media_channel(self) -> str | None:
return self.media.channel or None
@property
def media_playlist(self) -> str | None:
return self.media.playlist_name
@property
def media_artist(self) -> str | None:
return self.media.artist or None
@property
def media_album_name(self) -> str | None:
return self.media.album_name or None
@property
def media_title(self) -> str | None:
return self.media.title or None
@property
def source(self) -> str | None:
return self.media.source_name or None
@soco_error()
def volume_up(self) -> None:
self.soco.volume += VOLUME_INCREMENT
@soco_error()
def volume_down(self) -> None:
self.soco.volume -= VOLUME_INCREMENT
@soco_error()
def set_volume_level(self, volume: str) -> None:
self.soco.volume = str(int(volume * 100))
@soco_error(UPNP_ERRORS_TO_IGNORE)
def set_shuffle(self, shuffle: str) -> None:
sonos_shuffle = shuffle
sonos_repeat = PLAY_MODES[self.media.play_mode][1]
self.coordinator.soco.play_mode = PLAY_MODE_BY_MEANING[
(sonos_shuffle, sonos_repeat)
]
@soco_error(UPNP_ERRORS_TO_IGNORE)
def set_repeat(self, repeat: str) -> None:
sonos_shuffle = PLAY_MODES[self.media.play_mode][0]
sonos_repeat = REPEAT_TO_SONOS[repeat]
self.coordinator.soco.play_mode = PLAY_MODE_BY_MEANING[
(sonos_shuffle, sonos_repeat)
]
@soco_error()
def mute_volume(self, mute: bool) -> None:
self.soco.mute = mute
@soco_error()
def select_source(self, source: str) -> None:
soco = self.coordinator.soco
if source == SOURCE_LINEIN:
soco.switch_to_line_in()
return
if source == SOURCE_TV:
soco.switch_to_tv()
return
self._play_favorite_by_name(source)
def _play_favorite_by_name(self, name: str) -> None:
fav = [fav for fav in self.speaker.favorites if fav.title == name]
if len(fav) != 1:
return
src = fav.pop()
self._play_favorite(src)
def _play_favorite(self, favorite: DidlFavorite) -> None:
uri = favorite.reference.get_uri()
soco = self.coordinator.soco
if soco.music_source_from_uri(uri) in [
MUSIC_SRC_RADIO,
MUSIC_SRC_LINE_IN,
]:
soco.play_uri(uri, title=favorite.title)
else:
soco.clear_queue()
soco.add_to_queue(favorite.reference)
soco.play_from_queue(0)
@property
def source_list(self) -> list[str]:
model = self.coordinator.model_name.split()[-1].upper()
if model in MODELS_LINEIN_ONLY:
return [SOURCE_LINEIN]
if model in MODELS_TV_ONLY:
return [SOURCE_TV]
if model in MODELS_LINEIN_AND_TV:
return [SOURCE_LINEIN, SOURCE_TV]
return []
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_play(self) -> None:
self.coordinator.soco.play()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_stop(self) -> None:
self.coordinator.soco.stop()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_pause(self) -> None:
self.coordinator.soco.pause()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_next_track(self) -> None:
self.coordinator.soco.next()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_previous_track(self) -> None:
self.coordinator.soco.previous()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_seek(self, position: str) -> None:
self.coordinator.soco.seek(str(datetime.timedelta(seconds=int(position))))
@soco_error()
def clear_playlist(self) -> None:
self.coordinator.soco.clear_queue()
@soco_error()
def play_media(self, media_type: str, media_id: str, **kwargs: Any) -> None:
if spotify.is_spotify_media_type(media_type):
media_type = spotify.resolve_spotify_media_type(media_type)
media_id = spotify.spotify_uri_from_media_browser_url(media_id)
is_radio = False
if media_source.is_media_source_id(media_id):
is_radio = media_id.startswith("media-source://radio_browser/")
media_type = MEDIA_TYPE_MUSIC
media_id = (
run_coroutine_threadsafe(
media_source.async_resolve_media(self.hass, media_id),
self.hass.loop,
)
.result()
.url
)
if media_type == "favorite_item_id":
favorite = self.speaker.favorites.lookup_by_item_id(media_id)
if favorite is None:
raise ValueError(f"Missing favorite for media_id: {media_id}")
self._play_favorite(favorite)
return
soco = self.coordinator.soco
if media_id and media_id.startswith(PLEX_URI_SCHEME):
plex_plugin = self.speaker.plex_plugin
result = process_plex_payload(
self.hass, media_type, media_id, supports_playqueues=False
)
if result.shuffle:
self.set_shuffle(True)
if kwargs.get(ATTR_MEDIA_ENQUEUE):
plex_plugin.add_to_queue(result.media)
else:
soco.clear_queue()
plex_plugin.add_to_queue(result.media)
soco.play_from_queue(0)
return
share_link = self.coordinator.share_link
if share_link.is_share_link(media_id):
if kwargs.get(ATTR_MEDIA_ENQUEUE):
share_link.add_share_link_to_queue(media_id)
else:
soco.clear_queue()
share_link.add_share_link_to_queue(media_id)
soco.play_from_queue(0)
elif media_type in (MEDIA_TYPE_MUSIC, MEDIA_TYPE_TRACK):
media_id = async_process_play_media_url(self.hass, media_id)
if kwargs.get(ATTR_MEDIA_ENQUEUE):
soco.add_uri_to_queue(media_id)
else:
soco.play_uri(media_id, force_radio=is_radio)
elif media_type == MEDIA_TYPE_PLAYLIST:
if media_id.startswith("S:"):
item = media_browser.get_media(self.media.library, media_id, media_type)
soco.play_uri(item.get_uri())
return
try:
playlists = soco.get_sonos_playlists()
playlist = next(p for p in playlists if p.title == media_id)
except StopIteration:
_LOGGER.error('Could not find a Sonos playlist named "%s"', media_id)
else:
soco.clear_queue()
soco.add_to_queue(playlist)
soco.play_from_queue(0)
elif media_type in PLAYABLE_MEDIA_TYPES:
item = media_browser.get_media(self.media.library, media_id, media_type)
if not item:
_LOGGER.error('Could not find "%s" in the library', media_id)
return
soco.play_uri(item.get_uri())
else:
_LOGGER.error('Sonos does not support a media type of "%s"', media_type)
@soco_error()
def set_sleep_timer(self, sleep_time: int) -> None:
self.coordinator.soco.set_sleep_timer(sleep_time)
@soco_error()
def clear_sleep_timer(self) -> None:
self.coordinator.soco.set_sleep_timer(None)
@soco_error()
def set_alarm(
self,
alarm_id: int,
time: datetime.datetime | None = None,
volume: float | None = None,
enabled: bool | None = None,
include_linked_zones: bool | None = None,
) -> None:
alarm = None
for one_alarm in alarms.get_alarms(self.coordinator.soco):
if one_alarm.alarm_id == str(alarm_id):
alarm = one_alarm
if alarm is None:
_LOGGER.warning("Did not find alarm with id %s", alarm_id)
return
if time is not None:
alarm.start_time = time
if volume is not None:
alarm.volume = int(volume * 100)
if enabled is not None:
alarm.enabled = enabled
if include_linked_zones is not None:
alarm.include_linked_zones = include_linked_zones
alarm.save()
@soco_error()
def play_queue(self, queue_position: int = 0) -> None:
self.soco.play_from_queue(queue_position)
@soco_error()
def remove_from_queue(self, queue_position: int = 0) -> None:
self.coordinator.soco.remove_from_queue(queue_position)
@property
def extra_state_attributes(self) -> dict[str, Any]:
attributes: dict[str, Any] = {}
if self.media.queue_position is not None:
attributes[ATTR_QUEUE_POSITION] = self.media.queue_position
if self.media.queue_size:
attributes["queue_size"] = self.media.queue_size
if self.source:
attributes[ATTR_INPUT_SOURCE] = self.source
return attributes
async def async_get_browse_image(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> tuple[bytes | None, str | None]:
if (
media_content_type in [MEDIA_TYPE_ALBUM, MEDIA_TYPE_ARTIST]
and media_content_id
):
item = await self.hass.async_add_executor_job(
media_browser.get_media,
self.media.library,
media_content_id,
MEDIA_TYPES_TO_SONOS[media_content_type],
)
if image_url := getattr(item, "album_art_uri", None):
result = await self._async_fetch_image(image_url)
return result
return (None, None)
async def async_browse_media(
self, media_content_type: str | None = None, media_content_id: str | None = None
) -> Any:
return await media_browser.async_browse_media(
self.hass,
self.speaker,
self.media,
self.get_browse_image_url,
media_content_id,
media_content_type,
)
def join_players(self, group_members):
speakers = []
for entity_id in group_members:
if speaker := self.hass.data[DATA_SONOS].entity_id_mappings.get(entity_id):
speakers.append(speaker)
else:
raise HomeAssistantError(f"Not a known Sonos entity_id: {entity_id}")
self.speaker.join(speakers)
def unjoin_player(self):
self.speaker.unjoin()
| true | true |
f7f5e2abbf768435c740118d454a124fb26492fe | 72,925 | py | Python | torch/distributed/distributed_c10d.py | jsun94/nimble | e5c899a69677818b1becc58100577441e15ede13 | [
"BSD-3-Clause"
] | 206 | 2020-11-28T22:56:38.000Z | 2022-03-27T02:33:04.000Z | torch/distributed/distributed_c10d.py | jsun94/nimble | e5c899a69677818b1becc58100577441e15ede13 | [
"BSD-3-Clause"
] | 19 | 2020-12-09T23:13:14.000Z | 2022-01-24T23:24:08.000Z | torch/distributed/distributed_c10d.py | jsun94/nimble | e5c899a69677818b1becc58100577441e15ede13 | [
"BSD-3-Clause"
] | 28 | 2020-11-29T15:25:12.000Z | 2022-01-20T02:16:27.000Z | import pickle
import torch
import warnings
from torch._six import string_classes
from datetime import timedelta
# This module is wildcard imported from torch.distributed.
# TODO: specify __all__
from .constants import default_pg_timeout
from .rendezvous import rendezvous, register_rendezvous_handler # noqa: F401
from . import (
AllreduceOptions,
AllreduceCoalescedOptions,
AllToAllOptions,
BroadcastOptions,
GatherOptions,
ReduceOptions,
ReduceScatterOptions,
ScatterOptions,
)
from . import ReduceOp
from . import PrefixStore
_MPI_AVAILABLE = True
_NCCL_AVAILABLE = True
_GLOO_AVAILABLE = True
try:
from. import ProcessGroupMPI
except ImportError:
_MPI_AVAILABLE = False
try:
from. import ProcessGroupNCCL
except ImportError:
_NCCL_AVAILABLE = False
try:
from. import ProcessGroupGloo
except ImportError:
_GLOO_AVAILABLE = False
class Backend(object):
    """
    Enum-like registry of available backends: GLOO, NCCL, MPI, plus any
    third-party backends registered at runtime.

    Values are lowercase strings (e.g. ``"gloo"``) reachable as attributes
    (``Backend.NCCL``). Calling the class parses a string: ``Backend("GLOO")``
    validates the name and returns ``"gloo"``.

    .. note:: ``Backend.UNDEFINED`` exists only as an initial placeholder for
              some fields; user code should not rely on it.
    """

    UNDEFINED = "undefined"
    GLOO = "gloo"
    NCCL = "nccl"
    MPI = "mpi"
    TCP = "tcp"

    def __new__(cls, name):
        # Only string spellings of a backend name are accepted.
        if not isinstance(name, string_classes):
            raise ValueError("Backend name must be a string, but got: {}".format(name))
        resolved = getattr(Backend, name.upper(), Backend.UNDEFINED)

        if resolved == Backend.TCP:
            raise ValueError("TCP backend has been deprecated. Please use "
                             "Gloo or MPI backend for collective operations "
                             "on CPU tensors.")
        if resolved == Backend.UNDEFINED:
            raise ValueError("Invalid backend: '{}'".format(name))
        if resolved not in (Backend.GLOO, Backend.NCCL, Backend.MPI):
            # Third-party backend: return the caller's spelling untouched.
            resolved = name
        return resolved

    @classmethod
    def register_backend(cls, name, func):
        """
        Register a third-party backend under ``name``.

        ``func`` is the handler a backend's cpp extension supplies to
        instantiate it; it takes four arguments: prefix_store, rank,
        world_size, and timeout. The handler is stored as an uppercase class
        attribute, mirroring the built-in constants.

        .. note:: Third-party backend support is experimental and subject to
                  change.
        """
        setattr(Backend, name.upper(), func)
# `_backend`, `dist_backend`, and `reduce_op` are here to maintain backward
# compatibility with pre-c10d distributed package.
# TODO: remove them when users are ready to take a hard dependency on PyTorch 1.
# `_backend` tracks the backend of the default process group; it is updated by
# init_process_group().
_backend = Backend.UNDEFINED
dist_backend = Backend
class reduce_op(object):
    r"""
    Deprecated enum-like class for reduction operations: ``SUM``, ``PRODUCT``,
    ``MIN``, and ``MAX``.

    :class:`~torch.distributed.ReduceOp` is recommended to use instead.
    """

    def __init__(self):
        # __members__ is a dict storing key-value pairs for enum classes
        for k, v in ReduceOp.__members__.items():
            setattr(self, k, v)
        self.__members__ = ReduceOp.__members__

    def __getattribute__(self, key):
        # Every attribute access on this shim (e.g. ``reduce_op.SUM``) emits a
        # deprecation warning pointing users at torch.distributed.ReduceOp.
        warnings.warn("torch.distributed.reduce_op is deprecated, please use "
                      "torch.distributed.ReduceOp instead")
        return object.__getattribute__(self, key)

# Replace the class with a singleton instance so legacy code can keep reading
# ``dist.reduce_op.SUM`` etc. (with the deprecation warning above).
reduce_op = reduce_op()
class group(object):
    # Sentinel object standing in for "the default (global) process group".
    WORLD = object()


class GroupMember(object):
    # Alias to group.WORLD for backward compatibility
    WORLD = group.WORLD
    # Sentinel returned by group-creation helpers to ranks that are not
    # members of the newly created group.
    NON_GROUP_MEMBER = object()
# Module-level registry of process-group state, maintained by
# init_process_group() / _new_process_group_helper() / destroy_process_group().

# Cached process groups
# For NCCL and GLOO pg, it is a map from ProcessGroup to (Backend, Store)
# For MPI pg, it is a map from ProcessGroup to (Backend, None)
_pg_map = {}
# Process group's names, map from ProcessGroup to str
_pg_names = {}
# Process group's global rank to local rank mapping
_pg_group_ranks = {}

# Default process group state
_default_pg = None
_default_pg_init_method = None

# Process group count for default naming
_group_count = 0
def _rank_not_in_group(group):
    """Return ``True`` if the calling process is not a member of ``group``.

    Every rank is a member of the WORLD group by definition; for sub-groups,
    non-members hold the ``GroupMember.NON_GROUP_MEMBER`` sentinel.
    """
    return group != GroupMember.WORLD and group == GroupMember.NON_GROUP_MEMBER
def _get_group_rank(group, rank):
    """Translate a global ``rank`` into its local rank inside ``group``.

    Raises RuntimeError if ``group`` is WORLD (no mapping exists), is unknown,
    or does not contain the given global rank.
    """
    if group is GroupMember.WORLD:
        raise RuntimeError("group.WORLD does not have local rank to global "
                           "rank mapping")
    if group not in _pg_group_ranks:
        raise RuntimeError("The given group does not exist")
    try:
        return _pg_group_ranks[group][rank]
    except KeyError:
        raise RuntimeError(f"The global rank {rank} is not part of the group {group}") from None
def _get_global_rank(group, group_rank):
    """Translate a local rank inside ``group`` back into its global rank.

    Raises RuntimeError for WORLD (no mapping) or if ``group_rank`` is not
    present in the group's rank mapping.
    """
    if group is GroupMember.WORLD:
        raise RuntimeError("group.WORLD does not have local rank to global "
                           "rank mapping")
    # Reverse lookup: the mapping is stored global-rank -> local-rank.
    for global_rank, local_rank in _pg_group_ranks[group].items():
        if local_rank == group_rank:
            return global_rank
    raise RuntimeError("The group rank is not part of the group")
def _check_default_pg():
    """
    Helper that checks if the default ProcessGroup has been initialized, with
    assertion
    """
    # NOTE: deliberately an assert (stripped under -O); callers treat an
    # uninitialized default group as a programming error.
    assert _default_pg is not None, \
        "Default process group is not initialized"
def _get_group_size(group):
    """Return the number of ranks participating in ``group``."""
    # WORLD delegates to the default process group's own size.
    if group is GroupMember.WORLD:
        _check_default_pg()
        return _default_pg.size()
    if group not in _pg_group_ranks:
        raise RuntimeError("The given group does not exist")
    # Sub-group size is the number of entries in its rank mapping.
    return len(_pg_group_ranks[group])
def _check_single_tensor(param, param_name):
    """Raise ``RuntimeError`` unless ``param`` is a single ``torch.Tensor``.

    ``param_name`` is interpolated into the error message so callers can
    report which argument was invalid.
    """
    if isinstance(param, torch.Tensor):
        return
    raise RuntimeError("Invalid function argument. Expected parameter `{}` "
                       "to be of type torch.Tensor.".format(param_name))
def _check_tensor_list(param, param_name):
    """Raise ``RuntimeError`` unless ``param`` is a list of ``torch.Tensor``.

    An empty list is accepted.  ``param_name`` names the offending argument in
    the error message.
    """
    is_tensor_list = isinstance(param, list) and all(
        isinstance(p, torch.Tensor) for p in param
    )
    if not is_tensor_list:
        raise RuntimeError("Invalid function argument. Expected parameter `{}` "
                           "to be of type List[torch.Tensor].".format(param_name))
def is_mpi_available():
    """
    Checks if the MPI backend is available.
    """
    # Flag is set at import time based on whether ProcessGroupMPI imported.
    return _MPI_AVAILABLE


def is_nccl_available():
    """
    Checks if the NCCL backend is available.
    """
    # Flag is set at import time based on whether ProcessGroupNCCL imported.
    return _NCCL_AVAILABLE


def is_gloo_available():
    """
    Checks if the Gloo backend is available.
    """
    # Flag is set at import time based on whether ProcessGroupGloo imported.
    return _GLOO_AVAILABLE


def is_initialized():
    """
    Checking if the default process group has been initialized
    """
    return _default_pg is not None
def _get_default_group():
    """Return the default process group created by init_process_group().

    Raises RuntimeError if init_process_group() has not been called yet.
    """
    if is_initialized():
        return _default_pg
    raise RuntimeError("Default process group has not been initialized, "
                       "please make sure to call init_process_group.")
def _get_default_store():
    """Return the Store backing the default process group.

    Raises RuntimeError if init_process_group() has not been called yet.
    """
    if not is_initialized():
        raise RuntimeError("Default process group has not been initialized, "
                           "please make sure to call init_process_group.")
    # _pg_map values are (Backend, Store) pairs; the store is the second slot.
    return _pg_map[_default_pg][1]
def get_backend(group=group.WORLD):
    """
    Returns the backend of the given process group.

    Arguments:
        group (ProcessGroup, optional): The process group to work on. The
            default is the general main process group. If another specific group
            is specified, the calling process must be part of :attr:`group`.

    Returns:
        The backend of the given process group as a lower case string.

    Raises:
        RuntimeError: If the calling process is not part of ``group`` or the
            given process group was not created by this module.
    """
    _check_default_pg()

    if group == GroupMember.WORLD:
        pg = _default_pg
    else:
        pg = group
    if _rank_not_in_group(pg):
        raise RuntimeError("Invalid process group specified")
    # Bug fix: the previous code indexed ``_pg_map.get(pg, None)[0]``
    # unconditionally, so an unknown process group produced an opaque
    # ``TypeError: 'NoneType' object is not subscriptable`` instead of a
    # meaningful error.
    pg_entry = _pg_map.get(pg, None)
    if pg_entry is None:
        raise RuntimeError("The given group does not exist")
    return pg_entry[0]
def init_process_group(backend,
                       init_method=None,
                       timeout=default_pg_timeout,
                       world_size=-1,
                       rank=-1,
                       store=None,
                       group_name=''):
    """
    Initializes the default distributed process group, and this will also
    initialize the distributed package.

    There are 2 main ways to initialize a process group:

    1. Specify ``store``, ``rank``, and ``world_size`` explicitly.
    2. Specify ``init_method`` (a URL string) which indicates where/how
       to discover peers. Optionally specify ``rank`` and ``world_size``,
       or encode all required parameters in the URL and omit them.

    If neither is specified, ``init_method`` is assumed to be "env://".

    Arguments:
        backend (str or Backend): The backend to use. Depending on
            build-time configurations, valid values include ``mpi``, ``gloo``,
            and ``nccl``. This field should be given as a lowercase string
            (e.g., ``"gloo"``), which can also be accessed via
            :class:`Backend` attributes (e.g., ``Backend.GLOO``). If using
            multiple processes per machine with ``nccl`` backend, each process
            must have exclusive access to every GPU it uses, as sharing GPUs
            between processes can result in deadlocks.
        init_method (str, optional): URL specifying how to initialize the
                                     process group. Default is "env://" if no
                                     ``init_method`` or ``store`` is specified.
                                     Mutually exclusive with ``store``.
        world_size (int, optional): Number of processes participating in
                                    the job. Required if ``store`` is specified.
        rank (int, optional): Rank of the current process.
                              Required if ``store`` is specified.
        store(Store, optional): Key/value store accessible to all workers, used
                                to exchange connection/address information.
                                Mutually exclusive with ``init_method``.
        timeout (timedelta, optional): Timeout for operations executed against
            the process group. Default value equals 30 minutes.
            This is applicable for the ``gloo`` backend. For ``nccl``, this is
            applicable only if the environment variable ``NCCL_BLOCKING_WAIT``
            or ``NCCL_ASYNC_ERROR_HANDLING`` is set to 1. When
            ``NCCL_BLOCKING_WAIT`` is set, this is the duration for which the
            process will block and wait for collectives to complete before
            throwing an exception. When ``NCCL_ASYNC_ERROR_HANDLING`` is set,
            this is the duration after which collectives will be aborted
            asynchronously and the process will crash. ``NCCL_BLOCKING_WAIT``
            will provide errors to the user which can be caught and handled,
            but due to its blocking nature, it has a performance overhead. On
            the other hand, ``NCCL_ASYNC_ERROR_HANDLING`` has little
            performance overhead, but crashes the process on errors. This is
            done since CUDA execution is async and it is no longer safe to
            continue executing user code since failed async NCCL operations
            might result in subsequent CUDA operations to run on corrupted
            data. Only one of these two environment variables should be set.
        group_name (str, optional, deprecated): Group name.

    To enable ``backend == Backend.MPI``, PyTorch needs to be built from source
    on a system that supports MPI.
    """
    global _pg_group_ranks
    global _backend
    global _default_pg
    global _default_pg_init_method

    if not isinstance(timeout, timedelta):
        # Fix: the first fragment keeps a trailing space so the concatenated
        # message reads "of type datetime.timedelta" (it previously rendered
        # as "of typedatetime.timedelta").
        raise RuntimeError("Expected timeout argument to be of type "
                           "datetime.timedelta")

    if _default_pg is not None:
        raise RuntimeError("trying to initialize the default process group "
                           "twice!")

    assert (store is None) or (init_method is None), \
        "Cannot specify both init_method and store."

    if store is not None:
        assert world_size > 0, 'world_size must be positive if using store'
        assert rank >= 0, 'rank must be non-negative if using store'
    elif init_method is None:
        init_method = "env://"

    backend = Backend(backend)

    if backend == Backend.MPI:
        if world_size != -1 or rank != -1:
            # MPI derives both values from its own runtime; explicit values
            # passed here are ignored.
            warnings.warn(
                "For MPI backend, world_size ({}) and rank ({}) "
                "are ignored since they are assigned by the "
                "MPI runtime.".format(world_size, rank))

        _default_pg = _new_process_group_helper(
            -1,
            -1,
            [],
            Backend.MPI,
            None,
            group_name=group_name,
            timeout=timeout)
    else:
        # backward compatible API
        if store is None:
            rendezvous_iterator = rendezvous(
                init_method, rank, world_size, timeout=timeout
            )
            store, rank, world_size = next(rendezvous_iterator)
            store.set_timeout(timeout)

        _default_pg = _new_process_group_helper(
            world_size,
            rank,
            [],
            backend,
            store,
            group_name=group_name,
            timeout=timeout)

    # For the default (WORLD) group, global and local ranks coincide.
    _pg_group_ranks[_default_pg] = {i: i for i in range(_default_pg.size())}
    _backend = _pg_map[_default_pg][0]
    _default_pg_init_method = init_method

    # barrier at the end to ensure that once we return from this method, all
    # process groups including global variables are updated correctly on all
    # ranks.
    barrier()
def _new_process_group_helper(world_size,
                              rank,
                              group_ranks,
                              backend,
                              store,
                              group_name=None,
                              timeout=default_pg_timeout):
    """
    Create a new distributed process group.

    This function must be called by ALL processes in the global group, even if
    the calling process is not part of the newly created group. In that case,
    this function returns GroupMember.NON_GROUP_MEMBER.

    This function is called with ``group_ranks == []`` for the default group.

    Arguments:
        world_size (int): Number of ranks in the new group (-1 for MPI).
        rank (int): This process's rank in the new group (-1 for MPI).
        group_ranks (list[int]): Global ranks belonging to the group; empty
            for the default group.
        backend (str or Backend): Backend to instantiate.
        store (Store): Rendezvous store shared by all groups (None for MPI).
        group_name (str, optional): Explicit group name; auto-generated from
            ``_group_count`` when empty.
        timeout (timedelta, optional): Collective-operation timeout.
    """
    global _pg_map
    global _group_count
    global _pg_names

    if not group_name:
        group_name = str(_group_count)
        _group_count += 1

    if group_name in _pg_names.values():
        raise RuntimeError("The specified group name has already been "
                           "created, please use a different group name")

    if not isinstance(timeout, timedelta):
        # Fix: keep a trailing space on the first fragment so the message
        # reads "of type datetime.timedelta" rather than
        # "of typedatetime.timedelta".
        raise RuntimeError("Expected timeout argument to be of type "
                           "datetime.timedelta")

    # The list of group ranks is empty if we're creating the default group.
    is_default_group = (len(group_ranks) == 0)

    backend = Backend(backend)
    if backend == Backend.MPI:
        if not is_mpi_available():
            raise RuntimeError(
                "Distributed package doesn't have MPI built in."
                " MPI is only included if you build PyTorch from"
                " source on a host that has MPI installed.")
        pg = ProcessGroupMPI.create(group_ranks)
        if not pg:
            return GroupMember.NON_GROUP_MEMBER
        _pg_map[pg] = (Backend.MPI, None)
        _pg_names[pg] = group_name
    else:
        # If this is a subgroup (which means group_ranks is specified),
        # we check if the current process is a member of the new group.
        if not is_default_group:
            global_rank = _default_pg.rank()
            if global_rank not in group_ranks:
                return GroupMember.NON_GROUP_MEMBER

        # Use the group name as prefix in the default store, such that
        # a single store can be reused by multiple groups.
        prefix_store = PrefixStore(group_name, store)

        if backend == Backend.GLOO:
            pg = ProcessGroupGloo(
                prefix_store,
                rank,
                world_size,
                timeout=timeout)
            _pg_map[pg] = (Backend.GLOO, store)
            _pg_names[pg] = group_name
        elif backend == Backend.NCCL:
            if not is_nccl_available():
                raise RuntimeError("Distributed package doesn't have NCCL "
                                   "built in")
            pg = ProcessGroupNCCL(
                prefix_store,
                rank,
                world_size,
                timeout)
            _pg_map[pg] = (Backend.NCCL, store)
            _pg_names[pg] = group_name
        else:
            # Third-party backend registered via Backend.register_backend():
            # the stored attribute is a constructor taking
            # (prefix_store, rank, world_size, timeout).
            pg = getattr(Backend, backend.upper())(
                prefix_store,
                rank,
                world_size,
                timeout)
            _pg_map[pg] = (backend, store)
            _pg_names[pg] = group_name

    return pg
def destroy_process_group(group=group.WORLD):
    """
    Destroy a given process group, and deinitialize the distributed package

    Arguments:
        group (ProcessGroup, optional): The process group to be destroyed, if
                                        group.WORLD is given, all process
                                        groups including the default one will
                                        be destroyed.
    """
    global _pg_map
    global _pg_names
    global _pg_group_ranks
    global _default_pg
    global _default_pg_init_method
    global _group_count

    # Ranks that were never members of this group have nothing to clean up.
    if group == GroupMember.NON_GROUP_MEMBER:
        return

    if group == GroupMember.WORLD:
        pg = _default_pg
    else:
        pg = group

    if _pg_map.get(pg, None) is None:
        raise RuntimeError("Invalid process group specified")

    if group == GroupMember.WORLD:
        # Destroying WORLD tears down every process group and resets all
        # module-level registries.
        _default_pg = None
        _default_pg_init_method = None
        _pg_map.clear()
        _pg_names.clear()
        _pg_group_ranks.clear()

        # when process group doesn't have an explicit name (only WORLD (default)
        # process group can have an explicit name), we use global _group_counter
        # to generate the name. We need to reset the counter on destruction to
        # allow consistent value to be generated when we re-create process
        # groups after some trainers recover from failure
        #
        # We only reset this when WORLD is being destroyed because if this
        # process group is in good state, we aren't dealing with failures.
        _group_count = 0
    else:
        # Destroying a sub-group only removes its own registry entries.
        del _pg_map[pg]
        del _pg_names[pg]
        del _pg_group_ranks[pg]
def get_rank(group=group.WORLD):
    """
    Returns the rank of the current process in ``group``.

    Ranks are unique, consecutive integers in ``[0, world_size)`` identifying
    each process of a distributed process group.

    Arguments:
        group (ProcessGroup, optional): The process group to work on

    Returns:
        The rank of the process group; -1 if not part of the group.
    """
    if _rank_not_in_group(group):
        return -1

    _check_default_pg()
    global_rank = _default_pg.rank()
    if group == GroupMember.WORLD:
        return global_rank
    # Sub-groups report the group-local rank corresponding to our global rank.
    return _get_group_rank(group, global_rank)
def get_world_size(group=group.WORLD):
    """
    Returns the number of processes in the given process group.

    Arguments:
        group (ProcessGroup, optional): The process group to work on

    Returns:
        The world size of the process group; -1 if not part of the group.
    """
    return -1 if _rank_not_in_group(group) else _get_group_size(group)
def isend(tensor,
          dst,
          group=group.WORLD,
          tag=0):
    """
    Sends a tensor asynchronously.

    Arguments:
        tensor (Tensor): Tensor to send.
        dst (int): Destination rank.
        group (ProcessGroup, optional): The process group to work on
        tag (int, optional): Tag to match send with remote recv

    Returns:
        A distributed request object; None if not part of the group.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return

    if group == GroupMember.WORLD:
        _check_default_pg()
        return _default_pg.send([tensor], dst, tag)
    # Sub-groups address peers by group-local rank.
    return group.send([tensor], _get_group_rank(group, dst), tag)
def irecv(tensor,
          src,
          group=group.WORLD,
          tag=0):
    """
    Receives a tensor asynchronously.

    Arguments:
        tensor (Tensor): Tensor to fill with received data.
        src (int): Source rank.
        group (ProcessGroup, optional): The process group to work on
        tag (int, optional): Tag to match recv with remote send

    Returns:
        A distributed request object; None if not part of the group.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return

    if group == GroupMember.WORLD:
        _check_default_pg()
        return _default_pg.recv([tensor], src, tag)
    # Sub-groups address peers by group-local rank.
    return group.recv([tensor], _get_group_rank(group, src), tag)
def send(tensor,
         dst,
         group=group.WORLD,
         tag=0):
    """
    Sends a tensor synchronously (blocks until the send has completed).

    Arguments:
        tensor (Tensor): Tensor to send.
        dst (int): Destination rank.
        group (ProcessGroup, optional): The process group to work on
        tag (int, optional): Tag to match send with remote recv
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return

    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.send([tensor], dst, tag)
    else:
        work = group.send([tensor], _get_group_rank(group, dst), tag)
    work.wait()
def recv(tensor,
         src=None,
         group=group.WORLD,
         tag=0):
    """
    Receives a tensor synchronously.

    Arguments:
        tensor (Tensor): Tensor to fill with received data.
        src (int, optional): Source rank. Will receive from any
            process if unspecified.
        group (ProcessGroup, optional): The process group to work on
        tag (int, optional): Tag to match recv with remote send

    Returns:
        Sender rank; -1 if not part of the group.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return -1

    is_world = group == GroupMember.WORLD
    if is_world:
        _check_default_pg()
        pg = _default_pg
    else:
        pg = group

    if src is None:
        # Receive from any source, then report which rank actually sent.
        work = pg.recv_anysource([tensor], tag)
        work.wait()
        sender = work._source_rank()
        # Translate the group-local sender rank back to a global rank for
        # sub-groups.
        return sender if is_world else _get_global_rank(pg, sender)

    recv_rank = src if is_world else _get_group_rank(pg, src)
    pg.recv([tensor], recv_rank, tag).wait()
    return src
def broadcast_multigpu(tensor_list,
                       src,
                       group=group.WORLD,
                       async_op=False,
                       src_tensor=0):
    """
    Broadcasts the tensor to the whole group with multiple GPU tensors
    per node.

    On the rank ``src``, the ``tensor_list[src_tensor]`` element is broadcast
    to all other tensors (on different GPUs) in the src process and to every
    tensor in ``tensor_list`` of the other, non-src processes.  Each tensor
    must live on a different GPU, every tensor must have the same number of
    elements, and ``len(tensor_list)`` must match across all callers.  Only
    the nccl and gloo backends are currently supported; tensors must be GPU
    tensors.

    Arguments:
        tensor_list (List[Tensor]): Tensors that participate in the collective
            operation.
        src (int): Source rank.
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op
        src_tensor (int, optional): Source tensor rank within ``tensor_list``

    Returns:
        Async work handle if ``async_op`` is True; otherwise None.
    """
    if _rank_not_in_group(group):
        return

    opts = BroadcastOptions()
    opts.rootTensor = src_tensor
    if group == GroupMember.WORLD:
        _check_default_pg()
        opts.rootRank = src
        work = _default_pg.broadcast(tensor_list, opts)
    else:
        # Translate the global source rank to a group-local one.
        opts.rootRank = _get_group_rank(group, src)
        work = group.broadcast(tensor_list, opts)

    if not async_op:
        work.wait()
        return None
    return work
def broadcast(tensor,
              src,
              group=group.WORLD,
              async_op=False):
    """
    Broadcasts the tensor to the whole group.

    ``tensor`` must have the same number of elements in all processes
    participating in the collective.

    Arguments:
        tensor (Tensor): Data to be sent if ``src`` is the rank of current
            process, and tensor to be used to save received data otherwise.
        src (int): Source rank.
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op

    Returns:
        Async work handle if ``async_op`` is True; otherwise None.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return

    opts = BroadcastOptions()
    opts.rootTensor = 0
    if group == GroupMember.WORLD:
        _check_default_pg()
        opts.rootRank = src
        work = _default_pg.broadcast([tensor], opts)
    else:
        # Translate the global source rank to a group-local one.
        opts.rootRank = _get_group_rank(group, src)
        work = group.broadcast([tensor], opts)

    if not async_op:
        work.wait()
        return None
    return work
def all_reduce_multigpu(tensor_list,
                        op=ReduceOp.SUM,
                        group=group.WORLD,
                        async_op=False):
    r"""
    Reduces the tensor data across all machines in such a way that all get the
    final result, operating on several tensors per node at once.

    Each tensor in ``tensor_list`` must be a GPU tensor residing on a
    different GPU of the calling process, and ``len(tensor_list)`` must be
    identical on every caller.  After the call every tensor in
    ``tensor_list`` is bitwise identical across all processes.  Only the nccl
    and gloo backends are currently supported.

    Arguments:
        tensor_list (List[Tensor]): Input and output tensors of the
            collective; the function operates in-place.
        op (optional): One of the values from ``torch.distributed.ReduceOp``,
            specifying the element-wise reduction.
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op

    Returns:
        Async work handle if ``async_op`` is True; otherwise None.
    """
    if _rank_not_in_group(group):
        return

    opts = AllreduceOptions()
    opts.reduceOp = op
    if group == GroupMember.WORLD:
        _check_default_pg()
        pg = _default_pg
    else:
        pg = group
    work = pg.allreduce(tensor_list, opts)

    if not async_op:
        work.wait()
        return None
    return work
def all_reduce(tensor,
               op=ReduceOp.SUM,
               group=group.WORLD,
               async_op=False):
    """
    Reduces the tensor data across all machines in such a way that all get
    the final result; after the call ``tensor`` is bitwise identical in all
    processes.

    Arguments:
        tensor (Tensor): Input and output of the collective; the function
            operates in-place.
        op (optional): One of the values from ``torch.distributed.ReduceOp``,
            specifying the element-wise reduction.
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op

    Returns:
        Async work handle if ``async_op`` is True; otherwise None.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return

    opts = AllreduceOptions()
    opts.reduceOp = op
    if group == GroupMember.WORLD:
        _check_default_pg()
        pg = _default_pg
    else:
        pg = group
    work = pg.allreduce([tensor], opts)

    if not async_op:
        work.wait()
        return None
    return work
def all_reduce_coalesced(tensors,
                         op=ReduceOp.SUM,
                         group=group.WORLD,
                         async_op=False):
    """
    WARNING: at this time individual shape checking is not implemented across nodes.
    For example, if the rank 0 node passes [torch.rand(4), torch.rand(2)] and the
    rank 1 node passes [torch.rand(2), torch.rand(2), torch.rand(2)], the allreduce
    operation will proceed without complaint and return erroneous outputs. This lack
    of shape checking results in significant performance improvements but users of this
    function should take extra care to ensure that each node passes in tensors whose
    shapes match across nodes.

    Reduces each tensor in tensors (residing on the same device) across all machines
    in such a way that all get the final result.

    After the call each tensor in tensors is going to bitwise identical
    in all processes.

    Arguments:
        tensors (List[Tensor]): Input and output of the collective. The function
            operates in-place.
        op (Optional[ReduceOp]): One of the values from
            ``torch.distributed.ReduceOp`` enum. Specifies an operation used for
            element-wise reductions.
        group (Optional[ProcessGroup]): The process group to work on.
        async_op (Optional[bool]): Whether this op should be an async op.

    Returns:
        Async work handle, if async_op is set to True.
        None, if not async_op or if not part of the group.
    """
    # Fix: report the actual parameter name ("tensors", not "tensor") in the
    # validation error message.
    _check_tensor_list(tensors, "tensors")
    if _rank_not_in_group(group):
        return

    opts = AllreduceCoalescedOptions()
    opts.reduceOp = op
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.allreduce_coalesced(tensors, opts)
    else:
        work = group.allreduce_coalesced(tensors, opts)

    if async_op:
        return work
    else:
        work.wait()
def reduce_multigpu(tensor_list,
                    dst,
                    op=ReduceOp.SUM,
                    group=group.WORLD,
                    async_op=False,
                    dst_tensor=0):
    """
    Reduces the tensor data on multiple GPUs across all machines.

    Each tensor in ``tensor_list`` must be a GPU tensor on a separate GPU of
    the calling process, and ``len(tensor_list)`` must match across all
    callers.  Only the GPU of ``tensor_list[dst_tensor]`` on the process with
    rank ``dst`` receives the final result.  Only the nccl backend is
    currently supported.

    Arguments:
        tensor_list (List[Tensor]): Input and output GPU tensors of the
            collective; the function operates in-place.
        dst (int): Destination rank
        op (optional): One of the values from ``torch.distributed.ReduceOp``,
            specifying the element-wise reduction.
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op
        dst_tensor (int, optional): Destination tensor rank within
            ``tensor_list``

    Returns:
        Async work handle if ``async_op`` is True; otherwise None.
    """
    if _rank_not_in_group(group):
        return

    opts = ReduceOptions()
    opts.reduceOp = op
    opts.rootTensor = dst_tensor
    if group == GroupMember.WORLD:
        _check_default_pg()
        opts.rootRank = dst
        work = _default_pg.reduce(tensor_list, opts)
    else:
        # Translate the global destination rank to a group-local one.
        opts.rootRank = _get_group_rank(group, dst)
        work = group.reduce(tensor_list, opts)

    if not async_op:
        work.wait()
        return None
    return work
def reduce(tensor,
           dst,
           op=ReduceOp.SUM,
           group=group.WORLD,
           async_op=False):
    """
    Reduces the tensor data across all machines; only the process with rank
    ``dst`` receives the final result.

    Arguments:
        tensor (Tensor): Input and output of the collective; the function
            operates in-place.
        dst (int): Destination rank
        op (optional): One of the values from ``torch.distributed.ReduceOp``,
            specifying the element-wise reduction.
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op

    Returns:
        Async work handle if ``async_op`` is True; otherwise None.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return

    opts = ReduceOptions()
    opts.reduceOp = op
    if group == GroupMember.WORLD:
        _check_default_pg()
        opts.rootRank = dst
        work = _default_pg.reduce([tensor], opts)
    else:
        # Translate the global destination rank to a group-local one.
        opts.rootRank = _get_group_rank(group, dst)
        work = group.reduce([tensor], opts)

    if not async_op:
        work.wait()
        return None
    return work
def all_gather_multigpu(output_tensor_lists,
                        input_tensor_list,
                        group=group.WORLD,
                        async_op=False):
    """
    Gathers tensors from the whole group in a list, with multiple GPU tensors
    per node.  Only the nccl backend is currently supported; tensors must be
    GPU tensors, each on a separate GPU of the calling process.

    Arguments:
        output_tensor_lists (List[List[Tensor]]): Output lists. Each
            ``output_tensor_lists[i]`` receives the all_gather result for the
            GPU of ``input_tensor_list[i]`` and must contain
            ``world_size * len(input_tensor_list)`` correctly-sized tensors:
            ``input_tensor_list[j]`` of rank k appears in
            ``output_tensor_lists[i][k * world_size + j]``.  Both
            ``len(output_tensor_lists)`` and the length of each of its
            elements must be the same for all callers.
        input_tensor_list (List[Tensor]): Tensors (on different GPUs) to be
            broadcast from the current process; ``len(input_tensor_list)``
            must be the same for all callers.
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op

    Returns:
        Async work handle if ``async_op`` is True; otherwise None.
    """
    if _rank_not_in_group(group):
        return

    if group == GroupMember.WORLD:
        _check_default_pg()
        pg = _default_pg
    else:
        pg = group
    work = pg.allgather(output_tensor_lists, input_tensor_list)

    if not async_op:
        work.wait()
        return None
    return work
def _object_to_tensor(obj):
    """Serialize ``obj`` with pickle into a (ByteTensor, size) pair.

    Returns the byte tensor holding the pickled payload and a 1-element
    LongTensor with its length, ready for size-exchange collectives.
    """
    serialized = pickle.dumps(obj)
    payload = torch.ByteTensor(torch.ByteStorage.from_buffer(serialized))
    size = torch.LongTensor([payload.numel()])
    return payload, size
def _tensor_to_object(tensor, tensor_size):
    """Deserialize the first ``tensor_size`` bytes of ``tensor`` via pickle.

    Inverse of ``_object_to_tensor``; trailing padding beyond ``tensor_size``
    (from size-aligned gathers) is ignored.
    """
    raw = tensor.numpy().tobytes()
    return pickle.loads(raw[:tensor_size])
def all_gather_object(object_list, obj, group=group.WORLD):
    """
    Gathers picklable objects from the whole group into ``object_list``.

    Similar to :func:`all_gather`, but arbitrary picklable Python objects can
    be passed in.  Unlike :func:`all_gather` this is a blocking call with no
    ``async_op`` handle.

    Arguments:
        object_list (list[Any]): Output list. It should be correctly sized as
            the size of the group for this collective and will contain the
            output.
        obj (Any): Pickable Python object to be broadcast from current
            process.
        group (ProcessGroup, optional): The process group to work on

    Returns:
        None. If the calling rank is part of this group, ``object_list`` is
        populated with the gathered objects; otherwise it is left unmodified.

    .. warning::
        :func:`all_gather_object` uses ``pickle`` implicitly, which is known
        to be insecure. It is possible to construct malicious pickle data
        which will execute arbitrary code during unpickling. Only call this
        function with data you trust.
    """
    if _rank_not_in_group(group):
        return

    input_tensor, local_size = _object_to_tensor(obj)
    is_nccl_backend = get_backend(group) == Backend.NCCL
    my_rank = get_rank()
    if is_nccl_backend:
        # NCCL needs device tensors; by convention rank i uses GPU i.
        input_tensor, local_size = input_tensor.to(my_rank), local_size.to(my_rank)
    target_device = my_rank if is_nccl_backend else "cpu"

    # First gather every rank's serialized size so we know how many bytes of
    # each peer's (padded) payload to deserialize.
    group_size = get_world_size(group=group)
    object_sizes_tensor = torch.zeros(group_size, dtype=int).to(target_device)
    object_size_list = [
        object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size)
    ]
    all_gather(object_size_list, local_size, group=group)
    max_object_size = max(object_size_list)

    # Pad the local payload to the max size and gather all payloads into one
    # coalesced buffer.
    input_tensor.resize_(max_object_size)
    coalesced_output_tensor = torch.empty(
        max_object_size * group_size, dtype=torch.uint8
    ).to(target_device)
    # Output tensors are non-overlapping views of coalesced_output_tensor.
    output_tensors = [
        coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)]
        for i in range(group_size)
    ]
    all_gather(output_tensors, input_tensor, group=group)

    # Deserialize each rank's bytes back into a Python object.
    for i, tensor in enumerate(output_tensors):
        tensor = tensor.type(torch.ByteTensor)
        object_list[i] = _tensor_to_object(tensor, object_size_list[i])
def gather_object(obj, object_gather_list=None, dst=0, group=group.WORLD):
    """
    Gathers picklable objects from the whole group in a single process.
    Similar to :func:`gather`, but Python objects can be passed in. Note that the
    object must be picklable in order to be gathered.

    Arguments:
        obj (Any): Input object. Must be picklable.
        object_gather_list (list[Any]): Output list. On the ``dst`` rank, it
            should be correctly sized as the size of the group for this
            collective and will contain the output. Must be ``None`` on non-dst
            ranks. (default is ``None``)
        dst (int, optional): Destination rank. (default is 0)
        group: (ProcessGroup, optional): The process group to work on.

    Returns:
        None. On the ``dst`` rank, ``object_gather_list`` will contain the
        output of the collective.

    .. note:: Note that this API differs slightly from the gather collective
        since it does not provide an async_op handle and thus will be a blocking
        call.

    .. note:: Note that this API is not supported when using the NCCL backend.

    .. warning::
        :func:`gather_object` uses ``pickle`` module implicitly, which is
        known to be insecure. It is possible to construct malicious pickle data
        which will execute arbitrary code during unpickling. Only call this
        function with data you trust.
    """
    if _rank_not_in_group(group):
        return

    # Ensure object_gather_list is specified appropriately.
    my_rank = get_rank()
    _validate_output_list_for_rank(my_rank, dst, object_gather_list)
    # Serialize ``obj`` into a byte tensor plus a one-element tensor holding
    # its serialized length.
    input_tensor, local_size = _object_to_tensor(obj)
    group_backend = get_backend(group)
    is_nccl_backend = group_backend == Backend.NCCL
    if is_nccl_backend:
        # NOTE(review): device index is taken to be the global rank here —
        # assumes a one-GPU-per-rank setup; confirm against the launcher.
        input_tensor, local_size = input_tensor.to(my_rank), local_size.to(my_rank)
    # Gather all local sizes. This is so that we can find the max size, and index
    # until the correct size when deserializing the tensors.
    group_size = get_world_size(group=group)
    object_sizes_tensor = torch.zeros(group_size, dtype=int).to(
        my_rank if is_nccl_backend else "cpu"
    )
    object_size_list = [
        object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size)
    ]
    # Allgather tensor sizes. An all-gather is needed here despite this being a gather,
    # since each rank needs to broadcast a tensor of the same (maximal) size.
    all_gather(object_size_list, local_size, group=group)
    max_object_size = max(object_size_list)
    # Resize tensor to max size across all ranks.
    input_tensor.resize_(max_object_size)
    # Avoid populating output tensors if the result won't be gathered on this rank.
    if my_rank == dst:
        coalesced_output_tensor = torch.empty(
            max_object_size * group_size, dtype=torch.uint8
        ).to(my_rank if is_nccl_backend else "cpu")
        # Output tensors are nonoverlapping views of coalesced_output_tensor
        output_tensors = [
            coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)]
            for i in range(group_size)
        ]
    # All ranks call gather with equal-sized tensors.
    gather(
        input_tensor,
        gather_list=output_tensors if my_rank == dst else None,
        dst=dst,
        group=group,
    )
    if my_rank != dst:
        return
    # Deserialize each rank's padded slice back into the original object,
    # truncating to that rank's true serialized size.
    for i, tensor in enumerate(output_tensors):
        tensor = tensor.type(torch.ByteTensor)
        tensor_size = object_size_list[i]
        object_gather_list[i] = _tensor_to_object(tensor, tensor_size)
def broadcast_object_list(object_list, src, group=group.WORLD):
    """
    Broadcasts picklable objects in ``object_list`` to the whole group. Similar
    to :func:`broadcast`, but Python objects can be passed in.
    Note that all objects in ``object_list`` must be picklable in order to be
    broadcasted.

    Arguments:
        object_list (List[Any]): List of input objects to broadcast.
            Each object must be picklable. Only objects on the ``src`` rank will
            be broadcast, but each rank must provide lists of equal sizes.
        src (int): Source rank from which to broadcast ``object_list``.
        group: (ProcessGroup, optional): The process group to work on.

    Returns:
        ``None``. If rank is part of the group, ``object_list`` will contain the
        broadcasted objects from ``src`` rank.

    .. note:: Note that this API differs slightly from the broadcast collective
        since it does not provide an ``async_op`` handle and thus will be a
        blocking call.

    .. warning::
        :func:`broadcast_object_list` uses ``pickle`` module implicitly, which
        is known to be insecure. It is possible to construct malicious pickle
        data which will execute arbitrary code during unpickling. Only call this
        function with data you trust.
    """
    if _rank_not_in_group(group):
        return

    my_rank = get_rank()
    # Serialize object_list elements to tensors on src rank.
    if my_rank == src:
        tensor_list, size_list = zip(*[_object_to_tensor(obj) for obj in object_list])
        object_sizes_tensor = torch.cat(size_list)
    else:
        # Uninitialized placeholder; its contents are overwritten by the
        # size broadcast below.
        object_sizes_tensor = torch.LongTensor(len(object_list))
    group_backend = get_backend(group)
    is_nccl_backend = group_backend == Backend.NCCL
    if is_nccl_backend:
        # NOTE(review): device index assumed to equal the global rank —
        # confirm one-GPU-per-rank convention.
        object_sizes_tensor = object_sizes_tensor.to(my_rank)
    # Broadcast object sizes
    broadcast(object_sizes_tensor, src=src, group=group)
    # Concatenate and broadcast serialized object tensors
    if my_rank == src:
        object_tensor = torch.cat(tensor_list)
    else:
        # Uninitialized receive buffer sized to hold every serialized object.
        object_tensor = torch.ByteTensor(torch.sum(object_sizes_tensor).item())
    if is_nccl_backend:
        object_tensor = object_tensor.to(my_rank)
    broadcast(object_tensor, src=src, group=group)
    # Deserialize objects using their stored sizes.
    offset = 0
    if my_rank != src:
        for i, obj_size in enumerate(object_sizes_tensor):
            obj_view = object_tensor[offset : offset + obj_size]
            obj_view = obj_view.type(torch.ByteTensor)
            offset += obj_size
            object_list[i] = _tensor_to_object(obj_view, obj_size)
def all_gather(tensor_list,
               tensor,
               group=group.WORLD,
               async_op=False):
    """
    Gathers tensors from the whole group into a list.

    Arguments:
        tensor_list (list[Tensor]): Output list. It should contain
            correctly-sized tensors to be used for output of the collective.
        tensor (Tensor): Tensor to be broadcast from current process.
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op

    Returns:
        Async work handle, if async_op is set to True.
        None, if not async_op or if not part of the group
    """
    # Validate arguments before touching the process group.
    _check_tensor_list(tensor_list, "tensor_list")
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return

    # Dispatch to the default process group or the explicitly given one.
    if group == GroupMember.WORLD:
        _check_default_pg()
        pg = _default_pg
    else:
        pg = group
    work = pg.allgather([tensor_list], [tensor])

    # Hand the handle back to async callers; otherwise block until done.
    if async_op:
        return work
    work.wait()
def all_gather_coalesced(output_tensor_lists,
                         input_tensor_list,
                         group=group.WORLD,
                         async_op=False):
    """
    Gathers input tensors from the whole group in a list in a coalesced manner.

    Arguments:
        output_tensor_lists (list[list[Tensor]]): Output list. It should contain
            correctly-sized tensors to be used for output of the collective.
        input_tensor_list (list[Tensor]): Tensors to be broadcast from
            current process. At least one tensor has to be non empty.
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op.

    Returns:
        Async work handle, if async_op is set to True.
        None, if not async_op or if not part of the group

    Example:
        we have 2 process groups, 2 ranks.
        rank 0 passes:
            input_tensor_list = [[[1, 1], [1, 1]], [2], [3, 3]]
            output_tensor_lists =
               [[[[-1, -1], [-1, -1]], [-1], [-1, -1]],
                [[[-1, -1], [-1, -1]], [-1], [-1, -1]]]
        rank 1 passes:
            input_tensor_list = [[[3, 3], [3, 3]], [5], [1, 1]]
            output_tensor_lists =
               [[[[-1, -1], [-1, -1]], [-1], [-1, -1]],
                [[[-1, -1], [-1, -1]], [-1], [-1, -1]]]
        both rank 0 and 1 get:
            output_tensor_lists =
               [[[1, 1], [1, 1]], [2], [3, 3]],
                [[3, 3], [3, 3]], [5], [1, 1]]].

    WARNING: at this time individual shape checking is not implemented across nodes.
    For example, if the rank 0 node passes [torch.rand(4), torch.rand(2)] and the
    rank 1 node passes [torch.rand(2), torch.rand(2), torch.rand(2)], the
    all_gather_coalesced operation will proceed without complaint and return
    erroneous outputs. This lack of shape checking results in significant
    performance improvements but users of this function should take extra care
    to ensure that each node passes in tensors whose shapes match across nodes.
    """
    # We only check basic compatibility with C++ params here, C++ code will
    # do shape and type checking.
    if _rank_not_in_group(group):
        return
    _check_tensor_list(input_tensor_list, "tensor_list")
    # Each per-rank output slot must itself be a list of tensors.
    if not isinstance(output_tensor_lists, list):
        raise RuntimeError("Invalid function argument: "
                           "output_tensor_lists should be a list")
    for output_tensor_list in output_tensor_lists:
        _check_tensor_list(output_tensor_list, "output_tensor_lists")
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.allgather_coalesced(
            output_tensor_lists, input_tensor_list)
    else:
        work = group.allgather_coalesced(output_tensor_lists, input_tensor_list)
    if async_op:
        return work
    else:
        work.wait()
def _validate_output_list_for_rank(my_rank, dst, gather_list):
if dst == my_rank:
if not gather_list:
raise ValueError(
"Argument ``gather_list`` must be specified on destination rank."
)
elif gather_list:
raise ValueError(
"Argument ``gather_list`` must NOT be specified "
"on non-destination ranks."
)
def gather(tensor,
           gather_list=None,
           dst=0,
           group=group.WORLD,
           async_op=False):
    """
    Gathers a list of tensors in a single process.

    Arguments:
        tensor (Tensor): Input tensor.
        gather_list (list[Tensor], optional): List of appropriately-sized
            tensors to use for gathered data (default is None, must be specified
            on the destination rank)
        dst (int, optional): Destination rank (default is 0)
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op

    Returns:
        Async work handle, if async_op is set to True.
        None, if not async_op or if not part of the group
    """
    _check_single_tensor(tensor, "tensor")
    # ``gather_list`` is only required on the destination rank.
    if not gather_list:
        gather_list = []
    else:
        _check_tensor_list(gather_list, "gather_list")
    if _rank_not_in_group(group):
        return

    my_rank = get_rank()
    _validate_output_list_for_rank(my_rank, dst, gather_list)
    output_tensors = [gather_list] if dst == my_rank else []
    input_tensors = [tensor]

    opts = GatherOptions()
    if group == GroupMember.WORLD:
        _check_default_pg()
        opts.rootRank = dst
        work = _default_pg.gather(output_tensors, input_tensors, opts)
    else:
        # Translate the global destination rank into the group-local rank.
        opts.rootRank = _get_group_rank(group, dst)
        work = group.gather(output_tensors, input_tensors, opts)

    if async_op:
        return work
    work.wait()
def scatter(tensor,
            scatter_list=None,
            src=0,
            group=group.WORLD,
            async_op=False):
    """
    Scatters a list of tensors to all processes in a group.

    Each process will receive exactly one tensor and store its data in the
    ``tensor`` argument.

    Arguments:
        tensor (Tensor): Output tensor.
        scatter_list (list[Tensor]): List of tensors to scatter (default is
            None, must be specified on the source rank)
        src (int): Source rank (default is 0)
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op

    Returns:
        Async work handle, if async_op is set to True.
        None, if not async_op or if not part of the group
    """
    _check_single_tensor(tensor, "tensor")
    # ``scatter_list`` is only required on the source rank.
    if not scatter_list:
        scatter_list = []
    else:
        _check_tensor_list(scatter_list, "scatter_list")
    if _rank_not_in_group(group):
        return

    my_rank = get_rank()
    output_tensors = [tensor]
    if my_rank == src:
        if not scatter_list:
            raise ValueError("Argument ``scatter_list`` must be specified "
                             "on source rank.")
        input_tensors = [scatter_list]
    else:
        if scatter_list:
            raise ValueError("Argument ``scatter_list`` must NOT be specified "
                             "on non-source ranks.")
        input_tensors = []

    opts = ScatterOptions()
    if group == GroupMember.WORLD:
        _check_default_pg()
        opts.rootRank = src
        work = _default_pg.scatter(output_tensors, input_tensors, opts)
    else:
        # Translate the global source rank into the group-local rank.
        opts.rootRank = _get_group_rank(group, src)
        work = group.scatter(output_tensors, input_tensors, opts)

    if async_op:
        return work
    work.wait()
def reduce_scatter_multigpu(output_tensor_list,
                            input_tensor_lists,
                            op=ReduceOp.SUM,
                            group=group.WORLD,
                            async_op=False):
    """
    Reduce and scatter a list of tensors to the whole group. Only nccl backend
    is currently supported.

    Each tensor in ``output_tensor_list`` should reside on a separate GPU, as
    should each list of tensors in ``input_tensor_lists``.

    Arguments:
        output_tensor_list (List[Tensor]): Output tensors (on different GPUs)
            to receive the result of the operation.
            Note that ``len(output_tensor_list)`` needs to be the same for all
            the distributed processes calling this function.
        input_tensor_lists (List[List[Tensor]]): Input lists. It should
            contain correctly-sized tensors on each GPU to be used for input of
            the collective, e.g. ``input_tensor_lists[i]`` contains the
            reduce_scatter input that resides on the GPU of
            ``output_tensor_list[i]``.
            Note that each element of ``input_tensor_lists`` has the size of
            ``world_size * len(output_tensor_list)``, since the function
            scatters the result from every single GPU in the group. To
            interpret each element of ``input_tensor_lists[i]``, note that
            ``output_tensor_list[j]`` of rank k receives the reduce-scattered
            result from ``input_tensor_lists[i][k * world_size + j]``
            Also note that ``len(input_tensor_lists)``, and the size of each
            element in ``input_tensor_lists`` (each element is a list,
            therefore ``len(input_tensor_lists[i])``) need to be the same for
            all the distributed processes calling this function.
        op (optional): One of the values from the ``ReduceOp`` enum; the
            element-wise reduction applied before scattering.
        group (ProcessGroup, optional): The process group to work on.
        async_op (bool, optional): Whether this op should be an async op.

    Returns:
        Async work handle, if async_op is set to True.
        None, if not async_op or if not part of the group.
    """
    if _rank_not_in_group(group):
        return

    opts = ReduceScatterOptions()
    opts.reduceOp = op
    # Dispatch to the default group or the explicitly supplied one.
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.reduce_scatter(
            output_tensor_list,
            input_tensor_lists,
            opts
        )
    else:
        work = group.reduce_scatter(
            output_tensor_list,
            input_tensor_lists,
            opts
        )
    if async_op:
        return work
    else:
        work.wait()
def reduce_scatter(output,
                   input_list,
                   op=ReduceOp.SUM,
                   group=group.WORLD,
                   async_op=False):
    """
    Reduces, then scatters a list of tensors to all processes in a group.

    Arguments:
        output (Tensor): Output tensor.
        input_list (list[Tensor]): List of tensors to reduce and scatter.
        op (optional): One of the values from the ``ReduceOp`` enum; the
            element-wise reduction applied before scattering.
        group (ProcessGroup, optional): The process group to work on.
        async_op (bool, optional): Whether this op should be an async op.

    Returns:
        Async work handle, if async_op is set to True.
        None, if not async_op or if not part of the group.
    """
    _check_single_tensor(output, "output")
    _check_tensor_list(input_list, "input_list")
    if _rank_not_in_group(group):
        return

    opts = ReduceScatterOptions()
    opts.reduceOp = op

    # Pick the process group, then issue the collective on it.
    if group == GroupMember.WORLD:
        _check_default_pg()
        pg = _default_pg
    else:
        pg = group
    work = pg.reduce_scatter([output], [input_list], opts)

    if async_op:
        return work
    work.wait()
def all_to_all_single(output,
                      input,
                      output_split_sizes=None,
                      input_split_sizes=None,
                      group=group.WORLD,
                      async_op=False):
    """
    Each process splits input tensor and then scatters the split list
    to all processes in a group. Then concatenate the received tensors from all
    the processes in the group and return single output tensor.

    Arguments:
        output (Tensor): Gathered concatenated output tensor.
        input (Tensor): Input tensor to scatter.
        output_split_sizes: (list[Int], optional): Output split sizes for dim 0
            if specified None or empty, dim 0 of ``output`` tensor must divide
            equally by ``world_size``.
        input_split_sizes: (list[Int], optional): Input split sizes for dim 0
            if specified None or empty, dim 0 of ``input`` tensor must divide
            equally by ``world_size``.
        group (ProcessGroup, optional): The process group to work on.
        async_op (bool, optional): Whether this op should be an async op.

    Returns:
        Async work handle, if async_op is set to True.
        None, if not async_op or if not part of the group.

    .. warning::
        `all_to_all_single` is experimental and subject to change.

    Examples:
        >>> input = torch.arange(4) + rank * 4
        >>> input
        tensor([0, 1, 2, 3])     # Rank 0
        tensor([4, 5, 6, 7])     # Rank 1
        tensor([8, 9, 10, 11])   # Rank 2
        tensor([12, 13, 14, 15]) # Rank 3
        >>> output = torch.empty([4], dtype=torch.int64)
        >>> dist.all_to_all_single(output, input)
        >>> output
        tensor([0, 4, 8, 12])    # Rank 0
        tensor([1, 5, 9, 13])    # Rank 1
        tensor([2, 6, 10, 14])   # Rank 2
        tensor([3, 7, 11, 15])   # Rank 3

        >>> # Essentially, it is similar to following operation:
        >>> scatter_list = list(input.chunk(world_size))
        >>> gather_list = list(output.chunk(world_size))
        >>> for i in range(world_size):
        >>>     dist.scatter(gather_list[i], scatter_list if i == rank else [], src = i)

        >>> # Another example with uneven split
        >>> input
        tensor([0, 1, 2, 3, 4, 5])                                       # Rank 0
        tensor([10, 11, 12, 13, 14, 15, 16, 17, 18])                     # Rank 1
        tensor([20, 21, 22, 23, 24])                                     # Rank 2
        tensor([30, 31, 32, 33, 34, 35, 36])                             # Rank 3
        >>> input_splits
        [2, 2, 1, 1]                                                     # Rank 0
        [3, 2, 2, 2]                                                     # Rank 1
        [2, 1, 1, 1]                                                     # Rank 2
        [2, 2, 2, 1]                                                     # Rank 3
        >>> output_splits
        [2, 3, 2, 2]                                                     # Rank 0
        [2, 2, 1, 2]                                                     # Rank 1
        [1, 2, 1, 2]                                                     # Rank 2
        [1, 2, 1, 1]                                                     # Rank 3
        >>> output = ...
        >>> dist.all_to_all_single(output, input, output_splits, input_splits)
        >>> output
        tensor([ 0,  1, 10, 11, 12, 20, 21, 30, 31])                     # Rank 0
        tensor([ 2,  3, 13, 14, 22, 32, 33])                             # Rank 1
        tensor([ 4, 15, 16, 23, 34, 35])                                 # Rank 2
        tensor([ 5, 17, 18, 24, 36])                                     # Rank 3
    """
    if _rank_not_in_group(group):
        return

    opts = AllToAllOptions()
    _check_single_tensor(output, "output")
    _check_single_tensor(input, "input")
    # Empty split lists mean "split dim 0 evenly across the world size".
    output_split_sizes = [] if output_split_sizes is None else output_split_sizes
    input_split_sizes = [] if input_split_sizes is None else input_split_sizes
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.alltoall_base(output, input, output_split_sizes, input_split_sizes, opts)
    else:
        work = group.alltoall_base(output, input, output_split_sizes, input_split_sizes, opts)
    if async_op:
        return work
    else:
        work.wait()
def all_to_all(output_tensor_list,
               input_tensor_list,
               group=group.WORLD,
               async_op=False):
    """
    Each process scatters list of input tensors to all processes in a group and
    return gathered list of tensors in output list.

    Arguments:
        output_tensor_list (list[Tensor]): List of tensors to be gathered one
            per rank.
        input_tensor_list (list[Tensor]): List of tensors to scatter one per rank.
        group (ProcessGroup, optional): The process group to work on.
        async_op (bool, optional): Whether this op should be an async op.

    Returns:
        Async work handle, if async_op is set to True.
        None, if not async_op or if not part of the group.

    .. warning::
        `all_to_all` is experimental and subject to change.

    Examples:
        >>> input = torch.arange(4) + rank * 4
        >>> input = list(input.chunk(4))
        >>> input
        [tensor([0]), tensor([1]), tensor([2]), tensor([3])]     # Rank 0
        [tensor([4]), tensor([5]), tensor([6]), tensor([7])]     # Rank 1
        [tensor([8]), tensor([9]), tensor([10]), tensor([11])]   # Rank 2
        [tensor([12]), tensor([13]), tensor([14]), tensor([15])] # Rank 3
        >>> output = list(torch.empty([4], dtype=torch.int64).chunk(4))
        >>> dist.all_to_all(output, input)
        >>> output
        [tensor([0]), tensor([4]), tensor([8]), tensor([12])]    # Rank 0
        [tensor([1]), tensor([5]), tensor([9]), tensor([13])]    # Rank 1
        [tensor([2]), tensor([6]), tensor([10]), tensor([14])]   # Rank 2
        [tensor([3]), tensor([7]), tensor([11]), tensor([15])]   # Rank 3

        >>> # Essentially, it is similar to following operation:
        >>> scatter_list = input
        >>> gather_list = output
        >>> for i in range(world_size):
        >>>     dist.scatter(gather_list[i], scatter_list if i == rank else [], src = i)

        >>> input
        tensor([0, 1, 2, 3, 4, 5])                                       # Rank 0
        tensor([10, 11, 12, 13, 14, 15, 16, 17, 18])                     # Rank 1
        tensor([20, 21, 22, 23, 24])                                     # Rank 2
        tensor([30, 31, 32, 33, 34, 35, 36])                             # Rank 3
        >>> input_splits
        [2, 2, 1, 1]                                                     # Rank 0
        [3, 2, 2, 2]                                                     # Rank 1
        [2, 1, 1, 1]                                                     # Rank 2
        [2, 2, 2, 1]                                                     # Rank 3
        >>> output_splits
        [2, 3, 2, 2]                                                     # Rank 0
        [2, 2, 1, 2]                                                     # Rank 1
        [1, 2, 1, 2]                                                     # Rank 2
        [1, 2, 1, 1]                                                     # Rank 3
        >>> input = list(input.split(input_splits))
        >>> input
        [tensor([0, 1]), tensor([2, 3]), tensor([4]), tensor([5])]                   # Rank 0
        [tensor([10, 11, 12]), tensor([13, 14]), tensor([15, 16]), tensor([17, 18])] # Rank 1
        [tensor([20, 21]), tensor([22]), tensor([23]), tensor([24])]                 # Rank 2
        [tensor([30, 31]), tensor([32, 33]), tensor([34, 35]), tensor([36])]         # Rank 3
        >>> output = ...
        >>> dist.all_to_all(output, input)
        >>> output
        [tensor([0, 1]), tensor([10, 11, 12]), tensor([20, 21]), tensor([30, 31])]   # Rank 0
        [tensor([2, 3]), tensor([13, 14]), tensor([22]), tensor([32, 33])]           # Rank 1
        [tensor([4]), tensor([15, 16]), tensor([23]), tensor([34, 35])]              # Rank 2
        [tensor([5]), tensor([17, 18]), tensor([24]), tensor([36])]                  # Rank 3
    """
    if _rank_not_in_group(group):
        return

    opts = AllToAllOptions()
    _check_tensor_list(output_tensor_list, "output_tensor_list")
    _check_tensor_list(input_tensor_list, "input_tensor_list")
    # Dispatch to the default group or the explicitly supplied one.
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.alltoall(output_tensor_list, input_tensor_list, opts)
    else:
        work = group.alltoall(output_tensor_list, input_tensor_list, opts)
    if async_op:
        return work
    else:
        work.wait()
def barrier(group=group.WORLD,
            async_op=False):
    """
    Synchronizes all processes.

    This collective blocks processes until the whole group enters this function,
    if async_op is False, or if async work handle is called on wait().

    Arguments:
        group (ProcessGroup, optional): The process group to work on
        async_op (bool, optional): Whether this op should be an async op

    Returns:
        Async work handle, if async_op is set to True.
        None, if not async_op or if not part of the group
    """
    if _rank_not_in_group(group):
        return

    # Select the process group to synchronize on.
    if group == GroupMember.WORLD:
        _check_default_pg()
        pg = _default_pg
    else:
        pg = group
    work = pg.barrier()

    if async_op:
        return work
    work.wait()
def new_group(ranks=None, timeout=default_pg_timeout, backend=None):
    """
    Creates a new distributed group.

    This function requires that all processes in the main group (i.e. all
    processes that are part of the distributed job) enter this function, even
    if they are not going to be members of the group. Additionally, groups
    should be created in the same order in all processes.

    Arguments:
        ranks (list[int]): List of ranks of group members. If ``None``, will be
            set to all ranks. Default is ``None``.
        timeout (timedelta, optional): Timeout for operations executed against
            the process group. Default value equals 30 minutes.
            This is only applicable for the ``gloo`` backend.
        backend (str or Backend, optional): The backend to use. Depending on
            build-time configurations, valid values are ``gloo`` and ``nccl``.
            By default uses the same backend as the global group. This field
            should be given as a lowercase string (e.g., ``"gloo"``), which can
            also be accessed via :class:`Backend` attributes (e.g.,
            ``Backend.GLOO``).

    Returns:
        A handle of distributed group that can be given to collective calls.

    Raises:
        RuntimeError: If the requested group is larger than the world, or any
            rank in ``ranks`` is out of range.
    """
    _check_default_pg()

    global _pg_group_ranks

    default_backend, default_store = _pg_map[_default_pg]
    global_rank = _default_pg.rank()
    global_world_size = _default_pg.size()

    # Default to the same backend as the global process group
    # if the backend is not specified.
    if not backend:
        backend = default_backend

    # checks the input ranks
    if ranks is not None:
        ranks = sorted(ranks)
        group_world_size = len(ranks)
        if group_world_size > global_world_size:
            raise RuntimeError("the new group's world size should be less or "
                               "equal to the world size set by "
                               "init_process_group")
        # check ranks' sanity
        for rank in ranks:
            if rank < 0 or rank >= global_world_size:
                # Fixed message: the original literals concatenated to
                # "within the the world_size" (doubled "the").
                raise RuntimeError("The new group's rank should be within "
                                   "the world_size set by init_process_group")
        if global_rank in ranks:
            group_rank = ranks.index(global_rank)
        else:
            # This caller is not a member of the new group.
            group_rank = None
    else:
        ranks = list(range(global_world_size))
        group_world_size = global_world_size
        group_rank = global_rank

    backend = Backend(backend)
    pg = _new_process_group_helper(group_world_size,
                                   group_rank,
                                   ranks,
                                   backend,
                                   default_store,
                                   timeout=timeout)

    # Create the global rank to group rank mapping
    _pg_group_ranks[pg] = {
        global_rank: group_rank
        for group_rank, global_rank in enumerate(ranks)
    }

    # barrier at the end to ensure that once we return from this method, all
    # process groups including global variables are updated correctly on all
    # ranks.
    barrier()

    return pg
| 35.555826 | 100 | 0.613205 | import pickle
import torch
import warnings
from torch._six import string_classes
from datetime import timedelta
from .constants import default_pg_timeout
from .rendezvous import rendezvous, register_rendezvous_handler
from . import (
AllreduceOptions,
AllreduceCoalescedOptions,
AllToAllOptions,
BroadcastOptions,
GatherOptions,
ReduceOptions,
ReduceScatterOptions,
ScatterOptions,
)
from . import ReduceOp
from . import PrefixStore
# Availability flags for the optional C++ collective backends. Each flag is
# flipped to False below when the corresponding extension module was not
# compiled into this build.
_MPI_AVAILABLE = True
_NCCL_AVAILABLE = True
_GLOO_AVAILABLE = True
try:
    from. import ProcessGroupMPI
except ImportError:
    _MPI_AVAILABLE = False
try:
    from. import ProcessGroupNCCL
except ImportError:
    _NCCL_AVAILABLE = False
try:
    from. import ProcessGroupGloo
except ImportError:
    _GLOO_AVAILABLE = False
class Backend(object):
    """
    An enum-like class of available backend names: GLOO, NCCL, MPI, plus any
    backend registered via :meth:`register_backend`.

    Calling ``Backend(name)`` validates ``name`` and returns the canonical
    lowercase value for built-in backends; ``"tcp"`` and unknown names raise
    ``ValueError``; names of registered third-party backends are returned as
    the caller spelled them.
    """
    UNDEFINED = "undefined"
    GLOO = "gloo"
    NCCL = "nccl"
    MPI = "mpi"
    # Deprecated: constructing Backend("tcp") raises below.
    TCP = "tcp"
    def __new__(cls, name):
        # Only strings are accepted; look the name up among class attributes.
        if not isinstance(name, string_classes):
            raise ValueError("Backend name must be a string, but got: {}".format(name))
        value = getattr(Backend, name.upper(), Backend.UNDEFINED)
        if value == Backend.TCP:
            raise ValueError("TCP backend has been deprecated. Please use "
                             "Gloo or MPI backend for collective operations "
                             "on CPU tensors.")
        elif value == Backend.UNDEFINED:
            raise ValueError("Invalid backend: '{}'".format(name))
        elif value != Backend.GLOO and value != Backend.NCCL and value != Backend.MPI:
            # A registered third-party backend: return the caller's spelling,
            # not the registered constructor object.
            value = name
        return value
    @classmethod
    def register_backend(cls, name, func):
        """Register ``func`` as a third-party backend under ``name``."""
        setattr(Backend, name.upper(), func)
# Backend of the default process group; set by init_process_group.
_backend = Backend.UNDEFINED
# Deprecated alias of the Backend class, kept for backward compatibility.
dist_backend = Backend
class reduce_op(object):
    """
    Deprecated enum-like holder of reduction operations, kept only for
    backward compatibility; use ``torch.distributed.ReduceOp`` instead.
    """
    def __init__(self):
        # Mirror every ReduceOp member onto this instance so legacy attribute
        # access (e.g. ``reduce_op.SUM``) keeps working.
        for k, v in ReduceOp.__members__.items():
            setattr(self, k, v)
        self.__members__ = ReduceOp.__members__
    def __getattribute__(self, key):
        # Warn on every attribute access to steer users towards ReduceOp.
        warnings.warn("torch.distributed.reduce_op is deprecated, please use "
                      "torch.distributed.ReduceOp instead")
        return object.__getattribute__(self, key)
# Replace the class with a singleton instance so the module-level name is
# used directly for attribute access.
reduce_op = reduce_op()
class group(object):
    # Sentinel object identifying the default (world) process group.
    WORLD = object()
class GroupMember(object):
    # Alias of the default group, plus a sentinel handed back to callers that
    # are not members of a newly created group.
    WORLD = group.WORLD
    NON_GROUP_MEMBER = object()
# Registered process groups: pg -> (backend name, store).
_pg_map = {}
# pg -> group name.
_pg_names = {}
# Process group's global rank to local rank mapping
_pg_group_ranks = {}
# The default (world) process group and the init_method used to create it.
_default_pg = None
_default_pg_init_method = None
# Counter used to auto-generate names for unnamed groups.
_group_count = 0
def _rank_not_in_group(group):
    """Return ``True`` iff the calling rank is not a member of ``group``."""
    # WORLD always contains the caller; any other group signals non-membership
    # by handing the caller the NON_GROUP_MEMBER sentinel.
    return group != GroupMember.WORLD and group == GroupMember.NON_GROUP_MEMBER
def _get_group_rank(group, rank):
    """Translate a global ``rank`` into its local rank inside ``group``."""
    if group is GroupMember.WORLD:
        raise RuntimeError("group.WORLD does not have local rank to global "
                           "rank mapping")
    rank_map = _pg_group_ranks.get(group)
    if rank_map is None:
        raise RuntimeError("The given group does not exist")
    try:
        return rank_map[rank]
    except KeyError:
        raise RuntimeError(f"The global rank {rank} is not part of the group {group}") from None
def _get_global_rank(group, group_rank):
    """Translate a group-local rank back into the corresponding global rank."""
    if group is GroupMember.WORLD:
        raise RuntimeError("group.WORLD does not have local rank to global "
                           "rank mapping")
    # Reverse lookup in the global -> local rank mapping of this group.
    for global_rank, local_rank in _pg_group_ranks[group].items():
        if local_rank == group_rank:
            return global_rank
    raise RuntimeError("The group rank is not part of the group")
def _check_default_pg():
    """
    Helper that asserts the default process group has been initialized
    (i.e. init_process_group has run).
    """
    assert _default_pg is not None, \
        "Default process group is not initialized"
def _get_group_size(group):
    """Return the number of ranks in ``group`` (the world size for WORLD)."""
    if group is GroupMember.WORLD:
        _check_default_pg()
        return _default_pg.size()
    members = _pg_group_ranks.get(group)
    if members is None:
        raise RuntimeError("The given group does not exist")
    return len(members)
def _check_single_tensor(param, param_name):
if not isinstance(param, torch.Tensor):
raise RuntimeError("Invalid function argument. Expected parameter `{}` "
"to be of type torch.Tensor.".format(param_name))
def _check_tensor_list(param, param_name):
if not isinstance(param, list) or \
not all(isinstance(p, torch.Tensor) for p in param):
raise RuntimeError("Invalid function argument. Expected parameter `{}` "
"to be of type List[torch.Tensor].".format(param_name))
def is_mpi_available():
    """Returns ``True`` if the MPI backend is built into this binary."""
    return _MPI_AVAILABLE
def is_nccl_available():
    """Returns ``True`` if the NCCL backend is built into this binary."""
    return _NCCL_AVAILABLE
def is_gloo_available():
    """Returns ``True`` if the Gloo backend is built into this binary."""
    return _GLOO_AVAILABLE
def is_initialized():
    """Checks if the default process group has been initialized."""
    return _default_pg is not None
def _get_default_group():
    """Return the default process group, failing if it is uninitialized."""
    if is_initialized():
        return _default_pg
    raise RuntimeError("Default process group has not been initialized, "
                       "please make sure to call init_process_group.")
def _get_default_store():
    """Return the store backing the default process group."""
    if not is_initialized():
        raise RuntimeError("Default process group has not been initialized, "
                           "please make sure to call init_process_group.")
    # _pg_map holds (backend, store) pairs keyed by process group.
    return _pg_map[_default_pg][1]
def get_backend(group=group.WORLD):
    """
    Returns the backend of the given process group.

    Arguments:
        group (ProcessGroup, optional): The process group to work on. Defaults
            to the main (WORLD) group. The calling process must be part of
            ``group``.

    Returns:
        The backend of the given process group, as stored in ``_pg_map``
        (a lowercase backend string for built-in backends).

    Raises:
        RuntimeError: If the caller is not part of ``group`` or the group is
            not registered.
    """
    _check_default_pg()
    if group == GroupMember.WORLD:
        pg = _default_pg
    else:
        pg = group
    if _rank_not_in_group(pg):
        raise RuntimeError("Invalid process group specified")
    # Fix: an unregistered process group previously crashed with
    # ``TypeError: 'NoneType' object is not subscriptable`` via
    # ``_pg_map.get(pg, None)[0]``; raise a descriptive error instead.
    pg_entry = _pg_map.get(pg, None)
    if pg_entry is None:
        raise RuntimeError("Invalid process group specified")
    return pg_entry[0]
def init_process_group(backend,
                       init_method=None,
                       timeout=default_pg_timeout,
                       world_size=-1,
                       rank=-1,
                       store=None,
                       group_name=''):
    """
    Initializes the default distributed process group.

    Arguments:
        backend (str or Backend): The backend to use (e.g. ``"gloo"``,
            ``"nccl"``, ``"mpi"``).
        init_method (str, optional): URL specifying how to initialize the
            process group. Mutually exclusive with ``store``; defaults to
            ``"env://"`` when neither is given.
        timeout (timedelta, optional): Timeout for operations executed against
            the process group.
        world_size (int, optional): Number of processes participating in the
            job. Must be positive if ``store`` is specified.
        rank (int, optional): Rank of the current process. Must be
            non-negative if ``store`` is specified.
        store (Store, optional): Key/value store accessible to all workers,
            used to exchange connection information.
        group_name (str, optional): Group name.

    Raises:
        RuntimeError: If ``timeout`` is not a timedelta, or the default group
            is already initialized.
    """
    global _pg_group_ranks
    global _backend
    global _default_pg
    global _default_pg_init_method

    if not isinstance(timeout, timedelta):
        # Fixed message: the adjacent literals previously concatenated to
        # "of typedatetime.timedelta" (missing space).
        raise RuntimeError("Expected timeout argument to be of type "
                           "datetime.timedelta")
    if _default_pg is not None:
        raise RuntimeError("trying to initialize the default process group "
                           "twice!")

    assert (store is None) or (init_method is None), \
        "Cannot specify both init_method and store."

    if store is not None:
        assert world_size > 0, 'world_size must be positive if using store'
        assert rank >= 0, 'rank must be non-negative if using store'
    elif init_method is None:
        init_method = "env://"

    backend = Backend(backend)

    if backend == Backend.MPI:
        if world_size != -1 or rank != -1:
            # MPI assigns rank/world size itself, so the arguments are ignored.
            warnings.warn(
                "For MPI backend, world_size ({}) and rank ({}) "
                "are ignored since they are assigned by the "
                "MPI runtime.".format(world_size, rank))

        _default_pg = _new_process_group_helper(
            -1,
            -1,
            [],
            Backend.MPI,
            None,
            group_name=group_name,
            timeout=timeout)
    else:
        # Without an explicit store, run the rendezvous protocol to obtain
        # (store, rank, world_size) from init_method.
        if store is None:
            rendezvous_iterator = rendezvous(
                init_method, rank, world_size, timeout=timeout
            )
            store, rank, world_size = next(rendezvous_iterator)
            store.set_timeout(timeout)

        _default_pg = _new_process_group_helper(
            world_size,
            rank,
            [],
            backend,
            store,
            group_name=group_name,
            timeout=timeout)

    # In the default group, global rank == group rank.
    _pg_group_ranks[_default_pg] = {i: i for i in range(_default_pg.size())}
    _backend = _pg_map[_default_pg][0]
    _default_pg_init_method = init_method

    # Barrier so no rank returns before every rank finished initialization.
    barrier()
def _new_process_group_helper(world_size,
                              rank,
                              group_ranks,
                              backend,
                              store,
                              group_name=None,
                              timeout=default_pg_timeout):
    """
    Create a process group of the requested ``backend`` and register it in
    the module caches (``_pg_map``, ``_pg_names``).

    An empty ``group_ranks`` list marks the default (world) group; otherwise
    only the listed global ranks are members, and every other caller gets
    the ``GroupMember.NON_GROUP_MEMBER`` sentinel back.
    """
    global _pg_map
    global _group_count
    global _pg_names

    # Auto-generate a unique name when none was supplied.
    if not group_name:
        group_name = str(_group_count)
        _group_count += 1

    if group_name in _pg_names.values():
        raise RuntimeError("The specified group name has already been "
                           "created, please use a different group name")

    if not isinstance(timeout, timedelta):
        raise RuntimeError("Expected timeout argument to be of type"
                           "datetime.timedelta")

    # The default group is signalled by an empty member list.
    is_default_group = (len(group_ranks) == 0)

    backend = Backend(backend)
    if backend == Backend.MPI:
        if not is_mpi_available():
            raise RuntimeError(
                "Distributed package doesn't have MPI built in."
                " MPI is only included if you build PyTorch from"
                " source on a host that has MPI installed.")
        pg = ProcessGroupMPI.create(group_ranks)
        if not pg:
            return GroupMember.NON_GROUP_MEMBER
        _pg_map[pg] = (Backend.MPI, None)
        _pg_names[pg] = group_name
    else:
        # Ranks outside a sub-group bail out early with the sentinel.
        if not is_default_group:
            global_rank = _default_pg.rank()
            if global_rank not in group_ranks:
                return GroupMember.NON_GROUP_MEMBER

        # Namespace the store per group so different groups' keys don't clash.
        prefix_store = PrefixStore(group_name, store)

        if backend == Backend.GLOO:
            pg = ProcessGroupGloo(
                prefix_store,
                rank,
                world_size,
                timeout=timeout)
            _pg_map[pg] = (Backend.GLOO, store)
            _pg_names[pg] = group_name
        elif backend == Backend.NCCL:
            if not is_nccl_available():
                raise RuntimeError("Distributed package doesn't have NCCL "
                                   "built in")
            pg = ProcessGroupNCCL(
                prefix_store,
                rank,
                world_size,
                timeout)
            _pg_map[pg] = (Backend.NCCL, store)
            _pg_names[pg] = group_name
        else:
            # A backend registered via Backend.register_backend: the stored
            # attribute is a constructor-like callable.
            pg = getattr(Backend, backend.upper())(
                prefix_store,
                rank,
                world_size,
                timeout)
            _pg_map[pg] = (backend, store)
            _pg_names[pg] = group_name

    return pg
def destroy_process_group(group=group.WORLD):
    """Tear down ``group``; destroying the WORLD group tears everything down.

    Destroying WORLD clears all module-level bookkeeping maps and resets the
    counter used to auto-generate group names; destroying a sub-group only
    removes that group's entries.  Raises RuntimeError for an unknown group.
    """
    global _pg_map
    global _pg_names
    global _pg_group_ranks
    global _default_pg
    global _default_pg_init_method
    global _group_count
    if group == GroupMember.NON_GROUP_MEMBER:
        return
    if group == GroupMember.WORLD:
        pg = _default_pg
    else:
        pg = group
    if _pg_map.get(pg, None) is None:
        raise RuntimeError("Invalid process group specified")
    if group == GroupMember.WORLD:
        _default_pg = None
        _default_pg_init_method = None
        _pg_map.clear()
        _pg_names.clear()
        _pg_group_ranks.clear()
        # when process group doesn't have an explicit name (only WORLD (default)
        # group does), a name is generated from _group_count in
        # _new_process_group_helper; reset it so names restart after a full teardown.
        _group_count = 0
    else:
        del _pg_map[pg]
        del _pg_names[pg]
        del _pg_group_ranks[pg]
def get_rank(group=group.WORLD):
    """Return the rank of the current process in ``group``.

    Returns -1 when the calling process is not part of ``group``.  For a
    non-WORLD group, the default group's global rank is translated into the
    group-local rank.
    """
    if _rank_not_in_group(group):
        return -1
    _check_default_pg()
    if group == GroupMember.WORLD:
        return _default_pg.rank()
    return _get_group_rank(group, _default_pg.rank())
def get_world_size(group=group.WORLD):
    """Return the number of processes in ``group``, or -1 when the calling
    process is not a member of the group."""
    return -1 if _rank_not_in_group(group) else _get_group_size(group)
def isend(tensor,
          dst,
          group=group.WORLD,
          tag=0):
    """Send ``tensor`` asynchronously to rank ``dst``.

    Returns the process group's work handle, or None when the calling process
    is not part of ``group``.  ``tag`` is used to match the send with a
    corresponding recv.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return
    if group == GroupMember.WORLD:
        _check_default_pg()
        return _default_pg.send([tensor], dst, tag)
    else:
        # Translate the global destination rank into the group-local rank.
        group_dst_rank = _get_group_rank(group, dst)
        return group.send([tensor], group_dst_rank, tag)
def irecv(tensor,
          src,
          group=group.WORLD,
          tag=0):
    """Receive into ``tensor`` asynchronously from rank ``src``.

    Returns the process group's work handle, or None when the calling process
    is not part of ``group``.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return
    if group == GroupMember.WORLD:
        _check_default_pg()
        return _default_pg.recv([tensor], src, tag)
    else:
        # Translate the global source rank into the group-local rank.
        group_src_rank = _get_group_rank(group, src)
        return group.recv([tensor], group_src_rank, tag)
def send(tensor,
         dst,
         group=group.WORLD,
         tag=0):
    """Send ``tensor`` synchronously to rank ``dst`` (blocks until sent).

    No-op when the calling process is not part of ``group``.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return
    if group == GroupMember.WORLD:
        _check_default_pg()
        _default_pg.send([tensor], dst, tag).wait()
    else:
        group_dst_rank = _get_group_rank(group, dst)
        group.send([tensor], group_dst_rank, tag).wait()
def recv(tensor,
         src=None,
         group=group.WORLD,
         tag=0):
    """Receive into ``tensor`` synchronously.

    When ``src`` is None the data may come from any rank and the sender's
    global rank is returned; otherwise ``src`` itself is returned.  Returns
    -1 when the calling process is not part of ``group``.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return -1
    if group == GroupMember.WORLD:
        _check_default_pg()
        pg = _default_pg
    else:
        pg = group
    if src is None:
        # Accept from any source, then report which rank actually sent.
        work = pg.recv_anysource([tensor], tag)
        work.wait()
        src_rank = work._source_rank()
        if group == GroupMember.WORLD:
            return src_rank
        else:
            # Convert the group-local source rank back to a global rank.
            return _get_global_rank(pg, src_rank)
    else:
        if group == GroupMember.WORLD:
            pg.recv([tensor], src, tag).wait()
        else:
            group_src_rank = _get_group_rank(pg, src)
            pg.recv([tensor], group_src_rank, tag).wait()
        return src
def broadcast_multigpu(tensor_list,
                       src,
                       group=group.WORLD,
                       async_op=False,
                       src_tensor=0):
    """Broadcast the tensors in ``tensor_list`` from rank ``src``.

    ``src_tensor`` selects which tensor in the source rank's list holds the
    data (passed as ``rootTensor``).  Returns a work handle when ``async_op``
    is True; otherwise blocks until done.  No-op when this process is not in
    ``group``.
    """
    if _rank_not_in_group(group):
        return
    opts = BroadcastOptions()
    opts.rootRank = src
    opts.rootTensor = src_tensor
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.broadcast(tensor_list, opts)
    else:
        # Translate the global source rank into the group-local rank.
        group_src_rank = _get_group_rank(group, src)
        opts.rootRank = group_src_rank
        work = group.broadcast(tensor_list, opts)
    if async_op:
        return work
    else:
        work.wait()
def broadcast(tensor,
              src,
              group=group.WORLD,
              async_op=False):
    """Broadcast ``tensor`` from rank ``src`` to every rank in ``group``.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    done.  No-op when this process is not part of ``group``.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return
    opts = BroadcastOptions()
    opts.rootRank = src
    opts.rootTensor = 0
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.broadcast([tensor], opts)
    else:
        # Translate the global source rank into the group-local rank.
        group_src_rank = _get_group_rank(group, src)
        opts.rootRank = group_src_rank
        work = group.broadcast([tensor], opts)
    if async_op:
        return work
    else:
        work.wait()
def all_reduce_multigpu(tensor_list,
                        op=ReduceOp.SUM,
                        group=group.WORLD,
                        async_op=False):
    """All-reduce the tensors in ``tensor_list`` in place using ``op``.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    done.  No-op when this process is not part of ``group``.
    """
    if _rank_not_in_group(group):
        return
    opts = AllreduceOptions()
    opts.reduceOp = op
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.allreduce(tensor_list, opts)
    else:
        work = group.allreduce(tensor_list, opts)
    if async_op:
        return work
    else:
        work.wait()
def all_reduce(tensor,
               op=ReduceOp.SUM,
               group=group.WORLD,
               async_op=False):
    """All-reduce ``tensor`` in place across ``group`` using ``op``.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    done.  No-op when this process is not part of ``group``.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return
    opts = AllreduceOptions()
    opts.reduceOp = op
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.allreduce([tensor], opts)
    else:
        work = group.allreduce([tensor], opts)
    if async_op:
        return work
    else:
        work.wait()
def all_reduce_coalesced(tensors,
                         op=ReduceOp.SUM,
                         group=group.WORLD,
                         async_op=False):
    """All-reduce a list of tensors as one coalesced operation using ``op``.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    done.  No-op when this process is not part of ``group``.
    """
    _check_tensor_list(tensors, "tensor")
    if _rank_not_in_group(group):
        return
    opts = AllreduceCoalescedOptions()
    opts.reduceOp = op
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.allreduce_coalesced(tensors, opts)
    else:
        work = group.allreduce_coalesced(tensors, opts)
    if async_op:
        return work
    else:
        work.wait()
def reduce_multigpu(tensor_list,
                    dst,
                    op=ReduceOp.SUM,
                    group=group.WORLD,
                    async_op=False,
                    dst_tensor=0):
    """Reduce the tensors in ``tensor_list`` to rank ``dst`` using ``op``.

    ``dst_tensor`` selects which tensor on the destination rank receives the
    result (passed as ``rootTensor``).  Returns a work handle when
    ``async_op`` is True; otherwise blocks until done.
    """
    if _rank_not_in_group(group):
        return
    opts = ReduceOptions()
    opts.reduceOp = op
    opts.rootRank = dst
    opts.rootTensor = dst_tensor
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.reduce(tensor_list, opts)
    else:
        # Translate the global destination rank into the group-local rank.
        group_dst_rank = _get_group_rank(group, dst)
        opts.rootRank = group_dst_rank
        work = group.reduce(tensor_list, opts)
    if async_op:
        return work
    else:
        work.wait()
def reduce(tensor,
           dst,
           op=ReduceOp.SUM,
           group=group.WORLD,
           async_op=False):
    """Reduce ``tensor`` across ``group`` onto rank ``dst`` using ``op``.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    done.  No-op when this process is not part of ``group``.
    """
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return
    opts = ReduceOptions()
    opts.reduceOp = op
    opts.rootRank = dst
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.reduce([tensor], opts)
    else:
        # Translate the global destination rank into the group-local rank.
        group_dst_rank = _get_group_rank(group, dst)
        opts.rootRank = group_dst_rank
        work = group.reduce([tensor], opts)
    if async_op:
        return work
    else:
        work.wait()
def all_gather_multigpu(output_tensor_lists,
                        input_tensor_list,
                        group=group.WORLD,
                        async_op=False):
    """All-gather ``input_tensor_list`` from every rank into ``output_tensor_lists``.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    done.  No-op when this process is not part of ``group``.
    """
    if _rank_not_in_group(group):
        return
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.allgather(output_tensor_lists, input_tensor_list)
    else:
        work = group.allgather(output_tensor_lists, input_tensor_list)
    if async_op:
        return work
    else:
        work.wait()
def _object_to_tensor(obj):
buffer = pickle.dumps(obj)
byte_storage = torch.ByteStorage.from_buffer(buffer)
byte_tensor = torch.ByteTensor(byte_storage)
local_size = torch.LongTensor([byte_tensor.numel()])
return byte_tensor, local_size
def _tensor_to_object(tensor, tensor_size):
buf = tensor.numpy().tobytes()[:tensor_size]
out = pickle.loads(buf)
return out
def all_gather_object(object_list, obj, group=group.WORLD):
    """All-gather an arbitrary picklable ``obj`` from every rank into ``object_list``.

    Each rank pickles its object into a byte tensor; the tensors are padded
    to the maximum size across ranks, exchanged with two ``all_gather`` calls
    (sizes first, then payloads), and unpickled into ``object_list``.
    """
    if _rank_not_in_group(group):
        return
    input_tensor, local_size = _object_to_tensor(obj)
    group_backend = get_backend(group)
    my_rank = get_rank()
    is_nccl_backend = group_backend == Backend.NCCL
    if is_nccl_backend:
        # NCCL needs device tensors; assumes device index == my_rank — TODO confirm.
        input_tensor, local_size = input_tensor.to(my_rank), local_size.to(my_rank)
    # Gather all local sizes. This is so that we can find the max size, and index
    # until the correct size when deserializing the tensors.
    group_size = get_world_size(group=group)
    object_sizes_tensor = torch.zeros(group_size, dtype=int).to(
        my_rank if is_nccl_backend else "cpu"
    )
    object_size_list = [
        object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size)
    ]
    # Allgather tensor sizes
    all_gather(object_size_list, local_size, group=group)
    max_object_size = max(object_size_list)
    # Resize tensor to max size across all ranks.
    input_tensor.resize_(max_object_size)
    coalesced_output_tensor = torch.empty(
        max_object_size * group_size, dtype=torch.uint8
    ).to(my_rank if is_nccl_backend else "cpu")
    # Output tensors are nonoverlapping views of coalesced_output_tensor
    output_tensors = [
        coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)]
        for i in range(group_size)
    ]
    all_gather(output_tensors, input_tensor, group=group)
    # Deserialize outputs back to object.
    for i, tensor in enumerate(output_tensors):
        tensor = tensor.type(torch.ByteTensor)
        tensor_size = object_size_list[i]
        object_list[i] = _tensor_to_object(tensor, tensor_size)
def gather_object(obj, object_gather_list=None, dst=0, group=group.WORLD):
    """Gather an arbitrary picklable ``obj`` from every rank onto rank ``dst``.

    Pickles the object into a byte tensor, pads it to the maximum size across
    ranks, and gathers the padded payloads onto ``dst`` where they are
    unpickled into ``object_gather_list``.  ``object_gather_list`` must be
    supplied on the destination rank only.
    """
    if _rank_not_in_group(group):
        return
    # Ensure object_gather_list is specified appropriately.
    my_rank = get_rank()
    _validate_output_list_for_rank(my_rank, dst, object_gather_list)
    input_tensor, local_size = _object_to_tensor(obj)
    group_backend = get_backend(group)
    is_nccl_backend = group_backend == Backend.NCCL
    if is_nccl_backend:
        # NCCL needs device tensors; assumes device index == my_rank — TODO confirm.
        input_tensor, local_size = input_tensor.to(my_rank), local_size.to(my_rank)
    # Gather all local sizes. This is so that we can find the max size, and index
    # until the correct size when deserializing the tensors.
    group_size = get_world_size(group=group)
    object_sizes_tensor = torch.zeros(group_size, dtype=int).to(
        my_rank if is_nccl_backend else "cpu"
    )
    object_size_list = [
        object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size)
    ]
    # Allgather tensor sizes. An all-gather is needed here despite this being a gather,
    # since each rank needs to broadcast a tensor of the same (maximal) size.
    all_gather(object_size_list, local_size, group=group)
    max_object_size = max(object_size_list)
    # Resize tensor to max size across all ranks.
    input_tensor.resize_(max_object_size)
    # Avoid populating output tensors if the result won't be gathered on this rank.
    if my_rank == dst:
        coalesced_output_tensor = torch.empty(
            max_object_size * group_size, dtype=torch.uint8
        ).to(my_rank if is_nccl_backend else "cpu")
        # Output tensors are nonoverlapping views of coalesced_output_tensor.
        output_tensors = [
            coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)]
            for i in range(group_size)
        ]
    gather(
        input_tensor,
        gather_list=output_tensors if my_rank == dst else None,
        dst=dst,
        group=group,
    )
    if my_rank != dst:
        return
    # Deserialize the gathered payloads back into Python objects.
    for i, tensor in enumerate(output_tensors):
        tensor = tensor.type(torch.ByteTensor)
        tensor_size = object_size_list[i]
        object_gather_list[i] = _tensor_to_object(tensor, tensor_size)
def broadcast_object_list(object_list, src, group=group.WORLD):
    """Broadcast a list of picklable objects from rank ``src`` in place.

    The source rank pickles every object; two broadcasts are performed — one
    for the per-object sizes and one for the concatenated payload — and
    non-source ranks unpickle the payload back into ``object_list``.
    """
    if _rank_not_in_group(group):
        return
    my_rank = get_rank()
    if my_rank == src:
        # Serialize all objects; keep their individual sizes for slicing later.
        tensor_list, size_list = zip(*[_object_to_tensor(obj) for obj in object_list])
        object_sizes_tensor = torch.cat(size_list)
    else:
        # Receivers allocate an (uninitialized) size tensor of the right length.
        object_sizes_tensor = torch.LongTensor(len(object_list))
    group_backend = get_backend(group)
    is_nccl_backend = group_backend == Backend.NCCL
    if is_nccl_backend:
        # NCCL needs device tensors; assumes device index == my_rank — TODO confirm.
        object_sizes_tensor = object_sizes_tensor.to(my_rank)
    broadcast(object_sizes_tensor, src=src, group=group)
    if my_rank == src:
        object_tensor = torch.cat(tensor_list)
    else:
        # Allocate one flat buffer big enough for all serialized objects.
        object_tensor = torch.ByteTensor(torch.sum(object_sizes_tensor).item())
    if is_nccl_backend:
        object_tensor = object_tensor.to(my_rank)
    broadcast(object_tensor, src=src, group=group)
    offset = 0
    if my_rank != src:
        # Slice the flat payload back into per-object views and unpickle each.
        for i, obj_size in enumerate(object_sizes_tensor):
            obj_view = object_tensor[offset : offset + obj_size]
            obj_view = obj_view.type(torch.ByteTensor)
            offset += obj_size
            object_list[i] = _tensor_to_object(obj_view, obj_size)
def all_gather(tensor_list,
               tensor,
               group=group.WORLD,
               async_op=False):
    """All-gather ``tensor`` from every rank into ``tensor_list``.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    done.  No-op when this process is not part of ``group``.
    """
    _check_tensor_list(tensor_list, "tensor_list")
    _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
        return
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.allgather([tensor_list], [tensor])
    else:
        work = group.allgather([tensor_list], [tensor])
    if async_op:
        return work
    else:
        work.wait()
def all_gather_coalesced(output_tensor_lists,
                         input_tensor_list,
                         group=group.WORLD,
                         async_op=False):
    """All-gather a list of tensors as one coalesced operation.

    ``output_tensor_lists`` must be a list of tensor lists (one inner list
    per rank).  Returns a work handle when ``async_op`` is True; otherwise
    blocks until done.  No-op when this process is not part of ``group``.
    """
    if _rank_not_in_group(group):
        return
    _check_tensor_list(input_tensor_list, "tensor_list")
    if not isinstance(output_tensor_lists, list):
        raise RuntimeError("Invalid function argument: "
                           "output_tensor_lists should be a list")
    for output_tensor_list in output_tensor_lists:
        _check_tensor_list(output_tensor_list, "output_tensor_lists")
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.allgather_coalesced(
            output_tensor_lists, input_tensor_list)
    else:
        work = group.allgather_coalesced(output_tensor_lists, input_tensor_list)
    if async_op:
        return work
    else:
        work.wait()
def _validate_output_list_for_rank(my_rank, dst, gather_list):
if dst == my_rank:
if not gather_list:
raise ValueError(
"Argument ``gather_list`` must be specified on destination rank."
)
elif gather_list:
raise ValueError(
"Argument ``gather_list`` must NOT be specified "
"on non-destination ranks."
)
def gather(tensor,
           gather_list=None,
           dst=0,
           group=group.WORLD,
           async_op=False):
    """Gather ``tensor`` from every rank into ``gather_list`` on rank ``dst``.

    ``gather_list`` must be given on the destination rank only.  Returns a
    work handle when ``async_op`` is True; otherwise blocks until done.
    No-op when this process is not part of ``group``.
    """
    _check_single_tensor(tensor, "tensor")
    if gather_list:
        _check_tensor_list(gather_list, "gather_list")
    else:
        gather_list = []
    if _rank_not_in_group(group):
        return
    my_rank = get_rank()
    _validate_output_list_for_rank(my_rank, dst, gather_list)
    # Non-destination ranks pass an empty output list.
    output_tensors = [gather_list] if dst == my_rank else []
    input_tensors = [tensor]
    opts = GatherOptions()
    opts.rootRank = dst
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.gather(output_tensors, input_tensors, opts)
    else:
        # Translate the global destination rank into the group-local rank.
        group_dst_rank = _get_group_rank(group, dst)
        opts.rootRank = group_dst_rank
        work = group.gather(output_tensors, input_tensors, opts)
    if async_op:
        return work
    else:
        work.wait()
def scatter(tensor,
            scatter_list=None,
            src=0,
            group=group.WORLD,
            async_op=False):
    """Scatter ``scatter_list`` from rank ``src``; each rank receives into ``tensor``.

    ``scatter_list`` must be given on the source rank only.  Returns a work
    handle when ``async_op`` is True; otherwise blocks until done.  No-op
    when this process is not part of ``group``.
    """
    _check_single_tensor(tensor, "tensor")
    if scatter_list:
        _check_tensor_list(scatter_list, "scatter_list")
    else:
        scatter_list = []
    if _rank_not_in_group(group):
        return
    my_rank = get_rank()
    if src == my_rank:
        if not scatter_list:
            raise ValueError("Argument ``scatter_list`` must be specified "
                             "on source rank.")
        input_tensors = [scatter_list]
        output_tensors = [tensor]
    else:
        if scatter_list:
            raise ValueError("Argument ``scatter_list`` must NOT be specified "
                             "on non-source ranks.")
        # Non-source ranks only receive; they pass an empty input list.
        input_tensors = []
        output_tensors = [tensor]
    opts = ScatterOptions()
    opts.rootRank = src
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.scatter(output_tensors, input_tensors, opts)
    else:
        # Translate the global source rank into the group-local rank.
        group_src_rank = _get_group_rank(group, src)
        opts.rootRank = group_src_rank
        work = group.scatter(output_tensors, input_tensors, opts)
    if async_op:
        return work
    else:
        work.wait()
def reduce_scatter_multigpu(output_tensor_list,
                            input_tensor_lists,
                            op=ReduceOp.SUM,
                            group=group.WORLD,
                            async_op=False):
    """Reduce-scatter ``input_tensor_lists`` into ``output_tensor_list`` using ``op``.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    done.  No-op when this process is not part of ``group``.
    """
    if _rank_not_in_group(group):
        return
    opts = ReduceScatterOptions()
    opts.reduceOp = op
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.reduce_scatter(
            output_tensor_list,
            input_tensor_lists,
            opts
        )
    else:
        work = group.reduce_scatter(
            output_tensor_list,
            input_tensor_lists,
            opts
        )
    if async_op:
        return work
    else:
        work.wait()
def reduce_scatter(output,
                   input_list,
                   op=ReduceOp.SUM,
                   group=group.WORLD,
                   async_op=False):
    """Reduce ``input_list`` across ranks with ``op``, scattering one result
    tensor into ``output`` on each rank.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    done.  No-op when this process is not part of ``group``.
    """
    _check_single_tensor(output, "output")
    _check_tensor_list(input_list, "input_list")
    if _rank_not_in_group(group):
        return
    opts = ReduceScatterOptions()
    opts.reduceOp = op
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.reduce_scatter([output], [input_list], opts)
    else:
        work = group.reduce_scatter([output], [input_list], opts)
    if async_op:
        return work
    else:
        work.wait()
def all_to_all_single(output,
                      input,
                      output_split_sizes=None,
                      input_split_sizes=None,
                      group=group.WORLD,
                      async_op=False):
    """All-to-all exchange using a single input/output tensor per rank.

    ``input_split_sizes``/``output_split_sizes`` describe how the tensors are
    partitioned per destination/source rank; empty lists (the default) mean
    an even split.  Returns a work handle when ``async_op`` is True;
    otherwise blocks until done.  No-op when this process is not in ``group``.
    """
    if _rank_not_in_group(group):
        return
    opts = AllToAllOptions()
    _check_single_tensor(output, "output")
    _check_single_tensor(input, "input")
    output_split_sizes = [] if output_split_sizes is None else output_split_sizes
    input_split_sizes = [] if input_split_sizes is None else input_split_sizes
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.alltoall_base(output, input, output_split_sizes, input_split_sizes, opts)
    else:
        work = group.alltoall_base(output, input, output_split_sizes, input_split_sizes, opts)
    if async_op:
        return work
    else:
        work.wait()
def all_to_all(output_tensor_list,
               input_tensor_list,
               group=group.WORLD,
               async_op=False):
    """All-to-all exchange: scatter ``input_tensor_list`` to every rank and
    gather the received tensors into ``output_tensor_list``.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    done.  No-op when this process is not part of ``group``.
    """
    if _rank_not_in_group(group):
        return
    opts = AllToAllOptions()
    _check_tensor_list(output_tensor_list, "output_tensor_list")
    _check_tensor_list(input_tensor_list, "input_tensor_list")
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.alltoall(output_tensor_list, input_tensor_list, opts)
    else:
        work = group.alltoall(output_tensor_list, input_tensor_list, opts)
    if async_op:
        return work
    else:
        work.wait()
def barrier(group=group.WORLD,
            async_op=False):
    """Synchronize all processes in ``group``.

    Returns a work handle when ``async_op`` is True; otherwise blocks until
    every rank has reached the barrier.  No-op when this process is not part
    of ``group``.
    """
    if _rank_not_in_group(group):
        return
    if group == GroupMember.WORLD:
        _check_default_pg()
        work = _default_pg.barrier()
    else:
        work = group.barrier()
    if async_op:
        return work
    else:
        work.wait()
def new_group(ranks=None, timeout=default_pg_timeout, backend=None):
    """Create a new process group over ``ranks`` (all ranks when None).

    All processes must call this (see the trailing ``barrier()``); members of
    the new group get the group object back, non-members get
    ``GroupMember.NON_GROUP_MEMBER`` via ``_new_process_group_helper``.

    Arguments:
        ranks: Global ranks to include; defaults to every rank in the default
            group.
        timeout: Operation timeout for the new group.
        backend: Backend for the new group; defaults to the default group's
            backend.

    Raises:
        RuntimeError: When the requested group is larger than the world, or a
            rank is out of range.
    """
    _check_default_pg()
    global _pg_group_ranks
    default_backend, default_store = _pg_map[_default_pg]
    global_rank = _default_pg.rank()
    global_world_size = _default_pg.size()
    if not backend:
        backend = default_backend
    if ranks is not None:
        ranks = sorted(ranks)
        group_world_size = len(ranks)
        if group_world_size > global_world_size:
            raise RuntimeError("the new group's world size should be less or "
                               "equal to the world size set by "
                               "init_process_group")
        # check ranks' sanity
        for rank in ranks:
            if rank < 0 or rank >= global_world_size:
                # Fix: dropped the duplicated "the" that straddled the two
                # concatenated literals ("within the the world_size").
                raise RuntimeError("The new group's rank should be within "
                                   "the world_size set by init_process_group")
        if global_rank in ranks:
            group_rank = ranks.index(global_rank)
        else:
            # This process is not a member of the new group.
            group_rank = None
    else:
        ranks = list(range(global_world_size))
        group_world_size = global_world_size
        group_rank = global_rank
    backend = Backend(backend)
    pg = _new_process_group_helper(group_world_size,
                                   group_rank,
                                   ranks,
                                   backend,
                                   default_store,
                                   timeout=timeout)
    # Create the global rank to group rank mapping
    _pg_group_ranks[pg] = {
        global_rank: group_rank
        for group_rank, global_rank in enumerate(ranks)
    }
    # barrier at the end to ensure that once we return from this method, all
    # process groups including global variables are updated correctly on all
    # ranks.
    barrier()
    return pg
| true | true |
f7f5e320d4652bbee83401635f5dc4e47758d938 | 1,035 | py | Python | src/app_defs.py | muteio/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | 1 | 2019-11-02T01:39:52.000Z | 2019-11-02T01:39:52.000Z | src/app_defs.py | muteio/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | null | null | null | src/app_defs.py | muteio/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | 1 | 2021-03-05T13:34:53.000Z | 2021-03-05T13:34:53.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2018-03
# Application identity strings.
APP_NAME_SHORT = 'GhostnodeTool'
APP_NAME_LONG = 'Ghostnode Tool'
# Name of the application's data directory (presumably created under the
# user's home/profile directory — confirm against the app's startup code).
APP_DATA_DIR_NAME = '.ghostnode-tool'
PROJECT_URL = 'https://github.com/nixplatform/ghostnode-tool'
# Transaction fee parameters (values are in duffs, per the constant names).
FEE_DUFF_PER_BYTE = 1
MIN_TX_FEE = 10000
# NOTE(review): presumably toggles a UI mode suited to taking screenshots.
SCREENSHOT_MODE = False
class HWType:
    """Identifiers for the supported hardware-wallet types."""
    trezor = 'TREZOR'
    keepkey = 'KEEPKEY'
    ledger_nano_s = 'LEDGERNANOS'

    @staticmethod
    def get_desc(hw_type):
        """Return the human-readable name for ``hw_type`` ('???' if unknown)."""
        descriptions = {
            HWType.trezor: 'Trezor',
            HWType.keepkey: 'KeepKey',
            HWType.ledger_nano_s: 'Ledger Nano S',
        }
        return descriptions.get(hw_type, '???')
def get_note_url(note_symbol):
    """
    Returns an URL to a project documentation page related to the note symbol passed as an argument.
    :param note_symbol: Symbol of the note, for example: DMT00001
    :return: URL
    """
    anchor = note_symbol.lower()
    return f'{PROJECT_URL}/blob/master/doc/notes.md#note-{anchor}'
| 25.243902 | 100 | 0.658937 |
APP_NAME_SHORT = 'GhostnodeTool'
APP_NAME_LONG = 'Ghostnode Tool'
APP_DATA_DIR_NAME = '.ghostnode-tool'
PROJECT_URL = 'https://github.com/nixplatform/ghostnode-tool'
FEE_DUFF_PER_BYTE = 1
MIN_TX_FEE = 10000
SCREENSHOT_MODE = False
class HWType:
trezor = 'TREZOR'
keepkey = 'KEEPKEY'
ledger_nano_s = 'LEDGERNANOS'
@staticmethod
def get_desc(hw_type):
if hw_type == HWType.trezor:
return 'Trezor'
elif hw_type == HWType.keepkey:
return 'KeepKey'
elif hw_type == HWType.ledger_nano_s:
return 'Ledger Nano S'
else:
return '???'
def get_note_url(note_symbol):
return PROJECT_URL + f'/blob/master/doc/notes.md#note-{note_symbol.lower()}'
| true | true |
f7f5e35656b2f14a25339f96dfd20edb5759f9e6 | 1,882 | py | Python | aalh_iit_buildings_007/merge-description-columns.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | aalh_iit_buildings_007/merge-description-columns.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | aalh_iit_buildings_007/merge-description-columns.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | from openpyxl import load_workbook
filename = 'aalh_iit_buildings_007.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 8
maximumcol = 8
minimumrow = 7
maximumrow = 519
iterationrow = 7
targetcol = 46
linkstring = 'Terms associated with the photograph are: '
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
print(iterationrow)
descriptiontest = ws.cell(row=iterationrow, column=minimumcol).value
if descriptiontest == None:
print('No description')
elif descriptiontest.endswith(','):
print(descriptiontest)
description1 = descriptiontest
description2 = description1[:-1]
description3 = description2 + '.'
ws.cell(row=iterationrow, column=minimumcol).value = description3
print(ws.cell(row=iterationrow, column=minimumcol).value)
print('Fixed comma')
for cell in row:
iitdescription = ws.cell(row=iterationrow, column=minimumcol).value
#print(iitdescription)
keywords = ws.cell(row=iterationrow, column=targetcol).value
print(keywords)
if iitdescription == None:
descriptionmerged = linkstring + keywords
descriptionfinal = descriptionmerged.replace("'", "'")
ws.cell(row=iterationrow, column=minimumcol).value = descriptionfinal
else:
descriptionmerged = iitdescription + ' ' + linkstring + keywords
descriptionfinal = descriptionmerged.replace("'", "'")
ws.cell(row=iterationrow, column=minimumcol).value = descriptionfinal
print(ws.cell(row=iterationrow, column=minimumcol).value)
iterationrow = iterationrow + 1
wb.save('aalh_iit_buildings_007.xlsx') | 41.822222 | 105 | 0.656217 | from openpyxl import load_workbook
filename = 'aalh_iit_buildings_007.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 8
maximumcol = 8
minimumrow = 7
maximumrow = 519
iterationrow = 7
targetcol = 46
linkstring = 'Terms associated with the photograph are: '
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
print(iterationrow)
descriptiontest = ws.cell(row=iterationrow, column=minimumcol).value
if descriptiontest == None:
print('No description')
elif descriptiontest.endswith(','):
print(descriptiontest)
description1 = descriptiontest
description2 = description1[:-1]
description3 = description2 + '.'
ws.cell(row=iterationrow, column=minimumcol).value = description3
print(ws.cell(row=iterationrow, column=minimumcol).value)
print('Fixed comma')
for cell in row:
iitdescription = ws.cell(row=iterationrow, column=minimumcol).value
keywords = ws.cell(row=iterationrow, column=targetcol).value
print(keywords)
if iitdescription == None:
descriptionmerged = linkstring + keywords
descriptionfinal = descriptionmerged.replace("'", "'")
ws.cell(row=iterationrow, column=minimumcol).value = descriptionfinal
else:
descriptionmerged = iitdescription + ' ' + linkstring + keywords
descriptionfinal = descriptionmerged.replace("'", "'")
ws.cell(row=iterationrow, column=minimumcol).value = descriptionfinal
print(ws.cell(row=iterationrow, column=minimumcol).value)
iterationrow = iterationrow + 1
wb.save('aalh_iit_buildings_007.xlsx') | true | true |
f7f5e37006435c4215de3866be56824b2f8d5101 | 23,397 | py | Python | tests/unit/test_speechManager/speechManagerTestHarness.py | marlon-sousa/nvda | 83738d7d9150fb379083eb3918e9c78c78610489 | [
"bzip2-1.0.6"
] | 19 | 2016-05-11T05:15:31.000Z | 2022-03-17T12:40:10.000Z | tests/unit/test_speechManager/speechManagerTestHarness.py | marlon-sousa/nvda | 83738d7d9150fb379083eb3918e9c78c78610489 | [
"bzip2-1.0.6"
] | 307 | 2015-08-27T11:22:33.000Z | 2022-03-29T10:43:34.000Z | tests/unit/test_speechManager/speechManagerTestHarness.py | marlon-sousa/nvda | 83738d7d9150fb379083eb3918e9c78c78610489 | [
"bzip2-1.0.6"
] | 14 | 2016-03-28T07:31:49.000Z | 2022-03-30T04:56:35.000Z | # A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2020 NV Access Limited
"""A test harness for interacting with the SpeechManager class."""
import typing
import unittest
from contextlib import contextmanager
from typing import (
Callable,
Tuple,
Union,
Optional,
List,
)
from unittest import mock
from unittest.mock import (
MagicMock,
)
from dataclasses import dataclass
import queueHandler
import speech.manager
from speech.commands import (
IndexCommand,
CallbackCommand,
BeepCommand,
WaveFileCommand,
EndUtteranceCommand,
BaseProsodyCommand,
RateCommand,
VolumeCommand,
PitchCommand,
ConfigProfileTriggerCommand,
)
from speech.types import _IndexT
# Position of a sequence in the harness' list of sequences sent to the speech manager.
_SentSequenceIndex = int
@dataclass
class ExpectedIndex:
	"""To simplify building tests (de-duplication of test data) , ExpectedIndexes are not sent to the speech
	manager, but represent indexes that is sent to the synth.
	"""
	expectedIndexCommandIndex: int

	def __eq__(self, other):
		"""Equal to an IndexCommand with the same index, or to another ExpectedIndex."""
		if isinstance(other, IndexCommand):
			otherIndex = other.index
		elif isinstance(other, ExpectedIndex):
			otherIndex = other.expectedIndexCommandIndex
		else:
			return False
		return otherIndex == self.expectedIndexCommandIndex
@dataclass
class ExpectedProsody:
	"""To simplify building (de-duplication of test data) tests, ExpectedProsody are not sent to the speech
	manager,
	but represent prosody
	commands that are sent to the synth. This may be as a result of resuming a previous utterance.
	"""
	expectedProsody: Union[
		PitchCommand,
		RateCommand,
		VolumeCommand
	]
	def __eq__(self, other):
		# Require the exact same command class first, so that e.g. a
		# PitchCommand never compares equal to a RateCommand.
		if type(self.expectedProsody) != type(other):
			return False
		if isinstance(other, BaseProsodyCommand):
			# Compare reprs as a stand-in for value equality of the commands.
			return repr(other) == repr(self.expectedProsody)
		return False
class SpeechManagerInteractions:
""" Track expected state and interactions with the speechManager.
SpeechManager has the following functions that external code interacts with. Currently only supports
setting / checking expectations within an expectation block. See L{SpeechManagerInteractions.expectation}
Inputs:
- SpeechManager.speak()
- SpeechManager.cancel()
- SpeechManager.removeCancelledSpeechCommands: Normally via eventHandler.executeEvent
- SpeechManager._onSynthIndexReached(): Normally via synthDriverHandler.synthIndexReached extensionPoint
- SpeechManager._onSynthDoneSpeaking(): Normally via synthDriverHandler.synthDoneSpeaking extensionPoint
- queueHandler.pumpAll(): handles all pending _onSynthIndexReached and _onSynthDoneSpeaking calls.
Outputs:
- synthDriverHandler.getSynth().speak(): Send speech to the synth.
- synthDriverHandler.getSynth().cancel(): Cancel current speech.
- CallbackCommand: Converted into index commands. When the index is reached (by the synth) the callback is
called after queueHandler.pumpAll is called.
The expectation method should be used as a context manager to assert the pre/post conditions on interactions
with the speech manager.
"""
	def __init__(self, testCase: unittest.TestCase):
		"""Install a mocked synth and create the SpeechManager under test.
		@param testCase: Used to run asserts
		"""
		self._testCase = testCase
		import synthDriverHandler
		#: speechManager needs to call to the synth
		self.synthMock = MagicMock()
		self.synthMock.speak.side_effect = self._sideEffect_synth_speak
		self.synthMock.cancel.side_effect = self._sideEffect_synth_cancel
		#: Used by CallbackCommands, called when their associated index is reached. Records the indexes.
		self._indexReachedCallback = MagicMock()
		self._indexReachedCallback.side_effect = self._sideEffect_callbackCommand
		#: SequenceIndexes we are awaiting to be sent to the synth
		self._awaitingSpeakCalls: List[_SentSequenceIndex] = []
		#: Number of cancel calls we are waiting for
		self._awaitingCancelCalls: int = 0
		#: Index and Callbacks for callback commands
		self._awaitingCallbackForIndex: List[Tuple[_IndexT, Optional[Callable[[], None]]]] = []
		#: Map of mocks to the number of times that we expect for them get called in this expect block.
		self._awaitingMockCalls: typing.Dict[MagicMock, int] = {}
		#: All sequence indexes already expected to be sent to the synth
		self.expectedState_speak: List[_SentSequenceIndex] = []
		#: Number of calls to cancel
		self.expectedState_cancelCallCount: int = 0
		#: Indexes that have been reached
		self.expectedState_indexReached: List[_IndexT] = []
		#: map mocks with the number of times we expect them to be called useful for extending this class
		self._expectedMockCallCount: typing.Dict[MagicMock, int] = {}
		#: Failure messages recorded by side effects that fired outside an expect block.
		self._unexpectedSideEffectFailureMessages: List[str] = []
		#: True while inside an L{expectation} context manager block.
		self._inExpectBlock: bool = False
		# Install the mock synth
		synthDriverHandler._curSynth = self.synthMock
		synthDriverHandler.getSynthInstance = mock.Mock(return_value=self.synthMock)
		#: Sequences sent to the speechManager so far.
		self._knownSequences = []
		#: Map ExpectedIndexes (IndexCommand) to knownSequences index
		self._speechManagerIndexes = {}
		# Source of unique index-command indexes handed out by the harness.
		self._indexCommandIndexes = iter(range(1, 1000))
		self._lastCommandIndex = 0
		self._testDebug_IndexReached: List[int] = []
		self.sManager = speech.manager.SpeechManager()
	def _sideEffect_synth_speak(self, sequence):
		"""Side effect for synthMock.speak: fail when speech is sent outside an expect block."""
		if not self._inExpectBlock:  # an ExpectBlock will verify state on exit
			failureMessage = (
				"Unexpected call to synth.speak. Calls should happen in an expect block."
			)
			# sometimes for code called by SpeechManager, exceptions caused by failed asserts are caught.
			# record them so that at any subsequent _verifyCurrentState calls they can be reported.
			self._unexpectedSideEffectFailureMessages.append(failureMessage)
			self._testCase.fail(failureMessage)
	def _sideEffect_synth_cancel(self):
		"""Side effect for synthMock.cancel: fail when cancel happens outside an expect block."""
		if not self._inExpectBlock:  # an ExpectBlock will verify state on exit
			failureMessage = (
				"Unexpected call to synth.cancel. Calls should happen in an expect block."
			)
			# sometimes for code called by SpeechManager, exceptions caused by failed asserts are caught.
			# record them so that at any subsequent _verifyCurrentState calls they can be reported.
			self._unexpectedSideEffectFailureMessages.append(failureMessage)
			self._testCase.fail(failureMessage)
	def _sideEffect_callbackCommand(self, index):
		"""Callback commands must be expected, so they can deliver new speech.
		Asserts can not be run inside callback commands, the exceptions they raise on failure are caught by
		speechManager; failures are therefore recorded for later reporting instead.
		"""
		if not self._inExpectBlock:  # an ExpectBlock will verify state on exit
			failureMessage = "Unexpected call to callbackCommand. Calls should happen in an expect block."
			# sometimes for code called by SpeechManager, exceptions caused by failed asserts are caught.
			# record them so that at any subsequent _verifyCurrentState calls they can be reported.
			self._unexpectedSideEffectFailureMessages.append(failureMessage)
			self._testCase.fail(failureMessage)
		# pop used because there may be multiple callbacks for a given operation.
		# We wish to verify the order of the callbacks.
		expectedIndex, sideEffect = self._awaitingCallbackForIndex[-1]
		if expectedIndex != index:
			failureMessage = "Unexpected index for callbackCommand. Calls should happen in an expect block."
			self._unexpectedSideEffectFailureMessages.append(failureMessage)
			return  # by returning early intentionally allow later asserts to fail
		if sideEffect:
			sideEffect()
		self.expectedState_indexReached.append(expectedIndex)
		self._awaitingCallbackForIndex.pop()
	@contextmanager
	def expectation(self):
		"""Ensures the pre/post conditions are as expected when exercising the SpeechManager.
		Verifies the tracked state on entry, then again (after folding in all
		pending expectations) on exit.  Must not be nested.
		"""
		self._testCase.assertFalse(self._inExpectBlock, msg="This is likely a logic error in the test.")
		self._inExpectBlock = True
		# Pre-condition: nothing unexpected happened since the last block.
		self._verifyCurrentState()
		yield
		# Post-condition: everything registered during the block has occurred.
		self.assertExpectedStateNowMet()
		self._inExpectBlock = False
	def assertExpectedStateNowMet(self):
		"""Fold all pending ("awaiting") expectations into the expected state, then verify it."""
		self._updateExpectedStateFromAwaiting_speak()
		self._updateExpectedStateFromAwaiting_cancel()
		self._updateExpectedStateFromAwaiting_callbacks()
		self._updateExpectedStateFromAwaiting_mocks()
		self._verifyCurrentState()
def _verifyCurrentState(self):
if self._unexpectedSideEffectFailureMessages:
self._testCase.fail(f"Unexpected calls: {self._unexpectedSideEffectFailureMessages!r}")
self._assertCurrentSpeechCallState()
self._assertIndexCallbackState()
self._assertCancelState()
self._assertMockCallsState()
pass
def _updateKnownSequences(self, seq) -> List[_SentSequenceIndex]:
"""Handle EndUtteranceCommands
Sequence gets split after the EndUtteranceCommand and two sequence numbers are returned.
"""
startOfUtteranceIndexes = set(
i + 1 for i, item in enumerate(seq)
if isinstance(item, EndUtteranceCommand)
)
startOfUtteranceIndexes.add(len(seq)) # ensure the last index is included
start = 0
seqNumbers = []
for nextStart in startOfUtteranceIndexes:
seqNumbers.append(len(self._knownSequences))
self._knownSequences.append(seq[start:nextStart])
start = nextStart
# Keep track of which sequence each "expectedIndex" belongs to.
# This allows us to verify that tests don't call "reached index" before the sequence it is part of
# has been sent to the synth. Essentially to prevent bugs in the tests.
for seqNumber in seqNumbers:
indexNumbers = [
speechItem.index
if not hasattr(speechItem, 'expectedIndexCommandIndex')
else speechItem.expectedIndexCommandIndex
for speechItem in self._knownSequences[seqNumber]
if hasattr(speechItem, 'expectedIndexCommandIndex')
or isinstance(speechItem, IndexCommand)
]
for index in indexNumbers:
self._speechManagerIndexes[index] = seqNumber
return seqNumbers
    def speak(
        self,
        seq: List[Union[speech.types.SequenceItemT, ExpectedProsody, ExpectedIndex, EndUtteranceCommand]],
        priority=speech.Spri.NORMAL
    ) -> Union[_SentSequenceIndex, List[_SentSequenceIndex]]:
        """Call SpeechManager.speak and track sequences used.
        @return: the recorded sequence number(s); a list only when EndUtteranceCommands split seq.
        """
        sequenceNumbers = self._updateKnownSequences(seq)
        self._filterAndSendSpeech(seq, priority)
        return sequenceNumbers if len(sequenceNumbers) > 1 else sequenceNumbers[0]
    def cancel(self):
        """Call SpeechManager.cancel"""
        self.sManager.cancel()
    def removeCancelledSpeechCommands(self):
        """Call SpeechManager.removeCancelledSpeechCommands"""
        self.sManager.removeCancelledSpeechCommands()
    def indexReached(self, index: _IndexT):
        """Call SpeechManager.indexReached
        @note SpeechManager requires a call to pumpAll for this to have any affect.
        """
        self._assertSpeechManagerKnowsAboutIndex(index)
        # recorded so expect_indexReachedCallback can give a helpful failure message later
        self._testDebug_IndexReached.append(index)
        self.sManager._onSynthIndexReached(self.synthMock, index)
    def doneSpeaking(self):
        """Call SpeechManager.doneSpeaking
        @note SpeechManager requires a call to pumpAll for this to have any affect
        """
        self.sManager._onSynthDoneSpeaking(self.synthMock)
    def pumpAll(self):
        """Call queueHandler.pumpAll
        @note This is required to process pending events (indexReached, doneSpeaking) for SpeechManager.
        """
        queueHandler.pumpAll()
    def create_CallBackCommand(self, expectedToBecomeIndex):
        """While a CallBackCommand could be created directly, this method augments it with the index number it
        is expected to be represented by when sent to the synth. This reduces the duplication of the test data.
        """
        self._assertStrictIndexOrder(expectedToBecomeIndex)
        cb = CallbackCommand(
            # default argument binds the current value, avoiding the late-binding closure pitfall
            lambda i=expectedToBecomeIndex: self._indexReachedCallback(i),
            name=f"indexCommandIndex: {expectedToBecomeIndex}"
        )
        cb.expectedIndexCommandIndex = expectedToBecomeIndex
        return cb
    def create_ExpectedIndex(self, expectedToBecomeIndex):
        """Creates a placeholder for an IndexCommand that should be created by SpeechManager.
        ExpectedIndexes are not passed to the SpeechManager. They act as a placeholder to verify what is sent
        to the synth. This is useful when you want to confirm that an index is created by speechManager
        """
        self._assertStrictIndexOrder(expectedToBecomeIndex)
        return ExpectedIndex(expectedToBecomeIndex)
    def create_ExpectedProsodyCommand(self, expectedProsody):
        """ExpectedProsodyCommands are not passed to the speechManager. They act as a placeholder to verify
        what is sent to the synth. This is useful when you want to confirm that speechManager recreates prosody
        effectively.
        """
        return ExpectedProsody(expectedProsody)
    def create_BeepCommand(self, hz, length, left=50, right=50, expectedToBecomeIndex=None):
        """BeepCommands get converted into IndexCommands by speechManager. The expectedToBecomeIndex argument
        allow us to track that.
        @note: the expectedToBecomeIndex is tested to be ordered, contiguous, and unique with respect to other
        indexed commands to help to prevent errors in the tests.
        """
        self._testCase.assertIsNotNone(
            expectedToBecomeIndex,
            "Did you forget to provide the 'expectedToBecomeIndex' argument?"
        )
        self._assertStrictIndexOrder(expectedToBecomeIndex)
        b = BeepCommand(hz, length, left, right)
        # consumed by _assertCurrentSpeechCallState to match against an ExpectedIndex placeholder
        b.expectedIndexCommandIndex = expectedToBecomeIndex
        return b
    def create_ConfigProfileTriggerCommand(self, trigger, enter=True, expectedToBecomeIndex=None):
        """ConfigProfileTriggerCommands get converted into IndexCommands by speechManager. The
        expectedToBecomeIndex argument allows tracking that.
        @note: the expectedToBecomeIndex is tested to be ordered, contiguous, and unique with respect to other
        indexed commands to help to prevent errors in the tests.
        """
        self._testCase.assertIsNotNone(
            expectedToBecomeIndex,
            "Did you forget to provide the 'expectedToBecomeIndex' argument?"
        )
        self._assertStrictIndexOrder(expectedToBecomeIndex)
        t = ConfigProfileTriggerCommand(trigger, enter)
        t.expectedIndexCommandIndex = expectedToBecomeIndex
        return t
    def create_WaveFileCommand(self, filename, expectedToBecomeIndex=None):
        """WaveFileCommands get converted into IndexCommands by speechManager. The expectedToBecomeIndex argument
        allows tracking that.
        @note: the expectedToBecomeIndex is tested to be ordered, contiguous, and unique with respect to other
        indexed commands to help to prevent errors in the tests.
        """
        self._testCase.assertIsNotNone(
            expectedToBecomeIndex,
            "Did you forget to provide the 'expectedToBecomeIndex' argument?"
        )
        self._assertStrictIndexOrder(expectedToBecomeIndex)
        w = WaveFileCommand(filename)
        w.expectedIndexCommandIndex = expectedToBecomeIndex
        return w
    def create_EndUtteranceCommand(self, expectedToBecomeIndex=None):
        """EndUtteranceCommand get converted into IndexCommands by speechManager. The expectedToBecomeIndex argument
        allow tracking that.
        @note: the expectedToBecomeIndex is tested to be ordered, contiguous, and unique with respect to other
        indexed commands to help to prevent errors in the tests.
        """
        self._testCase.assertIsNotNone(
            expectedToBecomeIndex,
            "Did you forget to provide the 'expectedToBecomeIndex' argument?"
        )
        self._assertStrictIndexOrder(expectedToBecomeIndex)
        e = EndUtteranceCommand()
        e.expectedIndexCommandIndex = expectedToBecomeIndex
        return e
    def expect_indexReachedCallback(
        self,
        forIndex: _IndexT,
        sideEffect: Optional[Callable[[], None]] = None
    ):
        """Expect that upon exiting the expectation block, forIndex will have been reached.
        If a side effect is required (such as speaking more text) this must be called before
        triggering the index reached code, in this case consider using pumpAllAndSendSpeechOnCallback.
        @param forIndex: index that must have been reached (already sent to the synth via speak).
        @param sideEffect: optional callable run from within the callback, before the entry is popped.
        """
        if not self._inExpectBlock:
            self._testCase.fail("Expectations should be set in a with expectation() block")
        if not (self._lastCommandIndex >= forIndex > 0):
            self._testCase.fail(
                f"Test Case error. Index {forIndex} not sent to synth yet,"
                f" ensure SpeechManagerInteractions.speak has already been called."
            )
        self._awaitingCallbackForIndex.append((forIndex, sideEffect))
        if forIndex not in self._testDebug_IndexReached:
            self._testCase.fail(
                f"IndexReached not yet called for {forIndex}."
                f" Check test for smi.indexReached({forIndex})"
                f" IndexReach called for the following: {self._testDebug_IndexReached!r}"
            )
        self._assertSpeechManagerKnowsAboutIndex(forIndex)
def _assertSpeechManagerKnowsAboutIndex(self, index):
if index not in self._speechManagerIndexes.keys():
self._testCase.fail(f"Index {index} is not one of the index commands sent to speech manager.")
seqNumber = self._speechManagerIndexes[index]
if seqNumber not in self.expectedState_speak: # ensure the index has been sent to the synth
self._testCase.fail(f"Index {index} not yet sent to the synth. This indicates an error in the test.")
def expect_synthCancel(self):
if not self._inExpectBlock:
self._testCase.fail("Expectations should be set in a with expectation() block")
self._awaitingCancelCalls = 1 + self._awaitingCancelCalls
def addMockCallMonitoring(self, monitorMocks: typing.List[mock.Mock]):
""" Allows the call count state for other arbitrary mock objects to be tracked.
@param monitorMocks: Mock objects to track the number of calls to
"""
for m in monitorMocks:
self._expectedMockCallCount[m] = 0
def expect_mockCall(self, m: mock.Mock):
""" Expect another call to the given Mock. The total number of expected calls to this mock is incremented.
@param m: Mock object to expect another call on.
"""
if not self._inExpectBlock:
self._testCase.fail("Expectations should be set in a with expectation() block")
self._awaitingMockCalls[m] = 1 + self._awaitingMockCalls.get(m, 0)
    def expect_synthSpeak(
        self,
        sequenceNumbers: Optional[Union[int, typing.Iterable[int]]] = None,
        sequence: Optional[List[Union[speech.types.SequenceItemT, ExpectedProsody, ExpectedIndex]]] = None,
    ):
        """Expect that the given sequence(s) will have been sent to the synth by the end of the block.
        Exactly one argument must be provided:
        @param sequenceNumbers: number(s) (as returned by speak) of already-known sequences.
        @param sequence: a literal sequence; it is registered as a new known sequence first.
        """
        isSpeechSpecified = sequence is not None
        areNumbersSpecified = sequenceNumbers is not None
        if (isSpeechSpecified and areNumbersSpecified) or not (isSpeechSpecified or areNumbersSpecified):
            raise ValueError("Exactly one argument should be provided.")
        if isSpeechSpecified:
            sequenceNumbers = self._updateKnownSequences(sequence)
        if isinstance(sequenceNumbers, int):
            if not self._inExpectBlock:
                self._testCase.fail("Expectations should be set in a with expectation() block")
            self._testCase.assertLess(
                sequenceNumbers,
                len(self._knownSequences),
                msg=f"Less than {sequenceNumbers} sequences have been sent to the synth (see calls to speak)"
            )
            self._awaitingSpeakCalls.append(sequenceNumbers)
        else:
            # an iterable of sequence numbers: recurse for each
            for i in sequenceNumbers:
                if isinstance(i, int):
                    self.expect_synthSpeak(i)
                else:
                    self._testCase.fail(
                        f"sequenceNumbers should be int or Iterable[int]. ArgType: {type(sequenceNumbers)}"
                    )
def _updateExpectedStateFromAwaiting_speak(self):
self.expectedState_speak.extend(self._awaitingSpeakCalls)
self._awaitingSpeakCalls.clear()
def _updateExpectedStateFromAwaiting_cancel(self):
self.expectedState_cancelCallCount = self.expectedState_cancelCallCount + self._awaitingCancelCalls
self._awaitingCancelCalls = 0
def _updateExpectedStateFromAwaiting_mocks(self):
for m, awaitCount in self._awaitingMockCalls.items():
e = self._expectedMockCallCount.get(m, 0)
self._expectedMockCallCount[m] = e + awaitCount
self._awaitingMockCalls.clear()
def _updateExpectedStateFromAwaiting_callbacks(self):
self.expectedState_indexReached.extend(
index for index, c in self._awaitingCallbackForIndex
)
self._awaitingCallbackForIndex.clear()
    def _assertIndexCallbackState(self):
        """Check CallbackCommand callbacks happened exactly for the expected indexes, in order."""
        expectedCalls = [mock.call(i) for i in self.expectedState_indexReached]
        self._testCase.assertEqual(
            len(expectedCalls),
            self._indexReachedCallback.call_count,
            msg=(
                f"Number of CallbackCommand callbacks not as expected."
                f"\nExpected: {expectedCalls}"
                f"\nGot: {self._indexReachedCallback.call_args_list}"
            )
        )
        self._indexReachedCallback.assert_has_calls(expectedCalls)
    def _assertCancelState(self):
        """Check synth.cancel was called exactly the expected number of times."""
        expectedCancelCallCount = self.expectedState_cancelCallCount
        self._testCase.assertEqual(
            expectedCancelCallCount,
            self.synthMock.cancel.call_count,
            msg=f"The number of calls to synth.cancel was not as expected. Expected {expectedCancelCallCount}"
        )
    def _assertMockCallsState(self):
        """Check each monitored mock was called exactly its expected number of times."""
        for m, e in self._expectedMockCallCount.items():
            self._testCase.assertEqual(
                e,
                m.call_count,
                msg=f"The number of calls to {m} was not as expected. Expected {e}"
            )
    def _assertCurrentSpeechCallState(self):
        """Check the synth received exactly the expected sequences, in order.
        Commands that SpeechManager converts to IndexCommands are compared via ExpectedIndex
        placeholders carrying the expectedIndexCommandIndex attached by the create_* helpers.
        """
        expectedSeqIndexes = self.expectedState_speak
        mockSpeak = self.synthMock.speak
        actualCallCount = mockSpeak.call_count
        self._testCase.assertEqual(
            len(expectedSeqIndexes),
            actualCallCount,
            msg=(
                f"Number of sequences sent to synth not as expected."
                f"\nExpected a total of {len(expectedSeqIndexes)}"
                f"\nThe index(es) of the expected sequences: {expectedSeqIndexes}"
                f"\nActual calls: {mockSpeak.call_args_list}"
            )
        )
        # Build (total) expected call list
        replaceWithExpectedIndexTypes = (
            CallbackCommand,
            BeepCommand,
            WaveFileCommand,
            EndUtteranceCommand,
            ConfigProfileTriggerCommand,
        )
        expectedCalls = []
        for s in expectedSeqIndexes:
            expectedSpeech = self._knownSequences[s]
            expectedSpeech = [
                x if not isinstance(x, replaceWithExpectedIndexTypes)
                else ExpectedIndex(x.expectedIndexCommandIndex)
                for x in expectedSpeech
            ]
            expectedCalls.append(mock.call(expectedSpeech))
        if expectedCalls:
            mockSpeak.assert_has_calls(expectedCalls)
    def pumpAllAndSendSpeechOnCallback(
        self,
        expectCallbackForIndex: int,
        expectedSendSequenceNumber: Union[int, List[int]],
        seq,
        priority=speech.Spri.NORMAL
    ):
        """Must be called in an 'expectation' block.
        Registers a side effect that speaks `seq` when the callback for `expectCallbackForIndex`
        fires, then pumps the queue so the pending indexReached event is processed.
        """
        def _lineReachedSideEffect():
            self._filterAndSendSpeech(seq, priority)
            actualSequenceNumber = self._updateKnownSequences(seq)
            actualSequenceNumber = actualSequenceNumber if len(actualSequenceNumber) > 1 else actualSequenceNumber[0]
            self._testCase.assertEqual(expectedSendSequenceNumber, actualSequenceNumber)
        self.expect_indexReachedCallback(expectCallbackForIndex, _lineReachedSideEffect)
        self.pumpAll()
        self._assertIndexCallbackState()
def _assertStrictIndexOrder(self, expectedToBecomeIndex):
indexCommandIndex = next(self._indexCommandIndexes)
self._lastCommandIndex = indexCommandIndex
self._testCase.assertEqual(
expectedToBecomeIndex,
indexCommandIndex,
msg=f"Did you forget to update the 'expectedToBecomeIndex' argument?"
)
def _filterAndSendSpeech(self, seq, priority):
filteredSpeech = [
x for x in seq
if not isinstance(x, (ExpectedIndex, ExpectedProsody)) # don't send types used for behaviour tracking.
]
self.sManager.speak(filteredSpeech, priority)
| 40.201031 | 111 | 0.76061 |
import typing
import unittest
from contextlib import contextmanager
from typing import (
Callable,
Tuple,
Union,
Optional,
List,
)
from unittest import mock
from unittest.mock import (
MagicMock,
)
from dataclasses import dataclass
import queueHandler
import speech.manager
from speech.commands import (
IndexCommand,
CallbackCommand,
BeepCommand,
WaveFileCommand,
EndUtteranceCommand,
BaseProsodyCommand,
RateCommand,
VolumeCommand,
PitchCommand,
ConfigProfileTriggerCommand,
)
from speech.types import _IndexT
_SentSequenceIndex = int
@dataclass
class ExpectedIndex:
    """Placeholder for an IndexCommand expected to be created by SpeechManager.
    Compares equal to an IndexCommand (or another ExpectedIndex) carrying the same index number.
    """
    # The index number the IndexCommand sent to the synth is expected to carry.
    expectedIndexCommandIndex: int
    def __eq__(self, other):
        if isinstance(other, IndexCommand):
            return other.index == self.expectedIndexCommandIndex
        elif isinstance(other, ExpectedIndex):
            return other.expectedIndexCommandIndex == self.expectedIndexCommandIndex
        return False
@dataclass
class ExpectedProsody:
    """Placeholder used to assert that SpeechManager sends an equivalent prosody command to the synth.
    Equality holds only for a prosody command of the exact same concrete type with an identical repr.
    """
    expectedProsody: Union[
        PitchCommand,
        RateCommand,
        VolumeCommand
    ]
    def __eq__(self, other):
        # Exact-type match is intentional (subclasses are distinct prosody kinds). Use an identity
        # test for the type objects rather than `!=` (flagged as E721 by linters); behavior is identical.
        if type(self.expectedProsody) is not type(other):
            return False
        if isinstance(other, BaseProsodyCommand):
            return repr(other) == repr(self.expectedProsody)
        return False
class SpeechManagerInteractions:
def __init__(self, testCase: unittest.TestCase):
self._testCase = testCase
import synthDriverHandler
self.synthMock = MagicMock()
self.synthMock.speak.side_effect = self._sideEffect_synth_speak
self.synthMock.cancel.side_effect = self._sideEffect_synth_cancel
self._indexReachedCallback = MagicMock()
self._indexReachedCallback.side_effect = self._sideEffect_callbackCommand
self._awaitingSpeakCalls: List[_SentSequenceIndex] = []
self._awaitingCancelCalls: int = 0
self._awaitingCallbackForIndex: List[Tuple[_IndexT, Optional[Callable[[], None]]]] = []
self._awaitingMockCalls: typing.Dict[MagicMock, int] = {}
self.expectedState_speak: List[_SentSequenceIndex] = []
self.expectedState_cancelCallCount: int = 0
self.expectedState_indexReached: List[_IndexT] = []
self._expectedMockCallCount: typing.Dict[MagicMock, int] = {}
self._unexpectedSideEffectFailureMessages = []
self._inExpectBlock = False
synthDriverHandler._curSynth = self.synthMock
synthDriverHandler.getSynthInstance = mock.Mock(return_value=self.synthMock)
self._knownSequences = []
self._speechManagerIndexes = {}
self._indexCommandIndexes = iter(range(1, 1000))
self._lastCommandIndex = 0
self._testDebug_IndexReached: List[int] = []
self.sManager = speech.manager.SpeechManager()
def _sideEffect_synth_speak(self, sequence):
if not self._inExpectBlock:
failureMessage = (
"Unexpected call to synth.speak. Calls should happen in an expect block."
)
self._unexpectedSideEffectFailureMessages.append(failureMessage)
self._testCase.fail(failureMessage)
def _sideEffect_synth_cancel(self):
if not self._inExpectBlock:
failureMessage = (
"Unexpected call to synth.cancel. Calls should happen in an expect block."
)
self._unexpectedSideEffectFailureMessages.append(failureMessage)
self._testCase.fail(failureMessage)
def _sideEffect_callbackCommand(self, index):
if not self._inExpectBlock:
failureMessage = "Unexpected call to callbackCommand. Calls should happen in an expect block."
self._unexpectedSideEffectFailureMessages.append(failureMessage)
self._testCase.fail(failureMessage)
expectedIndex, sideEffect = self._awaitingCallbackForIndex[-1]
if expectedIndex != index:
failureMessage = "Unexpected index for callbackCommand. Calls should happen in an expect block."
self._unexpectedSideEffectFailureMessages.append(failureMessage)
return
if sideEffect:
sideEffect()
self.expectedState_indexReached.append(expectedIndex)
self._awaitingCallbackForIndex.pop()
@contextmanager
def expectation(self):
self._testCase.assertFalse(self._inExpectBlock, msg="This is likely a logic error in the test.")
self._inExpectBlock = True
self._verifyCurrentState()
yield
self.assertExpectedStateNowMet()
self._inExpectBlock = False
def assertExpectedStateNowMet(self):
self._updateExpectedStateFromAwaiting_speak()
self._updateExpectedStateFromAwaiting_cancel()
self._updateExpectedStateFromAwaiting_callbacks()
self._updateExpectedStateFromAwaiting_mocks()
self._verifyCurrentState()
def _verifyCurrentState(self):
if self._unexpectedSideEffectFailureMessages:
self._testCase.fail(f"Unexpected calls: {self._unexpectedSideEffectFailureMessages!r}")
self._assertCurrentSpeechCallState()
self._assertIndexCallbackState()
self._assertCancelState()
self._assertMockCallsState()
pass
def _updateKnownSequences(self, seq) -> List[_SentSequenceIndex]:
startOfUtteranceIndexes = set(
i + 1 for i, item in enumerate(seq)
if isinstance(item, EndUtteranceCommand)
)
startOfUtteranceIndexes.add(len(seq))
start = 0
seqNumbers = []
for nextStart in startOfUtteranceIndexes:
seqNumbers.append(len(self._knownSequences))
self._knownSequences.append(seq[start:nextStart])
start = nextStart
# has been sent to the synth. Essentially to prevent bugs in the tests.
for seqNumber in seqNumbers:
indexNumbers = [
speechItem.index
if not hasattr(speechItem, 'expectedIndexCommandIndex')
else speechItem.expectedIndexCommandIndex
for speechItem in self._knownSequences[seqNumber]
if hasattr(speechItem, 'expectedIndexCommandIndex')
or isinstance(speechItem, IndexCommand)
]
for index in indexNumbers:
self._speechManagerIndexes[index] = seqNumber
return seqNumbers
def speak(
self,
seq: List[Union[speech.types.SequenceItemT, ExpectedProsody, ExpectedIndex, EndUtteranceCommand]],
priority=speech.Spri.NORMAL
) -> Union[_SentSequenceIndex, List[_SentSequenceIndex]]:
sequenceNumbers = self._updateKnownSequences(seq)
self._filterAndSendSpeech(seq, priority)
return sequenceNumbers if len(sequenceNumbers) > 1 else sequenceNumbers[0]
def cancel(self):
self.sManager.cancel()
def removeCancelledSpeechCommands(self):
self.sManager.removeCancelledSpeechCommands()
def indexReached(self, index: _IndexT):
self._assertSpeechManagerKnowsAboutIndex(index)
self._testDebug_IndexReached.append(index)
self.sManager._onSynthIndexReached(self.synthMock, index)
def doneSpeaking(self):
self.sManager._onSynthDoneSpeaking(self.synthMock)
def pumpAll(self):
queueHandler.pumpAll()
def create_CallBackCommand(self, expectedToBecomeIndex):
self._assertStrictIndexOrder(expectedToBecomeIndex)
cb = CallbackCommand(
lambda i=expectedToBecomeIndex: self._indexReachedCallback(i),
name=f"indexCommandIndex: {expectedToBecomeIndex}"
)
cb.expectedIndexCommandIndex = expectedToBecomeIndex
return cb
def create_ExpectedIndex(self, expectedToBecomeIndex):
self._assertStrictIndexOrder(expectedToBecomeIndex)
return ExpectedIndex(expectedToBecomeIndex)
def create_ExpectedProsodyCommand(self, expectedProsody):
return ExpectedProsody(expectedProsody)
def create_BeepCommand(self, hz, length, left=50, right=50, expectedToBecomeIndex=None):
self._testCase.assertIsNotNone(
expectedToBecomeIndex,
"Did you forget to provide the 'expectedToBecomeIndex' argument?"
)
self._assertStrictIndexOrder(expectedToBecomeIndex)
b = BeepCommand(hz, length, left, right)
b.expectedIndexCommandIndex = expectedToBecomeIndex
return b
def create_ConfigProfileTriggerCommand(self, trigger, enter=True, expectedToBecomeIndex=None):
self._testCase.assertIsNotNone(
expectedToBecomeIndex,
"Did you forget to provide the 'expectedToBecomeIndex' argument?"
)
self._assertStrictIndexOrder(expectedToBecomeIndex)
t = ConfigProfileTriggerCommand(trigger, enter)
t.expectedIndexCommandIndex = expectedToBecomeIndex
return t
def create_WaveFileCommand(self, filename, expectedToBecomeIndex=None):
self._testCase.assertIsNotNone(
expectedToBecomeIndex,
"Did you forget to provide the 'expectedToBecomeIndex' argument?"
)
self._assertStrictIndexOrder(expectedToBecomeIndex)
w = WaveFileCommand(filename)
w.expectedIndexCommandIndex = expectedToBecomeIndex
return w
def create_EndUtteranceCommand(self, expectedToBecomeIndex=None):
self._testCase.assertIsNotNone(
expectedToBecomeIndex,
"Did you forget to provide the 'expectedToBecomeIndex' argument?"
)
self._assertStrictIndexOrder(expectedToBecomeIndex)
e = EndUtteranceCommand()
e.expectedIndexCommandIndex = expectedToBecomeIndex
return e
def expect_indexReachedCallback(
self,
forIndex: _IndexT,
sideEffect: Optional[Callable[[], None]] = None
):
if not self._inExpectBlock:
self._testCase.fail("Expectations should be set in a with expectation() block")
if not (self._lastCommandIndex >= forIndex > 0):
self._testCase.fail(
f"Test Case error. Index {forIndex} not sent to synth yet,"
f" ensure SpeechManagerInteractions.speak has already been called."
)
self._awaitingCallbackForIndex.append((forIndex, sideEffect))
if forIndex not in self._testDebug_IndexReached:
self._testCase.fail(
f"IndexReached not yet called for {forIndex}."
f" Check test for smi.indexReached({forIndex})"
f" IndexReach called for the following: {self._testDebug_IndexReached!r}"
)
self._assertSpeechManagerKnowsAboutIndex(forIndex)
def _assertSpeechManagerKnowsAboutIndex(self, index):
if index not in self._speechManagerIndexes.keys():
self._testCase.fail(f"Index {index} is not one of the index commands sent to speech manager.")
seqNumber = self._speechManagerIndexes[index]
if seqNumber not in self.expectedState_speak: # ensure the index has been sent to the synth
self._testCase.fail(f"Index {index} not yet sent to the synth. This indicates an error in the test.")
def expect_synthCancel(self):
if not self._inExpectBlock:
self._testCase.fail("Expectations should be set in a with expectation() block")
self._awaitingCancelCalls = 1 + self._awaitingCancelCalls
def addMockCallMonitoring(self, monitorMocks: typing.List[mock.Mock]):
for m in monitorMocks:
self._expectedMockCallCount[m] = 0
def expect_mockCall(self, m: mock.Mock):
if not self._inExpectBlock:
self._testCase.fail("Expectations should be set in a with expectation() block")
self._awaitingMockCalls[m] = 1 + self._awaitingMockCalls.get(m, 0)
def expect_synthSpeak(
self,
sequenceNumbers: Optional[Union[int, typing.Iterable[int]]] = None,
sequence: Optional[List[Union[speech.types.SequenceItemT, ExpectedProsody, ExpectedIndex]]] = None,
):
isSpeechSpecified = sequence is not None
areNumbersSpecified = sequenceNumbers is not None
if (isSpeechSpecified and areNumbersSpecified) or not (isSpeechSpecified or areNumbersSpecified):
raise ValueError("Exactly one argument should be provided.")
if isSpeechSpecified:
sequenceNumbers = self._updateKnownSequences(sequence)
if isinstance(sequenceNumbers, int):
if not self._inExpectBlock:
self._testCase.fail("Expectations should be set in a with expectation() block")
self._testCase.assertLess(
sequenceNumbers,
len(self._knownSequences),
msg=f"Less than {sequenceNumbers} sequences have been sent to the synth (see calls to speak)"
)
self._awaitingSpeakCalls.append(sequenceNumbers)
else:
for i in sequenceNumbers:
if isinstance(i, int):
self.expect_synthSpeak(i)
else:
self._testCase.fail(
f"sequenceNumbers should be int or Iterable[int]. ArgType: {type(sequenceNumbers)}"
)
def _updateExpectedStateFromAwaiting_speak(self):
self.expectedState_speak.extend(self._awaitingSpeakCalls)
self._awaitingSpeakCalls.clear()
def _updateExpectedStateFromAwaiting_cancel(self):
self.expectedState_cancelCallCount = self.expectedState_cancelCallCount + self._awaitingCancelCalls
self._awaitingCancelCalls = 0
def _updateExpectedStateFromAwaiting_mocks(self):
for m, awaitCount in self._awaitingMockCalls.items():
e = self._expectedMockCallCount.get(m, 0)
self._expectedMockCallCount[m] = e + awaitCount
self._awaitingMockCalls.clear()
def _updateExpectedStateFromAwaiting_callbacks(self):
self.expectedState_indexReached.extend(
index for index, c in self._awaitingCallbackForIndex
)
self._awaitingCallbackForIndex.clear()
def _assertIndexCallbackState(self):
expectedCalls = [mock.call(i) for i in self.expectedState_indexReached]
self._testCase.assertEqual(
len(expectedCalls),
self._indexReachedCallback.call_count,
msg=(
f"Number of CallbackCommand callbacks not as expected."
f"\nExpected: {expectedCalls}"
f"\nGot: {self._indexReachedCallback.call_args_list}"
)
)
self._indexReachedCallback.assert_has_calls(expectedCalls)
def _assertCancelState(self):
expectedCancelCallCount = self.expectedState_cancelCallCount
self._testCase.assertEqual(
expectedCancelCallCount,
self.synthMock.cancel.call_count,
msg=f"The number of calls to synth.cancel was not as expected. Expected {expectedCancelCallCount}"
)
def _assertMockCallsState(self):
for m, e in self._expectedMockCallCount.items():
self._testCase.assertEqual(
e,
m.call_count,
msg=f"The number of calls to {m} was not as expected. Expected {e}"
)
def _assertCurrentSpeechCallState(self):
expectedSeqIndexes = self.expectedState_speak
mockSpeak = self.synthMock.speak
actualCallCount = mockSpeak.call_count
self._testCase.assertEqual(
len(expectedSeqIndexes),
actualCallCount,
msg=(
f"Number of sequences sent to synth not as expected."
f"\nExpected a total of {len(expectedSeqIndexes)}"
f"\nThe index(es) of the expected sequences: {expectedSeqIndexes}"
f"\nActual calls: {mockSpeak.call_args_list}"
)
)
# Build (total) expected call list
replaceWithExpectedIndexTypes = (
CallbackCommand,
BeepCommand,
WaveFileCommand,
EndUtteranceCommand,
ConfigProfileTriggerCommand,
)
expectedCalls = []
for s in expectedSeqIndexes:
expectedSpeech = self._knownSequences[s]
expectedSpeech = [
x if not isinstance(x, replaceWithExpectedIndexTypes)
else ExpectedIndex(x.expectedIndexCommandIndex)
for x in expectedSpeech
]
expectedCalls.append(mock.call(expectedSpeech))
if expectedCalls:
mockSpeak.assert_has_calls(expectedCalls)
def pumpAllAndSendSpeechOnCallback(
self,
expectCallbackForIndex: int,
expectedSendSequenceNumber: Union[int, List[int]],
seq,
priority=speech.Spri.NORMAL
):
def _lineReachedSideEffect():
self._filterAndSendSpeech(seq, priority)
actualSequenceNumber = self._updateKnownSequences(seq)
actualSequenceNumber = actualSequenceNumber if len(actualSequenceNumber) > 1 else actualSequenceNumber[0]
self._testCase.assertEqual(expectedSendSequenceNumber, actualSequenceNumber)
self.expect_indexReachedCallback(expectCallbackForIndex, _lineReachedSideEffect)
self.pumpAll()
self._assertIndexCallbackState()
def _assertStrictIndexOrder(self, expectedToBecomeIndex):
indexCommandIndex = next(self._indexCommandIndexes)
self._lastCommandIndex = indexCommandIndex
self._testCase.assertEqual(
expectedToBecomeIndex,
indexCommandIndex,
msg=f"Did you forget to update the 'expectedToBecomeIndex' argument?"
)
def _filterAndSendSpeech(self, seq, priority):
filteredSpeech = [
x for x in seq
if not isinstance(x, (ExpectedIndex, ExpectedProsody)) # don't send types used for behaviour tracking.
]
self.sManager.speak(filteredSpeech, priority)
| true | true |
f7f5e41be16ac5586b75c17018d99a852bcb4239 | 3,204 | py | Python | flowvision/datasets/semeion.py | ZiqiuChi/vision | b5ecf5431f7767d8920a69005e7822185ad31f81 | [
"BSD-3-Clause"
] | null | null | null | flowvision/datasets/semeion.py | ZiqiuChi/vision | b5ecf5431f7767d8920a69005e7822185ad31f81 | [
"BSD-3-Clause"
] | null | null | null | flowvision/datasets/semeion.py | ZiqiuChi/vision | b5ecf5431f7767d8920a69005e7822185ad31f81 | [
"BSD-3-Clause"
] | null | null | null | """
"""
import os
import os.path
from typing import Any, Callable, Optional, Tuple
import numpy as np
from PIL import Image
from .utils import download_url, check_integrity
from .vision import VisionDataset
class SEMEION(VisionDataset):
    r"""`SEMEION <http://archive.ics.uci.edu/ml/datasets/semeion+handwritten+digit>`_ Dataset.
    Args:
        root (string): Root directory of dataset where directory
            ``semeion.py`` exists.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    url = "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data"
    filename = "semeion.data"
    md5_checksum = "cb545d371d2ce14ec121470795a77432"
    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = True,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError(
                "Dataset not found or corrupted."
                + " You can use download=True to download it"
            )
        raw = np.loadtxt(os.path.join(self.root, self.filename))
        # First 256 columns are 16x16 pixel intensities in [0, 1];
        # scale to 8 bit unsigned integers (white == 255).
        self.data = np.reshape((raw[:, :256] * 255).astype("uint8"), (-1, 16, 16))
        # Remaining columns are a one-hot label encoding; take the hot column index.
        self.labels = np.nonzero(raw[:, 256:])[1]
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        target = int(self.labels[index])
        # Return a PIL Image so this dataset is consistent with all other datasets.
        image = Image.fromarray(self.data[index], mode="L")
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return image, target
    def __len__(self) -> int:
        return len(self.data)
    def _check_integrity(self) -> bool:
        """Return True if the data file is present and its md5 checksum matches."""
        return check_integrity(os.path.join(self.root, self.filename), self.md5_checksum)
    def download(self) -> None:
        """Fetch the data file into root, unless an intact copy is already present."""
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        download_url(self.url, self.root, self.filename, self.md5_checksum)
| 32.363636 | 94 | 0.611735 | import os
import os.path
from typing import Any, Callable, Optional, Tuple
import numpy as np
from PIL import Image
from .utils import download_url, check_integrity
from .vision import VisionDataset
class SEMEION(VisionDataset):
url = (
"http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data"
)
filename = "semeion.data"
md5_checksum = "cb545d371d2ce14ec121470795a77432"
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super(SEMEION, self).__init__(
root, transform=transform, target_transform=target_transform
)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted."
+ " You can use download=True to download it"
)
fp = os.path.join(self.root, self.filename)
data = np.loadtxt(fp)
ta = (data[:, :256] * 255).astype("uint8")
self.data = np.reshape(self.data, (-1, 16, 16))
self.labels = np.nonzero(data[:, 256:])[1]
def __getitem__(self, index: int) -> Tuple[Any, Any]:
img, target = self.data[index], int(self.labels[index])
img = Image.fromarray(img, mode="L")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
if self._check_integrity():
print("Files already downloaded and verified")
return
root = self.root
download_url(self.url, root, self.filename, self.md5_checksum)
| true | true |
f7f5e5203e7d356dd965ea1b26c523bcfb81a50d | 2,341 | py | Python | backend/server/processor/lime_bike_socket_feed_processor.py | shiv12095/realtimeviz | ee2bf10b5f9467212f9a9ce8957d80456ebd0259 | [
"MIT"
] | 1 | 2021-03-03T13:54:15.000Z | 2021-03-03T13:54:15.000Z | backend/server/processor/lime_bike_socket_feed_processor.py | shiv12095/realtimeviz | ee2bf10b5f9467212f9a9ce8957d80456ebd0259 | [
"MIT"
] | null | null | null | backend/server/processor/lime_bike_socket_feed_processor.py | shiv12095/realtimeviz | ee2bf10b5f9467212f9a9ce8957d80456ebd0259 | [
"MIT"
] | 1 | 2021-03-03T13:59:48.000Z | 2021-03-03T13:59:48.000Z | from utils import Constants, TimeUtils
from adapter import KafkaAdapter, DBAdapter
from datetime import datetime
import pytz
class LimeBikeSocketFeedProcessor:
    """Consumes analyzed Lime bike trips from Kafka and reshapes each record
    into the flat dict the socket feed emits."""

    def __init__(self):
        self.kafka_adapter = KafkaAdapter()
        self.kafka_consumer_group_id = "lime_bike_socket_feed"
        self.kafka_consumer = self.kafka_adapter.get_consumer(
            topic=Constants.KAFKA_LIME_BIKE_TRIP_ANALYZE_TOPIC,
            group_id=self.kafka_consumer_group_id,
            auto_offset_reset="latest"
        )

    def process(self, response):
        """Flatten one Kafka trip message into the socket-feed payload."""
        trip = response.value
        start_time = trip['start_time']
        end_time = trip['end_time']
        start_local = self.__to_local(start_time)
        end_local = self.__to_local(end_time)
        return {
            "vehicle_id": trip['vehicle_id'],
            "start_time": start_time,
            "start_time_day_w": start_local.weekday(),
            "start_time_day_m": start_local.day,
            "start_time_h": start_local.hour,
            "end_time": end_time,
            "end_time_day_w": end_local.weekday(),
            "end_time_day_m": end_local.day,
            "end_time_h": end_local.hour,
            "duration": trip['duration'],
            "distance": 0,
            "stops": 0,
            "type": trip['vehicle_type'],
            "src": trip['src'],
            "dest": trip['dest'],
            "timestamps": [],
            "route": [],
            "start_time_str": TimeUtils.format_timestamp(start_time),
            "end_time_str": TimeUtils.format_timestamp(end_time),
        }

    def get_kafka_consumer(self):
        return self.kafka_consumer

    def get_socket_group(self):
        return Constants.SOCKET_LIME_BIKE_FEED_GROUP

    def __to_local(self, timestamp):
        # Epoch seconds -> wall-clock datetime in New York local time.
        return datetime.fromtimestamp(
            timestamp, tz=pytz.timezone('America/New_York'))

    def __str__(self):
        return "LimeBikeSocketFeedProcessor"
| 36.578125 | 92 | 0.648868 | from utils import Constants, TimeUtils
from adapter import KafkaAdapter, DBAdapter
from datetime import datetime
import pytz
class LimeBikeSocketFeedProcessor:
def __init__(self):
self.kafka_adapter = KafkaAdapter()
self.kafka_consumer_group_id = "lime_bike_socket_feed"
self.kafka_consumer = self.kafka_adapter.get_consumer(
topic=Constants.KAFKA_LIME_BIKE_TRIP_ANALYZE_TOPIC,
group_id=self.kafka_consumer_group_id,
auto_offset_reset="latest"
)
def process(self, response):
trip = response.value
start_time = trip['start_time']
end_time = trip['end_time']
val = {
"vehicle_id": trip['vehicle_id'],
"start_time": start_time,
"start_time_day_w": self.__get_day_of_week(start_time),
"start_time_day_m": self.__get_day_of_month(start_time),
"start_time_h": self.__get_hour_of_day(start_time),
"end_time": end_time,
"end_time_day_w": self.__get_day_of_week(end_time),
"end_time_day_m": self.__get_day_of_month(end_time),
"end_time_h": self.__get_hour_of_day(end_time),
"duration": trip['duration'],
"distance": 0,
"stops": 0,
"type": trip['vehicle_type'],
"src": trip['src'],
"dest": trip['dest'],
"timestamps": [],
"route": [],
"start_time_str": TimeUtils.format_timestamp(start_time),
"end_time_str": TimeUtils.format_timestamp(end_time)
}
return val
def get_kafka_consumer(self):
return self.kafka_consumer
def get_socket_group(self):
return Constants.SOCKET_LIME_BIKE_FEED_GROUP
def __get_hour_of_day(self, timestamp):
time_tuple = datetime.fromtimestamp(timestamp, tz=pytz.timezone('America/New_York'))
return time_tuple.hour
def __get_day_of_month(self, timestamp):
time_tuple = datetime.fromtimestamp(timestamp, tz=pytz.timezone('America/New_York'))
return time_tuple.day
def __get_day_of_week(self, timestamp):
time_tuple = datetime.fromtimestamp(timestamp, tz=pytz.timezone('America/New_York'))
return time_tuple.weekday()
def __str__(self):
return "LimeBikeSocketFeedProcessor"
| true | true |
f7f5e6266694fe969ab87c4e1b4118116cf5b28b | 14,139 | py | Python | turbo_transformers/python/turbo_transformers/layers/modeling_distillbert.py | akirallL/TurboTransformers | 7ca851947b1ae3b08122c45cea0ceac48ee04c3b | [
"BSD-3-Clause"
] | 1,147 | 2020-04-24T06:45:50.000Z | 2022-03-30T15:33:16.000Z | turbo_transformers/python/turbo_transformers/layers/modeling_distillbert.py | akirallL/TurboTransformers | 7ca851947b1ae3b08122c45cea0ceac48ee04c3b | [
"BSD-3-Clause"
] | 140 | 2020-04-25T10:54:15.000Z | 2022-03-11T08:13:11.000Z | turbo_transformers/python/turbo_transformers/layers/modeling_distillbert.py | akirallL/TurboTransformers | 7ca851947b1ae3b08122c45cea0ceac48ee04c3b | [
"BSD-3-Clause"
] | 151 | 2020-04-24T06:49:01.000Z | 2022-03-21T13:48:54.000Z | # Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
try:
# `turbo_transformers_cxxd` is the name on debug mode
import turbo_transformers.turbo_transformers_cxxd as cxx
except ImportError:
import turbo_transformers.turbo_transformers_cxx as cxx
from typing import Union, Optional, Sequence
import torch
from .return_type import convert_returns_as_type, ReturnType
from .utils import try_convert, convert2tt_tensor, to_param_dict_convert_tt, to_param_dict, create_empty_if_none, AnyTensor
from transformers.models.distilbert.modeling_distilbert import DistilBertConfig
from transformers.models.distilbert.modeling_distilbert import MultiHeadSelfAttention as TorchDistilMultiHeadSelfAttention
from transformers.models.distilbert.modeling_distilbert import FFN as TorchDistilFFN
from transformers.models.distilbert.modeling_distilbert import TransformerBlock as TorchDistilTransformerBlock
from transformers.models.distilbert.modeling_distilbert import Transformer as TorchDistilTransformer
from transformers.models.distilbert.modeling_distilbert import Embeddings as TorchDistrilEmbeddings
from transformers.models.distilbert.modeling_distilbert import DistilBertModel as TorchDistilBertModel
from torch import nn
import numpy as np
__all__ = [
'DistillBertAttention', 'DistrillFFN', 'DistrillTransformerBlock',
'DistrillTransformer', 'DistilBertModel'
]
class DistillBertAttention(cxx.BertAttention):
    """Turbo (C++) multi-head self-attention for DistilBERT.

    Wraps the native ``cxx.BertAttention`` kernel; ``from_torch`` packs the
    separate q/k/v linear layers of a HuggingFace ``MultiHeadSelfAttention``
    plus the post-attention LayerNorm into the fused-weight layout the
    kernel expects.
    """

    def __call__(self,
                 input_tensor: AnyTensor,
                 attention_mask: Optional[AnyTensor] = None,
                 head_mask: Optional[AnyTensor] = None,
                 output_attentions: Optional[bool] = False,
                 return_type: Optional[ReturnType] = None,
                 is_trans_weight: Optional[cxx.Tensor] = False):
        """Run fused self-attention.

        Returns ``(context,)`` or ``(context, attn_probs)`` when
        *output_attentions* is set. ``head_mask`` is not supported.
        """
        assert (head_mask is None)
        # attention mask is different from BERT: DistilBERT passes a 2-D
        # (batch, seq) mask, so broadcast it to (batch, 1, 1, seq) and map
        # masked positions to a large negative additive bias.
        if attention_mask is not None:
            attention_mask = attention_mask[:, None, None, :]
            attention_mask = (
                1.0 - attention_mask) * -10000.0  # -float("inf") would cause NaN
        input_tensor = try_convert(input_tensor)
        attention_mask = try_convert(create_empty_if_none(attention_mask))
        context_layer = cxx.Tensor.create_empty()
        attn_probs = cxx.Tensor.create_empty()
        super(DistillBertAttention,
              self).__call__(input_tensor, attention_mask, context_layer,
                             attn_probs, is_trans_weight)
        outputs = (convert_returns_as_type(context_layer, return_type),
                   convert_returns_as_type(attn_probs, ReturnType.TORCH)
                   ) if output_attentions else (convert_returns_as_type(
                       context_layer, return_type), )
        return outputs

    @staticmethod
    def from_torch(attention: TorchDistilMultiHeadSelfAttention,
                   layernorm: nn.LayerNorm):
        """Build a turbo attention layer from HF weights + its LayerNorm."""
        params = {k: v for k, v in attention.named_parameters()}
        layernorm_params = {k: v for k, v in layernorm.named_parameters()}

        with torch.no_grad():
            # merge q_lin.weight, k_lin.weight and v_lin.weight together
            # (then transpose) as the fused qkv.weight
            qkv_weight = torch.clone(
                torch.t(
                    torch.cat((params['q_lin.weight'], params['k_lin.weight'],
                               params['v_lin.weight']),
                              0).contiguous()).contiguous())
            qkv_bias = torch.cat((params['q_lin.bias'], params['k_lin.bias'],
                                  params['v_lin.bias']), 0).contiguous()

            # torch stores Linear weights transposed relative to the kernel.
            output_weight = torch.clone(
                torch.t(params['out_lin.weight']).contiguous())
            att = DistillBertAttention(
                convert2tt_tensor(qkv_weight), convert2tt_tensor(qkv_bias),
                convert2tt_tensor(output_weight),
                convert2tt_tensor(params['out_lin.bias']),
                convert2tt_tensor(layernorm_params['weight']),
                convert2tt_tensor(layernorm_params['bias']), attention.n_heads)

        return att
class DistrillFFN(cxx.DistrillFFN):
    """Turbo (C++) feed-forward block for DistilBERT (lin1 -> lin2), fused
    with the output LayerNorm."""

    def __call__(
            self,
            input_tensor: AnyTensor,
            return_type: Optional[ReturnType] = None,
            is_trans_weight: Optional[bool] = True,  # Intel 61xx: True is faster
            output: Optional[cxx.Tensor] = None):
        """Apply the FFN to *input_tensor*; reuses *output* if supplied."""
        input_tensor = try_convert(input_tensor)
        output = create_empty_if_none(output)
        super(DistrillFFN, self).__call__(input_tensor, output,
                                          is_trans_weight)
        return convert_returns_as_type(output, return_type)

    @staticmethod
    def from_torch(ffn: TorchDistilFFN,
                   layernorm: nn.LayerNorm,
                   is_trans_weight: Optional[bool] = True):
        """Build a turbo FFN from HF ``FFN`` weights + the output LayerNorm.

        *is_trans_weight* selects whether the kernel consumes the weights in
        torch's (transposed) layout or pre-transposed copies.
        """
        ffn_params = {k: v for k, v in ffn.named_parameters()}
        layernorm_params = {k: v for k, v in layernorm.named_parameters()}

        # Note that torch's weights of linear layer is transposed
        if is_trans_weight:
            w_1 = convert2tt_tensor(ffn_params['lin1.weight'])
            w_2 = convert2tt_tensor(ffn_params['lin2.weight'])
        else:
            w_1 = convert2tt_tensor(
                torch.clone(torch.t(ffn_params['lin1.weight']).contiguous()))
            w_2 = convert2tt_tensor(
                torch.clone(torch.t(ffn_params['lin2.weight']).contiguous()))

        with torch.no_grad():
            ffn = DistrillFFN(w_1, convert2tt_tensor(ffn_params['lin1.bias']),
                              w_2, convert2tt_tensor(ffn_params['lin2.bias']),
                              convert2tt_tensor(layernorm_params['weight']),
                              convert2tt_tensor(layernorm_params['bias']))
        return ffn
class DistrillTransformerBlock:
    """One DistilBERT encoder layer: turbo self-attention followed by the
    turbo feed-forward block."""

    def __init__(self, attn: DistillBertAttention, ffn: DistrillFFN):
        self.attention = attn
        self.ffn = ffn

    def __call__(self,
                 hidden_states: AnyTensor,
                 attention_mask: Optional[torch.Tensor] = None,
                 head_mask: Optional[torch.Tensor] = None,
                 output_attentions=False,
                 return_type: Optional[ReturnType] = None):
        """Run attention + FFN; returns ``(ffn_output,)`` or
        ``(attn_weights, ffn_output)`` when *output_attentions* is set."""
        attn_result = self.attention(try_convert(hidden_states),
                                     attention_mask,
                                     head_mask,
                                     output_attentions=output_attentions,
                                     return_type=ReturnType.turbo_transformers)
        if output_attentions:
            attn_out, attn_weights = attn_result
        else:
            attn_out = attn_result[0]
        ffn_out = self.ffn(attn_out)
        if output_attentions:
            return (attn_weights, ffn_out)
        return (ffn_out, )

    @staticmethod
    def from_torch(layer: TorchDistilTransformerBlock):
        """Build a turbo block from a HuggingFace ``TransformerBlock``."""
        attn = DistillBertAttention.from_torch(layer.attention,
                                               layer.sa_layer_norm)
        ffn = DistrillFFN.from_torch(layer.ffn, layer.output_layer_norm)
        return DistrillTransformerBlock(attn, ffn)
class DistrillTransformer:
    """Stack of turbo DistilBERT layers (counterpart of HF ``Transformer``)."""

    def __init__(self, blocks: Sequence[DistrillTransformerBlock]):
        self.blocks = blocks

    def __call__(self,
                 hidden_states: AnyTensor,
                 attention_mask: Optional[AnyTensor] = None,
                 head_mask: Optional[AnyTensor] = None,
                 output_attentions: Optional[bool] = False,
                 output_hidden_states: Optional[bool] = False,
                 return_type: Optional[ReturnType] = ReturnType.TORCH):
        """Run every layer in sequence.

        Returns ``(last_hidden_state[, all_hidden_states][, all_attentions])``
        following the HuggingFace ``Transformer.forward`` convention.
        """
        all_hidden_states = ()
        all_attentions = ()
        hidden_states = try_convert(hidden_states)
        for l in self.blocks:
            layer_outputs = l(hidden_states=hidden_states,
                              attention_mask=attention_mask,
                              output_attentions=output_attentions,
                              return_type=ReturnType.turbo_transformers)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (
                    convert_returns_as_type(hidden_states, ReturnType.TORCH), )
            # BUGFIX: each layer returns (ffn_output,) or, with
            # output_attentions, (attn_weights, ffn_output).  The new hidden
            # state is always the LAST element; indexing [0] (as before)
            # would propagate the attention weights as hidden states whenever
            # output_attentions=True.  HF's Transformer uses [-1] / [0] too.
            hidden_states = layer_outputs[-1]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[0], )
        outputs = (hidden_states, )
        # Add last layer
        if output_hidden_states:
            # TODO(jiaruifang) two return values would share the same memory
            # space, which is not supported in dlpack, so the last
            # hidden_state is not appended to all_hidden_states; callers
            # should use outputs[0] instead.
            # all_hidden_states = all_hidden_states + (convert_returns_as_type(hidden_states, ReturnType.TORCH),)
            outputs = outputs + (all_hidden_states, )
        if output_attentions:
            outputs = outputs + (all_attentions, )
        return outputs

    @staticmethod
    def from_torch(transform: TorchDistilTransformer):
        """Convert every layer of a HuggingFace ``Transformer`` to turbo."""
        blocks = [
            DistrillTransformerBlock.from_torch(l) for l in transform.layer
        ]
        return DistrillTransformer(blocks)
class DistilBertModel:
    """DistilBERT inference model with two interchangeable backends.

    ``backend="turbo"``: torch embeddings followed by the turbo transformer
    stack.  ``backend="onnxrt"``: a prepared onnxruntime model handles the
    whole forward pass (the first constructor argument is then the ONNX
    model, not the embedding module).
    """

    def __init__(self,
                 embeddings_onnxmodel_variant,
                 transformer: DistrillTransformer,
                 backend="turbo"):
        # First argument is overloaded: torch embeddings for "turbo",
        # a prepared onnxruntime model for "onnxrt" (transformer unused).
        if backend == "turbo":
            self.embeddings = embeddings_onnxmodel_variant
            self.transformer = transformer
            self.backend = "turbo"
        elif backend == "onnxrt":
            self.onnxmodel = embeddings_onnxmodel_variant
            self.backend = "onnxrt"

    def __call__(self,
                 input_ids: AnyTensor,
                 attention_masks: Optional[AnyTensor] = None,
                 token_type_ids: Optional[AnyTensor] = None,
                 position_ids: Optional[AnyTensor] = None,
                 head_mask: Optional[AnyTensor] = None,
                 inputs_embeds: Optional[AnyTensor] = None,
                 output_attentions: Optional[bool] = None,
                 output_hidden_states: Optional[bool] = None,
                 return_type: Optional[ReturnType] = None):
        """Forward pass.

        Only ``input_ids``/``attention_masks`` are consumed here; the other
        keyword arguments exist for HF signature compatibility.  Returns the
        transformer outputs (turbo) or a list of torch tensors (onnxrt).
        """
        if self.backend == "onnxrt":
            # onnxruntime consumes numpy arrays; default to an all-ones mask.
            if attention_masks is None:
                attention_masks = np.ones(input_ids.size(), dtype=np.int64)
            else:
                attention_masks = attention_masks.cpu().numpy()
            data = [input_ids.cpu().numpy(), attention_masks]
            outputs = self.onnxmodel.run(inputs=data)
            # Move results back onto the caller's device as torch tensors.
            for idx, item in enumerate(outputs):
                outputs[idx] = torch.tensor(item, device=input_ids.device)
            return outputs
        elif self.backend == "turbo":
            # torch part
            inputs_embeds = self.embeddings(input_ids)  # (bs, seq_length, dim)
            inputs_embeds = try_convert(inputs_embeds)
            # turbo part
            transformer_outputs = self.transformer(
                hidden_states=inputs_embeds,
                attention_mask=attention_masks,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_type=return_type)
            return transformer_outputs

    @staticmethod
    def from_torch(model: TorchDistilBertModel, backend="turbo"):
        """Build a DistilBertModel from a torch DistilBERT model.

        :param model: a torch DistilBert model
        :param backend: "turbo" or "onnxrt"
        Move the model to GPU before calling this function.
        """
        if backend == "turbo":
            transformer = DistrillTransformer.from_torch(model.transformer)
            return DistilBertModel(model.embeddings, transformer, "turbo")
        elif backend == "onnxrt":
            import onnx
            import onnxruntime.backend
            device = model.device
            if 'cuda' in device.type and torch.cuda.is_available():
                use_gpu = True
            else:
                use_gpu = False
            # Dummy (2, 32) batch used only to trace the ONNX export; both
            # axes are declared dynamic below.
            inputs = {
                'input_ids':
                torch.randint(32, [2, 32], dtype=torch.long).to(
                    device),  # list of numerical ids for the tokenised text
                'attention_mask':
                torch.ones([2, 32],
                           dtype=torch.long).to(device),  # dummy list of ones
            }
            onnx_model_path = "/tmp/temp_turbo_onnx.model"
            with open(onnx_model_path, 'wb') as outf:
                torch.onnx.export(
                    model=model,
                    args=(inputs['input_ids'], inputs['attention_mask']
                          ),  # model input (or a tuple for multiple inputs)
                    f=outf,
                    input_names=['input_ids', 'attention_mask'],
                    output_names=['output'],
                    dynamic_axes={
                        'input_ids': [0, 1],
                        'attention_mask': [0, 1]
                    })
            onnx_model = onnx.load_model(f=onnx_model_path)
            onnx_model = onnxruntime.backend.prepare(
                model=onnx_model,
                device='GPU' if use_gpu else "CPU",
                graph_optimization_level=onnxruntime.GraphOptimizationLevel.
                ORT_ENABLE_ALL)
            return DistilBertModel(onnx_model, None, "onnxrt")
| 44.885714 | 123 | 0.614117 |
try:
import turbo_transformers.turbo_transformers_cxxd as cxx
except ImportError:
import turbo_transformers.turbo_transformers_cxx as cxx
from typing import Union, Optional, Sequence
import torch
from .return_type import convert_returns_as_type, ReturnType
from .utils import try_convert, convert2tt_tensor, to_param_dict_convert_tt, to_param_dict, create_empty_if_none, AnyTensor
from transformers.models.distilbert.modeling_distilbert import DistilBertConfig
from transformers.models.distilbert.modeling_distilbert import MultiHeadSelfAttention as TorchDistilMultiHeadSelfAttention
from transformers.models.distilbert.modeling_distilbert import FFN as TorchDistilFFN
from transformers.models.distilbert.modeling_distilbert import TransformerBlock as TorchDistilTransformerBlock
from transformers.models.distilbert.modeling_distilbert import Transformer as TorchDistilTransformer
from transformers.models.distilbert.modeling_distilbert import Embeddings as TorchDistrilEmbeddings
from transformers.models.distilbert.modeling_distilbert import DistilBertModel as TorchDistilBertModel
from torch import nn
import numpy as np
__all__ = [
'DistillBertAttention', 'DistrillFFN', 'DistrillTransformerBlock',
'DistrillTransformer', 'DistilBertModel'
]
class DistillBertAttention(cxx.BertAttention):
def __call__(self,
input_tensor: AnyTensor,
attention_mask: Optional[AnyTensor] = None,
head_mask: Optional[AnyTensor] = None,
output_attentions: Optional[bool] = False,
return_type: Optional[ReturnType] = None,
is_trans_weight: Optional[cxx.Tensor] = False):
assert (head_mask is None)
if attention_mask is not None:
attention_mask = attention_mask[:, None, None, :]
attention_mask = (
1.0 - attention_mask) * -10000.0
input_tensor = try_convert(input_tensor)
attention_mask = try_convert(create_empty_if_none(attention_mask))
context_layer = cxx.Tensor.create_empty()
attn_probs = cxx.Tensor.create_empty()
super(DistillBertAttention,
self).__call__(input_tensor, attention_mask, context_layer,
attn_probs, is_trans_weight)
outputs = (convert_returns_as_type(context_layer, return_type),
convert_returns_as_type(attn_probs, ReturnType.TORCH)
) if output_attentions else (convert_returns_as_type(
context_layer, return_type), )
return outputs
@staticmethod
def from_torch(attention: TorchDistilMultiHeadSelfAttention,
layernorm: nn.LayerNorm):
params = {k: v for k, v in attention.named_parameters()}
layernorm_params = {k: v for k, v in layernorm.named_parameters()}
with torch.no_grad():
qkv_weight = torch.clone(
torch.t(
torch.cat((params['q_lin.weight'], params['k_lin.weight'],
params['v_lin.weight']),
0).contiguous()).contiguous())
qkv_bias = torch.cat((params['q_lin.bias'], params['k_lin.bias'],
params['v_lin.bias']), 0).contiguous()
output_weight = torch.clone(
torch.t(params['out_lin.weight']).contiguous())
att = DistillBertAttention(
convert2tt_tensor(qkv_weight), convert2tt_tensor(qkv_bias),
convert2tt_tensor(output_weight),
convert2tt_tensor(params['out_lin.bias']),
convert2tt_tensor(layernorm_params['weight']),
convert2tt_tensor(layernorm_params['bias']), attention.n_heads)
return att
class DistrillFFN(cxx.DistrillFFN):
def __call__(
self,
input_tensor: AnyTensor,
return_type: Optional[ReturnType] = None,
is_trans_weight: Optional[bool] = True,
output: Optional[cxx.Tensor] = None):
input_tensor = try_convert(input_tensor)
output = create_empty_if_none(output)
super(DistrillFFN, self).__call__(input_tensor, output,
is_trans_weight)
return convert_returns_as_type(output, return_type)
@staticmethod
def from_torch(ffn: TorchDistilFFN,
layernorm: nn.LayerNorm,
is_trans_weight: Optional[bool] = True):
ffn_params = {k: v for k, v in ffn.named_parameters()}
layernorm_params = {k: v for k, v in layernorm.named_parameters()}
if is_trans_weight:
w_1 = convert2tt_tensor(ffn_params['lin1.weight'])
w_2 = convert2tt_tensor(ffn_params['lin2.weight'])
else:
w_1 = convert2tt_tensor(
torch.clone(torch.t(ffn_params['lin1.weight']).contiguous()))
w_2 = convert2tt_tensor(
torch.clone(torch.t(ffn_params['lin2.weight']).contiguous()))
with torch.no_grad():
ffn = DistrillFFN(w_1, convert2tt_tensor(ffn_params['lin1.bias']),
w_2, convert2tt_tensor(ffn_params['lin2.bias']),
convert2tt_tensor(layernorm_params['weight']),
convert2tt_tensor(layernorm_params['bias']))
return ffn
class DistrillTransformerBlock:
def __init__(self, attn: DistillBertAttention, ffn: DistrillFFN):
self.attention = attn
self.ffn = ffn
def __call__(self,
hidden_states: AnyTensor,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions=False,
return_type: Optional[ReturnType] = None):
hidden_states = try_convert(hidden_states)
sa_output = self.attention(hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
return_type=ReturnType.turbo_transformers)
if output_attentions:
sa_output, sa_weights = sa_output
else:
sa_output = sa_output[0]
ffn_output = self.ffn(sa_output)
output = (ffn_output, )
if output_attentions:
output = (sa_weights, ) + output
return output
@staticmethod
def from_torch(layer: TorchDistilTransformerBlock):
return DistrillTransformerBlock(
DistillBertAttention.from_torch(layer.attention,
layer.sa_layer_norm),
DistrillFFN.from_torch(layer.ffn, layer.output_layer_norm))
class DistrillTransformer:
def __init__(self, blocks: Sequence[DistrillTransformerBlock]):
self.blocks = blocks
def __call__(self,
hidden_states: AnyTensor,
attention_mask: Optional[AnyTensor] = None,
head_mask: Optional[AnyTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_type: Optional[ReturnType] = ReturnType.TORCH):
all_hidden_states = ()
all_attentions = ()
hidden_states = try_convert(hidden_states)
for l in self.blocks:
layer_outputs = l(hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
return_type=ReturnType.turbo_transformers)
if output_hidden_states:
all_hidden_states = all_hidden_states + (
convert_returns_as_type(hidden_states, ReturnType.TORCH), )
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1], )
# outputs = (convert_returns_as_type(hidden_states, return_type), )
outputs = (hidden_states, )
# Add last layer
if output_hidden_states:
# TODO(jiaruifang)two return value use the same memory space, that is not supported in dlpack.
# So we do not append the last hidden_state at the buttom of all_hidden_states,
# User should use outputs[0] if necessary
# all_hidden_states = all_hidden_states + (convert_returns_as_type(hidden_states, ReturnType.TORCH),)
pass
if output_hidden_states:
outputs = outputs + (all_hidden_states, )
if output_attentions:
outputs = outputs + (all_attentions, )
return outputs
@staticmethod
def from_torch(transform: TorchDistilTransformer):
blocks = [
DistrillTransformerBlock.from_torch(l) for l in transform.layer
]
return DistrillTransformer(blocks)
class DistilBertModel:
def __init__(self,
embeddings_onnxmodel_variant,
transformer: DistrillTransformer,
backend="turbo"):
if backend == "turbo":
self.embeddings = embeddings_onnxmodel_variant
self.transformer = transformer
self.backend = "turbo"
elif backend == "onnxrt":
self.onnxmodel = embeddings_onnxmodel_variant
self.backend = "onnxrt"
def __call__(self,
input_ids: AnyTensor,
attention_masks: Optional[AnyTensor] = None,
token_type_ids: Optional[AnyTensor] = None,
position_ids: Optional[AnyTensor] = None,
head_mask: Optional[AnyTensor] = None,
inputs_embeds: Optional[AnyTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_type: Optional[ReturnType] = None):
if self.backend == "onnxrt":
if attention_masks is None:
attention_masks = np.ones(input_ids.size(), dtype=np.int64)
else:
attention_masks = attention_masks.cpu().numpy()
data = [input_ids.cpu().numpy(), attention_masks]
outputs = self.onnxmodel.run(inputs=data)
for idx, item in enumerate(outputs):
outputs[idx] = torch.tensor(item, device=input_ids.device)
return outputs
elif self.backend == "turbo":
# torch part
inputs_embeds = self.embeddings(input_ids) # (bs, seq_length, dim)
inputs_embeds = try_convert(inputs_embeds)
# turbo part
transformer_outputs = self.transformer(
hidden_states=inputs_embeds,
attention_mask=attention_masks,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_type=return_type)
return transformer_outputs
@staticmethod
def from_torch(model: TorchDistilBertModel, backend="turbo"):
if backend == "turbo":
transformer = DistrillTransformer.from_torch(model.transformer)
return DistilBertModel(model.embeddings, transformer, "turbo")
elif backend == "onnxrt":
import onnx
import onnxruntime.backend
device = model.device
if 'cuda' in device.type and torch.cuda.is_available():
use_gpu = True
else:
use_gpu = False
inputs = {
'input_ids':
torch.randint(32, [2, 32], dtype=torch.long).to(
device), # list of numerical ids for the tokenised text
'attention_mask':
torch.ones([2, 32],
dtype=torch.long).to(device), # dummy list of ones
}
onnx_model_path = "/tmp/temp_turbo_onnx.model"
with open(onnx_model_path, 'wb') as outf:
torch.onnx.export(
model=model,
args=(inputs['input_ids'], inputs['attention_mask']
), # model input (or a tuple for multiple inputs)
f=outf,
input_names=['input_ids', 'attention_mask'],
output_names=['output'],
dynamic_axes={
'input_ids': [0, 1],
'attention_mask': [0, 1]
})
onnx_model = onnx.load_model(f=onnx_model_path)
onnx_model = onnxruntime.backend.prepare(
model=onnx_model,
device='GPU' if use_gpu else "CPU",
graph_optimization_level=onnxruntime.GraphOptimizationLevel.
ORT_ENABLE_ALL)
return DistilBertModel(onnx_model, None, "onnxrt")
| true | true |
f7f5e694b4c51be159b3dae1821689d04ef5e36d | 6,013 | py | Python | compositional-rl-benchmark/composition/spinningup_training/train_compositional_ppo_smallscale.py | collassubmission91/CompoSuite-Code | ac544efb68a11ed8a483b0932975c4949f0cec90 | [
"MIT"
] | null | null | null | compositional-rl-benchmark/composition/spinningup_training/train_compositional_ppo_smallscale.py | collassubmission91/CompoSuite-Code | ac544efb68a11ed8a483b0932975c4949f0cec90 | [
"MIT"
] | null | null | null | compositional-rl-benchmark/composition/spinningup_training/train_compositional_ppo_smallscale.py | collassubmission91/CompoSuite-Code | ac544efb68a11ed8a483b0932975c4949f0cec90 | [
"MIT"
] | null | null | null | from email.policy import default
import numpy as np
import argparse
import composition
import os
import json
import torch
from spinup.algos.pytorch.ppo.compositional_core import CompositionalMLPActorCritic
from spinup.algos.pytorch.ppo.ppo import ppo
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.mpi_tools import proc_id, num_procs
def parse_args():
    """Parse CLI args and derive the per-process task assignment.

    The raw ``--task-id`` encodes (seed, axis): ``task_id % 3`` is the seed
    and ``task_id // 3`` selects which compositional axis is held fixed.
    Each MPI process then draws its own task index from a shared random
    64-task subset and decodes it into (robot, object, objective, obstacle).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-dir', default='spinningup_training/logs')
    parser.add_argument('--load-dir', default=None)
    parser.add_argument('--gridsearch-id', type=int, default=-1)
    parser.add_argument('--task-id', type=int, default=-1)
    parser.add_argument('--hid', type=int, default=64)
    parser.add_argument('--l', type=int, default=2)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--seed', '-s', type=int, default=4)
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--steps', type=int, default=16000)
    parser.add_argument('--epochs', type=int, default=625)
    parser.add_argument('--exp-name', type=str, default='ppo')
    parser.add_argument('--clip', type=float, default=0.2)
    parser.add_argument('--pi-lr', type=float, default=1e-4)
    parser.add_argument('--vf-lr', type=float, default=1e-4)
    parser.add_argument('--pi-iters', type=int, default=128)
    parser.add_argument('--vf-iters', type=int, default=128)
    parser.add_argument('--target-kl', type=float, default=0.02)
    parser.add_argument('--ent-coef', type=float, default=0.02)
    parser.add_argument('--log-std-init', type=float, default=0.)
    parser.add_argument('--controller', type=str, default="joint")
    parser.add_argument('--robot', type=str, default="IIWA")
    parser.add_argument('--object', type=str, default="Hollowbox")
    parser.add_argument('--obstacle', type=str, default=None)
    parser.add_argument('--task', type=str, default="PickPlace")
    parser.add_argument('--horizon', type=int, default=500)
    parser.add_argument('--large-net', action='store_true')

    args = parser.parse_args()

    # Seed is derived from the raw task id so that all processes share one
    # seed and therefore draw the SAME 64-task subset below.
    args.seed = args.task_id % 3
    np.random.seed(args.seed)
    task_list = np.random.choice(64, num_procs(), replace=False)
    # Which compositional axis gets restricted for this run.
    axis_indicator = args.task_id // 3
    # Each MPI rank trains the task at its own slot in the shared list.
    args.task_id = int(task_list[proc_id()])

    _robots = ["IIWA", "Jaco", "Kinova3", "Panda"]
    _objects = ["Box", "Dumbbell", "Plate", "Hollowbox"]
    _objectives = ["PickPlace", "Push", "Shelf", "Trashcan"]
    _obstacles = ["None", "GoalWall", "ObjectDoor", "ObjectWall"]

    # Fix one axis to a single element; the remaining 1*4*4*4 = 64 element
    # grid matches the 0..63 task indices drawn above.
    if axis_indicator == 0:
        _robots = ["IIWA"]
    elif axis_indicator == 1:
        _objects = ["Hollowbox"]
    elif axis_indicator == 2:
        _objectives = ["PickPlace"]
    else:
        _obstacles = ["None"]

    # Decode the flat task index into coordinates on the restricted grid.
    idx = np.unravel_index(args.task_id, (len(_robots), len(_objects), len(_objectives), len(_obstacles)))
    args.robot = _robots[idx[0]]
    args.object = _objects[idx[1]]
    args.task = _objectives[idx[2]]
    args.obstacle = _obstacles[idx[3]]

    args.exp_name = 'MTL_{}'.format(len(task_list))
    args.data_dir = args.data_dir + "_64tasks_axis" + str(axis_indicator) + "_s" + str(args.seed)
    return args
def main():
    """Configure determinism, dump the run config, and launch PPO training
    with the compositional actor-critic on this process's assigned task."""
    # Deterministic cuDNN + single-threaded torch for reproducible MPI runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.set_num_threads(1)
    args = parse_args()

    # Persist this rank's full argument set next to the experiment logs.
    os.makedirs(os.path.join(args.data_dir, args.exp_name), exist_ok=True)
    with open(os.path.join(args.data_dir, args.exp_name, 'args_{}.json'.format(proc_id())), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    logger_kwargs = setup_logger_kwargs(
        args.exp_name, data_dir=args.data_dir)

    # Per-module hidden sizes: (obstacle, object, goal, robot) modules.
    if args.large_net:
        hidden_sizes = ((64,), (64, 64), (128, 128, 128), (128, 128, 128))
    else:
        hidden_sizes = ((32,), (32, 32), (64, 64, 64), (64, 64, 64))

    ac_kwargs = {
        # 'hidden_sizes': [args.hid]*args.l,
        'log_std_init': args.log_std_init,
        'hidden_sizes': hidden_sizes,
        # Modules consume matching observation slices and feed forward in a
        # chain at the given interface depths.
        'module_names': ['obstacle_id', 'object_id', 'subtask_id', 'robot_id'],
        'module_input_names': ['obstacle-state', 'object-state', 'goal-state', 'robot0_proprio-state'],
        'interface_depths': [-1, 1, 2, 3] ,
        'graph_structure': [[0], [1], [2], [3]],
    }

    # Optionally resume from a previously saved state dict.
    checkpoint = None
    if args.load_dir is not None:
        checkpoint = torch.load(os.path.join(args.load_dir, 'pyt_save', 'state_dicts.pt'))

    ppo(lambda: composition.make(
        args.robot, args.object, args.obstacle, args.task, args.controller, args.horizon, use_task_id_obs=True), actor_critic=CompositionalMLPActorCritic,
        ac_kwargs=ac_kwargs, seed=args.seed, gamma=args.gamma, steps_per_epoch=args.steps, epochs=args.epochs, clip_ratio=args.clip,
        pi_lr=args.pi_lr, vf_lr=args.vf_lr, train_pi_iters=args.pi_iters, train_v_iters=args.vf_iters, target_kl=args.target_kl,
        logger_kwargs=logger_kwargs, max_ep_len=args.horizon, ent_coef=args.ent_coef, log_per_proc=True, checkpoint=checkpoint)
if __name__ == '__main__':
main()
'''
obs = 14
obj = 14
goal = 17
robot = 32
task-id = 17
(small)
obs: 32 (14 + 1) = 480
obj: 32 (14 + 1) + 32 (64 + 1) = 2560
goal: 64 (17 + 1) + 64 (64 + 1) + 64 (96 + 1) = 11520
robot: 64 (32 + 1) + 64 (64 + 1) + 64 (64 + 1) + 1 (128 + 1) = 10561
= 25121
(large)
obs: 64 (14 + 1) = 960
obj: 64 (14 + 1) + 64 (128 + 1) = 9216
goal: 128 (17 + 1) + 128 (128 + 1) + 128 (192 + 1) = 43520
robot: 128 (32 + 1) + 128 (128 + 1) + 128 (128 + 1) + 1 (256 + 1) = 37505
= 91201
original: 256 (94 + 1) + 256 (256 + 1) + 1 (256 + 1) = 90369
'''
| 39.300654 | 154 | 0.610677 | from email.policy import default
import numpy as np
import argparse
import composition
import os
import json
import torch
from spinup.algos.pytorch.ppo.compositional_core import CompositionalMLPActorCritic
from spinup.algos.pytorch.ppo.ppo import ppo
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.mpi_tools import proc_id, num_procs
def parse_args():
    """Parse CLI flags and derive the per-process task assignment.

    The raw ``--task-id`` is split into a random seed (``task_id % 3``) and an
    axis indicator (``task_id // 3``) that pins one dimension of the
    robot/object/objective/obstacle grid, leaving a 1x4x4x4 = 64-cell grid.
    Each MPI process then picks one of 64 task indices (sampled without
    replacement) and unravels it into concrete robot/object/task/obstacle
    names.  Returns the mutated argparse Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-dir', default='spinningup_training/logs')
    parser.add_argument('--load-dir', default=None)
    parser.add_argument('--gridsearch-id', type=int, default=-1)
    parser.add_argument('--task-id', type=int, default=-1)
    parser.add_argument('--hid', type=int, default=64)
    parser.add_argument('--l', type=int, default=2)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--seed', '-s', type=int, default=4)
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--steps', type=int, default=16000)
    parser.add_argument('--epochs', type=int, default=625)
    parser.add_argument('--exp-name', type=str, default='ppo')
    parser.add_argument('--clip', type=float, default=0.2)
    parser.add_argument('--pi-lr', type=float, default=1e-4)
    parser.add_argument('--vf-lr', type=float, default=1e-4)
    parser.add_argument('--pi-iters', type=int, default=128)
    parser.add_argument('--vf-iters', type=int, default=128)
    parser.add_argument('--target-kl', type=float, default=0.02)
    parser.add_argument('--ent-coef', type=float, default=0.02)
    parser.add_argument('--log-std-init', type=float, default=0.)
    parser.add_argument('--controller', type=str, default="joint")
    parser.add_argument('--robot', type=str, default="IIWA")
    parser.add_argument('--object', type=str, default="Hollowbox")
    parser.add_argument('--obstacle', type=str, default=None)
    parser.add_argument('--task', type=str, default="PickPlace")
    parser.add_argument('--horizon', type=int, default=500)
    parser.add_argument('--large-net', action='store_true')
    args = parser.parse_args()
    # NOTE: the CLI --seed value is overwritten; the seed is derived from
    # --task-id so that the three seeds per axis come from task_id % 3.
    args.seed = args.task_id % 3
    np.random.seed(args.seed)
    # All MPI ranks seed identically just above, so every rank draws the
    # same task_list; each rank then takes the entry at its own proc_id().
    task_list = np.random.choice(64, num_procs(), replace=False)
    axis_indicator = args.task_id // 3
    args.task_id = int(task_list[proc_id()])
    _robots = ["IIWA", "Jaco", "Kinova3", "Panda"]
    _objects = ["Box", "Dumbbell", "Plate", "Hollowbox"]
    _objectives = ["PickPlace", "Push", "Shelf", "Trashcan"]
    _obstacles = ["None", "GoalWall", "ObjectDoor", "ObjectWall"]
    # Pin exactly one axis to a single value (the remaining grid has
    # 1*4*4*4 = 64 cells, matching the 64 task indices drawn above).
    if axis_indicator == 0:
        _robots = ["IIWA"]
    elif axis_indicator == 1:
        _objects = ["Hollowbox"]
    elif axis_indicator == 2:
        _objectives = ["PickPlace"]
    else:
        _obstacles = ["None"]
    # Map the flat task index into coordinates of the (possibly pinned) grid.
    idx = np.unravel_index(args.task_id, (len(_robots), len(_objects), len(_objectives), len(_obstacles)))
    args.robot = _robots[idx[0]]
    args.object = _objects[idx[1]]
    args.task = _objectives[idx[2]]
    args.obstacle = _obstacles[idx[3]]
    args.exp_name = 'MTL_{}'.format(len(task_list))
    args.data_dir = args.data_dir + "_64tasks_axis" + str(axis_indicator) + "_s" + str(args.seed)
    return args
def main():
    """Configure torch for determinism, dump args, and launch PPO training."""
    # Deterministic cuDNN kernels and a single CPU thread keep runs
    # reproducible across processes.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.set_num_threads(1)
    args = parse_args()
    os.makedirs(os.path.join(args.data_dir, args.exp_name), exist_ok=True)
    # Each MPI rank writes its own resolved args for later inspection.
    with open(os.path.join(args.data_dir, args.exp_name, 'args_{}.json'.format(proc_id())), 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    logger_kwargs = setup_logger_kwargs(
        args.exp_name, data_dir=args.data_dir)
    # One hidden-size tuple per module (obstacle, object, goal, robot);
    # --large-net doubles the widths.
    if args.large_net:
        hidden_sizes = ((64,), (64, 64), (128, 128, 128), (128, 128, 128))
    else:
        hidden_sizes = ((32,), (32, 32), (64, 64, 64), (64, 64, 64))
    ac_kwargs = {
        'log_std_init': args.log_std_init,
        'hidden_sizes': hidden_sizes,
        'module_names': ['obstacle_id', 'object_id', 'subtask_id', 'robot_id'],
        'module_input_names': ['obstacle-state', 'object-state', 'goal-state', 'robot0_proprio-state'],
        # Depth at which each module's output is fed into the next stage;
        # -1 means the module has no upstream interface.
        'interface_depths': [-1, 1, 2, 3],
        'graph_structure': [[0], [1], [2], [3]],
    }
    # Optionally resume from a previously saved actor-critic state dict.
    checkpoint = None
    if args.load_dir is not None:
        checkpoint = torch.load(os.path.join(args.load_dir, 'pyt_save', 'state_dicts.pt'))
    ppo(lambda: composition.make(
        args.robot, args.object, args.obstacle, args.task, args.controller, args.horizon, use_task_id_obs=True), actor_critic=CompositionalMLPActorCritic,
        ac_kwargs=ac_kwargs, seed=args.seed, gamma=args.gamma, steps_per_epoch=args.steps, epochs=args.epochs, clip_ratio=args.clip,
        pi_lr=args.pi_lr, vf_lr=args.vf_lr, train_pi_iters=args.pi_iters, train_v_iters=args.vf_iters, target_kl=args.target_kl,
        logger_kwargs=logger_kwargs, max_ep_len=args.horizon, ent_coef=args.ent_coef, log_per_proc=True, checkpoint=checkpoint)

if __name__ == '__main__':
    main()
| true | true |
f7f5e6e12b4a7192382988cb23f1d0178eabb73a | 289 | py | Python | transformerz/serialization/rison.py | KOLANICH/transformerz.py | cdffce185d22d3d06564755e8106ee02bc00652d | [
"Unlicense"
] | 1 | 2021-03-14T19:44:30.000Z | 2021-03-14T19:44:30.000Z | transformerz/serialization/rison.py | KOLANICH/transformerz.py | cdffce185d22d3d06564755e8106ee02bc00652d | [
"Unlicense"
] | 3 | 2021-03-14T19:52:50.000Z | 2021-03-14T21:47:31.000Z | transformerz/serialization/rison.py | KOLANICH/transformerz.py | cdffce185d22d3d06564755e8106ee02bc00652d | [
"Unlicense"
] | null | null | null | __all__ = ("risonSerializer",)
import typing

# Prefer the "prison" package if installed; otherwise fall back to the
# "rison" package.  Both expose the same dumps/loads interface.
try:
    import prison as rison
except ImportError:
    import rison

from ..core import FileTransformer
from . import jsonSerializableTypes

# Serializer for the Rison format; accepts the same value types as the JSON
# serializer and uses str as the on-disk representation.
risonSerializer = FileTransformer("rison", rison.dumps, rison.loads, str, jsonSerializableTypes, "rison")
| 20.642857 | 105 | 0.788927 | __all__ = ("risonSerializer",)
import typing
try:
import prison as rison
except ImportError:
import rison
from ..core import FileTransformer
from . import jsonSerializableTypes
risonSerializer = FileTransformer("rison", rison.dumps, rison.loads, str, jsonSerializableTypes, "rison")
| true | true |
f7f5e728fe6eedc4619f8e94079261935d62e2ab | 4,735 | py | Python | trollo/actions.py | fuzzball81/trollo | 819f305e34c90239be287637e588127607f9e552 | [
"BSD-2-Clause"
] | null | null | null | trollo/actions.py | fuzzball81/trollo | 819f305e34c90239be287637e588127607f9e552 | [
"BSD-2-Clause"
] | null | null | null | trollo/actions.py | fuzzball81/trollo | 819f305e34c90239be287637e588127607f9e552 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2012, Fog Creek Software, Inc.
# Copyright (c) 2016-2018, Red Hat, Inc.
# License: 2-clause BSD; see LICENSE.txt for details
import json
import requests
class Actions(object):
    """Thin client for Trello's ``/1/actions`` REST endpoints.

    Every public method returns the decoded JSON payload of the response
    and raises ``requests.HTTPError`` (via ``raise_for_status``) on any
    non-2xx status.  All public method signatures are unchanged from the
    original per-endpoint implementations; the shared request/raise/decode
    boilerplate is factored into the private ``_request`` helper.
    """
    __module__ = 'trollo'

    def __init__(self, apikey, token=None):
        self._apikey = apikey
        self._token = token

    def _request(self, method, path, **params):
        """Issue an authenticated request against ``/1/actions/<path>``.

        ``params`` are sent as query parameters on top of the API key and
        token.  Returns the decoded JSON body.
        """
        params.update(key=self._apikey, token=self._token)
        resp = requests.request(
            method, "https://trello.com/1/actions/%s" % (path,),
            params=params, data=None)
        resp.raise_for_status()
        return json.loads(resp.content)

    def get(self, action_id, fields=None, member=None, member_fields=None, memberCreator=None, memberCreator_fields=None):
        return self._request("get", action_id, fields=fields, member=member,
                             member_fields=member_fields, memberCreator=memberCreator,
                             memberCreator_fields=memberCreator_fields)

    def update(self, action_id, text):
        return self._request("put", action_id, text=text)

    def get_field(self, field, action_id):
        return self._request("get", "%s/%s" % (action_id, field))

    def get_board(self, action_id, fields=None):
        return self._request("get", "%s/board" % (action_id,), fields=fields)

    def get_board_field(self, field, action_id):
        return self._request("get", "%s/board/%s" % (action_id, field))

    def get_card(self, action_id, fields=None):
        return self._request("get", "%s/card" % (action_id,), fields=fields)

    def get_card_field(self, field, action_id):
        return self._request("get", "%s/card/%s" % (action_id, field))

    def get_list(self, action_id, fields=None):
        return self._request("get", "%s/list" % (action_id,), fields=fields)

    def get_list_field(self, field, action_id):
        return self._request("get", "%s/list/%s" % (action_id, field))

    def get_member(self, action_id, fields=None):
        return self._request("get", "%s/member" % (action_id,), fields=fields)

    def get_member_field(self, field, action_id):
        return self._request("get", "%s/member/%s" % (action_id, field))

    def get_memberCreator(self, action_id, fields=None):
        return self._request("get", "%s/memberCreator" % (action_id,), fields=fields)

    def get_memberCreator_field(self, field, action_id):
        return self._request("get", "%s/memberCreator/%s" % (action_id, field))

    def get_organization(self, action_id, fields=None):
        return self._request("get", "%s/organization" % (action_id,), fields=fields)

    def get_organization_field(self, field, action_id):
        return self._request("get", "%s/organization/%s" % (action_id, field))
| 52.611111 | 268 | 0.69018 |
import json
import requests
class Actions(object):
__module__ = 'trollo'
def __init__(self, apikey, token=None):
self._apikey = apikey
self._token = token
def get(self, action_id, fields=None, member=None, member_fields=None, memberCreator=None, memberCreator_fields=None):
resp = requests.get("https://trello.com/1/actions/%s" % (action_id), params=dict(key=self._apikey, token=self._token, fields=fields, member=member, member_fields=member_fields, memberCreator=memberCreator, memberCreator_fields=memberCreator_fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def update(self, action_id, text):
resp = requests.put("https://trello.com/1/actions/%s" % (action_id), params=dict(key=self._apikey, token=self._token, text=text), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_field(self, field, action_id):
resp = requests.get("https://trello.com/1/actions/%s/%s" % (action_id, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_board(self, action_id, fields=None):
resp = requests.get("https://trello.com/1/actions/%s/board" % (action_id), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_board_field(self, field, action_id):
resp = requests.get("https://trello.com/1/actions/%s/board/%s" % (action_id, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_card(self, action_id, fields=None):
resp = requests.get("https://trello.com/1/actions/%s/card" % (action_id), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_card_field(self, field, action_id):
resp = requests.get("https://trello.com/1/actions/%s/card/%s" % (action_id, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_list(self, action_id, fields=None):
resp = requests.get("https://trello.com/1/actions/%s/list" % (action_id), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_list_field(self, field, action_id):
resp = requests.get("https://trello.com/1/actions/%s/list/%s" % (action_id, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_member(self, action_id, fields=None):
resp = requests.get("https://trello.com/1/actions/%s/member" % (action_id), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_member_field(self, field, action_id):
resp = requests.get("https://trello.com/1/actions/%s/member/%s" % (action_id, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_memberCreator(self, action_id, fields=None):
resp = requests.get("https://trello.com/1/actions/%s/memberCreator" % (action_id), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_memberCreator_field(self, field, action_id):
resp = requests.get("https://trello.com/1/actions/%s/memberCreator/%s" % (action_id, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_organization(self, action_id, fields=None):
resp = requests.get("https://trello.com/1/actions/%s/organization" % (action_id), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_organization_field(self, field, action_id):
resp = requests.get("https://trello.com/1/actions/%s/organization/%s" % (action_id, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
| true | true |
f7f5e78a0d6f13711cc3689ea73d52df776eabd9 | 598 | py | Python | setup.py | techlift-tech/erpnext_support | 7209f06d818497f091b3313fa0b766814cbe7613 | [
"MIT"
] | null | null | null | setup.py | techlift-tech/erpnext_support | 7209f06d818497f091b3313fa0b766814cbe7613 | [
"MIT"
] | null | null | null | setup.py | techlift-tech/erpnext_support | 7209f06d818497f091b3313fa0b766814cbe7613 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages

# Runtime dependencies are read from requirements.txt, one per line.
with open('requirements.txt') as f:
    install_requires = f.read().strip().split('\n')

# get version from __version__ variable in erpnext_support/__init__.py
from erpnext_support import __version__ as version

setup(
    name='erpnext_support',
    version=version,
    description='Custom App to ehnance current ERPNext Support to make it a full fledged ticketing system',
    author='Techlift',
    author_email='palash@techlift.in',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires
)
| 28.47619 | 104 | 0.779264 |
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
from erpnext_support import __version__ as version
setup(
name='erpnext_support',
version=version,
description='Custom App to ehnance current ERPNext Support to make it a full fledged ticketing system',
author='Techlift',
author_email='palash@techlift.in',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| true | true |
f7f5e7b10578739d02077f5a802c1d6791c0686b | 2,328 | py | Python | azure-batch/azure/batch/models/exit_options.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | null | null | null | azure-batch/azure/batch/models/exit_options.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-batch/azure/batch/models/exit_options.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExitOptions(Model):
    """Specifies how the Batch service responds to a particular exit condition.

    :param job_action: An action to take on the job containing the task when
        the task completes with the given exit condition and the job's
        onTaskFailed property is 'performExitOptionsJobAction'. Defaults to
        none for exit code 0 and terminate for all other exit conditions.
        When the job's onTaskFailed property is noaction, specifying this
        property makes the add task request fail with an invalid property
        value error (HTTP 400 Bad Request for direct REST API calls).
        Possible values include: 'none', 'disable', 'terminate'
    :type job_action: str or ~azure.batch.models.JobAction
    :param dependency_action: An action the Batch service performs on tasks
        that depend on this task. Defaults to 'satisfy' for exit code 0 and
        'block' for all other exit conditions. When the job's
        usesTaskDependencies property is false, specifying this property
        makes the add task request fail with an invalid property value error
        (HTTP 400 Bad Request for direct REST API calls). Possible values
        include: 'satisfy', 'block'
    :type dependency_action: str or ~azure.batch.models.DependencyAction
    """

    _attribute_map = {
        'job_action': {'key': 'jobAction', 'type': 'JobAction'},
        'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'},
    }

    def __init__(self, **kwargs):
        super(ExitOptions, self).__init__(**kwargs)
        # Both attributes are optional; absent keys default to None.
        for attr_name in ('job_action', 'dependency_action'):
            setattr(self, attr_name, kwargs.get(attr_name, None))
| 48.5 | 85 | 0.678694 |
from msrest.serialization import Model
class ExitOptions(Model):
_attribute_map = {
'job_action': {'key': 'jobAction', 'type': 'JobAction'},
'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'},
}
def __init__(self, **kwargs):
super(ExitOptions, self).__init__(**kwargs)
self.job_action = kwargs.get('job_action', None)
self.dependency_action = kwargs.get('dependency_action', None)
| true | true |
f7f5e8783a8bebe67eedd23df055226fe25affe2 | 538 | py | Python | src/feeds/migrations/0002_feed_parent.py | lzomedia/socialtwizy.com | c18a76490dc188347917732156fb0f714668d43c | [
"MIT"
] | null | null | null | src/feeds/migrations/0002_feed_parent.py | lzomedia/socialtwizy.com | c18a76490dc188347917732156fb0f714668d43c | [
"MIT"
] | null | null | null | src/feeds/migrations/0002_feed_parent.py | lzomedia/socialtwizy.com | c18a76490dc188347917732156fb0f714668d43c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-27 12:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable self-referential ``parent`` FK to the Feed model.

    NOTE(review): presumably used for nesting feeds under a parent feed —
    confirm against the Feed model's usage.
    """

    dependencies = [
        ('feeds', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='feed',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='feeds.Feed'),
        ),
    ]
| 24.454545 | 121 | 0.639405 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('feeds', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='feed',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='feeds.Feed'),
),
]
| true | true |
f7f5e94fb41767cf28d8ba37d547158674de13ab | 20,761 | py | Python | warehouse/packaging/models.py | pradyunsg/warehouse | 82815b06d9f98deed5f205c66e054de59d22a10d | [
"Apache-2.0"
] | 1 | 2022-03-29T11:56:45.000Z | 2022-03-29T11:56:45.000Z | warehouse/packaging/models.py | pradyunsg/warehouse | 82815b06d9f98deed5f205c66e054de59d22a10d | [
"Apache-2.0"
] | 49 | 2022-03-14T14:43:26.000Z | 2022-03-31T14:45:07.000Z | warehouse/packaging/models.py | pradyunsg/warehouse | 82815b06d9f98deed5f205c66e054de59d22a10d | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from collections import OrderedDict
from urllib.parse import urlparse
import packaging.utils
from citext import CIText
from pyramid.authorization import Allow
from pyramid.threadlocal import get_current_request
from sqlalchemy import (
BigInteger,
Boolean,
CheckConstraint,
Column,
DateTime,
Enum,
Float,
ForeignKey,
Index,
Integer,
String,
Table,
Text,
UniqueConstraint,
func,
orm,
sql,
)
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr # type: ignore
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import validates
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from sqlalchemy.sql import expression
from trove_classifiers import sorted_classifiers
from warehouse import db
from warehouse.accounts.models import User
from warehouse.classifiers.models import Classifier
from warehouse.integrations.vulnerabilities.models import VulnerabilityRecord
from warehouse.sitemap.models import SitemapMixin
from warehouse.utils import dotted_navigator
from warehouse.utils.attrs import make_repr
class Role(db.Model):
    """Association between a User and a Project with a named role.

    ``role_name`` is expected to be "Owner" or "Maintainer"
    (see ``Project.__acl__``).
    """

    __tablename__ = "roles"
    __table_args__ = (
        Index("roles_user_id_idx", "user_id"),
        Index("roles_project_id_idx", "project_id"),
        # A user may hold at most one role per project.
        UniqueConstraint("user_id", "project_id", name="_roles_user_project_uc"),
    )

    __repr__ = make_repr("role_name")

    role_name = Column(Text, nullable=False)
    user_id = Column(
        ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False
    )
    project_id = Column(
        ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )

    user = orm.relationship(User, lazy=False)
    project = orm.relationship("Project", lazy=False)
class RoleInvitationStatus(enum.Enum):
    """Lifecycle states for a collaborator role invitation."""

    Pending = "pending"
    Expired = "expired"
class RoleInvitation(db.Model):
    """A pending (or expired) invitation for a user to take a role on a project."""

    __tablename__ = "role_invitations"
    __table_args__ = (
        Index("role_invitations_user_id_idx", "user_id"),
        # A user can have at most one outstanding invitation per project.
        UniqueConstraint(
            "user_id", "project_id", name="_role_invitations_user_project_uc"
        ),
    )

    __repr__ = make_repr("invite_status", "user", "project")

    invite_status = Column(
        Enum(RoleInvitationStatus, values_callable=lambda x: [e.value for e in x]),
        nullable=False,
    )
    # Opaque token used to accept the invitation.
    token = Column(Text, nullable=False)
    user_id = Column(
        ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    project_id = Column(
        ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )

    user = orm.relationship(User, lazy=False)
    project = orm.relationship("Project", lazy=False)
class ProjectFactory:
    """Pyramid traversal root mapping normalized project names to Projects."""

    def __init__(self, request):
        self.request = request

    def __getitem__(self, project):
        """Look up a Project by PEP 426 normalized name; KeyError on miss."""
        normalized = func.normalize_pep426_name(project)
        found = (
            self.request.db.query(Project)
            .filter(Project.normalized_name == normalized)
            .one_or_none()
        )
        if found is None:
            raise KeyError
        return found
class TwoFactorRequireable:
    """Mixin adding the flags that mark a project as requiring 2FA."""

    # Project owner requires 2FA for this project
    owners_require_2fa = Column(Boolean, nullable=False, server_default=sql.false())
    # PyPI requires 2FA for this project
    pypi_mandates_2fa = Column(Boolean, nullable=False, server_default=sql.false())

    @hybrid_property
    def two_factor_required(self):
        """True when either the owners or PyPI itself mandates 2FA.

        Uses ``|`` (not ``or``) so the expression is also usable as a SQL
        boolean OR at the class level (hybrid property).
        """
        return self.owners_require_2fa | self.pypi_mandates_2fa
class Project(SitemapMixin, TwoFactorRequireable, db.Model):
    """A project on PyPI: the container for releases, roles, and audit events."""

    __tablename__ = "projects"
    __table_args__ = (
        # Valid PEP 508 project-name shape, enforced at the DB level.
        CheckConstraint(
            "name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
            name="projects_valid_name",
        ),
    )

    __repr__ = make_repr("name")

    name = Column(Text, nullable=False)
    # Computed in SQL so lookups can match PEP 426 normalized names.
    normalized_name = orm.column_property(func.normalize_pep426_name(name))
    created = Column(
        DateTime(timezone=False),
        nullable=False,
        server_default=sql.func.now(),
        index=True,
    )
    has_docs = Column(Boolean)
    upload_limit = Column(Integer, nullable=True)
    total_size_limit = Column(BigInteger, nullable=True)
    last_serial = Column(Integer, nullable=False, server_default=sql.text("0"))
    zscore = Column(Float, nullable=True)
    total_size = Column(BigInteger, server_default=sql.text("0"))

    users = orm.relationship(User, secondary=Role.__table__, backref="projects")  # type: ignore # noqa
    releases = orm.relationship(
        "Release",
        backref="project",
        cascade="all, delete-orphan",
        order_by=lambda: Release._pypi_ordering.desc(),  # type: ignore
        passive_deletes=True,
    )
    events = orm.relationship(
        "ProjectEvent", backref="project", cascade="all, delete-orphan", lazy=True
    )

    def __getitem__(self, version):
        """Traversal lookup: return the Release matching *version*.

        Matches on the canonicalized version first; raises KeyError (a
        traversal 404) when nothing matches.
        """
        session = orm.object_session(self)
        canonical_version = packaging.utils.canonicalize_version(version)

        try:
            return (
                session.query(Release)
                .filter(
                    Release.project == self,
                    Release.canonical_version == canonical_version,
                )
                .one()
            )
        except MultipleResultsFound:
            # There are multiple releases of this project which have the same
            # canonical version that were uploaded before we checked for
            # canonical version equivalence, so return the exact match instead
            try:
                return (
                    session.query(Release)
                    .filter(Release.project == self, Release.version == version)
                    .one()
                )
            except NoResultFound:
                # There are multiple releases of this project which have the
                # same canonical version, but none that have the exact version
                # specified, so just 404
                raise KeyError from None
        except NoResultFound:
            raise KeyError from None

    def __acl__(self):
        """Build the Pyramid ACL for this project.

        Admin/moderator groups come first; then Owners (manage:project +
        upload) before Maintainers (upload only), each keyed by user id.
        """
        session = orm.object_session(self)
        acls = [
            (Allow, "group:admins", "admin"),
            (Allow, "group:moderators", "moderator"),
        ]

        # Get all of the users for this project.
        query = session.query(Role).filter(Role.project == self)
        query = query.options(orm.lazyload("project"))
        query = query.options(orm.joinedload("user").lazyload("emails"))
        query = query.join(User).order_by(User.id.asc())
        for role in sorted(
            query.all(), key=lambda x: ["Owner", "Maintainer"].index(x.role_name)
        ):
            if role.role_name == "Owner":
                acls.append((Allow, str(role.user.id), ["manage:project", "upload"]))
            else:
                acls.append((Allow, str(role.user.id), ["upload"]))
        return acls

    def record_event(self, *, tag, ip_address, additional=None):
        """Persist (and flush) a ProjectEvent for this project; return it."""
        session = orm.object_session(self)
        event = ProjectEvent(
            project=self, tag=tag, ip_address=ip_address, additional=additional
        )
        session.add(event)
        session.flush()

        return event

    @property
    def documentation_url(self):
        """URL of the hosted docs, or None when ``has_docs`` is falsy."""
        # TODO: Move this into the database and eliminate the use of the
        #       threadlocal here.
        request = get_current_request()

        # If the project doesn't have docs, then we'll just return a None here.
        if not self.has_docs:
            return

        return request.route_url("legacy.docs", project=self.name)

    @property
    def all_versions(self):
        """(version, created, is_prerelease, yanked) tuples, newest first."""
        return (
            orm.object_session(self)
            .query(
                Release.version, Release.created, Release.is_prerelease, Release.yanked
            )
            .filter(Release.project == self)
            .order_by(Release._pypi_ordering.desc())
            .all()
        )

    @property
    def latest_version(self):
        """Newest non-yanked release, preferring final releases over pre-releases."""
        return (
            orm.object_session(self)
            .query(Release.version, Release.created, Release.is_prerelease)
            .filter(Release.project == self, Release.yanked.is_(False))
            .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())
            .first()
        )
class ProjectEvent(db.Model):
    """Audit-trail entry recorded against a project (see Project.record_event)."""

    __tablename__ = "project_events"

    project_id = Column(
        UUID(as_uuid=True),
        ForeignKey(
            "projects.id", deferrable=True, initially="DEFERRED", ondelete="CASCADE"
        ),
        nullable=False,
        index=True,
    )
    tag = Column(String, nullable=False)
    time = Column(DateTime, nullable=False, server_default=sql.func.now())
    ip_address = Column(String, nullable=False)
    # Optional free-form JSON payload with event-specific details.
    additional = Column(JSONB, nullable=True)
class DependencyKind(enum.IntEnum):
    """Discriminator values stored in ``Dependency.kind``."""

    requires = 1
    provides = 2
    obsoletes = 3
    requires_dist = 4
    provides_dist = 5
    obsoletes_dist = 6
    requires_external = 7

    # TODO: Move project URLs into their own table, since they are not
    #       actually a "dependency".
    project_url = 8
class Dependency(db.Model):
    """A single metadata entry attached to a release (requires/provides/URL/...)."""

    __tablename__ = "release_dependencies"
    __table_args__ = (
        Index("release_dependencies_release_kind_idx", "release_id", "kind"),
    )
    __repr__ = make_repr("name", "version", "kind", "specifier")

    release_id = Column(
        ForeignKey("releases.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    # One of DependencyKind's integer values.
    kind = Column(Integer)
    specifier = Column(Text)
def _dependency_relation(kind):
    """Build a viewonly relationship selecting Dependency rows of *kind*."""

    def _join_condition():
        # Evaluated lazily so Release/Dependency are fully mapped first.
        return sql.and_(
            Release.id == Dependency.release_id, Dependency.kind == kind.value
        )

    return orm.relationship("Dependency", primaryjoin=_join_condition, viewonly=True)
class Description(db.Model):
    """The long-description of a release: raw source plus rendered HTML."""

    __tablename__ = "release_descriptions"

    content_type = Column(Text)
    raw = Column(Text, nullable=False)
    html = Column(Text, nullable=False)
    # Identifier of the renderer used to produce ``html`` from ``raw``.
    rendered_by = Column(Text, nullable=False)
class Release(db.Model):
__tablename__ = "releases"
@declared_attr
def __table_args__(cls): # noqa
return (
Index("release_created_idx", cls.created.desc()),
Index("release_project_created_idx", cls.project_id, cls.created.desc()),
Index("release_version_idx", cls.version),
UniqueConstraint("project_id", "version"),
)
__repr__ = make_repr("project", "version")
__parent__ = dotted_navigator("project")
__name__ = dotted_navigator("version")
project_id = Column(
ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
)
version = Column(Text, nullable=False)
canonical_version = Column(Text, nullable=False)
is_prerelease = orm.column_property(func.pep440_is_prerelease(version))
author = Column(Text)
author_email = Column(Text)
maintainer = Column(Text)
maintainer_email = Column(Text)
home_page = Column(Text)
license = Column(Text)
summary = Column(Text)
keywords = Column(Text)
platform = Column(Text)
download_url = Column(Text)
_pypi_ordering = Column(Integer)
requires_python = Column(Text)
created = Column(
DateTime(timezone=False), nullable=False, server_default=sql.func.now()
)
description_id = Column(
ForeignKey("release_descriptions.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
index=True,
)
description = orm.relationship(
"Description",
backref=orm.backref(
"release",
cascade="all, delete-orphan",
passive_deletes=True,
passive_updates=True,
single_parent=True,
uselist=False,
),
)
yanked = Column(Boolean, nullable=False, server_default=sql.false())
yanked_reason = Column(Text, nullable=False, server_default="")
_classifiers = orm.relationship(
Classifier,
backref="project_releases",
secondary=lambda: release_classifiers, # type: ignore
order_by=expression.case(
{c: i for i, c in enumerate(sorted_classifiers)},
value=Classifier.classifier,
),
passive_deletes=True,
)
classifiers = association_proxy("_classifiers", "classifier")
files = orm.relationship(
"File",
backref="release",
cascade="all, delete-orphan",
lazy="dynamic",
order_by=lambda: File.filename, # type: ignore
passive_deletes=True,
)
dependencies = orm.relationship(
"Dependency",
backref="release",
cascade="all, delete-orphan",
passive_deletes=True,
)
vulnerabilities = orm.relationship(
VulnerabilityRecord,
back_populates="releases",
secondary="release_vulnerabilities",
passive_deletes=True,
)
_requires = _dependency_relation(DependencyKind.requires)
requires = association_proxy("_requires", "specifier")
_provides = _dependency_relation(DependencyKind.provides)
provides = association_proxy("_provides", "specifier")
_obsoletes = _dependency_relation(DependencyKind.obsoletes)
obsoletes = association_proxy("_obsoletes", "specifier")
_requires_dist = _dependency_relation(DependencyKind.requires_dist)
requires_dist = association_proxy("_requires_dist", "specifier")
_provides_dist = _dependency_relation(DependencyKind.provides_dist)
provides_dist = association_proxy("_provides_dist", "specifier")
_obsoletes_dist = _dependency_relation(DependencyKind.obsoletes_dist)
obsoletes_dist = association_proxy("_obsoletes_dist", "specifier")
_requires_external = _dependency_relation(DependencyKind.requires_external)
requires_external = association_proxy("_requires_external", "specifier")
_project_urls = _dependency_relation(DependencyKind.project_url)
project_urls = association_proxy("_project_urls", "specifier")
uploader_id = Column(
ForeignKey("users.id", onupdate="CASCADE", ondelete="SET NULL"),
nullable=True,
index=True,
)
uploader = orm.relationship(User)
uploaded_via = Column(Text)
@property
def urls(self):
    """Return an ordered mapping of label -> URL for this release.

    ``Homepage`` and ``Download`` come from dedicated metadata columns;
    the rest are parsed from the ``project_urls`` "name, url" specifiers.
    """
    mapping = OrderedDict()
    for label, value in (("Homepage", self.home_page), ("Download", self.download_url)):
        if value:
            mapping[label] = value
    for spec in self.project_urls:
        label, _, link = spec.partition(",")
        label, link = label.strip(), link.strip()
        if label and link:
            mapping[label] = link
    return mapping
@property
def github_repo_info_url(self):
    """Return the GitHub API repo URL for the first GitHub project URL.

    Returns None when no URL on the release points at github.com with at
    least an owner/repo path.
    """
    github_hosts = {"github.com", "www.github.com"}
    for candidate in self.urls.values():
        parts = urlparse(candidate)
        path_segments = parts.path.strip("/").split("/")
        if parts.netloc not in github_hosts or len(path_segments) < 2:
            continue
        owner, repo = path_segments[0], path_segments[1]
        return f"https://api.github.com/repos/{owner}/{repo}"
@property
def has_meta(self):
    """True when any optional metadata field is populated on this release."""
    meta_fields = (
        self.license,
        self.keywords,
        self.author,
        self.author_email,
        self.maintainer,
        self.maintainer_email,
        self.requires_python,
    )
    return any(meta_fields)
class File(db.Model):
    """A single distribution file (sdist/wheel/...) belonging to a release."""

    __tablename__ = "release_files"

    @declared_attr
    def __table_args__(cls):  # noqa
        return (
            # Digests are stored as hex; enforce the expected width/alphabet.
            CheckConstraint("sha256_digest ~* '^[A-F0-9]{64}$'"),
            CheckConstraint("blake2_256_digest ~* '^[A-F0-9]{64}$'"),
            # At most one sdist per release, unless explicitly allowed below.
            Index(
                "release_files_single_sdist",
                "release_id",
                "packagetype",
                unique=True,
                postgresql_where=(
                    (cls.packagetype == "sdist")
                    & (cls.allow_multiple_sdist == False)  # noqa
                ),
            ),
            Index("release_files_release_id_idx", "release_id"),
        )

    release_id = Column(
        ForeignKey("releases.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    python_version = Column(Text)
    requires_python = Column(Text)
    packagetype = Column(
        Enum(
            "bdist_dmg",
            "bdist_dumb",
            "bdist_egg",
            "bdist_msi",
            "bdist_rpm",
            "bdist_wheel",
            "bdist_wininst",
            "sdist",
        )
    )
    comment_text = Column(Text)
    filename = Column(Text, unique=True)
    path = Column(Text, unique=True, nullable=False)
    size = Column(Integer)
    has_signature = Column(Boolean)
    md5_digest = Column(Text, unique=True, nullable=False)
    sha256_digest = Column(CIText, unique=True, nullable=False)
    blake2_256_digest = Column(CIText, unique=True, nullable=False)
    upload_time = Column(DateTime(timezone=False), server_default=func.now())
    uploaded_via = Column(Text)

    # We need this column to allow us to handle the currently existing "double"
    # sdists that exist in our database. Eventually we should try to get rid
    # of all of them and then remove this column.
    allow_multiple_sdist = Column(Boolean, nullable=False, server_default=sql.false())

    @hybrid_property
    def pgp_path(self):
        # Detached PGP signature lives next to the file with an ".asc" suffix.
        return self.path + ".asc"

    @pgp_path.expression  # type: ignore
    def pgp_path(self):
        # SQL-side equivalent of the Python property above.
        return func.concat(self.path, ".asc")

    @validates("requires_python")
    def validates_requires_python(self, *args, **kwargs):
        # Direct writes are forbidden; this value is managed elsewhere.
        raise RuntimeError("Cannot set File.requires_python")
class Filename(db.ModelBase):
    """Registry of every filename ever seen, independent of release_files rows."""

    __tablename__ = "file_registry"

    id = Column(Integer, primary_key=True, nullable=False)
    filename = Column(Text, unique=True, nullable=False)
# Association table linking releases to trove classifiers (many-to-many).
release_classifiers = Table(
    "release_classifiers",
    db.metadata,
    Column(
        "release_id",
        ForeignKey("releases.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    ),
    Column("trove_id", Integer(), ForeignKey("trove_classifiers.id")),
    Index("rel_class_trove_id_idx", "trove_id"),
    Index("rel_class_release_id_idx", "release_id"),
)
class JournalEntry(db.ModelBase):
    """Append-only log of project/release actions (name, version, action)."""

    __tablename__ = "journals"

    @declared_attr
    def __table_args__(cls):  # noqa
        return (
            Index("journals_changelog", "submitted_date", "name", "version", "action"),
            Index("journals_name_idx", "name"),
            Index("journals_version_idx", "version"),
            Index("journals_submitted_by_idx", "submitted_by"),
            Index("journals_submitted_date_id_idx", cls.submitted_date, cls.id),
        )

    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(Text)
    version = Column(Text)
    action = Column(Text)
    submitted_date = Column(
        DateTime(timezone=False), nullable=False, server_default=sql.func.now()
    )
    _submitted_by = Column(
        "submitted_by", CIText, ForeignKey("users.username", onupdate="CASCADE")
    )
    # lazy="raise_on_sql": accessing this without an eager load raises instead
    # of silently emitting per-row SQL.
    submitted_by = orm.relationship(User, lazy="raise_on_sql")
    submitted_from = Column(Text)
class ProhibitedProjectName(db.Model):
    """A project name that may not be registered, with who/why bookkeeping."""

    __tablename__ = "prohibited_project_names"
    __table_args__ = (
        # Same validity pattern used for project names themselves.
        CheckConstraint(
            "name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
            name="prohibited_project_valid_name",
        ),
    )

    __repr__ = make_repr("name")

    created = Column(
        DateTime(timezone=False), nullable=False, server_default=sql.func.now()
    )
    name = Column(Text, unique=True, nullable=False)
    _prohibited_by = Column(
        "prohibited_by", UUID(as_uuid=True), ForeignKey("users.id"), index=True
    )
    prohibited_by = orm.relationship(User)
    comment = Column(Text, nullable=False, server_default="")
| 31.219549 | 103 | 0.635952 |
import enum
from collections import OrderedDict
from urllib.parse import urlparse
import packaging.utils
from citext import CIText
from pyramid.authorization import Allow
from pyramid.threadlocal import get_current_request
from sqlalchemy import (
BigInteger,
Boolean,
CheckConstraint,
Column,
DateTime,
Enum,
Float,
ForeignKey,
Index,
Integer,
String,
Table,
Text,
UniqueConstraint,
func,
orm,
sql,
)
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import validates
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from sqlalchemy.sql import expression
from trove_classifiers import sorted_classifiers
from warehouse import db
from warehouse.accounts.models import User
from warehouse.classifiers.models import Classifier
from warehouse.integrations.vulnerabilities.models import VulnerabilityRecord
from warehouse.sitemap.models import SitemapMixin
from warehouse.utils import dotted_navigator
from warehouse.utils.attrs import make_repr
class Role(db.Model):
    """Associates a user with a project under a named role (e.g. Owner/Maintainer)."""

    __tablename__ = "roles"
    __table_args__ = (
        Index("roles_user_id_idx", "user_id"),
        Index("roles_project_id_idx", "project_id"),
        # A user may hold at most one role per project.
        UniqueConstraint("user_id", "project_id", name="_roles_user_project_uc"),
    )

    __repr__ = make_repr("role_name")

    role_name = Column(Text, nullable=False)
    user_id = Column(
        ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False
    )
    project_id = Column(
        ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )

    # Eagerly loaded: roles are almost always used with their user/project.
    user = orm.relationship(User, lazy=False)
    project = orm.relationship("Project", lazy=False)
class RoleInvitationStatus(enum.Enum):
    """Lifecycle states for a role invitation."""

    Pending = "pending"
    Expired = "expired"
class RoleInvitation(db.Model):
    """A pending (or expired) invitation for a user to take a role on a project."""

    __tablename__ = "role_invitations"
    __table_args__ = (
        Index("role_invitations_user_id_idx", "user_id"),
        # Only one outstanding invitation per user/project pair.
        UniqueConstraint(
            "user_id", "project_id", name="_role_invitations_user_project_uc"
        ),
    )

    __repr__ = make_repr("invite_status", "user", "project")

    invite_status = Column(
        # Stored as the enum's string values ("pending"/"expired").
        Enum(RoleInvitationStatus, values_callable=lambda x: [e.value for e in x]),
        nullable=False,
    )
    token = Column(Text, nullable=False)
    user_id = Column(
        ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    project_id = Column(
        ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )

    user = orm.relationship(User, lazy=False)
    project = orm.relationship("Project", lazy=False)
class ProjectFactory:
    """Traversal root: maps a (normalized) project name to a Project row."""

    def __init__(self, request):
        self.request = request

    def __getitem__(self, project):
        try:
            return (
                self.request.db.query(Project)
                # Match on the PEP 426 normalized form so lookups ignore
                # case and ./-/_ separator differences.
                .filter(Project.normalized_name == func.normalize_pep426_name(project))
                .one()
            )
        except NoResultFound:
            raise KeyError from None
class TwoFactorRequireable:
    """Mixin adding two-factor-authentication requirement flags."""

    # Owners of this project have opted in to requiring 2FA.
    owners_require_2fa = Column(Boolean, nullable=False, server_default=sql.false())
    # The index itself mandates 2FA for this project.
    pypi_mandates_2fa = Column(Boolean, nullable=False, server_default=sql.false())

    @hybrid_property
    def two_factor_required(self):
        # Bitwise | works both in Python and as a SQL expression.
        return self.owners_require_2fa | self.pypi_mandates_2fa
class Project(SitemapMixin, TwoFactorRequireable, db.Model):
    """A project (package name) on the index, owning a set of releases."""

    __tablename__ = "projects"
    __table_args__ = (
        CheckConstraint(
            "name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
            name="projects_valid_name",
        ),
    )

    __repr__ = make_repr("name")

    name = Column(Text, nullable=False)
    normalized_name = orm.column_property(func.normalize_pep426_name(name))
    created = Column(
        DateTime(timezone=False),
        nullable=False,
        server_default=sql.func.now(),
        index=True,
    )
    has_docs = Column(Boolean)
    upload_limit = Column(Integer, nullable=True)
    total_size_limit = Column(BigInteger, nullable=True)
    last_serial = Column(Integer, nullable=False, server_default=sql.text("0"))
    zscore = Column(Float, nullable=True)
    total_size = Column(BigInteger, server_default=sql.text("0"))

    # BUG FIX: these two assignments were fused onto a single physical line
    # (a SyntaxError in Python); they are now separate statements.
    users = orm.relationship(User, secondary=Role.__table__, backref="projects")
    releases = orm.relationship(
        "Release",
        backref="project",
        cascade="all, delete-orphan",
        order_by=lambda: Release._pypi_ordering.desc(),
        passive_deletes=True,
    )
    events = orm.relationship(
        "ProjectEvent", backref="project", cascade="all, delete-orphan", lazy=True
    )

    def __getitem__(self, version):
        """Return the release matching *version*, else raise KeyError.

        The lookup first uses the PEP 440 canonicalized version; if several
        releases share that canonical form, it falls back to an exact,
        non-canonicalized match.
        """
        session = orm.object_session(self)
        canonical_version = packaging.utils.canonicalize_version(version)

        try:
            return (
                session.query(Release)
                .filter(
                    Release.project == self,
                    Release.canonical_version == canonical_version,
                )
                .one()
            )
        except MultipleResultsFound:
            # Multiple releases canonicalize to the same string; disambiguate
            # with the exact version the caller supplied.
            try:
                return (
                    session.query(Release)
                    .filter(Release.project == self, Release.version == version)
                    .one()
                )
            except NoResultFound:
                raise KeyError from None
        except NoResultFound:
            raise KeyError from None

    def __acl__(self):
        """Build the Pyramid ACL: admin/moderator groups plus per-role grants."""
        session = orm.object_session(self)
        acls = [
            (Allow, "group:admins", "admin"),
            (Allow, "group:moderators", "moderator"),
        ]

        # Deterministic ordering: Owners before Maintainers, then by user id.
        query = session.query(Role).filter(Role.project == self)
        query = query.options(orm.lazyload("project"))
        query = query.options(orm.joinedload("user").lazyload("emails"))
        query = query.join(User).order_by(User.id.asc())
        for role in sorted(
            query.all(), key=lambda x: ["Owner", "Maintainer"].index(x.role_name)
        ):
            if role.role_name == "Owner":
                acls.append((Allow, str(role.user.id), ["manage:project", "upload"]))
            else:
                acls.append((Allow, str(role.user.id), ["upload"]))
        return acls

    def record_event(self, *, tag, ip_address, additional=None):
        """Persist and return a ProjectEvent audit record for this project."""
        session = orm.object_session(self)
        event = ProjectEvent(
            project=self, tag=tag, ip_address=ip_address, additional=additional
        )
        session.add(event)
        session.flush()
        return event

    @property
    def documentation_url(self):
        """URL of the hosted docs, or None when the project has none."""
        request = get_current_request()
        if not self.has_docs:
            return
        return request.route_url("legacy.docs", project=self.name)

    @property
    def all_versions(self):
        """All releases as (version, created, is_prerelease, yanked), newest first."""
        return (
            orm.object_session(self)
            .query(
                Release.version, Release.created, Release.is_prerelease, Release.yanked
            )
            .filter(Release.project == self)
            .order_by(Release._pypi_ordering.desc())
            .all()
        )

    @property
    def latest_version(self):
        """Newest non-yanked release, preferring final releases over pre-releases."""
        return (
            orm.object_session(self)
            .query(Release.version, Release.created, Release.is_prerelease)
            .filter(Release.project == self, Release.yanked.is_(False))
            .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())
            .first()
        )
class ProjectEvent(db.Model):
    """Audit-trail entry recorded against a project (see Project.record_event)."""

    __tablename__ = "project_events"

    project_id = Column(
        UUID(as_uuid=True),
        ForeignKey(
            "projects.id", deferrable=True, initially="DEFERRED", ondelete="CASCADE"
        ),
        nullable=False,
        index=True,
    )
    tag = Column(String, nullable=False)
    time = Column(DateTime, nullable=False, server_default=sql.func.now())
    ip_address = Column(String, nullable=False)
    # Free-form JSON payload with event-specific details (may be NULL).
    additional = Column(JSONB, nullable=True)
class DependencyKind(enum.IntEnum):
    """Discriminator for rows in release_dependencies (metadata field kinds)."""

    requires = 1
    provides = 2
    obsoletes = 3
    requires_dist = 4
    provides_dist = 5
    obsoletes_dist = 6
    requires_external = 7
    project_url = 8
class Dependency(db.Model):
    """One metadata specifier attached to a release, typed by DependencyKind."""

    __tablename__ = "release_dependencies"
    __table_args__ = (
        Index("release_dependencies_release_kind_idx", "release_id", "kind"),
    )

    __repr__ = make_repr("name", "version", "kind", "specifier")

    release_id = Column(
        ForeignKey("releases.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    # Integer value of a DependencyKind member.
    kind = Column(Integer)
    specifier = Column(Text)
def _dependency_relation(kind):
    # Helper building a read-only relationship onto Dependency rows of one kind.
    return orm.relationship(
        "Dependency",
        primaryjoin=lambda: sql.and_(
            Release.id == Dependency.release_id, Dependency.kind == kind.value
        ),
        viewonly=True,
    )
class Description(db.Model):
    """Long-description of a release: raw source plus its rendered HTML."""

    __tablename__ = "release_descriptions"

    content_type = Column(Text)
    raw = Column(Text, nullable=False)
    html = Column(Text, nullable=False)
    # Identifier of the renderer that produced ``html`` from ``raw``.
    rendered_by = Column(Text, nullable=False)
class Release(db.Model):
    """A single released version of a Project, plus its metadata and files."""

    __tablename__ = "releases"

    @declared_attr
    def __table_args__(cls):  # noqa
        return (
            Index("release_created_idx", cls.created.desc()),
            Index("release_project_created_idx", cls.project_id, cls.created.desc()),
            Index("release_version_idx", cls.version),
            # One row per (project, version).
            UniqueConstraint("project_id", "version"),
        )

    __repr__ = make_repr("project", "version")
    # Traversal hooks: a release's parent is its project, its name the version.
    __parent__ = dotted_navigator("project")
    __name__ = dotted_navigator("version")

    project_id = Column(
        ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    version = Column(Text, nullable=False)
    canonical_version = Column(Text, nullable=False)
    is_prerelease = orm.column_property(func.pep440_is_prerelease(version))
    author = Column(Text)
    author_email = Column(Text)
    maintainer = Column(Text)
    maintainer_email = Column(Text)
    home_page = Column(Text)
    license = Column(Text)
    summary = Column(Text)
    keywords = Column(Text)
    platform = Column(Text)
    download_url = Column(Text)
    _pypi_ordering = Column(Integer)
    requires_python = Column(Text)
    created = Column(
        DateTime(timezone=False), nullable=False, server_default=sql.func.now()
    )

    description_id = Column(
        ForeignKey("release_descriptions.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    description = orm.relationship(
        "Description",
        backref=orm.backref(
            "release",
            cascade="all, delete-orphan",
            passive_deletes=True,
            passive_updates=True,
            single_parent=True,
            uselist=False,
        ),
    )

    # Yank state (PEP 592); reason is an empty string when not yanked.
    yanked = Column(Boolean, nullable=False, server_default=sql.false())
    yanked_reason = Column(Text, nullable=False, server_default="")

    # Trove classifiers, ordered to match the canonical trove-classifiers order.
    _classifiers = orm.relationship(
        Classifier,
        backref="project_releases",
        secondary=lambda: release_classifiers,
        order_by=expression.case(
            {c: i for i, c in enumerate(sorted_classifiers)},
            value=Classifier.classifier,
        ),
        passive_deletes=True,
    )
    classifiers = association_proxy("_classifiers", "classifier")

    files = orm.relationship(
        "File",
        backref="release",
        cascade="all, delete-orphan",
        lazy="dynamic",
        order_by=lambda: File.filename,
        passive_deletes=True,
    )
    dependencies = orm.relationship(
        "Dependency",
        backref="release",
        cascade="all, delete-orphan",
        passive_deletes=True,
    )
    vulnerabilities = orm.relationship(
        VulnerabilityRecord,
        back_populates="releases",
        secondary="release_vulnerabilities",
        passive_deletes=True,
    )

    # One (relationship, association proxy) pair per DependencyKind.
    _requires = _dependency_relation(DependencyKind.requires)
    requires = association_proxy("_requires", "specifier")
    _provides = _dependency_relation(DependencyKind.provides)
    provides = association_proxy("_provides", "specifier")
    _obsoletes = _dependency_relation(DependencyKind.obsoletes)
    obsoletes = association_proxy("_obsoletes", "specifier")
    _requires_dist = _dependency_relation(DependencyKind.requires_dist)
    requires_dist = association_proxy("_requires_dist", "specifier")
    _provides_dist = _dependency_relation(DependencyKind.provides_dist)
    provides_dist = association_proxy("_provides_dist", "specifier")
    _obsoletes_dist = _dependency_relation(DependencyKind.obsoletes_dist)
    obsoletes_dist = association_proxy("_obsoletes_dist", "specifier")
    _requires_external = _dependency_relation(DependencyKind.requires_external)
    requires_external = association_proxy("_requires_external", "specifier")
    _project_urls = _dependency_relation(DependencyKind.project_url)
    project_urls = association_proxy("_project_urls", "specifier")

    # Uploading user; the row survives (with NULL) if the user is deleted.
    uploader_id = Column(
        ForeignKey("users.id", onupdate="CASCADE", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )
    uploader = orm.relationship(User)
    uploaded_via = Column(Text)

    @property
    def urls(self):
        """Ordered mapping of label -> URL built from metadata fields."""
        _urls = OrderedDict()

        if self.home_page:
            _urls["Homepage"] = self.home_page
        if self.download_url:
            _urls["Download"] = self.download_url

        # project_urls entries are "name, url" specifiers.
        for urlspec in self.project_urls:
            name, _, url = urlspec.partition(",")
            name = name.strip()
            url = url.strip()
            if name and url:
                _urls[name] = url

        return _urls

    @property
    def github_repo_info_url(self):
        """GitHub API repo URL for the first GitHub project URL, else None."""
        for url in self.urls.values():
            parsed = urlparse(url)
            segments = parsed.path.strip("/").split("/")
            if parsed.netloc in {"github.com", "www.github.com"} and len(segments) >= 2:
                user_name, repo_name = segments[:2]
                return f"https://api.github.com/repos/{user_name}/{repo_name}"

    @property
    def has_meta(self):
        """True when any optional metadata field is populated."""
        return any(
            [
                self.license,
                self.keywords,
                self.author,
                self.author_email,
                self.maintainer,
                self.maintainer_email,
                self.requires_python,
            ]
        )
class File(db.Model):
    """A single distribution file (sdist/wheel/...) belonging to a release."""

    __tablename__ = "release_files"

    @declared_attr
    def __table_args__(cls):  # noqa
        return (
            # Digests are stored as hex; enforce the expected width/alphabet.
            CheckConstraint("sha256_digest ~* '^[A-F0-9]{64}$'"),
            CheckConstraint("blake2_256_digest ~* '^[A-F0-9]{64}$'"),
            # At most one sdist per release, unless explicitly allowed below.
            Index(
                "release_files_single_sdist",
                "release_id",
                "packagetype",
                unique=True,
                postgresql_where=(
                    (cls.packagetype == "sdist")
                    & (cls.allow_multiple_sdist == False)  # noqa
                ),
            ),
            Index("release_files_release_id_idx", "release_id"),
        )

    release_id = Column(
        ForeignKey("releases.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    python_version = Column(Text)
    requires_python = Column(Text)
    packagetype = Column(
        Enum(
            "bdist_dmg",
            "bdist_dumb",
            "bdist_egg",
            "bdist_msi",
            "bdist_rpm",
            "bdist_wheel",
            "bdist_wininst",
            "sdist",
        )
    )
    comment_text = Column(Text)
    filename = Column(Text, unique=True)
    path = Column(Text, unique=True, nullable=False)
    size = Column(Integer)
    has_signature = Column(Boolean)
    md5_digest = Column(Text, unique=True, nullable=False)
    sha256_digest = Column(CIText, unique=True, nullable=False)
    blake2_256_digest = Column(CIText, unique=True, nullable=False)
    upload_time = Column(DateTime(timezone=False), server_default=func.now())
    uploaded_via = Column(Text)

    # Escape hatch for historical releases with more than one sdist.
    allow_multiple_sdist = Column(Boolean, nullable=False, server_default=sql.false())

    @hybrid_property
    def pgp_path(self):
        # Detached PGP signature lives next to the file with an ".asc" suffix.
        return self.path + ".asc"

    @pgp_path.expression  # noqa
    def pgp_path(self):
        # SQL-side equivalent of the Python property above.
        return func.concat(self.path, ".asc")

    @validates("requires_python")
    def validates_requires_python(self, *args, **kwargs):
        # Direct writes are forbidden; this value is managed elsewhere.
        raise RuntimeError("Cannot set File.requires_python")
class Filename(db.ModelBase):
    """Registry of every filename ever seen, independent of release_files rows."""

    __tablename__ = "file_registry"

    id = Column(Integer, primary_key=True, nullable=False)
    filename = Column(Text, unique=True, nullable=False)
# Association table linking releases to trove classifiers (many-to-many).
release_classifiers = Table(
    "release_classifiers",
    db.metadata,
    Column(
        "release_id",
        ForeignKey("releases.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    ),
    Column("trove_id", Integer(), ForeignKey("trove_classifiers.id")),
    Index("rel_class_trove_id_idx", "trove_id"),
    Index("rel_class_release_id_idx", "release_id"),
)
class JournalEntry(db.ModelBase):
    """Append-only log of project/release actions (name, version, action)."""

    __tablename__ = "journals"

    @declared_attr
    def __table_args__(cls):  # noqa
        return (
            Index("journals_changelog", "submitted_date", "name", "version", "action"),
            Index("journals_name_idx", "name"),
            Index("journals_version_idx", "version"),
            Index("journals_submitted_by_idx", "submitted_by"),
            Index("journals_submitted_date_id_idx", cls.submitted_date, cls.id),
        )

    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(Text)
    version = Column(Text)
    action = Column(Text)
    submitted_date = Column(
        DateTime(timezone=False), nullable=False, server_default=sql.func.now()
    )
    _submitted_by = Column(
        "submitted_by", CIText, ForeignKey("users.username", onupdate="CASCADE")
    )
    # lazy="raise_on_sql": accessing this without an eager load raises instead
    # of silently emitting per-row SQL.
    submitted_by = orm.relationship(User, lazy="raise_on_sql")
    submitted_from = Column(Text)
class ProhibitedProjectName(db.Model):
    """A project name that may not be registered, with who/why bookkeeping."""

    __tablename__ = "prohibited_project_names"
    __table_args__ = (
        # Same validity pattern used for project names themselves.
        CheckConstraint(
            "name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
            name="prohibited_project_valid_name",
        ),
    )

    __repr__ = make_repr("name")

    created = Column(
        DateTime(timezone=False), nullable=False, server_default=sql.func.now()
    )
    name = Column(Text, unique=True, nullable=False)
    _prohibited_by = Column(
        "prohibited_by", UUID(as_uuid=True), ForeignKey("users.id"), index=True
    )
    prohibited_by = orm.relationship(User)
    comment = Column(Text, nullable=False, server_default="")
| true | true |
f7f5ea1fc216eaad5d077a381072d29a4185a1b4 | 9,461 | py | Python | tests/platform_tests/daemon/test_thermalctld.py | amulyan7/sonic-mgmt | b673fe4d830f064ae6f937c514215a7a7d0c7f33 | [
"Apache-2.0"
] | null | null | null | tests/platform_tests/daemon/test_thermalctld.py | amulyan7/sonic-mgmt | b673fe4d830f064ae6f937c514215a7a7d0c7f33 | [
"Apache-2.0"
] | null | null | null | tests/platform_tests/daemon/test_thermalctld.py | amulyan7/sonic-mgmt | b673fe4d830f064ae6f937c514215a7a7d0c7f33 | [
"Apache-2.0"
] | null | null | null | """
Check daemon status inside PMON container. Each daemon status is checked under the conditions below in this script:
* Daemon Running Status
* Daemon Stop status
* Daemon Restart status
This script is to cover the test case in the SONiC platform daemon and service test plan:
https://github.com/Azure/sonic-mgmt/blob/master/docs/testplan/PMON-Services-Daemons-test-plan.md
"""
import logging
import re
import time
from datetime import datetime
import pytest
from tests.common.helpers.assertions import pytest_assert
from tests.common.platform.daemon_utils import check_pmon_daemon_enable_status
from tests.common.platform.processes_utils import wait_critical_processes, check_critical_processes
from tests.common.utilities import compose_dict_from_cli, skip_release, wait_until
logger = logging.getLogger(__name__)

pytestmark = [
    pytest.mark.topology('any'),
    pytest.mark.sanity_check(skip_sanity=True),
    pytest.mark.disable_loganalyzer
]

# Supervisor process states we expect to observe for the daemon.
expected_running_status = "RUNNING"
expected_stopped_status = "STOPPED"
expected_exited_status = "EXITED"

daemon_name = "thermalctld"

# Signals passed to stop_pmon_daemon; None presumably selects the daemon's
# normal (supervisor) stop path — confirm against stop_pmon_daemon.
SIG_STOP_SERVICE = None
SIG_TERM = "-15"
SIG_KILL = "-9"

STATE_DB = 6
thermalctld_tbl_key = ""
@pytest.fixture(scope="module", autouse=True)
def setup(duthosts, rand_one_dut_hostname):
    """Skip the whole module when thermalctld is not enabled on this platform."""
    duthost = duthosts[rand_one_dut_hostname]
    daemon_en_status = check_pmon_daemon_enable_status(duthost, daemon_name)
    if daemon_en_status is False:
        # BUG FIX: the format string had two placeholders but three arguments,
        # so the OS version was silently dropped from the skip message.
        pytest.skip("{} is not enabled in {} {}".format(
            daemon_name, duthost.facts['platform'], duthost.os_version))
@pytest.fixture(scope="module", autouse=True)
def teardown_module(duthosts, rand_one_dut_hostname):
    """Restore the daemon and verify critical processes after the module runs."""
    duthost = duthosts[rand_one_dut_hostname]
    yield

    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    # BUG FIX: the status was compared with `is not`, which tests string
    # identity (and is a SyntaxWarning on modern Python); use a value compare.
    if daemon_status != expected_running_status:
        duthost.start_pmon_daemon(daemon_name)
        time.sleep(10)
    logger.info("Tearing down: to make sure all the critical services, interfaces and transceivers are good")
    check_critical_processes(duthost, watch_secs=10)
@pytest.fixture
def check_daemon_status(duthosts, rand_one_dut_hostname):
    """Ensure thermalctld is running before a test that stops/kills it."""
    duthost = duthosts[rand_one_dut_hostname]
    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    # BUG FIX: `is not` compares string identity; use != for value comparison.
    if daemon_status != expected_running_status:
        duthost.start_pmon_daemon(daemon_name)
        time.sleep(10)
def check_expected_daemon_status(duthost, expected_daemon_status):
    """Poll helper: True when the daemon's supervisor status matches."""
    current_status, _pid = duthost.get_pmon_daemon_status(daemon_name)
    return current_status == expected_daemon_status
def collect_data(duthost):
    """Snapshot all PHYSICAL_ENTITY_INFO keys and their hashes from STATE_DB."""
    entity_keys = duthost.shell(
        'sonic-db-cli STATE_DB KEYS "PHYSICAL_ENTITY_INFO|*"')['stdout_lines']
    entity_data = {
        key: compose_dict_from_cli(
            duthost.shell('sonic-db-cli STATE_DB HGETALL "{}"'.format(key))['stdout_lines'])
        for key in entity_keys
    }
    return {'keys': entity_keys, 'data': entity_data}
def wait_data(duthost):
    """Poll STATE_DB until thermalctld has repopulated its data, then return it."""
    latest = {"snapshot": {}}

    def _poll():
        latest["snapshot"] = collect_data(duthost)
        # Keep polling until at least one entity hash is populated.
        return bool(latest["snapshot"]['data'])

    polling_interval = 60
    wait_until(polling_interval, 6, 0, _poll)
    return latest["snapshot"]
@pytest.fixture(scope='module')
def data_before_restart(duthosts, rand_one_dut_hostname):
    """Module-scoped snapshot of thermalctld's STATE_DB data, taken up front."""
    return collect_data(duthosts[rand_one_dut_hostname])
def test_pmon_thermalctld_running_status(duthosts, rand_one_dut_hostname, data_before_restart):
    """
    @summary: This test case is to check thermalctld status on dut
    """
    duthost = duthosts[rand_one_dut_hostname]
    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, daemon_status, daemon_pid))
    pytest_assert(daemon_status == expected_running_status,
                  "{} expected running status is {} but is {}".format(daemon_name, expected_running_status, daemon_status))
    # -1 is the sentinel pid reported when the daemon is not running.
    pytest_assert(daemon_pid != -1,
                  "{} expected pid is a positive integer but is {}".format(daemon_name, daemon_pid))
    # While running, the daemon must publish PHYSICAL_ENTITY_INFO into STATE_DB.
    # BUG FIX: corrected "availale" -> "available" in the failure messages.
    pytest_assert(data_before_restart['keys'], "DB keys is not available on daemon running")
    pytest_assert(data_before_restart['data'], "DB data is not available on daemon running")
def test_pmon_thermalctld_stop_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
    """
    @summary: This test case is to check the thermalctld stopped and restarted status
    """
    duthost = duthosts[rand_one_dut_hostname]
    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))

    # Stop the daemon (SIG_STOP_SERVICE is None here) and verify it reports
    # STOPPED with the -1 sentinel pid.
    duthost.stop_pmon_daemon(daemon_name, SIG_STOP_SERVICE)
    time.sleep(2)
    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(daemon_status == expected_stopped_status,
                  "{} expected stopped status is {} but is {}".format(daemon_name, expected_stopped_status, daemon_status))
    pytest_assert(daemon_pid == -1,
                  "{} expected pid is -1 but is {}".format(daemon_name, daemon_pid))

    # A clean stop must clear the daemon's STATE_DB entries.
    data = collect_data(duthost)
    pytest_assert(not data['keys'], "DB data keys is not cleared on daemon stop")
    pytest_assert(not data['data'], "DB data is not cleared on daemon stop")

    duthost.start_pmon_daemon(daemon_name)
    time.sleep(10)
    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(post_daemon_status == expected_running_status,
                  "{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
    pytest_assert(post_daemon_pid != -1,
                  "{} expected pid is not -1 but is {}".format(daemon_name, post_daemon_pid))
    # The restarted process must be a new one (larger pid).
    pytest_assert(post_daemon_pid > pre_daemon_pid,
                  "Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))

    # After restart the daemon must repopulate exactly the same data.
    data_after_restart = wait_data(duthost)
    pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
def test_pmon_thermalctld_term_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
    """
    @summary: This test case is to check the thermalctld terminated and restarted status
    """
    duthost = duthosts[rand_one_dut_hostname]
    # Not applicable on 201811/201911 images (see skip_release).
    skip_release(duthost, ["201811", "201911"])
    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))

    # SIGTERM the daemon; it should come back RUNNING within 50s (poll every 10s).
    duthost.stop_pmon_daemon(daemon_name, SIG_TERM, pre_daemon_pid)

    wait_until(50, 10, 0, check_expected_daemon_status, duthost, expected_running_status)
    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(post_daemon_status == expected_running_status,
                  "{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
    pytest_assert(post_daemon_pid != -1,
                  "{} expected pid is not -1 but is {}".format(daemon_name, post_daemon_pid))
    # The restarted process must be a new one (larger pid).
    pytest_assert(post_daemon_pid > pre_daemon_pid,
                  "Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))

    # After restart the daemon must repopulate exactly the same data.
    data_after_restart = wait_data(duthost)
    pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
def test_pmon_thermalctld_kill_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
    """
    @summary: This test case is to check the thermalctld killed unexpectedly (automatically restarted) status
    """
    duthost = duthosts[rand_one_dut_hostname]
    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))

    # SIGKILL the daemon; immediately afterwards it must not report RUNNING.
    duthost.stop_pmon_daemon(daemon_name, SIG_KILL, pre_daemon_pid)

    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(daemon_status != expected_running_status,
                  "{} unexpected killed status is not {}".format(daemon_name, daemon_status))

    # The supervisor should then restart it automatically.
    time.sleep(10)
    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(post_daemon_status == expected_running_status,
                  "{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
    pytest_assert(post_daemon_pid != -1,
                  "{} expected pid is not -1 but is {}".format(daemon_name, post_daemon_pid))
    # The restarted process must be a new one (larger pid).
    pytest_assert(post_daemon_pid > pre_daemon_pid,
                  "Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))

    # After restart the daemon must repopulate exactly the same data.
    data_after_restart = wait_data(duthost)
    pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
| 45.267943 | 138 | 0.739668 | import logging
import re
import time
from datetime import datetime
import pytest
from tests.common.helpers.assertions import pytest_assert
from tests.common.platform.daemon_utils import check_pmon_daemon_enable_status
from tests.common.platform.processes_utils import wait_critical_processes, check_critical_processes
from tests.common.utilities import compose_dict_from_cli, skip_release, wait_until
logger = logging.getLogger(__name__)

pytestmark = [
    pytest.mark.topology('any'),
    pytest.mark.sanity_check(skip_sanity=True),
    pytest.mark.disable_loganalyzer
]

# Supervisor process states we expect to observe for the daemon.
expected_running_status = "RUNNING"
expected_stopped_status = "STOPPED"
expected_exited_status = "EXITED"

daemon_name = "thermalctld"

# Signals passed to stop_pmon_daemon; None presumably selects the daemon's
# normal (supervisor) stop path — confirm against stop_pmon_daemon.
SIG_STOP_SERVICE = None
SIG_TERM = "-15"
SIG_KILL = "-9"

STATE_DB = 6
thermalctld_tbl_key = ""
@pytest.fixture(scope="module", autouse=True)
def setup(duthosts, rand_one_dut_hostname):
    """Skip the whole module when thermalctld is not enabled on this platform."""
    duthost = duthosts[rand_one_dut_hostname]
    daemon_en_status = check_pmon_daemon_enable_status(duthost, daemon_name)
    if daemon_en_status is False:
        # BUG FIX: the format string had two placeholders but three arguments,
        # so the OS version was silently dropped from the skip message.
        pytest.skip("{} is not enabled in {} {}".format(
            daemon_name, duthost.facts['platform'], duthost.os_version))
@pytest.fixture(scope="module", autouse=True)
def teardown_module(duthosts, rand_one_dut_hostname):
    """Restore the daemon and verify critical processes after the module runs."""
    duthost = duthosts[rand_one_dut_hostname]
    yield

    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    # BUG FIX: the status was compared with `is not`, which tests string
    # identity (and is a SyntaxWarning on modern Python); use a value compare.
    if daemon_status != expected_running_status:
        duthost.start_pmon_daemon(daemon_name)
        time.sleep(10)
    logger.info("Tearing down: to make sure all the critical services, interfaces and transceivers are good")
    check_critical_processes(duthost, watch_secs=10)
@pytest.fixture
def check_daemon_status(duthosts, rand_one_dut_hostname):
    """Ensure thermalctld is running before a test that stops/kills it."""
    duthost = duthosts[rand_one_dut_hostname]
    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    # BUG FIX: `is not` compares string identity; use != for value comparison.
    if daemon_status != expected_running_status:
        duthost.start_pmon_daemon(daemon_name)
        time.sleep(10)
def check_expected_daemon_status(duthost, expected_daemon_status):
    """Poll helper: True when the daemon's supervisor status matches."""
    current_status, _pid = duthost.get_pmon_daemon_status(daemon_name)
    return current_status == expected_daemon_status
def collect_data(duthost):
    """Snapshot all PHYSICAL_ENTITY_INFO keys and their hashes from STATE_DB."""
    entity_keys = duthost.shell(
        'sonic-db-cli STATE_DB KEYS "PHYSICAL_ENTITY_INFO|*"')['stdout_lines']
    entity_data = {
        key: compose_dict_from_cli(
            duthost.shell('sonic-db-cli STATE_DB HGETALL "{}"'.format(key))['stdout_lines'])
        for key in entity_keys
    }
    return {'keys': entity_keys, 'data': entity_data}
def wait_data(duthost):
    """Poll STATE_DB until thermalctld has repopulated its data, then return it."""
    latest = {"snapshot": {}}

    def _poll():
        latest["snapshot"] = collect_data(duthost)
        # Keep polling until at least one entity hash is populated.
        return bool(latest["snapshot"]['data'])

    polling_interval = 60
    wait_until(polling_interval, 6, 0, _poll)
    return latest["snapshot"]
@pytest.fixture(scope='module')
def data_before_restart(duthosts, rand_one_dut_hostname):
    """Module-scoped snapshot of thermalctld's STATE_DB data, taken up front."""
    return collect_data(duthosts[rand_one_dut_hostname])
def test_pmon_thermalctld_running_status(duthosts, rand_one_dut_hostname, data_before_restart):
    """Verify the daemon reports RUNNING with a valid pid and that it has
    populated PHYSICAL_ENTITY_INFO data in STATE_DB."""
    duthost = duthosts[rand_one_dut_hostname]
    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, daemon_status, daemon_pid))
    pytest_assert(daemon_status == expected_running_status,
                  "{} expected running status is {} but is {}".format(daemon_name, expected_running_status, daemon_status))
    pytest_assert(daemon_pid != -1,
                  "{} expected pid is a positive integer but is {}".format(daemon_name, daemon_pid))
    # Typo fixed in both assertion messages: "availale" -> "available".
    pytest_assert(data_before_restart['keys'], "DB keys is not available on daemon running")
    pytest_assert(data_before_restart['data'], "DB data is not available on daemon running")
def test_pmon_thermalctld_stop_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
    """Stop the daemon via its stop signal, verify it is stopped and its DB
    entries are cleared, then start it again and verify full recovery."""
    duthost = duthosts[rand_one_dut_hostname]
    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))

    # Stop the daemon and confirm it reports stopped with no pid.
    duthost.stop_pmon_daemon(daemon_name, SIG_STOP_SERVICE)
    time.sleep(2)
    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(daemon_status == expected_stopped_status,
                  "{} expected stopped status is {} but is {}".format(daemon_name, expected_stopped_status, daemon_status))
    pytest_assert(daemon_pid == -1,
                  "{} expected pid is -1 but is {}".format(daemon_name, daemon_pid))

    # STATE_DB entries must be cleaned up while the daemon is stopped.
    data = collect_data(duthost)
    pytest_assert(not data['keys'], "DB data keys is not cleared on daemon stop")
    pytest_assert(not data['data'], "DB data is not cleared on daemon stop")

    # Start the daemon again; expect a fresh (larger) pid and restored DB data.
    duthost.start_pmon_daemon(daemon_name)
    time.sleep(10)
    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(post_daemon_status == expected_running_status,
                  "{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
    pytest_assert(post_daemon_pid != -1,
                  "{} expected pid is not -1 but is {}".format(daemon_name, post_daemon_pid))
    pytest_assert(post_daemon_pid > pre_daemon_pid,
                  "Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))
    data_after_restart = wait_data(duthost)
    pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
def test_pmon_thermalctld_term_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
    """Send SIGTERM to the daemon and verify supervisor restarts it with a new
    pid and that the STATE_DB data is fully restored."""
    duthost = duthosts[rand_one_dut_hostname]
    # Auto-restart on SIGTERM is not supported on these older releases.
    skip_release(duthost, ["201811", "201911"])
    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))

    # Terminate the process, then poll until it is reported running again.
    duthost.stop_pmon_daemon(daemon_name, SIG_TERM, pre_daemon_pid)
    wait_until(50, 10, 0, check_expected_daemon_status, duthost, expected_running_status)

    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(post_daemon_status == expected_running_status,
                  "{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
    pytest_assert(post_daemon_pid != -1,
                  "{} expected pid is not -1 but is {}".format(daemon_name, post_daemon_pid))
    pytest_assert(post_daemon_pid > pre_daemon_pid,
                  "Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))
    data_after_restart = wait_data(duthost)
    pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
def test_pmon_thermalctld_kill_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
    """Send SIGKILL to the daemon and verify supervisor brings it back up with
    a new pid and that the STATE_DB data is fully restored."""
    duthost = duthosts[rand_one_dut_hostname]
    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))

    # Kill the process; immediately afterwards it must not report RUNNING.
    duthost.stop_pmon_daemon(daemon_name, SIG_KILL, pre_daemon_pid)
    daemon_status, _ = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(daemon_status != expected_running_status,
                  "{} unexpected killed status is not {}".format(daemon_name, daemon_status))

    # Give supervisor time to respawn the daemon, then verify recovery.
    time.sleep(10)
    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(post_daemon_status == expected_running_status,
                  "{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
    pytest_assert(post_daemon_pid != -1,
                  "{} expected pid is not -1 but is {}".format(daemon_name, post_daemon_pid))
    pytest_assert(post_daemon_pid > pre_daemon_pid,
                  "Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))
    data_after_restart = wait_data(duthost)
    pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
| true | true |
f7f5eabe413f7e08e0c022e2ba87bc6892feabd3 | 4,130 | py | Python | AlphaGoZero/code/Framework/Mcts.py | OoSnowfxm/AlphaZero_Othello | 3e94ac29dbac413502eb85628a0f8eb6d402d5e9 | [
"MIT"
] | null | null | null | AlphaGoZero/code/Framework/Mcts.py | OoSnowfxm/AlphaZero_Othello | 3e94ac29dbac413502eb85628a0f8eb6d402d5e9 | [
"MIT"
] | null | null | null | AlphaGoZero/code/Framework/Mcts.py | OoSnowfxm/AlphaZero_Othello | 3e94ac29dbac413502eb85628a0f8eb6d402d5e9 | [
"MIT"
] | null | null | null | '''
@Author: fxm
@Date: Dec 27, 2020.
@Title: Mcts class.
'''
import logging
import math
import numpy as np
Eps = 1e-8
log = logging.getLogger(__name__)
# Monte-Carlo tree search object
class MCTS():
    '''
    Initialization.
    Parameters:
        game: the game object
        net:  the neural-network object
        args: hyper-parameters
    Tables:
        N(s,a):   visit count of edge (s,a)
        S(s):     visit count of state s, i.e. S(s) = sum(N(s,i))
        Q(s,a):   mean action value
        P(s,a):   prior probability of choosing edge (s,a)
        Ended(s): whether state s corresponds to a finished game
        Valid(s): the valid actions available in state s
    '''
    def __init__(self, game, net, args):
        self.game = game
        self.net = net
        self.args = args
        self.Q = {}
        self.N = {}
        self.S = {}
        self.P = {}
        self.Ended = {}
        self.Valid = {}
    '''
    Return the probability vector over actions for the current board.
    When temp is 0 (evaluation, or late in training when the search is deep),
    the whole probability mass is placed on the most visited action.
    '''
    def getActionProb(self, canonicalBoard, temp=1):
        for _ in range(self.args.numMCTSSims):
            self.search(canonicalBoard)
        # string representation of the board, used as the state key
        s = self.game.stringRepresentation(canonicalBoard)
        counts = [self.N[(s, a)] if (s, a) in self.N else 0 \
            for a in range(self.game.getActionSize())]
        # temp == 0: deterministically pick (one of) the most visited actions;
        # used during evaluation and in late training stages
        if temp == 0:
            idx = np.array(np.argwhere(counts == np.max(counts))).flatten()
            idx = np.random.choice(idx)
            probs = [0] * len(counts)
            probs[idx] = 1
            return probs
        # temp != 0: return a temperature-scaled probability vector;
        # used in early training stages to keep exploration
        counts = [x ** (1. / temp) for x in counts]
        counts_sum = float(sum(counts))
        probs = [x / counts_sum for x in counts]
        return probs
    '''
    Monte-Carlo tree search.
    Receives the board as seen by the current player and recursively updates
    the Q table; returns the negated value estimate for the parent (opponent).
    '''
    def search(self, canonicalBoard):
        # state key of the canonical board (current player's point of view)
        s = self.game.stringRepresentation(canonicalBoard)
        # cache the game-ended flag for this state if not seen before
        if s not in self.Ended:
            self.Ended[s] = self.game.getGameEnded(canonicalBoard, 1)
        # -2 means "not finished"; anything else is a terminal value
        if self.Ended[s] != -2:
            return -self.Ended[s]
        # leaf node: use the network's prediction as the prior policy
        if s not in self.P:
            self.P[s], v = self.net.predict(canonicalBoard)
            valids = self.game.getValid(canonicalBoard, 1)
            self.P[s] = self.P[s] * valids
            sump = np.sum(self.P[s])
            # renormalise the masked policy into a valid distribution
            if sump > 0:
                self.P[s] /= sump
            # if the network put no mass on any valid move, fall back to a
            # uniform distribution over the valid moves
            else:
                log.error("All valid moves were masked, doing a workaround.")
                self.P[s] = self.P[s] + valids
                self.P[s] /= np.sum(self.P[s])
            self.Valid[s] = valids
            self.S[s] = 0
            return -v
        # interior node: pick the valid action with the highest UCT score
        valids = self.Valid[s]
        best = -float('inf')
        best_action = -1
        for a in range(self.game.getActionSize()):
            if valids[a]:
                # edge already visited: full UCT formula (exploit + explore)
                if (s, a) in self.Q:
                    u = self.Q[(s, a)] + self.args.cpuct * self.P[s][a] * \
                        math.sqrt(self.S[s]) / (1 + self.N[(s, a)])
                # unvisited edge: exploration term driven by the prior only
                else:
                    u = self.args.cpuct * self.P[s][a] * math.sqrt(self.S[s] + Eps)
                # keep the best action so far
                if u > best:
                    best = u
                    best_action = a
        # descend into the chosen child; the recursion performs the backup
        # NOTE(review): assumes every non-terminal state has at least one
        # valid move, otherwise best_action stays -1 — confirm in the game impl
        a = best_action
        next_state, next_player = self.game.getNextState(canonicalBoard, 1, a)
        next_state = self.game.getCanonicalForm(next_state, next_player)
        v = self.search(next_state)
        # back-propagation: update the running-average Q and the visit counts
        if (s, a) in self.Q:
            self.Q[(s, a)] = (self.N[(s, a)] * self.Q[(s, a)] + v * 1) / (self.N[(s, a)] + 1)
            self.N[(s, a)] += 1
        else:
            self.Q[(s, a)] = v
            self.N[(s, a)] = 1
        self.S[s] += 1
        return -v
| 29.29078 | 93 | 0.489104 |
import logging
import math
import numpy as np
Eps = 1e-8
log = logging.getLogger(__name__)
class MCTS():
    """Monte-Carlo tree search driven by a policy/value network.

    Tables (keyed by the board's string representation s and action a):
        N[(s,a)]: visit count of edge (s,a)
        S[s]:     visit count of state s
        Q[(s,a)]: mean action value
        P[s]:     prior policy vector for state s
        Ended[s]: cached game-ended result for state s
        Valid[s]: valid-move mask for state s
    """
    def __init__(self, game, net, args):
        self.game = game
        self.net = net
        self.args = args
        self.Q = {}
        self.N = {}
        self.S = {}
        self.P = {}
        self.Ended = {}
        self.Valid = {}
    def getActionProb(self, canonicalBoard, temp=1):
        """Run numMCTSSims simulations and return an action probability
        vector; with temp == 0 all mass goes to the most visited action."""
        for _ in range(self.args.numMCTSSims):
            self.search(canonicalBoard)
        s = self.game.stringRepresentation(canonicalBoard)
        counts = [self.N[(s, a)] if (s, a) in self.N else 0 \
            for a in range(self.game.getActionSize())]
        # temp == 0: deterministic choice among the most visited actions
        if temp == 0:
            idx = np.array(np.argwhere(counts == np.max(counts))).flatten()
            idx = np.random.choice(idx)
            probs = [0] * len(counts)
            probs[idx] = 1
            return probs
        # otherwise: temperature-scaled distribution over visit counts
        counts = [x ** (1. / temp) for x in counts]
        counts_sum = float(sum(counts))
        probs = [x / counts_sum for x in counts]
        return probs
    def search(self, canonicalBoard):
        """One recursive MCTS simulation from the given canonical board;
        returns the negated value estimate for the parent (opponent)."""
        s = self.game.stringRepresentation(canonicalBoard)
        # cache and honour terminal states (-2 means "not finished")
        if s not in self.Ended:
            self.Ended[s] = self.game.getGameEnded(canonicalBoard, 1)
        if self.Ended[s] != -2:
            return -self.Ended[s]
        # leaf node: expand with the network's prior policy and value
        if s not in self.P:
            self.P[s], v = self.net.predict(canonicalBoard)
            valids = self.game.getValid(canonicalBoard, 1)
            self.P[s] = self.P[s] * valids  # mask invalid moves
            sump = np.sum(self.P[s])
            if sump > 0:
                self.P[s] /= sump  # renormalise masked policy
            else:
                # network gave no mass to any valid move: fall back to
                # a uniform distribution over the valid moves
                log.error("All valid moves were masked, doing a workaround.")
                self.P[s] = self.P[s] + valids
                self.P[s] /= np.sum(self.P[s])
            self.Valid[s] = valids
            self.S[s] = 0
            return -v
        # interior node: choose the valid action with the highest UCT score
        valids = self.Valid[s]
        best = -float('inf')
        best_action = -1
        for a in range(self.game.getActionSize()):
            if valids[a]:
                if (s, a) in self.Q:
                    # visited edge: exploitation + exploration terms
                    u = self.Q[(s, a)] + self.args.cpuct * self.P[s][a] * \
                        math.sqrt(self.S[s]) / (1 + self.N[(s, a)])
                else:
                    # unvisited edge: prior-driven exploration only
                    u = self.args.cpuct * self.P[s][a] * math.sqrt(self.S[s] + Eps)
                if u > best:
                    best = u
                    best_action = a
        # descend into the chosen child; recursion performs the backup
        a = best_action
        next_state, next_player = self.game.getNextState(canonicalBoard, 1, a)
        next_state = self.game.getCanonicalForm(next_state, next_player)
        v = self.search(next_state)
        # back-propagation: update running-average Q and visit counts
        if (s, a) in self.Q:
            self.Q[(s, a)] = (self.N[(s, a)] * self.Q[(s, a)] + v * 1) / (self.N[(s, a)] + 1)
            self.N[(s, a)] += 1
        else:
            self.Q[(s, a)] = v
            self.N[(s, a)] = 1
        self.S[s] += 1
        return -v
| true | true |
f7f5eae37407bb2661cb2eeff7ef4ae4939be1fd | 410 | py | Python | autogluon/task/__init__.py | zhanghang1989/autogluon | 8bfe6b0da8915020eeb9895fd18d7688c0d604c1 | [
"Apache-2.0"
] | 2 | 2021-09-14T21:28:54.000Z | 2021-11-17T09:52:41.000Z | autogluon/task/__init__.py | zhanghang1989/autogluon | 8bfe6b0da8915020eeb9895fd18d7688c0d604c1 | [
"Apache-2.0"
] | null | null | null | autogluon/task/__init__.py | zhanghang1989/autogluon | 8bfe6b0da8915020eeb9895fd18d7688c0d604c1 | [
"Apache-2.0"
] | 1 | 2021-03-11T10:45:00.000Z | 2021-03-11T10:45:00.000Z | import logging
logging.basicConfig(format='%(message)s') # just print message in logs
from .base import BaseTask
from .image_classification import ImageClassification
from .object_detection import ObjectDetection
from .text_classification import TextClassification
from .tabular_prediction import TabularPrediction
from . import image_classification, object_detection, text_classification, tabular_prediction
| 41 | 93 | 0.863415 | import logging
logging.basicConfig(format='%(message)s')
from .base import BaseTask
from .image_classification import ImageClassification
from .object_detection import ObjectDetection
from .text_classification import TextClassification
from .tabular_prediction import TabularPrediction
from . import image_classification, object_detection, text_classification, tabular_prediction
| true | true |
f7f5ec6cd6dd1793f3914cca57ef47e8cf737618 | 48 | py | Python | maintenance_mode/version.py | synapticarbors/django-maintenance-mode | bc7739457d6394bcf6664d2f03d36015601572d2 | [
"MIT"
] | null | null | null | maintenance_mode/version.py | synapticarbors/django-maintenance-mode | bc7739457d6394bcf6664d2f03d36015601572d2 | [
"MIT"
] | null | null | null | maintenance_mode/version.py | synapticarbors/django-maintenance-mode | bc7739457d6394bcf6664d2f03d36015601572d2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__version__ = '0.15.0'
| 12 | 23 | 0.520833 |
__version__ = '0.15.0'
| true | true |
f7f5ed1e8b4f1158aaff891dafd048a8c9c718d7 | 5,877 | py | Python | ekorpkit/io/fetch/fomc/meeting.py | entelecheia/ekorpkit | 400cb15005fdbcaa2ab0c311e338799283f28fe0 | [
"CC-BY-4.0"
] | 4 | 2022-02-26T10:54:16.000Z | 2022-02-26T11:01:56.000Z | ekorpkit/io/fetch/fomc/meeting.py | entelecheia/ekorpkit | 400cb15005fdbcaa2ab0c311e338799283f28fe0 | [
"CC-BY-4.0"
] | 1 | 2022-03-25T06:37:12.000Z | 2022-03-25T06:45:53.000Z | ekorpkit/io/fetch/fomc/meeting.py | entelecheia/ekorpkit | 400cb15005fdbcaa2ab0c311e338799283f28fe0 | [
"CC-BY-4.0"
] | null | null | null | import os
import sys
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import pdfplumber
# Import parent class
from .base import FomcBase
class MeetingScript(FomcBase):
    """
    A convenient class for extracting meeting scripts from the FOMC website.
    FOMC publishes the meeting scripts after 5 years, so this cannot be used for the prediction of the monetary policy in real-time.
    """

    def __init__(self, content_type, **args):
        super().__init__(content_type, **args)
        # Crop box (as fractions of the page size) used to strip page
        # headers/footers when extracting text from the transcript PDFs.
        left_pct = 0.05    # % distance of left edge from left side of page
        top_pct = 0.10     # % distance of top edge from top of page
        right_pct = 0.95   # % distance of right edge from left side of page
        bottom_pct = 0.88  # % distance of bottom edge from top of page
        self.crop_coords = [left_pct, top_pct, right_pct, bottom_pct]

    def _get_links(self, from_year):
        """
        Override private function that sets all the links for the contents to
        download from the FOMC website, from from_year up to 2014 (meeting
        scripts are only published with a five-year lag, in the archive).
        """
        self.links = []
        self.titles = []
        self.speakers = []
        self.dates = []
        r = requests.get(self.calendar_url)
        soup = BeautifulSoup(r.text, "html.parser")
        # Meeting scripts can be found only in the archive, as they are
        # published after five years.
        if from_year > 2014:
            print("Meeting scripts are available for 2014 or older")
        if from_year <= 2014:
            for year in range(from_year, 2015):
                fomc_yearly_url = (
                    self.base_url
                    + "/monetarypolicy/fomchistorical"
                    + str(year)
                    + ".htm"
                )
                r_year = requests.get(fomc_yearly_url)
                soup_yearly = BeautifulSoup(r_year.text, "html.parser")
                # Raw string so "\d" is a regex digit class rather than a
                # deprecated invalid string escape sequence.
                meeting_scripts = soup_yearly.find_all(
                    "a", href=re.compile(r"^/monetarypolicy/files/FOMC\d{8}meeting.pdf")
                )
                for meeting_script in meeting_scripts:
                    self.links.append(meeting_script.attrs["href"])
                    self.speakers.append(
                        self._speaker_from_date(
                            self._date_from_link(meeting_script.attrs["href"])
                        )
                    )
                    self.titles.append("FOMC Meeting Transcript")
                    self.dates.append(
                        datetime.strptime(
                            self._date_from_link(meeting_script.attrs["href"]),
                            "%Y-%m-%d",
                        )
                    )
                if self.verbose:
                    print(
                        "YEAR: {} - {} meeting scripts found.".format(
                            year, len(meeting_scripts)
                        )
                    )
        print("There are total ", len(self.links), " links for ", self.content_type)

    def _add_article(self, link, index=None):
        """
        Override a private function that adds a related article for 1 link into the instance variable
        The index is the index in the article to add to.
        Due to concurrent processing, we need to make sure the articles are stored in the right order
        """
        if self.verbose:
            sys.stdout.write(".")
            sys.stdout.flush()
        link_url = self.base_url + link
        pdf_filepath = (
            self.output_raw_dir
            + "/FOMC_MeetingScript_"
            + self._date_from_link(link)
            + ".pdf"
        )
        if not os.path.exists(pdf_filepath) or self.force_download:
            # Scripts are provided only in pdf; download and cache the file.
            res = requests.get(link_url)
            with open(pdf_filepath, "wb") as f:
                f.write(res.content)
        else:
            if self.verbose:
                print("File already exists: ", pdf_filepath)
        # Extract text from the pdf, cropping away page headers/footers.
        pdf_file_parsed = ""
        with pdfplumber.open(pdf_filepath) as pdf:
            for page in pdf.pages:
                pg_bbox = (
                    self.crop_coords[0] * float(page.width),
                    self.crop_coords[1] * float(page.height),
                    self.crop_coords[2] * float(page.width),
                    self.crop_coords[3] * float(page.height),
                )
                page_crop = page.crop(bbox=pg_bbox)
                text = page_crop.extract_text()
                pdf_file_parsed = pdf_file_parsed + "\n" + text
        # Split the text into paragraphs and group them into speaker sections.
        paragraphs = re.sub("(\n)(\n)+", "\n", pdf_file_parsed.strip())
        paragraphs = paragraphs.split("\n")
        section = -1
        paragraph_sections = []
        for paragraph in paragraphs:
            # Skip page-number and date header lines.
            if not re.search(
                "^(page|january|february|march|april|may|june|july|august|september|october|november|december|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)",
                paragraph.lower(),
            ):
                # A run of uppercase letters at the start marks a new speaker,
                # excluding a few known all-caps false positives.
                if len(re.findall(r"[A-Z]", paragraph[:10])) > 5 and not re.search(
                    "(present|frb/us|abs cdo|libor|rp–ioer|lsaps|cusip|nairu|s cpi|clos, r)",
                    paragraph[:10].lower(),
                ):
                    section += 1
                    paragraph_sections.append("")
                if section >= 0:
                    paragraph_sections[section] += paragraph
        self.articles[index] = self.segment_separator.join(paragraph_sections)
| 41.097902 | 160 | 0.540412 | import os
import sys
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import pdfplumber
from .base import FomcBase
class MeetingScript(FomcBase):
    """Extracts FOMC meeting transcripts (published with a five-year lag)
    from the FOMC website archive and parses them into speaker sections."""
    def __init__(self, content_type, **args):
        super().__init__(content_type, **args)
        # Crop box (fractions of page size) used to strip headers/footers
        # when extracting text from the transcript PDFs.
        left_pct = 0.05    # left edge, as fraction of page width
        top_pct = 0.10     # top edge, as fraction of page height
        right_pct = (
            0.95           # right edge, as fraction of page width
        )
        bottom_pct = 0.88  # bottom edge, as fraction of page height
        self.crop_coords = [left_pct, top_pct, right_pct, bottom_pct]
    def _get_links(self, from_year):
        """Collect transcript PDF links (plus titles, speakers, dates) from
        the yearly archive pages, for from_year up to 2014."""
        self.links = []
        self.titles = []
        self.speakers = []
        self.dates = []
        r = requests.get(self.calendar_url)
        soup = BeautifulSoup(r.text, "html.parser")
        # Scripts are only published after five years, so only 2014 or
        # older years are available.
        if from_year > 2014:
            print("Meeting scripts are available for 2014 or older")
        if from_year <= 2014:
            for year in range(from_year, 2015):
                yearly_contents = []
                fomc_yearly_url = (
                    self.base_url
                    + "/monetarypolicy/fomchistorical"
                    + str(year)
                    + ".htm"
                )
                r_year = requests.get(fomc_yearly_url)
                soup_yearly = BeautifulSoup(r_year.text, "html.parser")
                # NOTE(review): pattern is not a raw string, so "\d" relies
                # on Python passing unknown escapes through — consider r"".
                meeting_scripts = soup_yearly.find_all(
                    "a", href=re.compile("^/monetarypolicy/files/FOMC\d{8}meeting.pdf")
                )
                for meeting_script in meeting_scripts:
                    self.links.append(meeting_script.attrs["href"])
                    self.speakers.append(
                        self._speaker_from_date(
                            self._date_from_link(meeting_script.attrs["href"])
                        )
                    )
                    self.titles.append("FOMC Meeting Transcript")
                    self.dates.append(
                        datetime.strptime(
                            self._date_from_link(meeting_script.attrs["href"]),
                            "%Y-%m-%d",
                        )
                    )
                if self.verbose:
                    print(
                        "YEAR: {} - {} meeting scripts found.".format(
                            year, len(meeting_scripts)
                        )
                    )
        print("There are total ", len(self.links), " links for ", self.content_type)
    def _add_article(self, link, index=None):
        """Download one transcript PDF (if not cached), extract its text with
        the crop box applied, and store the speaker-sectioned text at
        self.articles[index]."""
        if self.verbose:
            sys.stdout.write(".")
            sys.stdout.flush()
        link_url = self.base_url + link
        pdf_filepath = (
            self.output_raw_dir
            + "/FOMC_MeetingScript_"
            + self._date_from_link(link)
            + ".pdf"
        )
        if not os.path.exists(pdf_filepath) or self.force_download:
            # Scripts are only provided as pdf; download and cache the file.
            res = requests.get(link_url)
            with open(pdf_filepath, "wb") as f:
                f.write(res.content)
        else:
            if self.verbose:
                print("File already exists: ", pdf_filepath)
        # Extract text page by page, cropping away headers/footers.
        pdf_file_parsed = ""
        with pdfplumber.open(pdf_filepath) as pdf:
            for page in pdf.pages:
                pg_width = page.width
                pg_height = page.height
                pg_bbox = (
                    self.crop_coords[0] * float(pg_width),
                    self.crop_coords[1] * float(pg_height),
                    self.crop_coords[2] * float(pg_width),
                    self.crop_coords[3] * float(pg_height),
                )
                page_crop = page.crop(bbox=pg_bbox)
                text = page_crop.extract_text()
                pdf_file_parsed = pdf_file_parsed + "\n" + text
        # Collapse blank lines, then group paragraphs into speaker sections.
        paragraphs = re.sub("(\n)(\n)+", "\n", pdf_file_parsed.strip())
        paragraphs = paragraphs.split("\n")
        section = -1
        paragraph_sections = []
        for paragraph in paragraphs:
            # Skip page-number and date header lines.
            if not re.search(
                "^(page|january|february|march|april|may|june|july|august|september|october|november|december|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)",
                paragraph.lower(),
            ):
                # A run of uppercase letters at the start marks a new speaker,
                # excluding a few known all-caps false positives.
                if len(re.findall(r"[A-Z]", paragraph[:10])) > 5 and not re.search(
                    "(present|frb/us|abs cdo|libor|rp–ioer|lsaps|cusip|nairu|s cpi|clos, r)",
                    paragraph[:10].lower(),
                ):
                    section += 1
                    paragraph_sections.append("")
                if section >= 0:
                    paragraph_sections[section] += paragraph
        self.articles[index] = self.segment_separator.join(
            [paragraph for paragraph in paragraph_sections]
        )
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.