hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7952735eef5b3e9978889fffc916a27bb6afa110 | 2,602 | py | Python | example_project/openpyxl-test-01.py | museroom/reportbot | 01edc30482c0027c767fcc9acbf1cd166fb5512f | [
"BSD-3-Clause"
] | null | null | null | example_project/openpyxl-test-01.py | museroom/reportbot | 01edc30482c0027c767fcc9acbf1cd166fb5512f | [
"BSD-3-Clause"
] | null | null | null | example_project/openpyxl-test-01.py | museroom/reportbot | 01edc30482c0027c767fcc9acbf1cd166fb5512f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Fetch an xlsx template over HTTP, stamp a logo and database-backed
template tags into it with openpyxl, and save it to a temp directory."""
import os
import sys
import re
from io import BytesIO

import django

# Django must be configured before importing any model (photologue below).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'example_project.settings')
django.setup()

from django.conf import settings
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.drawing.image import Image

try:  # Python 3
    from urllib.request import urlopen
except ImportError:  # Python 2 fallback
    from urllib2 import urlopen

static_root = getattr(settings, 'STATIC_ROOT', '')
static_url = getattr(settings, 'STATIC_URL', '')
app_url = 'http://reportbot.5tring.com:4000'
tmp_root = '/media/djmedia/mr_forgot/tmp'
xlsx_root = 'xlsx'
filename_in = 'cm-template.xlsx'
filename_out = 'test-photo.xlsx'

fn_in = os.path.join(static_root, xlsx_root, filename_in)
url_in = app_url + os.path.join(static_url, xlsx_root, filename_in)
fn_out = os.path.join(tmp_root, xlsx_root, filename_out)

# The template is fetched over HTTP below; the local path is informational
# only (the original discarded this check's result entirely).
if not os.path.exists(fn_in):
    print('warning: template not found locally: {}'.format(fn_in))

print('url_in=' + url_in)
# Download the xlsx template into memory and open it as a workbook.
image_data = BytesIO(urlopen(url_in).read())
img_url = "http://reportbot.5tring.com:4000/media/photologue/photos/image1.png"
wb = load_workbook(image_data)
ws = wb.active

# Dump every non-empty cell ("<col><row>: value") for debugging.
i = 0
for row in ws.rows:
    for cell in row:
        if cell.value:
            print(cell.column + str(cell.row) + ":" + cell.value)
            i = i + 1

fn_out_path, filename = os.path.split(fn_out)
print("fn_out_path={}, filename={}".format(fn_out_path, filename))
if not os.path.exists(fn_out_path):
    os.mkdir(fn_out_path)

# Insert Logo on every page
print('insert image {}'.format(img_url))
image_data = BytesIO(urlopen(img_url).read())
img_width = 151
img_height = 134
img = Image(image_data, size=[img_width, img_height])
print(dir(img.drawing))
ws.add_image(img, 'B1')

# regex find all template tags, replace tags with database field
from photologue.models import PhotoGroup, Photo

q_pg = PhotoGroup.objects.get(id=6)
fields_pg = q_pg._meta.get_fields()
pattern = r'^{{(?P<name>\w+)}}$'
non_db_field = ['page_num', 'page_total', 'serial_no']
# NOTE(review): get_cell_collection() is a legacy openpyxl worksheet API —
# confirm the pinned openpyxl version still provides it.
for cell in ws.get_cell_collection():
    if cell.value:
        res = re.match(pattern, cell.value)
        if res:
            db_field = res.group('name')
            if db_field not in non_db_field:
                # getattr replaces eval("q_pg.<field>"): identical result
                # for \w+ names, without executing arbitrary strings.
                db_value = getattr(q_pg, db_field)
            else:
                # BUG FIX: was `db_value == ...`, a no-op comparison that
                # left db_value undefined for the first non-db tag.
                db_value = 'non db_value FIXME'
            cell.value = db_value

# save and exit
print('saving {}'.format(fn_out))
wb.save(fn_out)
| 29.235955 | 80 | 0.676403 |
79527382ac59bf2d6f64e86687f393e1ff97bc0e | 66,204 | py | Python | storage/tests/test_storage.py | andressamagblr/vault | f37d61b93a96d91278b5d4163f336ada7209240f | [
"Apache-2.0"
] | null | null | null | storage/tests/test_storage.py | andressamagblr/vault | f37d61b93a96d91278b5d4163f336ada7209240f | [
"Apache-2.0"
] | null | null | null | storage/tests/test_storage.py | andressamagblr/vault | f37d61b93a96d91278b5d4163f336ada7209240f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from unittest.mock import patch, Mock
from unittest import TestCase
import requests
from swiftclient import client
from django.urls import reverse
from django.utils.translation import gettext as _
from django.test.utils import override_settings
from django.contrib.auth.models import Group, User
from storage.tests import fakes
from storage import views
from vault.tests.fakes import fake_request
from vault import utils
class MockRequest:
    """Minimal stand-in for an HTTP response object: empty JSON body."""

    def __init__(self):
        # Mimics `requests.Response.text` holding an empty JSON payload.
        self.text = '{}'
class BaseTestCase(TestCase):
    """Shared fixture: one authenticated and one anonymous fake request,
    with storage action logging and keystone v3 patched out."""

    @classmethod
    def setUpClass(cls):
        # Keystone must never be reached from these tests.
        patch('identity.keystone.v3').start()

    @classmethod
    def tearDownClass(cls):
        patch.stopall()

    def setUp(self):
        self.request = fake_request()
        self.anonymous_request = fake_request(user=False)
        # Action logging is asserted through dedicated mocks per-test.
        patch('storage.views.main.actionlog',
              Mock(return_value=None)).start()

    def tearDown(self):
        # Remove users/groups created by the fake_request fixtures.
        User.objects.all().delete()
        Group.objects.all().delete()
class TestStorage(BaseTestCase):
def test_containerview_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.containerview(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
def test_create_container_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.create_container(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
def test_objectview_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.objectview(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
def test_delete_container_view_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.delete_container_view(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
def test_create_object_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.create_object(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
def test_upload_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.upload(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
def test_delete_object_view_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.delete_object_view(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
def test_delete_pseudofolder_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.delete_pseudofolder(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
def test_create_pseudofolder_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.create_pseudofolder(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
def test_object_versioning_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.object_versioning(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
def test_remove_from_cache_needs_authentication(self):
    # Anonymous users must be redirected (302) away from the view.
    resp = views.remove_from_cache(self.anonymous_request)
    self.assertEqual(302, resp.status_code)
@patch('storage.views.main.client.get_account')
def test_containerview_redirect_to_dashboard_without_project_in_session(self, mock_get_account):
    """Without a project_id in session, redirect to project selection."""
    mock_get_account.return_value = fakes.get_account()
    self.request.session['project_id'] = None
    response = views.containerview(self.request, None)
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response.url, reverse('change_project'))
@patch('requests.get')
@patch('storage.views.main.client.get_account')
def test_containerview_list_containters(self, mock_get_account, mock_get):
    """The rendered page must contain an object-list link per container."""
    mock_get_account.return_value = fakes.get_account()
    mock_get.return_value = fakes.FakeRequestResponse(200, headers={"get": {"X-Bla": "Bla", "X-Ble": "Ble"}})
    self.request.META.update({
        'HTTP_HOST': 'localhost'
    })
    project_name = self.request.session.get('project_name')
    response = views.containerview(self.request, project_name)
    self.assertEqual(response.status_code, 200)
    expected = '/p/{}/storage/objects/container1/'.format(project_name)
    self.assertIn(expected, response.content.decode('UTF-8'))
    expected = '/p/{}/storage/objects/container2/'.format(project_name)
    self.assertIn(expected, response.content.decode('UTF-8'))
    expected = '/p/{}/storage/objects/container3/'.format(project_name)
    self.assertIn(expected, response.content.decode('UTF-8'))
@patch('storage.views.main.log.exception')
@patch('storage.views.main.client.get_account')
def test_containerview_clientexception(self, mock_get_account, mock_logging):
    """A swift ClientException must be logged and surfaced as one message."""
    mock_get_account.side_effect = client.ClientException('')
    project_name = self.request.session.get('project_name')
    self.request.META.update({'HTTP_HOST': 'localhost'})
    views.containerview(self.request, project_name)
    msgs = [msg for msg in self.request._messages]
    self.assertTrue(mock_get_account.called)
    self.assertTrue(mock_logging.called)
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Unable to list containers'))
@patch('storage.views.main.client.get_container')
def test_objectview_list_objects(self, mock_get_container):
    """Object listing renders object links and the upload button."""
    mock_get_container.return_value = fakes.get_container()
    project_name = self.request.session.get('project_name')
    self.request.META.update({
        'HTTP_HOST': 'localhost'
    })
    response = views.objectview(self.request, project_name, 'fakecontainer')
    self.assertEqual(response.status_code, 200)
    expected = '/AUTH_1/fakecontainer/obj_pelo_vip.html'
    self.assertIn(expected, response.content.decode('UTF-8'))
    # Upload-file button must be present.
    self.assertIn('/p/{}/storage/upload/fakecontainer/'.format(project_name), response.content.decode('UTF-8'))
@patch('storage.views.main.log.exception')
@patch('storage.views.main.client.get_container')
def test_objectview_clientexception(self, mock_get_container, mock_logging):
    """A swift ClientException during listing maps to 'Access denied'."""
    mock_get_container.side_effect = client.ClientException('')
    project_name = self.request.session.get('project_name')
    views.objectview(self.request, project_name, 'fakecontainer')
    msgs = [msg for msg in self.request._messages]
    self.assertTrue(mock_get_container.called)
    self.assertTrue(mock_logging.called)
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Access denied'))
@patch("storage.views.main.actionlog.log")
@patch('storage.views.main.log.exception')
@patch('storage.views.main.client.put_container')
def test_create_container_valid_form(self, mock_put_container, mock_logging, mock_log):
    """A valid POST creates the container and writes an action log entry."""
    self.request.method = 'POST'
    post = self.request.POST.copy()
    post.update({'containername': 'fakecontainer'})
    self.request.POST = post
    project_name = self.request.session.get('project_name')
    views.create_container(self.request, project_name)
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Container created'))
    self.assertTrue(mock_put_container.called)
    self.assertFalse(mock_logging.called)
    user = self.request.user.username
    mock_log.assert_called_with(user, "create", "fakecontainer")
@patch('storage.views.main.log.exception')
@patch('storage.views.main.client.put_container')
def test_create_container_invalid_form(self, mock_put_container, mock_logging):
    """An empty container name re-renders the form without calling swift."""
    mock_put_container.side_effect = client.ClientException('')
    self.request.method = 'POST'
    self.request.META.update({
        'HTTP_HOST': 'localhost'
    })
    post = self.request.POST.copy()
    post.update({'containername': ''})
    self.request.POST = post
    project_name = self.request.session.get('project_name')
    response = views.create_container(self.request, project_name)
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 0)
    self.assertFalse(mock_put_container.called)
    self.assertFalse(mock_logging.called)
    self.assertEqual(response.status_code, 200)
    self.assertIn(_('This field is required.'), response.content.decode('UTF-8'))
@patch('storage.views.main.log.exception')
@patch('storage.views.main.client.put_container')
def test_create_container_invalid_container_names(self, mock_put_container, mock_logging):
    """Names '.' and '..' are rejected by form validation, not by swift."""
    mock_put_container.side_effect = client.ClientException('')
    self.request.method = 'POST'
    self.request.META.update({
        'HTTP_HOST': 'localhost'
    })
    post = self.request.POST.copy()
    post.update({'containername': '.'})
    self.request.POST = post
    project_name = self.request.session.get('project_name')
    response = views.create_container(self.request, project_name)
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 0)
    self.assertFalse(mock_put_container.called)
    self.assertFalse(mock_logging.called)
    self.assertEqual(response.status_code, 200)
    self.assertIn('Enter a valid name consisting of letters, numbers, underscores or hyphens.', response.content.decode('UTF-8'))
    # Same validation error must fire for '..'.
    post.update({'containername': '..'})
    self.request.POST = post
    response = views.create_container(self.request, project_name)
    self.assertIn('Enter a valid name consisting of letters, numbers, underscores or hyphens.', response.content.decode('UTF-8'))
@patch('storage.views.main.log.exception')
@patch('storage.views.main.client.put_container')
def test_create_container_fail_to_create(self, mock_put_container, mock_logging):
    """A swift failure on put_container is logged and shown as a message."""
    mock_put_container.side_effect = client.ClientException('')
    self.request.method = 'POST'
    post = self.request.POST.copy()
    post.update({'containername': 'fakecontainer'})
    self.request.POST = post
    project_name = self.request.session.get('project_name')
    views.create_container(self.request, project_name)
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Access denied'))
    self.assertTrue(mock_put_container.called)
    self.assertTrue(mock_logging.called)
@patch('storage.views.main.requests.put')
def test_create_object_status_201(self, mock_requests_put):
    """HTTP 201 from the backend yields success and redirects to the list."""
    mock_requests_put.return_value = fakes.FakeRequestResponse(201)
    self.request.FILES['file1'] = fakes.get_temporary_text_file()
    prefix = ''
    fakecontainer = 'fakecontainer'
    project_name = self.request.session.get('project_name')
    response = views.create_object(self.request, project_name, fakecontainer)
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Object created'))
    self.assertTrue(mock_requests_put.called)
    headers = dict([i for i in response.items()])
    expected = reverse('objectview', kwargs={'container': fakecontainer,
                                             'prefix': prefix,
                                             'project': project_name})
    self.assertEqual(headers['Location'], expected)
@patch('storage.views.main.requests.put')
def test_create_object_status_201_with_prefix(self, mock_requests_put):
    """Uploading under a prefix stores the object at prefix/filename."""
    mock_requests_put.return_value = fakes.FakeRequestResponse(201)
    self.request.FILES['file1'] = fakes.get_temporary_text_file()
    prefix = 'prefix/'
    fakecontainer = 'fakecontainer'
    project_name = self.request.session.get('project_name')
    response = views.create_object(self.request, project_name, fakecontainer, prefix)
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Object created'))
    self.assertTrue(mock_requests_put.called)
    # Stringify the whole mock call because the URL is a positional arg.
    str_call = str(mock_requests_put.call_args)
    self.assertIn('/fakecontainer/prefix/foo.txt', str_call)
    headers = dict([i for i in response.items()])
    expected = reverse('objectview', kwargs={'container': fakecontainer,
                                             'prefix': prefix,
                                             'project': project_name})
    self.assertEqual(headers['Location'], expected)
@patch('storage.views.main.log.exception')
@patch('storage.views.main.requests.put')
def test_create_object_status_401(self, mock_requests_put, mock_logging):
    """HTTP 401 from the backend maps to 'Access denied' without logging."""
    mock_requests_put.return_value = fakes.FakeRequestResponse(401)
    self.request.FILES['file1'] = fakes.get_temporary_text_file()
    project_name = self.request.session.get('project_name')
    views.create_object(self.request, project_name, 'fakecontainer')
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Access denied'))
    self.assertTrue(mock_requests_put.called)
    self.assertFalse(mock_logging.called)
@patch('storage.views.main.log.exception')
@patch('storage.views.main.requests.put')
def test_create_object_status_403(self, mock_requests_put, mock_logging):
    """HTTP 403 behaves exactly like 401: 'Access denied', no logging."""
    mock_requests_put.return_value = fakes.FakeRequestResponse(403)
    self.request.FILES['file1'] = fakes.get_temporary_text_file()
    project_name = self.request.session.get('project_name')
    views.create_object(self.request, project_name, 'fakecontainer')
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Access denied'))
    self.assertTrue(mock_requests_put.called)
    self.assertFalse(mock_logging.called)
@patch('storage.views.main.log.exception')
@patch('storage.views.main.requests.put')
def test_create_object_status_other_than_above(self, mock_requests_put, mock_logging):
    """Any other backend status (e.g. 404) yields a generic failure message."""
    mock_requests_put.return_value = fakes.FakeRequestResponse(404)
    self.request.FILES['file1'] = fakes.get_temporary_text_file()
    project_name = self.request.session.get('project_name')
    views.create_object(self.request, project_name, 'fakecontainer')
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 1)
    # BUG FIX: was `expected = msg = '...'` — the stray chained assignment
    # silently rebound `msg` (the list-comprehension leftover) for no reason.
    expected = 'Fail to create object ({0}).'.format(404)
    self.assertEqual(msgs[0].message, expected)
    self.assertTrue(mock_requests_put.called)
    self.assertFalse(mock_logging.called)
@patch('storage.views.main.delete_container')
def test_delete_container_view_deletes_with_success(self, mock_delete_container):
    """A successful delete returns 200 with a confirmation message."""
    mock_delete_container.return_value = True
    self.request.method = 'DELETE'
    project_name = self.request.session.get('project_name')
    response = views.delete_container_view(self.request, project_name, 'container')
    self.assertTrue(mock_delete_container.called)
    self.assertEqual(response.status_code, 200)
    self.assertIn(_('Container deleted'), response.content.decode('unicode-escape'))
@patch('storage.views.main.delete_container')
def test_delete_container_view_deletes_with_failure(self, mock_delete_container):
    """A failed delete returns 500 with an error message."""
    mock_delete_container.return_value = False
    self.request.method = 'DELETE'
    project_name = self.request.session.get('project_name')
    response = views.delete_container_view(self.request, project_name, 'container')
    self.assertTrue(mock_delete_container.called)
    self.assertEqual(response.status_code, 500)
    self.assertIn(_('Container delete error'), response.content.decode('UTF-8'))
@patch('storage.views.main.client.get_object')
@patch('storage.views.main.client.delete_object')
@patch("storage.views.main.actionlog.log")
@patch('storage.views.main.client.delete_container')
def test_delete_container_without_deleting_objects(self,
                                                   mock_delete_container,
                                                   mock_action_log,
                                                   mock_delete_object,
                                                   mock_get_object):
    """With force=False only the container is deleted, never its objects."""
    fakecontainer = 'fakecontainer'
    resp = views.delete_container(self.request, fakecontainer, force=False)
    self.assertTrue(resp)
    self.assertFalse(mock_get_object.called)
    self.assertFalse(mock_delete_object.called)
    self.assertTrue(mock_delete_container.called)
    self.assertTrue(mock_action_log.called)
@patch('storage.views.main.client.head_container')
@patch('storage.views.main.client.get_container')
@patch('storage.views.main.get_info')
@patch('storage.views.main.prepare_data_name')
@patch('storage.views.main.client.delete_object')
@patch("storage.views.main.actionlog.log")
@patch('storage.views.main.client.delete_container')
def test_delete_container_deleting_objects(self, mock_delete_container,
                                           mock_action_log,
                                           mock_delete_object,
                                           mock_prepare_data_name,
                                           mock_get_info,
                                           mock_get_container,
                                           mock_head_container):
    """With force=True every contained object is deleted before the container."""
    fakecontainer = 'fakecontainer'
    mock_head_container.return_value = ({'x-container-object-count': 1})
    mock_get_container.return_value = (None, [{'name': 'object1'}])
    mock_prepare_data_name.return_value = None
    resp = views.delete_container(self.request, fakecontainer, force=True)
    self.assertTrue(resp)
    # Inspect the kwargs of the first delete_object call.
    kargs = mock_delete_object.mock_calls[0][2]
    self.assertEqual('fakecontainer', kargs['container'])
    self.assertEqual('object1', kargs['name'])
    self.assertTrue(mock_delete_container.called)
    self.assertTrue(mock_action_log.called)
@patch('storage.views.main.client.head_container')
@patch('storage.views.main.client.get_container')
@patch('storage.views.main.get_info')
@patch('storage.views.main.prepare_data_name')
@patch('storage.views.main.client.delete_object')
@patch("storage.views.main.actionlog.log")
@patch('storage.views.main.client.delete_container')
def test_delete_container_fail_to_get_objects(self, mock_delete_container,
                                              mock_action_log,
                                              mock_delete_object,
                                              mock_prepare_data_name,
                                              mock_get_info,
                                              mock_get_container,
                                              mock_head_container):
    """If the object listing fails, nothing is deleted and False is returned."""
    fakecontainer = 'fakecontainer'
    mock_get_container.side_effect = client.ClientException('')
    expected = False
    computed = views.delete_container(self.request, fakecontainer, True)
    self.assertEqual(computed, expected)
    self.assertFalse(mock_delete_object.called)
    self.assertFalse(mock_delete_container.called)
    self.assertFalse(mock_action_log.called)
@patch('storage.views.main.log.exception')
@patch('storage.views.main.client.head_container')
@patch('storage.views.main.get_info')
@patch('storage.views.main.prepare_data_name')
@patch('storage.views.main.client.get_container')
@patch('storage.views.main.client.delete_object')
@patch("storage.views.main.actionlog.log")
@patch('storage.views.main.client.delete_container')
def test_delete_container_fail_to_delete_container(self,
                                                   mock_delete_container,
                                                   mock_action_log,
                                                   mock_delete_object,
                                                   mock_prepare_data_name,
                                                   mock_get_info,
                                                   mock_get_container,
                                                   mock_head_container,
                                                   mock_log_exception):
    """A ClientException from delete_container is logged and returns False."""
    fakecontainer = 'fakecontainer'
    mock_delete_container.side_effect = client.ClientException('')
    mock_head_container.return_value = ({'x-container-object-count': 0})
    mock_get_container.return_value = (None, None)
    mock_prepare_data_name.return_value = None
    expected = False
    computed = views.delete_container(self.request, fakecontainer, True)
    self.assertEqual(computed, expected)
    self.assertTrue(mock_log_exception.called)
    self.assertTrue(mock_delete_container.called)
@patch('storage.views.main.client.put_object')
def test_create_pseudofolder_with_no_prefix(self, mock_put_object):
    """A pseudofolder is a zero-byte 'application/directory' object."""
    self.request.method = 'POST'
    post = self.request.POST.copy()
    post.update({'foldername': 'fakepseudofolder'})
    self.request.POST = post
    project_name = self.request.session.get('project_name')
    response = views.create_pseudofolder(self.request, project_name, 'fakecontainer')
    expected_redirect_arg = ('Location', '/p/{}/storage/objects/fakecontainer/'.format(project_name))
    self.assertIn(expected_redirect_arg, response.items())
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Pseudofolder created'))
    self.assertTrue(mock_put_object.called)
    name, args, kargs = mock_put_object.mock_calls[0]
    expected_arg = 'application/directory'
    self.assertEqual(expected_arg, kargs['content_type'])
    # Stringify the whole mock call because the name is a positional arg.
    str_call = str(mock_put_object.call_args)
    self.assertIn(", 'fakepseudofolder/',", str_call)
@patch('storage.views.main.client.put_object')
def test_create_pseudofolder_with_prefix(self, mock_put_object):
    """Creating under a prefix produces an object named prefix + folder + '/'."""
    self.request.method = 'POST'
    post = self.request.POST.copy()
    prefix = 'prefix/'
    pseudofolder = 'fakepseudofolder'
    project_name = self.request.session.get('project_name')
    post.update({'foldername': pseudofolder})
    self.request.POST = post
    response = views.create_pseudofolder(self.request,
                                         project_name,
                                         'fakecontainer',
                                         prefix)
    expected_redirect_arg = (
        'Location',
        '/p/{}/storage/objects/fakecontainer/{}'.format(project_name, prefix))
    self.assertIn(expected_redirect_arg, response.items())
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Pseudofolder created'))
    self.assertTrue(mock_put_object.called)
    name, args, kargs = mock_put_object.mock_calls[0]
    expected_arg = 'application/directory'
    self.assertEqual(expected_arg, kargs['content_type'])
    # Stringify the whole mock call because the name is a positional arg.
    str_call = str(mock_put_object.call_args)
    expected_foldername = '{0}{1}/'.format(prefix, pseudofolder)
    self.assertIn(expected_foldername, str_call)
@patch('storage.views.main.log.exception')
@patch('storage.views.main.client.put_object')
def test_create_pseudofolder_exception(self, mock_put_object, mock_logging):
    """A ClientException from put_object is logged and shown as a message."""
    mock_put_object.side_effect = client.ClientException('')
    self.request.method = 'POST'
    post = self.request.POST.copy()
    post.update({'foldername': 'fakepseudofolder'})
    self.request.POST = post
    project_name = self.request.session.get('project_name')
    views.create_pseudofolder(self.request, project_name, 'fakecontainer')
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(len(msgs), 1)
    self.assertEqual(msgs[0].message, _('Access denied'))
    self.assertTrue(mock_put_object.called)
    self.assertTrue(mock_logging.called)
@patch('storage.views.main.client.put_object')
def test_create_pseudofolder_invalid_form(self, mock_put_object):
    """An empty folder name re-renders the form without calling swift."""
    self.request.method = 'POST'
    self.request.META.update({
        'HTTP_HOST': 'localhost'
    })
    post = self.request.POST.copy()
    post.update({'foldername': ''})
    self.request.POST = post
    project_name = self.request.session.get('project_name')
    response = views.create_pseudofolder(self.request, project_name, 'fakecontainer')
    self.assertFalse(mock_put_object.called)
    self.assertIn(_('This field is required.'), response.content.decode('UTF-8'))
@patch('storage.views.main.delete_object')
def test_view_delete_object_inside_a_container(self, mock_delete_object):
    """Deleting a top-level object redirects back to the container listing."""
    mock_delete_object.return_value = True
    fakecontainer = 'fakecontainer'
    fakeobject_name = 'fakeobject'
    project_name = self.request.session.get('project_name')
    response = views.delete_object_view(self.request,
                                        project_name,
                                        fakecontainer,
                                        fakeobject_name)
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(msgs[0].message, _('Object deleted'))
    headers = dict([i for i in response.items()])
    expected = reverse('objectview', kwargs={'container': fakecontainer,
                                             'project': project_name})
    self.assertEqual(headers['Location'], expected)
@patch('storage.views.main.delete_object')
def test_view_delete_object_inside_a_pseudofolder(self, mock_delete_object):
    """Deleting inside a pseudofolder redirects back to that prefix."""
    mock_delete_object.return_value = True
    fakecontainer = 'fakecontainer'
    fakepseudofolder = 'fakepseudofolder/'
    fakeobject_name = fakepseudofolder + 'fakeobject'
    project_name = self.request.session.get('project_name')
    response = views.delete_object_view(self.request,
                                        project_name,
                                        fakecontainer,
                                        fakeobject_name)
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(msgs[0].message, _('Object deleted'))
    headers = dict([i for i in response.items()])
    expected = reverse('objectview', kwargs={'container': fakecontainer,
                                             'prefix': fakepseudofolder,
                                             'project': project_name})
    self.assertEqual(headers['Location'], expected)
@patch('storage.views.main.delete_object')
def test_view_delete_object_fail_to_delete(self, mock_delete_object):
    """A failed delete still redirects but reports 'Access denied'."""
    mock_delete_object.return_value = False
    fakecontainer = 'fakecontainer'
    fakeobject_name = 'fakeobject'
    project_name = self.request.session.get('project_name')
    response = views.delete_object_view(self.request,
                                        project_name,
                                        fakecontainer,
                                        fakeobject_name)
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(msgs[0].message, _('Access denied'))
    headers = dict([i for i in response.items()])
    expected = reverse('objectview', kwargs={'container': fakecontainer,
                                             'project': project_name})
    self.assertEqual(headers['Location'], expected)
@patch('storage.views.main.actionlog.log')
@patch('storage.views.main.client.delete_object')
def test_delete_object(self, mock_delete_object, mock_actionlog):
    """A successful swift delete returns truthy and logs the action."""
    fakecontainer = 'fakecontainer'
    fakeobject = 'fakeobject'
    response = views.delete_object(self.request, fakecontainer, fakeobject)
    self.assertTrue(response)
    self.assertTrue(mock_actionlog.called)
@patch('storage.views.main.actionlog.log')
@patch('storage.views.main.client.delete_object')
def test_delete_object_fail_to_delete(self, mock_delete_object, mock_actionlog):
    """A ClientException from swift makes delete_object return falsy."""
    mock_delete_object.side_effect = client.ClientException('')
    fakecontainer = 'fakecontainer'
    fakeobject = 'fakeobject'
    response = views.delete_object(self.request, fakecontainer, fakeobject)
    self.assertFalse(response)
    self.assertTrue(mock_delete_object.called)
@patch('storage.views.main.client.delete_object')
@patch('storage.views.main.client.get_container')
def test_delete_empty_pseudofolder(self, mock_get_container, mock_delete_object):
    """Deleting an empty pseudofolder removes only its marker object."""
    fakecontainer = 'fakecontainer'
    fakepseudofolder = 'fakepseudofolder/'
    project_name = self.request.session.get('project_name')
    mock_get_container.return_value = ['stats', [{'name': fakepseudofolder}]]
    response = views.delete_pseudofolder(self.request, project_name,
                                         fakecontainer, fakepseudofolder)
    msgs = [msg for msg in self.request._messages]
    self.assertTrue(mock_delete_object.called)
    self.assertEqual(msgs[0].message, _('Pseudofolder deleted'))
    kargs = mock_delete_object.mock_calls[0][2]
    self.assertEqual(kargs['name'], fakepseudofolder)
    self.assertEqual(kargs['container'], fakecontainer)
    headers = dict([i for i in response.items()])
    expected = reverse('objectview', kwargs={'container': fakecontainer,
                                             'project': project_name})
    self.assertEqual(headers['Location'], expected)
@patch('storage.views.main.client.delete_object')
@patch('storage.views.main.client.get_container')
def test_delete_non_empty_pseudofolder(self, mock_get_container, mock_delete_object):
    """Deleting a pseudofolder also deletes each object it contains."""
    fakecontainer = 'fakecontainer'
    fakepseudofolder = 'fakepseudofolder/'
    project_name = self.request.session.get('project_name')
    fakepseudofolder_content = [
        {'name': fakepseudofolder},
        {'name': fakepseudofolder + 'fakeobjeto1'},
        {'name': fakepseudofolder + 'fakeobjeto2'}
    ]
    mock_get_container.return_value = ['stats', fakepseudofolder_content]
    views.delete_pseudofolder(self.request, project_name, fakecontainer, fakepseudofolder)
    msgs = [msg for msg in self.request._messages]
    self.assertEqual(msgs[0].message, 'Pseudofolder and 2 objects deleted.')
    self.assertTrue(mock_delete_object.called)
    # One delete_object call per listed name, in listing order.
    kargs = mock_delete_object.mock_calls[0][2]
    self.assertEqual(kargs['name'], fakepseudofolder_content[0]['name'])
    kargs = mock_delete_object.mock_calls[1][2]
    self.assertEqual(kargs['name'], fakepseudofolder_content[1]['name'])
    kargs = mock_delete_object.mock_calls[2][2]
    self.assertEqual(kargs['name'], fakepseudofolder_content[2]['name'])
@patch('storage.views.main.client.delete_object')
@patch('storage.views.main.client.get_container')
def test_delete_non_empty_pseudofolder_with_some_failures(self, mock_get_container, mock_delete_object):
    """Placeholder: partial-failure behavior is not yet simulated."""
    # TODO: Find a way to simulate one failures among successful deletes
    pass
@patch('storage.views.main.client.delete_object')
@patch('storage.views.main.client.get_container')
def test_delete_empty_pseudofolder_inside_other_pseudofolder(self, mock_get_container, mock_delete_object):
    """Deleting a nested pseudofolder redirects to its parent prefix."""
    prefix = 'fakepseudofolder1/'
    fakecontainer = 'fakecontainer'
    fakepseudofolder = prefix + 'fakepseudofolder2/'
    project_name = self.request.session.get('project_name')
    mock_get_container.return_value = ['stats', [{'name': fakepseudofolder}]]
    response = views.delete_pseudofolder(self.request, project_name,
                                         fakecontainer, fakepseudofolder)
    msgs = [msg for msg in self.request._messages]
    self.assertTrue(mock_delete_object.called)
    self.assertEqual(msgs[0].message, _('Pseudofolder deleted'))
    kargs = mock_delete_object.mock_calls[0][2]
    self.assertEqual(kargs['name'], fakepseudofolder)
    self.assertEqual(kargs['container'], fakecontainer)
    headers = dict([i for i in response.items()])
    expected = reverse('objectview', kwargs={'container': fakecontainer,
                                             'prefix': prefix,
                                             'project': project_name})
    self.assertEqual(headers['Location'], expected)
@patch('storage.views.main.client.delete_object')
@patch('storage.views.main.client.get_container')
def test_delete_pseudofolder_fail(self, mock_get_container, mock_delete_object):
fakecontainer = 'fakecontainer'
fakepseudofolder = 'fakepseudofolder/'
project_name = self.request.session.get('project_name')
mock_delete_object.side_effect = client.ClientException('')
mock_get_container.return_value = ['stats', [{'name': fakepseudofolder}]]
views.delete_pseudofolder(self.request, project_name, fakecontainer, fakepseudofolder)
msgs = [msg for msg in self.request._messages]
self.assertTrue(mock_delete_object.called)
self.assertEqual(msgs[0].message, _('Fail to delete pseudofolder'))
kargs = mock_delete_object.mock_calls[0][2]
self.assertEqual(kargs['name'], fakepseudofolder)
self.assertEqual(kargs['container'], fakecontainer)
@patch('storage.views.main.client.get_account')
def test_render_upload_view(self, mock_get_account):
mock_get_account.return_value = fakes.get_account()
project_name = self.request.session.get('project_name')
self.request.META.update({
'HTTP_HOST': 'localhost'
})
response = views.upload(self.request, project_name, 'fakecontainer')
self.assertIn('enctype="multipart/form-data"', response.content.decode('UTF-8'))
@patch('storage.views.main.client.get_account')
def test_render_upload_view_with_prefix(self, mock_get_account):
mock_get_account.return_value = fakes.get_account()
self.request.META.update({
'HTTP_HOST': 'localhost'
})
response = views.upload(self.request, 'fakecontainer', 'prefixTest')
self.assertIn('prefixTest', response.content.decode('UTF-8'))
@patch('storage.views.main.client.get_account')
def test_upload_view_without_temp_key_without_prefix(self, mock_get_account):
mock_get_account.return_value = fakes.get_account()
patch('storage.views.main.get_temp_key',
Mock(return_value=None)).start()
prefix = ''
fakecontainer = 'fakecontainer'
project_name = self.request.session.get('project_name')
response = views.upload(self.request, project_name, fakecontainer, prefix)
self.assertEqual(response.status_code, 302)
headers = dict([i for i in response.items()])
expected = reverse('objectview', kwargs={'container': fakecontainer,
'prefix': prefix,
'project': project_name})
self.assertEqual(headers['Location'], expected)
@patch('storage.views.main.client.get_account')
def test_upload_view_without_temp_key_with_prefix(self, mock_get_account):
mock_get_account.return_value = fakes.get_account()
patch('storage.views.main.get_temp_key',
Mock(return_value=None)).start()
prefix = 'prefix/'
fakecontainer = 'fakecontainer'
project_name = self.request.session.get('project_name')
response = views.upload(self.request, project_name, fakecontainer, prefix)
self.assertEqual(response.status_code, 302)
headers = dict([i for i in response.items()])
expected = reverse('objectview', kwargs={'container': fakecontainer,
'prefix': prefix,
'project': project_name})
self.assertEqual(headers['Location'], expected)
@patch('storage.views.main.requests.get')
def test_download(self, mock_get):
content = b'ola'
headers = {'Content-Type': 'fake/object'}
project_name = self.request.session.get('project_name')
mock_get.return_value = fakes.FakeRequestResponse(content=content,
headers=headers)
response = views.download(self.request, project_name, 'fakecontainer', 'fakeobject')
computed_headers = dict([i for i in response.items()])
self.assertEqual(response.content, content)
self.assertEqual(headers, computed_headers)
@patch('storage.views.main.requests.head')
def test_metadataview_return_headers_from_container(self, mock_head):
headers = {'content-type': 'fake/container'}
project_name = self.request.session.get('project_name')
mock_head.return_value = fakes.FakeRequestResponse(content='',
headers=headers)
response = views.metadataview(self.request, project_name, 'fakecontainer')
self.assertIn('fake/container', response.content.decode('UTF-8'))
@patch('storage.views.main.requests.head')
def test_metadataview_return_headers_from_object(self, mock_head):
headers = {'content-type': 'fake/object'}
mock_head.return_value = fakes.FakeRequestResponse(content='',
headers=headers)
response = views.metadataview(self.request,
'fakecontainer',
'fakeobject')
self.assertIn('fake/object', response.content.decode('UTF-8'))
@patch('storage.views.main.actionlog.log')
@patch('storage.views.main.client.post_container')
@patch('storage.views.main.client.put_container')
def test_enable_versioning(self,
mock_put_container,
mock_post_container,
mock_actionlog):
container = 'fakecontainer'
computed = views.enable_versioning(self.request, container)
self.assertEqual(computed, True)
self.assertTrue(mock_put_container.called)
kargs = mock_post_container.mock_calls[0][2]
headers = kargs['headers']
version_location = '_version_{}'.format(container)
self.assertEqual(version_location, headers['x-versions-location'])
# Create container/update container
self.assertEqual(mock_actionlog.call_count, 2)
@patch('storage.views.main.actionlog.log')
@patch('storage.views.main.client.post_container')
@patch('storage.views.main.client.put_container')
def test_enable_versioning_fail_to_create_container(self,
mock_put_container,
mock_post_container,
mock_actionlog):
mock_put_container.side_effect = client.ClientException('')
container = 'fakecontainer'
computed = views.enable_versioning(self.request, container)
self.assertEqual(computed, False)
self.assertFalse(mock_post_container.called)
self.assertEqual(mock_actionlog.call_count, 0)
@patch('storage.views.main.actionlog.log')
@patch('storage.views.main.client.post_container')
@patch('storage.views.main.client.put_container')
def test_enable_versioning_fail_to_update_container(self,
mock_put_container,
mock_post_container,
mock_actionlog):
mock_post_container.side_effect = client.ClientException('')
container = 'fakecontainer'
computed = views.enable_versioning(self.request, container)
self.assertEqual(computed, False)
self.assertTrue(mock_post_container.called)
self.assertEqual(mock_actionlog.call_count, 1)
@patch('storage.views.main.actionlog.log')
@patch('storage.views.main.client.post_container')
@patch('storage.views.main.delete_container')
@patch('storage.views.main.client.head_container')
def test_disable_versioning(self,
mock_head_container,
mock_delete_container,
mock_post_container,
mock_actionlog):
version_location = '_version_fakecontainer'
mock_head_container.return_value = {'x-versions-location': version_location}
container = 'fakecontainer'
computed = views.disable_versioning(self.request, container)
self.assertEqual(computed, True)
kargs = mock_delete_container.mock_calls[0][2]
self.assertEqual(version_location, kargs['container'])
self.assertEqual(mock_actionlog.call_count, 1)
@patch('storage.views.main.log.exception')
@patch('storage.views.main.client.post_container')
@patch('storage.views.main.delete_container')
@patch('storage.views.main.client.head_container')
def test_disable_versioning_fail_to_get_container_headers(self,
mock_head_container,
mock_delete_container,
mock_post_container,
mock_logging):
mock_head_container.side_effect = client.ClientException('')
container = 'fakecontainer'
computed = views.disable_versioning(self.request, container)
self.assertEqual(computed, False)
msgs = [msg for msg in self.request._messages]
self.assertEqual(msgs[0].message, _('Access denied'))
self.assertEqual(mock_logging.call_count, 1)
@patch('storage.views.main.log.exception')
@patch('storage.views.main.client.post_container')
@patch('storage.views.main.delete_container')
@patch('storage.views.main.client.head_container')
def test_disable_versioning_fail_to_update_container_header(self,
mock_head_container,
mock_delete_container,
mock_post_container,
mock_logging):
mock_post_container.side_effect = client.ClientException('')
version_location = '_version_fakecontainer'
mock_head_container.return_value = {'x-versions-location': version_location}
container = 'fakecontainer'
computed = views.disable_versioning(self.request, container)
self.assertEqual(computed, False)
self.assertFalse(mock_delete_container.called)
msgs = [msg for msg in self.request._messages]
self.assertEqual(msgs[0].message, _('Access denied'))
self.assertEqual(mock_logging.call_count, 1)
@patch('storage.views.main.render')
@patch('storage.views.main.client.head_container')
def test_object_versioning_view_versioning_disabled(self,
mock_head_container,
mock_render):
mock_head_container.return_value = {}
project_name = self.request.session.get('project_name')
views.object_versioning(self.request, project_name, 'fakecontainer')
kargs = mock_render.mock_calls[0][1]
computed = kargs[2]
expected = {'objects': utils.generic_pagination([], 1),
'container': 'fakecontainer',
'version_location': None}
self.assertEqual(str(computed['objects']), str(expected['objects']))
self.assertEqual(computed['container'], expected['container'])
self.assertEqual(computed['version_location'], expected['version_location'])
@patch('storage.views.main.render')
@patch('storage.views.main.client.get_container')
@patch('storage.views.main.client.head_container')
def test_object_versioning_view_versioning_enabled(self,
mock_head_container,
mock_get_container,
mock_render):
mock_head_container.return_value = {'x-versions-location': 'abc'}
mock_get_container.return_value = (None, [])
project_name = self.request.session.get('project_name')
views.object_versioning(self.request, project_name, 'fakecontainer')
kargs = mock_render.mock_calls[0][1]
computed = kargs[2]
expected = {'objects': utils.generic_pagination([], 1),
'container': 'fakecontainer',
'version_location': 'abc'}
self.assertEqual(str(computed['objects']), str(expected['objects']))
self.assertEqual(computed['container'], expected['container'])
self.assertEqual(computed['version_location'], expected['version_location'])
@patch('storage.views.main.enable_versioning')
def test_object_versioning_view_enabling_versioning(self, mock_enable):
post = self.request.method = 'POST'
post = self.request.POST.copy()
project_name = self.request.session.get('project_name')
post.update({'action': 'enable'})
self.request.POST = post
response = views.object_versioning(self.request, project_name, 'fakecontainer')
self.assertEqual(mock_enable.call_count, 1)
headers = dict([i for i in response.items()])
expected = reverse('object_versioning',
kwargs={'project': project_name, 'container': 'fakecontainer'})
self.assertEqual(headers['Location'], expected)
@patch('storage.views.main.disable_versioning')
def test_object_versioning_view_disabling_versioning(self, mock_disable):
post = self.request.method = 'POST'
post = self.request.POST.copy()
project_name = self.request.session.get('project_name')
post.update({'action': 'disable'})
self.request.POST = post
response = views.object_versioning(self.request, project_name, 'fakecontainer')
self.assertEqual(mock_disable.call_count, 1)
headers = dict([i for i in response.items()])
expected = reverse('object_versioning',
kwargs={'project': project_name, 'container': 'fakecontainer'})
self.assertEqual(headers['Location'], expected)
@override_settings(SWIFT_HIDE_PREFIXES=['.'])
@patch('storage.views.main.client.get_account')
def test_filter_containers_with_prefix_listed_in_SWIFT_HIDE_PREFIXES(self, mock_get_account):
fake_get_acc = fakes.get_account()
containers = [{'count': 4, 'bytes': 4, 'name': '.container4'}]
project_name = self.request.session.get('project_name')
self.request.META.update({
'HTTP_HOST': 'localhost'
})
mock_get_account.return_value = (fake_get_acc[0], containers)
response = views.containerview(self.request, project_name)
self.assertEqual(response.status_code, 200)
self.assertNotIn('/storage/objects/.container4/', response.content.decode('UTF-8'))
@patch('json.loads')
@patch('requests.get')
def test_get_info(self, mock_get, mock_json_loads):
expect_url = 'https://fake.api.globoi.com/info'
url = "https://fake.api.globoi.com/v1/AUTH_12314"
views.main.get_info(url)
mock_json_loads.assert_called()
mock_get.assert_called_with(expect_url)
def test_prepare_data_name(self):
fakecontainer = 'fakecontainer'
fake_obj_name = 'fakename'
expectd_data_name = fakecontainer.encode() + b"/" + fake_obj_name.encode()
data_name = views.main.prepare_data_name(fakecontainer, fake_obj_name)
self.assertEqual(data_name, expectd_data_name)
    @patch('storage.views.main.client.head_container')
    @patch('storage.views.main.client.get_container')
    @patch('storage.views.main.get_info')
    @patch('storage.views.main.prepare_data_name')
    @patch('requests.post')
    @patch("storage.views.main.actionlog.log")
    @patch('storage.views.main.client.delete_container')
    def test_delete_container_bulk_delete(self, mock_delete_container,
                                          mock_action_log,
                                          mock_post,
                                          mock_prepare_data_name,
                                          mock_get_info,
                                          mock_get_container,
                                          mock_head_container):
        """Force-deleting a non-empty container goes through the bulk-delete API.

        The container reports one object and the backend advertises a
        bulk-delete limit of one delete per request, so a single POST to the
        ``?bulk-delete=true`` endpoint is expected before the container itself
        is deleted and the action is logged.
        """
        fakecontainer = 'fakecontainer'
        fake_obj_name = 'fakename'
        headers = {"X-Auth-Token": None}
        # Bulk-delete payload entry: "<container>/<object>" as bytes.
        expected_data = fakecontainer.encode() + b"/" + fake_obj_name.encode()
        mock_head_container.return_value = ({'x-container-object-count': 1})
        mock_get_container.return_value = (None, [{'name': 'object1'}])
        mock_get_info.return_value = {'bulk_delete': {'max_deletes_per_request': 1}}
        mock_prepare_data_name.return_value = expected_data
        resp = views.delete_container(self.request, fakecontainer, force=True)
        self.assertTrue(resp)
        mock_head_container.assert_called()
        mock_get_container.assert_called()
        mock_get_info.assert_called()
        mock_prepare_data_name.assert_called()
        mock_post.assert_called_with('https://fake.api.globoi.com/v1/AUTH_1?bulk-delete=true', headers=headers,
                                     data=expected_data)
        mock_delete_container.assert_called()
        mock_action_log.assert_called()
class TestStorageAcls(BaseTestCase):
    """Tests for ``views.edit_acl``: listing, granting, and revoking container
    ACLs carried in the ``x-container-read``/``x-container-write`` headers."""

    def test_edit_acl_needs_authentication(self):
        # Unauthenticated access is redirected (302) instead of rendering.
        response = views.edit_acl(self.anonymous_request)
        self.assertEqual(response.status_code, 302)

    @patch('storage.views.main.client.head_container')
    def test_edit_acl_list_acls_container_public(self, mock_get_container):
        """
        Verify the ACL list for a public container and
        that the "Make private" action is available.
        """
        # '.r:*' in x-container-read marks the container world-readable.
        mock_get_container.return_value = {
            'x-container-read': '.r:*',
            'x-container-write': '',
        }
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        self.assertEqual(response.status_code, 200)
        self.assertIn('Add ACL to container fakecontainer', response.content.decode('UTF-8'))
        self.assertIn('Make private', response.content.decode('UTF-8'))
        self.assertIn('Add ACL', response.content.decode('UTF-8'))
        self.assertIn('.r:*', response.content.decode('UTF-8'))

    @patch('storage.views.main.client.head_container')
    def test_edit_acl_list_acls_container_private(self, mock_get_container):
        """
        Verify the ACL list for a private container with no ACLs and
        that the "Make public" action is available.
        """
        mock_get_container.return_value = {
            'x-container-read': '',
            'x-container-write': '',
        }
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        self.assertEqual(response.status_code, 200)
        self.assertIn('Make public', response.content.decode('UTF-8'))
        self.assertIn('Add ACL', response.content.decode('UTF-8'))
        expected = 'There are no ACLs for this container yet. Add a new ACL by clicking the red button.'
        self.assertIn(expected, response.content.decode('UTF-8'))

    @patch('storage.views.main.client.head_container')
    def test_edit_acl_private_container_but_public_for_read_and_write_for_an_user_and_project(self, mock_head_container):
        """
        Verify it properly lists the container's ACLs and
        that the "Make public" action is available.
        """
        mock_head_container.return_value = {
            'x-container-read': 'projectfake:userfake',
            'x-container-write': 'projectfake:userfake',
        }
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        self.assertEqual(response.status_code, 200)
        self.assertIn('Make public', response.content.decode('UTF-8'))
        self.assertIn('Add ACL', response.content.decode('UTF-8'))
        self.assertIn('projectfake:userfake', response.content.decode('UTF-8'))

    @patch('storage.views.main.client.head_container')
    def test_edit_acl_public_container_and_public_for_read_for_more_than_one_user_and_project(self, mock_head_container):
        """
        Verify it properly lists the ACLs of a public container
        and that "Make private" is available.
        """
        mock_head_container.return_value = {
            'x-container-read': '.r:*,projectfake:userfake',
            'x-container-write': 'projectfake2:userfake2',
        }
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        self.assertEqual(response.status_code, 200)
        self.assertIn('Make private', response.content.decode('UTF-8'))
        self.assertIn('Add ACL', response.content.decode('UTF-8'))
        self.assertIn('projectfake:userfake', response.content.decode('UTF-8'))
        self.assertIn('projectfake2:userfake2', response.content.decode('UTF-8'))

    @patch('storage.views.main.client.post_container')
    @patch('storage.views.main.client.head_container')
    def test_edit_acl_grant_read_and_write_permission_for_a_project_and_user(self, mock_head_container, mock_post_container):
        """Granting read+write appends the user to both ACL headers."""
        mock_head_container.return_value = {
            'x-container-read': '',
            'x-container-write': '',
        }
        self.request.method = 'POST'
        post = self.request.POST.copy()
        post.update({
            'username': 'projectfake:userfake',
            'read': 'On',
            'write': 'On'
        })
        self.request.POST = post
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        name, args, kargs = mock_post_container.mock_calls[0]
        # NOTE(review): the view keeps a leading comma when appending to an
        # empty ACL; the expectation mirrors that behaviour.
        expected_arg = {
            'X-Container-Write': ',projectfake:userfake',
            'X-Container-Read': ',projectfake:userfake'
        }
        self.assertEqual(expected_arg, kargs['headers'])
        msgs = [msg for msg in self.request._messages]
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(msgs), 1)
        self.assertEqual(msgs[0].message, _('ACLs updated'))

    @patch('storage.views.main.log.exception')
    @patch('storage.views.main.client.post_container')
    @patch('storage.views.main.client.head_container')
    def test_edit_acl_expcetion_on_grant_read_and_write_permission_for_a_project_and_user(self, mock_head_container, mock_post_container, mock_logging):
        """A ClientException during the grant is logged and reported."""
        mock_head_container.return_value = {
            'x-container-read': '',
            'x-container-write': '',
        }
        mock_post_container.side_effect = client.ClientException('')
        self.request.method = 'POST'
        post = self.request.POST.copy()
        post.update({
            'username': 'projectfake:userfake',
            'read': 'On',
            'write': 'On'
        })
        self.request.POST = post
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        msgs = [msg for msg in self.request._messages]
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(msgs), 1)
        self.assertTrue(mock_logging.called)
        self.assertEqual(msgs[0].message, _('ACL update failed'))

    @patch('storage.views.main.client.post_container')
    @patch('storage.views.main.client.head_container')
    def test_edit_acl_make_private(self, mock_head_container, mock_post_container):
        """
        Verify if the action "Making Private" is
        removing ".r:*" from x-container-read header
        """
        mock_head_container.return_value = {
            'x-container-read': '.r:*,projectfake:userfake',
            'x-container-write': 'projectfake2:userfake2',
        }
        self.request.method = 'GET'
        get = self.request.GET.copy()
        get.update({'delete': '.r:*,.rlistings'})
        self.request.GET = get
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(mock_post_container.called)
        name, args, kargs = mock_post_container.mock_calls[0]
        # Remaining entries are rewritten with a trailing comma by the view.
        expected_arg = {
            'X-Container-Write': 'projectfake2:userfake2,',
            'X-Container-Read': 'projectfake:userfake,'
        }
        self.assertEqual(expected_arg, kargs['headers'])

    @patch('storage.views.main.client.post_container')
    @patch('storage.views.main.client.head_container')
    def test_edit_acl_make_public(self, mock_head_container, mock_post_container):
        """
        Verify if the action "Making Public" is
        including ".r:*" in x-container-read header
        """
        mock_head_container.return_value = {
            'x-container-read': 'projectfake:userfake',
            'x-container-write': 'projectfake2:userfake2',
        }
        self.request.method = 'POST'
        post = self.request.POST.copy()
        post.update({'username': '.r:*', 'read': 'On'})
        self.request.POST = post
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(mock_post_container.called)
        name, args, kargs = mock_post_container.mock_calls[0]
        expected_arg = {
            'X-Container-Write': 'projectfake2:userfake2',
            'X-Container-Read': 'projectfake:userfake,.r:*'
        }
        self.assertEqual(expected_arg, kargs['headers'])

    @patch('storage.views.main.client.post_container')
    @patch('storage.views.main.client.head_container')
    def test_edit_acl_delete_acl_for_user_in_a_public_container(self, mock_head_container, mock_post_container):
        """ Verify if is deleting the correct ACL """
        mock_head_container.return_value = {
            'x-container-read': '.r:*,projectfake:userfake',
            'x-container-write': 'projectfake:userfake,projectfake2:userfake2',
        }
        self.request.method = 'GET'
        get = self.request.GET.copy()
        get.update({'delete': 'projectfake:userfake'})
        self.request.GET = get
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        self.assertEqual(response.status_code, 200)
        name, args, kargs = mock_post_container.mock_calls[0]
        # The user is removed from both headers; '.r:*' (public) is kept.
        expected_arg = {
            'X-Container-Write': 'projectfake2:userfake2,',
            'X-Container-Read': '.r:*,'
        }
        self.assertEqual(expected_arg, kargs['headers'])

    @patch('storage.views.main.client.post_container')
    @patch('storage.views.main.client.head_container')
    def test_edit_acl_delete_acl_for_user_in_a_private_container(self, mock_head_container, mock_post_container):
        """Deleting the only reader leaves an empty read ACL."""
        mock_head_container.return_value = {
            'x-container-read': 'projectfake:userfake',
            'x-container-write': 'projectfake:userfake,projectfake2:userfake2',
        }
        self.request.method = 'GET'
        get = self.request.GET.copy()
        get.update({'delete': 'projectfake:userfake'})
        self.request.GET = get
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        self.assertEqual(response.status_code, 200)
        name, args, kargs = mock_post_container.mock_calls[0]
        expected_arg = {
            'X-Container-Write': 'projectfake2:userfake2,',
            'X-Container-Read': ''
        }
        self.assertEqual(expected_arg, kargs['headers'])

    @patch('storage.views.main.log.exception')
    @patch('storage.views.main.client.post_container')
    @patch('storage.views.main.client.head_container')
    def test_edit_acl_delete_acl_exception(self, mock_head_container, mock_post_container, mock_logging):
        """A ClientException during ACL removal is logged and the ACL is kept."""
        mock_head_container.return_value = {
            'x-container-read': 'projectfake:userfake',
            'x-container-write': 'projectfake:userfake',
        }
        mock_post_container.side_effect = client.ClientException('')
        self.request.method = 'GET'
        get = self.request.GET.copy()
        get.update({'delete': 'projectfake:userfake'})
        self.request.GET = get
        project_name = self.request.session.get('project_name')
        response = views.edit_acl(self.request, project_name, 'fakecontainer')
        msgs = [msg for msg in self.request._messages]
        self.assertEqual(len(msgs), 1)
        self.assertTrue(mock_logging.called)
        self.assertEqual(msgs[0].message, _('ACL update failed'))
        # The ACL that failed to be removed is still rendered on the page.
        self.assertIn('projectfake:userfake', response.content.decode('UTF-8'))
class TestStorageCORS(BaseTestCase):
    """Tests for the CORS editing view and the cache-removal endpoint."""

    def test_edit_cors_needs_authentication(self):
        # Unauthenticated access is redirected (302) instead of rendering.
        response = views.edit_cors(self.anonymous_request)
        self.assertEqual(response.status_code, 302)

    @patch('storage.views.main.client.post_container')
    @patch('storage.views.main.client.head_container')
    def test_define_novo_host_para_regra_de_cors_no_container(self, mock_head_container, mock_post_container):
        """Adding a host stores it in the container's CORS origin header."""
        mock_head_container.return_value = {
            'x-container-meta-access-control-allow-origin': '',
        }
        self.request.method = 'POST'
        self.request.META.update({
            'HTTP_HOST': 'localhost'
        })
        post = self.request.POST.copy()
        project_name = self.request.session.get('project_name')
        post.update({'host': 'globo.com'})
        self.request.POST = post
        views.edit_cors(self.request, project_name, 'fakecontainer')
        name, args, kargs = mock_post_container.mock_calls[0]
        expected_arg = {'x-container-meta-access-control-allow-origin': 'globo.com'}
        self.assertEqual(expected_arg, kargs['headers'])

    @patch('storage.views.main.client.post_container')
    @patch('storage.views.main.client.head_container')
    def test_remove_host_da_regra_de_cors_do_container(self, mock_head_container, mock_post_container):
        """Deleting a host keeps the remaining entries of the CORS header."""
        # The header stores a space-separated list of allowed origins.
        mock_head_container.return_value = {
            'x-container-meta-access-control-allow-origin': 'globo.com globoi.com',
        }
        self.request.method = 'GET'
        self.request.META.update({
            'HTTP_HOST': 'localhost'
        })
        get = self.request.GET.copy()
        project_name = self.request.session.get('project_name')
        get.update({'delete': 'globo.com'})
        self.request.GET = get
        views.edit_cors(self.request, project_name, 'fakecontainer')
        name, args, kargs = mock_post_container.mock_calls[0]
        expected_arg = {'x-container-meta-access-control-allow-origin': 'globoi.com'}
        self.assertEqual(expected_arg, kargs['headers'])

    @patch('storage.views.main.requests.post')
    def test_remove_from_cache_status_201(self, mock_requests_post):
        """Cache removal posts the URL list and the requesting user as JSON."""
        mock_requests_post.return_value = fakes.FakeRequestResponse(201)
        self.request.method = 'POST'
        self.request.META.update({
            'HTTP_HOST': 'localhost'
        })
        post = self.request.POST.copy()
        # URLs arrive as one CRLF-separated textarea value.
        post.update({'urls': 'http://localhost/1\r\nhttp://localhost/2'})
        self.request.POST = post
        project_name = self.request.session.get('project_name')
        response = views.remove_from_cache(self.request, project_name)
        host, kargs = mock_requests_post.call_args
        expected_json = {
            'json': {
                'url': [u'http://localhost/1', u'http://localhost/2'],
                'user': self.request.user.username
            }
        }
        # Check that the API call was made with the correct arguments.
        self.assertEqual(expected_json, kargs)
| 41.044017 | 152 | 0.64552 |
795274505ce997e5df48259543ac10dd84916391 | 970 | py | Python | src/economy/migrations/0012_auto_20200811_0233.py | lgandersen/bornhack-website | fbda2b4b53dc2cb266d1d7c13ba0aad59d9079df | [
"BSD-3-Clause"
] | 7 | 2017-04-14T15:28:29.000Z | 2021-09-10T09:45:38.000Z | src/economy/migrations/0012_auto_20200811_0233.py | lgandersen/bornhack-website | fbda2b4b53dc2cb266d1d7c13ba0aad59d9079df | [
"BSD-3-Clause"
] | 799 | 2016-04-28T09:31:50.000Z | 2022-03-29T09:05:02.000Z | src/economy/migrations/0012_auto_20200811_0233.py | lgandersen/bornhack-website | fbda2b4b53dc2cb266d1d7c13ba0aad59d9079df | [
"BSD-3-Clause"
] | 35 | 2016-04-28T09:23:53.000Z | 2021-05-02T12:36:01.000Z | # Generated by Django 3.1 on 2020-08-11 00:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("economy", "0011_pos_posreport"),
]
operations = [
migrations.AlterField(
model_name="expense",
name="approved",
field=models.BooleanField(
default=None,
help_text="True if this expense has been approved by the responsible team. False if it has been rejected. Blank if noone has decided yet.",
null=True,
),
),
migrations.AlterField(
model_name="revenue",
name="approved",
field=models.BooleanField(
default=None,
help_text="True if this Revenue has been approved by the responsible team. False if it has been rejected. Blank if noone has decided yet.",
null=True,
),
),
]
| 30.3125 | 155 | 0.565979 |
79527584e7406ad4cdd51a45acf9f943279c5990 | 1,746 | py | Python | examples/event_listener_modern.py | Solotov/panoramisk | 04e90b5972788e47c678bbd86f2de16048fa8de6 | [
"MIT"
] | null | null | null | examples/event_listener_modern.py | Solotov/panoramisk | 04e90b5972788e47c678bbd86f2de16048fa8de6 | [
"MIT"
] | null | null | null | examples/event_listener_modern.py | Solotov/panoramisk | 04e90b5972788e47c678bbd86f2de16048fa8de6 | [
"MIT"
] | null | null | null | import asyncio
import logging
import os
from panoramisk import Manager, Message
manager = Manager(
host=os.getenv('AMI_HOST', '127.0.0.1'),
port=os.getenv('AMI_PORT', 5038),
username=os.getenv('AMI_USERNAME', 'user'),
secret=os.getenv('AMI_SECRET', 'password'),
ping_delay=10, # Delay after start
ping_interval=10, # Periodically ping AMI (dead or alive)
reconnect_timeout=2, # Timeout reconnect if connection lost
)
def on_connect(mngr: Manager):
    """Log that the TCP connection to the AMI socket succeeded."""
    cfg = mngr.config
    message = 'Connected to %s:%s AMI socket successfully' % (cfg['host'], cfg['port'])
    logging.info(message)
def on_login(mngr: Manager):
    """Log that the AMI login for the configured user succeeded."""
    cfg = mngr.config
    message = 'Connected user:%s to AMI %s:%s successfully' % (
        cfg['username'], cfg['host'], cfg['port'])
    logging.info(message)
def on_disconnect(mngr: Manager, exc: Exception):
    """Log a dropped AMI connection; the exception goes to debug level."""
    cfg = mngr.config
    message = 'Disconnect user:%s from AMI %s:%s' % (
        cfg['username'], cfg['host'], cfg['port'])
    logging.info(message)
    logging.debug(str(exc))
async def on_startup(mngr: Manager):
    # Runs once after the manager has connected and logged in.
    await asyncio.sleep(0.1)
    logging.info('Something action...')
async def on_shutdown(mngr: Manager):
    # Runs while the manager is shutting the AMI connection down.
    await asyncio.sleep(0.1)
    logging.info(
        'Shutdown AMI connection on %s:%s' % (mngr.config['host'], mngr.config['port'])
    )
@manager.register_event('*')  # Register all events
async def ami_callback(mngr: Manager, msg: Message):
    """Print the FullyBooted event; every other event is ignored."""
    if msg.Event != 'FullyBooted':
        return
    print(msg)
if __name__ == '__main__':
    # INFO level makes the lifecycle callbacks above visible on stdout.
    logging.basicConfig(level=logging.INFO)
    # Wire the lifecycle callbacks before connecting.
    manager.on_connect = on_connect
    manager.on_login = on_login
    manager.on_disconnect = on_disconnect
    # Blocks forever; reconnects automatically (see reconnect_timeout).
    manager.connect(run_forever=True, on_startup=on_startup, on_shutdown=on_shutdown)
7952766da6086698fbd6c44b7d836aab65ab99c2 | 1,767 | py | Python | Coloring/learning/ppo/utils/arguments.py | zarahz/MARL-and-Markets | 3591a160e098e7251b9e7c7b59c6d0ab08ba0779 | [
"MIT"
] | 1 | 2022-03-12T09:17:32.000Z | 2022-03-12T09:17:32.000Z | Coloring/learning/ppo/utils/arguments.py | zarahz/MARL-and-Markets | 3591a160e098e7251b9e7c7b59c6d0ab08ba0779 | [
"MIT"
] | null | null | null | Coloring/learning/ppo/utils/arguments.py | zarahz/MARL-and-Markets | 3591a160e098e7251b9e7c7b59c6d0ab08ba0779 | [
"MIT"
] | null | null | null | import argparse
def get_train_args(parser):
'''
Add PPO relevant training arguments to the parser.
'''
# epochs range(3,30), wie oft anhand der experience gelernt wird
parser.add_argument("--epochs", type=int, default=4,
help="[PPO] Number of epochs for PPO optimization. (default: 4)")
# GAE = Generalized advantage estimator wird in verbindung mit dem advantage estimator berechnet
# Â von GAE(delta, lambda) zum zeitpunkt t = Summe (lambda*gamma)^l * delta zum zeitpunkt (t+l) ^ V
# range(0.9,1)
parser.add_argument("--gae-lambda", type=float, default=0.95,
help="[PPO] Lambda coefficient in GAE formula, used for calculation of the advantage values. (default: 0.95, 1 means no gae)")
# entropy coef -> c2 * S[pi von theta](state t)
# with S as "entropy Bonus"
# range(0, 0.01)
parser.add_argument("--entropy-coef", type=float, default=0.01,
help="[PPO] Entropy term coefficient. (default: 0.01)")
# value function coef -> c1 * Loss func von VF zum Zeitpunkt t
# with LVF in t = (Vtheta(state t) - Vt ^ targ)^2 => squared error loss
# range(0.5,1)
# nötig wenn parameter zwischen policy und value funct. geteilt werden
parser.add_argument("--value-loss-coef", type=float, default=0.5,
help="[PPO] Value loss term coefficient. (default: 0.5)")
parser.add_argument("--max-grad-norm", type=float, default=0.5,
help="[PPO] Maximum norm of gradient. (default: 0.5)")
# epsilon of clipping range(0.1,0.3)
parser.add_argument("--clip-eps", type=float, default=0.2,
help="[PPO] Clipping epsilon for PPO. (default: 0.2)")
return parser
| 49.083333 | 150 | 0.625354 |
795276e0a7aa3ca3eded03e1fa003c1803fbef19 | 39,715 | py | Python | Lib/test/test_imaplib.py | fongchinghinunsw/cpython | 19926d058dc33856631c6c6b3fcb45b04fcab666 | [
"CNRI-Python-GPL-Compatible"
] | 3 | 2020-01-18T05:06:26.000Z | 2020-06-16T07:33:52.000Z | Lib/test/test_imaplib.py | fongchinghinunsw/cpython | 19926d058dc33856631c6c6b3fcb45b04fcab666 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2020-01-01T13:39:13.000Z | 2020-01-01T13:39:13.000Z | Lib/test/test_imaplib.py | fongchinghinunsw/cpython | 19926d058dc33856631c6c6b3fcb45b04fcab666 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2020-01-01T13:46:42.000Z | 2020-01-01T13:46:42.000Z | from test import support
from contextlib import contextmanager
import imaplib
import os.path
import socketserver
import time
import calendar
import threading
import socket
from test.support import (reap_threads, verbose, transient_internet,
run_with_tz, run_with_locale, cpython_only,
requires_hashdigest)
import unittest
from unittest import mock
from datetime import datetime, timezone, timedelta
try:
import ssl
except ImportError:
ssl = None
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "pycacert.pem")
class TestImaplib(unittest.TestCase):
    """Offline unit tests for imaplib's date/time helpers and port defaults."""

    def test_Internaldate2tuple(self):
        # Three INTERNALDATE strings for the same instant, expressed in
        # different zones; all must parse to the same local time tuple.
        t0 = calendar.timegm((2000, 1, 1, 0, 0, 0, -1, -1, -1))
        tt = imaplib.Internaldate2tuple(
            b'25 (INTERNALDATE "01-Jan-2000 00:00:00 +0000")')
        self.assertEqual(time.mktime(tt), t0)
        tt = imaplib.Internaldate2tuple(
            b'25 (INTERNALDATE "01-Jan-2000 11:30:00 +1130")')
        self.assertEqual(time.mktime(tt), t0)
        tt = imaplib.Internaldate2tuple(
            b'25 (INTERNALDATE "31-Dec-1999 12:30:00 -1130")')
        self.assertEqual(time.mktime(tt), t0)

    @run_with_tz('MST+07MDT,M4.1.0,M10.5.0')
    def test_Internaldate2tuple_issue10941(self):
        # Times an hour apart around a DST transition must not parse to the
        # same tuple (http://bugs.python.org/issue10941).
        self.assertNotEqual(imaplib.Internaldate2tuple(
            b'25 (INTERNALDATE "02-Apr-2000 02:30:00 +0000")'),
            imaplib.Internaldate2tuple(
                b'25 (INTERNALDATE "02-Apr-2000 03:30:00 +0000")'))

    def timevalues(self):
        # Every input form accepted by Time2Internaldate, covering int,
        # float, struct_time, raw 9-tuples, aware datetime and a
        # pre-formatted string.
        return [2000000000, 2000000000.0, time.localtime(2000000000),
                (2033, 5, 18, 5, 33, 20, -1, -1, -1),
                (2033, 5, 18, 5, 33, 20, -1, -1, 1),
                datetime.fromtimestamp(2000000000,
                                       timezone(timedelta(0, 2 * 60 * 60))),
                '"18-May-2033 05:33:20 +0200"']

    @run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
    # DST rules included to work around quirk where the Gnu C library may not
    # otherwise restore the previous time zone
    @run_with_tz('STD-1DST,M3.2.0,M11.1.0')
    def test_Time2Internaldate(self):
        # The month abbreviation must stay English even under a non-English
        # locale, and the offset must reflect the forced timezone.
        expected = '"18-May-2033 05:33:20 +0200"'
        for t in self.timevalues():
            internal = imaplib.Time2Internaldate(t)
            self.assertEqual(internal, expected)

    def test_that_Time2Internaldate_returns_a_result(self):
        # Without tzset, we can check only that it successfully
        # produces a result, not the correctness of the result itself,
        # since the result depends on the timezone the machine is in.
        for t in self.timevalues():
            imaplib.Time2Internaldate(t)

    def test_imap4_host_default_value(self):
        # Check whether the IMAP4_PORT is truly unavailable.
        with socket.socket() as s:
            try:
                s.connect(('', imaplib.IMAP4_PORT))
                self.skipTest(
                    "Cannot run the test with local IMAP server running.")
            except socket.error:
                pass

        # This is the exception that should be raised.
        expected_errnos = support.get_socket_conn_refused_errs()
        with self.assertRaises(OSError) as cm:
            imaplib.IMAP4()
        self.assertIn(cm.exception.errno, expected_errnos)
# IMAP4_SSL support: when the ssl module is available, provide a TCP server
# that wraps every accepted connection in TLS with the test certificate;
# otherwise define inert placeholders so the skip decorators below still work.
if ssl:
    class SecureTCPServer(socketserver.TCPServer):
        """TCPServer whose accepted connections are TLS-wrapped (server side)."""

        def get_request(self):
            newsocket, fromaddr = self.socket.accept()
            # Use an explicit server-side protocol: calling ssl.SSLContext()
            # without a protocol argument is deprecated since Python 3.10.
            context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
            context.load_cert_chain(CERTFILE)
            connstream = context.wrap_socket(newsocket, server_side=True)
            return connstream, fromaddr

    IMAP4_SSL = imaplib.IMAP4_SSL

else:
    class SecureTCPServer:
        """Placeholder used when the ssl module is unavailable."""

    IMAP4_SSL = None
class SimpleIMAPHandler(socketserver.StreamRequestHandler):
    """Scriptable server-side IMAP4 handler used by the tests.

    Subclasses implement commands as ``cmd_<NAME>(tag, args)`` methods.
    A command method may be a generator; it is then fed subsequent client
    lines (continuations) via ``send()`` until it stops iterating.
    """
    timeout = support.LOOPBACK_TIMEOUT
    # Generator currently consuming continuation lines, if any.
    continuation = None
    # Extra capability tokens advertised in addition to IMAP4rev1.
    capabilities = ''

    def setup(self):
        super().setup()
        # Username recorded by cmd_LOGIN; cleared again by cmd_LOGOUT.
        self.server.logged = None

    def _send(self, message):
        # `message` is bytes and must already carry any required terminator.
        if verbose:
            print("SENT: %r" % message.strip())
        self.wfile.write(message)

    def _send_line(self, message):
        self._send(message + b'\r\n')

    def _send_textline(self, message):
        self._send_line(message.encode('ASCII'))

    def _send_tagged(self, tag, code, message):
        self._send_textline(' '.join((tag, code, message)))

    def handle(self):
        # Send a welcome message.
        self._send_textline('* OK IMAP4rev1')
        while 1:
            # Gather up input until we receive a line terminator or we timeout.
            # Accumulate read(1) because it's simpler to handle the differences
            # between naked sockets and SSL sockets.
            line = b''
            while 1:
                try:
                    part = self.rfile.read(1)
                    if part == b'':
                        # Naked sockets return empty strings..
                        return
                    line += part
                except OSError:
                    # ..but SSLSockets raise exceptions.
                    return
                if line.endswith(b'\r\n'):
                    break

            if verbose:
                print('GOT: %r' % line.strip())
            if self.continuation:
                # An earlier command is waiting for this continuation line.
                try:
                    self.continuation.send(line)
                except StopIteration:
                    self.continuation = None
                continue
            splitline = line.decode('ASCII').split()
            tag = splitline[0]
            cmd = splitline[1]
            args = splitline[2:]

            if hasattr(self, 'cmd_' + cmd):
                # Dispatch to cmd_<CMD>; a generator result means the command
                # expects further client input.
                continuation = getattr(self, 'cmd_' + cmd)(tag, args)
                if continuation:
                    self.continuation = continuation
                    next(continuation)
            else:
                self._send_tagged(tag, 'BAD', cmd + ' unknown')

    def cmd_CAPABILITY(self, tag, args):
        caps = ('IMAP4rev1 ' + self.capabilities
                if self.capabilities
                else 'IMAP4rev1')
        self._send_textline('* CAPABILITY ' + caps)
        self._send_tagged(tag, 'OK', 'CAPABILITY completed')

    def cmd_LOGOUT(self, tag, args):
        self.server.logged = None
        self._send_textline('* BYE IMAP4ref1 Server logging out')
        self._send_tagged(tag, 'OK', 'LOGOUT completed')

    def cmd_LOGIN(self, tag, args):
        self.server.logged = args[0]
        self._send_tagged(tag, 'OK', 'LOGIN completed')
class NewIMAPTestsMixin():
    """Shared tests run against a per-test local IMAP server thread.

    Concrete subclasses supply ``imap_class`` and ``server_class`` (plain
    TCP or TLS variants).
    """
    client = None

    def _setup(self, imap_handler, connect=True):
        """
        Sets up imap_handler for tests. imap_handler should inherit from either:
        - SimpleIMAPHandler - for testing IMAP commands,
        - socketserver.StreamRequestHandler - if raw access to stream is needed.
        Returns (client, server).
        """
        # NOTE(review): TestTCPServer is defined but never instantiated below;
        # the plain self.server_class is used instead — confirm intent.
        class TestTCPServer(self.server_class):
            def handle_error(self, request, client_address):
                """
                End request and raise the error if one occurs.
                """
                self.close_request(request)
                self.server_close()
                raise

        self.addCleanup(self._cleanup)
        self.server = self.server_class((support.HOST, 0), imap_handler)
        self.thread = threading.Thread(
            name=self._testMethodName+'-server',
            target=self.server.serve_forever,
            # Short poll interval to make the test finish quickly.
            # Time between requests is short enough that we won't wake
            # up spuriously too many times.
            kwargs={'poll_interval': 0.01})
        self.thread.daemon = True  # In case this function raises.
        self.thread.start()

        if connect:
            self.client = self.imap_class(*self.server.server_address)
        return self.client, self.server

    def _cleanup(self):
        """
        Cleans up the test server. This method should not be called manually,
        it is added to the cleanup queue in the _setup method already.
        """
        # if logout was called already we'd raise an exception trying to
        # shutdown the client once again
        if self.client is not None and self.client.state != 'LOGOUT':
            self.client.shutdown()
        # cleanup the server
        self.server.shutdown()
        self.server.server_close()
        support.join_thread(self.thread)
        # Explicitly clear the attribute to prevent dangling thread
        self.thread = None

    def test_EOF_without_complete_welcome_message(self):
        # http://bugs.python.org/issue5949
        class EOFHandler(socketserver.StreamRequestHandler):
            def handle(self):
                self.wfile.write(b'* OK')
        _, server = self._setup(EOFHandler, connect=False)
        self.assertRaises(imaplib.IMAP4.abort, self.imap_class,
                          *server.server_address)

    def test_line_termination(self):
        class BadNewlineHandler(SimpleIMAPHandler):
            def cmd_CAPABILITY(self, tag, args):
                # Bare LF instead of CRLF must abort the connection.
                self._send(b'* CAPABILITY IMAP4rev1 AUTH\n')
                self._send_tagged(tag, 'OK', 'CAPABILITY completed')
        _, server = self._setup(BadNewlineHandler, connect=False)
        self.assertRaises(imaplib.IMAP4.abort, self.imap_class,
                          *server.server_address)

    def test_enable_raises_error_if_not_AUTH(self):
        class EnableHandler(SimpleIMAPHandler):
            capabilities = 'AUTH ENABLE UTF8=ACCEPT'
        client, _ = self._setup(EnableHandler)
        self.assertFalse(client.utf8_enabled)
        with self.assertRaisesRegex(imaplib.IMAP4.error, 'ENABLE.*NONAUTH'):
            client.enable('foo')
        self.assertFalse(client.utf8_enabled)

    def test_enable_raises_error_if_no_capability(self):
        client, _ = self._setup(SimpleIMAPHandler)
        with self.assertRaisesRegex(imaplib.IMAP4.error,
                'does not support ENABLE'):
            client.enable('foo')

    def test_enable_UTF8_raises_error_if_not_supported(self):
        client, _ = self._setup(SimpleIMAPHandler)
        typ, data = client.login('user', 'pass')
        self.assertEqual(typ, 'OK')
        with self.assertRaisesRegex(imaplib.IMAP4.error,
                'does not support ENABLE'):
            client.enable('UTF8=ACCEPT')

    def test_enable_UTF8_True_append(self):
        class UTF8AppendServer(SimpleIMAPHandler):
            capabilities = 'ENABLE UTF8=ACCEPT'
            def cmd_ENABLE(self, tag, args):
                self._send_tagged(tag, 'OK', 'ENABLE successful')
            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+')
                self.server.response = yield
                self._send_tagged(tag, 'OK', 'FAKEAUTH successful')
            def cmd_APPEND(self, tag, args):
                self._send_textline('+')
                self.server.response = yield
                self._send_tagged(tag, 'OK', 'okay')
        client, server = self._setup(UTF8AppendServer)
        self.assertEqual(client._encoding, 'ascii')
        code, _ = client.authenticate('MYAUTH', lambda x: b'fake')
        self.assertEqual(code, 'OK')
        self.assertEqual(server.response, b'ZmFrZQ==\r\n')  # b64 encoded 'fake'
        code, _ = client.enable('UTF8=ACCEPT')
        self.assertEqual(code, 'OK')
        # Once UTF8 is enabled, commands are sent UTF-8 encoded.
        self.assertEqual(client._encoding, 'utf-8')
        msg_string = 'Subject: üñí©öðé'
        typ, data = client.append(None, None, None, msg_string.encode('utf-8'))
        self.assertEqual(typ, 'OK')
        self.assertEqual(server.response,
            ('UTF8 (%s)\r\n' % msg_string).encode('utf-8'))

    def test_search_disallows_charset_in_utf8_mode(self):
        class UTF8Server(SimpleIMAPHandler):
            capabilities = 'AUTH ENABLE UTF8=ACCEPT'
            def cmd_ENABLE(self, tag, args):
                self._send_tagged(tag, 'OK', 'ENABLE successful')
            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+')
                self.server.response = yield
                self._send_tagged(tag, 'OK', 'FAKEAUTH successful')
        client, _ = self._setup(UTF8Server)
        typ, _ = client.authenticate('MYAUTH', lambda x: b'fake')
        self.assertEqual(typ, 'OK')
        typ, _ = client.enable('UTF8=ACCEPT')
        self.assertEqual(typ, 'OK')
        self.assertTrue(client.utf8_enabled)
        # RFC 6855 forbids a SEARCH charset argument once UTF8 is enabled.
        with self.assertRaisesRegex(imaplib.IMAP4.error, 'charset.*UTF8'):
            client.search('foo', 'bar')

    def test_bad_auth_name(self):
        class MyServer(SimpleIMAPHandler):
            def cmd_AUTHENTICATE(self, tag, args):
                self._send_tagged(tag, 'NO',
                    'unrecognized authentication type {}'.format(args[0]))
        client, _ = self._setup(MyServer)
        with self.assertRaisesRegex(imaplib.IMAP4.error,
                'unrecognized authentication type METHOD'):
            client.authenticate('METHOD', lambda: 1)

    def test_invalid_authentication(self):
        class MyServer(SimpleIMAPHandler):
            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+')
                self.response = yield
                self._send_tagged(tag, 'NO', '[AUTHENTICATIONFAILED] invalid')
        client, _ = self._setup(MyServer)
        with self.assertRaisesRegex(imaplib.IMAP4.error,
                r'\[AUTHENTICATIONFAILED\] invalid'):
            client.authenticate('MYAUTH', lambda x: b'fake')

    def test_valid_authentication_bytes(self):
        class MyServer(SimpleIMAPHandler):
            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+')
                self.server.response = yield
                self._send_tagged(tag, 'OK', 'FAKEAUTH successful')
        client, server = self._setup(MyServer)
        code, _ = client.authenticate('MYAUTH', lambda x: b'fake')
        self.assertEqual(code, 'OK')
        self.assertEqual(server.response, b'ZmFrZQ==\r\n')  # b64 encoded 'fake'

    def test_valid_authentication_plain_text(self):
        class MyServer(SimpleIMAPHandler):
            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+')
                self.server.response = yield
                self._send_tagged(tag, 'OK', 'FAKEAUTH successful')
        client, server = self._setup(MyServer)
        code, _ = client.authenticate('MYAUTH', lambda x: 'fake')
        self.assertEqual(code, 'OK')
        self.assertEqual(server.response, b'ZmFrZQ==\r\n')  # b64 encoded 'fake'

    @requires_hashdigest('md5')
    def test_login_cram_md5_bytes(self):
        class AuthHandler(SimpleIMAPHandler):
            capabilities = 'LOGINDISABLED AUTH=CRAM-MD5'
            def cmd_AUTHENTICATE(self, tag, args):
                # Fixed challenge/response pair from RFC 2195's example.
                self._send_textline('+ PDE4OTYuNjk3MTcwOTUyQHBvc3RvZmZpY2Uucm'
                                    'VzdG9uLm1jaS5uZXQ=')
                r = yield
                if (r == b'dGltIGYxY2E2YmU0NjRiOWVmYT'
                         b'FjY2E2ZmZkNmNmMmQ5ZjMy\r\n'):
                    self._send_tagged(tag, 'OK', 'CRAM-MD5 successful')
                else:
                    self._send_tagged(tag, 'NO', 'No access')
        client, _ = self._setup(AuthHandler)
        self.assertTrue('AUTH=CRAM-MD5' in client.capabilities)
        ret, _ = client.login_cram_md5("tim", b"tanstaaftanstaaf")
        self.assertEqual(ret, "OK")

    @requires_hashdigest('md5')
    def test_login_cram_md5_plain_text(self):
        class AuthHandler(SimpleIMAPHandler):
            capabilities = 'LOGINDISABLED AUTH=CRAM-MD5'
            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+ PDE4OTYuNjk3MTcwOTUyQHBvc3RvZmZpY2Uucm'
                                    'VzdG9uLm1jaS5uZXQ=')
                r = yield
                if (r == b'dGltIGYxY2E2YmU0NjRiOWVmYT'
                         b'FjY2E2ZmZkNmNmMmQ5ZjMy\r\n'):
                    self._send_tagged(tag, 'OK', 'CRAM-MD5 successful')
                else:
                    self._send_tagged(tag, 'NO', 'No access')
        client, _ = self._setup(AuthHandler)
        self.assertTrue('AUTH=CRAM-MD5' in client.capabilities)
        ret, _ = client.login_cram_md5("tim", "tanstaaftanstaaf")
        self.assertEqual(ret, "OK")

    def test_aborted_authentication(self):
        class MyServer(SimpleIMAPHandler):
            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+')
                self.response = yield
                if self.response == b'*\r\n':
                    # Client sent the RFC 3501 '*' abort token.
                    self._send_tagged(
                        tag,
                        'NO',
                        '[AUTHENTICATIONFAILED] aborted')
                else:
                    self._send_tagged(tag, 'OK', 'MYAUTH successful')
        client, _ = self._setup(MyServer)
        with self.assertRaisesRegex(imaplib.IMAP4.error,
                r'\[AUTHENTICATIONFAILED\] aborted'):
            client.authenticate('MYAUTH', lambda x: None)

    @mock.patch('imaplib._MAXLINE', 10)
    def test_linetoolong(self):
        class TooLongHandler(SimpleIMAPHandler):
            def handle(self):
                # send response line longer than the limit set in the next line
                self.wfile.write(b'* OK ' + 11 * b'x' + b'\r\n')
        _, server = self._setup(TooLongHandler, connect=False)
        with self.assertRaisesRegex(imaplib.IMAP4.error,
                'got more than 10 bytes'):
            self.imap_class(*server.server_address)

    def test_simple_with_statement(self):
        _, server = self._setup(SimpleIMAPHandler, connect=False)
        with self.imap_class(*server.server_address):
            pass

    def test_with_statement(self):
        _, server = self._setup(SimpleIMAPHandler, connect=False)
        with self.imap_class(*server.server_address) as imap:
            imap.login('user', 'pass')
            self.assertEqual(server.logged, 'user')
        # Leaving the with-block must log the client out.
        self.assertIsNone(server.logged)

    def test_with_statement_logout(self):
        # It is legal to log out explicitly inside the with block
        _, server = self._setup(SimpleIMAPHandler, connect=False)
        with self.imap_class(*server.server_address) as imap:
            imap.login('user', 'pass')
            self.assertEqual(server.logged, 'user')
            imap.logout()
            self.assertIsNone(server.logged)
        self.assertIsNone(server.logged)

    # command tests

    def test_login(self):
        client, _ = self._setup(SimpleIMAPHandler)
        typ, data = client.login('user', 'pass')
        self.assertEqual(typ, 'OK')
        self.assertEqual(data[0], b'LOGIN completed')
        self.assertEqual(client.state, 'AUTH')

    def test_logout(self):
        client, _ = self._setup(SimpleIMAPHandler)
        typ, data = client.login('user', 'pass')
        self.assertEqual(typ, 'OK')
        self.assertEqual(data[0], b'LOGIN completed')
        typ, data = client.logout()
        self.assertEqual(typ, 'BYE', (typ, data))
        self.assertEqual(data[0], b'IMAP4ref1 Server logging out', (typ, data))
        self.assertEqual(client.state, 'LOGOUT')

    def test_lsub(self):
        class LsubCmd(SimpleIMAPHandler):
            def cmd_LSUB(self, tag, args):
                self._send_textline('* LSUB () "." directoryA')
                return self._send_tagged(tag, 'OK', 'LSUB completed')
        client, _ = self._setup(LsubCmd)
        client.login('user', 'pass')
        typ, data = client.lsub()
        self.assertEqual(typ, 'OK')
        self.assertEqual(data[0], b'() "." directoryA')
# Run the mixin's tests over a plain (non-TLS) TCP connection.
class NewIMAPTests(NewIMAPTestsMixin, unittest.TestCase):
    imap_class = imaplib.IMAP4
    server_class = socketserver.TCPServer
@unittest.skipUnless(ssl, "SSL not available")
class NewIMAPSSLTests(NewIMAPTestsMixin, unittest.TestCase):
    """Mixin tests over TLS, plus SSL-specific checks."""
    imap_class = IMAP4_SSL
    server_class = SecureTCPServer

    def test_ssl_raises(self):
        # With hostname checking on, connecting by IP must fail verification.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        self.assertEqual(ssl_context.verify_mode, ssl.CERT_REQUIRED)
        self.assertEqual(ssl_context.check_hostname, True)
        ssl_context.load_verify_locations(CAFILE)

        with self.assertRaisesRegex(ssl.CertificateError,
                "IP address mismatch, certificate is not valid for "
                "'127.0.0.1'"):
            _, server = self._setup(SimpleIMAPHandler)
            client = self.imap_class(*server.server_address,
                                     ssl_context=ssl_context)
            client.shutdown()

    def test_ssl_verified(self):
        # Connecting by hostname matches the certificate and succeeds.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ssl_context.load_verify_locations(CAFILE)

        _, server = self._setup(SimpleIMAPHandler)
        client = self.imap_class("localhost", server.server_address[1],
                                 ssl_context=ssl_context)
        client.shutdown()

    # Mock the private method _connect(), so mark the test as specific
    # to CPython stdlib
    @cpython_only
    def test_certfile_arg_warn(self):
        with support.check_warnings(('', DeprecationWarning)):
            with mock.patch.object(self.imap_class, 'open'):
                with mock.patch.object(self.imap_class, '_connect'):
                    self.imap_class('localhost', 143, certfile=CERTFILE)
class ThreadedNetworkedTests(unittest.TestCase):
    """Legacy threaded-server test suite (context-manager based setup)."""
    server_class = socketserver.TCPServer
    imap_class = imaplib.IMAP4

    def make_server(self, addr, hdlr):
        """Start a server thread for handler `hdlr`; return (server, thread)."""
        class MyServer(self.server_class):
            def handle_error(self, request, client_address):
                # Propagate handler errors instead of swallowing them.
                self.close_request(request)
                self.server_close()
                raise

        if verbose:
            print("creating server")
        server = MyServer(addr, hdlr)
        self.assertEqual(server.server_address, server.socket.getsockname())

        if verbose:
            print("server created")
            print("ADDR =", addr)
            print("CLASS =", self.server_class)
            print("HDLR =", server.RequestHandlerClass)

        t = threading.Thread(
            name='%s serving' % self.server_class,
            target=server.serve_forever,
            # Short poll interval to make the test finish quickly.
            # Time between requests is short enough that we won't wake
            # up spuriously too many times.
            kwargs={'poll_interval': 0.01})
        t.daemon = True  # In case this function raises.
        t.start()
        if verbose:
            print("server running")
        return server, t

    def reap_server(self, server, thread):
        """Shut the server down and join its thread."""
        if verbose:
            print("waiting for server")
        server.shutdown()
        server.server_close()
        thread.join()
        if verbose:
            print("done")

    @contextmanager
    def reaped_server(self, hdlr):
        # Server that is guaranteed to be reaped on block exit.
        server, thread = self.make_server((support.HOST, 0), hdlr)
        try:
            yield server
        finally:
            self.reap_server(server, thread)

    @contextmanager
    def reaped_pair(self, hdlr):
        # (server, connected client) pair; client is logged out on exit.
        with self.reaped_server(hdlr) as server:
            client = self.imap_class(*server.server_address)
            try:
                yield server, client
            finally:
                client.logout()

    @reap_threads
    def test_connect(self):
        with self.reaped_server(SimpleIMAPHandler) as server:
            client = self.imap_class(*server.server_address)
            client.shutdown()

    @reap_threads
    def test_bracket_flags(self):
        # This violates RFC 3501, which disallows ']' characters in tag names,
        # but imaplib has allowed producing such tags forever, other programs
        # also produce them (eg: OtherInbox's Organizer app as of 20140716),
        # and Gmail, for example, accepts them and produces them.  So we
        # support them.  See issue #21815.

        class BracketFlagHandler(SimpleIMAPHandler):

            def handle(self):
                self.flags = ['Answered', 'Flagged', 'Deleted', 'Seen', 'Draft']
                super().handle()

            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+')
                self.server.response = yield
                self._send_tagged(tag, 'OK', 'FAKEAUTH successful')

            def cmd_SELECT(self, tag, args):
                flag_msg = ' \\'.join(self.flags)
                self._send_line(('* FLAGS (%s)' % flag_msg).encode('ascii'))
                self._send_line(b'* 2 EXISTS')
                self._send_line(b'* 0 RECENT')
                msg = ('* OK [PERMANENTFLAGS %s \\*)] Flags permitted.'
                       % flag_msg)
                self._send_line(msg.encode('ascii'))
                self._send_tagged(tag, 'OK', '[READ-WRITE] SELECT completed.')

            def cmd_STORE(self, tag, args):
                new_flags = args[2].strip('(').strip(')').split()
                self.flags.extend(new_flags)
                flags_msg = '(FLAGS (%s))' % ' \\'.join(self.flags)
                msg = '* %s FETCH %s' % (args[0], flags_msg)
                self._send_line(msg.encode('ascii'))
                self._send_tagged(tag, 'OK', 'STORE completed.')

        with self.reaped_pair(BracketFlagHandler) as (server, client):
            code, data = client.authenticate('MYAUTH', lambda x: b'fake')
            self.assertEqual(code, 'OK')
            self.assertEqual(server.response, b'ZmFrZQ==\r\n')
            client.select('test')
            typ, [data] = client.store(b'1', "+FLAGS", "[test]")
            self.assertIn(b'[test]', data)
            client.select('test')
            typ, [data] = client.response('PERMANENTFLAGS')
            self.assertIn(b'[test]', data)

    @reap_threads
    def test_issue5949(self):

        class EOFHandler(socketserver.StreamRequestHandler):
            def handle(self):
                # EOF without sending a complete welcome message.
                self.wfile.write(b'* OK')

        with self.reaped_server(EOFHandler) as server:
            self.assertRaises(imaplib.IMAP4.abort,
                              self.imap_class, *server.server_address)

    @reap_threads
    def test_line_termination(self):

        class BadNewlineHandler(SimpleIMAPHandler):

            def cmd_CAPABILITY(self, tag, args):
                self._send(b'* CAPABILITY IMAP4rev1 AUTH\n')
                self._send_tagged(tag, 'OK', 'CAPABILITY completed')

        with self.reaped_server(BadNewlineHandler) as server:
            self.assertRaises(imaplib.IMAP4.abort,
                              self.imap_class, *server.server_address)

    # Handler shared by the UTF8/ENABLE tests below.
    class UTF8Server(SimpleIMAPHandler):
        capabilities = 'AUTH ENABLE UTF8=ACCEPT'

        def cmd_ENABLE(self, tag, args):
            self._send_tagged(tag, 'OK', 'ENABLE successful')

        def cmd_AUTHENTICATE(self, tag, args):
            self._send_textline('+')
            self.server.response = yield
            self._send_tagged(tag, 'OK', 'FAKEAUTH successful')

    @reap_threads
    def test_enable_raises_error_if_not_AUTH(self):
        with self.reaped_pair(self.UTF8Server) as (server, client):
            self.assertFalse(client.utf8_enabled)
            self.assertRaises(imaplib.IMAP4.error, client.enable, 'foo')
            self.assertFalse(client.utf8_enabled)

    # XXX Also need a test that enable after SELECT raises an error.

    @reap_threads
    def test_enable_raises_error_if_no_capability(self):
        class NoEnableServer(self.UTF8Server):
            capabilities = 'AUTH'
        with self.reaped_pair(NoEnableServer) as (server, client):
            self.assertRaises(imaplib.IMAP4.error, client.enable, 'foo')

    @reap_threads
    def test_enable_UTF8_raises_error_if_not_supported(self):
        class NonUTF8Server(SimpleIMAPHandler):
            pass
        with self.assertRaises(imaplib.IMAP4.error):
            with self.reaped_pair(NonUTF8Server) as (server, client):
                typ, data = client.login('user', 'pass')
                self.assertEqual(typ, 'OK')
                client.enable('UTF8=ACCEPT')
                pass

    @reap_threads
    def test_enable_UTF8_True_append(self):

        class UTF8AppendServer(self.UTF8Server):
            def cmd_APPEND(self, tag, args):
                self._send_textline('+')
                self.server.response = yield
                self._send_tagged(tag, 'OK', 'okay')

        with self.reaped_pair(UTF8AppendServer) as (server, client):
            self.assertEqual(client._encoding, 'ascii')
            code, _ = client.authenticate('MYAUTH', lambda x: b'fake')
            self.assertEqual(code, 'OK')
            self.assertEqual(server.response,
                             b'ZmFrZQ==\r\n')  # b64 encoded 'fake'
            code, _ = client.enable('UTF8=ACCEPT')
            self.assertEqual(code, 'OK')
            self.assertEqual(client._encoding, 'utf-8')
            msg_string = 'Subject: üñí©öðé'
            typ, data = client.append(
                None, None, None, msg_string.encode('utf-8'))
            self.assertEqual(typ, 'OK')
            self.assertEqual(
                server.response,
                ('UTF8 (%s)\r\n' % msg_string).encode('utf-8')
            )

    # XXX also need a test that makes sure that the Literal and Untagged_status
    # regexes uses unicode in UTF8 mode instead of the default ASCII.

    @reap_threads
    def test_search_disallows_charset_in_utf8_mode(self):
        with self.reaped_pair(self.UTF8Server) as (server, client):
            typ, _ = client.authenticate('MYAUTH', lambda x: b'fake')
            self.assertEqual(typ, 'OK')
            typ, _ = client.enable('UTF8=ACCEPT')
            self.assertEqual(typ, 'OK')
            self.assertTrue(client.utf8_enabled)
            self.assertRaises(imaplib.IMAP4.error, client.search, 'foo', 'bar')

    @reap_threads
    def test_bad_auth_name(self):

        class MyServer(SimpleIMAPHandler):

            def cmd_AUTHENTICATE(self, tag, args):
                self._send_tagged(tag, 'NO', 'unrecognized authentication '
                                  'type {}'.format(args[0]))

        with self.reaped_pair(MyServer) as (server, client):
            with self.assertRaises(imaplib.IMAP4.error):
                client.authenticate('METHOD', lambda: 1)

    @reap_threads
    def test_invalid_authentication(self):

        class MyServer(SimpleIMAPHandler):

            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+')
                self.response = yield
                self._send_tagged(tag, 'NO', '[AUTHENTICATIONFAILED] invalid')

        with self.reaped_pair(MyServer) as (server, client):
            with self.assertRaises(imaplib.IMAP4.error):
                code, data = client.authenticate('MYAUTH', lambda x: b'fake')

    @reap_threads
    def test_valid_authentication(self):

        class MyServer(SimpleIMAPHandler):

            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+')
                self.server.response = yield
                self._send_tagged(tag, 'OK', 'FAKEAUTH successful')

        with self.reaped_pair(MyServer) as (server, client):
            code, data = client.authenticate('MYAUTH', lambda x: b'fake')
            self.assertEqual(code, 'OK')
            self.assertEqual(server.response,
                             b'ZmFrZQ==\r\n')  # b64 encoded 'fake'

        with self.reaped_pair(MyServer) as (server, client):
            code, data = client.authenticate('MYAUTH', lambda x: 'fake')
            self.assertEqual(code, 'OK')
            self.assertEqual(server.response,
                             b'ZmFrZQ==\r\n')  # b64 encoded 'fake'

    @reap_threads
    @requires_hashdigest('md5')
    def test_login_cram_md5(self):

        class AuthHandler(SimpleIMAPHandler):

            capabilities = 'LOGINDISABLED AUTH=CRAM-MD5'

            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+ PDE4OTYuNjk3MTcwOTUyQHBvc3RvZmZpY2Uucm'
                                    'VzdG9uLm1jaS5uZXQ=')
                r = yield
                if (r == b'dGltIGYxY2E2YmU0NjRiOWVmYT'
                         b'FjY2E2ZmZkNmNmMmQ5ZjMy\r\n'):
                    self._send_tagged(tag, 'OK', 'CRAM-MD5 successful')
                else:
                    self._send_tagged(tag, 'NO', 'No access')

        with self.reaped_pair(AuthHandler) as (server, client):
            self.assertTrue('AUTH=CRAM-MD5' in client.capabilities)
            ret, data = client.login_cram_md5("tim", "tanstaaftanstaaf")
            self.assertEqual(ret, "OK")

        with self.reaped_pair(AuthHandler) as (server, client):
            self.assertTrue('AUTH=CRAM-MD5' in client.capabilities)
            ret, data = client.login_cram_md5("tim", b"tanstaaftanstaaf")
            self.assertEqual(ret, "OK")

    @reap_threads
    def test_aborted_authentication(self):

        class MyServer(SimpleIMAPHandler):

            def cmd_AUTHENTICATE(self, tag, args):
                self._send_textline('+')
                self.response = yield

                if self.response == b'*\r\n':
                    self._send_tagged(tag, 'NO', '[AUTHENTICATIONFAILED] aborted')
                else:
                    self._send_tagged(tag, 'OK', 'MYAUTH successful')

        with self.reaped_pair(MyServer) as (server, client):
            with self.assertRaises(imaplib.IMAP4.error):
                code, data = client.authenticate('MYAUTH', lambda x: None)

    def test_linetoolong(self):
        class TooLongHandler(SimpleIMAPHandler):
            def handle(self):
                # Send a very long response line
                self.wfile.write(b'* OK ' + imaplib._MAXLINE * b'x' + b'\r\n')

        with self.reaped_server(TooLongHandler) as server:
            self.assertRaises(imaplib.IMAP4.error,
                              self.imap_class, *server.server_address)

    @reap_threads
    def test_simple_with_statement(self):
        # simplest call
        with self.reaped_server(SimpleIMAPHandler) as server:
            with self.imap_class(*server.server_address):
                pass

    @reap_threads
    def test_with_statement(self):
        with self.reaped_server(SimpleIMAPHandler) as server:
            with self.imap_class(*server.server_address) as imap:
                imap.login('user', 'pass')
                self.assertEqual(server.logged, 'user')
            self.assertIsNone(server.logged)

    @reap_threads
    def test_with_statement_logout(self):
        # what happens if already logout in the block?
        with self.reaped_server(SimpleIMAPHandler) as server:
            with self.imap_class(*server.server_address) as imap:
                imap.login('user', 'pass')
                self.assertEqual(server.logged, 'user')
                imap.logout()
                self.assertIsNone(server.logged)
            self.assertIsNone(server.logged)
@unittest.skipUnless(ssl, "SSL not available")
class ThreadedNetworkedTestsSSL(ThreadedNetworkedTests):
    """Re-run the threaded suite over TLS, plus a verification test."""
    server_class = SecureTCPServer
    imap_class = IMAP4_SSL

    @reap_threads
    def test_ssl_verified(self):
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ssl_context.load_verify_locations(CAFILE)

        # Connecting by IP must fail hostname verification...
        with self.assertRaisesRegex(
                ssl.CertificateError,
                "IP address mismatch, certificate is not valid for "
                "'127.0.0.1'"):
            with self.reaped_server(SimpleIMAPHandler) as server:
                client = self.imap_class(*server.server_address,
                                         ssl_context=ssl_context)
                client.shutdown()

        # ...while connecting by matching hostname succeeds.
        with self.reaped_server(SimpleIMAPHandler) as server:
            client = self.imap_class("localhost", server.server_address[1],
                                     ssl_context=ssl_context)
            client.shutdown()
@unittest.skipUnless(
    support.is_resource_enabled('network'), 'network resource disabled')
class RemoteIMAPTest(unittest.TestCase):
    """Tests against a real public IMAP server (requires the network resource)."""
    host = 'cyrus.andrew.cmu.edu'
    port = 143
    username = 'anonymous'
    password = 'pass'
    imap_class = imaplib.IMAP4

    def setUp(self):
        with transient_internet(self.host):
            self.server = self.imap_class(self.host, self.port)

    def tearDown(self):
        if self.server is not None:
            with transient_internet(self.host):
                self.server.logout()

    def test_logincapa(self):
        with transient_internet(self.host):
            for cap in self.server.capabilities:
                self.assertIsInstance(cap, str)
            # Plaintext LOGIN is disabled on this server; anonymous AUTH works.
            self.assertIn('LOGINDISABLED', self.server.capabilities)
            self.assertIn('AUTH=ANONYMOUS', self.server.capabilities)
            rs = self.server.login(self.username, self.password)
            self.assertEqual(rs[0], 'OK')

    def test_logout(self):
        with transient_internet(self.host):
            rs = self.server.logout()
            self.server = None  # prevent tearDown from logging out again
            self.assertEqual(rs[0], 'BYE', rs)
@unittest.skipUnless(ssl, "SSL not available")
@unittest.skipUnless(
    support.is_resource_enabled('network'), 'network resource disabled')
class RemoteIMAP_STARTTLSTest(RemoteIMAPTest):
    """Same remote tests after upgrading the connection with STARTTLS."""

    def setUp(self):
        super().setUp()
        with transient_internet(self.host):
            rs = self.server.starttls()
            self.assertEqual(rs[0], 'OK')

    def test_logincapa(self):
        for cap in self.server.capabilities:
            self.assertIsInstance(cap, str)
        # After STARTTLS the server should re-enable plaintext LOGIN.
        self.assertNotIn('LOGINDISABLED', self.server.capabilities)
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_SSLTest(RemoteIMAPTest):
    """Remote tests over implicit TLS (port 993); connections made per-test."""
    port = 993
    imap_class = IMAP4_SSL

    def setUp(self):
        pass  # each test creates its own connection

    def tearDown(self):
        pass

    def create_ssl_context(self):
        # Client context with verification disabled; presents the test cert.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
        ssl_context.load_cert_chain(CERTFILE)
        return ssl_context

    def check_logincapa(self, server):
        try:
            for cap in server.capabilities:
                self.assertIsInstance(cap, str)
            self.assertNotIn('LOGINDISABLED', server.capabilities)
            self.assertIn('AUTH=PLAIN', server.capabilities)
            rs = server.login(self.username, self.password)
            self.assertEqual(rs[0], 'OK')
        finally:
            server.logout()

    def test_logincapa(self):
        with transient_internet(self.host):
            _server = self.imap_class(self.host, self.port)
            self.check_logincapa(_server)

    def test_logout(self):
        with transient_internet(self.host):
            _server = self.imap_class(self.host, self.port)
            rs = _server.logout()
            self.assertEqual(rs[0], 'BYE', rs)

    def test_ssl_context_certfile_exclusive(self):
        # certfile and ssl_context are mutually exclusive arguments.
        with transient_internet(self.host):
            self.assertRaises(
                ValueError, self.imap_class, self.host, self.port,
                certfile=CERTFILE, ssl_context=self.create_ssl_context())

    def test_ssl_context_keyfile_exclusive(self):
        # keyfile and ssl_context are mutually exclusive arguments.
        with transient_internet(self.host):
            self.assertRaises(
                ValueError, self.imap_class, self.host, self.port,
                keyfile=CERTFILE, ssl_context=self.create_ssl_context())
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| 39.01277 | 82 | 0.599673 |
7952780fb1c8fa0e91e8f7f062bc73f4d2c43130 | 6,932 | gyp | Python | ion/external/freeglut.gyp | RobLoach/ion | 9e659416fb04bb3d3a67df1e018d7c2ccab9d468 | [
"Apache-2.0"
] | null | null | null | ion/external/freeglut.gyp | RobLoach/ion | 9e659416fb04bb3d3a67df1e018d7c2ccab9d468 | [
"Apache-2.0"
] | null | null | null | ion/external/freeglut.gyp | RobLoach/ion | 9e659416fb04bb3d3a67df1e018d7c2ccab9d468 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file only gets included on Windows and Linux.
{
  'includes' : [
    '../common.gypi',
    'external_common.gypi',
  ],
  'variables' : {
    'freeglut_rel_dir' : '../../third_party/freeglut/freeglut',
  },
  'targets' : [
    {
      'target_name' : 'freeglut',
      'type': 'static_library',
      'dependencies': [
        'external.gyp:graphics',
      ],
      'sources' : [
        '<(freeglut_rel_dir)/include/GL/freeglut.h',
        '<(freeglut_rel_dir)/include/GL/freeglut_ext.h',
        '<(freeglut_rel_dir)/include/GL/freeglut_std.h',
        '<(freeglut_rel_dir)/src/freeglut_callbacks.c',
        '<(freeglut_rel_dir)/src/freeglut_cursor.c',
        '<(freeglut_rel_dir)/src/freeglut_display.c',
        '<(freeglut_rel_dir)/src/freeglut_ext.c',
        '<(freeglut_rel_dir)/src/freeglut_font.c',
        '<(freeglut_rel_dir)/src/freeglut_font_data.c',
        '<(freeglut_rel_dir)/src/freeglut_gamemode.c',
        '<(freeglut_rel_dir)/src/freeglut_geometry.c',
        '<(freeglut_rel_dir)/src/freeglut_glutfont_definitions.c',
        '<(freeglut_rel_dir)/src/freeglut_init.c',
        '<(freeglut_rel_dir)/src/freeglut_input_devices.c',
        '<(freeglut_rel_dir)/src/freeglut_joystick.c',
        '<(freeglut_rel_dir)/src/freeglut_main.c',
        '<(freeglut_rel_dir)/src/freeglut_menu.c',
        '<(freeglut_rel_dir)/src/freeglut_misc.c',
        '<(freeglut_rel_dir)/src/freeglut_overlay.c',
        '<(freeglut_rel_dir)/src/freeglut_spaceball.c',
        '<(freeglut_rel_dir)/src/freeglut_state.c',
        '<(freeglut_rel_dir)/src/freeglut_stroke_mono_roman.c',
        '<(freeglut_rel_dir)/src/freeglut_stroke_roman.c',
        '<(freeglut_rel_dir)/src/freeglut_structure.c',
        '<(freeglut_rel_dir)/src/freeglut_teapot.c',
        '<(freeglut_rel_dir)/src/freeglut_videoresize.c',
        '<(freeglut_rel_dir)/src/freeglut_window.c',
        '<(freeglut_rel_dir)/src/freeglut_xinput.c',
      ],
      'include_dirs' : [
        'freeglut',
        '<(freeglut_rel_dir)/include',
        '<(freeglut_rel_dir)/src',
      ],
      'all_dependent_settings' : {
        'include_dirs' : [
          'freeglut',
          '<(freeglut_rel_dir)/include',
          '<(freeglut_rel_dir)/src',
        ],
        'defines': [
          # This is necessary so that symbol visibility is correctly handled.
          'FREEGLUT_STATIC',
        ],
      }, # all_dependent_settings
      'defines': [
        'FREEGLUT_STATIC',
      ],
      'defines!': [
        # Freeglut copiously prints out every event when _DEBUG is defined, so
        # undefine it.
        '_DEBUG',
      ],
      'conditions': [
        ['OS in ["linux", "mac"]', {
          'cflags': [
            '-Wno-int-to-pointer-cast',
            '-Wno-pointer-to-int-cast',
          ],
          'all_dependent_settings': {
            'cflags_cc': [
              '-Wno-mismatched-tags',
            ],
          },
        }],
        ['OS in ["mac", "ios"]', {
          'xcode_settings': {
            'OTHER_CFLAGS': [
              '-Wno-conversion',
            ],
          },
        }], # mac or ios
        ['OS=="mac"', {
          'include_dirs': [
            '/usr/X11/include',
          ],
          'link_settings': {
            'libraries': [
              '$(SDKROOT)/System/Library/Frameworks/AGL.framework',
              '$(SDKROOT)/System/Library/Frameworks/Cocoa.framework',
              '$(SDKROOT)/System/Library/Frameworks/OpenGL.framework',
              '/usr/X11/lib/libX11.dylib',
              '/usr/X11/lib/libGL.dylib',
              '/usr/X11/lib/libXext.dylib',
              '/usr/X11/lib/libXxf86vm.dylib',
              '/usr/X11/lib/libXrandr.dylib',
            ],
          },
          'defines': [
            # This prevents freeglut from using its hacky font definitions on OSX.
            '__CYGWIN__',
            'HAVE_DLFCN_H',
            'HAVE_ERRNO_H',
            'HAVE_FCNTL_H',
            'HAVE_GETTIMEOFDAY',
            'HAVE_GL_GLU_H',
            'HAVE_GL_GLX_H',
            'HAVE_GL_GL_H',
            'HAVE_INTTYPES_H',
            'HAVE_LIMITS_H',
            'HAVE_MEMORY_H',
            'HAVE_STDINT_H',
            'HAVE_STDLIB_H',
            'HAVE_STRINGS_H',
            'HAVE_STRING_H',
            'HAVE_SYS_IOCTL_H',
            'HAVE_SYS_PARAM_H',
            'HAVE_SYS_STAT_H',
            'HAVE_SYS_TIME_H',
            'HAVE_SYS_TYPES_H',
            'HAVE_UNISTD_H',
            'HAVE_VFPRINTF',
            'HAVE_VPRINTF',
            'HAVE_X11_EXTENSIONS_XF86VMODE_H',
            'HAVE_X11_EXTENSIONS_XINPUT_H',
            'HAVE_X11_EXTENSIONS_XI_H',
            'HAVE_X11_EXTENSIONS_XRANDR_H',
            'STDC_HEADERS',
            'TIME_WITH_SYS_TIME',
          ],
        }],
        ['OS=="linux"', {
          'defines' : [
            # '_DEBUG',
            # '_GNU_SOURCE',
            'HAVE_DLFCN_H',
            'HAVE_ERRNO_H',
            'HAVE_FCNTL_H',
            'HAVE_GETTIMEOFDAY',
            'HAVE_GL_GLU_H',
            'HAVE_GL_GLX_H',
            'HAVE_GL_GL_H',
            'HAVE_INTTYPES_H',
            'HAVE_LIBXI',
            # FIX: this entry previously lacked a trailing comma, which made
            # GYP silently concatenate it with the next string into the
            # meaningless define 'HAVE_LIMITS_HHAVE_MEMORY_H'.
            'HAVE_LIMITS_H',
            'HAVE_MEMORY_H',
            # FIX: a duplicate 'HAVE_STDINT_H' entry was removed.
            'HAVE_STDINT_H',
            'HAVE_STDLIB_H',
            'HAVE_STRINGS_H',
            'HAVE_STRING_H',
            'HAVE_SYS_IOCTL_H',
            'HAVE_SYS_PARAM_H',
            'HAVE_SYS_STAT_H',
            'HAVE_SYS_TIME_H',
            'HAVE_SYS_TYPES_H',
            'HAVE_UNISTD_H',
            'HAVE_VFPRINTF',
            'HAVE_VPRINTF',
            'STDC_HEADERS',
          ],
          'link_settings': {
            'libraries': [
              '-lrt', # For clock_gettime.
            ],
          }, # link_settings
        }],
        ['OS=="windows"', {
          'link_settings': {
            'libraries': [
              '-lWinmm', # For time stuff.
              '-lAdvapi32', # For registry stuff.
            ],
          }, # link_settings
          'defines': [
            'FREEGLUT_LIB_PRAGMAS=0',
          ],
          'all_dependent_settings': {
            'defines': [
              'FREEGLUT_LIB_PRAGMAS=0',
            ],
          }, # all_dependent_settings
          'defines!': [
            'NOGDI',
          ],
        }],
      ],
    },
  ],
}
| 32.24186 | 82 | 0.525822 |
79527a747d0bdc574a6ee0bf2d481636d25d00cb | 11,233 | py | Python | topi/python/topi/x86/conv2d_int8.py | zhanghaohit/incubator-tvm | ee0af843f3c5a3429e888079afb5f30789bd9bee | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 2 | 2019-01-07T06:00:27.000Z | 2019-02-28T15:07:16.000Z | topi/python/topi/x86/conv2d_int8.py | zhanghaohit/incubator-tvm | ee0af843f3c5a3429e888079afb5f30789bd9bee | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 4 | 2021-03-30T11:59:59.000Z | 2022-03-12T00:40:23.000Z | topi/python/topi/x86/conv2d_int8.py | zhanghaohit/incubator-tvm | ee0af843f3c5a3429e888079afb5f30789bd9bee | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 3 | 2021-07-20T07:40:15.000Z | 2021-08-03T08:39:17.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D int8 schedule on x86"""
import re
import tvm
from tvm import autotvm
from tvm.autotvm.task import get_config
from tvm.autotvm.task.topi_integration import deserialize_args
from ..nn.conv2d import _get_workload as _get_conv2d_workload
from .. import generic, tag
from ..generic import conv2d as conv2d_generic
from ..nn.util import get_pad_tuple
from ..util import get_const_tuple
from ..nn.conv2d import conv2d_NCHWc_int8
from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload
from .. import nn
from . import conv2d_avx_1x1, conv2d_avx_common
def _get_default_config_int8(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False,
                             layout='NCHW'):
    """
    Get default schedule config for the workload
    """
    if is_depthwise:
        # Fallback to FP32 default config until a VNNI schedule is defined.
        workload = _get_depthwise_conv2d_workload(data, kernel, strides, padding, out_dtype)
        from .depthwise_conv2d import _fallback_schedule
        _fallback_schedule(cfg, workload)
        return

    workload = _get_conv2d_workload(data, kernel, strides, padding, out_dtype, layout)
    # 1x1 kernels get a dedicated fallback schedule.
    if workload.hkernel == 1 and workload.wkernel == 1:
        conv2d_generic.fallback_schedule_cpu_1x1_int8(
            cfg, workload, int32_lanes=16, num_int8_elements=4)
    else:
        conv2d_generic.fallback_schedule_cpu_common_int8(
            cfg, workload, int32_lanes=16, num_int8_elements=4)
def _is_int8_hw_support(data_dtype, kernel_dtype):
    """
    Checks to ensure that we can use Intel DLBoost instructions
    1) The datatypes are correct.
    2) LLVM version has support for the instructions.
    3) Target is skylake and above.
    """
    # 1) Check datatypes: the int8 path requires uint8 data and int8 kernel.
    is_dtype_support = data_dtype == 'uint8' and kernel_dtype == 'int8'
    # 2) Check LLVM support (LLVM 8 or newer is required).
    is_llvm_support = tvm.codegen.llvm_version_major() >= 8
    # 3) Check target: membership test instead of a chain of equality checks.
    mcpu = tvm.target.current_target().mcpu
    is_target_support = mcpu in ('skylake-avx512', 'cascadelake')
    return is_dtype_support and is_llvm_support and is_target_support
def _create_tuning_space_int8(cfg, data, kernel, strides, padding, dilation, layout):
    """Create schedule configuration from input arguments.

    Derives input/output channel counts and the output spatial extent from
    the data/kernel shapes for the given layout, then registers the split
    and knob entities on ``cfg``.
    """
    dshape = get_const_tuple(data.shape)
    kshape = get_const_tuple(kernel.shape)
    pat = re.compile(r'NCHW.+(\d+)c')
    if layout == 'NCHW':
        n, ic, h, w = dshape
        oc, _, kh, kw = kshape
    elif layout == 'NHWC':
        n, h, w, ic = dshape
        kh, kw, oc, _ = kshape
    elif pat.match(layout) is not None:
        n, ic_chunk, h, w, ic_bn = dshape
        # Call kept for its side effect only: with allow_none=False it
        # errors out when no target is in scope.  The returned target
        # itself was never used (the old ``target =`` binding was dead).
        tvm.target.current_target(allow_none=False)
        # int8 NCHWc kernels are 7-D; the inner channel dim is split twice.
        oc_chunk, k_ic, kh, kw, k_ic_f, oc_bn, k_ic_s = kshape
        ic = ic_chunk * ic_bn
        assert ic == k_ic * k_ic_f * k_ic_s
        oc = oc_chunk * oc_bn
    else:
        raise ValueError("Not support this layout {} with "
                         "schedule template.".format(layout))
    is_kernel_1x1 = kh == 1 and kw == 1
    pt, pl, pb, pr = get_pad_tuple(padding, kernel)
    sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    # Output spatial extent after padding and striding.
    oh = (h - kh + pt + pb) // sh + 1
    ow = (w - kw + pl + pr) // sw + 1

    # Create schedule config.  The filters keep the inner factors aligned
    # with the int8 vector lanes (multiples of 4 / 16).
    cfg.define_split('tile_ic', ic, num_outputs=2, filter=lambda y: y.size[-1] % 4 == 0)
    cfg.define_split('tile_oc', oc, num_outputs=2, filter=lambda y: y.size[-1] % 16 == 0)
    cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
    if is_kernel_1x1:
        cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1])
    else:
        cfg.define_knob("unroll_kw", [True, False])
# Define template function for autotvm task
# We define schedule template in this function instead of
# declaration function since actual input arguments need
# to be altered by the schedule selected.
@autotvm.task.register("topi_x86_conv2d_NCHWc_int8")
def _topi_nn_conv2d_NCHWc_int8(*args, **kwargs):
    """AutoTVM task template for int8 NCHWc conv2d on x86.

    Deserializes the task arguments, builds the tuning space, reshapes the
    plain NCHW data/kernel into the NCHWc blocked layouts selected by the
    config, and returns the schedule plus its argument tensors.
    """
    assert not kwargs, "Do not support kwargs in template function call"
    args = deserialize_args(args)
    # Two calling conventions: with or without an explicit output layout.
    # Either way, out_layout is recomputed below from the chosen oc split.
    if len(args) == 7:
        data, kernel, strides, padding, dilation, origin_layout, dtype = args
    else:
        assert len(args) == 8
        data, kernel, strides, padding, dilation, origin_layout, out_layout, dtype = args
    raw_data_shape = get_const_tuple(data.shape)
    raw_kernel_shape = get_const_tuple(kernel.shape)
    # get config here
    cfg = get_config()
    _create_tuning_space_int8(cfg, data, kernel, strides, padding, dilation, origin_layout)
    # change shape with the value in config
    ic_bn, oc_bn, ow_bn = (cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1],
                           cfg["tile_ow"].size[-1])
    data_layout = "NCHW%dc" % ic_bn
    out_layout = "NCHW%dc" % oc_bn
    # Set up the new shape for data and kernel
    new_data_shape = (raw_data_shape[0], raw_data_shape[1] // ic_bn,
                      raw_data_shape[2], raw_data_shape[3], ic_bn)
    # The input-channel block is split once more by n_elems (4 int8 values
    # per int32 lane), giving the 7-D kernel layout.
    n_elems = 4
    new_kernel_shape = (raw_kernel_shape[0] // oc_bn,
                        raw_kernel_shape[1] // ic_bn,
                        raw_kernel_shape[2],
                        raw_kernel_shape[3],
                        ic_bn // n_elems,
                        oc_bn,
                        n_elems)
    new_data = tvm.placeholder(new_data_shape, data.dtype)
    new_kernel = tvm.placeholder(new_kernel_shape, kernel.dtype)
    C = _declaration_conv_NCHWc_int8(cfg, new_data, new_kernel, strides, padding, dilation,
                                     data_layout, out_layout, dtype)
    s = _schedule_conv2d_NCHWc_int8(cfg, [C])
    return s, [new_data, new_kernel, C]
@autotvm.register_topi_compute(conv2d_NCHWc_int8, 'cpu', 'direct')
def _declaration_conv_NCHWc_int8(cfg, data, kernel, strides,
                                 padding, dilation, layout, out_layout, out_dtype):
    """Compute declaration for int8 NCHWc conv2d; delegates to the generic op."""
    return nn.conv2d_NCHWc_int8_compute(
        data, kernel, strides, padding, dilation, layout, out_layout, out_dtype)
@autotvm.register_topi_schedule(generic.schedule_conv2d_NCHWc_int8, 'cpu', ['direct'])
def _schedule_conv2d_NCHWc_int8(cfg, outs):
    """Create schedule for tensors.

    Walks the compute graph from the output op, inlines elementwise
    (broadcast-tagged) stages, and dispatches the conv stage to the 1x1 or
    common int8 AVX schedule depending on the kernel size.
    """
    s = tvm.create_schedule([x.op for x in outs])
    scheduled_ops = []

    def traverse(op):
        """Traverse operators from computation graph"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        if 'conv2d_NCHWc_int8' in op.tag:
            conv_out = op.output(0)
            kernel = conv_out.op.input_tensors[1]
            data_vec = conv_out.op.input_tensors[0]
            # Step back from the vectorized data to the raw input, then
            # past the padding stage if one was inserted.
            data = data_vec.op.input_tensors[0] \
                if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \
                else data_vec
            if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
                data = data.op.input_tensors[0]
            args = [s, cfg, data_vec, conv_out, outs[0]]
            # Side-effect-only call: errors out when no target is in scope.
            # (The returned target was previously bound but never used.)
            tvm.target.current_target(allow_none=False)
            # int8 conv kernel is 7-dim
            _, _, kh, kw, _, _, _ = get_const_tuple(kernel.shape)
            if kh == 1 and kw == 1:
                conv2d_avx_1x1._schedule_conv_NCHWc_int8(*args)
            else:
                conv2d_avx_common._schedule_conv_NCHWc_int8(*args)
        scheduled_ops.append(op)

    traverse(outs[0].op)
    return s
@autotvm.register_topi_schedule(generic.schedule_conv2d_nhwc_pack, 'cpu', ['direct'])
def schedule_conv2d_nhwc_pack(cfg, outs):
    """Create schedule for tensors.

    NHWC-pack variant: elementwise output stages (bias/bn/relu) are fused
    and parallelized in place; the conv stage is dispatched to the 1x1
    int8 schedule, which is the only supported kernel shape here.
    """
    s = tvm.create_schedule([x.op for x in outs])
    output_op = outs[0].op
    scheduled_ops = []
    def traverse(op):
        """Traverse operators from computation graph"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            else: # inject custom schedule
                if len(op.axis) == 4: # schedule bias + bn + relu
                    # Fuse n/h/w for parallelism and vectorize the channel axis.
                    n, h, w, c = op.axis
                    fused = s[op].fuse(n, h, w)
                    s[op].parallel(fused)
                    s[op].vectorize(c)
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        if 'conv2d_nhwc_pack_int8' in op.tag:
            conv_out = op.output(0)
            kernel = conv_out.op.input_tensors[1]
            data_vec = conv_out.op.input_tensors[0]
            # Step back from the vectorized data to the raw input, then
            # past the padding stage if one was inserted.
            data = data_vec.op.input_tensors[0] \
                if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \
                else data_vec
            if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
                data_pad = data
                data = data_pad.op.input_tensors[0]
            args = [s, cfg, data_vec, conv_out, outs[0]]
            if data.dtype == 'uint8':
                kh, kw, _, _, _ = get_const_tuple(kernel.shape)
                if kh == 1 and kw == 1:
                    conv2d_avx_1x1._schedule_conv_nhwc_pack_int8(*args)
                else:
                    raise ValueError("Only support 1x1 kernel with "
                                     "schedule_conv2d_nhwc_pack.")
            else:
                raise ValueError("Not support this data type {} with "
                                 "schedule_conv2d_nhwc_pack. Only support int8".format(data.dtype))
        scheduled_ops.append(op)
    traverse(output_op)
    return s
| 41.603704 | 99 | 0.62165 |
79527bfc6710346163d034728c64bb7abd8fef2b | 1,056 | py | Python | 06.py | rubenhortas/python_challenge | b20041049421c1a28357065c91c84fd101c8ec78 | [
"MIT"
] | null | null | null | 06.py | rubenhortas/python_challenge | b20041049421c1a28357065c91c84fd101c8ec78 | [
"MIT"
] | null | null | null | 06.py | rubenhortas/python_challenge | b20041049421c1a28357065c91c84fd101c8ec78 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# http://www.pythonchallenge.com/pc/def/channel.html
import signal
from zipfile import ZipFile
from handlers.python import exit_signal_handler
# noinspection PyShadowingNames
def get_next_file(i, zf, file_name, result):
    """Follow the chain of ``<number>.txt`` members inside the zip.

    Each member's contents contain words; any purely numeric word names the
    next member of the chain.  The per-member zip comments are accumulated
    into ``result`` and printed once a member with no numeric word (the end
    of the chain) is reached.

    Args:
        i: recursion depth (kept for interface compatibility; unused).
        zf: an open ``zipfile.ZipFile``.
        file_name: name of the member to read.
        result: comments accumulated so far.
    """
    # The no-op ``except Exception as e: raise e`` wrapper was removed:
    # it only re-raised the same exception.
    is_last = True
    file_words = zf.read(file_name).split()
    # Append this member's zip comment to the running result.
    result = f"{result}{zf.getinfo(file_name).comment.decode('utf-8')}"
    for word in file_words:
        if word.isdigit():
            # A numeric word names the next file in the chain.
            is_last = False
            get_next_file(i + 1, zf, f"{word.decode('utf-8')}.txt", result)
    if is_last:
        # End of the chain: the collected comments spell the answer.
        print(result)
if __name__ == '__main__':
    # Install the project's Ctrl-C handler so the script exits cleanly.
    signal.signal(signal.SIGINT, exit_signal_handler)
    result = ""
    # Download the zip file from http://www.pythonchallenge.com/pc/def/channel.zip
    try:
        with ZipFile('channel.zip') as zf:
            # Start the chain at readme.txt with an empty accumulator.
            get_next_file(0, zf, 'readme.txt', result)
    except Exception as e:
        # Best-effort script: report any failure instead of a traceback.
        print(e)
| 24.55814 | 82 | 0.619318 |
79527cad6b50203de24bb9d863b8b7772c6881d9 | 588 | py | Python | tiny_app/migrations/0015_auto_20160125_1130.py | DJMedhaug/tiny_spot | ddae45bb6bbb62c4e5f9f0baf9c3c82b0bd3cb99 | [
"MIT"
] | null | null | null | tiny_app/migrations/0015_auto_20160125_1130.py | DJMedhaug/tiny_spot | ddae45bb6bbb62c4e5f9f0baf9c3c82b0bd3cb99 | [
"MIT"
] | null | null | null | tiny_app/migrations/0015_auto_20160125_1130.py | DJMedhaug/tiny_spot | ddae45bb6bbb62c4e5f9f0baf9c3c82b0bd3cb99 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the Images and Post models.

    dependencies = [
        ('tiny_app', '0014_auto_20160125_1115'),
    ]

    operations = [
        # The relation fields (presumably foreign keys — 'post' on Images,
        # 'user' on Post) are removed first so the models can be deleted.
        migrations.RemoveField(
            model_name='images',
            name='post',
        ),
        migrations.RemoveField(
            model_name='post',
            name='user',
        ),
        migrations.DeleteModel(
            name='Images',
        ),
        migrations.DeleteModel(
            name='Post',
        ),
    ]
| 20.275862 | 48 | 0.534014 |
79527d4d34dca82441d22ab61590c529661f0521 | 2,137 | py | Python | tests/functional/tasks/test_task310.py | SergeyNazarovSam/SergeyPythonfirst | fd2bddf1f5ba28c6802be921177917f369f7ef2e | [
"MIT"
] | 2 | 2020-12-17T20:19:21.000Z | 2020-12-22T12:46:43.000Z | tests/functional/tasks/test_task310.py | alexander-sidorov/tms-z43 | 61ecd204f5de4e97ff0300f6ef91c36c2bcda31c | [
"MIT"
] | 4 | 2021-04-20T08:40:30.000Z | 2022-02-10T07:50:30.000Z | tests/functional/tasks/test_task310.py | SergeyNazarovSam/SergeyPythonfirst | fd2bddf1f5ba28c6802be921177917f369f7ef2e | [
"MIT"
] | 1 | 2021-02-10T06:42:19.000Z | 2021-02-10T06:42:19.000Z | import pytest
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from tests.functional.pages import Task310Page
from tests.functional.utils import screenshot_on_failure
@pytest.fixture(scope="session")
def task_url(service_url) -> str:
    """Session-wide URL of the task 3.10 page."""
    yield f"{service_url}/tasks/310/"
@pytest.mark.functional
@screenshot_on_failure
def test(browser, request, task_url):
    """Smoke-test the task 3.10 page and its money-change form."""
    page = Task310Page(browser, task_url)

    assert page.heading.tag_name == "h1"
    assert page.heading.text == "Задание 3.10"
    assert page.money.tag_name == "input"
    assert not page.money.text
    assert page.submit.tag_name == "button"

    # The result elements must be absent before the first submit.
    with pytest.raises(NoSuchElementException):
        assert page.result1.tag_name
    with pytest.raises(NoSuchElementException):
        assert page.result2.tag_name

    cases = [
        ("", "", ""),
        ("1", "1 рубль", "1 рубль \N{FIRST PLACE MEDAL} × 1"),
        (
            "11",
            "11 рублей",
            "10 рублей \N{BANKNOTE WITH DOLLAR SIGN} × 1\n"
            "1 рубль \N{FIRST PLACE MEDAL} × 1",
        ),
    ]
    for money, result1, result2 in cases:
        verify_result(page, task_url, money, result1, result2)
def verify_result(
    page: Task310Page,
    task_url: str,
    money: str,
    result1: str = "",
    result2: str = "",
) -> None:
    """Submit *money* through the form and check the rendered result.

    An empty *money* submits the form blank, in which case both result
    elements must be absent; otherwise their texts must equal *result1*
    and *result2* respectively.
    """
    page.money.clear()
    if money:
        page.money.send_keys(money)
    page.submit.click()
    # Wait for the post/redirect cycle to land back on the same URL.
    WebDriverWait(page.browser, timeout=4).until(
        EC.url_to_be(task_url),
        "no page reload",  # fixed: was an f-string with no placeholders
    )
    if not money:
        with pytest.raises(NoSuchElementException):
            assert page.result1.tag_name
        with pytest.raises(NoSuchElementException):
            assert page.result2.tag_name
        return
    assert page.result1.tag_name == "span"
    assert page.result1.text == result1, f"result1 mismatch for: {money!r}"
    assert page.result2.tag_name == "pre"
    assert page.result2.text == result2, f"result2 mismatch for: {money!r}"
| 27.050633 | 75 | 0.65372 |
79527d9d557be492f7c58aa482433913fb074586 | 3,600 | py | Python | homeassistant/components/withings/const.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 3 | 2020-01-21T18:09:09.000Z | 2022-01-17T08:06:03.000Z | homeassistant/components/withings/const.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 39 | 2016-12-16T12:40:34.000Z | 2017-02-13T17:53:42.000Z | homeassistant/components/withings/const.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 4 | 2019-10-15T21:03:53.000Z | 2020-05-27T19:53:20.000Z | """Constants used by the Withings component."""
import homeassistant.const as const
# --- Integration data / config dictionary keys -------------------------------
DATA_MANAGER = "data_manager"
BASE_URL = "base_url"
CLIENT_ID = "client_id"
CLIENT_SECRET = "client_secret"
CODE = "code"
CONFIG = "config"
CREDENTIALS = "credentials"
DOMAIN = "withings"
LOG_NAMESPACE = "homeassistant.components.withings"
MEASURES = "measures"
PROFILE = "profile"
PROFILES = "profiles"
# HTTP route registered for the authorization callback.
AUTH_CALLBACK_PATH = "/api/withings/authorize"
AUTH_CALLBACK_NAME = "withings:authorize"
# Throttle window between updates; unit not visible here (likely seconds).
THROTTLE_INTERVAL = 60
# --- Sleep-state values exposed by the sensors -------------------------------
STATE_UNKNOWN = const.STATE_UNKNOWN
STATE_AWAKE = "awake"
STATE_DEEP = "deep"
STATE_LIGHT = "light"
STATE_REM = "rem"
# --- Measure-type identifiers ------------------------------------------------
# Integers are numeric measure-type codes; strings are field names found in
# sleep summary data.
MEASURE_TYPE_BODY_TEMP = 71
MEASURE_TYPE_BONE_MASS = 88
MEASURE_TYPE_DIASTOLIC_BP = 9
MEASURE_TYPE_FAT_MASS = 8
MEASURE_TYPE_FAT_MASS_FREE = 5
MEASURE_TYPE_FAT_RATIO = 6
MEASURE_TYPE_HEART_PULSE = 11
MEASURE_TYPE_HEIGHT = 4
MEASURE_TYPE_HYDRATION = 77
MEASURE_TYPE_MUSCLE_MASS = 76
MEASURE_TYPE_PWV = 91
MEASURE_TYPE_SKIN_TEMP = 73
MEASURE_TYPE_SLEEP_DEEP_DURATION = "deepsleepduration"
MEASURE_TYPE_SLEEP_HEART_RATE_AVERAGE = "hr_average"
MEASURE_TYPE_SLEEP_HEART_RATE_MAX = "hr_max"
MEASURE_TYPE_SLEEP_HEART_RATE_MIN = "hr_min"
MEASURE_TYPE_SLEEP_LIGHT_DURATION = "lightsleepduration"
MEASURE_TYPE_SLEEP_REM_DURATION = "remsleepduration"
MEASURE_TYPE_SLEEP_RESPIRATORY_RATE_AVERAGE = "rr_average"
MEASURE_TYPE_SLEEP_RESPIRATORY_RATE_MAX = "rr_max"
MEASURE_TYPE_SLEEP_RESPIRATORY_RATE_MIN = "rr_min"
MEASURE_TYPE_SLEEP_STATE_AWAKE = 0
MEASURE_TYPE_SLEEP_STATE_DEEP = 2
MEASURE_TYPE_SLEEP_STATE_LIGHT = 1
MEASURE_TYPE_SLEEP_STATE_REM = 3
MEASURE_TYPE_SLEEP_TOSLEEP_DURATION = "durationtosleep"
MEASURE_TYPE_SLEEP_TOWAKEUP_DURATION = "durationtowakeup"
MEASURE_TYPE_SLEEP_WAKEUP_DURATION = "wakeupduration"
MEASURE_TYPE_SLEEP_WAKUP_COUNT = "wakeupcount"
MEASURE_TYPE_SPO2 = 54
MEASURE_TYPE_SYSTOLIC_BP = 10
MEASURE_TYPE_TEMP = 12
MEASURE_TYPE_WEIGHT = 1
# --- Internal measurement attribute names ------------------------------------
MEAS_BODY_TEMP_C = "body_temperature_c"
MEAS_BONE_MASS_KG = "bone_mass_kg"
MEAS_DIASTOLIC_MMHG = "diastolic_blood_pressure_mmhg"
MEAS_FAT_FREE_MASS_KG = "fat_free_mass_kg"
MEAS_FAT_MASS_KG = "fat_mass_kg"
MEAS_FAT_RATIO_PCT = "fat_ratio_pct"
MEAS_HEART_PULSE_BPM = "heart_pulse_bpm"
MEAS_HEIGHT_M = "height_m"
MEAS_HYDRATION = "hydration"
MEAS_MUSCLE_MASS_KG = "muscle_mass_kg"
MEAS_PWV = "pulse_wave_velocity"
MEAS_SKIN_TEMP_C = "skin_temperature_c"
MEAS_SLEEP_DEEP_DURATION_SECONDS = "sleep_deep_duration_seconds"
MEAS_SLEEP_HEART_RATE_AVERAGE = "sleep_heart_rate_average_bpm"
MEAS_SLEEP_HEART_RATE_MAX = "sleep_heart_rate_max_bpm"
MEAS_SLEEP_HEART_RATE_MIN = "sleep_heart_rate_min_bpm"
MEAS_SLEEP_LIGHT_DURATION_SECONDS = "sleep_light_duration_seconds"
MEAS_SLEEP_REM_DURATION_SECONDS = "sleep_rem_duration_seconds"
MEAS_SLEEP_RESPIRATORY_RATE_AVERAGE = "sleep_respiratory_average_bpm"
MEAS_SLEEP_RESPIRATORY_RATE_MAX = "sleep_respiratory_max_bpm"
MEAS_SLEEP_RESPIRATORY_RATE_MIN = "sleep_respiratory_min_bpm"
MEAS_SLEEP_STATE = "sleep_state"
MEAS_SLEEP_TOSLEEP_DURATION_SECONDS = "sleep_tosleep_duration_seconds"
MEAS_SLEEP_TOWAKEUP_DURATION_SECONDS = "sleep_towakeup_duration_seconds"
MEAS_SLEEP_WAKEUP_COUNT = "sleep_wakeup_count"
MEAS_SLEEP_WAKEUP_DURATION_SECONDS = "sleep_wakeup_duration_seconds"
MEAS_SPO2_PCT = "spo2_pct"
MEAS_SYSTOLIC_MMGH = "systolic_blood_pressure_mmhg"
MEAS_TEMP_C = "temperature_c"
MEAS_WEIGHT_KG = "weight_kg"
# --- Units of measurement -----------------------------------------------------
UOM_BEATS_PER_MINUTE = "bpm"
UOM_BREATHS_PER_MINUTE = "br/m"
UOM_FREQUENCY = "times"
UOM_METERS_PER_SECOND = "m/s"
UOM_MMHG = "mmhg"
UOM_PERCENT = "%"
UOM_LENGTH_M = const.LENGTH_METERS
UOM_MASS_KG = const.MASS_KILOGRAMS
UOM_SECONDS = "seconds"
UOM_TEMP_C = const.TEMP_CELSIUS
| 34.615385 | 72 | 0.848056 |
79527dd1fda6a95036b82bd332fd9c2d37965b59 | 2,440 | py | Python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/express_route_service_provider.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure/mgmt/network/v2017_10_01/models/express_route_service_provider.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure/mgmt/network/v2017_10_01/models/express_route_service_provider.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ExpressRouteServiceProvider(Resource):
    """A ExpressRouteResourceProvider object.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param peering_locations: Get a list of peering locations.
    :type peering_locations: list[str]
    :param bandwidths_offered: Gets bandwidths offered.
    :type bandwidths_offered:
     list[~azure.mgmt.network.v2017_10_01.models.ExpressRouteServiceProviderBandwidthsOffered]
    :param provisioning_state: Gets the provisioning state of the resource.
    :type provisioning_state: str
    """

    # Server-populated attributes are read-only and may not be set here.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps attribute names to wire (JSON) keys and type descriptors
    # (presumably consumed by the generated msrest serializer — the file
    # header notes this class is AutoRest-generated).
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'peering_locations': {'key': 'properties.peeringLocations', 'type': '[str]'},
        'bandwidths_offered': {'key': 'properties.bandwidthsOffered', 'type': '[ExpressRouteServiceProviderBandwidthsOffered]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, id=None, location=None, tags=None, peering_locations=None, bandwidths_offered=None, provisioning_state=None):
        # The Resource base class handles id/location/tags.
        super(ExpressRouteServiceProvider, self).__init__(id=id, location=location, tags=tags)
        self.peering_locations = peering_locations
        self.bandwidths_offered = bandwidths_offered
        self.provisioning_state = provisioning_state
| 40 | 132 | 0.636885 |
79527e1f83321c6621bf1f3bc8cda56c294dbb64 | 2,571 | py | Python | pysofe/spaces/functions.py | pysofe/pysofe | 088d4061fcf194a85ff3332e7bdd3bde095e4f69 | [
"BSD-3-Clause"
] | null | null | null | pysofe/spaces/functions.py | pysofe/pysofe | 088d4061fcf194a85ff3332e7bdd3bde095e4f69 | [
"BSD-3-Clause"
] | null | null | null | pysofe/spaces/functions.py | pysofe/pysofe | 088d4061fcf194a85ff3332e7bdd3bde095e4f69 | [
"BSD-3-Clause"
] | null | null | null | """
Provides convinience classes for functions in the fem framework.
"""
# IMPORTS
import numpy as np
# DEBUGGING
from IPython import embed as IPS
class FEFunction(object):
    '''
    A finite element function defined via degrees of freedom.

    Parameters
    ----------
    fe_space : pysofe.spaces.space.FESpace
        The function space
    dof_values : array_like
        Values for the degrees of freedom of the function space
    '''

    def __init__(self, fe_space, dof_values):
        if not isinstance(dof_values, np.ndarray):
            dof_values = np.asarray(dof_values)
        assert dof_values.ndim == 1
        if not np.size(dof_values) == fe_space.n_dof:
            raise ValueError("fe space and dof values don't match!")
        self.fe_space = fe_space
        self.dofs = dof_values

    @property
    def order(self):
        '''
        The polynomial order of the function.
        '''
        return self.fe_space.element.order

    def __call__(self, points, deriv=0):
        return self._evaluate(points, deriv)

    def _evaluate(self, points, deriv=0):
        '''
        Evaluates the function or its derivatives at given points.

        Parameters
        ----------
        points : array_like
            The local points at the reference domain
        deriv : int
            The derivation order (0: function values, 1: first derivatives)
        '''
        # determine for which entities to evaluate the function
        dim = np.size(points, axis=0)
        # check input
        if dim < self.fe_space.mesh.dimension and deriv > 0:
            raise NotImplementedError('Higher order derivatives for traces not supported!')
        # get dof map and adjust values (dof indices are 1-based, hence -1)
        dof_map = self.fe_space.get_dof_map(d=dim)
        values = self.dofs.take(indices=dof_map-1, axis=0)
        # evaluate basis functions (or derivatives)
        if deriv == 0:
            # values : nB x nE
            basis = self.fe_space.element.eval_basis(points, deriv)    # nB x nP
            U = (values[:,:,None] * basis[:,None,:]).sum(axis=0)       # nE x nP
        elif deriv == 1:
            # values : nB x nE
            dbasis_global = self.fe_space.eval_global_derivatives(points)  # nE x nB x nP x nD
            U = (values.T[:,:,None,None] * dbasis_global).sum(axis=1)      # nE x nP x nD
        else:
            # BUG FIX: this previously formatted the undefined name ``d``,
            # raising NameError instead of the intended NotImplementedError.
            raise NotImplementedError('Invalid derivation order ({})'.format(deriv))
        return U
| 28.88764 | 94 | 0.572929 |
79527ef3b672bf80a0fe21b3c993d803710a4eda | 4,478 | py | Python | helper.py | jeffhsu3/BERMUDA | 1e1e031b7dc59bca98a494729f151905bf05e13b | [
"MIT"
] | 31 | 2019-05-19T04:11:29.000Z | 2021-07-04T13:27:34.000Z | helper.py | jeffhsu3/BERMUDA | 1e1e031b7dc59bca98a494729f151905bf05e13b | [
"MIT"
] | 10 | 2019-05-23T12:18:04.000Z | 2021-08-02T07:19:56.000Z | helper.py | jeffhsu3/BERMUDA | 1e1e031b7dc59bca98a494729f151905bf05e13b | [
"MIT"
] | 8 | 2019-08-14T06:38:58.000Z | 2021-05-13T01:11:52.000Z | # !/usr/bin/env python
import numpy as np
from sklearn.decomposition import PCA
import umap
import matplotlib.pyplot as plt
# 9-class Set1, for plotting data with qualitative labels
# Integer label index (0-8) -> hex color; plot_labels cycles through these
# via ``i % len(color_dict)`` when there are more labels than colors.
color_dict = {0:'#e41a1c', 1:'#377eb8', 2:'#4daf4a', 3: '#984ea3', 4:'#ff7f00',
              5:'#ffff33', 6:'#a65628', 7:'#f781bf', 8:'#999999'}
def cal_UMAP(code, pca_dim=50, n_neighbors=30, min_dist=0.1, n_components=2, metric='cosine'):
    """Calculate UMAP dimensionality reduction.

    Args:
        code: num_cells * num_features
        pca_dim: if dimensionality of code > pca_dim, apply PCA first
        n_neighbors: UMAP parameter
        min_dist: UMAP parameter
        n_components: UMAP parameter
        metric: UMAP parameter
    Returns:
        umap_code: num_cells * n_components
    """
    # Reduce very high-dimensional input with PCA before running UMAP.
    if code.shape[1] > pca_dim:
        code = PCA(n_components=pca_dim).fit_transform(code)
    reducer = umap.UMAP(n_neighbors=n_neighbors, min_dist=min_dist,
                        n_components=n_components, metric=metric,
                        random_state=0)
    return reducer.fit_transform(code)
def plot_labels(coor_code, labels, label_dict, axis_name, save_path):
    ''' Plot cells with qualitative labels
    Args:
        coor_code: num_cells * 2 matrix for visualization
        labels: labels in integer
        label_dict: dictionary converting integer to labels names
        axis_name: list of two, names of x and y axis
        save_path: path to save the plot
    Returns:
    '''
    fig, ax = plt.subplots(figsize=(8, 5))
    unique_labels = np.unique(labels)
    unique_labels.sort()
    # one scatter call per label so each gets its own color + legend entry;
    # colors cycle through the module-level 9-class palette (color_dict)
    for i in range(len(unique_labels)):
        g = unique_labels[i]
        ix = np.where(labels == g)
        ax.scatter(coor_code[ix, 0], coor_code[ix, 1], s=1, c=color_dict[i % len(color_dict)], label=label_dict[g], alpha=0.2)
    ax.set_xlabel(axis_name[0])
    ax.set_ylabel(axis_name[1])
    # legend placed in a strip above the axes, spread over up to 10 columns
    ax.legend(bbox_to_anchor=(0,1.04,1,0.2), loc="lower left",
              mode="expand", borderaxespad=0, ncol=10,
              prop={'size': 6})
    fig.savefig(save_path, dpi=300)
    # close the figure to free memory when called repeatedly
    plt.close(fig)
def plot_expr(coor_code, vals, axis_name, save_path):
    '''Plot cells colored by a continuous expression level.
    Args:
        coor_code: num_cells * 2 matrix for visualization
        vals: expression values
        axis_name: list of two, names of x and y axis
        save_path: path to save the plot
    Returns:
    '''
    fig, ax = plt.subplots(figsize=(8, 5))
    ax.set_xlabel(axis_name[0])
    ax.set_ylabel(axis_name[1])
    # draw cells in ascending expression order so high values are plotted
    # last and stay visible on top of overlapping points
    order = np.argsort(vals)
    coor_code = coor_code[order, :]
    vals = vals[order]
    scatter = ax.scatter(coor_code[:, 0], coor_code[:, 1], s=1, c=vals,
                         cmap='viridis', alpha=0.2)
    scatter.set_facecolor('none')
    fig.colorbar(scatter)
    fig.savefig(save_path, dpi=300)
    plt.close(fig)
def plot_loss(loss_total_list, loss_reconstruct_list, loss_transfer_list, save_path):
    ''' Plot loss versus epochs
    Args:
        loss_total_list: list of total loss
        loss_reconstruct_list: list of reconstruction loss
        loss_transfer_list: list of transfer loss
        save_path: path to save the plot
    Returns:
    '''
    # two stacked panels: total loss on top, component losses below
    fig, ax = plt.subplots(2, 1, figsize=(8, 8))
    ax[0].plot(range(len(loss_total_list)), loss_total_list, "r:",linewidth=1)
    ax[0].legend(['total_loss'])
    ax[0].set_xlabel("Epochs")
    ax[0].set_ylabel("Loss")
    ax[1].plot(range(len(loss_reconstruct_list)), loss_reconstruct_list, "b--",linewidth=1)
    ax[1].plot(range(len(loss_transfer_list)), loss_transfer_list, "g-",linewidth=1)
    ax[1].legend(['loss_reconstruct', 'loss_transfer'])
    ax[1].set_xlabel("Epochs")
    ax[1].set_ylabel("Loss")
    fig.savefig(save_path, dpi=300)
    # close the figure to free memory when called repeatedly
    plt.close(fig)
def gen_dataset_idx(code_list):
    '''Build a 1-based dataset label for every cell across all datasets.
    Args:
        code_list: list of length num_datasets, each with a matrix of num_features * num_cells
    Returns:
        np array with size (num_total_cells,); cells of code_list[i] are labeled i+1
    '''
    sizes = [code.shape[1] for code in code_list]
    dataset_labels = np.zeros(sum(sizes), dtype=int)
    offset = 0
    for i, size in enumerate(sizes):
        dataset_labels[offset:offset + size] = i + 1
        offset += size
    return dataset_labels
79527f7a130817184181220d09ca4a2dbc6f99e2 | 4,498 | py | Python | python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-09-24T11:23:36.000Z | 2021-09-24T11:23:36.000Z | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_attrs()
def set_training(self):
self.is_training = True
self.epoch = 100
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10]).astype('float32')
self.feed_fp32 = {"image": data.astype(np.float32)}
self.feed_fp16 = {"image": data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_attrs(self):
self.attrs = {
"optimizer": 'lamb',
"weight_decay": 0.0,
"scaled_optimizer_state": True
}
@IPUOpTest.static_graph
def build_model(self):
image = paddle.static.data(name='image',
shape=[1, 3, 10, 10],
dtype='float32')
conv1 = paddle.static.nn.conv2d(image,
num_filters=3,
filter_size=3,
bias_attr=False)
loss = paddle.mean(conv1)
weight_decay = self.attrs['weight_decay']
opt = paddle.optimizer.Adam(learning_rate=1e-1,
weight_decay=weight_decay)
if self.attrs['optimizer'] == 'lamb':
opt = paddle.optimizer.Lamb(learning_rate=1e-1,
lamb_weight_decay=weight_decay)
opt.minimize(loss)
self.feed_list = [image.name]
self.fetch_list = [loss.name]
def run_model(self, exec_mode):
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if self.is_ipu_mode(exec_mode):
if "use_no_bias_optimizer" in self.attrs.keys():
ipu_strategy.set_options({
"use_no_bias_optimizer":
self.attrs["use_no_bias_optimizer"]
})
if "scaled_optimizer_state" in self.attrs.keys():
ipu_strategy.set_options({
"scaled_optimizer_state":
self.attrs["scaled_optimizer_state"]
})
self.run_op_test(exec_mode, ipu_strategy=ipu_strategy)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestScaledAdam(TestBase):
    # Same scenario as TestBase but with Adam and a looser tolerance.
    def set_attrs(self):
        self.attrs = {
            "optimizer": 'adam',
            "weight_decay": 0.0,
            "scaled_optimizer_state": True
        }
    def set_atol(self):
        super().set_atol()
        self.atol = 1e-5
        self.rtol = 1e-5
# The no-bias optimizer variants cannot be cross-checked because the CPU
# reference implementation does not support them.
@unittest.skip('cpu do not support AdamNoBias')
class TestScaledAdamNoBias(TestBase):
    def set_attrs(self):
        self.attrs = {
            "optimizer": 'adam',
            "weight_decay": 0.0,
            "use_no_bias_optimizer": True,
            "scaled_optimizer_state": True
        }
@unittest.skip('cpu do not support LambNoBias')
class TestScaledLambNoBias(TestBase):
    def set_attrs(self):
        self.attrs = {
            "optimizer": 'lamb',
            "weight_decay": 0.0,
            "use_no_bias_optimizer": True,
            "scaled_optimizer_state": True
        }
if __name__ == "__main__":
    unittest.main()
| 32.128571 | 74 | 0.585371 |
79527f9a77c3dee4d48d99abfb67fd634c5d2bc5 | 1,226 | py | Python | class3/exercise4/exercise4.py | papri-entropy/nornir-course | 122c5ecce19cca6c17a1eec0066be7c6b58e6eb5 | [
"MIT"
] | 1 | 2020-06-23T06:36:43.000Z | 2020-06-23T06:36:43.000Z | class3/exercise4/exercise4.py | papri-entropy/nornir-course | 122c5ecce19cca6c17a1eec0066be7c6b58e6eb5 | [
"MIT"
] | null | null | null | class3/exercise4/exercise4.py | papri-entropy/nornir-course | 122c5ecce19cca6c17a1eec0066be7c6b58e6eb5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# import general use modules
import os
from pprint import pprint as pp
# import nornir specifics
from nornir import InitNornir
from nornir.plugins.functions.text import print_result
from nornir.plugins.tasks.networking import netmiko_send_command
from nornir.core.filter import F
def main():
nr = InitNornir(config_file="config.yaml")
hosts = nr.inventory.hosts
#print(hosts)
eos_group = nr.filter(F(groups__contains="eos"))
eos_hosts = eos_group.inventory.hosts
#print(eos_hosts)
int_status = eos_group.run(task=netmiko_send_command, command_string="show interface status", use_textfsm=True)
#print(int_status['arista4'][0].result)
d = {}
for arista in eos_hosts:
#print(arista)
d[arista] = {}
for element in int_status[arista][0].result:
interface = element['port']
print(interface)
status = element['status']
print(status)
vlan = element['vlan']
print(vlan)
d[arista][interface] = {}
d[arista][interface]['status'] = status
d[arista][interface]['vlan'] = vlan
pp(d)
if __name__=="__main__":
main()
| 27.863636 | 115 | 0.639478 |
7952801d0cbae4b3b05bd9f69fbda5acd7d86462 | 475 | py | Python | modulo 02/multiprocess/multi_simple.py | p-g-krish/CursoSecurityToolsPython | 7b2205a33d23166a37a6b8105b9ca5863855aa85 | [
"Apache-2.0"
] | 10 | 2020-02-13T03:14:29.000Z | 2021-09-16T04:32:40.000Z | modulo 02/multiprocess/multi_simple.py | p-g-krish/CursoSecurityToolsPython | 7b2205a33d23166a37a6b8105b9ca5863855aa85 | [
"Apache-2.0"
] | null | null | null | modulo 02/multiprocess/multi_simple.py | p-g-krish/CursoSecurityToolsPython | 7b2205a33d23166a37a6b8105b9ca5863855aa85 | [
"Apache-2.0"
] | 4 | 2020-02-18T23:42:23.000Z | 2021-09-10T05:52:09.000Z | from multiprocessing import Process
import os
def info(title):
    """Print *title* followed by details identifying the current process."""
    lines = [(title,), ('module name:', __name__)]
    if hasattr(os, 'getppid'):  # only available on Unix
        lines.append(('parent process:', os.getppid()))
    lines.append(('process id:', os.getpid()))
    for parts in lines:
        print(*parts)
def f(name):
    # Worker entry point: dump this (child) process's details, then greet.
    info('function f')
    print('hello', name)
if __name__ == '__main__':
    # print the parent process's details, then spawn one child running f()
    info('main line')
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()  # wait for the child to finish before exiting
    print("exit")
7952809d505ad324fe3145d8a6a6cf19f1eb26da | 8,050 | py | Python | tests/test_models.py | deluge/django-pushify | 5c00f606f112cbf4c3647e6d9064d2b1860a7d49 | [
"BSD-3-Clause"
] | 3 | 2018-04-30T10:30:33.000Z | 2018-12-14T22:54:18.000Z | tests/test_models.py | deluge/django-pushify | 5c00f606f112cbf4c3647e6d9064d2b1860a7d49 | [
"BSD-3-Clause"
] | 5 | 2020-02-11T23:36:55.000Z | 2020-10-15T07:29:40.000Z | tests/test_models.py | deluge/django-pushify | 5c00f606f112cbf4c3647e6d9064d2b1860a7d49 | [
"BSD-3-Clause"
] | 1 | 2016-02-10T07:03:39.000Z | 2016-02-10T07:03:39.000Z | from unittest import mock
import pytest
from django.core.exceptions import ImproperlyConfigured
from howl.models import Alert, do_operator_setup
from tests.factories.observers import AlertFactory, ObserverFactory
from tests.resources.models import OperatorTestModel
@pytest.mark.django_db
class TestOperatorSetup:
    # Verifies that do_operator_setup() rejects invalid HOWL_OPERATORS
    # configurations with the exact error messages shown below.
    def test_is_not_subclass_base_operator(self, settings):
        settings.HOWL_OPERATORS = ("tests.resources.operators.NotSubclassOperator",)
        with pytest.raises(TypeError) as exc:
            do_operator_setup(OperatorTestModel)
        assert str(exc.value) == (
            "Operator \"<class 'tests.resources.operators.NotSubclassOperator'>\" "
            'must be of type: "howl.operators.BaseOperator"'
        )
    def test_operator_already_exists(self, settings):
        # registering the same operator twice must raise
        settings.HOWL_OPERATORS = (
            "tests.resources.operators.TestOperator",
            "tests.resources.operators.TestOperator",
        )
        with pytest.raises(ImproperlyConfigured) as exc:
            do_operator_setup(OperatorTestModel)
        assert str(exc.value) == 'Operator named "TestOperator" already exists.'
@pytest.mark.django_db
class TestObserverModel:
    # Tests for the Observer model: repr, alert-identifier generation,
    # and the compare/get_alert alerting behavior.
    def test_repr(self):
        obj = ObserverFactory.create(name="test observer")
        assert str(obj) == "test observer"
    def test_alert_identifier(self):
        obj = ObserverFactory.create(name="test observer")
        assert obj.get_alert_identifier() == "howl-observer:{0}".format(obj.pk)
    def test_alert_identifier_from_kwargs(self):
        # an explicit identifier kwarg overrides the generated one
        obj = ObserverFactory.create(name="test observer")
        assert obj.get_alert_identifier(identifier="foo-bar") == "foo-bar"
    @pytest.mark.parametrize(
        "value, compare_value, count_objects",
        [
            (49, 50, 1),
            (50, 50, 0),
            (51, 50, 1),
        ],
    )
    def test_get_alert(self, value, compare_value, count_objects):
        # a mismatch between observer value and compared value creates an Alert
        obj = ObserverFactory.create(value=value)
        assert Alert.objects.all().count() == 0
        alert = obj.get_alert(compare_value)
        assert Alert.objects.all().count() == count_objects
        if alert:
            assert alert == Alert.objects.first()
    @pytest.mark.parametrize(
        "value, compare_value, return_value, count_objects",
        [
            (49, 50, False, 1),
            (50, 50, True, 0),
            (51, 50, False, 1),
        ],
    )
    def test_compare(self, value, compare_value, return_value, count_objects):
        # compare() returns True only on equality; False creates an Alert
        obj = ObserverFactory.create(value=value)
        assert Alert.objects.all().count() == 0
        assert obj.compare(compare_value) is return_value
        assert Alert.objects.all().count() == count_objects
@pytest.mark.django_db
class TestAlertModel:
    # Tests for Alert.set()/Alert.clear(): state transitions between
    # WAITING and NOTIFIED, and the signals (alert_wait / alert_notify /
    # alert_clear) fired along the way. Signals are mocked to count sends.
    def test_repr(self, activate_en):
        observer = ObserverFactory.create(name="my observer")
        obj = AlertFactory.create(id=23, identifier=observer.get_alert_identifier())
        assert str(obj) == "Alert for howl-observer:{0}".format(observer.pk)
    def test_set_observer_and_identifier_missing(self):
        # set()/clear() require either an observer or an identifier
        with pytest.raises(ValueError):
            Alert.set(2)
    def test_clear_observer_and_identifier_missing(self):
        with pytest.raises(ValueError):
            Alert.clear(2)
    @mock.patch("howl.models.alert_notify.send")
    def test_no_warning_period(self, mock):
        # waiting_period=0 -> alert goes straight to NOTIFIED
        observer = ObserverFactory.create(value=4, waiting_period=0)
        Alert.set(2, observer=observer)
        assert Alert.objects.all().count() == 1
        assert Alert.objects.first().state == Alert.STATE_NOTIFIED
        assert mock.call_count == 1
    @mock.patch("howl.models.alert_wait.send")
    def test_warn_observer(self, mock):
        # non-zero waiting_period -> first set() only enters WAITING
        observer = ObserverFactory.create(value=4, waiting_period=5)
        Alert.set(2, observer=observer)
        assert Alert.objects.all().count() == 1
        assert Alert.objects.first().state == Alert.STATE_WAITING
        assert mock.call_count == 1
    @mock.patch("howl.models.alert_notify.send")
    def test_warn_critical_waiting_priod_not_achieved(self, mock):
        observer = ObserverFactory.create(value=4, waiting_period=5)
        AlertFactory.create(identifier=observer.get_alert_identifier())
        Alert.set(2, observer=observer)
        assert Alert.objects.all().count() == 1
        assert Alert.objects.first().state == Alert.STATE_WAITING
        assert mock.call_count == 0
    @mock.patch("howl.models.alert_notify.send")
    def test_warn_critical_observer(self, mock):
        # an existing WAITING alert escalates to NOTIFIED once the
        # waiting period has elapsed (here: 0)
        observer = ObserverFactory.create(value=4, waiting_period=0)
        AlertFactory.create(
            identifier=observer.get_alert_identifier(), state=Alert.STATE_WAITING
        )
        Alert.set(2, observer=observer)
        assert Alert.objects.all().count() == 1
        assert Alert.objects.first().state == Alert.STATE_NOTIFIED
        assert mock.call_count == 1
    @mock.patch("howl.models.alert_notify.send")
    def test_warn_notified_observer(self, mock):
        # already-NOTIFIED alerts are not re-notified by default
        observer = ObserverFactory.create(value=4, waiting_period=0)
        AlertFactory.create(
            identifier=observer.get_alert_identifier(), state=Alert.STATE_NOTIFIED
        )
        Alert.set(2, observer=observer)
        assert Alert.objects.all().count() == 1
        assert Alert.objects.first().state == Alert.STATE_NOTIFIED
        assert mock.call_count == 0
    @mock.patch("howl.models.alert_notify.send")
    def test_warn_every_time(self, mock):
        # alert_every_time=True re-sends the notify signal on each set()
        observer = ObserverFactory.create(
            value=4, waiting_period=0, alert_every_time=True
        )
        AlertFactory.create(
            identifier=observer.get_alert_identifier(), state=Alert.STATE_NOTIFIED
        )
        assert Alert.objects.all().count() == 1
        assert Alert.objects.first().state == Alert.STATE_NOTIFIED
        assert mock.call_count == 0
        Alert.set(2, observer=observer)
        assert Alert.objects.all().count() == 1
        assert Alert.objects.first().state == Alert.STATE_NOTIFIED
        assert mock.call_count == 1
    @mock.patch("howl.models.alert_wait.send")
    def test_warn_identifier(self, mock):
        # identifier-based API mirrors the observer-based behavior
        Alert.set(2, identifier="alert-name", waiting_period=5)
        assert Alert.objects.all().count() == 1
        alert = Alert.objects.first()
        assert alert.state == Alert.STATE_WAITING
        assert alert.identifier == "alert-name"
        assert mock.call_count == 1
    @mock.patch("howl.models.alert_notify.send")
    def test_warn_critical_identifier(self, mock):
        alert = AlertFactory.create(state=Alert.STATE_WAITING)
        Alert.set(identifier=alert.identifier)
        assert Alert.objects.all().count() == 1
        assert Alert.objects.first().state == Alert.STATE_NOTIFIED
        assert mock.call_count == 1
    @mock.patch("howl.models.alert_notify.send")
    def test_warn_notified_identifier(self, mock):
        alert = AlertFactory.create(state=Alert.STATE_NOTIFIED)
        Alert.set(identifier=alert.identifier)
        assert Alert.objects.all().count() == 1
        assert Alert.objects.first().state == Alert.STATE_NOTIFIED
        assert mock.call_count == 0
    @mock.patch("howl.models.alert_clear.send")
    def test_clear_observer(self, mock):
        # clear() deletes a matching alert and fires alert_clear
        observer = ObserverFactory.create()
        AlertFactory.create(identifier=observer.get_alert_identifier())
        Alert.clear(observer.value, observer=observer)
        assert Alert.objects.all().count() == 0
        assert mock.call_count == 1
    @mock.patch("howl.models.alert_clear.send")
    def test_clear_identifier(self, mock):
        alert = AlertFactory.create()
        Alert.clear(identifier=alert.identifier)
        assert Alert.objects.all().count() == 0
        assert mock.call_count == 1
    @mock.patch("howl.models.alert_clear.send")
    def test_clear_no_object(self, mock):
        # clearing when no alert exists is a silent no-op (no signal)
        observer = ObserverFactory.create()
        Alert.clear(observer.value, observer=observer)
        assert Alert.objects.all().count() == 0
        assert mock.call_count == 0
| 35.9375 | 84 | 0.66559 |
795280a3ad5ceea47cf09736fbb4e7f25a05798a | 4,607 | py | Python | task_16/task.py | prashnts/advent-of-code--2021 | 315fcf470c8c1260057aeafa6d2c42f4c0f74f3f | [
"MIT"
] | null | null | null | task_16/task.py | prashnts/advent-of-code--2021 | 315fcf470c8c1260057aeafa6d2c42f4c0f74f3f | [
"MIT"
] | null | null | null | task_16/task.py | prashnts/advent-of-code--2021 | 315fcf470c8c1260057aeafa6d2c42f4c0f74f3f | [
"MIT"
] | null | null | null | import os
from collections import namedtuple
__here__ = os.path.dirname(__file__)
TEST_DATA_0 = 'D2FE28'
TEST_DATA_1 = '8A004A801A8002F478'
TEST_DATA_2 = '620080001611562C8802118E34'
TEST_DATA_3 = 'C0015000016115A2E0802F182340'
TEST_DATA_4 = 'A0016C880162017C3686B18A3D4780'
Packet = namedtuple('Packet', ['version', 'type_id', 'data'])
class BitsParser:
def __init__(self, encoded: str):
self.bits = self._parse_encoded(encoded)
self.pos = 0
def _parse_encoded(self, encoded: str) -> str:
hex_to_bits = lambda x: bin(int(x, base=16))[2:].zfill(4)
return ''.join(map(hex_to_bits, encoded))
def yield_bits(self, nbits: int):
bslice = self.bits[self.pos:self.pos + nbits]
self.pos += nbits
return bslice
def yield_digit(self, nbits: int):
return int(self.yield_bits(nbits), base=2)
def parse_literal_data(self) -> int:
nums = []
while True:
group = self.yield_bits(5)
nums.append(group[1:])
if group[0] == '0':
# stop reading! Otherwise continue
break
return int(''.join(nums), base=2)
def parse_n_packets(self, nb: int):
return [self.parse_packet() for _ in range(nb)]
def parse_len_packets(self, total_len: int):
goal = self.pos + total_len
packets = []
while self.pos < goal:
packets.append(self.parse_packet())
return packets
def parse_operator_data(self):
length_type_id = self.yield_digit(1)
if length_type_id == 1:
nb_packets = self.yield_digit(11)
return self.parse_n_packets(nb_packets)
len_packets = self.yield_digit(15)
return self.parse_len_packets(len_packets)
def parse_packet_data(self, type_id):
if type_id == 4:
return self.parse_literal_data()
return self.parse_operator_data()
def parse_packet(self) -> Packet:
version = self.yield_digit(3)
type_id = self.yield_digit(3)
data = self.parse_packet_data(type_id)
return Packet(version, type_id, data)
def evaluate_packet(packet):
    """Recursively evaluate a BITS packet to its numeric value.

    type_id semantics: 4 literal, 0 sum, 1 product, 2 min, 3 max,
    5 greater-than, 6 less-than, 7 equal-to. Comparison packets contain
    exactly two sub-packets and yield 0 or 1.
    Returns None for an unknown type_id (preserves the original
    fall-through behavior).
    """
    if packet.type_id == 4:
        # literal packet: the value is stored directly
        return packet.data
    # every other type aggregates its evaluated sub-packets
    values = [evaluate_packet(pkt) for pkt in packet.data]
    if packet.type_id == 0:
        return sum(values)
    if packet.type_id == 1:
        product = 1
        for v in values:
            product *= v
        return product
    if packet.type_id == 2:
        return min(values)
    if packet.type_id == 3:
        return max(values)
    if packet.type_id == 5:
        lhs, rhs = values
        return int(lhs > rhs)
    if packet.type_id == 6:
        lhs, rhs = values
        return int(lhs < rhs)
    if packet.type_id == 7:
        lhs, rhs = values
        return int(lhs == rhs)
def calculate_1(data):
    """Part 1 answer: the sum of the version fields of every packet."""
    packet = BitsParser(data).parse_packet()
    def version_total(pkt):
        total = pkt.version
        # operator packets carry a list of sub-packets; literals carry an int
        if isinstance(pkt.data, list):
            total += sum(version_total(sub) for sub in pkt.data)
        return total
    return version_total(packet)
def calculate_2(data):
    """Part 2 answer: evaluate the expression encoded by the transmission."""
    return evaluate_packet(BitsParser(data).parse_packet())
if __name__ == '__main__':
    # sanity checks against the worked examples from the puzzle statement
    assert calculate_1(TEST_DATA_0) == 6
    assert calculate_1(TEST_DATA_1) == 16
    assert calculate_1(TEST_DATA_2) == 12
    assert calculate_1(TEST_DATA_3) == 23
    assert calculate_1(TEST_DATA_4) == 31
    assert calculate_2('C200B40A82') == 3
    assert calculate_2('04005AC33890') == 54
    assert calculate_2('880086C3E88112') == 7
    assert calculate_2('CE00C43D881120') == 9
    assert calculate_2('D8005AC2A8F0') == 1
    assert calculate_2('F600BC2D8F') == 0
    assert calculate_2('9C005AC2F8F0') == 0
    assert calculate_2('9C0141080250320F1802104A08') == 1
    # solve both parts on the real puzzle input next to this script
    with open(os.path.join(__here__, 'input.txt'), 'r') as fp:
        data = fp.read()
    answer_1 = calculate_1(data)
    answer_2 = calculate_2(data)
    print(f'{answer_1=}')
    print(f'{answer_2=}')
7952835fd372bed630441c5724f6c0ad5034c18f | 1,788 | py | Python | experiments/graph_coloring/datasets/mutils.py | achinta/CategoricalNF | d8717a037e8f13641e9d9a89abf66fba38e23f91 | [
"MIT"
] | 47 | 2020-06-20T10:00:39.000Z | 2022-03-08T13:41:45.000Z | experiments/graph_coloring/datasets/mutils.py | achinta/CategoricalNF | d8717a037e8f13641e9d9a89abf66fba38e23f91 | [
"MIT"
] | 1 | 2020-07-05T20:58:19.000Z | 2020-09-08T12:45:29.000Z | experiments/graph_coloring/datasets/mutils.py | achinta/CategoricalNF | d8717a037e8f13641e9d9a89abf66fba38e23f91 | [
"MIT"
] | 10 | 2020-07-05T17:18:40.000Z | 2022-01-07T03:17:40.000Z | import torch
import torch.utils.data as data
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from statistics import mean, median, stdev
class BucketSampler(data.Sampler):
    """Sampler that groups dataset items of similar length into batches.

    Items are bucketed by (node count // len_step); each batch is drawn
    mostly from one randomly chosen bucket, spilling into neighboring
    buckets when the chosen one runs out of items.
    """
    def __init__(self, dataset, batch_size, len_step=1):
        super().__init__(dataset)
        self.dataset = dataset
        self.batch_size = batch_size
        self.len_step = len_step
        self._prepare()
    def _prepare(self):
        # assumes DATASET_NODES rows are padded with negatives, so counting
        # entries >= 0 yields each item's true length -- TODO confirm
        indices = self.dataset.data_indices
        lengths = (self.dataset.__class__.DATASET_NODES[indices] >= 0).sum(axis=-1)
        lengths = lengths // self.len_step
        linear_indices = np.arange(indices.shape[0]).astype(np.int32)
        self.unique_lengths = np.unique(lengths)
        # one array of linear indices per length bucket
        self.indices_by_lengths = [linear_indices[lengths==l] for l in self.unique_lengths]
    def __iter__(self):
        sampled_indices = []
        # shuffle within each bucket; buckets are consumed destructively below
        ind_by_len = [np.random.permutation(inds) for inds in self.indices_by_lengths]
        while len(sampled_indices) < len(self):
            # pick a bucket with probability proportional to its remaining size
            p = [inds.shape[0] for inds in ind_by_len]
            p = [e*1.0/sum(p) for e in p]
            global_len = np.random.choice(len(ind_by_len), p=p, size=1)[0]
            global_inds = []
            def add_len(global_inds, local_len):
                # move up to (batch_size - len(global_inds)) indices out of
                # bucket local_len into the current batch
                size_to_add = self.batch_size - len(global_inds)
                global_inds += ind_by_len[local_len][:size_to_add].tolist()
                if ind_by_len[local_len].shape[0] > size_to_add:
                    ind_by_len[local_len] = ind_by_len[local_len][size_to_add:]
                else:
                    ind_by_len[local_len] = np.array([])
                return global_inds
            add_len(global_inds, global_len)
            # if the chosen bucket was too small, keep filling the batch from
            # the next buckets (cyclically) until full or everything is empty
            while len(global_inds) < self.batch_size:
                if all([inds.shape[0]==0 for inds in ind_by_len]):
                    break
                global_len = (global_len + 1) % len(ind_by_len)
                add_len(global_inds, global_len)
            sampled_indices += global_inds
        return iter(sampled_indices)
    def __len__(self):
        return len(self.dataset)
795283b71cc63898412fd2fcff6c640bf430a0ff | 14,997 | py | Python | imageAAE/PPAPGAN/cifar10_PPAPGAN.py | tgisaturday/DP_AAE | 3d6afa549731410156c60ff01cba9b8498bc2436 | [
"MIT"
] | 1 | 2020-02-26T07:24:52.000Z | 2020-02-26T07:24:52.000Z | imageAAE/PPAPGAN/cifar10_PPAPGAN.py | tgisaturday/DP_AAE | 3d6afa549731410156c60ff01cba9b8498bc2436 | [
"MIT"
] | null | null | null | imageAAE/PPAPGAN/cifar10_PPAPGAN.py | tgisaturday/DP_AAE | 3d6afa549731410156c60ff01cba9b8498bc2436 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.python.keras._impl.keras.datasets.cifar10 import load_data
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.gridspec as gridspec
import os
import math
import cv2
import time
from scipy.misc import toimage
from utils import add_noise_to_gradients
initializer = tf.contrib.layers.xavier_initializer()
rand_uniform = tf.random_uniform_initializer(-1,1,seed=2)
def exponential_lambda_decay(seq_lambda, global_step, decay_steps, decay_rate, staircase=False):
    """Exponentially decay `seq_lambda` over training steps.

    Returns seq_lambda * decay_rate ** (global_step / decay_steps).
    With staircase=True the exponent is floored, so the value decays in
    discrete steps instead of continuously.
    """
    exponent = float(global_step) / float(decay_steps)
    if staircase:
        exponent = math.floor(exponent)
    return seq_lambda * math.pow(float(decay_rate), exponent)
def random_laplace(shape,sensitivity, epsilon):
    # Draw Laplace(0, sensitivity/epsilon) noise via inverse-CDF sampling
    # from Uniform(-0.5, 0.5), as used for differentially private noise.
    rand_uniform = tf.random_uniform(shape,-0.5,0.5,dtype=tf.float32)
    rand_lap= - (sensitivity/epsilon)*tf.multiply(tf.sign(rand_uniform),tf.log(1.0 - 2.0*tf.abs(rand_uniform)))
    # clamp each element to [-3, 3], then bound the tensor's L2 norm by
    # `sensitivity` (note: two successive clips, not a single projection)
    return tf.clip_by_norm(tf.clip_by_value(rand_lap, -3.0,3.0),sensitivity)
mb_size = 256
X_dim = 1024
len_x_train = 50000
def next_batch(num, data, labels, shuffle=True):
    '''
    Return a total of `num` samples with their matching labels,
    drawn randomly when `shuffle` is True, otherwise in order.
    '''
    order = np.arange(0, len(data))
    if shuffle == True:
        np.random.shuffle(order)
    picked = order[:num]
    batch_data = np.asarray([data[i] for i in picked])
    batch_labels = np.asarray([labels[i] for i in picked])
    return batch_data, batch_labels
def normalize(x):
    """
    Linearly rescale image data so its values span [0, 1].

    argument
        - x: input image data in numpy array [32, 32, 3]
    return
        - normalized x
    """
    lo = np.min(x)
    hi = np.max(x)
    return (x - lo) / (hi - lo)
def plot(samples):
    # Render up to 64 CIFAR-10 samples (flattened 32x32x3 images) on an
    # 8x8 grid and return the matplotlib figure.
    fig = plt.figure(figsize=(4, 4))
    gs = gridspec.GridSpec(8, 8)
    gs.update(wspace=0.05, hspace=0.05)
    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        img = sample.reshape(32, 32,3)
        # NOTE(review): scipy.misc.toimage is removed in modern SciPy;
        # this file pins an old SciPy version -- confirm before upgrading
        plt.imshow(toimage(img),interpolation='nearest')
    return fig
initializer = tf.contrib.layers.xavier_initializer()
rand_uniform = tf.random_uniform_initializer(-1,1,seed=2)
X = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
N = tf.placeholder(tf.float32, shape=[None, 100])
(x_train, y_train), (x_test, y_test) = load_data()
#x_train = np.concatenate((x_train, x_test), axis=0)
#y_train = np.concatenate((y_train, y_test), axis=0)
x_train = normalize(x_train)
y_train_one_hot = tf.squeeze(tf.one_hot(y_train, 10),axis=1)
theta_G =[]
def xavier_init(size):
    # Xavier/Glorot-style initializer: normal noise with stddev based on
    # the first ("fan-in") dimension of the requested shape.
    in_dim = size[0]
    xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
    return tf.random_normal(shape=size, stddev=xavier_stddev)
def autoencoder(x):
    """Build the twin encoder/decoder TF graph for the PPAP-GAN generator.

    The "Encoder" scope builds conv layers (encoder list) followed by
    transposed-conv layers (decoder list), producing the protected image
    `g`. The "Decoder" scope then REUSES those same weights in reverse to
    reconstruct `a` from `g` (weight tying). All new variables are also
    appended to the module-level list theta_G so the generator optimizer
    can collect them.

    Returns: (g_logits, g, a_logits, a, z_value) where z_value is the
    flattened bottleneck of the first encoder pass.
    """
    input_shape=[None, 32, 32, 3]
    n_filters=[3, 128, 256, 512]
    filter_sizes=[5, 5, 5, 5]
    # accept either flattened [N, H*W] or image-shaped [N, H, W, 3] input
    if len(x.get_shape()) == 3:
        x_dim = np.sqrt(x.get_shape().as_list()[1])
        if x_dim != int(x_dim):
            raise ValueError('Unsupported input dimensions')
        x_dim = int(x_dim)
        x_tensor = tf.reshape(
            x, [-1, x_dim, x_dim, 3])
    elif len(x.get_shape()) == 4:
        x_tensor = x
    else:
        raise ValueError('Unsupported input dimensions')
    current_input = x_tensor
    encoder = []
    decoder = []
    shapes_enc = []
    shapes_dec = []
    with tf.name_scope("Encoder"):
        # downsampling conv stack (stride 2 each layer)
        for layer_i, n_output in enumerate(n_filters[1:]):
            n_input = current_input.get_shape().as_list()[3]
            shapes_enc.append(current_input.get_shape().as_list())
            W = tf.Variable(xavier_init([filter_sizes[layer_i],filter_sizes[layer_i],n_input, n_output]))
            theta_G.append(W)
            encoder.append(W)
            conv = tf.nn.conv2d(current_input, W, strides=[1, 2, 2, 1], padding='SAME')
            conv = tf.contrib.layers.batch_norm(conv,updates_collections=None,decay=0.9, zero_debias_moving_mean=True,is_training=True)
            output = tf.nn.leaky_relu(conv)
            current_input = output
        encoder.reverse()
        shapes_enc.reverse()
        z = current_input
        z_value = tf.layers.flatten(z)
        # upsampling transposed-conv stack back to image size
        for layer_i, shape in enumerate(shapes_enc):
            W_enc = encoder[layer_i]
            W = tf.Variable(xavier_init(W_enc.get_shape().as_list()))
            theta_G.append(W)
            decoder.append(W)
            shapes_dec.append(current_input.get_shape().as_list())
            deconv = tf.nn.conv2d_transpose(current_input, W,
                                            tf.stack([tf.shape(x)[0], shape[1], shape[2], shape[3]]),
                                            strides=[1, 2, 2, 1], padding='SAME')
            deconv = tf.contrib.layers.batch_norm(deconv,updates_collections=None,decay=0.9, zero_debias_moving_mean=True,is_training=True)
            if layer_i == 2:
                # final layer outputs pixels in [0, 1]
                output = tf.nn.sigmoid(deconv)
            else:
                output = tf.nn.relu(deconv)
            current_input = output
        g = current_input
        g_logits = deconv
        encoder.reverse()
        shapes_enc.reverse()
        decoder.reverse()
        shapes_dec.reverse()
    with tf.name_scope("Decoder"):
        # second pass over g using the SAME (tied) weights: decoder weights
        # act as the conv stack, encoder weights as the transposed-conv stack
        for layer_i, shape in enumerate(shapes_dec):
            W_dec = decoder[layer_i]
            conv = tf.nn.conv2d(current_input, W_dec, strides=[1, 2, 2, 1], padding='SAME')
            conv = tf.contrib.layers.batch_norm(conv,updates_collections=None,decay=0.9, zero_debias_moving_mean=True,is_training=True)
            output = tf.nn.leaky_relu(conv)
            current_input = output
        encoder.reverse()
        shapes_enc.reverse()
        z = current_input
        for layer_i, shape in enumerate(shapes_enc):
            W_enc = encoder[layer_i]
            deconv = tf.nn.conv2d_transpose(current_input, W_enc,
                                            tf.stack([tf.shape(x)[0], shape[1], shape[2], shape[3]]),
                                            strides=[1, 2, 2, 1], padding='SAME')
            deconv = tf.contrib.layers.batch_norm(deconv,updates_collections=None,decay=0.9, zero_debias_moving_mean=True,is_training=True)
            if layer_i == 2:
                output = tf.nn.sigmoid(deconv)
            else:
                output = tf.nn.relu(deconv)
            current_input = output
        a = current_input
        a_logits = deconv
    return g_logits, g, a_logits, a, z_value
W1_D = tf.Variable(xavier_init([5,5,3,64]))
W2_D = tf.Variable(xavier_init([5,5,64,128]))
W3_D = tf.Variable(xavier_init([5,5,128,256]))
W4_D = tf.Variable(xavier_init([4096, 1]))
b4_D = tf.Variable(tf.zeros(shape=[1]))
theta_D = [W1_D,W2_D,W3_D,W4_D,b4_D]
def discriminator(x):
    # Three stride-2 conv layers (batch-norm + leaky ReLU) over a 32x32x3
    # image, then a linear layer producing a single real/fake logit.
    # Uses the module-level weights W1_D..W4_D, b4_D so the same
    # discriminator is shared across calls.
    if len(x.get_shape()) == 2:
        x_dim = np.sqrt(x.get_shape().as_list()[1])
        if x_dim != int(x_dim):
            raise ValueError('Unsupported input dimensions')
        x_dim = int(x_dim)
        x_tensor = tf.reshape(x, [-1, 32, 32, 3])
    elif len(x.get_shape()) == 4:
        x_tensor = x
    else:
        raise ValueError('Unsupported input dimensions')
    with tf.name_scope("Discriminator"):
        conv1 = tf.nn.conv2d(x_tensor, W1_D, strides=[1,2,2,1],padding='SAME')
        conv1 = tf.contrib.layers.batch_norm(conv1,updates_collections=None,decay=0.9, zero_debias_moving_mean=True,is_training=True)
        h1 = tf.nn.leaky_relu(conv1)
        conv2 = tf.nn.conv2d(h1, W2_D, strides=[1,2,2,1],padding='SAME')
        conv2 = tf.contrib.layers.batch_norm(conv2,updates_collections=None,decay=0.9, zero_debias_moving_mean=True,is_training=True)
        h2 = tf.nn.leaky_relu(conv2)
        conv3 = tf.nn.conv2d(h2, W3_D, strides=[1,2,2,1],padding='SAME')
        conv3 = tf.contrib.layers.batch_norm(conv3,updates_collections=None,decay=0.9, zero_debias_moving_mean=True,is_training=True)
        h3 = tf.nn.leaky_relu(conv3)
        h4 = tf.layers.flatten(h3)
        # unnormalized logit; sigmoid is applied in the loss
        d = tf.nn.xw_plus_b(h4, W4_D, b4_D)
        return d
W1_H = tf.Variable(xavier_init([5,5,3,128]))
W2_H = tf.Variable(xavier_init([5,5,128,256]))
W3_H = tf.Variable(xavier_init([5,5,256,512]))
theta_H = [W1_H,W2_H,W3_H]
def hacker(x):
    """Feature-extractor ("hacker") network: three strided conv +
    batch-norm + leaky-ReLU layers whose flattened output serves as a
    latent feature vector.

    Args:
        x: input images, either a 4-D tensor ``[batch, 32, 32, 3]`` or a
            2-D tensor of flattened images ``[batch, 32*32*3]``

    Returns:
        a ``[batch, 4*4*512]`` tensor of extracted features

    Raises:
        ValueError: if ``x`` is not 2-D or 4-D, or a 2-D input does not
            have ``32*32*3`` columns
    """
    if len(x.get_shape()) == 2:
        # Flat input must contain exactly one 32x32x3 image per row.
        # BUG FIX: the previous sqrt-based check assumed square
        # single-channel images, so every valid 3072-wide flat input was
        # rejected with ValueError before the reshape below.
        x_dim = x.get_shape().as_list()[1]
        if x_dim != 32 * 32 * 3:
            raise ValueError('Unsupported input dimensions')
        x_tensor = tf.reshape(x, [-1, 32, 32, 3])
    elif len(x.get_shape()) == 4:
        x_tensor = x
    else:
        raise ValueError('Unsupported input dimensions')
    with tf.name_scope("Hacker"):
        # 32x32x3 -> 16x16x128 -> 8x8x256 -> 4x4x512; each conv is followed
        # by batch norm and a leaky ReLU.
        conv1 = tf.nn.conv2d(x_tensor, W1_H, strides=[1,2,2,1], padding='SAME')
        conv1 = tf.contrib.layers.batch_norm(conv1, updates_collections=None, decay=0.9, zero_debias_moving_mean=True, is_training=True)
        h1 = tf.nn.leaky_relu(conv1)
        conv2 = tf.nn.conv2d(h1, W2_H, strides=[1,2,2,1], padding='SAME')
        conv2 = tf.contrib.layers.batch_norm(conv2, updates_collections=None, decay=0.9, zero_debias_moving_mean=True, is_training=True)
        h2 = tf.nn.leaky_relu(conv2)
        conv3 = tf.nn.conv2d(h2, W3_H, strides=[1,2,2,1], padding='SAME')
        conv3 = tf.contrib.layers.batch_norm(conv3, updates_collections=None, decay=0.9, zero_debias_moving_mean=True, is_training=True)
        h3 = tf.nn.leaky_relu(conv3)
        # Flatten the 4x4x512 conv output into the latent feature vector.
        z = tf.layers.flatten(h3)
    return z
# Wire the model: autoencoder (generator), discriminator, and "hacker"
# feature extractor, each applied to both real and generated images.
G_logits,G_sample,A_logits,A_sample, gen_real_z = autoencoder(X)
D_real_logits = discriminator(X)
disc_real_z = hacker(X)
D_fake_logits = discriminator(G_sample)
disc_fake_z = hacker(G_sample)
A_true_flat = tf.reshape(X, [-1,32,32,3])
global_step = tf.Variable(0, name="global_step", trainable=False)
# Losses:
# - A_loss: pixel-wise L2 reconstruction error of the autoencoder
# - D_z_loss: L2 distance between hacker features of generated images and
#   the encoder's latent code for the real images
# - D/G losses: standard sigmoid-cross-entropy GAN losses
A_loss = tf.reduce_mean(tf.pow(A_true_flat - A_sample, 2))
D_z_loss =tf.reduce_mean(tf.pow(disc_fake_z - gen_real_z, 2))
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits,labels=tf.ones_like(D_real_logits)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits,labels=tf.zeros_like(D_fake_logits)))
G_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits,labels=tf.ones_like(D_fake_logits)))
D_loss = D_loss_real+D_loss_fake
H_loss = D_z_loss
# Generator is rewarded for fooling D, penalized for reconstruction error,
# and rewarded (negative term) for diverging from the hacker's features.
G_loss = G_loss_fake - D_z_loss + A_loss
# TensorBoard summaries.
tf.summary.image('Original',A_true_flat)
tf.summary.image('G_sample',G_sample)
tf.summary.image('A_sample',A_sample)
tf.summary.scalar('D_loss', D_loss)
tf.summary.scalar('G_loss',G_loss_fake)
tf.summary.scalar('A_loss',A_loss)
tf.summary.scalar('H_loss',H_loss)
merged = tf.summary.merge_all()
num_batches_per_epoch = int((len_x_train-1)/mb_size) + 1
# One Adam optimizer per sub-network, each restricted to its own variables.
D_optimizer = tf.train.AdamOptimizer(learning_rate=2e-4,beta1=0.5, beta2=0.9)
G_optimizer = tf.train.AdamOptimizer(learning_rate=2e-4,beta1=0.5, beta2=0.9)
H_optimizer = tf.train.AdamOptimizer(learning_rate=2e-4,beta1=0.5, beta2=0.9)
D_grads_and_vars=D_optimizer.compute_gradients(D_loss, var_list=theta_D)
G_grads_and_vars=G_optimizer.compute_gradients(G_loss, var_list=theta_G)
H_grads_and_vars=H_optimizer.compute_gradients(H_loss, var_list=theta_H)
# NOTE(review): all three solvers increment the same `global_step`, so it
# advances up to 3x per training iteration — confirm this is intended.
D_solver = D_optimizer.apply_gradients(D_grads_and_vars, global_step=global_step)
G_solver = G_optimizer.apply_gradients(G_grads_and_vars, global_step=global_step)
H_solver = H_optimizer.apply_gradients(H_grads_and_vars, global_step=global_step)
# Checkpoint / output directories.
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "models/cifar10_" + timestamp))
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists('models/'):
    os.makedirs('models/')
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables())
if not os.path.exists('dc_out_cifar10/'):
    os.makedirs('dc_out_cifar10/')
# Training loop: alternate discriminator and generator/autoencoder updates,
# log summaries each step, and periodically dump sample-image grids and a
# model checkpoint.
with tf.Session() as sess:
    train_writer = tf.summary.FileWriter('graphs/'+'cifar10',sess.graph)
    sess.run(tf.global_variables_initializer())
    i = 0  # index used to name the periodically-saved sample images
    for it in range(1000000000):
        X_mb, Y_mb = next_batch(mb_size, x_train, y_train_one_hot.eval())
        # One discriminator step, then one generator step (the generator
        # step also evaluates the merged summaries).
        _, D_loss_curr= sess.run([D_solver, D_loss],feed_dict={X: X_mb})
        summary,_, G_loss_curr,A_loss_curr = sess.run([merged,G_solver, G_loss, A_loss],feed_dict={X: X_mb})
        current_step = tf.train.global_step(sess, global_step)
        train_writer.add_summary(summary,current_step)
        if it % 100 == 0:
            print('Iter: {}; D_loss: {:.4}; G_loss: {:.4}; A_loss: {:.4};'.format(it,D_loss_curr, G_loss_curr, A_loss_curr))
        if it % 1000 == 0:
            # Save a grid of generator samples next to the real inputs...
            samples = sess.run(G_sample, feed_dict={X: X_mb})
            samples_flat = tf.reshape(samples,[-1,32,32,3]).eval()
            fig = plot(np.append(X_mb[:32], samples_flat[:32], axis=0))
            plt.savefig('dc_out_cifar10/{}_G.png'.format(str(i).zfill(3)), bbox_inches='tight')
            plt.close(fig)
            # ...and a grid of autoencoder reconstructions.
            samples = sess.run(A_sample, feed_dict={X: X_mb})
            samples_flat = tf.reshape(samples,[-1,32,32,3]).eval()
            fig = plot(np.append(X_mb[:32], samples_flat[:32], axis=0))
            plt.savefig('dc_out_cifar10/{}_A.png'.format(str(i).zfill(3)), bbox_inches='tight')
            i += 1
            plt.close(fig)
            path = saver.save(sess, checkpoint_prefix, global_step=current_step)
            print('Saved model at {} at step {}'.format(path, current_step))
        # NOTE(review): disabled bulk-export code kept for reference; it is
        # a no-op string literal, not executed.
        '''
        if it% 100000 == 0:
            for ii in range(len_x_train//100):
                xt_mb, y_mb = next_batch(100,x_train, y_train_one_hot.eval(),shuffle=False)
                enc_noise = np.random.normal(0.0,1.0,[100,2,2,512]).astype(np.float32)
                samples = sess.run(G_sample, feed_dict={X: xt_mb,N: enc_noise})
                if ii == 0:
                    generated = samples
                    labels = y_mb
                else:
                    np.append(generated,samples,axis=0)
                    np.append(labels,y_mb, axis=0)
            np.save('./generated_cifar10/generated_{}_image.npy'.format(str(it)), generated)
            np.save('./generated_cifar10/generated_{}_label.npy'.format(str(it)), labels)
            for iii in range(len_x_train//100):
                xt_mb, y_mb = next_batch(100,x_train, y_train_one_hot.eval(),shuffle=False)
                enc_noise = np.random.normal(0.0,1.0,[100,2,2,512]).astype(np.float32)
                samples = sess.run(G_sample, feed_dict={X: xt_mb,N: enc_noise})
                if iii == 0:
                    generated = samples
                    labels = y_mb
                else:
                    np.append(generated,samples,axis=0)
                    np.append(labels,y_mb, axis=0)
            np.save('./generated_cifar10/generated_{}_image.npy'.format(str(it)), generated)
            np.save('./generated_cifar10/generated_{}_label.npy'.format(str(it)), labels)
        '''
| 41.428177 | 139 | 0.647263 |
79528431be28965ec70bce50b9271cdd080eb83c | 47,394 | py | Python | fiftyone/core/aggregations.py | Site-Command/fiftyone | 5bfea96d2aba037e77b189d0ba3713e1b87ef654 | [
"Apache-2.0"
] | 1 | 2021-06-10T11:46:56.000Z | 2021-06-10T11:46:56.000Z | fiftyone/core/aggregations.py | Site-Command/fiftyone | 5bfea96d2aba037e77b189d0ba3713e1b87ef654 | [
"Apache-2.0"
] | null | null | null | fiftyone/core/aggregations.py | Site-Command/fiftyone | 5bfea96d2aba037e77b189d0ba3713e1b87ef654 | [
"Apache-2.0"
] | null | null | null | """
Aggregations.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from copy import deepcopy
import numpy as np
import eta.core.utils as etau
import fiftyone.core.expressions as foe
from fiftyone.core.expressions import ViewField as F
import fiftyone.core.media as fom
class Aggregation(object):
    """Abstract base class for all aggregations.

    An :class:`Aggregation` represents a reduction of a
    :class:`fiftyone.core.collections.SampleCollection` instance to a
    single result, defined either by a field name (optionally transformed
    by ``expr``) or by a standalone expression.

    Args:
        field_or_expr: a field name, ``embedded.field.name``,
            :class:`fiftyone.core.expressions.ViewExpression`, or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            defining the field or expression to aggregate
        expr (None): a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to ``field_or_expr`` (which must be a field) before
            aggregating
    """

    def __init__(self, field_or_expr, expr=None):
        if field_or_expr is None or etau.is_str(field_or_expr):
            # A (possibly absent) field name, optionally transformed by the
            # separate `expr` argument
            self._field_name = field_or_expr
            self._expr = expr
            return

        # A non-string first argument is itself the expression to aggregate
        if expr is not None:
            raise ValueError(
                "`field_or_expr` must be a field name when the `expr` "
                "argument is provided"
            )

        self._field_name = None
        self._expr = field_or_expr

    @property
    def field_name(self):
        """The name of the field being computed on, if any."""
        return self._field_name

    @property
    def expr(self):
        """The expression being computed, if any."""
        return self._expr

    @property
    def _has_big_result(self):
        """Whether the aggregation's result is returned across multiple
        documents.
        """
        return False

    def to_mongo(self, sample_collection):
        """Returns the MongoDB aggregation pipeline for this aggregation.

        Args:
            sample_collection: the
                :class:`fiftyone.core.collections.SampleCollection` to
                which the aggregation is being applied

        Returns:
            a MongoDB aggregation pipeline (list of dicts)
        """
        raise NotImplementedError("subclasses must implement to_mongo()")

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the aggregation result
        """
        raise NotImplementedError("subclasses must implement parse_result()")

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            the aggregation result
        """
        raise NotImplementedError("subclasses must implement default_result()")

    def _needs_frames(self, sample_collection):
        """Whether the aggregation requires frame labels of video samples
        to be attached.

        Args:
            sample_collection: the
                :class:`fiftyone.core.collections.SampleCollection` to
                which the aggregation is being applied

        Returns:
            True/False
        """
        if self._field_name is not None:
            return sample_collection._is_frame_field(self._field_name)

        if self._expr is None:
            return False

        # Inspect the field that the expression's prefix refers to
        field_name, _ = _extract_prefix_from_expr(self._expr)
        return sample_collection._is_frame_field(field_name)

    def _parse_field_and_expr(
        self,
        sample_collection,
        auto_unwind=True,
        omit_terminal_lists=False,
        allow_missing=False,
    ):
        # Delegate to the module-level parsing helper
        return _parse_field_and_expr(
            sample_collection,
            self._field_name,
            self._expr,
            auto_unwind,
            omit_terminal_lists,
            allow_missing,
        )
class AggregationError(Exception):
    """Exception raised when an :class:`Aggregation` fails to execute."""
class Bounds(Aggregation):
    """Computes the ``(min, max)`` bounds of a numeric field of a
    collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Args:
        field_or_expr: a field name, ``embedded.field.name``,
            :class:`fiftyone.core.expressions.ViewExpression`, or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            defining the field or expression to aggregate
        expr (None): a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to ``field_or_expr`` (which must be a field) before
            aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``(None, None)``
        """
        return (None, None)

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the ``(min, max)`` bounds
        """
        return (d["min"], d["max"])

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        # Single $group stage computing both extrema in one pass
        group_stage = {
            "$group": {
                "_id": None,
                "min": {"$min": "$" + path},
                "max": {"$max": "$" + path},
            }
        }
        pipeline.append(group_stage)

        return pipeline
class Count(Aggregation):
    """Counts the number of field values in a collection.

    ``None``-valued fields are ignored.

    If no field or expression is provided, the samples themselves are
    counted.

    Args:
        field_or_expr (None): a field name, ``embedded.field.name``,
            :class:`fiftyone.core.expressions.ViewExpression`, or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            defining the field or expression to aggregate. If neither
            ``field_or_expr`` or ``expr`` is provided, the samples
            themselves are counted
        expr (None): a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to ``field_or_expr`` (which must be a field) before
            aggregating
    """

    def __init__(self, field_or_expr=None, expr=None):
        super().__init__(field_or_expr, expr=expr)

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the count
        """
        return d["count"]

    def to_mongo(self, sample_collection):
        if self._field_name is None and self._expr is None:
            # No field/expression: count the documents themselves
            return [{"$count": "count"}]

        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        if sample_collection.media_type != fom.VIDEO or path != "frames":
            # Exclude None-valued (missing) fields from the count
            match_stage = {"$match": {"$expr": {"$gt": ["$" + path, None]}}}
            pipeline.append(match_stage)

        pipeline.append({"$count": "count"})

        return pipeline
class CountValues(Aggregation):
    """Counts the occurrences of field values in a collection.

    This aggregation is typically applied to *countable* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.BooleanField`
    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.StringField`

    Args:
        field_or_expr: a field name, ``embedded.field.name``,
            :class:`fiftyone.core.expressions.ViewExpression`, or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            defining the field or expression to aggregate
        expr (None): a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to ``field_or_expr`` (which must be a field) before
            aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``{}``
        """
        return {}

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            a dict mapping values to counts
        """
        return dict((r["k"], r["count"]) for r in d["result"])

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        # Tally each distinct value, then collect the tallies into a single
        # result document
        tally_stage = {"$group": {"_id": "$" + path, "count": {"$sum": 1}}}
        collect_stage = {
            "$group": {
                "_id": None,
                "result": {"$push": {"k": "$_id", "count": "$count"}},
            }
        }
        pipeline.extend([tally_stage, collect_stage])

        return pipeline
class Distinct(Aggregation):
    """Computes the distinct values of a field in a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *countable* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.BooleanField`
    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.StringField`

    Args:
        field_or_expr: a field name, ``embedded.field.name``,
            :class:`fiftyone.core.expressions.ViewExpression`, or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            defining the field or expression to aggregate
        expr (None): a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to ``field_or_expr`` (which must be a field) before
            aggregating
    """

    def __init__(self, field_or_expr, expr=None, _first=None):
        super().__init__(field_or_expr, expr=expr)
        # When set, only the first `_first` values are returned, together
        # with the total count of distinct values
        self._first = _first

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``[]``
        """
        if self._first is not None:
            return 0, []

        return []

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            a sorted list of distinct values
        """
        if self._first is not None:
            return d["count"], d["values"]

        return d["values"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        value = "$" + path

        # Drop missing values, dedupe, and collect into a sorted array
        pipeline.extend(
            [
                {"$match": {"$expr": {"$gt": [value, None]}}},
                {"$group": {"_id": None, "values": {"$addToSet": value}}},
                {"$unwind": "$values"},
                {"$sort": {"values": 1}},
                {"$group": {"_id": None, "values": {"$push": "$values"}}},
            ]
        )

        if self._first is not None:
            # Record the total count and truncate to the first N values
            pipeline.append(
                {
                    "$set": {
                        "count": {"$size": "$values"},
                        "values": {"$slice": ["$values", self._first]},
                    }
                }
            )

        return pipeline
class HistogramValues(Aggregation):
    """Computes a histogram of the field values in a collection.

    This aggregation is typically applied to *numeric* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Args:
        field_or_expr: a field name, ``embedded.field.name``,
            :class:`fiftyone.core.expressions.ViewExpression`, or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            defining the field or expression to aggregate
        expr (None): a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to ``field_or_expr`` (which must be a field) before
            aggregating
        bins (None): an integer number of bins to generate or a
            monotonically increasing sequence specifying the bin edges to
            use. By default, 10 bins are created. If ``bins`` is an integer
            and no ``range`` is specified, bin edges are automatically
            computed from the bounds of the field
        range (None): a ``(lower, upper)`` tuple specifying a range in
            which to generate equal-width bins. Only applicable when
            ``bins`` is an integer or ``None``
        auto (False): whether to automatically choose bin edges in an
            attempt to evenly distribute the counts in each bin. If this
            option is chosen, ``bins`` will only be used if it is an
            integer, and the ``range`` parameter is ignored
    """

    def __init__(
        self, field_or_expr, expr=None, bins=None, range=None, auto=False
    ):
        super().__init__(field_or_expr, expr=expr)
        self._bins = bins
        self._range = range
        self._auto = auto

        self._num_bins = None
        self._edges = None
        self._edges_last_used = None
        self._parse_args()

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            a tuple of

            -   counts: ``[]``
            -   edges: ``[]``
            -   other: ``0``
        """
        return [], [], 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            a tuple of

            -   counts: a list of counts in each bin
            -   edges: an increasing list of bin edges of length
                ``len(counts) + 1``. Note that each bin is treated as
                having an inclusive lower boundary and exclusive upper
                boundary, ``[lower, upper)``, including the rightmost bin
            -   other: the number of items outside the bins
        """
        if self._auto:
            return self._parse_result_auto(d)

        return self._parse_result_edges(d)

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        if self._auto:
            # Let MongoDB pick bin boundaries that evenly distribute counts
            bucket_stage = {
                "$bucketAuto": {
                    "groupBy": "$" + path,
                    "buckets": self._num_bins,
                    "output": {"count": {"$sum": 1}},
                }
            }
        else:
            edges = self._edges
            if edges is None:
                edges = self._compute_bin_edges(sample_collection)

            # Remember the edges so that parse_result() can map bins back
            self._edges_last_used = edges
            bucket_stage = {
                "$bucket": {
                    "groupBy": "$" + path,
                    "boundaries": edges,
                    "default": "other",  # counts documents outside of bins
                    "output": {"count": {"$sum": 1}},
                }
            }

        pipeline.append(bucket_stage)
        pipeline.append({"$group": {"_id": None, "bins": {"$push": "$$ROOT"}}})

        return pipeline

    def _parse_args(self):
        # Resolve the `bins`/`range`/`auto` arguments into either a bin
        # count (`_num_bins`) or explicit bin edges (`_edges`)
        bins = 10 if self._bins is None else self._bins

        if self._auto:
            # Auto mode honors only an integer bin count
            self._num_bins = bins if etau.is_numeric(bins) else 10
            return

        if not etau.is_numeric(bins):
            # User-provided bin edges
            self._edges = list(bins)
            return

        if self._range is not None:
            # Linearly-spaced bins within `range`
            lower, upper = self._range
            self._edges = list(np.linspace(lower, upper, bins + 1))
        else:
            # Edges will be computed later from the field's bounds
            self._num_bins = bins

    def _compute_bin_edges(self, sample_collection):
        lower, upper = sample_collection.bounds(
            self._field_name, expr=self._expr
        )
        if lower is None or upper is None:
            # No values; fall back to a degenerate range
            lower, upper = -1, -1

        # Nudge the upper bound so the max value falls inside the last bin
        return list(np.linspace(lower, upper + 1e-6, self._num_bins + 1))

    def _parse_result_edges(self, d):
        edges_array = np.array(self._edges_last_used)
        counts = [0] * (len(edges_array) - 1)
        other = 0
        for bin_doc in d["bins"]:
            lower = bin_doc["_id"]
            if lower == "other":
                other = bin_doc["count"]
            else:
                # Match the bin to its nearest lower edge
                idx = np.abs(edges_array - lower).argmin()
                counts[idx] = bin_doc["count"]

        return counts, list(edges_array), other

    def _parse_result_auto(self, d):
        counts = []
        edges = []
        for bin_doc in d["bins"]:
            counts.append(bin_doc["count"])
            edges.append(bin_doc["_id"]["min"])

        # Close the final bin with the last bucket's upper boundary
        edges.append(d["bins"][-1]["_id"]["max"])

        return counts, edges, 0
class Mean(Aggregation):
    """Computes the arithmetic mean of the field values of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Args:
        field_or_expr: a field name, ``embedded.field.name``,
            :class:`fiftyone.core.expressions.ViewExpression`, or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            defining the field or expression to aggregate
        expr (None): a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to ``field_or_expr`` (which must be a field) before
            aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the mean
        """
        return d["mean"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        group_stage = {"$group": {"_id": None, "mean": {"$avg": "$" + path}}}
        pipeline.append(group_stage)

        return pipeline
class Std(Aggregation):
    """Computes the standard deviation of the field values of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Args:
        field_or_expr: a field name, ``embedded.field.name``,
            :class:`fiftyone.core.expressions.ViewExpression`, or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            defining the field or expression to aggregate
        expr (None): a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to ``field_or_expr`` (which must be a field) before
            aggregating
        sample (False): whether to compute the sample standard deviation
            rather than the population standard deviation
    """

    def __init__(self, field_or_expr, expr=None, sample=False):
        super().__init__(field_or_expr, expr=expr)
        self._sample = sample

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the standard deviation
        """
        return d["std"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        # Sample vs population standard deviation operator
        std_op = "$stdDevSamp" if self._sample else "$stdDevPop"
        pipeline.append({"$group": {"_id": None, "std": {std_op: "$" + path}}})

        return pipeline
class Sum(Aggregation):
    """Computes the sum of the field values of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Args:
        field_or_expr: a field name, ``embedded.field.name``,
            :class:`fiftyone.core.expressions.ViewExpression`, or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            defining the field or expression to aggregate
        expr (None): a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to ``field_or_expr`` (which must be a field) before
            aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the sum
        """
        return d["sum"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        group_stage = {"$group": {"_id": None, "sum": {"$sum": "$" + path}}}
        pipeline.append(group_stage)

        return pipeline
class Values(Aggregation):
    """Extracts the values of the field from all samples in a collection.

    Values aggregations are useful for efficiently extracting a slice of
    field or embedded field values across all samples in a collection.

    The dual function of :class:`Values` is
    :meth:`set_values() <fiftyone.core.collections.SampleCollection.set_values>`,
    which can be used to efficiently set a field or embedded field of all
    samples in a collection by providing lists of values of the same
    structure returned by this aggregation.

    .. note::

        Unlike other aggregations, :class:`Values` does not automatically
        unwind list fields, which ensures that the returned values match the
        potentially-nested structure of the documents.

        You can opt-in to unwinding specific list fields using the ``[]``
        syntax, or you can pass the optional ``unwind=True`` parameter to
        unwind all supported list fields. See :ref:`aggregations-list-fields`
        for more information.

    Examples::

        import fiftyone as fo
        import fiftyone.zoo as foz
        from fiftyone import ViewField as F

        # All values of a field
        values = dataset.aggregate(fo.Values("numeric_field"))

        # All values of a list field
        values = dataset.aggregate(fo.Values("numeric_list_field"))

        # All values of a transformed field
        values = dataset.aggregate(fo.Values(2 * (F("numeric_field") + 1)))

        # Values from a label list field
        dataset = foz.load_zoo_dataset("quickstart")
        labels = dataset.aggregate(fo.Values("ground_truth.detections.label"))

    Args:
        field_or_expr: a field name, ``embedded.field.name``,
            :class:`fiftyone.core.expressions.ViewExpression`, or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            defining the field or expression to aggregate
        expr (None): a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to ``field_or_expr`` (which must be a field) before
            aggregating
        missing_value (None): a value to insert for missing or ``None``-valued
            fields
        unwind (False): whether to automatically unwind all recognized list
            fields
    """

    def __init__(
        self,
        field_or_expr,
        expr=None,
        missing_value=None,
        unwind=False,
        _allow_missing=False,
        _big_result=True,
        _raw=False,
    ):
        # Translate public `id` field names to their `_id` database names
        field_or_expr, found_id_field = _handle_id_fields(field_or_expr)

        super().__init__(field_or_expr, expr=expr)

        self._missing_value = missing_value
        self._unwind = unwind
        self._allow_missing = _allow_missing
        self._big_result = _big_result
        self._raw = _raw
        self._found_id_field = found_id_field

        # Populated by `to_mongo()` and consumed by `parse_result()`
        self._field_type = None
        self._num_list_fields = None

    @property
    def _has_big_result(self):
        return self._big_result

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``[]``
        """
        return []

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the list of field values
        """
        if self._big_result:
            values = [di["value"] for di in d]
        else:
            values = d["values"]

        if self._raw:
            return values

        if self._found_id_field:
            # Stringify the extracted `_id` values
            return _transform_values(
                values, str, level=1 + self._num_list_fields
            )

        if self._field_type is not None:
            # Convert the extracted values back to their Python types
            return _transform_values(
                values,
                self._field_type.to_python,
                level=1 + self._num_list_fields,
            )

        return values

    def to_mongo(self, sample_collection):
        """Returns the MongoDB aggregation pipeline for this aggregation.

        Args:
            sample_collection: the sample collection being aggregated

        Returns:
            a list of pipeline stages
        """
        path, pipeline, list_fields = self._parse_field_and_expr(
            sample_collection,
            auto_unwind=self._unwind,
            omit_terminal_lists=not self._unwind,
            allow_missing=self._allow_missing,
        )

        if self._expr is None:
            self._field_type = sample_collection._get_field_type(
                self._field_name, ignore_primitives=True
            )

        self._num_list_fields = len(list_fields)

        pipeline.extend(
            _make_extract_values_pipeline(
                path, list_fields, self._missing_value, self._big_result
            )
        )

        return pipeline
def _handle_id_fields(field_name):
    """Translates public ``id`` field names to their ``_id`` database names.

    Non-string inputs (e.g. expressions) are passed through unchanged.

    Args:
        field_name: a field name or expression

    Returns:
        a ``(field_name, found_id_field)`` tuple
    """
    if etau.is_str(field_name):
        if field_name == "id":
            return "_id", True

        if field_name.endswith(".id"):
            return field_name[: -len(".id")] + "._id", True

    return field_name, False
def _transform_values(values, fcn, level=1):
if values is None:
return None
if level < 1:
return fcn(values)
return [_transform_values(v, fcn, level=level - 1) for v in values]
def _make_extract_values_pipeline(
    path, list_fields, missing_value, big_result
):
    """Builds the pipeline stages that extract the values of ``path``.

    Args:
        path: the full path of the field to extract
        list_fields: the list fields traversed by ``path``, outermost first
        missing_value: the value substituted for missing/``None`` values
        big_result: whether to emit one document per value rather than
            pushing all values into a single document

    Returns:
        a list of pipeline stages
    """
    root = list_fields[0] if list_fields else path

    # Substitute `missing_value` for missing/None leaf values
    expr = (F() != None).if_else(F(), missing_value)

    if list_fields:
        # Map the expression over the innermost list field
        subfield = path[len(list_fields[-1]) + 1 :]
        expr = _extract_list_values(subfield, expr)

    if len(list_fields) > 1:
        # Wrap the expression in a map for each enclosing list field,
        # working from the innermost pair outwards
        for outer, inner in zip(
            reversed(list_fields[:-1]), reversed(list_fields[1:])
        ):
            expr = _extract_list_values(inner[len(outer) + 1 :], expr)

    pipeline = [{"$set": {root: expr.to_mongo(prefix="$" + root)}}]

    if big_result:
        pipeline.append({"$project": {"value": "$" + root}})
    else:
        pipeline.append(
            {"$group": {"_id": None, "values": {"$push": "$" + root}}}
        )

    return pipeline
def _extract_list_values(subfield, expr):
    """Wraps ``expr`` in a map over a list field, descending into
    ``subfield`` of each element when one is provided.

    Args:
        subfield: the subfield of each list element, or an empty string to
            apply ``expr`` to the elements themselves
        expr: the expression to apply

    Returns:
        the mapped expression
    """
    inner = F(subfield).apply(expr) if subfield else expr
    return F().map(inner)
def _parse_field_and_expr(
    sample_collection,
    field_name,
    expr,
    auto_unwind,
    omit_terminal_lists,
    allow_missing,
):
    """Parses the field/expression of an aggregation into a path plus the
    pipeline stages required to materialize it.

    Args:
        sample_collection: the sample collection being aggregated
        field_name: a field name, or None if ``expr`` defines the field
        expr: an optional expression to apply before aggregating
        auto_unwind: whether to automatically unwind list fields
        omit_terminal_lists: whether to omit terminal list fields
        allow_missing: whether to allow missing fields

    Returns:
        a ``(path, pipeline, other_list_fields)`` tuple
    """
    if field_name is None and expr is None:
        raise ValueError(
            "You must provide a field or an expression in order to define an "
            "aggregation"
        )

    if field_name is None:
        # Pull the longest common field prefix out of the expression
        field_name, expr = _extract_prefix_from_expr(expr)

    if expr is None:
        pipeline = []
    else:
        if field_name is None:
            # The expression references no common field; compute it into a
            # synthetic `value` field
            field_name = "value"
            embedded_root = True
            allow_missing = True
        else:
            embedded_root = False
            allow_missing = False

        pipeline, _ = sample_collection._make_set_field_pipeline(
            field_name,
            expr,
            embedded_root=embedded_root,
            allow_missing=allow_missing,
        )

    (
        path,
        is_frame_field,
        unwind_list_fields,
        other_list_fields,
    ) = sample_collection._parse_field_name(
        field_name,
        auto_unwind=auto_unwind,
        omit_terminal_lists=omit_terminal_lists,
        allow_missing=allow_missing,
    )

    if is_frame_field and auto_unwind:
        # Promote each frame document to the root of its own document
        pipeline.extend(
            [{"$unwind": "$frames"}, {"$replaceRoot": {"newRoot": "$frames"}}]
        )

    for list_field in unwind_list_fields:
        pipeline.append({"$unwind": "$" + list_field})

    root = other_list_fields[0] if other_list_fields else path

    pipeline.append({"$project": {root: True}})

    return path, pipeline, other_list_fields
def _extract_prefix_from_expr(expr):
    """Splits the longest common field prefix out of the given expression.

    Args:
        expr: an expression

    Returns:
        a ``(prefix, expr)`` tuple, where ``prefix`` may be None and
        ``expr`` has the prefix removed from its field references
    """
    prefixes = []
    _find_prefixes(expr, prefixes)

    prefix = _get_common_prefix(prefixes)
    if not prefix:
        return prefix, expr

    # Strip the prefix from a copy so the caller's expression is untouched
    expr = deepcopy(expr)
    _remove_prefix(expr, prefix)
    return prefix, expr
def _find_prefixes(expr, prefixes):
    """Recursively accumulates the field prefixes referenced by ``expr``
    into the ``prefixes`` list (modified in-place)."""
    if isinstance(expr, foe.ViewExpression):
        if expr.is_frozen:
            return

        if isinstance(expr, foe.ViewField):
            prefixes.append(expr._expr)
        else:
            _find_prefixes(expr._expr, prefixes)

        return

    if isinstance(expr, (list, tuple)):
        children = expr
    elif isinstance(expr, dict):
        children = expr.values()
    else:
        return

    for child in children:
        _find_prefixes(child, prefixes)
def _get_common_prefix(prefixes):
if not prefixes:
return None
chunks = [p.split(".") for p in prefixes]
min_chunks = min(len(c) for c in chunks)
common = None
idx = 0
pre = [c[0] for c in chunks]
while len(set(pre)) == 1:
common = pre[0]
idx += 1
if idx >= min_chunks:
break
pre = [common + "." + c[idx] for c in chunks]
return common
def _remove_prefix(expr, prefix):
    """Recursively strips ``prefix`` from the field references in ``expr``
    (modified in-place)."""
    if isinstance(expr, foe.ViewExpression):
        if expr.is_frozen:
            return

        if isinstance(expr, foe.ViewField):
            if expr._expr == prefix:
                expr._expr = ""
            elif expr._expr.startswith(prefix + "."):
                expr._expr = expr._expr[len(prefix) + 1 :]
        else:
            _remove_prefix(expr._expr, prefix)

        return

    if isinstance(expr, (list, tuple)):
        children = expr
    elif isinstance(expr, dict):
        children = expr.values()
    else:
        return

    for child in children:
        _remove_prefix(child, prefix)
| 30.072335 | 125 | 0.553741 |
795284854d94d6e4b131fd616c742fa85e44442d | 901 | py | Python | club/migrations/0021_auto_20191226_1715.py | DSC-RPI/dsc-portal | bf2d0c067d10dd199317ccc00863d85db0d07094 | [
"MIT"
] | 2 | 2020-01-29T20:14:35.000Z | 2020-02-15T23:01:42.000Z | club/migrations/0021_auto_20191226_1715.py | DSC-RPI/dsc-portal | bf2d0c067d10dd199317ccc00863d85db0d07094 | [
"MIT"
] | 62 | 2019-11-26T17:47:58.000Z | 2022-01-13T02:05:51.000Z | club/migrations/0021_auto_20191226_1715.py | DSC-RPI/dsc-portal | bf2d0c067d10dd199317ccc00863d85db0d07094 | [
"MIT"
] | 1 | 2020-01-23T17:12:38.000Z | 2020-01-23T17:12:38.000Z | # Generated by Django 3.0.1 on 2019-12-26 22:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('club', '0020_eventrsvp'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='presentation_link',
),
migrations.AddField(
model_name='event',
name='presentation_id',
field=models.URLField(blank=True, help_text='(optional) The ID of the Google Slides slideshow.', max_length=300, null=True),
),
migrations.AddField(
model_name='event',
name='thumbnail_link',
field=models.URLField(blank=True, help_text='An optional link to an image to show for the event. If a slideshow is associated with the event, it will automatically use the slide thumbnail.', null=True),
),
]
| 32.178571 | 214 | 0.619312 |
795284c8fef20df4e9d6d58a822dba32e7026ac8 | 3,376 | py | Python | openerp/addons/account/wizard/account_use_model.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 3 | 2016-01-29T14:39:49.000Z | 2018-12-29T22:42:00.000Z | openerp/addons/account/wizard/account_use_model.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 2 | 2016-03-23T14:29:41.000Z | 2017-02-20T17:11:30.000Z | openerp/addons/account/wizard/account_use_model.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_use_model(osv.osv_memory):
    # Transient wizard that generates journal entries from selected account
    # models and then opens the created entries in a list view.
    _name = 'account.use.model'
    _description = 'Use model'
    _columns = {
        # Account models to generate entries from
        'model': fields.many2many('account.model', 'account_use_model_relation', 'account_id', 'model_id', 'Account Model'),
    }

    def view_init(self, cr , uid , fields_list, context=None):
        """Validates the models in ``context['active_ids']`` before the
        wizard form is displayed.

        Raises:
            osv.except_osv: if any model line computes its maturity date
                from the partner payment term but has no partner set
        """
        account_model_obj = self.pool.get('account.model')
        if context is None:
            context = {}
        if context.get('active_ids',False):
            data_model = account_model_obj.browse(cr, uid, context['active_ids'])
            for model in data_model:
                for line in model.lines_id:
                    if line.date_maturity == 'partner':
                        # Partner-based maturity requires a partner on the line
                        if not line.partner_id:
                            raise osv.except_osv(_('Error!'), _("Maturity date of entry line generated by model line '%s' is based on partner payment term!"\
                                "\nPlease define partner on it!")%line.name)
        pass

    def create_entries(self, cr, uid, ids, context=None):
        """Generates account moves from the selected models and returns an
        ``ir.actions.act_window`` descriptor listing the created entries.
        """
        account_model_obj = self.pool.get('account.model')
        mod_obj = self.pool.get('ir.model.data')
        if context is None:
            context = {}
        data = self.read(cr, uid, ids, context=context)[0]
        # Models come from the wizard form when launched via `model_line`;
        # otherwise from the records selected in the list view
        record_id = context and context.get('model_line', False) or False
        if record_id:
            model_ids = data['model']
        else:
            model_ids = context['active_ids']
        move_ids = account_model_obj.generate(cr, uid, model_ids, context=context)
        context.update({'move_ids':move_ids})
        # Locate the standard account.move form view to open the results with
        model_data_ids = mod_obj.search(cr, uid,[('model','=','ir.ui.view'),('name','=','view_move_form')], context=context)
        resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
        return {
            'domain': "[('id','in', ["+','.join(map(str,context['move_ids']))+"])]",
            'name': 'Entries',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'account.move',
            'views': [(False,'tree'),(resource_id,'form')],
            'type': 'ir.actions.act_window',
        }

account_use_model()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 43.844156 | 157 | 0.585012 |
795285583964dc2ca50270134393dc7f82e0f21c | 4,176 | py | Python | API/src/main/resources/Lib/robot/running/__init__.py | TagExpress/SikuliX1 | de9da11794dd94b3821eddc5c01b534d3f2fe828 | [
"MIT"
] | null | null | null | API/src/main/resources/Lib/robot/running/__init__.py | TagExpress/SikuliX1 | de9da11794dd94b3821eddc5c01b534d3f2fe828 | [
"MIT"
] | null | null | null | API/src/main/resources/Lib/robot/running/__init__.py | TagExpress/SikuliX1 | de9da11794dd94b3821eddc5c01b534d3f2fe828 | [
"MIT"
] | null | null | null | # Copyright (c) 2010-2020, sikuli.org, sikulix.com - MIT license
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the core test execution logic.
The main public entry points of this package are of the following two classes:
* :class:`~robot.running.builder.TestSuiteBuilder` for creating executable
test suites based on existing test case files and directories.
* :class:`~robot.running.model.TestSuite` for creating an executable
test suite structure programmatically.
It is recommended to import both of these classes via the :mod:`robot.api`
package like in the examples below. Also :class:`~robot.running.model.TestCase`
and :class:`~robot.running.model.Keyword` classes used internally by the
:class:`~robot.running.model.TestSuite` class are part of the public API.
In those rare cases where these classes are needed directly, they can be
imported from this package.
Examples
--------
First, let's assume we have the following test suite in file
``activate_skynet.robot``::
*** Settings ***
Library OperatingSystem
*** Test Cases ***
Should Activate Skynet
[Tags] smoke
[Setup] Set Environment Variable SKYNET activated
Environment Variable Should Be Set SKYNET
We can easily parse and create an executable test suite based on the above file
using the :class:`~robot.running.builder.TestSuiteBuilder` class as follows::
from robot.api import TestSuiteBuilder
suite = TestSuiteBuilder().build('path/to/activate_skynet.robot')
That was easy. Let's next generate the same test suite from scratch
using the :class:`~robot.running.model.TestSuite` class::
from robot.api import TestSuite
suite = TestSuite('Activate Skynet')
suite.resource.imports.library('OperatingSystem')
test = suite.tests.create('Should Activate Skynet', tags=['smoke'])
test.keywords.create('Set Environment Variable', args=['SKYNET', 'activated'], type='setup')
test.keywords.create('Environment Variable Should Be Set', args=['SKYNET'])
Not that complicated either, especially considering the flexibility. Notice
that the suite created based on the file could also be edited further using
the same API.
Now that we have a test suite ready, let's :meth:`execute it
<robot.running.model.TestSuite.run>` and verify that the returned
:class:`~robot.result.executionresult.Result` object contains correct
information::
result = suite.run(critical='smoke', output='skynet.xml')
assert result.return_code == 0
assert result.suite.name == 'Activate Skynet'
test = result.suite.tests[0]
assert test.name == 'Should Activate Skynet'
assert test.passed and test.critical
stats = result.suite.statistics
assert stats.critical.total == 1 and stats.critical.failed == 0
Running the suite generates a normal output XML file, unless it is disabled
by using ``output=None``. Generating log, report, and xUnit files based on
the results is possible using the
:class:`~robot.reporting.resultwriter.ResultWriter` class::
from robot.api import ResultWriter
# Report and xUnit files can be generated based on the result object.
ResultWriter(result).write_results(report='skynet.html', log=None)
# Generating log files requires processing the earlier generated output XML.
ResultWriter('skynet.xml').write_results()
"""
from .builder import TestSuiteBuilder, ResourceFileBuilder
from .context import EXECUTION_CONTEXTS
from .model import Keyword, TestCase, TestSuite
from .testlibraries import TestLibrary
from .usererrorhandler import UserErrorHandler
from .userkeyword import UserLibrary
from .runkwregister import RUN_KW_REGISTER
| 40.153846 | 96 | 0.755268 |
795285cdf03b581ac379000a611963bf2f0aeb15 | 1,959 | py | Python | tests/cli/conftest.py | mmchougule/kedro | 6a04ab1f7b6b943008e298d41624a79c04a8ff8c | [
"Apache-2.0"
] | null | null | null | tests/cli/conftest.py | mmchougule/kedro | 6a04ab1f7b6b943008e298d41624a79c04a8ff8c | [
"Apache-2.0"
] | null | null | null | tests/cli/conftest.py | mmchougule/kedro | 6a04ab1f7b6b943008e298d41624a79c04a8ff8c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains the fixtures that are reusable by any tests within
this directory. You don’t need to import the fixtures as pytest will
discover them automatically. More info here:
https://docs.pytest.org/en/latest/fixture.html
"""
from os import makedirs
from click.testing import CliRunner
from pytest import fixture
MOCKED_HOME = "user/path/"


@fixture(name="cli_runner")
def cli_runner_fixture():
    """Yields a ``CliRunner`` inside an isolated filesystem that contains a
    mocked home directory."""
    cli = CliRunner()
    with cli.isolated_filesystem():
        makedirs(MOCKED_HOME)
        yield cli
| 39.18 | 77 | 0.771312 |
795285d260c0372d12a9a7e26a2e218533b59cfb | 950 | py | Python | tests/numpy/assignment_test.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | [
"BSD-3-Clause"
] | 1 | 2021-07-26T07:58:06.000Z | 2021-07-26T07:58:06.000Z | tests/numpy/assignment_test.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | [
"BSD-3-Clause"
] | null | null | null | tests/numpy/assignment_test.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | [
"BSD-3-Clause"
] | 1 | 2021-03-04T13:01:48.000Z | 2021-03-04T13:01:48.000Z | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
def test_multiassign():
    # Chained assignment inside a dace program: both C[0] and the local
    # ``tmp`` receive A[5], and B[0] is then set from ``tmp``
    @dace.program
    def multiassign(A: dace.float64[20], B: dace.float64[1],
                    C: dace.float64[2]):
        tmp = C[0] = A[5]
        B[0] = tmp

    A = np.random.rand(20)
    B = np.random.rand(1)
    C = np.random.rand(2)
    multiassign(A, B, C)
    # All three locations must hold the same value after the call
    # (B has shape (1,), so the comparison reduces to a single truth value)
    assert B == C[0] and C[0] == A[5]
def test_multiassign_mutable():
    # ``mutable`` both mutates its argument (D[0] += 1) and returns the new
    # value, exercising chained assignment from a call with side effects
    @dace.program
    def mutable(D: dace.float64[2]):
        D[0] += 1
        return D[0]

    @dace.program
    def multiassign(B: dace.float64[1],
                    C: dace.float64[2]):
        tmp = C[1] = mutable(C)
        B[0] = tmp

    B = np.random.rand(1)
    C = np.random.rand(2)
    expected = C[0] + 1  # the value ``mutable`` will produce from C[0]
    multiassign(B, C)
    assert B[0] == expected and C[1] == expected


if __name__ == '__main__':
    test_multiassign()
    test_multiassign_mutable()
| 22.619048 | 75 | 0.562105 |
7952867b7a1e0dee470825aee2549ae7d361a615 | 6,733 | py | Python | python/test/test_game_state.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | [
"CC-BY-3.0"
] | null | null | null | python/test/test_game_state.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | [
"CC-BY-3.0"
] | null | null | null | python/test/test_game_state.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | [
"CC-BY-3.0"
] | null | null | null | from python.lib.events import Events
from python.lib.board_state import BoardState
from python.lib.game_state import GameState
from unittest.mock import MagicMock
from collections import Counter
from .constants import (
DEFAULT_EXPECTED_OUTPUT_GRID_MAP,
DEFAULT_ELEVATION_MAP,
DEFAULT_ELEVATION_MAP_SHAPE,
)
# Fixed seed so that any randomized setup behavior is reproducible in tests
DEFAULT_RANDOM_SEED = 20201224

# Fully-specified fixture state: a grid built from the shared elevation map
# plus two teams ("t1"/"t2") of three characters each at hand-picked
# positions, so assertions against the resulting state are deterministic
DEFAULT_INITIAL_GAME_STATE = {
    "grid": BoardState.create_grid_map_from_elevation_array(
        DEFAULT_ELEVATION_MAP, DEFAULT_ELEVATION_MAP_SHAPE
    ),
    "characters": [
        {
            "id": "i1",
            "position": (1, 0, 1),
            "name": "c1",
            "hit_points": 1,
            "last_position": (1, 0, 1),
            "team_id": "t2",
        },
        {
            "id": "i2",
            "position": (5, 2, 0),
            "name": "c2",
            "hit_points": 1,
            "last_position": (5, 2, 0),
            "team_id": "t1",
        },
        {
            "id": "i3",
            "position": (6, 0, 1),
            "name": "c3",
            "hit_points": 1,
            "last_position": (6, 0, 1),
            "team_id": "t1",
        },
        {
            "id": "i4",
            "position": (4, 0, 2),
            "name": "c4",
            "hit_points": 1,
            "last_position": (4, 0, 2),
            "team_id": "t2",
        },
        {
            "id": "i5",
            "position": (7, -1, 5),
            "name": "c5",
            "hit_points": 1,
            "last_position": (7, -1, 5),
            "team_id": "t1",
        },
        {
            "id": "i6",
            "position": (7, 0, 9),
            "name": "c6",
            "hit_points": 1,
            "last_position": (7, 0, 9),
            "team_id": "t2",
        },
    ],
    "teams": [
        {"id": "t1", "name": "t1"},
        {"id": "t2", "name": "t2"},
    ],
    "num_characters_per_team": 3,
    "num_teams": 2,
}
def test_GameState_setup():
    """Setup with fixed positions/teams broadcasts a move-success event and
    produces the expected initial state."""
    gs = GameState(
        events=Events,
        initial_state=DEFAULT_INITIAL_GAME_STATE,
        rng_seed=DEFAULT_RANDOM_SEED,
    )
    gs.broadcast = MagicMock(name="broadcast")
    gs.setup(skip_character_team_assignment=True, skip_character_repositioning=True)
    gs.broadcast.assert_called_with(
        Events.EVENT_PLAYER_MOVE_SUCCESS,
        gs.selected_character.position,
        gs.selected_character.position,
    )

    output_terrain_grid = DEFAULT_EXPECTED_OUTPUT_GRID_MAP

    # Movement metadata computed for the initially selected character
    output_meta_grid = {
        (2, 1, 1): BoardState.NEIGHBOR_TILE,
        (1, 0, 1): BoardState.MOVABLE_TILE,
        (2, 1, 0): BoardState.MOVABLE_TILE,
        (0, 0, 0): BoardState.MOVABLE_TILE,
        (1, 0, 0): BoardState.NEIGHBOR_TILE,
        (2, 1, 2): BoardState.MOVABLE_TILE,
        (1, 0, 3): BoardState.MOVABLE_TILE,
        (0, 0, 2): BoardState.MOVABLE_TILE,
        (1, 0, 2): BoardState.NEIGHBOR_TILE,
        (0, 0, 1): BoardState.NEIGHBOR_TILE,
    }

    expected_output_state = {
        "selected_character": DEFAULT_INITIAL_GAME_STATE["characters"][0],
        "grid": output_terrain_grid,
        "meta_grid": output_meta_grid,
        "characters": DEFAULT_INITIAL_GAME_STATE["characters"],
        "teams": DEFAULT_INITIAL_GAME_STATE["teams"],
    }

    actual_output_state = gs.state()

    # Compare key-by-key so a failure pinpoints the mismatched entry
    for k in actual_output_state:
        assert actual_output_state[k] == expected_output_state[k]
def test_GameState_setup_with_auto_assigned_positions_and_teams():
    """When no characters/teams are supplied, setup generates them."""
    initial_state = {**DEFAULT_INITIAL_GAME_STATE, "characters": [], "teams": []}
    game = GameState(
        events=Events,
        initial_state=initial_state,
        rng_seed=DEFAULT_RANDOM_SEED,
    )
    game.broadcast = MagicMock(name="broadcast")
    game.setup()

    game.broadcast.assert_called_with(
        Events.EVENT_PLAYER_MOVE_SUCCESS,
        game.selected_character.position,
        game.selected_character.position,
    )

    expected_num_characters = int(
        initial_state["num_characters_per_team"] * initial_state["num_teams"]
    )
    assert len(game.characters) == expected_num_characters
    assert len(game.teams) == initial_state["num_teams"]
def test_GameState_on_request_player_move():
    """Valid moves relocate the selected character; invalid moves do not."""
    game = GameState(
        events=Events,
        initial_state=DEFAULT_INITIAL_GAME_STATE,
        rng_seed=DEFAULT_RANDOM_SEED,
    )
    game.broadcast = MagicMock(name="broadcast")

    # Use fixed positions/teams
    game.setup(skip_character_team_assignment=True, skip_character_repositioning=True)

    # The selected character starts at its configured position
    assert (
        game.state()["selected_character"]["position"]
        == DEFAULT_INITIAL_GAME_STATE["characters"][0]["position"]
    )

    # A valid move succeeds and relocates the character
    game.on_request_player_move((0, 0, 1))
    from_pos = (1, 0, 1)  # aka character start position
    to_pos = (1, 0, 2)
    game.broadcast.assert_called_with(
        Events.EVENT_PLAYER_MOVE_SUCCESS,
        from_pos,
        to_pos,
    )
    assert game.state()["selected_character"]["position"] == to_pos

    # An invalid move fails and leaves the character in place
    game.on_request_player_move((0, 0, 10))
    assert game.state()["selected_character"]["position"] == to_pos
    game.broadcast.assert_called_with(
        Events.EVENT_PLAYER_MOVE_FAILED,
        to_pos,
        to_pos,
    )
def test_GameState_create_characters():
    """Auto-generated characters get unique ids/positions and balanced teams."""
    game = GameState(
        events=Events,
        initial_state={
            "grid": DEFAULT_INITIAL_GAME_STATE["grid"],
            "num_characters_per_team": 3,
            "num_teams": 2,
        },
    )
    game.setup()

    positions = [c.position for c in game.characters]
    ids = [c.id for c in game.characters]
    character_team_ids = [c.team_id for c in game.characters]
    team_ids = [t.id for t in game.teams]

    total = game.get_total_num_characters()

    assert len(game.characters) > 0

    # Every character has a unique id
    assert len(ids) == total
    assert len(ids) == len(set(ids))

    # Every character occupies a unique position on the grid
    assert len(positions) == total
    assert len(positions) == len(set(positions))

    # Characters span exactly `num_teams` distinct teams
    assert len(set(character_team_ids)) == game.num_teams

    # Teams are evenly populated
    assert Counter(character_team_ids) == Counter(
        {
            (team_ids[0]): 3,
            (team_ids[1]): 3,
        }
    )
| 27.822314 | 84 | 0.592455 |
795286e6c4d68a7895a662a1b66cd74796d95c8a | 119 | py | Python | problems/Codeforces/A. Ehab Fails to Be Thanos.py | jspw/Basic_Python | aa159f576a471c6deebdf1e5f462dfc9ffb4930b | [
"Unlicense"
] | 6 | 2020-06-25T14:52:09.000Z | 2021-08-05T20:54:15.000Z | problems/Codeforces/A. Ehab Fails to Be Thanos.py | jspw/Basic_Python | aa159f576a471c6deebdf1e5f462dfc9ffb4930b | [
"Unlicense"
] | null | null | null | problems/Codeforces/A. Ehab Fails to Be Thanos.py | jspw/Basic_Python | aa159f576a471c6deebdf1e5f462dfc9ffb4930b | [
"Unlicense"
] | null | null | null | n=int(input())
l=sorted(list(map(int,input().split())))
if l[0]==l[-1]:
print(-1)
else :
l.sort()
print(*l) | 17 | 40 | 0.529412 |
7952876c1c0dd5c3b60b0b613ba4ac4f86bc93d9 | 5,137 | py | Python | lib/focaltouch.py | szczys/st7789_mpy | bc854ec453d7644ce1773f7ed4d41504f37d376b | [
"MIT"
] | 153 | 2020-02-02T11:03:14.000Z | 2022-03-30T05:47:07.000Z | lib/focaltouch.py | skylin008/st7789_mpy | f304991fc5558be653df5f0de928494b85cbc60d | [
"MIT"
] | 58 | 2020-04-11T23:23:02.000Z | 2022-03-26T20:45:23.000Z | lib/focaltouch.py | skylin008/st7789_mpy | f304991fc5558be653df5f0de928494b85cbc60d | [
"MIT"
] | 50 | 2020-02-02T11:05:23.000Z | 2022-03-22T15:24:42.000Z | # The MIT License (MIT)
#
# Copyright (c) 2017 ladyada for adafruit industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_focaltouch`
====================================================
CircuitPython driver for common low-cost FocalTech capacitive touch chips.
Currently supports FT6206 & FT6236.
* Author(s): ladyada
* adopted for micropython => franz schaefer (mond)
Implementation Notes
--------------------
**Hardware:**
* Adafruit `2.8" TFT LCD with Cap Touch Breakout Board w/MicroSD Socket
<http://www.adafruit.com/product/2090>`_ (Product ID: 2090)
* Adafruit `2.8" TFT Touch Shield for Arduino w/Capacitive Touch
<http://www.adafruit.com/product/1947>`_ (Product ID: 1947)
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the ESP8622 and M0-based boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library (when using I2C/SPI):
https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
# imports
try:
import struct
except ImportError:
import ustruct as struct
from machine import SoftI2C
from micropython import const
# Default I2C address used by the FT6206
_FT6206_DEFAULT_I2C_ADDR = 0x38

# FT6xxx register addresses (see the driver methods below for usage)
_FT6XXX_REG_DATA = const(0x00)  # start of the touch-data block
_FT6XXX_REG_NUMTOUCHES = const(0x02)  # number of active touches
_FT6XXX_REG_THRESHHOLD = const(0x80)  # touch detection threshold
_FT6XXX_REG_POINTRATE = const(0x88)  # point reporting rate, in Hz
_FT6XXX_REG_LIBH = const(0xA1)  # library version, high byte
_FT6XXX_REG_LIBL = const(0xA2)  # library version, low byte
_FT6XXX_REG_CHIPID = const(0xA3)  # chip identifier
_FT6XXX_REG_FIRMVERS = const(0xA6)  # firmware version
_FT6XXX_REG_VENDID = const(0xA8)  # vendor ID
_FT6XXX_REG_RELEASE = const(0xAF)  # release code
class FocalTouch:
    """
    A driver for the FocalTech capacitive touch sensor.
    """

    _debug = False
    chip = None

    def __init__(self, i2c, address=_FT6206_DEFAULT_I2C_ADDR, debug=False):
        self.bus = i2c
        self.address = address
        self._debug = debug

        # Read the 8-byte identification block starting at the library
        # version register: version (u16), chip id, two reserved bytes,
        # firmware id, reserved, vendor id
        ident = self._read(_FT6XXX_REG_LIBH, 8)
        lib_ver, chip_id, _, _, firm_id, _, vend_id = struct.unpack(
            ">HBBBBBB", ident
        )
        if debug:
            print("Vendor ID %02x" % vend_id)
        self.vend_id = vend_id

        known_chips = {0x06: "FT6206", 0x64: "FT6236"}
        if chip_id in known_chips:
            self.chip = known_chips[chip_id]
        elif debug:
            print("Chip Id: %02x" % chip_id)

        if debug:
            print("Library vers %04X" % lib_ver)
            print("Firmware ID %02X" % firm_id)
            print("Point rate %d Hz" % self._read(_FT6XXX_REG_POINTRATE, 1)[0])
            print("Thresh %d" % self._read(_FT6XXX_REG_THRESHHOLD, 1)[0])

    @property
    def touched(self):
        """ Returns the number of touches currently detected """
        return self._read(_FT6XXX_REG_NUMTOUCHES, 1)[0]

    @property
    def touches(self):
        """
        Returns a list of touchpoint dicts, with 'x' and 'y' containing the
        touch coordinates, and 'id' as the touch # for multitouch tracking
        """
        data = self._read(_FT6XXX_REG_DATA, 32)

        points = []
        for n in range(2):
            raw = data[n * 6 + 3 : n * 6 + 9]
            if all(b == 0xFF for b in raw):
                # Slot not in use
                continue

            x, y, _weight, _misc = struct.unpack(">HHBB", raw)
            # Touch id lives in the top nibble of the y word; the
            # coordinates occupy the low 12 bits of each word
            points.append({"x": x & 0xFFF, "y": y & 0xFFF, "id": y >> 12})

        return points

    def _read(self, reg, length):
        """Returns an array of 'length' bytes from the 'register'"""
        buf = bytearray(length)
        self.bus.readfrom_mem_into(self.address, reg, buf)
        if self._debug:
            print("\t$%02X => %s" % (reg, [hex(b) for b in buf]))
        return buf

    def _write(self, reg, values):
        """Writes an array of 'length' bytes to the 'register'"""
        values = [(v & 0xFF) for v in values]
        self.bus.writeto_mem(self.address, reg, bytes(values))
        if self._debug:
            print("\t$%02X <= %s" % (reg, [hex(v) for v in values]))
| 32.929487 | 79 | 0.648628 |
7952877367eb3250754b22940bea511418ee9af9 | 620 | py | Python | app/members/admin.py | FinalProject-Team4/Server_DaangnMarket | 258eb8c79fcbed69d516f521e6194cac7b315a58 | [
"MIT"
] | 1 | 2020-06-20T12:52:07.000Z | 2020-06-20T12:52:07.000Z | app/members/admin.py | FinalProject-Team4/Server_DaangnMarket | 258eb8c79fcbed69d516f521e6194cac7b315a58 | [
"MIT"
] | 34 | 2020-03-16T12:09:14.000Z | 2021-04-08T20:23:42.000Z | app/members/admin.py | FinalProject-Team4/Server_DaangnMarket | 258eb8c79fcbed69d516f521e6194cac7b315a58 | [
"MIT"
] | 2 | 2020-03-22T06:45:39.000Z | 2020-03-22T06:55:11.000Z | from django.contrib import admin
from .models import User, SelectedLocation
@admin.register(User)
class MembersAdmin(admin.ModelAdmin):
    """Django admin configuration for the custom User model."""
    # Columns shown in the change-list table.
    list_display = ['username', 'phone', 'avatar']
    # Right-hand sidebar filters.
    list_filter = ['username', 'created', 'updated']
    # Fields searched by the admin search box.
    search_fields = ['username', 'phone']
    # Field order on the edit form; timestamps are display-only.
    fields = ('uid', 'username', 'phone', 'avatar', 'created', 'updated')
    readonly_fields = ("created", 'updated',)
@admin.register(SelectedLocation)
class MembersAdmin(admin.ModelAdmin):
    """Django admin configuration for SelectedLocation records."""
    # NOTE(review): this class reuses the name ``MembersAdmin`` from the admin
    # above, shadowing it at module level. Registration still works because
    # the decorator runs at definition time, but renaming this class (e.g. to
    # ``SelectedLocationAdmin``) would avoid confusion — confirm nothing
    # imports the name before renaming.
    list_display = ['username', 'locate', 'verified', 'activated', 'distance']
    def username(self, obj):
        # Computed column: show the owning user's username in the change list.
        return obj.user.username
| 31 | 78 | 0.685484 |
795287be0cc014c0e5cdf118dff4abd34eff118c | 15,661 | py | Python | lib/datasets/comic.py | PhoneSix/Domain-Contrast | 5c674b581bce9beacf5bc0dd13113f33c4050495 | [
"MIT"
] | 4 | 2021-07-31T01:04:15.000Z | 2022-03-09T07:23:10.000Z | lib/datasets/comic.py | PhoneSix/Domain-Contrast | 5c674b581bce9beacf5bc0dd13113f33c4050495 | [
"MIT"
] | null | null | null | lib/datasets/comic.py | PhoneSix/Domain-Contrast | 5c674b581bce9beacf5bc0dd13113f33c4050495 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import xml.dom.minidom as minidom
import os
# import PIL
import numpy as np
import scipy.sparse
import subprocess
import math
import glob
import uuid
import scipy.io as sio
import xml.etree.ElementTree as ET
import pickle
from .imdb import imdb
from .imdb import ROOT_DIR
from . import ds_utils
from .voc_eval import voc_eval
import time
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from model.utils.config import cfg
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
# <<<< obsolete
class comic(imdb):
    """PASCAL VOC-style imdb for the Comic cross-domain detection dataset.

    The dataset directory is laid out exactly like a VOCdevkit tree
    (JPEGImages/, Annotations/, ImageSets/Main/), restricted to the 6
    categories shared with Pascal VOC.
    """

    def __init__(self, image_set, year, devkit_path=None):
        """
        :param image_set: split name, e.g. 'trainval' or 'test'
        :param year: dataset year string, e.g. '2007'
        :param devkit_path: root of the devkit; defaults to cfg.DATA_DIR
        """
        imdb.__init__(self, 'comic_' + year + '_' + image_set)
        self._year = year
        self._image_set = image_set
        self._devkit_path = self._get_default_path() if devkit_path is None \
            else devkit_path
        self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
        # Full VOC class list kept for reference:
        # self._classes = ('__background__',  # always index 0
        #                  'aeroplane', 'bicycle', 'bird', 'boat',
        #                  'bottle', 'bus', 'car', 'cat', 'chair',
        #                  'cow', 'diningtable', 'dog', 'horse',
        #                  'motorbike', 'person', 'pottedplant',
        #                  'sheep', 'sofa', 'train', 'tvmonitor')
        self._classes = ('__background__',  # always index 0
                         'bicycle', 'bird', 'car', 'cat', 'dog', 'person')
        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        self._image_ext = '.jpg'
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        # self._roidb_handler = self.selective_search_roidb
        self._roidb_handler = self.gt_roidb
        self._salt = str(uuid.uuid4())
        self._comp_id = 'comp4'
        # PASCAL specific config options
        self.config = {'cleanup': True,
                       'use_salt': True,
                       'use_diff': False,
                       'matlab_eval': False,
                       'rpn_file': None,
                       'min_size': 2}
        assert os.path.exists(self._devkit_path), \
            'VOCdevkit path does not exist: {}'.format(self._devkit_path)
        assert os.path.exists(self._data_path), \
            'Path does not exist: {}'.format(self._data_path)

    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self.image_path_from_index(self._image_index[i])

    def image_id_at(self, i):
        """
        Return the numeric id of image i (the index itself for this dataset).
        """
        return i

    def image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.
        """
        image_path = os.path.join(self._data_path, 'JPEGImages',
                                  index + self._image_ext)
        assert os.path.exists(image_path), \
            'Path does not exist: {}'.format(image_path)
        return image_path

    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.
        """
        # Example path to image set file:
        # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
        image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
                                      self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
            'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_index = [x.strip() for x in f.readlines()]
        return image_index

    def _get_default_path(self):
        """
        Return the default path where the dataset is expected to be installed.
        """
        # return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
        return cfg.DATA_DIR

    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        gt_roidb = [self._load_pascal_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb

    def selective_search_roidb(self):
        """
        Return the database of selective search regions of interest.
        Ground-truth ROIs are also included.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path,
                                  self.name + '_selective_search_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            print('{} ss roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        if int(self._year) == 2007 or self._image_set != 'test':
            gt_roidb = self.gt_roidb()
            ss_roidb = self._load_selective_search_roidb(gt_roidb)
            roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
        else:
            roidb = self._load_selective_search_roidb(None)
        with open(cache_file, 'wb') as fid:
            pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote ss roidb to {}'.format(cache_file))
        return roidb

    def rpn_roidb(self):
        """Return the roidb built from externally computed RPN proposals,
        merged with ground truth where available."""
        if int(self._year) == 2007 or self._image_set != 'test':
            gt_roidb = self.gt_roidb()
            rpn_roidb = self._load_rpn_roidb(gt_roidb)
            roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
        else:
            roidb = self._load_rpn_roidb(None)
        return roidb

    def _load_rpn_roidb(self, gt_roidb):
        """Load pickled RPN proposal boxes from config['rpn_file']."""
        filename = self.config['rpn_file']
        print('loading {}'.format(filename))
        assert os.path.exists(filename), \
            'rpn data not found at: {}'.format(filename)
        with open(filename, 'rb') as f:
            box_list = pickle.load(f)
        return self.create_roidb_from_box_list(box_list, gt_roidb)

    def _load_selective_search_roidb(self, gt_roidb):
        """Load selective-search boxes from the precomputed .mat file,
        dropping duplicates and boxes smaller than config['min_size']."""
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
            'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()
        box_list = []
        for i in xrange(raw_data.shape[0]):
            # .mat stores (y1, x1, y2, x2) 1-based; convert to 0-based xyxy.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)
        return self.create_roidb_from_box_list(box_list, gt_roidb)

    def _load_pascal_annotation(self, index):
        """
        Load image and bounding boxes info from XML file in the PASCAL VOC
        format.
        """
        filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
        tree = ET.parse(filename)
        objs = tree.findall('object')
        # if not self.config['use_diff']:
        #     # Exclude the samples labeled as difficult
        #     non_diff_objs = [
        #         obj for obj in objs if int(obj.find('difficult').text) == 0]
        #     objs = non_diff_objs
        num_objs = len(objs)
        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        # "Seg" area for pascal is just the box area
        seg_areas = np.zeros((num_objs), dtype=np.float32)
        ishards = np.zeros((num_objs), dtype=np.int32)
        # Load object bounding boxes into a data frame.
        for ix, obj in enumerate(objs):
            bbox = obj.find('bndbox')
            # Make pixel indexes 0-based
            x1 = float(bbox.find('xmin').text) - 1
            y1 = float(bbox.find('ymin').text) - 1
            x2 = float(bbox.find('xmax').text) - 1
            y2 = float(bbox.find('ymax').text) - 1
            diffc = obj.find('difficult')
            # Use identity (`is None`); Element truthiness/`== None` is
            # non-idiomatic and deprecated for ElementTree nodes.
            difficult = 0 if diffc is None else int(diffc.text)
            ishards[ix] = difficult
            cls = self._class_to_ind[obj.find('name').text.lower().strip()]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0
            seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
        overlaps = scipy.sparse.csr_matrix(overlaps)
        return {'boxes': boxes,
                'gt_classes': gt_classes,
                'gt_ishard': ishards,
                'gt_overlaps': overlaps,
                'flipped': False,
                'seg_areas': seg_areas}

    def _get_comp_id(self):
        """Return the competition id, salted when config['use_salt'] is on."""
        comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
                   else self._comp_id)
        return comp_id

    def _get_voc_results_file_template(self):
        """Return the per-class results path template, creating its dir."""
        # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
        filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
        filedir = os.path.join(self._devkit_path, 'results', 'VOC' + self._year, 'Main')
        if not os.path.exists(filedir):
            os.makedirs(filedir)
        path = os.path.join(filedir, filename)
        return path

    def _write_voc_results_file(self, all_boxes):
        """Write detections to one VOC-format results file per class."""
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print('Writing {} VOC results file'.format(cls))
            filename = self._get_voc_results_file_template().format(cls)
            with open(filename, 'wt') as f:
                for im_ind, index in enumerate(self.image_index):
                    dets = all_boxes[cls_ind][im_ind]
                    # len() is safe for both lists and ndarrays; `dets == []`
                    # is an elementwise (ambiguous) comparison once dets is a
                    # numpy array.
                    if len(dets) == 0:
                        continue
                    # the VOCdevkit expects 1-based indices
                    for k in xrange(dets.shape[0]):
                        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                                format(index, dets[k, -1],
                                       dets[k, 0] + 1, dets[k, 1] + 1,
                                       dets[k, 2] + 1, dets[k, 3] + 1))

    def _do_python_eval(self, output_dir='output'):
        """Run the Python VOC evaluation, print per-class AP/mAP and append
        the results to <output_dir>/test_results.txt."""
        annopath = os.path.join(
            self._devkit_path,
            'VOC' + self._year,
            'Annotations',
            '{:s}.xml')
        imagesetfile = os.path.join(
            self._devkit_path,
            'VOC' + self._year,
            'ImageSets',
            'Main',
            self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            filename = self._get_voc_results_file_template().format(cls)
            rec, prec, ap = voc_eval(
                filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
                use_07_metric=use_07_metric)
            aps += [ap]
            print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
        # open(..., 'a') creates the file when missing, so the non-portable
        # os.mknod call (needs privileges on some platforms) is unnecessary.
        with open(os.path.join(output_dir, 'test_results.txt'), 'a') as f:
            f.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) + '\n')
            f.write("mAP: " + str(np.mean(aps)) + '\n')
            f.write('AP: ' + '\n')
            for ap in aps:
                f.write(str(ap) + '\n')
            f.write('\n')

    def _do_matlab_eval(self, output_dir='output'):
        """Run the official MATLAB VOC evaluation via a subprocess call."""
        print('-----------------------------------------------------')
        print('Computing results with the official MATLAB eval code.')
        print('-----------------------------------------------------')
        path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
                            'VOCdevkit-matlab-wrapper')
        cmd = 'cd {} && '.format(path)
        cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
        cmd += '-r "dbstop if error; '
        cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
            .format(self._devkit_path, self._get_comp_id(),
                    self._image_set, output_dir)
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        """Write results files, evaluate them, and clean up if configured."""
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        """Toggle competition mode: keep unsalted ids and results files."""
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True
if __name__ == '__main__':
    # Quick interactive smoke test: build the trainval-2007 roidb and drop
    # into an IPython shell for manual inspection.
    d = comic('trainval', '2007')
    res = d.roidb
    from IPython import embed;
    embed()
| 39.748731 | 88 | 0.544921 |
795288a84376f9da54f13f0e7e81cd55de4b4b58 | 2,791 | py | Python | superset/dashboards/commands/delete.py | AmritaTech/superset | c685c9ea8fa70ba6646617d0a272c11e1130081c | [
"Apache-2.0"
] | 44 | 2021-04-14T10:53:36.000Z | 2021-09-11T00:29:50.000Z | superset/dashboards/commands/delete.py | AmritaTech/superset | c685c9ea8fa70ba6646617d0a272c11e1130081c | [
"Apache-2.0"
] | 77 | 2020-02-02T07:54:13.000Z | 2022-03-23T18:22:04.000Z | superset/dashboards/commands/delete.py | AmritaTech/superset | c685c9ea8fa70ba6646617d0a272c11e1130081c | [
"Apache-2.0"
] | 15 | 2019-04-29T05:38:31.000Z | 2022-02-12T10:47:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Optional
from flask_appbuilder.models.sqla import Model
from flask_appbuilder.security.sqla.models import User
from flask_babel import lazy_gettext as _
from superset.commands.base import BaseCommand
from superset.dao.exceptions import DAODeleteFailedError
from superset.dashboards.commands.exceptions import (
DashboardDeleteFailedError,
DashboardDeleteFailedReportsExistError,
DashboardForbiddenError,
DashboardNotFoundError,
)
from superset.dashboards.dao import DashboardDAO
from superset.exceptions import SupersetSecurityException
from superset.models.dashboard import Dashboard
from superset.reports.dao import ReportScheduleDAO
from superset.views.base import check_ownership
logger = logging.getLogger(__name__)
class DeleteDashboardCommand(BaseCommand):
    """Delete a single dashboard after validating that it exists, has no
    associated alerts/reports, and that the acting user owns it."""

    def __init__(self, user: User, model_id: int):
        self._actor = user
        self._model_id = model_id
        self._model: Optional[Dashboard] = None

    def run(self) -> Model:
        """Execute the deletion and return the deleted model.

        :raises DashboardNotFoundError: dashboard id does not exist
        :raises DashboardDeleteFailedReportsExistError: reports reference it
        :raises DashboardForbiddenError: actor is not an owner
        :raises DashboardDeleteFailedError: the DAO delete itself failed
        """
        self.validate()
        try:
            dashboard = DashboardDAO.delete(self._model)
        except DAODeleteFailedError as ex:
            logger.exception(ex.exception)
            # Chain the DAO error so the root cause survives in tracebacks.
            raise DashboardDeleteFailedError() from ex
        return dashboard

    def validate(self) -> None:
        """Run all pre-deletion checks, raising on the first failure."""
        # Validate/populate model exists
        self._model = DashboardDAO.find_by_id(self._model_id)
        if not self._model:
            raise DashboardNotFoundError()
        # Check there are no associated ReportSchedules
        reports = ReportScheduleDAO.find_by_dashboard_id(self._model_id)
        if reports:
            report_names = [report.name for report in reports]
            raise DashboardDeleteFailedReportsExistError(
                _("There are associated alerts or reports: %s" % ",".join(report_names))
            )
        # Check ownership
        try:
            check_ownership(self._model)
        except SupersetSecurityException as ex:
            raise DashboardForbiddenError() from ex
| 38.232877 | 88 | 0.73522 |
795288afea901a1773f78721bd3b16d2bd93da9e | 960 | py | Python | 11_data_science/pdf/test_pdf.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | [
"Apache-2.0"
] | 1 | 2022-03-06T13:03:56.000Z | 2022-03-06T13:03:56.000Z | 11_data_science/pdf/test_pdf.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | [
"Apache-2.0"
] | null | null | null | 11_data_science/pdf/test_pdf.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @author: edgardeng
# @date: 2021-05-25
# @file: demo fo PyPDF4
import PyPDF4
def read_pdf(path_file):
    """Print the document metadata of the PDF at *path_file*, then the
    extracted text and raw content object of every page."""
    with open(path_file, 'rb') as fh:
        pdf = PyPDF4.PdfFileReader(fh)
        information = pdf.getDocumentInfo()
        number_of_pages = pdf.numPages
        txt = f"""
        Information about {path_file}: 
        Author: {information.author}
        Creator: {information.creator}
        Producer: {information.producer}
        Subject: {information.subject}
        Title: {information.title}
        Number of pages: {number_of_pages}
        """
        print(txt)
        for page_index in range(pdf.numPages):
            print('*' * 40)
            page = pdf.getPage(page_index)
            print(page.extractText())
            print('-' * 40)
            print(page.getContents())
if __name__ == '__main__':
    # NOTE(review): hard-coded Windows path to a sample report PDF; adjust
    # for the local environment before running.
    path = r'D:\聚均科技-研发\2021 聚均科技AI平台\OCR\天津锦湖三年报表\2020年12月财务报表.pdf'
    read_pdf(path)
| 24.615385 | 68 | 0.586458 |
795288b10818070e31cc0b3233e82812d971a819 | 1,414 | py | Python | venv/lib/python3.8/site-packages/vsts/test/v4_0/models/points_filter.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/test/v4_0/models/points_filter.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/test/v4_0/models/points_filter.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class PointsFilter(Model):
    """PointsFilter.
    :param configuration_names:
    :type configuration_names: list of str
    :param testcase_ids:
    :type testcase_ids: list of int
    :param testers:
    :type testers: list of :class:`IdentityRef <test.v4_0.models.IdentityRef>`
    """
    # NOTE: auto-generated model (see the "Generated file, DO NOT EDIT"
    # header); regenerate from the API contract rather than hand-editing.
    # Maps each Python attribute to its JSON key and msrest type, used by the
    # msrest Model base class for (de)serialization.
    _attribute_map = {
        'configuration_names': {'key': 'configurationNames', 'type': '[str]'},
        'testcase_ids': {'key': 'testcaseIds', 'type': '[int]'},
        'testers': {'key': 'testers', 'type': '[IdentityRef]'}
    }
    def __init__(self, configuration_names=None, testcase_ids=None, testers=None):
        """Initialize the filter; every criterion defaults to None (no filter)."""
        super(PointsFilter, self).__init__()
        self.configuration_names = configuration_names
        self.testcase_ids = testcase_ids
        self.testers = testers
79528ac5eb9627b93487a7ff93f3cd0524d909a0 | 10,814 | py | Python | src/oci/apm_synthetics/models/update_script_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/apm_synthetics/models/update_script_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/apm_synthetics/models/update_script_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateScriptDetails(object):
"""
Details of the request body used to update a script.
Only Side or JavaScript content types are supported and content should be in Side or JavaScript formats only.
"""
#: A constant which can be used with the content_type property of a UpdateScriptDetails.
#: This constant has a value of "SIDE"
CONTENT_TYPE_SIDE = "SIDE"
#: A constant which can be used with the content_type property of a UpdateScriptDetails.
#: This constant has a value of "JS"
CONTENT_TYPE_JS = "JS"
def __init__(self, **kwargs):
"""
Initializes a new UpdateScriptDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this UpdateScriptDetails.
:type display_name: str
:param content_type:
The value to assign to the content_type property of this UpdateScriptDetails.
Allowed values for this property are: "SIDE", "JS"
:type content_type: str
:param content:
The value to assign to the content property of this UpdateScriptDetails.
:type content: str
:param content_file_name:
The value to assign to the content_file_name property of this UpdateScriptDetails.
:type content_file_name: str
:param parameters:
The value to assign to the parameters property of this UpdateScriptDetails.
:type parameters: list[oci.apm_synthetics.models.ScriptParameter]
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateScriptDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateScriptDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'display_name': 'str',
'content_type': 'str',
'content': 'str',
'content_file_name': 'str',
'parameters': 'list[ScriptParameter]',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'display_name': 'displayName',
'content_type': 'contentType',
'content': 'content',
'content_file_name': 'contentFileName',
'parameters': 'parameters',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._display_name = None
self._content_type = None
self._content = None
self._content_file_name = None
self._parameters = None
self._freeform_tags = None
self._defined_tags = None
@property
def display_name(self):
"""
Gets the display_name of this UpdateScriptDetails.
Unique name that can be edited. The name should not contain any confidential information.
:return: The display_name of this UpdateScriptDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this UpdateScriptDetails.
Unique name that can be edited. The name should not contain any confidential information.
:param display_name: The display_name of this UpdateScriptDetails.
:type: str
"""
self._display_name = display_name
@property
def content_type(self):
"""
Gets the content_type of this UpdateScriptDetails.
Content type of script.
Allowed values for this property are: "SIDE", "JS"
:return: The content_type of this UpdateScriptDetails.
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""
Sets the content_type of this UpdateScriptDetails.
Content type of script.
:param content_type: The content_type of this UpdateScriptDetails.
:type: str
"""
allowed_values = ["SIDE", "JS"]
if not value_allowed_none_or_none_sentinel(content_type, allowed_values):
raise ValueError(
"Invalid value for `content_type`, must be None or one of {0}"
.format(allowed_values)
)
self._content_type = content_type
@property
def content(self):
"""
Gets the content of this UpdateScriptDetails.
The content of the script. It may contain custom-defined tags that can be used for setting dynamic parameters.
The format to set dynamic parameters is: `<ORAP><ON>param name</ON><OV>param value</OV><OS>isParamValueSecret(true/false)</OS></ORAP>`.
Param value and isParamValueSecret are optional, the default value for isParamValueSecret is false.
Examples:
With mandatory param name : `<ORAP><ON>param name</ON></ORAP>`
With parameter name and value : `<ORAP><ON>param name</ON><OV>param value</OV></ORAP>`
Note that the content is valid if it matches the given content type. For example, if the content type is SIDE, then the content should be in Side script format. If the content type is JS, then the content should be in JavaScript format.
:return: The content of this UpdateScriptDetails.
:rtype: str
"""
return self._content
@content.setter
def content(self, content):
"""
Sets the content of this UpdateScriptDetails.
The content of the script. It may contain custom-defined tags that can be used for setting dynamic parameters.
The format to set dynamic parameters is: `<ORAP><ON>param name</ON><OV>param value</OV><OS>isParamValueSecret(true/false)</OS></ORAP>`.
Param value and isParamValueSecret are optional, the default value for isParamValueSecret is false.
Examples:
With mandatory param name : `<ORAP><ON>param name</ON></ORAP>`
With parameter name and value : `<ORAP><ON>param name</ON><OV>param value</OV></ORAP>`
Note that the content is valid if it matches the given content type. For example, if the content type is SIDE, then the content should be in Side script format. If the content type is JS, then the content should be in JavaScript format.
:param content: The content of this UpdateScriptDetails.
:type: str
"""
self._content = content
@property
def content_file_name(self):
"""
Gets the content_file_name of this UpdateScriptDetails.
File name of uploaded script content.
:return: The content_file_name of this UpdateScriptDetails.
:rtype: str
"""
return self._content_file_name
@content_file_name.setter
def content_file_name(self, content_file_name):
"""
Sets the content_file_name of this UpdateScriptDetails.
File name of uploaded script content.
:param content_file_name: The content_file_name of this UpdateScriptDetails.
:type: str
"""
self._content_file_name = content_file_name
@property
def parameters(self):
"""
Gets the parameters of this UpdateScriptDetails.
List of script parameters. Example: `[{\"paramName\": \"userid\", \"paramValue\":\"testuser\", \"isSecret\": false}]`
:return: The parameters of this UpdateScriptDetails.
:rtype: list[oci.apm_synthetics.models.ScriptParameter]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""
Sets the parameters of this UpdateScriptDetails.
List of script parameters. Example: `[{\"paramName\": \"userid\", \"paramValue\":\"testuser\", \"isSecret\": false}]`
:param parameters: The parameters of this UpdateScriptDetails.
:type: list[oci.apm_synthetics.models.ScriptParameter]
"""
self._parameters = parameters
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this UpdateScriptDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this UpdateScriptDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this UpdateScriptDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this UpdateScriptDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this UpdateScriptDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this UpdateScriptDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this UpdateScriptDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this UpdateScriptDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Explicit inverse of __eq__ (needed for Python 2 compatibility).
        return not self == other
| 37.161512 | 245 | 0.65369 |
79528b13a7d3e35e791aa2ed720aca24f869f998 | 7,291 | py | Python | env/lib/python3.6/site-packages/dipy/tracking/tests/test_life.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | env/lib/python3.6/site-packages/dipy/tracking/tests/test_life.py | Raniac/neurolearn_dev | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | env/lib/python3.6/site-packages/dipy/tracking/tests/test_life.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | import os
import os.path as op
import numpy as np
import numpy.testing as npt
import numpy.testing.decorators as dec
import scipy.sparse as sps
import scipy.linalg as la
import nibabel as nib
import dipy.tracking.life as life
import dipy.tracking.eudx as edx
import dipy.core.sphere as dps
import dipy.core.gradients as dpg
import dipy.data as dpd
import dipy.core.optimize as opt
import dipy.core.ndindex as nd
import dipy.core.gradients as grad
import dipy.reconst.dti as dti
from dipy.io.gradients import read_bvals_bvecs
THIS_DIR = op.dirname(__file__)
def test_streamline_gradients():
    """Central-difference gradients along a 4-point streamline."""
    points = [[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]]
    expected = np.array([[3, 3, 3], [2, 2, 2], [2, 2, 2], [3, 3, 3]])
    npt.assert_array_equal(life.streamline_gradients(points), expected)
def test_streamline_tensors():
    """Per-segment tensors must carry the requested eigenvalues and align
    their principal eigenvector with the streamline direction."""
    # Small streamline
    streamline = [[1, 2, 3], [4, 5, 3], [5, 6, 3]]
    # Non-default eigenvalues:
    evals = [0.0012, 0.0006, 0.0004]
    streamline_tensors = life.streamline_tensors(streamline, evals=evals)
    npt.assert_array_almost_equal(streamline_tensors[0],
                                  np.array([[0.0009, 0.0003, 0.],
                                            [0.0003, 0.0009, 0.],
                                            [0., 0., 0.0004]]))
    # Get the eigenvalues/eigenvectors:
    eigvals, eigvecs = la.eig(streamline_tensors[0])
    # Sort eigenpairs from largest to smallest eigenvalue.
    eigvecs = eigvecs[np.argsort(eigvals)[::-1]]
    eigvals = eigvals[np.argsort(eigvals)[::-1]]
    npt.assert_array_almost_equal(eigvals,
                                  np.array([0.0012, 0.0006, 0.0004]))
    # Principal direction is the (1,-1,0)/sqrt(2) diagonal of the first segment.
    npt.assert_array_almost_equal(eigvecs[0],
                                  np.array([0.70710678, -0.70710678, 0.]))
    # Another small streamline
    streamline = [[1, 0, 0], [2, 0, 0], [3, 0, 0]]
    streamline_tensors = life.streamline_tensors(streamline, evals=evals)
    for t in streamline_tensors:
        eigvals, eigvecs = la.eig(t)
        eigvecs = eigvecs[np.argsort(eigvals)[::-1]]
        # This one has no rotations - all tensors are simply the canonical:
        npt.assert_almost_equal(np.rad2deg(np.arccos(
            np.dot(eigvecs[0], [1, 0, 0]))), 0)
        npt.assert_almost_equal(np.rad2deg(np.arccos(
            np.dot(eigvecs[1], [0, 1, 0]))), 0)
        npt.assert_almost_equal(np.rad2deg(np.arccos(
            np.dot(eigvecs[2], [0, 0, 1]))), 0)
def test_streamline_signal():
    """streamline_signal must not mutate the streamlines it is given."""
    data_file, bval_file, bvec_file = dpd.get_fnames('small_64D')
    gtab = dpg.gradient_table(bval_file, bvec_file)
    eigenvalues = [0.0015, 0.0005, 0.0005]
    bundle_a = [[[1, 2, 3], [4, 5, 3], [5, 6, 3], [6, 7, 3]],
                [[1, 2, 3], [4, 5, 3], [5, 6, 3]]]
    for sl in bundle_a:
        life.streamline_signal(sl, gtab, eigenvalues)
    bundle_b = [[[1, 2, 3], [4, 5, 3], [5, 6, 3], [6, 7, 3]]]
    for sl in bundle_b:
        life.streamline_signal(sl, gtab, eigenvalues)
    # The shared first streamline must still be identical after both calls.
    npt.assert_array_equal(bundle_b[0], bundle_a[0])
def test_voxel2streamline():
    """Voxel->streamline mappings must be invariant to an affine transform."""
    streamline = [[[1.1, 2.4, 2.9], [4, 5, 3], [5, 6, 3], [6, 7, 3]],
                  [[1, 2, 3], [4, 5, 3], [5, 6, 3]]]
    affine = np.eye(4)
    v2f, v2fn = life.voxel2streamline(streamline, False, affine)
    # v2f: voxel index -> list of streamlines passing through that voxel.
    npt.assert_equal(v2f, {0: [0, 1], 1: [0, 1], 2: [0, 1], 3: [0]})
    # v2fn: appears to map streamline -> node -> voxel indices -- confirm
    # against life.voxel2streamline's docstring.
    npt.assert_equal(v2fn, {0: {0: [0], 1: [1], 2: [2], 3: [3]},
                            1: {0: [0], 1: [1], 2: [2]}})
    affine = np.array([[0.9, 0, 0, 10],
                       [0, 0.9, 0, -100],
                       [0, 0, 0.9, 2],
                       [0, 0, 0, 1]])
    # Move the streamlines into the new frame; the mapping must be unchanged.
    xform_sl = life.transform_streamlines(streamline, np.linalg.inv(affine))
    v2f, v2fn = life.voxel2streamline(xform_sl, False, affine)
    npt.assert_equal(v2f, {0: [0, 1], 1: [0, 1], 2: [0, 1], 3: [0]})
    npt.assert_equal(v2fn, {0: {0: [0], 1: [1], 2: [2], 3: [3]},
                            1: {0: [0], 1: [1], 2: [2]}})
def test_FiberModel_init():
    """FiberModel.setup must return the voxel coordinates and a
    (n_voxels * 64, n_streamlines) design matrix for every sphere option."""
    # Get some small amount of data:
    data_file, bval_file, bvec_file = dpd.get_fnames('small_64D')
    data_ni = nib.load(data_file)  # NOTE(review): loaded but never used here
    bvals, bvecs = read_bvals_bvecs(bval_file, bvec_file)
    gtab = dpg.gradient_table(bvals, bvecs)
    FM = life.FiberModel(gtab)
    streamline = [[[1, 2, 3], [4, 5, 3], [5, 6, 3], [6, 7, 3]],
                  [[1, 2, 3], [4, 5, 3], [5, 6, 3]]]
    affine = np.eye(4)
    for sphere in [None, False, dpd.get_sphere('symmetric362')]:
        fiber_matrix, vox_coords = FM.setup(streamline, affine, sphere=sphere)
        npt.assert_array_equal(np.array(vox_coords),
                               np.array([[1, 2, 3], [4, 5, 3],
                                         [5, 6, 3], [6, 7, 3]]))
        # 64 diffusion directions per voxel, one column per streamline.
        npt.assert_equal(fiber_matrix.shape, (len(vox_coords) * 64,
                                              len(streamline)))
def test_FiberFit():
    """Build a synthetic signal from two streamlines with known weights and
    check that the fitted model reproduces the stored data."""
    data_file, bval_file, bvec_file = dpd.get_fnames('small_64D')
    data_ni = nib.load(data_file)
    data = data_ni.get_data()
    data_aff = data_ni.affine  # NOTE(review): unused local
    bvals, bvecs = read_bvals_bvecs(bval_file, bvec_file)
    gtab = dpg.gradient_table(bvals, bvecs)
    FM = life.FiberModel(gtab)
    evals = [0.0015, 0.0005, 0.0005]
    streamline = [[[1, 2, 3], [4, 5, 3], [5, 6, 3], [6, 7, 3]],
                  [[1, 2, 3], [4, 5, 3], [5, 6, 3]]]
    fiber_matrix, vox_coords = FM.setup(streamline, None, evals)
    # Ground-truth weights for the two streamlines.
    w = np.array([0.5, 0.5])
    sig = opt.spdot(fiber_matrix, w) + 1.0  # Add some isotropic stuff
    S0 = data[..., gtab.b0s_mask]
    this_data = np.zeros((10, 10, 10, 64))
    # Scatter the synthetic signal into the 4 voxels the streamlines cross.
    this_data[vox_coords[:, 0], vox_coords[:, 1], vox_coords[:, 2]] =\
        (sig.reshape((4, 64)) *
         S0[vox_coords[:, 0], vox_coords[:, 1], vox_coords[:, 2]])
    # Grab some realistic S0 values:
    this_data = np.concatenate([data[..., gtab.b0s_mask], this_data], -1)
    fit = FM.fit(this_data, streamline)
    # decimal=-1 => agreement only to within ~10 (coarse, raw-signal scale).
    npt.assert_almost_equal(fit.predict()[1],
                            fit.data[1], decimal=-1)
    # Predict with an input GradientTable
    npt.assert_almost_equal(fit.predict(gtab)[1],
                            fit.data[1], decimal=-1)
    npt.assert_almost_equal(
        this_data[vox_coords[:, 0], vox_coords[:, 1], vox_coords[:, 2]],
        fit.data)
def test_fit_data():
    """End-to-end: track with EuDX on a tensor fit, then fit LiFE and compare
    the result against the reference Matlab implementation."""
    fdata, fbval, fbvec = dpd.get_fnames('small_25')
    gtab = grad.gradient_table(fbval, fbvec)
    ni_data = nib.load(fdata)
    data = ni_data.get_data()
    dtmodel = dti.TensorModel(gtab)
    dtfit = dtmodel.fit(data)
    sphere = dpd.get_sphere()
    peak_idx = dti.quantize_evecs(dtfit.evecs, sphere.vertices)
    # Seed a streamline in every voxel of the volume.
    eu = edx.EuDX(dtfit.fa.astype('f8'), peak_idx,
                  seeds=list(nd.ndindex(data.shape[:-1])),
                  odf_vertices=sphere.vertices, a_low=0)
    tensor_streamlines = [streamline for streamline in eu]
    life_model = life.FiberModel(gtab)
    life_fit = life_model.fit(data, tensor_streamlines)
    model_error = life_fit.predict() - life_fit.data
    # RMSE per voxel over the gradient-direction axis.
    model_rmse = np.sqrt(np.mean(model_error ** 2, -1))
    matlab_rmse, matlab_weights = dpd.matlab_life_results()
    # Lower error than the matlab implementation for these data:
    npt.assert_(np.median(model_rmse) < np.median(matlab_rmse))
    # And a moderate correlation with the Matlab implementation weights:
    npt.assert_(np.corrcoef(matlab_weights, life_fit.beta)[0, 1] > 0.6)
| 38.989305 | 78 | 0.583185 |
79528baf61119fd053ccc4e147226145b0cca41d | 381 | py | Python | ramda/difference.py | zydmayday/pamda | 6740d0294f3bedbeeef3bbc3042a43dceb3239b2 | [
"MIT"
] | 1 | 2022-03-14T07:35:13.000Z | 2022-03-14T07:35:13.000Z | ramda/difference.py | zydmayday/pamda | 6740d0294f3bedbeeef3bbc3042a43dceb3239b2 | [
"MIT"
] | 3 | 2022-03-24T02:30:18.000Z | 2022-03-31T07:46:04.000Z | ramda/difference.py | zydmayday/pamda | 6740d0294f3bedbeeef3bbc3042a43dceb3239b2 | [
"MIT"
] | null | null | null | from .private._curry2 import _curry2
from .private._Set import _Set
def inner_difference(first, second):
    """Return the items of `first` that do not appear in `second`, with
    duplicates removed, preserving first-seen order.

    NOTE(review): relies on _Set.add returning truthy only when the item was
    newly inserted -- confirm against ramda/private/_Set.
    """
    out = []
    idx = 0
    toFilterOut = _Set()
    for item in second:
        toFilterOut.add(item)
    firstLen = len(first)
    while idx < firstLen:
        # add() doubles as a membership probe: truthy means "not seen before".
        if toFilterOut.add(first[idx]):
            out.append(first[idx])
        idx += 1
    return out
# Curried public entry point: difference(a, b) or difference(a)(b)
# (currying presumably supplied by _curry2 -- see ramda/private/_curry2).
difference = _curry2(inner_difference)
| 17.318182 | 38 | 0.682415 |
79528ca1a2e970007f6cfbff361329f70b7e427a | 1,196 | py | Python | project-euler/python/p0008.py | sanchopanca/coding-for-pleasure | fed1910e8a5a4241bd55aed333afd79b4405a71d | [
"MIT"
] | null | null | null | project-euler/python/p0008.py | sanchopanca/coding-for-pleasure | fed1910e8a5a4241bd55aed333afd79b4405a71d | [
"MIT"
] | null | null | null | project-euler/python/p0008.py | sanchopanca/coding-for-pleasure | fed1910e8a5a4241bd55aed333afd79b4405a71d | [
"MIT"
] | null | null | null | from math import prod
number = """
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
""".replace('\n', '')
res = 0
for i in range(0, len(number) - 13):
p = prod(map(int, number[i:i+13]))
res = max(res, p)
print(res)
| 38.580645 | 50 | 0.91806 |
79528d7d344dc49cbed42df1664f6a692dec44fe | 2,776 | py | Python | custom_components/rika_firenet/switch.py | antibill51/rika-firenet-custom-component | ad81d12a466d953148ebbb1440f5fd8d81edd1d2 | [
"MIT"
] | 2 | 2022-02-05T11:53:23.000Z | 2022-02-19T23:55:56.000Z | custom_components/rika_firenet/switch.py | antibill51/rika-firenet-custom-component | ad81d12a466d953148ebbb1440f5fd8d81edd1d2 | [
"MIT"
] | null | null | null | custom_components/rika_firenet/switch.py | antibill51/rika-firenet-custom-component | ad81d12a466d953148ebbb1440f5fd8d81edd1d2 | [
"MIT"
] | null | null | null | import logging
from homeassistant.components.switch import SwitchEntity
from .entity import RikaFirenetEntity
from .const import (
DOMAIN
)
from .core import RikaFirenetCoordinator
from .core import RikaFirenetStove
# Module-level logger named after this integration module.
_LOGGER = logging.getLogger(__name__)
# Stove features exposed as individual Home Assistant switch entities.
DEVICE_SWITCH = [
    "on off",
    "convection fan1",
    "convection fan2",
    "heating times"
]
async def async_setup_entry(hass, entry, async_add_entities):
    """Create one switch entity per stove per DEVICE_SWITCH feature."""
    _LOGGER.info("setting up platform switches")
    coordinator: RikaFirenetCoordinator = hass.data[DOMAIN][entry.entry_id]
    entities = [
        RikaFirenetStoveBinarySwitch(entry, stove, coordinator, switch_name)
        for stove in coordinator.get_stoves()
        for switch_name in DEVICE_SWITCH
    ]
    if entities:
        async_add_entities(entities, True)
class RikaFirenetStoveBinarySwitch(RikaFirenetEntity, SwitchEntity):
    """A single on/off control exposed by a Rika Firenet stove."""

    def __init__(self, config_entry, stove: RikaFirenetStove, coordinator: RikaFirenetCoordinator, number):
        super().__init__(config_entry, stove, coordinator, number)
        self._number = number

    def turn_on(self, **kwargs):  # pylint: disable=unused-argument
        """Enable the mapped stove feature and refresh entity state."""
        _LOGGER.info("turn_on " + self._number)
        handlers = {
            "on off": self._stove.turn_on,
            "convection fan1": self._stove.turn_convection_fan1_on,
            "convection fan2": self._stove.turn_convection_fan2_on,
            "heating times": self._stove.turn_heating_times_on,
        }
        handler = handlers.get(self._number)
        if handler is not None:
            handler()
        self.schedule_update_ha_state()

    def turn_off(self, **kwargs):  # pylint: disable=unused-argument
        """Disable the mapped stove feature and refresh entity state."""
        _LOGGER.info("turn_off " + self._number)
        handlers = {
            "on off": self._stove.turn_off,
            "convection fan1": self._stove.turn_convection_fan1_off,
            "convection fan2": self._stove.turn_convection_fan2_off,
            "heating times": self._stove.turn_heating_times_off,
        }
        handler = handlers.get(self._number)
        if handler is not None:
            handler()
        self.schedule_update_ha_state()

    @property
    def icon(self):
        """All stove switches use the power icon."""
        return "hass:power"

    @property
    def is_on(self):
        """Return the current state of the mapped stove feature."""
        probes = {
            "on off": self._stove.is_stove_on,
            "convection fan1": self._stove.is_stove_convection_fan1_on,
            "convection fan2": self._stove.is_stove_convection_fan2_on,
            "heating times": self._stove.is_stove_heating_times_on,
        }
        probe = probes.get(self._number)
        return probe() if probe is not None else None
79528ee2ad62316b8efee0a688f407487e60375c | 8,649 | py | Python | docs/conf.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1 | 2020-07-21T10:01:20.000Z | 2020-07-21T10:01:20.000Z | docs/conf.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Pythran documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 19 20:57:04 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
from pythran import __version__
# Build index.rst from the repository README, replacing the readthedocs URL
# with an inline toctree.
with open("../README.rst") as readme:
    readme_body = readme.read()
toc = '''
.. toctree::
   :maxdepth: 1
   MANUAL
   EXAMPLES
   CLI
   SUPPORT
   DEVGUIDE
   TUTORIAL
   INTERNAL
   LICENSE
   AUTHORS
   Changelog
'''
readme_body = readme_body.replace('http://pythran.readthedocs.io', toc)
with open("index.rst", "w") as index:
    index.write(readme_body)
del readme_body
# Wrap the plain-text LICENSE in an RST page with a title.
with open("../LICENSE") as license:
    with open('LICENSE.rst', 'w') as license_rst:
        license_rst.write("=======\nLICENSE\n=======\n\n")
        license_rst.write(license.read())
# Same treatment for the Changelog.
with open("../Changelog") as changelog:
    with open('Changelog.rst', 'w') as changelog_rst:
        changelog_rst.write('=========\nChangelog\n=========\n\n')
        changelog_rst.write(changelog.read())
# AUTHORS already contains its own heading; copy it verbatim.
with open("../AUTHORS") as authors:
    with open('AUTHORS.rst', 'w') as authors_rst:
        authors_rst.write(authors.read())
def make_support():
    """Render the SUPPORT.rst body: every module/function Pythran supports,
    as nested RST sections derived from pythran.tables.MODULES."""
    from pythran import tables

    TITLE = "Supported Modules and Functions"
    DEPTHS = '=*-+:~#.^"`'
    # Title plus its over/underline, then a separating blank line.
    lines = [DEPTHS[0] * len(TITLE), TITLE, DEPTHS[0] * len(TITLE), ""]

    def format_name(name):
        # Strip the single trailing underscore used to dodge Python keywords.
        if name.endswith('_') and not name.startswith('_'):
            return name[:-1]
        return name

    def isiterable(obj):
        return hasattr(obj, '__iter__')

    def dump_entry(entry_name, entry_value, depth):
        # Iterable values are sub-namespaces rendered as RST sections;
        # everything else is a plain symbol line.
        if isiterable(entry_value):
            lines.append(entry_name)
            lines.append(DEPTHS[depth] * len(entry_name))
            lines.append("")
            leaves, groups = [], []
            for sym in entry_value:
                (groups if isiterable(entry_value[sym]) else leaves).append(sym)
            for sym in sorted(leaves):
                dump_entry(format_name(sym), entry_value[sym], depth + 1)
            lines.append("")
            for sym in sorted(groups):
                dump_entry(format_name(sym), entry_value[sym], depth + 1)
            lines.append("")
        else:
            lines.append(entry_name)

    for module_name in sorted(tables.MODULES):
        if module_name != '__dispatch__':
            dump_entry(format_name(module_name), tables.MODULES[module_name], 1)

    return "\n".join(lines)
# Regenerate SUPPORT.rst on every docs build so it tracks pythran.tables.
with open('SUPPORT.rst', 'w') as support:
    support.write(make_support())
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# nbsphinx renders the Jupyter-notebook tutorials as documentation pages.
extensions = ['nbsphinx',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pythran'
copyright = u'2014, Serge Guelton, Pierrick Brunet et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import guzzle_sphinx_theme
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    # Name of the project shown in the guzzle theme's sidebar. The original
    # still carried the theme template's "Project Name" placeholder.
    "project_nav_name": "Pythran",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'pythran.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# Show only the global table of contents in every page's sidebar.
html_sidebars = {'**': ['globaltoc.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pythrandoc'
| 30.561837 | 80 | 0.695109 |
79528f67fe1003dab91a44d69ce6d8fcbfed3355 | 5,219 | py | Python | sdk/python/pulumi_azure_native/web/v20200601/get_web_app_swift_virtual_network_connection_slot.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20200601/get_web_app_swift_virtual_network_connection_slot.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20200601/get_web_app_swift_virtual_network_connection_slot.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetWebAppSwiftVirtualNetworkConnectionSlotResult',
'AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult',
'get_web_app_swift_virtual_network_connection_slot',
]
@pulumi.output_type
class GetWebAppSwiftVirtualNetworkConnectionSlotResult:
    """
    Swift Virtual Network Contract. This is used to enable the new Swift way of doing virtual network integration.
    """
    # NOTE: generated by the Pulumi SDK generator (see file header); the
    # isinstance guards validate the raw invoke payload before storing it.
    def __init__(__self__, id=None, kind=None, name=None, subnet_resource_id=None, swift_supported=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if subnet_resource_id and not isinstance(subnet_resource_id, str):
            raise TypeError("Expected argument 'subnet_resource_id' to be a str")
        pulumi.set(__self__, "subnet_resource_id", subnet_resource_id)
        if swift_supported and not isinstance(swift_supported, bool):
            raise TypeError("Expected argument 'swift_supported' to be a bool")
        pulumi.set(__self__, "swift_supported", swift_supported)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="subnetResourceId")
    def subnet_resource_id(self) -> Optional[str]:
        """
        The Virtual Network subnet's resource ID. This is the subnet that this Web App will join. This subnet must have a delegation to Microsoft.Web/serverFarms defined first.
        """
        return pulumi.get(self, "subnet_resource_id")
    @property
    @pulumi.getter(name="swiftSupported")
    def swift_supported(self) -> Optional[bool]:
        """
        A flag that specifies if the scale unit this Web App is on supports Swift integration.
        """
        return pulumi.get(self, "swift_supported")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult(GetWebAppSwiftVirtualNetworkConnectionSlotResult):
    # The unreachable `yield` turns __await__ into a generator (as the await
    # protocol requires) that immediately returns a plain result object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetWebAppSwiftVirtualNetworkConnectionSlotResult(
            id=self.id,
            kind=self.kind,
            name=self.name,
            subnet_resource_id=self.subnet_resource_id,
            swift_supported=self.swift_supported,
            type=self.type)
def get_web_app_swift_virtual_network_connection_slot(name: Optional[str] = None,
                                                      resource_group_name: Optional[str] = None,
                                                      slot: Optional[str] = None,
                                                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult:
    """
    Swift Virtual Network Contract. This is used to enable the new Swift way of doing virtual network integration.

    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. If a slot is not specified, the API will get a gateway for the production slot's Virtual Network.
    """
    __args__ = {
        'name': name,
        'resourceGroupName': resource_group_name,
        'slot': slot,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('azure-native:web/v20200601:getWebAppSwiftVirtualNetworkConnectionSlot', __args__, opts=opts, typ=GetWebAppSwiftVirtualNetworkConnectionSlotResult).value
    return AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult(
        id=raw.id,
        kind=raw.kind,
        name=raw.name,
        subnet_resource_id=raw.subnet_resource_id,
        swift_supported=raw.swift_supported,
        type=raw.type)
| 38.947761 | 189 | 0.659513 |
79529003da7a418a0558b57b04454fe0386abc9b | 481 | py | Python | config/main.py | elmsantospy/tests_kivy | 290441ec090975e3bf8c1379b251e5912317cc82 | [
"MIT"
] | null | null | null | config/main.py | elmsantospy/tests_kivy | 290441ec090975e3bf8c1379b251e5912317cc82 | [
"MIT"
] | null | null | null | config/main.py | elmsantospy/tests_kivy | 290441ec090975e3bf8c1379b251e5912317cc82 | [
"MIT"
] | null | null | null | from kivymd.app import MDApp
from mixinmain import MixinMain
import config
import rootwidget
from kivy.properties import BooleanProperty
class MainApp(MDApp, MixinMain):
    # Root KivyMD application; shared behaviour lives in MixinMain.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # self.borderless_status = BooleanProperty(config.borderless_status)
    def build(self):
        # The whole widget tree is defined by RootWidget (see rootwidget.py).
        return rootwidget.RootWidget()
if __name__ == '__main__':
    # Instantiate the app, set the window title, and start the Kivy loop.
    mainapp = MainApp()
    mainapp.title = 'Config'
    mainapp.run()
| 20.913043 | 76 | 0.711019 |
795290363b2b7b6e47748e725a390a7f16332905 | 5,445 | py | Python | MRPO/examples/plot_heatmaps.py | hai-h-nguyen/pomdp-baselines | 629180d56641810d99653a116cca41ede65172eb | [
"MIT"
] | 40 | 2021-10-15T14:53:00.000Z | 2022-03-31T02:27:20.000Z | MRPO/examples/plot_heatmaps.py | hai-h-nguyen/pomdp-baselines | 629180d56641810d99653a116cca41ede65172eb | [
"MIT"
] | 1 | 2022-03-13T04:02:30.000Z | 2022-03-13T04:02:30.000Z | MRPO/examples/plot_heatmaps.py | hai-h-nguyen/pomdp-baselines | 629180d56641810d99653a116cca41ede65172eb | [
"MIT"
] | 5 | 2021-11-28T04:08:13.000Z | 2022-03-17T02:33:51.000Z | """Plot heatmaps of evaluation results."""
import argparse
import itertools
import json
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import griddata
import scipy.spatial
# Maps the --cell-aggregation CLI choice to the numpy reducer applied per hexbin cell.
CELL_AGGREGATIONS = {
    "mean": np.mean,
    "min": np.min,
    "max": np.max,
}
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument("results", type=str, help="Experiments results file")
    parser.add_argument("--grid-size", type=int, default=10, help="Heatmap grid size")
    parser.add_argument(
        "--cell-aggregation", type=str, choices=CELL_AGGREGATIONS.keys(), default="mean"
    )
    parser.add_argument("--scale-min", type=int)
    parser.add_argument("--scale-max", type=int)
    parser.add_argument("--output", type=str, help="Output directory")
    args = parser.parse_args()
    os.makedirs(args.output, exist_ok=True)
    cell_aggregation = CELL_AGGREGATIONS[args.cell_aggregation]
    with open(args.results, "r") as results_file:
        # Read results for each experiment (in its own line).
        for index, experiment in enumerate(results_file):
            try:
                experiment = json.loads(experiment)
            except ValueError:
                print("WARNING: Skipping malformed experiment result.")
                continue
            print(
                "Processing result {index}/{trained_on}/{evaluated_on}...".format(
                    index=index, **experiment
                )
            )
            # Assume all episodes vary the same parameters.
            episodes = experiment["episodes"]
            parameters = set(episodes[0]["environment"].keys())
            parameters.remove("id")
            parameters.remove("world")
            # Remove non-numeric parameters.
            for parameter in parameters.copy():
                values = np.asarray(
                    [episode["environment"][parameter] for episode in episodes]
                )
                if not np.issubdtype(values.dtype, np.number):
                    parameters.remove(parameter)
            # Sort parameters alphabetically for a consistent order.
            parameters = sorted(parameters)
            # Rewards, in episode order (kept immutable below -- see bug fix).
            rewards = np.asarray([episode["reward"] for episode in episodes])
            # Colormap. BUG FIX: `args.scale_min or ...` fell back to the data
            # minimum when the user explicitly passed 0; compare with None.
            colormap = plt.cm.rainbow
            colornorm = matplotlib.colors.Normalize(
                vmin=np.min(rewards) if args.scale_min is None else args.scale_min,
                vmax=np.max(rewards) if args.scale_max is None else args.scale_max,
            )
            # Compute all-pairs heatmaps.
            items = len(parameters)
            figure, axes = plt.subplots(
                items, items, sharex="col", sharey="row", figsize=(12, 12)
            )
            if items == 1:
                axes = np.asarray([axes]).reshape([1, 1])
            for row, param_a in enumerate(parameters):
                axes[0, row].set_title(param_a)
                for col, param_b in enumerate(parameters):
                    axes[col, 0].set_ylabel(param_b, size="large")
                    values_a = np.asarray(
                        [float(episode["environment"][param_a]) for episode in episodes]
                    )
                    values_b = np.asarray(
                        [float(episode["environment"][param_b]) for episode in episodes]
                    )
                    # BUG FIX: the original permuted `rewards` in place on every
                    # cell, so from the second cell onwards the rewards were no
                    # longer aligned with the freshly-built (episode-ordered)
                    # values_a/values_b. Sort into locals instead.
                    order = np.argsort(rewards, kind="stable")
                    cell_rewards = rewards[order]
                    cell_a = values_a[order]
                    cell_b = values_b[order]
                    ax = axes[col, row]
                    # Plot heatmap.
                    heatmap = ax.hexbin(
                        cell_a,
                        cell_b,
                        cell_rewards,
                        cmap=colormap,
                        norm=colornorm,
                        gridsize=args.grid_size,
                        reduce_C_function=cell_aggregation,
                    )
            # Plot colorbar.
            figure.colorbar(heatmap, ax=axes.ravel().tolist())
            plt.suptitle(
                "Model: $\\bf{{{model[name]}}}$ Trained: {trained_on}\n"
                "Evaluated: {evaluated_on}\n"
                "Episodes: {n_episodes} Mean: {mean:.2f} Median: {median:.2f} Min: {min:.2f} Max: {max:.2f}\n"
                "Grid size: {grid_size}x{grid_size} Cell aggregation: {cell_aggregation}"
                "".format(
                    n_episodes=len(episodes),
                    mean=np.mean(rewards),
                    median=np.median(rewards),
                    min=np.min(rewards),
                    max=np.max(rewards),
                    grid_size=args.grid_size,
                    cell_aggregation=args.cell_aggregation,
                    **experiment,
                )
            )
            plt.savefig(
                os.path.join(
                    args.output,
                    "heatmap-train-{trained_on}-test-{evaluated_on}-{index:02d}.png".format(
                        index=index, **experiment
                    ),
                )
            )
            plt.close()
| 36.3 | 110 | 0.514233 |
795290daed599d6dc369ab32873b3ae472bc12a0 | 5,259 | py | Python | hyde/tests/test_model.py | dcode/hyde | 7ce58157a9e74cc767cd602097441b8424a2052f | [
"MIT"
] | 1 | 2019-01-03T00:52:22.000Z | 2019-01-03T00:52:22.000Z | hyde/tests/test_model.py | eliethesaiyan/hyde | 7ce58157a9e74cc767cd602097441b8424a2052f | [
"MIT"
] | null | null | null | hyde/tests/test_model.py | eliethesaiyan/hyde | 7ce58157a9e74cc767cd602097441b8424a2052f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from hyde.model import Config, Expando
from hyde.fs import *
def test_expando_one_level():
    """Attribute access on a flat dict mirrors the dict's items."""
    source = {"a": 123, "b": "abc"}
    expando = Expando(source)
    assert expando.a == source["a"]
    assert expando.b == source["b"]
def test_expando_two_levels():
    """A nested dict becomes a nested attribute lookup chain."""
    source = {"a": 123, "b": {"c": 456}}
    expando = Expando(source)
    assert expando.a == source["a"]
    assert expando.b.c == source["b"]["c"]
def test_expando_three_levels():
    """Attribute chains work through three levels of nesting."""
    source = {"a": 123, "b": {"c": 456, "d": {"e": "abc"}}}
    expando = Expando(source)
    assert expando.a == source["a"]
    assert expando.b.c == source["b"]["c"]
    assert expando.b.d.e == source["b"]["d"]["e"]
def test_expando_update():
    """update() merges both plain dicts and other Expandos, later values winning."""
    initial = {"a": 123, "b": "abc"}
    expando = Expando(initial)
    assert expando.a == initial["a"]
    assert expando.b == initial["b"]
    # Merge a dict: new keys appear, existing keys are replaced.
    overlay = {"b": {"c": 456, "d": {"e": "abc"}}, "f": "lmn"}
    expando.update(overlay)
    assert expando.a == initial["a"]
    assert expando.b.c == overlay["b"]["c"]
    assert expando.b.d.e == overlay["b"]["d"]["e"]
    assert expando.f == overlay["f"]
    # Merge another Expando: its attributes overwrite ours.
    other = Expando({"a": 789, "f": "opq"})
    expando.update(other)
    assert expando.a == 789
    assert expando.f == "opq"
def test_expando_to_dict():
    """to_dict() round-trips the original nested mapping unchanged."""
    source = {"a": 123, "b": {"c": 456, "d": {"e": "abc"}}}
    assert Expando(source).to_dict() == source
def test_expando_to_dict_with_update():
    """to_dict() reflects every update applied to the Expando."""
    initial = {"a": 123, "b": "abc"}
    expando = Expando(initial)
    overlay = {"b": {"c": 456, "d": {"e": "abc"}}, "f": "lmn"}
    expando.update(overlay)
    # Build the expected dict by applying the same merges to a plain dict.
    expected = {}
    expected.update(initial)
    expected.update(overlay)
    assert expando.to_dict() == expected
    final = {"a": 789, "f": "opq"}
    expando.update(Expando(final))
    expected.update(final)
    assert expando.to_dict() == expected
# Scratch site folder used by TestConfig; created in setUp, removed in tearDown.
TEST_SITE = File(__file__).parent.child_folder('_test')
import yaml
class TestConfig(object):
    """Tests for ``Config``: defaults, overrides, file loading and ``extends``."""
    @classmethod
    def setup_class(cls):
        # Two sample site configurations shared by the tests below.
        cls.conf1 = """
mode: development
content_root: stuff # Relative path from site root
media_root: media # Relative path from site root
media_url: /media
widgets:
plugins:
aggregators:
"""
        cls.conf2 = """
mode: development
deploy_root: ~/deploy_site
content_root: site/stuff # Relative path from site root
media_root: mmm # Relative path from site root
media_url: /media
widgets:
plugins:
aggregators:
"""
    def setUp(self):
        TEST_SITE.make()
        TEST_SITE.parent.child_folder('sites/test_jinja').copy_contents_to(TEST_SITE)
    def tearDown(self):
        TEST_SITE.delete()
    def test_default_configuration(self):
        """An empty config dict yields the documented defaults."""
        c = Config(sitepath=TEST_SITE, config_dict={})
        for root in ['content', 'layout']:
            name = root + '_root'
            path = name + '_path'
            assert hasattr(c, name)
            assert getattr(c, name) == root
            assert hasattr(c, path)
            assert getattr(c, path) == TEST_SITE.child_folder(root)
        assert c.media_root_path == c.content_root_path.child_folder('media')
        assert hasattr(c, 'plugins')
        assert len(c.plugins) == 0
        assert hasattr(c, 'ignore')
        assert c.ignore == ["*~", "*.bak"]
        assert c.deploy_root_path == TEST_SITE.child_folder('deploy')
        assert c.not_found == '404.html'
    def test_conf1(self):
        """content_root from the config overrides the default."""
        # safe_load: yaml.load without an explicit Loader is unsafe and is a
        # hard error in PyYAML >= 6; these configs are plain data anyway.
        c = Config(sitepath=TEST_SITE, config_dict=yaml.safe_load(self.conf1))
        assert c.content_root_path == TEST_SITE.child_folder('stuff')
    def test_conf2(self):
        """Paths derived from conf2 resolve relative to the right roots."""
        c = Config(sitepath=TEST_SITE, config_dict=yaml.safe_load(self.conf2))
        assert c.content_root_path == TEST_SITE.child_folder('site/stuff')
        assert c.media_root_path == c.content_root_path.child_folder('mmm')
        assert c.media_url == TEST_SITE.child_folder('/media')
        assert c.deploy_root_path == Folder('~/deploy_site')
    def test_read_from_file_by_default(self):
        """With no config_dict, Config reads site.yaml from the site path."""
        File(TEST_SITE.child('site.yaml')).write(self.conf2)
        c = Config(sitepath=TEST_SITE)
        assert c.content_root_path == TEST_SITE.child_folder('site/stuff')
        assert c.media_root_path == c.content_root_path.child_folder('mmm')
        assert c.media_url == TEST_SITE.child_folder('/media')
        assert c.deploy_root_path == Folder('~/deploy_site')
    def test_read_from_specified_file(self):
        """config_file overrides the default site.yaml filename."""
        File(TEST_SITE.child('another.yaml')).write(self.conf2)
        c = Config(sitepath=TEST_SITE, config_file='another.yaml')
        assert c.content_root_path == TEST_SITE.child_folder('site/stuff')
        assert c.media_root_path == c.content_root_path.child_folder('mmm')
        assert c.media_url == TEST_SITE.child_folder('/media')
        assert c.deploy_root_path == Folder('~/deploy_site')
    def test_extends(self):
        """An 'extends' config inherits the base file and overrides keys."""
        another = """
extends: site.yaml
mode: production
media_root: xxx
"""
        File(TEST_SITE.child('site.yaml')).write(self.conf2)
        File(TEST_SITE.child('another.yaml')).write(another)
        c = Config(sitepath=TEST_SITE, config_file='another.yaml')
        assert c.mode == 'production'
        assert c.content_root_path == TEST_SITE.child_folder('site/stuff')
        assert c.media_root_path == c.content_root_path.child_folder('xxx')
        assert c.media_url == TEST_SITE.child_folder('/media')
        assert c.deploy_root_path == Folder('~/deploy_site')
795291de9d99efbff12609e571f8bb8bb3ba49d2 | 822 | py | Python | env/lib/python3.8/site-packages/JavaScriptCore/__init__.py | evilcomputer12/macOSAutoJoinAndRecordZoomSession | 2157de803c1d67fe493ff330f1558892507e8e49 | [
"MIT"
] | 2 | 2021-08-11T15:50:12.000Z | 2021-09-03T17:53:47.000Z | env/lib/python3.8/site-packages/JavaScriptCore/__init__.py | evilcomputer12/macOSAutoJoinAndRecordZoomSession | 2157de803c1d67fe493ff330f1558892507e8e49 | [
"MIT"
] | null | null | null | env/lib/python3.8/site-packages/JavaScriptCore/__init__.py | evilcomputer12/macOSAutoJoinAndRecordZoomSession | 2157de803c1d67fe493ff330f1558892507e8e49 | [
"MIT"
] | null | null | null | """
Python mapping for the JavaScriptCore framework.
This module does not contain docstrings for the wrapped code, check Apple's
documentation for details on how to use these functions and classes.
"""
import sys
import CoreFoundation
import JavaScriptCore._util
import objc
from JavaScriptCore import _metadata
# Replace this package in sys.modules with a lazy ObjC bridge module so that
# JavaScriptCore symbols are resolved on first access instead of at import time.
sys.modules["JavaScriptCore"] = mod = objc.ObjCLazyModule(
    "JavaScriptCore",
    "com.apple.JavaScriptCore",
    objc.pathForFramework("/System/Library/Frameworks/JavaScriptCore.framework"),
    _metadata.__dict__,
    None,
    {
        # Seed the lazy module with this package's doc/loader machinery so it
        # still behaves like the original package object.
        "__doc__": __doc__,
        "objc": objc,
        "__path__": __path__,
        "__loader__": globals().get("__loader__", None),
    },
    (CoreFoundation,),  # parent frameworks whose symbols are also exposed
)
# The metadata module is only needed during setup; drop it from sys.modules.
del sys.modules["JavaScriptCore._metadata"]
# Re-export the helper on the lazy module so callers find it in one place.
mod.autoreleasing = JavaScriptCore._util.autoreleasing
| 24.176471 | 81 | 0.725061 |
7952927ceeb8d190d79adc9909af5fd5827b0fe0 | 1,700 | py | Python | nebula/dao/base_dao.py | threathunterX/nebula_web | 2e32e6e7b225e0bd87ee8c847c22862f12c51bb1 | [
"Apache-2.0"
] | 2 | 2019-05-01T09:42:32.000Z | 2019-05-31T01:08:37.000Z | nebula/dao/base_dao.py | threathunterX/nebula_web | 2e32e6e7b225e0bd87ee8c847c22862f12c51bb1 | [
"Apache-2.0"
] | 1 | 2021-06-01T23:30:04.000Z | 2021-06-01T23:30:04.000Z | nebula/dao/base_dao.py | threathunterX/nebula_web | 2e32e6e7b225e0bd87ee8c847c22862f12c51bb1 | [
"Apache-2.0"
] | 5 | 2019-05-14T09:30:12.000Z | 2020-09-29T04:57:26.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from ..models.engine import DB_Session, Default_DB_Session, Data_DB_Session
# Module-level sessions. When the application sets these at startup, all DAO
# instances share them instead of opening private per-instance sessions.
Global_Session = None
Global_Default_Session = None
Global_Data_Session = None
class BaseDao(object):
    """Base DAO bound to the primary database.

    Session resolution order: an explicitly supplied session, then the
    module-level ``Global_Session`` if set, otherwise a private
    ``DB_Session`` that this instance owns and closes on finalization.
    """
    def __init__(self, session=None):
        if session:
            self.session = session
            self.own_session = False
        elif Global_Session:
            self.session = Global_Session
            self.own_session = False
        else:
            # No shared session available: open a private one we must close.
            self.session = DB_Session()
            self.own_session = True
    def __del__(self):
        # getattr guard: __del__ may run on a partially constructed instance
        # if __init__ raised before own_session was assigned.
        if getattr(self, 'own_session', False):
            self.session.close()
class BaseDefaultDao(object):
    """Base DAO bound to the 'default' database.

    Session resolution order: an explicitly supplied session, then the
    module-level ``Global_Default_Session`` if set, otherwise a private
    ``Default_DB_Session`` that this instance owns and closes on finalization.
    """
    def __init__(self, session=None):
        if session:
            self.session = session
            self.own_session = False
        elif Global_Default_Session:
            self.session = Global_Default_Session
            self.own_session = False
        else:
            # No shared session available: open a private one we must close.
            self.session = Default_DB_Session()
            self.own_session = True
    def __del__(self):
        # getattr guard: __del__ may run on a partially constructed instance
        # if __init__ raised before own_session was assigned.
        if getattr(self, 'own_session', False):
            self.session.close()
class BaseDataDao(object):
    """Base DAO bound to the 'data' database.

    Session resolution order: an explicitly supplied session, then the
    module-level ``Global_Data_Session`` if set, otherwise a private
    ``Data_DB_Session`` that this instance owns and closes on finalization.
    """
    def __init__(self, session=None):
        if session:
            self.session = session
            self.own_session = False
        elif Global_Data_Session:
            self.session = Global_Data_Session
            self.own_session = False
        else:
            # No shared session available: open a private one we must close.
            self.session = Data_DB_Session()
            self.own_session = True
    def __del__(self):
        # getattr guard: __del__ may run on a partially constructed instance
        # if __init__ raised before own_session was assigned.
        if getattr(self, 'own_session', False):
            self.session.close()
7952936b89556b1bddb7a35d2a99e154978c8814 | 3,873 | py | Python | validation/ProteinSequenceAssembly_NW_Validation.py | griffincalme/AminoAcidSequenceAssembly | a49f1b6a40098c56f098d8d9ebd90247f4c9645b | [
"Apache-2.0"
] | null | null | null | validation/ProteinSequenceAssembly_NW_Validation.py | griffincalme/AminoAcidSequenceAssembly | a49f1b6a40098c56f098d8d9ebd90247f4c9645b | [
"Apache-2.0"
] | null | null | null | validation/ProteinSequenceAssembly_NW_Validation.py | griffincalme/AminoAcidSequenceAssembly | a49f1b6a40098c56f098d8d9ebd90247f4c9645b | [
"Apache-2.0"
] | null | null | null | #Must install more_itertools and scikit-bio(need to run "python setup.py install" from source for skbio)
from skbio import Protein
from skbio.alignment import global_pairwise_align_protein
from more_itertools import unique_everseen
import random
def contig_merger(first_seq, second_seq):
    """Merge two aligned sequences into a single contig string.

    Gap characters ('-') in ``first_seq`` are filled from the same position
    of ``second_seq``; letters are kept as-is.  Any other character is
    treated as an alignment error and the empty string is returned.
    """
    merged_contig = []  # Amino acids of the merged sequence
    # enumerate replaces the original manual counter variable.
    for position, residue in enumerate(first_seq):
        if residue == '-':
            # Gap: take the aligned amino acid from the other sequence.
            merged_contig.append(second_seq[position])
        elif residue.isalpha():
            merged_contig.append(residue)
        else:
            # Unexpected character: signal failure with an empty contig
            # (equivalent to the original clear-and-break behavior).
            return ''
    return ''.join(merged_contig)
# ---- Validation driver: generate random proteins, fragment them, and ----
# ---- check that the assembler reconstructs the original sequence.    ----
min_overlap = 6  # minimum aligned matches required to merge two contigs
amino_acids = 'ACDEFGHIKLMNPQRSTVWY'
sequence_length = 200
max_subsequence_length = 8
min_subsequence_length = 7
Validation_Runs = 5
correct_list = []  # one True/False per validation run
for i in range(0, Validation_Runs):
    # Random "true" protein sequence for this run.
    sequence = ''.join(random.choice(amino_acids) for i in range(sequence_length))
    subsequence_number = 1000
    subsequence_library = [sequence]
    for i in range(0, subsequence_number):
        subsequence_length = random.randrange(min_subsequence_length, max_subsequence_length + 1)
        start = random.randint(0, sequence_length - subsequence_length)  # Random starting point for slice
        end = start + subsequence_length  # Ending point for slice
        subsequence = sequence[start:end]
        subsequence_library.append(subsequence)
    original_supersequence = subsequence_library[0]  # DO NOT USE IN PRODUCTION, for validating original sequence
    subsequence_library = subsequence_library[1:]  # DO NOT USE IN PRODUCTION, for validating original sequence
    no_duplicates_library = list(unique_everseen(subsequence_library))  # Remove duplicate subsequences
    no_duplicates_library = [x.upper() for x in no_duplicates_library]  # Force uppercase amino acids
    growing_sequence = no_duplicates_library[0]  # Assign the first subsequence to be the seed
    no_duplicates_library.remove(growing_sequence)  # Remove the seed from the subsequence library
    print('\nTotal number of amino acid subsequences: ' + str(len(subsequence_library)))
    print('Unique amino acid subsequences: ' + str(len(no_duplicates_library)))
    print('\nSeed sequence is ' + growing_sequence)
    print('\nSubsequence library with seed and any duplicates removed:')
    print(no_duplicates_library)
    print('\n')
    working_library = no_duplicates_library
    # This part runs the assembly.  BUGFIX: iterate over a snapshot
    # (list(working_library)) -- removing items from the list while
    # iterating over it skips the element following each removal.
    for i in range(0, 100):
        for j in list(working_library):
            if j in growing_sequence:
                working_library.remove(j)
            else:
                aln, _, _ = global_pairwise_align_protein(Protein(growing_sequence), Protein(j), penalize_terminal_gaps=False)
                seq1, seq2 = aln
                match = seq1.match_frequency(seq2, relative=False)
                if match >= min_overlap:
                    merged_contig = contig_merger(str(seq1), str(seq2))
                    growing_sequence = merged_contig
    print(growing_sequence)
    print('\nLeftover unmatched subsequences: ')
    if len(working_library) == 0:
        print('None!')
    else:
        print(working_library)
    print('\nYour original supersequence is: ')
    print(original_supersequence)
    print('\nYour guessed supersequence is: ')
    print(growing_sequence)
    if original_supersequence == growing_sequence:
        print('\nCorrect!')
        correct_list.append(True)
    else:
        correct_list.append(False)
print(correct_list)
print(str(sum(correct_list)/len(correct_list) * 100) + "%")
79529515f70dca8cbaf975bf0ed5dd4d1b3d46a2 | 763 | py | Python | src/sentry/models/deletedentry.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 4 | 2019-05-27T13:55:07.000Z | 2021-03-30T07:05:09.000Z | src/sentry/models/deletedentry.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 196 | 2019-06-10T08:34:10.000Z | 2022-02-22T01:26:13.000Z | src/sentry/models/deletedentry.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 1 | 2020-08-10T07:55:40.000Z | 2020-08-10T07:55:40.000Z | from __future__ import absolute_import
from django.db import models
from django.utils import timezone
from sentry.db.models import (
Model, BoundedBigIntegerField
)
class DeletedEntry(Model):
    """Abstract base model recording who deleted something, from where, and when."""
    # NOTE(review): presumably excludes this model from sentry's core data
    # export/import -- confirm against the Model base class.
    __core__ = False
    # Human-readable label for the actor that performed the deletion.
    actor_label = models.CharField(max_length=64, null=True)
    # if the entry was created via a user
    actor_id = BoundedBigIntegerField(null=True)
    # if the entry was created via an api key
    actor_key = models.CharField(max_length=32, null=True)
    # Source address of the request that triggered the deletion.
    ip_address = models.GenericIPAddressField(null=True, unpack_ipv4=True)
    # When the deletion happened (defaults to now) vs. when the deleted
    # object itself had been created.
    date_deleted = models.DateTimeField(default=timezone.now)
    date_created = models.DateTimeField(null=True)
    # Optional free-form explanation for the deletion.
    reason = models.TextField(blank=True, null=True)
    class Meta:
        abstract = True
79529559862c2c54eafb279d516f074a13ab1768 | 13,810 | py | Python | src/skmultiflow/meta/online_under_over_bagging.py | denisesato/scikit-multiflow | 3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95 | [
"BSD-3-Clause"
] | 663 | 2017-11-16T15:48:45.000Z | 2022-03-28T07:38:17.000Z | src/skmultiflow/meta/online_under_over_bagging.py | denisesato/scikit-multiflow | 3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95 | [
"BSD-3-Clause"
] | 293 | 2017-12-16T12:33:49.000Z | 2022-02-22T03:34:25.000Z | src/skmultiflow/meta/online_under_over_bagging.py | denisesato/scikit-multiflow | 3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95 | [
"BSD-3-Clause"
] | 201 | 2017-11-30T15:52:30.000Z | 2022-03-25T21:46:55.000Z | import copy as cp
import warnings
import numpy as np
from skmultiflow.core import BaseSKMObject, ClassifierMixin, MetaEstimatorMixin
from skmultiflow.drift_detection import ADWIN
from skmultiflow.lazy import KNNADWINClassifier
from skmultiflow.utils import check_random_state
from skmultiflow.utils.utils import get_dimensions
def OnlineUnderOverBagging(base_estimator=KNNADWINClassifier(), n_estimators=10, sampling_rate=2,
                           drift_detection=True, random_state=None): # pragma: no cover
    """Deprecated alias: emits a FutureWarning and forwards all arguments to
    :class:`OnlineUnderOverBaggingClassifier`.  Slated for removal in v0.7.0.

    NOTE(review): the default ``KNNADWINClassifier()`` is a mutable default
    argument created once at import time and shared across calls -- verify
    the classifier deep-copies it before mutating.
    """
    warnings.warn("'OnlineUnderOverBagging' has been renamed to 'OnlineUnderOverBaggingClassifier'"
                  " in v0.5.0.\nThe old name will be removed in v0.7.0", category=FutureWarning)
    return OnlineUnderOverBaggingClassifier(base_estimator=base_estimator,
                                            n_estimators=n_estimators,
                                            sampling_rate=sampling_rate,
                                            drift_detection=drift_detection,
                                            random_state=random_state)
class OnlineUnderOverBaggingClassifier(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin):
    r""" Online Under-Over-Bagging ensemble classifier.
    Online UnderOverBagging [1]_ is the online version of the ensemble method.
    In case of imbalanced classes UnderOverBagging uses the strategy of under-sampling
    the majority class and oversampling the minority class. In addition the sampling
    rate can be also varied over the bagging iterations, which further boosts the
    diversity of the base learners.
    The derivation of the online UnderOverBagging algorithm is made through the observation
    that a Binomial distribution with sampling rate :math:`\frac{C}{N}` corresponds to a
    poisson distribution with :math:`\lambda=C`.
    This online ensemble learner method is improved by the addition of an ADWIN change
    detector.
    ADWIN stands for Adaptive Windowing. It works by keeping updated
    statistics of a variable sized window, so it can detect changes and
    perform cuts in its window to better adapt the learning algorithms.
    Parameters
    ----------
    base_estimator: skmultiflow.core.BaseSKMObject or sklearn.BaseEstimator
        (default=KNNADWINClassifier) Each member of the ensemble is
        an instance of the base estimator.
    n_estimators: int, optional (default=10)
        The size of the ensemble, in other words, how many classifiers to train.
    sampling_rate: int, optional (default=2)
        The sampling rate of the positive instances.
    drift_detection: bool, optional (default=True)
        A drift detector (ADWIN) can be used by the method to track the performance
        of the classifiers and adapt when a drift is detected.
    random_state: int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used by `np.random`.
    Raises
    ------
    NotImplementedError: A few of the functions described here are not
        implemented since they have no application in this context.
    ValueError: A ValueError is raised if the 'classes' parameter is
        not passed in the first partial_fit call.
    References
    ----------
    .. [1] B. Wang and J. Pineau, "Online Bagging and Boosting for Imbalanced Data Streams,"
       in IEEE Transactions on Knowledge and Data Engineering, vol. 28, no. 12, pp. 3353-3366,
       1 Dec. 2016. doi: 10.1109/TKDE.2016.2609424
    Examples
    --------
    >>> # Imports
    >>> from skmultiflow.data import SEAGenerator
    >>> from skmultiflow.meta import OnlineUnderOverBaggingClassifier
    >>>
    >>> # Setup a data stream
    >>> stream = SEAGenerator(random_state=1)
    >>>
    >>> # Setup variables to control loop and track performance
    >>> n_samples = 0
    >>> correct_cnt = 0
    >>> max_samples = 200
    >>>
    >>> # Setup the Online Under-Over-Bagging ensemble classifier
    >>> online_under_over_bagging = OnlineUnderOverBaggingClassifier()
    >>>
    >>> # Train the classifier with the samples provided by the data stream
    >>> while n_samples < max_samples and stream.has_more_samples():
    >>>     X, y = stream.next_sample()
    >>>     y_pred = online_under_over_bagging.predict(X)
    >>>     if y[0] == y_pred[0]:
    >>>         correct_cnt += 1
    >>>     online_under_over_bagging.partial_fit(X, y)
    >>>     n_samples += 1
    >>>
    >>> # Display results
    >>> print('{} samples analyzed.'.format(n_samples))
    >>> print('Online Under Over Bagging performance: {}'.format(correct_cnt / n_samples))
    """

    def __init__(
            self,
            base_estimator=KNNADWINClassifier(),
            n_estimators=10,
            sampling_rate=2,
            drift_detection=True,
            random_state=None):
        # NOTE(review): the default KNNADWINClassifier() is a mutable default
        # argument shared across calls; __configure deep-copies it but also
        # calls reset() on the shared prototype -- confirm that is intended.
        super().__init__()
        # default values
        self.base_estimator = base_estimator
        self.n_estimators = n_estimators
        self.sampling_rate = sampling_rate
        self.drift_detection = drift_detection
        self.random_state = random_state
        self.ensemble = None
        self.actual_n_estimators = None
        self.classes = None
        self._random_state = None
        self.n_samples = None
        self.adwin_ensemble = None

    def __configure(self):
        """Lazily build the ensemble: one deep copy of the base estimator and
        one ADWIN detector per member, plus the seeded random state."""
        if hasattr(self.base_estimator, "reset"):
            self.base_estimator.reset()
        self.actual_n_estimators = self.n_estimators
        self.adwin_ensemble = []
        for i in range(self.actual_n_estimators):
            self.adwin_ensemble.append(ADWIN())
        self.ensemble = [cp.deepcopy(self.base_estimator) for _ in range(self.actual_n_estimators)]
        self._random_state = check_random_state(self.random_state)

    def reset(self):
        """Discard all learned state and rebuild the ensemble from scratch."""
        self.__configure()

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """ Partially fits the model, based on the X and y matrix.
        Since it's an ensemble learner, if X and y matrix of more than one
        sample are passed, the algorithm will partial fit the model one sample
        at a time.
        Each sample is trained by each classifier a total of K times, where K
        is drawn by a Poisson(l) distribution. l is updated after every example
        using :math:`lambda_{sc}` if th estimator correctly classifies the example or
        :math:`lambda_{sw}` in the other case.
        Parameters
        ----------
        X : numpy.ndarray of shape (n_samples, n_features)
            The features to train the model.
        y: numpy.ndarray of shape (n_samples)
            An array-like with the class labels of all samples in X.
        classes: numpy.ndarray, optional (default=None)
            Array with all possible/known class labels. This is an optional parameter, except
            for the first partial_fit call where it is compulsory.
        sample_weight: Array-like
            Instance weight. If not provided, uniform weights are assumed.
            Usage varies depending on the base estimator.
        Raises
        ------
        ValueError: A ValueError is raised if the 'classes' parameter is not
            passed in the first partial_fit call, or if they are passed in further
            calls but differ from the initial classes list passed.
        """
        if self.ensemble is None:
            self.__configure()
        if self.classes is None:
            if classes is None:
                raise ValueError("The first partial_fit call should pass all the classes.")
            else:
                self.classes = classes
        if self.classes is not None and classes is not None:
            if set(self.classes) == set(classes):
                pass
            else:
                raise ValueError("The classes passed to the partial_fit function differ "
                                 "from those passed earlier.")
        self.__adjust_ensemble_size()
        r, _ = get_dimensions(X)
        for j in range(r):
            change_detected = False
            for i in range(self.actual_n_estimators):
                # Each member i uses a progressively larger fraction a of the
                # sampling rate, varying resampling across the ensemble.
                a = (i + 1) / self.actual_n_estimators
                if y[j] == 1:
                    # Positive (minority) class is oversampled via a higher
                    # Poisson rate.
                    lam = a * self.sampling_rate
                else:
                    lam = a
                k = self._random_state.poisson(lam)
                if k > 0:
                    for b in range(k):
                        self.ensemble[i].partial_fit([X[j]], [y[j]], classes, sample_weight)
                if self.drift_detection:
                    try:
                        pred = self.ensemble[i].predict(X)
                        error_estimation = self.adwin_ensemble[i].estimation
                        for k in range(r):
                            if pred[k] is not None:
                                self.adwin_ensemble[i].add_element(int(pred[k] == y[k]))
                        if self.adwin_ensemble[i].detected_change():
                            if self.adwin_ensemble[i].estimation > error_estimation:
                                change_detected = True
                    except ValueError:
                        change_detected = False
                        pass
            if change_detected and self.drift_detection:
                # A drift was detected: reset the worst-performing member
                # (highest ADWIN error estimation) and its detector.
                max_threshold = 0.0
                i_max = -1
                for i in range(self.actual_n_estimators):
                    if max_threshold < self.adwin_ensemble[i].estimation:
                        max_threshold = self.adwin_ensemble[i].estimation
                        i_max = i
                if i_max != -1:
                    self.ensemble[i_max].reset()
                    self.adwin_ensemble[i_max] = ADWIN()
        return self

    def __adjust_ensemble_size(self):
        """Grow the ensemble (and its detectors) if new classes appeared,
        keeping one member per known class at minimum."""
        if len(self.classes) != len(self.ensemble):
            if len(self.classes) > len(self.ensemble):
                for i in range(len(self.ensemble), len(self.classes)):
                    self.ensemble.append(cp.deepcopy(self.base_estimator))
                    self.actual_n_estimators += 1
                    self.adwin_ensemble.append(ADWIN())

    def predict(self, X):
        """ predict
        The predict function will average the predictions from all its learners
        to find the most likely prediction for the sample matrix X.
        Parameters
        ----------
        X: Numpy.ndarray of shape (n_samples, n_features)
            A matrix of the samples we want to predict.
        Returns
        -------
        numpy.ndarray
            A numpy.ndarray with the label prediction for all the samples in X.
        """
        r, c = get_dimensions(X)
        proba = self.predict_proba(X)
        predictions = []
        if proba is None:
            return None
        for i in range(r):
            # Most probable class per sample.
            predictions.append(np.argmax(proba[i]))
        return np.asarray(predictions)

    def predict_proba(self, X):
        """ predict_proba
        Predicts the probability of each sample belonging to each one of the
        known classes.
        Parameters
        ----------
        X: Numpy.ndarray of shape (n_samples, n_features)
            A matrix of the samples we want to predict.
        Raises
        ------
        ValueError: A ValueError is raised if the number of classes in the base_estimator
        learner differs from that of the ensemble learner.
        Returns
        -------
        numpy.ndarray
            An array of shape (n_samples, n_features), in which each outer entry is
            associated with the X entry of the same index. And where the list in
            index [i] contains len(self.target_values) elements, each of which represents
            the probability that the i-th sample of X belongs to a certain label.
        """
        proba = []
        r, c = get_dimensions(X)
        if self.ensemble is None:
            return np.zeros((r, 1))
        # Context manager to catch errors raised by numpy as RuntimeWarning
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                for i in range(self.actual_n_estimators):
                    partial_proba = self.ensemble[i].predict_proba(X)
                    if len(partial_proba[0]) > max(self.classes) + 1:
                        raise ValueError("The number of classes in the base learner is larger "
                                         "than in the ensemble.")
                    if len(proba) < 1:
                        for n in range(r):
                            proba.append([0.0 for _ in partial_proba[n]])
                    for n in range(r):
                        for k in range(len(partial_proba[n])):
                            try:
                                proba[n][k] += partial_proba[n][k]
                            except IndexError:
                                proba[n].append(partial_proba[n][k])
            except RuntimeWarning:
                # Catch division by zero errors raised by numpy as RuntimeWarning
                # NOTE(review): 'continue' here is not inside any loop -- as
                # written this line is a SyntaxError; it likely should be
                # 'pass' (fall through to normalization) or the try/except
                # should sit inside the for loop. Verify against upstream.
                continue
            except ValueError:
                return np.zeros((r, 1))
            except TypeError:
                return np.zeros((r, 1))
        # normalizing probabilities
        sum_proba = []
        for k in range(r):
            sum_proba.append(np.sum(proba[k]))
        aux = []
        for i in range(len(proba)):
            if sum_proba[i] > 0.:
                aux.append([x / sum_proba[i] for x in proba[i]])
            else:
                aux.append(proba[i])
        return np.asarray(aux)
| 40.028986 | 99 | 0.594859 |
79529625059eac765012ea440b1e26da5e26caf7 | 15,122 | py | Python | sdk/python/pulumi_azure_native/documentdb/v20190801preview/private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/documentdb/v20190801preview/private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/documentdb/v20190801preview/private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
# NOTE: generated by the Pulumi SDK generator (see file header); keep manual
# edits minimal so they are easy to re-apply after regeneration.
@pulumi.input_type
class PrivateEndpointConnectionArgs:
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 group_id: Optional[pulumi.Input[str]] = None,
                 private_endpoint: Optional[pulumi.Input['PrivateEndpointPropertyArgs']] = None,
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a PrivateEndpointConnection resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[str] group_id: Group id of the private endpoint.
        :param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.
        :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
        :param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
        :param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional inputs are only registered when explicitly provided.
        if group_id is not None:
            pulumi.set(__self__, "group_id", group_id)
        if private_endpoint is not None:
            pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_endpoint_connection_name is not None:
            pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
        if private_link_service_connection_state is not None:
            pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)

    # The properties below are generated getter/setter pairs backed by
    # pulumi.get/pulumi.set on this input type.
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        Cosmos DB database account name.
        """
        return pulumi.get(self, "account_name")

    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="groupId")
    def group_id(self) -> Optional[pulumi.Input[str]]:
        """
        Group id of the private endpoint.
        """
        return pulumi.get(self, "group_id")

    @group_id.setter
    def group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_id", value)

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointPropertyArgs']]:
        """
        Private endpoint which the connection belongs to.
        """
        return pulumi.get(self, "private_endpoint")

    @private_endpoint.setter
    def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointPropertyArgs']]):
        pulumi.set(self, "private_endpoint", value)

    @property
    @pulumi.getter(name="privateEndpointConnectionName")
    def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the private endpoint connection.
        """
        return pulumi.get(self, "private_endpoint_connection_name")

    @private_endpoint_connection_name.setter
    def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_endpoint_connection_name", value)

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]:
        """
        Connection State of the Private Endpoint Connection.
        """
        return pulumi.get(self, "private_link_service_connection_state")

    @private_link_service_connection_state.setter
    def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]):
        pulumi.set(self, "private_link_service_connection_state", value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Provisioning state of the private endpoint.
        """
        return pulumi.get(self, "provisioning_state")

    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
class PrivateEndpointConnection(pulumi.CustomResource):
    """A private endpoint connection resource with the type token
    'azure-native:documentdb/v20190801preview:PrivateEndpointConnection'.

    NOTE(review): follows the auto-generated Pulumi provider SDK pattern:
    two typed @overload constructors whose bodies are ellipses, a dispatching
    __init__, and _internal_init doing the real registration work.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 group_id: Optional[pulumi.Input[str]] = None,
                 private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A private endpoint connection
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input[str] group_id: Group id of the private endpoint.
        :param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.
        :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
        :param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
        :param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PrivateEndpointConnectionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A private endpoint connection
        :param str resource_name: The name of the resource.
        :param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch to _internal_init, either from an args object or from
        # individual keyword arguments, depending on how we were called.
        resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                      resource_name: str,
                      opts: Optional[pulumi.ResourceOptions] = None,
                      account_name: Optional[pulumi.Input[str]] = None,
                      group_id: Optional[pulumi.Input[str]] = None,
                      private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
                      private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                      private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
                      provisioning_state: Optional[pulumi.Input[str]] = None,
                      resource_group_name: Optional[pulumi.Input[str]] = None,
                      __props__=None):
        # Normalise/validate the resource options before use.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (no existing id): build the props bag
            # and enforce required inputs.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            __props__.__dict__["group_id"] = group_id
            __props__.__dict__["private_endpoint"] = private_endpoint
            __props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
            __props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
            __props__.__dict__["provisioning_state"] = provisioning_state
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            # Output-only properties start as None and are populated by the engine.
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Aliases map older/equivalent type tokens to this resource type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:PrivateEndpointConnection")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(PrivateEndpointConnection, __self__).__init__(
            'azure-native:documentdb/v20190801preview:PrivateEndpointConnection',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
        """
        Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from the
        # existing resource's state.
        __props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
        __props__.__dict__["group_id"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["private_endpoint"] = None
        __props__.__dict__["private_link_service_connection_state"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["type"] = None
        return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="groupId")
    def group_id(self) -> pulumi.Output[Optional[str]]:
        """
        Group id of the private endpoint.
        """
        return pulumi.get(self, "group_id")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
        """
        Private endpoint which the connection belongs to.
        """
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
        """
        Connection State of the Private Endpoint Connection.
        """
        return pulumi.get(self, "private_link_service_connection_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[Optional[str]]:
        """
        Provisioning state of the private endpoint.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
| 51.261017 | 1,154 | 0.698386 |
7952968675e874881553678987e3b67361b78e7d | 5,857 | py | Python | bot/cogs/thank.py | Nirlep5252/Tech-Struck | 87e00e4565f7bf7b9d667271ea3d625e40085895 | [
"MIT"
] | 2 | 2021-06-12T19:37:42.000Z | 2021-12-12T03:17:20.000Z | bot/cogs/thank.py | zeffo/Tech-Struck | 89dffa0e650d0102139bb932d046b08b371d3c61 | [
"MIT"
] | null | null | null | bot/cogs/thank.py | zeffo/Tech-Struck | 89dffa0e650d0102139bb932d046b08b371d3c61 | [
"MIT"
] | null | null | null | import asyncio
from typing import Optional
from discord import Color, Embed, Member, Reaction
from discord.ext import commands
from tortoise.functions import Count, Q
from models import ThankModel, UserModel
# Template for the moderator confirmation embed shown before deleting a
# thank record; formatted with a ThankModel instance.
delete_thank_message = """**Thanked**: <@!{0.thanked_id}>
**Thanker**: <@!{0.thanker_id}>
**Description**: {0.description}
**Time**: {0.time}\n
Confirmation required!"""

# Template for one entry in the `thank list` output; formatted with a
# ThankModel instance.
thank_list_message = """`{0.time:%D %T}` ID:`{0.id}`
From: <@!{0.thanker_id}> ({0.thanker_id})
Description: {0.description}\n"""
class Thank(commands.Cog):
    """Commands related to thanking members/helpers for help received"""

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.group(invoke_without_command=True)
    async def thank(self, ctx: commands.Context, recv: Member, *, description: str):
        """Thank someone for their help with a description to show gratitude"""
        if recv.id == ctx.author.id:
            return await ctx.send(
                embed=Embed(
                    title="Bruh",
                    description="You can't thank yourselves",
                    color=Color.red(),
                )
            )
        if recv.bot:
            return await ctx.send(
                embed=Embed(
                    title="Bruh", description="You can't thank a bot", color=Color.red()
                )
            )
        # TODO: Convert this to an expression (?) for efficiency
        thanked, _ = await UserModel.get_or_create(id=recv.id)
        thanker, _ = await UserModel.get_or_create(id=ctx.author.id)
        await ThankModel.create(
            thanker=thanker,
            thanked=thanked,
            description=description,
            guild_id=ctx.guild.id,
        )
        await ctx.send(f"You have thanked {recv}")

    @thank.command(name="stats", aliases=["check"])
    async def thank_stats(
        self, ctx: commands.Context, *, member: Optional[Member] = None
    ):
        """View stats for thanks you've received and sent, in the current server and globally"""
        member = member or ctx.author
        sent_thanks = await ThankModel.filter(thanker__id=member.id).count()
        recv_thanks = await ThankModel.filter(thanked__id=member.id).count()
        server_sent_thanks = await ThankModel.filter(
            thanker__id=member.id, guild__id=ctx.guild.id
        ).count()
        server_recv_thanks = await ThankModel.filter(
            thanked__id=member.id, guild__id=ctx.guild.id
        ).count()
        embed = Embed(title=f"Thank stats for: {member}", color=Color.green())
        embed.add_field(
            name="Thanks received",
            value="Global: {}\nThis server: {}".format(recv_thanks, server_recv_thanks),
        )
        embed.add_field(
            name="Thanks sent",
            value="Global: {}\nThis server: {}".format(sent_thanks, server_sent_thanks),
        )
        await ctx.send(embed=embed)

    @thank.command(name="leaderboard", aliases=["lb"])
    async def thank_leaderboard(self, ctx: commands.Context):
        """View a leaderboard of top helpers in the current server"""
        await ctx.trigger_typing()
        lb = (
            await UserModel.annotate(
                thank_count=Count("thanks", _filter=Q(thanks__guild_id=ctx.guild.id))
            )
            .filter(thank_count__gt=0)
            .order_by("-thank_count")
            .limit(5)
        )
        if not lb:
            return await ctx.send(
                embed=Embed(
                    title="Oopsy",
                    description="There are no thanks here yet!",
                    color=Color.red(),
                )
            )
        invis = "\u2800"
        embed = Embed(
            title="LeaderBoard",
            color=Color.blue(),
            description="\n\n".join(
                [
                    f"**{m.thank_count} Thanks**{invis * (4 - len(str(m.thank_count)))}<@!{m.id}>"
                    for m in lb
                ]
            ),
        )
        await ctx.send(embed=embed)

    @thank.command(name="delete")
    @commands.has_guild_permissions(kick_members=True)
    async def delete_thank(self, ctx: commands.Context, thank_id: int):
        """Remove an invalid/fake thank record"""
        thank = await ThankModel.get_or_none(pk=thank_id, guild_id=ctx.guild.id)
        if not thank:
            return await ctx.send("Thank with given ID not found")
        msg = await ctx.send(
            embed=Embed(
                title="Delete thank",
                description=delete_thank_message.format(thank),
            )
        )
        await msg.add_reaction("\u2705")
        await msg.add_reaction("\u274e")

        def check(r: Reaction, u: Member):
            # Only accept a tick/cross from the invoking moderator, and only
            # on the confirmation message itself (previously any message's
            # reaction would be accepted).
            return (
                u.id == ctx.author.id
                and r.message.id == msg.id
                and str(r.emoji) in ("\u2705", "\u274e")
            )

        try:
            # A timeout is required here: without one, wait_for never raises
            # asyncio.TimeoutError and the cancellation branch below was
            # unreachable dead code.
            r, _ = await self.bot.wait_for("reaction_add", check=check, timeout=60)
        except asyncio.TimeoutError:
            return await ctx.reply("Cancelled.")
        if str(r.emoji) == "\u2705":
            await thank.delete()
            return await ctx.reply("Deleted.")
        return await ctx.reply("Cancelled.")

    @thank.command(name="list")
    @commands.has_guild_permissions(kick_members=True)
    async def list_thanks(self, ctx: commands.Context, member: Member):
        """List the most recent 10 thanks received by a user in the current server"""
        thanks = (
            await ThankModel.filter(thanked_id=member.id, guild_id=ctx.guild.id)
            .order_by("-time")
            .limit(10)
        )
        await ctx.send(
            embed=Embed(
                title="Listing",
                description="\n".join([thank_list_message.format(t) for t in thanks]),
                color=Color.dark_blue(),
            )
        )
def setup(bot: commands.Bot):
    """discord.py extension entry point: load the Thank cog onto the bot."""
    bot.add_cog(Thank(bot))
| 35.932515 | 98 | 0.571282 |
7952969e6d0b68c2f855a88488afaf384b30fc3f | 2,377 | py | Python | days/day_04/main.py | gkpotter/advent-of-code-2020 | 76ed77719a8f6396511dabce99d46995946edb01 | [
"MIT"
] | null | null | null | days/day_04/main.py | gkpotter/advent-of-code-2020 | 76ed77719a8f6396511dabce99d46995946edb01 | [
"MIT"
] | null | null | null | days/day_04/main.py | gkpotter/advent-of-code-2020 | 76ed77719a8f6396511dabce99d46995946edb01 | [
"MIT"
] | null | null | null | import time
import os
def all_fields_present(passport):
    """Return True if every required passport field key is present
    (cid is intentionally not required)."""
    required = ('byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid')
    return all(key in passport for key in required)
def is_valid(passport):
    """Return True if all required fields are present and every field value
    passes the part-two validation rules (year ranges, height, hair/eye
    colour, passport id)."""
    if not all_fields_present(passport):
        return False
    byr = passport['byr']
    if not (is_year(byr) and int(byr) in range(1920, 2003)):
        return False
    iyr = passport['iyr']
    if not (is_year(iyr) and int(iyr) in range(2010, 2021)):
        return False
    eyr = passport['eyr']
    if not (is_year(eyr) and int(eyr) in range(2020, 2031)):
        return False
    if not valid_height(passport['hgt']):
        return False
    hcl = passport['hcl']
    # startswith avoids an IndexError on an empty value (hcl[0] would raise)
    if not (hcl.startswith('#') and len(hcl) == 7 and
            all(is_digit(x) or x in 'abcdef' for x in hcl[1:])):
        return False
    if passport['ecl'] not in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:
        return False
    pid = passport['pid']
    if not (len(pid) == 9 and all(is_digit(x) for x in pid)):
        return False
    return True
def is_year(y):
    """Return True if *y* is a four-character ASCII-digit string.

    Checks digits explicitly instead of relying on the exception-driven
    per-character int() conversion, which would also accept non-ASCII
    decimal digits.
    """
    return len(y) == 4 and all(c in '0123456789' for c in y)
def is_digit(x):
    """Return True if *x* converts (via int) to a single digit 0-9."""
    try:
        return 0 <= int(x) <= 9
    except Exception:
        return False
def valid_height(hgt):
    """Return True for heights of '59in'-'76in' or '150cm'-'193cm'."""
    try:
        # The unit suffixes are mutually exclusive (different lengths), so
        # each branch can return directly.
        if hgt[2:] == 'in':
            return 59 <= int(hgt[:2]) <= 76
        if hgt[3:] == 'cm':
            return 150 <= int(hgt[:3]) <= 193
        return False
    except Exception:
        return False
def str_to_passport(s):
    """Parse a string of space-separated 'key:value' tokens into a dict."""
    pairs = (token.split(':') for token in s.strip().split(' '))
    return {pair[0]: pair[1] for pair in pairs}
def part_one(passports):
    """Count passports that have all required fields present."""
    return sum(all_fields_present(passport) for passport in passports)
def part_two(passports):
    """Count passports that pass full field validation."""
    return sum(is_valid(passport) for passport in passports)
def main():
    """Read input.txt (blank-line-separated passport records), solve both
    parts, and print the answers with a timing line."""
    start_time = time.time()
    with open(os.path.dirname(__file__) + '/input.txt', 'r') as data:
        passports = []
        s = ''
        for line in data.readlines():
            if line == '\n':
                # A blank line terminates the current passport record.
                passports.append(str_to_passport(s))
                s = ''
            else:
                s += line.strip()+' '
        # The final record has no trailing blank line.
        passports.append(str_to_passport(s))
    part_one_ans = part_one(passports)
    part_two_ans = part_two(passports)
    print('Day 4 ({:,.3f}s)'.format(time.time() - start_time))
    print(' Part 1: {}'.format(part_one_ans))
    print(' Part 2: {}'.format(part_two_ans))
if __name__ == "__main__":
main() | 20.491379 | 76 | 0.641565 |
7952972e448d8c9865fc3333c4eed1ec992fd69c | 6,228 | py | Python | wagtailmenus/views.py | gidsey/wagtailmenus | f1975e501c22258ec9e4aa78a53511946eeeac64 | [
"MIT"
] | null | null | null | wagtailmenus/views.py | gidsey/wagtailmenus | f1975e501c22258ec9e4aa78a53511946eeeac64 | [
"MIT"
] | null | null | null | wagtailmenus/views.py | gidsey/wagtailmenus | f1975e501c22258ec9e4aa78a53511946eeeac64 | [
"MIT"
] | null | null | null | from copy import copy
from django import forms
from django.contrib.admin.utils import quote, unquote
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
from wagtail.admin import messages
from wagtail.admin.edit_handlers import ObjectList, TabbedInterface
from wagtail.contrib.modeladmin.views import (
WMABaseView, CreateView, EditView, ModelFormView
)
from wagtail.core.models import Site
from wagtailmenus.conf import settings
class SiteSwitchForm(forms.Form):
    """Form rendering a site chooser whose option values are the per-site
    main-menu edit URLs; the bundled JS redirects on change."""

    site = forms.ChoiceField(choices=[])

    class Media:
        js = [
            'wagtailmenus/js/site-switcher.js',
        ]

    def __init__(self, current_site, url_helper, **kwargs):
        super().__init__(
            initial={'site': url_helper.get_action_url('edit', current_site.pk)},
            **kwargs,
        )
        self.fields['site'].choices = [
            (url_helper.get_action_url('edit', site.pk), site)
            for site in Site.objects.all()
        ]
class MainMenuIndexView(WMABaseView):
    """Index view that immediately redirects to the edit view for the main
    menu of the site serving the current request."""
    def dispatch(self, request, *args, **kwargs):
        site = Site.find_for_request(request)
        return redirect(
            self.model_admin.url_helper.get_action_url('edit', site.pk))
class MenuTabbedInterfaceMixin:
    """Mixin providing the edit handler used by the menu create/edit views,
    with compatibility shims for multiple Wagtail versions."""

    def get_edit_handler(self):
        """Return a bound edit handler for the menu model.

        Preference order: an explicit ``edit_handler`` attribute on the
        model, then a plain ``panels`` list, otherwise a default tabbed
        Content/Settings interface built from ``content_panels`` and
        ``settings_panels``.
        """
        if hasattr(self.model, 'edit_handler'):
            edit_handler = self.model.edit_handler
        elif hasattr(self.model, 'panels'):
            edit_handler = ObjectList(self.model.panels)
        else:
            edit_handler = TabbedInterface([
                ObjectList(self.model.content_panels, heading=_("Content")),
                ObjectList(self.model.settings_panels, heading=_("Settings"),
                           classname="settings"),
            ])
        if hasattr(edit_handler, 'bind_to'):
            # For Wagtail>=2.5
            return edit_handler.bind_to(model=self.model)
        return edit_handler.bind_to_model(self.model)

    def form_invalid(self, form):
        # TODO: This override is only required for Wagtail<2.1
        messages.validation_error(
            self.request, self.get_error_message(), form
        )
        return self.render_to_response(self.get_context_data())
class MainMenuEditView(MenuTabbedInterfaceMixin, ModelFormView):
    """Edit view for a site's main menu.

    The URL identifies a Site (not a menu): the menu instance for that site
    is resolved in __init__ via ``model.get_for_site()`` and saved so an
    instance always exists. When multiple Sites are configured, a site
    switcher form is rendered to jump between per-site menus.
    """
    page_title = _('Editing')
    instance_pk = None
    instance = None

    def __init__(self, model_admin, instance_pk):
        super().__init__(model_admin)
        self.instance_pk = unquote(instance_pk)
        self.pk_safe = quote(self.instance_pk)
        # instance_pk is a Site id, not a menu id.
        self.site = get_object_or_404(Site, id=self.instance_pk)
        self.instance = self.model.get_for_site(self.site)
        # Save immediately so the menu row exists even before first edit.
        self.instance.save()

    @property
    def media(self):
        media = super().media
        if self.site_switcher:
            media += self.site_switcher.media
        return media

    @property
    def edit_url(self):
        return self.url_helper.get_action_url('edit', self.instance_pk)

    def get_meta_title(self):
        return _('Editing %(model_name)s') % {
            'model_name': self.opts.verbose_name
        }

    def get_page_subtitle(self):
        return capfirst(self.opts.verbose_name)

    def dispatch(self, request, *args, **kwargs):
        user = request.user
        if not self.permission_helper.user_can_edit_obj(user, self.instance):
            raise PermissionDenied
        self.site_switcher = None
        if Site.objects.count() > 1:
            url_helper = self.model_admin.url_helper
            self.site_switcher = SiteSwitchForm(self.site, url_helper)
            # Support ?site=<id> links from the switcher by redirecting to
            # the canonical edit URL for that site.
            site_from_get = request.GET.get('site', None)
            if site_from_get and site_from_get != self.instance_pk:
                return redirect(
                    url_helper.get_action_url('edit', site_from_get))
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'site': self.site,
            'site_switcher': self.site_switcher,
        })
        return context

    def form_valid(self, form):
        form.save()
        messages.success(self.request, _("Main menu updated successfully."))
        return redirect(self.edit_url)

    def get_error_message(self):
        return _("The menu could not be saved due to errors.")

    def get_template_names(self):
        return ['wagtailmenus/mainmenu_edit.html']
class FlatMenuCreateView(MenuTabbedInterfaceMixin, CreateView):
    """Create view for flat menus, using the tabbed menu edit interface."""
    pass
class FlatMenuEditView(MenuTabbedInterfaceMixin, EditView):
    """Edit view for flat menus, using the tabbed menu edit interface."""
    pass
class FlatMenuCopyView(FlatMenuEditView):
    """Copy view for flat menus: renders the edit form for an existing menu
    but saves the submission as a brand-new menu (plus new menu items)."""
    page_title = _('Copying')

    @property
    def copy_url(self):
        return self.url_helper.get_action_url('copy', self.pk_quoted)

    def get_meta_title(self):
        return _('Copying %(model_name)s') % {
            'model_name': self.opts.verbose_name,
        }

    def check_action_permitted(self, user):
        # Copying creates a new object, so require 'create' permission
        # rather than 'edit'.
        return self.permission_helper.user_can_create(user)

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        """
        When the form is posted, don't pass an instance to the form. It should
        create a new one out of the posted data. We also need to nullify any
        IDs posted for inline menu items, so that new instances of those are
        created too.
        """
        if self.request.method == 'POST':
            data = copy(self.request.POST)
            i = 0
            while(data.get('%s-%s-id' % (
                settings.FLAT_MENU_ITEMS_RELATED_NAME, i
            ))):
                data['%s-%s-id' % (
                    settings.FLAT_MENU_ITEMS_RELATED_NAME, i
                )] = None
                i += 1
            kwargs.update({
                'data': data,
                'instance': self.model()
            })
        return kwargs

    def get_success_message(self, instance):
        return _("Flat menu '{instance}' created.").format(instance=instance)

    def get_template_names(self):
        return ['wagtailmenus/flatmenu_copy.html']
79529769af4c820393d0e8523f2c7c8c07737628 | 22,512 | py | Python | contrib/devtools/copyright_header.py | Crypto-Robin-Hood/Qbitcash | 65ee26b84c8046ef68b668b87f4ef376bdf18462 | [
"MIT"
] | 6 | 2020-06-10T09:46:27.000Z | 2021-09-04T21:22:39.000Z | contrib/devtools/copyright_header.py | Crypto-Robin-Hood/Qbitcash | 65ee26b84c8046ef68b668b87f4ef376bdf18462 | [
"MIT"
] | 1 | 2020-07-25T17:28:27.000Z | 2020-08-10T05:02:11.000Z | contrib/devtools/copyright_header.py | Crypto-Robin-Hood/Qbitcash | 65ee26b84c8046ef68b668b87f4ef376bdf18462 | [
"MIT"
] | 3 | 2020-08-28T05:51:21.000Z | 2021-06-08T22:05:13.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# libsecp256k1:
'src/secp256k1/include/secp256k1.h',
'src/secp256k1/include/secp256k1_ecdh.h',
'src/secp256k1/include/secp256k1_recovery.h',
'src/secp256k1/include/secp256k1_schnorr.h',
'src/secp256k1/src/java/org_bitcoinrand_NativeSecp256k1.c',
'src/secp256k1/src/java/org_bitcoinrand_NativeSecp256k1.h',
'src/secp256k1/src/java/org_bitcoinrand_Secp256k1Context.c',
'src/secp256k1/src/java/org_bitcoinrand_Secp256k1Context.h',
# univalue:
'src/univalue/test/object.cpp',
'src/univalue/lib/univalue_escapes.h',
# auto generated:
'src/qt/bitcoinrandstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/tinyformat.h',
'src/leveldb/util/env_win.cc',
'src/crypto/ctaes/bench.c',
'test/functional/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
    """Return True when *filename* matches an INCLUDE pattern and is not
    matched by any EXCLUDE pattern."""
    excluded = EXCLUDE_COMPILED.match(filename) is not None
    included = INCLUDE_COMPILED.match(filename) is not None
    return included and not excluded
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files'
def call_git_ls():
    """Run `git ls-files` in the current directory and return the non-empty
    lines of its output (the tracked file paths)."""
    out = subprocess.check_output(GIT_LS_CMD.split(' '))
    return [f for f in out.decode("utf-8").split('\n') if f != '']
def get_filenames_to_examine():
    """Return the sorted git-tracked filenames that pass the INCLUDE/EXCLUDE
    filter."""
    return sorted(name for name in call_git_ls() if applies_to_file(name))
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
COPYRIGHT_WITH_C = 'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
def compile_copyright_regex(copyright_style, year_style, name):
    """Compile a regex matching '<copyright_style> <year_style> <name>'."""
    return re.compile('{} {} {}'.format(copyright_style, year_style, name))
EXPECTED_HOLDER_NAMES = [
"Satoshi Nakamoto\n",
"The Bitcoinrand Core developers\n",
"The Bitcoinrand Core developers \n",
"Bitcoinrand Core Developers\n",
"the Bitcoinrand Core developers\n",
"The Bitcoinrand developers\n",
"The LevelDB Authors\. All rights reserved\.\n",
"BitPay Inc\.\n",
"BitPay, Inc\.\n",
"University of Illinois at Urbana-Champaign\.\n",
"MarcoFalke\n",
"Pieter Wuille\n",
"Pieter Wuille +\*\n",
"Pieter Wuille, Gregory Maxwell +\*\n",
"Pieter Wuille, Andrew Poelstra +\*\n",
"Andrew Poelstra +\*\n",
"Wladimir J. van der Laan\n",
"Jeff Garzik\n",
"Diederik Huys, Pieter Wuille +\*\n",
"Thomas Daede, Cory Fields +\*\n",
"Jan-Klaas Kollhof\n",
"Sam Rushing\n",
"ArtForz -- public domain half-a-node\n",
]
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
DOMINANT_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
YEAR_LIST_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
WITHOUT_C_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
    """Return how many copyright notices (any style, any holder) appear in
    *contents*."""
    return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
    """True if *contents* contains a 'Copyright (c) <year-range> <holder>'
    notice for *holder_name*."""
    return DOMINANT_STYLE_COMPILED[holder_name].search(contents) is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
    """True if *contents* contains a 'Copyright (c) <year1>, <year2>, ...'
    notice for *holder_name*."""
    return YEAR_LIST_STYLE_COMPILED[holder_name].search(contents) is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
    """True if *contents* contains a 'Copyright <years> <holder>' notice
    (no '(c)') for *holder_name*."""
    return WITHOUT_C_STYLE_COMPILED[holder_name].search(contents) is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    """Return the full text of *filename* (UTF-8).

    Uses a context manager so the file handle is closed promptly instead of
    being leaked until garbage collection.
    """
    with open(os.path.abspath(filename), 'r', encoding="utf8") as f:
        return f.read()
def gather_file_info(filename):
    """Build a per-file dict describing which copyright styles/holders occur.

    Keys: 'filename', 'contents', 'all_copyrights' (count of any-style
    notices), 'classified_copyrights' (holders matched by at least one known
    style), and three per-holder boolean maps: 'dominant_style',
    'year_list_style', 'without_c_style'.
    """
    info = {}
    info['filename'] = filename
    c = read_file(filename)
    info['contents'] = c
    info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
    info['classified_copyrights'] = 0
    info['dominant_style'] = {}
    info['year_list_style'] = {}
    info['without_c_style'] = {}
    for holder_name in EXPECTED_HOLDER_NAMES:
        has_dominant_style = (
            file_has_dominant_style_copyright_for_holder(c, holder_name))
        has_year_list_style = (
            file_has_year_list_style_copyright_for_holder(c, holder_name))
        has_without_c_style = (
            file_has_without_c_style_copyright_for_holder(c, holder_name))
        info['dominant_style'][holder_name] = has_dominant_style
        info['year_list_style'][holder_name] = has_year_list_style
        info['without_c_style'][holder_name] = has_without_c_style
        # Count each holder at most once, whichever style(s) matched.
        if has_dominant_style or has_year_list_style or has_without_c_style:
            info['classified_copyrights'] = info['classified_copyrights'] + 1
    return info
################################################################################
# report execution
################################################################################
SEPARATOR = '-'.join(['' for _ in range(80)])
def print_filenames(filenames, verbose):
    """Print each filename indented with a tab, but only in verbose mode."""
    if verbose:
        for filename in filenames:
            print("\t%s" % filename)
def print_report(file_infos, verbose):
    """Print a categorised summary of the gathered copyright info to stdout:
    counts by number of notices per file, then per-holder breakdowns for
    each recognised style, then files with unrecognised holders. With
    verbose=True the files in each category are listed."""
    print(SEPARATOR)
    examined = [i['filename'] for i in file_infos]
    print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
          len(examined))
    print_filenames(examined, verbose)
    print(SEPARATOR)
    print('')
    zero_copyrights = [i['filename'] for i in file_infos if
                       i['all_copyrights'] == 0]
    print("%4d with zero copyrights" % len(zero_copyrights))
    print_filenames(zero_copyrights, verbose)
    one_copyright = [i['filename'] for i in file_infos if
                     i['all_copyrights'] == 1]
    print("%4d with one copyright" % len(one_copyright))
    print_filenames(one_copyright, verbose)
    two_copyrights = [i['filename'] for i in file_infos if
                      i['all_copyrights'] == 2]
    print("%4d with two copyrights" % len(two_copyrights))
    print_filenames(two_copyrights, verbose)
    three_copyrights = [i['filename'] for i in file_infos if
                        i['all_copyrights'] == 3]
    print("%4d with three copyrights" % len(three_copyrights))
    print_filenames(three_copyrights, verbose)
    four_or_more_copyrights = [i['filename'] for i in file_infos if
                               i['all_copyrights'] >= 4]
    print("%4d with four or more copyrights" % len(four_or_more_copyrights))
    print_filenames(four_or_more_copyrights, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
          '"<year>" or "<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        dominant_style = [i['filename'] for i in file_infos if
                          i['dominant_style'][holder_name]]
        if len(dominant_style) > 0:
            print("%4d with '%s'" % (len(dominant_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(dominant_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
          '"<year1>, <year2>, ...":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        year_list_style = [i['filename'] for i in file_infos if
                           i['year_list_style'][holder_name]]
        if len(year_list_style) > 0:
            print("%4d with '%s'" % (len(year_list_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(year_list_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
          '"<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        without_c_style = [i['filename'] for i in file_infos if
                           i['without_c_style'][holder_name]]
        if len(without_c_style) > 0:
            print("%4d with '%s'" % (len(without_c_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(without_c_style, verbose)
    print('')
    print(SEPARATOR)
    unclassified_copyrights = [i['filename'] for i in file_infos if
                               i['classified_copyrights'] < i['all_copyrights']]
    print("%d with unexpected copyright holder names" %
          len(unclassified_copyrights))
    print_filenames(unclassified_copyrights, verbose)
    print(SEPARATOR)
def exec_report(base_directory, verbose):
    """chdir into *base_directory*, print the copyright report for the repo
    there, and restore the original working directory."""
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    try:
        filenames = get_filenames_to_examine()
        file_infos = [gather_file_info(f) for f in filenames]
        print_report(file_infos, verbose)
    finally:
        # Restore the cwd even if gathering/printing raises, so callers
        # aren't left in a different working directory.
        os.chdir(original_cwd)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a bitcoinrand source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
    """Handle the 'report' subcommand.

    Expects argv == [script, 'report', <base_directory>[, 'verbose']];
    exits with a usage/error message for bad arguments.
    """
    if len(argv) == 2:
        sys.exit(REPORT_USAGE)

    base_directory = argv[2]
    if not os.path.exists(base_directory):
        sys.exit("*** bad <base_directory>: %s" % base_directory)

    if len(argv) == 3:
        verbose = False
    elif argv[3] == 'verbose':
        verbose = True
    else:
        # Report the argument that was actually unrecognised (argv[3]);
        # previously this printed argv[2], the base directory.
        sys.exit("*** unknown argument: %s" % argv[3])

    exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
    """Run `git log --pretty=format:%ai <filename>` and return the output
    lines (one ISO-8601-style author timestamp per commit)."""
    out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
    return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
    """Return the years (as strings) of all git commits touching *filename*.

    For a file with no history, `git log` produces empty output, which
    call_git_log's split('\\n') turns into [''] — so the previous
    `len(...) == 0` fallback was unreachable and '' leaked into the year
    list. Filter empty lines first, and return the fallback year as a
    string for type consistency with the parsed values.
    """
    git_log_lines = [line for line in call_git_log(filename) if line]
    if not git_log_lines:
        return [str(datetime.date.today().year)]
    # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
    return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
    """Return the latest year in which git recorded a change to *filename*."""
    return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
    """Return the lines of *filename* (UTF-8), keeping line endings.

    A context manager guarantees the handle is closed even if readlines()
    raises (the explicit open/close pair did not).
    """
    with open(os.path.abspath(filename), 'r', encoding="utf8") as f:
        return f.readlines()
def write_file_lines(filename, file_lines):
    """Write *file_lines* (already newline-terminated) to *filename* (UTF-8).

    A context manager guarantees flush/close even if write() raises.
    """
    with open(os.path.abspath(filename), 'w', encoding="utf8") as f:
        f.write(''.join(file_lines))
################################################################################
# update header years execution
################################################################################
COPYRIGHT = 'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Bitcoinrand Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
    """Return (index, line) of the first line matching the updatable
    copyright pattern, or (None, None) if no line matches."""
    for index, line in enumerate(file_lines):
        if UPDATEABLE_LINE_COMPILED.search(line) is not None:
            return index, line
    return None, None
def parse_year_range(year_range):
    """Split a "YYYY" or "YYYY-YYYY" string into a ``(start, end)`` pair.

    A single year is returned as both the start and the end of the range.
    """
    parts = year_range.split('-')
    start = parts[0]
    end = parts[1] if len(parts) > 1 else start
    return start, end
def year_range_to_str(start_year, end_year):
    """Render a year pair as "YYYY" when equal, otherwise "YYYY-YYYY"."""
    return start_year if start_year == end_year else "%s-%s" % (start_year, end_year)
def create_updated_copyright_line(line, last_git_change_year):
    """Return *line* with its copyright year range extended to *last_git_change_year*.

    The line is returned unchanged when the range already ends at that
    year.  Any text ahead of the copyright notice is comment-block
    syntax that varies from file to file, so it is preserved verbatim.
    """
    marker = 'Copyright (c) '
    pieces = line.split(marker)
    prefix = pieces[0]
    tokens = pieces[1].split(' ')
    start_year, end_year = parse_year_range(tokens[0])
    if end_year == last_git_change_year:
        return line
    updated_range = year_range_to_str(start_year, last_git_change_year)
    return prefix + marker + updated_range + ' ' + ' '.join(tokens[1:])
def update_updatable_copyright(filename):
    """Bring the copyright year in *filename* up to its last git change year.

    Prints a status message in every case, and rewrites the file only
    when the copyright line actually needed updating.
    """
    lines = read_file_lines(filename)
    index, old_line = get_updatable_copyright_line(lines)
    if not old_line:
        print_file_action_message(filename, "No updatable copyright.")
        return
    change_year = get_most_recent_git_change_year(filename)
    updated_line = create_updated_copyright_line(old_line, change_year)
    if updated_line == old_line:
        print_file_action_message(filename, "Copyright up-to-date.")
        return
    lines[index] = updated_line
    write_file_lines(filename, lines)
    print_file_action_message(filename,
                              "Copyright updated! -> %s" % change_year)
def exec_update_header_year(base_directory):
    """Run the copyright-year update over every examined file in *base_directory*.

    The working directory is changed temporarily (the file walk and git
    queries use relative paths); a ``finally`` guarantees it is restored
    even if an update raises — the original version leaked the chdir on
    any exception.
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    try:
        for filename in get_filenames_to_examine():
            update_updatable_copyright(filename)
    finally:
        os.chdir(original_cwd)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Bitcoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Bitcoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a bitcoinrand source code repository.
"""
def print_file_action_message(filename, action):
    """Print one status line: filename left-justified in a 52-char column, then the action."""
    print('{:<52} {}'.format(filename, action))
def update_cmd(argv):
    """Entry point for the ``update`` subcommand (*argv* is ``sys.argv``).

    Exits with usage text unless exactly one argument — the repository
    base directory — is supplied and exists.
    """
    if len(argv) != 3:
        sys.exit(UPDATE_USAGE)
    base_directory = argv[2]
    if os.path.exists(base_directory):
        exec_update_header_year(base_directory)
    else:
        sys.exit("*** bad base_directory: %s" % base_directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
    """Fill the year range into *header* and return newline-terminated lines.

    The template's first and last entries (blank lines produced by the
    triple-quoted literal) are dropped; the first remaining line carries
    the ``%s`` placeholder for the rendered year range.
    """
    template_lines = header.split('\n')[1:-1]
    filled = [template_lines[0] % year_range_to_str(start_year, end_year)]
    filled.extend(template_lines[1:])
    return ['%s\n' % text for text in filled]
CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
    """Return the C++-style header lines reversed (for repeated index-0 insertion)."""
    header_lines = get_header_lines(CPP_HEADER, start_year, end_year)
    return reversed(header_lines)
PYTHON_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_python_header_lines_to_insert(start_year, end_year):
    """Return the Python-style header lines reversed (for repeated fixed-index insertion)."""
    header_lines = get_header_lines(PYTHON_HEADER, start_year, end_year)
    return reversed(header_lines)
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
    """Return the ``(earliest, latest)`` years of git changes to *filename*."""
    change_years = get_git_change_years(filename)
    return min(change_years), max(change_years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
    """Return True when *file_lines* already contains the core-developers copyright."""
    index, _ = get_updatable_copyright_line(file_lines)
    # 'is not None' rather than '!= None': identity is the correct
    # (PEP 8 mandated) comparison against the None singleton.
    return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
    """Return True when the first line is a hashbang (``#!...``).

    A bare two-character "#!" does not count: the original length
    threshold (more than two characters) is preserved.
    """
    if not file_lines:
        return False
    first = file_lines[0]
    return len(first) > 2 and first.startswith('#!')
def insert_python_header(filename, file_lines, start_year, end_year):
    """Insert the Python-style copyright header, below any hashbang line."""
    insert_idx = 1 if file_has_hashbang(file_lines) else 0
    # The header lines arrive reversed, so repeated insertion at a fixed
    # index leaves them in the correct top-to-bottom order.
    for header_line in get_python_header_lines_to_insert(start_year, end_year):
        file_lines.insert(insert_idx, header_line)
    write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
    """Insert the C++-style copyright header at the very top of the file."""
    # The header lines arrive reversed, so inserting each at index 0
    # leaves them in the correct top-to-bottom order.
    for header_line in get_cpp_header_lines_to_insert(start_year, end_year):
        file_lines.insert(0, header_line)
    write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
    """Insert a copyright header into *filename* in the given style.

    *style* is ``'python'`` for ``#``-comment headers; anything else
    gets the C++ ``//`` header.  Exits if the file already carries a
    core-developers copyright.  The year range comes from git history.
    """
    file_lines = read_file_lines(filename)
    if file_already_has_core_copyright(file_lines):
        sys.exit('*** %s already has a copyright by The Bitcoin Core developers'
                 % (filename))
    start_year, end_year = get_git_change_year_range(filename)
    inserter = insert_python_header if style == 'python' else insert_cpp_header
    inserter(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Bitcoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Bitcoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the bitcoinrand repository.
"""
def insert_cmd(argv):
    """Entry point for the ``insert`` subcommand (*argv* is ``sys.argv``).

    Validates the target file and picks the comment style from its
    extension before delegating to :func:`exec_insert_header`.
    """
    if len(argv) != 3:
        sys.exit(INSERT_USAGE)
    filename = argv[2]
    if not os.path.isfile(filename):
        sys.exit("*** bad filename: %s" % filename)
    _, extension = os.path.splitext(filename)
    if extension not in ('.h', '.cpp', '.cc', '.c', '.py'):
        sys.exit("*** cannot insert for file extension %s" % extension)
    style = 'python' if extension == '.py' else 'cpp'
    exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Bitcoin
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
    if len(sys.argv) == 1:
        sys.exit(USAGE)
    subcommand = sys.argv[1]
    # Dispatch table keeps the subcommand names and handlers in one place.
    handlers = {
        'report': report_cmd,
        'update': update_cmd,
        'insert': insert_cmd,
    }
    if subcommand not in handlers:
        sys.exit(USAGE)
    handlers[subcommand](sys.argv)
| 36.724307 | 92 | 0.602479 |
7952979daeb72c60bca63694ebd7757fb12e78a8 | 567 | py | Python | setup.py | wtbarnes/hydrad_tools | 7cd67c12fb8bc55a1b610eb0f4696fe1acffc16a | [
"MIT"
] | 1 | 2018-07-23T20:54:09.000Z | 2018-07-23T20:54:09.000Z | setup.py | wtbarnes/hydrad_tools | 7cd67c12fb8bc55a1b610eb0f4696fe1acffc16a | [
"MIT"
] | 57 | 2018-03-22T16:18:06.000Z | 2020-01-31T09:54:23.000Z | setup.py | wtbarnes/hydrad_tools | 7cd67c12fb8bc55a1b610eb0f4696fe1acffc16a | [
"MIT"
] | 1 | 2018-03-19T03:19:41.000Z | 2018-03-19T03:19:41.000Z | from distutils.core import setup
# Package metadata for pydrad: tools for configuring/parsing HYDRAD runs.
# NOTE(review): distutils is deprecated since Python 3.10 — consider
# migrating to setuptools when the build tooling is next touched.
setup(
    name='pydrad',
    license='MIT',
    version='0.1',
    author='Will Barnes',
    url='https://github.com/rice-solar-physics/pydrad',
    # Ship the configure templates and default-settings ASDF file with the wheel.
    package_data={'pydrad': ['configure/templates/*',
                             'configure/data/defaults.asdf']},
    packages=[
        'pydrad',
        'pydrad.configure',
        'pydrad.configure.data',
        'pydrad.parse',
        'pydrad.visualize',
    ],
    author_email='will.t.barnes@gmail.com',
    description='Tools for configuring and parsing HYDRAD simulations'
)
| 27 | 70 | 0.599647 |
795297d18c848fc4e6148d953b247d15d65c5104 | 1,895 | py | Python | src/coinflip/cli/parsing.py | Honno/coinflip | 15c1e0419e13c91c3f988558f3f050b516413c2b | [
"BSD-3-Clause"
] | 10 | 2020-08-02T16:58:36.000Z | 2021-02-11T06:02:45.000Z | src/coinflip/cli/parsing.py | honno/coinflip | 15c1e0419e13c91c3f988558f3f050b516413c2b | [
"BSD-3-Clause"
] | 31 | 2020-07-24T10:55:25.000Z | 2021-02-26T11:13:50.000Z | src/coinflip/cli/parsing.py | honno/coinflip | 15c1e0419e13c91c3f988558f3f050b516413c2b | [
"BSD-3-Clause"
] | null | null | null | from dataclasses import dataclass
import pandas as pd
from coinflip._randtests.common.exceptions import NonBinarySequenceError
__all__ = ["DataParsingError", "parse_text", "parse_binary"]
class DataParsingError(ValueError):
    """Base class for parsing-related errors raised while reading RNG output."""
@dataclass
class MultipleColumnsError(DataParsingError):
    """Error for when only one column of data was expected"""
    # Number of columns actually found in the parsed data (used in __str__).
    ncols: int
    def __str__(self):
        """Return a human-readable description of the column-count mismatch."""
        return (
            f"Parsed data contains {self.ncols} columns, but only 1 column was expected"
        )
def parse_text(data_file) -> pd.Series:
    """Read RNG output from *data_file* into a single pandas ``Series``.

    The input must contain exactly one value per line and exactly two
    distinct values overall (a binary sequence).  The dtype of the
    resulting series is inferred from the data itself.

    Parameters
    ----------
    data_file : file-like object
        File containing RNG output

    Returns
    -------
    ``Series``
        A pandas ``Series`` which represents the data

    Raises
    ------
    MultipleColumnsError
        If inputted data contains multiple values per line
    NonBinarySequenceError
        If sequence does not contain only 2 values

    See Also
    --------
    pandas.read_csv : The pandas method for reading ``data_file``
    """
    frame = pd.read_csv(data_file, header=None)
    column_count = len(frame.columns)
    if column_count > 1:
        raise MultipleColumnsError(column_count)
    sequence = frame.iloc[:, 0]
    if sequence.nunique() != 2:
        raise NonBinarySequenceError()
    return sequence.infer_objects()
def parse_binary(data_file) -> pd.Series:
    """Read the file at *data_file* as raw bytes and return its bits.

    Every byte is expanded into eight 0/1 integers, most significant
    bit first, so the resulting ``Series`` has ``8 * filesize`` entries.
    """
    with open(data_file, "rb") as f:
        raw = f.read()
    bits = [int(bit) for byte in raw for bit in format(byte, "08b")]
    return pd.Series(bits)
| 23.395062 | 88 | 0.650132 |
795298877dd02ef6148f327b0696c5f7eaecd9e4 | 8,630 | py | Python | tests/unit/utils/test_verify.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/test_verify.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/test_verify.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Test the verification routines
'''
# Import Python libs
from __future__ import absolute_import
import getpass
import os
import sys
import stat
import shutil
import resource
import tempfile
import socket
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.paths import TMP
from tests.support.helpers import (
requires_network,
TestsLoggingHandler
)
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import salt libs
import salt.utils
from salt.utils.verify import (
check_user,
verify_env,
verify_socket,
zmq_version,
check_max_open_files,
valid_id,
log,
verify_log,
)
# Import 3rd-party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
class TestVerify(TestCase):
'''
Verify module tests
'''
def test_valid_id_exception_handler(self):
'''
Ensure we just return False if we pass in invalid or undefined paths.
Refs #8259
'''
opts = {'pki_dir': '/tmp/whatever'}
self.assertFalse(valid_id(opts, None))
def test_zmq_verify(self):
self.assertTrue(zmq_version())
def test_zmq_verify_insufficient(self):
import zmq
with patch.object(zmq, '__version__', '2.1.0'):
self.assertFalse(zmq_version())
def test_user(self):
self.assertTrue(check_user(getpass.getuser()))
def test_no_user(self):
# Catch sys.stderr here since no logging is configured and
# check_user WILL write to sys.stderr
class FakeWriter(object):
def __init__(self):
self.output = ""
def write(self, data):
self.output += data
stderr = sys.stderr
writer = FakeWriter()
sys.stderr = writer
# Now run the test
self.assertFalse(check_user('nouser'))
# Restore sys.stderr
sys.stderr = stderr
if writer.output != 'CRITICAL: User not found: "nouser"\n':
# If there's a different error catch, write it to sys.stderr
sys.stderr.write(writer.output)
@skipIf(sys.platform.startswith('win'), 'No verify_env Windows')
def test_verify_env(self):
root_dir = tempfile.mkdtemp(dir=TMP)
var_dir = os.path.join(root_dir, 'var', 'log', 'salt')
verify_env([var_dir], getpass.getuser())
self.assertTrue(os.path.exists(var_dir))
dir_stat = os.stat(var_dir)
self.assertEqual(dir_stat.st_uid, os.getuid())
self.assertEqual(dir_stat.st_mode & stat.S_IRWXU, stat.S_IRWXU)
self.assertEqual(dir_stat.st_mode & stat.S_IRWXG, 40)
self.assertEqual(dir_stat.st_mode & stat.S_IRWXO, 5)
@requires_network(only_local_network=True)
def test_verify_socket(self):
self.assertTrue(verify_socket('', 18000, 18001))
if socket.has_ipv6:
# Only run if Python is built with IPv6 support; otherwise
# this will just fail.
try:
self.assertTrue(verify_socket('::', 18000, 18001))
except socket.error as serr:
# Python has IPv6 enabled, but the system cannot create
# IPv6 sockets (otherwise the test would return a bool)
# - skip the test
#
# FIXME - possibly emit a message that the system does
# not support IPv6.
pass
@skipIf(True, 'Skipping until we can find why Jenkins is bailing out')
def test_max_open_files(self):
with TestsLoggingHandler() as handler:
logmsg_dbg = (
'DEBUG:This salt-master instance has accepted {0} minion keys.'
)
logmsg_chk = (
'{0}:The number of accepted minion keys({1}) should be lower '
'than 1/4 of the max open files soft setting({2}). According '
'to the system\'s hard limit, there\'s still a margin of {3} '
'to raise the salt\'s max_open_files setting. Please consider '
'raising this value.'
)
logmsg_crash = (
'{0}:The number of accepted minion keys({1}) should be lower '
'than 1/4 of the max open files soft setting({2}). '
'salt-master will crash pretty soon! According to the '
'system\'s hard limit, there\'s still a margin of {3} to '
'raise the salt\'s max_open_files setting. Please consider '
'raising this value.'
)
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
tempdir = tempfile.mkdtemp(prefix='fake-keys')
keys_dir = os.path.join(tempdir, 'minions')
os.makedirs(keys_dir)
mof_test = 256
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_test, mof_h))
try:
prev = 0
for newmax, level in ((24, None), (66, 'INFO'),
(127, 'WARNING'), (196, 'CRITICAL')):
for n in range(prev, newmax):
kpath = os.path.join(keys_dir, str(n))
with salt.utils.fopen(kpath, 'w') as fp_:
fp_.write(str(n))
opts = {
'max_open_files': newmax,
'pki_dir': tempdir
}
check_max_open_files(opts)
if level is None:
# No log message is triggered, only the DEBUG one which
# tells us how many minion keys were accepted.
self.assertEqual(
[logmsg_dbg.format(newmax)], handler.messages
)
else:
self.assertIn(
logmsg_dbg.format(newmax), handler.messages
)
self.assertIn(
logmsg_chk.format(
level,
newmax,
mof_test,
mof_h - newmax,
),
handler.messages
)
handler.clear()
prev = newmax
newmax = mof_test
for n in range(prev, newmax):
kpath = os.path.join(keys_dir, str(n))
with salt.utils.fopen(kpath, 'w') as fp_:
fp_.write(str(n))
opts = {
'max_open_files': newmax,
'pki_dir': tempdir
}
check_max_open_files(opts)
self.assertIn(logmsg_dbg.format(newmax), handler.messages)
self.assertIn(
logmsg_crash.format(
'CRITICAL',
newmax,
mof_test,
mof_h - newmax,
),
handler.messages
)
handler.clear()
except IOError as err:
if err.errno == 24:
# Too many open files
self.skipTest('We\'ve hit the max open files setting')
raise
finally:
shutil.rmtree(tempdir)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_s, mof_h))
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_verify_log(self):
'''
Test that verify_log works as expected
'''
message = 'Insecure logging configuration detected! Sensitive data may be logged.'
mock_cheese = MagicMock()
with patch.object(log, 'warning', mock_cheese):
verify_log({'log_level': 'cheeseshop'})
mock_cheese.assert_called_once_with(message)
mock_trace = MagicMock()
with patch.object(log, 'warning', mock_trace):
verify_log({'log_level': 'trace'})
mock_trace.assert_called_once_with(message)
mock_none = MagicMock()
with patch.object(log, 'warning', mock_none):
verify_log({})
mock_none.assert_called_once_with(message)
mock_info = MagicMock()
with patch.object(log, 'warning', mock_info):
verify_log({'log_level': 'info'})
self.assertTrue(mock_info.call_count == 0)
| 34.658635 | 90 | 0.533372 |
795299413720ddf717f425b6398106745fdc89c1 | 1,056 | py | Python | examples/mnist/simple_model_training.py | mehrdad-shokri/ludwig | f167981683c067b50be6a3656cbf553efbf192e9 | [
"Apache-2.0"
] | null | null | null | examples/mnist/simple_model_training.py | mehrdad-shokri/ludwig | f167981683c067b50be6a3656cbf553efbf192e9 | [
"Apache-2.0"
] | null | null | null | examples/mnist/simple_model_training.py | mehrdad-shokri/ludwig | f167981683c067b50be6a3656cbf553efbf192e9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Simple Model Training Example
#
# This example is the API example for this Ludwig command line example
# (https://ludwig-ai.github.io/ludwig-docs/examples/#image-classification-mnist).
# Import required libraries
import logging
import shutil
import yaml
from ludwig.api import LudwigModel
# clean out prior results
try:
    # Remove results from any previous run; a missing directory is fine.
    shutil.rmtree('./results')
except FileNotFoundError:
    pass
# Load the model training parameters from the YAML definition file.
with open('./model_definition.yaml','r') as f:
    model_definition = yaml.safe_load(f.read())
# Construct the Ludwig model object that drives model training.
model = LudwigModel(model_definition,
                    logging_level=logging.INFO)
# Run training on the MNIST train/test CSV splits.
# NOTE(review): train_stats is captured but unused here — presumably kept
# for interactive inspection; confirm before removing.
train_stats = model.train(data_train_csv='./data/mnist_dataset_training.csv',
                          data_test_csv='./data/mnist_dataset_testing.csv',
                          experiment_name='simple_image_experiment',
                          model_name='single_model')
model.close()
79529995294c51def7485085ba6798856e87a693 | 13,899 | bzl | Python | dotnet/toolchain/toolchains.bzl | marleypowell/rules_dotnet | b2a6c7583f7c2cf37ea62d0f1a703c7af4d333ef | [
"Apache-2.0"
] | null | null | null | dotnet/toolchain/toolchains.bzl | marleypowell/rules_dotnet | b2a6c7583f7c2cf37ea62d0f1a703c7af4d333ef | [
"Apache-2.0"
] | null | null | null | dotnet/toolchain/toolchains.bzl | marleypowell/rules_dotnet | b2a6c7583f7c2cf37ea62d0f1a703c7af4d333ef | [
"Apache-2.0"
] | null | null | null | load(
"@io_bazel_rules_dotnet//dotnet/private:dotnet_toolchain.bzl",
"dotnet_toolchain",
)
load(
"@io_bazel_rules_dotnet//dotnet/private:core_toolchain.bzl",
"core_toolchain",
)
load(
"@io_bazel_rules_dotnet//dotnet/private:net_toolchain.bzl",
"net_toolchain",
)
load(
"@io_bazel_rules_dotnet//dotnet/private:net_empty_toolchain.bzl",
"net_empty_toolchain",
)
load(
"@io_bazel_rules_dotnet//dotnet/private:sdk.bzl",
"dotnet_host_sdk",
)
load(
"@io_bazel_rules_dotnet//dotnet/private:sdk_core.bzl",
"core_download_sdk",
)
load(
"@io_bazel_rules_dotnet//dotnet/private:sdk_net.bzl",
"net_download_sdk",
)
load(
"@io_bazel_rules_dotnet//dotnet/platform:list.bzl",
"DOTNETARCH",
"DOTNETOS",
"DOTNETIMPL",
"DOTNET_NET_FRAMEWORKS",
"DOTNET_CORE_FRAMEWORKS",
"DEFAULT_DOTNET_CORE_FRAMEWORK",
"DEFAULT_DOTNET_NET_FRAMEWORK",
)
DEFAULT_VERSION = "4.2.3"
CORE_DEFAULT_VERSION = "v3.1.100"
NET_ROSLYN_DEFAULT_VERSION = "2.10.0"
NET_DEFAULT_VERSION = "net48"
NET_DEFAULT_TOOLS_VERSION = "net48"
SDK_REPOSITORIES = {
"4.2.3": {
"mono_darwin_amd64": (
"http://bazel-mirror.storage.googleapis.com/download.mono-project.com/archive/4.2.3/macos-10-x86/MonoFramework-MDK-4.2.3.4.macos10.xamarin.x86.tar.gz",
"a7afb92d4a81f17664a040c8f36147e57a46bb3c33314b73ec737ad73608e08b",
),
},
}
CORE_SDK_REPOSITORIES = {
"v2.1.200": {
"core_windows_amd64": (
"https://download.microsoft.com/download/3/7/1/37189942-C91D-46E9-907B-CF2B2DE584C7/dotnet-sdk-2.1.200-win-x64.zip",
"f3c92c52d88364ac4359716e11e13b67f0e4ea256676b56334a4eb88c728e7fd",
),
"core_linux_amd64": (
"https://download.microsoft.com/download/3/7/1/37189942-C91D-46E9-907B-CF2B2DE584C7/dotnet-sdk-2.1.200-linux-x64.tar.gz",
"58977b4b232f5fe97f9825340ce473cf1ec1bad76eb512fe6b5e2210c76c09de",
),
"core_darwin_amd64": (
"https://download.microsoft.com/download/3/7/1/37189942-C91D-46E9-907B-CF2B2DE584C7/dotnet-sdk-2.1.200-osx-x64.tar.gz",
"ac695c3319caf043e6b40861906cd4d396ba8922fd206332d2a778635667a2ba",
),
},
"v2.1.502": {
"core_windows_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/c88b53e5-121c-4bc9-af5d-47a9d154ea64/e62eff84357c48dc8052a9c6ce5dfb8a/dotnet-sdk-2.1.502-win-x64.zip",
"2da94993092ebb27ffa4dcfe9e94c1acaafb34f9570ecfbc74291dcec9a8b213",
),
"core_linux_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/4c8893df-3b05-48a5-b760-20f2db692c45/ff0545dbbb3c52f6fa38657ad97d65d8/dotnet-sdk-2.1.502-linux-x64.tar.gz",
"f8bcee4cdc52e6b907f1a94102ec43977e84c62b7a54be6040e906a7b6ee4453",
),
"core_darwin_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/50729ca4-03ce-4e19-af87-bfae014b0431/1c830d9dcffa7663702e32fab6953425/dotnet-sdk-2.1.502-osx-x64.tar.gz",
"47fbc7cd65aacfd9e1002057ba29f1a567bd6c9923b1ff7aa5dcb4e48c85de95",
),
},
"v2.1.503": {
"core_windows_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/81e18dc2-7747-4b2d-9912-3be0f83050f1/5bc41cb27df3da63378df2d051be4b7f/dotnet-sdk-2.1.503-win-x64.zip",
"d81c6fdf758cbb0f433dad32fa2087e5ba09f55590a0e85832a1da414ed8180d",
),
"core_linux_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/04d83723-8370-4b54-b8b9-55708822fcde/63aab1f4d0be5246e3a92e1eb3063935/dotnet-sdk-2.1.503-linux-x64.tar.gz",
"242c812b516de12baffd804a1aed5a6c7341ef6f1e9597a0df0f2eb1e0ddf5c7",
),
"core_darwin_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/c922688d-74e8-4af5-bcc8-5850eafbca7f/cf3b9a0b06c0dfa3a5098f893a9730bd/dotnet-sdk-2.1.503-osx-x64.tar.gz",
"5fb7bf37843645fab6e0c7397a15630c11aaa917c951035c0aaec5e2a8e93fe5",
),
},
"v2.2.101": {
"core_windows_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/25d4104d-1776-41cb-b96e-dff9e9bf1542/b878c013de90f0e6c91f6f3c98a2d592/dotnet-sdk-2.2.101-win-x64.zip",
"fe13ce1eac2ebbc73fb069506d4951c57178935952a30ede029bf849279b4079",
),
"core_linux_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/80e1d007-d6f0-402f-b047-779464dd989b/9ae5e2df9aa166b720bdb92d19977044/dotnet-sdk-2.2.101-linux-x64.tar.gz",
"2b14129d8e0fa01ba027145022e0580796604f091a52fcb86d23c0fa1fa438e9",
),
"core_darwin_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/55c65d12-5f99-45d3-aa14-35359a6d02ca/3f6bcd694e3bfbb84e6b99e65279bd1e/dotnet-sdk-2.2.101-osx-x64.tar.gz",
"fc695c2c797da757251ce643d408e99e6325563bf08d46f1bf8d45a8ebc1aac6",
),
},
"v2.2.402": {
"core_windows_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/8ac3e8b7-9918-4e0c-b1be-5aa3e6afd00f/0be99c6ab9362b3c47050cdd50cba846/dotnet-sdk-2.2.402-win-x64.zip",
"ffdd3d49efea67329cdad9262916bc0263831e79a89af0ee21bf2602a3a5c3b6",
),
"core_linux_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/46411df1-f625-45c8-b5e7-08ab736d3daa/0fbc446088b471b0a483f42eb3cbf7a2/dotnet-sdk-2.2.402-linux-x64.tar.gz",
"4dafe1e6e49c6ddeb658bd702ed7724c4eb393ed719e2d6f557536f17917579a",
),
"core_darwin_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/2079de3a-714b-4fa5-840f-70e898b393ef/d631b5018560873ac350d692290881db/dotnet-sdk-2.2.402-osx-x64.tar.gz",
"adb0aa3a809e097882f9a139af8a7fa9d8c7898ad5249f6cec376f843433c79f",
),
},
"v3.0.100": {
"core_windows_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/a24f4f34-ada1-433a-a437-5bc85fc2576a/7e886d06729949c15c96fe7e70faa8ae/dotnet-sdk-3.0.100-win-x64.zip",
"faf8a92a523558e1659a6f9750c86610fe8430430f58099ccc659b83e3eee1bf",
),
"core_linux_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/886b4a4c-30af-454b-8bec-81c72b7b4e1f/d1a0c8de9abb36d8535363ede4a15de6/dotnet-sdk-3.0.100-linux-x64.tar.gz",
"12098fe29d5c857fd6093b1fd63eda9f91b92798e3748fcedc0e0727f1ac01c2",
),
"core_darwin_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/b9251194-4118-41cb-ae05-6763fb002e5d/1d398b4e97069fa4968628080b617587/dotnet-sdk-3.0.100-osx-x64.tar.gz",
"f0f8af049e0ecbeea9c9c37c16679d6fc2cd4c165510b00e3fad3cd8d0fe0160",
),
},
"v3.1.100": {
# https://dotnet.microsoft.com/download/dotnet-core/thank-you/sdk-3.1.100-windows-x64-binaries
"core_windows_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/28a2c4ff-6154-473b-bd51-c62c76171551/ea47eab2219f323596c039b3b679c3d6/dotnet-sdk-3.1.100-win-x64.zip",
"abcd034b230365d9454459e271e118a851969d82516b1529ee0bfea07f7aae52",
# SHA512 Checsum provided
# "94ee575d6104058cdd31370fc686b5d1aa23bf4a54611843c1f93afc82cad3523217b5f2eaddd4b5c136bca252d2c9047092f7054052c8683fa0f363ca28ad11",
),
# https://dotnet.microsoft.com/download/dotnet-core/thank-you/sdk-3.1.100-linux-x64-binaries
"core_linux_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/d731f991-8e68-4c7c-8ea0-fad5605b077a/49497b5420eecbd905158d86d738af64/dotnet-sdk-3.1.100-linux-x64.tar.gz",
"3687b2a150cd5fef6d60a4693b4166994f32499c507cd04f346b6dda38ecdc46",
# SHA512 Checsum provided
# "5217ae1441089a71103694be8dd5bb3437680f00e263ad28317665d819a92338a27466e7d7a2b1f6b74367dd314128db345fa8fff6e90d0c966dea7a9a43bd21",
),
# https://dotnet.microsoft.com/download/dotnet-core/thank-you/sdk-3.1.100-macos-x64-binaries
"core_darwin_amd64": (
"https://download.visualstudio.microsoft.com/download/pr/bea99127-a762-4f9e-aac8-542ad8aa9a94/afb5af074b879303b19c6069e9e8d75f/dotnet-sdk-3.1.100-osx-x64.tar.gz",
"b38e6f8935d4b82b283d85c6b83cd24b5253730bab97e0e5e6f4c43e2b741aab",
# SHA512 Checsum provided
# "142922cfb98b0cae6b194c3da2478fdf70f2a67603d248bbf859938bd05c4a4a5facea05d49b0db8b382d8cf73f9a45246a2022c9cf0ccf1501b1138cd0b3e76",
),
},
}
NET_ROSLYN_REPOSITORIES = {
"2.6.0": {
"net_windows_amd64": (
"https://www.nuget.org/api/v2/package/Microsoft.Net.Compilers/2.6.0/",
"4ec0a588dc7b538e3f05fb9637931941320defef8e8fde1a79392de9c2a5a276",
),
},
"2.6.1": {
"net_windows_amd64": (
"https://www.nuget.org/api/v2/package/Microsoft.Net.Compilers/2.6.1/",
"2e38aa455b5a1acce260a8ea30696804e6fa66aa366a04118f0cb4c8d7d08cf9",
),
},
"2.7.0": {
"net_windows_amd64": (
"https://www.nuget.org/api/v2/package/Microsoft.Net.Compilers/2.7.0/",
"e6ab3b41c4a30f3c98d4fd318a780d4051c4f35b2bb575b1ea11f621275f1597",
),
},
"2.8.0": {
"net_windows_amd64": (
"https://www.nuget.org/api/v2/package/Microsoft.Net.Compilers/2.8.0/",
"ec357a48b5ce3a2890c744967f4057a6c2c953fdd407bd57c003c9d300fe015f",
),
},
"2.8.2": {
"net_windows_amd64": (
"https://www.nuget.org/api/v2/package/Microsoft.Net.Compilers/2.8.2/",
"96684975e6934859e3a9f42536d2ffeb63f8e5761eb38838d0604d4cc31e0b47",
),
},
"2.9.0": {
"net_windows_amd64": (
"https://www.nuget.org/api/v2/package/Microsoft.Net.Compilers/2.9.0/",
"63a6828a1a4ada4b813f48784b1797b4b270a8c0559fcafc64666efb3de84bfd",
),
},
"2.10.0": {
"net_windows_amd64": (
"https://www.nuget.org/api/v2/package/Microsoft.Net.Compilers/2.10.0/",
"854d162cbe3c90100922c970ba593631f2d106f7b757a99425d50dc5cdafbdc0",
),
},
}
def _generate_toolchains():
    # Use all the above information to generate all the possible toolchains we might support.
    # Builds the cartesian product of implementation x OS x architecture; each
    # entry is a descriptor dict consumed by declare_toolchains()/register.
    toolchains = []
    for arch in DOTNETARCH:
        for os in DOTNETOS:
            for impl in DOTNETIMPL:
                # Pair the Bazel platform constraints for this arch and OS.
                constraints = [DOTNETARCH[arch], DOTNETOS[os]]
                # Naming scheme: dotnet_<impl>_<os>_<arch>, e.g. dotnet_core_linux_amd64.
                host = "{}_{}_{}".format(impl, os, arch)
                toolchain_name = "dotnet_{}".format(host)
                toolchains.append(dict(
                    name = toolchain_name,
                    impl = impl,
                    os = os,
                    arch = arch,
                    constraints = constraints,
                ))
    return toolchains
_toolchains = _generate_toolchains()
_label_prefix = "@io_bazel_rules_dotnet//dotnet/toolchain:"
def dotnet_register_toolchains(dotnet_version = DEFAULT_VERSION, core_version = CORE_DEFAULT_VERSION, net_version = NET_DEFAULT_VERSION, net_roslyn_version = NET_ROSLYN_DEFAULT_VERSION):
    """See /dotnet/toolchains.rst#dostnet-register-toolchains for full documentation."""
    # NOTE(review): the version parameters are currently unused here — every
    # generated toolchain is registered unconditionally; confirm whether
    # callers rely on the parameters before changing the signature.
    # Use the final dictionaries to register all the toolchains
    for toolchain in _toolchains:
        name = _label_prefix + toolchain["name"]
        native.register_toolchains(name)
def declare_toolchains():
    # Use the final dictionaries to create all the toolchains.
    # Dispatches on the implementation: mono -> dotnet_toolchain,
    # core -> core_toolchain, net -> net_toolchain (Windows/amd64 only;
    # other platforms get a placeholder net_empty_toolchain).
    for toolchain in _toolchains:
        if toolchain["impl"] == "mono":
            dotnet_toolchain(
                name = toolchain["name"],
                arch = toolchain["arch"],
                os = toolchain["os"],
                constraints = toolchain["constraints"],
            )
        elif toolchain["impl"] == "core":
            core_toolchain(
                name = toolchain["name"],
                arch = toolchain["arch"],
                os = toolchain["os"],
                constraints = toolchain["constraints"],
            )
        elif toolchain["impl"] == "net":
            if toolchain["os"] == "windows" and toolchain["arch"]=="amd64":
                net_toolchain(
                    name = toolchain["name"],
                    arch = toolchain["arch"],
                    os = toolchain["os"],
                    constraints = toolchain["constraints"],
                )
            else:
                # Hardcoded empty rules for .NET on Linux and ocx
                net_empty_toolchain(
                    name = toolchain["name"],
                    arch = toolchain["arch"],
                    os = toolchain["os"],
                    constraints = toolchain["constraints"],
                )
def net_register_sdk(net_version = NET_DEFAULT_VERSION, net_roslyn_version = NET_ROSLYN_DEFAULT_VERSION, tools_version = NET_DEFAULT_TOOLS_VERSION, name = "net_sdk"):
    # Declare the .NET Framework SDK repository backed by the Roslyn compiler
    # package for the requested version; fails fast on unknown Roslyn versions.
    if net_roslyn_version not in NET_ROSLYN_REPOSITORIES:
        fail("Unknown .net Roslyn version {}".format(net_roslyn_version))
    net_download_sdk(
        # Fallback name mirrors core_register_sdk's "<prefix>_<version>" scheme.
        name = name if name else "net_sdk_" + net_version,
        version = DOTNET_NET_FRAMEWORKS[net_version][3],
        toolsVersion = DOTNET_NET_FRAMEWORKS[tools_version][3],
        sdks = NET_ROSLYN_REPOSITORIES[net_roslyn_version],
    )
def core_register_sdk(core_version = CORE_DEFAULT_VERSION, name = "core_sdk"):
    # Declare the .NET Core SDK download repository for the requested version;
    # fails fast when the version has no download entry.
    if core_version not in CORE_SDK_REPOSITORIES:
        fail("Unknown core version {}".format(core_version))
    core_download_sdk(
        name = name if name else "core_sdk_{}".format(core_version),
        # Versions are keyed with a leading "v" (e.g. "v3.1.100"); strip it
        # for the repository rule's version attribute.
        version = core_version[1:],
        sdks = CORE_SDK_REPOSITORIES[core_version],
    )
def mono_register_sdk():
    """See /dotnet/toolchains.rst#dostnet-register-toolchains for full documentation."""
    # Declares the host-installed Mono SDK repository (no download).
    dotnet_host_sdk(
        name = "dotnet_sdk",
    )
| 45.871287 | 186 | 0.671703 |
795299febd0881f339bf75a4c01b525d81a4103e | 1,089 | py | Python | fa_management_server/models/role.py | Msms-NJ/fa_management_server | 6787e35a5ac27c27c61fcaa0f508a78f4dc6e8f9 | [
"MIT"
] | null | null | null | fa_management_server/models/role.py | Msms-NJ/fa_management_server | 6787e35a5ac27c27c61fcaa0f508a78f4dc6e8f9 | [
"MIT"
] | null | null | null | fa_management_server/models/role.py | Msms-NJ/fa_management_server | 6787e35a5ac27c27c61fcaa0f508a78f4dc6e8f9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Role models."""
from dataclasses import dataclass
from array import array
from .database import Column, Model, SurrogatePK, db, reference_col, relationship
from sqlalchemy.dialects.postgresql import ARRAY
@dataclass
class Role(SurrogatePK, Model):
    """User role information table."""

    __tablename__ = "roles"

    # Fields exposed when serializing a Role to JSON (dataclass annotations).
    name: str
    id: str
    remarks: str
    web_menus: array
    update_date: str

    # Data-access scope levels for a role (stored in ``data_scope``):
    # 0 default; 1 can only see own data; 2 can see data of own organization
    # unit; 3 can see all data in the system.
    DATA_SCOPE_DEFAULT = 0
    DATA_SCOPE_SELF = 1
    DATA_SCOPE_OFFICE = 2
    DATA_SCOPE_ALL = 3

    # Database column definitions (these rebind the annotated names above to
    # SQLAlchemy Column objects).
    name = Column(db.String(80), unique=True, nullable=False)
    users = relationship("UserRole", back_populates="role")
    data_scope = Column(db.SmallInteger, nullable=False)
    web_menus = Column(ARRAY(db.String))

    def __init__(self, **kwargs):
        """Create instance."""
        db.Model.__init__(self, **kwargs)

    def __repr__(self):
        """Represent instance as a unique string."""
        return "<Role({name})>".format(name=self.name)
| 26.560976 | 81 | 0.673095 |
79529b4a04fbf744447ba27c0767fcb246de732c | 350 | py | Python | ZenVis/Blender/convert_mrm_to_obj.py | ThielHater/ZenVis | e40d4cf1e7cd4b12bab72f6bebae6e3a78f1a958 | [
"MIT"
] | 3 | 2021-04-13T07:12:30.000Z | 2021-06-18T17:26:10.000Z | ZenVis/Blender/convert_mrm_to_obj.py | ThielHater/ZenVis | e40d4cf1e7cd4b12bab72f6bebae6e3a78f1a958 | [
"MIT"
] | null | null | null | ZenVis/Blender/convert_mrm_to_obj.py | ThielHater/ZenVis | e40d4cf1e7cd4b12bab72f6bebae6e3a78f1a958 | [
"MIT"
] | 2 | 2021-03-23T19:45:39.000Z | 2021-04-17T17:21:48.000Z | import bpy
import sys
argv = sys.argv
argv = argv[argv.index("--") + 1:]
bpy.ops.import_scene.krxmrmimp(filepath=argv[1])
for obj in bpy.data.objects:
obj.select = (obj.type == 'MESH')
bpy.context.scene.objects.active = obj
bpy.ops.object.join()
bpy.ops.export_scene.obj(filepath=argv[2], check_existing=False, path_mode='STRIP') | 29.166667 | 83 | 0.694286 |
79529d6859c31cf9e51236ddcf635ecb84f88dec | 9,738 | py | Python | Streamlit/funct_fourth_page.py | amorea04/MushPy | 63c87bd01ac41bd1002f065de18bb48061cc1b5e | [
"MIT"
] | null | null | null | Streamlit/funct_fourth_page.py | amorea04/MushPy | 63c87bd01ac41bd1002f065de18bb48061cc1b5e | [
"MIT"
] | null | null | null | Streamlit/funct_fourth_page.py | amorea04/MushPy | 63c87bd01ac41bd1002f065de18bb48061cc1b5e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 20 23:05:42 2021
@author: Adrien
"""
#%% Imports
import streamlit as st
#import numpy as np
import pandas as pd
import ast
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from st_funct import *
#%% fuction for the fourth page
def fourthpage_part1(df, df_clean, df_first, df1, folderpath):
    """Render the Streamlit page describing the final models.

    Sections: optimization of the EfficientNetB1 model (ImageDataGenerator,
    fine-tuning of the last convolution block, Grad-CAM interpretability),
    then the per-family genus-prediction models.

    Only ``folderpath`` (directory containing the illustration images) is
    referenced in this function body; ``df``, ``df_clean``, ``df_first`` and
    ``df1`` are accepted but unused here.
    """
    #%%% Define the page accent color (radio-widget styling)
    s4 = f"""
    <style>
    div.row-widget.stRadio > div[role="radiogroup"] > label[data-baseweb="radio"] > div:first-child {{background-color: #64DB2D;}}
    </style>
    """
    st.markdown(s4, unsafe_allow_html=True)
    #%% Page header
    st.markdown("""<div style="color:#64DB2D ; font-size: 34px ;font-weight: bold;">
                Modèles finaux :
                </div>
                """, unsafe_allow_html=True)
    #st.title("Optimisation du modèle EfficientNetB1 :")
    st.header("Optimisation du modèle EfficientNetB1 :")
    st.markdown(""" <div style='text-align: justify'>
                Compte tenu des performances du modèle que nous avons construit avec EfficentNetB1
                pour modèle de base (accuracy à 71%), nous avons choisi de poursuivre avec ce modèle.<br>
                <br>
                Ainsi, nous avons choisi de réaliser notre phase d’optimisation de la modélisation en
                travaillant sur 3 aspects :
                <ol>
                <li style="margin-left: 20mm;">L’entraînement et le surentraînement : en variant l’ImageDataGenerator et les couches de dropout.</li>
                <li style="margin-left: 20mm;">Les performances du test : en permettant l’ajustement de la dernière couche de convolution du modèle de base (EfficientNetB1).</li>
                <li style="margin-left: 20mm;">L’interprétabilité en implémentant un algorithme Grad-Cam.</li>
                </ol>
                </div><br>
                """, unsafe_allow_html=True)
    #%%% Section 1: the ImageDataGenerator process (rendered by helper)
    st.subheader("1. Image Data Generator :")
    imagedata(folderpath)
    #%%% Section 2: un-freezing the last convolution layers
    st.subheader("2. Entrainement de la dernière couche de convolution :")
    st.markdown(""" <div style='text-align: justify'>
                Le principe du <i>transfer learning</i> est de bénéficier d'un entrainement
                long et poussé d'un modèle sur une base de donées très complète.<br>
                Cependant, bien que cet entrainement soit très intéressant, il peut être très
                bénéfique de permettre l'entrainement des dernières couches de convolution
                afin d'aobtenir un modèle aussi sensible que possible à notre problème
                spécifique.<br>
                Il faut alors trouver l'équilibre entre les ressources informatiques nécessaires
                et le gain obtenu en permettant l'entrainement de plus en plus de couches de
                convolution.<br><br>
                Dans notre cas nous avons trouvé que <b>l'entrainement de la denière couche de
                convolution</b> était un bon compromis.
                """, unsafe_allow_html=True)
    st.markdown("""<div>
                <b><u>Résumé des modèles :</b></u>
                </div>""", unsafe_allow_html=True)
    # Side-by-side comparison of the initial vs final model summaries.
    col1, col2 = st.beta_columns(2)
    col1.write("Modèle initial :")
    imagename2 = folderpath + "20210707_modele_summary_effnetB1_initial.jpg"
    col1.image(imagename2)
    col2.write("Modèle final :")
    imagename3 = folderpath + "20210707_modele_summary_effnetB1_final.jpg"
    col2.image(imagename3)
    st.markdown(""" <div style='text-align: justify'>
                En établissant une comparaison avec l’architecture utilisée précédemment
                que nous avons maintenant un nombre bien plus important de paramètres que
                <b>l’on peut entraîner (trainable params)</b>, une diminution concomitante du nombre de paramètres
                qu’il n’est pas possible d’entraîner, le tout, comme attendu, avec un nombre
                de paramètres total qui ne change pas.<br><br>
                </div>""", unsafe_allow_html=True)
    st.markdown("""<div>
                <b><u>Résultats :</b></u>
                </div>""", unsafe_allow_html=True)
    # Classification report and confusion matrix of the fine-tuned model.
    col3, col4 = st.beta_columns(2)
    col3.write("Rapport de classification :")
    imagename4 = folderpath + "20210707_Classif_report_modele_efficientnetB1_modele_final.jpg"
    col3.image(imagename4)
    col4.write("Matrice de confusion :")
    imagename5 = folderpath + "20210707_confusion_matrix_efficientnetB1_modele_final.png"
    col4.image(imagename5)
    st.markdown(""" <div style='text-align: justify'>
                À l’issue de cet entraînement, nous avons donc constaté une amélioration
                de 6% de l’accuracy sur notre jeu de test (accuracy finale de
                <a style="color:#64DB2D ; font-weight: bold;">77%</a>
                !), ce qui est une très nette amélioration par rapport au modèle initial.<br><br>
                </div>""", unsafe_allow_html=True)
    st.text("Caractéristiques du modèle en fin d'entrainement : \n\n\
loss: 0.6498 - accuracy: 0.7423 - val_loss: 0.6500 - val_accuracy: 0.7754")
    #%%% Section 3: interpretability with Grad-CAM
    st.subheader("3. Interprétabilité, Grad-CAM :")
    st.markdown(""" <div style='text-align: justify'>
                Finalement, afin de comprendre sur quelle base notre modèle s’appuyait pour
                réaliser les classifications nous nous sommes tournés vers l’algorithme Grad-CAM.
                Ce dernier est l’acronyme de Gradient-weighted Class Activation Map développé et
                publié par Ramprasaath R. Selvaraju en 2017 (<i>Grad-CAM: Visual Explanations
                from Deep Networks via Gradient-based Localization, 2017</i>). Cette approche
                provient d’une catégorie plus générale qui consiste à produire des heatmaps
                représentant les classes d’activation sur les images d’entrée. Une classe
                activation heatmap est associée à une classe de sortie spécifique. Ces classes
                sont calculées pour chaque pixel d’une image d’entrée, indiquant l’importance
                de chaque pixel par rapport à la classe considérée.<br>
                <br>
                En d’autres termes, il va être possible d’attribuer à chaque pixel son
                importance dans le processus de décision permettant d’attribuer la classe à
                l’objet.<br><br>
                </div>""", unsafe_allow_html=True)
    imagename = folderpath + "process_GradCam.jpeg"
    st.image(imagename)
    st.caption("Images d’illustration pour la compréhension du fonctionnement de Grad-CAM")
    st.markdown("____")
    #%%% "Funnel" modelling: one genus-prediction model per family
    st.header("Prédiction du genre :")
    st.markdown(""" <div style='text-align: justify'>
                Une piste très intéressante d’amélioration réside dans l’objectif d’atteindre
                une classification plus fine. Initialement, nous avons choisi de nous focaliser
                sur l’échelle des familles dans la classification, notamment pour des raisons
                d’équilibre entre le nombre d’images et le nombre de classes que nous
                souhaitions prédire.<br><br>
                </div>""", unsafe_allow_html=True)
    st.subheader("1. Présentation des données :")
    st.markdown(""" <div style='text-align: justify'>
                Afin de générer de nouveaux jeux de données pour entraîner 5 nouveaux modèles,
                nous nous sommes basés sur le jeu de données initial comprenant les 5 familles.
                Pour chaque famille, nous nous sommes alors intéressés au genre et avons exploré
                un peu les données. Puisque certains genres possédaient que très peu d’images,
                nous avons choisi de ne conserver que les genres possédant plus de 100
                images.<br><br>
                </div>""", unsafe_allow_html=True)
    imagename6 = folderpath + "20210709_genus_repartition.png"
    st.image(imagename6)
    st.caption("Répartition des genres pour chaque famille.")
    st.subheader("2. Entrainement des modèles :")
    st.markdown("""<div>
                <b><u>Rapport de classification pour les modèles :</b></u>
                </div>""", unsafe_allow_html=True)
    # One classification-report image per family (fam0..fam4).
    imagename7 = folderpath + "20210709_Classif_report_modele_efficientnetB1_fam0_genus.jpg"
    imagename8 = folderpath + "20210709_Classif_report_modele_efficientnetB1_fam1_genus.jpg"
    imagename9 = folderpath + "20210709_Classif_report_modele_efficientnetB1_fam2_genus.jpg"
    imagename10 = folderpath + "20210709_Classif_report_modele_efficientnetB1_fam3_genus.jpg"
    imagename11 = folderpath + "20210709_Classif_report_modele_efficientnetB1_fam4_genus.jpg"
    col5, col6 = st.beta_columns(2)
    col5.image(imagename7)
    col6.image(imagename8)
    st.markdown(""" <br> """, unsafe_allow_html=True)
    col7, col8 = st.beta_columns(2)
    col7.image(imagename9)
    col8.image(imagename10)
    st.markdown(""" <br> """, unsafe_allow_html=True)
    col9, col10 = st.beta_columns(2)
    col9.image(imagename11)
    st.markdown(""" <div style='text-align: justify'>
                <br>Nous pouvons constater que nos modèles ne souffrent pas particulièrement de
                surapprentissage et en plus que nous bénéficions d’une accuracy plutôt bonne
                (toujours supérieure à
                <a style="color:#64DB2D ; font-weight: bold;">82 %</a>).
                Il faut cependant se remémorer que nous avons des
                classes plutôt déséquilibrées. Nous avons veillé à utiliser l’ImageDataGenerator
                car il est indiqué dans la littérature que cette méthode permet notamment de
                contrer ce déséquilibre. Nous pouvons aussi remarquer que l’efficacité des modèles
                reste meilleure que le hasard (même avec ce déséquilibre)<br><br>
                </div>""", unsafe_allow_html=True)
79529e86c383ce68b05015010be01df3355df691 | 8,883 | py | Python | tensorflow/python/layers/utils.py | DennissimOS/platform_external_tensorflow | e5a536fbd36ed64a7a63516299a3061944864080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/layers/utils.py | DennissimOS/platform_external_tensorflow | e5a536fbd36ed64a7a63516299a3061944864080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/layers/utils.py | DennissimOS/platform_external_tensorflow | e5a536fbd36ed64a7a63516299a3061944864080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains layer utilies for input validation and format conversion.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.ops import variables
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.util import nest
def convert_data_format(data_format, ndim):
  """Translate a Keras-style data format string to a TF-style one.

  Arguments:
    data_format: 'channels_last' or 'channels_first'.
    ndim: total rank of the input (3, 4 or 5).

  Returns:
    The corresponding TF format string ('NWC', 'NHWC', 'NDHWC', 'NCW',
    'NCHW' or 'NCDHW').

  Raises:
    ValueError: for an unknown data_format or an unsupported rank.
  """
  if data_format == 'channels_last':
    by_rank = {3: 'NWC', 4: 'NHWC', 5: 'NDHWC'}
  elif data_format == 'channels_first':
    by_rank = {3: 'NCW', 4: 'NCHW', 5: 'NCDHW'}
  else:
    raise ValueError('Invalid data_format:', data_format)
  if ndim not in by_rank:
    raise ValueError('Input rank not supported:', ndim)
  return by_rank[ndim]
def normalize_tuple(value, n, name):
  """Transforms a single integer or iterable of integers into an integer tuple.

  Arguments:
    value: The value to validate and convert. Could an int, or any iterable
      of ints.
    n: The size of the tuple to be returned.
    name: The name of the argument being validated, e.g. "strides" or
      "kernel_size". This is only used to format error messages.

  Returns:
    A tuple of n integers.

  Raises:
    ValueError: If something else than an int/long or iterable thereof was
      passed.
  """
  if isinstance(value, int):
    # Broadcast a scalar to all n positions.
    return (value,) * n
  base_msg = ('The `' + name + '` argument must be a tuple of ' +
              str(n) + ' integers. Received: ' + str(value))
  try:
    value_tuple = tuple(value)
  except TypeError:
    raise ValueError(base_msg)
  if len(value_tuple) != n:
    raise ValueError(base_msg)
  # Every element must be convertible to int; report the offender otherwise.
  for single_value in value_tuple:
    try:
      int(single_value)
    except (ValueError, TypeError):
      raise ValueError(base_msg + ' including element ' + str(single_value) +
                       ' of type ' + str(type(single_value)))
  return value_tuple
def normalize_data_format(value):
  """Lowercase and validate a data-format string.

  Raises ValueError unless the lowercased value is 'channels_first' or
  'channels_last'.
  """
  lowered = value.lower()
  if lowered in ('channels_first', 'channels_last'):
    return lowered
  raise ValueError('The `data_format` argument must be one of '
                   '"channels_first", "channels_last". Received: ' +
                   str(value))
def normalize_padding(value):
  """Lowercase and validate a padding string ('valid' or 'same')."""
  lowered = value.lower()
  if lowered in ('valid', 'same'):
    return lowered
  # Note: the message reports the lowercased value, matching historic behavior.
  raise ValueError('The `padding` argument must be one of "valid", "same". '
                   'Received: ' + str(lowered))
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
  """Determines output length of a convolution given input length.

  Arguments:
    input_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.
    dilation: dilation rate, integer.

  Returns:
    The output length (integer), or None if input_length is None.
  """
  if input_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  # Effective filter extent once dilation gaps are accounted for.
  effective_filter = (filter_size - 1) * dilation + 1
  if padding == 'same':
    length = input_length
  elif padding == 'valid':
    length = input_length - effective_filter + 1
  else:  # 'full'
    length = input_length + effective_filter - 1
  # Ceil-divide by the stride.
  return (length + stride - 1) // stride
def conv_input_length(output_length, filter_size, padding, stride):
  """Determines input length of a convolution given output length.

  Arguments:
    output_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.

  Returns:
    The input length (integer), or None if output_length is None.
  """
  if output_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  # Implicit one-sided padding implied by each padding mode.
  pad_amount = {
      'same': filter_size // 2,
      'valid': 0,
      'full': filter_size - 1,
  }[padding]
  return (output_length - 1) * stride - 2 * pad_amount + filter_size
def deconv_output_length(input_length, filter_size, padding, stride):
  """Determines output length of a transposed convolution given input length.

  Arguments:
    input_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.

  Returns:
    The output length (integer), or None if input_length is None.
  """
  if input_length is None:
    return None
  # Upsample by the stride, then correct for the padding mode
  # ('same' needs no correction).
  length = input_length * stride
  if padding == 'valid':
    length += max(filter_size - stride, 0)
  elif padding == 'full':
    length -= (stride + filter_size - 2)
  return length
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
  """Return either `true_fn()` if predicate `pred` is true else `false_fn()`.

  If `pred` is a bool or has a constant value, we return either `true_fn()`
  or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.

  Arguments:
    pred: A scalar determining whether to return the result of `true_fn` or
      `false_fn`.
    true_fn: The callable to be performed if pred is true.
    false_fn: The callable to be performed if pred is false.
    name: Optional name prefix when using `tf.cond`.

  Returns:
    Tensors returned by the call to either `true_fn` or `false_fn`.

  Raises:
    TypeError: If `true_fn` or `false_fn` is not callable.
  """
  # A Variable's value is not statically known, so always build a dynamic
  # tf.cond for it rather than attempting constant folding.
  if isinstance(pred, variables.Variable):
    return control_flow_ops.cond(pred, true_fn=true_fn, false_fn=false_fn,
                                 name=name)
  # Otherwise defer to smart_cond, which calls the appropriate branch
  # directly when `pred` has a statically-known constant value.
  return control_flow_ops.smart_cond(pred, true_fn=true_fn,
                                     false_fn=false_fn, name=name)
def constant_value(pred):
  """Return the bool value for `pred`, or None if `pred` had a dynamic value.

  Arguments:
    pred: A scalar, either a Python bool or a TensorFlow boolean variable
      or tensor, or the Python integer 1 or 0.

  Returns:
    True or False if `pred` has a constant boolean value, None otherwise.

  Raises:
    TypeError: If `pred` is not a Variable, Tensor or bool, or Python
      integer 1 or 0.
  """
  # Allow integer booleans.
  if isinstance(pred, int):
    if pred == 1:
      pred = True
    elif pred == 0:
      pred = False

  # A Variable's value is only available at run time, so it is never constant.
  if isinstance(pred, variables.Variable):
    return None
  return control_flow_ops.smart_constant_value(pred)
def object_list_uid(object_list):
  """Creates a single string from object ids.

  The (possibly nested) structure is flattened first; each element contributes
  the absolute value of its id(), joined with ', '.
  """
  flattened = nest.flatten(object_list)
  return ', '.join(str(abs(id(item))) for item in flattened)
def static_shape(x):
  """Get the static shape of a Tensor, or None if it is unavailable."""
  if x is None:
    return None
  try:
    shape_list = x.get_shape().as_list()
  except ValueError:
    # The shape is not fully defined / not statically known.
    return None
  return tuple(shape_list)
def get_reachable_from_inputs(inputs, targets=None):
  """Returns the set of tensors reachable from `inputs`.

  Stops if all targets have been found (target is optional).

  Only valid in Symbolic mode, not Eager mode.

  Args:
    inputs: List of tensors.
    targets: List of tensors.

  Returns:
    A set of tensors reachable from the inputs (includes the inputs themselves).
  """
  reachable = set(inputs)
  if targets:
    targets = set(targets)
  # The list is used as a FIFO queue: new items are inserted at the front
  # and processed items are popped from the back.
  queue = inputs[:]
  while queue:
    x = queue.pop()
    outputs = []
    try:
      consumers = x.consumers()
    except AttributeError:
      # Case where x is a variable type
      consumers = [x.op]
    for z in consumers:
      consumer_outputs = z.outputs
      if consumer_outputs:  # May be None
        outputs += consumer_outputs
    for y in outputs:
      if y not in reachable:
        reachable.add(y)
        queue.insert(0, y)
    # Early exit once every requested target has been reached.
    if targets and targets.issubset(reachable):
      return reachable
  return reachable
| 30.631034 | 80 | 0.656873 |
79529f3ac5b46e54e166c22b30265b0bc77849f9 | 495 | py | Python | server/dataset/admin.py | johnugeorge/medperf | 5bc3f643064df14e9476bd4d4c1a4c0cce5337d5 | [
"Apache-2.0"
] | 1 | 2021-09-24T18:09:53.000Z | 2021-09-24T18:09:53.000Z | server/dataset/admin.py | johnugeorge/medperf | 5bc3f643064df14e9476bd4d4c1a4c0cce5337d5 | [
"Apache-2.0"
] | 2 | 2021-09-27T16:14:04.000Z | 2021-11-03T14:24:54.000Z | server/dataset/admin.py | johnugeorge/medperf | 5bc3f643064df14e9476bd4d4c1a4c0cce5337d5 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from .models import Dataset
class DatasetAdmin(admin.ModelAdmin):
    """Django admin configuration for Dataset."""

    # Columns shown in the admin changelist view.
    list_display = (
        "name",
        "description",
        "location",
        "owner",
        "input_data_hash",
        "generated_uid",
        "split_seed",
        "data_preparation_mlcube",
        "state",
        "is_valid",
        "generated_metadata",
        "user_metadata",
        "created_at",
        "modified_at",
    )


# Make the Dataset model manageable through the admin site.
admin.site.register(Dataset, DatasetAdmin)
79529f3fb9cf32e1c14a6f57696bce1ad65c9010 | 528 | bzl | Python | third_party/hexagon/workspace.bzl | carchrae/tensorflow | 6a69a6b2e286b14ac9ae813998bb0d78b6fee440 | [
"Apache-2.0"
] | 1 | 2020-02-15T14:00:01.000Z | 2020-02-15T14:00:01.000Z | third_party/hexagon/workspace.bzl | sagol/tensorflow | 04f2870814d2773e09dcfa00cbe76a66a2c4de88 | [
"Apache-2.0"
] | 2 | 2021-08-25T15:58:27.000Z | 2022-02-10T02:04:40.000Z | third_party/hexagon/workspace.bzl | sagol/tensorflow | 04f2870814d2773e09dcfa00cbe76a66a2c4de88 | [
"Apache-2.0"
] | 2 | 2021-04-28T20:57:17.000Z | 2022-01-11T13:05:41.000Z | """Loads the Hexagon NN Header files library, used by TF Lite."""
load("//third_party:repo.bzl", "third_party_http_archive")
def repo():
    """Fetches the Hexagon NN header archive as the @hexagon_nn repository."""
    third_party_http_archive(
        name = "hexagon_nn",
        sha256 = "4cbf3c18834e24b1f64cc507f9c2f22b4fe576c6ff938d55faced5d8f1bddf62",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/storage.cloud.google.com/download.tensorflow.org/tflite/hexagon_nn_headers_v1.10.3.1.2.tgz",
        ],
        # Overlay BUILD file defining targets for the extracted headers.
        build_file = "//third_party/hexagon:BUILD",
    )
7952a03a8a520fab01007c85af5c564f954b244d | 1,209 | py | Python | www/config.py | ericchuhong/WellSoonWeb | 4656c442dbd5413a28dcf2d25bae058a7b5bad85 | [
"MIT"
] | 1 | 2016-05-05T02:25:48.000Z | 2016-05-05T02:25:48.000Z | www/config.py | ericchuhong/WellSoonWeb | 4656c442dbd5413a28dcf2d25bae058a7b5bad85 | [
"MIT"
] | null | null | null | www/config.py | ericchuhong/WellSoonWeb | 4656c442dbd5413a28dcf2d25bae058a7b5bad85 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Configuration
'''
__author__ = 'Chuhong Ma'
import config_default
class Dict(dict):
    """A dict that also exposes its items as attributes (d.key <=> d['key'])."""

    def __init__(self, names=(), values=(), **kw):
        """Build from parallel name/value sequences plus keyword items."""
        super(Dict, self).__init__(**kw)
        for name, value in zip(names, values):
            self[name] = value

    def __getattr__(self, key):
        # Only called when normal attribute lookup fails; fall back to items.
        if key in self:
            return self[key]
        raise AttributeError(r"'Dict' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # Attribute assignment stores an item instead of an instance attribute.
        self[key] = value
def merge(defaults, override):
    """Recursively overlay `override` onto `defaults`, returning a new dict.

    Only keys present in `defaults` appear in the result; nested dicts are
    merged recursively, other overridden values replace the default.
    """
    merged = {}
    for key, default_value in defaults.items():
        if key not in override:
            merged[key] = default_value
        elif isinstance(default_value, dict):
            merged[key] = merge(default_value, override[key])
        else:
            merged[key] = override[key]
    return merged
def toDict(d):
    """Recursively convert a plain dict into an attribute-accessible Dict."""
    converted = Dict()
    for key, value in d.items():
        converted[key] = toDict(value) if isinstance(value, dict) else value
    return converted
# Start from the default configuration.
configs = config_default.configs

# Overlay local overrides when a config_override module is present;
# running without one is fine.
try:
    import config_override
    configs = merge(configs, config_override.configs)
except ImportError:
    pass

# Expose the merged configuration with attribute-style access.
configs = toDict(configs)

print(configs)
7952a077fbe07cc2c8d08144887c55becfa7d697 | 57,415 | py | Python | ambari-server/src/main/python/ambari_server/setupSecurity.py | bartjanssens92/ambari | c50dc8d8ad7c559bf7baf93e49616135685b9c85 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/python/ambari_server/setupSecurity.py | bartjanssens92/ambari | c50dc8d8ad7c559bf7baf93e49616135685b9c85 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/python/ambari_server/setupSecurity.py | bartjanssens92/ambari | c50dc8d8ad7c559bf7baf93e49616135685b9c85 | [
"Apache-2.0"
] | 1 | 2021-09-06T03:23:25.000Z | 2021-09-06T03:23:25.000Z | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import \
ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import base64
import fileinput
import getpass
import logging
import os
import re
import shutil
import stat
import sys
import tempfile
import time
import urllib2
from ambari_commons.exceptions import FatalException, NonFatalException
from ambari_commons.logging_utils import print_warning_msg, print_error_msg, print_info_msg, get_verbose
from ambari_commons.os_check import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons.os_utils import is_root, set_file_permissions, \
run_os_command, search_file, is_valid_filepath, change_owner, get_ambari_repo_file_full_name, get_file_owner
from ambari_server.dbConfiguration import ensure_jdbc_driver_is_installed
from ambari_server.serverClassPath import ServerClassPath
from ambari_server.serverConfiguration import configDefaults, parse_properties_file, \
encrypt_password, find_jdk, find_properties_file, get_alias_string, get_ambari_properties, get_conf_dir, \
get_credential_store_location, get_is_persisted, get_is_secure, get_master_key_location, get_db_type, write_property, \
get_original_master_key, get_value_from_properties, get_java_exe_path, is_alias_string, read_ambari_user, \
read_passwd_for_alias, remove_password_file, save_passwd_for_alias, store_password_file, update_properties_2, \
BLIND_PASSWORD, BOOTSTRAP_DIR_PROPERTY, JDBC_PASSWORD_FILENAME, JDBC_PASSWORD_PROPERTY, \
JDBC_RCA_PASSWORD_ALIAS, JDBC_RCA_PASSWORD_FILE_PROPERTY, JDBC_USE_INTEGRATED_AUTH_PROPERTY, \
LDAP_MGR_PASSWORD_ALIAS, LDAP_MGR_PASSWORD_PROPERTY, CLIENT_SECURITY, \
SECURITY_IS_ENCRYPTION_ENABLED, SECURITY_KEY_ENV_VAR_NAME, SECURITY_KERBEROS_JASS_FILENAME, \
SECURITY_PROVIDER_KEY_CMD, SECURITY_MASTER_KEY_FILENAME, SSL_TRUSTSTORE_PASSWORD_ALIAS, \
SSL_TRUSTSTORE_PASSWORD_PROPERTY, SSL_TRUSTSTORE_PATH_PROPERTY, SSL_TRUSTSTORE_TYPE_PROPERTY, \
JDK_NAME_PROPERTY, JCE_NAME_PROPERTY, JAVA_HOME_PROPERTY, \
get_resources_location, SECURITY_MASTER_KEY_LOCATION, SETUP_OR_UPGRADE_MSG, \
CHECK_AMBARI_KRB_JAAS_CONFIGURATION_PROPERTY
from ambari_server.serverUtils import is_server_runing, get_ambari_server_api_base, \
get_ambari_admin_username_password_pair, perform_changes_via_rest_api, get_ssl_context, get_cluster_name, \
get_eligible_services, get_boolean_from_dictionary, get_value_from_dictionary
from ambari_server.setupActions import SETUP_ACTION, LDAP_SETUP_ACTION
from ambari_server.userInput import get_validated_string_input, get_prompt_default, read_password, get_YN_input, \
quit_if_has_answer
from contextlib import closing
from urllib2 import HTTPError
logger = logging.getLogger(__name__)

# LDAP server flavors recognized by setup (validated via REGEX_LDAP_TYPE).
LDAP_AD="AD"
LDAP_IPA="IPA"
LDAP_GENERIC="Generic"

LDAP_TYPES = [LDAP_AD, LDAP_IPA, LDAP_GENERIC]

# Input-validation regexes used when prompting the operator for values.
REGEX_IP_ADDRESS = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
REGEX_HOSTNAME = "^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
REGEX_PORT = "^([0-9]{1,5}$)"
REGEX_HOSTNAME_PORT = "^(.*:[0-9]{1,5}$)"
REGEX_TRUE_FALSE = "^(true|false)?$"
REGEX_SKIP_CONVERT = "^(skip|convert)?$"
REGEX_REFERRAL = "^(follow|ignore)?$"
REGEX_LDAP_TYPE = "^({})?$".format("|".join(LDAP_TYPES))
REGEX_ANYTHING = ".*"

# Java command template for the LDAP-to-PAM migration helper; its output is
# appended to the server out file.
LDAP_TO_PAM_MIGRATION_HELPER_CMD = "{0} -cp {1} " + \
                                   "org.apache.ambari.server.security.authentication.LdapToPamMigrationHelper" + \
                                   " >> " + configDefaults.SERVER_OUT_FILE + " 2>&1"

AUTO_GROUP_CREATION = "auto.group.creation"

# REST endpoints (relative to the Ambari API base) used by LDAP sync/setup.
SERVER_API_LDAP_URL = 'ldap_sync_events'
SETUP_LDAP_CONFIG_URL = 'services/AMBARI/components/AMBARI_SERVER/configurations/ldap-configuration'

PAM_CONFIG_FILE = 'pam.configuration'

# Property keys of the ldap-configuration category manipulated below.
LDAP_MGR_USERNAME_PROPERTY = "ambari.ldap.connectivity.bind_dn"
LDAP_MGR_PASSWORD_FILENAME = "ldap-password.dat"
LDAP_ANONYMOUS_BIND="ambari.ldap.connectivity.anonymous_bind"
LDAP_USE_SSL="ambari.ldap.connectivity.use_ssl"
LDAP_DISABLE_ENDPOINT_IDENTIFICATION = "ambari.ldap.advanced.disable_endpoint_identification"
NO_AUTH_METHOD_CONFIGURED = "no auth method"

AMBARI_LDAP_AUTH_ENABLED = "ambari.ldap.authentication.enabled"
LDAP_MANAGE_SERVICES = "ambari.ldap.manage_services"
LDAP_ENABLED_SERVICES = "ambari.ldap.enabled_services"
WILDCARD_FOR_ALL_SERVICES = "*"
FETCH_SERVICES_FOR_LDAP_ENTRYPOINT = "clusters/%s/services?ServiceInfo/ldap_integration_supported=true&fields=ServiceInfo/*"
def read_master_key(isReset=False, options = None):
  """
  Interactively reads (and confirms) the credential-store master key.

  When isReset is True the prompt asks for a *new* master key. The answer may
  be pre-supplied via options.master_key. Loops until a non-empty key is
  entered twice identically, then returns it.
  """
  passwordPattern = ".*"
  passwordPrompt = "Please provide master key for locking the credential store: "
  passwordDescr = "Invalid characters in password. Use only alphanumeric or "\
                  "_ or - characters"
  passwordDefault = ""
  if isReset:
    passwordPrompt = "Enter new Master Key: "

  input = True
  while(input):
    masterKey = get_validated_string_input(passwordPrompt, passwordDefault, passwordPattern, passwordDescr,
                                           True, True, answer = options.master_key)

    if not masterKey:
      print "Master Key cannot be empty!"
      continue

    # Ask again and require the two entries to match before accepting.
    masterKey2 = get_validated_string_input("Re-enter master key: ", passwordDefault, passwordPattern, passwordDescr,
                                            True, True, answer = options.master_key)

    if masterKey != masterKey2:
      print "Master key did not match!"
      continue

    input = False

  return masterKey
def save_master_key(options, master_key, key_location, persist=True):
  """
  Persists the master key via the Java security provider helper.

  Requires a JDK to be installed (returns 1 with an error message otherwise).
  Runs SECURITY_PROVIDER_KEY_CMD with the key, its target location and the
  persist flag; only logs the helper's return code.
  """
  if master_key:
    jdk_path = find_jdk()
    if jdk_path is None:
      print_error_msg("No JDK found, please run the \"setup\" "
                      "command to install a JDK automatically or install any "
                      "JDK manually to " + configDefaults.JDK_INSTALL_DIR)
      return 1
    serverClassPath = ServerClassPath(get_ambari_properties(), options)
    command = SECURITY_PROVIDER_KEY_CMD.format(get_java_exe_path(),
      serverClassPath.get_full_ambari_classpath_escaped_for_shell(), master_key, key_location, persist)
    (retcode, stdout, stderr) = run_os_command(command)
    print_info_msg("Return code from credential provider save KEY: " +
                   str(retcode))
  else:
    print_error_msg("Master key cannot be None.")
def adjust_directory_permissions(ambari_user):
  """
  Recreates the bootstrap directory and applies ownership/permission fixes.

  Builds up configDefaults.NR_ADJUST_OWNERSHIP_LIST with extra entries
  (master key, credential store, trust store, JDK/JCE artifacts, ambari repo
  file) and then applies both the permission list and the ownership list,
  substituting ambari_user into the "{0}" owner placeholders.
  """
  properties = get_ambari_properties()

  bootstrap_dir = os.path.abspath(get_value_from_properties(properties, BOOTSTRAP_DIR_PROPERTY))
  print_info_msg("Cleaning bootstrap directory ({0}) contents...".format(bootstrap_dir))

  if os.path.exists(bootstrap_dir):
    shutil.rmtree(bootstrap_dir) #Ignore the non-existent dir error

  if not os.path.exists(bootstrap_dir):
    try:
      os.makedirs(bootstrap_dir)
    except Exception, ex:
      print_warning_msg("Failed recreating the bootstrap directory: {0}".format(str(ex)))
      pass
  else:
    print_warning_msg("Bootstrap directory lingering around after 5s. Unable to complete the cleanup.")
    pass

  # Add master key and credential store if exists
  keyLocation = get_master_key_location(properties)
  masterKeyFile = search_file(SECURITY_MASTER_KEY_FILENAME, keyLocation)
  if masterKeyFile:
    configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((masterKeyFile, configDefaults.MASTER_KEY_FILE_PERMISSIONS, "{0}", False))
  credStoreFile = get_credential_store_location(properties)
  if os.path.exists(credStoreFile):
    configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((credStoreFile, configDefaults.CREDENTIALS_STORE_FILE_PERMISSIONS, "{0}", False))
  trust_store_location = properties[SSL_TRUSTSTORE_PATH_PROPERTY]
  if trust_store_location:
    configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((trust_store_location, configDefaults.TRUST_STORE_LOCATION_PERMISSIONS, "{0}", False))

  # Update JDK and JCE permissions
  resources_dir = get_resources_location(properties)
  jdk_file_name = properties.get_property(JDK_NAME_PROPERTY)
  jce_file_name = properties.get_property(JCE_NAME_PROPERTY)
  java_home = properties.get_property(JAVA_HOME_PROPERTY)
  if jdk_file_name:
    jdk_file_path = os.path.abspath(os.path.join(resources_dir, jdk_file_name))
    if(os.path.exists(jdk_file_path)):
      configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jdk_file_path, "644", "{0}", False))
  if jce_file_name:
    jce_file_path = os.path.abspath(os.path.join(resources_dir, jce_file_name))
    if(os.path.exists(jce_file_path)):
      configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jce_file_path, "644", "{0}", False))
  if java_home:
    jdk_security_dir = os.path.abspath(os.path.join(java_home, configDefaults.JDK_SECURITY_DIR))
    if(os.path.exists(jdk_security_dir)):
      configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jdk_security_dir + "/*", "644", "{0}", True))
      configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jdk_security_dir, "755", "{0}", False))

  # Grant read permissions to all users. This is required when a non-admin user is configured to setup ambari-server.
  # However, do not change ownership of the repo file to ambari user.
  ambari_repo_file = get_ambari_repo_file_full_name()
  if ambari_repo_file:
    if (os.path.exists(ambari_repo_file)):
      ambari_repo_file_owner = get_file_owner(ambari_repo_file)
      configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((ambari_repo_file, "644", ambari_repo_file_owner, False))

  print "Adjusting ambari-server permissions and ownership..."

  # Apply permission changes; owner patterns like "{0}" expand to ambari_user.
  for pack in configDefaults.NR_ADJUST_OWNERSHIP_LIST:
    file = pack[0]
    mod = pack[1]
    user = pack[2].format(ambari_user)
    recursive = pack[3]
    print_info_msg("Setting file permissions: {0} {1} {2} {3}".format(file, mod, user, recursive))
    set_file_permissions(file, mod, user, recursive)
  # Apply plain ownership changes (no mode bits).
  for pack in configDefaults.NR_CHANGE_OWNERSHIP_LIST:
    path = pack[0]
    user = pack[1].format(ambari_user)
    recursive = pack[2]
    print_info_msg("Changing ownership: {0} {1} {2}".format(path, user, recursive))
    change_owner(path, user, recursive)
def configure_ldap_password(ldap_manager_password_option, interactive_mode):
  """Return the LDAP Bind DN password.

  In non-interactive mode the value supplied via the command-line option is
  returned unchanged; otherwise the user is prompted for the password (with
  a confirmation prompt).
  """
  if not interactive_mode:
    return ldap_manager_password_option
  # Interactive: prompt with confirmation; any characters are accepted (".*").
  return read_password("", ".*",
                       'Enter Bind DN Password: ',
                       "Invalid characters in password.",
                       ldap_manager_password_option,
                       'Confirm Bind DN Password: ')
#
# Get the principal names from the given CSV file and set them on the given LDAP event specs.
#
def get_ldap_event_spec_names(file, specs, new_specs):
  """
  Read principal names from the given CSV file, set them on the first spec in
  *new_specs* (newlines and tabs stripped so the value is one CSV string),
  then extend *specs* with *new_specs* in place.

  Raises:
    FatalException: if the file does not exist or cannot be read.
  """
  # BUG FIX: the missing-file check used to live inside the try block, so the
  # FatalException("... not found.") it raised was immediately caught by the
  # broad `except Exception` below and re-raised with the misleading
  # "Caught exception reading file" message. Check existence first, outside
  # the try, so the original error message survives.
  if not os.path.exists(file):
    err = 'Sync event creation failed. File ' + file + ' not found.'
    raise FatalException(1, err)
  try:
    new_spec = new_specs[0]
    with open(file, 'r') as names_file:
      names = names_file.read()
    # Collapse the file contents into a single comma-separated string.
    new_spec['names'] = names.replace('\n', '').replace('\t', '')
    specs += new_specs
  except Exception as exception:
    err = 'Caught exception reading file ' + file + ' : ' + str(exception)
    raise FatalException(1, err)
class LdapSyncOptions:
  """
  Typed view over the command-line options relevant to 'ambari-server
  sync-ldap'. Attributes missing from *options* fall back to safe defaults
  (False for flags, None for values) so downstream code needs no hasattr
  checks.
  """

  def __init__(self, options):
    # getattr with a default replaces the original's seven repetitive
    # try/except AttributeError blocks; behavior is identical.
    self.ldap_sync_all = getattr(options, 'ldap_sync_all', False)
    self.ldap_sync_existing = getattr(options, 'ldap_sync_existing', False)
    self.ldap_sync_users = getattr(options, 'ldap_sync_users', None)
    self.ldap_sync_groups = getattr(options, 'ldap_sync_groups', None)
    self.ldap_sync_admin_name = getattr(options, 'ldap_sync_admin_name', None)
    self.ldap_sync_admin_password = getattr(options, 'ldap_sync_admin_password', None)
    self.ldap_sync_post_process_existing_users = getattr(options, 'ldap_sync_post_process_existing_users', False)

  def no_ldap_sync_options_set(self):
    """Return True when no sync scope (all/existing/users/groups) was given."""
    return not self.ldap_sync_all and not self.ldap_sync_existing and self.ldap_sync_users is None and self.ldap_sync_groups is None
def get_ldap_property_from_db(properties, admin_login, admin_password, property_name):
  """Fetch one LDAP property value from the server DB, or None when no LDAP
  configuration exists there."""
  db_properties = get_ldap_properties_from_db(properties, admin_login, admin_password)
  if db_properties:
    return db_properties[property_name]
  return None
def get_ldap_properties_from_db(properties, admin_login, admin_password):
  """
  Fetch the LDAP configuration stored in the Ambari DB through the server's
  REST API, polling up to ~60 times (1s apart) until a non-empty properties
  map is returned.

  Returns the properties dict, or None when the server answers 404 (no LDAP
  configuration stored).

  Raises:
    FatalException: on timeout, non-200 response, or any other error.
  """
  ldap_properties = None
  url = get_ambari_server_api_base(properties) + SETUP_LDAP_CONFIG_URL
  # HTTP Basic auth header; encodestring appends a newline, hence the strip.
  admin_auth = base64.encodestring('%s:%s' % (admin_login, admin_password)).replace('\n', '')
  request = urllib2.Request(url)
  request.add_header('Authorization', 'Basic %s' % admin_auth)
  request.add_header('X-Requested-By', 'ambari')
  request.get_method = lambda: 'GET'
  request_in_progress = True

  sys.stdout.write('\nFetching LDAP configuration from DB')
  num_of_tries = 0
  while request_in_progress:
    num_of_tries += 1
    if num_of_tries == 60:
      raise FatalException(1, "Could not fetch LDAP configuration within a minute; giving up!")
    sys.stdout.write('.')
    sys.stdout.flush()

    try:
      with closing(urllib2.urlopen(request, context=get_ssl_context(properties))) as response:
        response_status_code = response.getcode()
        if response_status_code != 200:
          request_in_progress = False
          err = 'Error while fetching LDAP configuration. Http status code - ' + str(response_status_code)
          raise FatalException(1, err)
        else:
          response_body = json.loads(response.read())
          ldap_properties = response_body['Configuration']['properties']
          if not ldap_properties:
            # Configuration resource exists but is still empty; retry shortly.
            time.sleep(1)
          else:
            request_in_progress = False
    except HTTPError as e:
      if e.code == 404:
        # 404 means no LDAP configuration was ever stored — not an error.
        sys.stdout.write(' No configuration.')
        return None
      err = 'Error while fetching LDAP configuration. Error details: %s' % e
      raise FatalException(1, err)
    except Exception as e:
      err = 'Error while fetching LDAP configuration. Error details: %s' % e
      raise FatalException(1, err)

  return ldap_properties
def is_ldap_enabled(properties, admin_login, admin_password):
  """Return the stored LDAP-auth-enabled flag as a string ('true'/'false');
  'false' when the property is absent from the DB."""
  enabled_flag = get_ldap_property_from_db(properties, admin_login, admin_password, AMBARI_LDAP_AUTH_ENABLED)
  if enabled_flag is None:
    return 'false'
  return enabled_flag
#
# Sync users and groups with configured LDAP
#
def sync_ldap(options):
  """
  Trigger an LDAP users/groups sync on a running Ambari Server via its REST
  API and poll the created sync event until the server reports COMPLETE
  (printing a per-principal-type summary) or ERROR.

  Preconditions: server running, client.security is not 'pam', properties
  readable, at least one sync scope option given, LDAP configured on server.

  Raises:
    FatalException: on any precondition failure or REST/sync error.
  """
  logger.info("Sync users and groups with configured LDAP.")
  properties = get_ambari_properties()

  # PAM and LDAP authentication are mutually exclusive.
  if get_value_from_properties(properties,CLIENT_SECURITY,"") == 'pam':
    err = "PAM is configured. Can not sync LDAP."
    raise FatalException(1, err)

  server_status, pid = is_server_runing()
  if not server_status:
    err = 'Ambari Server is not running.'
    raise FatalException(1, err)

  if properties == -1:
    raise FatalException(1, "Failed to read properties file.")

  # set ldap sync options
  ldap_sync_options = LdapSyncOptions(options)

  if ldap_sync_options.no_ldap_sync_options_set():
    err = 'Must specify a sync option (all, existing, users or groups). Please invoke ambari-server.py --help to print the options.'
    raise FatalException(1, err)

  #TODO: use serverUtils.get_ambari_admin_username_password_pair (requires changes in ambari-server.py too to modify option names)
  # Admin credentials: prefer CLI-supplied values, otherwise prompt.
  admin_login = ldap_sync_options.ldap_sync_admin_name\
    if ldap_sync_options.ldap_sync_admin_name is not None and ldap_sync_options.ldap_sync_admin_name \
    else get_validated_string_input(prompt="Enter Ambari Admin login: ", default=None,
                                    pattern=None, description=None,
                                    is_pass=False, allowEmpty=False)
  admin_password = ldap_sync_options.ldap_sync_admin_password \
    if ldap_sync_options.ldap_sync_admin_password is not None and ldap_sync_options.ldap_sync_admin_password \
    else get_validated_string_input(prompt="Enter Ambari Admin password: ", default=None,
                                    pattern=None, description=None,
                                    is_pass=True, allowEmpty=False)

  if is_ldap_enabled(properties, admin_login, admin_password) != 'true':
    err = "LDAP is not configured. Run 'ambari-server setup-ldap' first."
    raise FatalException(1, err)

  url = get_ambari_server_api_base(properties) + SERVER_API_LDAP_URL
  admin_auth = base64.encodestring('%s:%s' % (admin_login, admin_password)).replace('\n', '')
  request = urllib2.Request(url)
  request.add_header('Authorization', 'Basic %s' % admin_auth)
  request.add_header('X-Requested-By', 'ambari')

  # Build the sync event body according to the requested scope.
  if ldap_sync_options.ldap_sync_all:
    sys.stdout.write('\nSyncing all.')
    bodies = [{"Event":{"specs":[{"principal_type":"users","sync_type":"all"},{"principal_type":"groups","sync_type":"all"}]}}]
  elif ldap_sync_options.ldap_sync_existing:
    sys.stdout.write('\nSyncing existing.')
    bodies = [{"Event":{"specs":[{"principal_type":"users","sync_type":"existing"},{"principal_type":"groups","sync_type":"existing"}]}}]
  else:
    sys.stdout.write('\nSyncing specified users and groups.')
    bodies = [{"Event":{"specs":[]}}]
    body = bodies[0]
    events = body['Event']
    specs = events['specs']

    # Read the principal names to sync from the user-supplied CSV files.
    if ldap_sync_options.ldap_sync_users is not None:
      new_specs = [{"principal_type":"users","sync_type":"specific","names":""}]
      get_ldap_event_spec_names(ldap_sync_options.ldap_sync_users, specs, new_specs)
    if ldap_sync_options.ldap_sync_groups is not None:
      new_specs = [{"principal_type":"groups","sync_type":"specific","names":""}]
      get_ldap_event_spec_names(ldap_sync_options.ldap_sync_groups, specs, new_specs)

  if ldap_sync_options.ldap_sync_post_process_existing_users:
    for spec in bodies[0]["Event"]["specs"]:
      spec["post_process_existing_users"] = "true"

  if get_verbose():
    sys.stdout.write('\nCalling API ' + url + ' : ' + str(bodies) + '\n')

  # POST the sync event.
  request.add_data(json.dumps(bodies))
  request.get_method = lambda: 'POST'

  try:
    response = urllib2.urlopen(request, context=get_ssl_context(properties))
  except Exception as e:
    err = 'Sync event creation failed. Error details: %s' % e
    raise FatalException(1, err)

  response_status_code = response.getcode()
  if response_status_code != 201:
    err = 'Error during syncing. Http status code - ' + str(response_status_code)
    raise FatalException(1, err)
  response_body = json.loads(response.read())

  # Poll the created event resource (href from the creation response) until
  # the sync finishes.
  url = response_body['resources'][0]['href']
  request = urllib2.Request(url)
  request.add_header('Authorization', 'Basic %s' % admin_auth)
  request.add_header('X-Requested-By', 'ambari')
  body = [{"LDAP":{"synced_groups":"*","synced_users":"*"}}]
  request.add_data(json.dumps(body))
  request.get_method = lambda: 'GET'
  request_in_progress = True

  while request_in_progress:
    sys.stdout.write('.')
    sys.stdout.flush()

    try:
      response = urllib2.urlopen(request, context=get_ssl_context(properties))
    except Exception as e:
      request_in_progress = False
      err = 'Sync event check failed. Error details: %s' % e
      raise FatalException(1, err)

    response_status_code = response.getcode()
    if response_status_code != 200:
      err = 'Error during syncing. Http status code - ' + str(response_status_code)
      raise FatalException(1, err)
    response_body = json.loads(response.read())

    sync_info = response_body['Event']

    if sync_info['status'] == 'ERROR':
      raise FatalException(1, str(sync_info['status_detail']))
    elif sync_info['status'] == 'COMPLETE':
      print '\n\nCompleted LDAP Sync.'
      print 'Summary:'
      for principal_type, summary in sync_info['summary'].iteritems():
        print '  {0}:'.format(principal_type)
        for action, amount in summary.iteritems():
          print '    {0} = {1!s}'.format(action, amount)
      request_in_progress = False
    else:
      # Still running; wait a second before polling again.
      time.sleep(1)

  sys.stdout.write('\n')
  sys.stdout.flush()
def setup_master_key(options):
  """
  'encrypt-passwords' action: create or reset the security master key and
  encrypt the stored database and TrustStore passwords with it.

  Returns 0 on success; 1 when 'setup' has not been run yet, or when the
  master key is lost while stored passwords are still encrypted.

  Raises:
    FatalException: when the properties file cannot be read.
  """
  if not is_root():
    warn = 'ambari-server setup-https is run as ' \
           'non-root user, some sudo privileges might be required'
    print warn

  properties = get_ambari_properties()
  if properties == -1:
    raise FatalException(1, "Failed to read properties file.")

  db_windows_auth_prop = properties.get_property(JDBC_USE_INTEGRATED_AUTH_PROPERTY)
  # SQL auth (username/password) applies unless Windows integrated auth is on.
  db_sql_auth = False if db_windows_auth_prop and db_windows_auth_prop.lower() == 'true' else True
  db_password = properties.get_property(JDBC_PASSWORD_PROPERTY)
  # Encrypt passwords cannot be called before setup
  if db_sql_auth and not db_password:
    print 'Please call "setup" before "encrypt-passwords". Exiting...'
    return 1

  # Check configuration for location of master key
  isSecure = get_is_secure(properties)
  (isPersisted, masterKeyFile) = get_is_persisted(properties)

  # Read clear text DB password from file
  if db_sql_auth and not is_alias_string(db_password) and os.path.isfile(db_password):
    with open(db_password, 'r') as passwdfile:
      db_password = passwdfile.read()

  ts_password = properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
  resetKey = False
  masterKey = None

  if isSecure:
    print "Password encryption is enabled."
    # Non-interactive runs (security_option set) always reset the key.
    resetKey = True if options.security_option is not None else get_YN_input("Do you want to reset Master Key? [y/n] (n): ", False)

  # For encrypting of only unencrypted passwords without resetting the key ask
  # for master key if not persisted.
  if isSecure and not isPersisted and not resetKey:
    print "Master Key not persisted."
    masterKey = get_original_master_key(properties, options)
    pass

  # Make sure both passwords are clear-text if master key is lost
  if resetKey:
    if not isPersisted:
      print "Master Key not persisted."
      masterKey = get_original_master_key(properties, options)
      # Unable get the right master key or skipped question <enter>
      if not masterKey:
        # Key is lost and passwords are still encrypted: tell the user how to
        # recover manually and bail out.
        print "To disable encryption, do the following:"
        print "- Edit " + find_properties_file() + \
              " and set " + SECURITY_IS_ENCRYPTION_ENABLED + " = " + "false."
        err = "{0} is already encrypted. Please call {1} to store unencrypted" \
              " password and call 'encrypt-passwords' again."
        if db_sql_auth and db_password and is_alias_string(db_password):
          print err.format('- Database password', "'" + SETUP_ACTION + "'")
        if ts_password and is_alias_string(ts_password):
          print err.format('TrustStore password', "'" + LDAP_SETUP_ACTION + "'")
        return 1
      pass
    pass
  pass

  # Read back any encrypted passwords
  if db_sql_auth and db_password and is_alias_string(db_password):
    db_password = read_passwd_for_alias(JDBC_RCA_PASSWORD_ALIAS, masterKey)
  if ts_password and is_alias_string(ts_password):
    ts_password = read_passwd_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS, masterKey)
  # Read master key, if non-secure or reset is true
  if resetKey or not isSecure:
    masterKey = read_master_key(resetKey, options)
    persist = get_YN_input("Do you want to persist master key. If you choose " \
                           "not to persist, you need to provide the Master " \
                           "Key while starting the ambari server as an env " \
                           "variable named " + SECURITY_KEY_ENV_VAR_NAME + \
                           " or the start will prompt for the master key."
                           " Persist [y/n] (y)? ", True, options.master_key_persist)
    if persist:
      save_master_key(options, masterKey, get_master_key_location(properties) + os.sep +
                      SECURITY_MASTER_KEY_FILENAME, persist)
    elif not persist and masterKeyFile:
      # User chose not to persist a key that was persisted before: remove it.
      try:
        os.remove(masterKeyFile)
        print_info_msg("Deleting master key file at location: " + str(
          masterKeyFile))
      except Exception, e:
        print 'ERROR: Could not remove master key file. %s' % e
    # Blow up the credential store made with previous key, if any
    store_file = get_credential_store_location(properties)
    if os.path.exists(store_file):
      try:
        os.remove(store_file)
      except:
        print_warning_msg("Failed to remove credential store file.")
      pass
    pass
  pass

  propertyMap = {SECURITY_IS_ENCRYPTION_ENABLED: 'true'}
  # Encrypt only un-encrypted passwords
  if db_password and not is_alias_string(db_password):
    retCode = save_passwd_for_alias(JDBC_RCA_PASSWORD_ALIAS, db_password, masterKey)
    if retCode != 0:
      print 'Failed to save secure database password.'
    else:
      propertyMap[JDBC_PASSWORD_PROPERTY] = get_alias_string(JDBC_RCA_PASSWORD_ALIAS)
      remove_password_file(JDBC_PASSWORD_FILENAME)
      if properties.get_property(JDBC_RCA_PASSWORD_FILE_PROPERTY):
        propertyMap[JDBC_RCA_PASSWORD_FILE_PROPERTY] = get_alias_string(JDBC_RCA_PASSWORD_ALIAS)
  pass

  if ts_password and not is_alias_string(ts_password):
    retCode = save_passwd_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS, ts_password, masterKey)
    if retCode != 0:
      print 'Failed to save secure TrustStore password.'
    else:
      propertyMap[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS)
  pass

  update_properties_2(properties, propertyMap)

  # Since files for store and master are created we need to ensure correct
  # permissions
  ambari_user = read_ambari_user()
  if ambari_user:
    adjust_directory_permissions(ambari_user)

  return 0
def setup_ambari_krb5_jaas(options):
  """
  Configure the Kerberos JAAS file used by Ambari Server to access secured
  Hadoop daemons: prompt for (or take from options) the server principal and
  keytab path, then rewrite the keyTab=/principal= lines of the JAAS config
  in place.

  Raises:
    NonFatalException: when the JAAS config file cannot be found.
  """
  jaas_conf_file = search_file(SECURITY_KERBEROS_JASS_FILENAME, get_conf_dir())
  if os.path.exists(jaas_conf_file):
    print 'Setting up Ambari kerberos JAAS configuration to access ' + \
          'secured Hadoop daemons...'
    principal = get_validated_string_input('Enter ambari server\'s kerberos '
                                           'principal name (ambari@EXAMPLE.COM): ', 'ambari@EXAMPLE.COM', '.*', '', False,
                                           False, answer = options.jaas_principal)
    # NOTE(review): unlike the principal prompt above, this call passes False
    # positionally where the previous call passed a description string ('') —
    # confirm against get_validated_string_input's signature that the
    # positional arguments line up as intended.
    keytab = get_validated_string_input('Enter keytab path for ambari '
                                        'server\'s kerberos principal: ',
                                        '/etc/security/keytabs/ambari.keytab', '.*', False, False,
                                        validatorFunction=is_valid_filepath, answer = options.jaas_keytab)

    # fileinput with inplace=1 redirects stdout into the file, so the print
    # below writes the (possibly substituted) line back to jaas_conf_file.
    for line in fileinput.FileInput(jaas_conf_file, inplace=1):
      line = re.sub('keyTab=.*$', 'keyTab="' + keytab + '"', line)
      line = re.sub('principal=.*$', 'principal="' + principal + '"', line)
      print line,
    write_property(CHECK_AMBARI_KRB_JAAS_CONFIGURATION_PROPERTY, "true")
  else:
    raise NonFatalException('No jaas config file found at location: ' +
                            jaas_conf_file)
class LdapPropTemplate:
  """
  Describes one LDAP setup property: its ambari property name, the CLI option
  value (if any), the prompt pattern/validation regex, and a default that may
  depend on the LDAP server type.
  """

  def __init__(self, properties, i_option, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_default=None):
    self.prop_name = i_prop_name
    self.option = i_option
    # A value already stored in ambari.properties wins over the built-in default.
    stored_value = get_value_from_properties(properties, i_prop_name)
    if stored_value:
      self.default_value = LdapDefault(stored_value)
    else:
      self.default_value = i_prop_default
    self.prompt_pattern = i_prop_val_pattern
    self.prompt_regex = i_prompt_regex
    self.allow_empty_prompt = i_allow_empty_prompt

  def get_default_value(self, ldap_type):
    """Default for the given LDAP type, or None when no default is known."""
    if self.default_value:
      return self.default_value.get_default_value(ldap_type)
    return None

  def get_prompt_text(self, ldap_type):
    """Prompt string with the applicable default substituted in."""
    return format_prop_val_prompt(self.prompt_pattern, self.get_default_value(ldap_type))

  def get_input(self, ldap_type, interactive_mode):
    """CLI option value, or (interactively) a validated user answer."""
    if not interactive_mode:
      return self.option
    return get_validated_string_input(self.get_prompt_text(ldap_type),
                                      self.get_default_value(ldap_type), self.prompt_regex,
                                      "Invalid characters in the input!", False, self.allow_empty_prompt,
                                      answer = self.option)

  def should_query_ldap_type(self):
    """True when prompting for the LDAP type is needed to pick this default."""
    if self.allow_empty_prompt or self.option or not self.default_value:
      return False
    return self.default_value.depends_on_ldap_type()
class LdapDefault:
  """A fixed default value that does not vary with the LDAP server type."""

  def __init__(self, value):
    self.default_value = value

  def get_default_value(self, ldap_type):
    """Return the stored value; *ldap_type* is deliberately ignored."""
    return self.default_value

  def depends_on_ldap_type(self):
    """Fixed defaults never require knowing the LDAP type."""
    return False
class LdapDefaultMap(LdapDefault):
  """A default value chosen per LDAP server type (e.g. AD vs IPA vs generic)."""

  def __init__(self, value_map):
    LdapDefault.__init__(self, None)
    self.default_value_map = value_map

  def get_default_value(self, ldap_type):
    """Look up the default for *ldap_type*; None when unknown or map empty."""
    if not self.default_value_map:
      return None
    return self.default_value_map.get(ldap_type)

  def depends_on_ldap_type(self):
    """Per-type maps always need the LDAP type to resolve a default."""
    return True
def format_prop_val_prompt(prop_prompt_pattern, prop_default_value):
  """Render a prompt pattern, substituting the default value (preceded by a
  space) into the '{0}' placeholder, or an empty string when no default."""
  shown_default = get_prompt_default(prop_default_value)
  if shown_default is not None and shown_default != "":
    suffix = " " + shown_default
  else:
    suffix = ""
  return prop_prompt_pattern.format(suffix)
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def init_ldap_properties_list_reqd(properties, options):
  """
  Windows variant: build the ordered list of required LDAP setup property
  templates (a list is used instead of a dict because python2.x dicts are
  unordered).
  """
  # python2.x dict is not ordered
  ldap_properties = [
    LdapPropTemplate(properties, options.ldap_primary_host, "ambari.ldap.connectivity.server.host", "Primary LDAP Host{0}: ", REGEX_HOSTNAME, False, LdapDefaultMap({LDAP_IPA:'ipa.ambari.apache.org', LDAP_GENERIC:'ldap.ambari.apache.org'})),
    LdapPropTemplate(properties, options.ldap_primary_port, "ambari.ldap.connectivity.server.port", "Primary LDAP Port{0}: ", REGEX_PORT, False, LdapDefaultMap({LDAP_IPA:'636', LDAP_GENERIC:'389'})),
    LdapPropTemplate(properties, options.ldap_secondary_host, "ambari.ldap.connectivity.secondary.server.host", "Secondary LDAP Host <Optional>{0}: ", REGEX_HOSTNAME, True),
    LdapPropTemplate(properties, options.ldap_secondary_port, "ambari.ldap.connectivity.secondary.server.port", "Secondary LDAP Port <Optional>{0}: ", REGEX_PORT, True),
    LdapPropTemplate(properties, options.ldap_ssl, "ambari.ldap.connectivity.use_ssl", "Use SSL [true/false]{0}: ", REGEX_TRUE_FALSE, False, LdapDefaultMap({LDAP_AD:'false', LDAP_IPA:'true', LDAP_GENERIC:'false'})),
    LdapPropTemplate(properties, options.ldap_user_attr, "ambari.ldap.attributes.user.name_attr", "User ID attribute{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'sAMAccountName', LDAP_IPA:'uid', LDAP_GENERIC:'uid'})),
    LdapPropTemplate(properties, options.ldap_base_dn, "ambari.ldap.attributes.user.search_base", "Search Base{0}: ", REGEX_ANYTHING, False, LdapDefault("dc=ambari,dc=apache,dc=org")),
    LdapPropTemplate(properties, options.ldap_referral, "ambari.ldap.advanced.referrals", "Referral method [follow/ignore]{0}: ", REGEX_REFERRAL, True, LdapDefault("follow")),
    # BUG FIX: a comma was missing after the property-name string below, which
    # made Python concatenate it with the prompt string and shift every later
    # argument by one position (cf. the correct entry in the default-OS variant).
    LdapPropTemplate(properties, options.ldap_bind_anonym, "ambari.ldap.connectivity.anonymous_bind", "Bind anonymously [true/false]{0}: ", REGEX_TRUE_FALSE, False, LdapDefault("false"))
  ]
  return ldap_properties
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def init_ldap_properties_list_reqd(properties, options):
  """
  Default (non-Windows) variant: build the ordered list of required LDAP
  setup property templates — connectivity, user/group attribute mappings and
  sync behavior — with per-LDAP-type defaults (AD / IPA / generic).
  """
  ldap_properties = [
    LdapPropTemplate(properties, options.ldap_primary_host, "ambari.ldap.connectivity.server.host", "Primary LDAP Host{0}: ", REGEX_HOSTNAME, False, LdapDefaultMap({LDAP_IPA:'ipa.ambari.apache.org', LDAP_GENERIC:'ldap.ambari.apache.org'})),
    LdapPropTemplate(properties, options.ldap_primary_port, "ambari.ldap.connectivity.server.port", "Primary LDAP Port{0}: ", REGEX_PORT, False, LdapDefaultMap({LDAP_IPA:'636', LDAP_GENERIC:'389'})),
    LdapPropTemplate(properties, options.ldap_secondary_host, "ambari.ldap.connectivity.secondary.server.host", "Secondary LDAP Host <Optional>{0}: ", REGEX_HOSTNAME, True),
    LdapPropTemplate(properties, options.ldap_secondary_port, "ambari.ldap.connectivity.secondary.server.port", "Secondary LDAP Port <Optional>{0}: ", REGEX_PORT, True),
    LdapPropTemplate(properties, options.ldap_ssl, "ambari.ldap.connectivity.use_ssl", "Use SSL [true/false]{0}: ", REGEX_TRUE_FALSE, False, LdapDefaultMap({LDAP_AD:'false', LDAP_IPA:'true', LDAP_GENERIC:'false'})),
    LdapPropTemplate(properties, options.ldap_user_class, "ambari.ldap.attributes.user.object_class", "User object class{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'user', LDAP_IPA:'posixAccount', LDAP_GENERIC:'posixUser'})),
    LdapPropTemplate(properties, options.ldap_user_attr, "ambari.ldap.attributes.user.name_attr", "User ID attribute{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'sAMAccountName', LDAP_IPA:'uid', LDAP_GENERIC:'uid'})),
    LdapPropTemplate(properties, options.ldap_user_group_member_attr, "ambari.ldap.attributes.user.group_member_attr", "User group member attribute{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'memberof', LDAP_IPA:'member', LDAP_GENERIC:'memberof'})),
    LdapPropTemplate(properties, options.ldap_group_class, "ambari.ldap.attributes.group.object_class", "Group object class{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'group', LDAP_IPA:'posixGroup', LDAP_GENERIC:'posixGroup'})),
    LdapPropTemplate(properties, options.ldap_group_attr, "ambari.ldap.attributes.group.name_attr", "Group name attribute{0}: ", REGEX_ANYTHING, False, LdapDefault("cn")),
    LdapPropTemplate(properties, options.ldap_member_attr, "ambari.ldap.attributes.group.member_attr", "Group member attribute{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'member', LDAP_IPA:'member', LDAP_GENERIC:'memberUid'})),
    LdapPropTemplate(properties, options.ldap_dn, "ambari.ldap.attributes.dn_attr", "Distinguished name attribute{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'distinguishedName', LDAP_IPA:'dn', LDAP_GENERIC:'dn'})),
    LdapPropTemplate(properties, options.ldap_base_dn, "ambari.ldap.attributes.user.search_base", "Search Base{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'dc=ambari,dc=apache,dc=org', LDAP_IPA:'cn=accounts,dc=ambari,dc=apache,dc=org', LDAP_GENERIC:'dc=ambari,dc=apache,dc=org'})),
    LdapPropTemplate(properties, options.ldap_referral, "ambari.ldap.advanced.referrals", "Referral method [follow/ignore]{0}: ", REGEX_REFERRAL, True, LdapDefault("follow")),
    LdapPropTemplate(properties, options.ldap_bind_anonym, "ambari.ldap.connectivity.anonymous_bind", "Bind anonymously [true/false]{0}: ", REGEX_TRUE_FALSE, False, LdapDefault("false")),
    LdapPropTemplate(properties, options.ldap_sync_username_collisions_behavior, "ambari.ldap.advanced.collision_behavior", "Handling behavior for username collisions [convert/skip] for LDAP sync{0}: ", REGEX_SKIP_CONVERT, False, LdapDefault("skip")),
    LdapPropTemplate(properties, options.ldap_force_lowercase_usernames, "ambari.ldap.advanced.force_lowercase_usernames", "Force lower-case user names [true/false]{0}:", REGEX_TRUE_FALSE, True),
    LdapPropTemplate(properties, options.ldap_pagination_enabled, "ambari.ldap.advanced.pagination_enabled", "Results from LDAP are paginated when requested [true/false]{0}:", REGEX_TRUE_FALSE, True)
  ]
  return ldap_properties
def update_ldap_configuration(admin_login, admin_password, properties, ldap_property_value_map):
  """Persist the collected LDAP properties to the Ambari DB via a REST PUT of
  an 'ldap-configuration' Configuration resource."""
  request_data = {
    "Configuration": {
      "category": "ldap-configuration",
      "properties": ldap_property_value_map
    }
  }
  perform_changes_via_rest_api(properties, admin_login, admin_password, SETUP_LDAP_CONFIG_URL, 'PUT', request_data)
def should_query_ldap_type(ldap_property_list_reqd):
  """Return True when at least one required property needs the LDAP server
  type to resolve its default value."""
  return any(prop.should_query_ldap_type() for prop in ldap_property_list_reqd)
def query_ldap_type(ldap_type_option):
  """Return the LDAP server type: the CLI-provided answer when present,
  otherwise a validated interactive choice (defaulting to the generic type)."""
  prompt = "Please select the type of LDAP you want to use [{}]({}):".format("/".join(LDAP_TYPES), LDAP_GENERIC)
  error_descr = "Please enter one of the followings '{}'!".format("', '".join(LDAP_TYPES))
  return get_validated_string_input(prompt,
                                    LDAP_GENERIC,
                                    REGEX_LDAP_TYPE,
                                    error_descr,
                                    False,
                                    False,
                                    answer = ldap_type_option)
def is_interactive(property_list):
  """Return True when any property lacks a CLI answer and must be prompted
  (i.e. has no option value and does not allow an empty prompt)."""
  return any(not prop.option and not prop.allow_empty_prompt for prop in property_list)
def setup_ldap(options):
  """
  Interactive 'setup-ldap' action: collect LDAP connectivity and attribute
  properties (from CLI options or prompts), optionally configure a custom
  TrustStore for LDAPS, then save the configuration to the Ambari DB via the
  REST API and write client.security=ldap (plus any TrustStore properties)
  into ambari.properties.

  Returns 0 on success.

  Raises:
    FatalException: when the server is not running, or another auth method is
      configured and the user declines to replace it.
  """
  logger.info("Setup LDAP.")

  properties = get_ambari_properties()

  server_status, pid = is_server_runing()
  if not server_status:
    err = 'Ambari Server is not running.'
    raise FatalException(1, err)

  enforce_ldap = options.ldap_force_setup if options.ldap_force_setup is not None else False
  if not enforce_ldap:
    # Another auth method (e.g. pam) may already be configured — confirm the
    # switch unless setup is being forced.
    current_client_security = get_value_from_properties(properties, CLIENT_SECURITY, NO_AUTH_METHOD_CONFIGURED)
    if current_client_security != 'ldap':
      query = "Currently '{0}' is configured, do you wish to use LDAP instead [y/n] ({1})? "
      ldap_setup_default = 'y' if current_client_security == NO_AUTH_METHOD_CONFIGURED else 'n'
      if get_YN_input(query.format(current_client_security, ldap_setup_default), ldap_setup_default == 'y'):
        pass
      else:
        err = "Currently '" + current_client_security + "' configured. Can not setup LDAP."
        raise FatalException(1, err)

  admin_login, admin_password = get_ambari_admin_username_password_pair(options)
  # Seed defaults with any LDAP configuration already stored in the DB.
  ldap_properties = get_ldap_properties_from_db(properties, admin_login, admin_password)
  if ldap_properties:
    properties.update(ldap_properties)
  sys.stdout.write('\n')

  isSecure = get_is_secure(properties)

  # Split combined host:port CLI URLs into separate host/port options.
  if options.ldap_url:
    options.ldap_primary_host = options.ldap_url.split(':')[0]
    options.ldap_primary_port = options.ldap_url.split(':')[1]

  if options.ldap_secondary_url:
    options.ldap_secondary_host = options.ldap_secondary_url.split(':')[0]
    options.ldap_secondary_port = options.ldap_secondary_url.split(':')[1]

  ldap_property_list_reqd = init_ldap_properties_list_reqd(properties, options)
  ldap_bind_dn_template = LdapPropTemplate(properties, options.ldap_manager_dn, LDAP_MGR_USERNAME_PROPERTY, "Bind DN{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({
    LDAP_AD:'cn=ldapbind,dc=ambari,dc=apache,dc=org',
    LDAP_IPA:'uid=ldapbind,cn=users,cn=accounts,dc=ambari,dc=apache,dc=org',
    LDAP_GENERIC:'uid=ldapbind,cn=users,dc=ambari,dc=apache,dc=org'}))
  # Only ask for the LDAP type when some default actually depends on it.
  ldap_type = query_ldap_type(options.ldap_type) if options.ldap_type or should_query_ldap_type(ldap_property_list_reqd) else LDAP_GENERIC
  ldap_property_list_opt = [LDAP_MGR_USERNAME_PROPERTY,
                            LDAP_MGR_PASSWORD_PROPERTY,
                            LDAP_DISABLE_ENDPOINT_IDENTIFICATION,
                            SSL_TRUSTSTORE_TYPE_PROPERTY,
                            SSL_TRUSTSTORE_PATH_PROPERTY,
                            SSL_TRUSTSTORE_PASSWORD_PROPERTY,
                            LDAP_MANAGE_SERVICES,
                            LDAP_ENABLED_SERVICES]

  ldap_property_list_passwords=[LDAP_MGR_PASSWORD_PROPERTY, SSL_TRUSTSTORE_PASSWORD_PROPERTY]

  ssl_truststore_type_default = get_value_from_properties(properties, SSL_TRUSTSTORE_TYPE_PROPERTY, "jks")
  ssl_truststore_path_default = get_value_from_properties(properties, SSL_TRUSTSTORE_PATH_PROPERTY)
  disable_endpoint_identification_default = get_value_from_properties(properties, LDAP_DISABLE_ENDPOINT_IDENTIFICATION, "False")

  # ldap_property_value_map goes to the server DB via REST; the
  # *_in_ambari_properties map is written locally to ambari.properties.
  ldap_property_value_map = {}
  ldap_property_values_in_ambari_properties = {}
  interactive_mode = is_interactive(ldap_property_list_reqd)
  for ldap_prop in ldap_property_list_reqd:
    input = ldap_prop.get_input(ldap_type, interactive_mode)

    if input is not None and input != "":
      ldap_property_value_map[ldap_prop.prop_name] = input

    if ldap_prop.prop_name == LDAP_ANONYMOUS_BIND:
      anonymous = (input and input.lower() == 'true')
      mgr_password = None
      # Ask for manager credentials only if bindAnonymously is false
      if not anonymous:
        username = ldap_bind_dn_template.get_input(ldap_type, interactive_mode)
        ldap_property_value_map[LDAP_MGR_USERNAME_PROPERTY] = username
        mgr_password = configure_ldap_password(options.ldap_manager_password, interactive_mode)
        ldap_property_value_map[LDAP_MGR_PASSWORD_PROPERTY] = mgr_password
    elif ldap_prop.prop_name == LDAP_USE_SSL:
      ldaps = (input and input.lower() == 'true')
      ts_password = None

      if ldaps:
        # LDAPS chosen: offer endpoint-identification opt-out and an optional
        # custom TrustStore.
        disable_endpoint_identification = get_validated_string_input("Disable endpoint identification during SSL handshake [true/false] ({0}): ".format(disable_endpoint_identification_default),
                                                                     disable_endpoint_identification_default,
                                                                     REGEX_TRUE_FALSE, "Invalid characters in the input!", False, allowEmpty=True,
                                                                     answer=options.ldap_sync_disable_endpoint_identification) if interactive_mode else options.ldap_sync_disable_endpoint_identification
        if disable_endpoint_identification is not None:
          ldap_property_value_map[LDAP_DISABLE_ENDPOINT_IDENTIFICATION] = disable_endpoint_identification

        truststore_default = "n"
        truststore_set = bool(ssl_truststore_path_default)
        if truststore_set:
          truststore_default = "y"
        custom_trust_store = True if options.trust_store_path is not None and options.trust_store_path else False
        if not custom_trust_store:
          custom_trust_store = get_YN_input("Do you want to provide custom TrustStore for Ambari [y/n] ({0})?".
                                            format(truststore_default),
                                            truststore_set) if interactive_mode else None
        if custom_trust_store:
          ts_type = get_validated_string_input("TrustStore type [jks/jceks/pkcs12] {0}:".format(get_prompt_default(ssl_truststore_type_default)),
                                               ssl_truststore_type_default, "^(jks|jceks|pkcs12)?$", "Wrong type", False, answer=options.trust_store_type) if interactive_mode else options.trust_store_type
          ts_path = None
          # Re-prompt until an existing TrustStore file path is given.
          while True:
            ts_path = get_validated_string_input(format_prop_val_prompt("Path to TrustStore file{0}: ", ssl_truststore_path_default),
                                                 ssl_truststore_path_default, ".*", False, False, answer = options.trust_store_path) if interactive_mode else options.trust_store_path
            if os.path.exists(ts_path):
              break
            else:
              print 'File not found.'
              hasAnswer = options.trust_store_path is not None and options.trust_store_path
              quit_if_has_answer(hasAnswer)

          ts_password = read_password("", ".*", "Password for TrustStore:", "Invalid characters in password", options.trust_store_password) if interactive_mode else options.trust_store_password

          ldap_property_values_in_ambari_properties[SSL_TRUSTSTORE_TYPE_PROPERTY] = ts_type
          ldap_property_values_in_ambari_properties[SSL_TRUSTSTORE_PATH_PROPERTY] = ts_path
          ldap_property_values_in_ambari_properties[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = ts_password
          pass
        elif properties.get_property(SSL_TRUSTSTORE_TYPE_PROPERTY):
          # No custom TrustStore wanted but one is configured — offer removal.
          print 'The TrustStore is already configured: '
          print '  ' + SSL_TRUSTSTORE_TYPE_PROPERTY + ' = ' + properties.get_property(SSL_TRUSTSTORE_TYPE_PROPERTY)
          print '  ' + SSL_TRUSTSTORE_PATH_PROPERTY + ' = ' + properties.get_property(SSL_TRUSTSTORE_PATH_PROPERTY)
          print '  ' + SSL_TRUSTSTORE_PASSWORD_PROPERTY + ' = ' + properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
          if get_YN_input("Do you want to remove these properties [y/n] (y)? ", True, options.trust_store_reconfigure):
            properties.removeOldProp(SSL_TRUSTSTORE_TYPE_PROPERTY)
            properties.removeOldProp(SSL_TRUSTSTORE_PATH_PROPERTY)
            properties.removeOldProp(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
        pass
      pass

  populate_ambari_requires_ldap(options, ldap_property_value_map)
  populate_service_management(options, ldap_property_value_map, properties, admin_login, admin_password)

  # Echo everything back for review before saving.
  print '=' * 20
  print 'Review Settings'
  print '=' * 20
  for property in ldap_property_list_reqd:
    if ldap_property_value_map.has_key(property.prop_name):
      print("%s %s" % (property.get_prompt_text(ldap_type), ldap_property_value_map[property.prop_name]))

  for property in ldap_property_list_opt:
    if ldap_property_value_map.has_key(property):
      if property not in ldap_property_list_passwords:
        print("%s: %s" % (property, ldap_property_value_map[property]))
      else:
        # Never echo passwords in clear text.
        print("%s: %s" % (property, BLIND_PASSWORD))

  for property in ldap_property_list_opt:
    if ldap_property_values_in_ambari_properties.has_key(property):
      if property not in ldap_property_list_passwords:
        print("%s: %s" % (property, ldap_property_values_in_ambari_properties[property]))
      else:
        print("%s: %s" % (property, BLIND_PASSWORD))

  save_settings = True if options.ldap_save_settings is not None else get_YN_input("Save settings [y/n] (y)? ", True)

  if save_settings:
    if isSecure:
      if ts_password:
        # Store only the encrypted alias when encryption is enabled.
        encrypted_passwd = encrypt_password(SSL_TRUSTSTORE_PASSWORD_ALIAS, ts_password, options)
        if ts_password != encrypted_passwd:
          ldap_property_values_in_ambari_properties[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = encrypted_passwd

    print 'Saving LDAP properties...'

    #Saving LDAP configuration in Ambari DB using the REST API
    update_ldap_configuration(admin_login, admin_password, properties, ldap_property_value_map)

    #The only properties we want to write out in Ambari.properties are the client.security type being LDAP and the custom Truststore related properties (if any)
    ldap_property_values_in_ambari_properties[CLIENT_SECURITY] = 'ldap'
    update_properties_2(properties, ldap_property_values_in_ambari_properties)

    print 'Saving LDAP properties finished'

  return 0
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def generate_env(options, ambari_user, current_user):
  """Build the environment dict used to launch the Ambari server process.

  When the server is secure but the master key is not persisted on disk, the
  key is sourced from the environment, read from a file location named in the
  environment, or prompted for; a prompted key is saved to a temp file made
  readable by the ambari user, and the env var pointing at it is set.

  options: parsed command line options (passed through to save_master_key)
  ambari_user: system user the server will run as
  current_user: user executing this command
  returns: a copy of os.environ, possibly augmented with the master key
    value and/or master key file location variables
  """
  properties = get_ambari_properties()
  isSecure = get_is_secure(properties)
  (isPersisted, masterKeyFile) = get_is_persisted(properties)
  environ = os.environ.copy()
  # Need to handle master key not persisted scenario
  if isSecure and not masterKeyFile:
    prompt = False
    masterKey = environ.get(SECURITY_KEY_ENV_VAR_NAME)
    if masterKey is not None and masterKey != "":
      pass  # key already supplied via the environment - nothing more to do
    else:
      keyLocation = environ.get(SECURITY_MASTER_KEY_LOCATION)
      if keyLocation is not None:
        try:
          # Verify master key can be read by the java process
          with open(keyLocation, 'r'):
            pass
        except IOError:
          # NOTE(review): typo 'environemnt' is in the user-facing message; left as-is here
          print_warning_msg("Cannot read Master key from path specified in "
                            "environemnt.")
          prompt = True
      else:
        # Key not provided in the environment
        prompt = True
    if prompt:
      import pwd
      masterKey = get_original_master_key(properties)
      environ[SECURITY_KEY_ENV_VAR_NAME] = masterKey
      tempDir = tempfile.gettempdir()
      tempFilePath = tempDir + os.sep + "masterkey"
      save_master_key(options, masterKey, tempFilePath, True)
      # restrict the temp key file to the user who will actually run the server
      if ambari_user != current_user:
        uid = pwd.getpwnam(ambari_user).pw_uid
        gid = pwd.getpwnam(ambari_user).pw_gid
        os.chown(tempFilePath, uid, gid)
      else:
        os.chmod(tempFilePath, stat.S_IREAD | stat.S_IWRITE)
      if tempFilePath is not None:
        environ[SECURITY_MASTER_KEY_LOCATION] = tempFilePath
  return environ
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def generate_env(options, ambari_user, current_user):
  """Windows implementation: the child service simply inherits a copy of the caller's environment."""
  env_copy = os.environ.copy()
  return env_copy
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def ensure_can_start_under_current_user(ambari_user):
  """Windows implementation: no root requirement.

  By default the child process inherits the security context and environment
  of the parent, so any user may start the server; an empty string is
  returned in place of a user name.
  """
  return ""
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def ensure_can_start_under_current_user(ambari_user):
  """Verify that the current OS user may start Ambari Server as ambari_user.

  Raises FatalException when no ambari user has been configured, or when a
  non-root user attempts to start the server under a different account.
  Returns the current user name.
  """
  current_user = getpass.getuser()
  if ambari_user is None:
    raise FatalException(1, "Unable to detect a system user for Ambari Server.\n" + SETUP_OR_UPGRADE_MSG)
  if current_user != ambari_user and not is_root():
    raise FatalException(1, "Unable to start Ambari Server as user {0}. Please either run \"ambari-server start\" "
                            "command as root, as sudo or as user \"{1}\"".format(current_user, ambari_user))
  return current_user
class PamPropTemplate:
  """Template describing one PAM-related property to be prompted for during setup."""

  def __init__(self, properties, i_option, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
    """Initialise the template.

    properties: current ambari properties (used to seed the default value)
    i_option: command line option value, if the user supplied one
    i_prop_name: name of the property in ambari.properties
    i_prop_val_pattern: prompt text pattern; '{0}' is filled with the default
    i_prompt_regex: regex the user's answer must match
    i_allow_empty_prompt: whether an empty answer is acceptable
    i_prop_name_default: fallback value when the property is not yet set
    """
    self.prop_name = i_prop_name
    self.option = i_option
    # current stored value (or supplied default) becomes the prompt default
    self.pam_prop_name = get_value_from_properties(properties, i_prop_name, i_prop_name_default)
    self.pam_prop_val_prompt = i_prop_val_pattern.format(get_prompt_default(self.pam_prop_name))
    self.prompt_regex = i_prompt_regex
    self.allow_empty_prompt = i_allow_empty_prompt
def init_pam_properties_list_reqd(properties, options):
  """Return the list of required PAM property templates, seeded from current properties and command line options."""
  pam_templates = [
    PamPropTemplate(properties, options.pam_config_file, PAM_CONFIG_FILE, "PAM configuration file* {0}: ", REGEX_ANYTHING, False, "/etc/pam.d/ambari"),
    PamPropTemplate(properties, options.pam_auto_create_groups, AUTO_GROUP_CREATION, "Do you want to allow automatic group creation* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
  ]
  return pam_templates
def setup_pam(options):
if not is_root():
err = 'Ambari-server setup-pam should be run with root-level privileges'
raise FatalException(4, err)
properties = get_ambari_properties()
if get_value_from_properties(properties,CLIENT_SECURITY,"") == 'ldap':
query = "LDAP is currently configured, do you wish to use PAM instead [y/n] (n)? "
if get_YN_input(query, False):
pass
else:
err = "LDAP is configured. Can not setup PAM."
raise FatalException(1, err)
pam_property_list_reqd = init_pam_properties_list_reqd(properties, options)
pam_property_value_map = {}
pam_property_value_map[CLIENT_SECURITY] = 'pam'
for pam_prop in pam_property_list_reqd:
input = get_validated_string_input(pam_prop.pam_prop_val_prompt, pam_prop.pam_prop_name, pam_prop.prompt_regex,
"Invalid characters in the input!", False, pam_prop.allow_empty_prompt,
answer = pam_prop.option)
if input is not None and input != "":
pam_property_value_map[pam_prop.prop_name] = input
# Verify that the PAM config file exists, else show warning...
pam_config_file = pam_property_value_map[PAM_CONFIG_FILE]
if not os.path.exists(pam_config_file):
print_warning_msg("The PAM configuration file, {0} does not exist. " \
"Please create it before restarting Ambari.".format(pam_config_file))
update_properties_2(properties, pam_property_value_map)
print 'Saving...done'
return 0
#
# Migration of LDAP users & groups to PAM
#
def migrate_ldap_pam(args):
  """Run the LDAP-to-PAM user & group migration helper via the JVM.

  Preconditions: PAM must already be configured and the user must confirm a
  database backup has been taken. Returns the helper's return code (0 on
  success, 1 on precondition failure).
  """
  properties = get_ambari_properties()
  if get_value_from_properties(properties,CLIENT_SECURITY,"") != 'pam':
    err = "PAM is not configured. Please configure PAM authentication first."
    raise FatalException(1, err)
  db_title = get_db_type(properties).title
  confirm = get_YN_input("Ambari Server configured for %s. Confirm "
                        "you have made a backup of the Ambari Server database [y/n] (y)? " % db_title, True)
  if not confirm:
    print_error_msg("Database backup is not confirmed")
    return 1
  jdk_path = get_java_exe_path()
  if jdk_path is None:
    print_error_msg("No JDK found, please run the \"setup\" "
                    "command to install a JDK automatically or install any "
                    "JDK manually to " + configDefaults.JDK_INSTALL_DIR)
    return 1
  # At this point, the args does not have the ambari database information.
  # Augment the args with the correct ambari database information
  parse_properties_file(args)
  ensure_jdbc_driver_is_installed(args, properties)
  print 'Migrating LDAP Users & Groups to PAM'
  serverClassPath = ServerClassPath(properties, args)
  class_path = serverClassPath.get_full_ambari_classpath_escaped_for_shell()
  command = LDAP_TO_PAM_MIGRATION_HELPER_CMD.format(jdk_path, class_path)
  ambari_user = read_ambari_user()
  current_user = ensure_can_start_under_current_user(ambari_user)
  environ = generate_env(args, ambari_user, current_user)
  # run the java helper and relay its console output to the user
  (retcode, stdout, stderr) = run_os_command(command, env=environ)
  print_info_msg("Return code from LDAP to PAM migration command, retcode = " + str(retcode))
  if stdout:
    print "Console output from LDAP to PAM migration command:"
    print stdout
    print
  if stderr:
    print "Error output from LDAP to PAM migration command:"
    print stderr
    print
  if retcode > 0:
    print_error_msg("Error executing LDAP to PAM migration, please check the server logs.")
  else:
    print_info_msg('LDAP to PAM migration completed')
  return retcode
def populate_ambari_requires_ldap(options, properties):
  """Record whether Ambari itself should authenticate users via LDAP.

  Uses the command line option when given; otherwise prompts, defaulting to
  the currently stored value. Stores 'true'/'false' under
  AMBARI_LDAP_AUTH_ENABLED in the supplied properties dictionary.
  """
  if options.ldap_enabled_ambari is not None:
    use_ldap = (options.ldap_enabled_ambari == 'true')
  else:
    stored_default = get_boolean_from_dictionary(properties, AMBARI_LDAP_AUTH_ENABLED, False)
    prompt = "Use LDAP authentication for Ambari [y/n] ({0})? ".format('y' if stored_default else 'n')
    use_ldap = get_YN_input(prompt, stored_default)
  properties[AMBARI_LDAP_AUTH_ENABLED] = 'true' if use_ldap else 'false'
def populate_service_management(options, properties, ambari_properties, admin_login, admin_password):
  """Decide which eligible cluster services should have LDAP managed for them.

  Interactive unless overridden by options.ldap_manage_services /
  options.ldap_enabled_services. Stores LDAP_MANAGE_SERVICES ('true'/'false')
  and LDAP_ENABLED_SERVICES (comma separated names or the wildcard) into the
  supplied properties dictionary.
  """
  services = ""
  if options.ldap_enabled_services is None:
    if options.ldap_manage_services is None:
      # prompt, defaulting to the currently stored flag
      manage_services = get_boolean_from_dictionary(properties, LDAP_MANAGE_SERVICES, False)
      manage_services = get_YN_input("Manage LDAP configurations for eligible services [y/n] ({0})? ".format('y' if manage_services else 'n'), manage_services)
    else:
      manage_services = 'true' == options.ldap_manage_services
      stored_manage_services = get_boolean_from_dictionary(properties, LDAP_MANAGE_SERVICES, False)
      # echo the non-interactive answer in the same form as the prompt
      print("Manage LDAP configurations for eligible services [y/n] ({0})? {1}".format('y' if stored_manage_services else 'n', 'y' if manage_services else 'n'))
    if manage_services:
      enabled_services = get_value_from_dictionary(properties, LDAP_ENABLED_SERVICES, "").upper().split(',')
      # NOTE(review): 'all' shadows the builtin; left unchanged here
      all = WILDCARD_FOR_ALL_SERVICES in enabled_services
      configure_for_all_services = get_YN_input(" Manage LDAP for all services [y/n] ({0})? ".format('y' if all else 'n'), all)
      if configure_for_all_services:
        services = WILDCARD_FOR_ALL_SERVICES
      else:
        # per-service prompting requires a reachable cluster
        cluster_name = get_cluster_name(ambari_properties, admin_login, admin_password)
        if cluster_name:
          eligible_services = get_eligible_services(ambari_properties, admin_login, admin_password, cluster_name, FETCH_SERVICES_FOR_LDAP_ENTRYPOINT, 'LDAP')
          if eligible_services and len(eligible_services) > 0:
            service_list = []
            for service in eligible_services:
              enabled = service.upper() in enabled_services
              question = "  Manage LDAP for {0} [y/n] ({1})? ".format(service, 'y' if enabled else 'n')
              if get_YN_input(question, enabled):
                service_list.append(service)
            services = ','.join(service_list)
          else:
            print ("  There are no eligible services installed.")
  else:
    # explicit service list supplied on the command line
    if options.ldap_manage_services:
      manage_services = 'true' == options.ldap_manage_services
    else:
      manage_services = True
    services = options.ldap_enabled_services.upper() if options.ldap_enabled_services else ""
  properties[LDAP_MANAGE_SERVICES] = 'true' if manage_services else 'false'
  properties[LDAP_ENABLED_SERVICES] = services
7952a093cea3a2ea19c8dfe9a1c518679c90e413 | 1,545 | py | Python | app/modules/ocr_utils/ocr.py | jasalt/kuittiskanneri | db04215b9e8b9b8624b8e28193382e4a37944f6c | [
"MIT"
] | 131 | 2015-01-19T13:56:20.000Z | 2022-03-10T22:46:17.000Z | app/modules/ocr_utils/ocr.py | jasalt/kuittiskanneri | db04215b9e8b9b8624b8e28193382e4a37944f6c | [
"MIT"
] | 2 | 2016-09-10T17:05:49.000Z | 2019-06-07T14:13:24.000Z | app/modules/ocr_utils/ocr.py | jasalt/kuittiskanneri | db04215b9e8b9b8624b8e28193382e4a37944f6c | [
"MIT"
] | 64 | 2015-01-27T12:05:37.000Z | 2021-03-26T19:56:04.000Z | import os
import subprocess
import autocorrect
import receiptparser
# TODO HACK DEPLOY This breaks easily
from app import app
UPLOAD_FOLDER = app.root_path + '/' + app.config['UPLOAD_FOLDER']
OCR_SCRIPT = app.root_path + '/modules/ocr_utils/ocr.sh'
def optical_character_recognition(imagepath):
""" Does OCR on an image and returns tuple:
(raw text, autocorrected text, parsed receipt data)
imagepath: path to image to be processed
"""
# Process image with ImageMagick
processed_imagepath = os.path.join(UPLOAD_FOLDER, 'temp.png')
print "Make image more readable"
im_proc = subprocess.Popen(['convert', imagepath, '-resize', '600x800',
'-blur', '2', '-lat', '8x8-2%',
processed_imagepath], stdout=subprocess.PIPE)
im_proc.communicate()
# Read receipt with Tesseract
print "Running OCR"
image_text = ""
proc = subprocess.Popen([OCR_SCRIPT, processed_imagepath],
stdout=subprocess.PIPE)
for line in iter(proc.stdout.readline, ''):
image_text += line.rstrip() + '\n'
image_text = image_text.decode('utf-8')
# Autocorrect
print "Autocorrecting text"
corrected_text = autocorrect.correct_text_block(image_text)
if corrected_text is unicode:
corrected_text = corrected_text.encode('utf-8')
print "Parsing text"
parsed_text = receiptparser.parse_receipt(corrected_text)
return (image_text,
corrected_text,
parsed_text)
| 29.711538 | 77 | 0.660841 |
7952a0af14fc4fefc6607939e0d149500dcc9653 | 310 | py | Python | old_ac_controller.py | tmkasun/jetson-gpio-device-controller | 9a05b8bf9ba493b18fb7a410a0c5f646dbece24e | [
"MIT"
] | null | null | null | old_ac_controller.py | tmkasun/jetson-gpio-device-controller | 9a05b8bf9ba493b18fb7a410a0c5f646dbece24e | [
"MIT"
] | null | null | null | old_ac_controller.py | tmkasun/jetson-gpio-device-controller | 9a05b8bf9ba493b18fb7a410a0c5f646dbece24e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import Jetson.GPIO as GPIO
import time
# Toggle the device attached to BOARD pin 7 every 2 minutes.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT, initial=GPIO.LOW)
try:
    while True:
        print("Turning on")
        # NOTE(review): both the 'on' and 'off' branches write GPIO.LOW, so
        # the pin never changes state - presumably one should be GPIO.HIGH;
        # confirm relay polarity before changing the output values.
        GPIO.output(7, GPIO.LOW)
        time.sleep(2*60)
        print("turning off")
        GPIO.output(7, GPIO.LOW)
        time.sleep(2*60)
finally:
    # BUG FIX: GPIO.cleanup() was unreachable after the infinite loop; run it
    # on exit (e.g. KeyboardInterrupt) so the pin is properly released.
    GPIO.cleanup()
| 16.315789 | 41 | 0.670968 |
7952a0be15361642de6a8c0358b1a7ef28d8c9f6 | 32,051 | py | Python | resqpy/rq_import/_grid_from_cp.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | 35 | 2021-05-24T17:43:55.000Z | 2022-03-15T15:47:59.000Z | resqpy/rq_import/_grid_from_cp.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | 355 | 2021-05-24T16:28:16.000Z | 2022-03-14T14:43:40.000Z | resqpy/rq_import/_grid_from_cp.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | 12 | 2021-06-02T09:50:46.000Z | 2022-03-02T20:29:56.000Z | """_grid_from_cp.py: Module to generate a RESQML grid object from an input corner point array."""
version = '15th November 2021'
import logging
log = logging.getLogger(__name__)
import numpy as np
import numpy.ma as ma
import resqpy.crs as rqc
import resqpy.grid as grr
import resqpy.olio.vector_utilities as vec
def grid_from_cp(model,
                 cp_array,
                 crs_uuid,
                 active_mask = None,
                 geometry_defined_everywhere = True,
                 treat_as_nan = None,
                 dot_tolerance = 1.0,
                 morse_tolerance = 5.0,
                 max_z_void = 0.1,
                 split_pillars = True,
                 split_tolerance = 0.01,
                 ijk_handedness = 'right',
                 known_to_be_straight = False):
    """Build and return a resqpy.grid.Grid object from a 7D corner point array.

    arguments:
       model (resqpy.model.Model): the model the new grid will belong to
       cp_array (numpy float array): corner point data with nexus ordering, axes (k, j, i, kp, jp, ip, xyz)
       crs_uuid (uuid.UUID): uuid of the coordinate reference system for the points
       active_mask (3d numpy bool array): which cells are active; defaults to all active
       geometry_defined_everywhere (bool, default True): deprecated - when False, inactive cells are
          flagged as lacking geometry; prefer treat_as_nan
       treat_as_nan (float, default None): corner point value to be treated as NaN
       dot_tolerance (float, default 1.0): cells whose primary diagonal has manhattan length below this
          are treated as inactive
       morse_tolerance (float, default 5.0): maximum ratio of i and j face vector lengths beyond which
          cells are treated as inactive
       max_z_void (float, default 0.1): vertical gaps between layers larger than this (corp z units)
          become K gaps in the resqml grid
       split_pillars (bool, default True): set False to force generation of an unfaulted grid
       split_tolerance (float, default 0.01): maximum x, y or z difference between neighbouring corner
          points before a pillar is considered split
       ijk_handedness (str, default 'right'): 'right' or 'left'
       known_to_be_straight (bool, default False): when True, pillars are flagged as straight

    returns:
       resqpy.grid.Grid object with all geometry arrays set up in memory

    note:
       hdf5 write and xml creation are left to the caller, via the usual Grid methods
    """
    builder = _GridFromCp(model,
                          cp_array,
                          crs_uuid,
                          active_mask = active_mask,
                          geometry_defined_everywhere = geometry_defined_everywhere,
                          treat_as_nan = treat_as_nan,
                          dot_tolerance = dot_tolerance,
                          morse_tolerance = morse_tolerance,
                          max_z_void = max_z_void,
                          split_pillars = split_pillars,
                          split_tolerance = split_tolerance,
                          ijk_handedness = ijk_handedness,
                          known_to_be_straight = known_to_be_straight)
    return builder.grid
class _GridFromCp:
"""Class to build a resqpy grid from a Nexus CORP array"""
    def __init__(self,
                 model,
                 cp_array,
                 crs_uuid,
                 active_mask = None,
                 geometry_defined_everywhere = True,
                 treat_as_nan = None,
                 dot_tolerance = 1.0,
                 morse_tolerance = 5.0,
                 max_z_void = 0.1,
                 split_pillars = True,
                 split_tolerance = 0.01,
                 ijk_handedness = 'right',
                 known_to_be_straight = False):
        """Stash the corner point data and build options, then run the grid creation workflow.

        See grid_from_cp() for the meaning of the arguments; on return the
        generated resqpy grid is available as self.grid.
        """
        self.__model = model
        self.__cp_array = cp_array
        self.__crs_uuid = crs_uuid
        self.__active_mask = active_mask
        self.__geometry_defined_everywhere = geometry_defined_everywhere
        self.__treat_as_nan = treat_as_nan
        self.__dot_tolerance = dot_tolerance
        self.__morse_tolerance = morse_tolerance
        self.__max_z_void = max_z_void
        self.__split_pillars = split_pillars
        self.__split_tolerance = split_tolerance
        self.__ijk_handedness = ijk_handedness
        self.__known_to_be_straight = known_to_be_straight
        # build the grid immediately; result is left in self.grid
        self.create_grid()
def __get_treat_as_nan(self):
if self.__treat_as_nan is None:
if not self.__geometry_defined_everywhere:
self.__treat_as_nan = 'morse'
else:
assert self.__treat_as_nan in ['none', 'dots', 'ij_dots', 'morse', 'inactive']
if self.__treat_as_nan == 'none':
self.__treat_as_nan = None
def __get_extents(self):
self.__nk, self.__nj, self.__ni = self.__cp_array.shape[:3]
self.__nk_plus_1 = self.__nk + 1
self.__nj_plus_1 = self.__nj + 1
self.__ni_plus_1 = self.__ni + 1
def __get_active_inactive_masks(self):
if self.__active_mask is None:
self.__active_mask = np.ones((self.__nk, self.__nj, self.__ni), dtype = 'bool')
self.__inactive_mask = np.zeros((self.__nk, self.__nj, self.__ni), dtype = 'bool')
else:
assert self.__active_mask.shape == (self.__nk, self.__nj, self.__ni)
self.__inactive_mask = np.logical_not(self.__active_mask)
self.__all_active = np.all(self.__active_mask)
def __get_dot_mask_dots(self):
# for speed, only check primary diagonal of cells
log.debug('geometry for cells with no length to primary cell diagonal being set to NaN')
self.__dot_mask = np.all(
np.abs(self.__cp_array[:, :, :, 1, 1, 1] - self.__cp_array[:, :, :, 0, 0, 0]) < self.__dot_tolerance,
axis = -1)
    def __get_dot_mask_ijdots_or_morse(self):
        """Mask inactive cells with a degenerate I or J face diagonal; optionally apply the morse criterion too."""
        # check one diagonal of each I & J face
        log.debug(
            'geometry being set to NaN for inactive cells with no length to primary face diagonal for any I or J face')
        self.__dot_mask = np.zeros((self.__nk, self.__nj, self.__ni), dtype = bool)
        # k_face_vecs = cp_array[:, :, :, :, 1, 1] - cp_array[:, :, :, :, 0, 0]
        j_face_vecs = self.__cp_array[:, :, :, 1, :, 1] - self.__cp_array[:, :, :, 0, :, 0]
        i_face_vecs = self.__cp_array[:, :, :, 1, 1, :] - self.__cp_array[:, :, :, 0, 0, :]
        # a cell is flagged when any of its four I/J face diagonals is shorter than dot_tolerance in all of x, y & z
        self.__dot_mask[:] = np.where(np.all(np.abs(j_face_vecs[:, :, :, 0]) < self.__dot_tolerance, axis = -1), True,
                                      self.__dot_mask)
        self.__dot_mask[:] = np.where(np.all(np.abs(j_face_vecs[:, :, :, 1]) < self.__dot_tolerance, axis = -1), True,
                                      self.__dot_mask)
        self.__dot_mask[:] = np.where(np.all(np.abs(i_face_vecs[:, :, :, 0]) < self.__dot_tolerance, axis = -1), True,
                                      self.__dot_mask)
        self.__dot_mask[:] = np.where(np.all(np.abs(i_face_vecs[:, :, :, 1]) < self.__dot_tolerance, axis = -1), True,
                                      self.__dot_mask)
        log.debug(f'dot mask set for {np.count_nonzero(self.__dot_mask)} cells')
        if self.__treat_as_nan == 'morse':
            self.__get_dot_mask_morse(i_face_vecs, j_face_vecs)

    def __get_dot_mask_morse(self, i_face_vecs, j_face_vecs):
        """Extend the dot mask with cells whose xy face vector lengths hugely exceed the active-cell maximum."""
        morse_tol_sqr = self.__morse_tolerance * self.__morse_tolerance
        # compare face vecs lengths in xy against max for active cells: where much greater set to NaN
        len_j_face_vecs_sqr = np.sum(j_face_vecs[..., :2] * j_face_vecs[..., :2], axis = -1)
        len_i_face_vecs_sqr = np.sum(i_face_vecs[..., :2] * i_face_vecs[..., :2], axis = -1)
        # inactive mask repeated over the two face diagonals per cell
        dead_mask = self.__inactive_mask.reshape(self.__nk, self.__nj, self.__ni, 1).repeat(2, -1)
        # mean_len_active_j_face_vecs_sqr = np.mean(ma.masked_array(len_j_face_vecs_sqr, mask = dead_mask))
        # mean_len_active_i_face_vecs_sqr = np.mean(ma.masked_array(len_i_face_vecs_sqr, mask = dead_mask))
        max_len_active_j_face_vecs_sqr = np.max(ma.masked_array(len_j_face_vecs_sqr, mask = dead_mask))
        max_len_active_i_face_vecs_sqr = np.max(ma.masked_array(len_i_face_vecs_sqr, mask = dead_mask))
        # note: squared lengths compared, so morse_tolerance acts on squared ratios here
        self.__dot_mask = np.where(
            np.any(len_j_face_vecs_sqr > morse_tol_sqr * max_len_active_j_face_vecs_sqr, axis = -1), True,
            self.__dot_mask)
        self.__dot_mask = np.where(
            np.any(len_i_face_vecs_sqr > morse_tol_sqr * max_len_active_i_face_vecs_sqr, axis = -1), True,
            self.__dot_mask)
        log.debug(f'morse mask set for {np.count_nonzero(self.__dot_mask)} cells')
    def __get_nan_mask(self):
        """Establish the per-cell NaN mask (cp_nan_mask), combining raw NaNs with the treat_as_nan policy."""
        if self.__all_active and self.__geometry_defined_everywhere:
            self.__cp_nan_mask = None
        else:
            self.__cp_nan_mask = np.any(np.isnan(self.__cp_array), axis = (3, 4, 5, 6))  # ie. if any nan per cell
            if not self.__geometry_defined_everywhere and not self.__all_active:
                if self.__treat_as_nan == 'inactive':
                    log.debug('all inactive cell geometry being set to NaN')
                    self.__cp_nan_mask = np.logical_or(self.__cp_nan_mask, self.__inactive_mask)
                else:
                    # dot-style policies: only inactive cells flagged by the dot mask lose their geometry
                    if self.__treat_as_nan == 'dots':
                        self.__get_dot_mask_dots()
                    elif self.__treat_as_nan in ['ij_dots', 'morse']:
                        self.__get_dot_mask_ijdots_or_morse()
                    else:
                        raise Exception('code broken')
                    self.__cp_nan_mask = np.logical_or(self.__cp_nan_mask,
                                                       np.logical_and(self.__inactive_mask, self.__dot_mask))
            # if no NaNs remain, revert to the simpler geometry-everywhere handling
            self.__geometry_defined_everywhere = not np.any(self.__cp_nan_mask)
            if self.__geometry_defined_everywhere:
                self.__cp_nan_mask = None

    def __get_masked_cp_array(self):
        """Wrap the corner point array in a masked array, masking cells without defined geometry."""
        # set up masked version of corner point data based on cells with defined geometry
        if self.__geometry_defined_everywhere:
            full_mask = None
            self.__masked_cp_array = ma.masked_array(self.__cp_array, mask = ma.nomask)
            log.info('geometry present for all cells')
        else:
            # expand the per-cell mask to cover all 8 corners x 3 coordinates of each cell
            full_mask = self.__cp_nan_mask.reshape((self.__nk, self.__nj, self.__ni, 1)).repeat(24, axis = 3).reshape(
                (self.__nk, self.__nj, self.__ni, 2, 2, 2, 3))
            self.__masked_cp_array = ma.masked_array(self.__cp_array, mask = full_mask)
            log.info('number of cells without geometry: ' + str(np.count_nonzero(self.__cp_nan_mask)))
    def __check_for_kgaps(self):
        """Detect vertical voids between layers; large voids become K gaps, small ones are closed in place."""
        self.__k_gaps = None
        self.__k_gap_raw_index = None
        self.__k_gap_after_layer = None
        if self.__nk > 1:
            # check for (vertical) voids, or un-pillar-like anomalies, which will require k gaps in the resqml ijk grid
            log.debug('checking for voids')
            # gap between the base of each layer and the top of the layer below
            gap = self.__masked_cp_array[1:, :, :, 0, :, :, :] - self.__masked_cp_array[:-1, :, :, 1, :, :, :]
            max_gap_by_layer_and_xyz = np.max(np.abs(gap), axis = (1, 2, 3, 4))
            max_gap = np.max(max_gap_by_layer_and_xyz)
            log.debug('maximum void distance: {0:.3f}'.format(max_gap))
            if max_gap > self.__max_z_void:
                self.__get_kgaps_details(max_gap_by_layer_and_xyz, gap)
            elif max_gap > 0.0:
                self.__close_gaps(gap)

    def __get_kgaps_details(self, max_gap_by_layer_and_xyz, gap):
        """Per layer: introduce a K gap where the void exceeds max_z_void, otherwise close the void in place."""
        log.warning('maximum void distance exceeds limit, grid will include k gaps')
        self.__k_gaps = 0
        self.__k_gap_after_layer = np.zeros((self.__nk - 1,), dtype = bool)
        self.__k_gap_raw_index = np.empty((self.__nk,), dtype = int)
        self.__k_gap_raw_index[0] = 0
        for k in range(self.__nk - 1):
            max_layer_gap = np.max(max_gap_by_layer_and_xyz[k])
            if max_layer_gap > self.__max_z_void:
                self.__k_gap_after_layer[k] = True
                self.__k_gaps += 1
            elif max_layer_gap > 0.0:
                # close void (includes shifting x & y)
                log.debug('closing void below layer (0 based): ' + str(k))
                # move each side half way towards the other
                layer_gap = gap[k] * 0.5
                layer_gap_unmasked = np.where(gap[k].mask, 0.0, layer_gap)
                self.__masked_cp_array[k + 1, :, :, 0, :, :, :] -= layer_gap_unmasked
                self.__masked_cp_array[k, :, :, 1, :, :, :] += layer_gap_unmasked
            self.__k_gap_raw_index[k + 1] = k + self.__k_gaps
def __close_gaps(self, gap):
# close voids (includes shifting x & y)
log.debug('closing voids')
gap *= 0.5
gap_unmasked = np.where(gap.mask, 0.0, gap)
self.__masked_cp_array[1:, :, :, 0, :, :, :] -= gap_unmasked
self.__masked_cp_array[:-1, :, :, 1, :, :, :] += gap_unmasked
    def __get_k_reduced_cp_array(self):
        """Collapse the kp axis, sharing corner points vertically between neighbouring layers."""
        log.debug('reducing k extent of corner point array (sharing points vertically)')
        self.__k_reduced_cp_array = ma.masked_array(np.zeros(
            (self.__nk_plus_1, self.__nj, self.__ni, 2, 2, 3)))  # (nk+1+k_gaps, nj, ni, jp, ip, xyz)
        # top of the uppermost layer and base of the lowermost layer are taken directly
        self.__k_reduced_cp_array[0, :, :, :, :, :] = self.__masked_cp_array[0, :, :, 0, :, :, :]
        self.__k_reduced_cp_array[-1, :, :, :, :, :] = self.__masked_cp_array[-1, :, :, 1, :, :, :]
        if self.__k_gaps:
            self.__get_k_reduced_cp_array_kgaps()
        else:
            slice = self.__masked_cp_array[1:, :, :, 0, :, :, :]
            # where cell geometry undefined, if cell above is defined, take data from cell above with kp = 1 and set shared point defined
            self.__k_reduced_cp_array[1:-1, :, :, :, :, :] = np.where(slice.mask, self.__masked_cp_array[:-1, :, :,
                                                                                                         1, :, :, :],
                                                                      slice)

    def __get_k_reduced_cp_array_kgaps(self):
        """Variant of the k reduction for grids with K gaps: gap interfaces get two slices instead of one."""
        raw_k = 1
        for k in range(self.__nk - 1):
            # fill reduced array slice(s) for base of layer k and top of layer k + 1
            if self.__k_gap_after_layer[k]:
                self.__k_reduced_cp_array[raw_k, :, :, :, :, :] = self.__masked_cp_array[k, :, :, 1, :, :, :]
                raw_k += 1
                self.__k_reduced_cp_array[raw_k, :, :, :, :, :] = self.__masked_cp_array[k + 1, :, :, 0, :, :, :]
                raw_k += 1
            else:  # take data from either possible cp slice, whichever is defined
                slice = self.__masked_cp_array[k + 1, :, :, 0, :, :, :]
                self.__k_reduced_cp_array[raw_k, :, :, :, :, :] = np.where(slice.mask,
                                                                           self.__masked_cp_array[k, :, :,
                                                                                                  1, :, :, :], slice)
                raw_k += 1
        assert raw_k == self.__nk + self.__k_gaps
    def __get_primary_pillar_ref(self):
        """For each pillar, choose which neighbouring column (jp, ip offsets) supplies its primary geometry.

        Preference is given to a neighbouring column that is active (in the 2D sense),
        falling back through the other adjacent columns when the default choice is inactive.
        """
        log.debug('creating primary pillar reference neighbourly indices')
        self.__primary_pillar_jip = np.zeros((self.__nj_plus_1, self.__ni_plus_1, 2),
                                             dtype = 'int')  # (nj + 1, ni + 1, jp:ip)
        # edge pillars must reference the column on the inside of the grid
        self.__primary_pillar_jip[-1, :, 0] = 1
        self.__primary_pillar_jip[:, -1, 1] = 1
        for j in range(self.__nj_plus_1):
            for i in range(self.__ni_plus_1):
                # keep the default choice if its column is active
                if self.__active_mask_2D[j - self.__primary_pillar_jip[j, i, 0],
                                         i - self.__primary_pillar_jip[j, i, 1]]:
                    continue
                # otherwise try the column on the other side in i
                if i > 0 and self.__primary_pillar_jip[j, i, 1] == 0 and self.__active_mask_2D[
                        j - self.__primary_pillar_jip[j, i, 0], i - 1]:
                    self.__primary_pillar_jip[j, i, 1] = 1
                    continue
                # then the other side in j
                if j > 0 and self.__primary_pillar_jip[j, i, 0] == 0 and self.__active_mask_2D[
                        j - 1, i - self.__primary_pillar_jip[j, i, 1]]:
                    self.__primary_pillar_jip[j, i, 0] = 1
                    continue
                # finally the diagonally opposite column
                if i > 0 and j > 0 and self.__primary_pillar_jip[j, i, 0] == 0 and self.__primary_pillar_jip[
                        j, i, 1] == 0 and self.__active_mask_2D[j - 1, i - 1]:
                    self.__primary_pillar_jip[j, i, :] = 1
def __get_extra_pillar_ref(self):
self.__extras_count = np.zeros((self.__nj_plus_1, self.__ni_plus_1),
dtype = 'int') # count (0 to 3) of extras for pillar
self.__extras_list_index = np.zeros((self.__nj_plus_1, self.__ni_plus_1),
dtype = 'int') # index in list of 1st extra for pillar
self.__extras_list = [] # list of (jp, ip)
self.__extras_use = np.negative(np.ones((self.__nj, self.__ni, 2, 2),
dtype = 'int')) # (j, i, jp, ip); -1 means use primary
if self.__split_pillars:
self.__get_extra_pillar_ref_split()
def __get_extra_pillar_ref_split(self):
log.debug('building extra pillar references for split pillars')
# loop over pillars
for j in range(self.__nj_plus_1):
for i in range(self.__ni_plus_1):
self.__append_extra_pillars(i, j)
if len(self.__extras_list) == 0:
self.__split_pillars = False
log.debug('number of extra pillars: ' + str(len(self.__extras_list)))
    def __append_single_pillar(self, i, j, ip, jp, col_j, p_col_i, p_col_j, primary_ip, primary_jp):
        """Compare one column corner against the pillar's primary; register or reuse an extra pillar if it diverges."""
        col_i = i - ip
        if col_i < 0 or col_i >= self.__ni:
            return  # no column this side of pillar in i
        if jp == primary_jp and ip == primary_ip:
            return  # this column is the primary for this pillar
        # max absolute difference (over all k and xyz) between this corner and the primary
        discrepancy = np.max(
            np.abs(self.__k_reduced_cp_array[:, col_j, col_i, jp, ip, :] -
                   self.__k_reduced_cp_array[:, p_col_j, p_col_i, primary_jp, primary_ip, :]))
        if discrepancy <= self.__split_tolerance:
            return  # data for this column's corner aligns with primary
        # look for an existing extra on this pillar that matches
        for e in range(self.__extras_count[j, i]):
            eli = self.__extras_list_index[j, i] + e
            pillar_j_extra = j - self.__extras_list[eli][0]
            pillar_i_extra = i - self.__extras_list[eli][1]
            discrepancy = np.max(
                np.abs(self.__k_reduced_cp_array[:, col_j, col_i, jp, ip, :] -
                       self.__k_reduced_cp_array[:, pillar_j_extra, pillar_i_extra, self.__extras_list[eli][0],
                                                 self.__extras_list[eli][1], :]))
            if discrepancy <= self.__split_tolerance:  # data for this corner aligns with existing extra
                self.__extras_use[col_j, col_i, jp, ip] = e
                break
        if self.__extras_use[col_j, col_i, jp, ip] >= 0:  # reusing an existing extra for this pillar
            return
        # add this corner as an extra
        if self.__extras_count[j, i] == 0:  # create entry point for this pillar in extras
            self.__extras_list_index[j, i] = len(self.__extras_list)
        self.__extras_list.append((jp, ip))
        self.__extras_use[col_j, col_i, jp, ip] = self.__extras_count[j, i]
        self.__extras_count[j, i] += 1
def __append_extra_pillars(self, i, j):
primary_jp = self.__primary_pillar_jip[j, i, 0]
primary_ip = self.__primary_pillar_jip[j, i, 1]
p_col_j = j - primary_jp
p_col_i = i - primary_ip
# loop over 4 columns surrounding this pillar
for jp in range(2):
col_j = j - jp
if col_j < 0 or col_j >= self.__nj:
continue # no column this side of pillar in j
for ip in range(2):
self.__append_single_pillar(i, j, ip, jp, col_j, p_col_i, p_col_j, primary_ip, primary_jp)
    def __get_points_array(self):
        """Assemble the resqml-style points array, delegating to the split-pillar variant when needed."""
        log.debug('creating points array as used in resqml format')
        if self.__split_pillars:
            self.__get_points_array_split()
        else:  # unsplit pillars
            self.__points_array = np.zeros((self.__nk_plus_1, self.__nj_plus_1, self.__ni_plus_1, 3))
            for j in range(self.__nj_plus_1):
                for i in range(self.__ni_plus_1):
                    # take the pillar's geometry from its designated primary column corner
                    (jp, ip) = self.__primary_pillar_jip[j, i]
                    slice = self.__k_reduced_cp_array[:, j - jp, i - ip, jp, ip, :]
                    self.__points_array[:, j, i, :] = np.where(slice.mask, np.nan,
                                                               slice)  # NaN indicates undefined/invalid geometry
def __get_points_array_split(self):
self.__points_array = np.zeros(
(self.__nk_plus_1, (self.__nj_plus_1 * self.__ni_plus_1) + len(self.__extras_list),
3)) # note: nk_plus_1 might include k_gaps
index = 0
index = self.__get_points_array_split_primary(index)
index = self.__get_points_array_split_extras(index)
assert (index == (self.__nj_plus_1 * self.__ni_plus_1) + len(self.__extras_list))
    def __get_points_array_split_primary(self, index):
        """Fill the points array with primary pillar geometry, starting at index; return the next free index."""
        # primary pillars
        for pillar_j in range(self.__nj_plus_1):
            for pillar_i in range(self.__ni_plus_1):
                (jp, ip) = self.__primary_pillar_jip[pillar_j, pillar_i]
                slice = self.__k_reduced_cp_array[:, pillar_j - jp, pillar_i - ip, jp, ip, :]
                self.__points_array[:, index, :] = np.where(slice.mask, np.nan,
                                                            slice)  # NaN indicates undefined/invalid geometry
                index += 1
        return index

    def __get_points_array_split_extras(self, index):
        """Append extra (split) pillar geometry to the points array, starting at index; return the next free index."""
        # add extras for split pillars
        for pillar_j in range(self.__nj_plus_1):
            for pillar_i in range(self.__ni_plus_1):
                for e in range(self.__extras_count[pillar_j, pillar_i]):
                    eli = self.__extras_list_index[pillar_j, pillar_i] + e
                    (jp, ip) = self.__extras_list[eli]
                    pillar_j_extra = pillar_j - jp
                    pillar_i_extra = pillar_i - ip
                    slice = self.__k_reduced_cp_array[:, pillar_j_extra, pillar_i_extra, jp, ip, :]
                    self.__points_array[:, index, :] = np.where(slice.mask, np.nan,
                                                                slice)  # NaN indicates undefined/invalid geometry
                    index += 1
        return index
    def __make_basic_grid(self):
        """Create the resqpy Grid object and populate its scalar attributes, crs info and cached points."""
        log.debug('initialising grid object')
        self.grid = grr.Grid(self.__model)
        self.grid.grid_representation = 'IjkGrid'
        self.grid.extent_kji = np.array((self.__nk, self.__nj, self.__ni), dtype = 'int')
        self.grid.nk, self.grid.nj, self.grid.ni = self.__nk, self.__nj, self.__ni
        self.grid.k_direction_is_down = True  # assumed direction for corp; todo: determine from geometry and crs z_inc_down flag
        if self.__known_to_be_straight:
            self.grid.pillar_shape = 'straight'
        else:
            self.grid.pillar_shape = 'curved'
        self.grid.has_split_coordinate_lines = self.__split_pillars
        # k gap info may be None when no gaps were detected
        self.grid.k_gaps = self.__k_gaps
        self.grid.k_gap_after_array = self.__k_gap_after_layer
        self.grid.k_raw_index_array = self.__k_gap_raw_index
        self.grid.crs_uuid = self.__crs_uuid
        self.grid.crs_root = self.__model.root_for_uuid(self.__crs_uuid)
        self.__crs = rqc.Crs(self.__model, uuid = self.__crs_uuid)
        # add pillar points array to grid object
        log.debug('attaching points array to grid object')
        self.grid.points_cached = self.__points_array  # NB: reference to points_array, array not copied here
def __get_split_pillars_lists(self, pillar_i, pillar_j, e):
self.__split_pillar_indices_list.append((pillar_j * self.__ni_plus_1) + pillar_i)
use_count = 0
for jp in range(2):
j = pillar_j - jp
if j < 0 or j >= self.__nj:
continue
for ip in range(2):
i = pillar_i - ip
if i < 0 or i >= self.__ni:
continue
if self.__extras_use[j, i, jp, ip] == e:
use_count += 1
self.__cols_for_extra_pillar_list.append((j * self.__ni) + i)
assert (use_count > 0)
self.__cumulative_length += use_count
self.__cumulative_length_list.append(self.__cumulative_length)
def __add_split_arrays_to_grid(self):
    """Attach split pillar arrays to the grid object (no-op when there are no split pillars).

    Walks every pillar's extras, accumulating the three lists via
    __get_split_pillars_lists, then stores them on the grid as numpy arrays
    after consistency assertions.
    """
    if self.__split_pillars:
        log.debug('adding split pillar arrays to grid object')
        self.__split_pillar_indices_list = []
        self.__cumulative_length_list = []
        self.__cols_for_extra_pillar_list = []
        self.__cumulative_length = 0
        for pillar_j in range(self.__nj_plus_1):
            for pillar_i in range(self.__ni_plus_1):
                for e in range(self.__extras_count[pillar_j, pillar_i]):
                    self.__get_split_pillars_lists(pillar_i, pillar_j, e)
        log.debug('number of extra pillars: ' + str(len(self.__split_pillar_indices_list)))
        assert (len(self.__cumulative_length_list) == len(self.__split_pillar_indices_list))
        self.grid.split_pillar_indices_cached = np.array(self.__split_pillar_indices_list, dtype = 'int')
        log.debug('number of uses of extra pillars: ' + str(len(self.__cols_for_extra_pillar_list)))
        # every in-use entry of extras_use (value >= 0) must appear exactly once
        assert (len(self.__cols_for_extra_pillar_list) == np.count_nonzero(self.__extras_use + 1))
        assert (len(self.__cols_for_extra_pillar_list) == self.__cumulative_length)
        self.grid.cols_for_split_pillars = np.array(self.__cols_for_extra_pillar_list, dtype = 'int')
        assert (len(self.__cumulative_length_list) == len(self.__extras_list))
        self.grid.cols_for_split_pillars_cl = np.array(self.__cumulative_length_list, dtype = 'int')
        self.grid.split_pillars_count = len(self.__extras_list)
def __set_up_column_to_pillars_mapping(self):
    """Build grid.pillars_for_column: pillar index for each of a column's four corners.

    Primary pillars are numbered row-major over the (nj+1, ni+1) lattice;
    extra (split) pillars are numbered from base_pillar_count onwards.
    Not part of the RESQML standard, but used by the grid module for speed.
    """
    log.debug('setting up column to pillars mapping')
    base_pillar_count = self.__nj_plus_1 * self.__ni_plus_1
    self.grid.pillars_for_column = np.empty((self.__nj, self.__ni, 2, 2), dtype = 'int')
    for j in range(self.__nj):
        for i in range(self.__ni):
            for jp in range(2):
                for ip in range(2):
                    if not self.__split_pillars or self.__extras_use[j, i, jp, ip] < 0:  # use primary pillar
                        pillar_index = (j + jp) * self.__ni_plus_1 + i + ip
                    else:
                        eli = self.__extras_list_index[j + jp, i + ip] + self.__extras_use[j, i, jp, ip]
                        pillar_index = base_pillar_count + eli
                    self.grid.pillars_for_column[j, i, jp, ip] = pillar_index
def __update_grid_geometry_information(self):
    """Set the grid's cell/pillar geometry-defined flags.

    When geometry is not defined everywhere, the active cell mask is used as
    a proxy for which cells have defined geometry.
    """
    # add cell geometry defined array to model (using active cell mask unless geometry_defined_everywhere is True)
    if self.__geometry_defined_everywhere:
        self.grid.geometry_defined_for_all_cells_cached = True
        self.grid.array_cell_geometry_is_defined = None
    else:
        log.debug('using active cell mask as indicator of defined cell geometry')
        self.grid.array_cell_geometry_is_defined = self.__active_mask.copy(
        )  # a bit harsh: disallows reactivation of cells
        self.grid.geometry_defined_for_all_cells_cached = np.all(self.__active_mask)
    self.grid.geometry_defined_for_all_pillars_cached = True  # following fesapi convention of defining all pillars regardless
    # note: grid.array_pillar_geometry_is_defined not set, as line above should be sufficient
def __update_grid_handedness(self):
    """Determine and set grid.grid_is_right_handed.

    With 'auto' (or unspecified) handedness, a sample cell with defined,
    non-pinched-out geometry is inspected: its corner points give the column
    winding sense, which is then flipped for k-up grids and for right-handed
    xyz coordinate systems.
    """
    # set handedness of ijk axes
    if self.__ijk_handedness is None or self.__ijk_handedness == 'auto':
        # work out handedness from sample cell / column axes directions and handedness of crs
        sample_kji0 = tuple(np.array(self.grid.extent_kji) // 2)
        if not self.__geometry_defined_everywhere and not self.grid.array_cell_geometry_is_defined[sample_kji0]:
            # central cell has no geometry: fall back to the first cell that is defined and not pinched out
            where_defined = np.where(
                np.logical_and(self.grid.array_cell_geometry_is_defined, np.logical_not(self.grid.pinched_out())))
            assert len(where_defined) == 3 and len(where_defined[0]) > 0, 'no extant cell geometries'
            sample_kji0 = (where_defined[0][0], where_defined[1][0], where_defined[2][0])
        sample_cp = self.__cp_array[sample_kji0]
        # clockwise winding of the top face corners implies an ijk left-handed cell
        self.__cell_ijk_lefthanded = (vec.clockwise(sample_cp[0, 0, 0], sample_cp[0, 1, 0], sample_cp[0, 0, 1]) >=
                                      0.0)
        if not self.grid.k_direction_is_down:
            self.__cell_ijk_lefthanded = not self.__cell_ijk_lefthanded
        if self.__crs.is_right_handed_xyz():
            self.__cell_ijk_lefthanded = not self.__cell_ijk_lefthanded
        self.grid.grid_is_right_handed = not self.__cell_ijk_lefthanded
    else:
        assert self.__ijk_handedness in ['left', 'right']
        self.grid.grid_is_right_handed = (self.__ijk_handedness == 'right')
def create_grid(self):
    """Make the grid.

    Orchestrates the full build in a strict order: NaN handling, extents,
    activity masks, masked corner point array, k gap detection, pillar
    analysis (including split pillars), the RESQML points array, and finally
    assembly of the Grid object with its derived arrays; the result is left
    in self.grid.
    """
    # Find out which cells to treat as nans
    self.__get_treat_as_nan()
    self.__geometry_defined_everywhere = (self.__treat_as_nan is None)
    assert self.__cp_array.ndim == 7
    # Get the grid extents
    self.__get_extents()
    # Generate active and inactive masks
    self.__get_active_inactive_masks()
    # Generate the nan mask
    self.__get_nan_mask()
    # Apply nan and inactive masks
    if self.__cp_nan_mask is not None:
        self.__inactive_mask = np.logical_or(self.__inactive_mask, self.__cp_nan_mask)
        self.__active_mask = np.logical_not(self.__inactive_mask)
    # Generate the masked corner point array
    self.__get_masked_cp_array()
    # Find information on kgaps in the grid
    self.__check_for_kgaps()
    if self.__k_gaps:
        self.__nk_plus_1 += self.__k_gaps
    if self.__k_gap_raw_index is None:
        self.__k_gap_raw_index = np.arange(self.__nk, dtype = int)
    # reduce cp array extent in k
    self.__get_k_reduced_cp_array()
    # create 2D array of active columns (columns where at least one cell is active)
    log.debug('creating 2D array of active columns')
    self.__active_mask_2D = np.any(self.__active_mask, axis = 0)
    # create primary pillar reference indices as one of four column corners around pillar, active column preferred
    self.__get_primary_pillar_ref()
    # build extra pillar references for split pillars
    self.__get_extra_pillar_ref()
    # create points array as used in resqml
    self.__get_points_array()
    # create an empty grid object and fill in some basic info
    self.__make_basic_grid()
    # add split pillar arrays to grid object
    self.__add_split_arrays_to_grid()
    # following is not part of resqml standard but is used by resqml_grid module for speed optimisation
    self.__set_up_column_to_pillars_mapping()
    # add inactive cell mask to grid
    log.debug('setting inactive cell mask')
    self.grid.inactive = self.__inactive_mask.copy()
    # update grid with geometry parameters
    self.__update_grid_geometry_information()
    # tentatively add corner point array to grid object in case it is needed
    log.debug('noting corner point array in grid')
    self.grid.array_corner_points = self.__cp_array
    # update grid handedness
    self.__update_grid_handedness()
7952a0f349e17172d5635991318945a4a31be5c4 | 30,711 | py | Python | toontown/building/DistributedDoor.py | philicheese2003/ToontownProjectAltisServer | cfa225d1bdddacdbd29b621382347fce17e1dc66 | [
"Apache-2.0"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | toontown/building/DistributedDoor.py | philicheese2003/ToontownProjectAltisServer | cfa225d1bdddacdbd29b621382347fce17e1dc66 | [
"Apache-2.0"
] | null | null | null | toontown/building/DistributedDoor.py | philicheese2003/ToontownProjectAltisServer | cfa225d1bdddacdbd29b621382347fce17e1dc66 | [
"Apache-2.0"
] | 1 | 2020-03-11T17:38:45.000Z | 2020-03-11T17:38:45.000Z | from toontown.building import DoorTypes
from toontown.building import FADoorCodes
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from direct.distributed.ClockDelta import *
from direct.fsm import ClassicFSM, State
from direct.interval.IntervalGlobal import *
from direct.task.Task import Task
from pandac.PandaModules import *
from toontown.distributed import DelayDelete
from toontown.distributed.DelayDeletable import DelayDeletable
from toontown.hood import ZoneUtil
from toontown.suit import Suit
from toontown.toonbase.ToonBaseGlobal import *
from toontown.toontowngui import TTDialog
from toontown.toontowngui import TeaserPanel
from toontown.nametag.NametagGroup import NametagGroup
from toontown.nametag.Nametag import Nametag
class DistributedDoor(DistributedObject.DistributedObject, DelayDeletable):
    """Client-side distributed door for Toontown buildings.

    Runs two ClassicFSMs: ``fsm`` animates the right-hand door (avatars
    entering) and ``exitDoorFSM`` the left-hand door (avatars exiting).
    State transitions arrive from the server via setState / setExitDoorState;
    this class plays the door swing intervals, sounds, camera moves and
    avatar walk tracks, and manages the building nametag and door triggers.
    """

    def __init__(self, cr):
        DistributedObject.DistributedObject.__init__(self, cr)
        self.openSfx = base.loader.loadSfx('phase_3.5/audio/sfx/Door_Open_1.ogg')
        self.closeSfx = base.loader.loadSfx('phase_3.5/audio/sfx/Door_Close_1.ogg')
        self.nametag = None
        # FSM driving the right-hand (entry) door leaf
        self.fsm = ClassicFSM.ClassicFSM(
            'DistributedDoor_right',
            [
                State.State('off', self.enterOff, self.exitOff,
                            ['closing', 'closed', 'opening', 'open']),
                State.State('closing', self.enterClosing, self.exitClosing,
                            ['closed', 'opening']),
                State.State('closed', self.enterClosed, self.exitClosed,
                            ['opening']),
                State.State('opening', self.enterOpening, self.exitOpening,
                            ['open']),
                State.State('open', self.enterOpen, self.exitOpen,
                            ['closing', 'open'])
            ], 'off', 'off')
        self.fsm.enterInitialState()
        # FSM driving the left-hand (exit) door leaf
        self.exitDoorFSM = ClassicFSM.ClassicFSM(
            'DistributedDoor_left',
            [
                State.State('off', self.exitDoorEnterOff, self.exitDoorExitOff,
                            ['closing', 'closed', 'opening', 'open']),
                State.State('closing', self.exitDoorEnterClosing, self.exitDoorExitClosing,
                            ['closed', 'opening']),
                State.State('closed', self.exitDoorEnterClosed, self.exitDoorExitClosed,
                            ['opening']),
                State.State('opening', self.exitDoorEnterOpening, self.exitDoorExitOpening,
                            ['open']),
                State.State('open', self.exitDoorEnterOpen, self.exitDoorExitOpen,
                            ['closing', 'open'])
            ], 'off', 'off')
        self.exitDoorFSM.enterInitialState()
        # door types whose trigger / origin nodes are indexed per door
        self.specialDoorTypes = {
            DoorTypes.EXT_HQ: 0,
            DoorTypes.EXT_COGHQ: 0,
            DoorTypes.INT_COGHQ: 0,
            DoorTypes.EXT_KS: 0,
            DoorTypes.INT_KS: 0 }
        # lateral offset (x) of the entry/exit queue point from the door origin
        self.doorX = 1.5

    def leaveDoor(self, collEntry):
        """Avatar backed out of the trigger before entering: tear down the prompt."""
        self.ignore("shift")
        if hasattr(self, "colorSeq"):
            if self.colorSeq:
                self.colorSeq.finish()
        if hasattr(self, "enterText"):
            self.enterText.removeNode()
            del self.enterText
        self.accept(self.getEnterTriggerEvent(), self.doorTrigger)

    def generate(self):
        DistributedObject.DistributedObject.generate(self)
        # per-session track and avatar queue bookkeeping
        self.avatarTracks = []
        self.avatarExitTracks = []
        self.avatarIDList = []
        self.avatarExitIDList = []
        self.doorTrack = None
        self.doorExitTrack = None

    def disable(self):
        """Clean up triggers, tasks, FSMs and tracks when the object leaves interest."""
        self.clearNametag()
        if hasattr(self, "colorSeq"):
            if self.colorSeq:
                self.colorSeq.finish()
        taskMgr.remove(self.checkIsDoorHitTaskName())
        self.ignore(self.getEnterTriggerEvent())
        self.ignore(self.getExitTriggerEvent())
        self.ignore('clearOutToonInterior')
        self.fsm.request('off')
        self.exitDoorFSM.request('off')
        if hasattr(self, 'building'):
            del self.building
        self.finishAllTracks()
        self.avatarIDList = []
        self.avatarExitIDList = []
        if hasattr(self, 'tempDoorNodePath'):
            self.tempDoorNodePath.removeNode()
            del self.tempDoorNodePath
        DistributedObject.DistributedObject.disable(self)

    def delete(self):
        del self.fsm
        del self.exitDoorFSM
        del self.openSfx
        del self.closeSfx
        DistributedObject.DistributedObject.delete(self)

    def wantsNametag(self):
        # only exterior doors display a building nametag
        return not ZoneUtil.isInterior(self.zoneId)

    def setupNametag(self):
        """Create and manage the building nametag (exterior door 0 only)."""
        if not self.wantsNametag():
            return
        if self.doorIndex != 0:
            return
        if self.nametag == None:
            self.nametag = NametagGroup()
            self.nametag.setNametag3d(None)
            self.nametag.setFont(ToontownGlobals.getBuildingNametagFont())
            if TTLocalizer.BuildingNametagShadow:
                self.nametag.setShadow(*TTLocalizer.BuildingNametagShadow)
            self.nametag.hideChat()
            self.nametag.hideThought()
            nametagColor = NametagGlobals.NametagColors[NametagGlobals.CCToonBuilding]
            self.nametag.setNametagColor(nametagColor)
            self.nametag.setActive(False)
            self.nametag.setAvatar(self.getDoorNodePath())
            name = self.cr.playGame.dnaStore.getTitleFromBlockNumber(self.block)
            self.nametag.setText(name)
            self.nametag.manage(base.marginManager)
            self.nametag.updateAll()

    def clearNametag(self):
        if self.nametag is not None:
            self.nametag.unmanage(base.marginManager)
            self.nametag.setAvatar(NodePath())
            self.nametag = None

    def getTriggerName(self):
        # HQ interiors and "special" door types have per-door triggers
        if (self.doorType == DoorTypes.INT_HQ) or (self.doorType in self.specialDoorTypes):
            return 'door_trigger_' + str(self.block) + '_' + str(self.doorIndex)
        else:
            return 'door_trigger_' + str(self.block)

    def getTriggerName_wip(self):
        # work-in-progress variant keyed on doId instead of block
        return 'door_trigger_%d' % (self.doId,)

    def getEnterTriggerEvent(self):
        return 'enter' + self.getTriggerName()

    def getExitTriggerEvent(self):
        return 'exit' + self.getTriggerName()

    def hideDoorParts(self):
        """Hide door leaves and frame holes for special door types (doors start closed)."""
        if self.doorType in self.specialDoorTypes:
            self.hideIfHasFlat(self.findDoorNode('rightDoor'))
            self.hideIfHasFlat(self.findDoorNode('leftDoor'))
        try:
            # some building models lack these nodes; tolerate their absence
            self.findDoorNode('doorFrameHoleRight').hide()
            self.findDoorNode('doorFrameHoleLeft').hide()
        except:
            pass

    def setTriggerName(self):
        """Rename the model's trigger node to the canonical trigger name (special doors only)."""
        if self.doorType in self.specialDoorTypes:
            building = self.getBuilding()
            doorTrigger = building.find('**/door_' + str(self.doorIndex) + '/**/door_trigger*')
            try:
                doorTrigger.node().setName(self.getTriggerName())
            except:
                # trigger node may be missing on some models
                pass

    def setTriggerName_wip(self):
        # work-in-progress variant: progressively looser searches for the trigger node
        building = self.getBuilding()
        doorTrigger = building.find('**/door_%d/**/door_trigger_%d' % (self.doorIndex, self.block))
        if doorTrigger.isEmpty():
            doorTrigger = building.find('**/door_trigger_%d' % (self.block,))
        if doorTrigger.isEmpty():
            doorTrigger = building.find('**/door_%d/**/door_trigger_*' % (self.doorIndex,))
        if doorTrigger.isEmpty():
            doorTrigger = building.find('**/door_trigger_*')
        doorTrigger.node().setName(self.getTriggerName())

    # --- required fields set by the server ---

    def setZoneIdAndBlock(self, zoneId, block):
        self.zoneId = zoneId
        self.block = block

    def setDoorType(self, doorType):
        self.notify.debug('Door type = ' + str(doorType) + ' on door #' + str(self.doId))
        self.doorType = doorType

    def setDoorIndex(self, doorIndex):
        self.doorIndex = doorIndex

    def setSwing(self, flags):
        # bit 0: left door swings inward; bit 1: right door swings inward
        self.leftSwing = flags & 1 != 0
        self.rightSwing = flags & 2 != 0

    def setOtherZoneIdAndDoId(self, zoneId, distributedObjectID):
        # zone / doId of the door on the far side (interior <-> exterior)
        self.otherZoneId = zoneId
        self.otherDoId = distributedObjectID

    def setState(self, state, timestamp):
        self.fsm.request(state, [globalClockDelta.localElapsedTime(timestamp)])

    def setExitDoorState(self, state, timestamp):
        self.exitDoorFSM.request(state, [globalClockDelta.localElapsedTime(timestamp)])

    def announceGenerate(self):
        DistributedObject.DistributedObject.announceGenerate(self)
        self.doPostAnnounceGenerate()

    def doPostAnnounceGenerate(self):
        """Post-generate setup: flats, trigger naming, DDL HQ trigger fix-up, event hooks."""
        flatDoorTypes = [DoorTypes.INT_STANDARD, DoorTypes.INT_HQ]
        if self.doorType in flatDoorTypes:
            self.bHasFlat = True
        else:
            self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty()
        self.hideDoorParts()
        self.setTriggerName()
        # Check if we are dealing with a DDL HQ door...
        if self.doorType == DoorTypes.EXT_HQ and \
           ZoneUtil.getHoodId(self.zoneId) == ToontownGlobals.DonaldsDreamland:
            # Get the doorTrigger...
            building = self.getBuilding()
            doorTrigger = building.find('**/%s' % self.getTriggerName())
            # Check if the doorTrigger hasn't been 'fixed' already...
            if not doorTrigger.getTag('fixed'):
                # Reposition the doorTrigger based on its index...
                if self.doorIndex == 0:
                    doorTrigger.setY(doorTrigger, 0.25)
                else:
                    doorTrigger.setY(doorTrigger, -0.25)
                # We are done :) Tag the door as fixed.
                doorTrigger.setTag('fixed', 'true')
        self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
        self.acceptOnce('clearOutToonInterior', self.doorTrigger)
        self.setupNametag()

    def getBuilding(self):
        """Find and cache the NodePath of the building this door belongs to."""
        if not hasattr(self, 'building'):
            if self.doorType == DoorTypes.INT_STANDARD:
                door = render.find('**/leftDoor;+s')
                self.building = door.getParent()
            elif self.doorType == DoorTypes.INT_HQ:
                door = render.find('**/door_0')
                self.building = door.getParent()
            elif self.doorType == DoorTypes.INT_KS:
                self.building = render.find('**/KartShop_Interior*')
            elif self.doorType == DoorTypes.EXT_STANDARD or self.doorType == DoorTypes.EXT_HQ or self.doorType == DoorTypes.EXT_KS:
                self.building = self.cr.playGame.hood.loader.geom.find('**/??' + str(self.block) + ':*_landmark_*_DNARoot;+s')
                if self.building.isEmpty():
                    # fall back to animated buildings
                    self.building = self.cr.playGame.hood.loader.geom.find('**/??' + str(self.block) + ':animated_building_*_DNARoot;+s')
            elif self.doorType == DoorTypes.EXT_COGHQ or self.doorType == DoorTypes.INT_COGHQ:
                self.building = self.cr.playGame.hood.loader.geom
            else:
                self.notify.error('No such door type as ' + str(self.doorType))
        return self.building

    def getBuilding_wip(self):
        # work-in-progress variant: look up by block when available, else use the whole geom
        if not hasattr(self, 'building'):
            if hasattr(self, 'block'):
                self.building = self.cr.playGame.hood.loader.geom.find('**/??' + str(self.block) + ':*_landmark_*_DNARoot;+s')
            else:
                self.building = self.cr.playGame.hood.loader.geom
        return self.building

    def readyToExit(self):
        base.transitions.fadeScreen(1.0)
        self.sendUpdate('requestExit')

    def avatarEnterDoorTrack(self, avatar, duration):
        """Build the interval that walks an avatar through the (open) door.

        For the local avatar this also moves the camera and irises out,
        then reparents the avatar to the hidden node.
        """
        trackName = 'avatarEnterDoor-%d-%d' % (self.doId, avatar.doId)
        track = Parallel(name = trackName)
        otherNP = self.getDoorNodePath()
        if hasattr(avatar, 'stopSmooth'):
            avatar.stopSmooth()
        if avatar.doId == base.localAvatar.doId:
            track.append(LerpPosHprInterval(nodePath = camera, other = avatar, duration = duration, pos = Point3(0, -8, avatar.getHeight()), hpr = VBase3(0, 0, 0), blendType = 'easeInOut'))
        finalPos = avatar.getParent().getRelativePoint(otherNP, Point3(self.doorX, 2, ToontownGlobals.FloorOffset))
        moveHere = Sequence(self.getAnimStateInterval(avatar, 'walk'), LerpPosInterval(nodePath = avatar, duration = duration, pos = finalPos, blendType = 'easeIn'))
        track.append(moveHere)
        if avatar.doId == base.localAvatar.doId:
            track.append(Sequence(Wait(duration * 0.5), Func(base.transitions.irisOut, duration * 0.5), Wait(duration * 0.5), Func(avatar.b_setParent, ToontownGlobals.SPHidden)))
        track.delayDelete = DelayDelete.DelayDelete(avatar, 'avatarEnterDoorTrack')
        return track

    def avatarEnqueueTrack(self, avatar, duration):
        """Build the interval that walks an avatar to its queue position outside the door."""
        if hasattr(avatar, 'stopSmooth'):
            avatar.stopSmooth()
        # queue up behind the door; clamp so late arrivals don't stand too far back
        back = -5.0 - 2.0 * len(self.avatarIDList)
        if back < -9.0:
            back = -9.0
        offset = Point3(self.doorX, back, ToontownGlobals.FloorOffset)
        otherNP = self.getDoorNodePath()
        walkLike = ActorInterval(avatar, 'walk', startTime = 1, duration = duration, endTime = 0.0001)
        standHere = Sequence(LerpPosHprInterval(nodePath = avatar, other = otherNP, duration = duration, pos = offset, hpr = VBase3(0, 0, 0), blendType = 'easeInOut'), self.getAnimStateInterval(avatar, 'neutral'))
        trackName = 'avatarEnqueueDoor-%d-%d' % (self.doId, avatar.doId)
        track = Parallel(walkLike, standHere, name = trackName)
        track.delayDelete = DelayDelete.DelayDelete(avatar, 'avatarEnqueueTrack')
        return track

    def getAnimStateInterval(self, avatar, animName):
        # suits use raw anim looping; toons go through setAnimState
        isSuit = isinstance(avatar, Suit.Suit)
        if isSuit:
            return Func(avatar.loop, animName, 0)
        else:
            return Func(avatar.setAnimState, animName)

    def isDoorHit(self):
        """Return positive when the local avatar is on the entering side of the trigger."""
        vec = base.localAvatar.getRelativeVector(self.currentDoorNp, self.currentDoorVec)
        netScale = self.currentDoorNp.getNetTransform().getScale()
        yToTest = vec.getY() / netScale[1]
        return yToTest

    def enterDoor(self):
        """Request entry from the server (or show the teaser panel if not allowed)."""
        self.ignore("shift")
        if hasattr(self, "enterText"):
            self.enterText.removeNode()
            del self.enterText
        if self.allowedToEnter():
            messenger.send('DistributedDoor_doorTrigger')
            self.sendUpdate('requestEnter')
        else:
            place = base.cr.playGame.getPlace()
            if place:
                place.fsm.request('stopped')
            self.dialog = TeaserPanel.TeaserPanel(pageName = 'otherHoods', doneFunc = self.handleOkTeaser)

    def handleOkTeaser(self):
        """Dismiss the teaser panel and restore walk state."""
        self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
        self.dialog.destroy()
        del self.dialog
        place = base.cr.playGame.getPlace()
        if place:
            place.fsm.request('walk')

    def allowedToEnter(self):
        # paid accounts may go anywhere; free accounts only in the starter hoods
        if base.cr.isPaid():
            return True
        place = base.cr.playGame.getPlace()
        myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId)
        if hasattr(place, 'id'):
            myHoodId = place.id
        if myHoodId in (ToontownGlobals.ToontownCentral, ToontownGlobals.MyEstate, ToontownGlobals.GoofySpeedway, ToontownGlobals.Tutorial):
            return True
        return False

    def checkIsDoorHitTaskName(self):
        return 'checkIsDoorHit' + self.getTriggerName()

    def checkIsDoorHitTask(self, task):
        # poll until the avatar crosses to the entering side of the trigger
        if self.isDoorHit():
            self.ignore(self.checkIsDoorHitTaskName())
            self.ignore(self.getExitTriggerEvent())
            #self.enterDoor()
            return Task.done
        return Task.cont

    def cancelCheckIsDoorHitTask(self, args):
        taskMgr.remove(self.checkIsDoorHitTaskName())
        del self.currentDoorNp
        del self.currentDoorVec
        self.ignore(self.getExitTriggerEvent())
        self.accept(self.getEnterTriggerEvent(), self.doorTrigger)

    def doorTrigger(self, args=None):
        """Handle the door trigger: either enter immediately or show the SHIFT prompt.

        args is the collision entry (None when fired from 'clearOutToonInterior').
        """
        self.ignore(self.getEnterTriggerEvent())
        self.accept(self.getExitTriggerEvent(), self.leaveDoor)
        # NOTE(review): 'args == None' should be 'args is None'
        if args == None:
            self.enterDoor()
        else:
            self.currentDoorNp = NodePath(args.getIntoNodePath())
            self.currentDoorVec = Vec3(args.getSurfaceNormal(self.currentDoorNp))
            if self.isDoorHit():
                if base.wantDoorKey:
                    # show a "press SHIFT to enter/exit" prompt instead of entering directly
                    if not hasattr(self, "enterText"):
                        self.accept("shift", self.enterDoor)
                        name = self.cr.playGame.dnaStore.getTitleFromBlockNumber(self.block)
                        if ZoneUtil.isInterior(self.zoneId):
                            state = "exit"
                        else:
                            state = "enter"
                        if name != '':
                            text = ("Press SHIFT to %s %s" % (state, name))
                        else:
                            text = ("Press SHIFT to %s" % state)
                        if base.wantMobile:
                            self.enterText = DirectButton(relief = None, parent = base.a2dBottomCenter, image = (base.shuffleUp, base.shuffleUp, base.shuffleDown), image_scale = (1, 0.7, 0.7), image1_scale = (1.02, 0.7, 0.7), image2_scale = (1.02, 0.7, 0.7), text = ("Tap to %s" % state), text_style = 3, text_scale = .07, text_pos = (0, -0.02), text_fg = (1, 0.9, 0.1, 1), scale = 1.5, pos = (0.0, 0.0, 0.5), command = self.enterDoor)
                        else:
                            self.enterText = OnscreenText(text, style = 3, scale = .09, parent = base.a2dBottomCenter, fg = (1, 0.9, 0.1, 1), pos = (0.0, 0.5))
                            self.colorSeq = Sequence(
                                LerpColorScaleInterval(self.enterText, .8, VBase4(.5, .6, 1, .9)),
                                LerpColorScaleInterval(self.enterText, .8, VBase4(1, 1, 1, 1))).loop()
                else:
                    self.enterDoor()
            else:
                # not yet on the entering side: poll until the avatar crosses
                self.accept(self.getExitTriggerEvent(), self.cancelCheckIsDoorHitTask)
                taskMgr.add(self.checkIsDoorHitTask, self.checkIsDoorHitTaskName())

    def avatarEnter(self, avatarID):
        """Server says avatarID is queued to enter: walk them to the queue spot."""
        avatar = self.cr.doId2do.get(avatarID, None)
        if avatar:
            avatar.setAnimState('neutral')
            track = self.avatarEnqueueTrack(avatar, 0.5)
            track.start()
            self.avatarTracks.append(track)
            self.avatarIDList.append(avatarID)

    def rejectEnter(self, reason):
        # empty message (falsy) means a generic rejection with no dialog
        message = FADoorCodes.reasonDict[reason]
        if message:
            self.__faRejectEnter(message)
        else:
            self.__basicRejectEnter()

    def __basicRejectEnter(self):
        self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
        if self.cr.playGame.getPlace():
            self.cr.playGame.getPlace().setState('walk')

    def __faRejectEnter(self, message):
        """Show a field-access rejection dialog and stop the avatar until acknowledged."""
        self.rejectDialog = TTDialog.TTGlobalDialog(message = message, doneEvent = 'doorRejectAck', style = TTDialog.Acknowledge)
        self.rejectDialog.show()
        self.rejectDialog.delayDelete = DelayDelete.DelayDelete(self, '__faRejectEnter')
        event = 'clientCleanup'
        self.acceptOnce(event, self.__handleClientCleanup)
        base.cr.playGame.getPlace().setState('stopped')
        self.acceptOnce('doorRejectAck', self.__handleRejectAck)
        self.acceptOnce('stoppedAsleep', self.__handleFallAsleepDoor)

    def __handleClientCleanup(self):
        if hasattr(self, 'rejectDialog') and self.rejectDialog:
            self.rejectDialog.doneStatus = 'ok'
        self.__handleRejectAck()

    def __handleFallAsleepDoor(self):
        # auto-acknowledge the rejection if the toon falls asleep
        self.rejectDialog.doneStatus = 'ok'
        self.__handleRejectAck()

    def __handleRejectAck(self):
        self.ignore('doorRejectAck')
        self.ignore('stoppedAsleep')
        self.ignore('clientCleanup')
        doneStatus = self.rejectDialog.doneStatus
        if doneStatus != 'ok':
            self.notify.error('Unrecognized doneStatus: ' + str(doneStatus))
        self.__basicRejectEnter()
        self.rejectDialog.delayDelete.destroy()
        self.rejectDialog.cleanup()
        del self.rejectDialog

    def getDoorNodePath(self):
        """Return the door origin NodePath appropriate for this door type."""
        if self.doorType == DoorTypes.INT_STANDARD:
            otherNP = render.find('**/door_origin')
        elif self.doorType == DoorTypes.EXT_STANDARD:
            otherNP = self.getBuilding().find('**/*door_origin')
        elif self.doorType in self.specialDoorTypes:
            building = self.getBuilding()
            otherNP = building.find('**/door_origin_' + str(self.doorIndex))
        elif self.doorType == DoorTypes.INT_HQ:
            otherNP = render.find('**/door_origin_' + str(self.doorIndex))
        else:
            self.notify.error('No such door type as ' + str(self.doorType))
        return otherNP

    def avatarExitTrack(self, avatar, duration):
        """Build the interval that walks an avatar out through the (open) exit door."""
        if hasattr(avatar, 'stopSmooth'):
            avatar.stopSmooth()
        otherNP = self.getDoorNodePath()
        trackName = 'avatarExitDoor-%d-%d' % (self.doId, avatar.doId)
        track = Sequence(name=trackName)
        track.append(self.getAnimStateInterval(avatar, 'walk'))
        # pop the avatar into the doorway, facing out
        track.append(
            PosHprInterval(
                avatar, Point3(-self.doorX, 0, ToontownGlobals.FloorOffset),
                VBase3(179, 0, 0), other=otherNP
            )
        )
        track.append(Func(avatar.setParent, ToontownGlobals.SPRender))
        if avatar.doId == base.localAvatar.doId:
            track.append(
                PosHprInterval(
                    camera, VBase3(-self.doorX, 5, avatar.getHeight()),
                    VBase3(180, 0, 0), other=otherNP
                )
            )
        # the local avatar walks further out than remote avatars
        if avatar.doId == base.localAvatar.doId:
            finalPos = render.getRelativePoint(
                otherNP, Point3(-self.doorX, -6, ToontownGlobals.FloorOffset)
            )
        else:
            finalPos = render.getRelativePoint(
                otherNP, Point3(-self.doorX, -3, ToontownGlobals.FloorOffset)
            )
        track.append(
            LerpPosInterval(
                nodePath=avatar, duration=duration, pos=finalPos,
                blendType='easeInOut'
            )
        )
        if avatar.doId == base.localAvatar.doId:
            track.append(Func(self.exitCompleted))
            track.append(Func(base.transitions.irisIn))
        if hasattr(avatar, 'startSmooth'):
            track.append(Func(avatar.startSmooth))
        track.delayDelete = DelayDelete.DelayDelete(avatar, 'DistributedDoor.avatarExitTrack')
        return track

    def exitCompleted(self):
        """Local avatar finished exiting: restore walk state and reparent to render."""
        base.localAvatar.setAnimState('neutral')
        place = self.cr.playGame.getPlace()
        if place:
            place.setState('walk')
        base.localAvatar.d_setParent(ToontownGlobals.SPRender)

    def avatarExit(self, avatarID):
        if avatarID in self.avatarIDList:
            # avatar changed its mind before going through; dequeue it
            self.avatarIDList.remove(avatarID)
            if avatarID == base.localAvatar.doId:
                self.exitCompleted()
        else:
            self.avatarExitIDList.append(avatarID)

    def finishDoorTrack(self):
        if self.doorTrack:
            self.doorTrack.finish()
            self.doorTrack = None

    def finishDoorExitTrack(self):
        if self.doorExitTrack:
            self.doorExitTrack.finish()
            self.doorExitTrack = None

    def finishAllTracks(self):
        """Finish door and avatar tracks and release their delay-deletes."""
        self.finishDoorTrack()
        self.finishDoorExitTrack()
        for t in self.avatarTracks:
            t.finish()
            DelayDelete.cleanupDelayDeletes(t)
        self.avatarTracks = []
        for t in self.avatarExitTracks:
            t.finish()
            DelayDelete.cleanupDelayDeletes(t)
        self.avatarExitTracks = []

    def enterOff(self):
        pass

    def exitOff(self):
        pass

    def getRequestStatus(self):
        """Build the teleport request dict for the zone on the other side of the door."""
        zoneId = self.otherZoneId
        request = {
            'loader': ZoneUtil.getBranchLoaderName(zoneId),
            'where': ZoneUtil.getToonWhereName(zoneId),
            'how': 'doorIn',
            'hoodId': ZoneUtil.getHoodId(zoneId),
            'zoneId': zoneId,
            'shardId': None,
            'avId': -1,
            'allowRedirect': 0,
            'doorDoId': self.otherDoId
        }
        return request

    # --- right (entry) door FSM states ---

    def enterClosing(self, ts):
        """Animate the right door swinging shut; fire doorDoneEvent if local avatar entered."""
        doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight')
        if doorFrameHoleRight.isEmpty():
            self.notify.warning('enterClosing(): did not find doorFrameHoleRight')
            return
        rightDoor = self.findDoorNode('rightDoor')
        if rightDoor.isEmpty():
            self.notify.warning('enterClosing(): did not find rightDoor')
            return
        otherNP = self.getDoorNodePath()
        trackName = 'doorClose-%d' % self.doId
        if self.rightSwing:
            h = 100
        else:
            h = -100
        self.finishDoorTrack()
        self.doorTrack = Sequence(LerpHprInterval(nodePath = rightDoor, duration = 1.0, hpr = VBase3(0, 0, 0), startHpr = VBase3(h, 0, 0), other = otherNP, blendType = 'easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node = rightDoor), name = trackName)
        self.doorTrack.start(ts)
        if hasattr(self, 'done'):
            # local avatar went through: hand off to the zone-change machinery
            request = self.getRequestStatus()
            messenger.send('doorDoneEvent', [request])

    def exitClosing(self):
        pass

    def enterClosed(self, ts):
        pass

    def exitClosed(self):
        pass

    def enterOpening(self, ts):
        """Animate the right door swinging open (with sound)."""
        doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight')
        if doorFrameHoleRight.isEmpty():
            self.notify.warning('enterOpening(): did not find doorFrameHoleRight')
            return
        rightDoor = self.findDoorNode('rightDoor')
        if rightDoor.isEmpty():
            self.notify.warning('enterOpening(): did not find rightDoor')
            return
        otherNP = self.getDoorNodePath()
        trackName = 'doorOpen-%d' % self.doId
        if self.rightSwing:
            h = 100
        else:
            h = -100
        self.finishDoorTrack()
        self.doorTrack = Parallel(SoundInterval(self.openSfx, node=rightDoor), Sequence(HprInterval(rightDoor, VBase3(0, 0, 0), other=otherNP), Wait(0.4), Func(rightDoor.show), Func(doorFrameHoleRight.show), LerpHprInterval(nodePath=rightDoor, duration=0.6, hpr=VBase3(h, 0, 0), startHpr=VBase3(0, 0, 0), other=otherNP, blendType='easeInOut')), name=trackName)
        self.doorTrack.start(ts)

    def exitOpening(self):
        pass

    def enterOpen(self, ts):
        """Door is open: walk all queued avatars through it."""
        for avatarID in self.avatarIDList:
            avatar = self.cr.doId2do.get(avatarID)
            if avatar:
                track = self.avatarEnterDoorTrack(avatar, 1.0)
                track.start(ts)
                self.avatarTracks.append(track)
                if avatarID == base.localAvatar.doId:
                    # flag used by enterClosing to fire doorDoneEvent
                    self.done = 1
        self.avatarIDList = []

    def exitOpen(self):
        for track in self.avatarTracks:
            track.finish()
            DelayDelete.cleanupDelayDeletes(track)
        self.avatarTracks = []

    # --- left (exit) door FSM states ---

    def exitDoorEnterOff(self):
        pass

    def exitDoorExitOff(self):
        pass

    def exitDoorEnterClosing(self, ts):
        """Animate the left door swinging shut."""
        doorFrameHoleLeft = self.findDoorNode('doorFrameHoleLeft')
        if doorFrameHoleLeft.isEmpty():
            self.notify.warning('enterOpening(): did not find flatDoors')
            return
        if self.leftSwing:
            h = -100
        else:
            h = 100
        leftDoor = self.findDoorNode('leftDoor')
        if not leftDoor.isEmpty():
            otherNP = self.getDoorNodePath()
            trackName = 'doorExitTrack-%d' % self.doId
            self.finishDoorExitTrack()
            self.doorExitTrack = Sequence(LerpHprInterval(nodePath = leftDoor, duration = 1.0, hpr = VBase3(0, 0, 0), startHpr = VBase3(h, 0, 0), other = otherNP, blendType = 'easeInOut'), Func(doorFrameHoleLeft.hide), Func(self.hideIfHasFlat, leftDoor), SoundInterval(self.closeSfx, node = leftDoor), name = trackName)
            self.doorExitTrack.start(ts)

    def exitDoorExitClosing(self):
        pass

    def exitDoorEnterClosed(self, ts):
        pass

    def exitDoorExitClosed(self):
        pass

    def exitDoorEnterOpening(self, ts):
        """Animate the left door swinging open (with sound)."""
        doorFrameHoleLeft = self.findDoorNode('doorFrameHoleLeft')
        if doorFrameHoleLeft.isEmpty():
            self.notify.warning('enterOpening(): did not find flatDoors')
            return
        leftDoor = self.findDoorNode('leftDoor')
        if self.leftSwing:
            h = -100
        else:
            h = 100
        if not leftDoor.isEmpty():
            otherNP = self.getDoorNodePath()
            trackName = 'doorDoorExitTrack-%d' % self.doId
            self.finishDoorExitTrack()
            # NOTE(review): 0.59999999999999998 is just a float-repr of 0.6
            self.doorExitTrack = Parallel(SoundInterval(self.openSfx, node = leftDoor), Sequence(Func(leftDoor.show), Func(doorFrameHoleLeft.show), LerpHprInterval(nodePath = leftDoor, duration = 0.59999999999999998, hpr = VBase3(h, 0, 0), startHpr = VBase3(0, 0, 0), other = otherNP, blendType = 'easeInOut')), name = trackName)
            self.doorExitTrack.start(ts)
        else:
            self.notify.warning('exitDoorEnterOpening(): did not find leftDoor')

    def exitDoorExitOpening(self):
        pass

    def exitDoorEnterOpen(self, ts):
        """Exit door is open: walk all exiting avatars out."""
        for avatarID in self.avatarExitIDList:
            avatar = self.cr.doId2do.get(avatarID)
            if avatar:
                track = self.avatarExitTrack(avatar, 0.2)
                track.start()
                self.avatarExitTracks.append(track)
        self.avatarExitIDList = []

    def exitDoorExitOpen(self):
        for track in self.avatarExitTracks:
            track.finish()
            DelayDelete.cleanupDelayDeletes(track)
        self.avatarExitTracks = []

    def findDoorNode(self, string, allowEmpty = False):
        """Find a named node under this door (per-door subtree first, then anywhere in the building)."""
        building = self.getBuilding()
        if not building:
            self.notify.warning('getBuilding() returned None, avoiding crash, remark 896029')
            foundNode = None
        else:
            foundNode = building.find('**/door_' + str(self.doorIndex) + '/**/' + string + '*;+s+i')
            if foundNode.isEmpty():
                foundNode = building.find('**/' + string + '*;+s+i')
        # NOTE(review): both branches return foundNode, so allowEmpty currently has
        # no effect; the non-allowEmpty path presumably once asserted non-emptiness
        if allowEmpty:
            return foundNode
        return foundNode

    def hideIfHasFlat(self, node):
        # flat-door buildings keep the flat visible and hide the swinging leaf
        if self.bHasFlat:
            node.hide()
| 41.840599 | 435 | 0.608316 |
7952a1c2de03e077991e374aaf9cbadbb3cf847c | 25,262 | py | Python | python_old/atmos_models.py | velatkilic/LISA | ef2a3489bd194f7bf790c057edaf9c89b65c572b | [
"MIT"
] | 6 | 2021-09-29T21:20:23.000Z | 2022-03-29T05:50:02.000Z | python_old/atmos_models.py | velatkilic/LISA | ef2a3489bd194f7bf790c057edaf9c89b65c572b | [
"MIT"
] | null | null | null | python_old/atmos_models.py | velatkilic/LISA | ef2a3489bd194f7bf790c057edaf9c89b65c572b | [
"MIT"
] | 4 | 2021-09-01T17:12:55.000Z | 2022-02-17T07:28:33.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Lidar Scatterer Augmentation (LISA)
- Given lidar point cloud and a rain rate generates corresponding noisy signal
- Reflection data must be normalized to range [0 1] and
range must be in units of meters
"""
import numpy as np
from scipy.special import gamma
from scipy.integrate import trapz
import PyMieScatt as ps
class LISA():
def __init__(self,m=1.328,lam=905,rmax=200,rmin=1.5,bdiv=3e-3,dst=0.05,
dR=0.09,saved_model=False,atm_model='rain',mode='strongest'):
'''
Initialize LISA class
Parameters
----------
m : refractive index contrast
lam : wavelength (nm)
rmax : max lidar range (m)
rmin : min lidar range (m)
bdiv : beam divergence angle (rad)
dst : droplet diameter starting point (mm)
dR : range accuracy (m)
saved_model : use saved mie coefficients (bool)
atm_model : atmospheric model type
mode : lidar return mode: "strongest" or "last"
Returns
-------
None.
'''
self.m = m
self.lam = lam
self.rmax = rmax # max range (m)
self.bdiv = bdiv # beam divergence (rad)
self.dst = dst # min rain drop diameter to be sampled (mm)
self.rmin = rmin # min lidar range (bistatic)
self.dR = dR
self.mode = mode
self.atm_model = atm_model
if saved_model:
# If Mie parameters are saved, use those
dat = np.load('mie_q.npz')
self.D = dat['D']
self.qext = dat['qext']
self.qback = dat['qback']
else:
try:
dat = np.load('mie_q.npz')
self.D = dat['D']
self.qext = dat['qext']
self.qback = dat['qback']
except:
# else calculate Mie parameters
print('Calculating Mie coefficients... \nThis might take a few minutes')
self.D,self.qext,self.qback = self.calc_Mie_params()
print('Mie calculation done...')
# Diameter distribution function based on user input
if atm_model=='rain':
self.N_model = lambda D, Rr : self.N_MP_rain(D,Rr)
self.N_tot = lambda Rr,dst : self.N_MP_tot_rain(Rr,dst)
self.N_sam = lambda Rr,N,dst : self.MP_Sample_rain(Rr,N,dst)
# Augmenting function: hybrid Monte Carlo
self.augment = lambda pc,Rr : self.augment_mc(pc,Rr)
elif atm_model=='snow':
self.N_model = lambda D, Rr : self.N_MG_snow(D,Rr)
self.N_tot = lambda Rr,dst : self.N_MG_tot_snow(Rr,dst)
self.N_sam = lambda Rr,N,dst : self.MG_Sample_snow(Rr,N,dst)
self.m = 1.3031 # refractive index of ice
# Augmenting function: hybrid Monte Carlo
self.augment = lambda pc,Rr : self.augment_mc(pc,Rr)
elif atm_model=='chu_hogg_fog':
self.N_model = lambda D : self.Nd_chu_hogg(D)
# Augmenting function: average effects
self.augment = lambda pc : self.augment_avg(pc)
elif atm_model=='strong_advection_fog':
self.N_model = lambda D : self.Nd_strong_advection_fog(D)
# Augmenting function: average effects
self.augment = lambda pc : self.augment_avg(pc)
elif atm_model=='moderate_advection_fog':
self.N_model = lambda D : self.Nd_moderate_advection_fog(D)
# Augmenting function: average effects
self.augment = lambda pc : self.augment_avg(pc)
def augment_mc(self,pc,Rr):
    '''
    Augment clean pointcloud for a given rain rate
    Parameters
    ----------
    pc : pointcloud (N,4) -> x,y,z,reflectivity
    Rr : rain rate (mm/hr)
    Returns
    -------
    pc_new : new noisy point cloud (N,5) -> x,y,z,reflectivity,label
    label 0 -> lost point
    label 1 -> randomly scattered point
    label 2 -> not-scattered
    '''
    shp = pc.shape
    # Extra column holds the per-point label described in the docstring.
    pc_new = np.zeros((shp[0],shp[1]+1))
    leng = len(pc)
    for i in range(leng):
        x = pc[i,0]
        y = pc[i,1]
        z = pc[i,2]
        ref = pc[i,3]
        # Zero-reflectivity points are skipped and stay all-zero rows,
        # i.e. implicitly labelled 0 ("lost").
        if ref!=0:
            pc_new[i,:] = self.lisa_mc(x,y,z,ref,Rr)
    return pc_new
def lisa_mc(self,x,y,z,ref,Rr):
'''
For a single lidar return, performs a hybrid Monte-Carlo experiment
Parameters
----------
x,y,z : coordinates of the point
ref : reflectivity [0 1]
Rr : rain rate (mm/hr)
Returns
-------
x,y,z : new coordinates of the noisy lidar point
ref_new : new reflectivity
'''
rmax = self.rmax # max range (m)
Pmin = 0.9*rmax**(-2) # min measurable power (arb units)
bdiv = self.bdiv # beam divergence (rad)
Db = lambda x: 1e3*np.tan(bdiv)*x # beam diameter (mm) for a given range (m)
dst = self.dst # min rain drop diameter to be sampled (mm)
n = self.m # refractive index of scatterer
rmin = self.rmin # min lidar range (bistatic)
Nd = self.N_model(self.D,Rr) # density of rain droplets (m^-3)
alpha, beta = self.alpha_beta(Nd) # extinction coeff. (1/m)
ran = np.sqrt(x**2 + y**2 + z**2) # range in m
if ran>rmin:
bvol = (np.pi/3)*ran*(1e-3*Db(ran)/2)**2 # beam volume in m^3 (cone)
Nt = self.N_tot(Rr,dst) * bvol # total number of particles in beam path
Nt = np.int32(np.floor(Nt) + (np.random.rand() < Nt-int(Nt))) # convert to integer w/ probabilistic rounding
else:
Nt = 0
ran_r = ran*(np.random.rand(Nt))**(1/3) # sample distances from a quadratic pdf
indx = np.where(ran_r>rmin)[0] # keep points where ranges larger than rmin
Nt = len(indx) # new particle number
P0 = ref*np.exp(-2*alpha*ran)/(ran**2) # power
snr = P0/Pmin # signal noise ratio
if Nt>0:
Dr = self.N_sam(Rr,Nt,dst) # randomly sample Nt particle diameters
ref_r = abs((n-1)/(n+1))**2 # Fresnel reflection at normal incidence
ran_r = ran_r[indx]
# Calculate powers for all particles
Pr = ref_r*np.exp(-2*alpha*ran_r)*np.minimum((Dr/Db(ran_r))**2,np.ones(Dr.shape))/(ran_r**2)
if (self.mode=='strongest'):
ind_r = np.argmax(Pr) # index of the max power
if P0<Pmin and Pr[ind_r]<Pmin: # if all smaller than Pmin, do nothing
ran_new = 0
ref_new = 0
labl = 0 # label for lost point
elif P0<Pr[ind_r]: # scatterer has larger power
ran_new = ran_r[ind_r] # new range is scatterer range
ref_new = ref_r*np.exp(-2*alpha*ran_new)*np.minimum((Dr[ind_r]/Db(ran_r[ind_r]))**2,1) # new reflectance biased by scattering
labl = 1 # label for randomly scattered point
else: # object return has larger power
sig = self.dR/np.sqrt(2*snr) # std of range uncertainty
ran_new = ran + np.random.normal(0,sig) # range with uncertainty added
ref_new = ref*np.exp(-2*alpha*ran) # new reflectance modified by scattering
labl = 2 # label for a non-scattering point
elif (self.mode=='last'):
# if object power larger than Pmin, then nothing is scattered
if P0>Pmin:
sig = self.dR/np.sqrt(2*snr) # std of range uncertainty
ran_new = ran + np.random.normal(0,sig) # range with uncertainty added
ref_new = ref*np.exp(-2*alpha*ran) # new reflectance modified by scattering
labl = 2 # label for a non-scattering point
# otherwise find the furthest point above Pmin
else:
inds = np.where(Pr>Pmin)[0]
if len(inds) == 0:
ran_new = 0
ref_new = 0
labl = 0 # label for lost point
else:
ind_r = np.where(ran_r == np.max(ran_r[inds]))[0]
ran_new = ran_r[ind_r] # new range is scatterer range
ref_new = ref_r*np.exp(-2*alpha*ran_new)*np.minimum((Dr[ind_r]/Db(ran_r[ind_r]))**2,1) # new reflectance biased by scattering
labl = 1 # label for randomly scattered point
else:
print("Invalid lidar return mode")
else:
if P0<Pmin:
ran_new = 0
ref_new = 0
labl = 0 # label for lost point
else:
sig = self.dR/np.sqrt(2*snr) # std of range uncertainty
ran_new = ran + np.random.normal(0,sig) # range with uncertainty added
ref_new = ref*np.exp(-2*alpha*ran) # new reflectance modified by scattering
labl = 2 # label for a non-scattering point
# Angles are same
if ran>0:
phi = np.arctan2(y,x) # angle in radians
the = np.arccos(z/ran) # angle in radians
else:
phi,the=0,0
# Update new x,y,z based on new range
x = ran_new*np.sin(the)*np.cos(phi)
y = ran_new*np.sin(the)*np.sin(phi)
z = ran_new*np.cos(the)
return x,y,z,ref_new,labl
def augment_avg(self,pc):
    '''
    Augment a clean point cloud using bulk (average) attenuation only.

    Parameters
    ----------
    pc : point cloud (N,4) -> x,y,z,reflectivity

    Returns
    -------
    pc_new : noisy point cloud (N,4); points whose return power falls
    below the detection threshold collapse to range 0
    '''
    shp = pc.shape # data shape
    pc_new = np.zeros(shp) # init new point cloud
    leng = shp[0] # data length
    # Rename variables for better readability
    x = pc[:,0]
    y = pc[:,1]
    z = pc[:,2]
    ref = pc[:,3]
    # Get parameters from class init
    rmax = self.rmax # max range (m)
    Pmin = 0.9*rmax**(-2) # min measurable power (arb units)
    rmin = self.rmin # min lidar range (bistatic)
    # Calculate extinction coefficient from the particle distribution
    Nd = self.N_model(self.D) # density of rain droplets (m^-3)
    alpha, beta = self.alpha_beta(Nd) # extinction coeff. (1/m)
    ran = np.sqrt(x**2 + y**2 + z**2) # range in m
    indx = np.where(ran>rmin)[0] # keep points where ranges larger than rmin
    P0 = np.zeros((leng,)) # init back reflected power
    P0[indx] = ref[indx]*np.exp(-2*alpha*ran[indx])/(ran[indx]**2) # calculate reflected power
    snr = P0/Pmin # signal noise ratio
    indp = np.where(P0>Pmin)[0] # keep points where power is larger than Pmin
    sig = np.zeros((leng,)) # init sigma - std of range uncertainty
    sig[indp] = self.dR/np.sqrt(2*snr[indp]) # calc. std of range uncertainty
    ran_new = np.zeros((leng,)) # init new range
    ran_new[indp] = ran[indp] + np.random.normal(0,sig[indp]) # range with uncertainty added, keep range 0 if P<Pmin
    ref_new = ref*np.exp(-2*alpha*ran) # new reflectance modified by scattering
    # Init angles
    phi = np.zeros((leng,))
    the = np.zeros((leng,))
    phi[indx] = np.arctan2(y[indx],x[indx]) # angle in radians
    the[indx] = np.arccos(z[indx]/ran[indx]) # angle in radians
    # Update new x,y,z based on new range
    pc_new[:,0] = ran_new*np.sin(the)*np.cos(phi)
    pc_new[:,1] = ran_new*np.sin(the)*np.sin(phi)
    pc_new[:,2] = ran_new*np.cos(the)
    pc_new[:,3] = ref_new
    return pc_new
def msu_rain(self,pc,Rr):
    '''
    Lidar rain simulator from Goodin et al., 'Predicting the Influence of
    Rain on LIDAR in ADAS', electronics 2019
    Parameters
    ----------
    pc : point cloud (N,4)
    Rr : rain rate in mm/hr
    Returns
    -------
    pc_new : output point cloud (N,4)
    '''
    shp = pc.shape # data shape
    pc_new = np.zeros(shp) # init new point cloud
    leng = shp[0] # data length
    # Rename variables for better readability
    x = pc[:,0]
    y = pc[:,1]
    z = pc[:,2]
    ref = pc[:,3]
    # Get parameters from class init
    rmax = self.rmax # max range (m)
    Pmin = 0.9*rmax**(-2)/np.pi # min measurable power (arb units)
    # Calculate extinction coefficient from rain rate
    alpha = 0.01* Rr**0.6
    ran = np.sqrt(x**2 + y**2 + z**2) # range in m
    indv = np.where(ran>0)[0] # clean data might already have invalid points
    P0 = np.zeros((leng,))
    P0[indv] = ref[indv]*np.exp(-2*alpha*ran[indv])/(ran[indv]**2) # calculate reflected power
    # init new ref and ran; sub-threshold points remain at range 0
    ran_new = np.zeros((leng,))
    ref_new = np.zeros((leng,))
    indp = np.where(P0>Pmin)[0] # points where power is greater than Pmin
    ref_new[indp] = ref[indp]*np.exp(-2*alpha*ran[indp]) # reflectivity reduced by atten
    # Range noise grows with range and saturates with rain rate.
    sig = 0.02*ran[indp]* (1-np.exp(-Rr))**2
    ran_new[indp] = ran[indp] + np.random.normal(0,sig) # new range with uncertainty
    # Init angles
    phi = np.zeros((leng,))
    the = np.zeros((leng,))
    phi[indp] = np.arctan2(y[indp],x[indp]) # angle in radians
    the[indp] = np.arccos(z[indp]/ran[indp]) # angle in radians
    # Update new x,y,z based on new range
    pc_new[:,0] = ran_new*np.sin(the)*np.cos(phi)
    pc_new[:,1] = ran_new*np.sin(the)*np.sin(phi)
    pc_new[:,2] = ran_new*np.cos(the)
    pc_new[:,3] = ref_new
    return pc_new
def haze_point_cloud(self,pts_3D,Rr=0):
'''
Modified from
https://github.com/princeton-computational-imaging/SeeingThroughFog/blob/master/tools/DatasetFoggification/lidar_foggification.py
Parameters
----------
pts_3D : Point cloud
Rr : Rain rate (mm/hr)
Returns
-------
dist_pts_3d : Augmented point cloud
'''
n = []
#Velodyne HDL64S2
n = 0.05
g = 0.35
dmin = 2
d = np.sqrt(pts_3D[:,0] * pts_3D[:,0] + pts_3D[:,1] * pts_3D[:,1] + pts_3D[:,2] * pts_3D[:,2])
detectable_points = np.where(d>dmin)
d = d[detectable_points]
pts_3D = pts_3D[detectable_points]
#######################################################################
# This is the main modified part
# For comparison we would like to calculate the extinction coefficient
# from rain rate instead of sampling it from a distribution
if (self.atm_model == 'rain') or (self.atm_model == 'snow'):
Nd = self.N_model(self.D,Rr) # density of water droplets (m^-3)
elif (self.atm_model == 'chu_hogg_fog') or (self.atm_model=='strong_advection_fog') or (self.atm_model=='moderate_advection_fog'):
Nd = self.N_model(self.D) # density of water droplets (m^-3)
else:
print('Warning: weather model not implemented')
alpha, beta = self.alpha_beta(Nd) # extinction coeff. (1/m)
#######################################################################
beta_usefull = alpha*np.ones(d.shape) # beta is the extinction coefficient (actually alpha)
dmax = -np.divide(np.log(np.divide(n,(pts_3D[:,3] + g))),(2 * beta_usefull))
dnew = -np.log(1 - 0.5) / (beta_usefull)
probability_lost = 1 - np.exp(-beta_usefull*dmax)
lost = np.random.uniform(0, 1, size=probability_lost.shape) < probability_lost
cloud_scatter = np.logical_and(dnew < d, np.logical_not(lost))
random_scatter = np.logical_and(np.logical_not(cloud_scatter), np.logical_not(lost))
idx_stable = np.where(d<dmax)[0]
old_points = np.zeros((len(idx_stable), 5))
old_points[:,0:4] = pts_3D[idx_stable,:]
old_points[:,3] = old_points[:,3]*np.exp(-beta_usefull[idx_stable]*d[idx_stable])
old_points[:, 4] = np.zeros(np.shape(old_points[:,3]))
cloud_scatter_idx = np.where(np.logical_and(dmax<d, cloud_scatter))[0]
cloud_scatter = np.zeros((len(cloud_scatter_idx), 5))
cloud_scatter[:,0:4] = pts_3D[cloud_scatter_idx,:]
cloud_scatter[:,0:3] = np.transpose(np.multiply(np.transpose(cloud_scatter[:,0:3]), np.transpose(np.divide(dnew[cloud_scatter_idx],d[cloud_scatter_idx]))))
cloud_scatter[:,3] = cloud_scatter[:,3]*np.exp(-beta_usefull[cloud_scatter_idx]*dnew[cloud_scatter_idx])
cloud_scatter[:, 4] = np.ones(np.shape(cloud_scatter[:, 3]))
# Subsample random scatter abhaengig vom noise im Lidar
random_scatter_idx = np.where(random_scatter)[0]
scatter_max = np.min(np.vstack((dmax, d)).transpose(), axis=1)
drand = np.random.uniform(high=scatter_max[random_scatter_idx])
# scatter outside min detection range and do some subsampling. Not all points are randomly scattered.
# Fraction of 0.05 is found empirically.
drand_idx = np.where(drand>dmin)
drand = drand[drand_idx]
random_scatter_idx = random_scatter_idx[drand_idx]
# Subsample random scattered points to 0.05%
fraction_random = .05 # just set this according to the comment above^ rather than parsing arguments; also probably .05 not .05%
subsampled_idx = np.random.choice(len(random_scatter_idx), int(fraction_random*len(random_scatter_idx)), replace=False)
drand = drand[subsampled_idx]
random_scatter_idx = random_scatter_idx[subsampled_idx]
random_scatter = np.zeros((len(random_scatter_idx), 5))
random_scatter[:,0:4] = pts_3D[random_scatter_idx,:]
random_scatter[:,0:3] = np.transpose(np.multiply(np.transpose(random_scatter[:,0:3]), np.transpose(drand/d[random_scatter_idx])))
random_scatter[:,3] = random_scatter[:,3]*np.exp(-beta_usefull[random_scatter_idx]*drand)
random_scatter[:, 4] = 2*np.ones(np.shape(random_scatter[:, 3]))
dist_pts_3d = np.concatenate((old_points, cloud_scatter,random_scatter), axis=0)
return dist_pts_3d
def calc_Mie_params(self):
    '''
    Calculate scattering efficiencies
    Returns
    -------
    D : Particle diameter (mm)
    qext : Extinction efficiency
    qback : Backscattering efficiency
    '''
    # Sweep particle diameters on a logarithmic grid (PyMieScatt works in nm,
    # so 1..1e7 nm covers 1 nm to 1 cm).
    out = ps.MieQ_withDiameterRange(self.m, self.lam, diameterRange=(1,1e7),
                                    nd=2000, logD=True)
    # Convert diameters to mm (the unit documented above).
    D = out[0]*1e-6
    qext = out[1]
    qback = out[6]
    # Save for later use since this function takes long to run
    np.savez('mie_q.npz',D=D,qext=qext,qback=qback)
    return D,qext,qback
def alpha_beta(self, Nd):
    """Integrate a particle-size distribution against the Mie efficiencies.

    Parameters
    ----------
    Nd : particle size distribution, m^-3 mm^-1

    Returns
    -------
    (alpha, beta) : extinction and backscattering coefficients, both m^-1
    """
    diameters = self.D
    ext_integral = trapz(diameters ** 2 * self.qext * Nd, diameters)
    back_integral = trapz(diameters ** 2 * self.qback * Nd, diameters)
    # 1e-6 converts the mm^2-based integral to m^-1; pi/4 turns D^2 into area.
    alpha = 1e-6 * ext_integral * np.pi / 4
    beta = 1e-6 * back_integral * np.pi / 4
    return alpha, beta
# RAIN
def N_MP_rain(self, D, Rr):
    """Marshall-Palmer drop-size distribution for rain.

    Parameters
    ----------
    D : drop diameter (mm)
    Rr : rain rate (mm h^-1)

    Returns
    -------
    Number density of drops of diameter D, in m^-3 mm^-1.
    """
    slope = 4.1 * Rr ** (-0.21)  # MP slope parameter (mm^-1)
    return 8000 * np.exp(-slope * D)
def N_MP_tot_rain(self, Rr, dstart):
    """Closed-form integral of the Marshall-Palmer distribution.

    Parameters
    ----------
    Rr : rain rate (mm h^-1)
    dstart : lower diameter bound of the integral (mm)

    Returns
    -------
    Total drop density (m^-3) with diameter >= dstart.
    """
    slope = 4.1 * Rr ** (-0.21)
    return 8000 * np.exp(-slope * dstart) / slope
def MP_Sample_rain(self, Rr, N, dstart):
    """Draw N drop diameters from the Marshall-Palmer distribution.

    Uses inverse-CDF sampling of the exponential tail, shifted so the
    smallest sampled diameter is `dstart` (mm).

    Parameters
    ----------
    Rr : rain rate (mm/hr)
    N : number of samples
    dstart : minimum diameter sampled (mm)

    Returns
    -------
    Array of N sampled diameters (mm).
    """
    slope = 4.1 * Rr ** (-0.21)
    uniform_draws = np.random.rand(N)
    # Invert the exponential CDF and shift by the starting diameter.
    return -np.log(1 - uniform_draws) / slope + dstart
# SNOW
def N_MG_snow(self, D, Rr):
    """Gunn-Marshall particle-size distribution for snow.

    Parameters
    ----------
    D : snow particle diameter (mm)
    Rr : water-equivalent rain rate (mm h^-1)

    Returns
    -------
    Number density of particles of diameter D, in m^-3 mm^-1.
    """
    intercept = 7.6e3 * Rr ** (-0.87)   # N0
    slope = 2.55 * Rr ** (-0.48)        # lambda
    return intercept * np.exp(-slope * D)
def N_MG_tot_snow(self, Rr, dstart):
    """Closed-form integral of the Gunn-Marshall snow distribution.

    Parameters
    ----------
    Rr : water-equivalent rain rate (mm h^-1)
    dstart : lower diameter bound of the integral (mm)

    Returns
    -------
    Total particle density (m^-3) with diameter >= dstart.
    """
    intercept = 7.6e3 * Rr ** (-0.87)   # N0
    slope = 2.55 * Rr ** (-0.48)        # lambda
    return intercept * np.exp(-slope * dstart) / slope
def MG_Sample_snow(self, Rr, N, dstart):
    """Draw N particle diameters from the Gunn-Marshall snow distribution.

    Inverse-CDF sampling of the exponential tail, shifted so the smallest
    sampled diameter is `dstart` (mm).

    Parameters
    ----------
    Rr : water-equivalent rain rate (mm/hr)
    N : number of samples
    dstart : minimum diameter sampled (mm)

    Returns
    -------
    Array of N sampled diameters (mm).
    """
    slope = 2.55 * Rr ** (-0.48)
    uniform_draws = np.random.rand(N)
    return -np.log(1 - uniform_draws) / slope + dstart
# FOG
def N_GD(self, D, rho, alpha, g, Rc):
    """Modified-gamma droplet-size distribution (Rasshofer et al. form).

    The parameters are NOT normalized to unitless values (e.g. D**alpha
    carries units of length**alpha), so D must be supplied in exactly the
    units the parameter set was published for; convert units only after
    the N(D) curve has been generated.

    Parameters
    ----------
    D : droplet diameter
    rho, alpha, g, Rc : distribution shape parameters

    Returns
    -------
    Number density of droplets of diameter D.
    """
    b = alpha / (g * Rc ** g)
    shape_exponent = (alpha + 1) / g
    radius = D / 2
    return g * rho * b ** shape_exponent * radius ** alpha * np.exp(-b * radius ** g) / gamma(shape_exponent)
# Coastal fog distribution
# With given parameters, output has units cm^-3 um^-1 which is
# then converted to m^-3 mm^-1 which is what alpha_beta() expects
# so whole quantity is multiplied by (100 cm/m)^3 (1000 um/mm)
# D is passed in mm and scaled to um (D*1e3) before evaluating N_GD.
# NOTE(review): Rc=0.05e-3 here is three orders of magnitude smaller than
# the continental value (0.07); possibly a unit slip -- confirm against
# the Rasshofer et al. parameter tables before relying on the coastal model.
def Nd_haze_coast(self,D):
    return 1e9*self.N_GD(D*1e3,rho=100,alpha=1,g=0.5,Rc=0.05e-3)
# Continental fog distribution
def Nd_haze_continental(self,D):
    return 1e9*self.N_GD(D*1e3,rho=100,alpha=2,g=0.5,Rc=0.07)
# Strong advection fog
def Nd_strong_advection_fog(self,D):
    return 1e9*self.N_GD(D*1e3,rho=20,alpha=3,g=1.,Rc=10)
# Moderate advection fog
def Nd_moderate_advection_fog(self,D):
    return 1e9*self.N_GD(D*1e3,rho=20,alpha=3,g=1.,Rc=8)
# Strong spray
def Nd_strong_spray(self,D):
    return 1e9*self.N_GD(D*1e3,rho=100,alpha=6,g=1.,Rc=4)
# Moderate spray
def Nd_moderate_spray(self,D):
    return 1e9*self.N_GD(D*1e3,rho=100,alpha=6,g=1.,Rc=2)
# Chu/Hogg
def Nd_chu_hogg(self,D):
    return 1e9*self.N_GD(D*1e3,rho=20,alpha=2,g=0.5,Rc=1)
| 39.657771 | 163 | 0.526126 |
7952a2617eaee508684366d74a2b6fc3bc14823b | 2,155 | py | Python | bmbc3/processData.py | zingson/hddpy | 2ed511dabe9eb63fd6798123475f8113ecf0ceb3 | [
"Apache-2.0"
] | null | null | null | bmbc3/processData.py | zingson/hddpy | 2ed511dabe9eb63fd6798123475f8113ecf0ceb3 | [
"Apache-2.0"
] | null | null | null | bmbc3/processData.py | zingson/hddpy | 2ed511dabe9eb63fd6798123475f8113ecf0ceb3 | [
"Apache-2.0"
] | null | null | null | import sys,gmssl
from gmssl import sm2
from gmssl.sm3 import sm3_hash
from base64 import b64encode, b64decode
from gmssl.func import bytes_to_list,list_to_bytes,random_hex
import os, sys
import rsa
# File contract: UTF-8 encoded, first line is a header row, one record per line.
# Within each record the phone-number column must be SM3-hashed.
# Original note: base64-encode the file, generate an SM3 digest, then
# SM2-encrypt the digest; the receiver verifies both file and digest.
# NOTE(review): the code below actually signs with RSA/SHA-1 (see rsa.sign),
# not SM2 as that note describes -- confirm the intended scheme.
# Input file columns:
# customer id, phone, masked customer id, hashed phone, batch no,
# coupon type id, customer flag, coupon count, validity, coupon product id
# Output file columns:
# masked customer id, hashed phone, batch no, coupon type id,
# coupon count, validity, coupon product id
# --- SM3-hash the phone number of every record in the source file ---
f_name = input("your file name: ")
data_file = open(f_name,'r',encoding="UTF-8")
out_f_name = 'tmp_'+f_name
outf = open(out_f_name,"w",encoding="UTF-8")
sm3_f_name = 'YHK_MS_'+f_name
sm3f = open(sm3_f_name,"w",encoding="UTF-8")
cmbc_name = 'to_cmbc_'+f_name
cmbcf = open(cmbc_name,"w",encoding="UTF-8")
# Header row: project the output columns to outf, echo the full header to cmbcf.
line = data_file.readline().replace('\n','')
varstr = line.split(',')
tmp_line = ','.join([varstr[2],varstr[3],varstr[4],varstr[5],varstr[7],varstr[8],varstr[9]])
outf.write(tmp_line+"\n")
cmbc_line = line
cmbcf.write(cmbc_line+"\n")
line = data_file.readline().replace('\n', '')
while line:
    varstr = line.split(',')
    phone=varstr[1]
    # SM3 digest of the UTF-8 encoded phone number (second column).
    sm3_phone = sm3_hash(bytes_to_list(phone.encode(encoding="utf-8")))
    #print(sm3_phone)
    varstr[3]=sm3_phone
    tmp_line = ','.join([varstr[2],varstr[3],varstr[4],varstr[5],varstr[7],varstr[8],varstr[9]])
    outf.writelines(tmp_line+"\n")
    cmbc_line=','.join(varstr)
    cmbcf.write(cmbc_line+"\n")
    line = data_file.readline().replace('\n', '')
data_file.close()
outf.close()
cmbcf.close()
# --- Read back the intermediate file that now carries hashed phone numbers ---
data_file=open(out_f_name,'r',encoding="UTF-8")
content = data_file.read()
content = content.strip() # strip leading/trailing blank lines
sm3f.write(content)
sm3f.write("\n=====\n")
#print("【文件内容】\n"+content)
# --- Sign the file content ---
#str_enc = encrypt(str_sm3)
#random_hex_str = random_hex(sm2_crypt.para_len)
#str_enc = sm2_crypt.sign(content.encode(encoding="utf-8"), random_hex_str) # hex string
with open('cmbc_private.pem','r') as f:
    privkey = rsa.PrivateKey.load_pkcs1(f.read().encode())
str_enc = rsa.sign(content.encode(encoding="utf-8"), privkey, 'SHA-1').hex()
print("【对内容签名】\n"+str_enc)
sm3f.write(str_enc+"\n")
sm3f.close()
data_file.close()
| 22.447917 | 96 | 0.699768 |
7952a35e8642bae9c7116167a411880bd488e1a3 | 572 | py | Python | blmath/util/decorators.py | metabolize/blmath | 8ea8d7be60349a60ffeb08a3e34fca20ef9eb0da | [
"BSD-2-Clause"
] | 6 | 2019-09-28T16:48:34.000Z | 2022-03-25T17:05:46.000Z | blmath/util/decorators.py | metabolize/blmath | 8ea8d7be60349a60ffeb08a3e34fca20ef9eb0da | [
"BSD-2-Clause"
] | 6 | 2019-09-09T16:42:02.000Z | 2021-06-25T15:25:50.000Z | blmath/util/decorators.py | metabolize/blmath | 8ea8d7be60349a60ffeb08a3e34fca20ef9eb0da | [
"BSD-2-Clause"
class setter_property(object):
    """Data descriptor whose write path is user-supplied.

    Decorating a method turns attribute assignment into a call to that
    method; deletion calls the same method with ``None``.  Reads come
    straight from the instance ``__dict__`` (where the wrapped setter is
    expected to store the value) and default to ``None`` when unset.

    Note: setter_property incorrectly triggers method-hidden in pylint.
    """

    def __init__(self, fn, doc=None):
        self.fn = fn
        self.__name__ = fn.__name__
        self.__doc__ = fn.__doc__ if doc is None else doc

    def __get__(self, obj, cls):
        # An attribute that was never assigned reads as None.
        return obj.__dict__.get(self.__name__)

    def __set__(self, obj, value):
        return self.fn(obj, value)

    def __delete__(self, obj):
        # Deletion is modeled as assigning None through the same setter.
        return self.fn(obj, None)
| 30.105263 | 71 | 0.611888 |
7952a37a9b0f7a0f77f38b999a7e0727e06db1b4 | 1,996 | py | Python | gomiget_aichi_ichinomiya.py | suconbu/gomiget | 3ea5ef9b37e27428ff844f715b2c2be92e32f7c6 | [
"CC0-1.0"
] | null | null | null | gomiget_aichi_ichinomiya.py | suconbu/gomiget | 3ea5ef9b37e27428ff844f715b2c2be92e32f7c6 | [
"CC0-1.0"
] | null | null | null | gomiget_aichi_ichinomiya.py | suconbu/gomiget | 3ea5ef9b37e27428ff844f715b2c2be92e32f7c6 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
import sys
from gomireader import GomiReader, PatternValuePair
def main(argv):
    """Configure a GomiReader for Ichinomiya City, Aichi (municipality code
    232033) and print the scraped garbage-sorting data as JSON."""
    reader = GomiReader()
    reader.municipality_id = "232033"
    reader.municipality_name = "愛知県一宮市"
    reader.datasource_url = "https://www.city.ichinomiya.aichi.jp/kurashi/gomi/1000388/1000167/index.html"
    reader.target_url_base = "https://www.city.ichinomiya.aichi.jp/kurashi/gomi/1000388/1000167/"
    reader.target_pages = [ "1001702.html", "1001703.html", "1001700.html", "1001701.html", "1001698.html", "1001699.html", "1001696.html", "1001697.html", "1001694.html", "1001695.html" ]
    # Selector and strptime-style pattern used to read the page update date.
    reader.datetime_selector = "p.update"
    reader.datetime_pattern = "更新日\r\n%Y年%m月%d日"
    reader.article_row_selector = "tbody > tr"
    reader.article_column_selector = "td"
    # NOTE(review): this is a set literal while note_to_category_id below
    # is a list -- presumably GomiReader only iterates these, but confirm
    # that match ordering is not significant for pattern resolution.
    reader.category_to_category_id = {
        PatternValuePair(r"/可燃ごみ.*/", "burnable"),
        PatternValuePair(r"/不燃ごみ.*/", "unburnable"),
        PatternValuePair(r"/粗大ごみ.*/", "oversized"),
        PatternValuePair(r"/空き缶・金属類.*/", "metal"),
        PatternValuePair(r"/プラスチック製容器包装.*/", "plasticpackaging"),
        PatternValuePair("ペットボトル", "petbottle"),
        PatternValuePair(r"/町内回収資源.*/", "localcollection"),
        PatternValuePair("戸別収集", "housecollection"),
        PatternValuePair(r"/市では収集.*できません.*/", "uncollectible")
    }
    reader.note_to_category_id = [
        PatternValuePair(r"/家電リサイクル法対象品目.*/", "legalrecycling")
    ]
    # Display names for each category id used above.
    reader.category_definitions = {
        "burnable": { "name": "可燃ごみ" },
        "unburnable": { "name": "不燃ごみ" },
        "oversized": { "name": "粗大ごみ" },
        "metal": { "name": "金属ごみ" },
        "plasticpackaging": { "name": "プラ容器包装" },
        "petbottle": { "name": "ペットボトル" },
        "localcollection": { "name": "町内回収資源" },
        "housecollection": { "name": "戸別回収" },
        "legalrecycling": { "name": "家電リサイクル法対象品" },
        "uncollectible": { "name": "回収できません" }
    }
    print(reader.to_json())
if __name__ == "__main__":
    main(sys.argv)
| 42.468085 | 188 | 0.627756 |
7952a39f1d59b63888c5639030f6dd3d1d075f38 | 26 | py | Python | junior_web/__init__.py | h4c-project-return/junior-web-azure | 121235ec81af8e60c482d714f8dd2449867ed388 | [
"FTL"
] | 1 | 2016-11-16T16:15:54.000Z | 2016-11-16T16:15:54.000Z | junior_web/__init__.py | h4c-project-return/junior-web-azure | 121235ec81af8e60c482d714f8dd2449867ed388 | [
"FTL"
] | 1 | 2021-02-08T20:17:40.000Z | 2021-02-08T20:17:40.000Z | junior_web/__init__.py | h4c-project-return/junior-web-azure | 121235ec81af8e60c482d714f8dd2449867ed388 | [
"FTL"
] | null | null | null | from jr_services import *
| 13 | 25 | 0.807692 |
7952a44a7bff306d8a160b60c73a249c622c65e7 | 789 | py | Python | setup.py | swaroopch/spherecluster | 3b33e6c6242cf2c83d7b502c662f57455e4f175f | [
"MIT"
] | null | null | null | setup.py | swaroopch/spherecluster | 3b33e6c6242cf2c83d7b502c662f57455e4f175f | [
"MIT"
] | null | null | null | setup.py | swaroopch/spherecluster | 3b33e6c6242cf2c83d7b502c662f57455e4f175f | [
"MIT"
] | null | null | null | from __future__ import print_function
import sys
from setuptools import setup, find_packages
# Read runtime dependencies from requirements.txt, one per line.
# BUG FIX: readlines() keeps the trailing '\n', so the old `if l` filter was
# always true and blank lines produced empty '' entries in install_requires;
# filter on the stripped text instead.
with open('requirements.txt') as f:
    INSTALL_REQUIRES = [l.strip() for l in f if l.strip()]
# try:
# import numpy # NOQA
# except ImportError:
# print('numpy is required during installation')
# sys.exit(1)
# try:
# import scipy # NOQA
# except ImportError:
# print('scipy is required during installation')
# sys.exit(1)
# Package metadata; install_requires is read from requirements.txt above.
setup(
    name='hsspherecluster',
    version='0.1.7',
    description='Clustering on the unit hypersphere in scikit-learn.',
    author='Jason Laska',
    author_email='jason@claralabs.com',
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    url='https://github.com/clara-labs/spherecluster',
    license='MIT',
)
| 24.65625 | 70 | 0.690748 |
7952a5ac7d0a0adb62c5421941263d08351edca5 | 11,400 | py | Python | tests/test_utils.py | NitefullWind/httprunner | 00421a5cbc965105ec49092da948b8fdaabf545f | [
"Apache-2.0"
] | null | null | null | tests/test_utils.py | NitefullWind/httprunner | 00421a5cbc965105ec49092da948b8fdaabf545f | [
"Apache-2.0"
] | null | null | null | tests/test_utils.py | NitefullWind/httprunner | 00421a5cbc965105ec49092da948b8fdaabf545f | [
"Apache-2.0"
] | null | null | null | import io
import os
import shutil
from httprunner import exceptions, loader, utils
from tests.base import ApiServerUnittest
class TestUtils(ApiServerUnittest):
def test_set_os_environ(self):
    """set_os_environ must publish every mapping entry into os.environ."""
    self.assertNotIn("abc", os.environ)
    utils.set_os_environ({"abc": "123"})
    self.assertIn("abc", os.environ)
    self.assertEqual(os.environ["abc"], "123")
def test_query_json(self):
    """query_json resolves dotted paths (dict keys, list indexes, string
    indexes) and raises ExtractFailure for anything unresolvable."""
    json_content = {
        "ids": [1, 2, 3, 4],
        "person": {
            "name": {
                "first_name": "Leo",
                "last_name": "Lee",
            },
            "age": 29,
            "cities": ["Guangzhou", "Shenzhen"]
        }
    }
    # Valid list index.
    query = "ids.2"
    result = utils.query_json(json_content, query)
    self.assertEqual(result, 3)
    # Non-numeric index into a list fails.
    query = "ids.str_key"
    with self.assertRaises(exceptions.ExtractFailure):
        utils.query_json(json_content, query)
    # Out-of-range index fails.
    query = "ids.5"
    with self.assertRaises(exceptions.ExtractFailure):
        utils.query_json(json_content, query)
    query = "person.age"
    result = utils.query_json(json_content, query)
    self.assertEqual(result, 29)
    # Missing dict key fails.
    query = "person.not_exist_key"
    with self.assertRaises(exceptions.ExtractFailure):
        utils.query_json(json_content, query)
    query = "person.cities.0"
    result = utils.query_json(json_content, query)
    self.assertEqual(result, "Guangzhou")
    query = "person.name.first_name"
    result = utils.query_json(json_content, query)
    self.assertEqual(result, "Leo")
    # A numeric index into a string yields a single character.
    query = "person.name.first_name.0"
    result = utils.query_json(json_content, query)
    self.assertEqual(result, "L")
def current_validators(self):
    """Smoke-check the built-in comparator functions from httprunner.built_in.

    NOTE(review): the name lacks the ``test_`` prefix, so unittest/pytest
    never collects this method -- rename it if it is meant to run.
    """
    from httprunner import built_in
    functions_mapping = loader.load_module_functions(built_in)
    functions_mapping["equals"](None, None)
    functions_mapping["equals"](1, 1)
    functions_mapping["equals"]("abc", "abc")
    # Comparators raise AssertionError on mismatch.
    with self.assertRaises(AssertionError):
        functions_mapping["equals"]("123", 123)
    functions_mapping["less_than"](1, 2)
    functions_mapping["less_than_or_equals"](2, 2)
    functions_mapping["greater_than"](2, 1)
    functions_mapping["greater_than_or_equals"](2, 2)
    functions_mapping["not_equals"](123, "123")
    functions_mapping["length_equals"]("123", 3)
    functions_mapping["length_greater_than"]("123", 2)
    functions_mapping["length_greater_than_or_equals"]("123", 3)
    functions_mapping["contains"]("123abc456", "3ab")
    functions_mapping["contains"](['1', '2'], "1")
    functions_mapping["contains"]({'a':1, 'b':2}, "a")
    functions_mapping["contained_by"]("3ab", "123abc456")
    # NOTE(review): non-raw pattern with \w relies on Python keeping unknown
    # escapes -- a raw string literal would be safer.
    functions_mapping["regex_match"]("123abc456", "^123\w+456$")
    with self.assertRaises(AssertionError):
        functions_mapping["regex_match"]("123abc456", "^12b.*456$")
    # startswith/endswith coerce non-string operands.
    functions_mapping["startswith"]("abc123", "ab")
    functions_mapping["startswith"]("123abc", 12)
    functions_mapping["startswith"](12345, 123)
    functions_mapping["endswith"]("abc123", 23)
    functions_mapping["endswith"]("123abc", "abc")
    functions_mapping["endswith"](12345, 45)
    # type_match accepts both type objects and type names as strings.
    functions_mapping["type_match"](580509390, int)
    functions_mapping["type_match"](580509390, "int")
    functions_mapping["type_match"]([], list)
    functions_mapping["type_match"]([], "list")
    functions_mapping["type_match"]([1], "list")
    functions_mapping["type_match"]({}, "dict")
    functions_mapping["type_match"]({"a": 1}, "dict")
def test_deep_update_dict(self):
    """deep_update_dict merges nested dicts; a None override value must
    keep the original entry."""
    base = {'a': 1, 'b': {'c': 3, 'd': 4}, 'f': 6, 'h': 123}
    override = {'a': 2, 'b': {'c': 33, 'e': 5}, 'g': 7, 'h': None}
    merged = utils.deep_update_dict(base, override)
    expected = {'a': 2, 'b': {'c': 33, 'd': 4, 'e': 5}, 'f': 6, 'g': 7, 'h': 123}
    self.assertEqual(merged, expected)
def test_handle_config_key_case(self):
    """lower_test_dict_keys lowercases test-level and request-level keys
    while leaving header names untouched."""
    nested_config = {
        "Name": "test",
        "Request": {
            "url": "http://127.0.0.1:5000",
            "METHOD": "POST",
            "Headers": {
                "Accept": "application/json",
                "User-Agent": "ios/9.3"
            }
        }
    }
    lowered = utils.lower_test_dict_keys(nested_config)
    for key in ("name", "request"):
        self.assertIn(key, lowered)
    for key in ("method", "headers"):
        self.assertIn(key, lowered["request"])
    for header in ("Accept", "User-Agent"):
        self.assertIn(header, lowered["request"]["headers"])
    # A lazy "$default_request" reference is preserved as-is.
    lazy_config = {
        "Name": "test",
        "Request": "$default_request"
    }
    lowered = utils.lower_test_dict_keys(lazy_config)
    self.assertIn("$default_request", lowered["request"])
def test_lower_dict_keys(self):
    """lower_dict_keys lowercases top-level keys only, and must tolerate
    non-dict inputs (strings, None) without raising."""
    request_dict = {
        "url": "http://127.0.0.1:5000",
        "METHOD": "POST",
        "Headers": {
            "Accept": "application/json",
            "User-Agent": "ios/9.3"
        }
    }
    lowered = utils.lower_dict_keys(request_dict)
    for top_key in ("method", "headers"):
        self.assertIn(top_key, lowered)
    # Nested keys keep their original case.
    for header_key in ("Accept", "User-Agent"):
        self.assertIn(header_key, lowered["headers"])
    # NOTE(review): these asserts compare the input with itself, so they
    # only document that non-dict values pass through without raising.
    lazy_value = "$default_request"
    utils.lower_dict_keys(lazy_value)
    self.assertEqual("$default_request", lazy_value)
    none_value = None
    utils.lower_dict_keys(none_value)
    self.assertEqual(None, none_value)
def test_ensure_mapping_format(self):
    """A list of single-pair dicts is flattened into one mapping."""
    converted = utils.ensure_mapping_format([{"a": 1}, {"b": 2}])
    self.assertIsInstance(converted, dict)
    self.assertIn("a", converted)
def test_extend_variables(self):
    """Override entries replace same-named raw variables; new ones are added."""
    merged = utils.extend_variables(
        [{"var1": "val1"}, {"var2": "val2"}],
        [{"var1": "val111"}, {"var3": "val3"}],
    )
    self.assertEqual(merged["var1"], "val111")
    self.assertEqual(merged["var2"], "val2")
    self.assertEqual(merged["var3"], "val3")
def test_extend_variables_fix(self):
    """An empty override mapping must leave the raw variables untouched."""
    merged = utils.extend_variables([{"var1": "val1"}, {"var2": "val2"}], {})
    self.assertEqual(merged["var1"], "val1")
def test_deepcopy_dict(self):
data = {
'a': 1,
'b': [2, 4],
'c': lambda x: x+1,
'd': open('LICENSE'),
'f': {
'f1': {'a1': 2},
'f2': io.open('LICENSE', 'rb'),
}
}
new_data = utils.deepcopy_dict(data)
data["a"] = 0
self.assertEqual(new_data["a"], 1)
data["f"]["f1"] = 123
self.assertEqual(new_data["f"]["f1"], {'a1': 2})
self.assertNotEqual(id(new_data["b"]), id(data["b"]))
self.assertEqual(id(new_data["c"]), id(data["c"]))
# self.assertEqual(id(new_data["d"]), id(data["d"]))
def test_create_scaffold(self):
project_name = "projectABC"
utils.create_scaffold(project_name)
self.assertTrue(os.path.isdir(os.path.join(project_name, "api")))
self.assertTrue(os.path.isdir(os.path.join(project_name, "testcases")))
self.assertTrue(os.path.isdir(os.path.join(project_name, "testsuites")))
self.assertTrue(os.path.isdir(os.path.join(project_name, "reports")))
self.assertTrue(os.path.isfile(os.path.join(project_name, "debugtalk.py")))
self.assertTrue(os.path.isfile(os.path.join(project_name, ".env")))
shutil.rmtree(project_name)
def test_cartesian_product_one(self):
parameters_content_list = [
[
{"a": 1},
{"a": 2}
]
]
product_list = utils.gen_cartesian_product(*parameters_content_list)
self.assertEqual(
product_list,
[
{"a": 1},
{"a": 2}
]
)
def test_cartesian_product_multiple(self):
parameters_content_list = [
[
{"a": 1},
{"a": 2}
],
[
{"x": 111, "y": 112},
{"x": 121, "y": 122}
]
]
product_list = utils.gen_cartesian_product(*parameters_content_list)
self.assertEqual(
product_list,
[
{'a': 1, 'x': 111, 'y': 112},
{'a': 1, 'x': 121, 'y': 122},
{'a': 2, 'x': 111, 'y': 112},
{'a': 2, 'x': 121, 'y': 122}
]
)
def test_cartesian_product_empty(self):
parameters_content_list = []
product_list = utils.gen_cartesian_product(*parameters_content_list)
self.assertEqual(product_list, [])
    def test_print_info(self):
        # smoke test: print_info must handle ints, tuples, nested dicts,
        # None and lists without raising
        info_mapping = {
            "a": 1,
            "t": (1, 2),
            "b": {
                "b1": 123
            },
            "c": None,
            "d": [4, 5]
        }
        utils.print_info(info_mapping)
    def test_prepare_dump_json_file_path_for_folder(self):
        # hrun tests/httpbin/a.b.c/ --save-tests
        project_working_directory = os.path.join(os.getcwd(), "tests")
        project_mapping = {
            "PWD": project_working_directory,
            "test_path": os.path.join(os.getcwd(), "tests", "httpbin", "a.b.c")
        }
        # a folder test_path dumps to <PWD>/logs/<relative-path>/all.<tag>.json
        self.assertEqual(
            utils.prepare_dump_json_file_abs_path(project_mapping, "loaded"),
            os.path.join(project_working_directory, "logs", "httpbin/a.b.c/all.loaded.json")
        )
    def test_prepare_dump_json_file_path_for_file(self):
        # hrun tests/httpbin/a.b.c/rpc.yml --save-tests
        project_working_directory = os.path.join(os.getcwd(), "tests")
        project_mapping = {
            "PWD": project_working_directory,
            "test_path": os.path.join(os.getcwd(), "tests", "httpbin", "a.b.c", "rpc.yml")
        }
        # a file test_path dumps to <PWD>/logs/<relative-path>/<stem>.<tag>.json
        self.assertEqual(
            utils.prepare_dump_json_file_abs_path(project_mapping, "loaded"),
            os.path.join(project_working_directory, "logs", "httpbin/a.b.c/rpc.loaded.json")
        )
    def test_prepare_dump_json_file_path_for_passed_testcase(self):
        # no "test_path" key: dump falls back to tests_mapping.<tag>.json
        project_working_directory = os.path.join(os.getcwd(), "tests")
        project_mapping = {
            "PWD": project_working_directory
        }
        self.assertEqual(
            utils.prepare_dump_json_file_abs_path(project_mapping, "loaded"),
            os.path.join(project_working_directory, "logs", "tests_mapping.loaded.json")
        )
| 36.538462 | 94 | 0.569912 |
7952a715f9fe7a1c145ed7ab1ffe37367990a540 | 713 | py | Python | self_test.py | nmondal/exit_poll_classification | 1db66c4c922332a3f79154aa4d8454c50f1dfebd | [
"MIT"
] | 1 | 2020-12-12T11:08:33.000Z | 2020-12-12T11:08:33.000Z | self_test.py | nmondal/exit_poll_classification | 1db66c4c922332a3f79154aa4d8454c50f1dfebd | [
"MIT"
] | null | null | null | self_test.py | nmondal/exit_poll_classification | 1db66c4c922332a3f79154aa4d8454c50f1dfebd | [
"MIT"
] | null | null | null | from exit_poll import verify_classification, weighted_score
INDEX_DIR = "./index_1"
LABEL_CONFIG = {
"label_column": "senti",
"label_densities": {"positive": 0.16, "negative": 0.63, "neutral": 0.21}
}
QUESTION_BANK_CSV = "./data_dir/Tweets.csv"
QUESTION_CSV_CONFIG = {"id_column": "tweet_id", "label_column": "airline_sentiment", "text_column": "text",
"label_mapping": {"positive": "positive", "negative": "negative", "neutral": "neutral"}}
def do_self_testing():
    """Run classification verification over the tweets question bank."""
    verify_classification(
        QUESTION_BANK_CSV,
        QUESTION_CSV_CONFIG,
        INDEX_DIR,
        LABEL_CONFIG,
        prediction_algorithm=weighted_score,
    )
if __name__ == '__main__':
do_self_testing()
| 33.952381 | 111 | 0.681627 |
7952a7ab40c58613076577fe9ff418a435a68de0 | 113 | py | Python | djangokeys/deprecated/__init__.py | alanverresen/django-keys | bd99a9f059af8b84b141ab8bf9a5bc5730a6ba38 | [
"MIT"
] | null | null | null | djangokeys/deprecated/__init__.py | alanverresen/django-keys | bd99a9f059af8b84b141ab8bf9a5bc5730a6ba38 | [
"MIT"
] | null | null | null | djangokeys/deprecated/__init__.py | alanverresen/django-keys | bd99a9f059af8b84b141ab8bf9a5bc5730a6ba38 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This module contains deprecated functionality of this package.
| 22.6 | 64 | 0.707965 |
7952a7e737a163807aa0156bbd17421806d19651 | 98 | py | Python | mmdet2trt/models/detectors/__init__.py | jackweiwang/mmdetection-to-tensorrt | f988ba8e923764fb1173385a1c7160b8f8b5bd99 | [
"Apache-2.0"
] | 1 | 2021-08-23T10:09:37.000Z | 2021-08-23T10:09:37.000Z | mmdet2trt/models/detectors/__init__.py | gcong18/mmdetection-to-tensorrt | c31c32ee4720ff56010bcda77bacf3a110d0526c | [
"Apache-2.0"
] | null | null | null | mmdet2trt/models/detectors/__init__.py | gcong18/mmdetection-to-tensorrt | c31c32ee4720ff56010bcda77bacf3a110d0526c | [
"Apache-2.0"
] | null | null | null | from .two_stage import TwoStageDetectorWraper
from .single_stage import SingleStageDetectorWraper
| 32.666667 | 51 | 0.897959 |
7952a8aac7811fa8fb62b68fe1486745e9d787eb | 4,932 | py | Python | docs/conf.py | aubricus/clack | 6f8b4efb329251f02005c9859091be7345beb717 | [
"MIT"
] | 1 | 2018-03-22T19:28:20.000Z | 2018-03-22T19:28:20.000Z | docs/conf.py | aubricus/clack | 6f8b4efb329251f02005c9859091be7345beb717 | [
"MIT"
] | 133 | 2018-03-30T19:31:00.000Z | 2020-01-11T04:33:46.000Z | docs/conf.py | aubricus/klak | 6f8b4efb329251f02005c9859091be7345beb717 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Klak documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
import klak
import toml
from pathlib import Path
sys.path.insert(0, os.path.abspath(".."))
pyproject = toml.load(Path.cwd().joinpath("../pyproject.toml"))
klak_version = pyproject["tool"]["poetry"]["version"]
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"klak"
copyright = u"2018, 2019, Aubrey Taylor"
author = u"Aubrey Taylor"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = klak_version
# The full version, including alpha/beta/rc tags.
release = klak_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "klakdoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, "klak.tex", u"Klak Documentation", u"Aubrey Taylor", "manual")
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "klak", u"Klak Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"klak",
u"klak Documentation",
author,
"klak",
"One line description of project.",
"Miscellaneous",
)
]
| 30.63354 | 79 | 0.681468 |
7952a8b7f5cd412bae048fbe58910f8a6075530c | 858 | py | Python | tests/tests_test_workflow/test_test_args_path_validator.py | naveenpajjuri/opensearch-build | 855f0296b36ba32b18cf4fc40b096659b5b3f1f0 | [
"Apache-2.0"
] | 62 | 2021-05-14T04:06:09.000Z | 2022-03-23T03:30:13.000Z | tests/tests_test_workflow/test_test_args_path_validator.py | naveenpajjuri/opensearch-build | 855f0296b36ba32b18cf4fc40b096659b5b3f1f0 | [
"Apache-2.0"
] | 1,590 | 2021-05-07T20:21:19.000Z | 2022-03-31T23:57:53.000Z | tests/tests_test_workflow/test_test_args_path_validator.py | mch2/opensearch-build | 39464ae1ded2b628d5b6cacb22064b715906520d | [
"Apache-2.0"
] | 134 | 2021-05-07T19:27:56.000Z | 2022-03-24T23:06:17.000Z | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import os
import unittest
from test_workflow.test_args_path_validator import TestArgsPathValidator
class TestTestArgsPathValidator(unittest.TestCase):
    """Checks TestArgsPathValidator.validate path-resolution rules."""
    def test(self):
        url = "https://ci.opensearch.org/ci/dbc/bundle-build-dashboards/1.2.0/428"
        # URLs pass through untouched
        self.assertEqual(TestArgsPathValidator.validate(url), url)
        # bare names resolve against the current working directory
        self.assertEqual(
            TestArgsPathValidator.validate("test"),
            os.path.join(os.getcwd(), "test")
        )
        # "." resolves to the current working directory itself
        self.assertEqual(
            TestArgsPathValidator.validate("."),
            os.path.join(os.getcwd())
        )
7952aa0c06e2e46d4bc4e14064c747d6f3699444 | 4,113 | py | Python | layers.py | HighCWu/import_daz | d8fe108e4a0c5df3d8077b10f25b2deee0bd0f1f | [
"BSD-2-Clause"
] | null | null | null | layers.py | HighCWu/import_daz | d8fe108e4a0c5df3d8077b10f25b2deee0bd0f1f | [
"BSD-2-Clause"
] | null | null | null | layers.py | HighCWu/import_daz | d8fe108e4a0c5df3d8077b10f25b2deee0bd0f1f | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2016-2021, Thomas Larsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import bpy
from .mhx import *
from .error import *
# Layout of the MHX rig layer-selector UI. Each entry is either a pair of
# (layer_index, label, ui_property) tuples for the left/right columns, or
# the ('Left', 'Right') string pair that marks the column headers.
MhxLayers = [
    ((L_MAIN, 'Root', 'MhxRoot'),
     (L_SPINE , 'Spine', 'MhxFKSpine')),
    ((L_HEAD, 'Head', 'MhxHead'),
     (L_FACE, 'Face', 'MhxFace')),
    ((L_TWEAK, 'Tweak', 'MhxTweak'),
     (L_CUSTOM, 'Custom', 'MhxCustom')),
    ('Left', 'Right'),
    ((L_LARMIK, 'IK Arm', 'MhxIKArm'),
     (L_RARMIK, 'IK Arm', 'MhxIKArm')),
    ((L_LARMFK, 'FK Arm', 'MhxFKArm'),
     (L_RARMFK, 'FK Arm', 'MhxFKArm')),
    ((L_LLEGIK, 'IK Leg', 'MhxIKLeg'),
     (L_RLEGIK, 'IK Leg', 'MhxIKLeg')),
    ((L_LLEGFK, 'FK Leg', 'MhxFKLeg'),
     (L_RLEGFK, 'FK Leg', 'MhxFKLeg')),
    ((L_LEXTRA, 'Extra', 'MhxExtra'),
     (L_REXTRA, 'Extra', 'MhxExtra')),
    ((L_LHAND, 'Hand', 'MhxHand'),
     (L_RHAND, 'Hand', 'MhxHand')),
    ((L_LFINGER, 'Fingers', 'MhxFingers'),
     (L_RFINGER, 'Fingers', 'MhxFingers')),
    ((L_LTOE, 'Toes', 'MhxToe'),
     (L_RTOE, 'Toes', 'MhxToe')),
]
# Blender operator: turn on every bone layer listed in MhxLayers.
class DAZ_OT_MhxEnableAllLayers(DazOperator):
    bl_idname = "daz.pose_enable_all_layers"
    bl_label = "Enable all layers"
    bl_options = {'UNDO'}
    def run(self, context):
        from .finger import getRigMeshes
        rig,_meshes = getRigMeshes(context)
        for (left,right) in MhxLayers:
            # skip the ('Left', 'Right') header row, which holds plain strings
            if type(left) != str:
                for (n, name, prop) in [left,right]:
                    rig.data.layers[n] = True
# Blender operator: disable all bone layers except the one holding the
# active pose bone (or layer 0 when no bone is active).
class DAZ_OT_MhxDisableAllLayers(DazOperator):
    bl_idname = "daz.pose_disable_all_layers"
    bl_label = "Disable all layers"
    bl_options = {'UNDO'}
    def run(self, context):
        from .finger import getRigMeshes
        rig,_meshes = getRigMeshes(context)
        layers = 32*[False]
        pb = context.active_pose_bone
        if pb:
            # keep only the first layer the active bone lives on
            for n in range(32):
                if pb.bone.layers[n]:
                    layers[n] = True
                    break
        else:
            # no active bone: fall back to layer 0 only
            layers[0] = True
        if rig:
            rig.data.layers = layers
#----------------------------------------------------------
# Initialize
#----------------------------------------------------------
# Operator classes (un)registered by register()/unregister() below.
classes = [
    DAZ_OT_MhxEnableAllLayers,
    DAZ_OT_MhxDisableAllLayers,
]
def register():
    # called by Blender when the add-on module is enabled
    for cls in classes:
        bpy.utils.register_class(cls)
def unregister():
    # called by Blender when the add-on module is disabled
    for cls in classes:
        bpy.utils.unregister_class(cls)
| 36.078947 | 82 | 0.605154 |
7952ab1f57259cd05a720231874730e4e5b15abf | 23,286 | py | Python | pinakes/main/catalog/views.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | null | null | null | pinakes/main/catalog/views.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | null | null | null | pinakes/main/catalog/views.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | null | null | null | """ Default views for Catalog."""
import logging
import django_rq
from django.utils.translation import gettext_lazy as _
from django.shortcuts import get_object_or_404
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework_extensions.mixins import NestedViewSetMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from drf_spectacular.utils import (
extend_schema,
extend_schema_view,
OpenApiParameter,
OpenApiResponse,
)
from pinakes.common.auth import keycloak_django
from pinakes.common.auth.keycloak_django.utils import (
parse_scope,
)
from pinakes.common.auth.keycloak_django.views import (
PermissionQuerySetMixin,
)
from pinakes.common.serializers import TaskSerializer
from pinakes.common.tag_mixin import TagMixin
from pinakes.common.image_mixin import ImageMixin
from pinakes.common.queryset_mixin import QuerySetMixin
from pinakes.main.models import Tenant
from pinakes.main.common.models import Group
from pinakes.main.catalog.exceptions import (
BadParamsException,
)
from pinakes.main.catalog.models import (
ApprovalRequest,
ServicePlan,
Order,
Portfolio,
PortfolioItem,
ProgressMessage,
)
from pinakes.main.catalog import permissions
from pinakes.main.catalog.serializers import (
ApprovalRequestSerializer,
ServicePlanSerializer,
CopyPortfolioSerializer,
CopyPortfolioItemSerializer,
ModifiedServicePlanInSerializer,
NextNameInSerializer,
NextNameOutSerializer,
OrderItemSerializer,
OrderItemDocSerializer,
OrderSerializer,
PortfolioItemSerializer,
PortfolioItemInSerializer,
PortfolioSerializer,
ProgressMessageSerializer,
TenantSerializer,
SharingRequestSerializer,
SharingPermissionSerializer,
)
from pinakes.main.catalog.services.collect_tag_resources import (
CollectTagResources,
)
from pinakes.main.catalog.services.copy_portfolio import (
CopyPortfolio,
)
from pinakes.main.catalog.services.copy_portfolio_item import (
CopyPortfolioItem,
)
from pinakes.main.catalog.services import (
name,
)
from pinakes.main.catalog.services.refresh_service_plan import (
RefreshServicePlan,
)
from pinakes.main.catalog.services.submit_approval_request import (
SubmitApprovalRequest,
)
from pinakes.main.catalog.services.validate_order_item import (
ValidateOrderItem,
)
from pinakes.main.catalog import tasks
# Create your views here.
logger = logging.getLogger("catalog")
@extend_schema_view(
    retrieve=extend_schema(
        description="Get a tenant by its id",
    ),
    list=extend_schema(
        description="List all tenants",
    ),
)
class TenantViewSet(viewsets.ReadOnlyModelViewSet):
    """API endpoint for listing tenants."""
    # ReadOnlyModelViewSet: only list/retrieve are exposed
    queryset = Tenant.objects.all()
    serializer_class = TenantSerializer
    permission_classes = (IsAuthenticated,)  # any authenticated user may read
    ordering = ("id",)
    filterset_fields = "__all__"
@extend_schema_view(
    retrieve=extend_schema(
        description="Get a portfolio by its id",
    ),
    list=extend_schema(
        description="List all portfolios",
        responses={
            200: OpenApiResponse(
                response=PortfolioSerializer,
                description="Return a list of portfolios. An empty list indicates either undefined portfolios in the system or inaccessibility to the caller.",
            )
        },
    ),
    create=extend_schema(
        description="Create a new portfolio",
    ),
    partial_update=extend_schema(
        description="Edit an existing portfolio",
    ),
    destroy=extend_schema(
        description="Delete an existing portfolio",
    ),
)
class PortfolioViewSet(
    ImageMixin,
    TagMixin,
    NestedViewSetMixin,
    PermissionQuerySetMixin,
    QuerySetMixin,
    viewsets.ModelViewSet,
):
    """API endpoint for listing and creating portfolios.

    Sharing is backed by Keycloak UMA resources; share/unshare run as
    asynchronous django_rq jobs and return 202 with a task id.
    """
    serializer_class = PortfolioSerializer
    http_method_names = ["get", "post", "head", "patch", "delete"]
    permission_classes = (IsAuthenticated, permissions.PortfolioPermission)
    ordering = ("-id",)
    filterset_fields = ("name", "description", "created_at", "updated_at")
    search_fields = ("name", "description")
    @extend_schema(
        description="Make a copy of the portfolio",
        request=CopyPortfolioSerializer,
        responses={
            200: OpenApiResponse(
                PortfolioSerializer, description="The new portfolio"
            )
        },
    )
    @action(methods=["post"], detail=True)
    def copy(self, request, pk):
        """Copy the specified pk portfolio."""
        portfolio = self.get_object()
        options = {
            "portfolio": portfolio,
            # fall back to the source portfolio's name when none is supplied
            "portfolio_name": request.data.get(
                "portfolio_name", portfolio.name
            ),
        }
        svc = CopyPortfolio(portfolio, options).process()
        serializer = self.get_serializer(svc.new_portfolio)
        return Response(serializer.data)
    @extend_schema(
        description=(
            "Share a portfolio with specified groups and permissions."
        ),
        request=SharingRequestSerializer,
        responses={status.HTTP_202_ACCEPTED: TaskSerializer},
    )
    @action(methods=["post"], detail=True)
    def share(self, request, pk=None):
        portfolio = self.get_object()
        data = self._parse_share_policy(request, portfolio)
        group_ids = [group.id for group in data["groups"]]
        # permission grants are applied asynchronously; respond with the job
        job = django_rq.enqueue(
            tasks.add_portfolio_permissions,
            portfolio.id,
            group_ids,
            data["permissions"],
        )
        serializer = TaskSerializer(job)
        return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
    @extend_schema(
        description=(
            "Remove a portfolio sharing with specified groups and permissions."
        ),
        request=SharingRequestSerializer,
        responses={
            status.HTTP_200_OK: None,
            status.HTTP_202_ACCEPTED: TaskSerializer,
        },
    )
    @action(methods=["post"], detail=True)
    def unshare(self, request, pk=None):
        portfolio = self.get_object()
        data = self._parse_share_policy(request, portfolio)
        # never shared with Keycloak: nothing to remove, succeed immediately
        if portfolio.keycloak_id is None:
            return Response(status=status.HTTP_200_OK)
        group_ids = [group.id for group in data["groups"]]
        job = django_rq.enqueue(
            tasks.remove_portfolio_permissions,
            portfolio.id,
            group_ids,
            data["permissions"],
        )
        serializer = TaskSerializer(job)
        return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
    @extend_schema(
        description="Retrieve a portfolio sharing info.",
        responses=SharingPermissionSerializer(many=True),
    )
    @action(
        methods=["get"],
        detail=True,
        pagination_class=None,
        filterset_fields=None,
    )
    def share_info(self, request, pk=None):
        portfolio = self.get_object()
        if not portfolio.keycloak_id:
            return Response([])
        client = keycloak_django.get_uma_client()
        permissions = client.find_permissions_by_resource(
            portfolio.keycloak_id
        )
        # only group-based permissions are reported here
        permissions = list(
            filter(keycloak_django.is_group_permission, permissions)
        )
        groups_lookup = [permission.groups[0] for permission in permissions]
        groups = Group.objects.filter(path__in=groups_lookup)
        groups_by_path = {group.path: group for group in groups}
        # collect all scopes per group path (a group may appear in
        # multiple Keycloak permissions)
        permissions_by_group = {}
        for permission in permissions:
            if permission.groups[0] in permissions_by_group:
                permissions_by_group[permission.groups[0]].extend(
                    permission.scopes
                )
            else:
                permissions_by_group[permission.groups[0]] = permission.scopes
        data = []
        for path, scopes in permissions_by_group.items():
            # group may be None if the Keycloak group has no local record
            group = groups_by_path.get(path)
            scopes = [parse_scope(portfolio, scope) for scope in scopes]
            data.append(
                {
                    "group_id": group.id if group else None,
                    "group_name": group.name if group else None,
                    "permissions": scopes,
                }
            )
        return Response(data)
    def _parse_share_policy(self, request, portfolio):
        # validate the requested groups/permissions against the scopes
        # this portfolio actually supports; raises 400 on bad input
        serializer = SharingRequestSerializer(
            data=request.data,
            context={
                "valid_scopes": portfolio.keycloak_actions(),
            },
        )
        serializer.is_valid(raise_exception=True)
        return serializer.validated_data
    def perform_destroy(self, instance):
        # drop the matching Keycloak UMA resource before the DB row
        if instance.keycloak_id:
            client = keycloak_django.get_uma_client()
            client.delete_resource(instance.keycloak_id)
        super().perform_destroy(instance)
@extend_schema_view(
    retrieve=extend_schema(
        description="Get a portfolio item by its id",
    ),
    list=extend_schema(
        description="List all portfolio items",
    ),
    partial_update=extend_schema(
        description="Edit an existing portfolio item",
    ),
    destroy=extend_schema(
        description="Delete an existing portfolio item",
    ),
)
class PortfolioItemViewSet(
    ImageMixin,
    TagMixin,
    NestedViewSetMixin,
    PermissionQuerySetMixin,
    QuerySetMixin,
    viewsets.ModelViewSet,
):
    """API endpoint for listing and creating portfolio items."""
    serializer_class = PortfolioItemSerializer
    http_method_names = ["get", "post", "head", "patch", "delete"]
    permission_classes = (IsAuthenticated, permissions.PortfolioItemPermission)
    ordering = ("-id",)
    filterset_fields = (
        "name",
        "description",
        "service_offering_ref",
        "portfolio",
        "created_at",
        "updated_at",
    )
    search_fields = ("name", "description")
    parent_field_names = ("portfolio",)
    @extend_schema(
        description="Create a new portfolio item",
        request=PortfolioItemInSerializer,
    )
    def create(self, request, *args, **kwargs):
        serializer = PortfolioItemInSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(
                {"errors": serializer.errors},
                status=status.HTTP_400_BAD_REQUEST,
            )
        portfolio_id = request.data.get("portfolio")
        portfolio = get_object_or_404(Portfolio, pk=portfolio_id)
        # creation permission is checked against the parent portfolio
        self.check_object_permissions(request, portfolio)
        output_serializer = PortfolioItemSerializer(
            serializer.save(user=self.request.user),
            context=self.get_serializer_context(),
        )
        return Response(output_serializer.data, status=status.HTTP_201_CREATED)
    @extend_schema(
        description="Make a copy of the portfolio item",
        request=CopyPortfolioItemSerializer,
        responses={
            200: OpenApiResponse(
                PortfolioItemSerializer,
                description="The copied portfolio item",
            )
        },
    )
    @action(methods=["post"], detail=True)
    def copy(self, request, pk):
        """Copy the specified pk portfolio item."""
        portfolio_item = self.get_object()
        options = {
            "portfolio_item_id": portfolio_item.id,
            # destination defaults to the item's own portfolio
            "portfolio_id": request.data.get(
                "portfolio_id", portfolio_item.portfolio.id
            ),
            "portfolio_item_name": request.data.get(
                "portfolio_item_name", portfolio_item.name
            ),
        }
        svc = CopyPortfolioItem(portfolio_item, options).process()
        serializer = self.get_serializer(svc.new_portfolio_item)
        return Response(serializer.data)
    @extend_schema(
        description="Get next available portfolio item name",
        parameters=[
            OpenApiParameter(
                "destination_portfolio_id",
                required=False,
                description="Retrieve next available portfolio item name from destination portfolio",
            ),
        ],
        request=NextNameInSerializer,
        responses={
            200: OpenApiResponse(
                NextNameOutSerializer(many=False),
                description="The next available portfolio item name",
            )
        },
    )
    @action(methods=["get"], detail=True)
    def next_name(self, request, pk):
        """Retrieve next available portfolio item name"""
        # NOTE(review): unlike copy(), this fetches directly instead of
        # self.get_object(), so object-level permission checks and queryset
        # scoping are bypassed here — confirm whether that is intentional
        portfolio_item = get_object_or_404(PortfolioItem, pk=pk)
        destination_portfolio_id = request.GET.get(
            "destination_portfolio_id", None
        )
        portfolio = (
            portfolio_item.portfolio
            if destination_portfolio_id is None
            else Portfolio.objects.get(id=destination_portfolio_id)
        )
        portfolio_item_names = [
            item.name
            for item in PortfolioItem.objects.filter(portfolio=portfolio)
        ]
        # pick a copy-style name that does not collide in the destination
        available_name = name.create_copy_name(
            portfolio_item.name, portfolio_item_names
        )
        output_serializer = NextNameOutSerializer(
            {"next_name": available_name}
        )
        return Response(output_serializer.data)
@extend_schema_view(
    retrieve=extend_schema(
        description="Get a specific order based on the order ID",
        parameters=[
            OpenApiParameter(
                "extra",
                required=False,
                enum=["true", "false"],
                description="Include extra data such as order items",
            ),
        ],
    ),
    list=extend_schema(
        description="Get a list of orders associated with the logged in user.",
        parameters=[
            OrderSerializer,
            OpenApiParameter(
                "extra",
                required=False,
                enum=["true", "false"],
                description="Include extra data such as order items",
            ),
        ],
    ),
    create=extend_schema(
        description="Create a new order",
    ),
    destroy=extend_schema(
        description="Delete an existing order",
    ),
)
class OrderViewSet(
    NestedViewSetMixin,
    PermissionQuerySetMixin,
    QuerySetMixin,
    viewsets.ModelViewSet,
):
    """API endpoint for listing and creating orders."""
    serializer_class = OrderSerializer
    http_method_names = ["get", "post", "head", "delete"]
    permission_classes = (IsAuthenticated, permissions.OrderPermission)
    ordering = ("-id",)
    filterset_fields = (
        "state",
        "order_request_sent_at",
        "created_at",
        "updated_at",
        "completed_at",
    )
    search_fields = ("state",)
    @extend_schema(
        description="Submit the given order",
        request=None,
        responses={200: OrderSerializer},
    )
    @action(methods=["post"], detail=True)
    def submit(self, request, pk):
        """Orders the specified pk order."""
        order = self.get_object()
        # an order with no order items cannot be submitted
        if not order.product:
            raise BadParamsException(
                _("Order {} does not have related order items").format(
                    order.id
                )
            )
        ValidateOrderItem(order.product).process()
        # gather tags from all related resources for the approval workflow
        tag_resources = CollectTagResources(order).process().tag_resources
        message = _("Computed tags for order {}: {}").format(
            order.id, tag_resources
        )
        order.update_message(ProgressMessage.Level.INFO, message)
        logger.info("Creating approval request for order id %d", order.id)
        SubmitApprovalRequest(tag_resources, order).process()
        serializer = self.get_serializer(order)
        return Response(serializer.data)
    # TODO:
    @extend_schema(
        description="Cancel the given order",
        request=None,
        responses={204: None},
    )
    @action(methods=["patch"], detail=True)
    def cancel(self, request, pk):
        """Cancels the specified pk order."""
        # NOTE(review): not implemented yet — the action body returns None,
        # which DRF cannot render; confirm clients never hit this endpoint
        pass
@extend_schema_view(
    retrieve=extend_schema(
        tags=("orders", "order_items"),
        description="Get a specific order item based on the order item ID",
        parameters=[
            OpenApiParameter(
                "extra",
                required=False,
                enum=["true", "false"],
                description="Include extra data such as portfolio item details",
            ),
        ],
    ),
    list=extend_schema(
        tags=("orders", "order_items"),
        description="Get a list of order items associated with the logged in user.",
        parameters=[
            OrderItemDocSerializer,
            OpenApiParameter(
                "extra",
                required=False,
                enum=["true", "false"],
                description="Include extra data such as portfolio item details",
            ),
        ],
        responses={
            200: OpenApiResponse(
                OrderItemSerializer,
                description="Return a list of order items. An empty list indicates either undefined orders in the system or inaccessibility to the caller.",
            ),
        },
    ),
    create=extend_schema(
        tags=("orders", "order_items"),
        description="Add an order item to an order in pending state",
    ),
    destroy=extend_schema(
        description="Delete an existing order item",
    ),
)
class OrderItemViewSet(
    NestedViewSetMixin,
    PermissionQuerySetMixin,
    QuerySetMixin,
    viewsets.ModelViewSet,
):
    """API endpoint for listing and creating order items."""
    serializer_class = OrderItemSerializer
    http_method_names = ["get", "post", "head", "delete"]
    permission_classes = (IsAuthenticated, permissions.OrderItemPermission)
    ordering = ("-id",)
    filterset_fields = (
        "name",
        "count",
        "state",
        "portfolio_item",
        "order",
        "external_url",
        "order_request_sent_at",
        "created_at",
        "updated_at",
        "completed_at",
    )
    search_fields = ("name", "state")
    parent_field_names = ("order",)
    def perform_create(self, serializer):
        # attach the new item to the parent order from the nested route URL
        serializer.save(
            order_id=self.kwargs["order_id"],
        )
@extend_schema_view(
    list=extend_schema(
        description="Get a list of approval requests associated with an order. As the order is being approved one can check the status of the approvals.",
    ),
)
class ApprovalRequestViewSet(
    NestedViewSetMixin, QuerySetMixin, viewsets.ModelViewSet
):
    """API endpoint for listing approval requests."""
    queryset = ApprovalRequest.objects.all()
    serializer_class = ApprovalRequestSerializer
    http_method_names = ["get"]  # read-only endpoint
    permission_classes = (IsAuthenticated,)
    ordering = ("-id",)
    filterset_fields = (
        "order",
        "approval_request_ref",
        "state",
        "reason",
        "request_completed_at",
        "created_at",
        "updated_at",
    )
    search_fields = (
        "state",
        "reason",
    )
    parent_field_names = ("order",)
@extend_schema_view(
    list=extend_schema(
        description="Get a list of progress messages associated with an order. As the order is being processed the provider can update the progress messages.",
    ),
)
class ProgressMessageViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
    """API endpoint for listing progress messages.

    Mounted under both orders and order items; get_queryset() inspects the
    request path to decide which parent type the messages belong to.
    """
    serializer_class = ProgressMessageSerializer
    # Read-only endpoint.
    http_method_names = ["get"]
    permission_classes = (IsAuthenticated,)
    ordering = ("-id",)
    filterset_fields = (
        "received_at",
        "level",
        "created_at",
        "updated_at",
    )
    def get_queryset(self):
        """return queryset based on messageable_type"""
        # The path looks like .../{orders|order_items}/{id}/progress_messages/...;
        # the segment two positions before "progress_messages" names the parent type.
        path_splits = self.request.path.split("/")
        parent_type = path_splits[path_splits.index("progress_messages") - 2]
        messageable_id = self.kwargs.get("messageable_id")
        # Anything that is not an order is treated as an order item.
        messageable_type = "Order" if parent_type == "orders" else "OrderItem"
        return ProgressMessage.objects.filter(
            tenant=Tenant.current(),
            messageable_type=messageable_type,
            messageable_id=messageable_id,
        )
@extend_schema_view(
    retrieve=extend_schema(
        description="Get the service plan by the specific ID",
        parameters=[
            OpenApiParameter(
                "extra",
                required=False,
                enum=["true", "false"],
                description="Include extra data such as base_schema",
            ),
        ],
        request=None,
        responses={200: ServicePlanSerializer},
    ),
    list=extend_schema(
        description="List all service plans of the portfolio item",
        parameters=[
            OpenApiParameter(
                "extra",
                required=False,
                enum=["true", "false"],
                description="Include extra data such as base_schema",
            ),
        ],
        request=None,
        responses={200: ServicePlanSerializer},
    ),
)
class ServicePlanViewSet(
    NestedViewSetMixin, QuerySetMixin, viewsets.ModelViewSet
):
    """API endpoint for listing and creating service plans"""
    # Service plan lists are small; return them unpaginated.
    pagination_class = None
    queryset = ServicePlan.objects.all()
    serializer_class = ServicePlanSerializer
    http_method_names = ["get", "patch", "post", "head"]
    permission_classes = (IsAuthenticated,)
    filter_backends = []  # no filtering is needed
    # Scopes the queryset to the parent portfolio item from the nested route.
    parent_field_names = ("portfolio_item",)
    @extend_schema(
        description="Modify the schema of the service plan",
        request=ModifiedServicePlanInSerializer,
        responses={200: ServicePlanSerializer},
    )
    def partial_update(self, request, pk):
        """Store a user-modified schema on the service plan and return it."""
        service_plan = get_object_or_404(ServicePlan, pk=pk)
        # Validate the payload shape before persisting anything.
        serializer = ModifiedServicePlanInSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(
                {"errors": serializer.errors},
                status=status.HTTP_400_BAD_REQUEST,
            )
        service_plan.modified_schema = request.data["modified"]
        service_plan.save()
        # Respond with the full serialized plan, not just the patch payload.
        output_serializer = ServicePlanSerializer(
            service_plan, context=self.get_serializer_context()
        )
        return Response(output_serializer.data)
    @extend_schema(
        description="Reset the schema of the service plan. It deletes any user modifications and pulls in latest schema from inventory",
        request=None,
        responses={200: ServicePlanSerializer},
    )
    @action(methods=["post"], detail=True)
    def reset(self, request, pk):
        """Reset the specified service plan."""
        service_plan = get_object_or_404(ServicePlan, pk=pk)
        # Drop local modifications and the cached base schema so the
        # refresh below re-pulls everything from inventory.
        service_plan.modified_schema = None
        service_plan.base_schema = None
        service_plan.base_sha256 = None
        svc = RefreshServicePlan(service_plan).process()
        serializer = ServicePlanSerializer(
            svc.service_plan, many=False, context=self.get_serializer_context()
        )
        return Response(serializer.data)
| 31.172691 | 159 | 0.633299 |
7952ab7ffc8fad94f7ea101254063bfbcb96f541 | 2,659 | py | Python | _scripts_/DiabloUI/create_local_declarations.py | aleaksah/notes | 53f97c53d9d394ca2f315d261c2296147ae4433f | [
"Unlicense"
] | null | null | null | _scripts_/DiabloUI/create_local_declarations.py | aleaksah/notes | 53f97c53d9d394ca2f315d261c2296147ae4433f | [
"Unlicense"
] | null | null | null | _scripts_/DiabloUI/create_local_declarations.py | aleaksah/notes | 53f97c53d9d394ca2f315d261c2296147ae4433f | [
"Unlicense"
] | null | null | null | SetType(LocByName("UiDestroy"), "void __cdecl UiDestroy()")
# IDAPython snippet: apply C function prototypes to named symbols in the
# disassembly database. Each line resolves a function by name (LocByName)
# and sets its type signature (SetType) so decompilation/cross-references
# use the correct calling convention and arguments.
SetType(LocByName("UiTitleDialog"), "void __stdcall UiTitleDialog(int a1)")
SetType(LocByName("UiInitialize"), "void __cdecl UiInitialize()")
SetType(LocByName("UiCopyProtError"), "void __stdcall UiCopyProtError(int a1)")
SetType(LocByName("UiAppActivate"), "void __stdcall UiAppActivate(int a1)")
SetType(LocByName("UiValidPlayerName"), "int __stdcall UiValidPlayerName(char *a1)")
SetType(LocByName("UiSelHeroMultDialog"), "int __stdcall UiSelHeroMultDialog(void *fninfo, void *fncreate, void *fnremove, void *fnstats, int *a5, int *a6, char *name)")
SetType(LocByName("UiSelHeroSingDialog"), "int __stdcall UiSelHeroSingDialog(void *fninfo, void *fncreate, void *fnremove, void *fnstats, int *a5, char *name, int *difficulty)")
SetType(LocByName("UiCreditsDialog"), "void __stdcall UiCreditsDialog(int a1)")
SetType(LocByName("UiMainMenuDialog"), "int __stdcall UiMainMenuDialog(char *name, int *a2, void *fnSound, int a4)")
SetType(LocByName("UiProgressDialog"), "int __stdcall UiProgressDialog(HWND window, char *msg, int a3, void *fnfunc, int a5)")
SetType(LocByName("UiProfileGetString"), "int __cdecl UiProfileGetString()")
SetType(LocByName("UiProfileCallback"), "void __cdecl UiProfileCallback()")
SetType(LocByName("UiProfileDraw"), "void __cdecl UiProfileDraw()")
SetType(LocByName("UiCategoryCallback"), "void __cdecl UiCategoryCallback()")
SetType(LocByName("UiGetDataCallback"), "void __cdecl UiGetDataCallback()")
SetType(LocByName("UiAuthCallback"), "void __cdecl UiAuthCallback()")
SetType(LocByName("UiSoundCallback"), "void __cdecl UiSoundCallback()")
SetType(LocByName("UiMessageBoxCallback"), "void __cdecl UiMessageBoxCallback()")
SetType(LocByName("UiDrawDescCallback"), "void __cdecl UiDrawDescCallback()")
SetType(LocByName("UiCreateGameCallback"), "void __cdecl UiCreateGameCallback()")
SetType(LocByName("UiArtCallback"), "BOOL __cdecl UiArtCallback(int connType, unsigned int artIndex, PALETTEENTRY *palette, void *destBuffer)")
SetType(LocByName("UiSelectGame"), "int __stdcall UiSelectGame(int a1, _SNETPROGRAMDATA *client_info, _SNETPLAYERDATA *user_info, _SNETUIDATA *ui_info, _SNETVERSIONDATA *file_info, int *a6)")
SetType(LocByName("UiSelectProvider"), "int __stdcall UiSelectProvider(int a1, _SNETPROGRAMDATA *client_info, _SNETPLAYERDATA *user_info, _SNETUIDATA *ui_info, _SNETVERSIONDATA *file_info, int *type)")
SetType(LocByName("UiCreatePlayerDescription"), "int __stdcall UiCreatePlayerDescription(_uiheroinfo *info, int mode, char *desc)")
SetType(LocByName("UiSetupPlayerInfo"), "int __stdcall UiSetupPlayerInfo(char *str, _uiheroinfo *info, int mode)")
| 98.481481 | 201 | 0.793531 |
7952ac37bd8bbd543ffde0465a189a66c811c9ff | 187 | py | Python | amdnet/tests/test_train_AMDNet.py | sparks-baird/AMDNet | 7934432302feb332e9c258cd6adcb464eb51b0fe | [
"BSD-3-Clause"
] | 1 | 2022-03-29T19:56:37.000Z | 2022-03-29T19:56:37.000Z | amdnet/tests/test_train_AMDNet.py | sparks-baird/AMDNet | 7934432302feb332e9c258cd6adcb464eb51b0fe | [
"BSD-3-Clause"
] | null | null | null | amdnet/tests/test_train_AMDNet.py | sparks-baird/AMDNet | 7934432302feb332e9c258cd6adcb464eb51b0fe | [
"BSD-3-Clause"
] | null | null | null | import os
def test_train_AMDNet():
    """Smoke-test the train_AMDnet CLI end to end.

    Bug fix: the return status of os.system() was previously discarded, so
    the test passed even when the command failed or did not exist. A zero
    wait status means the command exited successfully.
    """
    exit_status = os.system(
        "train_AMDnet --material_file data/material_data.pkl --motif_file data/motif_graph.pkl --save_name save/new_model.hdf5"
    )
    assert exit_status == 0
| 20.777778 | 127 | 0.721925 |
7952ac6c9bf0ade19ca22aa852cf2e93c74311c5 | 2,706 | py | Python | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribeNatGatewaysRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 1 | 2021-03-08T02:59:17.000Z | 2021-03-08T02:59:17.000Z | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribeNatGatewaysRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribeNatGatewaysRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeNatGatewaysRequest(RpcRequest):
    """RPC request for the ECS ``DescribeNatGateways`` API (version 2014-05-26).

    Each get_*/set_* pair reads or writes the corresponding query parameter;
    the parameter name matches the API field name exactly.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeNatGateways', 'ecs')
        self.set_method('POST')
        # Wire up endpoint resolution tables when the installed core SDK supports them.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_PageNumber(self):
        return self.get_query_params().get('PageNumber')

    def set_PageNumber(self, PageNumber):
        self.add_query_param('PageNumber', PageNumber)

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize):
        self.add_query_param('PageSize', PageSize)

    def get_NatGatewayId(self):
        return self.get_query_params().get('NatGatewayId')

    def set_NatGatewayId(self, NatGatewayId):
        self.add_query_param('NatGatewayId', NatGatewayId)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_VpcId(self):
        return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId) | 33.825 | 78 | 0.767923 |
7952ac7c7b69b278528d21eb7299dd20ae3463af | 5,088 | py | Python | upb_lib/links.py | mag1024/upb-lib | 07fe4cc7b6ae884901e81b56463da53cf043d2a6 | [
"MIT"
] | null | null | null | upb_lib/links.py | mag1024/upb-lib | 07fe4cc7b6ae884901e81b56463da53cf043d2a6 | [
"MIT"
] | null | null | null | upb_lib/links.py | mag1024/upb-lib | 07fe4cc7b6ae884901e81b56463da53cf043d2a6 | [
"MIT"
] | null | null | null | """Definition of an link (scene)"""
import logging
from collections import namedtuple
from time import time
from .const import MINIMUM_BLINK_RATE, UpbCommand
from .elements import Addr, Element, Elements
from .util import check_dim_params, rate_to_seconds
LOG = logging.getLogger(__name__)
# Maps a UpbCommand enum name to the action string stored in a Link's
# last_change["command"] entry (see Link.update_device_levels).
UPB_COMMAND_TO_ACTION_MAPPING = {
    "GOTO": "goto",
    "ACTIVATE": "activated",
    "DEACTIVATE": "deactivated",
    "BLINK": "blink",
    "FADE_START": "fade_started",
    "FADE_STOP": "fade_stopped",
}
# A device that participates in a link: the device id plus the dim level
# that an ACTIVATE of the link restores on that device.
DeviceLink = namedtuple("DeviceLink", "device_id, device_level")
class LinkAddr(Addr):
    """Address of a UPB link (scene), as opposed to a device address."""

    def __init__(self, network_id, upb_id):
        # The trailing True flags this Addr as a link address.
        super().__init__(network_id, upb_id, True)
        # Cache the lookup key used by the element collections.
        self._index = "{}_{}".format(self.network_id, self.upb_id)
class Link(Element):
    """Class representing a UPB Link.

    A link groups several devices (DeviceLink entries); the command helpers
    send the corresponding packet through the PIM and then mirror the
    expected effect onto the local device objects so state stays in sync
    without waiting for status reports.
    """
    def __init__(self, addr, pim):
        super().__init__(addr, pim)
        # DeviceLink tuples of the devices participating in this link.
        self.devices = []
        # Dict describing the most recent command applied to this link
        # (timestamp/command/level/rate); see update_device_levels.
        self.last_change = None
    def add_device(self, device_link):
        """Add the device to the link."""
        self.devices.append(device_link)
    def activate(self):
        """(Helper) Activate link"""
        self._pim.send(self._pim.encoder.activate_link(self._addr), False)
        self.update_device_levels(UpbCommand.ACTIVATE)
    def deactivate(self):
        """(Helper) Deactivate link"""
        self._pim.send(self._pim.encoder.deactivate_link(self._addr), False)
        self.update_device_levels(UpbCommand.DEACTIVATE)
    def goto(self, brightness, rate=-1):
        """(Helper) Goto level"""
        # Keep the caller's rate for the change record; check_dim_params may
        # convert/clamp the values actually sent on the wire.
        saved_rate = rate
        brightness, rate = check_dim_params(
            brightness, rate, self._pim.flags.get("use_raw_rate")
        )
        self._pim.send(self._pim.encoder.goto(self._addr, brightness, rate), False)
        self.update_device_levels(UpbCommand.GOTO, brightness, saved_rate)
    def fade_start(self, brightness, rate=-1):
        """(Helper) Start fading a link."""
        saved_rate = rate
        brightness, rate = check_dim_params(
            brightness, rate, self._pim.flags.get("use_raw_rate")
        )
        self._pim.send(self._pim.encoder.fade_start(self._addr, brightness, rate), False)
        self.update_device_levels(UpbCommand.FADE_START, brightness, saved_rate)
    def fade_stop(self):
        """(Helper) Stop fading a link."""
        self._pim.send(self._pim.encoder.fade_stop(self._addr), False)
        # Final levels are unknown after a stop, so ask each device to
        # refresh its status instead of guessing.
        for device_link in self.devices:
            device = self._pim.devices.elements.get(device_link.device_id)
            if device:
                device.update_status()
    def blink(self, rate=-1):
        """(Helper) Blink a link."""
        # Enforce a minimum blink rate unless the user opted out via flag.
        if rate < MINIMUM_BLINK_RATE and not self._pim.flags.get(
            "unlimited_blink_rate"
        ):
            rate = MINIMUM_BLINK_RATE  # Force 1/3 of second blink rate
        self._pim.send(self._pim.encoder.blink(self._addr, rate), False)
        self.update_device_levels(UpbCommand.BLINK, 100)
    def update_device_levels(self, upb_cmd, level=-1, rate=-1):
        """Update the dim level on all devices in this link."""
        LOG.debug("%s %s %s", upb_cmd.name.capitalize(), self.name, self.index)
        for device_link in self.devices:
            device = self._pim.devices.elements.get(device_link.device_id)
            if not device:
                continue
            # GOTO/FADE_START drive devices to the requested level;
            # ACTIVATE restores each device's configured link level;
            # everything else (DEACTIVATE/BLINK end state) turns it off.
            if upb_cmd in [UpbCommand.GOTO, UpbCommand.FADE_START]:
                set_level = level
            elif upb_cmd == UpbCommand.ACTIVATE:
                set_level = device_link.device_level
            else:
                set_level = 0
            device.setattr("status", set_level)
            LOG.debug("  Updating '%s' to level %d", device.name, set_level)
        # Record what just happened so observers can see the latest change.
        changes = {"timestamp": time()}
        changes["command"] = UPB_COMMAND_TO_ACTION_MAPPING[upb_cmd.name]
        changes["level"] = level
        changes["rate"] = rate
        self.setattr("last_change", changes)
class Links(Elements):
    """Handling for multiple links.

    Registers PIM message handlers so that link commands observed on the
    network update the local device levels.
    """
    def __init__(self, pim):
        super().__init__(pim)
        pim.add_handler(UpbCommand.ACTIVATE.value, self._activate_handler)
        pim.add_handler(UpbCommand.DEACTIVATE.value, self._deactivate_handler)
        pim.add_handler(UpbCommand.GOTO.value, self._goto_handler)
    def sync(self):
        # Links carry no queryable state of their own; nothing to sync.
        pass
    def _levels(self, msg, upb_cmd, level=-1, rate=-1):
        # Only process messages addressed to a link (not to a single device).
        if not msg.link:
            return
        index = LinkAddr(msg.network_id, msg.dest_id).index
        link = self.elements.get(index)
        if not link:
            return
        # Convert the wire-format rate to seconds when one was supplied.
        if rate >= 0:
            rate = rate_to_seconds(rate)
        link.update_device_levels(upb_cmd, level, rate)
    def _activate_handler(self, msg):
        self._levels(msg, UpbCommand.ACTIVATE)
    def _deactivate_handler(self, msg):
        self._levels(msg, UpbCommand.DEACTIVATE)
    def _goto_handler(self, msg):
        # GOTO payload: optional level byte followed by an optional rate byte.
        level = msg.data[0] if len(msg.data) > 0 else -1
        rate = msg.data[1] if len(msg.data) > 1 else -1
        self._levels(msg, UpbCommand.GOTO, level, rate)
| 34.147651 | 89 | 0.636596 |
7952ad98c14aaeab885f206bfac42b8c9340fe62 | 8,072 | py | Python | main.py | pgrv/CarND-Semantic-Segmentation | 42d0b8dcfd8d1b49dee8c1279758c1f33e8f428e | [
"MIT"
] | null | null | null | main.py | pgrv/CarND-Semantic-Segmentation | 42d0b8dcfd8d1b49dee8c1279758c1f33e8f428e | [
"MIT"
] | null | null | null | main.py | pgrv/CarND-Semantic-Segmentation | 42d0b8dcfd8d1b49dee8c1279758c1f33e8f428e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version: this project targets the TF 1.x API
# (tf.Session, tf.layers, tf.saved_model.loader), so require at least 1.0.
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer.  You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU: warn (but do not abort) when none is visible, since
# training the network on CPU is impractically slow.
if not tf.test.gpu_device_name():
    warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
    """
    Load Pretrained VGG Model into TensorFlow.
    :param sess: TensorFlow Session
    :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
    :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
    """
    # Restore the frozen VGG16 graph into the supplied session.
    tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)
    graph = tf.get_default_graph()
    # Tensors of interest, in the exact order the caller expects them back.
    tensor_names = (
        'image_input:0',
        'keep_prob:0',
        'layer3_out:0',
        'layer4_out:0',
        'layer7_out:0',
    )
    return tuple(graph.get_tensor_by_name(name) for name in tensor_names)
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    """
    Create the layers for a fully convolutional network.  Build skip-layers using the vgg layers.
    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output
    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output
    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output
    :param num_classes: Number of classes to classify
    :return: The Tensor for the last layer of output
    """
    def score(layer):
        # 1x1 convolution mapping a VGG feature map to per-class scores.
        return tf.layers.conv2d(
            layer, num_classes, 1, padding='same',
            kernel_initializer=tf.random_normal_initializer(stddev=0.01),
            kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))

    def upsample(layer, kernel_size, stride):
        # Transposed convolution that upsamples the score map by `stride`.
        return tf.layers.conv2d_transpose(
            layer, num_classes, kernel_size, stride, padding='same',
            kernel_initializer=tf.random_normal_initializer(stddev=0.01),
            kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))

    # FCN-style decoder: upsample layer7 scores 2x and fuse with layer4 scores,
    # upsample again and fuse with layer3 scores, then upsample 8x to input size.
    fused_4 = tf.add(upsample(score(vgg_layer7_out), 4, 2), score(vgg_layer4_out))
    fused_3 = tf.add(upsample(fused_4, 4, 2), score(vgg_layer3_out))
    return upsample(fused_3, 16, 8)
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    """
    Build the TensorFLow loss and optimizer operations.
    :param nn_last_layer: TF Tensor of the last layer in the neural network
    :param correct_label: TF Placeholder for the correct label image
    :param learning_rate: TF Placeholder for the learning rate
    :param num_classes: Number of classes to classify
    :return: Tuple of (logits, train_op, cross_entropy_loss)
    """
    # Flatten predictions and labels to 2-D (pixels x classes) so each pixel
    # is treated as an independent classification example.
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    labels = tf.reshape(correct_label, (-1, num_classes))
    # Bug fix: the reshaped `labels` tensor was computed but the raw 4-D
    # `correct_label` placeholder was passed instead, mismatching the 2-D
    # logits expected by softmax_cross_entropy_with_logits.
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(cross_entropy_loss)
    return logits, train_op, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
             correct_label, keep_prob, learning_rate):
    """
    Train neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_batches_fn: Function to get batches of training data.  Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    """
    # TODO: Implement function
    sess.run(tf.global_variables_initializer())
    print("Training")
    print()
    for epoch in range(epochs):
        print("EPOCH "+str(epoch+1))
        # Track a running sum of the per-batch loss to report an epoch average.
        numberOfImages = 0
        averageLoss = 0
        for image, label in get_batches_fn(batch_size):
            # NOTE(review): keep_prob (0.4) and learning_rate (0.001) are fed
            # as fixed constants here, even though placeholders are passed in.
            _, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image:image, correct_label:label, keep_prob:0.4, learning_rate: 0.001})
            numberOfImages = numberOfImages + 1
            averageLoss = averageLoss + loss
        print("Average loss: " + str(averageLoss/numberOfImages))
        print()
tests.test_train_nn(train_nn)
def run():
    """Build, train, and run inference for the road-segmentation FCN."""
    # Binary segmentation: road vs. not-road.
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)
    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)
    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/
    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
        # TODO: Build NN using load_vgg, layers, and optimize function
        epochs = 20
        batch_size = 2
        # TF placeholders
        correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        # Assemble encoder (pretrained VGG), decoder, and training ops.
        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(sess, vgg_path)
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
        # TODO: Train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
                 correct_label, keep_prob, learning_rate)
        # TODO: Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
        # OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run()
| 43.165775 | 214 | 0.732161 |
7952adbdf6b6737b6dd629ec67db5a58469276f0 | 408 | py | Python | setup.py | ChiangYintso/py-comm | 5b80c56606e9db8ede0b41e9747418c4631a5326 | [
"BSD-3-Clause"
] | null | null | null | setup.py | ChiangYintso/py-comm | 5b80c56606e9db8ede0b41e9747418c4631a5326 | [
"BSD-3-Clause"
] | null | null | null | setup.py | ChiangYintso/py-comm | 5b80c56606e9db8ede0b41e9747418c4631a5326 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from py_comm import __version__
# Distribution name; also reused below in the package include patterns.
name = "py_comm"
setup(
    name=name,
    version=__version__,
    keywords=["pip", "commons", "common", "utils", "util"],
    description="Common utils for python",
    # Ship the py_comm package and its subpackages; keep tests out of the wheel.
    packages=find_packages(include=[name, f"{name}.*"], exclude=["test", "test.*"]),
    include_package_data=True,
    platforms="any"
)
| 24 | 84 | 0.661765 |
7952ae5540eb9e3df8e101209db40716e1254b18 | 724 | py | Python | tests/conftest.py | adobe-type-tools/opentype-svg | 57353dfd094b6ed2225f18482331ff895e003d48 | [
"MIT"
] | 166 | 2016-09-14T07:42:58.000Z | 2022-03-27T14:37:27.000Z | tests/conftest.py | adobe-type-tools/opentype-svg | 57353dfd094b6ed2225f18482331ff895e003d48 | [
"MIT"
] | 13 | 2017-08-03T18:02:32.000Z | 2021-06-01T07:08:41.000Z | tests/conftest.py | adobe-type-tools/opentype-svg | 57353dfd094b6ed2225f18482331ff895e003d48 | [
"MIT"
] | 17 | 2018-01-20T03:21:40.000Z | 2022-02-26T12:33:17.000Z | # Copyright 2021 Adobe. All rights reserved.
import os
import pytest
@pytest.fixture
def fonts_dir():
    """Yield the path to the repository-level 'fonts' directory."""
    root_dir = os.path.dirname(os.path.dirname(__file__))
    yield os.path.join(root_dir, 'fonts')
@pytest.fixture
def base_font_path(fonts_dir):
    """Yield the path to the base Zebrawood font."""
    yield os.path.join(fonts_dir, 'Zebrawood.otf')
@pytest.fixture
def shadow_font_path(fonts_dir):
    """Yield the path to the Zebrawood Shadow font."""
    yield os.path.join(fonts_dir, 'Zebrawood-Shadow.otf')
@pytest.fixture
def fill_font_path(fonts_dir):
    """Yield the path to the Zebrawood Fill font."""
    yield os.path.join(fonts_dir, 'Zebrawood-Fill.otf')
@pytest.fixture
def dots_font_path(fonts_dir):
    """Yield the path to the Zebrawood Dots font."""
    yield os.path.join(fonts_dir, 'Zebrawood-Dots.otf')
@pytest.fixture
def fixtures_dir():
    """Yield the path to this test package's 'fixtures' directory."""
    yield os.path.join(os.path.dirname(__file__), 'fixtures')
| 20.111111 | 61 | 0.743094 |
7952af473f65c54e15f7d68b9f4289af2f555e86 | 6,912 | py | Python | misc/config_tools/static_allocators/bdf.py | tw4452852/acrn-hypervisor | 3af6c47292463564a8dec3dcca439c189fd82f8a | [
"BSD-3-Clause"
] | null | null | null | misc/config_tools/static_allocators/bdf.py | tw4452852/acrn-hypervisor | 3af6c47292463564a8dec3dcca439c189fd82f8a | [
"BSD-3-Clause"
] | null | null | null | misc/config_tools/static_allocators/bdf.py | tw4452852/acrn-hypervisor | 3af6c47292463564a8dec3dcca439c189fd82f8a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (C) 2021 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import sys, os, re
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
import common, lib.error, lib.lib
# Constants for device name prefix
IVSHMEM = "IVSHMEM"
VUART = "VUART"
# Exception bdf list
# Some hardware drivers' bdf is hardcoded, the bdf cannot be changed even it is passtrhough devices.
HARDCODED_BDF_LIST = ["00:0e.0"]
def find_unused_bdf(used_bdf):
    """Return a BusDevFunc on bus 0 whose device number is not yet taken.

    Device 0x00 is never handed out: 0:00.0 is reserved for the PCI host
    bridge. Raises lib.error.ResourceError when devices 0x01-0x1f are all used.
    """
    taken_devs = {in_use_bdf.dev for in_use_bdf in used_bdf}
    for dev in range(0x1, 0x20):
        if dev not in taken_devs:
            return lib.lib.BusDevFunc(bus=0x00, dev=dev, func=0x0)
    raise lib.error.ResourceError(f"Cannot find free bdf, used bdf: {sorted(used_bdf)}")
def insert_vuart_to_dev_dict(scenario_etree, devdict, used):
    """Allocate a free bdf for every PCI vuart of the VM.

    Console vuarts are assigned before communication vuarts, matching the
    document order of the scenario XML.
    """
    console_ids = scenario_etree.xpath("./console_vuart[base != 'INVALID_PCI_BASE']/@id")
    communication_ids = scenario_etree.xpath(".//communication_vuart[base != 'INVALID_PCI_BASE']/@id")
    for vuart_id in list(console_ids) + list(communication_ids):
        free_bdf = find_unused_bdf(used)
        devdict[f"{VUART}_{vuart_id}"] = free_bdf
        used.append(free_bdf)
def insert_ivsheme_to_dev_dict(scenario_etree, devdict, vm_id, used):
    """Record the (pre-assigned) vbdf of each ivshmem region for this VM.

    Unlike vuarts, ivshmem bdfs come from the scenario XML rather than
    being allocated here; they are only registered and marked as used.
    """
    shmem_regions = lib.lib.get_ivshmem_regions_by_tree(scenario_etree)
    if vm_id not in shmem_regions:
        return
    shmems = shmem_regions.get(vm_id)
    for shm in shmems.values():
        bdf = lib.lib.BusDevFunc.from_str(shm.get('vbdf'))
        devdict[f"{IVSHMEM}_{shm.get('id')}"] = bdf
        used.append(bdf)
def insert_pt_devs_to_dev_dict(vm_node_etree, devdict, used):
    """
    Assign a bdf to each of the VM's passthrough devices.

    A device whose bdf appears in HARDCODED_BDF_LIST keeps its native bdf
    (the hardware driver hardcodes it); every other passthrough device is
    given an unused bdf via find_unused_bdf().
    """
    pt_devs = vm_node_etree.xpath(f".//pci_dev/text()")
    # assign the bdf of the devices in HARDCODED_BDF_LIST
    for pt_dev in pt_devs:
        bdf_string = pt_dev.split()[0]
        if bdf_string in HARDCODED_BDF_LIST:
            bdf = lib.lib.BusDevFunc.from_str(bdf_string)
            dev_name = str(bdf)
            devdict[dev_name] = bdf
            used.append(bdf)
    # Remove the hardcoded devices before allocating the rest.
    # Bug fix: the old filter evaluated `BusDevFunc.from_str(bdf_string)` with
    # the stale `bdf_string` left over from the loop above, so one value was
    # tested for every element and the filtering was all-or-nothing.
    pt_devs = [pt_dev for pt_dev in pt_devs if pt_dev.split()[0] not in HARDCODED_BDF_LIST]
    # Assign an unused bdf to the remaining passthrough devices, keyed by
    # their native bdf string.
    for pt_dev in pt_devs:
        bdf = lib.lib.BusDevFunc.from_str(pt_dev.split()[0])
        free_bdf = find_unused_bdf(used)
        dev_name = str(bdf)
        devdict[dev_name] = free_bdf
        used.append(free_bdf)
def get_devs_bdf_native(board_etree):
    """
    Get all pci devices' bdf in native environment.
    return: list of pci devices' bdf
    """
    devices = []
    for node in board_etree.xpath("//bus[@type = 'pci' and @address = '0x0']/device[@address]"):
        # The device's @address packs dev in the high 16 bits and func in
        # the low 16 bits; the bus number comes from the parent node.
        address = int(node.get('address'), 16)
        bus_number = int(common.get_node("../@address", node), 16)
        devices.append(
            lib.lib.BusDevFunc(bus=bus_number, dev=address >> 16, func=address & 0xffff)
        )
    return devices
def get_devs_bdf_passthrough(scenario_etree):
    """
    Get all pre-launched vms' passthrough devices' bdf in native environment.
    return: list of passthrough devices' bdf.
    """
    # The first whitespace-separated token of each pci_dev entry is the bdf.
    return [
        lib.lib.BusDevFunc.from_str(pt_dev.split()[0])
        for pt_dev in scenario_etree.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/pci_devs/pci_dev/text()")
    ]
def create_device_node(allocation_etree, vm_id, devdict):
    """Write each device's allocated bus/dev/func into the allocation tree.

    Creates the vm and device nodes on demand; existing bus/dev/func values
    are left untouched (only missing children are appended).
    """
    for dev in devdict:
        dev_name = dev
        bdf = devdict.get(dev)
        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
        dev_node = common.get_node(f"./device[@name = '{dev_name}']", vm_node)
        if dev_node is None:
            dev_node = common.append_node("./device", None, vm_node, name = dev_name)
        # Only add bus/dev/func children that are not already present.
        if common.get_node(f"./bus", dev_node) is None:
            common.append_node(f"./bus", f"{bdf.bus:#04x}", dev_node)
        if common.get_node(f"./dev", dev_node) is None:
            common.append_node(f"./dev", f"{bdf.dev:#04x}", dev_node)
        if common.get_node(f"./func", dev_node) is None:
            common.append_node(f"./func", f"{bdf.func:#04x}", dev_node)
def create_igd_sbdf(board_etree, allocation_etree):
    """
    Extract the integrated GPU bdf from board.xml. If the device is not present, set bdf to "0xFFFF" which indicates the device
    doesn't exist.
    """
    bus = "0x0"
    # The iGPU is an Intel (vendor 0x8086) display controller (class 0x030000) on bus 0.
    device_node = common.get_node(f"//bus[@type='pci' and @address='{bus}']/device[vendor='0x8086' and class='0x030000']", board_etree)
    if device_node is None:
        common.append_node("/acrn-config/hv/MISC_CFG/IGD_SBDF", '0xFFFF', allocation_etree)
    else:
        # @address packs dev in the high 16 bits and func in the low 16 bits.
        address = device_node.get('address')
        dev = int(address, 16) >> 16
        func = int(address, 16) & 0xffff
        # Encode as a 16-bit sbdf: bus[15:8] | dev[7:3] | func[2:0].
        common.append_node("/acrn-config/hv/MISC_CFG/IGD_SBDF", f"{(int(bus, 16) << 8) | (dev << 3) | func:#06x}", allocation_etree)
def fn(board_etree, scenario_etree, allocation_etree):
    """Entry point: allocate emulated-device bdfs for every non-post-launched VM.

    For each VM, bdfs are assigned in the order vuarts -> ivshmem ->
    passthrough devices, against a per-VM list of already-used bdfs.
    """
    create_igd_sbdf(board_etree, allocation_etree)
    vm_nodes = scenario_etree.xpath("//vm")
    for vm_node in vm_nodes:
        vm_id = vm_node.get('id')
        devdict = {}
        used = []
        load_order = common.get_node("./load_order/text()", vm_node)
        # Post-launched VMs get their device model configuration elsewhere.
        if load_order is not None and lib.lib.is_post_launched_vm(load_order):
            continue
        if load_order is not None and lib.lib.is_service_vm(load_order):
            # The service VM sees every native device except those passed
            # through to pre-launched VMs; treat the remainder as occupied.
            native_used = get_devs_bdf_native(board_etree)
            passthrough_used = get_devs_bdf_passthrough(scenario_etree)
            used = [bdf for bdf in native_used if bdf not in passthrough_used]
        # Board-specific reservation: keep 0:01.0 off-limits on tgl-rvp.
        if common.get_node("//@board", scenario_etree) == "tgl-rvp":
            used.append(lib.lib.BusDevFunc(bus = 0, dev = 1, func = 0))
        insert_vuart_to_dev_dict(vm_node, devdict, used)
        insert_ivsheme_to_dev_dict(scenario_etree, devdict, vm_id, used)
        insert_pt_devs_to_dev_dict(vm_node, devdict, used)
        create_device_node(allocation_etree, vm_id, devdict)
| 43.746835 | 135 | 0.670284 |
7952b1412faa1a7b070f44384df7f65d68a2b231 | 158 | py | Python | src/pkg_trainmote/actions/actionInterface.py | FelixNievelstein/Trainmote-Server | 120b8b5a2db4c08789e57788233c38d659628330 | [
"MIT"
] | null | null | null | src/pkg_trainmote/actions/actionInterface.py | FelixNievelstein/Trainmote-Server | 120b8b5a2db4c08789e57788233c38d659628330 | [
"MIT"
] | null | null | null | src/pkg_trainmote/actions/actionInterface.py | FelixNievelstein/Trainmote-Server | 120b8b5a2db4c08789e57788233c38d659628330 | [
"MIT"
] | null | null | null |
class ActionInterface:
    """Base contract for Trainmote actions.

    Subclasses override the three lifecycle hooks below; every default is
    a no-op returning ``None`` so partial implementations stay valid.
    """

    def prepareAction(self):
        """Hook invoked before the action starts; default does nothing."""
        return None

    def runAction(self, _callback):
        """Execute the action, reporting completion via ``_callback``; default does nothing."""
        return None

    def cancelAction(self):
        """Abort a running action; default does nothing."""
        return None
| 14.363636 | 35 | 0.607595 |
7952b3dd631601e646420c976df1f58ee4a282fa | 3,331 | py | Python | myawwards/migrations/0001_initial.py | FestusMutie/festo-awwards | d8900915ee9d1eedda892712f99d867b904eb63f | [
"MIT"
] | null | null | null | myawwards/migrations/0001_initial.py | FestusMutie/festo-awwards | d8900915ee9d1eedda892712f99d867b904eb63f | [
"MIT"
] | null | null | null | myawwards/migrations/0001_initial.py | FestusMutie/festo-awwards | d8900915ee9d1eedda892712f99d867b904eb63f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-07-22 18:24
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import pyuploadcare.dj.models
class Migration(migrations.Migration):
    """Initial schema for the awards app: Post, Profile and Rating tables."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A project/site a user submits to be rated by others.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=155)),
                ('url', models.URLField(max_length=255)),
                ('description', models.TextField(max_length=255)),
                ('technologies', models.CharField(blank=True, max_length=200)),
                ('photo', pyuploadcare.dj.models.ImageField()),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # One-to-one profile extension of the auth user model.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile_picture', models.ImageField(default='default.png', upload_to='images/')),
                ('bio', models.TextField(blank=True, default='My Bio', max_length=500)),
                ('name', models.CharField(blank=True, max_length=120)),
                ('location', models.CharField(blank=True, max_length=60)),
                ('contact', models.EmailField(blank=True, max_length=100)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A user's 1-10 scores of a post in three categories, plus cached averages.
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('design', models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], default=0)),
                ('usability', models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')])),
                ('content', models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')])),
                ('score', models.FloatField(blank=True, default=0)),
                ('design_average', models.FloatField(blank=True, default=0)),
                ('usability_average', models.FloatField(blank=True, default=0)),
                ('content_average', models.FloatField(blank=True, default=0)),
                ('post', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='myawwards.Post')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rater', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 54.606557 | 183 | 0.576103 |
7952b63f3c1450cc932964505bfd7841c187d54d | 11,719 | py | Python | src/ssh/azext_ssh/tests/latest/test_custom.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/ssh/azext_ssh/tests/latest/test_custom.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/ssh/azext_ssh/tests/latest/test_custom.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import io
from azure.cli.core import azclierror
from unittest import mock
import unittest
from azext_ssh import custom
class SshCustomCommandTest(unittest.TestCase):
    """Unit tests for the custom ``az ssh`` command implementations.

    All collaborators (argument validation, IP resolution, key/certificate
    creation and file access) are replaced with mocks, so each test verifies
    only the orchestration logic inside ``azext_ssh.custom``.
    """

    @mock.patch('azext_ssh.custom._do_ssh_op')
    @mock.patch('azext_ssh.custom._assert_args')
    def test_ssh_vm(self, mock_assert, mock_do_op):
        """``ssh vm`` validates its arguments, then delegates to ``_do_ssh_op``."""
        cmd = mock.Mock()
        custom.ssh_vm(cmd, "rg", "vm", "ip", "public", "private", False, "username", "cert", "port", None)
        mock_assert.assert_called_once_with("rg", "vm", "ip", "cert", "username")
        mock_do_op.assert_called_once_with(
            cmd, "rg", "vm", "ip", "public", "private", False, "username", "cert", None, mock.ANY)

    @mock.patch('azext_ssh.custom._do_ssh_op')
    @mock.patch('azext_ssh.ssh_utils.write_ssh_config')
    @mock.patch('azext_ssh.custom._assert_args')
    @mock.patch('os.path.isdir')
    @mock.patch('os.path.dirname')
    @mock.patch('os.path.join')
    def test_ssh_config(self, mock_join, mock_dirname, mock_isdir, mock_assert, mock_ssh_utils, mock_do_op):
        """``ssh config`` writes an SSH config entry from the values the op callback receives."""
        cmd = mock.Mock()
        mock_dirname.return_value = "configdir"
        mock_isdir.return_value = True
        mock_join.side_effect = ['az_ssh_config/rg-vm', 'path/to/az_ssh_config/rg-vm']
        # Simulate _do_ssh_op invoking its op callback with resolved connection values.
        def do_op_side_effect(cmd, resource_group, vm_name, ssh_ip, public_key_file, private_key_file, use_private_ip, local_user, cert_file, credentials_folder, op_call):
            op_call(ssh_ip, "username", "cert", private_key_file, False, False)
        mock_do_op.side_effect = do_op_side_effect
        custom.ssh_config(cmd, "path/to/file", "rg", "vm", "ip", "public", "private", False, False, "username", "cert", "port", None)
        mock_ssh_utils.assert_called_once_with("path/to/file", "rg", "vm", False, "port", "ip", "username", "cert", "private", False, False)
        mock_assert.assert_called_once_with("rg", "vm", "ip", "cert", "username")
        mock_do_op.assert_called_once_with(
            cmd, "rg", "vm", "ip", "public", "private", False, "username", "cert", 'path/to/az_ssh_config/rg-vm', mock.ANY)

    @mock.patch('azext_ssh.ssh_utils.get_ssh_cert_principals')
    @mock.patch('os.path.join')
    @mock.patch('azext_ssh.custom._check_or_create_public_private_files')
    @mock.patch('azext_ssh.ip_utils.get_ssh_ip')
    @mock.patch('azext_ssh.custom._get_modulus_exponent')
    @mock.patch('azure.cli.core._profile.Profile')
    @mock.patch('azext_ssh.custom._write_cert_file')
    def test_do_ssh_op_aad_user(self, mock_write_cert, mock_ssh_creds, mock_get_mod_exp, mock_ip,
                                mock_check_files, mock_join, mock_principal):
        """AAD flow: mint an SSH certificate from the public key and pass it to op."""
        cmd = mock.Mock()
        cmd.cli_ctx = mock.Mock()
        cmd.cli_ctx.cloud = mock.Mock()
        cmd.cli_ctx.cloud.name = "azurecloud"
        mock_op = mock.Mock()
        mock_check_files.return_value = "public", "private", False
        mock_principal.return_value = ["username"]
        mock_get_mod_exp.return_value = "modulus", "exponent"
        profile = mock_ssh_creds.return_value
        profile._adal_cache = True
        profile.get_msal_token.return_value = "username", "certificate"
        mock_join.return_value = "public-aadcert.pub"
        custom._do_ssh_op(cmd, None, None, "1.2.3.4", "publicfile", "privatefile", False, None, None, "cred/folder", mock_op)
        mock_check_files.assert_called_once_with("publicfile", "privatefile", "cred/folder")
        # IP was supplied explicitly, so no lookup should happen.
        mock_ip.assert_not_called()
        mock_get_mod_exp.assert_called_once_with("public")
        mock_write_cert.assert_called_once_with("certificate", "public-aadcert.pub")
        mock_op.assert_called_once_with(
            "1.2.3.4", "username", "public-aadcert.pub", "private", False, True)

    @mock.patch('azext_ssh.custom._check_or_create_public_private_files')
    @mock.patch('azext_ssh.ip_utils.get_ssh_ip')
    def test_do_ssh_op_local_user(self, mock_ip, mock_check_files):
        """Local-user flow: resolve the VM IP and skip AAD key/cert generation."""
        cmd = mock.Mock()
        mock_op = mock.Mock()
        mock_ip.return_value = "1.2.3.4"
        custom._do_ssh_op(cmd, "vm", "rg", None, "publicfile", "privatefile", False, "username", "cert", "cred/folder", mock_op)
        mock_check_files.assert_not_called()
        mock_ip.assert_called_once_with(cmd, "vm", "rg", False)
        mock_op.assert_called_once_with(
            "1.2.3.4", "username", "cert", "privatefile", False, False)

    @mock.patch('azext_ssh.custom._check_or_create_public_private_files')
    @mock.patch('azext_ssh.ip_utils.get_ssh_ip')
    @mock.patch('azext_ssh.custom._get_modulus_exponent')
    def test_do_ssh_op_no_public_ip(self, mock_get_mod_exp, mock_ip, mock_check_files):
        """A VM without a resolvable IP raises ResourceNotFoundError."""
        cmd = mock.Mock()
        mock_op = mock.Mock()
        mock_get_mod_exp.return_value = "modulus", "exponent"
        mock_ip.return_value = None
        self.assertRaises(
            azclierror.ResourceNotFoundError, custom._do_ssh_op, cmd, "rg", "vm", None,
            "publicfile", "privatefile", False, None, None, "cred/folder", mock_op)
        mock_check_files.assert_not_called()
        mock_ip.assert_called_once_with(cmd, "rg", "vm", False)

    def test_assert_args_no_ip_or_vm(self):
        """Either an IP or a VM name must be given."""
        self.assertRaises(azclierror.RequiredArgumentMissingError, custom._assert_args, None, None, None, None, None)

    def test_assert_args_vm_rg_mismatch(self):
        """Resource group and VM name must be provided together."""
        self.assertRaises(azclierror.MutuallyExclusiveArgumentError, custom._assert_args, "rg", None, None, None, None)
        self.assertRaises(azclierror.MutuallyExclusiveArgumentError, custom._assert_args, None, "vm", None, None, None)

    def test_assert_args_ip_with_vm_or_rg(self):
        """An explicit IP may not be combined with VM/resource-group arguments."""
        self.assertRaises(azclierror.MutuallyExclusiveArgumentError, custom._assert_args, None, "vm", "ip", None, None)
        self.assertRaises(azclierror.MutuallyExclusiveArgumentError, custom._assert_args, "rg", "vm", "ip", None, None)

    def test_assert_args_cert_with_no_user(self):
        """A certificate without a username is rejected."""
        self.assertRaises(azclierror.MutuallyExclusiveArgumentError, custom._assert_args, None, None, "ip", "certificate", None)

    @mock.patch('os.path.isfile')
    def test_assert_args_invalid_cert_filepath(self, mock_is_file):
        """A certificate path that does not exist raises FileOperationError."""
        mock_is_file.return_value = False
        self.assertRaises(azclierror.FileOperationError, custom._assert_args, 'rg', 'vm', None, 'cert_path', 'username')

    @mock.patch('azext_ssh.ssh_utils.create_ssh_keyfile')
    @mock.patch('tempfile.mkdtemp')
    @mock.patch('os.path.isfile')
    @mock.patch('os.path.join')
    def test_check_or_create_public_private_files_defaults(self, mock_join, mock_isfile, mock_temp, mock_create):
        """With no key paths given, a temp keypair is generated and flagged for deletion."""
        mock_isfile.return_value = True
        mock_temp.return_value = "/tmp/aadtemp"
        mock_join.side_effect = ['/tmp/aadtemp/id_rsa.pub', '/tmp/aadtemp/id_rsa']
        public, private, delete_key = custom._check_or_create_public_private_files(None, None, None)
        self.assertEqual('/tmp/aadtemp/id_rsa.pub', public)
        self.assertEqual('/tmp/aadtemp/id_rsa', private)
        self.assertEqual(True, delete_key)
        mock_join.assert_has_calls([
            mock.call("/tmp/aadtemp", "id_rsa.pub"),
            mock.call("/tmp/aadtemp", "id_rsa")
        ])
        mock_isfile.assert_has_calls([
            mock.call('/tmp/aadtemp/id_rsa.pub'),
            mock.call('/tmp/aadtemp/id_rsa')
        ])
        mock_create.assert_has_calls([
            mock.call('/tmp/aadtemp/id_rsa')
        ])

    @mock.patch('azext_ssh.ssh_utils.create_ssh_keyfile')
    @mock.patch('os.path.isdir')
    @mock.patch('os.path.isfile')
    @mock.patch('os.path.join')
    def test_check_or_create_public_private_files_defaults_with_cred_folder(self,mock_join, mock_isfile, mock_isdir, mock_create):
        """A provided credentials folder is used instead of a temp directory."""
        mock_isfile.return_value = True
        mock_isdir.return_value = True
        mock_join.side_effect = ['/cred/folder/id_rsa.pub', '/cred/folder/id_rsa']
        public, private, delete_key = custom._check_or_create_public_private_files(None, None, '/cred/folder')
        self.assertEqual('/cred/folder/id_rsa.pub', public)
        self.assertEqual('/cred/folder/id_rsa', private)
        self.assertEqual(True, delete_key)
        mock_join.assert_has_calls([
            mock.call("/cred/folder", "id_rsa.pub"),
            mock.call("/cred/folder", "id_rsa")
        ])
        mock_isfile.assert_has_calls([
            mock.call('/cred/folder/id_rsa.pub'),
            mock.call('/cred/folder/id_rsa')
        ])
        mock_create.assert_has_calls([
            mock.call('/cred/folder/id_rsa')
        ])

    @mock.patch('os.path.isfile')
    @mock.patch('os.path.join')
    def test_check_or_create_public_private_files_no_public(self, mock_join, mock_isfile):
        """A missing public key file raises FileOperationError."""
        mock_isfile.side_effect = [False]
        self.assertRaises(
            azclierror.FileOperationError, custom._check_or_create_public_private_files, "public", None, None)
        mock_isfile.assert_called_once_with("public")

    @mock.patch('os.path.isfile')
    @mock.patch('os.path.join')
    def test_check_or_create_public_private_files_no_private(self, mock_join, mock_isfile):
        """A missing private key file raises FileOperationError."""
        mock_isfile.side_effect = [True, False]
        self.assertRaises(
            azclierror.FileOperationError, custom._check_or_create_public_private_files, "public", "private", None)
        mock_join.assert_not_called()
        mock_isfile.assert_has_calls([
            mock.call("public"),
            mock.call("private")
        ])

    @mock.patch('builtins.open')
    def test_write_cert_file(self, mock_open):
        """The certificate is written in OpenSSH cert format to the derived path."""
        mock_file = mock.Mock()
        mock_open.return_value.__enter__.return_value = mock_file
        custom._write_cert_file("cert", "publickey-aadcert.pub")
        mock_open.assert_called_once_with("publickey-aadcert.pub", 'w')
        mock_file.write.assert_called_once_with("ssh-rsa-cert-v01@openssh.com cert")

    @mock.patch('azext_ssh.rsa_parser.RSAParser')
    @mock.patch('os.path.isfile')
    @mock.patch('builtins.open')
    def test_get_modulus_exponent_success(self, mock_open, mock_isfile, mock_parser):
        """Modulus/exponent come from parsing the public key file contents."""
        mock_isfile.return_value = True
        mock_open.return_value = io.StringIO('publickey')
        modulus, exponent = custom._get_modulus_exponent('file')
        self.assertEqual(mock_parser.return_value.modulus, modulus)
        self.assertEqual(mock_parser.return_value.exponent, exponent)
        mock_isfile.assert_called_once_with('file')
        mock_open.assert_called_once_with('file', 'r')
        mock_parser.return_value.parse.assert_called_once_with('publickey')

    @mock.patch('os.path.isfile')
    def test_get_modulus_exponent_file_not_found(self, mock_isfile):
        """A missing public key file raises FileOperationError."""
        mock_isfile.return_value = False
        self.assertRaises(azclierror.FileOperationError, custom._get_modulus_exponent, 'file')
        mock_isfile.assert_called_once_with('file')

    @mock.patch('azext_ssh.rsa_parser.RSAParser')
    @mock.patch('os.path.isfile')
    @mock.patch('builtins.open')
    def test_get_modulus_exponent_parse_error(self, mock_open, mock_isfile, mock_parser):
        """A parser failure is surfaced as FileOperationError."""
        mock_isfile.return_value = True
        mock_open.return_value = io.StringIO('publickey')
        mock_parser_obj = mock.Mock()
        mock_parser.return_value = mock_parser_obj
        mock_parser_obj.parse.side_effect = ValueError
        self.assertRaises(azclierror.FileOperationError, custom._get_modulus_exponent, 'file')
# Allow running this test module directly: `python test_custom.py`.
if __name__ == '__main__':
    unittest.main()
| 46.876 | 171 | 0.68086 |
7952b6eea83f89cf311b5c290e6d33cb85a42b38 | 11,982 | py | Python | fkie_iop_node_manager/src/fkie_iop_node_manager/transport/udp_mc.py | fkie/iop_node_manager | c2e12989a6baf7098c93c33ca6e95acf7584e462 | [
"Apache-2.0"
] | 1 | 2021-05-05T14:57:06.000Z | 2021-05-05T14:57:06.000Z | fkie_iop_node_manager/src/fkie_iop_node_manager/transport/udp_mc.py | fkie/iop_node_manager | c2e12989a6baf7098c93c33ca6e95acf7584e462 | [
"Apache-2.0"
] | null | null | null | fkie_iop_node_manager/src/fkie_iop_node_manager/transport/udp_mc.py | fkie/iop_node_manager | c2e12989a6baf7098c93c33ca6e95acf7584e462 | [
"Apache-2.0"
] | null | null | null | # ****************************************************************************
#
# fkie_iop_node_manager
# Copyright 2019 Fraunhofer FKIE
# Author: Alexander Tiderko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ****************************************************************************
from __future__ import division, absolute_import, print_function, unicode_literals
import errno
import socket
import struct
import threading
import traceback
import fkie_iop_node_manager.queue as queue
from fkie_iop_node_manager.addrbook import AddressBook
from fkie_iop_node_manager.message_parser import MessageParser
from .net import getaddrinfo, localifs
from fkie_iop_node_manager.logger import NMLogger
# Last send error message per destination address; an entry is removed again
# on the next successful send to that address.
SEND_ERRORS = {}
class UDPmcSocket(socket.socket):
    '''
    UDP socket joined to a multicast group for sending and receiving JAUS
    messages. IPv4 and IPv6 groups are supported; an optional interface
    restricts multicast traffic to that NIC.

    Outgoing messages are queued and transmitted by a background thread.
    If a ``router`` is given, a second background thread receives datagrams,
    parses them and forwards each message to ``router.route_udp_msg()``.
    '''

    def __init__(self, port, mgroup, router=None, ttl=16, interface='', logger_name='udp_mc', send_buffer=0, recv_buffer=0, queue_length=0, loglevel='info'):
        '''
        Creates a socket, bind it to a given port and join to a given multicast
        group. IPv4 and IPv6 are supported.

        :param int port: the port to bind the socket
        :param str mgroup: the multicast group to join
        :param router: class which provides `route_udp_msg(fkie_iop_node_manager.message.Message)` method. If `None` receive will be disabled.
        :type router: fkie_iop_node_manager.queue
        :param int ttl: time to live (Default: 16)
        :param str interface: IP of interface to bind (Default: '').
        :param int send_buffer: if > 0, set SO_SNDBUF to this size (Default: 0 keeps the OS default).
        :param int recv_buffer: maximum datagram size handed to recvfrom; 0 falls back to 65535.
        :param int queue_length: maximum length of the send queue; 0 means unlimited.
        :param str loglevel: log level for the internal logger (Default: 'info').
        '''
        self.logger = NMLogger('%s[%s:%d]' % (logger_name, mgroup.replace('.', '_'), port), loglevel)
        self.port = port
        self.mgroup = mgroup
        self._lock = threading.RLock()
        self._closed = False
        # recvfrom(0) would read zero-byte payloads and silently drop every
        # datagram, so fall back to the maximum UDP payload size.
        self._recv_buffer = recv_buffer if recv_buffer > 0 else 65535
        # Addresses of local interfaces; used to skip messages sent by ourselves.
        self._locals = [ip for _ifname, ip in localifs()]
        self._locals.append('localhost')
        # Cache of AddressBook endpoints per sender address tuple.
        self._sender_endpoints = {}
        self.sock_5_error_printed = []
        self.SOKET_ERRORS_NEEDS_RECONNECT = False
        self.interface = interface
        # get the AF_INET information for group to ensure that the address family
        # of group is the same as for interface
        addrinfo = getaddrinfo(self.mgroup)
        self.interface_ip = ''
        if self.interface:
            addrinfo = getaddrinfo(self.interface, addrinfo[0])
            if addrinfo is not None:
                self.interface_ip = addrinfo[4][0]
        self.logger.debug("destination: %s" % self.mgroup)
        self.logger.debug("interface : %s (detected ip: %s)" % (self.interface, self.interface_ip))
        self.logger.debug("inet: %s" % str(addrinfo))
        socket.socket.__init__(self, addrinfo[0], socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.logger.info("Create multicast socket @('%s', %d)" % (self.mgroup, port))
        # initialize multicast socket
        # Allow multiple copies of this program on one machine
        if hasattr(socket, "SO_REUSEPORT"):
            try:
                self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            except Exception:
                self.logger.warning("SO_REUSEPORT failed: Protocol not available, some functions are not available.")
        # Set Time-to-live (optional) and loop count
        ttl_bin = struct.pack('@i', ttl)
        if addrinfo[0] == socket.AF_INET:  # IPv4
            self.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl_bin)
            self.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
        else:  # IPv6
            self.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl_bin)
            self.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1)
        try:
            if addrinfo[0] == socket.AF_INET:  # IPv4
                # Create group_bin for de-register later
                # Set socket options for multicast specific interface or general
                if not self.interface_ip:
                    self.group_bin = socket.inet_pton(socket.AF_INET, self.mgroup) + struct.pack('=I', socket.INADDR_ANY)
                    self.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
                                    self.group_bin)
                else:
                    self.group_bin = socket.inet_aton(self.mgroup) + socket.inet_aton(self.interface_ip)
                    self.setsockopt(socket.IPPROTO_IP,
                                    socket.IP_MULTICAST_IF,
                                    socket.inet_aton(self.interface_ip))
                    self.setsockopt(socket.IPPROTO_IP,
                                    socket.IP_ADD_MEMBERSHIP,
                                    self.group_bin)
            else:  # IPv6
                # Create group_bin for de-register later
                # Set socket options for multicast
                self.group_bin = socket.inet_pton(addrinfo[0], self.mgroup) + struct.pack('@I', socket.INADDR_ANY)
                self.setsockopt(socket.IPPROTO_IPV6,
                                socket.IPV6_JOIN_GROUP,
                                self.group_bin)
        except socket.error as errobj:
            msg = str(errobj)
            if errobj.errno in [errno.ENODEV]:
                msg = "socket.error[%d]: %s,\nis multicast route set? e.g. sudo route add -net 224.0.0.0 netmask 224.0.0.0 eth0" % (errobj.errno, msg)
            raise Exception(msg)
        # set buffer size if configured
        if send_buffer:
            old_bufsize = self.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
            if old_bufsize != send_buffer:
                self.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, send_buffer)
                bufsize = self.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
                self.logger.debug("Changed buffer size from %d to %d" % (old_bufsize, bufsize))
        # Bind to the port
        try:
            # bind to default interfaces if not unicast socket was created
            self.bind((self.interface_ip, port))
        except socket.error as errobj:
            msg = str(errobj)
            self.logger.critical("Unable to bind multicast to interface: %s, check that it exists: %s" % (self.mgroup, msg))
            raise
        self._router = router
        self._queue_send = queue.PQueue(queue_length, 'queue_udp_send', loglevel=loglevel)
        self._parser_mcast = MessageParser(None, loglevel=loglevel)
        self.addrinfo = addrinfo
        # create a thread to handle the received multicast messages
        if self._router is not None:
            self._thread_recv = threading.Thread(target=self._loop_recv)
            self._thread_recv.start()
        self._thread_send = threading.Thread(target=self._loop_send)
        self._thread_send.start()

    def close(self):
        '''
        Unregister from the multicast group and close the socket.
        Also clears the send queue, which wakes up the sender thread.
        '''
        self._closed = True
        self.logger.info("Close multicast socket [%s:%d]" % (self.mgroup, self.port))
        try:
            # shutdown to cancel recvfrom()
            socket.socket.shutdown(self, socket.SHUT_RD)
        except socket.error:
            pass
        # Use the stored group_bin to de-register
        if self.addrinfo[0] == socket.AF_INET:  # IPv4
            self.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, self.group_bin)
        else:  # IPv6
            self.setsockopt(socket.IPPROTO_IPV6,
                            socket.IPV6_LEAVE_GROUP,
                            self.group_bin)
        socket.socket.close(self)
        self._queue_send.clear()

    def send_queued(self, msg):
        '''
        Put a message into the send queue; it is transmitted asynchronously
        by the sender thread. Queue overflows are logged, not raised.

        :param msg: message to send
        :type msg: fkie_iop_node_manager.message.Message
        '''
        try:
            self._queue_send.put(msg)
        except queue.Full as full:
            # Use the logger (not print) so the error ends up with the rest
            # of this socket's diagnostics.
            self.logger.warning("Can't send message: %s\n%s" % (full, traceback.format_exc()))
        except Exception as e:
            self.logger.warning("Error while put message into queue: %s" % e)

    def _loop_send(self):
        '''
        Sender thread: takes messages from the queue and transmits them.
        Messages without an explicit destination are sent to the multicast group.
        '''
        while not self._closed:
            # Wait for next available Message. This method cancel waiting on clear() of PQueue and return None.
            msg = self._queue_send.get()
            if msg is not None:
                dst = msg.tinfo_dst
                if dst is None:  # or msg.dst_id.has_wildcards():
                    dst = AddressBook.Endpoint(AddressBook.Endpoint.UDP, self.mgroup, self.getsockname()[1])
                if dst is not None:
                    self._sendto(msg, dst)
                else:
                    self.logger.warning("Can't send message to %s, destination not found!" % (dst))
                    # TODO: add retry mechanism

    def _sendto(self, msg, endpoint):
        '''
        Send one message to the given endpoint and track per-address errors
        in the module-level SEND_ERRORS map.
        '''
        try:
            val = self.sendto(msg.bytes(), (endpoint.address, endpoint.port))
            if val != msg.raw_size:
                raise Exception("not complete send %d of %d" % (val, msg.raw_size))
            if endpoint.address in SEND_ERRORS:
                del SEND_ERRORS[endpoint.address]
        except socket.error as errobj:
            erro_msg = "Error while send to '%s': %s" % (endpoint.address, errobj)
            SEND_ERRORS[endpoint.address] = erro_msg
            # -5: no address associated with hostname, -2: Name or service not known
            if errobj.errno in [-5, -2]:
                if endpoint.address not in self.sock_5_error_printed:
                    self.logger.warning(erro_msg)
                    self.sock_5_error_printed.append(endpoint.address)
            else:
                self.logger.warning(erro_msg)
            # BUGFIX: the original list contained the `errno` module object
            # itself as last element; an int errno can never equal a module,
            # so it was dead. Only real network-down errnos remain.
            if errobj.errno in [errno.ENETDOWN, errno.ENETUNREACH, errno.ENETRESET]:
                self.SOKET_ERRORS_NEEDS_RECONNECT = True
        except Exception as e:
            erro_msg = "Send to host '%s' failed: %s" % (endpoint.address, e)
            self.logger.warning(erro_msg)
            SEND_ERRORS[endpoint.address] = erro_msg

    def _loop_recv(self):
        '''
        Receiver thread: parses incoming datagrams and routes each contained
        message. Datagrams originating from a local interface are skipped to
        avoid handling our own multicast loopback.
        '''
        while not self._closed:
            try:
                (data, address) = self.recvfrom(self._recv_buffer)
                if data and not self._closed and address[0] not in self._locals:  # skip messages received from self
                    msgs = self._parser_mcast.unpack(data)
                    for msg in msgs:
                        try:
                            msg.tinfo_src = self._sender_endpoints[address]
                        except KeyError:
                            endpoint = AddressBook.Endpoint(AddressBook.Endpoint.UDP, address[0], address[1])
                            msg.tinfo_src = endpoint
                            self._sender_endpoints[address] = endpoint
                        self._router.route_udp_msg(msg)
            except socket.timeout:
                pass
            except queue.Full as full_error:
                self.logger.warning("Error while process received multicast message: %s" % full_error)
            except socket.error:
                if not self._closed:
                    self.logger.warning("socket error: %s" % traceback.format_exc())
7952b74338373fd591a57f007650f79550df6f64 | 1,407 | py | Python | backend/api/v1/form_types.py | B3zaleel/Cartedepoezii | 217050d5ea1203a11a5ba9a74b3d497b5120cb9a | [
"MIT"
] | 4 | 2022-03-19T09:25:14.000Z | 2022-03-31T21:51:30.000Z | backend/api/v1/form_types.py | B3zaleel/Cartedepoezii | 217050d5ea1203a11a5ba9a74b3d497b5120cb9a | [
"MIT"
] | 2 | 2022-03-24T01:02:13.000Z | 2022-03-26T09:50:09.000Z | backend/api/v1/form_types.py | B3zaleel/Cartedepoezii | 217050d5ea1203a11a5ba9a74b3d497b5120cb9a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''A module containing JSON body data representations.
'''
from pydantic import BaseModel
from typing import Optional, List
class SignInForm(BaseModel):
    '''Credentials submitted to authenticate an existing user.'''
    email: str
    password: str
class SignUpForm(BaseModel):
    '''Details required to register a new user account.'''
    name: str
    email: str
    password: str
class PasswordResetRequestForm(BaseModel):
    '''Request to start a password reset for the given email address.'''
    email: str
class PasswordResetForm(BaseModel):
    '''New password submitted together with the emailed reset token.'''
    email: str
    password: str
    resetToken: str
class UserDeleteForm(BaseModel):
    '''Authenticated request to delete a user account.'''
    authToken: str
    userId: str
class UserUpdateForm(BaseModel):
    '''Authenticated request to update a user's profile details.'''
    authToken: str
    userId: str
    name: str
    # New profile photo payload; absent when the photo is unchanged.
    profilePhoto: Optional[str]
    profilePhotoId: str
    # True to remove the current profile photo.
    removeProfilePhoto: bool
    email: str
    bio: str
class ConnectionForm(BaseModel):
    '''Authenticated request to follow or unfollow another user.'''
    authToken: str
    userId: str
    followId: str
class PoemAddForm(BaseModel):
    '''Authenticated request to publish a new poem.'''
    authToken: str
    userId: str
    title: str
    verses: List[str]
class PoemUpdateForm(BaseModel):
    '''Authenticated request to edit an existing poem.'''
    authToken: str
    userId: str
    poemId: str
    title: str
    verses: List[str]
class PoemDeleteForm(BaseModel):
    '''Authenticated request to delete a poem.'''
    authToken: str
    userId: str
    poemId: str
class PoemLikeForm(BaseModel):
    '''Authenticated request to like (or unlike) a poem.'''
    authToken: str
    userId: str
    poemId: str
class CommentAddForm(BaseModel):
    '''Authenticated request to comment on a poem.'''
    authToken: str
    userId: str
    poemId: str
    text: str
    # Id of the comment being replied to; absent for a top-level comment.
    replyTo: Optional[str]
class CommentDeleteForm(BaseModel):
    '''Authenticated request to delete a comment.'''
    authToken: str
    userId: str
    commentId: str
| 15.633333 | 54 | 0.683724 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.