| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
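The rows below follow that schema: each code cell is followed by its repo_name, path, language, license, and size. As a minimal sketch of how such a corpus is typically consumed with the Hugging Face `datasets` library (the dataset name is a placeholder, not taken from this dump):

from datasets import load_dataset

# 'DATASET_NAME' is hypothetical -- substitute the actual corpus name
ds = load_dataset('DATASET_NAME', split='train', streaming=True)
row = next(iter(ds))
print(row['repo_name'], row['path'], row['license'], row['size'])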
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateInstruction
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datalabeling
# [START datalabeling_v1beta1_generated_DataLabelingService_CreateInstruction_sync]
from google.cloud import datalabeling_v1beta1
def sample_create_instruction():
# Create a client
client = datalabeling_v1beta1.DataLabelingServiceClient()
# Initialize request argument(s)
request = datalabeling_v1beta1.CreateInstructionRequest(
parent="parent_value",
)
# Make the request
operation = client.create_instruction(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END datalabeling_v1beta1_generated_DataLabelingService_CreateInstruction_sync]
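# A slightly fuller variant of the generated snippet above, populating the
# optional `instruction` field. The Instruction / PdfInstruction / DataType
# names are taken from the v1beta1 API surface for illustration -- verify
# them against the library version you install.
def sample_create_instruction_with_body():
    client = datalabeling_v1beta1.DataLabelingServiceClient()
    instruction = datalabeling_v1beta1.Instruction(
        display_name="my-instruction",
        data_type=datalabeling_v1beta1.DataType.IMAGE,
        pdf_instruction=datalabeling_v1beta1.PdfInstruction(
            gcs_file_uri="gs://my-bucket/instruction.pdf",
        ),
    )
    request = datalabeling_v1beta1.CreateInstructionRequest(
        parent="projects/my-project",  # hypothetical parent resource
        instruction=instruction,
    )
    operation = client.create_instruction(request=request)
    print(operation.result())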
| googleapis/python-datalabeling | samples/generated_samples/datalabeling_v1beta1_generated_data_labeling_service_create_instruction_sync.py | Python | apache-2.0 | 1,628 |
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.volume.encryptors import test_cryptsetup
from nova.volume.encryptors import luks
"""
The utility of these test cases is limited given the simplicity of the
LuksEncryptor class. The attach_volume method has the only significant logic
to handle cases where the volume has not previously been formatted, but
exercising this logic requires "real" devices and actually executing the
various cryptsetup commands rather than simply logging them.
"""
class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
def _create(self, connection_info):
return luks.LuksEncryptor(connection_info)
def test__format_volume(self):
self.encryptor._format_volume("passphrase")
expected_commands = [('cryptsetup', '--batch-mode', 'luksFormat',
'--key-file=-', self.dev_path)]
self.assertEqual(expected_commands, self.executes)
def test__open_volume(self):
self.encryptor._open_volume("passphrase")
expected_commands = [('cryptsetup', 'luksOpen', '--key-file=-',
self.dev_path, self.dev_name)]
self.assertEqual(expected_commands, self.executes)
def test_attach_volume(self):
self.stubs.Set(self.encryptor, '_get_key',
test_cryptsetup.fake__get_key)
self.encryptor.attach_volume(None)
expected_commands = [('cryptsetup', 'luksOpen', '--key-file=-',
self.dev_path, self.dev_name),
('ln', '--symbolic', '--force',
'/dev/mapper/%s' % self.dev_name,
self.symlink_path)]
self.assertEqual(expected_commands, self.executes)
def test__close_volume(self):
        self.encryptor._close_volume()
expected_commands = [('cryptsetup', 'luksClose', self.dev_name)]
self.assertEqual(expected_commands, self.executes)
def test_detach_volume(self):
self.encryptor.detach_volume()
expected_commands = [('cryptsetup', 'luksClose', self.dev_name)]
self.assertEqual(expected_commands, self.executes)
| berrange/nova | nova/tests/volume/encryptors/test_luks.py | Python | apache-2.0 | 2,831 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/WhereIs.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os
import sys
import TestSCons
test = TestSCons.TestSCons()
subdir_SConscript = os.path.join('subdir', 'SConscript')
sub1_xxx_exe = test.workpath('sub1', 'xxx.exe')
sub2_xxx_exe = test.workpath('sub2', 'xxx.exe')
sub3_xxx_exe = test.workpath('sub3', 'xxx.exe')
sub4_xxx_exe = test.workpath('sub4', 'xxx.exe')
test.subdir('subdir', 'sub1', 'sub2', 'sub3', 'sub4')
if sys.platform != 'win32':
test.write(sub1_xxx_exe, "\n")
os.mkdir(sub2_xxx_exe)
test.write(sub3_xxx_exe, "\n")
os.chmod(sub3_xxx_exe, 0777)
test.write(sub4_xxx_exe, "\n")
os.chmod(sub4_xxx_exe, 0777)
env_path = os.environ['PATH']
pathdirs_1234 = [ test.workpath('sub1'),
test.workpath('sub2'),
test.workpath('sub3'),
test.workpath('sub4'),
] + env_path.split(os.pathsep)
pathdirs_1243 = [ test.workpath('sub1'),
test.workpath('sub2'),
test.workpath('sub4'),
test.workpath('sub3'),
] + env_path.split(os.pathsep)
test.write('SConstruct', """
SConscript('%s')
env = Environment()
print WhereIs('xxx.exe')
print WhereIs('xxx.exe', %s)
print env.WhereIs('xxx.exe', %s)
print WhereIs('xxx.exe', %s)
print WhereIs('xxx.exe', %s)
print WhereIs('xxx.exe', %s, reject=%s)
env.Replace( XXXNAME='xxx.exe' )
print env.WhereIs( '$XXXNAME', %s )
""" % (subdir_SConscript,
repr(os.pathsep.join(pathdirs_1234)),
repr(os.pathsep.join(pathdirs_1243)),
repr(pathdirs_1234),
repr(pathdirs_1243),
repr(pathdirs_1243),
repr(sub4_xxx_exe),
repr(os.pathsep.join(pathdirs_1243)),
))
test.write(subdir_SConscript, """
env = Environment()
print WhereIs('xxx.exe')
print WhereIs('xxx.exe', %s)
print env.WhereIs('xxx.exe', %s)
print WhereIs('xxx.exe', %s)
print WhereIs('xxx.exe', %s)
""" % (repr(os.pathsep.join(pathdirs_1234)),
repr(os.pathsep.join(pathdirs_1243)),
repr(pathdirs_1234),
repr(pathdirs_1243),
))
os.environ['PATH'] = os.pathsep.join(pathdirs_1234)
expect = [ test.workpath(sub3_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe)
]
test.run(arguments = ".",
stdout = test.wrap_stdout(read_str = "\n".join(expect) + "\n",
build_str = "scons: `.' is up to date.\n"))
os.environ['PATH'] = os.pathsep.join(pathdirs_1243)
expect = [ test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe),
test.workpath(sub3_xxx_exe),
test.workpath(sub4_xxx_exe)
]
test.run(arguments = ".",
stdout = test.wrap_stdout(read_str = "\n".join(expect) + "\n",
build_str = "scons: `.' is up to date.\n"))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
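# For reference, a rough pure-Python analogue of the WhereIs() semantics
# exercised above: return the first regular, executable file with the given
# name on the search path, skipping directories, non-executable files, and
# anything listed in reject. Illustrative only -- not SCons' implementation.
def where_is(name, path, reject=()):
    if not isinstance(path, (list, tuple)):
        path = path.split(os.pathsep)
    if not isinstance(reject, (list, tuple)):
        reject = [reject]
    for directory in path:
        candidate = os.path.join(directory, name)
        if (os.path.isfile(candidate)
                and os.access(candidate, os.X_OK)
                and candidate not in reject):
            return candidate
    return None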
| EmanueleCannizzaro/scons | test/WhereIs.py | Python | mit | 4,794 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from six.moves import http_client
import keystone.conf
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import test_v3
CONF = keystone.conf.CONF
class TestTrustOperations(test_v3.RestfulTestCase):
"""Test module for create, read, update and delete operations on trusts.
This module is specific to tests for trust CRUD operations. All other tests
related to trusts that are authentication or authorization specific should
live in the keystone/tests/unit/test_v3_auth.py module.
"""
def setUp(self):
super(TestTrustOperations, self).setUp()
# create a trustee to delegate stuff to
self.trustee_user = unit.create_user(self.identity_api,
domain_id=self.domain_id)
self.trustee_user_id = self.trustee_user['id']
def test_create_trust_bad_request(self):
# The server returns a 403 Forbidden rather than a 400 Bad Request, see
# bug 1133435
self.post('/OS-TRUST/trusts', body={'trust': {}},
expected_status=http_client.FORBIDDEN)
def test_trust_crud(self):
# create a new trust
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
role_ids=[self.role_id])
r = self.post('/OS-TRUST/trusts', body={'trust': ref})
trust = self.assertValidTrustResponse(r, ref)
# get the trust
r = self.get(
'/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']})
self.assertValidTrustResponse(r, ref)
# validate roles on the trust
r = self.get(
'/OS-TRUST/trusts/%(trust_id)s/roles' % {
'trust_id': trust['id']})
roles = self.assertValidRoleListResponse(r, self.role)
self.assertIn(self.role['id'], [x['id'] for x in roles])
self.head(
'/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
'trust_id': trust['id'],
'role_id': self.role['id']},
expected_status=http_client.OK)
r = self.get(
'/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
'trust_id': trust['id'],
'role_id': self.role['id']})
self.assertValidRoleResponse(r, self.role)
# list all trusts
r = self.get('/OS-TRUST/trusts')
self.assertValidTrustListResponse(r, trust)
# trusts are immutable
self.patch(
'/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
body={'trust': ref},
expected_status=http_client.NOT_FOUND)
# delete the trust
self.delete(
'/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']})
# ensure the trust is not found
self.get(
'/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
expected_status=http_client.NOT_FOUND)
def test_list_trusts(self):
# create three trusts with the same trustor and trustee
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
impersonation=False,
expires=dict(minutes=1),
role_ids=[self.role_id])
for i in range(3):
ref['expires_at'] = datetime.datetime.utcnow().replace(
year=2032).strftime(unit.TIME_FORMAT)
r = self.post('/OS-TRUST/trusts', body={'trust': ref})
self.assertValidTrustResponse(r, ref)
# list all trusts
r = self.get('/OS-TRUST/trusts')
trusts = r.result['trusts']
self.assertEqual(3, len(trusts))
self.assertValidTrustListResponse(r)
# list all trusts for the trustor
r = self.get('/OS-TRUST/trusts?trustor_user_id=%s' %
self.user_id)
trusts = r.result['trusts']
self.assertEqual(3, len(trusts))
self.assertValidTrustListResponse(r)
        # list all trusts with the trustor's id given as the trustee;
        # expect an empty list
r = self.get('/OS-TRUST/trusts?trustee_user_id=%s' %
self.user_id)
trusts = r.result['trusts']
self.assertEqual(0, len(trusts))
# list all trusts as the trustee is forbidden
r = self.get('/OS-TRUST/trusts?trustee_user_id=%s' %
self.trustee_user_id,
expected_status=http_client.FORBIDDEN)
def test_delete_trust(self):
# create a trust
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
impersonation=False,
expires=dict(minutes=1),
role_ids=[self.role_id])
r = self.post('/OS-TRUST/trusts', body={'trust': ref})
trust = self.assertValidTrustResponse(r, ref)
# delete the trust
self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
'trust_id': trust['id']})
# ensure the trust isn't found
self.get('/OS-TRUST/trusts/%(trust_id)s' % {
'trust_id': trust['id']},
expected_status=http_client.NOT_FOUND)
def test_create_trust_without_trustee_returns_bad_request(self):
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
role_ids=[self.role_id])
# trustee_user_id is required to create a trust
del ref['trustee_user_id']
self.post('/OS-TRUST/trusts',
body={'trust': ref},
expected_status=http_client.BAD_REQUEST)
def test_create_trust_without_impersonation_returns_bad_request(self):
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
role_ids=[self.role_id])
# impersonation is required to create a trust
del ref['impersonation']
self.post('/OS-TRUST/trusts',
body={'trust': ref},
expected_status=http_client.BAD_REQUEST)
def test_create_trust_with_bad_remaining_uses_returns_bad_request(self):
        # negative numbers, strings, non-integers, and 0 are not valid values
for value in [-1, 0, "a bad value", 7.2]:
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
remaining_uses=value,
role_ids=[self.role_id])
self.post('/OS-TRUST/trusts',
body={'trust': ref},
expected_status=http_client.BAD_REQUEST)
def test_create_trust_with_non_existant_trustee_returns_not_found(self):
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=uuid.uuid4().hex,
project_id=self.project_id,
role_ids=[self.role_id])
self.post('/OS-TRUST/trusts', body={'trust': ref},
expected_status=http_client.NOT_FOUND)
def test_create_trust_with_trustee_as_trustor_returns_forbidden(self):
ref = unit.new_trust_ref(
trustor_user_id=self.trustee_user_id,
trustee_user_id=self.user_id,
project_id=self.project_id,
role_ids=[self.role_id])
# NOTE(lbragstad): This fails because the user making the request isn't
# the trustor defined in the request.
self.post('/OS-TRUST/trusts', body={'trust': ref},
expected_status=http_client.FORBIDDEN)
def test_create_trust_with_non_existant_project_returns_not_found(self):
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=uuid.uuid4().hex,
role_ids=[self.role_id])
self.post('/OS-TRUST/trusts', body={'trust': ref},
expected_status=http_client.NOT_FOUND)
def test_create_trust_with_non_existant_role_id_returns_not_found(self):
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
role_ids=[uuid.uuid4().hex])
self.post('/OS-TRUST/trusts', body={'trust': ref},
expected_status=http_client.NOT_FOUND)
def test_create_trust_with_non_existant_role_name_returns_not_found(self):
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
role_names=[uuid.uuid4().hex])
self.post('/OS-TRUST/trusts', body={'trust': ref},
expected_status=http_client.NOT_FOUND)
def test_validate_trust_scoped_token_against_v2(self):
# get a project-scoped token
auth_data = self.build_authentication_request(
user_id=self.default_domain_user['id'],
password=self.default_domain_user['password'],
project_id=self.default_domain_project_id)
token = self.get_requested_token(auth_data)
user = unit.new_user_ref(CONF.identity.default_domain_id)
trustee = self.identity_api.create_user(user)
# create a new trust
ref = unit.new_trust_ref(
trustor_user_id=self.default_domain_user['id'],
trustee_user_id=trustee['id'],
project_id=self.default_domain_project_id,
impersonation=False,
expires=dict(minutes=1),
role_ids=[self.role_id])
r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
trust = self.assertValidTrustResponse(r)
# get a v3 trust-scoped token as the trustee
auth_data = self.build_authentication_request(
user_id=trustee['id'],
password=user['password'],
trust_id=trust['id'])
r = self.v3_create_token(auth_data)
self.assertValidProjectScopedTokenResponse(
r, trustee)
token = r.headers.get('X-Subject-Token')
# now validate the v3 token with v2 API
path = '/v2.0/tokens/%s' % (token)
self.admin_request(
path=path,
token=self.get_admin_token(),
method='GET'
)
def test_v3_v2_intermix_trustee_not_in_default_domain_failed(self):
# get a project-scoped token
auth_data = self.build_authentication_request(
user_id=self.default_domain_user['id'],
password=self.default_domain_user['password'],
project_id=self.default_domain_project_id)
token = self.get_requested_token(auth_data)
# create a new trust
ref = unit.new_trust_ref(
trustor_user_id=self.default_domain_user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.default_domain_project_id,
impersonation=False,
expires=dict(minutes=1),
role_ids=[self.role_id])
r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
trust = self.assertValidTrustResponse(r)
# get a trust-scoped token as the trustee
auth_data = self.build_authentication_request(
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
r = self.v3_create_token(auth_data)
self.assertValidProjectScopedTokenResponse(
r, self.trustee_user)
token = r.headers.get('X-Subject-Token')
# now validate the v3 token with v2 API
path = '/v2.0/tokens/%s' % (token)
self.admin_request(
path=path, token=self.get_admin_token(),
method='GET', expected_status=http_client.UNAUTHORIZED)
def test_v3_v2_intermix_project_not_in_default_domain_failed(self):
# create a trustee in default domain to delegate stuff to
trustee_user = unit.create_user(self.identity_api,
domain_id=test_v3.DEFAULT_DOMAIN_ID)
trustee_user_id = trustee_user['id']
# create a new trust
ref = unit.new_trust_ref(
trustor_user_id=self.default_domain_user_id,
trustee_user_id=trustee_user_id,
project_id=self.project_id,
impersonation=False,
expires=dict(minutes=1),
role_ids=[self.role_id])
# get a project-scoped token as the default_domain_user
auth_data = self.build_authentication_request(
user_id=self.default_domain_user['id'],
password=self.default_domain_user['password'],
project_id=self.default_domain_project_id)
token = self.get_requested_token(auth_data)
r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
trust = self.assertValidTrustResponse(r)
# get a trust-scoped token as the trustee
auth_data = self.build_authentication_request(
user_id=trustee_user['id'],
password=trustee_user['password'],
trust_id=trust['id'])
r = self.v3_create_token(auth_data)
self.assertValidProjectScopedTokenResponse(r, trustee_user)
token = r.headers.get('X-Subject-Token')
# ensure the token is invalid against v2
path = '/v2.0/tokens/%s' % (token)
self.admin_request(
path=path, token=self.get_admin_token(),
method='GET', expected_status=http_client.UNAUTHORIZED)
def test_exercise_trust_scoped_token_without_impersonation(self):
# create a new trust
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
impersonation=False,
expires=dict(minutes=1),
role_ids=[self.role_id])
resp = self.post('/OS-TRUST/trusts', body={'trust': ref})
trust = self.assertValidTrustResponse(resp)
# get a trust-scoped token as the trustee
auth_data = self.build_authentication_request(
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
resp = self.v3_create_token(auth_data)
resp_body = resp.json_body['token']
self.assertValidProjectScopedTokenResponse(resp,
self.trustee_user)
self.assertEqual(self.trustee_user['id'], resp_body['user']['id'])
self.assertEqual(self.trustee_user['name'], resp_body['user']['name'])
self.assertEqual(self.domain['id'], resp_body['user']['domain']['id'])
self.assertEqual(self.domain['name'],
resp_body['user']['domain']['name'])
self.assertEqual(self.project['id'], resp_body['project']['id'])
self.assertEqual(self.project['name'], resp_body['project']['name'])
def test_exercise_trust_scoped_token_with_impersonation(self):
# create a new trust
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
impersonation=True,
expires=dict(minutes=1),
role_ids=[self.role_id])
resp = self.post('/OS-TRUST/trusts', body={'trust': ref})
trust = self.assertValidTrustResponse(resp)
# get a trust-scoped token as the trustee
auth_data = self.build_authentication_request(
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
resp = self.v3_create_token(auth_data)
resp_body = resp.json_body['token']
self.assertValidProjectScopedTokenResponse(resp, self.user)
self.assertEqual(self.user['id'], resp_body['user']['id'])
self.assertEqual(self.user['name'], resp_body['user']['name'])
self.assertEqual(self.domain['id'], resp_body['user']['domain']['id'])
self.assertEqual(self.domain['name'],
resp_body['user']['domain']['name'])
self.assertEqual(self.project['id'], resp_body['project']['id'])
self.assertEqual(self.project['name'], resp_body['project']['name'])
def test_forbidden_trust_impersonation_in_redelegation(self):
"""Test forbiddance of impersonation in trust redelegation.
Check that trustee not allowed to create a trust (with impersonation
set to true) from a redelegated trust (with impersonation set to false)
"""
# create trust
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
impersonation=False,
role_ids=[self.role_id],
allow_redelegation=True)
resp = self.post('/OS-TRUST/trusts', body={'trust': ref})
trust = self.assertValidTrustResponse(resp)
auth_data = self.build_authentication_request(
user_id=self.trustee_user_id,
password=self.trustee_user['password'],
trust_id=trust['id'])
resp = self.v3_create_token(auth_data)
# create third-party user, which will be trustee in trust created from
# redelegated trust
third_party_trustee = unit.create_user(self.identity_api,
domain_id=self.domain_id)
third_party_trustee_id = third_party_trustee['id']
# create trust from redelegated trust
ref = unit.new_trust_ref(
trustor_user_id=self.trustee_user_id,
trustee_user_id=third_party_trustee_id,
project_id=self.project_id,
impersonation=True,
role_ids=[self.role_id])
ref['redelegated_trust_id'] = trust['id']
self.admin_request(path='/v3/OS-TRUST/trusts',
body={'trust': ref},
token=resp.headers.get('X-Subject-Token'),
method='POST',
expected_status=http_client.FORBIDDEN)
def test_trust_deleted_when_user_deleted(self):
# create trust
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
impersonation=False,
role_ids=[self.role_id],
allow_redelegation=True)
resp = self.post('/OS-TRUST/trusts', body={'trust': ref})
trust = self.assertValidTrustResponse(resp)
# list all trusts
r = self.get('/OS-TRUST/trusts')
self.assertEqual(1, len(r.result['trusts']))
        # deleting the trustee will also delete the trust
self.delete(
'/users/%(user_id)s' % {'user_id': trust['trustee_user_id']})
self.get(
'/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
expected_status=http_client.NOT_FOUND)
# create another user as the new trustee
trustee_user = unit.create_user(self.identity_api,
domain_id=self.domain_id)
trustee_user_id = trustee_user['id']
# create the trust again
ref['trustee_user_id'] = trustee_user_id
resp = self.post('/OS-TRUST/trusts', body={'trust': ref})
trust = self.assertValidTrustResponse(resp)
r = self.get('/OS-TRUST/trusts')
self.assertEqual(1, len(r.result['trusts']))
        # deleting the trustor will also delete the trust
self.delete(
'/users/%(user_id)s' % {'user_id': trust['trustor_user_id']})
# call the backend method directly to bypass authentication since the
# user has been deleted.
self.assertRaises(exception.TrustNotFound,
self.trust_api.get_trust,
trust['id'])
def test_trust_deleted_when_project_deleted(self):
# create trust
ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
impersonation=False,
role_ids=[self.role_id],
allow_redelegation=True)
resp = self.post('/OS-TRUST/trusts', body={'trust': ref})
trust = self.assertValidTrustResponse(resp)
# list all trusts
r = self.get('/OS-TRUST/trusts')
self.assertEqual(1, len(r.result['trusts']))
        # deleting the project will also delete the trust
self.delete(
'/projects/%(project_id)s' % {'project_id': trust['project_id']})
# call the backend method directly to bypass authentication since the
# user no longer has the assignment on the project.
self.assertRaises(exception.TrustNotFound,
self.trust_api.get_trust,
trust['id'])
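# For reference, a sketch of the JSON body the tests above POST to
# /OS-TRUST/trusts. Field names mirror the unit.new_trust_ref() calls;
# the authoritative schema is the Keystone v3 OS-TRUST API.
import json

example_trust_body = {
    'trust': {
        'trustor_user_id': 'TRUSTOR_USER_ID',
        'trustee_user_id': 'TRUSTEE_USER_ID',
        'project_id': 'PROJECT_ID',
        'impersonation': False,
        'expires_at': '2032-01-01T00:00:00.000000Z',
        'roles': [{'id': 'ROLE_ID'}],
    }
}
print(json.dumps(example_trust_body, indent=4))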
| rajalokan/keystone | keystone/tests/unit/test_v3_trust.py | Python | apache-2.0 | 21,901 |
# -*- coding: utf-8 -*-
# Copyright (c) 2007, 2008, Benoît Chesneau
# Copyright (c) 2007 Simon Willison, original work on django-openid
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# * notice, this list of conditions and the following disclaimer in the
# * documentation and/or other materials provided with the
# * distribution. Neither the name of the <ORGANIZATION> nor the names
# * of its contributors may be used to endorse or promote products
# * derived from this software without specific prior written
# * permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cgi
import datetime
from django.http import HttpResponseRedirect, Http404
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.template import RequestContext, Context
from django.conf import settings as django_settings
from askbot.conf import settings as askbot_settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate
from django.core.urlresolvers import reverse
from django.forms.util import ErrorList
from django.shortcuts import render
from django.template.loader import get_template
from django.views.decorators import csrf
from django.utils.encoding import smart_unicode
from askbot.utils.functions import generate_random_key
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils import simplejson
from askbot.mail.messages import EmailValidation
from askbot.utils import decorators as askbot_decorators
from askbot.utils.functions import format_setting_name
from askbot.utils.html import site_url
from recaptcha_works.decorators import fix_recaptcha_remote_ip
from askbot.deps.django_authopenid.ldap_auth import ldap_create_user
from askbot.deps.django_authopenid.ldap_auth import ldap_authenticate
from askbot.utils.loading import load_module
from sanction.client import Client as OAuth2Client
from urlparse import urlparse
from openid.consumer.consumer import Consumer, \
SUCCESS, CANCEL, FAILURE, SETUP_NEEDED
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import sreg
# needed for some linux distributions like debian
try:
from openid.yadis import xri
except ImportError:
from yadis import xri
try:
from xmlrpclib import Fault as WpFault
from wordpress_xmlrpc import Client
from wordpress_xmlrpc.methods.users import GetUserInfo
except ImportError:
pass
import urllib
from askbot import forms as askbot_forms
from askbot.deps.django_authopenid import util
from askbot.deps.django_authopenid.models import UserAssociation, UserEmailVerifier
from askbot.deps.django_authopenid import forms
from askbot.deps.django_authopenid.backends import AuthBackend
import logging
from askbot.utils.forms import get_next_url
from askbot.utils.http import get_request_info
from askbot.signals import user_logged_in, user_registered
def create_authenticated_user_account(
username=None, email=None, password=None,
user_identifier=None, login_provider_name=None
):
"""creates a user account, user association with
the login method and the the default email subscriptions
"""
user = User.objects.create_user(username, email)
user_registered.send(None, user=user)
logging.debug('creating new openid user association for %s', username)
if password:
user.set_password(password)
user.save()
else:
UserAssociation(
openid_url = user_identifier,
user = user,
provider_name = login_provider_name,
last_used_timestamp = datetime.datetime.now()
).save()
subscribe_form = askbot_forms.SimpleEmailSubscribeForm({'subscribe': 'y'})
subscribe_form.full_clean()
logging.debug('saving email feed settings')
subscribe_form.save(user)
logging.debug('logging the user in')
user = authenticate(method='force', user_id=user.id)
if user is None:
        error_message = 'please make sure that ' + \
            'askbot.deps.django_authopenid.backends.AuthBackend ' + \
            'is in your settings.AUTHENTICATION_BACKENDS'
raise Exception(error_message)
return user
def cleanup_post_register_session(request):
"""delete keys from session after registration is complete"""
keys = (
'user_identifier',
'login_provider_name',
'username',
'email',
'password',
'validation_code'
)
for key in keys:
if key in request.session:
del request.session[key]
#todo: decouple from askbot
def login(request, user):
from django.contrib.auth import login as _login
# get old session key
session_key = request.session.session_key
# login and get new session key
_login(request, user)
# send signal with old session key as argument
logging.debug('logged in user %s with session key %s' % (user.username, session_key))
#todo: move to auth app
user_logged_in.send(
request=request,
user=user,
session_key=session_key,
sender=None
)
#todo: uncouple this from askbot
def logout(request):
from django.contrib.auth import logout as _logout#for login I've added wrapper below - called login
_logout(request)
def logout_page(request):
data = {
'page_class': 'meta',
'have_federated_login_methods': util.have_enabled_federated_login_methods()
}
return render(request, 'authopenid/logout.html', Context(data))
def get_url_host(request):
if request.is_secure():
protocol = 'https'
else:
protocol = 'http'
host = escape(request.get_host())
return '%s://%s' % (protocol, host)
def get_full_url(request):
return get_url_host(request) + request.get_full_path()
def ask_openid(
request,
openid_url,
redirect_to,
on_failure=None,
sreg_request=None
):
""" basic function to ask openid and return response """
on_failure = on_failure or signin_failure
trust_root = getattr(
django_settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/'
)
if xri.identifierScheme(openid_url) == 'XRI' and getattr(
django_settings, 'OPENID_DISALLOW_INAMES', False
):
msg = _("i-names are not supported")
logging.debug('openid failed because i-names are not supported')
return on_failure(request, msg)
consumer = Consumer(request.session, util.DjangoOpenIDStore())
try:
auth_request = consumer.begin(openid_url)
except DiscoveryFailure:
openid_url = cgi.escape(openid_url)
msg = _(u"OpenID %(openid_url)s is invalid" % {'openid_url':openid_url})
logging.debug(msg)
return on_failure(request, msg)
logging.debug('openid seemed to work')
if sreg_request:
        logging.debug('adding the sreg extension to the auth request')
auth_request.addExtension(sreg_request)
redirect_url = auth_request.redirectURL(trust_root, redirect_to)
logging.debug('redirecting to %s' % redirect_url)
return HttpResponseRedirect(redirect_url)
def complete(request, on_success=None, on_failure=None, return_to=None):
""" complete openid signin """
assert(on_success is not None)
assert(on_failure is not None)
logging.debug('in askbot.deps.django_authopenid.complete')
consumer = Consumer(request.session, util.DjangoOpenIDStore())
# make sure params are encoded in utf8
params = dict((k,smart_unicode(v)) for k, v in request.GET.items())
openid_response = consumer.complete(params, return_to)
try:
logging.debug(u'returned openid parameters were: %s' % unicode(params))
except Exception, e:
logging.critical(u'fix logging statement above ' + unicode(e))
if openid_response.status == SUCCESS:
logging.debug('openid response status is SUCCESS')
return on_success(
request,
openid_response.identity_url,
openid_response
)
elif openid_response.status == CANCEL:
logging.debug('CANCEL')
return on_failure(request, 'The request was canceled')
elif openid_response.status == FAILURE:
logging.debug('FAILURE')
return on_failure(request, openid_response.message)
elif openid_response.status == SETUP_NEEDED:
logging.debug('SETUP NEEDED')
return on_failure(request, 'Setup needed')
else:
logging.debug('BAD OPENID STATUS')
assert False, "Bad openid status: %s" % openid_response.status
def not_authenticated(func):
""" decorator that redirect user to next page if
he/she is already logged in."""
def decorated(request, *args, **kwargs):
if request.user.is_authenticated():
return HttpResponseRedirect(get_next_url(request))
return func(request, *args, **kwargs)
return decorated
def complete_oauth2_signin(request):
if 'next_url' in request.session:
next_url = request.session['next_url']
del request.session['next_url']
else:
next_url = reverse('index')
if 'error' in request.GET:
return HttpResponseRedirect(reverse('index'))
csrf_token = request.GET.get('state', None)
oauth2_csrf_token = request.session.pop('oauth2_csrf_token', None)
if csrf_token is None or csrf_token != oauth2_csrf_token:
return HttpResponseBadRequest()
providers = util.get_enabled_login_providers()
provider_name = request.session.pop('provider_name')
params = providers[provider_name]
assert(params['type'] == 'oauth2')
name_token = format_setting_name(provider_name)
client_id = getattr(
askbot_settings,
name_token + '_KEY',
)
client_secret = getattr(
askbot_settings,
name_token + '_SECRET',
)
client = OAuth2Client(
token_endpoint=params['token_endpoint'],
resource_endpoint=params['resource_endpoint'],
redirect_uri=site_url(reverse('user_complete_oauth2_signin')),
client_id=client_id,
client_secret=client_secret,
token_transport=params.get('token_transport', None)
)
client.request_token(
code=request.GET['code'],
parser=params.get('response_parser', None)
)
#todo: possibly set additional parameters here
user_id = params['get_user_id_function'](client)
user = authenticate(
oauth_user_id=user_id,
provider_name=provider_name,
method='oauth'
)
logging.debug('finalizing oauth signin')
request.session['email'] = ''#todo: pull from profile
request.session['username'] = ''#todo: pull from profile
if provider_name == 'facebook':
profile = client.request("me")
request.session['email'] = profile.get('email', '')
request.session['username'] = profile.get('username', '')
elif provider_name == 'google-plus' and user is None:
#attempt to migrate user from the old OpenId protocol
openid_url = getattr(client, 'openid_id', None)
if openid_url:
            user = authenticate(openid_url=openid_url)
            if user:
                # log only once a user is found: before authenticate() the
                # local `user` is None, so user.id would raise here
                logging.critical(
                    'trying to migrate user %d from OpenID %s to g-plus %s',
                    user.id, str(openid_url), str(user_id)
                )
                util.google_migrate_from_openid_to_gplus(openid_url, user_id)
                logging.critical('migrated login from OpenID to g-plus')
return finalize_generic_signin(
request = request,
user = user,
user_identifier = user_id,
login_provider_name = provider_name,
redirect_url = next_url
)
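# The view above is a standard OAuth2 authorization-code callback: verify
# the returned "state" value against the token stashed in the session,
# then exchange the code for an access token. A minimal framework-agnostic
# sketch of the CSRF/state check (illustrative names, not askbot APIs):
def oauth2_state_is_valid(request_params, session):
    returned_state = request_params.get('state')
    expected_state = session.pop('oauth2_csrf_token', None)
    # both values must be present and equal, otherwise reject the callback
    return expected_state is not None and returned_state == expected_state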
def complete_oauth_signin(request):
if 'next_url' in request.session:
next_url = request.session['next_url']
del request.session['next_url']
else:
next_url = reverse('index')
if 'denied' in request.GET:
return HttpResponseRedirect(next_url)
if 'oauth_problem' in request.GET:
return HttpResponseRedirect(next_url)
try:
oauth_token = request.GET['oauth_token']
logging.debug('have token %s' % oauth_token)
oauth_verifier = request.GET['oauth_verifier']
logging.debug('have verifier %s' % oauth_verifier)
session_oauth_token = request.session['oauth_token']
logging.debug('have token from session')
assert(oauth_token == session_oauth_token['oauth_token'])
oauth_provider_name = request.session['oauth_provider_name']
logging.debug('have saved provider name')
del request.session['oauth_provider_name']
oauth = util.OAuthConnection(oauth_provider_name)
user_id = oauth.get_user_id(
oauth_token=session_oauth_token,
oauth_verifier=oauth_verifier
)
logging.debug('have %s user id=%s' % (oauth_provider_name, user_id))
except Exception, e:
logging.critical(e)
msg = _('Sorry, there was some problem '
'connecting to the login provider, please try again '
'or use another login method'
)
request.user.message_set.create(message = msg)
return HttpResponseRedirect(next_url)
else:
user = authenticate(
oauth_user_id=user_id,
provider_name=oauth_provider_name,
method='oauth'
)
logging.debug('finalizing oauth signin')
request.session['email'] = ''#todo: pull from profile
request.session['username'] = ''#todo: pull from profile
return finalize_generic_signin(
request=request,
user=user,
user_identifier=user_id,
login_provider_name=oauth_provider_name,
redirect_url=next_url
)
#@not_authenticated
@csrf.csrf_protect
def signin(request, template_name='authopenid/signin.html'):
"""
    signin page. It manages the legacy authentication (user/password)
    and openid authentication
    url: /signin/
    template: authopenid/signin.html
"""
logging.debug('in signin view')
on_failure = signin_failure
#we need a special priority on where to redirect on successful login
#here:
#1) url parameter "next" - if explicitly set
#2) url from django setting LOGIN_REDIRECT_URL
#3) home page of the forum
login_redirect_url = getattr(django_settings, 'LOGIN_REDIRECT_URL', None)
next_url = get_next_url(request, default = login_redirect_url)
logging.debug('next url is %s' % next_url)
if askbot_settings.ALLOW_ADD_REMOVE_LOGIN_METHODS == False \
and request.user.is_authenticated():
return HttpResponseRedirect(next_url)
if next_url == reverse('user_signin'):
next_url = '%(next)s?next=%(next)s' % {'next': next_url}
login_form = forms.LoginForm(initial = {'next': next_url})
#todo: get next url make it sticky if next is 'user_signin'
if request.method == 'POST':
login_form = forms.LoginForm(request.POST)
if login_form.is_valid():
provider_name = login_form.cleaned_data['login_provider_name']
if login_form.cleaned_data['login_type'] == 'password':
password_action = login_form.cleaned_data['password_action']
if askbot_settings.USE_LDAP_FOR_PASSWORD_LOGIN:
assert(password_action == 'login')
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
user = authenticate(
username=username,
password=password,
method = 'ldap'
)
if user:
login(request, user)
return HttpResponseRedirect(next_url)
else:
#try to login again via LDAP
user_info = ldap_authenticate(username, password)
if user_info['success']:
if askbot_settings.LDAP_AUTOCREATE_USERS:
#create new user or
user = ldap_create_user(user_info).user
user = authenticate(method='force', user_id=user.id)
assert(user is not None)
login(request, user)
return HttpResponseRedirect(next_url)
else:
#continue with proper registration
ldap_username = user_info['ldap_username']
request.session['email'] = user_info['email']
request.session['ldap_user_info'] = user_info
if askbot_settings.AUTOFILL_USER_DATA:
request.session['username'] = ldap_username
request.session['first_name'] = \
user_info['first_name']
request.session['last_name'] = \
user_info['last_name']
return finalize_generic_signin(
request,
login_provider_name = 'ldap',
user_identifier = ldap_username + '@ldap',
redirect_url = next_url
)
else:
auth_fail_func_path = getattr(
django_settings,
'LDAP_AUTHENTICATE_FAILURE_FUNCTION',
None
)
if auth_fail_func_path:
auth_fail_func = load_module(auth_fail_func_path)
auth_fail_func(user_info, login_form)
else:
login_form.set_password_login_error()
#return HttpResponseRedirect(request.path)
else:
if password_action == 'login':
user = authenticate(
username = login_form.cleaned_data['username'],
password = login_form.cleaned_data['password'],
provider_name = provider_name,
method = 'password'
)
if user is None:
login_form.set_password_login_error()
else:
login(request, user)
#todo: here we might need to set cookies
#for external login sites
return HttpResponseRedirect(next_url)
elif password_action == 'change_password':
if request.user.is_authenticated():
new_password = \
login_form.cleaned_data['new_password']
AuthBackend.set_password(
user=request.user,
password=new_password,
provider_name=provider_name
)
request.user.message_set.create(
message = _('Your new password is saved')
)
return HttpResponseRedirect(next_url)
else:
logging.critical(
'unknown password action %s' % password_action
)
raise Http404
elif login_form.cleaned_data['login_type'] == 'mozilla-persona':
assertion = login_form.cleaned_data['persona_assertion']
email = util.mozilla_persona_get_email_from_assertion(assertion)
if email:
user = authenticate(email=email, method='mozilla-persona')
if user is None:
user = authenticate(email=email, method='valid_email')
if user:
#create mozilla persona user association
#because we trust the given email address belongs
#to the same user
UserAssociation(
openid_url=email,
user=user,
provider_name='mozilla-persona',
last_used_timestamp=datetime.datetime.now()
).save()
if user:
login(request, user)
return HttpResponseRedirect(next_url)
#else - create new user account
#pre-fill email address with persona registration
request.session['email'] = email
return finalize_generic_signin(
request,
login_provider_name = 'mozilla-persona',
user_identifier = email,
redirect_url = next_url
)
elif login_form.cleaned_data['login_type'] == 'openid':
#initiate communication process
logging.debug('processing signin with openid submission')
#todo: make a simple-use wrapper for openid protocol
sreg_req = sreg.SRegRequest(optional=['nickname', 'email'])
redirect_to = "%s%s?%s" % (
get_url_host(request),
reverse('user_complete_signin'),
urllib.urlencode({'next':next_url})
)
return ask_openid(
request,
login_form.cleaned_data['openid_url'],
redirect_to,
on_failure=signin_failure,
sreg_request=sreg_req
)
elif login_form.cleaned_data['login_type'] == 'oauth':
try:
                    #this url may need to have "next" piggybacked onto it
connection = util.OAuthConnection(
provider_name,
callback_url=reverse('user_complete_oauth_signin')
)
connection.start()
request.session['oauth_token'] = connection.get_token()
request.session['oauth_provider_name'] = provider_name
request.session['next_url'] = next_url#special case for oauth
oauth_url = connection.get_auth_url(login_only=True)
return HttpResponseRedirect(oauth_url)
except util.OAuthError, e:
logging.critical(unicode(e))
msg = _('Unfortunately, there was some problem when '
'connecting to %(provider)s, please try again '
'or use another provider'
) % {'provider': provider_name}
request.user.message_set.create(message=msg)
elif login_form.cleaned_data['login_type'] == 'oauth2':
try:
csrf_token = generate_random_key(length=32)
redirect_url = util.get_oauth2_starter_url(provider_name, csrf_token)
request.session['oauth2_csrf_token'] = csrf_token
request.session['provider_name'] = provider_name
request.session['next_url'] = next_url
return HttpResponseRedirect(redirect_url)
except util.OAuthError, e:
logging.critical(unicode(e))
msg = _('Unfortunately, there was some problem when '
'connecting to %(provider)s, please try again '
'or use another provider'
) % {'provider': provider_name}
request.user.message_set.create(message=msg)
elif login_form.cleaned_data['login_type'] == 'wordpress_site':
                #here wordpress_site means a self-hosted wordpress blog, not a wordpress.com blog
wp = Client(
askbot_settings.WORDPRESS_SITE_URL,
login_form.cleaned_data['username'],
login_form.cleaned_data['password']
)
try:
wp_user = wp.call(GetUserInfo())
custom_wp_openid_url = '%s?user_id=%s' % (wp.url, wp_user.user_id)
user = authenticate(
method = 'wordpress_site',
wordpress_url = wp.url,
wp_user_id = wp_user.user_id
)
return finalize_generic_signin(
request = request,
user = user,
user_identifier = custom_wp_openid_url,
login_provider_name = provider_name,
redirect_url = next_url
)
except WpFault, e:
logging.critical(unicode(e))
msg = _('The login password combination was not correct')
request.user.message_set.create(message = msg)
else:
#raise 500 error - unknown login type
pass
else:
logging.debug('login form is not valid')
logging.debug(login_form.errors)
logging.debug(request.REQUEST)
if request.method == 'GET' and request.user.is_authenticated():
view_subtype = 'change_openid'
else:
view_subtype = 'default'
return show_signin_view(
request,
login_form = login_form,
view_subtype = view_subtype,
template_name=template_name
)
@csrf.csrf_protect
def show_signin_view(
request,
login_form = None,
account_recovery_form = None,
account_recovery_message = None,
sticky = False,
view_subtype = 'default',
template_name='authopenid/signin.html'
):
"""url-less utility function that populates
context of template 'authopenid/signin.html'
and returns its rendered output
"""
allowed_subtypes = (
'default', 'add_openid',
'email_sent', 'change_openid',
'bad_key'
)
assert(view_subtype in allowed_subtypes)
if sticky:
next_url = reverse('user_signin')
else:
next_url = get_next_url(request)
if login_form is None:
login_form = forms.LoginForm(initial = {'next': next_url})
if account_recovery_form is None:
account_recovery_form = forms.AccountRecoveryForm()#initial = initial_data)
#if request is GET
if request.method == 'GET':
logging.debug('request method was GET')
        #todo: this stuff must be executed on some signal
#because askbot should have nothing to do with the login app
from askbot.models import AnonymousQuestion as AQ
session_key = request.session.session_key
logging.debug('retrieving anonymously posted question associated with session %s' % session_key)
qlist = AQ.objects.filter(session_key=session_key).order_by('-added_at')
if len(qlist) > 0:
question = qlist[0]
else:
question = None
from askbot.models import AnonymousAnswer as AA
session_key = request.session.session_key
logging.debug('retrieving posted answer associated with session %s' % session_key)
alist = AA.objects.filter(session_key=session_key).order_by('-added_at')
if len(alist) > 0:
answer = alist[0]
else:
answer = None
if request.user.is_authenticated():
existing_login_methods = UserAssociation.objects.filter(user = request.user)
#annotate objects with extra data
providers = util.get_enabled_login_providers()
for login_method in existing_login_methods:
try:
provider_data = providers[login_method.provider_name]
if provider_data['type'] == 'password':
#only external password logins will not be deletable
#this is because users with those can lose access to their accounts permanently
login_method.is_deletable = provider_data.get('password_changeable', False)
else:
login_method.is_deletable = True
except KeyError:
logging.critical(
'login method %s is no longer available '
'please delete records for this login method '
'from the UserAssociation table',
login_method.provider_name
)
continue
if view_subtype == 'default':
page_title = _('Please click any of the icons below to sign in')
elif view_subtype == 'email_sent':
page_title = _('Account recovery email sent')
elif view_subtype == 'change_openid':
if len(existing_login_methods) == 0:
page_title = _('Please add one or more login methods.')
else:
page_title = _('If you wish, please add, remove or re-validate your login methods')
elif view_subtype == 'add_openid':
page_title = _('Please wait a second! Your account is recovered, but ...')
elif view_subtype == 'bad_key':
page_title = _('Sorry, this account recovery key has expired or is invalid')
logging.debug('showing signin view')
data = {
'page_class': 'openid-signin',
'view_subtype': view_subtype, #add_openid|default
'page_title': page_title,
'question': question,
'answer': answer,
'login_form': login_form,
'use_password_login': util.use_password_login(),
'account_recovery_form': account_recovery_form,
'openid_error_message': request.REQUEST.get('msg',''),
'account_recovery_message': account_recovery_message,
}
major_login_providers = util.get_enabled_major_login_providers()
minor_login_providers = util.get_enabled_minor_login_providers()
#determine if we are only using password login
active_provider_names = [p['name'] for p in major_login_providers.values()]
active_provider_names.extend([p['name'] for p in minor_login_providers.values()])
have_buttons = True
if (len(active_provider_names) == 1 and active_provider_names[0] == 'local'):
if askbot_settings.SIGNIN_ALWAYS_SHOW_LOCAL_LOGIN == True:
#in this case the form is not using javascript, so set initial values
#here
have_buttons = False
login_form.initial['login_provider_name'] = 'local'
if request.user.is_authenticated():
login_form.initial['password_action'] = 'change_password'
else:
login_form.initial['password_action'] = 'login'
data['have_buttons'] = have_buttons
if request.user.is_authenticated():
data['existing_login_methods'] = existing_login_methods
active_provider_names = [
item.provider_name for item in existing_login_methods
]
util.set_login_provider_tooltips(
major_login_providers,
active_provider_names = active_provider_names
)
util.set_login_provider_tooltips(
minor_login_providers,
active_provider_names = active_provider_names
)
data['major_login_providers'] = major_login_providers.values()
data['minor_login_providers'] = minor_login_providers.values()
return render(request, template_name, Context(data))
@csrf.csrf_protect
@askbot_decorators.post_only
@askbot_decorators.ajax_login_required
def change_password(request):
form = forms.ChangePasswordForm(request.POST)
data = dict()
if form.is_valid():
request.user.set_password(form.cleaned_data['new_password'])
request.user.save()
data['message'] = _('Your new password is saved')
else:
data['errors'] = form.errors
return HttpResponse(simplejson.dumps(data), content_type='application/json')
@login_required
def delete_login_method(request):
if askbot_settings.ALLOW_ADD_REMOVE_LOGIN_METHODS == False:
raise Http404
if request.is_ajax() and request.method == 'POST':
provider_name = request.POST['provider_name']
try:
login_method = UserAssociation.objects.get(
user = request.user,
provider_name = provider_name
)
login_method.delete()
return HttpResponse('', mimetype = 'application/json')
except UserAssociation.DoesNotExist:
#error response
            message = _('Login method %(provider_name)s does not exist') \
                % {'provider_name': provider_name}
            return HttpResponse(message, status=500, mimetype='application/json')
except UserAssociation.MultipleObjectsReturned:
            logging.critical(
                'have multiple %(provider)s logins for user %(id)s'
                % {'provider': provider_name, 'id': request.user.id}
            )
message = _('Oops, sorry - there was some error - please try again')
return HttpResponse(message, status=500, mimetype = 'application/json')
else:
raise Http404
def complete_signin(request):
""" in case of complete signin with openid """
logging.debug('')#blank log just for the trace
return complete(
request,
on_success = signin_success,
on_failure = signin_failure,
return_to = get_url_host(request) + reverse('user_complete_signin')
)
def signin_success(request, identity_url, openid_response):
"""
    this is not a view and has no url pointing to it;
    this function is called when the OpenID provider returns
    a successful response to user authentication.
    It does the actual authentication in the Django site and
    redirects to the registration page, if necessary,
    or adds another login method.
"""
logging.debug('')
openid_data = util.from_openid_response(openid_response) #create janrain OpenID object
request.session['openid'] = openid_data
openid_url = str(openid_data)
user = authenticate(
openid_url = openid_url,
method = 'openid'
)
next_url = get_next_url(request)
provider_name = util.get_provider_name(openid_url)
request.session['email'] = openid_data.sreg.get('email', '')
request.session['username'] = openid_data.sreg.get('nickname', '')
return finalize_generic_signin(
request = request,
user = user,
user_identifier = openid_url,
login_provider_name = provider_name,
redirect_url = next_url
)
def finalize_generic_signin(
request = None,
user = None,
login_provider_name = None,
user_identifier = None,
redirect_url = None
):
"""non-view function
generic signin, run after all protocol-dependent details
have been resolved
"""
if 'in_recovery' in request.session:
del request.session['in_recovery']
redirect_url = getattr(django_settings, 'LOGIN_REDIRECT_URL', None)
if redirect_url is None:
redirect_url = reverse('questions')
if request.user.is_authenticated():
#this branch is for adding a new association
if user is None:
try:
#see if currently logged in user has login with the given provider
assoc = UserAssociation.objects.get(
user=request.user,
provider_name=login_provider_name
)
logging.info('switching account or open id changed???')
#did openid url change? or we are dealing with a brand new open id?
message = _(
'If you are trying to sign in to another account, '
'please sign out first. Otherwise, please report the incident '
'to the site administrator.'
)
request.user.message_set.create(message=message)
return HttpResponseRedirect(redirect_url)
except UserAssociation.DoesNotExist:
#register new association
UserAssociation(
user=request.user,
provider_name=login_provider_name,
openid_url=user_identifier,
last_used_timestamp=datetime.datetime.now()
).save()
return HttpResponseRedirect(redirect_url)
elif user != request.user:
#prevent theft of account by another pre-existing user
logging.critical(
'possible account theft attempt by %s,%d to %s %d' % \
(
request.user.username,
request.user.id,
user.username,
user.id
)
)
logout(request)#log out current user
login(request, user)#login freshly authenticated user
return HttpResponseRedirect(redirect_url)
else:
#user just checks if another login still works
msg = _('Your %(provider)s login works fine') % \
{'provider': login_provider_name}
request.user.message_set.create(message = msg)
return HttpResponseRedirect(redirect_url)
elif user:
#login branch
login(request, user)
logging.debug('login success')
return HttpResponseRedirect(redirect_url)
else:
#need to register
request.method = 'GET'#this is not a good thing to do
#but necessary at the moment to reuse the register()
#method
return register(
request,
login_provider_name=login_provider_name,
user_identifier=user_identifier
)
@not_authenticated
@csrf.csrf_protect
@fix_recaptcha_remote_ip
def register(request, login_provider_name=None, user_identifier=None):
"""
    this function is used via its own url with request.method=POST,
    or as a simple function call from "finalize_generic_signin",
    in which case request.method must be 'GET',
    and the login_provider_name and user_identifier arguments must not be None
user_identifier will be stored in the UserAssociation as openid_url
login_provider_name - as provider_name
this function may need to be refactored to simplify the usage pattern
template : authopenid/complete.html
"""
logging.debug('')
next_url = get_next_url(request)
user = None
username = request.session.get('username', '')
email = request.session.get('email', '')
logging.debug('request method is %s' % request.method)
form_class = forms.get_registration_form_class()
register_form = form_class(
initial={
'next': next_url,
'username': request.session.get('username', ''),
'email': request.session.get('email', ''),
}
)
if request.method == 'GET':
assert(login_provider_name is not None)
assert(user_identifier is not None)
#store this data into the session
#to persist for the post request
request.session['login_provider_name'] = login_provider_name
request.session['user_identifier'] = user_identifier
elif request.method == 'POST':
if 'login_provider_name' not in request.session \
or 'user_identifier' not in request.session:
logging.critical('illegal attempt to register')
return HttpResponseRedirect(reverse('user_signin'))
#load this data from the session
user_identifier = request.session['user_identifier']
login_provider_name = request.session['login_provider_name']
logging.debug('trying to create new account associated with openid')
form_class = forms.get_registration_form_class()
register_form = form_class(request.POST)
if not register_form.is_valid():
logging.debug('registration form is INVALID')
else:
username = register_form.cleaned_data['username']
email = register_form.cleaned_data['email']
if 'ldap_user_info' in request.session:
user_info = request.session['ldap_user_info']
#we take this info from the user input where
#they can override the default provided by LDAP
user_info['django_username'] = username
user_info['email'] = email
user = ldap_create_user(user_info).user
user = authenticate(user_id=user.id, method='force')
del request.session['ldap_user_info']
login(request, user)
cleanup_post_register_session(request)
return HttpResponseRedirect(next_url)
elif askbot_settings.REQUIRE_VALID_EMAIL_FOR == 'nothing':
user = create_authenticated_user_account(
username=username,
email=email,
user_identifier=user_identifier,
login_provider_name=login_provider_name,
)
login(request, user)
cleanup_post_register_session(request)
return HttpResponseRedirect(next_url)
else:
email_verifier = UserEmailVerifier(key=generate_random_key())
email_verifier.value = {'username': username, 'email': email,
'user_identifier': user_identifier,
'login_provider_name': login_provider_name}
email_verifier.save()
send_email_key(email, email_verifier.key,
handler_url_name='verify_email_and_register')
redirect_url = reverse('verify_email_and_register') + '?next=' + next_url
return HttpResponseRedirect(redirect_url)
providers = {
'yahoo':'<font color="purple">Yahoo!</font>',
'flickr':'<font color="#0063dc">flick</font><font color="#ff0084">r</font>™',
'google':'Google™',
'aol':'<font color="#31658e">AOL</font>',
'myopenid':'MyOpenID',
}
if login_provider_name not in providers:
provider_logo = login_provider_name
else:
provider_logo = providers[login_provider_name]
logging.debug('printing authopenid/complete.html output')
data = {
'openid_register_form': register_form,
'default_form_action': django_settings.LOGIN_URL,
'provider': mark_safe(provider_logo),
'username': username,
'email': email,
'login_type':'openid',
'gravatar_faq_url':reverse('faq') + '#gravatar',
}
return render(request, 'authopenid/complete.html', Context(data))
def signin_failure(request, message):
"""
failure with openid signin. Go back to the signin page.
"""
request.user.message_set.create(message = message)
return show_signin_view(request)
@not_authenticated
@csrf.csrf_protect
def verify_email_and_register(request):
"""for POST request - check the validation code,
and if correct - create an account and log in the user
for GET - give a field to paste the activation code
and a button to send another validation email.
"""
presented_code = request.REQUEST.get('validation_code', None)
if presented_code:
try:
#we get here with post if button is pushed
#or with "get" if emailed link is clicked
email_verifier = UserEmailVerifier.objects.get(key=presented_code)
#verifies that the code has not been used already
assert(email_verifier.verified == False)
assert(email_verifier.has_expired() == False)
username = email_verifier.value['username']
email = email_verifier.value['email']
password = email_verifier.value.get('password', None)
user_identifier = email_verifier.value.get('user_identifier', None)
login_provider_name = email_verifier.value.get('login_provider_name', None)
if password:
user = create_authenticated_user_account(
username=username,
email=email,
password=password,
)
elif user_identifier and login_provider_name:
user = create_authenticated_user_account(
username=username,
email=email,
user_identifier=user_identifier,
login_provider_name=login_provider_name,
)
else:
raise NotImplementedError()
login(request, user)
email_verifier.verified = True
email_verifier.save()
cleanup_post_register_session(request)
return HttpResponseRedirect(get_next_url(request))
except Exception, e:
message = _(
'Sorry, registration failed. '
'The token may have been used already or has expired. Please try again.'
)
request.user.message_set.create(message=message)
return HttpResponseRedirect(reverse('index'))
else:
data = {'page_class': 'validate-email-page'}
return render(request, 'authopenid/verify_email.html', Context(data))
@not_authenticated
@csrf.csrf_protect
@fix_recaptcha_remote_ip
def signup_with_password(request):
"""Create a password-protected account
template: authopenid/signup_with_password.html
"""
logging.debug(get_request_info(request))
login_form = forms.LoginForm(initial = {'next': get_next_url(request)})
#this is safe because second decorator cleans this field
if askbot_settings.USE_RECAPTCHA:
RegisterForm = forms.SafeClassicRegisterForm
else:
RegisterForm = forms.ClassicRegisterForm
logging.debug('request method was %s' % request.method)
if request.method == 'POST':
form = RegisterForm(request.POST)
if form.is_valid():
next = form.cleaned_data['next']
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
if askbot_settings.REQUIRE_VALID_EMAIL_FOR == 'nothing':
user = create_authenticated_user_account(
username=username,
email=email,
password=password,
)
login(request, user)
cleanup_post_register_session(request)
return HttpResponseRedirect(get_next_url(request))
else:
email_verifier = UserEmailVerifier(key=generate_random_key())
email_verifier.value = {'username': username,
'login_provider_name': 'local',
'email': email, 'password': password}
email_verifier.save()
send_email_key(
email, email_verifier.key,
handler_url_name='verify_email_and_register'
)
redirect_url = reverse('verify_email_and_register') + \
'?next=' + get_next_url(request)
return HttpResponseRedirect(redirect_url)
else:
#todo: here we have duplication of get_password_login_provider...
form = RegisterForm(initial={'next': get_next_url(request)})
major_login_providers = util.get_enabled_major_login_providers()
minor_login_providers = util.get_enabled_minor_login_providers()
context_data = {
'form': form,
'page_class': 'openid-signin',
'major_login_providers': major_login_providers.values(),
'minor_login_providers': minor_login_providers.values(),
'login_form': login_form
}
return render(
request,
'authopenid/signup_with_password.html',
Context(context_data)
)
@login_required
def signout(request):
"""
signout from the website. Remove openid from the session and kill it.
url: /signout/
"""
logging.debug('')
try:
logging.debug('deleting openid session var')
del request.session['openid']
except KeyError:
logging.debug('failed')
pass
logout(request)
logging.debug('user logged out')
return HttpResponseRedirect(get_next_url(request))
XRDF_TEMPLATE = """<?xml version='1.0' encoding='UTF-8'?>
<xrds:XRDS
xmlns:xrds='xri://$xrds'
xmlns:openid='http://openid.net/xmlns/1.0'
xmlns='xri://$xrd*($v*2.0)'>
<XRD>
<Service>
<Type>http://specs.openid.net/auth/2.0/return_to</Type>
<URI>%(return_to)s</URI>
</Service>
</XRD>
</xrds:XRDS>"""
def xrdf(request):
url_host = get_url_host(request)
return_to = "%s%s" % (url_host, reverse('user_complete_signin'))
return HttpResponse(XRDF_TEMPLATE % {'return_to': return_to})
def set_new_email(user, new_email):
if new_email != user.email:
user.email = new_email
user.email_isvalid = False
user.save()
def send_email_key(address, key, handler_url_name='user_account_recover'):
"""private function. sends email containing validation key
to user's email address
"""
email = EmailValidation({
'handler_url_name': handler_url_name,
'key': key
})
email.send([address,])
def send_user_new_email_key(user):
user.email_key = generate_random_key()
user.save()
send_email_key(user.email, user.email_key)
def account_recover(request):
"""view similar to send_email_key, except
it allows user to recover an account by entering
his/her email address
this view will both - send the recover link and
process it
url name 'user_account_recover'
"""
if not askbot_settings.ALLOW_ACCOUNT_RECOVERY_BY_EMAIL:
raise Http404
if request.method == 'POST':
form = forms.AccountRecoveryForm(request.POST)
if form.is_valid():
user = form.cleaned_data['user']
send_user_new_email_key(user)
message = _(
'Please check your email and visit the enclosed link.'
)
return show_signin_view(
request,
account_recovery_message = message,
view_subtype = 'email_sent'
)
else:
return show_signin_view(
request,
account_recovery_form = form
)
else:
key = request.GET.get('validation_code', None)
if key is None:
return HttpResponseRedirect(reverse('user_signin'))
user = authenticate(email_key = key, method = 'email')
if user:
if request.user.is_authenticated():
if user != request.user:
logout(request)
login(request, user)
else:
login(request, user)
from askbot.models import greet_new_user
greet_new_user(user)
#need to show "sticky" signin view here
request.session['in_recovery'] = True
return show_signin_view(
request,
view_subtype = 'add_openid',
sticky = True
)
else:
return show_signin_view(request, view_subtype = 'bad_key')
return HttpResponseRedirect(get_next_url(request))
|
jesonyang001/qarepo
|
askbot/deps/django_authopenid/views.py
|
Python
|
gpl-3.0
| 55,438
|
# demo_gui.py ---
#
# Filename: demo_gui.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Jun 16 05:41:58 2010 (+0530)
# Version:
# Last-Updated: Tue Sep 11 14:26:13 2012 (+0530)
# By: subha
# Update #: 318
# URL:
# Change log:
# Tuesday 18 September 2018 09:51:56 AM IST
# Qt Related changes.
try:
from PyQt4 import QtGui, QtCore
from PyQt4.Qt import Qt
except ImportError as e:
print( 'PyQt4 is not found. Doing nothing' )
quit()
try:
import PyQt4.Qwt5 as Qwt
except ImportError as e:
print( 'PyQt4.Qwt5 not found. Doing nothing' )
quit()
import numpy
from Izhikevich import IzhikevichDemo
class IzhikevichGui(QtGui.QMainWindow):
"""This is a Qt version of the GUI"""
def __init__(self, *args):
QtGui.QMainWindow.__init__(self, *args)
self.demo = IzhikevichDemo()
self.signalMapper = QtCore.QSignalMapper(self)
self.demoFrame = QtGui.QFrame(self)
self.controlPanel = QtGui.QFrame(self.demoFrame)
self.figureNo = {}
self.buttons = {}
for key, value in list(IzhikevichDemo.parameters.items()):
button = QtGui.QPushButton(key, self.controlPanel)
self.figureNo[value[0]] = key
self.buttons[key] = button
keys = list(self.figureNo.keys())
keys.sort()
length = len(keys)
rows = int(numpy.rint(numpy.sqrt(length)))
cols = int(numpy.ceil(length * 1.0 / rows))
layout = QtGui.QGridLayout()
for ii in range(rows):
for jj in range(cols):
index = ii * cols + jj
if index < length:
key = self.figureNo[keys[index]]
button = self.buttons[key]
button.setToolTip(self.tr(IzhikevichDemo.documentation[key]))
layout.addWidget(button, ii, jj)
self.connect(button, QtCore.SIGNAL('clicked()'), self.signalMapper, QtCore.SLOT('map()'))
self.signalMapper.setMapping(button, key)
self.connect(self.signalMapper, QtCore.SIGNAL('mapped(const QString &)'), self._simulateAndPlot)
self.controlPanel.setLayout(layout)
self.plotPanel = QtGui.QFrame(self.demoFrame)
self.VmPlot = Qwt.QwtPlot(self.plotPanel)
self.VmPlot.setAxisTitle(Qwt.QwtPlot.xBottom, 'time (ms)')
self.VmPlot.setAxisTitle(Qwt.QwtPlot.yLeft, 'Vm (mV)')
self.VmPlot.replot()
self.ImPlot = Qwt.QwtPlot(self.plotPanel)
self.ImPlot.setAxisTitle(Qwt.QwtPlot.xBottom, 'time (ms)')
self.ImPlot.setAxisTitle(Qwt.QwtPlot.yLeft, 'Im (nA)')
self.vmPlotZoomer = self._make_zoomer(self.VmPlot)
self.imPlotZoomer = self._make_zoomer(self.ImPlot)
self.descriptionWidget = QtGui.QLabel('Click any of the buttons to simulate and plot the corresponding neuron.')
self.descriptionWidget.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.descriptionWidget.setSizePolicy(sizePolicy)
self.VmPlot.setSizePolicy(sizePolicy)
self.ImPlot.setSizePolicy(sizePolicy)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.VmPlot)
layout.addWidget(self.ImPlot)
layout.addWidget(self.descriptionWidget)
self.plotPanel.setLayout(layout)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.plotPanel)
layout.addWidget(self.controlPanel)
self.demoFrame.setLayout(layout)
self.setCentralWidget(self.demoFrame)
def _simulateAndPlot(self, key):
key = str(key)
equationText = self.demo.getEquation(key).replace('\n', '<br/>')
doc = IzhikevichDemo.documentation[key].replace('\n', '<br/>')
text = '<b>%s:</b> %s<p><b>Equation:</b><br/> %s' % (key, doc, equationText)
self.descriptionWidget.setText(self.tr(text))
# if key == 'accommodation':
# mbox = QtGui.QMessageBox(self)
# mbox.setText(self.tr('Accommodation cannot be shown with regular Izhikevich model.'))
# mbox.setDetailedText(self.tr('\
# Equation for u for the accommodating neuron is: \
# u\' = a * b * (V + 65)\n Which is different from \
# the regular equation u\' = a * (b*V - u) and cannot \
# be obtained from the latter by any choice of a and b.'))
# mbox.show()
# return
(time, Vm, Im) = self.demo.simulate(key)
Vm = numpy.array(Vm.vector) * 1e3
Im = numpy.array(Im.vector) * 1e9
self.VmPlot.clear()
self.ImPlot.clear()
curve = Qwt.QwtPlotCurve(self.tr(key + '_Vm'))
curve.setPen(QtCore.Qt.red)
curve.setData(time, numpy.array(Vm))
curve.attach(self.VmPlot)
curve = Qwt.QwtPlotCurve(self.tr(key + '_Im'))
curve.setPen(QtCore.Qt.blue)
curve.setData(time, Im)
curve.attach(self.ImPlot)
self.imPlotZoomer.setZoomBase()
self.vmPlotZoomer.setZoomBase()
self.ImPlot.replot()
self.VmPlot.replot()
def _make_zoomer(self, plot):
zoomer = Qwt.QwtPlotZoomer(Qwt.QwtPlot.xBottom,
Qwt.QwtPlot.yLeft,
Qwt.QwtPicker.DragSelection,
Qwt.QwtPicker.AlwaysOn,
plot.canvas())
zoomer.setRubberBandPen(QtGui.QPen(QtCore.Qt.white))
zoomer.setTrackerPen(QtGui.QPen(QtCore.Qt.cyan))
return zoomer
import sys
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
mainWin = IzhikevichGui()
mainWin.show()
sys.exit(app.exec_())
#
# demo_gui.py ends here
|
BhallaLab/moose-examples
|
izhikevich/demogui_qt.py
|
Python
|
gpl-2.0
| 5,815
|
#!/usr/bin/python
from __future__ import print_function
import sys
import json
from httplib import HTTPException
import socket
from pprint import pprint, pformat
import os
from gnt_rapi import GntRapi, GntTagMap
import yaml
def gnt_inst_list(rapi):
"""Get list of all instances (including host-vars)"""
group_map=dict()
meta_list=dict()
instance_data=rapi.query(["name","tags","os","status"],None,
"instance")["data"]
for i in instance_data:
instance_name=i[0][1]
tags=GntTagMap(i[1][1])
if not tags.nolist:
tags.add_fact("os",i[2][1])
tags.add_fact("status",i[3][1])
tags.add_to_group_map(group_map,instance_name)
meta_list[instance_name]=tags.dump()
group_map["_meta"]={"hostvars":meta_list}
return group_map
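# Illustrative note (an assumption, not part of the original script): the
# mapping returned above follows the Ansible dynamic-inventory convention,
# with tag-derived groups mapping to host lists plus a "_meta" key, e.g.
# (hypothetical hosts and groups):
#
#   {
#       "webservers": ["inst1.example.org", "inst2.example.org"],
#       "_meta": {
#           "hostvars": {
#               "inst1.example.org": {"os": "debian", "status": "running"}
#           }
#       }
#   }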
def gnt_inst_get(rapi,name):
"""Get host-vars for host //name//"""
vars_raw=rapi.query(["tags","os","status"],["=","name",name],
"instance")["data"][0]
tags=GntTagMap(vars_raw[0][1])
if tags.nolist:
return False
tags.add_fact("os",vars_raw[1][1])
tags.add_fact("status",vars_raw[2][1])
return tags.dump()
def err(msg,rc=1):
pprint(msg,stream=sys.stderr)
sys.exit(rc)
def main():
"""Main procedure"""
# read configuration
try:
conf_filename=os.getenv('GNT_RAPI_CONF', os.path.join(os.path.dirname(
os.path.realpath(__file__)),'rapi.yml'))
with open(conf_filename,'r') as stream:
conf_data=yaml.load(stream)
except yaml.YAMLError as e:
err("Configuration file syntax error: %s " % e)
except IOError as e:
err("Configuration file error: %s " % e)
if "ganeti" in conf_data and "rapi" in conf_data["ganeti"]:
# parse command line arguments
name=sys.argv[2] if len(sys.argv)>2 and sys.argv[1]=="--host" else False
try:
# RAPI initialization
rapi=GntRapi(conf_data["ganeti"]["rapi"]["host"],
conf_data["ganeti"]["rapi"]["port"],conf_data["ganeti"]["rapi"]["user"],
conf_data["ganeti"]["rapi"]["pass"])
if name: # --host
tag_info=gnt_inst_get(rapi,name)
if not tag_info:
err("No data for host")
else: # --list
tag_info=gnt_inst_list(rapi)
except HTTPException as e:
err("HTTP error: %s " % e)
except socket.error as e:
err("HTTP Socket error: %s" %e)
except Exception as e:
err("Error: %s " % e)
else:
# return data if no error occurred
print(json.dumps(tag_info,indent=4))
if __name__ == '__main__':
main()
# vim:ff=unix ts=2 sw=2 ai expandtab
|
thoto/ganeti_ansible_facts
|
gnt_index.py
|
Python
|
gpl-3.0
| 2,515
|
import argparse
import numpy as np
from PIL import Image
import ocppaths
import ocpcarest
import zindex
import anydbm
import multiprocessing
import pdb
#
# ingest the PNG files into the database
#
"""This file is super-customized for Mitya's FlyEM data."""
# Stuff we may take from a config or the command line in the future
#ximagesz = 12000
#yimagesz = 12000
parser = argparse.ArgumentParser(description='Ingest the FlyEM image data.')
parser.add_argument('baseurl', action="store", help='Base URL of the ocp service, without http://, e.g. neurodata.io')
parser.add_argument('token', action="store", help='Token for the annotation project.')
parser.add_argument('path', action="store", help='Directory with annotation PNG files.')
parser.add_argument('process', action="store", help='Number of processes.')
result = parser.parse_args()
# convert to an argument
resolution = 0
# load a database
[ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )
# get the dataset configuration
(xcubedim,ycubedim,zcubedim)=proj.datasetcfg.cubedim[resolution]
(startslice,endslice)=proj.datasetcfg.slicerange
batchsz=zcubedim
# This doesn't work because the image size does not match exactly the cube size
#(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
ximagesz = 12000
yimagesz = 12000
batchsz=16
totalslices = range(startslice,endslice,16)
totalprocs = int(result.process)
#global anydb
#pdb.set_trace()
#anydb = anydbm.open('bodydict','r')
#anydb = dict(anydb)
def parallelwrite(slicenumber):
# Accessing the dict in dbm
#anydb = anydbm.open('bodydict','r')
[ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )
#print slicenumber
startslice = slicenumber
endslice = startslice+16
# Get a list of the files in the directories
for sl in range (startslice, endslice+1, batchsz):
slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint32 )
for b in range ( batchsz ):
if ( sl + b <= endslice and sl + b<=1460 ):
# raw data
filenm = result.path + '/superpixel.' + '{:0>5}'.format(sl+b) + '.png'
#print "Opening filenm " + filenm
img = Image.open ( filenm, 'r' )
imgdata = np.asarray ( img )
#Adding new lines
anydb = anydbm.open('bodydict2','r')
superpixelarray = imgdata[:,:,0] + (np.uint32(imgdata[:,:,1])<<8)
newdata = np.zeros([superpixelarray.shape[0],superpixelarray.shape[1]], dtype=np.uint32)
#print "slice",sl+b,"batch",sl
print sl+b,multiprocessing.current_process()
for i in range(superpixelarray.shape[0]):
for j in range(superpixelarray.shape[1]):
key = str(sl)+','+str(superpixelarray[i,j])
if( key not in anydb):
f = open('missing_keys', 'a')
f.write(key+'\n')
f.close()
print "Error Detected Writing to File"
dictvalue = '0'
else:
dictvalue = anydb.get( key )
newdata[i,j] = int(dictvalue)
slab[b,:,:] = newdata
print "end of slice:",sl+b
anydb.close()
print "Entering commit phase"
# Now we have a 1024x1024x16 z-aligned cube.
# Send it to the database.
for y in range ( 0, yimagesz, ycubedim ):
for x in range ( 0, ximagesz, xcubedim ):
mortonidx = zindex.XYZMorton ( [ x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint32 )
xmin = x
ymin = y
xmax = min ( ximagesz, x+xcubedim )
ymax = min ( yimagesz, y+ycubedim )
zmin = 0
zmax = min(sl+zcubedim,endslice+1)
cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax,ymin:ymax,xmin:xmax]
# insert the blob into the database
db.annotateDense ((x,y,sl-startslice), resolution, cubedata, 'O')
print "Commiting at x=%s, y=%s, z=%s" % (x,y,sl)
db.conn.commit()
return None
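# Aside (illustrative sketch, not part of the original ingest script):
# zindex.XYZMorton above is assumed to compute a Z-order (Morton) index by
# interleaving the bits of the cube coordinates, roughly like this:
#
# def xyz_morton(x, y, z, bits=21):
#     """Interleave the bits of x, y, z into a single Morton code."""
#     code = 0
#     for i in range(bits):
#         code |= ((x >> i) & 1) << (3 * i)        # x bits at positions 0, 3, 6, ...
#         code |= ((y >> i) & 1) << (3 * i + 1)    # y bits at positions 1, 4, 7, ...
#         code |= ((z >> i) & 1) << (3 * i + 2)    # z bits at positions 2, 5, 8, ...
#     return code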
def run():
flypool = multiprocessing.Pool(totalprocs)
flypool.map(parallelwrite, totalslices, 16)
if __name__ == "__main__":
run()
|
openconnectome/open-connectome
|
scripts/ingest/flyem/flyem_anno_parallel.py
|
Python
|
apache-2.0
| 4,133
|
# Generated by Django 2.0.5 on 2019-07-18 22:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scheduletemplates', '0004_shifttemplate_members_only'),
]
operations = [
migrations.AlterField(
model_name='shifttemplate',
name='workplace',
field=models.ForeignKey(verbose_name='workplace', to='organizations.Workplace',
null=True, blank=True,
on_delete=django.db.models.deletion.CASCADE),
),
]
|
pitpalme/volunteer_planner
|
scheduletemplates/migrations/0005_cascade_deletion.py
|
Python
|
agpl-3.0
| 629
|
import time
import itertools
import gevent
from Config import config
from util import helper
from Plugin import PluginManager
from ChartDb import ChartDb
from ChartCollector import ChartCollector
if "db" not in locals().keys(): # Share on reloads
db = ChartDb()
gevent.spawn_later(10 * 60, db.archive)
helper.timer(60 * 60 * 6, db.archive)
collector = ChartCollector(db)
@PluginManager.registerTo("SiteManager")
class SiteManagerPlugin(object):
def load(self, *args, **kwargs):
back = super(SiteManagerPlugin, self).load(*args, **kwargs)
collector.setInitialLastValues(self.sites.values())
return back
def delete(self, address, *args, **kwargs):
db.deleteSite(address)
return super(SiteManagerPlugin, self).delete(address, *args, **kwargs)
@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
def actionChartDbQuery(self, to, query, params=None):
if not "ADMIN" in self.permissions:
return {"error": "No permission"}
if config.debug or config.verbose:
s = time.time()
rows = []
try:
if not query.strip().upper().startswith("SELECT"):
raise Exception("Only SELECT query supported")
res = db.execute(query, params)
except Exception, err: # Report the error to the client
self.log.error("ChartDbQuery error: %s" % err)
return {"error": str(err)}
# Convert result to dict
for row in res:
rows.append(dict(row))
if config.verbose and time.time() - s > 0.1: # Log slow query
self.log.debug("Slow query: %s (%.3fs)" % (query, time.time() - s))
return rows
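# Illustrative usage (an assumption, not from this plugin): a UI page with
# ADMIN permission could call this action over the websocket API with
# hypothetical client code such as
#   Page.cmd("chartDbQuery", ["SELECT * FROM data LIMIT 10"])
# any statement that does not start with SELECT is rejected before execution.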
def actionChartGetPeerLocations(self, to):
if not "ADMIN" in self.permissions:
return {"error": "No permission"}
peers = {}
for site in self.server.sites.values():
peers.update(site.peers)
peer_locations = self.getPeerLocations(peers)
return peer_locations
|
OliverCole/ZeroNet
|
plugins/Chart/ChartPlugin.py
|
Python
|
gpl-2.0
| 2,057
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# FILE: dump_tweets.py
# DATE: February, 2014
# Author: David W. McDonald
#
# Sample code that requests tweets from the DB and dumps each tweet
# to the screen
#
# Copyright by Author. All rights reserved. Not for reuse without
# express permissions.
#
import sys, gc, time, string, json, pickle, random
from datetime import datetime, timedelta
from infx.data.db.base.dbConfig import DBConfiguration
from infx.data.db.fitness.settings_db import *
from infx.data.db.fitness.FitTweetsDB import FitTweetsDB as DB
from infx.data.db.fitness.FitTweetObj import FitTweetObj
from infx.utils.tweet_entities import tweet_entities
from infx.utils.stop_words import remove_stops
from infx.data.fitness.constants import *
def query_date(db=None, date=None, dur=1, by_hour=False):
result_list = []
if( by_hour ):
delta = timedelta(hours=1)
else:
delta = timedelta(days=1)
dt2 = date + (dur*delta)
start_date = date.strftime("%Y%m%d%H%M%S")
end_date = dt2.strftime("%Y%m%d%H%M%S")
#start_date = date.strftime("%Y%m%d000000")
#end_date = dt2.strftime("%Y%m%d000000")
try:
result_list = db.query_tweet_table_by_date_range(start_date=start_date,
end_date=end_date,
in_order=True)
except Exception, e:
print "EXCEPTION when running query!"
print e
result_list = []
return {'tweet_list':result_list,
'query_date':date,
'start_date_str':start_date,
'end_date_str':end_date,
'duration':dur}
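# Illustrative note (derived from the strftime formats above, not from the
# original comments): a call like
#   query_date(db=db, date=datetime(2013, 1, 1), dur=1)
# queries the range start_date="20130101000000" to end_date="20130102000000".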
def tweet_dump(db=None, start_date=None, dur=1, items=-1, obj=False, report=False):
# query the database to get a set (list) of tweets
result = query_date(db=db, date=start_date, dur=dur)
tweet_list = result['tweet_list']
total_tweets = 0
if( report ):
total_tweets = len(tweet_list)
print "Found %d tweets."%(total_tweets)
if( items>0 ):
tweet_list = tweet_list[:items]
# now iterate through the list of the tweet objects
counter = 0
for tweet in tweet_list:
counter+=1
print "Tweet [%6d]:"%counter
if( obj ):
# this version uses the __repr__ version of the object
# this is very similar to the version below
print tweet
else:
# this version just picks out a few specific items from the
# object and prints them
print "Tweet Type %s:"%(type(tweet))
print "tweet_id(%s):"%(type(tweet.tweet_id)),tweet.tweet_id
print "tweet_id_str(%s):"%(type(tweet.tweet_id_str)),tweet.tweet_id_str
print "created_at(%s):"%(type(tweet.created_at)),tweet.created_at
print "query_source(%s):"%(type(tweet.query_source)),tweet.query_source
print "from_user(%s):"%(type(tweet.from_user)),tweet.from_user.encode('utf-8')
print "from_user_name(%s):"%(type(tweet.from_user_name)),tweet.from_user_name.encode('utf-8')
print "from_user_id(%s):"%(type(tweet.from_user_id)),tweet.from_user_id
ttext = tweet.tweet_text.replace("\n","").replace("\r","")
print "tweet_text(%s)"%(type(tweet.tweet_text)),ttext.encode('utf-8')
print
if( report ):
print "Found %d tweets."%(total_tweets)
print "Dumped %d tweets."%(counter)
return
def parse_date(dstr=None):
date = None
try:
date = datetime.strptime(dstr,"%Y%m%d")
except:
try:
date = datetime.strptime(dstr,"%d-%m-%Y")
except:
try:
date = datetime.strptime(dstr,"%d/%m/%Y")
except:
print "Can't parse that date."
date = None
return date
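# Illustrative examples (an assumption, not from the original script) of the
# three formats tried above:
#   parse_date("20130101")   -> datetime(2013, 1, 1)
#   parse_date("01-02-2013") -> datetime(2013, 2, 1)   # day-month-year
#   parse_date("01/02/2013") -> datetime(2013, 2, 1)   # day/month/year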
def parse_params(argv):
dt = None # date for the doc
dt_str = None # date as a string entered by the user
dur = 1 # duration
items = 0 # number of items to include
objects = False # just dump the tweet objects
report = True # report progress
pc = 1
while( pc < len(argv) ):
param = argv[pc]
if( param == "-date"):
pc += 1
dt_str = argv[pc]
dt = parse_date(dt_str)
if( param == "-dur"):
pc += 1
dur = int(argv[pc])
if( param == "-items"):
pc += 1
items = int(argv[pc])
if( param == "-obj"):
objects = True
if( param == "-report"):
report = True
if( param == "-no_report"):
report = False
pc += 1
return {'date':dt, 'dt_str':dt_str,
'report':report, 'items':items,
'objects':objects, 'duration':dur }
def usage(prog):
print "USAGE: %s -date <date> [-dur <days>] [-items <n_items>] [-obj] [-report | -no_report]"%(prog)
sys.exit(0)
# Some simple examples of using this at the command line
#
# python dump_tweets.py -date 20130101
# python dump_tweets.py -date 20130101 -items 10
# python dump_tweets.py -date 20130101 -items 10 -obj
def main(argv):
if len(argv) < 3:
usage(sys.argv[0])
params = parse_params(argv)
if( params['report'] ):
print "Got parameters"
print params
if( params['report'] ):
print "Preparing Database Configuration"
config = DBConfiguration(db_settings=DATABASE_SETTINGS['default'])
#config = DBConfiguration(db_settings=DATABASE_SETTINGS['main_db'])
if( params['report'] ):
print config
print "Opening Database"
# Open the database with the specific configuration
db = DB(config=config)
doc = tweet_dump(db=db,start_date=params['date'],
dur=params['duration'],
items=params['items'],
obj=params['objects'],
report=params['report'])
# Always remember to close the DB when you're done
db.close()
return
if __name__ == '__main__':
main(sys.argv)
|
bhargavz/py-twitter-sentiment-analysis
|
data/sochi/dump_tweets.py
|
Python
|
mit
| 6,209
|
# OpenMV Unit Tests.
#
import os, sensor, gc
TEST_DIR = "unittest"
TEMP_DIR = "unittest/temp"
DATA_DIR = "unittest/data"
SCRIPT_DIR = "unittest/script"
if not (TEST_DIR in os.listdir("")):
raise Exception('Unittest dir not found!')
print("")
test_failed = False
def print_result(test, result):
s = "Unittest (%s)"%(test)
padding = "."*(60-len(s))
print(s + padding + result)
for test in sorted(os.listdir(SCRIPT_DIR)):
if test.endswith(".py"):
test_result = "PASSED"
test_path = "/".join((SCRIPT_DIR, test))
try:
exec(open(test_path).read())
gc.collect()
if unittest(DATA_DIR, TEMP_DIR) == False:
raise Exception()
except Exception as e:
if "unavailable" in str(e):
test_result = "DISABLED"
else:
test_failed = True
test_result = "FAILED"
print_result(test, test_result)
if test_failed:
print("\nSome tests have FAILED!!!\n\n")
else:
print("\nAll tests PASSED.\n\n")
|
kwagyeman/openmv
|
scripts/examples/OpenMV/99-Tests/unittests.py
|
Python
|
mit
| 1,077
|
"""Support for KNX/IP sensors."""
from __future__ import annotations
from typing import Any
from xknx import XKNX
from xknx.devices import Sensor as XknxSensor
from homeassistant import config_entries
from homeassistant.components.sensor import (
CONF_STATE_CLASS,
DEVICE_CLASSES,
SensorEntity,
)
from homeassistant.const import CONF_ENTITY_CATEGORY, CONF_NAME, CONF_TYPE, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, StateType
from .const import ATTR_SOURCE, DATA_KNX_CONFIG, DOMAIN
from .knx_entity import KnxEntity
from .schema import SensorSchema
async def async_setup_entry(
hass: HomeAssistant,
config_entry: config_entries.ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up sensor(s) for KNX platform."""
xknx: XKNX = hass.data[DOMAIN].xknx
config: list[ConfigType] = hass.data[DATA_KNX_CONFIG][Platform.SENSOR]
async_add_entities(KNXSensor(xknx, entity_config) for entity_config in config)
def _create_sensor(xknx: XKNX, config: ConfigType) -> XknxSensor:
"""Return a KNX sensor to be used within XKNX."""
return XknxSensor(
xknx,
name=config[CONF_NAME],
group_address_state=config[SensorSchema.CONF_STATE_ADDRESS],
sync_state=config[SensorSchema.CONF_SYNC_STATE],
always_callback=config[SensorSchema.CONF_ALWAYS_CALLBACK],
value_type=config[CONF_TYPE],
)
class KNXSensor(KnxEntity, SensorEntity):
"""Representation of a KNX sensor."""
_device: XknxSensor
def __init__(self, xknx: XKNX, config: ConfigType) -> None:
"""Initialize of a KNX sensor."""
super().__init__(_create_sensor(xknx, config))
self._attr_device_class = (
self._device.ha_device_class()
if self._device.ha_device_class() in DEVICE_CLASSES
else None
)
self._attr_force_update = self._device.always_callback
self._attr_entity_category = config.get(CONF_ENTITY_CATEGORY)
self._attr_unique_id = str(self._device.sensor_value.group_address_state)
self._attr_native_unit_of_measurement = self._device.unit_of_measurement()
self._attr_state_class = config.get(CONF_STATE_CLASS)
@property
def native_value(self) -> StateType:
"""Return the state of the sensor."""
return self._device.resolve_state()
@property
def extra_state_attributes(self) -> dict[str, Any] | None:
"""Return device specific state attributes."""
attr: dict[str, Any] = {}
if self._device.last_telegram is not None:
attr[ATTR_SOURCE] = str(self._device.last_telegram.source_address)
return attr
|
jawilson/home-assistant
|
homeassistant/components/knx/sensor.py
|
Python
|
apache-2.0
| 2,801
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL7743358405.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
biomodels/MODEL7743358405
|
MODEL7743358405/model.py
|
Python
|
cc0-1.0
| 427
|
from tests.markdown_test.test_utils import MarkdownTest
class ListTest(MarkdownTest):
def testBasic(self):
text = """
* item 1
* item 2
* item 3
"""
expect = """
<ul>
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
1. item 1
2. item 2
3. item 3
"""
expect = """
<ol>
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
"""
self.assertMarkdown(text, expect)
text = """
* Some text here that is
aligned nicely to the side
* Some text here that is
aligned nicely to the side
"""
expect = """
<ul>
<li>Some text here that is
aligned nicely to the side</li>
<li>Some text here that is
aligned nicely to the side</li>
</ul>
"""
self.assertMarkdown(text, expect)
def testNewline(self):
text = """
* item 1
* item 2
"""
expect = """
<ul>
<li>item 1</li>
<li>item 2</li>
</ul>
"""
self.assertMarkdown(text, expect)
def testNestedParagraphs(self):
text = """
* This here is the first
paragraph in the list item
This is the second paragraph.
It contains two leading spaces
* item 2
"""
expect = """
<ul>
<li><p>This here is the first
paragraph in the list item</p>
<p>This is the second paragraph.
It contains two leading spaces</p></li>
<li>item 2</li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
* This here is the first
paragraph in the list item
This is not in the list, because it only has
1 leading space
"""
expect = """
<ul>
<li>This here is the first
paragraph in the list item</li>
</ul>
<p>This is not in the list, because it only has
1 leading space</p>
"""
self.assertMarkdown(text, expect)
def testNestedList(self):
text = """
* item 1
* item 2
* item 3
"""
expect = """
<ul>
<li><p>item 1</p>
<ul>
<li><p>item 2</p>
<ul>
<li>item 3</li>
</ul></li>
</ul></li>
</ul>
"""
self.assertMarkdown(text, expect)
def testNestedListWithMultipleItems(self):
text = """
* item 1
* item 1
* item 2
* item 3
* item 2
* item 1
"""
expect = """
<ul>
<li><p>item 1</p>
<ul>
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ul></li>
<li><p>item 2</p>
<ul>
<li>item 1</li>
</ul></li>
</ul>
"""
self.assertMarkdown(text, expect)
def testNoWrap(self):
text = """
* item 1
this should be in same
list
"""
expect = """
<ul>
<li>item 1
this should be in same
list</li>
</ul>
"""
self.assertMarkdown(text, expect)
def testParagraphs(self):
text = """
* item 1
This should be in same list item,
different paragraph
"""
expect = """
<ul>
<li><p>item 1</p>
<p>This should be in same list item,
different paragraph</p></li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
* item 1
This should be in same list item,
different paragraph
Yet another paragraph
"""
expect = """
<ul>
<li><p>item 1</p>
<p>This should be in same list item,
different paragraph</p>
<p>Yet another paragraph</p></li>
</ul>
"""
self.assertMarkdown(text, expect)
def testSeparated(self):
text = """
* item 1
* item 2
Not in list
* item 1
* item 2
"""
expect = """
<ul>
<li>item 1</li>
<li>item 2</li>
</ul>
<p>Not in list</p>
<ul>
<li>item 1</li>
<li>item 2</li>
</ul>
"""
self.assertMarkdown(text, expect)
def testNotAList(self):
text = """
* * Not a list
"""
expect = """
<p>* * Not a list</p>
"""
self.assertMarkdown(text, expect)
text = """
\* Not a list
"""
expect = """
<p>* Not a list</p>
"""
self.assertMarkdown(text, expect)
def testInterchangedListType(self):
text = """
* item 1
* item 2
1. item 1
2. item 2
"""
expect = """
<ul>
<li>item 1</li>
<li>item 2</li>
</ul>
<ol>
<li>item 1</li>
<li>item 2</li>
</ol>
"""
self.assertMarkdown(text, expect)
text = """
1. item 1
2. item 2
* item 1
* item 2
"""
expect = """
<ol>
<li>item 1</li>
<li>item 2</li>
</ol>
<ul>
<li>item 1</li>
<li>item 2</li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
1. item 1
* item 1
2. item 2
* item 2
"""
expect = """
<ol>
<li>item 1</li>
</ol>
<ul>
<li>item 1</li>
</ul>
<ol>
<li>item 2</li>
</ol>
<ul>
<li>item 2</li>
</ul>
"""
self.assertMarkdown(text, expect)
def testNestedInterchangedListType(self):
text = """
* item 1
1. item 1
2. item 2
* item 2
"""
expect = """
<ul>
<li><p>item 1</p>
<ol>
<li>item 1</li>
<li>item 2</li>
</ol></li>
<li>item 2</li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
* level 1
1. level 2
* level 3
2. level 4
"""
expect = """
<ul>
<li><p>level 1</p>
<ol>
<li><p>level 2</p>
<ul>
<li><p>level 3</p>
<ol>
<li>level 4</li>
</ol></li>
</ul></li>
</ol></li>
</ul>
"""
self.assertMarkdown(text, expect)
def testLeadingEmphasis(self):
text = """
* *This is a* list
"""
expect = """
<ul>
<li><em>This is a</em> list</li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
* **This is** a * list
"""
expect = """
<ul>
<li><strong>This is</strong> a * list</li>
</ul>
"""
self.assertMarkdown(text, expect)
def testPrecedingParagraph(self):
text = """
Preceding Paragraph here
* item 1
* item 2
"""
expect = """
<p>Preceding Paragraph here</p>
<ul>
<li>item 1</li>
<li>item 2</li>
</ul>
"""
self.assertMarkdown(text, expect)
def testPrecedingCodeblock(self):
text = """
Stuff
Code block
here
* item 1
* item 2
"""
expect = """
<p>Stuff</p>
<pre><code>Code block
here</code></pre>
<ul>
<li>item 1</li>
<li>item 2</li>
</ul>
"""
self.assertMarkdown(text, expect)
class ListItemTest(MarkdownTest):
def testCodeblock(self):
text = """
* item 1
Codeblock contents
"""
expect = """
<ul>
<li><p>item 1</p>
<pre><code>Codeblock contents</code></pre></li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
* item 1
def hello(world):
return hi
"""
expect = """
<ul>
<li><p>item 1</p>
<pre><code>def hello(world):
return hi</code></pre></li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
* item 1
def hello(world):
return hi
* item 2
"""
expect = """
<ul>
<li><p>item 1</p>
<pre><code>def hello(world):
return hi</code></pre></li>
<li>item 2</li>
</ul>
"""
self.assertMarkdown(text, expect)
def testNotCodeblock(self):
text = """
* item 1
def hello(world):
return hi
* item 2
"""
expect = """
<ul>
<li><p>item 1</p>
<p>def hello(world):
return hi</p></li>
<li>item 2</li>
</ul>
"""
self.assertMarkdown(text, expect)
def testBlockquote(self):
text = """
* item 1
> blockquote in list
"""
expect = """
<ul>
<li><p>item 1</p>
<blockquote><p>blockquote in list</p></blockquote></li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
* item 1
> Multiline
> blockquote
with no wrap here
"""
expect = """
<ul>
<li><p>item 1</p>
<blockquote><p>Multiline
blockquote
with no wrap here</p></blockquote></li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
* item 1
> Multiline
> blockquote
> with a
> split in the middle
"""
expect = """
<ul>
<li><p>item 1</p>
<blockquote><p>Multiline
blockquote</p>
<p>with a
split in the middle</p></blockquote></li>
</ul>
"""
self.assertMarkdown(text, expect)
def testCodeblockLooksLikeList(self):
text = """
* item 1
* This is a codeblock
because it is indented
* 8 spaces
"""
expect = """
<ul>
<li><p>item 1</p>
<pre><code>* This is a codeblock
because it is indented
* 8 spaces</code></pre></li>
</ul>
"""
self.assertMarkdown(text, expect)
text = """
* item 1
1. This is a codeblock
because it is indented
2. 8 spaces
"""
expect = """
<ul>
<li><p>item 1</p>
<pre><code>1. This is a codeblock
because it is indented
2. 8 spaces</code></pre></li>
</ul>
"""
self.assertMarkdown(text, expect)
def testCodeblockLooksLikeBlockquote(self):
text = """
* item 1
> This is a codeblock
> Because it is indented
> 8 spaces
"""
expect = """
<ul>
<li><p>item 1</p>
<pre><code>> This is a codeblock
> Because it is indented
> 8 spaces</code></pre></li>
</ul>
"""
self.assertMarkdown(text, expect)
def testCodeblockInNestedList(self):
text = """
* item 1
* item 1
This is a codeblock
* item 2
This is not a codeblock
* item 2
This is a codeblock
"""
expect = """
<ul>
<li><p>item 1</p>
<ul>
<li><p>item 1</p>
<pre><code>This is a codeblock</code></pre></li>
<li>item 2
This is not a codeblock</li>
</ul></li>
<li><p>item 2</p>
<pre><code>This is a codeblock</code></pre></li>
</ul>
"""
self.assertMarkdown(text, expect)
def testBlockquoteInNestedList(self):
text = """
* item 1
* item 1
> This is a blockquote
"""
expect = """
<ul>
<li><p>item 1</p>
<ul>
<li><p>item 1</p>
<blockquote><p>This is a blockquote</p></blockquote></li>
</ul></li>
</ul>
"""
self.assertMarkdown(text, expect)
|
albert12132/templar
|
tests/markdown_test/list_test.py
|
Python
|
mit
| 13,140
|
import os
import sys
import string
import re
import shutil
import traceback
import cPickle
import ConfigParser
import exceptions
import psutil
from defaultdict import *
All_Targets = []
Targets = []
Config_Replacement_File = None
Args = []
Generated_Installers = {}
try:
import subprocess
except:
import popen2
class BlurException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TerminalControllerDummy:
def render(self,text):
return re.sub( r'\${\w+}', '', text )
try:
from termctl import TerminalController
term = TerminalController()
except:
TerminalController = TerminalControllerDummy
term = TerminalControllerDummy()
def find_targets(name):
ret = []
for t in All_Targets:
if t.name == name or re.match('^' + name + '$',t.name):
ret.append(t)
return ret
def find_target(name):
return find_targets(name)[0]
def add_target(target):
try:
if find_target(target.name) == target:
# Same target added twice
pass
else:
raise BlurException("Adding target with duplicate name " + target.name)
except IndexError:
# No target with this name yet; register it
All_Targets.append(target)
# Returns a tuple containing (returncode,stdout)
# Cmd can be a string or a list of args
def cmd_output(cmd,outputObject=None,shell=None):
p = None
outputFd = None
pollRunVal = None
if shell is None:
shell = sys.platform != 'win32'
try:
if 'subprocess' in globals():
p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,shell=shell)
outputFd = p.stdout
else:
p = popen2.Popen4(cmd)
pollRunVal = -1
outputFd = p.fromchild
except Exception, e:
print "Error starting command: " + str(cmd)
raise e
output = ''
ret = 0
def processOutput(existing, new, outputProgress):
existing += new
if outputProgress:
outputObject.output(new)
return existing
while True:
ret = p.poll()
if ret == pollRunVal:
output = processOutput(output,outputFd.readline(),outputObject)
else:
break
return (ret,processOutput(output,outputFd.read(),outputObject))
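# Illustrative usage (an assumption, not from the original build script):
#   (ret, out) = cmd_output(['svn', 'info', '.'])
#   if ret == 0:
#       print out
# returns the process exit code together with everything the command wrote
# (stderr is redirected into stdout above).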
# Baseclass for all targets
class Target:
def __init__(self,name,dir,pre_deps=[],post_deps=[]):
self.name = name
self.dir = dir
self.pre_deps = pre_deps
self.post_deps = post_deps
self.built = False
self.args = []
# "Registers" this target
add_target(self)
def __repr__(self):
return self.name
def check_arg_sanity(self):
if self.has_arg('install') and self.has_arg('clean') and not self.has_arg('build'):
print term.render("${RED}Asked to clean then install without building. Bailing out${NORMAL}")
sys.exit(1)
# Returns true if this target has already been built
# Some targets have different build steps, and can
# return different values depending on the args
def is_built(self):
return self.built
def apply_arg(self,arg):
if arg is not None:
try:
for a in arg: self.args.append(a)
except:
self.args.append(arg)
# Returns true if the argument list contains arg or
# contains TARGET:arg
def has_arg(self,arg):
# Local args
for a in self.args:
if arg == a:
return True
# Global args
sa = '-'+arg
for a in Args:
if arg == a or sa == a:
return True
return False
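# Illustrative note (an assumption, not from the original comments):
# has_arg('clean') is true when 'clean' was applied to this target directly
# (e.g. from a 'TARGET:clean' command-line argument) or when 'clean' or
# '-clean' appears in the global Args list.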
# Returns a string to execute using os.system to complete the target
# Simplified method for creating simpler targets that
# don't need to run multiple commands or study any output
#
# The return value of the command indicates whether
# the target was completed. 0 for success
def command(self):
return ''
def output(self,output):
if self.has_arg('verbose'):
print output,
elif self.has_arg('progress'):
for line in output.splitlines():
match = re.match("^(g\+\+|gcc)",line)
if match:
# Compile
if re.search(r"\s-c\s",line):
match = re.search(r'([\w\._-]+)\s*$',line)
if match:
print "Compiling", match.group(1)
# Link
else:
match = re.search(r'-o\s+([\w\._-]+)',line)
if match:
print "Linking", match.group(1)
match = re.match(r"^\S+uic\s+(\S+)",line)
if match:
print "Uic", match.group(1)
def cmd_error(self,cmd,output):
if not self.has_arg('verbose'):
print output,
print term.render("${RED}Error Building Target${NORMAL}: %s, cmd was: %s" % (self.name,cmd))
raise Exception()
def run_cmd(self,cmd,shell=None,noThrow=False):
if self.has_arg('verbose') or self.has_arg('show-commands'):
print term.render('${BLUE}Running Command${NORMAL}:'), str(cmd)
try:
(ret,output) = cmd_output(cmd,self,shell)
except:
print "Exception while running cmd:", cmd
traceback.print_exc()
raise
if ret and not noThrow:
self.cmd_error(cmd,output)
return (ret,output)
def run_make(self, arg_string=''):
make_cmd = 'make'
if 'QMAKESPEC' in os.environ and 'msvc' in os.environ['QMAKESPEC']:
make_cmd = 'nmake'
if arg_string and arg_string[0] != ' ':
arg_string = ' ' + arg_string
return self.run_cmd(make_cmd + arg_string)
# Central function of a Target, responsible for completing the target.
# Raises an exception if it cannot complete the target
def build_run(self):
cmd = self.command()
if cmd and len(cmd):
self.run_cmd(cmd)
# This is used to check whether the given target is buildable.
# A target may only be buildable on certain systems, or when
# certain requirements are met
def is_buildable(self):
return True
# This builds all the dependencies for this target
# If the 'skip-ext-deps' option is passed, targets
# that are specified as string are ignored, otherwise
# a string target is looked up in the All_Targets list
# with the find_target function.
def build_deps(self,deps):
skipext = self.has_arg('skip-ext-deps')
for d in deps:
if isinstance(d,str) and not skipext:
if ':' in d:
parts = d.split(':')
#local_args.append(d)
d = parts[0]
try:
d = find_target(d)
except:
raise BlurException("Target.build_deps: couldn't find dependancy: %s for target: %s" % (d, self.name))
if isinstance(d,Target):
d.build()
# Builds the target by building the deps and calling build_run.
# Skips build if name:skip exists in the args list. Changes directories
# to the target directory if there is one
def build(self):
if self.has_arg('skip') or self.is_built():
return
if not self.is_buildable():
return
self.build_deps(self.pre_deps)
cwd = os.getcwd()
nwd = os.path.join(cwd,self.dir)
print term.render("${YELLOW}Building${NORMAL}: %s\t\t%s" % (self.name, nwd))
#print "Target.build: doing ", self.name
#print "Target.build: chdir to ", nwd
os.chdir( nwd )
self.build_run()
os.chdir(cwd)
self.built = True
self.build_deps(self.post_deps)
# Executes a static command inside dir
class StaticTarget(Target):
def __init__(self,name,dir,cmd,pre_deps=[],post_deps=[],shell=False):
Target.__init__(self,name,dir,pre_deps,post_deps)
self.cmd = cmd
self.shell = shell
def command(self):
return self.cmd
def build_run(self):
self.check_arg_sanity()
if self.has_arg('build'):
if not self.built:
cmd = self.command()
if cmd and len(cmd):
self.run_cmd(cmd,shell=self.shell)
# Copies a single file
class CopyTarget(Target):
def __init__(self,name,dir,src,dest):
Target.__init__(self,name,dir)
self.Source = src
self.Dest = dest
def build_run(self):
shutil.copyfile(self.Source,self.Dest)
# Run configure.py, make, and optionally make install
# for building sip targets, these are python bindings
# including pyqt, pystone, pyclasses, various application
# interfaces, and whatever else is added...
class SipTarget(Target):
def __init__(self,name,dir,static=False,platform=None,pre_deps=[]):
Target.__init__(self,name,dir,pre_deps)
self.Static = static
self.Platform = platform
self.CleanDone = False
self.InstallDone = False
self.config = ""
self.name=name
def is_built(self):
if self.has_arg('clean') and not self.CleanDone:
return False
if self.has_arg('install') and not self.InstallDone:
return False
return Target.is_built(self)
def configure_command(self):
pass
def build_run(self):
self.check_arg_sanity()
if os.environ.has_key('PYTHON'):
self.config = os.environ['PYTHON'] + " configure.py"
else:
self.config = "python configure.py"
if self.Static:
self.config += " -k"
if self.Platform:
self.config += " -p " + self.Platform
if self.has_arg("debug"):
self.config += " -u"
if self.has_arg("trace"):
self.config += " -r"
if self.has_arg('build') or (not os.path.exists(os.path.join(os.getcwd(), 'Makefile')) and not self.name.startswith('py')):
self.configure_command()
self.run_cmd(self.config)
if self.has_arg('clean') and not self.CleanDone:
self.run_make('clean')
self.CleanDone = True
self.built = False
self.InstallDone = False
if os.name == 'nt':
wantedName = self.name.replace("static","").replace("py","",1)
print "Cleaning sip working dir"
if os.path.isfile('sip' + wantedName + '/' + wantedName + '.lib'):
os.remove('sip' + wantedName + '/' + wantedName + '.lib')
if os.path.isfile('sip' + wantedName + '/py' + wantedName + '.lib'):
os.remove('sip' + wantedName + '/py' + wantedName + '.lib')
if self.has_arg('build'):
self.run_make()
self.built = True
if os.name == 'nt':
wantedName = self.name.replace("static","").replace("py","",1)
if self.has_arg("debug"):
wantedName += "_d"
print "Checking for the existance of (%s)" % ('sip' + wantedName + '/' + wantedName + '.lib')
if os.path.isfile('sip' + wantedName + '/' + wantedName + '.lib'):
shutil.copyfile('sip' + wantedName + '/' + wantedName + '.lib', 'sip' + wantedName + '/py' + wantedName + '.lib')
os.remove('sip' + wantedName + '/' + wantedName + '.lib')
#if os.path.isfile('sip' + wantedName + '/py' + wantedName + '.lib'):
#os.remove('sip' + wantedName + '/py' + wantedName + '.lib')
if self.has_arg('install') and not self.InstallDone:
cmd = 'install'
try:
pos = Args.index('-install-root')
cmd += ' INSTALL_ROOT=' + args[pos+1]
except: pass
self.run_make(cmd)
self.InstallDone = True
# Builds a Qt Project(.pro) using qmake and make
# Runs qmake with optional debug and console
# Runs make clean(optional), make, and make install(optional)
#
# make clean is done if args contains 'clean' or 'TARGET:clean'
# make install is done if args contains 'install' or 'TARGET:install'
class QMakeTarget(Target):
def __init__(self,name,dir,target=None,pre_deps=[],post_deps=[],Static=False):
Target.__init__(self,name,dir,pre_deps,post_deps)
self.Static = Static
self.Target = target
self.UicOnly = False
self.Defines = []
self.ConfigDone = False
self.CleanDone = False
self.BuildDone = False
self.InstallDone = False
def is_built(self):
if self.UicOnly and self.built:
return True
if self.has_arg('clean') and not self.CleanDone:
return False
if self.has_arg('install') and not self.InstallDone:
return False
return self.ConfigDone and self.BuildDone
# Returns the arguments to qmake
# Override if you need special args
def qmakeargs(self):
Args = []
if self.Static:
Args.append("CONFIG+=staticlib")
if self.has_arg("debug")>0:
Args.append("CONFIG+=debug")
else:
Args.append("CONFIG-=debug")
if self.has_arg("console"):
Args.append("CONFIG+=console")
for d in self.Defines:
Args.append("DEFINES+=\"" + d + "\"")
if os.environ.has_key('PYTHON'):
Args.append("PYTHON="+os.environ['PYTHON'])
if self.Target:
Args.append(self.Target)
return string.join(Args,' ')
# Runs qmake, make clean(option), make, make install(option)
def build_run(self):
self.check_arg_sanity()
if not self.ConfigDone and self.has_arg("build"):
cmd = "qmake " + self.qmakeargs()
self.run_cmd(cmd)
self.ConfigDone = True
if self.has_arg("clean") and not self.CleanDone:
self.run_make('clean')
self.BuildDone = self.InstallDone = False
self.CleanDone = True
if self.UicOnly:
if sys.platform=='win32':
self.run_make("-f Makefile.release compiler_uic_make_all")
else:
self.run_make("compiler_uic_make_all")
return
if not self.BuildDone and self.has_arg("build"):
self.run_make()
self.BuildDone = True
self.built = True
if not self.InstallDone and self.has_arg('install'):
self.run_make('install')
self.InstallDone = True
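# Illustrative sketch (hypothetical names, not from the original build script):
#   libstone = QMakeTarget('stone', 'cpp/lib/stone', 'stone.pro')
# with 'build', 'clean' and 'install' in the argument list, this target then
# runs qmake, make clean, make and make install in cpp/lib/stone.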
# Finds the makensis command and runs it on the specified file
# Nullsoft install - used to generate windows installers
class NSISTarget(Target):
if sys.platform=='win32':
NSIS_PATHS = ["C:/Program Files (x86)/NSIS/","C:/Program Files/NSIS/","E:/Program Files (x86)/NSIS/","E:/Program Files/NSIS/"]
NSIS = "makensis.exe"
CanBuild = True
else:
CanBuild = False
def __init__(self,name,dir,file,pre_deps=[],makensis_extra_options=[], revdir=None):
Target.__init__(self,name,dir,pre_deps)
self.File = file
self.ExtraOptions = makensis_extra_options
self.RevDir = revdir
if not self.RevDir:
self.RevDir = dir
# Only buildable on win32
def is_buildable(self):
return NSISTarget.CanBuild
def find_nsis(self):
for p in self.NSIS_PATHS:
if os.access(p,os.F_OK):
return p + "makensis.exe"
raise Exception("Couldn't find nsis cmd, searched " + str(self.NSIS_PATHS))
return None
def makensis_options(self):
p = self.find_nsis()
file = os.getcwd() + "/" + self.File
cmd_parts = [p]
plat = 'Unknown'
if 'QMAKESPEC' in os.environ:
plat = os.environ['QMAKESPEC']
if self.has_arg('X86_64'):
plat += '_64'
cmd_parts.append( '/DPLATFORM=%s' % plat )
rev = GetRevision(self.RevDir)
cmd_parts.append( '/DREVISION=%s' % rev )
if self.ExtraOptions:
cmd_parts += self.ExtraOptions
cmd_parts.append(file)
return cmd_parts
def build_run(self):
cmd_parts = self.makensis_options()
(ret,output) = self.run_cmd( cmd_parts )
outputFile = None
for line in output.splitlines():
outputMatch = re.match( r'Output:\s+"(.*)"', line )
if outputMatch:
outputFile = outputMatch.group(1)
if outputFile is None:
raise ("Unable to parse output file from output\n"+output)
Generated_Installers[self.name] = outputFile
if self.has_arg('install'):
if self.has_arg('progress') or self.has_arg('-verbose'):
print term.render("${YELLOW}Installing${NORMAL}"), outputFile
self.run_cmd( [outputFile,'/S'] )
class KillTarget(Target):
def __init__(self, name, path, applications):
Target.__init__(self,name, path)
self.name = name
self.apps = applications
def is_buildable(self):
return True
def build_run(self):
for proc in psutil.process_iter():
try:
if proc.name() in self.apps:
print "Terminating process (%s)" % (proc.name())
try:
proc.kill()
except psutil.AccessDenied, ad:
print "Unable to kill the process"
except:
# We can't read process names of system owned procs, so we ignore these errors
pass
# Finds the subwcrev.exe program
# Part of toroisesvn used to get revision info for a file/dir
def find_wcrev():
WCREV_PATHS = ["C:/Program Files/TortoiseSVN/bin/","C:/Program Files (x86)/TortoiseSVN/bin/","C:/blur/TortoiseSVN/bin/"]
for p in WCREV_PATHS:
if os.access(p,os.F_OK):
return p + "/" + "subwcrev.exe"
raise Exception("Couldn't find subwcrev.exe, searched " + WCREV_PATHS)
return None
def isSubversion(dir):
return os.path.exists(os.path.join(dir,'.svn'))
def GetRevision_nocache(dir):
if not isSubversion(dir):
return 0
if sys.platform == 'win32':
wcrev = find_wcrev()
(ret,output) = cmd_output([wcrev,dir])
m = re.search("Updated to revision (\d+)",output)
if m != None:
return m.group(1)
m = re.search("Last committed at revision (\d+)",output)
if m != None:
return m.group(1)
raise "Couldn't parse valid revision: " + output
else:
(ret,output) = cmd_output('svnversion ' + dir)
m = re.search("(\d+)",output)
if m != None:
return m.group(1)
return None
# Gets the svn revision from the current directory
rev_cache = {}
def GetRevision(dir):
global rev_cache
if dir in rev_cache:
return rev_cache[dir]
rev = GetRevision_nocache(dir)
if rev:
rev_cache[dir] = rev
return rev
# Takes a template file, runs subwcrev.exe on it and outputs to output
class WCRevTarget(Target):
def __init__(self,name,dir,revdir,input,output):
Target.__init__(self,name,dir)
self.Input = input
self.Output = output
self.Revdir = revdir
def build_run(self):
if not isSubversion(self.Revdir):
shutil.copyfile(self.Input,self.Output)
return
if sys.platform == 'win32':
p = find_wcrev()
self.run_cmd( [p,self.Revdir,self.Input,self.Output,"-f"] )
else:
rev = GetRevision(self.Revdir)
self.run_cmd( "cat " + self.Input + " | sed 's/\\$WCREV\\$/" + str(rev) + "/' > " + self.Output )
# Takes a template .ini file, and sets certain keys based the inputed IniConfig object
class IniConfigTarget(Target):
def __init__(self,name,dir,template_ini,output_ini,install_dir = None, config = None):
Target.__init__(self,name,dir)
self.TemplateIni = template_ini
self.OutputIni = output_ini
self.Config = config
self.InstallDir = install_dir
def build_run(self):
replacementsBySection = DefaultDict(dict)
if not self.Config:
self.Config = Config_Replacement_File
section = "Default"
replacementsFile = None
try:
replacementsFile = open(self.Config)
except:
print ("Failure Reading %s for config replacement" % str(self.Config))
if replacementsFile:
for line in replacementsFile:
matchSection = re.match("^\[(.*)\]$",line)
if matchSection:
section = matchSection.group(1)
continue
matchKV = re.match("^([^=]*)=(.*)$",line)
if matchKV:
replacementsBySection[section][matchKV.group(1)] = matchKV.group(2)
outLines = []
section = "Default"
for line in open(self.TemplateIni):
matchSection = re.match("^\[(.*)\]$",line)
if matchSection:
section = matchSection.group(1)
outLines.append(line)
continue
matchKV = re.match("^([^=]*)=(.*)$",line)
if matchKV:
key = matchKV.group(1)
if key in replacementsBySection[section]:
outLines.append( "%s=%s\n" % (key, replacementsBySection[section][key] ) )
continue
outLines.append(line)
open(self.OutputIni,"w").write( ''.join(outLines) )
if self.has_arg('install') and not self.InstallDir == None:
if self.has_arg('verbose'):
print "${GREEN}Installing config file${NORMAL} %s" % (self.InstallDir + "/" + self.OutputIni)
if not os.path.exists(self.InstallDir):
os.makedirs(self.InstallDir)
shutil.copy2(self.OutputIni, self.InstallDir + "/" + self.OutputIni)
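# Illustrative replacement file format (inferred from the parser above);
# section headers and key=value pairs mirror the template .ini:
#     [Default]
#     host=example.com
#     [Database]
#     port=5432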
# Copies a file, replacing part of the name with the
# svn revision number from revdir, which is relative
# to dir (or absolute)
class RevCopyTarget(Target):
def __init__(self,name,dir,revdir,src,dest):
Target.__init__(self,name,dir)
self.RevDir = revdir
self.Source = src
self.Dest = dest
def build_run(self):
rev = GetRevision(self.RevDir)
dest = re.sub("{REVSTR}",rev,self.Dest)
shutil.copyfile(self.Source,dest)
class LibVersionTarget(Target):
def __init__(self,name,dir,revdir,library):
Target.__init__(self,name,dir)
self.RevDir = revdir
self.Library = library
def build_run(self):
if sys.platform == 'win32':
rev = GetRevision(self.RevDir)
shutil.copyfile(self.Library+".dll",self.Library+str(rev)+".dll")
shutil.copyfile("lib"+self.Library+".a","lib" + self.Library+str(rev)+".a")
else:
return
class LibInstallTarget(Target):
def __init__(self,name,dir,library,dest):
Target.__init__(self,name,dir)
self.Library = library
self.Dest = dest
def build_run(self):
libname = self.Library
dest = self.Dest
if sys.platform == 'win32':
libname = libname + '.dll'
else:
libname = 'lib' + libname + '.so'
destpath = dest + libname
shutil.copyfile(libname,destpath)
class RPMTarget(Target):
CanBuildRpms = None
""" # This class is used to build rpms
# It currently has 5 steps
# 1. Create a tarball named packageName-Version-rRevision.tgz in /usr/src/redhat/SOURCES/
# 2. Create a spec file, uses a template and replaces $VERSION$ and $WCREV$ in /usr/src/redhat/SPECS/
# 3. Run rpmbuild on the created spec file.
# 4. Parse the output from rpmbuild to generate a list of created rpms
# 5(optional, depeding on install arg). Install the rpms, except for debug-info and source rpm
"""
def __init__(self,targetName,packageName,dir,specTemplate,version,pre_deps):
""" for targetName and dir refer to Target docs
" packageName is the name of the .tgz package ie packageName-Version-rRevision.tgz
" specTemplate is the path to the spec template file, relative to the file the RPMTarget is
" constructed in
" version is the Version for the rpm, the revision is taken using GetRevision on dir
"""
Target.__init__(self,targetName,dir,pre_deps)
self.SpecTemplate = specTemplate
self.Version = version
self.Revision = GetRevision(dir)
self.PackageName = packageName
self.BuiltRPMS = []
self.InstallDone = False
self.pre_deps = pre_deps
self.BuildRoot = '/usr/src/redhat/'
try:
if os.path.exists('/etc/redhat-release'):
match = re.search( 'release ([\d\.]+)', open('/etc/redhat-release','r').read())
if match and float(match.group(1)) >= 6.0:
self.BuildRoot = '/root/rpmbuild/'
except: pass
    # Only buildable on linux, this should probably check for the existence
# of rpmbuild and other required commands.
def is_buildable(self):
if RPMTarget.CanBuildRpms == None:
if sys.platform == 'win32':
RPMTarget.CanBuildRpms = False
return False
RPMTarget.CanBuildRpms = True
return RPMTarget.CanBuildRpms
def is_built(self):
if self.has_arg('install') and not self.InstallDone:
return False
return Target.is_built(self)
def build_run(self):
destDir = ""
buildRoot = ""
if "DESTDIR" in os.environ:
destDir = os.environ['DESTDIR']
buildRoot = "--buildroot %s" % destDir
if not self.built:
sourceDir = self.BuildRoot + 'SOURCES/'
specDir = self.BuildRoot + 'SPECS/'
tarball = destDir + sourceDir + '%s-%s-%s.tgz' % (self.PackageName,self.Version,self.Revision)
specDest = destDir + specDir + '%s.spec' % self.PackageName
dirName = os.path.split(self.dir)[1]
if not os.path.exists(destDir + sourceDir):
os.makedirs(destDir + sourceDir)
if not os.path.exists(destDir + specDir):
os.makedirs(destDir + specDir)
if os.path.exists(tarball):
os.remove(tarball)
if self.run_cmd('tar -C .. -czf %s %s' % (tarball,dirName))[0]:
raise "Unable to create rpm tarball"
if self.run_cmd('cat %s | sed "s/\\$WCREV\\\\$/%s/" | sed "s/\\$VERSION\\\\$/%s/" > %s' % (self.SpecTemplate,self.Revision,self.Version,specDest) )[0]:
raise "Unable to process spec file template."
if self.run_cmd('sed -i "s/BuildRoot:.*$/BuildRoot: %s/" %s' % (destDir.replace('/', '\/'),specDest) )[0]:
raise "Unable to process spec file template."
pythonVersion = 'python%s' % sys.version[:3]
if self.run_cmd('sed -i "s/python2.5/%s/" %s' % (pythonVersion,specDest))[0]:
raise "Unable to process spec file template."
ret,output = self.run_cmd('rpmbuild -bb %s %s' % (buildRoot, specDest))
for line in output.splitlines():
res = re.match('Wrote: (.+)$',line)
if res:
self.BuiltRPMS.append(res.group(1))
print line
if self.has_arg('install') and not self.InstallDone:
for rpm in self.BuiltRPMS:
if not '/SRPMS/' in rpm and not '-debuginfo-' in rpm:
cmd = 'rpm -Uv ' + rpm
(ret,output) = self.run_cmd(cmd,noThrow=True)
if ret:
if 'is already installed' in output:
print ("Skipping rpm installation, %s already installed" % rpm)
else:
self.cmd_error( cmd, output )
self.InstallDone = True
class UploadTarget(Target):
DefaultHost = 'hartigan'
DefaultDest = ''
def __init__(self,name,dir,files,host,dest):
Target.__init__(self,name,dir)
        if files is None:
            self.files = []
        elif isinstance(files, basestring):
            # a bare filename is iterable, but must be treated as one file
            self.files = [files]
        else:
            try:
                iter(files)
                self.files = files
            except TypeError:
                self.files = [files]
self.host = host
self.dest = dest
def build_run(self):
for file in self.files:
self.run_cmd(['scp',file,"%s:%s" % (self.host,self.dest)])
class ClassGenTarget(Target):
def __init__(self,name,dir,schema, pre_deps = [], post_deps = []):
Target.__init__(self,name,dir,pre_deps,post_deps)
self.Schema = schema
def get_command(self):
# Here is the static command for running classmaker to generate the classes
classmakercmd = 'classmaker'
if sys.platform == 'win32':
classmakercmd = 'classmaker.exe'
# Run using cmd in path, unless we are inside the tree
if os.path.isfile(os.path.join(self.dir,'../../apps/classmaker/',classmakercmd)):
if sys.platform != 'win32':
classmakercmd = './' + classmakercmd
classmakercmd = 'cd ../../apps/classmaker && ' + classmakercmd
return classmakercmd
def build_run(self):
self.run_cmd( self.get_command() + " -s " + self.Schema + " -o " + self.dir, shell=True )
argv = sys.argv
def build():
global All_Targets
global Targets
global Args
global Config_Replacement_File
# Targets
Args = argv[1:]
build_resume_path = os.path.join(os.path.abspath(os.getcwd()),'build_resume.cache')
if Args.count('-resume'):
try:
pf = open( build_resume_path, 'rb' )
All_Targets = cPickle.load(pf)
Targets = cPickle.load(pf)
Args = cPickle.load(pf)
except:
traceback.print_exc()
print "Unable to load build resume cache."
return
if Args.count('-config-replacement-file'):
try:
            idx = Args.index('-config-replacement-file')
Config_Replacement_File = Args[idx+1]
del Args[idx+1]
del Args[idx]
except:
print "Unable to load config replacement file"
# Gather the targets
for a in argv[1:]:
targetName = a
args = []
# Parse args if there are any
try:
col_idx = targetName.index(':')
targetName = a[:col_idx]
args = a[col_idx+1:].split(',')
except: pass
# Find the targets, continue to next arg if none found
try:
targets = find_targets(targetName)
except: continue
# Remove the arg from the global arg list if it applies to specified targets
if len(targets) > 1 and a in Args:
Args.remove(a)
for target in targets:
target.apply_arg(args)
Targets.append(target)
        # Remove the bare target name from the global arg list, if present
try:
Args.remove(target.name)
except: pass
if argv.count('all'):
Targets = All_Targets
Args.remove('all')
if sys.platform == 'win32':
if 'QMAKESPEC' in os.environ and 'win32-msvc' in os.environ['QMAKESPEC']:
if 'FrameworkDir' in os.environ and os.environ['FrameworkDir'].endswith('Framework64'):
Args.append('X86_64')
if '-color' in Args:
global term
term = TerminalController()
# These Options are passed to the module build scripts
print term.render('${YELLOW}Starting Build${NORMAL}')
print "Targets: ", Targets
print "Args: ", Args
for t in Targets:
#print "Entering Target.build(args) on target ", t.name
#print "Target pre_deps are: ", t.pre_deps
#print "Target post_deps are: ", t.post_deps
#print "\n [build process starts here] \n"
try:
t.build()
except Exception,e:
if not e.__class__ == exceptions.KeyboardInterrupt:
traceback.print_exc()
print "Writing build resume file to",build_resume_path
pf = open(build_resume_path,'wb')
cPickle.dump(All_Targets,pf,-1)
cPickle.dump(Targets,pf,-1)
cPickle.dump(Args,pf,-1)
pf.close()
print "Exiting"
sys.exit(1)
if '--write-installers-file' in Args:
f = open('new_installers.pickle','wb')
cPickle.dump(Generated_Installers,f)
f.close()
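# Illustrative invocation (the entry point and target names are assumed);
# per-target args follow a colon and are comma-separated:
#     python build.py classmaker libclasses:install,verbose -color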
|
lordtangent/arsenalsuite
|
python/blur/build.py
|
Python
|
gpl-2.0
| 33,209
|
from discord.ext import commands
import discord
from .utils.hero_dictionary import hero_dic, item_dic
class Pics:
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def hero(self, ctx, *, hero_name):
"""<hero_name>'s icon"""
if hero_name.lower() in [x.lower() for x in hero_dic.values()]:
await self.bot.send_file(
ctx.message.channel,
'images/heroes/{} icon.png'.format(hero_name.lower())
)
else:
await self.bot.say('Invalid hero name')
@commands.command(pass_context=True)
async def item(self, ctx, *, item_name):
"""Picture of <item_name>"""
if item_name.lower() in [x.lower() for x in item_dic.values()]:
await self.bot.send_file(
ctx.message.channel,
'images/items/{} icon.png'.format(item_name.lower())
)
else:
await self.bot.say('Invalid item name')
@commands.command(pass_context=True)
async def wow(self, ctx):
"""Eddy Wally"""
await self.bot.send_file(
ctx.message.channel,
'images/wow.png'
)
def setup(bot):
bot.add_cog(Pics(bot))
|
bozhko-egor/dota2-discord-bot
|
cogs/pics.py
|
Python
|
mit
| 1,266
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Mozilla's Kraken JavaScript benchmark."""
import os
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
def _Mean(l):
return float(sum(l)) / len(l) if len(l) > 0 else 0.0
class Kraken(page_measurement.PageMeasurement):
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/kraken.json',
'pages': [
{ 'url': 'http://krakenbenchmark.mozilla.org/kraken-1.1/driver.html' }
]
}, os.path.abspath(__file__))
def MeasurePage(self, _, tab, results):
js_is_done = """
document.title.indexOf("Results") != -1 && document.readyState == "complete"
"""
def _IsDone():
return bool(tab.EvaluateJavaScript(js_is_done))
util.WaitFor(_IsDone, 500, poll_interval=5)
js_get_results = """
var formElement = document.getElementsByTagName("input")[0];
decodeURIComponent(formElement.value.split("?")[1]);
"""
result_dict = eval(tab.EvaluateJavaScript(js_get_results))
total = 0
for key in result_dict:
if key == 'v':
continue
results.Add(key, 'ms', result_dict[key], data_type='unimportant')
total += _Mean(result_dict[key])
results.Add('Total', 'ms', total)
|
loopCM/chromium
|
tools/perf/perf_tools/kraken.py
|
Python
|
bsd-3-clause
| 1,439
|
import numpy as np
import matplotlib.pylab as plt
import h5py
fl=h5py.File('test.h5');
k=fl['/fields/k'].value[0]
u=fl['/fields/u'].value
u=u['real']+1j*u['imag']
fid = open ("out68.vtk", "w");
fid.write("# vtk DataFile Version 3.0\n");
fid.write("patates\n");
fid.write("ASCII\nDATASET UNSTRUCTURED_GRID\n");
fid.write("POINTS %i FLOAT\n" % k.shape[1]);
N=k.shape[1]
for l in range(N):
kx=k[0,l];
ky=k[1,l];
kz=k[2,l];
kk=np.sqrt(kx**2+ky**2+kz**2);
th=np.arccos(kz/kk);
if(kx==0):
ph=0;
else:
ph=np.arctan2(ky,kx);
kkx=np.log10(kk)*np.sin(th)*np.cos(ph);
kky=np.log10(kk)*np.sin(th)*np.sin(ph);
kkz=np.log10(kk)*np.cos(th);
fid.write("% 4.4f % 4.4f % 4.4f\n" %(kkx,kky,kkz));
    kn=np.sqrt(kkx**2+kky**2+kkz**2)
fid.write("CELLS %i %i\n"%(N,2*N));
for l in range(N):
fid.write("1 %i\n"%l)
fid.write("CELL_TYPES %i\n"%N);
for l in range(N):
fid.write("1\n");
fid.write("POINT_DATA %i\n"%N)
fid.write("SCALARS sample_scalars float 1\n");
fid.write("LOOKUP_TABLE default\n");
for l in range(N):
fid.write("%f\n"%np.log10(np.sum(np.abs(u[6800,:,l])**2,0)/kn));
fid.close()
|
gurcani/dins
|
vtkgen.py
|
Python
|
gpl-2.0
| 1,152
|
#! /usr/bin/env python
def probl4():
"""Find the largest palindrome made by
the products of x and y. Values between 101
and 1000"""
largest_palindrome = 0
for i in xrange(101, 1000):
for j in xrange(101, 1000):
output = i * j
if str(output) == str(output)[::-1] and \
output > largest_palindrome:
largest_palindrome = output
return largest_palindrome
if __name__ == '__main__':
print probl4()
|
efrainc/project_euler
|
problem4.py
|
Python
|
mit
| 495
|
#!/usr/bin/python
import sys
depth = int(sys.argv[1]) if len(sys.argv) > 1 else 2
lines = []
switches_left = []
counter = 0
def get_new_id():
global counter
i = counter % 8 + 1 + (counter / 8 + 1) * 1000
counter += 1
return i
def new_switch(parent_id, depth):
if depth == 0: return
new_id = get_new_id()
lines.append("switch %d %d" % (new_id, parent_id))
switches_left.append((new_id, depth))
def expand_switch(parent_id, depth):
for _ in range(5):
lines.append("worker %d %d 0.0.0.0 CUBIEBOARD" % (get_new_id(), parent_id))
for _ in range(2):
new_switch(parent_id, depth - 1)
new_switch(0, depth)
while switches_left:
parent_id, depth = switches_left.pop(0)
expand_switch(parent_id, depth)
print "\n".join(lines)
|
christoff-buerger/reat
|
communication/gen_config.py
|
Python
|
mit
| 744
|
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2016 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
import requests
from xml.dom import minidom
class S3Client(object):
def objects(self, bucket_name):
response = requests.get('https://%s.s3.amazonaws.com/' % bucket_name)
if response.status_code == 404:
raise NoSuchBucket('Bucket "%s" does not exist' % bucket_name)
if response.status_code == 403:
raise PrivateBucket('Bucket "%s" is private' % bucket_name)
xml_data = minidom.parseString(response.text)
contents = xml_data.getElementsByTagName('Contents')
return [content.getElementsByTagName('Key')[0].firstChild.nodeValue
for content in contents if not self._is_folder(content)]
def _is_folder(self, content):
size = content.getElementsByTagName('Size')[0].firstChild.nodeValue
name = content.getElementsByTagName('Key')[0].firstChild.nodeValue
return name.endswith('/') and size == '0'
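# Illustrative usage (the bucket name is hypothetical):
# keys = S3Client().objects('my-public-bucket')   # e.g. ['data/a.csv', ...]
# Raises NoSuchBucket (404) or PrivateBucket (403) on error.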
class NoSuchBucket(Exception):
status_code = 404
class PrivateBucket(Exception):
status_code = 403
|
PyBossa/pybossa
|
pybossa/s3_client.py
|
Python
|
agpl-3.0
| 1,754
|
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class ReidysSpider(BaseSpider):
name = 'reidys.com'
allowed_domains = ['reidys.com']
start_urls = ['http://www.reidys.com/']
def parse(self, response):
hxs = HtmlXPathSelector(response)
urls = hxs.select('//div[@class="menu_wrap"]/div/div/div/a/@href').extract()
for url in urls:
yield Request(url, callback=self.parse_categories)
def parse_categories(self, response):
hxs = HtmlXPathSelector(response)
sub_categories = hxs.select('//div[@class="section_190"]/a/@href').extract()
if not sub_categories:
products = hxs.select('//div[@class="list_search_result"]')
for product in products:
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', 'div[@class="list_search_detail"]/'
'div[@class="list_search_info"]/p/a/'
'span/text()')
loader.add_xpath('url', 'div[@class="list_search_detail"]/'
'div[@class="list_search_info"]/p/a/@href')
loader.add_xpath('price', 'div[@class="list_search_detail"]/'
'div[@class="list_search_actionblock"]/'
'p/span[@class="list_search_price"]/text()')
yield loader.load_item()
next_page = hxs.select('//div[@class="formfloatright"]/'
'strong/a[text()="Next>"]/@href').extract()
if next_page:
next_url = next_page[-1]
yield Request(next_url, callback=self.parse_categories)
else:
urls = hxs.select('//div[@class="section_190"]/a/@href').extract()
for url in urls:
yield Request(url, callback=self.parse_categories)
|
0--key/lib
|
portfolio/Python/scrapy/soundslive/reidys_spider.py
|
Python
|
apache-2.0
| 2,164
|
# Copyright Bruno da Silva de Oliveira 2006. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from distutils.core import setup
import py2exe
import sys
sys.path.append('../src')
setup(name='pyste', scripts=['../src/pyste.py'])
|
NixaSoftware/CVis
|
venv/bin/libs/python/pyste/dist/setup.py
|
Python
|
apache-2.0
| 331
|
#!/home/upmc_aren/python_env/bin/python
from yanoama.pilot.system._linux import LinuxSystemConsole
try:
import json
except ImportError:
import simplejson as json
DEFAULT_CONF_FILE='/etc/yanoama.conf'
try:
config_file = file(DEFAULT_CONF_FILE).read()
config = json.loads(config_file)
except Exception, e:
print "There was an error in your configuration file ("+DEFAULT_CONF_FILE+")"
raise e
# host and port to listen
pilot= config.get('pilot', {})
HOST = pilot.get('host', LinuxSystemConsole().getHostname())
PORT = pilot.get('port', 49127)
|
guthemberg/yanoama
|
yanoama/pilot/defaults.py
|
Python
|
bsd-3-clause
| 569
|
import asyncio
import binascii
import base64
import json
import io
import mimetypes
import os
import re
import uuid
import warnings
import zlib
from urllib.parse import quote, unquote, urlencode, parse_qsl
from collections import deque, Mapping, Sequence
from .helpers import parse_mimetype
from .multidict import CIMultiDict
from .protocol import HttpParser
from .hdrs import (
CONTENT_DISPOSITION,
CONTENT_ENCODING,
CONTENT_LENGTH,
CONTENT_TRANSFER_ENCODING,
CONTENT_TYPE
)
__all__ = ('MultipartReader', 'MultipartWriter',
'BodyPartReader', 'BodyPartWriter',
'BadContentDispositionHeader', 'BadContentDispositionParam',
'parse_content_disposition', 'content_disposition_filename')
CHAR = set(chr(i) for i in range(0, 128))
CTL = set(chr(i) for i in range(0, 32)) | {chr(127), }
SEPARATORS = {'(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']',
'?', '=', '{', '}', ' ', chr(9)}
TOKEN = CHAR ^ CTL ^ SEPARATORS
class BadContentDispositionHeader(RuntimeWarning):
pass
class BadContentDispositionParam(RuntimeWarning):
pass
def parse_content_disposition(header):
def is_token(string):
return string and TOKEN >= set(string)
def is_quoted(string):
return string[0] == string[-1] == '"'
def is_rfc5987(string):
return is_token(string) and string.count("'") == 2
def is_extended_param(string):
return string.endswith('*')
def is_continuous_param(string):
pos = string.find('*') + 1
if not pos:
return False
substring = string[pos:-1] if string.endswith('*') else string[pos:]
return substring.isdigit()
def unescape(text, *, chars=''.join(map(re.escape, CHAR))):
return re.sub('\\\\([{}])'.format(chars), '\\1', text)
if not header:
return None, {}
disptype, *parts = header.split(';')
if not is_token(disptype):
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params = {}
for item in parts:
if '=' not in item:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
key, value = item.split('=', 1)
key = key.lower().strip()
value = value.lstrip()
if key in params:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
if not is_token(key):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_continuous_param(key):
if is_quoted(value):
value = unescape(value[1:-1])
elif not is_token(value):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_extended_param(key):
if is_rfc5987(value):
encoding, _, value = value.split("'", 2)
encoding = encoding or 'utf-8'
else:
warnings.warn(BadContentDispositionParam(item))
continue
try:
value = unquote(value, encoding, 'strict')
except UnicodeDecodeError: # pragma: nocover
warnings.warn(BadContentDispositionParam(item))
continue
else:
if is_quoted(value):
value = unescape(value[1:-1].lstrip('\\/'))
elif not is_token(value):
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params[key] = value
return disptype.lower(), params
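# Illustrative behaviour (traced from the branches above):
# parse_content_disposition('attachment; filename="report.pdf"')
# -> ('attachment', {'filename': 'report.pdf'})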
def content_disposition_filename(params):
if not params:
return None
elif 'filename*' in params:
return params['filename*']
elif 'filename' in params:
return params['filename']
else:
parts = []
fnparams = sorted((key, value)
for key, value in params.items()
if key.startswith('filename*'))
for num, (key, value) in enumerate(fnparams):
_, tail = key.split('*', 1)
if tail.endswith('*'):
tail = tail[:-1]
if tail == str(num):
parts.append(value)
else:
break
if not parts:
return None
value = ''.join(parts)
if "'" in value:
encoding, _, value = value.split("'", 2)
encoding = encoding or 'utf-8'
return unquote(value, encoding, 'strict')
return value
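# Illustrative behaviour: continuation params are reassembled in order:
# content_disposition_filename({'filename*0': 'long', 'filename*1': 'name.txt'})
# -> 'longname.txt'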
class MultipartResponseWrapper(object):
"""Wrapper around the :class:`MultipartBodyReader` to take care about
underlying connection and close it when it needs in."""
def __init__(self, resp, stream):
self.resp = resp
self.stream = stream
def at_eof(self):
"""Returns ``True`` when all response data had been read.
:rtype: bool
"""
return self.resp.content.at_eof()
@asyncio.coroutine
def next(self):
"""Emits next multipart reader object."""
item = yield from self.stream.next()
if self.stream.at_eof():
yield from self.release()
return item
@asyncio.coroutine
def release(self):
"""Releases the connection gracefully, reading all the content
to the void."""
yield from self.resp.release()
class BodyPartReader(object):
"""Multipart reader for single body part."""
chunk_size = 8192
def __init__(self, boundary, headers, content):
self.headers = headers
self._boundary = boundary
self._content = content
self._at_eof = False
length = self.headers.get(CONTENT_LENGTH, None)
self._length = int(length) if length is not None else None
self._read_bytes = 0
self._unread = deque()
@asyncio.coroutine
def next(self):
item = yield from self.read()
if not item:
return None
return item
@asyncio.coroutine
def read(self, *, decode=False):
"""Reads body part data.
        :param bool decode: Decodes data according to the encoding
            method from the `Content-Encoding` header. If the header
            is missing, the data remains untouched.
:rtype: bytearray
"""
if self._at_eof:
return b''
data = bytearray()
if self._length is None:
while not self._at_eof:
data.extend((yield from self.readline()))
else:
while not self._at_eof:
data.extend((yield from self.read_chunk(self.chunk_size)))
if decode:
return self.decode(data)
return data
@asyncio.coroutine
def read_chunk(self, size=chunk_size):
"""Reads body part content chunk of the specified size.
        The body part must have a `Content-Length` header with a proper value.
:param int size: chunk size
:rtype: bytearray
"""
if self._at_eof:
return b''
assert self._length is not None, \
'Content-Length required for chunked read'
chunk_size = min(size, self._length - self._read_bytes)
chunk = yield from self._content.read(chunk_size)
self._read_bytes += len(chunk)
if self._read_bytes == self._length:
self._at_eof = True
assert b'\r\n' == (yield from self._content.readline()), \
'reader did not read all the data or it is malformed'
return chunk
@asyncio.coroutine
def readline(self):
"""Reads body part by line by line.
:rtype: bytearray
"""
if self._at_eof:
return b''
if self._unread:
line = self._unread.popleft()
else:
line = yield from self._content.readline()
if line.startswith(self._boundary):
# the very last boundary may not come with \r\n,
# so set single rules for everyone
sline = line.rstrip(b'\r\n')
boundary = self._boundary
last_boundary = self._boundary + b'--'
# ensure that we read exactly the boundary, not something alike
if sline == boundary or sline == last_boundary:
self._at_eof = True
self._unread.append(line)
return b''
else:
next_line = yield from self._content.readline()
if next_line.startswith(self._boundary):
line = line[:-2] # strip CRLF but only once
self._unread.append(next_line)
return line
@asyncio.coroutine
def release(self):
"""Lke :meth:`read`, but reads all the data to the void.
:rtype: None
"""
if self._at_eof:
return
if self._length is None:
while not self._at_eof:
yield from self.readline()
else:
while not self._at_eof:
yield from self.read_chunk(self.chunk_size)
@asyncio.coroutine
def text(self, *, encoding=None):
"""Lke :meth:`read`, but assumes that body part contains text data.
:param str encoding: Custom text encoding. Overrides specified
in charset param of `Content-Type` header
:rtype: str
"""
data = yield from self.read(decode=True)
encoding = encoding or self.get_charset(default='latin1')
return data.decode(encoding)
@asyncio.coroutine
def json(self, *, encoding=None):
"""Lke :meth:`read`, but assumes that body parts contains JSON data.
:param str encoding: Custom JSON encoding. Overrides specified
in charset param of `Content-Type` header
"""
data = yield from self.read(decode=True)
if not data:
return None
encoding = encoding or self.get_charset(default='utf-8')
return json.loads(data.decode(encoding))
@asyncio.coroutine
def form(self, *, encoding=None):
"""Lke :meth:`read`, but assumes that body parts contains form
urlencoded data.
:param str encoding: Custom form encoding. Overrides specified
in charset param of `Content-Type` header
"""
data = yield from self.read(decode=True)
if not data:
return None
encoding = encoding or self.get_charset(default='utf-8')
return parse_qsl(data.rstrip().decode(encoding), encoding=encoding)
def at_eof(self):
"""Returns ``True`` if the boundary was reached or
``False`` otherwise.
:rtype: bool
"""
return self._at_eof
def decode(self, data):
"""Decodes data according the specified `Content-Encoding`
or `Content-Transfer-Encoding` headers value.
Supports ``gzip``, ``deflate`` and ``identity`` encodings for
`Content-Encoding` header.
Supports ``base64``, ``quoted-printable`` encodings for
`Content-Transfer-Encoding` header.
:param bytearray data: Data to decode.
:raises: :exc:`RuntimeError` - if encoding is unknown.
:rtype: bytes
"""
if CONTENT_TRANSFER_ENCODING in self.headers:
data = self._decode_content_transfer(data)
if CONTENT_ENCODING in self.headers:
return self._decode_content(data)
return data
def _decode_content(self, data):
encoding = self.headers[CONTENT_ENCODING].lower()
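        # zlib wbits select the framing: -MAX_WBITS decodes a raw deflate
        # stream, while 16 + MAX_WBITS expects a gzip header and trailer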
if encoding == 'deflate':
return zlib.decompress(data, -zlib.MAX_WBITS)
elif encoding == 'gzip':
return zlib.decompress(data, 16 + zlib.MAX_WBITS)
elif encoding == 'identity':
return data
else:
raise RuntimeError('unknown content encoding: {}'.format(encoding))
def _decode_content_transfer(self, data):
encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()
if encoding == 'base64':
return base64.b64decode(data)
elif encoding == 'quoted-printable':
return binascii.a2b_qp(data)
else:
raise RuntimeError('unknown content transfer encoding: {}'
''.format(encoding))
def get_charset(self, default=None):
"""Returns charset parameter from ``Content-Type`` header or default.
"""
ctype = self.headers.get(CONTENT_TYPE, '')
*_, params = parse_mimetype(ctype)
return params.get('charset', default)
@property
def filename(self):
"""Returns filename specified in Content-Disposition header or ``None``
        if missing or the header is malformed."""
_, params = parse_content_disposition(
self.headers.get(CONTENT_DISPOSITION))
return content_disposition_filename(params)
class MultipartReader(object):
"""Multipart body reader."""
#: Response wrapper, used when multipart readers constructs from response.
response_wrapper_cls = MultipartResponseWrapper
#: Multipart reader class, used to handle multipart/* body parts.
#: None points to type(self)
multipart_reader_cls = None
#: Body part reader class for non multipart/* content types.
part_reader_cls = BodyPartReader
def __init__(self, headers, content):
self.headers = headers
self._boundary = ('--' + self._get_boundary()).encode()
self._content = content
self._last_part = None
self._at_eof = False
self._unread = []
@classmethod
def from_response(cls, response):
"""Constructs reader instance from HTTP response.
:param response: :class:`~aiohttp.client.ClientResponse` instance
"""
obj = cls.response_wrapper_cls(response, cls(response.headers,
response.content))
return obj
def at_eof(self):
"""Returns ``True`` if the final boundary was reached or
``False`` otherwise.
:rtype: bool
"""
return self._at_eof
@asyncio.coroutine
def next(self):
"""Emits the next multipart body part."""
if self._at_eof:
return
yield from self._maybe_release_last_part()
yield from self._read_boundary()
if self._at_eof: # we just read the last boundary, nothing to do there
return
self._last_part = yield from self.fetch_next_part()
return self._last_part
@asyncio.coroutine
def release(self):
"""Reads all the body parts to the void till the final boundary."""
while not self._at_eof:
item = yield from self.next()
if item is None:
break
yield from item.release()
@asyncio.coroutine
def fetch_next_part(self):
"""Returns the next body part reader."""
headers = yield from self._read_headers()
return self._get_part_reader(headers)
def _get_part_reader(self, headers):
"""Dispatches the response by the `Content-Type` header, returning
suitable reader instance.
:param dict headers: Response headers
"""
ctype = headers.get(CONTENT_TYPE, '')
mtype, *_ = parse_mimetype(ctype)
if mtype == 'multipart':
if self.multipart_reader_cls is None:
return type(self)(headers, self._content)
return self.multipart_reader_cls(headers, self._content)
else:
return self.part_reader_cls(self._boundary, headers, self._content)
def _get_boundary(self):
mtype, *_, params = parse_mimetype(self.headers[CONTENT_TYPE])
assert mtype == 'multipart', 'multipart/* content type expected'
if 'boundary' not in params:
raise ValueError('boundary missed for Content-Type: %s'
% self.headers[CONTENT_TYPE])
boundary = params['boundary']
if len(boundary) > 70:
raise ValueError('boundary %r is too long (70 chars max)'
% boundary)
return boundary
@asyncio.coroutine
def _readline(self):
if self._unread:
return self._unread.pop()
return (yield from self._content.readline())
@asyncio.coroutine
def _read_boundary(self):
chunk = (yield from self._readline()).rstrip()
if chunk == self._boundary:
pass
elif chunk == self._boundary + b'--':
self._at_eof = True
else:
raise ValueError('Invalid boundary %r, expected %r'
% (chunk, self._boundary))
@asyncio.coroutine
def _read_headers(self):
lines = ['']
while True:
chunk = yield from self._content.readline()
chunk = chunk.decode().strip()
lines.append(chunk)
if not chunk:
break
parser = HttpParser()
headers, *_ = parser.parse_headers(lines)
return headers
@asyncio.coroutine
def _maybe_release_last_part(self):
"""Ensures that the last read body part is read completely."""
if self._last_part is not None:
if not self._last_part.at_eof():
yield from self._last_part.release()
self._unread.extend(self._last_part._unread)
self._last_part = None
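# Illustrative usage (assumed; the response object is hypothetical and the
# calls must run inside an @asyncio.coroutine function):
# reader = MultipartReader.from_response(resp)
# while not reader.at_eof():
#     part = yield from reader.next()
#     if part is None:
#         break
#     data = yield from part.read(decode=True)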
class BodyPartWriter(object):
"""Multipart writer for single body part."""
def __init__(self, obj, headers=None, *, chunk_size=8192):
if headers is None:
headers = CIMultiDict()
elif not isinstance(headers, CIMultiDict):
headers = CIMultiDict(headers)
self.obj = obj
self.headers = headers
self._chunk_size = chunk_size
self._fill_headers_with_defaults()
self._serialize_map = {
bytes: self._serialize_bytes,
str: self._serialize_str,
io.IOBase: self._serialize_io,
MultipartWriter: self._serialize_multipart,
('application', 'json'): self._serialize_json,
('application', 'x-www-form-urlencoded'): self._serialize_form
}
def _fill_headers_with_defaults(self):
if CONTENT_TYPE not in self.headers:
content_type = self._guess_content_type(self.obj)
if content_type is not None:
self.headers[CONTENT_TYPE] = content_type
if CONTENT_LENGTH not in self.headers:
content_length = self._guess_content_length(self.obj)
if content_length is not None:
self.headers[CONTENT_LENGTH] = str(content_length)
if CONTENT_DISPOSITION not in self.headers:
filename = self._guess_filename(self.obj)
if filename is not None:
self.set_content_disposition('attachment', filename=filename)
def _guess_content_length(self, obj):
if isinstance(obj, bytes):
return len(obj)
elif isinstance(obj, str):
*_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
charset = params.get('charset', 'us-ascii')
return len(obj.encode(charset))
elif isinstance(obj, io.StringIO):
*_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
charset = params.get('charset', 'us-ascii')
return len(obj.getvalue().encode(charset)) - obj.tell()
elif isinstance(obj, io.BytesIO):
return len(obj.getvalue()) - obj.tell()
elif isinstance(obj, io.IOBase):
try:
return os.fstat(obj.fileno()).st_size - obj.tell()
except (AttributeError, OSError):
return None
else:
return None
def _guess_content_type(self, obj, default='application/octet-stream'):
if hasattr(obj, 'name'):
name = getattr(obj, 'name')
return mimetypes.guess_type(name)[0]
elif isinstance(obj, (str, io.StringIO)):
return 'text/plain; charset=utf-8'
else:
return default
def _guess_filename(self, obj):
if isinstance(obj, io.IOBase):
name = getattr(obj, 'name', None)
if name is not None:
return os.path.basename(name)
def serialize(self):
"""Yields byte chunks for body part."""
has_encoding = (
CONTENT_ENCODING in self.headers and
self.headers[CONTENT_ENCODING] != 'identity' or
CONTENT_TRANSFER_ENCODING in self.headers
)
if has_encoding:
            # since we follow a streaming approach which doesn't assume
            # any intermediate buffers, we cannot calculate the real
            # content length with the specified content encoding scheme.
            # So, instead of lying about the content length and causing
            # reading issues, we have to strip this information.
self.headers.pop(CONTENT_LENGTH, None)
if self.headers:
yield b'\r\n'.join(
b': '.join(map(lambda i: i.encode('latin1'), item))
for item in self.headers.items()
)
yield b'\r\n\r\n'
yield from self._maybe_encode_stream(self._serialize_obj())
yield b'\r\n'
def _serialize_obj(self):
obj = self.obj
mtype, stype, *_ = parse_mimetype(self.headers.get(CONTENT_TYPE))
serializer = self._serialize_map.get((mtype, stype))
if serializer is not None:
return serializer(obj)
for key in self._serialize_map:
if not isinstance(key, tuple) and isinstance(obj, key):
return self._serialize_map[key](obj)
return self._serialize_default(obj)
def _serialize_bytes(self, obj):
yield obj
def _serialize_str(self, obj):
*_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
yield obj.encode(params.get('charset', 'us-ascii'))
def _serialize_io(self, obj):
while True:
chunk = obj.read(self._chunk_size)
if not chunk:
break
if isinstance(chunk, str):
yield from self._serialize_str(chunk)
else:
yield from self._serialize_bytes(chunk)
def _serialize_multipart(self, obj):
yield from obj.serialize()
def _serialize_json(self, obj):
*_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
yield json.dumps(obj).encode(params.get('charset', 'utf-8'))
def _serialize_form(self, obj):
if isinstance(obj, Mapping):
obj = list(obj.items())
return self._serialize_str(urlencode(obj, doseq=True))
def _serialize_default(self, obj):
raise TypeError('unknown body part type %r' % type(obj))
def _maybe_encode_stream(self, stream):
if CONTENT_ENCODING in self.headers:
stream = self._apply_content_encoding(stream)
if CONTENT_TRANSFER_ENCODING in self.headers:
stream = self._apply_content_transfer_encoding(stream)
yield from stream
def _apply_content_encoding(self, stream):
encoding = self.headers[CONTENT_ENCODING].lower()
if encoding == 'identity':
yield from stream
elif encoding in ('deflate', 'gzip'):
if encoding == 'gzip':
zlib_mode = 16 + zlib.MAX_WBITS
else:
zlib_mode = -zlib.MAX_WBITS
zcomp = zlib.compressobj(wbits=zlib_mode)
for chunk in stream:
yield zcomp.compress(chunk)
else:
yield zcomp.flush()
else:
raise RuntimeError('unknown content encoding: {}'
''.format(encoding))
def _apply_content_transfer_encoding(self, stream):
encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()
if encoding == 'base64':
buffer = bytearray()
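            # buffer input and emit chunks whose length is a multiple of
            # three bytes, so '=' padding only appears at the very end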
while True:
if buffer:
div, mod = divmod(len(buffer), 3)
chunk, buffer = buffer[:div * 3], buffer[div * 3:]
if chunk:
yield base64.b64encode(chunk)
chunk = next(stream, None)
if not chunk:
if buffer:
yield base64.b64encode(buffer[:])
return
buffer.extend(chunk)
elif encoding == 'quoted-printable':
for chunk in stream:
yield binascii.b2a_qp(chunk)
else:
raise RuntimeError('unknown content transfer encoding: {}'
''.format(encoding))
def set_content_disposition(self, disptype, **params):
"""Sets ``Content-Disposition`` header.
:param str disptype: Disposition type: inline, attachment, form-data.
Should be valid extension token (see RFC 2183)
:param dict params: Disposition params
"""
if not disptype or not (TOKEN > set(disptype)):
raise ValueError('bad content disposition type {!r}'
''.format(disptype))
value = disptype
if params:
lparams = []
for key, val in params.items():
if not key or not (TOKEN > set(key)):
raise ValueError('bad content disposition parameter'
' {!r}={!r}'.format(key, val))
qval = quote(val, '')
if key == 'filename':
lparams.append((key, '"%s"' % qval))
lparams.append(('filename*', "utf-8''" + qval))
else:
lparams.append((key, "%s" % qval))
sparams = '; '.join('='.join(pair) for pair in lparams)
value = '; '.join((value, sparams))
self.headers[CONTENT_DISPOSITION] = value
@property
def filename(self):
"""Returns filename specified in Content-Disposition header or ``None``
if missed."""
_, params = parse_content_disposition(
self.headers.get(CONTENT_DISPOSITION))
return content_disposition_filename(params)
class MultipartWriter(object):
"""Multipart body writer."""
#: Body part reader class for non multipart/* content types.
part_writer_cls = BodyPartWriter
def __init__(self, subtype='mixed', boundary=None):
boundary = boundary if boundary is not None else uuid.uuid4().hex
try:
boundary.encode('us-ascii')
except UnicodeEncodeError:
            raise ValueError('boundary should contain ASCII-only chars')
self.headers = CIMultiDict()
self.headers[CONTENT_TYPE] = 'multipart/{}; boundary="{}"'.format(
subtype, boundary
)
self.parts = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __iter__(self):
return iter(self.parts)
def __len__(self):
return len(self.parts)
@property
def boundary(self):
*_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
return params['boundary'].encode('us-ascii')
def append(self, obj, headers=None):
"""Adds a new body part to multipart writer."""
if isinstance(obj, self.part_writer_cls):
if headers:
obj.headers.update(headers)
self.parts.append(obj)
else:
if not headers:
headers = CIMultiDict()
self.parts.append(self.part_writer_cls(obj, headers))
return self.parts[-1]
def append_json(self, obj, headers=None):
"""Helper to append JSON part."""
if not headers:
headers = CIMultiDict()
headers[CONTENT_TYPE] = 'application/json'
return self.append(obj, headers)
def append_form(self, obj, headers=None):
"""Helper to append form urlencoded part."""
if not headers:
headers = CIMultiDict()
headers[CONTENT_TYPE] = 'application/x-www-form-urlencoded'
assert isinstance(obj, (Sequence, Mapping))
return self.append(obj, headers)
def serialize(self):
"""Yields multipart byte chunks."""
if not self.parts:
yield b''
return
for part in self.parts:
yield b'--' + self.boundary + b'\r\n'
yield from part.serialize()
else:
yield b'--' + self.boundary + b'--\r\n'
yield b''
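# Illustrative usage (assumed): assembling a multipart body by hand.
# with MultipartWriter('form-data') as mpwriter:
#     mpwriter.append('plain text')
#     mpwriter.append_json({'key': 'value'})
#     body = b''.join(mpwriter.serialize())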
|
kehao95/Wechat_LearnHelper
|
src/env/lib/python3.5/site-packages/aiohttp/multipart.py
|
Python
|
gpl-3.0
| 28,717
|
from util_classes import ChipType
from game_components import Chip
def chip_type_for_colour_name(colour_name):
"""
Convert a string (colour) to a ChipType
:param str colour_name: colour, e.g. 'red'
:return: the ChipType for this colour
"""
return {
'red': ChipType.red_ruby,
'blue': ChipType.blue_sapphire,
'white': ChipType.white_diamond,
'green': ChipType.green_emerald,
'black': ChipType.black_onyx,
'yellow': ChipType.yellow_gold
}[colour_name]
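# Illustrative usage:
# chip_type_for_colour_name('red')   # -> ChipType.red_ruby
# An unknown colour raises KeyError.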
def pieces_match(a, b):
"""
Are a and b either the same piece or the same colour of chip
:param a: A game component
:type a: game_components.AbstractGameComponent
:param b: Another game component
:type b: game_components.AbstractGameComponent
:return: True if A and B match
"""
if a == b:
return True
return isinstance(a, Chip) \
and isinstance(b, Chip) \
and a.chip_type == b.chip_type
class SplitIntoIncludedAndExcluded:
"""
Split up pieces in source and target:
* self.included: both in source + target
* self.excluded: in target but not in source
* self.remaining: in source but not in target
Chips of the same colour are treated as the same piece.
Unlike sets, the lists can contain multiple pieces of the same type.
This is typically used to work out whether a player can take
a piece, halfway through a turn, based on the list of available moves
"""
def __init__(self, source, target):
self.included = []
self.excluded = []
self.remaining = source[:]
for target_piece in target:
found = False
for i, remaining_piece in enumerate(self.remaining):
if not found and pieces_match(target_piece, remaining_piece):
self.included.append(remaining_piece)
del (self.remaining[i])
found = True
if not found:
self.excluded.append(target_piece)
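# Illustrative behaviour with plain (equality-compared) pieces:
# s = SplitIntoIncludedAndExcluded([1, 2, 2, 3], [2, 3, 4])
# s.included == [2, 3]; s.excluded == [4]; s.remaining == [1, 2]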
|
CoachCoen/games
|
utils.py
|
Python
|
mit
| 2,044
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Projet : Editeur, Compilateur et Micro-Ordinateur pour
un langage assembleur.
Nom du fichier : 01-04-CodeScrolledText.py
Identification : 01-04-CodeScrolledText
Titre : Widget : Texte avec défilement et numéro de ligne
Auteurs : Francis Emond, Malek Khattech,
Mamadou Dia, Marc-André Jean
Date : 13-04-2017
Description : Un widget Tk qui représente une ScrolledText avancé.
Le module ``CodeScrolledText``
================================
Ce module contient une classe nommée « CodeScrolledText » qui est
un widget similaire à «ScrolledText» mais plus avancé pour la
programmation. Il s'agit d'un Widget qui n'existe pas dans la
bibliothèque de Tkinter (Python). Cette classe utilise deux classes
« TextwLineNumbers » et « TextAvanced ». TextwLineNumbers est un
Canvas Tk qui est utilisé ici pour « redessiner » les numéros de ligne.
TextAvanced est tout simplement un Widget Text à lequel on ajoute un
évènement qui est déclencher lors de l'ajout ou la suppression de
ligne. CodeScrolledText, quand à elle, s'occupe de lié ces deux classes
avec une ScrollBar.
"""
__author__ = "Francis Emond, Malek Khattech, Mamadou Dia, Marc-Andre Jean"
__version__ = "1.0"
__status__ = "Production"
# Hide the classes that are not needed publicly
__all__ = ['CodeScrolledText']
# Import Tkinter according to the Python version.
# Python 2 only:
try:
from Tkinter import *
import ttk as ttk
# Python 2 and 3 (Python 2 after ''pip install future''):
except ImportError:
from tkinter import *
import tkinter.ttk as ttk
class TextwLineNumbers(Canvas):
"""
Classe qui hérite de Canvas. Cette classe est utilisé pour
redessiner les numéros de ligne accompagné avec le « textwidget ».
Cette classe est interne au module. Elle est un objet pour la
classe central du module : CodeScrolledText.
:example:
>>> test = TextwLineNumbers()
>>> test.attach(TextAdvanced())
>>> test.redraw()
"""
def __init__(self, *args, **kwargs):
"""
Constructeur de TextwLineNumbers.
Le constructeurs prend en argument *args et **kwargs pour
une construction plus complexe du canvas si désiré.
:example:
>>> test = TextwLineNumbers()
:param *args: variable positional parameter
:type *args: argument unpacking
:param **kwargs: variable keyword parameter
:type **kwargs: keyword argument unpacking
"""
# Initialisation du Canvas et du textwidget.
Canvas.__init__(self, *args, **kwargs)
self.textwidget = None
return
def attach(self, argtextwidget):
"""
Fonction qui attache le Widget Text « argtextwidget ».
Cette fonction attache le Widget Text « argtextwidget »
à la classe. Celui-ci devrait être de type « TextAdvanced »
pour que cette classe puisse redessiner les lignes.
:example:
>>> test = TextwLineNumbers()
>>> test.attach(TextAdvanced())
:param argtextwidget: Widget Text du Canvas.
:type argtextwidget: TextAdvanced
"""
self.textWidget = argtextwidget
return
def redraw(self, *args):
"""
Redessine les numéros de lignes dans le canvas.
Lorsque cette classe est attaché au TextAdvanced et que
celui-ci déclenche sont event, cette fonction est
appelé et les numéros de lignes sont redessinées.
:example:
>>> test = TextwLineNumbers()
>>> test.attach(TextAdvanced())
>>> test.redraw()
"""
# Nous supprimons tout du canvas.
self.delete("all")
# On remets l'index au début.
try:
i = self.textWidget.index("@0,0")
except AttributeError:
raise AttributeError("Vous devez appeler attach() avant "
"d'appeler redraw().")
# Pour toutes les lignes existantes nous redéfinisons:
while True:
dline = self.textWidget.dlineinfo(i)
# On quitte la boucle lorsque nous avons parcouru la boucle
if dline is None:
break
y = dline[1]
# On quitte si nous avons dépasser le nombre maximal en ROM
if y > 0x40FB:
break
# On érit le format en Hex (4 bytes) et on l'imprime
linetext = '0x' + \
format((int(str(i).split(".")[0]) - 1) * 2, '#06X')[2:]
self.create_text(2, y, anchor="nw", text=linetext)
i = self.textWidget.index("%s+1line" % i)
class TextAdvanced(Text):
"""
TextAvanced est tout simplement un Widget Text à lequel on ajoute
un évènement qui est déclencher lors de l'ajout ou la suppression
d'une ligne.
:example:
>>> test = TextAdvanced()
"""
def __init__(self, *args, **kwargs):
"""
Constructeur de TextAdvanced.
Le constructeurs prend en argument *args et **kwargs pour
une construction plus complexe du Text si désiré.
:example:
>>> test = TextAdvanced()
"""
# Initialisation de Text avec les arguments.
Text.__init__(self, *args, **kwargs)
# Création des évènements pour l'insertion ou la supression de
# ligne dans la zone de texte, ou lorsque le texte est
# « scrolled ». Le code en commentaire n'est pas du code Python mais
# du code Tcl (Tck/Tk).
self.tk.eval('''
proc widget_proxy {widget widget_command args} {
# On fait un appel avec le vrai tk Widget avec ces args.
set result [uplevel [linsert $args 0 $widget_command]]
# On génère les évènements pour les différents types
# de commandes.
if {([lindex $args 0] in {insert replace delete}) ||
([lrange $args 0 2] == {mark set insert}) ||
([lrange $args 0 1] == {xview moveto}) ||
([lrange $args 0 1] == {xview scroll}) ||
([lrange $args 0 1] == {yview moveto}) ||
([lrange $args 0 1] == {yview scroll})} {
event generate $widget <<Change>> -when tail
}
                # Return the result.
return $result
}
''')
self.tk.eval('''
rename {widget} _{widget}
interp alias {{}} ::{widget} {{}} widget_proxy {widget} _{widget}
'''.format(widget=str(self)))
return
class CodeScrolledText(Frame):
"""
class CodeScrolledText
========================
Cette classe hérite d'un widget Frame. Elle y inclut un widget
Text, un widget Label et un widget Scrollbar.
:example:
>>> test = CodeScrolledText()
"""
def __init__(self, parent=None):
"""
Constructeur de CodeScrolledText.
Le constructeur initialise son Frame avec le parent qui est
donné en argument. Il initialise ses composantes.
:example:
>>> test = CodeScrolledText(None)
>>> test = CodeScrolledText(Frame())
:param parent: Widget Parent de la classe.
:type parent: Widget (Tk)
"""
# Initialistion du status bar.
Frame.__init__(self, parent)
# --Initialisation de la scrollbar.
self.scrollbar = Scrollbar(self)
# --Initialisation de la zone de code.
self.editArea = TextAdvanced(self, yscrollcommand=self.scrollbar.set)
# --Initialisation de la zone de numéro de ligne.
self.linenumbers = TextwLineNumbers(self, width=40)
self.linenumbers.attach(self.editArea)
# Configuration du tout.
self.scrollbar.config(command=self.editArea.yview)
self.scrollbar.pack(side="right", fill="y")
self.linenumbers.pack(side="left", fill="y", expand=True)
self.editArea.pack(side="left", fill="both", expand=True)
# Liaison des fonctions ultérieurement écrites (voir plus haut
# dans TextAdvanced) avec TextwLineNumbers.
self.editArea.bind("<<Change>>", self.__onChange)
self.editArea.bind("<Configure>", self.__onChange)
# Fin de __init__.
return
def __onChange(self, event):
"""
Event pour la mise à jour des lignes.
Cette fonction est appelé par le TextAdvanced lors d'une
modification pour permettre la mise à jour du canvas et
des numéros de lignes avec TextwLineNumbers.redraw().
:param event: Informations de l'évènement.
:type event: Tk Event
"""
self.linenumbers.redraw()
return
def get(self, start, stop):
"""
Wrapper pour la fonction get du TextAdvanced.
Cette fonction est un wrapper pour la fonction get()
du TextAdvanced.
:example:
>>> test = CodeScrolledText(None)
>>> test.get('0.0', END)
u'\\n'
:param start: Position début du texte à extraire.
:type start: str
:param stop: Position fin du texte à extraire.
:type stop: Indexes
:return: Retourne le texte désiré.
:rtype: str
"""
return self.editArea.get(start, stop)
def delete(self, start, stop):
"""
Wrapper pour la fonction delete du TextAdvanced.
Cette fonction est un wrapper pour la fonction delete()
du TextAdvanced.
:example:
>>> test = CodeScrolledText(None)
>>> test.delete('0.0', END)
:param start: Position début du texte à supprimer.
:type start: str
:param stop: Position fin du texte à supprimer.
:type stop: Indexes
"""
return self.editArea.delete(start, stop)
def insert(self, start, texte):
"""
Wrapper pour la fonction insert du TextAdvanced.
Cette fonction est un wrapper pour la fonction insert()
du TextAdvanced.
:example:
>>> test = CodeScrolledText(None)
>>> test.insert('0.0', "test")
>>> test.get('0.0', END)
u'test\\n'
:param start: Position début du texte où insérer.
:type start: str
:param texte: Texte à insérer.
:type texte: str
"""
return self.editArea.insert(start, texte)
# Enable doctests
if __name__ == "__main__":
import doctest
doctest.testmod()
|
MarcAndreJean/PCONC
|
Modules/01-04-CodeScrolledText.py
|
Python
|
mit
| 11,142
|
from __future__ import absolute_import, division, print_function
import datashape
from datashape import String, DataShape, Option, bool_
from odo.utils import copydoc
from .expressions import schema_method_list, ElemWise
from .arithmetic import Interp, Repeat, _mkbin, repeat, interp, _add, _radd
from ..compatibility import basestring
__all__ = ['Like', 'like', 'strlen', 'UnaryStringFunction']
class Like(ElemWise):
""" Filter expression by string comparison
>>> from blaze import symbol, like, compute
>>> t = symbol('t', 'var * {name: string, city: string}')
>>> expr = t[t.name.like('Alice*')]
>>> data = [('Alice Smith', 'New York'),
... ('Bob Jones', 'Chicago'),
... ('Alice Walker', 'LA')]
>>> list(compute(expr, data))
[('Alice Smith', 'New York'), ('Alice Walker', 'LA')]
"""
__slots__ = '_hash', '_child', 'pattern'
def _dshape(self):
shape, schema = self._child.dshape.shape, self._child.schema
schema = Option(bool_) if isinstance(schema.measure, Option) else bool_
return DataShape(*(shape + (schema,)))
@copydoc(Like)
def like(child, pattern):
if not isinstance(pattern, basestring):
raise TypeError('pattern argument must be a string')
return Like(child, pattern)
class UnaryStringFunction(ElemWise):
"""String function that only takes a single argument.
"""
__slots__ = '_hash', '_child'
class strlen(UnaryStringFunction):
schema = datashape.int64
def isstring(ds):
measure = ds.measure
return isinstance(getattr(measure, 'ty', measure), String)
_mod, _rmod = _mkbin('mod', Interp)
_mul, _rmul = _mkbin('mul', Repeat)
schema_method_list.extend([
(
isstring,
set([
_add, _radd, _mod, _rmod, _mul, _rmul, repeat, interp, like, strlen
])
)
])
|
cpcloud/blaze
|
blaze/expr/strings.py
|
Python
|
bsd-3-clause
| 1,851
|
from gi.repository import Gtk, Gdk, GdkPixbuf
(TARGET_ENTRY_TEXT, TARGET_ENTRY_PIXBUF) = range(2)
(COLUMN_TEXT, COLUMN_PIXBUF) = range(2)
DRAG_ACTION = Gdk.DragAction.COPY
class DragDropWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Drag and Drop")
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.add(vbox)
hbox = Gtk.Box(spacing=12)
vbox.pack_start(hbox, True, True, 0)
self.iconview = DragSourceIconView()
self.drop_area = DropArea()
hbox.pack_start(self.iconview, True, True, 0)
hbox.pack_start(self.drop_area, True, True, 0)
button_box = Gtk.Box(spacing=6)
vbox.pack_start(button_box, True, False, 0)
image_button = Gtk.RadioButton.new_with_label_from_widget(None,
"Images")
image_button.connect("toggled", self.add_image_targets)
button_box.pack_start(image_button, True, False, 0)
text_button = Gtk.RadioButton.new_with_label_from_widget(image_button,
"Text")
text_button.connect("toggled", self.add_text_targets)
button_box.pack_start(text_button, True, False, 0)
self.add_image_targets()
def add_image_targets(self, button=None):
targets = Gtk.TargetList.new([])
targets.add_image_targets(TARGET_ENTRY_PIXBUF, True)
self.drop_area.drag_dest_set_target_list(targets)
self.iconview.drag_source_set_target_list(targets)
def add_text_targets(self, button=None):
self.drop_area.drag_dest_set_target_list(None)
self.iconview.drag_source_set_target_list(None)
self.drop_area.drag_dest_add_text_targets()
self.iconview.drag_source_add_text_targets()
class DragSourceIconView(Gtk.IconView):
def __init__(self):
Gtk.IconView.__init__(self)
self.set_text_column(COLUMN_TEXT)
self.set_pixbuf_column(COLUMN_PIXBUF)
model = Gtk.ListStore(str, GdkPixbuf.Pixbuf)
self.set_model(model)
self.add_item("Item 1", "image-missing")
self.add_item("Item 2", "help-about")
self.add_item("Item 3", "edit-copy")
self.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK, [],
DRAG_ACTION)
self.connect("drag-data-get", self.on_drag_data_get)
def on_drag_data_get(self, widget, drag_context, data, info, time):
selected_path = self.get_selected_items()[0]
selected_iter = self.get_model().get_iter(selected_path)
if info == TARGET_ENTRY_TEXT:
text = self.get_model().get_value(selected_iter, COLUMN_TEXT)
data.set_text(text, -1)
elif info == TARGET_ENTRY_PIXBUF:
pixbuf = self.get_model().get_value(selected_iter, COLUMN_PIXBUF)
data.set_pixbuf(pixbuf)
def add_item(self, text, icon_name):
pixbuf = Gtk.IconTheme.get_default().load_icon(icon_name, 16, 0)
self.get_model().append([text, pixbuf])
class DropArea(Gtk.Label):
def __init__(self):
Gtk.Label.__init__(self, "Drop something on me!")
self.drag_dest_set(Gtk.DestDefaults.ALL, [], DRAG_ACTION)
self.connect("drag-data-received", self.on_drag_data_received)
    def on_drag_data_received(self, widget, drag_context, x, y, data, info, time):
if info == TARGET_ENTRY_TEXT:
text = data.get_text()
print("Received text: %s" % text)
elif info == TARGET_ENTRY_PIXBUF:
pixbuf = data.get_pixbuf()
width = pixbuf.get_width()
height = pixbuf.get_height()
print("Received pixbuf with width %spx and height %spx" % (width,
height))
win = DragDropWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
Dehyrf/python_gates
|
window.py
|
Python
|
gpl-3.0
| 3,800
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2016 Chris Lamb <lamby@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import re
import os
import struct
import binascii
from diffoscope.difference import Difference
from .utils.file import File
class GitIndexFile(File):
DESCRIPTION = "Git repositories"
FILE_TYPE_RE = re.compile(r'^Git index')
def compare_details(self, other, source=None):
return [Difference.from_text(
describe_index(self.path),
describe_index(other.path),
self.path,
other.path,
)]
def parse_index(f):
# the first four bytes are the "DIRC" signature; the next four the version
_, version = struct.unpack('>LL', f.read(4 * 2))
return {
'version': version,
'entries': list(parse_entries(f)),
}
def parse_entries(f):
num_entries = struct.unpack('>L', f.read(4))[0]
for _ in range(num_entries):
x = {}
pos = f.tell()
x['ctime'], x['ctime_nano'], x['mtime'], x['mtime_nano'], \
x['dev'], x['inode'], x['mode'], x['uid'], x['gid'], \
x['size'], x['sha'], x['flags'] = \
struct.unpack('>LLLLLLLLLL20sH', f.read((4 * 10) + 20 + 2))
x['path'] = f.read(x['flags'] & 0x0fff)
# skip NUL padding: entries are aligned to 8-byte boundaries (see the helper below)
f.read((pos + ((f.tell() - pos + 8) & ~7)) - f.tell())
yield x
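# A minimal sketch (our own helper, not part of diffoscope) of the 8-byte
# alignment rule encoded in the read() arithmetic above: each index entry is
# NUL-padded so that its total on-disk length, counted from the entry start,
# is the next multiple of eight strictly greater than the bytes consumed.
def _padded_entry_length(consumed):
    # e.g. a 62-byte fixed header plus a 9-byte path consumes 71 bytes,
    # which pads to 72; consuming exactly 72 bytes pads out to 80.
    return (consumed + 8) & ~7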
def describe_index(filename):
with open(filename, 'rb') as f:
index = parse_index(f)
return """
Version: {version}
Entries:
{entries_fmt}
""".format(
entries_fmt=''.join(describe_entry(x) for x in index['entries']),
**index
)
def describe_entry(x):
return """
Path: {x[path]}
SHA: {hexsha}
Size: {x[size]}
Flags: {x[flags]:#b}
User ID: {x[uid]}
Group ID: {x[gid]}
Created: {x[ctime]}.{x[ctime_nano]}
Modified: {x[mtime]}.{x[mtime_nano]}
Inode: {x[inode]}
Device ID: ({major}, {minor})
""".format(
x=x,
major=os.major(x['dev']),
minor=os.minor(x['dev']),
hexsha=binascii.b2a_hex(x['sha']).decode('utf-8'),
)
|
ReproducibleBuilds/diffoscope
|
diffoscope/comparators/git.py
|
Python
|
gpl-3.0
| 2,663
|
'''
Created on Jul 5, 2010
@author: Soren S. Nielsen
#******************************************************************************
# This file is part of RAW.
#
# RAW is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAW is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAW. If not, see <http://www.gnu.org/licenses/>.
#
#******************************************************************************
'''
from __future__ import division # TODO: check whether true division is right?
import os, sys, copy
from math import pi, sin
import wx  # used below by SECM (wx.FindWindowByName, wx.CallAfter, wx.MessageBox)
import numpy as np
import scipy.interpolate as interp
from scipy import integrate as integrate
RAW_DIR = os.path.dirname(os.path.abspath(__file__))
if RAW_DIR not in sys.path:
sys.path.append(RAW_DIR)
import SASCalib, SASExceptions
class SASM:
'''
Small Angle Scattering Measurement (SASM) Object.
Contains all information extracted from a SAS data file.
'''
def __init__(self, i, q, err, parameters):
''' Constructor
parameters contains at least {'filename': filename_with_no_path}
other reserved keys are:
'counters' : [(countername, value),...] Info from counterfiles
'fileHeader' : [(label, value),...] Info from the header in the loaded file
'''
#Raw intensity variables
self._i_raw = np.array(i)
self._q_raw = np.array(q)
self._err_raw = np.array(err)
self._parameters = parameters
# Make an entry for analysis parameters i.e. Rg, I(0) etc:
if 'analysis' not in self._parameters:
self._parameters['analysis'] = {}
if 'history' not in self._parameters:
self._parameters['history'] = {}
#Binned intensity variables
self._i_binned = self._i_raw.copy()
self._q_binned = self._q_raw.copy()
self._err_binned = self._err_raw.copy()
#Modified intensity variables
self.i = self._i_raw.copy()
self.q = self._q_raw.copy()
self.err = self._err_raw.copy()
self._scale_factor = 1
self._offset_value = 0
self._norm_factor = 1
self._q_scale_factor = 1
self._bin_size = 1
#variables used for plot management
self.item_panel = None
self.plot_panel = None
self.line = None
self.origline = None
self.fitline = None
self.err_line = None
self.axes = None
self.is_plotted = False
self._selected_q_range = (0, len(self._q_binned))
#Calculated values
try:
if len(self.q)>0:
self.total_intensity = integrate.simps(self.i, self.q)
self.mean_intensity = self.i.mean()
except:
self.total_intensity = -1
self.mean_intensity = -1
def __deepcopy__(self, memo):
#Raw intensity variables
i_raw = copy.deepcopy(self._i_raw, memo)
q_raw = copy.deepcopy(self._q_raw, memo)
err_raw = copy.deepcopy(self._err_raw, memo)
parameters = copy.deepcopy(self._parameters, memo)
newsasm = SASM(i_raw, q_raw, err_raw, parameters)
#Binned intensity variables
newsasm.setQrange(copy.deepcopy(self.getQrange(), memo))
newsasm.scale(copy.deepcopy(self.getScale(), memo))
newsasm.normalize(copy.deepcopy(self._norm_factor, memo))
newsasm.offset(copy.deepcopy(self.getOffset(), memo))
newsasm._q_scale_factor = copy.deepcopy(self._q_scale_factor, memo)
newsasm._bin_size = copy.deepcopy(self.getBinning(), memo)
newsasm.setBinnedI(copy.deepcopy(self.getBinnedI(), memo))
newsasm.setBinnedQ(copy.deepcopy(self.getBinnedQ(), memo))
newsasm.setBinnedErr(copy.deepcopy(self.getBinnedErr(), memo))
newsasm._update()
return newsasm
def _update(self):
''' updates modified intensity after scale, normalization and offset changes '''
#self.i = ((self._i_binned / self._norm_factor) + self._offset_value) * self._scale_factor
self.i = ((self._i_binned / self._norm_factor) * self._scale_factor) + self._offset_value
#self.err = ((self._err_binned / self._norm_factor) + self._offset_value) * abs(self._scale_factor)
self.err = ((self._err_binned / self._norm_factor)) * abs(self._scale_factor)
self.q = self._q_binned * self._q_scale_factor
#Calculated values
try:
if len(self.q)>0:
self.total_intensity = integrate.simps(self.i, self.q)
self.mean_intensity = self.i.mean()
except:
self.total_intensity = -1
self.mean_intensity = -1
def getScale(self):
return self._scale_factor
def getOffset(self):
return self._offset_value
def getLine(self):
return self.line
def scaleRelative(self, relscale):
self._scale_factor = abs(self._scale_factor * relscale)
self._update()
def scale(self, scale_factor):
''' Scale intensity by a factor from the raw intensity, also scales errorbars appropriately '''
self._scale_factor = abs(scale_factor)
self._update()
def normalize(self, norm_value):
''' Normalize (divide) raw intensity by a value, errorbars follow '''
self._norm_factor = norm_value
self._update()
def offset(self, offset_value):
''' Offset raw intensity by a constant. Only modified intensity is affected '''
self._offset_value = offset_value
self._update()
def scaleBinnedQ(self, scale_factor):
self._q_binned = self._q_binned * scale_factor
self._update()
def scaleQ(self, q_scale_factor):
''' scale Q values by a factor (calibrate) '''
self._q_scale_factor = q_scale_factor
self._update()
def calibrateQ(self, sd_distance, delta_q_length, wavelength):
''' calibrates the q_vector from the sample-detector
distance sd_distance. Going from a q-vector in pixels
to inverse angstroms via delta_q_length (ex. detector pixel size)'''
for q_idx in range(0,len(self._q_binned)):
q_vector = self._q_binned[q_idx]
theta = SASCalib.calcTheta(sd_distance, delta_q_length, q_vector)
self._q_binned[q_idx] = ((4 * pi * sin(theta)) / wavelength)
self._update()
def reset(self):
''' Reset q, i and err to their original values '''
self.i = self._i_raw.copy()
self.q = self._q_raw.copy()
self.err = self._err_raw.copy()
self._i_binned = self._i_raw.copy()
self._q_binned = self._q_raw.copy()
self._err_binned = self._err_raw.copy()
self._scale_factor = 1
self._offset_value = 0
self._norm_factor = 1
self._q_scale_factor = 1
def setQrange(self, qrange):
if qrange[0] < 0 or qrange[1] > (len(self._q_binned)):
raise SASExceptions.InvalidQrange('Qrange: ' + str(qrange) + ' is not a valid q-range for a q-vector of length ' + str(len(self._q_binned)-1))
else:
self._selected_q_range = qrange
def getQrange(self):
return self._selected_q_range
def setAllParameters(self, new_parameters):
self._parameters = new_parameters
def getAllParameters(self):
return self._parameters
def getParameter(self, key):
''' Get parameter from parameters dict '''
if key in self._parameters:
return self._parameters[key]
else:
return None
def setParameter(self, key, value):
''' insert key,value pair into parameters dict '''
self._parameters[key] = value
def removeZingers(self, start_idx = 0, window_length = 10, stds = 4.0):
''' Removes spikes from the radial averaged data
Threshold is currently 4 times the standard deviation (stds)
window_length : The number of points before the spike
that are averaged and used to replace the spike.
start_idx : Index in the intensityArray to start the search for spikes
'''
intensity = self._i_binned
for i in range(window_length + start_idx, len(intensity)):
averaging_window = intensity[i - window_length : i]
averaging_window_std = np.std(averaging_window)
averaging_window_mean = np.mean(averaging_window)
threshold = averaging_window_mean + (stds * averaging_window_std)
if intensity[i] > threshold:
intensity[i] = averaging_window_mean
self._update()
# def logRebin(self, no_points, start_idx = 0, end_idx = -1):
# pass
def setLogBinning(self, no_points, start_idx = 0, end_idx = -1):
if end_idx == -1:
end_idx = len(self._i_raw)
# slice with *_roi names so the loop index below does not shadow the data arrays
i_roi = self._i_raw[start_idx:end_idx]
q_roi = self._q_raw[start_idx:end_idx]
err_roi = self._err_raw[start_idx:end_idx]
bins = np.logspace(1, np.log10(len(q_roi)), no_points)
binned_q = []
binned_i = []
binned_err = []
idx = 0
for i in range(0, len(bins)):
no_of_bins = int(np.floor(bins[i] - bins[i-1]))
if no_of_bins > 1:
mean_q = np.mean( q_roi[ idx : idx + no_of_bins ] )
mean_i = np.mean( i_roi[ idx : idx + no_of_bins ] )
mean_err = np.sqrt( sum( np.power( err_roi[ idx : idx + no_of_bins ], 2) ) ) / np.sqrt( no_of_bins )
binned_q.append(mean_q)
binned_i.append(mean_i)
binned_err.append(mean_err)
idx = idx + no_of_bins
else:
binned_q.append(q_roi[idx])
binned_i.append(i_roi[idx])
binned_err.append(err_roi[idx])
idx = idx + 1
self._i_binned = np.array(binned_i)
self._q_binned = np.array(binned_q)
self._err_binned = np.array(binned_err)
self._update()
self._selected_q_range = (0, len(self._i_binned))
def setBinning(self, bin_size, start_idx = 0, end_idx = -1):
''' Sets the bin size of the I_q plot
end_idx will be lowered to fit the bin_size
if needed.
'''
self._bin_size = bin_size
if end_idx == -1:
end_idx = len(self._i_raw)
len_iq = len(self._i_raw[start_idx:end_idx])
no_of_bins = int(np.floor(len_iq / bin_size))
end_idx = start_idx + no_of_bins*bin_size
i_roi = self._i_raw[start_idx:end_idx]
q_roi = self._q_raw[start_idx:end_idx]
err_roi = self._err_raw[start_idx:end_idx]
new_i = np.zeros(no_of_bins)
new_q = np.zeros(no_of_bins)
new_err = np.zeros(no_of_bins)
for eachbin in range(0, no_of_bins):
first_idx = eachbin * bin_size
last_idx = (eachbin * bin_size) + bin_size
new_i[eachbin] = sum(i_roi[first_idx:last_idx]) / bin_size
new_q[eachbin] = sum(q_roi[first_idx:last_idx]) / bin_size
new_err[eachbin] = np.sqrt(sum(np.power(err_roi[first_idx:last_idx],2))) / np.sqrt(bin_size)
if end_idx == -1 or end_idx == len(self._i_raw):
self._i_binned = np.append(self._i_raw[0:start_idx], new_i)
self._q_binned = np.append(self._q_raw[0:start_idx], new_q)
self._err_binned = np.append(self._err_raw[0:start_idx], new_err)
else:
self._i_binned = np.append(np.append(self._i_raw[0:start_idx], new_i), self._i_raw[end_idx:])
self._q_binned = np.append(np.append(self._q_raw[0:start_idx], new_q), self._q_raw[end_idx:])
self._err_binned = np.append(np.append(self._err_raw[0:start_idx], new_err), self._err_raw[end_idx:])
self._update()
self._selected_q_range = (0, len(self._i_binned))
def getBinning(self):
return self._bin_size
def getBinnedQ(self):
return self._q_binned
def getBinnedI(self):
return self._i_binned
def getBinnedErr(self):
return self._err_binned
def setBinnedI(self, new_binned_i):
self._i_binned = new_binned_i
def setBinnedQ(self, new_binned_q):
self._q_binned = new_binned_q
def setBinnedErr(self, new_binned_err):
self._err_binned = new_binned_err
def setScaleValues(self, scale_factor, offset_value, norm_factor, q_scale_factor, bin_size):
self._scale_factor = scale_factor
self._offset_value = offset_value
self._norm_factor = norm_factor
self._q_scale_factor = q_scale_factor
self._bin_size = bin_size
def scaleRawIntensity(self, scale):
self._i_raw = self._i_raw * scale
self._err_raw = self._err_raw * scale
def scaleBinnedIntensity(self, scale):
self._i_binned = self._i_binned * scale
self._err_binned = self._err_binned * scale
self._update()
def offsetBinnedIntensity(self, offset):
self._i_binned = self._i_binned + offset
# errors are unchanged by a constant offset
self._update()
def extractAll(self):
''' extracts all data from the object and delivers it as a dict '''
all_data = {}
all_data['i_raw'] = self._i_raw
all_data['q_raw'] = self._q_raw
all_data['err_raw'] = self._err_raw
all_data['i_binned'] = self._i_binned
all_data['q_binned'] = self._q_binned
all_data['err_binned'] = self._err_binned
all_data['scale_factor'] = self._scale_factor
all_data['offset_value'] = self._offset_value
all_data['norm_factor'] = self._norm_factor
all_data['q_scale_factor'] = self._q_scale_factor
all_data['bin_size'] = self._bin_size
all_data['selected_qrange'] = self._selected_q_range
all_data['parameters'] = self._parameters
return all_data
def copy(self):
''' return a copy of the object '''
return SASM(copy.copy(self.i), copy.copy(self.q), copy.copy(self.err), copy.copy(self._parameters))
def getMeanI(self):
return self.mean_intensity
def getTotalI(self):
return self.total_intensity
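# Hedged usage sketch (ours, not part of RAW): build a SASM from synthetic
# data and exercise the scale/normalize/offset pipeline, which _update()
# applies as i = (i_binned / norm_factor) * scale_factor + offset_value.
def _demo_sasm_pipeline():
    q = np.linspace(0.01, 0.5, 200)
    i = 1.0 / (1.0 + (q * 30.0) ** 2)        # Lorentzian-like toy curve
    err = 0.01 * np.ones_like(i)
    sm = SASM(i, q, err, {'filename': 'demo.dat'})
    sm.normalize(2.0)                        # divide raw intensity by 2
    sm.scale(3.0)                            # then multiply by 3
    sm.offset(0.1)                           # then add a constant
    assert np.allclose(sm.i, (i / 2.0) * 3.0 + 0.1)
    return sm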
class IFTM(SASM):
'''
Inverse Fourier transform measurement (IFTM) Object.
Contains all information extracted from an IFT.
'''
def __init__(self, p, r, err, i_orig, q_orig, err_orig, i_fit, parameters, i_extrap = [], q_extrap = []):
''' Constructor
parameters contains at least {'filename': filename_with_no_path}
other reserved keys are:
'counters' : [(countername, value),...] Info from counterfiles
'fileHeader' : [(label, value),...] Info from the header in the loaded file
'''
#Raw intensity variables
self._r_raw = np.array(r)
self._p_raw = np.array(p)
self._err_raw = np.array(err)
self._i_orig_raw = np.array(i_orig)
self._q_orig_raw = np.array(q_orig)
self._err_orig_raw = np.array(err_orig)
self._i_fit_raw = np.array(i_fit)
self._i_extrap_raw = np.array(i_extrap)
self._q_extrap_raw = np.array(q_extrap)
self._parameters = parameters
# Make an entry for analysis parameters i.e. Rg, I(0) etc:
# if 'analysis' not in self._parameters:
# self._parameters['analysis'] = {}
# if 'history' not in self._parameters:
# self._parameters['history'] = {}
#Binned intensity variables
self._i_orig_binned = self._i_orig_raw.copy()
self._q_orig_binned = self._q_orig_raw.copy()
self._err_orig_binned = self._err_orig_raw.copy()
self._i_fit_binned = self._i_fit_raw.copy()
self._i_extrap_binned = self._i_extrap_raw.copy()
self._q_extrap_binned = self._q_extrap_raw.copy()
#Modified intensity variables
self.r = self._r_raw.copy()
self.p = self._p_raw.copy()
self.err = self._err_raw.copy()
self.i_orig = self._i_orig_raw.copy()
self.q_orig = self._q_orig_raw.copy()
self.err_orig = self._err_orig_raw.copy()
self.i_fit = self._i_fit_raw.copy()
self.i_extrap = self._i_extrap_raw.copy()
self.q_extrap = self._q_extrap_raw.copy()
# self._scale_factor = 1
# self._offset_value = 0
# self._norm_factor = 1
# self._q_scale_factor = 1
# self._bin_size = 1
#variables used for plot management
self.item_panel = None
self.plot_panel = None
self.r_line = None
self.qo_line = None
self.qf_line = None
self.r_origline = None
self.qo_origline = None
self.qf_origline = None
# self.fitline = None
self.r_err_line = None
self.qo_err_line = None
self.r_axes = None
self.qo_axes = None
self.qf_axes = None
self.canvas = None
self.is_plotted = False
self._selected_q_range = (0, len(self._q_orig_binned))
def _update(self):
''' updates modified intensity after scale, normalization and offset changes '''
# NOTE: this relies on self._i_binned, self._norm_factor, etc., which IFTM's
# __init__ leaves commented out, so the inherited scale()/normalize()/offset()
# calls will raise AttributeError until those attributes are initialized.
#self.i = ((self._i_binned / self._norm_factor) + self._offset_value) * self._scale_factor
self.i = ((self._i_binned / self._norm_factor) * self._scale_factor) + self._offset_value
#self.err = ((self._err_binned / self._norm_factor) + self._offset_value) * abs(self._scale_factor)
self.err = ((self._err_binned / self._norm_factor)) * abs(self._scale_factor)
self.q = self._q_binned * self._q_scale_factor
def getScale(self):
return self._scale_factor
def getOffset(self):
return self._offset_value
def getLine(self):
return self.line
def scaleRelative(self, relscale):
self._scale_factor = abs(self._scale_factor * relscale)
self._update()
def scale(self, scale_factor):
''' Scale intensity by a factor from the raw intensity, also scales errorbars appropriately '''
self._scale_factor = abs(scale_factor)
self._update()
def normalize(self, norm_value):
''' Normalize (divide) raw intensity by a value, errorbars follow '''
self._norm_factor = norm_value
self._update()
def offset(self, offset_value):
''' Offset raw intensity by a constant. Only modified intensity is affected '''
self._offset_value = offset_value
self._update()
def reset(self):
# ''' Reset q, i and err to their original values '''
# self.i = self._i_raw.copy()
# self.q = self._q_raw.copy()
# self.err = self._err_raw.copy()
# self._i_binned = self._i_raw.copy()
# self._q_binned = self._q_raw.copy()
# self._err_binned = self._err_raw.copy()
# self._scale_factor = 1
# self._offset_value = 0
# self._norm_factor = 1
# self._q_scale_factor = 1
pass
def setQrange(self, qrange):
if qrange[0] < 0 or qrange[1] > (len(self._q_orig_binned)):
raise SASExceptions.InvalidQrange('Qrange: ' + str(qrange) + ' is not a valid q-range for a q-vector of length ' + str(len(self._q_orig_binned)-1))
else:
self._selected_q_range = qrange
def getQrange(self):
return self._selected_q_range
def setAllParameters(self, new_parameters):
self._parameters = new_parameters
def getAllParameters(self):
return self._parameters
def getParameter(self, key):
''' Get parameter from parameters dict '''
if key in self._parameters:
return self._parameters[key]
else:
return None
def setParameter(self, key, value):
''' insert key,value pair into parameters dict '''
self._parameters[key] = value
def setScaleValues(self, scale_factor, offset_value, norm_factor, q_scale_factor, bin_size):
self._scale_factor = scale_factor
self._offset_value = offset_value
self._norm_factor = norm_factor
self._q_scale_factor = q_scale_factor
self._bin_size = bin_size
def extractAll(self):
''' extracts all data from the object and delivers it as a dict '''
all_data = {}
all_data['r_raw'] = self._r_raw
all_data['p_raw'] = self._p_raw
all_data['err_raw'] = self._err_raw
all_data['i_orig_raw'] = self._i_orig_raw
all_data['q_orig_raw'] = self._q_orig_raw
all_data['err_orig_raw'] = self._err_orig_raw
all_data['i_fit_raw'] = self._i_fit_raw
all_data['i_extrap_raw'] = self._i_extrap_raw
all_data['q_extrap_raw'] = self._q_extrap_raw
# all_data['i_binned'] = self._i_binned
# all_data['q_binned'] = self._q_binned
# all_data['err_binned'] = self._err_binned
# all_data['scale_factor'] = self._scale_factor
# all_data['offset_value'] = self._offset_value
# all_data['norm_factor'] = self._norm_factor
# all_data['q_scale_factor'] = self._q_scale_factor
# all_data['bin_size'] = self._bin_size
all_data['selected_qrange'] = self._selected_q_range
all_data['parameters'] = self._parameters
return all_data
def copy(self):
''' return a copy of the object '''
# NOTE: IFTM defines i_orig/q_orig but never plain self.i/self.q, so this
# will raise AttributeError if called.
return SASM(copy.copy(self.i), copy.copy(self.q), copy.copy(self.err), copy.copy(self._parameters))
class SECM:
'''
SEC-SAS Measurement (SECM) Object.
'''
def __init__(self, file_list, sasm_list, frame_list, parameters):
''' Constructor
parameters contains at least {'filename': filename_with_no_path}
other reserved keys are:
'counters' : [(countername, value),...] Info from counterfiles
'fileHeader' : [(label, value),...] Info from the header in the loaded file
'''
#Raw inputs variables
self._file_list = file_list
self._sasm_list = sasm_list
self._frame_list_raw = np.array(frame_list, dtype=int)
self._parameters = parameters
# Make an entry for analysis parameters i.e. Rg, I(0) etc:
if 'analysis' not in self._parameters:
self._parameters['analysis'] = {}
if 'history' not in self._parameters:
self._parameters['history'] = {}
if 'filename' not in self._parameters:
self._parameters['filename'] = os.path.splitext(os.path.basename(self._file_list[0]))[0]
#Extract initial mean and total intensity variables
self._mean_i_raw = np.array([sasm.getMeanI() for sasm in self._sasm_list])
self._total_i_raw = np.array([sasm.getTotalI() for sasm in self._sasm_list])
#Set up the modified mean and total intensity variables
self.mean_i = self._mean_i_raw.copy()
self.total_i = self._total_i_raw.copy()
#Make sure we have as many frame numbers as sasm objects
if len(self._sasm_list) != len(self._frame_list_raw):
self._frame_list_raw=np.arange(len(self._sasm_list))
print('Warning: Incorrect frame number input to SECM object. Using default frame numbers.')
self.frame_list = self._frame_list_raw.copy()
self._scale_factor = 1
self._offset_value = 0
self._frame_scale_factor = 1
#variables used for plot management
self.item_panel = None
self.plot_panel = None
self.line = None
self.origline = None
self.err_line = None
self.axes = None
self.is_plotted = False
self.qref=0
self.I_of_q=[]
self.time=[]
main_frame = wx.FindWindowByName('MainFrame')
hdr_format = main_frame.raw_settings.get('ImageHdrFormat')
if hdr_format == 'G1, CHESS' or hdr_format == 'G1 WAXS, CHESS':
for sasm in self._sasm_list:
if 'counters' in sasm.getAllParameters():
file_hdr = sasm.getParameter('counters')
if '#C' not in file_hdr.values():
if 'Time' in file_hdr:
sasm_time = file_hdr['Time']
self.time.append(sasm_time)
elif 'Seconds' in file_hdr:
sasm_time = file_hdr['Seconds']
if len(self.time) == 0:
self.time.append(0)
else:
self.time.append(sasm_time+self.time[-1])
elif 'Exposure_time' in file_hdr:
sasm_time = file_hdr['Exposure_time']
if len(self.time) == 0:
self.time.append(0)
else:
self.time.append(sasm_time+self.time[-1])
self.time=np.array(self.time,dtype=float)
####### Parameters for autocalculating rg, MW for SEC plot
self.initial_buffer_frame = -1
self.final_buffer_frame = -1
self.window_size = -1
self.threshold = -1
self.mol_type = ''
self.average_buffer_sasm = None
self.subtracted_sasm_list = []
self.use_subtracted_sasm = []
self.rg_list = []
self.rger_list = []
self.i0_list = []
self.i0er_list = []
self.mw_list = []
self.mwer_list = []
self.calc_line = None
self.calc_err_line = None
self.calc_axes = None
self.calc_is_plotted = False
self.calc_has_data = False
self.is_visible = True
def _update(self):
''' updates modified intensity after scale, normalization and offset changes '''
#self.i = ((self._i_binned / self._norm_factor) + self._offset_value) * self._scale_factor
self.mean_i = ((self.mean_i) * self._scale_factor) + self._offset_value
self.total_i = ((self.total_i) * self._scale_factor) + self._offset_value
self.frame_list = self.frame_list * self._frame_scale_factor
def append(self, filename_list, sasm_list, frame_list):
self._file_list.extend(filename_list)
self._sasm_list.extend(sasm_list)
self._frame_list_raw = np.concatenate((self._frame_list_raw, np.array(frame_list, dtype=int)))
self._mean_i_raw = np.concatenate((self._mean_i_raw, np.array([sasm.getMeanI() for sasm in sasm_list])))
self._total_i_raw = np.concatenate((self._total_i_raw, np.array([sasm.getTotalI() for sasm in sasm_list])))
self.mean_i = self._mean_i_raw.copy()
self.total_i = self._total_i_raw.copy()
if len(self._sasm_list) != len(self._frame_list_raw):
self._frame_list_raw=np.arange(len(self._sasm_list))
print('Warning: Incorrect frame number input to SECM object. Using default frame numbers.')
self.frame_list = self._frame_list_raw.copy()
time=list(self.time)
main_frame = wx.FindWindowByName('MainFrame')
hdr_format = main_frame.raw_settings.get('ImageHdrFormat')
if hdr_format == 'G1, CHESS' or hdr_format == 'G1 WAXS, CHESS':
for sasm in sasm_list:
if 'counters' in sasm.getAllParameters():
file_hdr = sasm.getParameter('counters')
if '#C' not in file_hdr.values():
if 'Time' in file_hdr:
sasm_time = file_hdr['Time']
time.append(sasm_time)
elif 'Seconds' in file_hdr:
sasm_time = file_hdr['Seconds']
if len(time) == 0:
time.append(0)
else:
time.append(sasm_time+time[-1])
elif 'Exposure_time' in file_hdr:
sasm_time = file_hdr['Exposure_time']
if len(time) == 0:
time.append(0)
else:
time.append(sasm_time+time[-1])  # accumulate onto the local list, not self.time
self.time=np.array(time,dtype=float)
if self.qref>0:
I_of_q = []
closest = lambda qlist: np.argmin(np.absolute(qlist-self.qref))
for sasm in sasm_list:
# print('in sasm_list loop')
q = sasm.q
index = closest(q)
# print(index)
intensity = sasm.i[index]
# print(intensity)
I_of_q.append(intensity)
self.I_of_q.extend(I_of_q)
# print(self.time)
self._update()
def getScale(self):
return self._scale_factor
def getOffset(self):
return self._offset_value
def getLine(self):
return self.line
def getCalcLine(self):
return self.calc_line
def getSASMList(self, initial_frame, final_frame):
sasms = []
try:
initial_frame = int(initial_frame)
except:
msg = "Invalid value for initial frame."
wx.CallAfter(wx.MessageBox, msg, "Invalid frame range", style = wx.ICON_ERROR | wx.OK)
return sasms
try:
final_frame = int(final_frame)
except:
msg = "Invalid value for final frame."
wx.CallAfter(wx.MessageBox, msg, "Invalid frame range", style = wx.ICON_ERROR | wx.OK)
return sasms
if initial_frame > final_frame:
msg = "To send data to the main plot, enter a valid frame range (initial frame larger than final frame)."
wx.CallAfter(wx.MessageBox, msg, "Invalid frame range", style = wx.ICON_ERROR | wx.OK)
return sasms
elif len(np.where(self.frame_list == initial_frame)[0]) == 0:
msg = "To send data to the main plot, enter a valid frame range (initial frame not in data set)."
wx.CallAfter(wx.MessageBox, msg, "Invalid frame range", style = wx.ICON_ERROR | wx.OK)
return sasms
else:
index1 = np.where(self.frame_list == initial_frame)[0][0]
if len(np.where(self.frame_list == final_frame)[0]) == 0:
index2 = len(self.frame_list)
print('Warning: Final frame not in data set')
else:
index2 = np.where(self.frame_list == final_frame)[0][0]
sasms = self._sasm_list[index1 : index2+1]
return sasms
def getTime(self):
if len(self.time)==0:
return np.zeros_like(self.frame_list) - 1
else:
return self.time
def scaleRelative(self, relscale):
self._scale_factor = abs(self._scale_factor * relscale)
self._update()
def scale(self, scale_factor):
''' Scale intensity by a factor from the raw intensity, also scales errorbars appropriately '''
self._scale_factor = abs(scale_factor)
self._update()
def normalize(self, norm_value):
''' Normalize (divide) raw intensity by a value, errorbars follow '''
self._norm_factor = norm_value
self._update()
def offset(self, offset_value):
''' Offset raw intensity by a constant. Only modified intensity is affected '''
self._offset_value = offset_value
self._update()
def reset(self):
''' Reset q, i and err to their original values '''
self.mean_i = self._mean_i_raw.copy()
self.total_i = self._total_i_raw.copy()
self.frame_list = self._frame_list_raw.copy()
self._scale_factor = 1
self._offset_value = 0
self._frame_scale_factor = 1
def setAllParameters(self, new_parameters):
self._parameters = new_parameters
def getAllParameters(self):
return self._parameters
def getParameter(self, key):
''' Get parameter from parameters dict '''
if key in self._parameters:
return self._parameters[key]
else:
return None
def setParameter(self, key, value):
''' insert key,value pair into parameters dict '''
self._parameters[key] = value
def setScaleValues(self, scale_factor, offset_value, frame_scale_factor):
self._scale_factor = scale_factor
self._offset_value = offset_value
self._frame_scale_factor = frame_scale_factor
def extractAll(self):
''' extracts all data from the object and delivers it as a dict '''
all_data = {}
all_data['file_list'] = self._file_list
all_data['mean_i_raw'] = self._mean_i_raw
all_data['total_i_raw'] = self._total_i_raw
all_data['frame_list_raw'] = self._frame_list_raw
all_data['mean_i'] = self.mean_i
all_data['total_i'] = self.total_i
all_data['frame_list'] = self.frame_list
all_data['i_of_q'] = self.I_of_q
all_data['time'] = self.time
all_data['qref'] = self.qref
all_data['scale_factor'] = self._scale_factor
all_data['offset_value'] = self._offset_value
all_data['frame_scale_factor'] = self._frame_scale_factor
all_data['parameters'] = self._parameters
all_data['intial_buffer_frame'] = self.initial_buffer_frame
all_data['final_buffer_frame'] = self.final_buffer_frame
all_data['window_size'] = self.window_size
all_data['mol_type'] = self.mol_type
all_data['threshold'] = self.threshold
all_data['rg'] = self.rg_list
all_data['rger'] = self.rger_list
all_data['i0'] = self.i0_list
all_data['i0er'] = self.i0er_list
all_data['mw'] = self.mw_list
all_data['mwer'] = self.mwer_list
all_data['calc_has_data'] = self.calc_has_data
all_data['is_visible'] = self.is_visible
all_data['use_subtracted_sasm'] = self.use_subtracted_sasm
all_data['sasm_list'] = []
for idx in range(len(self._sasm_list)):
all_data['sasm_list'].append(self._sasm_list[idx].extractAll())
if self.average_buffer_sasm is None or self.average_buffer_sasm == -1:
all_data['average_buffer_sasm'] = self.average_buffer_sasm
else:
all_data['average_buffer_sasm'] = self.average_buffer_sasm.extractAll()
all_data['subtracted_sasm_list'] = []
for idx in range(len(self.subtracted_sasm_list)):
if self.subtracted_sasm_list[idx] != -1:
all_data['subtracted_sasm_list'].append(self.subtracted_sasm_list[idx].extractAll())
else:
all_data['subtracted_sasm_list'].append(-1)
return all_data
def copy(self):
''' return a copy of the object '''
# NOTE: SECM.__init__ expects (file_list, sasm_list, frame_list, parameters),
# so passing intensity arrays here will not reconstruct a working SECM.
return SECM(copy.copy(self.mean_i), copy.copy(self.total_i), copy.copy(self.frame_list), copy.copy(self._parameters))
def getSASM(self, index=0):
return self._sasm_list[index]
def I(self, qref):
# print('in I(q)')
self.qref=float(qref)
self.I_of_q = []
closest = lambda qlist: np.argmin(np.absolute(qlist-self.qref))
for sasm in self._sasm_list:
# print('in sasm_list loop')
q = sasm.q
index = closest(q)
# print(index)
intensity = sasm.i[index]
# print(intensity)
self.I_of_q.append(intensity)
return self.I_of_q
def setCalcParams(self, initial, final, window, mol_type, threshold):
new = False
if initial != self.initial_buffer_frame or final != self.final_buffer_frame or window != self.window_size or self.mol_type != mol_type or threshold != self.threshold:
new = True
self.initial_buffer_frame = initial
self.final_buffer_frame = final
self.window_size = window
self.mol_type = mol_type
self.threshold = threshold
# print(self.initial_buffer_frame)
# print(self.final_buffer_frame)
# print(self.window_size)
# print(new)
return new
def getCalcParams(self):
return self.initial_buffer_frame, self.final_buffer_frame, self.window_size
def setAverageBufferSASM(self, sasm):
self.average_buffer_sasm = sasm
def getAllSASMs(self):
return self._sasm_list
def setSubtractedSASMList(self, sasm_list, use_sasm_list):
self.subtracted_sasm_list = sasm_list
self.use_subtracted_sasm = use_sasm_list
def appendSubtractedSASMList(self, sasm_list, use_sasm_list):
self.subtracted_sasm_list = self.subtracted_sasm_list + sasm_list
self.use_subtracted_sasm = self.use_subtracted_sasm + use_sasm_list
def setRgAndI0(self, rg, rger, i0, i0er):
self.rg_list = rg
self.rger_list = rger
self.i0_list = i0
self.i0er_list = i0er
def setMW(self, mw, mwer):
self.mw_list = mw
self.mwer_list = mwer
def getRg(self):
return self.rg_list, self.rger_list
def getMW(self):
return self.mw_list, self.mwer_list
def getI0(self):
return self.i0_list, self.i0er_list
def appendRgAndI0(self, rg, rger, i0, i0er, first_frame, window_size):
index1 = int(first_frame+(window_size-1)/2)
index2 = int((window_size-1)/2)
self.rg_list = np.concatenate((self.rg_list[:index1],rg[index2:]))
self.rger_list = np.concatenate((self.rger_list[:index1],rger[index2:]))
self.i0_list = np.concatenate((self.i0_list[:index1],i0[index2:]))
self.i0er_list = np.concatenate((self.i0er_list[:index1],i0er[index2:]))
def appendMW(self, mw, mwer, first_frame, window_size):
index1 = int(first_frame+(window_size-1)/2)
index2 = int((window_size-1)/2)
self.mw_list = np.concatenate((self.mw_list[:index1], mw[index2:]))
self.mwer_list = np.concatenate((self.mwer_list[:index1], mwer[index2:]))
def subtract(sasm1, sasm2, forced = False):
''' Subtract one SASM object from another and propagate errors '''
q1_min, q1_max = sasm1.getQrange()
q2_min, q2_max = sasm2.getQrange()
if not np.all(np.round(sasm1.q[q1_min:q1_max],5) == np.round(sasm2.q[q2_min:q2_max],5)) and not forced:
raise SASExceptions.DataNotCompatible('The curves do not have the same q vectors.')
elif not np.all(np.round(sasm1.q[q1_min:q1_max],5) == np.round(sasm2.q[q2_min:q2_max],5)) and forced:
q1 = np.round(sasm1.q[q1_min:q1_max],5)
q2 = np.round(sasm2.q[q2_min:q2_max],5)
i1 = np.round(sasm1.i[q1_min:q1_max],5)
i2 = np.round(sasm2.i[q2_min:q2_max],5)
err1 = np.round(sasm1.err[q1_min:q1_max],5)
err2 = np.round(sasm2.err[q2_min:q2_max],5)
if q1[0]>q2[0]:
start=np.round(q1[0],5)
else:
start=np.round(q2[0],5)
if q1[-1]>q2[-1]:
end=np.round(q2[-1],5)
else:
end=np.round(q1[-1],5)
if start>end:
raise SASExceptions.DataNotCompatible('Subtraction failed: the curves have no overlapping q region.')
shifted = False
if len(np.argwhere(q1==start))>0 and len(np.argwhere(q1==end))>0 and len(np.argwhere(q2==start))>0 and len(np.argwhere(q2==end))>0:
q1_idx1 = np.argwhere(q1==start)[0][0]
q1_idx2 = np.argwhere(q1==end)[0][0]+1
q2_idx1 = np.argwhere(q2==start)[0][0]
q2_idx2 = np.argwhere(q2==end)[0][0] +1
if np.all(q1[q1_idx1:q1_idx2]==q2[q2_idx1:q2_idx2]):
shifted = True
if shifted:
i = i1[q1_idx1:q1_idx2] - i2[q2_idx1:q2_idx2]
err = np.sqrt( np.power(err1[q1_idx1:q1_idx2], 2) + np.power(err2[q2_idx1:q2_idx2],2))
q = copy.deepcopy(q1[q1_idx1:q1_idx2])
# print(i)
# print(q)
else:
q1space=q1[1]-q1[0]
q2space=q2[1]-q2[0]
# np.linspace needs an integer point count (true division yields a float here)
if q1space>q2space:
npts=int(round((end-start)/q1space))+1
else:
npts=int(round((end-start)/q2space))+1
refq=np.linspace(start,end,npts,endpoint=True)
q1_idx1 = np.argmin(np.absolute(q1-start))
q1_idx2 = np.argmin(np.absolute(q1-end))+1
q2_idx1 = np.argmin(np.absolute(q2-start))
q2_idx2 = np.argmin(np.absolute(q2-end))+1
q1b, i1b, err1b=binfixed(q1[q1_idx1:q1_idx2], i1[q1_idx1:q1_idx2], err1[q1_idx1:q1_idx2], refq=refq)
q2b, i2b, err2b=binfixed(q2[q2_idx1:q2_idx2], i2[q2_idx1:q2_idx2], err2[q2_idx1:q2_idx2], refq=refq)
i = i1b - i2b
err=np.sqrt(np.square(err1b)+np.square(err2b))
q = refq
else:
i = sasm1.i[q1_min:q1_max] - sasm2.i[q2_min:q2_max]
q = copy.deepcopy(sasm1.q)[q1_min:q1_max]
err = np.sqrt( np.power(sasm1.err[q1_min:q1_max], 2) + np.power(sasm2.err[q2_min:q2_max],2))
parameters = copy.deepcopy(sasm1.getAllParameters())
newSASM = SASM(i, q, err, parameters)
history = newSASM.getParameter('history')
history = {}
history1 = []
history1.append(copy.deepcopy(sasm1.getParameter('filename')))
for key in sasm1.getParameter('history'):
history1.append({ key : copy.deepcopy(sasm1.getParameter('history')[key])})
history2 = []
history2.append(copy.deepcopy(sasm2.getParameter('filename')))
for key in sasm2.getParameter('history'):
history2.append({key : copy.deepcopy(sasm2.getParameter('history')[key])})
history['subtraction'] = {'initial_file':history1, 'subtracted_file':history2}
newSASM.setParameter('history', history)
return newSASM
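# Quick numeric check (ours) of the error propagation used in subtract():
# for independent errors the result error is sqrt(err1**2 + err2**2) pointwise.
def _demo_subtract_errors():
    e1 = np.array([0.3, 0.4])
    e2 = np.array([0.4, 0.3])
    combined = np.sqrt(np.power(e1, 2) + np.power(e2, 2))
    assert np.allclose(combined, [0.5, 0.5])  # 3-4-5 triangles
    return combined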
def average(sasm_list, forced = False):
''' Average the intensity of a list of sasm objects '''
#Check average is possible with provided curves:
first_sasm = sasm_list[0]
first_q_min, first_q_max = first_sasm.getQrange()
for each in sasm_list:
each_q_min, each_q_max = each.getQrange()
if not np.all(np.round(each.q[each_q_min:each_q_max], 5) == np.round(first_sasm.q[first_q_min:first_q_max], 5)) and not forced:
raise SASExceptions.DataNotCompatible('Average list contains data sets with different q vectors.')
all_i = first_sasm.i[first_q_min : first_q_max]
all_err = first_sasm.err[first_q_min : first_q_max]
avg_filelist = []
avg_filelist.append(first_sasm.getParameter('filename'))
for idx in range(1, len(sasm_list)):
each_q_min, each_q_max = sasm_list[idx].getQrange()
all_i = np.vstack((all_i, sasm_list[idx].i[each_q_min:each_q_max]))
all_err = np.vstack((all_err, sasm_list[idx].err[each_q_min:each_q_max]))
avg_filelist.append(sasm_list[idx].getParameter('filename'))
avg_i = np.mean(all_i, 0)
avg_err = np.sqrt( np.sum( np.power(all_err,2), 0 ) ) / len(all_err) #np.sqrt(len(all_err))
avg_i = copy.deepcopy(avg_i)
avg_err = copy.deepcopy(avg_err)
avg_q = copy.deepcopy(first_sasm.q)[first_q_min:first_q_max]
avg_parameters = copy.deepcopy(sasm_list[0].getAllParameters())
avgSASM = SASM(avg_i, avg_q, avg_err, avg_parameters)
history = avgSASM.getParameter('history')
history = {}
history_list = []
for eachsasm in sasm_list:
each_history = []
each_history.append(copy.deepcopy(eachsasm.getParameter('filename')))
for key in eachsasm.getParameter('history'):
each_history.append({key : copy.deepcopy(eachsasm.getParameter('history')[key])})
history_list.append(each_history)
history['averaged_files'] = history_list
avgSASM.setParameter('history', history)
return avgSASM
def weightedAverage(sasm_list, weightByError, weightCounter, forced = False):
''' Weighted average of the intensity of a list of sasm objects '''
#Check average is possible with provided curves:
first_sasm = sasm_list[0]
first_q_min, first_q_max = first_sasm.getQrange()
for each in sasm_list:
each_q_min, each_q_max = each.getQrange()
if not np.all(np.round(each.q[each_q_min:each_q_max], 5) == np.round(first_sasm.q[first_q_min:first_q_max], 5)) and not forced:
raise SASExceptions.DataNotCompatible('Average list contains data sets with different q vectors.')
all_i = first_sasm.i[first_q_min : first_q_max]
all_err = first_sasm.err[first_q_min : first_q_max]
if not weightByError:
if 'counters' in first_sasm.getAllParameters():
file_hdr = first_sasm.getParameter('counters')
if 'imageHeader' in first_sasm.getAllParameters():
img_hdr = first_sasm.getParameter('imageHeader')
if weightCounter in file_hdr:
all_weight = float(file_hdr[weightCounter])
else:
all_weight = float(img_hdr[weightCounter])
avg_filelist = []
if not weightByError:
avg_filelist.append([first_sasm.getParameter('filename'), all_weight])
else:
avg_filelist.append([first_sasm.getParameter('filename'), 'error'])
for idx in range(1, len(sasm_list)):
each_q_min, each_q_max = sasm_list[idx].getQrange()
all_i = np.vstack((all_i, sasm_list[idx].i[each_q_min:each_q_max]))
all_err = np.vstack((all_err, sasm_list[idx].err[each_q_min:each_q_max]))
if not weightByError:
if 'counters' in sasm_list[idx].getAllParameters():
file_hdr = sasm_list[idx].getParameter('counters')
if 'imageHeader' in sasm_list[idx].getAllParameters():
img_hdr = sasm_list[idx].getParameter('imageHeader')
if weightCounter in file_hdr:
try:
all_weight = np.vstack((all_weight, float(file_hdr[weightCounter])))
except ValueError:
raise SASExceptions.DataNotCompatible('Not all weight counter values were numbers.')
else:
try:
all_weight = np.vstack((all_weight, float(img_hdr[weightCounter])))
except ValueError:
raise SASExceptions.DataNotCompatible('Not all weight counter values were numbers.')
if not weightByError:
avg_filelist.append([sasm_list[idx].getParameter('filename'), all_weight])
else:
avg_filelist.append([sasm_list[idx].getParameter('filename'), 'error'])
if not weightByError:
weight = all_weight.flatten()
avg_i = np.average(all_i, axis=0, weights=weight)
avg_err = np.sqrt(np.average(np.square(all_err), axis=0, weights=np.square(weight)))
else:
all_err = 1/(np.square(all_err))
avg_i = np.average(all_i, axis=0, weights = all_err)
avg_err = np.sqrt(1/np.sum(all_err,0))
avg_i = copy.deepcopy(avg_i)
avg_err = copy.deepcopy(avg_err)
avg_q = copy.deepcopy(first_sasm.q)[first_q_min:first_q_max]
avg_parameters = copy.deepcopy(sasm_list[0].getAllParameters())
avgSASM = SASM(avg_i, avg_q, avg_err, avg_parameters)
history = avgSASM.getParameter('history')
history = {}
history_list = []
for eachsasm in sasm_list:
each_history = []
each_history.append(copy.deepcopy(eachsasm.getParameter('filename')))
for key in eachsasm.getParameter('history'):
each_history.append({key : copy.deepcopy(eachsasm.getParameter('history')[key])})
history_list.append(each_history)
history['weighted_averaged_files'] = history_list
avgSASM.setParameter('history', history)
return avgSASM
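# Small check (ours) of the error-weighted branch in weightedAverage() above:
# curves are weighted by 1/err**2 and the combined error is
# sqrt(1 / sum_i(1/err_i**2)), i.e. standard inverse-variance weighting.
def _demo_inverse_variance():
    errs = np.array([[0.2], [0.2]])          # two curves, one q point each
    weights = 1.0 / np.square(errs)
    combined_err = np.sqrt(1.0 / np.sum(weights, 0))
    assert np.allclose(combined_err, 0.2 / np.sqrt(2.0))
    return combined_err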
def calcAbsoluteScaleWaterConst(water_sasm, emptycell_sasm, I0_water, raw_settings):
if emptycell_sasm is None or emptycell_sasm == 'None' or water_sasm == 'None' or water_sasm is None:
raise SASExceptions.AbsScaleNormFailed('Empty cell file or water file was not found. Open options to set these files.')
water_bgsub_sasm = subtract(water_sasm, emptycell_sasm)
# average over the flat middle third of the background-subtracted water curve
water_avg_end_idx = int( len(water_bgsub_sasm.i) * 0.666 )
water_avg_start_idx = int( len(water_bgsub_sasm.i) * 0.333 )
avg_water = np.mean(water_bgsub_sasm.i[water_avg_start_idx : water_avg_end_idx])
abs_scale_constant = I0_water / avg_water
return abs_scale_constant
def normalizeAbsoluteScaleWater(sasm, raw_settings):
abs_scale_constant = raw_settings.get('NormAbsWaterConst')
sasm.scaleBinnedIntensity(abs_scale_constant)
norm_parameter = sasm.getParameter('normalizations')
norm_parameter['Absolute_scale_factor'] = abs_scale_constant
sasm.setParameter('normalizations', norm_parameter)
return sasm, abs_scale_constant
def postProcessImageSasm(sasm, raw_settings):
if raw_settings.get('NormAbsWater'):
try:
normalizeAbsoluteScaleWater(sasm, raw_settings)
except SASExceptions.AbsScaleNormFailed as error:
print(error)
def postProcessSasm(sasm, raw_settings):
if raw_settings.get('ZingerRemoval'):
std = raw_settings.get('ZingerRemoveSTD')
winlen = raw_settings.get('ZingerRemoveWinLen')
start_idx = raw_settings.get('ZingerRemoveIdx')
sasm.removeZingers(start_idx, winlen, std)
def superimpose(sasm_star, sasm_list):
"""
Find the scale factors for a protein buffer pair that will best match a known standard curve.
If I = I_prot - alf*I_buf, then find alf and bet such that
||(I_prot - alf*I_buf) - bet*I_std ||^2 is a minimum. This is a standard vector norm which gives the least squares minimum.
The standard curve need not be sampled at the same q-space points.
"""
q_star = sasm_star.q
i_star = sasm_star.i
# err_star = sasm_star.err
q_star_qrange_min, q_star_qrange_max = sasm_star.getQrange()
for each_sasm in sasm_list:
each_q = each_sasm.getBinnedQ()
each_i = each_sasm.getBinnedI()
# each_err = each_sasm.getBinnedErr()
each_q_qrange_min, each_q_qrange_max = each_sasm.getQrange()
# resample standard curve on the data q vector
min_q_star, min_q_each = q_star[q_star_qrange_min], each_q[each_q_qrange_min]
max_q_star, max_q_each = q_star[q_star_qrange_max-1], each_q[each_q_qrange_max-1]
min_q = min([min_q_star, min_q_each])
max_q = min([max_q_star, max_q_each])
min_q_idx = np.where(q_star >= min_q_each)[0][0]
max_q_idx = np.where(q_star <= max_q_each)[0][-1]
I_resamp = np.interp(q_star[min_q_idx:max_q_idx+1],
each_q[each_q_qrange_min:each_q_qrange_max-1],
each_i[each_q_qrange_min:each_q_qrange_max-1])
I_buf = np.ones(max_q_idx - min_q_idx + 1)
g2 = np.dot(I_buf, I_buf)
s2 = np.dot(i_star[min_q_idx:max_q_idx+1], i_star[min_q_idx:max_q_idx+1])
gs = sg = np.dot(I_buf, i_star[min_q_idx:max_q_idx+1])
fg = np.dot(I_resamp, I_buf)
fs = np.dot(I_resamp, i_star[min_q_idx:max_q_idx+1])
determ = g2*s2 - gs*sg
alf = (fg*s2-fs*sg) / determ
bet = (g2*fs-gs*fg) / determ
offset = -alf
scale = 1.0/bet
each_sasm.scale(scale)
each_sasm.offset(offset)
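# The closed form in superimpose() is the 2x2 normal-equation solution of
# min ||f - alf*g - bet*s||**2 with g a vector of ones. A self-contained
# check (ours) that the algebra recovers known coefficients:
def _demo_superimpose_normal_equations():
    rng = np.random.RandomState(0)
    s = rng.rand(50)                  # stand-in for the standard curve
    g = np.ones(50)                   # constant background term
    f = 2.5 * g + 0.5 * s             # data built with alf=2.5, bet=0.5
    g2, s2, gs = np.dot(g, g), np.dot(s, s), np.dot(g, s)
    fg, fs = np.dot(f, g), np.dot(f, s)
    determ = g2 * s2 - gs * gs
    alf = (fg * s2 - fs * gs) / determ
    bet = (g2 * fs - gs * fg) / determ
    assert np.allclose([alf, bet], [2.5, 0.5])
    return alf, bet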
def merge(sasm_star, sasm_list):
""" Merge one or more sasms by averaging and possibly interpolating
points if all values are not on the same q scale """
#Sort sasms according to lowest q value:
sasm_list.extend([sasm_star])
sasm_list = sorted(sasm_list, key=lambda each: each.q[each.getQrange()[0]])
s1 = sasm_list[0]
s2 = sasm_list[1]
sasm_list.pop(0)
sasm_list.pop(0)
#find overlapping s2 points
highest_q = s1.q[s1.getQrange()[1]-1]
qmin, qmax = s2.getQrange()
overlapping_q2 = s2.q[qmin:qmax][np.where(s2.q[qmin:qmax] <= highest_q)]
#find overlapping s1 points
lowest_s2_q = s2.q[s2.getQrange()[0]]
qmin, qmax = s1.getQrange()
overlapping_q1 = s1.q[qmin:qmax][np.where(s1.q[qmin:qmax] >= lowest_s2_q)]
tmp_s2i = s2.i.copy()
tmp_s2q = s2.q.copy()
tmp_s2err = s2.err.copy()
if len(overlapping_q1) == 1 and len(overlapping_q2) == 1: #One point overlap
q1idx = s1.getQrange()[1]
q2idx = s2.getQrange()[0]
avg_i = (s1.i[q1idx] + s2.i[q2idx])/2.0
tmp_s2i[q2idx] = avg_i
minq, maxq = s1.getQrange()
q1_indexs = [maxq-1, minq]
elif len(overlapping_q1) == 0 and len(overlapping_q2) == 0: #No overlap
minq, maxq = s1.getQrange()
q1_indexs = [maxq, minq]
else: #More than 1 point overlap
added_index = False
if overlapping_q2[0] < overlapping_q1[0]:
#add the point before overlapping_q1[0] to overlapping_q1
idx, = np.where(s1.q == overlapping_q1[0])
overlapping_q1 = np.insert(overlapping_q1, 0, s1.q[idx-1][0])
added_index = True
#get indexes for overlapping_q2 and q1
q2_indexs = []
q1_indexs = []
for each in overlapping_q2:
idx, = np.where(s2.q == each)
q2_indexs.append(idx[0])
for each in overlapping_q1:
idx, = np.where(s1.q == each)
q1_indexs.append(idx[0])
#interpolate overlapping s2 onto s1
f = interp.interp1d(s1.q[q1_indexs], s1.i[q1_indexs])
intp_I = f(s2.q[q2_indexs])
averaged_I = (intp_I + s2.i[q2_indexs])/2.0
if added_index:
q1_indexs = np.delete(q1_indexs, 0)
tmp_s2i[q2_indexs] = averaged_I
#Merge the two parts
#cut away the overlapping part on s1 and append s2 to it
qmin, qmax = s1.getQrange()
newi = s1.i[qmin:q1_indexs[0]]
newq = s1.q[qmin:q1_indexs[0]]
newerr = s1.err[qmin:q1_indexs[0]]
qmin, qmax = s2.getQrange()
newi = np.append(newi, tmp_s2i[qmin:qmax])
newq = np.append(newq, tmp_s2q[qmin:qmax])
newerr = np.append(newerr, tmp_s2err[qmin:qmax])
#create a new SASM object with the merged parts.
parameters = copy.deepcopy(s1.getAllParameters())
newSASM = SASM(newi, newq, newerr, parameters)
history = newSASM.getParameter('history')
history = {}
history_list = []
for eachsasm in [s1, s2]:
each_history = []
each_history.append(copy.deepcopy(eachsasm.getParameter('filename')))
for key in eachsasm.getParameter('history'):
each_history.append({key : copy.deepcopy(eachsasm.getParameter('history')[key])})
history_list.append(each_history)
history['merged_files'] = history_list
newSASM.setParameter('history', history)
if len(sasm_list) == 0:
return newSASM
else:
return merge(newSASM, sasm_list)
def interpolateToFit(sasm_star, sasm):
s1 = sasm_star
s2 = sasm
#find overlapping s2 points
min_q1, max_q1 = s1.getQrange()
min_q2, max_q2 = s2.getQrange()
lowest_q1, highest_q1 = s1.q[s1.getQrange()[0]], s1.q[s1.getQrange()[1]-1]
# ugh, how cumbersome this is!
overlapping_q2_top = s2.q[min_q2:max_q2][np.where( (s2.q[min_q2:max_q2] <= highest_q1))]
overlapping_q2 = overlapping_q2_top[np.where(overlapping_q2_top >= lowest_q1)]
if overlapping_q2[0] != s2.q[0]:
idx = np.where(s2.q == overlapping_q2[0])
overlapping_q2 = np.insert(overlapping_q2, 0, s2.q[idx[0]-1])
if overlapping_q2[-1] != s2.q[-1]:
idx = np.where(s2.q == overlapping_q2[-1])
overlapping_q2 = np.append(overlapping_q2, s2.q[idx[0]+1])
overlapping_q1_top = s1.q[min_q1:max_q1][np.where( (s1.q[min_q1:max_q1] <= overlapping_q2[-1]))]
overlapping_q1 = overlapping_q1_top[np.where(overlapping_q1_top >= overlapping_q2[0])]
q2_indexs = []
q1_indexs = []
for each in overlapping_q2:
idx, = np.where(s2.q == each)
q2_indexs.append(idx[0])
for each in overlapping_q1:
idx, = np.where(s1.q == each)
q1_indexs.append(idx[0])
#interpolate find the I's that fits the q vector of s1:
f = interp.interp1d(s2.q[q2_indexs], s2.i[q2_indexs])
intp_i_s2 = f(s1.q[q1_indexs])
intp_q_s2 = s1.q[q1_indexs].copy()
newerr = s1.err[q1_indexs].copy()
parameters = copy.deepcopy(s1.getAllParameters())
newSASM = SASM(intp_i_s2, intp_q_s2, newerr, parameters)
history = newSASM.getParameter('history')
history = {}
history1 = []
history1.append(copy.deepcopy(s1.getParameter('filename')))
for key in s1.getParameter('history'):
history1.append({key:copy.deepcopy(s1.getParameter('history')[key])})
history2 = []
history2.append(copy.deepcopy(s2.getParameter('filename')))
for key in s2.getParameter('history'):
history2.append({key:copy.deepcopy(s2.getParameter('history')[key])})
history['interpolation'] = {'initial_file':history1, 'interpolated_to_q_of':history2}
newSASM.setParameter('history', history)
return newSASM
def logBinning(sasm, no_points):
# if end_idx == -1:
# end_idx = len(self._i_raw)
i_roi = sasm._i_binned
q_roi = sasm._q_binned
err_roi = sasm._err_binned
bins = np.logspace(1, np.log10(len(q_roi)), no_points)
binned_q = []
binned_i = []
binned_err = []
idx = 0
for i in range(0, len(bins)):
no_of_bins = int(np.floor(bins[i] - bins[i-1]))
if no_of_bins > 1:
mean_q = np.mean( q_roi[ idx : idx + no_of_bins ] )
mean_i = np.mean( i_roi[ idx : idx + no_of_bins ] )
mean_err = np.sqrt( sum( np.power( err_roi[ idx : idx + no_of_bins ], 2) ) ) / np.sqrt( no_of_bins )
binned_q.append(mean_q)
binned_i.append(mean_i)
binned_err.append(mean_err)
idx = idx + no_of_bins
else:
binned_q.append(q_roi[idx])
binned_i.append(i_roi[idx])
binned_err.append(err_roi[idx])
idx = idx + 1
parameters = copy.deepcopy(sasm.getAllParameters())
newSASM = SASM(binned_i, binned_q, binned_err, parameters)
history = newSASM.getParameter('history')
history = {}
history1 = []
history1.append(copy.deepcopy(sasm.getParameter('filename')))
for key in sasm.getParameter('history'):
history1.append({key:copy.deepcopy(sasm.getParameter('history')[key])})
history['log_binning'] = {'initial_file' : history1, 'initial_points' : len(q_roi), 'final_points': len(bins)}
newSASM.setParameter('history', history)
return newSASM
def rebin(sasm, rebin_factor):
''' Sets the bin size of the I_q plot
end_idx will be lowered to fit the bin_size
if needed.
'''
len_iq = len(sasm._i_binned)
no_of_bins = int(np.floor(len_iq / rebin_factor))
end_idx = no_of_bins * rebin_factor
start_idx = 0
i_roi = sasm._i_binned[start_idx:end_idx]
q_roi = sasm._q_binned[start_idx:end_idx]
err_roi = sasm._err_binned[start_idx:end_idx]
new_i = np.zeros(no_of_bins)
new_q = np.zeros(no_of_bins)
new_err = np.zeros(no_of_bins)
for eachbin in range(0, no_of_bins):
first_idx = eachbin * rebin_factor
last_idx = (eachbin * rebin_factor) + rebin_factor
new_i[eachbin] = sum(i_roi[first_idx:last_idx]) / rebin_factor
new_q[eachbin] = sum(q_roi[first_idx:last_idx]) / rebin_factor
new_err[eachbin] = np.sqrt(sum(np.power(err_roi[first_idx:last_idx],2))) / np.sqrt(rebin_factor)
parameters = copy.deepcopy(sasm.getAllParameters())
newSASM = SASM(new_i, new_q, new_err, parameters)
qstart, qend = sasm.getQrange()
new_qstart = int(qstart/float(rebin_factor)+.5)
new_qend = int(qend/float(rebin_factor))
newSASM.setQrange([new_qstart, new_qend])
history = newSASM.getParameter('history')
history = {}
history1 = []
history1.append(copy.deepcopy(sasm.getParameter('filename')))
for key in sasm.getParameter('history'):
history1.append({key:copy.deepcopy(sasm.getParameter('history')[key])})
history['rebinning'] = {'initial_file' : history1, 'initial_points' : len_iq, 'final_points': no_of_bins}
newSASM.setParameter('history', history)
return newSASM
def binfixed(q, I, er, refq):
"""
This function bins the input q, I, and er into the fixed bins of qref
"""
dq=refq[1]-refq[0]
qn=np.linspace(refq[0]-dq/2.,refq[-1]+1.5*dq, int(np.around((refq[-1]+2*dq-refq[0])/dq,0))+1,endpoint=True )
dig=np.digitize(q,qn)
In=np.array([I[dig==i].mean() for i in range(1,len(qn)-1)])
mI = np.ma.masked_equal(I,0)
Iern=np.array([np.sqrt(np.sum(np.square(er[dig==i]/mI[dig==i])))/len(I[dig==i]) for i in range(1,len(qn)-1)])
Iern=Iern*In
qn=refq
return qn, In, np.nan_to_num(Iern)
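# Hedged example (ours) of binfixed(): rebin scattered q points onto a fixed
# reference grid; points landing in the same refq bin are averaged.
def _demo_binfixed():
    refq = np.linspace(0.0, 1.0, 5)              # bins centred on 0, .25, .5, .75, 1
    q = np.array([0.05, 0.20, 0.30, 0.55, 0.70, 0.90])
    I = np.array([1.0, 3.0, 2.0, 4.0, 6.0, 5.0])
    er = 0.1 * np.ones_like(I)
    qn, In, Iern = binfixed(q, I, er, refq)
    assert len(qn) == len(refq) and np.allclose(In, [1.0, 2.5, 4.0, 6.0, 5.0])
    return qn, In, Iern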
|
lqhuang/SAXS-tools
|
RAW/SASM.py
|
Python
|
gpl-3.0
| 61,301
|
from __future__ import division
import os
import numpy as np
import pandas as pd
# what to do if skip_cols == 'auto'
# (this is used in feature_transforms.py)
column_range = {'loudness': (1, None),
'sharpness': (1, None),
'roughness': (1, None),
'bands': (1, None),
'melody': (1, None),
'hpcp': (1, None),
'mfcc': (2, 14),
'beats': (1, None),
'onsets': (1, None)}
def read_feature(filename, mode='pandas', time=False, skip_cols=(0, None)):
"""Read features from CSV.
This is not a general purpose i/o function. It is written to work well
with frame-based features, and with this module's `write_features()'
in particular.
Args:
filename (list or str): file name. If list, will use
os.path.join to join dir names and filename. CSV extension will be
added if not already included
mode (str): choose between 'pandas' and 'numpy'.
Pandas is faster when reading large files.
time (bool): set True to split data into a column of frame times and
a 2d-array of frame data.
skip_cols (tuple or str): (first, last) column slice to keep, or
'auto' to infer the range from the feature directory name.
"""
# if filename is a list, use os.path.join to join
if type(filename) is list:
filename = os.path.join(*filename)
if not (filename.endswith('.csv') or filename.endswith('.txt')):
filename += '.csv'
# if skip_cols == 'auto', infer the column range from the directory name
if skip_cols == 'auto':
dir_name = os.path.dirname(filename)
subdir_name = os.path.basename(dir_name)
skip_cols = skip_columns(subdir_name)
# pick csv reader
try:
if mode == 'numpy':
data = np.genfromtxt(filename, delimiter=',')
elif mode == 'pandas':
data = pd.read_csv(filename, delimiter=',', header=None).values
except ValueError:
data = np.array([[0]])
feature = data[:, skip_cols[0]:skip_cols[1]]
# if time=True, split first and following columns
if time:
t = feature[:, 0]
x = feature[:, 1:]
feature = (t, x)
return feature
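# Hedged round-trip sketch (ours): write a feature matrix with write_feature()
# below, then read it back. The '/tmp' path is purely illustrative.
def _demo_feature_roundtrip(base_dir='/tmp/catchy_demo'):
    t = np.arange(10, dtype=float)
    X = np.ones((10, 3))
    write_feature([t, X], [base_dir, 'ones', '0'])
    t2, X2 = read_feature([base_dir, 'ones', '0'], time=True)
    assert np.allclose(t2, t) and np.allclose(X2, X)
    return t2, X2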
def write_feature(data, filename):
"""Write frame-based features to CSV.
Args:
data (nd-array or list): feature matrix or list of feature matrices
if list, feature matrices will be concatenated
(1d-arrays will be reshaped into column vectors).
filename (list or str): file name. If list, will use
os.path.join to join dirs and filename. CSV extension will be
added if not already included.
Usage:
>>> # simplest case
>>> # write an array of ones to temp.csv
>>> X = np.ones((100, 30))
>>> write_feature(X, 'temp.csv')
>>> # with lists
>>> # write indexed array of ones to data/ones/0.csv
>>> t = np.arange(100)
>>> X = np.ones((100, 30))
>>> feature_name, id = 'ones', str(0)
>>> write_feature([t, X], ['data', feature_name, id])
"""
# if data is a list of nd-arrays, hstack as 2d-arrays
if type(data) is list:
for i in np.where([len(x.shape) == 1 for x in data])[0]: # check if better solution than [0]
data[i] = data[i][:, np.newaxis]
data = np.hstack(data)
elif len(data.shape) == 1:
data = data[:, np.newaxis]
# if filename is a list, use os.path.join to join
if type(filename) is list:
filename = os.path.join(*filename)
if not (filename.endswith('.csv') or filename.endswith('.txt')):
filename += '.csv'
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
print('making new dir ' + dirname)
os.makedirs(dirname)
dataframe = pd.DataFrame(data)
dataframe.to_csv(filename, header=False, index=False)
def skip_columns(feature_name, default_range=(0,None)):
"""Set automatic column ignore behavior in read_feature().
"""
first_col, last_col = column_range.get(feature_name, default_range)
return first_col, last_col
def dataset_from_dir(audio_dir, separator='-'):
"""Make a dictionary of song section paths grouped by song id
from audio files in a particular directory.
Assumes files are labeled 'songid-sectionid.wav', where the
dash is the separator specified in the separator parameter.
Extension can be 'wav' or 'mp3'.
Args:
audio_dir (str): path to audio dir.
separator (str): character or string that separates song
id and section id in the audio file names.
Returns:
segment_dict (dict): dictionary of song segments, containing
all segment paths (without extension) as a list, grouped by
song id.
"""
segment_dict = {}
for file_path in os.listdir(audio_dir):
if file_path.endswith('.wav') or file_path.endswith('.mp3'):
filename = os.path.basename(file_path).split('.')[0]
song_id = filename.split(separator)[0]
if song_id in segment_dict:
segment_dict[song_id].append(filename)
else:
segment_dict[song_id] = [filename]
return segment_dict
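# Hedged illustration (ours) of the naming convention dataset_from_dir()
# relies on: 'songid-sectionid.wav' files grouped under their song id.
def _demo_grouping():
    names = ['1-intro.wav', '1-chorus.wav', '2-intro.mp3', 'notes.txt']
    groups = {}
    for name in names:
        if name.endswith('.wav') or name.endswith('.mp3'):
            base = os.path.basename(name).split('.')[0]
            groups.setdefault(base.split('-')[0], []).append(base)
    assert groups == {'1': ['1-intro', '1-chorus'], '2': ['2-intro']}
    return groups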
|
jvbalen/catchy
|
utils.py
|
Python
|
mit
| 5,181
|
# TweetPy
# Test
import unittest
import tweet
class SampleTestClass(unittest.TestCase):
def test_sample(self):
# unittest only discovers methods whose names start with 'test'
a = 1
self.assertEqual(a, 1)
if __name__ == '__main__':
unittest.main()
|
andresitodeguzman/twtpy
|
test/test.py
|
Python
|
mit
| 189
|
from .generic import Relocation
from .defines import defines
from plasma.lib.utils import warning
from . import arm64, i386, mips64, ppc64, amd64, arm, mips, ppc
ALL_RELOCATIONS = {}
complaint_log = set()
def load_relocations():
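    # Register every Relocation subclass exported by the per-arch modules,
    # keyed first by architecture name, then by numeric relocation type.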
for module in [arm64, i386, mips64, ppc64, amd64, arm, mips, ppc]:
try:
arch_name = module.arch
except AttributeError:
continue
for item_name in dir(module):
if item_name not in defines:
continue
item = getattr(module, item_name)
if not isinstance(item, type) or not issubclass(item, Relocation):
continue
if arch_name not in ALL_RELOCATIONS:
ALL_RELOCATIONS[arch_name] = {}
ALL_RELOCATIONS[arch_name][defines[item_name]] = item
def get_relocation(arch, r_type):
if r_type == 0:
return None
try:
return ALL_RELOCATIONS[arch][r_type]
except KeyError:
if (arch, r_type) not in complaint_log:
complaint_log.add((arch, r_type))
warning("Unknown reloc %d on %s" % (r_type, arch))
return None
load_relocations()
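# Hedged lookup sketch (arch keys come from each module's `arch` attribute and
# type numbers from the ELF `defines` table, so both names are placeholders):
#     reloc_cls = get_relocation(some_arch, some_r_type)  # None if unknown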
|
chubbymaggie/reverse
|
plasma/lib/fileformat/relocations/__init__.py
|
Python
|
gpl-3.0
| 1,184
|
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from datetime import datetime
from django.contrib.auth.models import Group
from django.db.models import Q
class ProfileManager(models.Manager):
"""
Custom profile manager.
"""
def contactable_members(self):
"""
        Gets all members who can receive a private message and reply to it.
        :return: All contactable members
        :rtype: QuerySet
"""
excluded_groups = [Group.objects.get(name=settings.ZDS_APP['member']['bot_group'])]
now = datetime.now()
return super(ProfileManager, self).get_queryset()\
.exclude(user__groups__in=excluded_groups)\
.exclude(user__is_active=False)\
.filter(Q(can_read=True) | Q(end_ban_read__lte=now))
def all_members_ordered_by_date_joined(self):
"""
Gets all members ordered by date joined.
:return: All members ordered by date joined
:rtype: QuerySet
"""
return super(ProfileManager, self).get_queryset().order_by('-user__date_joined').all()
def all_old_tutos_from_site_du_zero(self, profile):
"""
        Gets all tutorials from Site du Zéro for a member, if any exist.
        :param profile: the profile of a member
        :type profile: Profile
        :return: A list of tutorials from Site du Zéro for the member (empty if none).
        :rtype: list
"""
from zds.member.models import get_info_old_tuto
if profile.sdz_tutorial:
olds = profile.sdz_tutorial.strip().split(':')
else:
olds = []
old_tutos = [get_info_old_tuto(old) for old in olds]
return old_tutos
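# Hedged usage sketch (assumes the Profile model binds this manager as `objects`):
#     recipients = Profile.objects.contactable_members()
#     newest_first = Profile.objects.all_members_ordered_by_date_joined()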
|
DevHugo/zds-site
|
zds/member/managers.py
|
Python
|
gpl-3.0
| 1,730
|
""" Unit tests for adodbapi version 2.2.6 (d)"""
"""
adodbapi - A python DB API 2.0 interface to Microsoft ADO
Copyright (C) 2002 Henrik Ekelund
Email: <http://sourceforge.net/sendmessage.php?touser=618411>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Updated for decimal data and version 2.1 by Vernon Cole
AS400 tests removed v 2.1.1 - Vernon Cole
"""
import unittest
import sys
try:
import win32com.client
win32 = True
except ImportError:
win32 = False
import adodbapitestconfig # will find (parent?) adodbapi
import adodbapi
#adodbapi.adodbapi.verbose = 3
import types
try:
import decimal
except ImportError:
import win32com.decimal_23 as decimal
def str2bytes(sval):
if sys.version_info < (3,0) and isinstance(sval, str):
sval = sval.decode("latin1")
return sval.encode("latin1")
class CommonDBTests(unittest.TestCase):
"Self contained super-simple tests in easy syntax, should work on everything between mySQL and Oracle"
def setUp(self):
self.engine = 'unknown'
def getEngine(self):
return self.engine
def getConnection(self):
        raise NotImplementedError # "This method must be overridden by a subclass"
def getCursor(self):
return self.getConnection().cursor()
def testConnection(self):
crsr=self.getCursor()
assert crsr.__class__.__name__ == 'Cursor'
def testErrorHandlerInherits(self):
conn=self.getConnection()
mycallable=lambda connection,cursor,errorclass,errorvalue: 1
conn.errorhandler=mycallable
crsr=conn.cursor()
assert crsr.errorhandler==mycallable,"Error handler on crsr should be same as on connection"
def testDefaultErrorHandlerConnection(self):
conn=self.getConnection()
del conn.messages[:]
try:
conn.close()
conn.commit() #Should not be able to use connection after it is closed
except:
assert len(conn.messages)==1
assert len(conn.messages[0])==2
assert conn.messages[0][0]==adodbapi.Error
def testOwnErrorHandlerConnection(self):
mycallable=lambda connection,cursor,errorclass,errorvalue: 1 #does not raise anything
conn=self.getConnection()
conn.errorhandler=mycallable
conn.close()
conn.commit() #Should not be able to use connection after it is closed
assert len(conn.messages)==0
conn.errorhandler=None #This should bring back the standard error handler
try:
conn.close()
conn.commit() #Should not be able to use connection after it is closed
except:
pass
#The Standard errorhandler appends error to messages attribute
assert len(conn.messages)>0,"Setting errorhandler to none should bring back the standard error handler"
def testDefaultErrorHandlerCursor(self):
crsr=self.getConnection().cursor()
del crsr.messages[:]
try:
crsr.execute("SELECT abbtytddrf FROM dasdasd")
except:
assert len(crsr.messages)==1
assert len(crsr.messages[0])==2
assert crsr.messages[0][0]==adodbapi.DatabaseError
def testOwnErrorHandlerCursor(self):
mycallable=lambda connection,cursor,errorclass,errorvalue: 1 #does not raise anything
crsr=self.getConnection().cursor()
crsr.errorhandler=mycallable
crsr.execute("SELECT abbtytddrf FROM dasdasd")
assert len(crsr.messages)==0
crsr.errorhandler=None #This should bring back the standard error handler
try:
crsr.execute("SELECT abbtytddrf FROM dasdasd")
except:
pass
#The Standard errorhandler appends error to messages attribute
assert len(crsr.messages)>0,"Setting errorhandler to none should bring back the standard error handler"
def testUserDefinedConversions(self):
oldconverter=adodbapi.variantConversions[adodbapi.adoStringTypes]
try:
duplicatingConverter=lambda aStringField: aStringField*2
assert duplicatingConverter(u'gabba') == u'gabbagabba'
adodbapi.variantConversions[adodbapi.adoStringTypes]=duplicatingConverter
self.helpForceDropOnTblTemp()
conn=self.getConnection()
crsr=conn.cursor()
tabdef = "CREATE TABLE tblTemp (fldData VARCHAR(100) NOT NULL)"
crsr.execute(tabdef)
crsr.execute("INSERT INTO tblTemp(fldData) VALUES('gabba')")
crsr.execute("INSERT INTO tblTemp(fldData) VALUES('hey')")
crsr.execute("SELECT fldData FROM tblTemp ORDER BY fldData")
row=crsr.fetchone()
self.assertEquals(row[0],'gabbagabba')
row=crsr.fetchone()
self.assertEquals(row[0],'heyhey')
finally:
adodbapi.variantConversions[adodbapi.adoStringTypes]=oldconverter #Restore
self.helpRollbackTblTemp()
def helpTestDataType(self,sqlDataTypeString,
DBAPIDataTypeString,
pyData,
pyDataInputAlternatives=None,
compareAlmostEqual=None,
allowedReturnValues=None):
self.helpForceDropOnTblTemp()
conn=self.getConnection()
crsr=conn.cursor()
tabdef= """
CREATE TABLE tblTemp (
fldId integer NOT NULL,
fldData """ + sqlDataTypeString + ")\n"
crsr.execute(tabdef)
#Test Null values mapped to None
crsr.execute("INSERT INTO tblTemp (fldId) VALUES (1)")
crsr.execute("SELECT fldId,fldData FROM tblTemp")
rs=crsr.fetchone()
self.assertEquals(rs[1],None) #Null should be mapped to None
assert rs[0]==1
#Test description related
descTuple=crsr.description[1]
assert descTuple[0] == 'fldData'
if DBAPIDataTypeString=='STRING':
assert descTuple[1] == adodbapi.STRING, 'was "%s" expected "%s"'%(descTuple[1],adodbapi.STRING.values)
elif DBAPIDataTypeString == 'NUMBER':
assert descTuple[1] == adodbapi.NUMBER, 'was "%s" expected "%s"'%(descTuple[1],adodbapi.NUMBER.values)
elif DBAPIDataTypeString == 'BINARY':
assert descTuple[1] == adodbapi.BINARY, 'was "%s" expected "%s"'%(descTuple[1],adodbapi.BINARY.values)
elif DBAPIDataTypeString == 'DATETIME':
assert descTuple[1] == adodbapi.DATETIME, 'was "%s" expected "%s"'%(descTuple[1],adodbapi.DATETIME.values)
elif DBAPIDataTypeString == 'ROWID':
assert descTuple[1] == adodbapi.ROWID, 'was "%s" expected "%s"'%(descTuple[1],adodbapi.ROWID.values)
else:
raise NotImplementedError #"DBAPIDataTypeString not provided"
#Test data binding
        inputs=[pyData]
        if pyDataInputAlternatives:
            # alternatives may be passed as a single value or a list of values
            if isinstance(pyDataInputAlternatives, list):
                inputs.extend(pyDataInputAlternatives)
            else:
                inputs.append(pyDataInputAlternatives)
fldId=1
for inParam in inputs:
fldId+=1
try:
crsr.execute("INSERT INTO tblTemp (fldId,fldData) VALUES (?,?)", (fldId,pyData))
except:
conn.printADOerrors()
raise
crsr.execute("SELECT fldData FROM tblTemp WHERE ?=fldID", [fldId])
rs=crsr.fetchone()
if allowedReturnValues:
allowedTypes = tuple([type(aRV) for aRV in allowedReturnValues])
assert isinstance(rs[0],allowedTypes), \
'result type "%s" must be one of %s'%(type(rs[0]),allowedTypes)
else:
assert isinstance(rs[0] ,type(pyData)), \
'result type "%s" must be instance of %s'%(type(rs[0]),type(pyData))
if compareAlmostEqual and DBAPIDataTypeString == 'DATETIME':
iso1=adodbapi.dateconverter.DateObjectToIsoFormatString(rs[0])
iso2=adodbapi.dateconverter.DateObjectToIsoFormatString(pyData)
self.assertEquals(iso1 , iso2)
elif compareAlmostEqual:
assert abs(rs[0]-pyData)/pyData<0.00001, \
"Values not almost equal rs[0]=%s, oyDta=%f" %(rs[0],pyData)
else:
if allowedReturnValues:
ok=0
for possibility in allowedReturnValues:
if rs[0]==possibility:
ok=1
assert ok
else:
self.assertEquals(rs[0] , pyData)
self.helpRollbackTblTemp()
def testDataTypeFloat(self):
self.helpTestDataType("real",'NUMBER',3.45,compareAlmostEqual=1)
self.helpTestDataType("float",'NUMBER',1.79e37,compareAlmostEqual=1)
def testDataTypeMoney(self): #v2.1 Cole -- use decimal for money
if self.getEngine() != 'MySQL':
self.helpTestDataType("smallmoney",'NUMBER',decimal.Decimal('214748.02'))
self.helpTestDataType("money",'NUMBER',decimal.Decimal('-922337203685477.5808'))
def testDataTypeInt(self):
self.helpTestDataType("tinyint",'NUMBER',115)
self.helpTestDataType("smallint",'NUMBER',-32768)
self.helpTestDataType("int",'NUMBER',2147483647,
pyDataInputAlternatives='2137483647')
if self.getEngine() != 'ACCESS':
self.helpTestDataType("bit",'NUMBER',1) #Does not work correctly with access
self.helpTestDataType("bigint",'NUMBER',3000000000)
def testDataTypeChar(self):
for sqlDataType in ("char(6)","nchar(6)"):
self.helpTestDataType(sqlDataType,'STRING',u'spam ',allowedReturnValues=[u'spam','spam',u'spam ','spam '])
def testDataTypeVarChar(self):
stringKinds = ["varchar(10)","nvarchar(10)","text","ntext"]
if self.getEngine() == 'MySQL':
stringKinds = ["varchar(10)","text"]
for sqlDataType in stringKinds:
self.helpTestDataType(sqlDataType,'STRING',u'spam',['spam'])
def testDataTypeDate(self):
        #Does not work with pythonTimeConverter
#self.helpTestDataType("smalldatetime",'DATETIME',adodbapi.Timestamp(2002,10,28,12,15,00)) #Accuracy one minute
self.helpTestDataType("datetime",'DATETIME',adodbapi.Date(2002,10,28),compareAlmostEqual=True)
if self.getEngine() != 'MySQL':
self.helpTestDataType("smalldatetime",'DATETIME',adodbapi.Date(2002,10,28),compareAlmostEqual=True)
self.helpTestDataType("datetime",'DATETIME',adodbapi.Timestamp(2002,10,28,12,15,1),compareAlmostEqual=True)
def testDataTypeBinary(self):
if self.getEngine() == 'MySQL':
pass #self.helpTestDataType("BLOB",'BINARY',adodbapi.Binary('\x00\x01\xE2\x40'))
else:
binfld = str2bytes('\x00\x01\xE2\x40')
self.helpTestDataType("binary(4)",'BINARY',adodbapi.Binary(binfld))
self.helpTestDataType("varbinary(100)",'BINARY',adodbapi.Binary(binfld))
self.helpTestDataType("image",'BINARY',adodbapi.Binary(binfld))
def helpRollbackTblTemp(self):
try:
self.getConnection().rollback()
except adodbapi.NotSupportedError:
pass
self.helpForceDropOnTblTemp()
def helpForceDropOnTblTemp(self):
conn=self.getConnection()
crsr=conn.cursor()
try:
crsr.execute("DELETE FROM tblTemp")
crsr.execute("DROP TABLE tblTemp")
conn.commit()
except:
pass
def helpCreateAndPopulateTableTemp(self,crsr):
tabdef= """
CREATE TABLE tblTemp (
fldData INTEGER
)
"""
crsr.execute(tabdef)
for i in range(9):
crsr.execute("INSERT INTO tblTemp (fldData) VALUES (%i)" %(i,))
def testFetchAll(self):
crsr=self.getCursor()
self.helpCreateAndPopulateTableTemp(crsr)
crsr.execute("SELECT fldData FROM tblTemp")
rs=crsr.fetchall()
assert len(rs)==9
self.helpRollbackTblTemp()
def testExecuteMany(self):
crsr=self.getCursor()
self.helpCreateAndPopulateTableTemp(crsr)
values = [ (111,) , (222,) ]
crsr.executemany("INSERT INTO tblTemp (fldData) VALUES (?)",values)
if crsr.rowcount==-1:
print self.getEngine(),"Provider does not support rowcount (on .executemany())"
else:
self.assertEquals( crsr.rowcount,2)
crsr.execute("SELECT fldData FROM tblTemp")
rs=crsr.fetchall()
assert len(rs)==11
self.helpRollbackTblTemp()
def testRowCount(self):
crsr=self.getCursor()
self.helpCreateAndPopulateTableTemp(crsr)
crsr.execute("SELECT fldData FROM tblTemp")
if crsr.rowcount == -1:
#print "provider does not support rowcount on select"
pass
else:
self.assertEquals( crsr.rowcount,9)
self.helpRollbackTblTemp()
def testRowCountNoRecordset(self):
crsr=self.getCursor()
self.helpCreateAndPopulateTableTemp(crsr)
crsr.execute("DELETE FROM tblTemp WHERE fldData >= 5")
if crsr.rowcount==-1:
print self.getEngine(), "Provider does not support rowcount (on DELETE)"
else:
self.assertEquals( crsr.rowcount,4)
self.helpRollbackTblTemp()
def testFetchMany(self):
crsr=self.getCursor()
self.helpCreateAndPopulateTableTemp(crsr)
crsr.execute("SELECT fldData FROM tblTemp")
rs=crsr.fetchmany(3)
assert len(rs)==3
rs=crsr.fetchmany(5)
assert len(rs)==5
rs=crsr.fetchmany(5)
assert len(rs)==1 #Ask for five, but there is only one left
self.helpRollbackTblTemp()
def testFetchManyWithArraySize(self):
crsr=self.getCursor()
self.helpCreateAndPopulateTableTemp(crsr)
crsr.execute("SELECT fldData FROM tblTemp")
rs=crsr.fetchmany()
assert len(rs)==1 #arraysize Defaults to one
crsr.arraysize=4
rs=crsr.fetchmany()
assert len(rs)==4
rs=crsr.fetchmany()
assert len(rs)==4
rs=crsr.fetchmany()
assert len(rs)==0
self.helpRollbackTblTemp()
def testCurrencyDataType(self):
if self.getEngine() != 'MySQL':
tabdef= """
CREATE TABLE tblTemp (
fldCurr MONEY
)
"""
else:
tabdef= """
CREATE TABLE tblTemp (
fldCurr DECIMAL(19,4)
)
"""
conn=self.getConnection()
crsr=conn.cursor()
crsr.execute(tabdef)
for multiplier in (1,decimal.Decimal('2.5'),78,9999,99999,7007):
crsr.execute("DELETE FROM tblTemp")
correct = decimal.Decimal('12.50') * multiplier
crsr.execute("INSERT INTO tblTemp(fldCurr) VALUES (?)",[correct])
sql="SELECT fldCurr FROM tblTemp "
try:
crsr.execute(sql)
except:
conn.printADOerrors()
fldcurr=crsr.fetchone()[0]
self.assertEquals( fldcurr,correct)
def testErrorConnect(self):
self.assertRaises(adodbapi.DatabaseError,adodbapi.connect,'not a valid connect string')
class TestADOwithSQLServer(CommonDBTests):
def setUp(self):
self.conn=adodbapi.connect(adodbapitestconfig.connStrSQLServer)
self.engine = 'MSSQL'
def tearDown(self):
try:
self.conn.rollback()
except:
pass
try:
self.conn.close()
except:
pass
self.conn=None
def getConnection(self):
return self.conn
def testSQLServerDataTypes(self):
self.helpTestDataType("decimal(18,2)",'NUMBER',3.45,
allowedReturnValues=[u'3.45',u'3,45',decimal.Decimal('3.45')])
self.helpTestDataType("numeric(18,2)",'NUMBER',3.45,
allowedReturnValues=[u'3.45',u'3,45',decimal.Decimal('3.45')])
def testUserDefinedConversionForExactNumericTypes(self):
        # variantConversions is a dictionary of conversion functions
        # held internally in adodbapi.
        # By default decimal and numeric values are returned as decimals;
        # instead, make them return as floats.
oldconverter = adodbapi.variantConversions[adodbapi.adNumeric] #keep old function to restore later
adodbapi.variantConversions[adodbapi.adNumeric] = adodbapi.cvtFloat
self.helpTestDataType("decimal(18,2)",'NUMBER',3.45,compareAlmostEqual=1)
self.helpTestDataType("numeric(18,2)",'NUMBER',3.45,compareAlmostEqual=1)
# now return strings
adodbapi.variantConversions[adodbapi.adNumeric] = adodbapi.cvtString
self.helpTestDataType("numeric(18,2)",'NUMBER','3.45')
        # now a completely weird user-defined conversion
adodbapi.variantConversions[adodbapi.adNumeric] = lambda x: u'!!This function returns a funny unicode string %s!!'%x
self.helpTestDataType("numeric(18,2)",'NUMBER','3.45',
allowedReturnValues=[u'!!This function returns a funny unicode string 3.45!!'])
# now reset the converter to its original function
        adodbapi.variantConversions[adodbapi.adNumeric]=oldconverter #Restore the original conversion function
self.helpTestDataType("numeric(18,2)",'NUMBER',decimal.Decimal('3.45'))
def testVariableReturningStoredProcedure(self):
crsr=self.conn.cursor()
spdef= """
CREATE PROCEDURE sp_DeleteMeOnlyForTesting
@theInput varchar(50),
@theOtherInput varchar(50),
@theOutput varchar(100) OUTPUT
AS
SET @theOutput=@theInput+@theOtherInput
"""
try:
crsr.execute("DROP PROCEDURE sp_DeleteMeOnlyForTesting")
self.conn.commit()
except: #Make sure it is empty
pass
crsr.execute(spdef)
retvalues=crsr.callproc('sp_DeleteMeOnlyForTesting',('Dodsworth','Anne',' '))
assert retvalues[0]=='Dodsworth'
assert retvalues[1]=='Anne'
assert retvalues[2]=='DodsworthAnne'
self.conn.rollback()
def testMultipleSetReturn(self):
crsr=self.getCursor()
self.helpCreateAndPopulateTableTemp(crsr)
spdef= """
CREATE PROCEDURE sp_DeleteMe_OnlyForTesting
AS
SELECT fldData FROM tblTemp ORDER BY fldData ASC
SELECT fldData From tblTemp where fldData = -9999
SELECT fldData FROM tblTemp ORDER BY fldData DESC
"""
try:
crsr.execute("DROP PROCEDURE sp_DeleteMe_OnlyForTesting")
self.conn.commit()
except: #Make sure it is empty
pass
crsr.execute(spdef)
retvalues=crsr.callproc('sp_DeleteMe_OnlyForTesting')
row=crsr.fetchone()
self.assertEquals(row[0], 0)
        assert crsr.nextset() == True, 'Operation should succeed'
assert not crsr.fetchall(), 'Should be an empty second set'
assert crsr.nextset() == True, 'third set should be present'
rowdesc=crsr.fetchall()
self.assertEquals(rowdesc[0][0],8)
assert crsr.nextset() == None,'No more return sets, should return None'
self.helpRollbackTblTemp()
def testRollBack(self):
crsr=self.getCursor()
self.helpCreateAndPopulateTableTemp(crsr)
self.conn.commit()
crsr.execute("INSERT INTO tblTemp (fldData) VALUES(100)")
selectSql="SELECT fldData FROM tblTemp WHERE fldData=100"
crsr.execute(selectSql)
rs=crsr.fetchall()
assert len(rs)==1
self.conn.rollback()
crsr.execute(selectSql)
assert crsr.fetchone()==None, 'cursor.fetchone should return None if a query retrieves no rows'
self.helpRollbackTblTemp()
class TestADOwithAccessDB(CommonDBTests):
def setUp(self):
self.conn = adodbapi.connect(adodbapitestconfig.connStrAccess)
self.engine = 'ACCESS'
def tearDown(self):
try:
self.conn.rollback()
except:
pass
try:
self.conn.close()
except:
pass
self.conn=None
def getConnection(self):
return self.conn
def testOkConnect(self):
c=adodbapi.connect(adodbapitestconfig.connStrAccess)
assert c != None
c.close()
class TestADOwithMySql(CommonDBTests):
def setUp(self):
self.conn = adodbapi.connect(adodbapitestconfig.connStrMySql)
self.engine = 'MySQL'
def tearDown(self):
try:
self.conn.rollback()
except:
pass
try:
self.conn.close()
except:
pass
self.conn=None
def getConnection(self):
return self.conn
def testOkConnect(self):
c=adodbapi.connect(adodbapitestconfig.connStrMySql)
assert c != None
class TimeConverterInterfaceTest(unittest.TestCase):
def testIDate(self):
assert self.tc.Date(1990,2,2)
def testITime(self):
assert self.tc.Time(13,2,2)
def testITimestamp(self):
assert self.tc.Timestamp(1990,2,2,13,2,1)
def testIDateObjectFromCOMDate(self):
assert self.tc.DateObjectFromCOMDate(37435.7604282)
def testICOMDate(self):
assert hasattr(self.tc,'COMDate')
def testExactDate(self):
d=self.tc.Date(1994,11,15)
comDate=self.tc.COMDate(d)
correct=34653.0
assert comDate == correct,comDate
def testExactTimestamp(self):
d=self.tc.Timestamp(1994,11,15,12,0,0)
comDate=self.tc.COMDate(d)
correct=34653.5
self.assertEquals( comDate ,correct)
d=self.tc.Timestamp(2003,5,6,14,15,17)
comDate=self.tc.COMDate(d)
correct=37747.593946759262
self.assertEquals( comDate ,correct)
def testIsoFormat(self):
d=self.tc.Timestamp(1994,11,15,12,3,10)
iso=self.tc.DateObjectToIsoFormatString(d)
self.assertEquals(str(iso[:19]) , '1994-11-15 12:03:10')
dt=self.tc.Date(2003,5,2)
iso=self.tc.DateObjectToIsoFormatString(dt)
self.assertEquals(str(iso[:10]), '2003-05-02')
if adodbapitestconfig.doMxDateTimeTest:
import mx.DateTime
class TestMXDateTimeConverter(TimeConverterInterfaceTest):
def setUp(self):
self.tc=adodbapi.mxDateTimeConverter()
def testCOMDate(self):
t=mx.DateTime.DateTime(2002,6,28,18,15,2)
cmd=self.tc.COMDate(t)
assert cmd == t.COMDate()
def testDateObjectFromCOMDate(self):
cmd=self.tc.DateObjectFromCOMDate(37435.7604282)
t=mx.DateTime.DateTime(2002,6,28,18,15,0)
t2=mx.DateTime.DateTime(2002,6,28,18,15,2)
assert t2>cmd>t
def testDate(self):
assert mx.DateTime.Date(1980,11,4)==self.tc.Date(1980,11,4)
def testTime(self):
assert mx.DateTime.Time(13,11,4)==self.tc.Time(13,11,4)
def testTimestamp(self):
t=mx.DateTime.DateTime(2002,6,28,18,15,1)
obj=self.tc.Timestamp(2002,6,28,18,15,1)
assert t == obj
import time
class TestPythonTimeConverter(TimeConverterInterfaceTest):
def setUp(self):
self.tc=adodbapi.pythonTimeConverter()
def testCOMDate(self):
mk = time.mktime((2002,6,28,18,15,1, 4,31+28+31+30+31+28,-1))
t=time.localtime(mk)
# Fri, 28 Jun 2002 18:15:01 +0000
cmd=self.tc.COMDate(t)
assert abs(cmd - 37435.7604282) < 1.0/24,"%f more than an hour wrong" % cmd
def testDateObjectFromCOMDate(self):
cmd=self.tc.DateObjectFromCOMDate(37435.7604282)
t1=time.gmtime(time.mktime((2002,6,28,0,14,1, 4,31+28+31+30+31+28,-1)))
#there are errors in the implementation of gmtime which we ignore
t2=time.gmtime(time.mktime((2002,6,29,12,14,2, 4,31+28+31+30+31+28,-1)))
assert t1<cmd<t2, '"%s" should be about 2002-6-28 12:15:01'%repr(cmd)
def testDate(self):
t1=time.mktime((2002,6,28,18,15,1, 4,31+28+31+30+31+30,0))
t2=time.mktime((2002,6,30,18,15,1, 4,31+28+31+30+31+28,0))
obj=self.tc.Date(2002,6,29)
assert t1< time.mktime(obj)<t2,obj
def testTime(self):
self.assertEquals( self.tc.Time(18,15,2),time.gmtime(18*60*60+15*60+2))
def testTimestamp(self):
t1=time.localtime(time.mktime((2002,6,28,18,14,1, 4,31+28+31+30+31+28,-1)))
t2=time.localtime(time.mktime((2002,6,28,18,16,1, 4,31+28+31+30+31+28,-1)))
obj=self.tc.Timestamp(2002,6,28,18,15,2)
assert t1< obj <t2,obj
if adodbapitestconfig.doDateTimeTest:
import datetime
class TestPythonDateTimeConverter(TimeConverterInterfaceTest):
def setUp(self):
self.tc=adodbapi.pythonDateTimeConverter()
def testCOMDate(self):
t=datetime.datetime( 2002,6,28,18,15,1)
# Fri, 28 Jun 2002 18:15:01 +0000
cmd=self.tc.COMDate(t)
assert abs(cmd - 37435.7604282) < 1.0/24,"more than an hour wrong"
def testDateObjectFromCOMDate(self):
cmd=self.tc.DateObjectFromCOMDate(37435.7604282)
t1=datetime.datetime(2002,6,28,18,14,1)
t2=datetime.datetime(2002,6,28,18,16,1)
assert t1<cmd<t2,cmd
def testDate(self):
t1=datetime.date(2002,6,28)
t2=datetime.date(2002,6,30)
obj=self.tc.Date(2002,6,29)
assert t1< obj <t2,obj
def testTime(self):
self.assertEquals( self.tc.Time(18,15,2).isoformat()[:8],'18:15:02')
def testTimestamp(self):
t1=datetime.datetime(2002,6,28,18,14,1)
t2=datetime.datetime(2002,6,28,18,16,1)
obj=self.tc.Timestamp(2002,6,28,18,15,2)
assert t1< obj <t2,obj
suites=[]
if adodbapitestconfig.doMxDateTimeTest:
suites.append( unittest.makeSuite(TestMXDateTimeConverter,'test'))
if adodbapitestconfig.doDateTimeTest:
suites.append( unittest.makeSuite(TestPythonDateTimeConverter,'test'))
suites.append( unittest.makeSuite(TestPythonTimeConverter,'test'))
if adodbapitestconfig.doAccessTest:
suites.append( unittest.makeSuite(TestADOwithAccessDB,'test'))
if adodbapitestconfig.doSqlServerTest:
suites.append( unittest.makeSuite(TestADOwithSQLServer,'test'))
if adodbapitestconfig.doMySqlTest:
suites.append( unittest.makeSuite(TestADOwithMySql,'test'))
suite=unittest.TestSuite(suites)
if __name__ == '__main__':
defaultDateConverter=adodbapi.dateconverter
print __doc__
print "Default Date Converter is %s" %(defaultDateConverter,)
unittest.TextTestRunner().run(suite)
if adodbapitestconfig.iterateOverTimeTests:
for test,dateconverter in (
(1,adodbapi.pythonTimeConverter),
(adodbapitestconfig.doMxDateTimeTest,adodbapi.mxDateTimeConverter),
(adodbapitestconfig.doDateTimeTest,adodbapi.pythonDateTimeConverter)
):
if test and not isinstance(defaultDateConverter,dateconverter):
adodbapi.dateconverter=dateconverter()
print "Changed dateconverter to "
print adodbapi.dateconverter
unittest.TextTestRunner().run(suite)
|
slozier/ironpython2
|
Src/StdLib/Lib/site-packages/adodbapi/tests/adodbapitest.py
|
Python
|
apache-2.0
| 28,674
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gdb
import six
import pwndbg.auxv
import pwndbg.chain
import pwndbg.commands
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def auxv():
"""
Print information from the Auxiliary ELF Vector.
"""
for k,v in sorted(pwndbg.auxv.get().items()):
if v is not None:
print(k.ljust(24), v if not isinstance(v, six.integer_types) else pwndbg.chain.format(v))
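# Hedged usage sketch (inside a pwndbg-enabled gdb session attached to a
# running process; the key and address shown are illustrative):
#     pwndbg> auxv
#     AT_SYSINFO_EHDR          0x7ffff7fcd000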
|
zachriggle/pwndbg
|
pwndbg/commands/auxv.py
|
Python
|
mit
| 606
|
#=============================================================================
# gpx_layer_search.py
# Search results map layer
# Copyright 2013, 2014, Trinity College
# Last modified: 7 November 2014
#=============================================================================
import math
import gtk
from pykarta.maps.layers import MapLayer
class SearchLayer(MapLayer):
def __init__(self, data):
MapLayer.__init__(self)
self.tool = None
self.layer_objs = data
self.layer_objs.add_client('map_layer', self)
self.visible_objs = []
self.selected_path = None
self.radius = None
def on_select(self, path, source, client_name):
self.selected_path = path
if path is not None:
match = self.layer_objs[path[0]]
if source == 'treeview_double_click':
self.containing_map.set_center_and_zoom_in(match.lat, match.lon, match.zoom)
else:
self.containing_map.make_visible(match.lat, match.lon)
if match.polygonpoints:
self.containing_map.make_visible_polygon(match.polygonpoints)
self.redraw()
    def set_tool(self, tool):
        # remember the active tool; on_button_press() ignores clicks when no tool is set
        self.tool = tool
        if tool is None:
            pass
        elif tool == "tool_select_adjust":
            return _("Search result locations are circled on the map. Click on them.")
        else:
            raise NotImplementedError
def do_viewport(self):
self.visible_objs = []
match_index = 0
for match in self.layer_objs:
x, y = self.containing_map.project_point(match)
if x > 0 and x < self.containing_map.width and y > 0 and y < self.containing_map.height: # if within viewport,
polygonpoints = self.containing_map.project_points(match.polygonpoints)
self.visible_objs.append([match_index, match, x, y, polygonpoints])
match_index += 1
self.containing_map.feedback.debug(1, " %d of %d search results are in view" % (len(self.visible_objs), len(self.layer_objs)))
def do_draw(self, ctx):
zoom = self.containing_map.get_zoom()
self.radius = 3 + 2 * zoom
for item in self.visible_objs:
(match_index, match, x, y, polygonpoints) = item
if len(polygonpoints) == 0: # if point, draw magnifying glass
# Lens
ctx.arc(x, y, self.radius, 0, 2*math.pi)
ctx.set_source_rgba(1.0, 1.0, 1.0, 0.5)
ctx.fill_preserve()
# Add handle
ctx.move_to(x+0.707*self.radius, y+0.707*self.radius)
ctx.line_to(x+2*self.radius, y+2*self.radius)
# Stroke in one of two colors
ctx.set_line_width(1 + zoom * 0.25)
if self.selected_path != None and match_index == self.selected_path[0]:
ctx.set_source_rgb(1.0, 0.0, 0.0) # red
else:
ctx.set_source_rgb(0.0, 0.0, 0.0) # black
ctx.stroke()
else: # if boundary, draw it
ctx.move_to(polygonpoints[0][0], polygonpoints[0][1])
for point in polygonpoints[1:]:
ctx.line_to(point[0], point[1])
ctx.set_line_width(3)
if self.selected_path != None and match_index == self.selected_path[0]:
ctx.set_source_rgb(1.0, 0.0, 0.0) # red
else:
ctx.set_source_rgb(0.0, 0.0, 0.0) # black
ctx.stroke()
def on_button_press(self, gdkevent):
if self.tool == None:
return False
# If not a single click with left button, bail out.
if gdkevent.type != gtk.gdk.BUTTON_PRESS or gdkevent.button != 1:
return False
for item in reversed(self.visible_objs):
(match_index, match, x, y, polygonpoints) = item
if abs(gdkevent.x - x) <= self.radius and abs(gdkevent.y - y) <= self.radius:
print "Hit search result point"
self.selected_path = (match_index,)
self.layer_objs.select(self.selected_path, "map_layer")
self.redraw()
return True
return False
|
david672orford/GPX_Trip_Planner
|
Code/gpx_layer_search.py
|
Python
|
gpl-2.0
| 3,542
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models
class FleetVehicleLogContract(models.Model):
_inherit = ['mail.thread', 'mail.activity.mixin']
_name = 'fleet.vehicle.log.contract'
_description = 'Vehicle Contract'
_order = 'state desc,expiration_date'
def compute_next_year_date(self, strdate):
oneyear = relativedelta(years=1)
start_date = fields.Date.from_string(strdate)
return fields.Date.to_string(start_date + oneyear)
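    # e.g. compute_next_year_date('2021-03-01') -> '2022-03-01'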
vehicle_id = fields.Many2one('fleet.vehicle', 'Vehicle', required=True, help='Vehicle concerned by this log', check_company=True)
cost_subtype_id = fields.Many2one('fleet.service.type', 'Type', help='Cost type purchased with this cost', domain=[('category', '=', 'contract')])
amount = fields.Monetary('Cost')
date = fields.Date(help='Date when the cost has been executed')
company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env.company)
currency_id = fields.Many2one('res.currency', related='company_id.currency_id')
name = fields.Char(string='Name', compute='_compute_contract_name', store=True)
active = fields.Boolean(default=True)
user_id = fields.Many2one('res.users', 'Responsible', default=lambda self: self.env.user, index=True)
start_date = fields.Date(
'Contract Start Date', default=fields.Date.context_today,
help='Date when the coverage of the contract begins')
expiration_date = fields.Date(
'Contract Expiration Date', default=lambda self:
self.compute_next_year_date(fields.Date.context_today(self)),
        help='Date when the coverage of the contract expires (by default, one year after the begin date)')
days_left = fields.Integer(compute='_compute_days_left', string='Warning Date')
insurer_id = fields.Many2one('res.partner', 'Vendor')
purchaser_id = fields.Many2one(related='vehicle_id.driver_id', string='Driver')
ins_ref = fields.Char('Reference', size=64, copy=False)
state = fields.Selection(
[('futur', 'Incoming'),
('open', 'In Progress'),
('expired', 'Expired'),
('closed', 'Closed')
], 'Status', default='open', readonly=True,
help='Choose whether the contract is still valid or not',
tracking=True,
copy=False)
notes = fields.Html('Terms and Conditions', help='Write here all supplementary information relative to this contract', copy=False)
cost_generated = fields.Monetary('Recurring Cost')
cost_frequency = fields.Selection([
('no', 'No'),
('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('yearly', 'Yearly')
    ], 'Recurring Cost Frequency', default='monthly', help='Frequency of the recurring cost', required=True)
service_ids = fields.Many2many('fleet.service.type', string="Included Services")
@api.depends('vehicle_id.name', 'cost_subtype_id')
def _compute_contract_name(self):
for record in self:
name = record.vehicle_id.name
if name and record.cost_subtype_id.name:
name = record.cost_subtype_id.name + ' ' + name
record.name = name
@api.depends('expiration_date', 'state')
def _compute_days_left(self):
"""return a dict with as value for each contract an integer
if contract is in an open state and is overdue, return 0
if contract is in a closed state, return -1
otherwise return the number of days before the contract expires
"""
for record in self:
if record.expiration_date and record.state in ['open', 'expired']:
today = fields.Date.from_string(fields.Date.today())
renew_date = fields.Date.from_string(record.expiration_date)
diff_time = (renew_date - today).days
record.days_left = diff_time if diff_time > 0 else 0
else:
record.days_left = -1
def write(self, vals):
res = super(FleetVehicleLogContract, self).write(vals)
if 'start_date' in vals or 'expiration_date' in vals:
date_today = fields.Date.today()
future_contracts, running_contracts, expired_contracts = self.env[self._name], self.env[self._name], self.env[self._name]
for contract in self.filtered(lambda c: c.start_date and c.state != 'closed'):
if date_today < contract.start_date:
future_contracts |= contract
elif not contract.expiration_date or contract.start_date <= date_today < contract.expiration_date:
running_contracts |= contract
else:
expired_contracts |= contract
future_contracts.action_draft()
running_contracts.action_open()
expired_contracts.action_expire()
if vals.get('expiration_date') or vals.get('user_id'):
self.activity_reschedule(['fleet.mail_act_fleet_contract_to_renew'], date_deadline=vals.get('expiration_date'), new_user_id=vals.get('user_id'))
return res
def action_close(self):
self.write({'state': 'closed'})
def action_draft(self):
self.write({'state': 'futur'})
def action_open(self):
self.write({'state': 'open'})
def action_expire(self):
self.write({'state': 'expired'})
@api.model
def scheduler_manage_contract_expiration(self):
# This method is called by a cron task
# It manages the state of a contract, possibly by posting a message on the vehicle concerned and updating its status
params = self.env['ir.config_parameter'].sudo()
delay_alert_contract = int(params.get_param('hr_fleet.delay_alert_contract', default=30))
date_today = fields.Date.from_string(fields.Date.today())
outdated_days = fields.Date.to_string(date_today + relativedelta(days=+delay_alert_contract))
reminder_activity_type = self.env.ref('fleet.mail_act_fleet_contract_to_renew', raise_if_not_found=False) or self.env['mail.activity.type']
nearly_expired_contracts = self.search([
('state', '=', 'open'),
('expiration_date', '<', outdated_days),
('user_id', '!=', False)
]
).filtered(
lambda nec: reminder_activity_type not in nec.activity_ids.activity_type_id
)
for contract in nearly_expired_contracts:
contract.activity_schedule(
'fleet.mail_act_fleet_contract_to_renew', contract.expiration_date,
user_id=contract.user_id.id)
expired_contracts = self.search([('state', 'not in', ['expired', 'closed']), ('expiration_date', '<',fields.Date.today() )])
expired_contracts.write({'state': 'expired'})
futur_contracts = self.search([('state', 'not in', ['futur', 'closed']), ('start_date', '>', fields.Date.today())])
futur_contracts.write({'state': 'futur'})
now_running_contracts = self.search([('state', '=', 'futur'), ('start_date', '<=', fields.Date.today())])
now_running_contracts.write({'state': 'open'})
def run_scheduler(self):
self.scheduler_manage_contract_expiration()
|
jeremiahyan/odoo
|
addons/fleet/models/fleet_vehicle_log_contract.py
|
Python
|
gpl-3.0
| 7,365
|
# Spawn Group file created with PSWG Planetary Spawn Tool
import sys
from java.util import Vector
from services.spawn import DynamicSpawnGroup
from services.spawn import MobileTemplate
def addDynamicGroup(core):
dynamicGroup = DynamicSpawnGroup()
mobileTemplates = Vector()
mobileTemplates.add('mokk_clan_leader')
mobileTemplates.add('mokk_clan_primalist')
mobileTemplates.add('mokk_harvester')
mobileTemplates.add('mokk_herbalist')
mobileTemplates.add('mokk_hunter')
mobileTemplates.add('mokk_loreweaver')
mobileTemplates.add('mokk_rockshaper')
mobileTemplates.add('mokk_scout')
mobileTemplates.add('mokk_shaman')
mobileTemplates.add('mokk_soothsayer')
mobileTemplates.add('mokk_tribesman')
mobileTemplates.add('mokk_warrior')
dynamicGroup.setMobiles(mobileTemplates)
dynamicGroup.setGroupMembersNumber(-3)
dynamicGroup.setName('dantooine_mokk')
dynamicGroup.setMaxSpawns(-1)
dynamicGroup.setMinSpawnDistance(150)
core.spawnService.addDynamicGroup('dantooine_mokk', dynamicGroup)
return
|
agry/NGECore2
|
scripts/mobiles/dynamicgroups/dantooine_mokk.py
|
Python
|
lgpl-3.0
| 1,010
|
#-*- coding: utf-8 -*-
import logging
import threading
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
from foobnix.gui.controls.playback import PlaybackControls
from foobnix.gui.model.signal import FControl
from foobnix.helpers.my_widgets import notetab_label, ImageButton
from foobnix.helpers.window import ChildTopWindow
from foobnix.util import analytics, idle_task
from foobnix.util.key_utils import is_key, is_key_alt, get_key
from foobnix.util.mouse_utils import is_double_left_click
class AdvancedDrawingArea(Gtk.DrawingArea):
def __init__(self, controls):
Gtk.DrawingArea.__init__(self)
self.controls = controls
self.set_events(Gdk.EventMask.ALL_EVENTS_MASK) #@UndefinedVariable
# TODO: check it
## self.set_flags(Gtk.CAN_FOCUS)
self.connect("key-release-event", self.on_key_press)
self.connect("button-press-event", self.on_button_press)
self.connect("scroll-event", self.controls.volume.on_scroll_event)
def action_function(self):
logging.debug("Template function not defined")
def on_key_press(self, w, e):
if is_key(e, 'Escape') or get_key(e) in ('F', 'f', 'а', 'А'):
self.action_function()
elif is_key_alt(e) and is_key(e, "Return"):
self.action_function()
elif get_key(e) in ('P', 'p', 'з', 'З','space'):
self.controls.play_pause()
elif is_key(e, 'Left'):
self.controls.seek_down()
elif is_key(e, 'Right'):
self.controls.seek_up()
elif is_key(e, 'Up'):
self.controls.volume_up()
elif is_key(e, 'Down'):
self.controls.volume_down()
self.grab_focus()
def on_button_press(self, w, e):
if is_double_left_click(e):
self.action_function()
self.grab_focus()
class FullScreanArea(ChildTopWindow):
def __init__(self, controls, on_hide_callback):
self.controls = controls
ChildTopWindow.__init__(self, "movie")
self.set_hide_on_escape(False)
self.on_hide_callback = on_hide_callback
## TODO: check it
##self.set_flags(Gtk.CAN_FOCUS)
self.layout = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
self.set_property("skip-taskbar-hint", True)
self.set_keep_above(True)
self.draw = AdvancedDrawingArea(controls)
self.draw.action_function = on_hide_callback
self.set_resizable(True)
self.set_border_width(0)
self.layout.pack_start(self.draw, True, False, 0)
self.text_label = Gtk.Label("foobnix")
self.volume_button = Gtk.VolumeButton()
self.volume_button.connect("value-changed", self.volume_changed)
line = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)
line.pack_start(ImageButton("view-fullscreen", on_hide_callback, _("Exit Fullscrean")), False, False, 0)
line.pack_start(PlaybackControls(controls), False, False, 0)
line.pack_start(controls.seek_bar_movie, True, False, 0)
line.pack_start(Gtk.SeparatorToolItem.new(), False, False, 0)
line.pack_start(self.text_label, False, False, 0)
line.pack_start(Gtk.SeparatorToolItem.new(), False, False, 0)
line.pack_start(self.volume_button, False, False, 0)
line.show_all()
control_panel = Gtk.Window(Gtk.WindowType.POPUP)
control_panel.set_size_request(800, -1)
control_panel.add(line)
self.add(self.layout)
self.draw.connect("enter-notify-event", lambda *a: GLib.idle_add(control_panel.hide))
def my_event(w, e):
if e.y > Gdk.screen_height() - 5: #@UndefinedVariable
def safe_task():
control_panel.show()
control_panel.set_size_request(Gdk.screen_width(), -1)#@UndefinedVariable
control_panel.move(0, Gdk.screen_height() - control_panel.get_allocation().height)#@UndefinedVariable
GLib.idle_add(safe_task)
self.connect("motion-notify-event", my_event)
def volume_changed(self, volumebutton, value):
self.controls.volume.set_value(float(value * 100))
@idle_task
def set_text(self, text):
self.text_label.set_text(text)
def get_draw(self):
return self.draw
@idle_task
def hide_window(self, *a):
self.hide()
@idle_task
def show_window(self):
self.fullscreen()
self.volume_button.set_value(float(self.controls.volume.volume_scale.get_value()/ 100))
self.show_all()
class MovieDrawingArea(FControl, Gtk.Frame):
def __init__(self, controls):
FControl.__init__(self, controls)
Gtk.Frame.__init__(self)
self.set_label_widget(notetab_label(self.hide))
self.set_label_align(1.0, 0.0)
self.set_border_width(0)
self.smallscree_area = AdvancedDrawingArea(controls)
self.smallscree_area.action_function = self.on_full_screen
self.add(self.smallscree_area)
self.fullscrean_area = FullScreanArea(controls, self.on_small_screen)
        def modify_background():
            for state in (Gtk.StateType.NORMAL, Gtk.StateType.PRELIGHT, Gtk.StateType.ACTIVE, Gtk.StateType.SELECTED, Gtk.StateType.INSENSITIVE):
                self.smallscree_area.modify_bg(state, self.smallscree_area.get_colormap().alloc_color("black"))
                self.fullscrean_area.draw.modify_bg(state, self.fullscrean_area.get_colormap().alloc_color("black"))
        # TODO Fix it
        #GLib.idle_add(modify_background)
self.output = None
self.set_output(self.smallscree_area)
def set_output(self, area):
self.output = area
def get_output(self):
return self.output
def get_draw(self):
return self.smallscree_area
def on_full_screen(self):
self.controls.state_stop(True)
self.fullscrean_area.show_window()
self.set_output(self.fullscrean_area.get_draw())
self.controls.state_play(under_pointer_icon=True)
analytics.action("FullScreanArea")
@idle_task
def set_text(self, text):
self.fullscrean_area.set_text(text)
def on_small_screen(self):
self.controls.state_stop(True)
self.set_output(self.smallscree_area)
self.fullscrean_area.hide_window()
self.controls.state_play(under_pointer_icon=True)
@idle_task
def draw_video(self, message):
message_name = message.get_structure().get_name()
if message_name == "prepare-xwindow-id":
imagesink = message.src
imagesink.set_property("force-aspect-ratio", True)
self.show_all()
imagesink.set_xwindow_id(self.get_output().window.xid)
            '''trick to avoid possible black screen in movie_area'''
threading.Timer(0.5, lambda: self.get_output().set_size_request(-1, 400)).start()
|
kagel/foobnix
|
foobnix/gui/controls/movie_area.py
|
Python
|
gpl-3.0
| 7,154
|
__author__ = 'ggdhines'
import matplotlib
matplotlib.use('WXAgg')
from aggregation_api import AggregationAPI
import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
import cv2
import numpy as np
with AggregationAPI(592,"development") as sea:
sea.__setup__()
postgres_cursor = sea.postgres_session.cursor()
select = "SELECT classification_subjects.subject_id,annotations from classifications INNER JOIN classification_subjects ON classification_subjects.classification_id = classifications.id where workflow_id = 607"
postgres_cursor.execute(select)
for subject_id in postgres_cursor.fetchall():
subject_id = subject_id[0]
f_name = sea.__image_setup__(subject_id)
image_file = cbook.get_sample_data(f_name[0])
image = plt.imread(image_file)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fig, ax1 = plt.subplots(1, 1)
ax1.imshow(gray_image)
plt.show()
# (thresh, _) = cv2.threshold(gray_image, 250, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
im_bw = cv2.threshold(gray_image, 180, 255, cv2.THRESH_BINARY)[1]
fig, ax1 = plt.subplots(1, 1)
ax1.imshow(im_bw)
plt.show()
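        # Extract candidate vertical grid lines: an x-direction Sobel highlights
        # vertical edges; threshold with Otsu, dilate with a tall 1x10 kernel,
        # then keep only long, tall contours below.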
kernelx = cv2.getStructuringElement(cv2.MORPH_RECT,(1,10))
dx = cv2.Sobel(im_bw,cv2.CV_16S,1,0)
dx = cv2.convertScaleAbs(dx)
cv2.normalize(dx,dx,0,255,cv2.NORM_MINMAX)
ret,close = cv2.threshold(dx,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
close = cv2.morphologyEx(close,cv2.MORPH_DILATE,kernelx,iterations = 1)
cv2.imwrite("/home/ggdhines/temp.png",close)
_,contour, hier = cv2.findContours(close,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contour:
x,y,w,h = cv2.boundingRect(cnt)
if (cv2.arcLength(cnt,True) > 250) and (h/w > 2):
# print cv2.arcLength(cnt,True)
print h/w
# if h/w > 10:
cv2.drawContours(close,[cnt],0,255,-1)
else:
cv2.drawContours(close,[cnt],0,0,-1)
close = cv2.morphologyEx(close,cv2.MORPH_CLOSE,None,iterations = 2)
# closex = close.copy()
print
cv2.imwrite("/home/ggdhines/vert.png",close)
kernely = cv2.getStructuringElement(cv2.MORPH_RECT,(10,2))
dy = cv2.Sobel(im_bw,cv2.CV_16S,0,2)
dy = cv2.convertScaleAbs(dy)
cv2.normalize(dy,dy,0,255,cv2.NORM_MINMAX)
ret,close = cv2.threshold(dy,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
close = cv2.morphologyEx(close,cv2.MORPH_DILATE,kernely)
_,contour, hier = cv2.findContours(close,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contour:
x,y,w,h = cv2.boundingRect(cnt)
if w/h > 5:
cv2.drawContours(close,[cnt],0,255,-1)
else:
cv2.drawContours(close,[cnt],0,0,-1)
close = cv2.morphologyEx(close,cv2.MORPH_DILATE,None,iterations = 2)
cv2.imwrite("/home/ggdhines/horiz.png",close)
continue
im2, contours, hierarchy = cv2.findContours(im_bw,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for ii,cnt in enumerate(contours):
if cnt.shape[0] > 20:
cnt = np.reshape(cnt,(cnt.shape[0],cnt.shape[2]))
cnt_list = cnt.tolist()
X,Y = zip(*cnt_list)
plt.plot(X,Y)
# hull = ConvexHull(cnt)
# # plt.plot(cnt[hull.vertices,0], cnt[hull.vertices,1], 'r--', lw=2)
#
# shapely_points = [shapely.geometry.shape({"type": "Point", "coordinates": (x,y)}) for (x,y) in zip(X,Y)]
# concave_hull, edge_points = alpha_shape(shapely_points,alpha=0.01)
#
# # print edge_points
#
# if isinstance(concave_hull,shapely.geometry.Polygon):
# # plot_polygon(ax1,concave_hull)
# X,Y = zip(*list(concave_hull.exterior.coords))
# plt.plot(X,Y)
# else:
#
# for p in concave_hull:
# X,Y = zip(*list(p.exterior.coords))
# plt.plot(X,Y)
# # else:
# # for p in concave_hull:
# # plot_polygon(ax1,p)
#
#
# # hull_y = [Y[simplex[0]] for simplex in hull.simplices]
# # plt.plot(hull_x,hull_y)
# if cv2.contourArea(cnt) > 0:
# print cv2.contourArea(cnt)
# cv2.drawContours(image, contours, ii, (0,255,0), 3)
plt.ylim((image.shape[0],0))
plt.xlim((0,image.shape[1]))
plt.show()
|
zooniverse/aggregation
|
blog/sea.py
|
Python
|
apache-2.0
| 4,802
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pickle
from openerp.osv import osv, fields
from openerp.osv.orm import except_orm
EXCLUDED_FIELDS = set((
'report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml',
'report_sxw_content_data', 'report_rml_content_data', 'search_view', ))
#: Possible slots to bind an action to with :meth:`~.set_action`
ACTION_SLOTS = [
"client_action_multi", # sidebar wizard action
"client_print_multi", # sidebar report printing button
"client_action_relate", # sidebar related link
"tree_but_open", # double-click on item in tree view
"tree_but_action", # deprecated: same as tree_but_open
]
class ir_values(osv.osv):
"""Holds internal model-specific action bindings and user-defined default
field values. definitions. This is a legacy internal model, mixing
two different concepts, and will likely be updated or replaced in a
future version by cleaner, separate models. You should not depend
explicitly on it.
The purpose of each ``ir.values`` entry depends on its type, defined
by the ``key`` column:
* 'default': user-defined default values, used when creating new
      records of this model.
* 'action': binding of an action to a particular *action slot* of
this model, making the action easily available in the user
interface for this model.
The ``key2`` column acts as a qualifier, further refining the type
of the entry. The possible values are:
* for 'default' entries: an optional condition restricting the
cases where this particular default value will be applicable,
or ``False`` for no condition
* for 'action' entries: the ``key2`` qualifier is one of the available
action slots, defining how this action can be invoked:
* ``'client_print_multi'`` for report printing actions that will
be available on views displaying items from this model
* ``'client_action_multi'`` for assistants (wizards) actions
that will be available in views displaying objects of this model
* ``'client_action_relate'`` for links towards related documents
that should be available in views displaying objects of this model
* ``'tree_but_open'`` for actions that will be triggered when
double-clicking an item from this model in a hierarchical tree view
Each entry is specific to a model (``model`` column), and for ``'actions'``
type, may even be made specific to a given record of that model when the
``res_id`` column contains a record ID (``False`` means it's global for
all records).
The content of the entry is defined by the ``value`` column, which may either
contain an arbitrary value, or a reference string defining the action that
should be executed.
.. rubric:: Usage: default values
The ``'default'`` entries are usually defined manually by the
users, and set by their UI clients calling :meth:`~.set_default`.
These default values are then automatically used by the
ORM every time a new record is about to be created, i.e. when
:meth:`~openerp.osv.osv.osv.default_get`
or :meth:`~openerp.osv.osv.osv.create` are called.
.. rubric:: Usage: action bindings
Business applications will usually bind their actions during
installation, and OpenERP UI clients will apply them as defined,
based on the list of actions included in the result of
:meth:`~openerp.osv.osv.osv.fields_view_get`,
or directly returned by explicit calls to :meth:`~.get_actions`.
"""
_name = 'ir.values'
def _value_unpickle(self, cursor, user, ids, name, arg, context=None):
res = {}
for record in self.browse(cursor, user, ids, context=context):
value = record[name[:-9]]
if record.key == 'default' and value:
# default values are pickled on the fly
try:
value = str(pickle.loads(value))
except Exception:
pass
res[record.id] = value
return res
def _value_pickle(self, cursor, user, id, name, value, arg, context=None):
if context is None:
context = {}
ctx = context.copy()
if self.CONCURRENCY_CHECK_FIELD in ctx:
del ctx[self.CONCURRENCY_CHECK_FIELD]
record = self.browse(cursor, user, id, context=context)
if record.key == 'default':
# default values are pickled on the fly
value = pickle.dumps(value)
self.write(cursor, user, id, {name[:-9]: value}, context=ctx)
def onchange_object_id(self, cr, uid, ids, object_id, context=None):
if not object_id: return {}
act = self.pool.get('ir.model').browse(cr, uid, object_id, context=context)
return {
'value': {'model': act.model}
}
def onchange_action_id(self, cr, uid, ids, action_id, context=None):
if not action_id: return {}
act = self.pool.get('ir.actions.actions').browse(cr, uid, action_id, context=context)
return {
'value': {'value_unpickle': act.type+','+str(act.id)}
}
_columns = {
'name': fields.char('Name', required=True),
'model': fields.char('Model Name', select=True, required=True,
help="Model to which this entry applies"),
# TODO: model_id and action_id should be read-write function fields
'model_id': fields.many2one('ir.model', 'Model (change only)', size=128,
help="Model to which this entry applies - "
"helper field for setting a model, will "
"automatically set the correct model name"),
'action_id': fields.many2one('ir.actions.actions', 'Action (change only)',
help="Action bound to this entry - "
"helper field for binding an action, will "
"automatically set the correct reference"),
'value': fields.text('Value', help="Default value (pickled) or reference to an action"),
'value_unpickle': fields.function(_value_unpickle, fnct_inv=_value_pickle,
type='text',
string='Default value or action reference'),
'key': fields.selection([('action','Action'),('default','Default')],
'Type', select=True, required=True,
help="- Action: an action attached to one slot of the given model\n"
"- Default: a default value for a model field"),
'key2' : fields.char('Qualifier', select=True,
help="For actions, one of the possible action slots: \n"
" - client_action_multi\n"
" - client_print_multi\n"
" - client_action_relate\n"
" - tree_but_open\n"
"For defaults, an optional condition"
,),
'res_id': fields.integer('Record ID', select=True,
help="Database identifier of the record to which this applies. "
"0 = for all records"),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade', select=True,
help="If set, action binding only applies for this user."),
'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', select=True,
help="If set, action binding only applies for this company")
}
_defaults = {
'key': 'action',
'key2': 'tree_but_open',
}
def _auto_init(self, cr, context=None):
super(ir_values, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_values_key_model_key2_res_id_user_id_idx\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_values_key_model_key2_res_id_user_id_idx ON ir_values (key, model, key2, res_id, user_id)')
def set_default(self, cr, uid, model, field_name, value, for_all_users=True, company_id=False, condition=False):
"""Defines a default value for the given model and field_name. Any previous
default for the same scope (model, field_name, value, for_all_users, company_id, condition)
will be replaced and lost in the process.
Defaults can be later retrieved via :meth:`~.get_defaults`, which will return
the highest priority default for any given field. Defaults that are more specific
have a higher priority, in the following order (highest to lowest):
* specific to user and company
* specific to user only
* specific to company only
* global to everyone
:param string model: model name
:param string field_name: field name to which the default applies
:param value: the default field value to set
:type value: any serializable Python value
:param bool for_all_users: whether the default should apply to everybody or only
the user calling the method
:param int company_id: optional ID of the company to which the default should
apply. If omitted, the default will be global. If True
is passed, the current user's company will be used.
:param string condition: optional condition specification that can be used to
restrict the applicability of the default values
(e.g. based on another field's value). This is an
opaque string as far as the API is concerned, but client
stacks typically use single-field conditions in the
form ``'key=stringified_value'``.
(Currently, the condition is trimmed to 200 characters,
so values that share the same first 200 characters always
match)
:return: id of the newly created ir.values entry
"""
if isinstance(value, unicode):
value = value.encode('utf8')
if company_id is True:
# should be company-specific, need to get company id
user = self.pool.get('res.users').browse(cr, uid, uid)
company_id = user.company_id.id
# remove existing defaults for the same scope
search_criteria = [
('key', '=', 'default'),
('key2', '=', condition and condition[:200]),
('model', '=', model),
('name', '=', field_name),
('user_id', '=', False if for_all_users else uid),
('company_id','=', company_id)
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'name': field_name,
'value': pickle.dumps(value),
'model': model,
'key': 'default',
'key2': condition and condition[:200],
'user_id': False if for_all_users else uid,
'company_id': company_id,
})
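    # Hedged usage sketch ('my.model' and its 'state' field are hypothetical):
    #     self.pool.get('ir.values').set_default(cr, uid, 'my.model', 'state', 'draft')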
def get_default(self, cr, uid, model, field_name, for_all_users=True, company_id=False, condition=False):
""" Return the default value defined for model, field_name, users, company and condition.
Return ``None`` if no such default exists.
"""
search_criteria = [
('key', '=', 'default'),
('key2', '=', condition and condition[:200]),
('model', '=', model),
('name', '=', field_name),
('user_id', '=', False if for_all_users else uid),
('company_id','=', company_id)
]
defaults = self.browse(cr, uid, self.search(cr, uid, search_criteria))
return pickle.loads(defaults[0].value.encode('utf-8')) if defaults else None
def get_defaults(self, cr, uid, model, condition=False):
"""Returns any default values that are defined for the current model and user,
(and match ``condition``, if specified), previously registered via
:meth:`~.set_default`.
Defaults are global to a model, not field-specific, but an optional
``condition`` can be provided to restrict matching default values
to those that were defined for the same condition (usually based
on another field's value).
Default values also have priorities depending on whom they apply
to: only the highest priority value will be returned for any
field. See :meth:`~.set_default` for more details.
:param string model: model name
:param string condition: optional condition specification that can be used to
restrict the applicability of the default values
(e.g. based on another field's value). This is an
opaque string as far as the API is concerned, but client
stacks typically use single-field conditions in the
form ``'key=stringified_value'``.
(Currently, the condition is trimmed to 200 characters,
so values that share the same first 200 characters always
match)
:return: list of default values tuples of the form ``(id, field_name, value)``
(``id`` is the ID of the default entry, usually irrelevant)
"""
# use a direct SQL query for performance reasons,
# this is called very often
query = """SELECT v.id, v.name, v.value FROM ir_values v
LEFT JOIN res_users u ON (v.user_id = u.id)
WHERE v.key = %%s AND v.model = %%s
AND (v.user_id = %%s OR v.user_id IS NULL)
AND (v.company_id IS NULL OR
v.company_id =
(SELECT company_id from res_users where id = %%s)
)
%s
ORDER BY v.user_id, u.company_id"""
params = ('default', model, uid, uid)
if condition:
query %= 'AND v.key2 = %s'
params += (condition[:200],)
else:
query %= 'AND v.key2 is NULL'
cr.execute(query, params)
# keep only the highest priority default for each field
defaults = {}
for row in cr.dictfetchall():
defaults.setdefault(row['name'],
(row['id'], row['name'], pickle.loads(row['value'].encode('utf-8'))))
return defaults.values()
def set_action(self, cr, uid, name, action_slot, model, action, res_id=False):
"""Binds an the given action to the given model's action slot - for later
retrieval via :meth:`~.get_actions`. Any existing binding of the same action
to the same slot is first removed, allowing an update of the action's name.
See the class description for more details about the various action
slots: :class:`~ir_values`.
:param string name: action label, usually displayed by UI client
:param string action_slot: the action slot to which the action should be
bound to - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
:param string action: action reference, in the form ``'model,id'``
:param int res_id: optional record id - will bind the action only to a
specific record of the model, not all records.
:return: id of the newly created ir.values entry
"""
assert isinstance(action, basestring) and ',' in action, \
'Action definition must be an action reference, e.g. "ir.actions.act_window,42"'
assert action_slot in ACTION_SLOTS, \
'Action slot (%s) must be one of: %r' % (action_slot, ACTION_SLOTS)
# remove existing action definition of same slot and value
search_criteria = [
('key', '=', 'action'),
('key2', '=', action_slot),
('model', '=', model),
('res_id', '=', res_id or 0), # int field -> NULL == 0
('value', '=', action),
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'key': 'action',
'key2': action_slot,
'model': model,
'res_id': res_id,
'name': name,
'value': action,
})
def get_actions(self, cr, uid, action_slot, model, res_id=False, context=None):
"""Retrieves the list of actions bound to the given model's action slot.
See the class description for more details about the various action
slots: :class:`~.ir_values`.
:param string action_slot: the action slot to which the actions should be
bound to - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
:param int res_id: optional record id - will bind the action only to a
specific record of the model, not all records.
:return: list of action tuples of the form ``(id, name, action_def)``,
where ``id`` is the ID of the default entry, ``name`` is the
action label, and ``action_def`` is a dict containing the
action definition as obtained by calling
:meth:`~openerp.osv.osv.osv.read` on the action record.
"""
assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
# use a direct SQL query for performance reasons,
# this is called very often
query = """SELECT v.id, v.name, v.value FROM ir_values v
WHERE v.key = %s AND v.key2 = %s
AND v.model = %s
AND (v.res_id = %s
OR v.res_id IS NULL
OR v.res_id = 0)
ORDER BY v.id"""
cr.execute(query, ('action', action_slot, model, res_id or None))
results = {}
for action in cr.dictfetchall():
if not action['value']:
continue # skip if undefined
action_model_name, action_id = action['value'].split(',')
action_model = self.pool.get(action_model_name)
if not action_model:
                continue # unknown model? skip it
fields = [field for field in action_model._all_columns if field not in EXCLUDED_FIELDS]
# FIXME: needs cleanup
try:
action_def = action_model.read(cr, uid, int(action_id), fields, context)
if action_def:
if action_model_name in ('ir.actions.report.xml', 'ir.actions.act_window'):
groups = action_def.get('groups_id')
if groups:
cr.execute('SELECT 1 FROM res_groups_users_rel WHERE gid IN %s AND uid=%s',
(tuple(groups), uid))
if not cr.fetchone():
if action['name'] == 'Menuitem':
raise osv.except_osv('Error!',
                                'You do not have permission to perform this operation!')
continue
# keep only the first action registered for each action name
results[action['name']] = (action['id'], action['name'], action_def)
except except_orm:
continue
return sorted(results.values())
def _map_legacy_model_list(self, model_list, map_fn, merge_results=False):
"""Apply map_fn to the various models passed, according to
legacy way to specify models/records.
"""
assert isinstance(model_list, (list, tuple)), \
"model_list should be in the form [model,..] or [(model,res_id), ..]"
results = []
for model in model_list:
res_id = False
if isinstance(model, (list, tuple)):
model, res_id = model
result = map_fn(model, res_id)
# some of the functions return one result at a time (tuple or id)
# and some return a list of many of them - care for both
if merge_results:
results.extend(result)
else:
results.append(result)
return results
    # Backwards-compatibility adapter layer to retrofit the legacy API onto the split API
def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
"""Deprecated legacy method to set default values and bind actions to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.set_default`
(``key=='default'``) or :meth:`~.set_action` (``key == 'action'``).
:deprecated: As of v6.1, ``set_default()`` or ``set_action()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_set(model,res_id):
return self.set_default(cr, uid, model, field_name=name, value=value,
for_all_users=(not preserve_user), company_id=company,
condition=key2)
elif key == 'action':
def do_set(model,res_id):
return self.set_action(cr, uid, name, action_slot=key2, model=model, action=value, res_id=res_id)
return self._map_legacy_model_list(models, do_set)
def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
"""Deprecated legacy method to get the list of default values or actions bound to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.get_defaults`
(``key=='default'``) or :meth:`~.get_actions` (``key == 'action'``)
:deprecated: As of v6.1, ``get_defaults()`` or ``get_actions()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_get(model,res_id):
return self.get_defaults(cr, uid, model, condition=key2)
elif key == 'action':
def do_get(model,res_id):
return self.get_actions(cr, uid, action_slot=key2, model=model, res_id=res_id, context=context)
return self._map_legacy_model_list(models, do_get, merge_results=True)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
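# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of driving the split API above from
# server-side code; `registry`, `cr` and `uid` are assumed to come from the
# framework, and the action reference id (42) is purely illustrative.
def _example_ir_values_usage(registry, cr, uid):
    ir_values = registry.get('ir.values')
    # Register a user-specific default value for res.partner.lang.
    ir_values.set_default(cr, uid, 'res.partner', 'lang', 'en_US',
                          for_all_users=False)
    # Retrieve all defaults for the model as (id, field_name, value) tuples.
    defaults = ir_values.get_defaults(cr, uid, 'res.partner')
    # Bind a contextual action to the partner's "related" action slot.
    ir_values.set_action(cr, uid, 'Example Action', 'client_action_relate',
                         'res.partner', 'ir.actions.act_window,42')
    return defaults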
|
a0c/odoo
|
openerp/addons/base/ir/ir_values.py
|
Python
|
agpl-3.0
| 25,151
|
from __future__ import unicode_literals
import unittest
from prompt_toolkit.document import Document
from contrail_api_cli.command import Command, Arg, Option
from contrail_api_cli.manager import CommandManager
from contrail_api_cli.parser import CommandParser
from contrail_api_cli.exceptions import CommandNotFound, CommandInvalid
BASE = 'http://localhost:8082'
class TestCmd(Command):
long = Option('-l', action="store_true")
foo = Option(help="foo")
bar = Option(nargs="*")
arg1 = Arg(help="%(default)s", default="bar")
arg2 = Arg()
def __call__(self, *args, **kwargs):
pass
class TestCmd2(Command):
long = Option('-l', action="store_true")
arg1 = Arg(help="%(default)s", default="bar")
arg2 = Arg(nargs="*")
def __call__(self, *args, **kwargs):
pass
class TestCommandOptions(unittest.TestCase):
def setUp(self):
self.cmd = TestCmd('test-cmd')
def test_option_multiple(self):
self.assertTrue(self.cmd.options['bar'].is_multiple)
self.assertFalse(self.cmd.options['foo'].is_multiple)
def test_option_help(self):
self.assertEqual(self.cmd.options['foo'].help, 'foo')
self.assertEqual(self.cmd.options['bar'].help, '')
self.assertEqual(self.cmd.args['arg1'].help, 'bar')
def test_option_need_value(self):
self.assertTrue(self.cmd.options['foo'].need_value)
self.assertFalse(self.cmd.options['long'].need_value)
def test_option_strings(self):
self.assertEqual(['--long', '-l'], self.cmd.options['long'].option_strings)
self.assertEqual(['--bar'], self.cmd.options['bar'].option_strings)
def test_cmd_options(self):
self.assertEqual(['long', 'foo', 'bar'], list(self.cmd.options.keys()))
def test_cmd_args(self):
self.assertEqual(['arg1', 'arg2'], list(self.cmd.args.keys()))
class TestParser(unittest.TestCase):
def setUp(self):
self.mgr = CommandManager()
self.cmd = TestCmd('test-cmd')
self.cmd2 = TestCmd2('test-cmd2')
self.mgr.add('test-cmd', self.cmd)
self.mgr.add('test-cmd2', self.cmd2)
def test_bad_cmd(self):
with self.assertRaises(CommandInvalid):
CommandParser(Document('foo -h'))
with self.assertRaises(CommandInvalid):
CommandParser(Document('bar '))
with self.assertRaises(CommandNotFound):
CommandParser(Document())
with self.assertRaises(CommandNotFound):
CommandParser(Document('ex'))
def test_cmd(self):
parser = CommandParser(Document('test-cmd'))
self.assertEqual(parser.cmd, self.mgr.get('test-cmd'))
self.assertEqual(parser.cmd_name, 'test-cmd')
def test_option_parsing(self):
parser = CommandParser(Document('test-cmd -h'))
self.assertEqual(len(list(parser.used_options)), 0)
expected = ['-l', '--foo', '--bar']
parsed = [o.short_name or o.long_name for o in parser.available_options]
self.assertEqual(parsed, expected)
parser = CommandParser(Document('test-cmd --bar -h'))
self.assertEqual(len(list(parser.used_options)), 1)
expected = ['-l', '--foo', '--bar']
parsed = [o.short_name or o.long_name for o in parser.available_options]
self.assertEqual(parsed, expected)
def test_arg_parsing(self):
parser = CommandParser(Document('test-cmd --foo bar arg1_value -l arg2_value '))
self.assertEqual(list(parser.used_args), [self.cmd.args['arg1'], self.cmd.args['arg2']])
parser = CommandParser(Document('test-cmd arg1_value -l'))
self.assertEqual(list(parser.used_args), [self.cmd.args['arg1']])
self.assertEqual(list(parser.available_args), [self.cmd.args['arg2']])
parser = CommandParser(Document('test-cmd2 arg1_value -l'))
self.assertEqual(list(parser.available_args), [self.cmd2.args['arg2']])
parser = CommandParser(Document('test-cmd2 arg1_value -l arg2_value'))
self.assertEqual(list(parser.available_args), [self.cmd2.args['arg2']])
parser = CommandParser(Document('test-cmd2 arg1_value -l arg2_value arg2_value2'))
self.assertEqual(list(parser.available_args), [self.cmd2.args['arg2']])
if __name__ == "__main__":
unittest.main()
|
eonpatapon/contrail-api-cli
|
contrail_api_cli/tests/test_parser.py
|
Python
|
mit
| 4,305
|
#-*- coding: utf-8 -*-
# Copyright (c) 2005 Canonical
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <niemeyer@conectiva.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.interface import getImagePath
from smart import *
import os
try:
from PyQt4 import QtCore, QtGui
except ImportError:
from smart.const import DEBUG
if sysconf.get("log-level") == DEBUG:
import traceback
traceback.print_exc()
raise Error, _("System has no support for qt python interface")
def create(ctrl, command=None, argv=None):
if command:
from smart.interfaces.qt4.command import QtCommandInterface
return QtCommandInterface(ctrl)
else:
from smart.interfaces.qt4.interactive import QtInteractiveInterface
return QtInteractiveInterface(ctrl)
_pixmap = {}
def getPixmap(name):
if name not in _pixmap:
filename = getImagePath(name)
if os.path.isfile(filename):
pixmap = QtGui.QPixmap(filename)
_pixmap[name] = pixmap
else:
raise Error, _("Image '%s' not found") % name
return _pixmap[name]
def centerWindow(window):
w = window.topLevelWidget()
if w:
scrn = QtGui.QApplication.desktop().screenNumber(w)
elif QtGui.QApplication.desktop().isVirtualDesktop():
scrn = QtGui.QApplication.desktop().screenNumber(QtGui.QCursor.pos())
else:
scrn = QtGui.QApplication.desktop().screenNumber(window)
desk = QtGui.QApplication.desktop().availableGeometry(scrn)
window.move((desk.width() - window.frameGeometry().width()) / 2, \
(desk.height() - window.frameGeometry().height()) / 2)
# vim:ts=4:sw=4:et
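# --- Illustrative usage sketch (not part of the original module) ---
# Minimal, hypothetical example of the helpers above: getPixmap() caches
# QPixmap instances by image name, and centerWindow() centers a top-level
# widget on the screen it lives on. Assumes a running QApplication and an
# image named "smart" shipped with the interface.
#
#   app = QtGui.QApplication([])
#   label = QtGui.QLabel()
#   label.setPixmap(getPixmap("smart"))  # a second call returns the cached pixmap
#   label.show()
#   centerWindow(label)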
|
blackPantherOS/packagemanagement
|
smartpm/smart/interfaces/qt4/__init__.py
|
Python
|
apache-2.0
| 2,441
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SMURF augmentation.
This library contains various augmentation functions.
"""
# pylint:skip-file
import math
from typing import Tuple, Union, Dict
import gin
import gin.tf
import tensorflow as tf
from tensorflow_addons import image as tfa_image
from functools import partial
from smurf import smurf_utils
_TensorTuple2 = Tuple[tf.Tensor, tf.Tensor]
_TensorTuple3 = Tuple[tf.Tensor, tf.Tensor, tf.Tensor]
_TensorTuple4 = Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]
def apply_augmentation(
inputs,
crop_height = 640,
crop_width = 640,
return_full_scale=False):
"""Applies photometric and geometric augmentations to images and flow.
Args:
inputs: dictionary of data to perform augmentation on.
crop_height: Height of the final augmented output.
crop_width: Width of the final augmented output.
return_full_scale: bool, if True, include the full size images.
Returns:
Augmented images and possibly flow, mask (if provided).
"""
# Ensure sequence length of two to be able to unstack images.
images = inputs['images']
flow = inputs.get('flow')
mask = inputs.get('flow_valid')
images = tf.ensure_shape(images, (2, None, None, None))
# Apply geometric augmentation functions.
if return_full_scale: # Perform "full-scale warping."
images, flow, mask, full_size_images, crop_h, crop_w, pad_h, pad_w = geometric_augmentation(
images, flow, mask, crop_height, crop_width, return_full_scale=True)
else:
images, flow, mask = geometric_augmentation(
images, flow, mask, crop_height, crop_width, return_full_scale=False)
images_aug = photometric_augmentation(images)
if flow is not None:
inputs['flow'] = flow
inputs['flow_valid'] = mask
if return_full_scale:
inputs['crop_h'] = crop_h
inputs['crop_w'] = crop_w
inputs['pad_h'] = pad_h
inputs['pad_w'] = pad_w
inputs['full_size_images'] = full_size_images
inputs['images'] = images
inputs['augmented_images'] = images_aug
return inputs
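# --- Illustrative usage sketch (not part of the original library) ---
# A minimal, hypothetical driver for apply_augmentation() on random data.
# Shapes follow the docstring: images are [2, H, W, 3] in [0, 1], flow is
# [H, W, 2] and flow_valid is [H, W, 1]; sizes are illustrative only.
def _example_apply_augmentation():
  inputs = {
      'images': tf.random.uniform([2, 384, 512, 3]),
      'flow': tf.random.normal([384, 512, 2]),
      'flow_valid': tf.ones([384, 512, 1]),
  }
  outputs = apply_augmentation(inputs, crop_height=320, crop_width=448)
  print(outputs['images'].shape)            # expected: (2, 320, 448, 3)
  print(outputs['augmented_images'].shape)  # expected: (2, 320, 448, 3)
  return outputs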
@gin.configurable
def photometric_augmentation(
images,
probability_color_swap = 0.0,
probability_hue_shift = 1.0,
probability_saturation = 1.0,
probability_brightness = 1.0,
probability_contrast = 1.0,
probability_gaussian_noise = 0.0,
probability_brightness_individual = 0.0,
probability_contrast_individual = 0.0,
probability_eraser = 0.5,
probability_eraser_additional_operations = 0.5,
probability_assymetric = 0.2,
max_delta_hue = 0.5 / 3.14,
min_bound_saturation = 0.6,
max_bound_saturation = 1.4,
max_delta_brightness = 0.4,
min_bound_contrast = 0.6,
max_bound_contrast = 1.4,
min_bound_gaussian_noise = 0.0,
max_bound_gaussian_noise = 0.02,
max_delta_brightness_individual = 0.02,
min_bound_contrast_individual = 0.95,
max_bound_contrast_individual = 1.05,
min_size_eraser = 50,
max_size_eraser = 100,
max_operations_eraser = 3):
"""Applies photometric augmentations to an image pair.
Args:
images: Image pair of shape [2, height, width, channels].
probability_color_swap: Probability of applying color swap augmentation.
probability_hue_shift: Probability of applying hue shift augmentation.
probability_saturation: Probability of applying saturation augmentation.
probability_brightness: Probability of applying brightness augmentation.
probability_contrast: Probability of applying contrast augmentation.
probability_gaussian_noise: Probability of applying gaussian noise
augmentation.
probability_brightness_individual: Probability of applying brightness
augmentation individually to each image of the image pair.
probability_contrast_individual: Probability of applying contrast
augmentation individually to each image of the image pair.
probability_eraser: Probability of applying the eraser augmentation.
probability_eraser_additional_operations: Probability of applying additional
erase operations within the eraser augmentation.
    probability_assymetric: Probability of applying some photometric
      augmentations individually per frame (hue_shift, brightness,
      saturation, contrast, gaussian noise).
max_delta_hue: Must be in the interval [0, 0.5]. Defines the interval
[-max_delta_hue, max_delta_hue] in which to pick a random hue offset.
min_bound_saturation: Lower bound for the randomly picked saturation factor
in the interval [lower, upper].
max_bound_saturation: Upper bound for the randomly picked saturation factor
in the interval [lower, upper].
max_delta_brightness: Delta defines the interval [-max_delta, max_delta) in
which a random value is picked that will be added to the image values.
min_bound_contrast: Lower bound for the randomly picked contrast factor in
the interval [lower, upper]. It will be applied per channel via (x - mean)
* contrast_factor + mean.
max_bound_contrast: Upper bound for the randomly picked contrast factor in
the interval [lower, upper]. It will be applied per channel via (x - mean)
* contrast_factor + mean.
min_bound_gaussian_noise: Lower bound for the randomly picked sigma in the
interval [lower, upper].
max_bound_gaussian_noise: Upper bound for the randomly picked sigma in the
interval [lower, upper].
max_delta_brightness_individual: Same as max_delta_brightness, but for the
augmentation applied individually per frame.
min_bound_contrast_individual: Same as min_bound_contrast, but for the
augmentation applied individually per frame.
max_bound_contrast_individual: Same as max_bound_contrast, but for the
augmentation applied individually per frame.
min_size_eraser: Minimal side length of the rectangle shaped region that
will be removed.
max_size_eraser: Maximal side length of the rectangle shaped region that
will be removed.
max_operations_eraser: Maximal number of rectangle shaped regions that will
be removed.
Returns:
Augmented images and possibly flow, mask (if provided).
"""
# All photometric augmentation that could be applied individually per frame.
def potential_asymmetric_augmentations(images):
if probability_hue_shift > 0:
images = random_hue_shift(images, probability_hue_shift, max_delta_hue)
if probability_saturation > 0:
images = random_saturation(images, probability_saturation,
min_bound_saturation, max_bound_saturation)
if probability_brightness > 0:
images = random_brightness(images, probability_brightness,
max_delta_brightness)
if probability_contrast > 0:
images = random_contrast(images, probability_contrast, min_bound_contrast,
max_bound_contrast)
if probability_gaussian_noise > 0:
images = random_gaussian_noise(images, probability_gaussian_noise,
min_bound_gaussian_noise,
max_bound_gaussian_noise)
return images
perform_assymetric = tf.random.uniform([]) < probability_assymetric
def true_fn(images):
image_1, image_2 = tf.unstack(images)
image_1 = potential_asymmetric_augmentations(image_1)
image_2 = potential_asymmetric_augmentations(image_2)
return tf.stack([image_1, image_2])
def false_fn(images):
return images
images = tf.cond(perform_assymetric, lambda: true_fn(images),
lambda: false_fn(images))
# Photometric augmentations applied to all frames of a pair.
if probability_color_swap > 0:
images = random_color_swap(images, probability_color_swap)
# Photometric augmentations applied individually per image frame.
if probability_contrast_individual > 0:
images = random_contrast_individual(images, probability_contrast_individual,
min_bound_contrast_individual,
max_bound_contrast_individual)
if probability_brightness_individual > 0:
images = random_brightness_individual(images,
probability_brightness_individual,
max_delta_brightness_individual)
  # Clip values to ensure they are within [0, 1]; some augmentations may
  # violate this.
images = tf.clip_by_value(images, 0.0, 1.0)
# Apply special photometric augmentations.
if probability_eraser > 0:
images = random_eraser(
images,
min_size=min_size_eraser,
max_size=max_size_eraser,
probability=probability_eraser,
max_operations=max_operations_eraser,
probability_additional_operations=probability_eraser_additional_operations
)
return images
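# photometric_augmentation() is gin-configurable, so its probabilities and
# bounds can be overridden from a gin config rather than in code. A
# hypothetical binding (a sketch; the values are illustrative, not SMURF's
# defaults), loaded via gin.parse_config_file(...) before building the data
# pipeline:
#
#   photometric_augmentation.probability_color_swap = 0.1
#   photometric_augmentation.probability_eraser = 0.0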
@gin.configurable
def geometric_augmentation(images,
flow = None,
mask = None,
crop_height = 640,
crop_width = 640,
probability_flip_left_right = 0.5,
probability_flip_up_down = 0.1,
probability_scale = 0.8,
probability_relative_scale = 0.,
probability_stretch = 0.8,
probability_rotation = 0.0,
probability_relative_rotation = 0.0,
probability_crop_offset = 0.0,
min_bound_scale = -0.2,
max_bound_scale = 0.6,
max_strech_scale = 0.2,
min_bound_relative_scale = -0.1,
max_bound_relative_scale = 0.1,
max_rotation_deg = 15,
max_relative_rotation_deg = 3,
max_relative_crop_offset = 5,
return_full_scale=False):
"""Applies geometric augmentations to an image pair and corresponding flow.
Args:
images: Image pair of shape [2, height, width, channels].
flow: Corresponding forward flow field of shape [height, width, 2].
mask: Mask indicating which positions in the flow field hold valid flow
      vectors of shape [height, width, 1]. Non-valid positions are encoded with
0, valid positions with 1.
crop_height: Height of the final augmented output.
crop_width: Width of the final augmented output.
probability_flip_left_right: Probability of applying left/right flip.
probability_flip_up_down: Probability of applying up/down flip
probability_scale: Probability of applying scale augmentation.
probability_relative_scale: Probability of applying scale augmentation to
      only the second frame of the image pair.
probability_stretch: Probability of applying stretch augmentation (scale
without keeping the aspect ratio).
probability_rotation: Probability of applying rotation augmentation.
probability_relative_rotation: Probability of applying rotation augmentation
      to only the second frame of the image pair.
probability_crop_offset: Probability of applying a relative offset while
cropping.
min_bound_scale: Defines the smallest possible scaling factor as
2**min_bound_scale.
max_bound_scale: Defines the largest possible scaling factor as
2**max_bound_scale.
    max_strech_scale: Defines the smallest and largest possible stretching factor
as 2**-max_strech_scale and 2**max_strech_scale.
min_bound_relative_scale: Defines the smallest possible scaling factor for
the relative scaling as 2**min_bound_relative_scale.
max_bound_relative_scale: Defines the largest possible scaling factor for
the relative scaling as 2**max_bound_relative_scale.
max_rotation_deg: Defines the maximum angle of rotation in degrees.
max_relative_rotation_deg: Defines the maximum angle of rotation in degrees
for the relative rotation.
max_relative_crop_offset: Defines the maximum relative offset in pixels for
cropping.
return_full_scale: bool. If this is passed, the full size images will be
returned in addition to the geometrically augmented (cropped and / or
resized) images. In addition to the resized images, the crop height,
width, and any padding applied will be returned.
Returns:
if return_full_scale is False:
Augmented images, flow and mask (if not None).
if return_full_scale is True:
Augmented images, flow, mask, full_size_images, crop_h, crop_w, pad_h,
and pad_w.
"""
# apply geometric augmentation
if probability_flip_left_right > 0:
images, flow, mask = random_flip_left_right(
images, flow, mask, probability_flip_left_right)
if probability_flip_up_down > 0:
images, flow, mask = random_flip_up_down(
images, flow, mask, probability_flip_up_down)
if probability_scale > 0 or probability_stretch > 0:
images, flow, mask = random_scale(
images,
flow,
mask,
min_scale=min_bound_scale,
max_scale=max_bound_scale,
max_strech=max_strech_scale,
probability_scale=probability_scale,
probability_strech=probability_stretch)
if probability_relative_scale > 0:
images, flow, mask = random_scale_second(
images, flow, mask,
min_scale=min_bound_relative_scale,
max_scale=max_bound_relative_scale,
probability_scale=probability_relative_scale)
if probability_rotation > 0:
images, flow, mask = random_rotation(
images, flow, mask,
probability=probability_rotation,
max_rotation=max_rotation_deg, not_empty_crop=True)
if probability_relative_rotation > 0:
images, flow, mask = random_rotation_second(
images, flow, mask,
probability=probability_relative_rotation,
max_rotation=max_relative_rotation_deg, not_empty_crop=True)
images_uncropped = images
images, flow, mask, offset_h, offset_w = random_crop(
images, flow, mask, crop_height, crop_width,
relative_offset=max_relative_crop_offset,
probability_crop_offset=probability_crop_offset)
  # Add 200 / 400 pixels to crop height / width for the full-scale warp.
pad_to_size_h = crop_height + 200
pad_to_size_w = crop_width + 400
if return_full_scale:
if pad_to_size_w:
uncropped_shape = tf.shape(images_uncropped)
if images.shape[1] > uncropped_shape[1] or images.shape[
2] > uncropped_shape[2]:
images_uncropped = images
uncropped_shape = tf.shape(images_uncropped)
offset_h = tf.zeros_like(offset_h)
offset_w = tf.zeros_like(offset_w)
if uncropped_shape[1] > pad_to_size_h:
crop_ht = offset_h - (200 // 2)
crop_hb = offset_h + crop_height + (200 // 2)
crop_hb += tf.maximum(0, -crop_ht)
crop_ht -= tf.maximum(0, -(uncropped_shape[1] - crop_hb))
crop_ht = tf.maximum(crop_ht, 0)
crop_hb = tf.minimum(crop_hb, uncropped_shape[1])
offset_h -= crop_ht
images_uncropped = images_uncropped[:, crop_ht:crop_hb, :, :]
if uncropped_shape[2] > pad_to_size_w:
crop_wt = offset_w - (400 // 2)
crop_wb = offset_w + crop_width + (400 // 2)
crop_wb += tf.maximum(0, -crop_wt)
crop_wt -= tf.maximum(0, -(uncropped_shape[2] - crop_wb))
crop_wt = tf.maximum(crop_wt, 0)
crop_wb = tf.minimum(crop_wb, uncropped_shape[2])
offset_w -= crop_wt
images_uncropped = images_uncropped[:, :, crop_wt:crop_wb, :]
uncropped_shape = tf.shape(images_uncropped)
      # pad height / width up to the fixed padded size, recording how much
      # padding was applied
pad_h = pad_to_size_h - uncropped_shape[1]
pad_w = pad_to_size_w - uncropped_shape[2]
with tf.control_dependencies([
tf.compat.v1.assert_greater_equal(pad_h, 0),
tf.compat.v1.assert_greater_equal(pad_w, 0)
]):
images_uncropped = tf.pad(images_uncropped,
[[0, 0], [pad_h, 0], [pad_w, 0], [0, 0]])
images_uncropped = tf.ensure_shape(images_uncropped,
[2, pad_to_size_h, pad_to_size_w, 3])
return images, flow, mask, images_uncropped, offset_h, offset_w, pad_h, pad_w
return images, flow, mask
def _center_crop(images, height, width):
"""Performs a center crop with the given heights and width."""
# ensure height, width to be int
height = tf.cast(height, tf.int32)
width = tf.cast(width, tf.int32)
# get current size
images_shape = tf.shape(images)
current_height = images_shape[-3]
current_width = images_shape[-2]
# compute required offset
offset_height = tf.cast((current_height - height) / 2, tf.int32)
offset_width = tf.cast((current_width - width) / 2, tf.int32)
# perform the crop
images = tf.image.crop_to_bounding_box(
images, offset_height, offset_width, height, width)
return images
def _positions_center_origin(height, width):
"""Returns image coordinates where the origin at the image center."""
h = tf.range(0.0, height, 1)
w = tf.range(0.0, width, 1)
center_h = tf.cast(height, tf.float32) / 2.0 - 0.5
center_w = tf.cast(width, tf.float32) / 2.0 - 0.5
return tf.stack(tf.meshgrid(h - center_h, w - center_w, indexing='ij'), -1)
def rotate(img,
angle_radian,
is_flow,
mask = None):
"""Rotate an image or flow field."""
def _rotate(img, mask=None):
if angle_radian == 0.0:
# early return if no resizing is required
if mask is not None:
return img, mask
else:
return img
if mask is not None:
# multiply with mask, to ensure non-valid locations are zero
img = tf.math.multiply(img, mask)
# rotate img
img_rotated = tfa_image.rotate(
img, angle_radian, interpolation='BILINEAR')
# rotate mask (will serve as normalization weights)
mask_rotated = tfa_image.rotate(
mask, angle_radian, interpolation='BILINEAR')
# normalize sparse flow field and mask
img_rotated = tf.math.multiply(
img_rotated, tf.math.reciprocal_no_nan(mask_rotated))
mask_rotated = tf.math.multiply(
mask_rotated, tf.math.reciprocal_no_nan(mask_rotated))
else:
img_rotated = tfa_image.rotate(
img, angle_radian, interpolation='BILINEAR')
if is_flow:
# If image is a flow image, scale flow values to be consistent with the
# rotation.
cos = tf.math.cos(angle_radian)
sin = tf.math.sin(angle_radian)
rotation_matrix = tf.reshape([cos, sin, -sin, cos], [2, 2])
img_rotated = tf.linalg.matmul(img_rotated, rotation_matrix)
if mask is not None:
return img_rotated, mask_rotated
return img_rotated
# Apply resizing at the right shape.
shape = img.shape.as_list()
if len(shape) == 3:
if mask is not None:
img_rotated, mask_rotated = _rotate(img[None], mask[None])
return img_rotated[0], mask_rotated[0]
else:
return _rotate(img[None])[0]
elif len(shape) == 4:
# Input at the right shape.
return _rotate(img, mask)
else:
raise ValueError('Cannot rotate an image of shape', shape)
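# Note on the flow handling in rotate() above: rotating a flow field moves
# the vectors to new pixel locations (the tfa_image.rotate call), but the
# vector components are still expressed in the old axes, so each 2-vector is
# additionally multiplied by the 2x2 rotation matrix [[cos, sin], [-sin, cos]].
# For sparse flow, the mask doubles as normalization weights so that bilinear
# interpolation near invalid positions does not dilute valid flow values.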
def random_flip_left_right(images,
flow,
mask,
probability):
"""Performs a random left/right flip."""
perform_flip = tf.less(tf.random.uniform([]), probability)
# apply flip
images = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(images, axis=[-2]),
false_fn=lambda: images)
if flow is not None:
flow = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(flow, axis=[-2]),
false_fn=lambda: flow)
mask = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(mask, axis=[-2]),
false_fn=lambda: mask)
# correct sign of flow
sign_correction = tf.reshape([1.0, -1.0], [1, 1, 2])
flow = tf.cond(pred=perform_flip,
true_fn=lambda: flow * sign_correction,
false_fn=lambda: flow)
return images, flow, mask
def random_flip_up_down(images,
flow,
mask,
probability):
"""Performs a random up/down flip."""
  # flip with the given probability
perform_flip = tf.less(tf.random.uniform([]), probability)
# apply flip
images = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(images, axis=[-3]),
false_fn=lambda: images)
if flow is not None:
flow = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(flow, axis=[-3]),
false_fn=lambda: flow)
mask = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(mask, axis=[-3]),
false_fn=lambda: mask)
# correct sign of flow
sign_correction = tf.reshape([-1.0, 1.0], [1, 1, 2])
flow = tf.cond(pred=perform_flip,
true_fn=lambda: flow * sign_correction,
false_fn=lambda: flow)
return images, flow, mask
def _get_random_scaled_resolution(
orig_height,
orig_width,
min_scale,
max_scale,
max_strech,
probability_strech):
"""Computes a new random resolution."""
# Choose a random scale factor and compute new resolution.
scale = 2 ** tf.random.uniform([],
minval=min_scale,
maxval=max_scale,
dtype=tf.float32)
scale_height = scale
scale_width = scale
  # Possibly change scale values individually to perform stretch
def true_fn(scale_height, scale_width):
scale_height *= 2 ** tf.random.uniform([], -max_strech, max_strech)
scale_width *= 2 ** tf.random.uniform([], -max_strech, max_strech)
return tf.stack((scale_height, scale_width), axis=0)
def false_fn(scale_height, scale_width):
return tf.stack((scale_height, scale_width), axis=0)
perform_strech = tf.random.uniform([]) < probability_strech
scales = tf.cond(perform_strech,
lambda: true_fn(scale_height, scale_width),
lambda: false_fn(scale_height, scale_width))
scale_height = scales[0]
scale_width = scales[1]
# Compute scaled image resolution.
new_height = tf.cast(
tf.math.ceil(tf.cast(orig_height, tf.float32) * scale_height), tf.int32)
new_width = tf.cast(
tf.math.ceil(tf.cast(orig_width, tf.float32) * scale_width), tf.int32)
return new_height, new_width, scale
def random_scale(images,
flow,
mask,
min_scale,
max_scale,
max_strech,
probability_scale,
probability_strech):
"""Performs a random scaling in the given range."""
perform_scale = tf.random.uniform([]) < probability_scale
def true_fn(images, flow, mask):
# Get a random new resolution to which the images will be scaled.
orig_height = tf.shape(images)[-3]
orig_width = tf.shape(images)[-2]
new_height, new_width, _ = _get_random_scaled_resolution(
orig_height=orig_height,
orig_width=orig_width,
min_scale=min_scale,
max_scale=max_scale,
max_strech=max_strech,
probability_strech=probability_strech)
# rescale the images (and flow)
images = smurf_utils.resize(images, new_height, new_width, is_flow=False)
if flow is not None:
flow, mask = smurf_utils.resize(
flow, new_height, new_width, is_flow=True, mask=mask)
return images, flow, mask
def false_fn(images, flow, mask):
return images, flow, mask
return tf.cond(perform_scale, lambda: true_fn(images, flow, mask),
lambda: false_fn(images, flow, mask))
def random_scale_second(images,
flow,
mask,
min_scale,
max_scale,
probability_scale):
"""Performs a random scaling on the second image in the given range."""
perform_scale = tf.random.uniform([]) < probability_scale
def true_fn(images, flow, mask):
# choose a random scale factor and compute new resolution
orig_height = tf.shape(images)[-3]
orig_width = tf.shape(images)[-2]
new_height, new_width, scale = _get_random_scaled_resolution(
orig_height=orig_height,
orig_width=orig_width,
min_scale=min_scale,
max_scale=max_scale,
max_strech=0.0,
probability_strech=0.0)
# rescale only the second image
image_1, image_2 = tf.unstack(images)
image_2 = smurf_utils.resize(image_2, new_height, new_width, is_flow=False)
# Crop either first or second image to have matching dimensions
if scale < 1.0:
image_1 = _center_crop(image_1, new_height, new_width)
else:
image_2 = _center_crop(image_2, orig_height, orig_width)
images = tf.stack([image_1, image_2])
if flow is not None:
# get current locations (with the origin in the image center)
positions = _positions_center_origin(orig_height, orig_width)
# compute scale factor of the actual new image resolution
scale_flow_h = tf.cast(new_height, tf.float32) / tf.cast(
orig_height, tf.float32)
scale_flow_w = tf.cast(new_width, tf.float32) / tf.cast(
orig_width, tf.float32)
scale_flow = tf.stack([scale_flow_h, scale_flow_w])
# compute augmented flow (multiply by mask to zero invalid flow locations)
flow = ((positions + flow) * scale_flow - positions) * mask
if scale < 1.0:
# in case we downsample the image we crop the reference image to keep
# the same shape
flow = _center_crop(flow, new_height, new_width)
mask = _center_crop(mask, new_height, new_width)
return images, flow, mask
def false_fn(images, flow, mask):
return images, flow, mask
return tf.cond(perform_scale, lambda: true_fn(images, flow, mask),
lambda: false_fn(images, flow, mask))
def random_crop(images,
flow,
mask,
crop_height,
crop_width,
relative_offset,
probability_crop_offset):
"""Performs a random crop with the given height and width."""
# early return if crop_height or crop_width is not specified
if crop_height is None or crop_width is None:
return images, flow, mask
orig_height = tf.shape(images)[-3]
orig_width = tf.shape(images)[-2]
# check if crop size fits the image size
scale = 1.0
ratio = tf.cast(crop_height, tf.float32) / tf.cast(orig_height, tf.float32)
scale = tf.math.maximum(scale, ratio)
ratio = tf.cast(crop_width, tf.float32) / tf.cast(orig_width, tf.float32)
scale = tf.math.maximum(scale, ratio)
  # compute the minimum required height and width
new_height = tf.cast(
tf.math.ceil(tf.cast(orig_height, tf.float32) * scale), tf.int32)
new_width = tf.cast(
tf.math.ceil(tf.cast(orig_width, tf.float32) * scale), tf.int32)
# perform resize (scales with 1 if not required)
images = smurf_utils.resize(images, new_height, new_width, is_flow=False)
# compute joint offset
max_offset_h = new_height - tf.cast(crop_height, dtype=tf.int32)
max_offset_w = new_width - tf.cast(crop_width, dtype=tf.int32)
joint_offset_h = tf.random.uniform([], maxval=max_offset_h+1, dtype=tf.int32)
joint_offset_w = tf.random.uniform([], maxval=max_offset_w+1, dtype=tf.int32)
# compute relative offset
min_relative_offset_h = tf.math.maximum(
joint_offset_h - relative_offset, 0)
max_relative_offset_h = tf.math.minimum(
joint_offset_h + relative_offset, max_offset_h)
min_relative_offset_w = tf.math.maximum(
joint_offset_w - relative_offset, 0)
max_relative_offset_w = tf.math.minimum(
joint_offset_w + relative_offset, max_offset_w)
relative_offset_h = tf.random.uniform(
[], minval=min_relative_offset_h, maxval=max_relative_offset_h+1,
dtype=tf.int32)
relative_offset_w = tf.random.uniform(
[], minval=min_relative_offset_w, maxval=max_relative_offset_w+1,
dtype=tf.int32)
set_crop_offset = tf.random.uniform([]) < probability_crop_offset
relative_offset_h = tf.cond(
set_crop_offset, lambda: relative_offset_h, lambda: joint_offset_h)
relative_offset_w = tf.cond(
set_crop_offset, lambda: relative_offset_w, lambda: joint_offset_w)
# crop both images
image_1, image_2 = tf.unstack(images)
image_1 = tf.image.crop_to_bounding_box(
image_1, offset_height=joint_offset_h, offset_width=joint_offset_w,
target_height=crop_height, target_width=crop_width)
image_2 = tf.image.crop_to_bounding_box(
image_2, offset_height=relative_offset_h, offset_width=relative_offset_w,
target_height=crop_height, target_width=crop_width)
images = tf.stack([image_1, image_2])
if flow is not None:
# perform resize (scales with 1 if not required)
flow, mask = smurf_utils.resize(
flow, new_height, new_width, is_flow=True, mask=mask)
# crop flow and mask
flow = tf.image.crop_to_bounding_box(
flow,
offset_height=joint_offset_h,
offset_width=joint_offset_w,
target_height=crop_height,
target_width=crop_width)
mask = tf.image.crop_to_bounding_box(
mask,
offset_height=joint_offset_h,
offset_width=joint_offset_w,
target_height=crop_height,
target_width=crop_width)
# correct flow for relative shift (/crop)
flow_delta = tf.stack(
[tf.cast(relative_offset_h - joint_offset_h, tf.float32),
tf.cast(relative_offset_w - joint_offset_w, tf.float32)])
flow = (flow - flow_delta) * mask
return images, flow, mask, joint_offset_h, joint_offset_w
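# Note on the flow correction in random_crop() above: when the second image
# is cropped at a (relative_offset_h, relative_offset_w) that differs from the
# first image's (joint_offset_h, joint_offset_w), every correspondence shifts
# by that constant difference, so flow_delta is subtracted from the flow field
# and the result is re-masked to zero out invalid positions.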
def random_rotation(images,
flow,
mask,
probability,
max_rotation,
not_empty_crop = True):
"""Performs a random rotation with the specified maximum rotation."""
perform_rotation = tf.random.uniform([]) < probability
def true_fn(images, flow, mask):
angle_radian = tf.random.uniform(
[], minval=-max_rotation, maxval=max_rotation,
dtype=tf.float32) * math.pi / 180.0
images = rotate(images, angle_radian, is_flow=False, mask=None)
if not_empty_crop:
orig_height = tf.shape(images)[-3]
orig_width = tf.shape(images)[-2]
# introduce abbreviations for shorter notation
cos = tf.math.cos(angle_radian % math.pi)
sin = tf.math.sin(angle_radian % math.pi)
h = tf.cast(orig_height, tf.float32)
w = tf.cast(orig_width, tf.float32)
# compute required scale factor
scale = tf.cond(tf.math.less(angle_radian % math.pi, math.pi/2.0),
lambda: tf.math.maximum((w/h)*sin+cos, (h/w)*sin+cos),
lambda: tf.math.maximum((w/h)*sin-cos, (h/w)*sin-cos))
new_height = tf.math.floor(h / scale)
new_width = tf.math.floor(w / scale)
# crop image again to original size
offset_height = tf.cast((h - new_height) / 2, tf.int32)
offset_width = tf.cast((w - new_width) / 2, tf.int32)
images = tf.image.crop_to_bounding_box(
images,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
if flow is not None:
flow, mask = rotate(flow, angle_radian, is_flow=True, mask=mask)
if not_empty_crop:
# crop flow and mask again to original size
flow = tf.image.crop_to_bounding_box(
flow,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
mask = tf.image.crop_to_bounding_box(
mask,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
return images, flow, mask
def false_fn(images, flow, mask):
return images, flow, mask
return tf.cond(perform_rotation, lambda: true_fn(images, flow, mask),
lambda: false_fn(images, flow, mask))
def random_rotation_second(images,
flow,
mask,
probability,
max_rotation,
not_empty_crop=True):
"""Performs a random rotation on only the second image."""
perform_rotation = tf.random.uniform([]) < probability
def true_fn(images, flow, mask):
angle_radian = tf.random.uniform(
[], minval=-max_rotation, maxval=max_rotation,
dtype=tf.float32) * math.pi / 180.0
image_1, image_2 = tf.unstack(images)
image_2 = rotate(image_2, angle_radian, is_flow=False, mask=None)
images = tf.stack([image_1, image_2])
if not_empty_crop:
orig_height = tf.shape(images)[-3]
orig_width = tf.shape(images)[-2]
# introduce abbreviations for shorter notation
cos = tf.math.cos(angle_radian % math.pi)
sin = tf.math.sin(angle_radian % math.pi)
h = tf.cast(orig_height, tf.float32)
w = tf.cast(orig_width, tf.float32)
# compute required scale factor
scale = tf.cond(tf.math.less(angle_radian % math.pi, math.pi/2.0),
lambda: tf.math.maximum((w/h)*sin+cos, (h/w)*sin+cos),
lambda: tf.math.maximum((w/h)*sin-cos, (h/w)*sin-cos))
new_height = tf.math.floor(h / scale)
new_width = tf.math.floor(w / scale)
# crop image again to original size
offset_height = tf.cast((h-new_height)/2, tf.int32)
offset_width = tf.cast((w-new_width)/2, tf.int32)
images = tf.image.crop_to_bounding_box(
images,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
if flow is not None:
# get current locations (with the origin in the image center)
positions = _positions_center_origin(orig_height, orig_width)
# compute augmented flow (multiply by mask to zero invalid flow locations)
cos = tf.math.cos(angle_radian)
sin = tf.math.sin(angle_radian)
rotation_matrix = tf.reshape([cos, sin, -sin, cos], [2, 2])
flow = (tf.linalg.matmul(
(positions + flow), rotation_matrix) - positions) * mask
if not_empty_crop:
# crop flow and mask again to original size
flow = tf.image.crop_to_bounding_box(
flow,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
mask = tf.image.crop_to_bounding_box(
mask,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
return images, flow, mask
def false_fn(images, flow, mask):
return images, flow, mask
return tf.cond(perform_rotation, lambda: true_fn(images, flow, mask),
lambda: false_fn(images, flow, mask))
def random_color_swap(images, probability):
"""Randomly permute colors (rolling and reversing covers all permutations)."""
perform_color_swap = tf.random.uniform([]) < probability
  def true_fn(images):
    r = tf.random.uniform([], maxval=3, dtype=tf.int32)
    images = tf.roll(images, r, axis=-1)
    # Reverse the channel order only half the time; together with the random
    # roll this covers all six channel permutations.
    flip = tf.equal(tf.random.uniform([], maxval=2, dtype=tf.int32), 1)
    return tf.cond(flip,
                   lambda: tf.reverse(images, axis=[-1]),
                   lambda: images)
def false_fn(images):
return images
return tf.cond(perform_color_swap,
lambda: true_fn(images),
lambda: false_fn(images))
def random_hue_shift(images,
probability,
max_delta):
perform_hue_shift = tf.random.uniform([]) < probability
return tf.cond(perform_hue_shift,
lambda: tf.image.random_hue(images, max_delta), lambda: images)
def random_saturation(images,
probability,
min_bound,
max_bound):
perform_saturation = tf.random.uniform([]) < probability
return tf.cond(
perform_saturation,
lambda: tf.image.random_saturation(images, min_bound, max_bound),
lambda: images)
def random_brightness(images,
probability,
max_delta):
perform_brightness = tf.random.uniform([]) < probability
return tf.cond(
perform_brightness,
lambda: tf.image.random_brightness(images, max_delta),
lambda: images)
def random_contrast(images,
probability,
min_bound,
max_bound):
perform_contrast = tf.random.uniform([]) < probability
return tf.cond(
perform_contrast,
lambda: tf.image.random_contrast(images, min_bound, max_bound),
lambda: images)
def random_contrast_individual(images,
probability,
min_bound,
max_bound):
perform_augmentation = tf.random.uniform([]) < probability
def true_fn(images):
image_1, image_2 = tf.unstack(images)
image_1 = tf.image.random_contrast(image_1, min_bound, max_bound)
image_2 = tf.image.random_contrast(image_2, min_bound, max_bound)
return tf.stack([image_1, image_2])
def false_fn(images):
return images
return tf.cond(perform_augmentation,
lambda: true_fn(images),
lambda: false_fn(images))
def random_brightness_individual(images,
probability,
max_delta):
perform_augmentation = tf.random.uniform([]) < probability
def true_fn(images):
image_1, image_2 = tf.unstack(images)
image_1 = tf.image.random_brightness(image_1, max_delta)
image_2 = tf.image.random_brightness(image_2, max_delta)
return tf.stack([image_1, image_2])
def false_fn(images):
return images
return tf.cond(perform_augmentation,
lambda: true_fn(images),
lambda: false_fn(images))
def random_gaussian_noise(images,
probability,
min_bound,
max_bound):
"""Augments images by adding gaussian noise."""
perform_gaussian_noise = tf.random.uniform([]) < probability
  def true_fn(images):
    sigma = tf.random.uniform([],
                              minval=min_bound,
                              maxval=max_bound,
                              dtype=tf.float32)
    noise = tf.random.normal(
        tf.shape(input=images), stddev=sigma, dtype=tf.float32)
    images = images + noise
    return images
def false_fn(images):
return images
return tf.cond(perform_gaussian_noise,
lambda: true_fn(images),
lambda: false_fn(images))
def random_eraser(images,
min_size,
max_size,
probability,
max_operations,
probability_additional_operations,
augment_entire_batch = False):
"""Earses a random rectangle shaped areas in the second image or image batch.
Args:
images: Stacked image pair that should be augmented with shape
[2, height, width, 3] or a batch of images that should be augmented with
shape [batch, height, width, 3].
min_size: Minimum size of erased rectangle.
max_size: Maximum size of erased rectangle.
    probability: Probability of applying this augmentation function.
    max_operations: Maximum total number of areas that should be erased.
    probability_additional_operations: Probability for each additional area to
      be erased if the augmentation is applied.
    augment_entire_batch: If true, the input is treated as a batch of images to
      which the augmentation should be applied.
  Returns:
    Possibly augmented images.
"""
perform_erase = tf.less(tf.random.uniform([]), probability)
height = tf.shape(images)[-3]
width = tf.shape(images)[-2]
  # Returns augmented images.
def true_fn(images):
if augment_entire_batch:
image_2 = images
mean_color = tf.reduce_mean(image_2, axis=[1, 2], keepdims=True)
else:
image_1, image_2 = tf.unstack(images)
mean_color = tf.reduce_mean(image_2, axis=[0, 1], keepdims=True)
def body(var_img, mean_color):
x0 = tf.random.uniform([], 0, width, dtype=tf.int32)
y0 = tf.random.uniform([], 0, height, dtype=tf.int32)
dx = tf.random.uniform([], min_size, max_size, dtype=tf.int32)
dy = tf.random.uniform([], min_size, max_size, dtype=tf.int32)
x = tf.range(width)
x_mask = (x0 <= x) & (x < x0+dx)
y = tf.range(height)
y_mask = (y0 <= y) & (y < y0+dy)
mask = x_mask & y_mask[:, tf.newaxis]
mask = tf.cast(mask[:, :, tf.newaxis], image_2.dtype)
result = var_img * (1 - mask) + mean_color * mask
return result
# Perform at least one erase operation.
image_2 = body(image_2, mean_color)
# Perform additional erase operations.
for _ in range(max_operations - 1):
perform_erase = tf.less(
tf.random.uniform([]), probability_additional_operations)
image_2 = tf.cond(perform_erase, lambda: body(image_2, mean_color),
lambda: image_2)
if augment_entire_batch:
images = image_2
else:
images = tf.stack([image_1, image_2])
return images
# Returns unaugmented images.
def false_fn(images):
return images
return tf.cond(perform_erase,
lambda: true_fn(images),
lambda: false_fn(images))
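# --- Illustrative usage sketch (not part of the original library) ---
# A minimal, hypothetical call to random_eraser() on a random image pair;
# with probability=1.0 at least one rectangle in the second frame is always
# replaced by that frame's mean color. Sizes here are illustrative only.
def _example_random_eraser():
  images = tf.random.uniform([2, 128, 128, 3])
  erased = random_eraser(
      images,
      min_size=10,
      max_size=30,
      probability=1.0,
      max_operations=2,
      probability_additional_operations=0.5)
  print(erased.shape)  # expected: (2, 128, 128, 3)
  return erased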
def build_selfsup_transformations(num_flow_levels=3,
crop_height=0,
crop_width=0,
resize=True):
"""Apply augmentations to a list of student images."""
def transform(images, is_flow, crop_height, crop_width, resize):
height = images.shape[-3]
width = images.shape[-2]
op5 = tf.compat.v1.assert_greater(
height,
2 * crop_height,
message='Image height is too small for cropping.')
op6 = tf.compat.v1.assert_greater(
width, 2 * crop_width, message='Image width is too small for cropping.')
with tf.control_dependencies([op5, op6]):
images = images[:, crop_height:height - crop_height,
crop_width:width - crop_width, :]
if resize:
images = smurf_utils.resize(images, height, width, is_flow=is_flow)
images.set_shape((images.shape[0], height, width, images.shape[3]))
else:
images.set_shape((images.shape[0], height - 2 * crop_height,
width - 2 * crop_width, images.shape[3]))
return images
max_divisor = 2**(num_flow_levels - 1)
assert crop_height % max_divisor == 0
assert crop_width % max_divisor == 0
# Compute random shifts for different images in a sequence.
return partial(
transform,
crop_height=crop_height,
crop_width=crop_width,
resize=resize)
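# --- Illustrative usage sketch (not part of the original library) ---
# build_selfsup_transformations() returns a partial; a hypothetical
# application to a random image batch (crop sizes must be divisible by
# 2**(num_flow_levels - 1)):
def _example_selfsup_transform():
  transform = build_selfsup_transformations(
      num_flow_levels=3, crop_height=16, crop_width=16, resize=True)
  images = tf.random.uniform([2, 128, 160, 3])
  out = transform(images, is_flow=False)
  print(out.shape)  # expected: (2, 128, 160, 3) since resize=True
  return out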
|
google-research/google-research
|
smurf/smurf_augmentation.py
|
Python
|
apache-2.0
| 44,282
|
# -*- coding: utf-8 -*-
from __future__ import division
import tarfile
import os
from scipy.io import wavfile
import numpy as np
import tables
import numbers
import random
import string
import fnmatch
import theano
from lxml import etree
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib2 as urllib
def get_dataset_dir(dataset_name, data_dir=None, folder=None, create_dir=True):
if not data_dir:
data_dir = os.getenv("MINET_DATA", os.path.join(
os.path.expanduser("~"), "minet_data"))
if folder is None:
data_dir = os.path.join(data_dir, dataset_name)
else:
data_dir = os.path.join(data_dir, folder)
if not os.path.exists(data_dir) and create_dir:
os.makedirs(data_dir)
return data_dir
def download(url, server_fname, local_fname=None, progress_update_percentage=5):
"""
An internet download utility modified from
http://stackoverflow.com/questions/22676/
how-do-i-download-a-file-over-http-using-python/22776#22776
"""
u = urllib.urlopen(url)
if local_fname is None:
local_fname = server_fname
full_path = local_fname
meta = u.info()
with open(full_path, 'wb') as f:
try:
file_size = int(meta.get("Content-Length"))
except TypeError:
print("WARNING: Cannot get file size, displaying bytes instead!")
file_size = 100
print("Downloading: %s Bytes: %s" % (server_fname, file_size))
file_size_dl = 0
block_sz = int(1E7)
p = 0
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if (file_size_dl * 100. / file_size) > p:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl *
100. / file_size)
print(status)
p += progress_update_percentage
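# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical call to download(); the URL is a placeholder, and progress
# is printed every 5% by default:
#
#   download("http://example.com/data.tar.gz", "data.tar.gz",
#            local_fname="/tmp/data.tar.gz")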
def check_fetch_iamondb():
partial_path = get_dataset_dir("iamondb")
if not os.path.exists(partial_path):
os.makedirs(partial_path)
strokes_path = os.path.join(partial_path, "lineStrokes")
ascii_path = os.path.join(partial_path, "ascii")
if not os.path.exists(strokes_path) or not os.path.exists(ascii_path):
raise ValueError("You must download the data from IAMOnDB, and"
"unpack in %s" % partial_path)
return strokes_path, ascii_path
def plot_scatter_iamondb_example(X, y=None):
import matplotlib.pyplot as plt
rgba_colors = np.zeros((len(X), 4))
normed = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
# for red the first column needs to be one
rgba_colors[:, 0] = normed[:, 0]
# for blue last color column needs to be one
rgba_colors[:, 2] = np.abs(1 - normed[:, 0])
# the fourth column needs to be alphas
rgba_colors[:, 3] = np.ones((len(X),)) * .4 + .4 * normed[:, 0]
if len(X[0]) == 3:
plt.scatter(X[:, 1], X[:, 2], color=rgba_colors)
elif len(X[0]) == 2:
plt.scatter(X[:, 0], X[:, 1], color=rgba_colors)
if y is not None:
plt.title(y)
def plot_lines_iamondb_example(X, y=None):
import matplotlib.pyplot as plt
val_index = np.where(X[:, 0] != 1)[0]
contiguous = np.where((val_index[1:] - val_index[:-1]) == 1)[0] + 1
non_contiguous = np.where((val_index[1:] - val_index[:-1]) != 1)[0] + 1
prev_nc = 0
for nc in val_index[non_contiguous]:
ind = ((prev_nc <= contiguous) & (contiguous < nc))[:-1]
prev_nc = nc
plt.plot(X[val_index[ind], 1], X[val_index[ind], 2])
plt.plot(X[prev_nc:, 1], X[prev_nc:, 2])
if y is not None:
plt.title(y)
# A trick for monkeypatching an instance method when the method comes from a
# C extension; there must be a better way
class _textEArray(tables.EArray):
pass
class _handwritingEArray(tables.EArray):
pass
def fetch_iamondb():
strokes_path, ascii_path = check_fetch_iamondb()
stroke_matches = []
for root, dirnames, filenames in os.walk(strokes_path):
for filename in fnmatch.filter(filenames, '*.xml'):
stroke_matches.append(os.path.join(root, filename))
ascii_matches = []
for root, dirnames, filenames in os.walk(ascii_path):
for filename in fnmatch.filter(filenames, '*.txt'):
ascii_matches.append(os.path.join(root, filename))
partial_path = get_dataset_dir("iamondb")
hdf5_path = os.path.join(partial_path, "iamondb.h5")
if not os.path.exists(hdf5_path):
# setup tables
compression_filter = tables.Filters(complevel=5, complib='blosc')
hdf5_file = tables.openFile(hdf5_path, mode='w')
handwriting = hdf5_file.createEArray(hdf5_file.root, 'handwriting',
tables.Int32Atom(),
shape=(0, 3),
filters=compression_filter,
expectedrows=len(ascii_matches))
handwriting_poslen = hdf5_file.createEArray(hdf5_file.root,
'handwriting_poslen',
tables.Int32Atom(),
shape=(0, 2),
filters=compression_filter,
expectedrows=len(
ascii_matches))
text = hdf5_file.createEArray(hdf5_file.root, 'text',
tables.Int32Atom(),
shape=(0, 1),
filters=compression_filter,
expectedrows=len(ascii_matches))
text_poslen = hdf5_file.createEArray(hdf5_file.root, 'text_poslen',
tables.Int32Atom(),
shape=(0, 2),
filters=compression_filter,
expectedrows=len(ascii_matches))
current_text_pos = 0
current_handwriting_pos = 0
for na, ascii_file in enumerate(ascii_matches):
if na % 100 == 0:
print("Reading ascii file %i of %i" % (na, len(ascii_matches)))
with open(ascii_file) as fp:
cleaned = [t.strip() for t in fp.readlines()
if 'OCR' not in t
and 'CSR' not in t
and t != '\r\n'
and t != '\n']
            # Find corresponding XML file for ascii file
file_id = ascii_file.split(os.sep)[-2]
submatches = [sf for sf in stroke_matches if file_id in sf]
# Sort by file number
submatches = sorted(submatches,
key=lambda x: int(
x.split(os.sep)[-1].split(
"-")[-1][:-4]))
# Skip files where ascii length and number of XML don't match
# TODO: Figure out why this is happening
if len(cleaned) != len(submatches):
continue
for n, stroke_file in enumerate(submatches):
with open(stroke_file) as fp:
tree = etree.parse(fp)
root = tree.getroot()
# Get all the values from the XML
# 0th index is stroke ID, will become up/down
s = np.array([[i, int(Point.attrib['x']),
int(Point.attrib['y'])]
for StrokeSet in root
for i, Stroke in enumerate(StrokeSet)
for Point in Stroke])
# flip y axis
s[:, 2] = -s[:, 2]
# Get end of stroke points
c = s[1:, 0] != s[:-1, 0]
                ci = np.where(c)[0]
                nci = np.where(~c)[0]
# set pen down
s[0, 0] = 0
s[nci, 0] = 0
# set pen up
s[ci, 0] = 1
s[-1, 0] = 1
lh = len(s)
for i in range(lh):
handwriting.append(s[i][None])
handwriting_poslen.append(
np.array([current_handwriting_pos, lh])[None])
current_handwriting_pos += lh
lt = len(cleaned[n])
for i in range(lt):
text.append(
np.array(ord(cleaned[n][i]))[None, None])
text_poslen.append(
np.array([current_text_pos, lt])[None])
current_text_pos += lt
hdf5_file.close()
hdf5_file = tables.openFile(hdf5_path, mode='r')
handwriting = hdf5_file.root.handwriting
handwriting_poslen = hdf5_file.root.handwriting_poslen
text = hdf5_file.root.text
text_poslen = hdf5_file.root.text_poslen
# Monkeypatch text
# A dirty hack to only monkeypatch text
text.__class__ = _textEArray
def text_getter(self, key):
if isinstance(key, numbers.Integral) or isinstance(key, np.integer):
p, l = text_poslen[key]
return "".join(map(chr, self.read(p, p+l, 1)))
elif isinstance(key, slice):
start, stop, step = self._processRange(key.start, key.stop,
key.step)
if key.stop is None:
stop = len(text_poslen)
if key.start is None:
start = 0
if stop <= start:
# replicate slice where stop <= start
return []
if stop >= len(text_poslen):
stop = len(text_poslen)
elif key.stop < 0 and key.stop is not None:
stop = len(text_poslen) + key.stop
if key.start < 0 and key.start is not None:
start = len(text_poslen) + key.start
return ["".join(map(chr, self.read(text_poslen[k][0],
sum(text_poslen[k]), 1)))
for k in range(start, stop, step)]
# Patch __getitem__ in custom subclass, applying to all instances of it
_textEArray.__getitem__ = text_getter
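    # After the patch, text[i] returns the i-th transcription as a str and
    # text[a:b] returns a list of strings, while the on-disk layout stays a
    # flat array of character codes.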
# Monkeypatch handwriting
# A dirty hack to only monkeypatch handwriting
handwriting.__class__ = _handwritingEArray
def handwriting_getter(self, key):
if isinstance(key, numbers.Integral) or isinstance(key, np.integer):
p, l = handwriting_poslen[key]
return self.read(p, p+l, 1).astype('float32')
elif isinstance(key, slice):
start, stop, step = self._processRange(key.start, key.stop,
key.step)
            if key.stop is None:
                stop = len(handwriting_poslen)
            if key.start is None:
                start = 0
            if stop <= start:
                # replicate slice where stop <= start
                return []
            if stop >= len(handwriting_poslen):
                stop = len(handwriting_poslen)
            elif key.stop < 0 and key.stop is not None:
                stop = len(handwriting_poslen) + key.stop
            if key.start < 0 and key.start is not None:
                start = len(handwriting_poslen) + key.start
return [self.read(handwriting_poslen[k][0],
sum(handwriting_poslen[k]), 1).astype('float32')
for k in range(start, stop, step)]
# Patch __getitem__ in custom subclass, applying to all instances of it
_handwritingEArray.__getitem__ = handwriting_getter
X = handwriting
y = text
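    # X[i] yields a float32 array of (pen, x, y) rows for sample i, and y[i]
    # yields the matching transcription string.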
return (X, y)
"""
def load_fruitspeech():
# Check if dataset is in the data directory.
data_path = os.path.join(os.path.split(__file__)[0], "data")
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset = 'audio.tar.gz'
data_file = os.path.join(data_path, dataset)
if os.path.isfile(data_file):
dataset = data_file
if not os.path.isfile(data_file):
try:
import urllib
urllib.urlretrieve('http://google.com')
url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz'
except AttributeError:
import urllib.request as urllib
url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz'
print('Downloading data from %s' % url)
urllib.urlretrieve(url, data_file)
print('... loading data')
if not os.path.exists(os.path.join(data_path, "audio")):
tar = tarfile.open(data_file)
os.chdir(data_path)
tar.extractall()
tar.close()
h5_file_path = os.path.join(data_path, "saved_fruit.h5")
if not os.path.exists(h5_file_path):
data_path = os.path.join(data_path, "audio")
audio_matches = []
for root, dirnames, filenames in os.walk(data_path):
for filename in fnmatch.filter(filenames, '*.wav'):
audio_matches.append(os.path.join(root, filename))
random.seed(1999)
random.shuffle(audio_matches)
# http://mail.scipy.org/pipermail/numpy-discussion/2011-March/055219.html
h5_file = tables.openFile(h5_file_path, mode='w')
data_x = h5_file.createVLArray(h5_file.root, 'data_x',
tables.Float32Atom(shape=()),
filters=tables.Filters(1))
data_x_shapes = h5_file.createVLArray(h5_file.root, 'data_x_shapes',
tables.Int32Atom(shape=()),
filters=tables.Filters(1))
data_y = h5_file.createVLArray(h5_file.root, 'data_y',
tables.Int32Atom(shape=()),
filters=tables.Filters(1))
for wav_path in audio_matches:
# Convert chars to int classes
word = wav_path.split(os.sep)[-1][:-6]
chars = [ord(c) - 97 for c in word]
data_y.append(np.array(chars, dtype='int32'))
fs, d = wavfile.read(wav_path)
# Preprocessing from A. Graves "Towards End-to-End Speech
# Recognition"
            Pxx, _, _, _ = plt.specgram(d, NFFT=256, noverlap=128)
data_x_shapes.append(np.array(Pxx.T.shape, dtype='int32'))
data_x.append(Pxx.T.astype('float32').flatten())
h5_file.close()
h5_file = tables.openFile(h5_file_path, mode='r')
data_x = h5_file.root.data_x
data_x_shapes = h5_file.root.data_x_shapes
data_y = h5_file.root.data_y
# A dirty hack to only monkeypatch data_x
data_x.__class__ = _cVLArray
# override getter so that it gets reshaped to 2D when fetched
old_getter = data_x.__getitem__
def getter(self, key):
if isinstance(key, numbers.Integral) or isinstance(key, np.integer):
return old_getter(key).reshape(data_x_shapes[key]).astype(
theano.config.floatX)
elif isinstance(key, slice):
start, stop, step = self._processRange(key.start, key.stop,
key.step)
return [o.reshape(s) for o, s in zip(
self.read(start, stop, step), data_x_shapes[slice(
start, stop, step)])]
# Patch __getitem__ in custom subclass, applying to all instances of it
_cVLArray.__getitem__ = getter
train_x = data_x[:80]
train_y = data_y[:80]
valid_x = data_x[80:90]
valid_y = data_y[80:90]
test_x = data_x[90:]
test_y = data_y[90:]
rval = [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)]
return rval
def load_cmuarctic():
# Check if dataset is in the data directory.
data_path = os.path.join(os.path.split(__file__)[0], "data")
if not os.path.exists(data_path):
os.makedirs(data_path)
urls = ['http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_awb_arctic-0.95-release.tar.bz2',
'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_bdl_arctic-0.95-release.tar.bz2',
'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_clb_arctic-0.95-release.tar.bz2',
'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_jmk_arctic-0.95-release.tar.bz2',
'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_ksp_arctic-0.95-release.tar.bz2',
'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_rms_arctic-0.95-release.tar.bz2',
'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_slt_arctic-0.95-release.tar.bz2',
]
data_files = []
for url in urls:
dataset = url.split('/')[-1]
data_file = os.path.join(data_path, dataset)
data_files.append(data_file)
if os.path.isfile(data_file):
dataset = data_file
if not os.path.isfile(data_file):
try:
import urllib
urllib.urlretrieve('http://google.com')
except AttributeError:
import urllib.request as urllib
print('Downloading data from %s' % url)
urllib.urlretrieve(url, data_file)
print('... loading data')
folder_paths = []
for data_file in data_files:
folder_name = data_file.split(os.sep)[-1].split("-")[0]
folder_path = os.path.join(data_path, folder_name)
folder_paths.append(folder_path)
if not os.path.exists(folder_path):
tar = tarfile.open(data_file)
os.chdir(data_path)
tar.extractall()
tar.close()
h5_file_path = os.path.join(data_path, "saved_cmu.h5")
if not os.path.exists(h5_file_path):
# http://mail.scipy.org/pipermail/numpy-discussion/2011-March/055219.html
h5_file = tables.openFile(h5_file_path, mode='w')
data_x = h5_file.createVLArray(h5_file.root, 'data_x',
tables.Float32Atom(shape=()),
filters=tables.Filters(1))
data_x_shapes = h5_file.createVLArray(h5_file.root, 'data_x_shapes',
tables.Int32Atom(shape=()),
filters=tables.Filters(1))
data_y = h5_file.createVLArray(h5_file.root, 'data_y',
tables.Int32Atom(shape=()),
filters=tables.Filters(1))
data_meta = h5_file.createVLArray(h5_file.root, 'data_meta',
tables.StringAtom(200),
filters=tables.Filters(1))
for folder_path in folder_paths:
audio_matches = []
for root, dirnames, filenames in os.walk(folder_path):
for filename in fnmatch.filter(filenames, '*.wav'):
audio_matches.append(os.path.join(root, filename))
f = open(os.path.join(folder_path, "etc", "txt.done.data"))
read_raw_text = f.readlines()
f.close()
# Remove all punctuations
list_text = [t.strip().lower().translate(
string.maketrans("", ""), string.punctuation).split(" ")[1:-1]
for t in read_raw_text]
# Get rid of numbers, even though it will probably hurt
# recognition on certain examples
cleaned_lookup = {lt[0]: " ".join(lt[1:]).translate(
None, string.digits).strip() for lt in list_text}
data_meta.append(folder_path.split(os.sep)[-1])
for wav_path in audio_matches:
lookup_key = wav_path.split(os.sep)[-1][:-4]
# Some files aren't consistent!
if "_" in cleaned_lookup.keys()[0] and "_" not in lookup_key:
# Needs an _ to match text format... sometimes!
lookup_key = lookup_key[:6] + "_" + lookup_key[6:]
elif "_" not in cleaned_lookup.keys()[0]:
lookup_key = lookup_key.translate(None, "_")
try:
words = cleaned_lookup[lookup_key]
# Convert chars to int classes
chars = [ord(c) - 97 for c in words]
# Make spaces last class
chars = [c if c >= 0 else 26 for c in chars]
data_y.append(np.array(chars, dtype='int32'))
# Convert chars to int classes
fs, d = wavfile.read(wav_path)
# Preprocessing from A. Graves "Towards End-to-End Speech
# Recognition"
Pxx, _, _, _ = plt.specgram(d, NFFT=256, noverlap=128)
data_x_shapes.append(np.array(Pxx.T.shape, dtype='int32'))
data_x.append(Pxx.T.astype('float32').flatten())
except KeyError:
# Necessary because some labels are missing in some folders
print("Skipping %s due to missing key" % wav_path)
h5_file.close()
h5_file = tables.openFile(h5_file_path, mode='r')
data_x = h5_file.root.data_x
data_x_shapes = h5_file.root.data_x_shapes
data_y = h5_file.root.data_y
# A dirty hack to only monkeypatch data_x
data_x.__class__ = _cVLArray
# override getter so that it gets reshaped to 2D when fetched
old_getter = data_x.__getitem__
def getter(self, key):
if isinstance(key, numbers.Integral) or isinstance(key, np.integer):
return old_getter(key).reshape(data_x_shapes[key]).astype(
theano.config.floatX)
elif isinstance(key, slice):
start, stop, step = self._processRange(key.start, key.stop,
key.step)
return [o.reshape(s) for o, s in zip(
self.read(start, stop, step), data_x_shapes[slice(
start, stop, step)])]
# Patch __getitem__ in custom subclass, applying to all instances of it
_cVLArray.__getitem__ = getter
train_x = data_x[:6000]
train_y = data_y[:6000]
valid_x = data_x[6000:7500]
valid_y = data_y[6000:7500]
test_x = data_x[7500:]
test_y = data_y[7500:]
rval = [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)]
return rval
def load_librispeech():
# Check if dataset is in the data directory.
data_path = os.path.join(os.path.split(__file__)[0], "data")
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset = 'dev-clean.tar.gz'
data_file = os.path.join(data_path, dataset)
if os.path.isfile(data_file):
dataset = data_file
if not os.path.isfile(data_file):
try:
import urllib
urllib.urlretrieve('http://google.com')
url = 'http://www.openslr.org/resources/12/dev-clean.tar.gz'
except AttributeError:
import urllib.request as urllib
url = 'http://www.openslr.org/resources/12/dev-clean.tar.gz'
print('Downloading data from %s' % url)
urllib.urlretrieve(url, data_file)
print('... loading data')
if not os.path.exists(os.path.join(data_path, "LibriSpeech", "dev-clean")):
tar = tarfile.open(data_file)
os.chdir(data_path)
tar.extractall()
tar.close()
h5_file_path = os.path.join(data_path, "saved_libri.h5")
if not os.path.exists(h5_file_path):
data_path = os.path.join(data_path, "LibriSpeech", "dev-clean")
audio_matches = []
for root, dirnames, filenames in os.walk(data_path):
for filename in fnmatch.filter(filenames, '*.flac'):
audio_matches.append(os.path.join(root, filename))
text_matches = []
for root, dirnames, filenames in os.walk(data_path):
for filename in fnmatch.filter(filenames, '*.txt'):
text_matches.append(os.path.join(root, filename))
# http://mail.scipy.org/pipermail/numpy-discussion/2011-March/055219.html
h5_file = tables.openFile(h5_file_path, mode='w')
data_x = h5_file.createVLArray(h5_file.root, 'data_x',
tables.Float32Atom(shape=()),
filters=tables.Filters(1))
data_x_shapes = h5_file.createVLArray(h5_file.root, 'data_x_shapes',
tables.Int32Atom(shape=()),
filters=tables.Filters(1))
data_y = h5_file.createVLArray(h5_file.root, 'data_y',
tables.Int32Atom(shape=()),
filters=tables.Filters(1))
for full_t in text_matches:
f = open(full_t, 'r')
for line in f.readlines():
word_splits = line.strip().split(" ")
file_tag = word_splits[0]
words = word_splits[1:]
# Convert chars to int classes
chars = [ord(c) - 97 for c in (" ").join(words).lower()]
# Make spaces last class
chars = [c if c >= 0 else 26 for c in chars]
data_y.append(np.array(chars, dtype='int32'))
audio_path = [a for a in audio_matches if file_tag in a]
if len(audio_path) != 1:
raise ValueError("More than one match for"
"tag %s!" % file_tag)
if not os.path.exists(audio_path[0][:-5] + ".wav"):
r = os.system("ffmpeg -i %s %s.wav" % (audio_path[0],
audio_path[0][:-5]))
if r:
raise ValueError("A problem occured converting flac to"
"wav, make sure ffmpeg is installed")
wav_path = audio_path[0][:-5] + '.wav'
fs, d = wavfile.read(wav_path)
# Preprocessing from A. Graves "Towards End-to-End Speech
# Recognition"
Pxx, _, _, _ = plt.specgram(d, NFFT=256, noverlap=128)
data_x_shapes.append(np.array(Pxx.T.shape, dtype='int32'))
data_x.append(Pxx.T.astype('float32').flatten())
f.close()
h5_file.close()
h5_file_path = os.path.join(data_path, "saved_libri.h5")
h5_file = tables.openFile(h5_file_path, mode='r')
data_x = h5_file.root.data_x
data_x_shapes = h5_file.root.data_x_shapes
data_y = h5_file.root.data_y
# A dirty hack to only monkeypatch data_x
data_x.__class__ = _cVLArray
# override getter so that it gets reshaped to 2D when fetched
old_getter = data_x.__getitem__
def getter(self, key):
if isinstance(key, numbers.Integral) or isinstance(key, np.integer):
return old_getter(key).reshape(data_x_shapes[key]).astype(
theano.config.floatX)
elif isinstance(key, slice):
start, stop, step = self._processRange(key.start, key.stop,
key.step)
return [o.reshape(s) for o, s in zip(
self.read(start, stop, step), data_x_shapes[slice(
start, stop, step)])]
# Patch __getitem__ in custom subclass, applying to all instances of it
_cVLArray.__getitem__ = getter
train_x = data_x[:2000]
train_y = data_y[:2000]
valid_x = data_x[2000:2500]
valid_y = data_y[2000:2500]
test_x = data_x[2500:]
test_y = data_y[2500:]
rval = [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)]
return rval
"""
|
kastnerkyle/minet
|
minet/datasets.py
|
Python
|
bsd-3-clause
| 28,453
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import pytest
import numpy as np
import pyarrow as pa
def test_tensor_attrs():
data = np.random.randn(10, 4)
tensor = pa.Tensor.from_numpy(data)
assert tensor.ndim == 2
assert tensor.size == 40
assert tensor.shape == data.shape
assert tensor.strides == data.strides
assert tensor.is_contiguous
assert tensor.is_mutable
# not writeable
data2 = data.copy()
data2.flags.writeable = False
tensor = pa.Tensor.from_numpy(data2)
assert not tensor.is_mutable
def test_tensor_base_object():
tensor = pa.Tensor.from_numpy(np.random.randn(10, 4))
n = sys.getrefcount(tensor)
array = tensor.to_numpy() # noqa
assert sys.getrefcount(tensor) == n + 1
@pytest.mark.parametrize('dtype_str,arrow_type', [
('i1', pa.int8()),
('i2', pa.int16()),
('i4', pa.int32()),
('i8', pa.int64()),
('u1', pa.uint8()),
('u2', pa.uint16()),
('u4', pa.uint32()),
('u8', pa.uint64()),
('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())
])
def test_tensor_numpy_roundtrip(dtype_str, arrow_type):
dtype = np.dtype(dtype_str)
data = (100 * np.random.randn(10, 4)).astype(dtype)
tensor = pa.Tensor.from_numpy(data)
assert tensor.type == arrow_type
repr(tensor)
result = tensor.to_numpy()
assert (data == result).all()
def _try_delete(path):
import gc
gc.collect()
try:
os.remove(path)
except os.error:
pass
def test_tensor_ipc_roundtrip(tmpdir):
data = np.random.randn(10, 4)
tensor = pa.Tensor.from_numpy(data)
path = os.path.join(str(tmpdir), 'pyarrow-tensor-ipc-roundtrip')
mmap = pa.create_memory_map(path, 1024)
pa.write_tensor(tensor, mmap)
mmap.seek(0)
result = pa.read_tensor(mmap)
assert result.equals(tensor)
def test_tensor_ipc_strided(tmpdir):
data1 = np.random.randn(10, 4)
tensor1 = pa.Tensor.from_numpy(data1[::2])
data2 = np.random.randn(10, 6, 4)
tensor2 = pa.Tensor.from_numpy(data2[::, ::2, ::])
path = os.path.join(str(tmpdir), 'pyarrow-tensor-ipc-strided')
mmap = pa.create_memory_map(path, 2048)
for tensor in [tensor1, tensor2]:
mmap.seek(0)
pa.write_tensor(tensor, mmap)
mmap.seek(0)
result = pa.read_tensor(mmap)
assert result.equals(tensor)
def test_tensor_equals():
def eq(a, b):
assert a.equals(b)
assert a == b
assert not (a != b)
def ne(a, b):
assert not a.equals(b)
assert not (a == b)
assert a != b
data = np.random.randn(10, 6, 4)[::, ::2, ::]
tensor1 = pa.Tensor.from_numpy(data)
tensor2 = pa.Tensor.from_numpy(np.ascontiguousarray(data))
eq(tensor1, tensor2)
data = data.copy()
data[9, 0, 0] = 1.0
tensor2 = pa.Tensor.from_numpy(np.ascontiguousarray(data))
ne(tensor1, tensor2)
def test_tensor_hashing():
# Tensors are unhashable
with pytest.raises(TypeError, match="unhashable"):
hash(pa.Tensor.from_numpy(np.arange(10)))
def test_tensor_size():
data = np.random.randn(10, 4)
tensor = pa.Tensor.from_numpy(data)
assert pa.get_tensor_size(tensor) > (data.size * 8)
def test_read_tensor(tmpdir):
    # Create and write tensor
data = np.random.randn(10, 4)
tensor = pa.Tensor.from_numpy(data)
data_size = pa.get_tensor_size(tensor)
path = os.path.join(str(tmpdir), 'pyarrow-tensor-ipc-read-tensor')
write_mmap = pa.create_memory_map(path, data_size)
pa.write_tensor(tensor, write_mmap)
# Try to read tensor
read_mmap = pa.memory_map(path, mode='r')
array = pa.read_tensor(read_mmap).to_numpy()
np.testing.assert_equal(data, array)
|
yufeldman/arrow
|
python/pyarrow/tests/test_tensor.py
|
Python
|
apache-2.0
| 4,521
|
import gym
from gym.spaces import Box, Discrete
import numpy as np
from typing import Dict, List, Optional
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils import force_list
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.spaces.simplex import Simplex
from ray.rllib.utils.typing import ModelConfigDict, TensorType
torch, nn = try_import_torch()
class SACTorchModel(TorchModelV2, nn.Module):
"""Extension of the standard TorchModelV2 for SAC.
To customize, do one of the following:
- sub-class SACTorchModel and override one or more of its methods.
    - Use SAC's `Q_model` and `policy_model` keys to tweak the default model
      behaviors (e.g. fcnet_hiddens, conv_filters, etc.).
    - Use SAC's `Q_model->custom_model` and `policy_model->custom_model` keys
      to specify your own custom Q-model(s) and policy-models, which will be
      created within this SACTorchModel (see `build_policy_model` and
      `build_q_model`).
Note: It is not recommended to override the `forward` method for SAC. This
would lead to shared weights (between policy and Q-nets), which will then
not be optimized by either of the critic- or actor-optimizers!
Data flow:
`obs` -> forward() (should stay a noop method!) -> `model_out`
`model_out` -> get_policy_output() -> pi(actions|obs)
`model_out`, `actions` -> get_q_values() -> Q(s, a)
`model_out`, `actions` -> get_twin_q_values() -> Q_twin(s, a)
"""
def __init__(self,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
num_outputs: Optional[int],
model_config: ModelConfigDict,
name: str,
policy_model_config: ModelConfigDict = None,
q_model_config: ModelConfigDict = None,
twin_q: bool = False,
initial_alpha: float = 1.0,
target_entropy: Optional[float] = None):
"""Initializes a SACTorchModel instance.
Args:
policy_model_config (ModelConfigDict): The config dict for the
policy network.
q_model_config (ModelConfigDict): The config dict for the
Q-network(s) (2 if twin_q=True).
            twin_q (bool): Build two Q-networks (a Q-net and a twin Q-net)
                for more stable Q-learning.
initial_alpha (float): The initial value for the to-be-optimized
alpha parameter (default: 1.0).
target_entropy (Optional[float]): A target entropy value for
the to-be-optimized alpha parameter. If None, will use the
defaults described in the papers for SAC (and discrete SAC).
        Note that the core layers for forward() are not defined here; this
        class only defines the layers for the output heads. Those layers for
        forward() should be defined in subclasses of SACTorchModel.
"""
nn.Module.__init__(self)
super(SACTorchModel, self).__init__(obs_space, action_space,
num_outputs, model_config, name)
if isinstance(action_space, Discrete):
self.action_dim = action_space.n
self.discrete = True
action_outs = q_outs = self.action_dim
elif isinstance(action_space, Box):
self.action_dim = np.product(action_space.shape)
self.discrete = False
action_outs = 2 * self.action_dim
q_outs = 1
else:
assert isinstance(action_space, Simplex)
self.action_dim = np.product(action_space.shape)
self.discrete = False
action_outs = self.action_dim
q_outs = 1
# Build the policy network.
self.action_model = self.build_policy_model(
self.obs_space, action_outs, policy_model_config, "policy_model")
# Build the Q-network(s).
self.q_net = self.build_q_model(self.obs_space, self.action_space,
q_outs, q_model_config, "q")
if twin_q:
self.twin_q_net = self.build_q_model(self.obs_space,
self.action_space, q_outs,
q_model_config, "twin_q")
else:
self.twin_q_net = None
log_alpha = nn.Parameter(
torch.from_numpy(np.array([np.log(initial_alpha)])).float())
self.register_parameter("log_alpha", log_alpha)
# Auto-calculate the target entropy.
if target_entropy is None or target_entropy == "auto":
# See hyperparams in [2] (README.md).
if self.discrete:
target_entropy = 0.98 * np.array(
-np.log(1.0 / action_space.n), dtype=np.float32)
# See [1] (README.md).
else:
target_entropy = -np.prod(action_space.shape)
self.target_entropy = torch.tensor(
data=[target_entropy], dtype=torch.float32, requires_grad=False)
@override(TorchModelV2)
def forward(self, input_dict: Dict[str, TensorType],
state: List[TensorType],
seq_lens: TensorType) -> (TensorType, List[TensorType]):
"""The common (Q-net and policy-net) forward pass.
NOTE: It is not(!) recommended to override this method as it would
introduce a shared pre-network, which would be updated by both
actor- and critic optimizers.
"""
return input_dict["obs"], state
def build_policy_model(self, obs_space, num_outputs, policy_model_config,
name):
"""Builds the policy model used by this SAC.
        Override this method in a sub-class of SACTorchModel to implement your
own policy net. Alternatively, simply set `custom_model` within the
top level SAC `policy_model` config key to make this default
implementation of `build_policy_model` use your custom policy network.
Returns:
TorchModelV2: The TorchModelV2 policy sub-model.
"""
model = ModelCatalog.get_model_v2(
obs_space,
self.action_space,
num_outputs,
policy_model_config,
framework="torch",
name=name)
return model
def build_q_model(self, obs_space, action_space, num_outputs,
q_model_config, name):
"""Builds one of the (twin) Q-nets used by this SAC.
        Override this method in a sub-class of SACTorchModel to implement your
own Q-nets. Alternatively, simply set `custom_model` within the
top level SAC `Q_model` config key to make this default implementation
of `build_q_model` use your custom Q-nets.
Returns:
TorchModelV2: The TorchModelV2 Q-net sub-model.
"""
self.concat_obs_and_actions = False
if self.discrete:
input_space = obs_space
else:
orig_space = getattr(obs_space, "original_space", obs_space)
if isinstance(orig_space, Box) and len(orig_space.shape) == 1:
input_space = Box(
float("-inf"),
float("inf"),
shape=(orig_space.shape[0] + action_space.shape[0], ))
self.concat_obs_and_actions = True
else:
if isinstance(orig_space, gym.spaces.Tuple):
spaces = orig_space.spaces
elif isinstance(orig_space, gym.spaces.Dict):
spaces = list(orig_space.spaces.values())
else:
spaces = [obs_space]
input_space = gym.spaces.Tuple(spaces + [action_space])
model = ModelCatalog.get_model_v2(
input_space,
action_space,
num_outputs,
q_model_config,
framework="torch",
name=name)
return model
def get_q_values(self,
model_out: TensorType,
actions: Optional[TensorType] = None) -> TensorType:
"""Returns Q-values, given the output of self.__call__().
This implements Q(s, a) -> [single Q-value] for the continuous case and
Q(s) -> [Q-values for all actions] for the discrete case.
Args:
model_out (TensorType): Feature outputs from the model layers
(result of doing `self.__call__(obs)`).
actions (Optional[TensorType]): Continuous action batch to return
Q-values for. Shape: [BATCH_SIZE, action_dim]. If None
(discrete action case), return Q-values for all actions.
Returns:
            TensorType: Q-values tensor of shape [BATCH_SIZE, 1] in the
                continuous case, or [BATCH_SIZE, action_dim] in the discrete
                case.
"""
return self._get_q_value(model_out, actions, self.q_net)
def get_twin_q_values(self,
model_out: TensorType,
actions: Optional[TensorType] = None) -> TensorType:
"""Same as get_q_values but using the twin Q net.
This implements the twin Q(s, a).
Args:
model_out (TensorType): Feature outputs from the model layers
(result of doing `self.__call__(obs)`).
actions (Optional[Tensor]): Actions to return the Q-values for.
Shape: [BATCH_SIZE, action_dim]. If None (discrete action
case), return Q-values for all actions.
Returns:
            TensorType: Q-values tensor of shape [BATCH_SIZE, 1] in the
                continuous case, or [BATCH_SIZE, action_dim] in the discrete
                case.
"""
return self._get_q_value(model_out, actions, self.twin_q_net)
def _get_q_value(self, model_out, actions, net):
# Model outs may come as original Tuple observations, concat them
# here if this is the case.
if isinstance(net.obs_space, Box):
if isinstance(model_out, (list, tuple)):
model_out = torch.cat(model_out, dim=-1)
elif isinstance(model_out, dict):
model_out = torch.cat(list(model_out.values()), dim=-1)
elif isinstance(model_out, dict):
model_out = list(model_out.values())
# Continuous case -> concat actions to model_out.
if actions is not None:
if self.concat_obs_and_actions:
input_dict = {"obs": torch.cat([model_out, actions], dim=-1)}
else:
input_dict = {"obs": force_list(model_out) + [actions]}
# Discrete case -> return q-vals for all actions.
else:
input_dict = {"obs": model_out}
# Switch on training mode (when getting Q-values, we are usually in
# training).
input_dict["is_training"] = True
out, _ = net(input_dict, [], None)
return out
def get_policy_output(self, model_out: TensorType) -> TensorType:
"""Returns policy outputs, given the output of self.__call__().
For continuous action spaces, these will be the mean/stddev
distribution inputs for the (SquashedGaussian) action distribution.
For discrete action spaces, these will be the logits for a categorical
distribution.
Args:
model_out (TensorType): Feature outputs from the model layers
(result of doing `self.__call__(obs)`).
Returns:
TensorType: Distribution inputs for sampling actions.
"""
# Model outs may come as original Tuple observations, concat them
# here if this is the case.
if isinstance(self.action_model.obs_space, Box):
if isinstance(model_out, (list, tuple)):
model_out = torch.cat(model_out, dim=-1)
elif isinstance(model_out, dict):
model_out = torch.cat(list(model_out.values()), dim=-1)
out, _ = self.action_model({"obs": model_out}, [], None)
return out
def policy_variables(self):
"""Return the list of variables for the policy net."""
return self.action_model.variables()
def q_variables(self):
"""Return the list of variables for Q / twin Q nets."""
return self.q_net.variables() + (self.twin_q_net.variables()
if self.twin_q_net else [])
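# Illustrative sketch (not part of the upstream module): how the data flow
# described in the class docstring plays out. The spaces, model config and
# batch variables below are assumptions for demonstration only, mirroring the
# in-file call convention `net(input_dict, [], None)`.
#
#   obs_space = Box(-1.0, 1.0, shape=(4,))
#   act_space = Box(-1.0, 1.0, shape=(2,))
#   model = SACTorchModel(obs_space, act_space, None,
#                         {"fcnet_hiddens": [256, 256]}, "sac_model",
#                         twin_q=True)
#   model_out, _ = model({"obs": obs_batch}, [], None)  # no-op forward()
#   dist_inputs = model.get_policy_output(model_out)    # pi(actions|obs)
#   q_vals = model.get_q_values(model_out, actions)     # Q(s, a)
#   twin_q_vals = model.get_twin_q_values(model_out, actions)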
|
pcmoritz/ray-1
|
rllib/agents/sac/sac_torch_model.py
|
Python
|
apache-2.0
| 12,453
|
from jno.commands.command import Command
from jno.commands.setdefault import SetDefault
from jno.commands.init import Init
from jno.commands.jnoserial import JnoSerial
from jno.commands.build import Build
from jno.commands.upload import Upload
from jno.commands.boards import Boards
from jno.commands.ports import Ports
from jno.commands.clean import Clean
from jno.util import formatted_help_string, JnoException
import os
from colorama import Fore
command_list = [Init, Build, Upload, JnoSerial, Boards, Ports, Clean, SetDefault]
class JnoHelp(Command):
help_name = "Help"
help_usage = "jno help [command_name]"
help_description = "Without arguments, prints usage and description for all supported commands. With a command name supplied, prints usage and description for specific command."
def run(self,argv,location):
if len(argv) > 0:
query_name = argv[-1]
found_command = None
if query_name in ["setlocal", "setglobal"]:
found_command = SetDefault
else:
for command in command_list:
if command.help_name.lower() == query_name.lower():
found_command = command
break
if not found_command:
raise JnoException("help for command '{}' cannot be displayed'; command not found".format(query_name))
print(formatted_help_string(found_command,surround=True))
return
print(Fore.CYAN+"======================")
for command in command_list:
print(formatted_help_string(command))
print(Fore.CYAN+"----------------------")
print(formatted_help_string(self))
print(Fore.CYAN+"======================"+Fore.RESET)
|
Kosinkadink/jno
|
jno/commands/jnohelp.py
|
Python
|
mit
| 1,575
|
#!/usr/bin/env python
from distutils.core import setup
from stringlike import __version__
setup(
name='stringlike',
packages=['stringlike'],
version=__version__,
description='Classes for mimicking string behavior',
author='Elliot Cameron',
author_email='elliot.cameron@covenanteyes.com',
url='https://github.com/CovenantEyes/py_stringlike',
download_url='https://github.com/CovenantEyes/py_stringlike/tarball/v' + __version__,
keywords=['string', 'lazy'],
platforms=['any'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
long_description="""
String-Like Classes
-------------------
Provides a ``StringLike`` class that adds the behavior of Python's built-in
``string`` to its children. This is useful when you want to implement a class
that behaves like a string but encapsulates some additional functionality
that a normal string doesn't provide.
Additionally provides ``LazyString`` and ``CachedLazyString`` classes which
behave exactly like strings but allow strings to be constructed in a thunk
(i.e. lazily) instead of strictly (i.e. immediately).
An example of how it can be used:
http://developer.covenanteyes.com/stringlike-in-python/"""
)
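# Illustrative usage sketch (not part of setup.py); the `stringlike.lazy`
# module path is an assumption based on the description above:
#
#   from stringlike.lazy import LazyString
#   greeting = LazyString(lambda: expensive_render())  # nothing computed yet
#   print(greeting.upper())  # expensive_render() runs on first string use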
|
CovenantEyes/py_stringlike
|
setup.py
|
Python
|
mit
| 1,706
|
from flask import Blueprint, url_for, redirect, flash
from flask_login import LoginManager, login_required, \
logout_user
from spacewiki import model
from spacewiki.auth import tripcodes
import logging
LOGIN_MANAGER = LoginManager()
@LOGIN_MANAGER.user_loader
def load_user(user_id):
logging.debug("Loading logged in user %s", user_id)
return model.Identity.get_from_id(user_id)
LOGIN_MANAGER.anonymous_user = tripcodes.new_anon_user
BLUEPRINT = Blueprint('auth', __name__)
@BLUEPRINT.route('/logout')
@login_required
def logout():
logout_user()
flash('Logged out.')
return redirect(url_for('pages.view'))
|
spacewiki/spacewiki
|
spacewiki/auth/__init__.py
|
Python
|
agpl-3.0
| 639
|
#!/usr/bin/env python3
'''
Configure AntiVirus profiles
Method
https://<DEVICE_IP>/api/v2/cmdb/antivirus/profile
https://<DEVICE_IP>/api/v2/cmdb/antivirus/profile?filter=name==default
CLI
FG # sh antivirus profile default
config antivirus profile
edit "default"
set comment "Scan files and block viruses."
config http
set options scan
end
config ftp
set options scan
end
config imap
set options scan
set executables virus
end
config pop3
set options scan
set executables virus
end
config smtp
set options scan
set executables virus
end
next
end
Response
{
"http_method":"GET",
"revision":"1.0.3.9539865665020678008.1535505187",
"results":[
{
"q_origin_key":"default",
"name":"default",
"comment":"Scan files and block viruses.",
"replacemsg-group":"",
"inspection-mode":"flow-based",
"ftgd-analytics":"disable",
"analytics-max-upload":10,
"analytics-wl-filetype":0,
"analytics-bl-filetype":0,
"analytics-db":"disable",
"mobile-malware-db":"enable",
"http":{
"options":"scan",
"archive-block":"",
"archive-log":"",
"emulator":"enable",
"outbreak-prevention":"disabled",
"content-disarm":"disable"
},
"ftp":{
"options":"scan",
"archive-block":"",
"archive-log":"",
"emulator":"enable",
"outbreak-prevention":"disabled"
},
"imap":{
"options":"scan",
"archive-block":"",
"archive-log":"",
"emulator":"enable",
"executables":"virus",
"outbreak-prevention":"disabled",
"content-disarm":"disable"
},
"pop3":{
"options":"scan",
"archive-block":"",
"archive-log":"",
"emulator":"enable",
"executables":"virus",
"outbreak-prevention":"disabled",
"content-disarm":"disable"
},
"smtp":{
"options":"scan",
"archive-block":"",
"archive-log":"",
"emulator":"enable",
"executables":"virus",
"outbreak-prevention":"disabled",
"content-disarm":"disable"
},
"mapi":{
"options":"",
"archive-block":"",
"archive-log":"",
"emulator":"enable",
"executables":"default",
"outbreak-prevention":"disabled"
},
"nntp":{
"options":"",
"archive-block":"",
"archive-log":"",
"emulator":"enable",
"outbreak-prevention":"disabled"
},
"smb":{
"options":"",
"archive-block":"",
"archive-log":"",
"emulator":"enable",
"outbreak-prevention":"disabled"
},
"nac-quar":{
"infected":"none",
"expiry":"5m",
"log":"disable"
},
"content-disarm":{
"original-file-destination":"discard",
"office-macro":"enable",
"office-hylink":"enable",
"office-linked":"enable",
"office-embed":"enable",
"pdf-javacode":"enable",
"pdf-embedfile":"enable",
"pdf-hyperlink":"enable",
"pdf-act-gotor":"enable",
"pdf-act-launch":"enable",
"pdf-act-sound":"enable",
"pdf-act-movie":"enable",
"pdf-act-java":"enable",
"pdf-act-form":"enable",
"cover-page":"enable",
"detect-only":"disable"
},
"av-virus-log":"enable",
"av-block-log":"enable",
"extended-log":"disable",
"scan-mode":"full"
}
],
"vdom":"root",
"path":"antivirus",
"name":"profile",
"status":"success",
"http_status":200,
"serial":"FGVM020000000000",
"version":"v6.0.0",
"build":76
}
'''
from fortiosapi import FortiOSAPI
from pprint import pprint
fgt = FortiOSAPI()
device = {
'host': '10.99.236.231',
'username': 'admin',
'password': '',
}
fgt.login(**device)
profile_name = 'default'
filter = 'filter=name==' + profile_name
out = fgt.get('antivirus', 'profile', parameters=filter)
pprint(out)
fgt.logout()
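# Illustrative follow-up (not part of the original script): updating the same
# profile via FortiOSAPI's set(); the exact keyword arguments are assumptions,
# and the payload keys mirror the response structure shown above.
#
#   fgt.login(**device)
#   payload = {'name': profile_name,
#              'comment': 'Scan files and block viruses.'}
#   out = fgt.set('antivirus', 'profile', mkey=profile_name, data=payload)
#   pprint(out)
#   fgt.logout()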
|
barbosm/gatepy
|
examples/cmdb_antivirus_profile.py
|
Python
|
gpl-2.0
| 4,491
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NetProfile: Bills module - Views
# Copyright © 2017 Alex Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (unicode_literals, print_function,
absolute_import, division)
from pyramid.i18n import TranslationStringFactory
from netprofile.common.hooks import register_hook
from netprofile.db.connection import DBSession
from .models import Bill
_ = TranslationStringFactory('netprofile_bills')
@register_hook('core.dpanetabs.bills.BillType')
def _dpane_billtype_bills(tabs, model, req):
loc = req.localizer
if req.has_permission('BILLS_LIST'):
tabs.append({
'title': loc.translate(_('Bills')),
'iconCls': 'ico-mod-bill',
'xtype': 'grid_bills_Bill',
'stateId': None,
'stateful': False,
'hideColumns': ('type',),
'extraParamProp': 'btypeid',
'createControllers': 'NetProfile.core.controller.RelatedWizard'
})
@register_hook('core.dpanetabs.entities.Entity')
@register_hook('core.dpanetabs.entities.PhysicalEntity')
@register_hook('core.dpanetabs.entities.LegalEntity')
@register_hook('core.dpanetabs.entities.StructuralEntity')
@register_hook('core.dpanetabs.entities.ExternalEntity')
def _dpane_entity_bills(tabs, model, req):
loc = req.localizer
if req.has_permission('BILLS_LIST'):
tabs.append({
'title': loc.translate(_('Bills')),
'iconCls': 'ico-mod-bill',
'xtype': 'grid_bills_Bill',
'stateId': None,
'stateful': False,
'hideColumns': ('entity',),
'extraParamProp': 'entityid',
'createControllers': 'NetProfile.core.controller.RelatedWizard'
})
@register_hook('documents.gen.object')
def _doc_gen_obj(tpl_vars, objid, objtype, req):
if objtype != 'bill':
return
obj = DBSession().query(Bill).get(objid)
if not obj:
return
mr = req.matched_route
if mr and mr.name and mr.name.startswith('documents.generate'):
tpl_vars.update({'bill': obj})
else:
v = obj.template_vars(req)
if v:
tpl_vars.update({'bill': v})
|
unikmhz/npui
|
netprofile_bills/netprofile_bills/views.py
|
Python
|
agpl-3.0
| 3,005
|
from gi.repository import Gtk
from forecastmgmt.ui.masterdata_mask import MasterdataMask
from forecastmgmt.ui.forecast_mask import ForecastMask
from forecastmgmt.ui.publication_mask import PublicationMask
class MainWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Forecaster")
self.set_size_request(800,600)
# The main area, grid
self.grid = Gtk.Grid()
self.grid.set_orientation(Gtk.Orientation.VERTICAL)
self.add(self.grid)
menubar=self.create_menubar()
self.grid.add(menubar)
toolbar=self.create_toolbar()
self.grid.add(toolbar)
self.working_area=Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.grid.add(self.working_area)
self.set_working_area("forecast")
self.create_status_bar()
#self.grid.add(self.statusbar)
def create_status_bar(self):
self.statusbar = Gtk.Statusbar()
self.statusbar.add(Gtk.Label("statusbar"))
def create_toolbar(self):
toolbar=Gtk.Toolbar()
toolbutton_forecast=Gtk.ToolButton(Gtk.STOCK_ABOUT)
toolbutton_forecast.set_tooltip_text("forecast")
toolbutton_forecast.connect("clicked", self.on_toolbutton_forecast)
toolbar.add(toolbutton_forecast)
toolbutton_publications=Gtk.ToolButton(Gtk.STOCK_EDIT)
toolbutton_publications.set_tooltip_text("publications")
toolbutton_publications.connect("clicked", self.on_toolbutton_publication)
toolbar.add(toolbutton_publications)
toolbutton_master_data=Gtk.ToolButton(Gtk.STOCK_EXECUTE)
toolbutton_master_data.set_tooltip_text("master data")
toolbutton_master_data.connect("clicked", self.on_toolbutton_masterdata)
toolbar.add(toolbutton_master_data)
toolbutton_quit=Gtk.ToolButton(Gtk.STOCK_QUIT)
toolbutton_quit.set_tooltip_text("quit")
toolbutton_quit.connect("clicked", self.on_menu_file_quit)
toolbar.add(toolbutton_quit)
return toolbar
def create_menubar(self):
menubar = Gtk.MenuBar()
file_menu_entry = Gtk.MenuItem("File")
menu = Gtk.Menu()
mitem_quit = Gtk.MenuItem("Quit")
mitem_quit.connect("activate", self.on_menu_file_quit)
menu.insert(mitem_quit, 0)
file_menu_entry.set_submenu(menu)
menubar.append(file_menu_entry)
return menubar
def clean_working_area(self):
for child in self.working_area.get_children():
self.working_area.remove(child)
def set_working_area(self, action="masterdata"):
self.clean_working_area()
if action=="masterdata":
self.working_area.pack_start(MasterdataMask(self), False, False, 0)
elif action=="forecast":
self.working_area.pack_start(ForecastMask(self), False, False, 0)
elif action=="publication":
self.working_area.pack_start(PublicationMask(self), False, False, 0)
else:
print("unimplemented")
self.working_area.show_all()
def on_menu_file_quit(self, widget):
Gtk.main_quit()
def on_toolbutton_masterdata(self, widget):
self.set_working_area("masterdata")
def on_toolbutton_forecast(self, widget):
self.set_working_area("forecast")
def on_toolbutton_publication(self, widget):
self.set_working_area("publication")
win = MainWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
vvladych/forecastmgmt
|
src/forecastmgmt.py
|
Python
|
unlicense
| 3,741
|
# #
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Easyconfig module that provides functionality for dealing with easyconfig (.eb) files,
alongside the EasyConfig class to represent parsed easyconfig files.
:author: Stijn De Weirdt (Ghent University)
:author: Dries Verdegem (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Pieter De Baets (Ghent University)
:author: Jens Timmerman (Ghent University)
:author: Toon Willems (Ghent University)
:author: Fotis Georgatos (Uni.Lu, NTUA)
:author: Ward Poelmans (Ghent University)
"""
import copy
import glob
import os
import re
import sys
import tempfile
from distutils.version import LooseVersion
from vsc.utils import fancylogger
from easybuild.framework.easyconfig import EASYCONFIGS_PKG_SUBDIR
from easybuild.framework.easyconfig.easyconfig import EASYCONFIGS_ARCHIVE_DIR, ActiveMNS, EasyConfig
from easybuild.framework.easyconfig.easyconfig import create_paths, get_easyblock_class, process_easyconfig
from easybuild.framework.easyconfig.format.yeb import quote_yaml_special_chars
from easybuild.tools.build_log import EasyBuildError, print_msg
from easybuild.tools.config import build_option
from easybuild.tools.environment import restore_env
from easybuild.tools.filetools import find_easyconfigs, is_patch_file, which, write_file
from easybuild.tools.github import fetch_easyconfigs_from_pr, download_repo
from easybuild.tools.modules import modules_tool
from easybuild.tools.multidiff import multidiff
from easybuild.tools.ordereddict import OrderedDict
from easybuild.tools.toolchain import DUMMY_TOOLCHAIN_NAME
from easybuild.tools.utilities import only_if_module_is_available, quote_str
from easybuild.tools.version import VERSION as EASYBUILD_VERSION
# optional Python packages, these might be missing
# failing imports are just ignored
# a NameError should be caught where these are used
try:
# PyGraph (used for generating dependency graphs)
# https://pypi.python.org/pypi/python-graph-core
from pygraph.classes.digraph import digraph
# https://pypi.python.org/pypi/python-graph-dot
import pygraph.readwrite.dot as dot
# graphviz (used for creating dependency graph images)
sys.path.append('..')
sys.path.append('/usr/lib/graphviz/python/')
sys.path.append('/usr/lib64/graphviz/python/')
# https://pypi.python.org/pypi/pygraphviz
# graphviz-python (yum) or python-pygraphviz (apt-get)
# or brew install graphviz --with-bindings (OS X)
import gv
except ImportError:
pass
_log = fancylogger.getLogger('easyconfig.tools', fname=False)
def skip_available(easyconfigs, modtool):
"""Skip building easyconfigs for existing modules."""
module_names = [ec['full_mod_name'] for ec in easyconfigs]
modules_exist = modtool.exist(module_names)
retained_easyconfigs = []
for ec, mod_name, mod_exists in zip(easyconfigs, module_names, modules_exist):
if mod_exists:
_log.info("%s is already installed (module found), skipping" % mod_name)
else:
_log.debug("%s is not installed yet, so retaining it" % mod_name)
retained_easyconfigs.append(ec)
return retained_easyconfigs
def find_resolved_modules(easyconfigs, avail_modules, modtool, retain_all_deps=False):
"""
Find easyconfigs in 1st argument which can be fully resolved using modules specified in 2nd argument
:param easyconfigs: list of parsed easyconfigs
:param avail_modules: list of available modules
:param retain_all_deps: retain all dependencies, regardless of whether modules are available for them or not
"""
ordered_ecs = []
new_easyconfigs = []
    # copy, we don't want to modify the original list of available modules
avail_modules = avail_modules[:]
_log.debug("Finding resolved modules for %s (available modules: %s)", easyconfigs, avail_modules)
ec_mod_names = [ec['full_mod_name'] for ec in easyconfigs]
for easyconfig in easyconfigs:
if isinstance(easyconfig, EasyConfig):
easyconfig._config = copy.copy(easyconfig._config)
else:
easyconfig = easyconfig.copy()
deps = []
for dep in easyconfig['dependencies']:
dep_mod_name = dep.get('full_mod_name', ActiveMNS().det_full_module_name(dep))
# treat external modules as resolved when retain_all_deps is enabled (e.g., under --dry-run),
# since no corresponding easyconfig can be found for them
if retain_all_deps and dep.get('external_module', False):
_log.debug("Treating dependency marked as external dependency as resolved: %s", dep_mod_name)
elif retain_all_deps and dep_mod_name not in avail_modules:
                # if all dependencies should be retained, include dep unless
                # it has already been included
_log.debug("Retaining new dep %s in 'retain all deps' mode", dep_mod_name)
deps.append(dep)
# retain dep if it is (still) in the list of easyconfigs
elif dep_mod_name in ec_mod_names:
_log.debug("Dep %s is (still) in list of easyconfigs, retaining it", dep_mod_name)
deps.append(dep)
# retain dep if corresponding module is not available yet;
# fallback to checking with modtool.exist is required,
# for hidden modules and external modules where module name may be partial
elif dep_mod_name not in avail_modules and not modtool.exist([dep_mod_name], skip_avail=True)[0]:
# no module available (yet) => retain dependency as one to be resolved
_log.debug("No module available for dep %s, retaining it", dep)
deps.append(dep)
# update list of dependencies with only those unresolved
easyconfig['dependencies'] = deps
# if all dependencies have been resolved, add module for this easyconfig in the list of available modules
if not easyconfig['dependencies']:
_log.debug("Adding easyconfig %s to final list" % easyconfig['spec'])
ordered_ecs.append(easyconfig)
mod_name = easyconfig['full_mod_name']
avail_modules.append(mod_name)
# remove module name from list, so dependencies can be marked as resolved
ec_mod_names.remove(mod_name)
else:
new_easyconfigs.append(easyconfig)
return ordered_ecs, new_easyconfigs, avail_modules
@only_if_module_is_available('pygraph.classes.digraph', pkgname='python-graph-core')
def dep_graph(filename, specs):
"""
Create a dependency graph for the given easyconfigs.
"""
# check whether module names are unique
# if so, we can omit versions in the graph
names = set()
for spec in specs:
names.add(spec['ec']['name'])
omit_versions = len(names) == len(specs)
def mk_node_name(spec):
if spec.get('external_module', False):
node_name = "%s (EXT)" % spec['full_mod_name']
elif omit_versions:
node_name = spec['name']
else:
node_name = ActiveMNS().det_full_module_name(spec)
return node_name
# enhance list of specs
all_nodes = set()
for spec in specs:
spec['module'] = mk_node_name(spec['ec'])
all_nodes.add(spec['module'])
spec['ec']._all_dependencies = [mk_node_name(s) for s in spec['ec'].all_dependencies]
all_nodes.update(spec['ec'].all_dependencies)
# Get the build dependencies for each spec so we can distinguish them later
spec['ec'].build_dependencies = [mk_node_name(s) for s in spec['ec']['builddependencies']]
all_nodes.update(spec['ec'].build_dependencies)
# build directed graph
dgr = digraph()
dgr.add_nodes(all_nodes)
for spec in specs:
for dep in spec['ec'].all_dependencies:
dgr.add_edge((spec['module'], dep))
if dep in spec['ec'].build_dependencies:
dgr.add_edge_attributes((spec['module'], dep), attrs=[('style','dotted'), ('color','blue'), ('arrowhead','diamond')])
_dep_graph_dump(dgr, filename)
if not build_option('silent'):
print "Wrote dependency graph for %d easyconfigs to %s" % (len(specs), filename)
@only_if_module_is_available('pygraph.readwrite.dot', pkgname='python-graph-dot')
def _dep_graph_dump(dgr, filename):
"""Dump dependency graph to file, in specified format."""
# write to file
dottxt = dot.write(dgr)
if os.path.splitext(filename)[-1] == '.dot':
# create .dot file
write_file(filename, dottxt)
else:
_dep_graph_gv(dottxt, filename)
@only_if_module_is_available('gv', pkgname='graphviz')
def _dep_graph_gv(dottxt, filename):
"""Render dependency graph to file using graphviz."""
# try and render graph in specified file format
gvv = gv.readstring(dottxt)
gv.layout(gvv, 'dot')
gv.render(gvv, os.path.splitext(filename)[-1], filename)
def get_paths_for(subdir=EASYCONFIGS_PKG_SUBDIR, robot_path=None):
"""
Return a list of absolute paths where the specified subdir can be found, determined by the PYTHONPATH
"""
paths = []
# primary search path is robot path
path_list = []
if isinstance(robot_path, list):
path_list = robot_path[:]
elif robot_path is not None:
path_list = [robot_path]
# consider Python search path, e.g. setuptools install path for easyconfigs
path_list.extend(sys.path)
# figure out installation prefix, e.g. distutils install path for easyconfigs
eb_path = which('eb')
if eb_path is None:
_log.warning("'eb' not found in $PATH, failed to determine installation prefix")
else:
# eb should reside in <install_prefix>/bin/eb
install_prefix = os.path.dirname(os.path.dirname(eb_path))
path_list.append(install_prefix)
_log.debug("Also considering installation prefix %s..." % install_prefix)
# look for desired subdirs
for path in path_list:
path = os.path.join(path, "easybuild", subdir)
_log.debug("Checking for easybuild/%s at %s" % (subdir, path))
try:
if os.path.exists(path):
paths.append(os.path.abspath(path))
_log.debug("Added %s to list of paths for easybuild/%s" % (path, subdir))
except OSError, err:
raise EasyBuildError(str(err))
return paths
def alt_easyconfig_paths(tmpdir, tweaked_ecs=False, from_pr=False):
"""Obtain alternative paths for easyconfig files."""
# path where tweaked easyconfigs will be placed
tweaked_ecs_path = None
if tweaked_ecs:
tweaked_ecs_path = os.path.join(tmpdir, 'tweaked_easyconfigs')
# path where files touched in PR will be downloaded to
pr_path = None
if from_pr:
pr_path = os.path.join(tmpdir, "files_pr%s" % from_pr)
return tweaked_ecs_path, pr_path
def det_easyconfig_paths(orig_paths):
"""
Determine paths to easyconfig files.
:param orig_paths: list of original easyconfig paths
:return: list of paths to easyconfig files
"""
from_pr = build_option('from_pr')
robot_path = build_option('robot_path')
# list of specified easyconfig files
ec_files = orig_paths[:]
if from_pr is not None:
pr_files = fetch_easyconfigs_from_pr(from_pr)
if ec_files:
# replace paths for specified easyconfigs that are touched in PR
for i, ec_file in enumerate(ec_files):
for pr_file in pr_files:
if ec_file == os.path.basename(pr_file):
ec_files[i] = pr_file
else:
# if no easyconfigs are specified, use all the ones touched in the PR
ec_files = [path for path in pr_files if path.endswith('.eb')]
if ec_files and robot_path:
# look for easyconfigs with relative paths in robot search path,
# unless they were found at the given relative paths
# determine which easyconfigs files need to be found, if any
ecs_to_find = []
for idx, ec_file in enumerate(ec_files):
if ec_file == os.path.basename(ec_file) and not os.path.exists(ec_file):
ecs_to_find.append((idx, ec_file))
_log.debug("List of easyconfig files to find: %s" % ecs_to_find)
# find missing easyconfigs by walking paths in robot search path
for path in robot_path:
_log.debug("Looking for missing easyconfig files (%d left) in %s..." % (len(ecs_to_find), path))
for (subpath, dirnames, filenames) in os.walk(path, topdown=True):
for idx, orig_path in ecs_to_find[:]:
if orig_path in filenames:
full_path = os.path.join(subpath, orig_path)
_log.info("Found %s in %s: %s" % (orig_path, path, full_path))
ec_files[idx] = full_path
# if file was found, stop looking for it (first hit wins)
ecs_to_find.remove((idx, orig_path))
# stop os.walk insanity as soon as we have all we need (os.walk loop)
if not ecs_to_find:
break
# ignore subdirs specified to be ignored by replacing items in dirnames list used by os.walk
dirnames[:] = [d for d in dirnames if d not in build_option('ignore_dirs')]
# ignore archived easyconfigs, unless specified otherwise
if not build_option('consider_archived_easyconfigs'):
dirnames[:] = [d for d in dirnames if d != EASYCONFIGS_ARCHIVE_DIR]
# stop os.walk insanity as soon as we have all we need (outer loop)
if not ecs_to_find:
break
return [os.path.abspath(ec_file) for ec_file in ec_files]
def parse_easyconfigs(paths, validate=True):
"""
Parse easyconfig files
:param paths: paths to easyconfigs
"""
easyconfigs = []
generated_ecs = False
for (path, generated) in paths:
path = os.path.abspath(path)
# keep track of whether any files were generated
generated_ecs |= generated
if not os.path.exists(path):
raise EasyBuildError("Can't find path %s", path)
try:
ec_files = find_easyconfigs(path, ignore_dirs=build_option('ignore_dirs'))
for ec_file in ec_files:
# only pass build specs when not generating easyconfig files
kwargs = {'validate': validate}
if not build_option('try_to_generate'):
kwargs['build_specs'] = build_option('build_specs')
ecs = process_easyconfig(ec_file, **kwargs)
easyconfigs.extend(ecs)
except IOError, err:
raise EasyBuildError("Processing easyconfigs in path %s failed: %s", path, err)
return easyconfigs, generated_ecs
def stats_to_str(stats, isyeb=False):
"""
Pretty print build statistics to string.
"""
if not isinstance(stats, (OrderedDict, dict)):
raise EasyBuildError("Can only pretty print build stats in dictionary form, not of type %s", type(stats))
txt = "{\n"
pref = " "
for key in sorted(stats):
if isyeb:
val = stats[key]
if isinstance(val, tuple):
val = list(val)
key, val = quote_yaml_special_chars(key), quote_yaml_special_chars(val)
else:
key, val = quote_str(key), quote_str(stats[key])
txt += "%s%s: %s,\n" % (pref, key, val)
txt += "}"
return txt
def find_related_easyconfigs(path, ec):
"""
Find related easyconfigs for provided parsed easyconfig in specified path.
A list of easyconfigs for the same software (name) is returned,
matching the 1st criterion that yields a non-empty list.
    The following criteria are considered (in this order), each combined with a
    common software version criterion: exact version match, major/minor version
    match, major version match, or no version match (in that order).
(i) matching versionsuffix and toolchain name/version
(ii) matching versionsuffix and toolchain name (any toolchain version)
(iii) matching versionsuffix (any toolchain name/version)
(iv) matching toolchain name/version (any versionsuffix)
(v) matching toolchain name (any versionsuffix, toolchain version)
(vi) no extra requirements (any versionsuffix, toolchain name/version)
If no related easyconfigs with a matching software name are found, an empty list is returned.
"""
name = ec.name
version = ec.version
versionsuffix = ec['versionsuffix']
toolchain_name = ec['toolchain']['name']
toolchain_name_pattern = r'-%s-\S+' % toolchain_name
toolchain_pattern = '-%s-%s' % (toolchain_name, ec['toolchain']['version'])
if toolchain_name == DUMMY_TOOLCHAIN_NAME:
toolchain_name_pattern = ''
toolchain_pattern = ''
potential_paths = [glob.glob(ec_path) for ec_path in create_paths(path, name, '*')]
potential_paths = sum(potential_paths, []) # flatten
_log.debug("found these potential paths: %s" % potential_paths)
parsed_version = LooseVersion(version).version
version_patterns = [version] # exact version match
if len(parsed_version) >= 2:
version_patterns.append(r'%s\.%s\.\w+' % tuple(parsed_version[:2])) # major/minor version match
if parsed_version != parsed_version[0]:
version_patterns.append(r'%s\.[\d-]+\.\w+' % parsed_version[0]) # major version match
version_patterns.append(r'[\w.]+') # any version
regexes = []
for version_pattern in version_patterns:
common_pattern = r'^\S+/%s-%s%%s\.eb$' % (re.escape(name), version_pattern)
regexes.extend([
common_pattern % (toolchain_pattern + versionsuffix),
common_pattern % (toolchain_name_pattern + versionsuffix),
common_pattern % (r'\S*%s' % versionsuffix),
common_pattern % toolchain_pattern,
common_pattern % toolchain_name_pattern,
common_pattern % r'\S*',
])
for regex in regexes:
res = [p for p in potential_paths if re.match(regex, p)]
if res:
_log.debug("Related easyconfigs found using '%s': %s" % (regex, res))
break
else:
_log.debug("No related easyconfigs in potential paths using '%s'" % regex)
return sorted(res)
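# Illustration (hypothetical easyconfig): for name 'HPL' and version '2.1',
# the version patterns tried are, in order: '2.1' (exact), r'2\.1\.\w+'
# (major/minor match), r'2\.[\d-]+\.\w+' (major match) and r'[\w.]+' (any
# version), each combined with the six toolchain/versionsuffix patterns built above.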
def review_pr(pr, colored=True, branch='develop'):
"""
Print multi-diff overview between easyconfigs in specified PR and specified branch.
:param pr: pull request number in easybuild-easyconfigs repo to review
:param colored: boolean indicating whether a colored multi-diff should be generated
:param branch: easybuild-easyconfigs branch to compare with
"""
tmpdir = tempfile.mkdtemp()
download_repo_path = download_repo(branch=branch, path=tmpdir)
repo_path = os.path.join(download_repo_path, 'easybuild', 'easyconfigs')
pr_files = [path for path in fetch_easyconfigs_from_pr(pr) if path.endswith('.eb')]
lines = []
ecs, _ = parse_easyconfigs([(fp, False) for fp in pr_files], validate=False)
for ec in ecs:
files = find_related_easyconfigs(repo_path, ec['ec'])
_log.debug("File in PR#%s %s has these related easyconfigs: %s" % (pr, ec['spec'], files))
if files:
lines.append(multidiff(ec['spec'], files, colored=colored))
else:
lines.extend(['', "(no related easyconfigs found for %s)\n" % os.path.basename(ec['spec'])])
return '\n'.join(lines)
def dump_env_script(easyconfigs):
"""
Dump source scripts that set up build environment for specified easyconfigs.
:param easyconfigs: list of easyconfigs to generate scripts for
"""
ecs_and_script_paths = []
for easyconfig in easyconfigs:
script_path = '%s.env' % os.path.splitext(os.path.basename(easyconfig['spec']))[0]
ecs_and_script_paths.append((easyconfig['ec'], script_path))
# don't just overwrite existing scripts
existing_scripts = [s for (_, s) in ecs_and_script_paths if os.path.exists(s)]
if existing_scripts:
if build_option('force'):
_log.info("Found existing scripts, overwriting them: %s", ' '.join(existing_scripts))
else:
raise EasyBuildError("Script(s) already exists, not overwriting them (unless --force is used): %s",
' '.join(existing_scripts))
orig_env = copy.deepcopy(os.environ)
for ec, script_path in ecs_and_script_paths:
# obtain EasyBlock instance
app_class = get_easyblock_class(ec['easyblock'], name=ec['name'])
app = app_class(ec)
# mimic dry run, and keep quiet
app.dry_run = app.silent = app.toolchain.dry_run = True
# prepare build environment (in dry run mode)
app.check_readiness_step()
app.prepare_step(start_dir=False)
# compose script
ecfile = os.path.basename(ec.path)
script_lines = [
"#!/bin/bash",
"# script to set up build environment as defined by EasyBuild v%s for %s" % (EASYBUILD_VERSION, ecfile),
"# usage: source %s" % os.path.basename(script_path),
]
script_lines.extend(['', "# toolchain & dependency modules"])
if app.toolchain.modules:
script_lines.extend(["module load %s" % mod for mod in app.toolchain.modules])
else:
script_lines.append("# (no modules loaded)")
script_lines.extend(['', "# build environment"])
if app.toolchain.vars:
env_vars = sorted(app.toolchain.vars.items())
script_lines.extend(["export %s='%s'" % (var, val.replace("'", "\\'")) for (var, val) in env_vars])
else:
script_lines.append("# (no build environment defined)")
write_file(script_path, '\n'.join(script_lines))
print_msg("Script to set up build environment for %s dumped to %s" % (ecfile, script_path), prefix=False)
restore_env(orig_env)
def categorize_files_by_type(paths):
"""
    Splits list of filepaths into 3 separate lists: easyconfigs, files to delete and patch files
"""
res = {
'easyconfigs': [],
'files_to_delete': [],
'patch_files': [],
}
for path in paths:
if path.startswith(':'):
res['files_to_delete'].append(path[1:])
# file must exist in order to check whether it's a patch file
elif os.path.isfile(path) and is_patch_file(path):
res['patch_files'].append(path)
else:
# anything else is considered to be an easyconfig file
res['easyconfigs'].append(path)
return res
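# Example (hypothetical file names): a leading ':' marks a file for deletion,
# existing patch files are detected via is_patch_file(), and anything else is
# treated as an easyconfig:
#
#   categorize_files_by_type([':old.eb', 'fix.patch', 'GCC-4.9.2.eb'])
#   # -> {'easyconfigs': ['GCC-4.9.2.eb'],
#   #     'files_to_delete': ['old.eb'],
#   #     'patch_files': ['fix.patch']}   # only if fix.patch exists on disk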
|
wpoely86/easybuild-framework
|
easybuild/framework/easyconfig/tools.py
|
Python
|
gpl-2.0
| 23,864
|
#!/usr/bin/env python
import argparse
import logging
import dns.flags
import dns.rdataclass
import dns.rdatatype
from dns.resolver import Resolver
def get_args():
parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-z', '--zone', default='.')
    parser.add_argument('-s', '--server', default='127.0.0.1')
    parser.add_argument('-b', '--bufsize', type=int, default=4096)
    parser.add_argument('-v', '--verbose', action='count', default=0)
return parser.parse_args()
def set_log_level(args_level):
log_level = logging.ERROR
if args_level == 1:
log_level = logging.WARN
elif args_level == 2:
log_level = logging.INFO
elif args_level > 2:
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
def get_nsset(zone, server, bufsize):
nsset = { 'RRSIG' : False }
resolver = Resolver()
resolver.nameservers = [server]
resolver.use_edns(edns=True, ednsflags=dns.flags.DO, payload=bufsize)
response = resolver.query(zone, 'NS', dns.rdataclass.IN, True).response
for answer in response.answer:
for ans in answer.items:
if ans.rdtype == dns.rdatatype.NS:
nsset[ans.to_text()] = { 'A' : None, 'AAAA' : None, 'RRSIG' : None }
elif ans.rdtype == dns.rdatatype.RRSIG:
nsset['RRSIG'] = True
return nsset
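# Sketch of the returned structure (nameserver names are hypothetical): the
# top-level 'RRSIG' key records whether the NS RRset was signed, every other
# key maps a nameserver name to per-record placeholders:
#
#   {'RRSIG': True,
#    'ns1.example.net.': {'A': None, 'AAAA': None, 'RRSIG': None}}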
def main():
    args = get_args()
    set_log_level(args.verbose)
    nsset = get_nsset(args.zone, args.server, args.bufsize)
    # report the collected NS set (it was previously computed but never used)
    print(nsset)
if __name__ == '__main__':
main()
|
b4ldr/rsn
|
5.1/test_rsn.py
|
Python
|
apache-2.0
| 1,577
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import timeit
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
from derivsfunc import *
from initialconditions import direct_ic, retro_ic
def S7_parabola(M,S,gamma):
"""
    Computes the initial conditions for the disrupting galaxy.
"""
    R1 = -55
    R2 = 25 - (R1**2)/100.0  # float division (Python 2 would otherwise truncate)
    vR = np.sqrt((2*gamma*(M+S))/np.linalg.norm([R1,R2]))
    if R1 == 0:
        vR1, vR2 = vR, 0
    else:
        theta = np.arctan(abs(R1/50.0))
        # the original R2 > 0 and R2 < 0 branches were identical, so the
        # decomposition is done unconditionally; this also covers R2 == 0,
        # which previously left vR1/vR2 unbound
        vR1 = vR*np.cos(theta)
        vR2 = vR*np.sin(theta)
return R1, R2, vR1, vR2
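# The launch speed above is the parabolic (escape) speed at the initial
# separation, v = sqrt(2*gamma*(M+S)/|R|); a quick check (values hypothetical):
#
#   import numpy as np
#   R1, R2 = -55, 25 - 55**2/100.0
#   v = np.sqrt(2*1.0*(330.0 + 330.0)/np.linalg.norm([R1, R2]))  # ~4.9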
def S7_ics(M,S,gamma):
"""
Compiles all the S7 initial conditions into a single array.
"""
direct_r1, direct_r2, direct_vr1, direct_vr2 = direct_ic(M,gamma)
retro_r1, retro_r2, retro_vr1, retro_vr2 = retro_ic(M,gamma)
R1,R2,vR1,vR2 = S7_parabola(M,S,gamma)
icR = np.array([R1,R2,vR1,vR2])
direct_mr1 = np.hstack((direct_r1[0],direct_r1[1],direct_r1[2],direct_r1[3],direct_r1[4]))
direct_mr2 = np.hstack((direct_r2[0],direct_r2[1],direct_r2[2],direct_r2[3],direct_r2[4]))
direct_mvr1 = np.hstack((direct_vr1[0],direct_vr1[1],direct_vr1[2],direct_vr1[3],direct_vr1[4]))
direct_mvr2 = np.hstack((direct_vr2[0],direct_vr2[1],direct_vr2[2],direct_vr2[3],direct_vr2[4]))
retro_mr1 = np.hstack((retro_r1[0],retro_r1[1],retro_r1[2],retro_r1[3],retro_r1[4]))
retro_mr2 = np.hstack((retro_r2[0],retro_r2[1],retro_r2[2],retro_r2[3],retro_r2[4]))
retro_mvr1 = np.hstack((retro_vr1[0],retro_vr1[1],retro_vr1[2],retro_vr1[3],retro_vr1[4]))
retro_mvr2 = np.hstack((retro_vr2[0],retro_vr2[1],retro_vr2[2],retro_vr2[3],retro_vr2[4]))
direct_star_ic = np.transpose(np.vstack((direct_mr1,direct_mr2,direct_mvr1,direct_mvr2)))
direct_ic_total = np.append(icR,direct_star_ic)
retro_star_ic = np.transpose(np.vstack((retro_mr1,retro_mr2,retro_mvr1,retro_mvr2)))
retro_ic_total = np.append(icR,retro_star_ic)
return direct_ic_total, retro_ic_total, icR, direct_star_ic, retro_star_ic
def S7_ode_solutions(t,tsteps,M,S,gamma):
"""
    Solves the differential equations for an array of S7-case initial conditions and returns lists of positions and velocities.
    Parameters
    ----------
    t: array
        The times at which the solution is computed.
    tsteps: int
The number of times between [0,t] the solution is calculated.
M, S, gamma: float
Parameters of the differential equation.
Returns
-------
    direct_r1, direct_r2, retro_r1, retro_r2: lists of arrays
        Lists of 120 arrays each with tsteps number of solutions; one array for each star.
    R1, R2, vR1, vR2: arrays
        Position and velocity components of the disrupting galaxy.
"""
direct_ic_total, retro_ic_total, icR, direct_star_ic, retro_star_ic = S7_ics(M,S,gamma)
direct_r1,direct_r2,direct_vr1,direct_vr2 = [], [], [], []
direct_complete_sol = []
retro_r1,retro_r2,retro_vr1,retro_vr2 = [], [], [], []
retro_complete_sol = []
for i in range(120):
direct_ic = np.append(icR,direct_star_ic[i])
direct_solution = odeint(derivs, direct_ic, t, args=(M, S), atol=1e-5, rtol=1e-5)
direct_complete_sol.append(direct_solution)
direct_r1.append(direct_complete_sol[i][0:tsteps,4])
direct_r2.append(direct_complete_sol[i][0:tsteps,5])
direct_vr1.append(direct_complete_sol[i][0:tsteps,6])
direct_vr2.append(direct_complete_sol[i][0:tsteps,7])
retro_ic = np.append(icR,retro_star_ic[i])
retro_solution = odeint(derivs, retro_ic, t, args=(M, S), atol=1e-5, rtol=1e-5)
retro_complete_sol.append(retro_solution)
retro_r1.append(retro_complete_sol[i][0:tsteps,4])
retro_r2.append(retro_complete_sol[i][0:tsteps,5])
retro_vr1.append(retro_complete_sol[i][0:tsteps,6])
retro_vr2.append(retro_complete_sol[i][0:tsteps,7])
R1 = direct_complete_sol[0][0:tsteps,0]
R2 = direct_complete_sol[0][0:tsteps,1]
vR1 = direct_complete_sol[0][0:tsteps,2]
vR2 = direct_complete_sol[0][0:tsteps,3]
return direct_r1, direct_r2, retro_r1, retro_r2, R1, R2, vR1, vR2
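# Usage sketch (parameter values are hypothetical; derivs, M, S and gamma come
# from the project's own modules):
#
#   import numpy as np
#   t = np.linspace(0, 20, 201)
#   direct_r1, direct_r2, retro_r1, retro_r2, R1, R2, vR1, vR2 = \
#       S7_ode_solutions(t, 201, M=330.0, S=330.0, gamma=4.4983e-6)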
|
brettavedisian/phys202-project
|
Project/S7_icsandsolutions.py
|
Python
|
mit
| 4,336
|
import json
import requests
import logging
from requests.exceptions import *
logger = logging.getLogger(__name__)
class Service:
""" Proxy Service for custom RESTful service API to MassDOT real-time data """
base_url = ''
def __init__(self, base_url, prediction_uri):
self.base_url = base_url
self.prediction_uri = prediction_uri
def format_stop_request(self, stop_id):
return "{url}/{endpoint}/{stop}".format(url=self.base_url, endpoint=self.prediction_uri, stop=str(stop_id))
def resource_available(self):
url = "{url}/{endpoint}".format(url=self.base_url, endpoint=self.prediction_uri)
logger.info('Checking resource availability on %s' % url)
try:
r = requests.get(url)
return r.status_code == requests.codes.ok
        except HTTPError as e:
            logger.error('Error in request: %r' % e)
            return False
        except RequestException as e:
            logger.error('RequestException in request: %r' % e)
            return False
def access(self, stop_id, delegate):
url = self.format_stop_request(stop_id)
logger.info('Request for stop %s' % url)
headers = {
'content-type': 'application/json; charset=utf-8',
'Accept': 'application/json'
}
try:
r = requests.get(url, headers=headers)
delegate.success(r.text)
except HTTPError as e:
logger.error('Error in request for %r: %r' % (stop_id, e))
delegate.failure(e)
except RequestException as e:
logger.error('RequestException in request for %r: %r' % (stop_id, e))
delegate.failure(e)
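# Minimal usage sketch (URL, endpoint and stop id are hypothetical; the
# delegate only needs the success()/failure() callbacks that access() invokes):
#
#   class PrintDelegate:
#       def success(self, body):
#           print(body)
#       def failure(self, error):
#           print('request failed: %r' % error)
#
#   service = Service('http://localhost:8080', 'prediction')
#   service.access(70, PrintDelegate())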
|
infrared5/massroute-pi
|
app/service/service.py
|
Python
|
mit
| 1,548
|
# -*- coding: latin_1 -*-
#***********************************************************************************
#-----------------------------------------------------------------------------------
# Filename: Avrenningrivernet.py
#-----------------------------------------------------------------------------------
# Created: 1 Dec. 2010, JSV
# Revised:
#-----------------------------------------------------------------------------------
# Description:
#
#-----------------------------------------------------------------------------------
# Context:
#
#-----------------------------------------------------------------------------------
# Arguments: none
#-----------------------------------------------------------------------------------
# Prerequisites:
# File feltinformasjon_alle.dbf with fields VOMR (drainage basin) and SNR (station number)
# This file defines which gauging stations the analysis is run for
#-----------------------------------------------------------------------------------
# Results:
#
#-----------------------------------------------------------------------------------
#***********************************************************************************
# Import of system modules
import sys, string, os, traceback
import numpy
import arcpy
from arcpy import env
from arcpy.sa import *
# Import of NVE routines
sys.path.append("V:\\Rutiner\\Python\\NVE")
from General10 import Log
from General10 import sett_snapextent
# Allow overwriting of results
env.overwriteOutput = True
env.scratchWorkspace = r"C:\Temp\scratch.gdb"
#***********************************************************************************
def ErrorHandling(LogFile):
    exc_type, exc_value, tb = sys.exc_info()
    tbinfo = traceback.format_tb(tb)[0]
    PyMsg = "PYTHON ERROR:\n" + tbinfo + "\n" + str(exc_type) + ":" + str(exc_value) + "\n"
GpMsg = "ARCPY ERROR:\n" + arcpy.GetMessages(2)+ "\n"
print(PyMsg)
print(GpMsg)
arcpy.AddError(PyMsg)
arcpy.AddError(GpMsg)
Log(LogFile,PyMsg)
Log(LogFile,GpMsg)
arcpy.AddMessage(arcpy.GetMessages(1))
#print arcpy.GetMessages(1)
Log(LogFile,arcpy.GetMessages())
#***********************************************************************************
# Function definitions
#***********************************************************************************
def Resultat_Eksisterer(utfil):
    return arcpy.Exists(utfil)
#***********************************************************************************
def AreaDistributionRivernet(workspace,fc_msta_River,catchment,grd_hydflowdir,grd_hydflowacc,test,LogFile, utfil):
try:
LogTekst = " AreaDistributionRivernet "
print(LogTekst)
#Log(LogFile,LogTekst)
class LicenseError(Exception):
pass
# Check out any necessary licenses
print("The Spatial license is " + arcpy.CheckExtension("Spatial"))
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckOutExtension("Spatial")
else:
raise LicenseError
# arcpy.env.workspace = workspace
# dsc_catchment = arcpy.Describe(catchment)
# extent_felt = dsc_catchment.extent
# env.extent = sett_snapextent(extent_felt, grd_hydflowdir)
# cellSize = arcpy.Describe(grd_hydflowdir).MeanCellHeight
# print("Satt snapextent")
# # Convert rivernet to raster
# arcpy.FeatureToRaster_conversion(fc_msta_River, "LTEMA", "HRiver", cellSize)
        # # give all cells in the network the river code
# River_grd = Con("HRiver", 3201)
# River_grd.save(os.path.join(workspace,"grd_msta_River"))
# grd_msta_River = os.path.join(workspace,"grd_msta_River")
# arcpy.BuildRasterAttributeTable_management(grd_msta_River)
# print("rivernet convertet to raster")
########################################################################
grd_msta_River = os.path.join(workspace,"grd_msta_River")
########################################################################
        # extract flow acc by catchment mask (the Glomma basin came out wrong with ExtractByMask)
# CatFlowAcc = ExtractByMask(grd_hydflowacc, catchment)
# CatFlowAcc.save(os.path.join(workspace,"CatFlowAcc"))
# arcpy.BuildRasterAttributeTable_management(CatFlowAcc)
# print(" extract flow acc by catchment mask")
# CatFlowAcc = os.path.join(workspace,"CatFlowAcc")
# Rows = arcpy.SearchCursor(grd_msta_River)
# for row in Rows:
# countRiverHong = int(row.getValue("Count"))
# print("the number of river cells")
# print(countRiverHong)
# arr = arcpy.RasterToNumPyArray(CatFlowAcc)
# brr = numpy.sort(arr, axis=None, kind = "mergesort")
# brr[:] = brr[::-1]
# thValue = brr[countRiverHong-1]
# print(thValue)
# ConAcc = "Value < %s" % (thValue)
# outCon = SetNull(CatFlowAcc, 1, ConAcc)
# outCon.save(os.path.join(workspace,"outCon"))
# print("get river cells according to the flow acc")
#############################################################
# # extract flow dir to the river cells
# outCon = os.path.join(workspace,"outCon");
# CatFlowDir = Con(outCon, grd_hydflowdir)
# CatFlowDir.save(os.path.join(workspace,"CatFlowDir"))
# print(CatFlowDir)
#############################################################
flowlen_diff = Int(FlowLength(os.path.join(workspace,"CatFlowDir"), "DOWNSTREAM", ""))
arcpy.BuildRasterAttributeTable_management(flowlen_diff)
flowlen_diff.save("%s\\flowlen_diff" % (workspace))
flowlen_diff = os.path.join(workspace, "flowlen_diff")
        # MINIMUM - Smallest value of all cells in the input raster.
        # MAXIMUM - Largest value of all cells in the input raster.
        # MEAN - Average of all cells in the input raster.
        # STD - Standard deviation of all cells in the input raster.
ReStat = arcpy.GetRasterProperties_management(flowlen_diff, "MINIMUM")
MinValue = ReStat.getOutput(0)
if (MinValue < 1):
Log(LogFile, "min value is not 0, wrong; it is %s" %(MinValue))
print(MinValue)
return None
else:
resultatfil = open(utfil, 'a')
resultatfil.write("Statistics for River\n")
resultatfil.write('\n')
ReStat = arcpy.GetRasterProperties_management(flowlen_diff, "MAXIMUM")
MaxValue = ReStat.getOutput(0)
resultatfil.write('Max River, %s\n' %(MaxValue))
ReStat = arcpy.GetRasterProperties_management(flowlen_diff, "MEAN")
MeanValue = ReStat.getOutput(0)
resultatfil.write('Mean River, %s\n' %(MeanValue))
ReStat = arcpy.GetRasterProperties_management(flowlen_diff, "STD")
STDValue = ReStat.getOutput(0)
resultatfil.write('STD River, %s\n' %(STDValue))
resultatfil.write('\n')
resultatfil.close()
except:
ErrorHandling(LogFile)
sys.exit(9)
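# The statistics block appended to <station>_Stat.txt by the function above
# has this shape (values are illustrative):
#
#   Statistics for River
#
#   Max River, 61250
#   Mean River, 14873.2
#   STD River, 9120.5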
#***********************************************************************************
def Tilrettelegg_Data(stasjonsnr,DDDParameterList,DDD_Output_List,arbeidsomr,fc_mstaFelt,fc_rivernet,grd_hydflowdir,grd_hydflowacc,landuse,dtem25,resMal,test,LogFile):
try:
loggtekst = "\nMalestasjon: %s\n" % (stasjonsnr)
print("Malestasjon: %s" % (stasjonsnr))
resultatkatalog = os.path.join(arbeidsomr,"resultat")
if not os.path.exists(resultatkatalog):
os.makedirs(resultatkatalog)
ok_stnr = stasjonsnr.replace(".", "_")
resultatGdb = os.path.join(arbeidsomr, "%s_0.gdb" % (ok_stnr))
# Creates geodatabase per station
utfil = os.path.join(resultatkatalog, "%s_Stat.txt" %(ok_stnr))
# check resultfiles
resOk = Resultat_Eksisterer(utfil)
print("%s made? %s" %(utfil,resOk))
        #--- if resultfile and result gdb exist.
if resOk:
print("%s already calculated" % (stasjonsnr))
loggtekst += "%s already calculated\n" % (stasjonsnr)
else:
# # # env.extent = "MAXOF"
# # Get the catchment for the station
# # # msta_query = "\"STASJON_NR\" = '%s'" % (stasjonsnr)
# # # catchment = "%s\\msta_felt" % (resultatGdb)
# # # if arcpy.Exists("lyr"):
# # # arcpy.Delete_management("lyr")
# # # arcpy.MakeFeatureLayer_management(fc_mstaFelt, "lyr")
# # # arcpy.SelectLayerByAttribute_management("lyr", "NEW_SELECTION", msta_query)
# # # if int(arcpy.GetCount_management("lyr").getOutput(0)) > 0:
# # # arcpy.CopyFeatures_management("lyr", catchment)
# # # print "Copied catchment polygon for %s" % (stasjonsnr)
# # --- get the river net inside the catchment
# # # fc_msta_River = os.path.join(resultatGdb,"msta_River")
# # # arcpy.Clip_analysis(fc_rivernet, catchment, fc_msta_River)
fc_msta_River = os.path.join(resultatGdb,"msta_River")
catchment = os.path.join(resultatGdb,"msta_felt")
para = "river"
if Resultat_Eksisterer(fc_msta_River):
AreaDistributionRivernet(resultatGdb,fc_msta_River,catchment,grd_hydflowdir,grd_hydflowacc,test,LogFile, utfil)
else:
Log(LogFile, "no results %s" % (resultatGdb))
except:
ErrorHandling(LogFile)
sys.exit(9)
# Main script
#***********************************************************************************
def main():
#--- INPUT ARGUMENTER
#msta = sys.argv[1]
#msta = 101.1
#datotag = "20161115" # dette er bare en tag
arbeidsomr = "C:\\Temp\\FlowLength" # katalogen m� finnes
#check if stasjonsnr er format xx.x.0
#if msta.count(".") == 1:
# msta = "%s.0" %msta
#mstaliste = []
StaFile = open("%s\\%s" % (arbeidsomr,"miss_sta"))
#mstaliste.append(msta)
#--- DDDParameterList = ["River","SoilBogGlacier","Hypso_HBV", "DDDD"]
DDDParameterList = ["River"]
#DDD_Output_List = ["AreaDistribution","Statistics"]
DDD_Output_List = ["Statistics"]
#--- STANDARD INPUT
SDE_Innsyn = "Database Connections\\AD@innsynWGS_GISSQL01.sde"
Database = "innsyn_wgs"
fc_mstaFelt = "%s\\%s.VANN.Hydrologi_INNSYN\\%s.VANN.HYDRA_FeltTotalMstaF" %(SDE_Innsyn,Database,Database)
fc_rivernet = "%s\\%s.VANN.Elvenett_INNSYN\\%s.VANN.ElvenettL" %(SDE_Innsyn,Database,Database)
grd_hydflowdir = "%s\\%s.VANN.HYDFLOWDIR" %(SDE_Innsyn,Database)
grd_hydflowacc = "%s\\%s.VANN.HYDFLOWACC" %(SDE_Innsyn,Database)
SDE_Kart = "Database Connections\\AD@kart_GISSQL01.sde"
landuse = "%s\\kart.SK.Feltpar25_NSFR" %(SDE_Kart)
dtem25 = "%s\\kart.SK.DTEM25_SF" %(SDE_Kart)
LogFile = "%s\\%s.log" % (arbeidsomr,sys.argv[0])
scriptPath = "L:\\ArcGIS\\HydrologiskAnalyser\\Python\\script20100414\\"
resMal = scriptPath + "hyp_hbv_mal.dbf"
if not arcpy.Exists(resMal):
arcpy.Addmessage("Tabellmalen %s finnes ikke" %(resMal))
sys.exit(1)
env.workspace = arbeidsomr
test = 1
for iSta in range(1, 7):
sta = StaFile.readline()
print(sta)
stasjonsnr = sta.rstrip()
print("%s : %s" %(iSta,stasjonsnr))
Log(LogFile, "%s : %s" %(iSta,stasjonsnr))
Tilrettelegg_Data(stasjonsnr,DDDParameterList,DDD_Output_List,arbeidsomr,fc_mstaFelt,fc_rivernet,grd_hydflowdir,grd_hydflowacc,landuse,dtem25,resMal,test,LogFile)
Log(LogFile, "%s : %s ferdig" %(iSta,stasjonsnr))
print("Skriptet er ferdig")
#***********************************************************************************
# If the script runs independently, call main.
# If it is imported as a module in another script, don't call main.
#***********************************************************************************
if __name__ == '__main__':
main()
#***********************************************************************************
|
honglioslo/NorwayIndexFlood
|
DDDparametre1HongFlowAccFlowDir.py
|
Python
|
gpl-3.0
| 12,505
|
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function, unicode_literals
from collections import deque
import gc
import gzip
import logging
import signal
import sys
import textwrap
import time
import warnings
import weakref
import zlib
import portage
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage.cache.mappings import slot_dict_class
from portage.elog.messages import eerror
from portage.localization import _
from portage.output import colorize, create_color_func, red
bad = create_color_func("BAD")
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ensure_dirs, writemsg, writemsg_level
from portage.util.SlotObject import SlotObject
from portage.util._async.SchedulerInterface import SchedulerInterface
from portage.util._eventloop.EventLoop import EventLoop
from portage.package.ebuild.digestcheck import digestcheck
from portage.package.ebuild.digestgen import digestgen
from portage.package.ebuild.doebuild import (_check_temp_dir,
_prepare_self_update)
from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
import _emerge
from _emerge.BinpkgFetcher import BinpkgFetcher
from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
from _emerge.BinpkgVerifier import BinpkgVerifier
from _emerge.Blocker import Blocker
from _emerge.BlockerDB import BlockerDB
from _emerge.clear_caches import clear_caches
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.create_world_atom import create_world_atom
from _emerge.DepPriority import DepPriority
from _emerge.depgraph import depgraph, resume_depgraph
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildFetcher import EbuildFetcher
from _emerge.EbuildPhase import EbuildPhase
from _emerge.emergelog import emergelog
from _emerge.FakeVartree import FakeVartree
from _emerge.getloadavg import getloadavg
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
from _emerge.JobStatusDisplay import JobStatusDisplay
from _emerge.MergeListItem import MergeListItem
from _emerge.Package import Package
from _emerge.PackageMerge import PackageMerge
from _emerge.PollScheduler import PollScheduler
from _emerge.SequentialTaskQueue import SequentialTaskQueue
if sys.hexversion >= 0x3000000:
basestring = str
class Scheduler(PollScheduler):
# max time between loadavg checks (milliseconds)
_loadavg_latency = 30000
# max time between display status updates (milliseconds)
_max_display_latency = 3000
_opts_ignore_blockers = \
frozenset(["--buildpkgonly",
"--fetchonly", "--fetch-all-uri",
"--nodeps", "--pretend"])
_opts_no_background = \
frozenset(["--pretend",
"--fetchonly", "--fetch-all-uri"])
_opts_no_self_update = frozenset(["--buildpkgonly",
"--fetchonly", "--fetch-all-uri", "--pretend"])
class _iface_class(SchedulerInterface):
__slots__ = ("fetch",
"scheduleSetup", "scheduleUnpack")
class _fetch_iface_class(SlotObject):
__slots__ = ("log_file", "schedule")
_task_queues_class = slot_dict_class(
("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
class _build_opts_class(SlotObject):
__slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
"fetch_all_uri", "fetchonly", "pretend")
class _binpkg_opts_class(SlotObject):
__slots__ = ("fetchonly", "getbinpkg", "pretend")
class _pkg_count_class(SlotObject):
__slots__ = ("curval", "maxval")
class _emerge_log_class(SlotObject):
__slots__ = ("xterm_titles",)
def log(self, *pargs, **kwargs):
if not self.xterm_titles:
# Avoid interference with the scheduler's status display.
kwargs.pop("short_msg", None)
emergelog(self.xterm_titles, *pargs, **kwargs)
class _failed_pkg(SlotObject):
__slots__ = ("build_dir", "build_log", "pkg", "returncode")
class _ConfigPool(object):
"""Interface for a task to temporarily allocate a config
instance from a pool. This allows a task to be constructed
long before the config instance actually becomes needed, like
when prefetchers are constructed for the whole merge list."""
__slots__ = ("_root", "_allocate", "_deallocate")
def __init__(self, root, allocate, deallocate):
self._root = root
self._allocate = allocate
self._deallocate = deallocate
def allocate(self):
return self._allocate(self._root)
def deallocate(self, settings):
self._deallocate(settings)
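    # Usage sketch (illustrative only): a task borrows a config instance from
    # the pool just for the time it actually needs it:
    #
    #   pool = self._ConfigPool(root, self._allocate_config,
    #       self._deallocate_config)
    #   settings = pool.allocate()
    #   try:
    #       ...  # use settings
    #   finally:
    #       pool.deallocate(settings)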
class _unknown_internal_error(portage.exception.PortageException):
"""
Used internally to terminate scheduling. The specific reason for
the failure should have been dumped to stderr.
"""
def __init__(self, value=""):
portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
spinner, mergelist=None, favorites=None, graph_config=None):
PollScheduler.__init__(self, main=True)
if mergelist is not None:
warnings.warn("The mergelist parameter of the " + \
"_emerge.Scheduler constructor is now unused. Use " + \
"the graph_config parameter instead.",
DeprecationWarning, stacklevel=2)
self.settings = settings
self.target_root = settings["EROOT"]
self.trees = trees
self.myopts = myopts
self._spinner = spinner
self._mtimedb = mtimedb
self._favorites = favorites
self._args_set = InternalPackageSet(favorites, allow_repo=True)
self._build_opts = self._build_opts_class()
for k in self._build_opts.__slots__:
setattr(self._build_opts, k, myopts.get("--" + k.replace("_", "-")))
self._build_opts.buildpkg_exclude = InternalPackageSet( \
initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
allow_wildcard=True, allow_repo=True)
self._binpkg_opts = self._binpkg_opts_class()
for k in self._binpkg_opts.__slots__:
setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
self.curval = 0
self._logger = self._emerge_log_class()
self._task_queues = self._task_queues_class()
for k in self._task_queues.allowed_keys:
setattr(self._task_queues, k,
SequentialTaskQueue())
# Holds merges that will wait to be executed when no builds are
# executing. This is useful for system packages since dependencies
# on system packages are frequently unspecified. For example, see
# bug #256616.
self._merge_wait_queue = deque()
        # Holds merges that have been transferred from the merge_wait_queue to
# the actual merge queue. They are removed from this list upon
# completion. Other packages can start building only when this list is
# empty.
self._merge_wait_scheduled = []
# Holds system packages and their deep runtime dependencies. Before
# being merged, these packages go to merge_wait_queue, to be merged
# when no other packages are building.
self._deep_system_deps = set()
# Holds packages to merge which will satisfy currently unsatisfied
# deep runtime dependencies of system packages. If this is not empty
# then no parallel builds will be spawned until it is empty. This
# minimizes the possibility that a build will fail due to the system
# being in a fragile state. For example, see bug #259954.
self._unsatisfied_system_deps = set()
self._status_display = JobStatusDisplay(
xterm_titles=('notitles' not in settings.features))
self._max_load = myopts.get("--load-average")
max_jobs = myopts.get("--jobs")
if max_jobs is None:
max_jobs = 1
self._set_max_jobs(max_jobs)
self._running_root = trees[trees._running_eroot]["root_config"]
self.edebug = 0
if settings.get("PORTAGE_DEBUG", "") == "1":
self.edebug = 1
self.pkgsettings = {}
self._config_pool = {}
for root in self.trees:
self._config_pool[root] = []
self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
'emerge-fetch.log')
fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
schedule=self._schedule_fetch)
self._sched_iface = self._iface_class(
self._event_loop,
is_background=self._is_background,
fetch=fetch_iface,
scheduleSetup=self._schedule_setup,
scheduleUnpack=self._schedule_unpack)
self._prefetchers = weakref.WeakValueDictionary()
self._pkg_queue = []
self._jobs = 0
self._running_tasks = {}
self._completed_tasks = set()
self._failed_pkgs = []
self._failed_pkgs_all = []
self._failed_pkgs_die_msgs = []
self._post_mod_echo_msgs = []
self._parallel_fetch = False
self._init_graph(graph_config)
merge_count = len([x for x in self._mergelist \
if isinstance(x, Package) and x.operation == "merge"])
self._pkg_count = self._pkg_count_class(
curval=0, maxval=merge_count)
self._status_display.maxval = self._pkg_count.maxval
# The load average takes some time to respond when new
# jobs are added, so we need to limit the rate of adding
# new jobs.
self._job_delay_max = 5
self._previous_job_start_time = None
# This is used to memoize the _choose_pkg() result when
# no packages can be chosen until one of the existing
# jobs completes.
self._choose_pkg_return_early = False
features = self.settings.features
if "parallel-fetch" in features and \
not ("--pretend" in self.myopts or \
"--fetch-all-uri" in self.myopts or \
"--fetchonly" in self.myopts):
if "distlocks" not in features:
portage.writemsg(red("!!!")+"\n", noiselevel=-1)
portage.writemsg(red("!!!")+" parallel-fetching " + \
"requires the distlocks feature enabled"+"\n",
noiselevel=-1)
portage.writemsg(red("!!!")+" you have it disabled, " + \
"thus parallel-fetching is being disabled"+"\n",
noiselevel=-1)
portage.writemsg(red("!!!")+"\n", noiselevel=-1)
elif merge_count > 1:
self._parallel_fetch = True
if self._parallel_fetch:
# clear out existing fetch log if it exists
try:
open(self._fetch_log, 'w').close()
except EnvironmentError:
pass
self._running_portage = None
portage_match = self._running_root.trees["vartree"].dbapi.match(
portage.const.PORTAGE_PACKAGE_ATOM)
if portage_match:
cpv = portage_match.pop()
self._running_portage = self._pkg(cpv, "installed",
self._running_root, installed=True)
def _handle_self_update(self):
if self._opts_no_self_update.intersection(self.myopts):
return os.EX_OK
for x in self._mergelist:
if not isinstance(x, Package):
continue
if x.operation != "merge":
continue
if x.root != self._running_root.root:
continue
if not portage.dep.match_from_list(
portage.const.PORTAGE_PACKAGE_ATOM, [x]):
continue
rval = _check_temp_dir(self.settings)
if rval != os.EX_OK:
return rval
_prepare_self_update(self.settings)
break
return os.EX_OK
def _terminate_tasks(self):
self._status_display.quiet = True
for task in list(self._running_tasks.values()):
task.cancel()
for q in self._task_queues.values():
q.clear()
def _init_graph(self, graph_config):
"""
Initialization structures used for dependency calculations
involving currently installed packages.
"""
self._set_graph_config(graph_config)
self._blocker_db = {}
dynamic_deps = self.myopts.get("--dynamic-deps", "y") != "n"
ignore_built_slot_operator_deps = self.myopts.get(
"--ignore-built-slot-operator-deps", "n") == "y"
for root in self.trees:
if graph_config is None:
fake_vartree = FakeVartree(self.trees[root]["root_config"],
pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps,
ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
fake_vartree.sync()
else:
fake_vartree = graph_config.trees[root]['vartree']
self._blocker_db[root] = BlockerDB(fake_vartree)
def _destroy_graph(self):
"""
Use this to free memory at the beginning of _calc_resume_list().
After _calc_resume_list(), the _init_graph() method
        must be called in order to re-generate the structures that
this method destroys.
"""
self._blocker_db = None
self._set_graph_config(None)
gc.collect()
def _set_max_jobs(self, max_jobs):
self._max_jobs = max_jobs
self._task_queues.jobs.max_jobs = max_jobs
if "parallel-install" in self.settings.features:
self._task_queues.merge.max_jobs = max_jobs
def _background_mode(self):
"""
Check if background mode is enabled and adjust states as necessary.
@rtype: bool
@return: True if background mode is enabled, False otherwise.
"""
background = (self._max_jobs is True or \
self._max_jobs > 1 or "--quiet" in self.myopts \
or self.myopts.get("--quiet-build") == "y") and \
not bool(self._opts_no_background.intersection(self.myopts))
if background:
interactive_tasks = self._get_interactive_tasks()
if interactive_tasks:
background = False
writemsg_level(">>> Sending package output to stdio due " + \
"to interactive package(s):\n",
level=logging.INFO, noiselevel=-1)
msg = [""]
for pkg in interactive_tasks:
pkg_str = " " + colorize("INFORM", str(pkg.cpv))
if pkg.root_config.settings["ROOT"] != "/":
pkg_str += " for " + pkg.root
msg.append(pkg_str)
msg.append("")
writemsg_level("".join("%s\n" % (l,) for l in msg),
level=logging.INFO, noiselevel=-1)
if self._max_jobs is True or self._max_jobs > 1:
self._set_max_jobs(1)
writemsg_level(">>> Setting --jobs=1 due " + \
"to the above interactive package(s)\n",
level=logging.INFO, noiselevel=-1)
writemsg_level(">>> In order to temporarily mask " + \
"interactive updates, you may\n" + \
">>> specify --accept-properties=-interactive\n",
level=logging.INFO, noiselevel=-1)
self._status_display.quiet = \
not background or \
("--quiet" in self.myopts and \
"--verbose" not in self.myopts)
self._logger.xterm_titles = \
"notitles" not in self.settings.features and \
self._status_display.quiet
return background
def _get_interactive_tasks(self):
interactive_tasks = []
for task in self._mergelist:
if not (isinstance(task, Package) and \
task.operation == "merge"):
continue
if 'interactive' in task.properties:
interactive_tasks.append(task)
return interactive_tasks
def _set_graph_config(self, graph_config):
if graph_config is None:
self._graph_config = None
self._pkg_cache = {}
self._digraph = None
self._mergelist = []
self._deep_system_deps.clear()
return
self._graph_config = graph_config
self._pkg_cache = graph_config.pkg_cache
self._digraph = graph_config.graph
self._mergelist = graph_config.mergelist
if "--nodeps" in self.myopts or \
(self._max_jobs is not True and self._max_jobs < 2):
# save some memory
self._digraph = None
graph_config.graph = None
graph_config.pkg_cache.clear()
self._deep_system_deps.clear()
for pkg in self._mergelist:
self._pkg_cache[pkg] = pkg
return
self._find_system_deps()
self._prune_digraph()
self._prevent_builddir_collisions()
if '--debug' in self.myopts:
writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
self._digraph.debug_print()
writemsg("\n", noiselevel=-1)
def _find_system_deps(self):
"""
Find system packages and their deep runtime dependencies. Before being
merged, these packages go to merge_wait_queue, to be merged when no
other packages are building.
NOTE: This can only find deep system deps if the system set has been
added to the graph and traversed deeply (the depgraph "complete"
parameter will do this, triggered by emerge --complete-graph option).
"""
deep_system_deps = self._deep_system_deps
deep_system_deps.clear()
deep_system_deps.update(
_find_deep_system_runtime_deps(self._digraph))
deep_system_deps.difference_update([pkg for pkg in \
deep_system_deps if pkg.operation != "merge"])
def _prune_digraph(self):
"""
Prune any root nodes that are irrelevant.
"""
graph = self._digraph
completed_tasks = self._completed_tasks
removed_nodes = set()
while True:
for node in graph.root_nodes():
if not isinstance(node, Package) or \
(node.installed and node.operation == "nomerge") or \
node.onlydeps or \
node in completed_tasks:
removed_nodes.add(node)
if removed_nodes:
graph.difference_update(removed_nodes)
if not removed_nodes:
break
removed_nodes.clear()
def _prevent_builddir_collisions(self):
"""
When building stages, sometimes the same exact cpv needs to be merged
to both $ROOTs. Add edges to the digraph in order to avoid collisions
in the builddir. Currently, normal file locks would be inappropriate
        for this purpose since emerge holds all of its build dir locks from
the main process.
"""
cpv_map = {}
for pkg in self._mergelist:
if not isinstance(pkg, Package):
# a satisfied blocker
continue
if pkg.installed:
continue
if pkg.cpv not in cpv_map:
cpv_map[pkg.cpv] = [pkg]
continue
for earlier_pkg in cpv_map[pkg.cpv]:
self._digraph.add(earlier_pkg, pkg,
priority=DepPriority(buildtime=True))
cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
"""
An instance of this class is raised by unmerge() when
an uninstallation fails.
"""
status = 1
def __init__(self, *pargs):
portage.exception.PortageException.__init__(self, pargs)
if pargs:
self.status = pargs[0]
def _schedule_fetch(self, fetcher):
"""
Schedule a fetcher, in order to control the number of concurrent
fetchers. If self._max_jobs is greater than 1 then the fetch
queue is bypassed and the fetcher is started immediately,
otherwise it is added to the front of the parallel-fetch queue.
NOTE: The parallel-fetch queue is currently used to serialize
access to the parallel-fetch log, so changes in the log handling
would be required before it would be possible to enable
concurrent fetching within the parallel-fetch queue.
"""
if self._max_jobs > 1:
fetcher.start()
else:
self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
"""
Schedule a setup phase on the merge queue, in order to
serialize unsandboxed access to the live filesystem.
"""
if self._task_queues.merge.max_jobs > 1 and \
"ebuild-locks" in self.settings.features:
# Use a separate queue for ebuild-locks when the merge
# queue allows more than 1 job (due to parallel-install),
# since the portage.locks module does not behave as desired
# if we try to lock the same file multiple times
# concurrently from the same process.
self._task_queues.ebuild_locks.add(setup_phase)
else:
self._task_queues.merge.add(setup_phase)
self._schedule()
def _schedule_unpack(self, unpack_phase):
"""
Schedule an unpack phase on the unpack queue, in order
to serialize $DISTDIR access for live ebuilds.
"""
self._task_queues.unpack.add(unpack_phase)
def _find_blockers(self, new_pkg):
"""
Returns a callable.
"""
def get_blockers():
return self._find_blockers_impl(new_pkg)
return get_blockers
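    # The returned callable defers the (potentially expensive) blocker lookup
    # until the merge task actually needs it, e.g.:
    #
    #   get_blockers = self._find_blockers(new_pkg)
    #   ...          # the package is built first
    #   blocker_dblinks = get_blockers()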
def _find_blockers_impl(self, new_pkg):
if self._opts_ignore_blockers.intersection(self.myopts):
return None
blocker_db = self._blocker_db[new_pkg.root]
blocker_dblinks = []
for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
if new_pkg.slot_atom == blocking_pkg.slot_atom:
continue
if new_pkg.cpv == blocking_pkg.cpv:
continue
blocker_dblinks.append(portage.dblink(
blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
self.pkgsettings[blocking_pkg.root], treetype="vartree",
vartree=self.trees[blocking_pkg.root]["vartree"]))
return blocker_dblinks
def _generate_digests(self):
"""
Generate digests if necessary for --digests or FEATURES=digest.
        In order to avoid interference, this must be done before parallel
tasks are started.
"""
if '--fetchonly' in self.myopts:
return os.EX_OK
digest = '--digest' in self.myopts
if not digest:
for pkgsettings in self.pkgsettings.values():
if pkgsettings.mycpv is not None:
# ensure that we are using global features
# settings rather than those from package.env
pkgsettings.reset()
if 'digest' in pkgsettings.features:
digest = True
break
if not digest:
return os.EX_OK
for x in self._mergelist:
if not isinstance(x, Package) or \
x.type_name != 'ebuild' or \
x.operation != 'merge':
continue
pkgsettings = self.pkgsettings[x.root]
if pkgsettings.mycpv is not None:
# ensure that we are using global features
# settings rather than those from package.env
pkgsettings.reset()
if '--digest' not in self.myopts and \
'digest' not in pkgsettings.features:
continue
portdb = x.root_config.trees['porttree'].dbapi
ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % x.cpv)
pkgsettings['O'] = os.path.dirname(ebuild_path)
if not digestgen(mysettings=pkgsettings, myportdb=portdb):
writemsg_level(
"!!! Unable to generate manifest for '%s'.\n" \
% x.cpv, level=logging.ERROR, noiselevel=-1)
return 1
return os.EX_OK
def _env_sanity_check(self):
"""
Verify a sane environment before trying to build anything from source.
"""
have_src_pkg = False
for x in self._mergelist:
if isinstance(x, Package) and not x.built:
have_src_pkg = True
break
if not have_src_pkg:
return os.EX_OK
for settings in self.pkgsettings.values():
for var in ("ARCH", ):
value = settings.get(var)
if value and value.strip():
continue
msg = _("%(var)s is not set... "
"Are you missing the '%(configroot)s%(profile_path)s' symlink? "
"Is the symlink correct? "
"Is your portage tree complete?") % \
{"var": var, "configroot": settings["PORTAGE_CONFIGROOT"],
"profile_path": portage.const.PROFILE_PATH}
out = portage.output.EOutput()
for line in textwrap.wrap(msg, 70):
out.eerror(line)
return 1
return os.EX_OK
def _check_manifests(self):
# Verify all the manifests now so that the user is notified of failure
# as soon as possible.
if "strict" not in self.settings.features or \
"--fetchonly" in self.myopts or \
"--fetch-all-uri" in self.myopts:
return os.EX_OK
shown_verifying_msg = False
quiet_settings = {}
for myroot, pkgsettings in self.pkgsettings.items():
quiet_config = portage.config(clone=pkgsettings)
quiet_config["PORTAGE_QUIET"] = "1"
quiet_config.backup_changes("PORTAGE_QUIET")
quiet_settings[myroot] = quiet_config
del quiet_config
failures = 0
for x in self._mergelist:
if not isinstance(x, Package) or \
x.type_name != "ebuild":
continue
if x.operation == "uninstall":
continue
if not shown_verifying_msg:
shown_verifying_msg = True
self._status_msg("Verifying ebuild manifests")
root_config = x.root_config
portdb = root_config.trees["porttree"].dbapi
quiet_config = quiet_settings[root_config.root]
ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % x.cpv)
quiet_config["O"] = os.path.dirname(ebuild_path)
if not digestcheck([], quiet_config, strict=True):
failures |= 1
if failures:
return 1
return os.EX_OK
def _add_prefetchers(self):
if not self._parallel_fetch:
return
        prefetchers = self._prefetchers
        for pkg in self._mergelist:
            # mergelist can contain solved Blocker instances
            if not isinstance(pkg, Package) or pkg.operation == "uninstall":
                continue
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                # This will start the first prefetcher immediately, so that
                # self._task() won't discard it. This avoids a case where
                # the first prefetcher is discarded, causing the second
                # prefetcher to occupy the fetch queue before the first
                # fetcher has an opportunity to execute.
                prefetchers[pkg] = prefetcher
                self._task_queues.fetch.add(prefetcher)
def _create_prefetcher(self, pkg):
"""
@return: a prefetcher, or None if not applicable
"""
prefetcher = None
if not isinstance(pkg, Package):
pass
elif pkg.type_name == "ebuild":
prefetcher = EbuildFetcher(background=True,
config_pool=self._ConfigPool(pkg.root,
self._allocate_config, self._deallocate_config),
fetchonly=1, logfile=self._fetch_log,
pkg=pkg, prefetch=True, scheduler=self._sched_iface)
elif pkg.type_name == "binary" and \
"--getbinpkg" in self.myopts and \
pkg.root_config.trees["bintree"].isremote(pkg.cpv):
prefetcher = BinpkgPrefetcher(background=True,
pkg=pkg, scheduler=self._sched_iface)
return prefetcher
def _run_pkg_pretend(self):
"""
Since pkg_pretend output may be important, this method sends all
output directly to stdout (regardless of options like --quiet or
--jobs).
"""
failures = 0
# Use a local EventLoop instance here, since we don't
# want tasks here to trigger the usual Scheduler callbacks
# that handle job scheduling and status display.
sched_iface = SchedulerInterface(EventLoop(main=False))
for x in self._mergelist:
if not isinstance(x, Package):
continue
if x.operation == "uninstall":
continue
if x.eapi in ("0", "1", "2", "3"):
continue
if "pretend" not in x.defined_phases:
continue
out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
portage.util.writemsg_stdout(out_str, noiselevel=-1)
root_config = x.root_config
settings = self.pkgsettings[root_config.root]
settings.setcpv(x)
# setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
# have to validate it for each package
rval = _check_temp_dir(settings)
if rval != os.EX_OK:
return rval
build_dir_path = os.path.join(
os.path.realpath(settings["PORTAGE_TMPDIR"]),
"portage", x.category, x.pf)
existing_builddir = os.path.isdir(build_dir_path)
settings["PORTAGE_BUILDDIR"] = build_dir_path
build_dir = EbuildBuildDir(scheduler=sched_iface,
settings=settings)
build_dir.lock()
current_task = None
try:
# Clean up the existing build dir, in case pkg_pretend
# checks for available space (bug #390711).
if existing_builddir:
if x.built:
tree = "bintree"
infloc = os.path.join(build_dir_path, "build-info")
ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
else:
tree = "porttree"
portdb = root_config.trees["porttree"].dbapi
ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
if ebuild_path is None:
raise AssertionError(
"ebuild not found for '%s'" % x.cpv)
portage.package.ebuild.doebuild.doebuild_environment(
ebuild_path, "clean", settings=settings,
db=self.trees[settings['EROOT']][tree].dbapi)
clean_phase = EbuildPhase(background=False,
phase='clean', scheduler=sched_iface, settings=settings)
current_task = clean_phase
clean_phase.start()
clean_phase.wait()
if x.built:
tree = "bintree"
bintree = root_config.trees["bintree"].dbapi.bintree
fetched = False
# Display fetch on stdout, so that it's always clear what
# is consuming time here.
if bintree.isremote(x.cpv):
fetcher = BinpkgFetcher(pkg=x,
scheduler=sched_iface)
fetcher.start()
if fetcher.wait() != os.EX_OK:
failures += 1
continue
fetched = fetcher.pkg_path
verifier = BinpkgVerifier(pkg=x,
scheduler=sched_iface)
current_task = verifier
verifier.start()
if verifier.wait() != os.EX_OK:
failures += 1
continue
if fetched:
bintree.inject(x.cpv, filename=fetched)
tbz2_file = bintree.getname(x.cpv)
infloc = os.path.join(build_dir_path, "build-info")
ensure_dirs(infloc)
portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
else:
tree = "porttree"
portdb = root_config.trees["porttree"].dbapi
ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % x.cpv)
settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
if self._build_opts.buildpkgonly:
settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
else:
settings.configdict["pkg"]["MERGE_TYPE"] = "source"
portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
"pretend", settings=settings,
db=self.trees[settings['EROOT']][tree].dbapi)
prepare_build_dirs(root_config.root, settings, cleanup=0)
vardb = root_config.trees['vartree'].dbapi
settings["REPLACING_VERSIONS"] = " ".join(
set(portage.versions.cpv_getversion(match) \
for match in vardb.match(x.slot_atom) + \
vardb.match('='+x.cpv)))
pretend_phase = EbuildPhase(
phase="pretend", scheduler=sched_iface,
settings=settings)
current_task = pretend_phase
pretend_phase.start()
ret = pretend_phase.wait()
if ret != os.EX_OK:
failures += 1
portage.elog.elog_process(x.cpv, settings)
finally:
if current_task is not None:
if current_task.isAlive():
current_task.cancel()
current_task.wait()
if current_task.returncode == os.EX_OK:
clean_phase = EbuildPhase(background=False,
phase='clean', scheduler=sched_iface,
settings=settings)
clean_phase.start()
clean_phase.wait()
build_dir.unlock()
if failures:
return 1
return os.EX_OK
def merge(self):
if "--resume" in self.myopts:
# We're resuming.
portage.writemsg_stdout(
colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
self._logger.log(" *** Resuming merge...")
self._save_resume_list()
try:
self._background = self._background_mode()
except self._unknown_internal_error:
return 1
rval = self._handle_self_update()
if rval != os.EX_OK:
return rval
for root in self.trees:
root_config = self.trees[root]["root_config"]
# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
# for ensuring sane $PWD (bug #239560) and storing elog messages.
tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
if not tmpdir or not os.path.isdir(tmpdir):
msg = "The directory specified in your " + \
"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
"does not exist. Please create this " + \
"directory or correct your PORTAGE_TMPDIR setting."
msg = textwrap.wrap(msg, 70)
out = portage.output.EOutput()
for l in msg:
out.eerror(l)
return 1
if self._background:
root_config.settings.unlock()
root_config.settings["PORTAGE_BACKGROUND"] = "1"
root_config.settings.backup_changes("PORTAGE_BACKGROUND")
root_config.settings.lock()
self.pkgsettings[root] = portage.config(
clone=root_config.settings)
keep_going = "--keep-going" in self.myopts
fetchonly = self._build_opts.fetchonly
mtimedb = self._mtimedb
failed_pkgs = self._failed_pkgs
rval = self._generate_digests()
if rval != os.EX_OK:
return rval
rval = self._env_sanity_check()
if rval != os.EX_OK:
return rval
# TODO: Immediately recalculate deps here if --keep-going
# is enabled and corrupt manifests are detected.
rval = self._check_manifests()
if rval != os.EX_OK and not keep_going:
return rval
if not fetchonly:
rval = self._run_pkg_pretend()
if rval != os.EX_OK:
return rval
while True:
received_signal = []
def sighandler(signum, frame):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
{"signal":signum})
self.terminate()
received_signal.append(128 + signum)
earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
try:
rval = self._merge()
finally:
# Restore previous handlers
if earlier_sigint_handler is not None:
signal.signal(signal.SIGINT, earlier_sigint_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if earlier_sigterm_handler is not None:
signal.signal(signal.SIGTERM, earlier_sigterm_handler)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
if received_signal:
sys.exit(received_signal[0])
if rval == os.EX_OK or fetchonly or not keep_going:
break
if "resume" not in mtimedb:
break
mergelist = self._mtimedb["resume"].get("mergelist")
if not mergelist:
break
if not failed_pkgs:
break
for failed_pkg in failed_pkgs:
mergelist.remove(list(failed_pkg.pkg))
self._failed_pkgs_all.extend(failed_pkgs)
del failed_pkgs[:]
if not mergelist:
break
if not self._calc_resume_list():
break
clear_caches(self.trees)
if not self._mergelist:
break
self._save_resume_list()
self._pkg_count.curval = 0
self._pkg_count.maxval = len([x for x in self._mergelist \
if isinstance(x, Package) and x.operation == "merge"])
self._status_display.maxval = self._pkg_count.maxval
self._logger.log(" *** Finished. Cleaning up...")
if failed_pkgs:
self._failed_pkgs_all.extend(failed_pkgs)
del failed_pkgs[:]
printer = portage.output.EOutput()
background = self._background
failure_log_shown = False
if background and len(self._failed_pkgs_all) == 1 and \
self.myopts.get('--quiet-fail', 'n') != 'y':
            # If only one package failed then just show its
            # whole log for easy viewing.
failed_pkg = self._failed_pkgs_all[-1]
log_file = None
log_file_real = None
log_path = self._locate_failure_log(failed_pkg)
if log_path is not None:
try:
log_file = open(_unicode_encode(log_path,
encoding=_encodings['fs'], errors='strict'), mode='rb')
except IOError:
pass
else:
if log_path.endswith('.gz'):
log_file_real = log_file
log_file = gzip.GzipFile(filename='',
mode='rb', fileobj=log_file)
if log_file is not None:
try:
for line in log_file:
writemsg_level(line, noiselevel=-1)
except zlib.error as e:
writemsg_level("%s\n" % (e,), level=logging.ERROR,
noiselevel=-1)
finally:
log_file.close()
if log_file_real is not None:
log_file_real.close()
failure_log_shown = True
# Dump mod_echo output now since it tends to flood the terminal.
# This allows us to avoid having more important output, generated
# later, from being swept away by the mod_echo output.
mod_echo_output = _flush_elog_mod_echo()
if background and not failure_log_shown and \
self._failed_pkgs_all and \
self._failed_pkgs_die_msgs and \
not mod_echo_output:
for mysettings, key, logentries in self._failed_pkgs_die_msgs:
root_msg = ""
if mysettings["ROOT"] != "/":
root_msg = " merged to %s" % mysettings["ROOT"]
print()
printer.einfo("Error messages for package %s%s:" % \
(colorize("INFORM", key), root_msg))
print()
for phase in portage.const.EBUILD_PHASES:
if phase not in logentries:
continue
for msgtype, msgcontent in logentries[phase]:
if isinstance(msgcontent, basestring):
msgcontent = [msgcontent]
for line in msgcontent:
printer.eerror(line.strip("\n"))
if self._post_mod_echo_msgs:
for msg in self._post_mod_echo_msgs:
msg()
if len(self._failed_pkgs_all) > 1 or \
(self._failed_pkgs_all and keep_going):
if len(self._failed_pkgs_all) > 1:
msg = "The following %d packages have " % \
len(self._failed_pkgs_all) + \
"failed to build or install:"
else:
msg = "The following package has " + \
"failed to build or install:"
printer.eerror("")
for line in textwrap.wrap(msg, 72):
printer.eerror(line)
printer.eerror("")
for failed_pkg in self._failed_pkgs_all:
# Use unicode_literals to force unicode format string so
# that Package.__unicode__() is called in python2.
msg = " %s" % (failed_pkg.pkg,)
log_path = self._locate_failure_log(failed_pkg)
if log_path is not None:
msg += ", Log file:"
printer.eerror(msg)
if log_path is not None:
printer.eerror(" '%s'" % colorize('INFORM', log_path))
printer.eerror("")
if self._failed_pkgs_all:
return 1
return os.EX_OK
def _elog_listener(self, mysettings, key, logentries, fulltext):
errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
if errors:
self._failed_pkgs_die_msgs.append(
(mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
log_paths = [failed_pkg.build_log]
for log_path in log_paths:
if not log_path:
continue
try:
log_size = os.stat(log_path).st_size
except OSError:
continue
if log_size == 0:
continue
return log_path
return None
def _add_packages(self):
pkg_queue = self._pkg_queue
for pkg in self._mergelist:
if isinstance(pkg, Package):
pkg_queue.append(pkg)
elif isinstance(pkg, Blocker):
pass
def _system_merge_started(self, merge):
"""
Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
In general, this keeps track of installed system packages with
unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
a fragile situation, so we don't execute any unrelated builds until
the circular dependencies are built and installed.
"""
graph = self._digraph
if graph is None:
return
pkg = merge.merge.pkg
# Skip this if $ROOT != / since it shouldn't matter if there
# are unsatisfied system runtime deps in this case.
if pkg.root_config.settings["ROOT"] != "/":
return
completed_tasks = self._completed_tasks
unsatisfied = self._unsatisfied_system_deps
def ignore_non_runtime_or_satisfied(priority):
"""
Ignore non-runtime and satisfied runtime priorities.
"""
if isinstance(priority, DepPriority) and \
not priority.satisfied and \
(priority.runtime or priority.runtime_post):
return False
return True
# When checking for unsatisfied runtime deps, only check
# direct deps since indirect deps are checked when the
# corresponding parent is merged.
for child in graph.child_nodes(pkg,
ignore_priority=ignore_non_runtime_or_satisfied):
if not isinstance(child, Package) or \
child.operation == 'uninstall':
continue
if child is pkg:
continue
if child.operation == 'merge' and \
child not in completed_tasks:
unsatisfied.add(child)
def _merge_wait_exit_handler(self, task):
self._merge_wait_scheduled.remove(task)
self._merge_exit(task)
def _merge_exit(self, merge):
self._running_tasks.pop(id(merge), None)
self._do_merge_exit(merge)
self._deallocate_config(merge.merge.settings)
if merge.returncode == os.EX_OK and \
not merge.merge.pkg.installed:
self._status_display.curval += 1
self._status_display.merges = len(self._task_queues.merge)
self._schedule()
def _do_merge_exit(self, merge):
pkg = merge.merge.pkg
if merge.returncode != os.EX_OK:
settings = merge.merge.settings
build_dir = settings.get("PORTAGE_BUILDDIR")
build_log = settings.get("PORTAGE_LOG_FILE")
self._failed_pkgs.append(self._failed_pkg(
build_dir=build_dir, build_log=build_log,
pkg=pkg,
returncode=merge.returncode))
if not self._terminated_tasks:
self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
self._status_display.failed = len(self._failed_pkgs)
return
self._task_complete(pkg)
pkg_to_replace = merge.merge.pkg_to_replace
if pkg_to_replace is not None:
# When a package is replaced, mark its uninstall
# task complete (if any).
if self._digraph is not None and \
pkg_to_replace in self._digraph:
try:
self._pkg_queue.remove(pkg_to_replace)
except ValueError:
pass
self._task_complete(pkg_to_replace)
else:
self._pkg_cache.pop(pkg_to_replace, None)
if pkg.installed:
return
# Call mtimedb.commit() after each merge so that
# --resume still works after being interrupted
# by reboot, sigkill or similar.
mtimedb = self._mtimedb
mtimedb["resume"]["mergelist"].remove(list(pkg))
if not mtimedb["resume"]["mergelist"]:
del mtimedb["resume"]
mtimedb.commit()
def _build_exit(self, build):
self._running_tasks.pop(id(build), None)
if build.returncode == os.EX_OK and self._terminated_tasks:
# We've been interrupted, so we won't
# add this to the merge queue.
self.curval += 1
self._deallocate_config(build.settings)
elif build.returncode == os.EX_OK:
self.curval += 1
merge = PackageMerge(merge=build)
self._running_tasks[id(merge)] = merge
if not build.build_opts.buildpkgonly and \
build.pkg in self._deep_system_deps:
# Since dependencies on system packages are frequently
# unspecified, merge them only when no builds are executing.
self._merge_wait_queue.append(merge)
merge.addStartListener(self._system_merge_started)
else:
merge.addExitListener(self._merge_exit)
self._task_queues.merge.add(merge)
self._status_display.merges = len(self._task_queues.merge)
else:
settings = build.settings
build_dir = settings.get("PORTAGE_BUILDDIR")
build_log = settings.get("PORTAGE_LOG_FILE")
self._failed_pkgs.append(self._failed_pkg(
build_dir=build_dir, build_log=build_log,
pkg=build.pkg,
returncode=build.returncode))
if not self._terminated_tasks:
self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
self._status_display.failed = len(self._failed_pkgs)
self._deallocate_config(build.settings)
self._jobs -= 1
self._status_display.running = self._jobs
self._schedule()
def _extract_exit(self, build):
self._build_exit(build)
def _task_complete(self, pkg):
self._completed_tasks.add(pkg)
self._unsatisfied_system_deps.discard(pkg)
self._choose_pkg_return_early = False
blocker_db = self._blocker_db[pkg.root]
blocker_db.discardBlocker(pkg)
def _main_loop(self):
term_check_id = self._event_loop.idle_add(self._termination_check)
loadavg_check_id = None
if self._max_load is not None and \
self._loadavg_latency is not None and \
(self._max_jobs is True or self._max_jobs > 1):
# We have to schedule periodically, in case the load
# average has changed since the last call.
loadavg_check_id = self._event_loop.timeout_add(
self._loadavg_latency, self._schedule)
try:
# Populate initial event sources. Unless we're scheduling
# based on load average, we only need to do this once
# here, since it can be called during the loop from within
# event handlers.
self._schedule()
# Loop while there are jobs to be scheduled.
while self._keep_scheduling():
self._event_loop.iteration()
# Clean shutdown of previously scheduled jobs. In the
# case of termination, this allows for basic cleanup
# such as flushing of buffered output to logs.
while self._is_work_scheduled():
self._event_loop.iteration()
finally:
self._event_loop.source_remove(term_check_id)
if loadavg_check_id is not None:
self._event_loop.source_remove(loadavg_check_id)
def _merge(self):
if self._opts_no_background.intersection(self.myopts):
self._set_max_jobs(1)
self._add_prefetchers()
self._add_packages()
failed_pkgs = self._failed_pkgs
portage.locks._quiet = self._background
portage.elog.add_listener(self._elog_listener)
display_timeout_id = None
if self._status_display._isatty and not self._status_display.quiet:
display_timeout_id = self._event_loop.timeout_add(
self._max_display_latency, self._status_display.display)
rval = os.EX_OK
try:
self._main_loop()
finally:
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog.remove_listener(self._elog_listener)
if display_timeout_id is not None:
self._event_loop.source_remove(display_timeout_id)
if failed_pkgs:
rval = failed_pkgs[-1].returncode
return rval
def _main_loop_cleanup(self):
del self._pkg_queue[:]
self._completed_tasks.clear()
self._deep_system_deps.clear()
self._unsatisfied_system_deps.clear()
self._choose_pkg_return_early = False
self._status_display.reset()
self._digraph = None
self._task_queues.fetch.clear()
self._prefetchers.clear()
def _choose_pkg(self):
"""
Choose a task that has all its dependencies satisfied. This is used
for parallel build scheduling, and ensures that we don't build
anything with deep dependencies that have yet to be merged.
"""
if self._choose_pkg_return_early:
return None
if self._digraph is None:
if self._is_work_scheduled() and \
not ("--nodeps" in self.myopts and \
(self._max_jobs is True or self._max_jobs > 1)):
self._choose_pkg_return_early = True
return None
return self._pkg_queue.pop(0)
if not self._is_work_scheduled():
return self._pkg_queue.pop(0)
self._prune_digraph()
chosen_pkg = None
# Prefer uninstall operations when available.
graph = self._digraph
for pkg in self._pkg_queue:
if pkg.operation == 'uninstall' and \
not graph.child_nodes(pkg):
chosen_pkg = pkg
break
if chosen_pkg is None:
later = set(self._pkg_queue)
for pkg in self._pkg_queue:
later.remove(pkg)
if not self._dependent_on_scheduled_merges(pkg, later):
chosen_pkg = pkg
break
if chosen_pkg is not None:
self._pkg_queue.remove(chosen_pkg)
if chosen_pkg is None:
# There's no point in searching for a package to
# choose until at least one of the existing jobs
# completes.
self._choose_pkg_return_early = True
return chosen_pkg
def _dependent_on_scheduled_merges(self, pkg, later):
"""
Traverse the subgraph of the given package's deep dependencies
to see if it contains any scheduled merges.
@param pkg: a package to check dependencies for
@type pkg: Package
@param later: packages for which dependence should be ignored
since they will be merged later than pkg anyway and therefore
delaying the merge of pkg will not result in a more optimal
merge order
@type later: set
@rtype: bool
@return: True if the package is dependent, False otherwise.
"""
graph = self._digraph
completed_tasks = self._completed_tasks
dependent = False
traversed_nodes = set([pkg])
direct_deps = graph.child_nodes(pkg)
node_stack = direct_deps
direct_deps = frozenset(direct_deps)
while node_stack:
node = node_stack.pop()
if node in traversed_nodes:
continue
traversed_nodes.add(node)
if not ((node.installed and node.operation == "nomerge") or \
(node.operation == "uninstall" and \
node not in direct_deps) or \
node in completed_tasks or \
node in later):
dependent = True
break
# Don't traverse children of uninstall nodes since
# those aren't dependencies in the usual sense.
if node.operation != "uninstall":
node_stack.extend(graph.child_nodes(node))
return dependent
def _allocate_config(self, root):
"""
Allocate a unique config instance for a task in order
to prevent interference between parallel tasks.
"""
if self._config_pool[root]:
temp_settings = self._config_pool[root].pop()
else:
temp_settings = portage.config(clone=self.pkgsettings[root])
# Since config.setcpv() isn't guaranteed to call config.reset() (for
# performance reasons), call it here to make sure all settings from the
# previous package get flushed out (such as PORTAGE_LOG_FILE).
temp_settings.reload()
temp_settings.reset()
return temp_settings
def _deallocate_config(self, settings):
self._config_pool[settings['EROOT']].append(settings)
def _keep_scheduling(self):
return bool(not self._terminated.is_set() and self._pkg_queue and \
not (self._failed_pkgs and not self._build_opts.fetchonly))
def _is_work_scheduled(self):
return bool(self._running_tasks)
def _running_job_count(self):
return self._jobs
def _schedule_tasks(self):
while True:
state_change = 0
# When the number of jobs and merges drops to zero,
# process a single merge from _merge_wait_queue if
# it's not empty. We only process one since these are
# special packages and we want to ensure that
# parallel-install does not cause more than one of
# them to install at the same time.
if (self._merge_wait_queue and not self._jobs and
not self._task_queues.merge):
task = self._merge_wait_queue.popleft()
task.addExitListener(self._merge_wait_exit_handler)
self._merge_wait_scheduled.append(task)
self._task_queues.merge.add(task)
self._status_display.merges = len(self._task_queues.merge)
state_change += 1
if self._schedule_tasks_imp():
state_change += 1
self._status_display.display()
# Cancel prefetchers if they're the only reason
# the main poll loop is still running.
if self._failed_pkgs and not self._build_opts.fetchonly and \
not self._is_work_scheduled() and \
self._task_queues.fetch:
# Since this happens asynchronously, it doesn't count in
# state_change (counting it triggers an infinite loop).
self._task_queues.fetch.clear()
if not (state_change or \
(self._merge_wait_queue and not self._jobs and
not self._task_queues.merge)):
break
def _job_delay(self):
"""
@rtype: bool
@return: True if job scheduling should be delayed, False otherwise.
"""
if self._jobs and self._max_load is not None:
current_time = time.time()
try:
avg1, avg5, avg15 = getloadavg()
except OSError:
return False
delay = self._job_delay_max * avg1 / self._max_load
if delay > self._job_delay_max:
delay = self._job_delay_max
elapsed_seconds = current_time - self._previous_job_start_time
# elapsed_seconds < 0 means the system clock has been adjusted
if elapsed_seconds > 0 and elapsed_seconds < delay:
self._event_loop.timeout_add(
1000 * (delay - elapsed_seconds), self._schedule_once)
return True
return False
def _schedule_once(self):
self._schedule()
return False
def _schedule_tasks_imp(self):
"""
@rtype: bool
@return: True if state changed, False otherwise.
"""
state_change = 0
while True:
if not self._keep_scheduling():
return bool(state_change)
if self._choose_pkg_return_early or \
self._merge_wait_scheduled or \
(self._jobs and self._unsatisfied_system_deps) or \
not self._can_add_job() or \
self._job_delay():
return bool(state_change)
pkg = self._choose_pkg()
if pkg is None:
return bool(state_change)
state_change += 1
if not pkg.installed:
self._pkg_count.curval += 1
task = self._task(pkg)
if pkg.installed:
merge = PackageMerge(merge=task)
self._running_tasks[id(merge)] = merge
merge.addExitListener(self._merge_exit)
self._task_queues.merge.addFront(merge)
elif pkg.built:
self._jobs += 1
self._previous_job_start_time = time.time()
self._status_display.running = self._jobs
self._running_tasks[id(task)] = task
task.addExitListener(self._extract_exit)
self._task_queues.jobs.add(task)
else:
self._jobs += 1
self._previous_job_start_time = time.time()
self._status_display.running = self._jobs
self._running_tasks[id(task)] = task
task.addExitListener(self._build_exit)
self._task_queues.jobs.add(task)
return bool(state_change)
def _task(self, pkg):
pkg_to_replace = None
if pkg.operation != "uninstall":
vardb = pkg.root_config.trees["vartree"].dbapi
previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
if portage.cpv_getkey(x) == pkg.cp]
if not previous_cpv and vardb.cpv_exists(pkg.cpv):
# same cpv, different SLOT
previous_cpv = [pkg.cpv]
if previous_cpv:
previous_cpv = previous_cpv.pop()
pkg_to_replace = self._pkg(previous_cpv,
"installed", pkg.root_config, installed=True,
operation="uninstall")
try:
prefetcher = self._prefetchers.pop(pkg, None)
except KeyError:
# KeyError observed with PyPy 1.8, despite None given as default.
# Note that PyPy 1.8 has the same WeakValueDictionary code as
# CPython 2.7, so it may be possible for CPython to raise KeyError
# here as well.
prefetcher = None
if prefetcher is not None and not prefetcher.isAlive():
try:
self._task_queues.fetch._task_queue.remove(prefetcher)
except ValueError:
pass
prefetcher = None
task = MergeListItem(args_set=self._args_set,
background=self._background, binpkg_opts=self._binpkg_opts,
build_opts=self._build_opts,
config_pool=self._ConfigPool(pkg.root,
self._allocate_config, self._deallocate_config),
emerge_opts=self.myopts,
find_blockers=self._find_blockers(pkg), logger=self._logger,
mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
pkg_to_replace=pkg_to_replace,
prefetcher=prefetcher,
scheduler=self._sched_iface,
settings=self._allocate_config(pkg.root),
statusMessage=self._status_msg,
world_atom=self._world_atom)
return task
def _failed_pkg_msg(self, failed_pkg, action, preposition):
pkg = failed_pkg.pkg
msg = "%s to %s %s" % \
(bad("Failed"), action, colorize("INFORM", pkg.cpv))
if pkg.root_config.settings["ROOT"] != "/":
msg += " %s %s" % (preposition, pkg.root)
log_path = self._locate_failure_log(failed_pkg)
if log_path is not None:
msg += ", Log file:"
self._status_msg(msg)
if log_path is not None:
self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
"""
Display a brief status message (no newlines) in the status display.
This is called by tasks to provide feedback to the user. It
delegates the generation of \r and \n control characters to the
status display, which guarantees that lines are created or erased
when necessary and appropriate.
@type msg: str
@param msg: a brief status message (no newlines allowed)
"""
if not self._background:
writemsg_level("\n")
self._status_display.displayMessage(msg)
def _save_resume_list(self):
"""
Do this before verifying the ebuild Manifests since it might
be possible for the user to use --resume --skipfirst to get past
a non-essential package with a broken digest.
"""
mtimedb = self._mtimedb
mtimedb["resume"] = {}
# Stored as a dict starting with portage-2.1.6_rc1, and supported
# by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
# a list type for options.
mtimedb["resume"]["myopts"] = self.myopts.copy()
# Convert Atom instances to plain str.
mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
mtimedb["resume"]["mergelist"] = [list(x) \
for x in self._mergelist \
if isinstance(x, Package) and x.operation == "merge"]
mtimedb.commit()
def _calc_resume_list(self):
"""
Use the current resume list to calculate a new one,
dropping any packages with unsatisfied deps.
@rtype: bool
@return: True if successful, False otherwise.
"""
print(colorize("GOOD", "*** Resuming merge..."))
# free some memory before creating
# the resume depgraph
self._destroy_graph()
myparams = create_depgraph_params(self.myopts, None)
success = False
e = None
try:
success, mydepgraph, dropped_tasks = resume_depgraph(
self.settings, self.trees, self._mtimedb, self.myopts,
myparams, self._spinner)
except depgraph.UnsatisfiedResumeDep as exc:
# rename variable to avoid python-3.0 error:
# SyntaxError: can not delete variable 'e' referenced in nested
# scope
e = exc
mydepgraph = e.depgraph
dropped_tasks = {}
if e is not None:
def unsatisfied_resume_dep_msg():
mydepgraph.display_problems()
out = portage.output.EOutput()
out.eerror("One or more packages are either masked or " + \
"have missing dependencies:")
out.eerror("")
indent = " "
show_parents = set()
for dep in e.value:
if dep.parent in show_parents:
continue
show_parents.add(dep.parent)
if dep.atom is None:
out.eerror(indent + "Masked package:")
out.eerror(2 * indent + str(dep.parent))
out.eerror("")
else:
out.eerror(indent + str(dep.atom) + " pulled in by:")
out.eerror(2 * indent + str(dep.parent))
out.eerror("")
msg = "The resume list contains packages " + \
"that are either masked or have " + \
"unsatisfied dependencies. " + \
"Please restart/continue " + \
"the operation manually, or use --skipfirst " + \
"to skip the first package in the list and " + \
"any other packages that may be " + \
"masked or have missing dependencies."
for line in textwrap.wrap(msg, 72):
out.eerror(line)
self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
return False
if success and self._show_list():
mydepgraph.display(mydepgraph.altlist(), favorites=self._favorites)
if not success:
self._post_mod_echo_msgs.append(mydepgraph.display_problems)
return False
mydepgraph.display_problems()
self._init_graph(mydepgraph.schedulerGraph())
msg_width = 75
for task, atoms in dropped_tasks.items():
if not (isinstance(task, Package) and task.operation == "merge"):
continue
pkg = task
msg = "emerge --keep-going:" + \
" %s" % (pkg.cpv,)
if pkg.root_config.settings["ROOT"] != "/":
msg += " for %s" % (pkg.root,)
if not atoms:
msg += " dropped because it is masked or unavailable"
else:
msg += " dropped because it requires %s" % ", ".join(atoms)
for line in textwrap.wrap(msg, msg_width):
eerror(line, phase="other", key=pkg.cpv)
settings = self.pkgsettings[pkg.root]
# Ensure that log collection from $T is disabled inside
# elog_process(), since any logs that might exist are
# not valid here.
settings.pop("T", None)
portage.elog.elog_process(pkg.cpv, settings)
self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
return True
def _show_list(self):
myopts = self.myopts
if "--quiet" not in myopts and \
("--ask" in myopts or "--tree" in myopts or \
"--verbose" in myopts):
return True
return False
def _world_atom(self, pkg):
"""
Add or remove the package to the world file, but only if
it's supposed to be added or removed. Otherwise, do nothing.
"""
if set(("--buildpkgonly", "--fetchonly",
"--fetch-all-uri",
"--oneshot", "--onlydeps",
"--pretend")).intersection(self.myopts):
return
if pkg.root != self.target_root:
return
args_set = self._args_set
if not args_set.findAtomForPackage(pkg):
return
logger = self._logger
pkg_count = self._pkg_count
root_config = pkg.root_config
world_set = root_config.sets["selected"]
world_locked = False
atom = None
if pkg.operation != "uninstall":
# Do this before acquiring the lock, since it queries the
# portdbapi which can call the global event loop, triggering
# a concurrent call to this method or something else that
# needs an exclusive (non-reentrant) lock on the world file.
atom = create_world_atom(pkg, args_set, root_config)
try:
if hasattr(world_set, "lock"):
world_set.lock()
world_locked = True
if hasattr(world_set, "load"):
world_set.load() # maybe it's changed on disk
if pkg.operation == "uninstall":
if hasattr(world_set, "cleanPackage"):
world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
pkg.cpv)
if hasattr(world_set, "remove"):
for s in pkg.root_config.setconfig.active:
world_set.remove(SETPREFIX+s)
else:
if atom is not None:
if hasattr(world_set, "add"):
self._status_msg(('Recording %s in "world" ' + \
'favorites file...') % atom)
logger.log(" === (%s of %s) Updating world file (%s)" % \
(pkg_count.curval, pkg_count.maxval, pkg.cpv))
world_set.add(atom)
else:
writemsg_level('\n!!! Unable to record %s in "world"\n' % \
(atom,), level=logging.WARN, noiselevel=-1)
finally:
if world_locked:
world_set.unlock()
def _pkg(self, cpv, type_name, root_config, installed=False,
operation=None, myrepo=None):
"""
Get a package instance from the cache, or create a new
one if necessary. Raises KeyError from aux_get if it
failures for some reason (package does not exist or is
corrupt).
"""
# Reuse existing instance when available.
pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
type_name=type_name, repo_name=myrepo, root_config=root_config,
installed=installed, operation=operation))
if pkg is not None:
return pkg
tree_type = depgraph.pkg_tree_map[type_name]
db = root_config.trees[tree_type].dbapi
db_keys = list(self.trees[root_config.root][
tree_type].dbapi._aux_cache_keys)
metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
pkg = Package(built=(type_name != "ebuild"),
cpv=cpv, installed=installed, metadata=metadata,
root_config=root_config, type_name=type_name)
self._pkg_cache[pkg] = pkg
return pkg
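# Illustrative sketch (not part of the upstream class): the throttle in
# Scheduler._job_delay above scales a maximum delay by the 1-minute load
# average relative to --load-average and caps the result. Standalone, with
# assumed argument names:
#
#   def job_delay_seconds(avg1, max_load, job_delay_max):
#       return min(job_delay_max * avg1 / max_load, job_delay_max)
#
# e.g. job_delay_seconds(avg1=2.0, max_load=4.0, job_delay_max=5.0) -> 2.5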
|
funtoo/portage-funtoo
|
pym/_emerge/Scheduler.py
|
Python
|
gpl-2.0
| 61,389
|
#!/usr/bin/env python
#
# TestSystem
#
# Tests the System class
#
# Author P G Jones - 2014-02-15 <p.g.jones@qmul.ac.uk> : First revision
####################################################################################################
import unittest
import nusoft.system.standard
import os
class TestSystem(unittest.TestCase):
def setUp(self):
super(TestSystem, self).setUp()
self._system = nusoft.system.standard.Standard(os.getcwd())
def test_remove(self):
""" Test the system removes files.
First create a temporary empty file in the temporary path then remove it.
"""
test_file = os.path.join(self._system.get_temporary_path(), "nusoft.test")
with open(test_file, 'a'):
os.utime(test_file, None)
self.assertTrue(os.path.exists(test_file))
self._system.remove(test_file)
self.assertFalse(os.path.exists(test_file))
def test_download(self):
""" Test the system can download files.
Try to download a small file.
"""
test_file = os.path.join(self._system.get_temporary_path(), "nusoft.test")
self._system.download("http://www.github.com", name=test_file)
self.assertTrue(os.path.exists(test_file))
os.remove(test_file)
def test_untar(self):
""" Test the system can untar files.
Download a small tar and untar it.
"""
def test_exists(self):
""" Test the system believes files exist"""
self.assertTrue(os.path.exists(__file__) == self._system.exists(__file__))
if __name__ == '__main__':
unittest.main()
|
pgjones/nusoft
|
nusoft/test/test_system.py
|
Python
|
mit
| 1,639
|
# encoding: utf-8
import datetime
import os
from south.db import db
from south.v2 import SchemaMigration
from django.conf import settings
from panda.models import DataUpload
from panda import utils
class Migration(SchemaMigration):
def get_path(self, data_upload):
"""
From BaseUpload abstract model.
"""
return os.path.join(settings.MEDIA_ROOT, data_upload.filename)
def dialect_as_parameters(self, data_upload):
"""
From DataUpload model.
"""
dialect_params = {}
# This code is absolutely terrifying
# (Also, it works.)
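# For the curious (illustrative, Python 2 only): a stored dialect value
# such as '\\t' (backslash + t, two characters) comes back as a real
# tab '\t' after .decode('string_escape').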
for k, v in data_upload.dialect.items():
if isinstance(v, basestring):
dialect_params[k] = v.decode('string_escape')
else:
dialect_params[k] = v
return dialect_params
def forwards(self, orm):
# Adding field 'DataUpload.guessed_types'
db.add_column('panda_dataupload', 'guessed_types', self.gf('panda.fields.JSONField')(null=True), keep_default=False)
db.commit_transaction() # Commit the first transaction
db.start_transaction() # Start the second, committed on completion
if not db.dry_run:
for data_upload in orm.DataUpload.objects.all():
path = self.get_path(data_upload)
try:
data_upload.guessed_types = utils.guess_column_types(data_upload.data_type, path, self.dialect_as_parameters(data_upload), encoding=data_upload.encoding)
data_upload.save()
except IOError:
# File does not exist on disk
continue
def backwards(self, orm):
# Deleting field 'DataUpload.guessed_types'
db.delete_column('panda_dataupload', 'guessed_types')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'panda.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256', 'db_index': 'True'})
},
'panda.dataset': {
'Meta': {'ordering': "['-creation_date']", 'object_name': 'Dataset'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['panda.Category']"}),
'columns': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': "orm['auth.User']"}),
'current_task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['panda.TaskStatus']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_upload': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initial_upload_for'", 'null': 'True', 'to': "orm['panda.DataUpload']"}),
'last_modification': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'row_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sample_data': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256', 'db_index': 'True'})
},
'panda.dataupload': {
'Meta': {'ordering': "['creation_date']", 'object_name': 'DataUpload'},
'columns': ('panda.fields.JSONField', [], {'null': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data_uploads'", 'null': 'True', 'to': "orm['panda.Dataset']"}),
'dialect': ('panda.fields.JSONField', [], {'null': 'True'}),
'encoding': ('django.db.models.fields.CharField', [], {'default': "'utf-8'", 'max_length': '32'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'guessed_types': ('panda.fields.JSONField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'sample_data': ('panda.fields.JSONField', [], {'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'panda.export': {
'Meta': {'ordering': "['creation_date']", 'object_name': 'Export'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exports'", 'to': "orm['panda.Dataset']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'panda.notification': {
'Meta': {'ordering': "['-sent_at']", 'object_name': 'Notification'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'read_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications'", 'to': "orm['auth.User']"}),
'related_dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['panda.Dataset']", 'null': 'True'}),
'related_task': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['panda.TaskStatus']", 'null': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Info'", 'max_length': '16'})
},
'panda.relatedupload': {
'Meta': {'ordering': "['creation_date']", 'object_name': 'RelatedUpload'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_uploads'", 'to': "orm['panda.Dataset']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'panda.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'null': 'True', 'to': "orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '50'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'traceback': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'panda.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['panda']
|
datadesk/panda
|
panda/migrations/0003_auto__add_field_dataupload_guessed_types.py
|
Python
|
mit
| 12,910
|
# -*- coding: utf-8 -*-
"""
Definitions for common aid configurations
Rename to annot_cfgdef
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import numpy as np # NOQA
print, rrr, profile = ut.inject2(__name__, '[aidcfg]')
# easier to type names to alias some of these options
ALIAS_KEYS = {
#'aids' : 'default_aids',
'pername' : 'sample_per_name',
'offset' : 'sample_offset',
'refrule' : 'sample_rule_ref',
'rule' : 'sample_rule',
'size' : 'sample_size',
'mingt' : 'min_pername',
'excluderef': 'exclude_reference',
}
INDEPENDENT_DEFAULTS_PARAM_INFO = [
ut.ParamInfo('reviewed', None, valid_values=[True, False, None]),
ut.ParamInfo('minqual', None, valid_values=[None, 'junk', 'poor', 'ok',
'good', 'excellent']),
ut.ParamInfo('multiple', None, valid_values=[True, False, None]),
ut.ParamInfo('species', None),
ut.ParamInfo('view', None), # TODO: allow for lists
ut.ParamInfo('require_quality', None, valid_values=[True, False, None]),
ut.ParamInfo('require_viewpoint', None, valid_values=[True, False, None]),
ut.ParamInfo('is_exemplar', None, valid_values=[True, False, None]),
ut.ParamInfo('min_pername_global', None, type_=int, min_=0,
help_='Keep annot if it has at least this many global names'),
ut.ParamInfo('max_pername_global', None, type_=int, min_=0,
help_='Keep annot if it has at most this many global names'),
ut.ParamInfo('min_unixtime', None, type_=float, min_=0,
help_='Remove anything before this timestamp'),
ut.ParamInfo('max_unixtime', None, type_=float, min_=0,
help_='Remove anything after this timestamp'),
#ut.ParamInfo('view', None),
]
INTRAGROUP_DEFAULTS_PARAM_INFO = [
ut.ParamInfo('min_pername', None, type_=int, min_=0,
help_='Keeps names with at least this number of aids within the group'),
ut.ParamInfo('max_pername', None, type_=int, min_=0,
help_='Keeps names with at most this number of aids within the group'),
]
SAMPLE_DEFAULTS_PARAM_INFO = [
ut.ParamInfo('sample_per_name', None, type_=int, min_=0,
help_='Take this many annots per name'),
ut.ParamInfo('sample_rule', 'random', valid_values=['random', 'mintime', 'maxtime', 'qual_and_view'],
help_='Method of sampling from names'),
ut.ParamInfo('sample_seed', 0, type_=int, none_ok=True,
help_='Random seed for sampling from names'),
]
SUBINDEX_DEFAULTS_PARAM_INFO = [
ut.ParamInfo('index', None),
]
OTHER_DEFAULTS = {
# forces a consistent sample size across combinations
'force_const_size' : None,
#'hack_extra' : None, # hack param to make bigger db sizes
#'hack_imageset': None,
# Hack out errors in test data
'hackerrors' : True,
'joinme' : None,
}
# Defaults for the independent filter
# These filters are orderless
INDEPENDENT_DEFAULTS = {
#'species' : 'primary', # specify the species
#'species' : None,
# Timedelta Params
'require_timestamp' : None,
'contrib_contains' : None,
# Quality Params
#'require_quality' : None, # if True unknown qualities are removed
#'minqual' : 'poor',
'minqual' : None,
'been_adjusted' : None, # HACK PARAM
# Viewpoint params
#'require_viewpoint' : None,
#'view' : None,
'view_ext' : 0, # num viewpoints to extend in dir1 and dir2
'view_ext1' : None, # num viewpoints to extend in dir1
'view_ext2' : None, # num viewpoints to extend in dir2
'is_known' : None,
# minimum number of features detected by default config
'min_numfeat' : None,
# maximum number of features detected by default config
'max_numfeat' : None,
'reviewed' : None,
'multiple' : None,
}
# HACK
from ibeis import tag_funcs # NOQA
# Build Filters
filter_keys = ut.get_func_kwargs(tag_funcs.filterflags_general_tags)
for key in filter_keys:
INDEPENDENT_DEFAULTS[key] = None
for pi in INDEPENDENT_DEFAULTS_PARAM_INFO:
INDEPENDENT_DEFAULTS[pi.varname] = pi.default
INTRAGROUP_DEFAULTS = {
# if True all annots must belong to the same imageset
'same_imageset' : None,
'view_pername' : None, # formatted string filtering the viewpoints
'min_timedelta' : None,
# minimum number of aids for each name in sample
#'min_pername' : None,
#'max_pername' : None,
'min_spacedelta' : None,
'min_spacetimedelta' : None,
}
for pi in INTRAGROUP_DEFAULTS_PARAM_INFO:
INTRAGROUP_DEFAULTS[pi.varname] = pi.default
# HACK
INDEPENDENT_DEFAULTS.update(INTRAGROUP_DEFAULTS) # hack
SUBINDEX_DEFAULTS = {
# Final indexing
'shuffle' : False, # randomize order before indexing
#'index' : None, # choose only a subset
}
for pi in SUBINDEX_DEFAULTS_PARAM_INFO:
SUBINDEX_DEFAULTS[pi.varname] = pi.default
SAMPLE_DEFAULTS = {
'sample_size' : None,
'num_names' : None,
# Gets as close to the sample size as possible without removing other props
# Per Name / Exemplar Params
#'sample_per_name' : None, # Choose num_annots to sample from each name.
#'sample_rule' : 'random',
'sample_offset' : None, # UNUSED
'occur_offset' : None, # UNUSED
'name_offset' : None, # UNUSED
'sample_occur' : None,
}
for pi in SAMPLE_DEFAULTS_PARAM_INFO:
SAMPLE_DEFAULTS[pi.varname] = pi.default
SAMPLE_REF_DEFAULTS = {
# excludes any aids specified in a reference set (ie qaids)
'exclude_reference' : None,
'sample_rule_ref' : 'random',
# when sampling daids, choose this many correct matches per query
'sample_per_ref_name' : None,
}
# Base common settings, but some default settings will be different
# for query and database annotations
DEFAULT_AIDCFG = ut.merge_dicts(OTHER_DEFAULTS, INDEPENDENT_DEFAULTS,
SAMPLE_DEFAULTS, SAMPLE_REF_DEFAULTS,
SUBINDEX_DEFAULTS)
__default_aidcfg = DEFAULT_AIDCFG
def compress_aidcfg(acfg, filter_nones=False, filter_empty=False, force_noncommon=[]):
r"""
Args:
acfg (dict):
Returns:
dict: acfg
CommandLine:
#python -m ibeis --tf compress_aidcfg
python -m ibeis.expt.annotation_configs --exec-compress_aidcfg --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.expt.annotation_configs import * # NOQA
>>> acfg = default
>>> acfg = compress_aidcfg(acfg)
>>> result = ('acfg = %s' % (ut.dict_str(acfg),))
>>> print(default)
>>> print(result)
"""
import copy
if 'qcfg' not in acfg or 'dcfg' not in acfg:
return acfg
acfg = copy.deepcopy(acfg)
common_cfg = ut.dict_intersection(acfg['qcfg'], acfg['dcfg'])
ut.delete_keys(common_cfg, force_noncommon)
ut.delete_keys(acfg['qcfg'], common_cfg.keys())
ut.delete_keys(acfg['dcfg'], common_cfg.keys())
acfg['common'] = common_cfg
if filter_nones:
acfg['common'] = ut.dict_filter_nones(acfg['common'])
acfg['qcfg'] = ut.dict_filter_nones(acfg['qcfg'])
acfg['dcfg'] = ut.dict_filter_nones(acfg['dcfg'])
if filter_empty:
if len(acfg['common']) == 0:
del acfg['common']
if len(acfg['qcfg']) == 0:
del acfg['qcfg']
if len(acfg['dcfg']) == 0:
del acfg['dcfg']
return acfg
def partition_acfg_list(acfg_list):
for acfg in acfg_list:
assert acfg['qcfg']['_cfgname'] == acfg['dcfg']['_cfgname'], (
'should be the same for now')
# Hack to make common params between q and d appear the same
_acfg_list = [compress_aidcfg(acfg) for acfg in acfg_list]
flat_acfg_list = flatten_acfg_list(_acfg_list)
tup = ut.partition_varied_cfg_list(flat_acfg_list)
flat_nonvaried_dict, flat_varied_acfg_list = tup
nonvaried_dict = unflatten_acfgdict(flat_nonvaried_dict)
varied_acfg_list = [unflatten_acfgdict(acfg)
for acfg in flat_varied_acfg_list]
return nonvaried_dict, varied_acfg_list
def get_varied_acfg_labels(acfg_list, mainkey='_cfgname', checkname=False):
"""
>>> from ibeis.expt.annotation_configs import * # NOQA
"""
#print(ut.list_str(varied_acfg_list, nl=2))
for acfg in acfg_list:
assert acfg['qcfg'].get(mainkey, '') == acfg['dcfg'].get(mainkey, ''), (
'should be the same for now')
cfgname_list = [acfg['qcfg'].get(mainkey, '') for acfg in acfg_list]
if checkname and ut.allsame(cfgname_list):
cfgname_list = [None] * len(cfgname_list)
# Hack to make common params between q and d appear the same
_acfg_list = [compress_aidcfg(acfg) for acfg in acfg_list]
flat_acfg_list = flatten_acfg_list(_acfg_list)
nonvaried_dict, varied_acfg_list = ut.partition_varied_cfg_list(
flat_acfg_list)
SUPER_HACK = True
if SUPER_HACK:
# SUPER HACK: recompress and remake the varied list after knowing what is varied
_varied_keys = list(set(ut.flatten(
[list(ut.flatten(
[list(x.keys())
for x in unflatten_acfgdict(cfg).values()]
)) for cfg in varied_acfg_list]
)))
_acfg_list = [
compress_aidcfg(acfg, force_noncommon=_varied_keys)
for acfg in acfg_list]
flat_acfg_list = flatten_acfg_list(_acfg_list)
nonvaried_dict, varied_acfg_list = ut.partition_varied_cfg_list(
flat_acfg_list)
shortened_cfg_list = [
#{shorten_to_alias_labels(key): val for key, val in _dict.items()}
ut.map_dict_keys(shorten_to_alias_labels, _dict)
for _dict in varied_acfg_list]
nonlbl_keys = ut.INTERNAL_CFGKEYS
nonlbl_keys = [prefix + key for key in nonlbl_keys
for prefix in ['', 'q', 'd']]
# hack for sorting by q/d stuff first
def get_key_order(cfg):
keys = [k for k in cfg.keys() if k not in nonlbl_keys]
sortorder = [2 * k.startswith('q') + 1 * k.startswith('d')
for k in keys]
return ut.sortedby(keys, sortorder)[::-1]
cfglbl_list = [
ut.get_cfg_lbl(cfg, name, nonlbl_keys, key_order=get_key_order(cfg))
for cfg, name in zip(shortened_cfg_list, cfgname_list)]
if checkname:
cfglbl_list = [x.lstrip(':') for x in cfglbl_list]
return cfglbl_list
def shorten_to_alias_labels(key):
if key is None:
return None
search_list = list(ALIAS_KEYS.values()) + ['qcfg_', 'dcfg_', 'common_']
repl_list = list(ALIAS_KEYS.keys()) + ['q', 'd', '']
return ut.multi_replace(key, search_list, repl_list)
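# e.g. (illustrative): shorten_to_alias_labels('qcfg_sample_per_name')
# returns 'qpername' via the ALIAS_KEYS and q/d prefix substitutions above.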
def flatten_acfg_list(acfg_list):
"""
Returns a new config where subconfig params are prefixed by subconfig keys
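Example (illustrative sketch; dict key order may vary):
>>> flatten_acfg_list([{'qcfg': {'view': 'left'}, 'dcfg': {'view': 'right'}}])
[{'qcfg_view': 'left', 'dcfg_view': 'right'}]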
"""
flat_acfg_list = []
for acfg in acfg_list:
flat_dict = {
prefix + '_' + key: val
for prefix, subdict in acfg.items()
for key, val in subdict.items()
}
flat_acfg_list.append(flat_dict)
return flat_acfg_list
def compress_acfg_list_for_printing(acfg_list):
r"""
CommandLine:
python -m ibeis --tf compress_acfg_list_for_printing
Example:
>>> from ibeis.expt.annotation_configs import * # NOQA
>>> qcfg_list = [{'f': 1, 'b': 1}, {'f': 2, 'b': 1}, {'f': 3, 'b': 1, 'z': 4}]
>>> acfg_list = [{'qcfg': qcfg} for qcfg in qcfg_list]
>>> nonvaried_dict, varied_dicts = compress_acfg_list_for_printing(acfg_list)
>>> result = ('varied_dicts = %s\n' % (ut.list_str(varied_dicts),))
>>> result += ('nonvaried_dict = %s' % (ut.dict_str(nonvaried_dict),))
>>> print(result)
"""
flat_acfg_list = flatten_acfg_list(acfg_list)
tup = ut.partition_varied_cfg_list(flat_acfg_list)
nonvaried_dict, varied_acfg_list = tup
nonvaried_compressed_dict = compress_aidcfg(
unflatten_acfgdict(nonvaried_dict), filter_nones=True)
varied_compressed_dict_list = [
compress_aidcfg(unflatten_acfgdict(cfg), filter_empty=True)
for cfg in varied_acfg_list]
return nonvaried_compressed_dict, varied_compressed_dict_list
def print_acfg_list(acfg_list, expanded_aids_list=None, ibs=None,
combined=False, **kwargs):
r"""
Args:
acfg_list (list):
expanded_aids_list (list): (default = None)
ibs (IBEISController): ibeis controller object (default = None)
combined (bool): (default = False)
CommandLine:
python -m ibeis.expt.annotation_configs --exec-print_acfg_list --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.expt.annotation_configs import * # NOQA
>>> import ibeis
>>> acfg_list = '?'
>>> expanded_aids_list = None
>>> ibs = None
>>> combined = False
>>> result = print_acfg_list(acfg_list, expanded_aids_list, ibs, combined)
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> ut.show_if_requested()
"""
_tup = compress_acfg_list_for_printing(acfg_list)
nonvaried_compressed_dict, varied_compressed_dict_list = _tup
ut.colorprint('+=== <Info acfg_list> ===', 'white')
#print('Printing acfg_list info. len(acfg_list) = %r' % (len(acfg_list),))
print('non-varied aidcfg = ' + ut.dict_str(nonvaried_compressed_dict))
seen_ = ut.ddict(list)
# get default kwkeys for annot info
if ibs is not None:
annotstats_kw = kwargs.copy()
kwkeys = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict)
annotstats_kw.update(ut.argparse_dict(
dict(zip(kwkeys, [None] * len(kwkeys))), only_specified=True))
hashid_list = []
for acfgx in range(len(acfg_list)):
acfg = acfg_list[acfgx]
title = ('q_cfgname=' + acfg['qcfg']['_cfgname'] +
' d_cfgname=' + acfg['dcfg']['_cfgname'])
ut.colorprint('+--- acfg %d / %d -- %s ---- ' %
(acfgx + 1, len(acfg_list), title), 'lightgray')
print('acfg = ' + ut.dict_str(varied_compressed_dict_list[acfgx],
strvals=True))
if expanded_aids_list is not None:
qaids, daids = expanded_aids_list[acfgx]
key = (ut.hashstr_arr27(qaids, 'qaids'),
ut.hashstr_arr27(daids, 'daids'))
if key not in seen_:
if ibs is not None:
seen_[key].append(acfgx)
stats_, locals_ = ibs.get_annotconfig_stats(
qaids, daids, verbose=False, combined=combined,
**annotstats_kw)
hashids = (stats_['qaid_stats']['qhashid'],
stats_['daid_stats']['dhashid'])
hashid_list.append(hashids)
stats_str2 = ut.dict_str(stats_, strvals=True,
newlines=True, explicit=False,
nobraces=False)
print('annot_config_stats = ' + stats_str2)
else:
dupindex = seen_[key]
print('DUPLICATE of index %r' % (dupindex,))
dupdict = varied_compressed_dict_list[dupindex[0]]
print('DUP OF acfg = ' + ut.dict_str(dupdict, strvals=True))
print('hashid summary = ' + ut.list_str(hashid_list, nl=1))
ut.colorprint('L___ </Info acfg_list> ___', 'white')
def print_acfg(acfg, expanded_aids=None, ibs=None, **kwargs):
print('acfg = ' + ut.dict_str(compress_aidcfg(acfg)))
if expanded_aids is not None:
ibs.print_annot_stats(expanded_aids, label='expanded_aids = ', **kwargs)
def unflatten_acfgdict(flat_dict, prefix_list=['dcfg', 'qcfg']):
acfg = {prefix: {} for prefix in prefix_list}
for prefix in prefix_list:
for key, val in flat_dict.items():
if key.startswith(prefix + '_'):
acfg[prefix][key[len(prefix) + 1:]] = val
return acfg
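# e.g. (illustrative): unflatten_acfgdict({'qcfg_view': 'left',
# 'dcfg_view': 'right'}) returns {'qcfg': {'view': 'left'},
# 'dcfg': {'view': 'right'}}, inverting flatten_acfg_list above.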
def apply_timecontrol(acfg, min_timedelta='6h', require_timestamp=True):
return {
'qcfg': ut.augdict(
acfg['qcfg'], {
'require_timestamp': require_timestamp,
'min_timedelta': min_timedelta,
}),
'dcfg': ut.augdict(
acfg['dcfg'], {
'require_timestamp': require_timestamp,
'min_timedelta': min_timedelta,
}),
}
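# e.g. (illustrative): apply_timecontrol(ctrl, '1h') returns a copy of ctrl
# with require_timestamp=True and min_timedelta='1h' applied to both the
# qcfg and dcfg sides (see timectrl1h below).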
def apply_qualcontrol(acfg):
return {
'qcfg': ut.augdict(
acfg['qcfg'], {
'require_quality': True,
}),
'dcfg': ut.augdict(
acfg['dcfg'], {
'require_quality': True,
}),
}
__baseline_aidcfg = ut.augdict(__default_aidcfg, {
'is_known': True,
'minqual': 'ok',
'view': 'primary',
'view_ext': 1,
})
__controlled_aidcfg = ut.augdict(__baseline_aidcfg, {
#'require_timestamp': True,
'view_ext': 0,
'minqual': 'ok',
'species': 'primary',
'is_known': True,
})
single_default = __default_aidcfg
exclude_vars = list(locals().keys()) # this line is before tests
exclude_vars.append('exclude_vars')
default = {
'qcfg': ut.augdict(
single_default, {
#'default_aids': (1,)
}),
'dcfg': ut.augdict(
single_default, {
}),
}
default2 = {
'qcfg': ut.augdict(
default['qcfg'], {
'exclude_reference': True,
'is_known': True,
}),
'dcfg': ut.augdict(
default['dcfg'], {
'exclude_reference': True,
'is_known': True,
}),
}
"""
ibeis -e print_acfg --db PZ_Master1 -a unctrl
"""
unctrl = uncontrolled = {
'qcfg': ut.augdict(
__baseline_aidcfg, {
#'default_aids': 'allgt',
'min_pername': 2,
'species': 'primary',
}),
'dcfg': ut.augdict(
__baseline_aidcfg, {
'species': 'primary',
}),
}
# Uncontrolled but comparable to controlled
unctrl_comp = {
'qcfg': ut.augdict(
__baseline_aidcfg, {
#'default_aids': 'allgt',
'species': 'primary',
'sample_per_name': 1,
'min_pername': 2,
'view_ext': 0,
}),
'dcfg': ut.augdict(
__baseline_aidcfg, {
'species': 'primary',
}),
}
"""
ibeis -e print_acfg --db PZ_Master1 -a ctrl
ibeis -e print_acfg --db PZ_Master1 -a unctrl ctrl::unctrl:qpername=1,qview_ext=0
ibeis -e print_acfg --db PZ_Master1 -a unctrl ctrl::unctrl_comp
"""
ctrl = controlled = {
'qcfg': ut.augdict(
__controlled_aidcfg, {
#'default_aids': 'allgt',
'sample_per_name': 1,
'min_pername': 2,
}),
'dcfg': ut.augdict(
__controlled_aidcfg, {
#'default_aids': 'all',
'sample_per_name': 1,
'exclude_reference': True,
'min_pername': 1, # allows for singletons to be in the database
}),
}
"""
ibeis -e print_acfg --db PZ_Master1 -a timectrl
"""
timectrl = timecontrolled = apply_timecontrol(ctrl)
timectrl1h = timecontrolled = apply_timecontrol(ctrl, '1h')
timectrlL = timecontrolled = apply_timecontrol(ctrl, require_timestamp=False)
"""
ibeis -e print_acfg --db PZ_Master1 -a timequalctrl
"""
timequalctrl = timequalcontrolled = apply_qualcontrol(timectrl)
# Just vary the samples per name without messing with the number of annots in
# the database
varypername = {
'qcfg': ut.augdict(
ctrl['qcfg'], {
# ensures each query will have a correct example for the groundtruth
'min_pername': 4,
'force_const_size': True,
}),
'dcfg': ut.augdict(
ctrl['qcfg'], {
'sample_per_name': [1, 2, 3],
#'sample_per_name': [1, 3],
#'sample_per_ref_name': [1, 2, 3],
#'sample_per_ref_name': [1, 3],
'force_const_size': True,
}),
}
varypername2 = {
'qcfg': ut.augdict(
ctrl['qcfg'], {
'min_pername': 3,
'force_const_size': True,
}),
'dcfg': ut.augdict(
ctrl['dcfg'], {
'sample_per_name': [1, 2],
'force_const_size': True,
}),
}
varypername2_td = apply_timecontrol(varypername2)
"""
ibeis -e print_acfg --db PZ_Master1 -a ctrl2
ibeis -e print_acfg --db PZ_Master1 -a timectrl2
ibeis -e rank_cdf --db PZ_Master1 -a timectrl2 -t invarbest
"""
ctrl2 = {
'qcfg': ut.augdict(
ctrl['qcfg'], {
'min_pername': 3,
#'force_const_size': True,
}),
'dcfg': ut.augdict(
ctrl['dcfg'], {
'sample_per_name': 2,
'force_const_size': True,
}),
}
timectrl2 = apply_timecontrol(ctrl2)
varypername_td = apply_timecontrol(varypername)
varypername_td1h = apply_timecontrol(varypername, '1h')
"""
ibeis -e print_acfg --db PZ_Master1 -a varypername_tdqual
"""
varypername_tdqual = apply_qualcontrol(varypername_td)
"""
python -m ibeis --tf get_num_annots_per_name --db PZ_Master1
ibeis -e print_acfg -a varysize2 --db PZ_Master1 --verbtd --nocache
ibeis -e print_acfg -a varysize2 --db NNP_MasterGIRM_core --verbtd --nocache
"""
varysize = {
'qcfg': ut.augdict(
__controlled_aidcfg, {
#'default_aids': 'allgt',
'sample_size': None,
'sample_per_name': 1,
#'force_const_size': True,
'min_pername': 4,
}),
'dcfg': ut.augdict(
__controlled_aidcfg, {
#'default_aids': 'all',
'sample_per_name': [1, 2, 3],
#'sample_per_name': [1, 3],
'exclude_reference': True,
'sample_size': [0.25, 0.5, 0.75], # .95], 1.0],
'min_pername': 1,
}),
}
"""
ibeis -e print_acfg -a varysize2_td --db PZ_Master1 --verbtd --nocache
"""
varysize_td = apply_timecontrol(varysize)
varysize_td1h = apply_timecontrol(varysize, '1h')
varysize_tdqual = apply_qualcontrol(varysize_td)
varynannots = {
'qcfg': ut.augdict(
__controlled_aidcfg, {
#'default_aids': 'allgt',
'sample_size': None,
'sample_per_name': 1,
#'force_const_size': True,
#'min_pername': 4,
'min_pername': 2,
}),
'dcfg': ut.augdict(
__controlled_aidcfg, {
#'default_aids': 'all',
'sample_per_name': [1],
#'sample_per_name': [1, 3],
'exclude_reference': True,
#'sample_size': [.01, .125, 0.25, .375, 0.5, .625, 0.75], # , .875], # .95], 1.0],
#'sample_size': [.01, .05, .125, 0.25, .375, 0.5, 0.75], # , .875], # .95], 1.0],
'sample_size': [.0, .01, .05, .125, 0.25, .375, 0.5, 0.75, .875, .95, 1.0],
#'sample_size': [.01, .025, .05, .125, 0.25, .375, 0.5, 0.75, .875, .95, 1.0],
#'sample_size': ((10 * np.logspace(0, np.log(100), num=11, base=np.e)).astype(np.int) / 1000).tolist(),
#(10 * np.logspace(0, np.log2(100), num=11, base=2)).astype(np.int) / 1000,
'min_pername': 1,
}),
}
varynannots_td = apply_timecontrol(varynannots)
varynannots_td1h = apply_timecontrol(varynannots, '1h')
#varysize_tdqual = apply_qualcontrol(varysize_td)
# Compare query of frontleft animals when database has only left sides
"""
ibeis -e print_acfg -a viewpoint_compare --db PZ_Master1 --verbtd --nocache
python -m ibeis --tf parse_acfg_combo_list -a viewpoint_compare
python -m ibeis --tf get_annotcfg_list --db PZ_Master1 -a viewpoint_compare \
--verbtd
# Check composition of names per viewpoint
python -m ibeis --tf group_annots_by_multi_prop --db PZ_Master1 \
--props=yaw_texts,name_rowids --keys1 frontleft
python -m ibeis --tf get_annot_stats_dict --db PZ_Master1 \
--per_name_vpedge=True
TODO: Need to explicitly set up the common config I think?
ibeis -e print_acfg -a viewdiff:min_timedelta=1h --db PZ_Master1 --verbtd --nocache-aid
ibeis --tf get_annotcfg_list -a viewdiff:min_timedelta=1h --db PZ_Master1 \
--verbtd --nocache-aid
"""
viewpoint_compare = {
'qcfg': ut.augdict(
ctrl['qcfg'], {
'sample_size': None,
# To be a query you must have at least two primary1 views and at
# least one primary view
'view_pername': '#primary>0&#primary1>1',
'force_const_size': True,
'view': 'primary1',
'sample_per_name': 1,
#'min_pername': 2,
}),
'dcfg': ut.augdict(
ctrl['dcfg'], {
'view': ['primary1', 'primary'],
'force_const_size': True,
# To be in the database you must have at least two primary1 views
# and at least one primary view
'view_pername': '#primary>0&#primary1>1',
'sample_per_ref_name': 1,
'sample_per_name': 1,
# TODO: need to make this consistent across both experiment modes
'sample_size': None,
}),
}
viewdiff = vp = viewpoint_compare = {
'qcfg': ut.augdict(
ctrl['qcfg'], ut.odict([
('sample_size', None),
# To be a query you must have at least one primary1 view and at
# least one primary view
('view_pername', '#primary>0&#primary1>0'),
('force_const_size', True),
('view', 'primary1'),
('sample_per_name', 1),
])),
'dcfg': ut.augdict(
ctrl['dcfg'], {
'view': ['primary'],
'force_const_size': True,
'sample_per_ref_name': 1,
'sample_per_name': 1, # None seems to produce odd results
# where the per_ref is still more than 1
'sample_size': None,
'view_pername': '#primary>0&#primary1>0',
}),
}
# Use tags to find a small set of difficult cases
timectrlhard = viewpoint_compare = {
'qcfg': ut.augdict(
timectrl['qcfg'], ut.odict([
('has_any', ('needswork', 'correctable', 'mildviewpoint')),
('has_none', ('viewpoint', 'photobomb', 'error:viewpoint', 'quality')),
])),
'dcfg': ut.augdict(
timectrl['dcfg'], {
}),
}
"""
ibeis -e print_acfg -a viewdiff --db PZ_Master1 --verbtd --nocache --per_vp=True
ibeis -e print_acfg -a viewdiff_td --db PZ_Master1 --verbtd --nocache --per_vp=True
"""
viewdiff_td = apply_timecontrol(viewdiff)
viewdiff_td1h = apply_timecontrol(viewdiff, '1h')
# THIS IS A GOOD START
# NEED TO DO THIS CONFIG AND THEN SWITCH DCFG TO USE primary1
include_vars = list(locals().keys()) # this line is after tests
# List of all valid tests
TEST_NAMES = set(include_vars) - set(exclude_vars)
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.expt.annotation_configs
python -m ibeis.expt.annotation_configs --allexamples
python -m ibeis.expt.annotation_configs --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
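# Minimal sketch (not from the original module) of the config-layering
# pattern used above, assuming ut.augdict(base, override) copies `base`
# and updates the copy; the exact utool semantics may differ slightly.
def _augdict_sketch():
    def augdict(base, override):
        out = dict(base)      # copy, so shared base configs are never mutated
        out.update(override)
        return out
    base_qcfg = {'sample_per_name': 1, 'min_pername': 2}
    varied = augdict(base_qcfg, {'sample_size': [0.25, 0.5, 0.75]})
    assert base_qcfg == {'sample_per_name': 1, 'min_pername': 2}  # base untouched
    return varied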
|
SU-ECE-17-7/ibeis
|
ibeis/expt/annotation_configs.py
|
Python
|
apache-2.0
| 27,587
|
from base64 import urlsafe_b64decode
from io import StringIO
from django.http.response import HttpResponse
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
import openid2rp
from Care4Care.settings import STATICFILES_DIRS
@never_cache
def oidtest(request):
ax = (("http://axschema.org/eid/card-validity/end",
"http://axschema.org/person/gender",
"http://axschema.org/contact/postalAddress/home",
"http://axschema.org/namePerson/first",
"http://axschema.org/eid/photo",
"http://axschema.org/eid/card-validity/begin",
"http://axschema.org/contact/city/home",
"http://axschema.org/contact/postalCode/home",
"http://axschema.org/birthDate",
"http://openid.net/schema/birthDate/birthYear",
"http://openid.net/schema/birthDate/birthMonth",
"http://openid.net/schema/birthDate/birthday",
"http://axschema.org/eid/pob",
"http://axschema.org/eid/card-number",
"http://axschema.org/eid/nationality",
"http://axschema.org/namePerson/last",
"http://axschema.org/namePerson",
"http://axschema.org/eid/rrn",
# "http://axschema.org/eid/cert/auth",
"http://axschema.org/eid/age"), ())
uri = "https://www.e-contract.be/eid-idp/endpoints/openid/ident"
kind, claimedId = openid2rp.normalize_uri(uri)
res = openid2rp.discover(claimedId)
if res is not None:
services, op_endpoint, op_local = res
session = openid2rp.associate(services, op_endpoint)
redirect_url = openid2rp.request_authentication(
services,
op_endpoint,
session['assoc_handle'],
"http://127.0.0.1:8000/tests/openid2",
claimedId, op_local,
sreg=((), ()),
ax=ax
)
response = HttpResponse()
response['Location'] = redirect_url
response.status_code=303
return response
# return render_to_response('OIDTest.html',
# {
# 'services' : services,
# 'op_endpoint' : op_endpoint,
# 'op_local' : op_local,
# 'kind' : kind,
# 'claimedID' : claimedId,
# 'redirect_url' : redirect_url
# }
# )
@csrf_exempt
@never_cache
def oidtest2(request):
# signed, claimedID = openid2rp.verify(request.POST, None, None, True)
# printy(get_ax(request.POST))
ax = get_ax(request.POST)
args = ax.copy()
args.update({'ax' : ax})
strIO = StringIO()
print(ax, file=strIO)
args.update({'printy' : strIO.getvalue()})
return render(request, "OIDTest.html", args)
def get_ax(response):
ax = 'ax' + "."
oax = 'openid.' + ax
res = {}
for k, v in response.items():
if k.startswith(oax+"type."):
k = k.rsplit('.',1)[1]
value_name = oax+"value."+k
if ax+"value."+k not in response['openid.signed']:
continue
res[v] = response[value_name]
return _get_readable_ax(res)
def _get_readable_ax(ax):
res = {}
AX = {"http://axschema.org/eid/card-validity/end" : 'card_validity_end',
"http://axschema.org/person/gender" : 'gender',
"http://axschema.org/contact/postalAddress/home" : 'address',
"http://axschema.org/namePerson/first" : 'firstname',
"http://axschema.org/eid/photo" : 'photo',
"http://axschema.org/eid/card-validity/begin" : 'card_validity_start',
"http://axschema.org/contact/city/home" : 'city',
"http://axschema.org/contact/postalCode/home" : 'postal_code',
"http://axschema.org/birthDate" : 'birth_date',
"http://openid.net/schema/birthDate/birthYear" : 'birth_year',
"http://openid.net/schema/birthDate/birthMonth" : 'birth_month',
"http://openid.net/schema/birthDate/birthday" : 'birth_day',
"http://axschema.org/eid/pob" : 'birth_place',
"http://axschema.org/eid/card-number" : 'card_number',
"http://axschema.org/eid/nationality" : 'nationality',
"http://axschema.org/namePerson/last" : 'lastname',
"http://axschema.org/namePerson" : 'fullname',
"http://axschema.org/eid/rrn" : 'register_number',
"http://axschema.org/eid/age" : 'age'
}
for key, value in ax.items():
if key.endswith('photo'):
filename = STATICFILES_DIRS[0] + '/images/photo.jpg'
#value = value.replace('-', '+').replace('_', '/')
            # urlsafe_b64decode needs input whose length is a multiple of 4,
            # so restore up to two stripped '=' padding characters
            value += '=' * (-len(value) % 4)
            with open(filename, 'wb') as fd:
                fd.write(urlsafe_b64decode(value))
value = filename
res[AX[key]] = value
return res
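# Standalone sketch of the base64url padding rule applied above: decoding
# needs input whose length is a multiple of 4, so stripped '=' padding is
# restored first.  The sample bytes are made up.
def _padding_sketch():
    from base64 import urlsafe_b64encode
    raw = b'\xff\xee'
    stripped = urlsafe_b64encode(raw).rstrip(b'=')   # simulate missing padding
    padded = stripped + b'=' * (-len(stripped) % 4)
    assert urlsafe_b64decode(padded) == raw
    return padded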
|
dsarkozi/care4care-sdp-grp4
|
Care4Care/C4CApplication/tests/OIDTest.py
|
Python
|
agpl-3.0
| 5,177
|
from collections import defaultdict
class UserInfo(object):
""" Class to be able to assign properties to a dict"""
def __init__(self):
self._values_ = {}
def __getattr__(self, name):
if name.startswith("_") and name.endswith("_"):
return super(UserInfo, self).__getattr__(name)
try:
return self._values_[name]
except KeyError:
raise AttributeError
def __setattr__(self, name, value):
if name.startswith("_") and name.endswith("_"):
return super(UserInfo, self).__setattr__(name, value)
self._values_[name] = str(value)
def __repr__(self):
return str(self._values_)
@property
def vars(self):
return self._values_
class DepsUserInfo(defaultdict):
def __init__(self):
super(DepsUserInfo, self).__init__(UserInfo)
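# Quick usage sketch (not part of the original module): attribute writes are
# stringified into the internal dict, and DepsUserInfo hands out one UserInfo
# bucket per dependency name on first access ("mylib" is made up).
def _user_info_sketch():
    deps = DepsUserInfo()
    deps["mylib"].version = 1.2        # stored as the string "1.2"
    deps["mylib"].build_type = "Release"
    return deps["mylib"].vars          # {'version': '1.2', 'build_type': 'Release'}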
|
conan-io/conan
|
conans/model/user_info.py
|
Python
|
mit
| 872
|
# coding: utf-8
from flask import Flask, request, current_app, make_response, abort, render_template, jsonify, redirect
from flask.ext.script import Manager, Shell
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.migrate import Migrate, MigrateCommand
from flask_restful import Resource, Api, reqparse, abort
import os
# FLASK RESTFUL API conf
parser = reqparse.RequestParser()
parser.add_argument('name', type=str)
parser.add_argument('text', type=str)
app = Flask(__name__, static_path='/static/')
# for SQLAlchemy
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
manager = Manager(app)
api = Api(app, catch_all_404s=True)
# database migration
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
class Board(db.Model):
"""Represents a Board object
"""
__tablename__ = 'boards'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
# columns = db.relationship('Column', backref='board')
def __repr__(self):
return '<Board {}>'.format(self.name)
class BoardsContent(db.Model):
"""Represents a board-column interaction"""
__tablename__ = 'boardsContent'
id = db.Column(db.Integer, primary_key=True)
board = db.Column(db.Integer, db.ForeignKey('boards.id'))
column = db.Column(db.Integer, db.ForeignKey('columns.id'))
class ColumnsContent(db.Model):
"""Represents a column-note interaction"""
    __tablename__ = 'columnsContent'
id = db.Column(db.Integer, primary_key=True)
column = db.Column(db.Integer, db.ForeignKey('columns.id'))
note = db.Column(db.Integer, db.ForeignKey('notes.id'))
class Column(db.Model):
"""Represents a column object.
"""
__tablename__ = 'columns'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
# notes = db.relationship('Note', backref='column')
# board_id = db.Column(db.Integer, db.ForeignKey('boards.id'))
def __repr__(self):
return '<Col {}>'.format(self.name)
class Note(db.Model):
"""represents a Note object. It can contain text
"""
__tablename__ = 'notes'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
# column_id = db.Column(db.Integer, db.ForeignKey('columns.id'))
text = db.Column(db.Text)
def __repr__(self):
return '<Note {}>'.format(self.name)
class BoardsEP(Resource):
@staticmethod
def to_json():
"""
:return: dict object representing a column in json.
"""
boards = Board.query.all()
columns = Column.query.all()
notes = Note.query.all()
boards_content = [{
'column': i.column,
'board': i.board
} for i in BoardsContent.query.all() ]
columns_content = [{
'note': i.note,
'column': i.column
} for i in ColumnsContent.query.all() ]
return {
'boards': [{
'id': i.id,
'name': i.name
} for i in boards ],
'columns': [{
'id': i.id,
'name': i.name
} for i in columns ],
'notes': [{
'id': i.id,
'name': i.name,
'text': i.text
} for i in notes ],
'boards-content': boards_content,
'columns-content': columns_content,
}
@staticmethod
def get():
request_type = request.args.get('type', '')
if request_type == 'json':
return jsonify(BoardsEP.to_json())
else:
return make_response("hello")
@staticmethod
def delete():
args = request.form
id = args.get('id', '')
if id:
board_obj = Board.query.filter_by(id=id).first()
if board_obj:
# delete board relations if needed
# needs to be before deleting note because checked in delete_call
board_content = BoardsContent.query.filter_by(board=id)
for content in board_content:
BoardsContentEP.delete_call(id, content.column)
# delete board object
db.session.delete(board_obj)
db.session.commit()
return {'code': 204, 'description': 'No content: The request was processed successfully, but no response body is needed.'}
else:
return {'code': 400, 'description': 'No resource found', 'asked': id}
else:
return {'code': 400, 'description': 'some fields are missing', 'missing': 'id'}
@staticmethod
def post_method():
args = request.form
name = args.get('name', '')
if name:
# check if there is no board with this name
if Board.query.filter_by(name=name).first():
return {'code': 400, 'description': 'name already exists'}
board_obj = Board(name=name)
db.session.add(board_obj)
db.session.commit()
return {'code': 201, 'description': 'created'}
else:
return {'code': 400, 'description': 'some fields are missing', 'missing': 'name'}
@staticmethod
def put():
args = request.form
id = args.get('id', '')
if id:
board_obj = Board.query.filter_by(id=id).first()
if not board_obj:
return {'code': 400, 'description': 'No resource found', 'asked': id}
name = args.get('name', '')
if name:
board_obj.name = name
else:
return {'code': 304, 'description': 'not modified'}
db.session.add(board_obj)
db.session.commit()
return {'code': 204, 'description': 'No content: The request was processed successfully, but no response body is needed.'}
else:
return {'code': 400, 'description': 'some fields are missing', 'missing': 'id'}
@staticmethod
def post():
args = request.form
request_type = args.get('request-type', '')
if request_type == 'delete':
return BoardsEP.delete()
elif request_type == 'put':
return BoardsEP.put()
else: # post request
return BoardsEP.post_method()
class ColumnsEP(Resource):
@staticmethod
def post_method():
args = request.form
name = args.get('name', '')
if name:
col_obj = Column(name=name)
db.session.add(col_obj)
db.session.commit()
return {'code': 201, 'description': 'created'}
else:
return {'code': 400, 'description': 'some fields are missing', 'missing': 'name'}
@staticmethod
def delete():
args = request.form
id = args.get('id', '')
if id:
col_obj = Column.query.filter_by(id=id).first()
if col_obj:
# delete board content if needed
# needs to be before deleting note because checked in delete_call
board_content = BoardsContent.query.filter_by(column=id)
for content in board_content:
BoardsContentEP.delete_call(content.board, id)
# delete column content if needed
column_content = ColumnsContent.query.filter_by(column=id)
for content in column_content:
ColumnsContentEP.delete_call(id, content.note)
db.session.delete(col_obj)
db.session.commit()
return {'code': 204, 'description': 'No content: The request was processed successfully, but no response body is needed.'}
else:
return {'code': 400, 'description': 'No resource found', 'asked': id}
else:
return {'code': 400, 'description': 'some fields are missing', 'missing': 'id'}
@staticmethod
def put():
args = request.form
id = args.get('id', '')
if id:
col_obj = Column.query.filter_by(id=id).first()
if not col_obj:
return {'code': 400, 'description': 'No resource found', 'asked': id}
name = args.get('name', '')
if name:
col_obj.name = name
else:
return {'code': 304, 'description': 'not modified'}
db.session.add(col_obj)
db.session.commit()
return {'code': 204, 'description': 'No content: The request was processed successfully, but no response body is needed.'}
else:
return {'code': 400, 'description': 'some fields are missing', 'missing': 'id'}
@staticmethod
def post():
args = request.form
request_type = args.get('request-type', '')
if request_type == 'delete':
return ColumnsEP.delete()
elif request_type == 'put':
return ColumnsEP.put()
else:
return ColumnsEP.post_method()
class NotesEP(Resource):
@staticmethod
def post_method():
args = request.form
name = args.get('name', '')
if name:
text = args.get('text', '') # get the note text
note_obj = Note(name=name, text=text)
db.session.add(note_obj)
db.session.commit()
return {'code': 201, 'description': 'created'}
else:
return {'code': 400, 'description': 'some fields are missing', 'missing': 'name'}
@staticmethod
def delete():
args = request.form
id = args.get('id', '')
if id:
note_obj = Note.query.filter_by(id=id).first()
if note_obj:
# delete column relations if needed
# needs to be before deleting note because checked in delete_call
column_content = ColumnsContent.query.filter_by(note=id)
for content in column_content:
ColumnsContentEP.delete_call(content.column, id)
db.session.delete(note_obj)
db.session.commit()
return {'code': 204, 'description': 'No content: The request was processed successfully, but no response body is needed.'}
else:
return {'code': 400, 'description': 'No resource found', 'asked': id}
else:
return {'code': 400, 'description': 'some fields are missing', 'missing': 'id'}
@staticmethod
def put():
args = request.form
id = args.get('id', '')
if id:
note_obj = Note.query.filter_by(id=id).first()
if not note_obj:
return {'code': 400, 'description': 'No resource found', 'asked': id}
name = args.get('name', '')
text = args.get('text', '')
modified = False
if name:
note_obj.name = name
modified = True
if text:
note_obj.text = text
modified = True
if not modified:
return {'code': 304, 'description': 'not modified'}
db.session.add(note_obj)
db.session.commit()
return {'code': 204, 'description': 'No content: The request was processed successfully, but no response body is needed.'}
else:
return {'code': 400, 'description': 'some fields are missing', 'missing': 'id'}
@staticmethod
def post():
args = request.form
request_type = args.get('request-type', '')
if request_type == 'delete':
return NotesEP.delete()
elif request_type == 'put':
return NotesEP.put()
else:
return NotesEP.post_method()
class BoardsContentEP(Resource):
@staticmethod
def delete_call(board, column):
if board and column:
# check if board or column corresponds to correct id
if not Board.query.filter_by(id=board).first():
return {'code': 400, 'description': 'board with id {} does not exist'.format(board)}
if not Column.query.filter_by(id=column).first():
return {'code': 400, 'description': 'column with id {} does not exist'.format(column)}
content_obj = BoardsContent.query.filter_by(board=board, column=column).first()
db.session.delete(content_obj)
db.session.commit()
return {'code': 204, 'description': 'No content: The request was processed successfully, but no response body is needed.'}
else:
missing = []
if not board:
missing.append('board')
if not column:
missing.append('column')
return {'code': 400, 'description': 'some fields are missing', 'missing': ', '.join(missing)}
@staticmethod
def delete():
args = request.form
board = args.get('board', '')
column = args.get('column', '')
return BoardsContentEP.delete_call(board, column)
@staticmethod
def post_method():
args = request.form
board = args.get('board', '')
column = args.get('column', '')
if board and column:
# check if board or column corresponds to correct id
if not Board.query.filter_by(id=board).first():
return {'code': 400, 'description': 'board with id {} does not exist'.format(board)}
if not Column.query.filter_by(id=column).first():
return {'code': 400, 'description': 'column with id {} does not exist'.format(column)}
if BoardsContent.query.filter_by(board=board, column=column).first():
return {'code': 400, 'description': 'relationship already exists between board {} and column {}'.format(board, column)}
content_obj = BoardsContent(board=board, column=column)
db.session.add(content_obj)
db.session.commit()
return {'code': 201, 'description': 'created'}
else:
missing = []
if not board:
missing.append('board')
if not column:
missing.append('column')
return {'code': 400, 'description': 'some fields are missing', 'missing': ', '.join(missing)}
@staticmethod
def post():
args = request.form
request_type = args.get('request-type', '')
if request_type == 'delete':
return BoardsContentEP.delete()
else:
return BoardsContentEP.post_method()
class ColumnsContentEP(Resource):
@staticmethod
def delete_call(column, note):
if note and column:
# check if note or column corresponds to correct id
if not Note.query.filter_by(id=note).first():
return {'code': 400, 'description': 'note with id {} does not exist'.format(note)}
if not Column.query.filter_by(id=column).first():
return {'code': 400, 'description': 'column with id {} does not exist'.format(column)}
content_obj = ColumnsContent.query.filter_by(note=note, column=column).first()
db.session.delete(content_obj)
db.session.commit()
return {'code': 204, 'description': 'No content: The request was processed successfully, but no response body is needed.'}
else:
missing = []
if not note:
missing.append('note')
if not column:
missing.append('column')
return {'code': 400, 'description': 'some fields are missing', 'missing': ', '.join(missing)}
@staticmethod
def delete():
args = request.form
column = args.get('column', '')
note = args.get('note', '')
return ColumnsContentEP.delete_call(column, note)
@staticmethod
def post_method():
args = request.form
note = args.get('note', '')
column = args.get('column', '')
if note and column:
# check if note or column corresponds to correct id
if not Note.query.filter_by(id=note).first():
return {'code': 400, 'description': 'note with id {} does not exist'.format(note)}
if not Column.query.filter_by(id=column).first():
return {'code': 400, 'description': 'column with id {} does not exist'.format(column)}
if ColumnsContent.query.filter_by(note=note, column=column).first():
return {'code': 400, 'description': 'relationship already exists between note {} and column {}'.format(note, column)}
content_obj = ColumnsContent(note=note, column=column)
db.session.add(content_obj)
db.session.commit()
return {'code': 201, 'description': 'created'}
else:
missing = []
if not note:
missing.append('note')
if not column:
missing.append('column')
return {'code': 400, 'description': 'some fields are missing', 'missing': ', '.join(missing)}
@staticmethod
def post():
args = request.form
request_type = args.get('request-type', '')
if request_type == 'delete':
return ColumnsContentEP.delete()
else:
return ColumnsContentEP.post_method()
api.add_resource(BoardsEP, '/v1/boards/')
api.add_resource(ColumnsEP, '/v1/columns/')
api.add_resource(NotesEP, '/v1/notes/')
api.add_resource(BoardsContentEP, '/v1/boards-content/')
api.add_resource(ColumnsContentEP, '/v1/columns-content/')
@app.errorhandler(404)
def handler_404(e):
return render_template('404.html'), 404
def make_shell_context():
return dict(app=app, db=db, Board=Board, Column=Column, Note=Note)
manager.add_command('shell', Shell(make_context=make_shell_context))
if __name__ == '__main__':
manager.run()
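# Client-side sketch (run from a separate script while the server is up): the
# endpoints above tunnel PUT/DELETE through POST via the 'request-type' form
# field.  Host, port and the board id are assumptions; needs the `requests`
# package installed.
def _client_smoke_test(base='http://127.0.0.1:5000/v1'):
    import requests
    requests.post(base + '/boards/', data={'name': 'sprint'})
    requests.post(base + '/boards/', data={'request-type': 'put',
                                           'id': 1, 'name': 'sprint-1'})
    requests.post(base + '/boards/', data={'request-type': 'delete', 'id': 1})
    return requests.get(base + '/boards/', params={'type': 'json'}).json()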
|
librallu/virtual-boards
|
main.py
|
Python
|
gpl-2.0
| 18,510
|
#!/usr/bin/python
from Queue import Queue
from base64 import b64encode
from hashlib import sha1
from optparse import OptionParser
from SimpleHTTPServer import SimpleHTTPRequestHandler
from threading import Thread
import traceback
import BaseHTTPServer
import os
import json
import struct
import ssl
q = Queue()
class WebSocketHandler(object):
_ws_GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11' #'e47b2610-3ce1-4dd6-9ef0-760a026014eb'
_opcode_continu = 0x0
_opcode_text = 0x1
_opcode_binary = 0x2
_opcode_close = 0x8
_opcode_ping = 0x9
_opcode_pong = 0xa
def __init__(self, requestHandler, msgHandler):
self.__request_handler = requestHandler
self.__msg_handler = msgHandler
def run(self):
self.__handshake()
self.__read_messages()
self.__close()
def __handshake(self):
rh = self.__request_handler
headers = rh.headers
key = headers['Sec-WebSocket-Key']
        digest = b64encode(sha1(key + self._ws_GUID).digest())
rh.send_response(101, 'Switching Protocols')
rh.send_header('Upgrade', 'websocket')
rh.send_header('Connection', 'Upgrade')
rh.send_header('Sec-WebSocket-Accept', str(digest))
rh.end_headers()
def __close(self):
try:
self.__send_close()
except Exception, e:
self.__request_handler.log_message('ignore exception while closing WebSocket: %s.' % e)
def __send_close(self):
msg = bytearray()
msg.append(0x80 + self._opcode_close)
msg.append(0x00)
self.__request_handler.request.send(msg)
def __read_messages(self):
try:
while self.__read_next_message():
pass
except Exception, e:
traceback.print_exc()
self.__request_handler.log_error("closing WebSocket on exception: %s" % e)
def __read_next_message(self):
rfile = self.__request_handler.rfile
        # rfile.read(n) is blocking; it returns immediately, however,
        # when the socket is closed.
opcode = ord(rfile.read(1)) & 0x0F
length = ord(rfile.read(1)) & 0x7F
if length == 126:
length = struct.unpack(">H", rfile.read(2))[0]
elif length == 127:
length = struct.unpack(">Q", rfile.read(8))[0]
masks = [ord(byte) for byte in rfile.read(4)]
decoded = ""
for char in rfile.read(length):
decoded += chr(ord(char) ^ masks[len(decoded) % 4])
return self.__on_message(opcode, decoded)
def __on_message(self, opcode, decoded):
if opcode == self._opcode_close:
return False
if opcode == self._opcode_ping:
            # echo the ping payload back in a pong frame
            self.__send_message(self._opcode_pong, decoded)
elif opcode == self._opcode_pong:
pass
elif (opcode == self._opcode_continu or
opcode == self._opcode_text or
opcode == self._opcode_binary):
self.__msg_handler(decoded)
return True
def __send_message(self, opcode, message):
request = self.__request_handler.request
request.send(chr(0x80 + opcode))
length = len(message)
if length <= 125:
request.send(chr(length))
elif length >= 126 and length <= 65535:
request.send(chr(126))
request.send(struct.pack(">H", length))
else:
request.send(chr(127))
request.send(struct.pack(">Q", length))
if length > 0:
request.send(message)
class ServerHandler(SimpleHTTPRequestHandler):
def do_GET(self):
if self.headers.get("Upgrade", None) == "websocket":
WebSocketHandler(self, lambda msg: self.__handle_websocket_message(msg)).run()
#This handler is in websocket mode now.
#do_GET only returns after client close or socket error.
else:
SimpleHTTPRequestHandler.do_GET(self)
def __handle_websocket_message(self, msg):
try:
r = json.loads(msg)
for i in r:
q.put(i)
except Exception, x:
print("error: %s" % x)
def run(options):
port = options.port
print('serving at port: %d' % port)
httpd = BaseHTTPServer.HTTPServer(('', port), ServerHandler)
if options.certfile:
try:
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=options.certfile, server_side=True)
except Exception, e:
print("ssl error: %s" % e)
return
httpd.serve_forever()
def notify(msg):
if os.name != 'nt':
        print(msg)
return
import ctypes
MessageBox = ctypes.windll.user32.MessageBoxA
MB_SYSTEMMODAL = 0x1000
MessageBox(None, msg, 'Warning!', MB_SYSTEMMODAL)
def main(options):
t = Thread(target=run, args=([options]))
t.daemon = True # thread dies with the program
t.start()
while True:
i = 0
try:
i = q.get(True, 5)
except Exception:
            notify('no signal detected')
if i > options.sensitivity:
notify('Incoming!')
# clear queue (without analyzing) after notification is shown
while not q.empty():
q.get()
parser = OptionParser()
parser.add_option("-p", "--port", default=8000, type=int, dest="port",
help="serve at HTTP port [default: %default]")
parser.add_option("-s", "--sensitivity", default=100000, type=int, dest="sensitivity",
help="motion sensitivity [default: %default]")
parser.add_option("-c", "--certfile", default=None, dest="certfile",
help="use SSL with certificate file (path to .pem)")
(options, args) = parser.parse_args()
main(options)
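# Self-contained sketch (not part of the original server) of the
# client-to-server masking that __read_next_message undoes: payload bytes
# are XORed with a rotating 4-byte mask taken from the frame header.
def _mask_roundtrip_sketch():
    masks = [0x12, 0x34, 0x56, 0x78]
    def xor(payload):
        return ''.join(chr(ord(ch) ^ masks[i % 4]) for i, ch in enumerate(payload))
    masked = xor('[42]')           # client-side masking
    assert xor(masked) == '[42]'   # server-side unmasking is the same XOR
    return masked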
|
vladfolts/Web-Motion-Detector
|
server.py
|
Python
|
mit
| 5,819
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo.config import cfg
from neutron.common import legacy
_core_opts = [
cfg.StrOpt('core_plugin',
default='',
help=_('Neutron plugin provider module')),
cfg.ListOpt('service_plugins',
default=[],
help=_("The service plugins Neutron will use")),
]
_quota_opts = [
cfg.StrOpt('quota_driver',
default='',
help=_('Neutron quota driver class')),
]
_db_opts = [
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
help=_('URL to database')),
]
CONF = cfg.ConfigOpts()
CONF.register_opts(_core_opts)
CONF.register_opts(_db_opts, 'database')
CONF.register_opts(_quota_opts, 'QUOTAS')
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
def do_upgrade_downgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
    if CONF.command.delta:
        sign = '+' if CONF.command.name == 'upgrade' else '-'
        revision = sign + str(CONF.command.delta)
    else:
        revision = CONF.command.revision
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = subparsers.add_parser(name)
parser.set_defaults(func=do_alembic_command)
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
for name in ['upgrade', 'downgrade']:
parser = subparsers.add_parser(name)
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.set_defaults(func=do_upgrade_downgrade)
parser = subparsers.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = subparsers.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
config = alembic_config.Config(
os.path.join(os.path.dirname(__file__), 'alembic.ini')
)
config.set_main_option('script_location',
'neutron.db.migration:alembic_migrations')
# attach the Neutron conf to the Alembic conf
config.neutron_config = CONF
CONF()
#TODO(gongysh) enable logging
legacy.modernize_quantum_config(CONF)
CONF.command.func(config, CONF.command.name)
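# Sketch (illustrative) of the relative-revision strings that
# do_upgrade_downgrade builds from --delta: '+N' for upgrade and '-N' for
# downgrade, the relative form alembic's upgrade/downgrade commands accept.
def _relative_revision(name, delta):
    sign = '+' if name == 'upgrade' else '-'
    return sign + str(delta)
# _relative_revision('upgrade', 3) == '+3'; _relative_revision('downgrade', 2) == '-2'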
|
Juniper/neutron
|
neutron/db/migration/cli.py
|
Python
|
apache-2.0
| 4,441
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2021 Jose Antonio Chavarría <jachavar@gmail.com>
# Copyright (c) 2015-2021 Alberto Gacías <alberto@migasfree.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.utils.translation import gettext_lazy as _
from .node import Node
class ConfigurationManager(models.Manager):
def create(self, node, name, value):
obj = Configuration(
node=node,
name=name,
value=value
)
obj.save()
return obj
class Configuration(models.Model):
node = models.ForeignKey(
Node,
on_delete=models.CASCADE,
verbose_name=_("hardware node")
)
name = models.TextField(
verbose_name=_("name"),
blank=True
) # This is the field "config" in lshw
value = models.TextField(
verbose_name=_("value"),
null=True,
blank=True
)
objects = ConfigurationManager()
def __str__(self):
return self.name
class Meta:
app_label = 'hardware'
verbose_name = _('Hardware Configuration')
verbose_name_plural = _('Hardware Configurations')
unique_together = (('name', 'node'),)
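# Illustrative helper (assumes a configured Django project; not part of the
# original module): store the (name, value) pairs of an lshw 'configuration'
# section for an existing Node instance via the custom manager above.
def record_lshw_config(node, pairs):
    return [
        Configuration.objects.create(node, name, value)
        for name, value in pairs.items()
    ]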
|
migasfree/migasfree-backend
|
migasfree/hardware/models/configuration.py
|
Python
|
gpl-3.0
| 1,825
|
#!/usr/bin/python
import os, sys
import argparse
import pycurl
from lxml import etree
from datetime import datetime, timedelta
class FritzBox(object):
def __init__(self, hostname='fritz.box', port=49000, pre_os6=False):
self._hostname = hostname
self._port = port
self._status = None
self._pre_os6 = pre_os6
if self._pre_os6:
self._baseurl = '/upnp'
else:
self._baseurl = '/igdupnp'
self._wanAddress = None
self._addonInfo = None
self._dslLinkInfo = None
self._commonLinkProperties = None
self._userList = None
self._callLists = {}
class Request:
def __init__(self, uri, urn, action):
self._uri = uri
self._urn = urn
self._action = action
def data(self):
return """<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body><u:%s xmlns:u="urn:%s"/></s:Body>
</s:Envelope>""" % (self._action, self._urn)
class Response:
def __init__(self):
self.contents = b''
def body_callback(self, buf):
self.contents = self.contents + buf
def _sendRequest(self, request, verbose=False):
resp = FritzBox.Response()
c = pycurl.Curl()
url = 'http://' + self._hostname + ':' + str(self._port) + request._uri
try:
data = request.data()
try:
if verbose:
print('C: ' + url)
print('C: ' + data)
c.setopt(pycurl.URL, url)
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POSTFIELDS, data)
c.setopt(pycurl.INFILESIZE, len(data))
c.setopt(pycurl.WRITEFUNCTION, resp.body_callback)
c.setopt(pycurl.CONNECTTIMEOUT, 30)
c.setopt(pycurl.TIMEOUT, 300)
c.setopt(pycurl.HTTPHEADER, ['SOAPACTION: "urn:' + request._urn + '#' + request._action + '"', 'CONTENT-TYPE: text/xml;', 'User-Agent: nagios'])
c.perform()
if verbose:
print('S: ' + resp.contents)
ret = True
except Exception as e:
print("ERROR - HTTP request to UPNP server not possible " + str(e))
ret = False
finally:
c.close()
if ret:
return resp
else:
return None
def _retrieveStatusInfo(self):
response = self._sendRequest( FritzBox.Request(self._baseurl + '/control/WANIPConn1', 'schemas-upnp-org:service:WANIPConnection:1', 'GetStatusInfo') )
if response is not None:
self._status = {}
ret = False
try:
namespaces = { 's':"http://schemas.xmlsoap.org/soap/envelope/", 'u':"urn:schemas-upnp-org:service:WANIPConnection:1" }
tree = etree.fromstring(response.contents)
elements = tree.xpath('/s:Envelope/s:Body/u:GetStatusInfoResponse/NewConnectionStatus', namespaces=namespaces)
self._status['connectionstatus'] = elements[0].text if elements else None
elements = tree.xpath('/s:Envelope/s:Body/u:GetStatusInfoResponse/NewLastConnectionError', namespaces=namespaces)
self._status['lastConnectionError'] = elements[0].text if elements else None
if self._status['lastConnectionError'] == 'ERROR_NONE':
self._status['lastConnectionError'] = None
elements = tree.xpath('/s:Envelope/s:Body/u:GetStatusInfoResponse/NewUptime', namespaces=namespaces)
self._status['connectDuration'] = int(elements[0].text) if elements else 0
self._status['connectTime'] = datetime.now() - timedelta(seconds=self._status['connectDuration'])
ret = True
finally:
pass
else:
self._status = None
ret = False
return ret
    def _retrieveWANAddress(self):
if self._pre_os6:
request = FritzBox.Request(self._baseurl + '/control/WANCommonIFC1', 'schemas-upnp-org:service:WANPPPConnection:1', 'GetExternalIPAddress')
else:
request = FritzBox.Request(self._baseurl + '/control/WANIPConn1', 'schemas-upnp-org:service:WANIPConnection:1', 'GetExternalIPAddress')
response = self._sendRequest( request )
if response is not None:
self._wanAddress = ''
ret = False
try:
tree = etree.fromstring(response.contents)
namespaces = { 's':"http://schemas.xmlsoap.org/soap/envelope/" }
if self._pre_os6:
namespaces['u'] = "urn:schemas-upnp-org:service:WANPPPConnection:1"
else:
namespaces['u'] = "urn:schemas-upnp-org:service:WANIPConnection:1"
elements = tree.xpath('/s:Envelope/s:Body/u:GetExternalIPAddressResponse/NewExternalIPAddress', namespaces=namespaces)
if elements:
self._wanAddress = elements[0].text
ret = True
else:
self._wanAddress = None
ret = False
finally:
pass
else:
self._wanAddress = None
ret = False
return ret
def _retrieveAddonInfo(self):
response = self._sendRequest( FritzBox.Request(self._baseurl + '/control/WANCommonIFC1', 'schemas-upnp-org:service:WANCommonInterfaceConfig:1', 'GetAddonInfos') )
if response is not None:
self._addonInfo = {}
ret = False
try:
tree = etree.fromstring(response.contents)
namespaces = { 's':"http://schemas.xmlsoap.org/soap/envelope/", 'u':"urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1" }
elements = tree.xpath('/s:Envelope/s:Body/u:GetAddonInfosResponse/NewDNSServer1', namespaces=namespaces)
dnsserver1 = elements[0].text if elements else None
elements = tree.xpath('/s:Envelope/s:Body/u:GetAddonInfosResponse/NewDNSServer2', namespaces=namespaces)
dnsserver2 = elements[0].text if elements else None
self._addonInfo['dnsserver'] = [ dnsserver1, dnsserver2 ]
elements = tree.xpath('/s:Envelope/s:Body/u:GetAddonInfosResponse/NewVoipDNSServer1', namespaces=namespaces)
dnsserver1 = elements[0].text if elements else None
elements = tree.xpath('/s:Envelope/s:Body/u:GetAddonInfosResponse/NewVoipDNSServer2', namespaces=namespaces)
dnsserver2 = elements[0].text if elements else None
self._addonInfo['voipdnsserver'] = [ dnsserver1, dnsserver2 ]
elements = tree.xpath('/s:Envelope/s:Body/u:GetAddonInfosResponse/NewByteSendRate', namespaces=namespaces)
self._addonInfo['sendRateByte'] = int(elements[0].text) if elements else 0
elements = tree.xpath('/s:Envelope/s:Body/u:GetAddonInfosResponse/NewByteReceiveRate', namespaces=namespaces)
self._addonInfo['receiveRateByte'] = int(elements[0].text) if elements else 0
elements = tree.xpath('/s:Envelope/s:Body/u:GetAddonInfosResponse/NewPacketSendRate', namespaces=namespaces)
self._addonInfo['sendRatePacket'] = int(elements[0].text) if elements else 0
elements = tree.xpath('/s:Envelope/s:Body/u:GetAddonInfosResponse/NewPacketReceiveRate', namespaces=namespaces)
self._addonInfo['receiveRatePacket'] = int(elements[0].text) if elements else 0
elements = tree.xpath('/s:Envelope/s:Body/u:GetAddonInfosResponse/NewTotalBytesSent', namespaces=namespaces)
self._addonInfo['sendTotalRateByte'] = int(elements[0].text) if elements else 0
elements = tree.xpath('/s:Envelope/s:Body/u:GetAddonInfosResponse/NewTotalBytesReceived', namespaces=namespaces)
self._addonInfo['receiveTotalByte'] = int(elements[0].text) if elements else 0
ret = True
finally:
pass
else:
self._addonInfo = None
ret = False
return ret
def _retrieveCallLists(self, user):
response = self._sendRequest( FritzBox.Request(self._baseurl + '/control/foncontrol', 'schemas-upnp-org:service:device:foncontrol:1', 'GetCallLists') )
if response is not None:
self._callLists[user] = []
ret = False
try:
tree = etree.fromstring(response.contents)
namespaces = { 's':"http://schemas.xmlsoap.org/soap/envelope/", 'u':"urn:schemas-upnp-org:service:WANIPConnection:1" }
elements = tree.xpath('/s:Envelope/s:Body/u:GetExternalIPAddressResponse/NewExternalIPAddress', namespaces=namespaces)
#self._wanAddress = elements[0].text
ret = True
finally:
pass
else:
self._callLists[user] = None
ret = False
return ret
def _retrieveUserList(self):
response = self._sendRequest( FritzBox.Request(self._baseurl + '/control/foncontrol', 'schemas-upnp-org:service:device:foncontrol:1', 'GetUserList'), True )
if response is not None:
self._userList = []
ret = False
try:
tree = etree.fromstring(response.contents)
namespaces = { 's':"http://schemas.xmlsoap.org/soap/envelope/", 'u':"urn:schemas-upnp-org:service:device:foncontrol:1" }
elements = tree.xpath('/s:Envelope/s:Body/u:GetUserListResponse/UserList', namespaces=namespaces)
#self._wanAddress = elements[0].text
ret = True
finally:
pass
else:
self._userList = None
ret = False
return ret
def _retrieveDSLLinkInfo(self):
response = self._sendRequest( FritzBox.Request(self._baseurl + '/control/WANDSLLinkC1', 'schemas-upnp-org:service:WANDSLLinkConfig:1', 'GetDSLLinkInfo') )
if response is not None:
self._dslLinkInfo = {}
ret = False
try:
tree = etree.fromstring(response.contents)
namespaces = { 's':"http://schemas.xmlsoap.org/soap/envelope/", 'u':"urn:schemas-upnp-org:service:WANDSLLinkConfig:1" }
elements = tree.xpath('/s:Envelope/s:Body/u:GetDSLLinkInfoResponse/NewLinkType', namespaces=namespaces)
self._dslLinkInfo['linktype'] = elements[0].text if elements else None
elements = tree.xpath('/s:Envelope/s:Body/u:GetDSLLinkInfoResponse/NewLinkStatus', namespaces=namespaces)
self._dslLinkInfo['status'] = elements[0].text if elements else None
ret = True
finally:
pass
else:
self._dslLinkInfo = None
ret = False
return ret
def _retrieveCommonLinkProperties(self):
response = self._sendRequest( FritzBox.Request(self._baseurl + '/control/WANCommonIFC1', 'schemas-upnp-org:service:WANCommonInterfaceConfig:1', 'GetCommonLinkProperties') )
if response is not None:
self._commonLinkProperties = {}
ret = False
try:
tree = etree.fromstring(response.contents)
namespaces = { 's':"http://schemas.xmlsoap.org/soap/envelope/", 'u':"urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1" }
elements = tree.xpath('/s:Envelope/s:Body/u:GetCommonLinkPropertiesResponse/NewWANAccessType', namespaces=namespaces)
self._commonLinkProperties['wanaccesstype'] = elements[0].text if elements else None
elements = tree.xpath('/s:Envelope/s:Body/u:GetCommonLinkPropertiesResponse/NewLayer1UpstreamMaxBitRate', namespaces=namespaces)
self._commonLinkProperties['upstreammaxbiterate'] = int(elements[0].text) if elements else None
elements = tree.xpath('/s:Envelope/s:Body/u:GetCommonLinkPropertiesResponse/NewLayer1DownstreamMaxBitRate', namespaces=namespaces)
self._commonLinkProperties['downstreammaxbitrate'] = int(elements[0].text) if elements else None
elements = tree.xpath('/s:Envelope/s:Body/u:GetCommonLinkPropertiesResponse/NewPhysicalLinkStatus', namespaces=namespaces)
self._commonLinkProperties['physicallinkstatus'] = elements[0].text if elements else None
ret = True
finally:
pass
else:
self._commonLinkProperties = None
ret = False
return ret
def connectTime(self):
if self._status is None:
self._retrieveStatusInfo()
if self._status is not None:
return self._status['connectTime']
else:
return None
def connectDuration(self):
if self._status is None:
self._retrieveStatusInfo()
if self._status is not None:
return self._status['connectDuration']
else:
return None
def connectionStatus(self):
if self._status is None:
self._retrieveStatusInfo()
if self._status is not None:
return self._status['connectionstatus']
else:
return None
def lastConnectionError(self):
if self._status is None:
self._retrieveStatusInfo()
if self._status is not None:
return self._status['lastConnectionError']
else:
return None
def dslLinkStatus(self):
if self._dslLinkInfo is None:
self._retrieveDSLLinkInfo()
if self._dslLinkInfo is not None:
return self._dslLinkInfo['status']
else:
return None
def physicalLinkStatus(self):
if self._commonLinkProperties is None:
self._retrieveCommonLinkProperties()
if self._commonLinkProperties is not None:
return self._commonLinkProperties['physicallinkstatus']
else:
return None
def physicalLinkUpStream(self):
if self._commonLinkProperties is None:
self._retrieveCommonLinkProperties()
if self._commonLinkProperties is not None:
return self._commonLinkProperties['upstreammaxbiterate']
else:
return None
def physicalLinkDownStream(self):
if self._commonLinkProperties is None:
self._retrieveCommonLinkProperties()
if self._commonLinkProperties is not None:
return self._commonLinkProperties['downstreammaxbitrate']
else:
return None
def isConnected(self):
cxnstat = self.connectionStatus()
cxnerr = self.lastConnectionError()
return True if cxnstat == 'Connected' or (cxnstat == 'Connecting' and cxnerr is None) else False
def isDSLConnected(self):
cxnstat = self.dslLinkStatus()
return True if cxnstat == 'Up' else False
def isPhysicalConnected(self):
cxnstat = self.physicalLinkStatus()
return True if cxnstat == 'Up' else False
def wanAddress(self):
if self._wanAddress is None:
self._retrieveWANAddress()
if self._wanAddress is not None:
return self._wanAddress
else:
return None
def dnsServer(self):
if self._addonInfo is None:
self._retrieveAddonInfo()
if self._addonInfo is not None:
return self._addonInfo['dnsserver']
else:
return None
def userList(self):
if self._userList is None:
self._retrieveUserList()
if self._userList is not None:
return self._userList
else:
return None
    def callList(self, user):
        if self._callLists.get(user) is None:
            self._retrieveCallLists(user)
        if self._callLists.get(user) is not None:
            return self._callLists[user]
        else:
            return None
def reconnect(self):
response = self._sendRequest( FritzBox.Request('/upnp/control/WANIPConn1', 'schemas-upnp-org:service:WANIPConnection:1', 'ForceTermination') )
if response is not None:
ret = True
else:
ret = False
return ret
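# Usage sketch: poll a box on the default hostname and print a few of the
# lazily retrieved values.  Needs a reachable FRITZ!Box with UPnP status
# information enabled; the output is obviously environment-specific.
if __name__ == '__main__':
    box = FritzBox()
    if box.isConnected():
        print('WAN address: %s' % box.wanAddress())
        print('up since:    %s' % box.connectTime())
        print('DNS servers: %s' % (box.dnsServer(),))
    else:
        print('not connected: %s' % box.lastConnectionError())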
|
aroth-arsoft/arsoft-python
|
python3/arsoft/fritzbox/__init__.py
|
Python
|
gpl-3.0
| 16,739
|
# -*- coding:utf-8 -*-
import argparse
from nlp.ner.idcnn.train import train
from nlp.ner.idcnn.predict import predict
def main(args):
if args.train:
train(args)
else:
predict(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--train', type=bool, default=True, help="Whether train the model")
parser.add_argument('--clean', type=bool, default=True, help="Whether clean the model")
parser.add_argument('--ckpt_path', type=str, default="ckpt", help="Path to save model")
parser.add_argument('--log_path', type=str, default="train.log", help="File for log")
parser.add_argument('--vocab_path', type=str, default="vocab.json", help="Path to vocab file")
parser.add_argument('--config_path', type=str, default="config_file", help="File for config")
parser.add_argument('--script', type=str, default="conlleval", help="evaluation script")
parser.add_argument('--result_path', type=str, default="result", help="Path to result")
parser.add_argument('--emb_file', type=str, default="vec.txt", help="Path for pre_trained embedding")
parser.add_argument('--train_file', type=str, default="train.txt", help="Path for train data")
parser.add_argument('--dev_file', type=str, default="dev.txt", help="Path for dev data")
parser.add_argument('--test_file', type=str, default="test.txt", help="Path for test data")
parser.add_argument('--raw_file', type=str, default="example.raw", help="Path for predict data")
parser.add_argument('--model_type', type=str, default="bilstm", help="Model type, can be idcnn or bilstm")
parser.add_argument('--seg_dim', type=int, default=50, help="Embedding size for segmentation, 0 if not used")
parser.add_argument('--char_dim', type=int, default=100, help="Embedding size for characters")
parser.add_argument('--lstm_dim', type=int, default=100, help="Num of hidden units in LSTM, or num of filters in IDCNN")
parser.add_argument('--tag_schema', type=str, default="iobes", help="tagging schema iobes or iob")
parser.add_argument('--clip', type=int, default=5, help="Gradient clip")
parser.add_argument('--dropout', type=float, default=0.5, help="Dropout rate")
parser.add_argument('--batch_size', type=int, default=20, help="batch size")
parser.add_argument('--lr', type=float, default=0.001, help="Initial learning rate")
parser.add_argument('--optimizer', type=str, default='adam')
parser.add_argument('--pre_emb', type=bool, default=True, help="Whether use pre-trained embedding")
parser.add_argument('--zeros', type=bool, default=False, help="Whether replace digits with zero")
parser.add_argument('--lower', type=bool, default=True, help="Whether lower case")
parser.add_argument('--max_epoch', type=int, default=4, help="maximum training epochs")
parser.add_argument('--steps_check', type=int, default=100, help="steps per checkpoint")
args = parser.parse_args()
main(args)
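# Note on the flags above: argparse's `type=bool` just applies bool() to the
# raw string, so `--train False` still yields True (any non-empty string is
# truthy).  A common workaround is an explicit converter like this sketch:
def str2bool(v):
    if v.lower() in ('true', '1', 'yes'):
        return True
    if v.lower() in ('false', '0', 'no'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % v)
# e.g. parser.add_argument('--train', type=str2bool, default=True, ...)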
|
koala-ai/tensorflow_nlp
|
nlp/ner/idcnn/run.py
|
Python
|
apache-2.0
| 3,000
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cityBikeWein.py
Copyright (C) 2010 Patrick Installé <PatrickInstalle@P-Installe.be>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
This program parses the data of CityBike - Wien - Austria
"""
import sys
import os
import urllib2
import re
import xml.dom.minidom
import datetime
from plugin import *
class CityBikeWien(Provider):
config = {
'country_uid' : 'at',
'country_Name' : 'Autriche',
'city_uid' : 'wien',
'city_Name' : 'Wien',
'bike_name' : 'CityBike',
'server' : 'dynamisch.citybikewien.at',
'lat' : 48.2092062,
'lng' : 16.3727778,
}
def url(self):
return 'http://' + self.config['server'] + "/citybike_xml.php"
def get_countries(self):
country = Country()
country.uid = "at"
country.name = "Austria"
return [country]
def get_cities(self, country):
city = City()
city.uid = self.config['city_uid']
city.id = city.uid
city.name = self.config['city_Name']
city.bikeName = self.config['bike_name']
city.lat = self.config['lat']
city.lng = self.config['lng']
city.create_rect()
city.type = "CityBikeWien"
return [city]
def get_stations(self, city):
stations = []
url = self.url()
        fp = urllib2.urlopen(url)
data = fp.read()
dom = xml.dom.minidom.parseString(data)
for node in dom.getElementsByTagName('station'):
station = Station()
station.uid = node.getElementsByTagName('id')[0].childNodes[0].toxml()
station.id = station.uid
station.name = node.getElementsByTagName('name')[0].childNodes[0].toxml()
if node.getElementsByTagName('description')[0].hasChildNodes():
station.description = node.getElementsByTagName('description')[0].childNodes[0].toxml()
else:
station.description = ''
station.lat = float(node.getElementsByTagName('latitude')[0].childNodes[0].toxml())
station.lng = float(node.getElementsByTagName('longitude')[0].childNodes[0].toxml())
station.zone = "0"
station.bikes = int(node.getElementsByTagName('free_bikes')[0].childNodes[0].toxml())
station.slots = int(node.getElementsByTagName('free_boxes')[0].childNodes[0].toxml())
#status.total = node.getElementsByTagName('boxes')[0].childNodes[0].toxml()
#status.status = node.getElementsByTagName('status')[0].childNodes[0].toxml()
stations.append(station)
return stations
def get_status(self, station, city):
return station
def get_zones(self, city):
return []
def dump_city(self, city):
#city.rect = self.get_city_bike_zone(service, city)
city.infos = 'http://' + self.config['server'] + "/citybike_xml.php"
data = self._dump_city(city)
print data
def dump_stations(self, city):
#city.rect = self.get_city_bike_zone(service, city)
data = self._dump_stations(city)
print data.encode('utf8')
def test():
prov = CityBikeWien()
countries = prov.get_countries()
print countries
print countries[0]
cities = prov.get_cities(countries[0])
print cities
print cities[0]
zones = prov.get_zones(cities[0])
print zones
if (zones):
print zones[0]
stations = prov.get_stations(cities[0])
print "Stations: ", len(stations)
station = prov.get_status(stations[0], cities[0])
print station
def main():
test()
if __name__ == '__main__':
main()
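# Standalone sketch of the <station> parsing done in get_stations, run against
# an inline sample document (values made up; the real feed is assumed to use
# the same tag names as above).
SAMPLE_XML = """<stations>
  <station>
    <id>101</id><name>Stephansplatz</name><description/>
    <latitude>48.2086</latitude><longitude>16.3721</longitude>
    <free_bikes>5</free_bikes><free_boxes>11</free_boxes>
  </station>
</stations>"""

def _parse_sample():
    dom = xml.dom.minidom.parseString(SAMPLE_XML)
    node = dom.getElementsByTagName('station')[0]
    name = node.getElementsByTagName('name')[0].childNodes[0].toxml()
    bikes = int(node.getElementsByTagName('free_bikes')[0].childNodes[0].toxml())
    return name, bikes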
|
iksaif/lugdulov
|
python/citybikewien.py
|
Python
|
gpl-2.0
| 4,354
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This is a simple echo bot using decorators and webhook with flask
# It echoes any incoming text messages and does not use the polling method.
import logging
import time
import flask
import telebot
API_TOKEN = '<api_token>'
WEBHOOK_HOST = '<ip/host where the bot is running>'
WEBHOOK_PORT = 8443 # 443, 80, 88 or 8443 (port need to be 'open')
WEBHOOK_LISTEN = '0.0.0.0' # In some VPS you may need to put here the IP addr
WEBHOOK_SSL_CERT = './webhook_cert.pem' # Path to the ssl certificate
WEBHOOK_SSL_PRIV = './webhook_pkey.pem' # Path to the ssl private key
# Quick'n'dirty SSL certificate generation:
#
# openssl genrsa -out webhook_pkey.pem 2048
# openssl req -new -x509 -days 3650 -key webhook_pkey.pem -out webhook_cert.pem
#
# When asked for "Common Name (e.g. server FQDN or YOUR name)" you should reply
# with the same value in you put in WEBHOOK_HOST
WEBHOOK_URL_BASE = "https://%s:%s" % (WEBHOOK_HOST, WEBHOOK_PORT)
WEBHOOK_URL_PATH = "/%s/" % (API_TOKEN)
logger = telebot.logger
telebot.logger.setLevel(logging.INFO)
bot = telebot.TeleBot(API_TOKEN)
app = flask.Flask(__name__)
# Empty webserver index, return nothing, just http 200
@app.route('/', methods=['GET', 'HEAD'])
def index():
return ''
# Process webhook calls
@app.route(WEBHOOK_URL_PATH, methods=['POST'])
def webhook():
if flask.request.headers.get('content-type') == 'application/json':
json_string = flask.request.get_data().decode('utf-8')
update = telebot.types.Update.de_json(json_string)
bot.process_new_updates([update])
return ''
else:
flask.abort(403)
# Handle '/start' and '/help'
@bot.message_handler(commands=['help', 'start'])
def send_welcome(message):
bot.reply_to(message,
("Hi there, I am EchoBot.\n"
"I am here to echo your kind words back to you."))
# Handle all other messages
@bot.message_handler(func=lambda message: True, content_types=['text'])
def echo_message(message):
bot.reply_to(message, message.text)
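# Local smoke-test sketch (call manually; not part of the original example):
# exercise the webhook route with Flask's test client instead of a real
# Telegram update.  The payload is a minimal, made-up skeleton -- telebot's
# parser may expect more fields on real updates, and the echo reply itself
# needs a valid API_TOKEN.
def _local_webhook_smoke_test():
    import json
    client = app.test_client()
    update = {"update_id": 1,
              "message": {"message_id": 1, "date": 0,
                          "chat": {"id": 42, "type": "private"},
                          "text": "ping"}}
    return client.post(WEBHOOK_URL_PATH, data=json.dumps(update),
                       content_type='application/json')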
# Remove any previous webhook first; setting a new one sometimes fails if an
# old webhook is still registered
bot.remove_webhook()
time.sleep(0.1)
# Set webhook
bot.set_webhook(url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH,
certificate=open(WEBHOOK_SSL_CERT, 'r'))
# Start flask server
app.run(host=WEBHOOK_LISTEN,
port=WEBHOOK_PORT,
ssl_context=(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV),
debug=True)
|
eternnoir/pyTelegramBotAPI
|
examples/webhook_examples/webhook_flask_echo_bot.py
|
Python
|
gpl-2.0
| 2,475
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
tkRAD - tkinter Rapid Application Development library
(c) 2013+ Raphaël SEBAN <motus@laposte.net>
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program.
If not, see: http://www.gnu.org/licenses/
"""
# lib imports
import re
import os
import os.path as OP
import configparser as CP
from . import path
from . import tools
# unique instance pointer
__option_manager = None
# service getter
def get_option_manager (**kw):
r"""
gets application-wide unique instance for rc option manager;
"""
global __option_manager
if not isinstance(__option_manager, OptionManager):
__option_manager = OptionManager(**kw)
# end if
return __option_manager
# end def
# service class
class OptionManager(CP.ConfigParser):
r"""
generic rc configuration file internal options manager;
"""
CONFIG = {
"dir": "~/.config/apps",
"file": "options.rc",
} # end of CONFIG
SECTIONS = (
"dirs", "files", "geometry", "gui", "xml",
) # end of SECTIONS
def __init__ (self, **kw):
r"""
class constructor - inits params and rc sections;
"""
# super inits
CP.ConfigParser.__init__(self)
# member inits
self.set_config_dir(kw.get("rc_dir"))
self.set_config_file(kw.get("rc_file"))
self.set_sections("DEFAULT")
self.set_sections(*self.SECTIONS)
self._reset_load()
# end def
def _get_path (self):
r"""
builds path along rc config dir and filename;
returns path;
"""
return OP.join(self.get_config_dir(), self.get_config_file())
# end def
def _reset_load (self):
r"""
resets loading op flag;
no return value (void);
"""
self.__loaded = False
# end def
def get_config_dir (self):
r"""
configuration directory getter;
"""
return path.normalize(self.__rc_dir)
# end def
def get_config_file (self):
r"""
configuration file radix getter (filename w/out extension);
"""
return self.__rc_file
# end def
def load (self):
r"""
tries to load predefined rc file or a default one;
loads successful file only once;
        any later calls will not be taken into account;
use reload() to reset internal rc options;
returns a list of read files on success, None otherwise;
"""
# inits
_success = None
# inner controls
if not self.__loaded:
_success = self.read(
tools.choose_str(
self._get_path(),
OP.join(
path.normalize(self.CONFIG.get("dir")),
self.get_config_file()
),
)
)
self.__loaded = tools.is_plist(_success)
# end if
return _success
# end def
def reload (self):
r"""
forces reload of rc file and resets internal options;
use with caution as some runtime modified options might
not be saved before this operation;
returns a list of read files on success, None otherwise;
"""
self._reset_load()
return self.load()
# end def
def save (self):
r"""
saves internal options to a predefined rc file;
no return value (void);
"""
try:
# get RC file path
_path = self._get_path()
# ensure directories do exist
os.makedirs(self.get_config_dir(), exist_ok = True)
# now try to open RC file
with open(_path, "w") as _file:
# write data
self.write(_file)
# end with
except Exception as e:
# console warning
print(
"[WARNING] could *NOT* save "
"options configuration file."
"\nGot the following error:\n" + str(e)
)
# end try
# end def
def set_config_dir (self, value):
r"""
configuration directory setter;
no return value (void);
"""
# private member inits
self.__rc_dir = path.normalize(
tools.choose_str(value, self.CONFIG.get("dir"))
)
self._reset_load()
# end def
def set_config_file (self, value):
r"""
configuration file radix setter (filename w/out extension);
no return value (void);
"""
# private member inits
self.__rc_file = re.sub(
r"[^\w.]+", r"-",
tools.choose_str(value, self.CONFIG.get("file"))
)
self._reset_load()
# end def
def set_defaults (self, **kw):
r"""
fills the 'DEFAULT' rc file section with default
(key, value) pairs defined in @kw keywords param;
no return value (void);
"""
self["DEFAULT"].update(kw)
# end def
def set_sections (self, *names):
r"""
adds new sections to internal rc options if missing;
keeps already existing sections untouched;
no return value (void);
"""
for _section in set(names):
self.setdefault(_section, dict())
# end for
# end def
# end class OptionManager
|
ronaldsantos63/tkGAME
|
tkRAD/core/options.py
|
Python
|
gpl-3.0
| 6,240
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from simple_manager import __version__ as Version
setup(
name=u'simple-dependencies-manager',
version=Version,
description=u"A simple script that helps you take control of your projects so you do not get crazy!",
long_description=u"A simple script that helps you take control of your projects so you do not get crazy!",
keywords='dependencies manager git tags requirements',
author=u'Victor Pantoja',
author_email='victor.pantoja@gmail.com',
url='http://github.com/victorpantoja/simple-dependencies-manager',
license='Apache License 2.0',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python'],
packages=find_packages(),
package_dir={"simple_manager": "simple_manager"},
include_package_data=True,
scripts=['simple_manager/simple-manager.py'],
install_requires=[
"PyYAML==3.10"
]
)
|
victorpantoja/simple-dependencies-manager
|
setup.py
|
Python
|
apache-2.0
| 1,020
|
#!/usr/bin/env python3
import sys
import os, os.path
from IPython import get_ipython
import nbformat
import io
import time
NBVERSION = 4
def _compile_to_py(nb_path,py_path):
shell = get_ipython()
# load the notebook object
with io.open(nb_path, 'r', encoding='utf-8') as f:
nb = nbformat.read(f,NBVERSION)
with io.open(py_path,'w',encoding='utf-8') as pyf:
pyf.write(u'## Compiled from {} on {}\n'.format(nb_path,time.ctime()))
for cell in nb['cells']:
if cell['cell_type'] == 'code':
# transform the input to executable Python
##print ("Source",cell['source'])
ec = cell['execution_count']
code = shell.input_transformer_manager.transform_cell(cell['source'])
if code.startswith('##test:'):
continue
if code.startswith('get_ipython().run_cell_magic('):
continue
if code.startswith('## Test Section:'):
pyf.write(u'\n## Import ended by "## Test Section:"\n')
break
if code.startswith('#### End Import ####'):
pyf.write(u'\n## Import ended by "#### End Import ####"\n')
break
pyf.write(u'\n')
pyf.write(u'## In [{}]:\n'.format(' ' if ec is None else ec))
pyf.write(code)
if __name__ == '__main__':
for inf in sys.argv[1:]:
if inf.endswith('.ipynb'):
outf = inf[:-6] + '.py'
else:
outf = inf + '.py'
print('Compiling {} => {}'.format(inf,outf))
_compile_to_py(inf,outf)
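# invocation sketch (hypothetical notebook name): run under IPython so that
# get_ipython() returns a live shell, e.g.
#     ipython ipynb_compiler.py Frame2D.ipynb
# which writes Frame2D.py, skipping '##test:' cells and cell magics, and
# stopping at a '## Test Section:' or '#### End Import ####' cell.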
|
nholtz/structural-analysis
|
matrix-methods/frame2d/Frame2D/ipynb_compiler.py
|
Python
|
cc0-1.0
| 1,694
|
from django.shortcuts import render
def error404(request):
respon = render(request,'errors/404error.tpl')
respon.status_code = 404
return respon
def error500(request):
respon = render(request,'errors/500error.tpl')
respon.status_code = 500
return respon
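# wiring sketch (assumed to live in the project's root urls.py; the dotted
# paths follow this project's DjangoBlog package):
#     handler404 = 'DjangoBlog.views.error404'
#     handler500 = 'DjangoBlog.views.error500'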
|
enixdark/10gen-Courses
|
Relational-SQL/DjangoBlog/DjangoBlog/views.py
|
Python
|
mit
| 282
|
#!/usr/bin/env python
import optparse
import sys
import pdb
#from numpy import *
from collections import defaultdict
'''
Ranks each group of translations in 'translations_list' using the features
provided, with their corresponding weights.
@translations_list: one list of candidate translations per Russian sentence
@translations_list type: list of lists
@scores: one list of feature-vector tuples per Russian sentence, aligned with @translations_list
@scores type: list of lists
@weights: list of weights for the features
@weights type: list
@outname: file in which the best translations are written
@outname type: string
'''
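# worked example (hypothetical numbers): with weights = [1.0, 0.5] and a
# candidate whose feature tuple is (-2.0, -4.0), its score is
#     1.0 * -2.0 + 0.5 * -4.0 = -4.0
# and the highest-scoring candidate for each sentence is written to @outname.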
def rerank(translations_list, scores, weights, outname):
outfile = open(outname, 'w')
    for rus_ind, translations in enumerate(translations_list):  # for all Russian sentences
        (best_score, best_translation) = (-1e300, '')
for tr_ind, translation in enumerate(translations): # loop over this sentence's translations
score = 0.0
for w_ind, w in enumerate(weights):
score += w * scores[rus_ind][tr_ind][w_ind] # add the weighted feature score for this translation
if score > best_score:
(best_score, best_translation) = (score, translation)
try:
outfile.write("%s\n" % best_translation)
        except Exception:
sys.exit(1)
outfile.close()
def rerank_basic(lex=1, tm=1, lm=1, length=0, outfilename='default'):
outfile = open(outfilename,'w')
hypotheses = open("data/dev+test.with-len.txt")
weights = {'p(e)': lm, 'p(e|f)': tm, 'p_lex(f|e)': lex, 'len':length}
all_hyps = [hyp.split(' ||| ') for hyp in hypotheses]
all_feats = set()
for hyp in all_hyps:
_, _, feats = hyp
for feat in feats.split():
k,_ = feat.split('=')
all_feats.add(k)
num_sents = len(all_hyps) / 100
for s in xrange(0, num_sents):
hyps_for_one_sent = all_hyps[s * 100:s * 100 + 100]
(best_score, best) = (-1e300, '')
for (num, hyp, feats) in hyps_for_one_sent:
score = 0.0
for feat in feats.split(' '):
(k, v) = feat.split('=')
score += weights[k] * float(v)
if score > best_score:
(best_score, best) = (score, hyp)
try:
outfile.write("%s\n" % best)
        except Exception:
sys.exit(1)
    hypotheses.close()
    outfile.close()
    return outfilename
|
asantinc/translation-decoding
|
reranker/rerankfun.py
|
Python
|
mit
| 2,468
|
#-*- coding:utf-8 -*-
import re
from miasm2.expression.expression import *
from pyparsing import *
from miasm2.core.cpu import *
from collections import defaultdict
import miasm2.arch.x86.regs as regs_module
from miasm2.arch.x86.regs import *
from miasm2.core.asmblock import AsmLabel
log = logging.getLogger("x86_arch")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)-5s: %(message)s"))
log.addHandler(console_handler)
log.setLevel(logging.WARN)
conditional_branch = ["JO", "JNO", "JB", "JAE",
"JZ", "JNZ", "JBE", "JA",
"JS", "JNS", "JPE", "JNP",
#"L", "NL", "NG", "G"]
"JL", "JGE", "JLE", "JG",
"JCXZ", "JECXZ", "JRCXZ"]
unconditional_branch = ['JMP', 'JMPF']
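# these name lists drive instruction_x86.dstflow/breakflow/splitflow below:
# conditional branches break the flow and keep a fall-through successor,
# while unconditional jumps break it without one.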
f_isad = "AD"
f_s08 = "S08"
f_u08 = "U08"
f_s16 = "S16"
f_u16 = "U16"
f_s32 = "S32"
f_u32 = "U32"
f_s64 = "S64"
f_u64 = "U64"
f_imm = 'IMM'
f_imm2size = {f_s08: 8, f_s16: 16, f_s32: 32, f_s64: 64,
f_u08: 8, f_u16: 16, f_u32: 32, f_u64: 64}
size2gpregs = {8: gpregs08, 16: gpregs16,
32: gpregs32, 64: gpregs64}
replace_regs64 = {
AL: RAX[:8], CL: RCX[:8], DL: RDX[:8], BL: RBX[:8],
AH: RAX[8:16], CH: RCX[8:16], DH: RDX[8:16], BH: RBX[8:16],
SPL: RSP[0:8], BPL: RBP[0:8], SIL: RSI[0:8], DIL: RDI[0:8],
R8B: R8[0:8], R9B: R9[0:8], R10B: R10[0:8], R11B: R11[0:8],
R12B: R12[0:8], R13B: R13[0:8], R14B: R14[0:8], R15B: R15[0:8],
AX: RAX[:16], CX: RCX[:16], DX: RDX[:16], BX: RBX[:16],
SP: RSP[:16], BP: RBP[:16], SI: RSI[:16], DI: RDI[:16],
R8W: R8[:16], R9W: R9[:16], R10W: R10[:16], R11W: R11[:16],
R12W: R12[:16], R13W: R13[:16], R14W: R14[:16], R15W: R15[:16],
EAX: RAX[:32], ECX: RCX[:32], EDX: RDX[:32], EBX: RBX[:32],
ESP: RSP[:32], EBP: RBP[:32], ESI: RSI[:32], EDI: RDI[:32],
R8D: R8[:32], R9D: R9[:32], R10D: R10[:32], R11D: R11[:32],
R12D: R12[:32], R13D: R13[:32], R14D: R14[:32], R15D: R15[:32],
IP: RIP[:16], EIP: RIP[:32],
ExprId("ST", 64): float_st0,
ExprId("ST(0)", 64): float_st0,
ExprId("ST(1)", 64): float_st1,
ExprId("ST(2)", 64): float_st2,
ExprId("ST(3)", 64): float_st3,
ExprId("ST(4)", 64): float_st4,
ExprId("ST(5)", 64): float_st5,
ExprId("ST(6)", 64): float_st6,
ExprId("ST(7)", 64): float_st7,
}
replace_regs32 = {
AL: EAX[:8], CL: ECX[:8], DL: EDX[:8], BL: EBX[:8],
AH: EAX[8:16], CH: ECX[8:16], DH: EDX[8:16], BH: EBX[8:16],
AX: EAX[:16], CX: ECX[:16], DX: EDX[:16], BX: EBX[:16],
SP: ESP[:16], BP: EBP[:16], SI: ESI[:16], DI: EDI[:16],
IP: EIP[:16],
ExprId("ST", 64): float_st0,
ExprId("ST(0)", 64): float_st0,
ExprId("ST(1)", 64): float_st1,
ExprId("ST(2)", 64): float_st2,
ExprId("ST(3)", 64): float_st3,
ExprId("ST(4)", 64): float_st4,
ExprId("ST(5)", 64): float_st5,
ExprId("ST(6)", 64): float_st6,
ExprId("ST(7)", 64): float_st7,
}
replace_regs16 = {
AL: AX[:8], CL: CX[:8], DL: DX[:8], BL: BX[:8],
AH: AX[8:16], CH: CX[8:16], DH: DX[8:16], BH: BX[8:16],
AX: AX[:16], CX: CX[:16], DX: DX[:16], BX: BX[:16],
SP: SP[:16], BP: BP[:16], SI: SI[:16], DI: DI[:16],
ExprId("ST", 64): float_st0,
ExprId("ST(0)", 64): float_st0,
ExprId("ST(1)", 64): float_st1,
ExprId("ST(2)", 64): float_st2,
ExprId("ST(3)", 64): float_st3,
ExprId("ST(4)", 64): float_st4,
ExprId("ST(5)", 64): float_st5,
ExprId("ST(6)", 64): float_st6,
ExprId("ST(7)", 64): float_st7,
}
replace_regs = {16: replace_regs16,
32: replace_regs32,
64: replace_regs64}
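# note: these tables alias sub-registers onto slices of their parent register,
# e.g. in 64-bit mode AL becomes RAX[:8] and EAX becomes RAX[:32], so that all
# aliases of a register are expressed over the same underlying variable.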
# parser helper ###########
PLUS = Suppress("+")
MULT = Suppress("*")
COLON = Suppress(":")
LBRACK = Suppress("[")
RBRACK = Suppress("]")
dbreg = Group(gpregs16.parser | gpregs32.parser | gpregs64.parser)
gpreg = (gpregs08.parser | gpregs08_64.parser | gpregs16.parser |
gpregs32.parser | gpregs64.parser | gpregs_xmm.parser |
gpregs_mm.parser)
def reg2exprid(r):
if not r.name in all_regs_ids_byname:
raise ValueError('unknown reg')
return all_regs_ids_byname[r.name]
def parse_deref_reg(s, l, t):
t = t[0][0]
return t[0]
def parse_deref_int(s, l, t):
t = t[0]
return t[0]
def parse_deref_regint(s, l, t):
t = t[0]
r1 = reg2exprid(t[0][0])
i1 = ExprInt(t[1].arg, r1.size)
return r1 + i1
def parse_deref_regreg(s, l, t):
t = t[0]
return t[0][0] + t[1][0]
def parse_deref_regregint(s, l, t):
t = t[0]
r1 = reg2exprid(t[0][0])
r2 = reg2exprid(t[1][0])
i1 = ExprInt(t[2].arg, r1.size)
return r1 + r2 + i1
def parse_deref_reg_intmreg(s, l, t):
t = t[0]
r1 = reg2exprid(t[0][0])
r2 = reg2exprid(t[1][0])
i1 = ExprInt(t[2].arg, r1.size)
return r1 + (r2 * i1)
def parse_deref_reg_intmreg_int(s, l, t):
t = t[0]
r1 = reg2exprid(t[0][0])
r2 = reg2exprid(t[1][0])
i1 = ExprInt(t[2].arg, r1.size)
i2 = ExprInt(t[3].arg, r1.size)
return r1 + (r2 * i1) + i2
def parse_deref_intmreg(s, l, t):
t = t[0]
r1 = reg2exprid(t[0][0])
i1 = ExprInt(t[1].arg, r1.size)
return r1 * i1
def parse_deref_intmregint(s, l, t):
t = t[0]
r1 = reg2exprid(t[0][0])
i1 = ExprInt(t[1].arg, r1.size)
    i2 = ExprInt(t[2].arg, r1.size)
return (r1 * i1) + i2
def getreg(s, l, t):
t = t[0]
return t[0]
def parse_deref_ptr(s, l, t):
t = t[0]
return ExprMem(ExprOp('segm', t[0], t[1]))
def parse_deref_segmoff(s, l, t):
t = t[0]
return ExprOp('segm', t[0], t[1])
variable, operand, base_expr = gen_base_expr()
def ast_id2expr(t):
return mn_x86.regs.all_regs_ids_byname.get(t, t)
def ast_int2expr(a):
return ExprInt(a, 64)
my_var_parser = ParseAst(ast_id2expr, ast_int2expr)
base_expr.setParseAction(my_var_parser)
int_or_expr = base_expr
deref_mem_ad = Group(LBRACK + dbreg + RBRACK).setParseAction(parse_deref_reg)
deref_mem_ad |= Group(
LBRACK + int_or_expr + RBRACK).setParseAction(parse_deref_int)
deref_mem_ad |= Group(
LBRACK + dbreg + PLUS +
int_or_expr + RBRACK).setParseAction(parse_deref_regint)
deref_mem_ad |= Group(
LBRACK + dbreg + PLUS +
dbreg + RBRACK).setParseAction(parse_deref_regreg)
deref_mem_ad |= Group(
LBRACK + dbreg + PLUS + dbreg + PLUS +
int_or_expr + RBRACK).setParseAction(parse_deref_regregint)
deref_mem_ad |= Group(
LBRACK + dbreg + PLUS + dbreg + MULT +
int_or_expr + RBRACK).setParseAction(parse_deref_reg_intmreg)
deref_mem_ad |= Group(
LBRACK + dbreg + PLUS + dbreg + MULT + int_or_expr +
PLUS + int_or_expr + RBRACK).setParseAction(parse_deref_reg_intmreg_int)
deref_mem_ad |= Group(
LBRACK + dbreg + MULT +
int_or_expr + RBRACK).setParseAction(parse_deref_intmreg)
deref_mem_ad |= Group(
LBRACK + dbreg + MULT + int_or_expr +
PLUS + int_or_expr + RBRACK).setParseAction(parse_deref_intmregint)
deref_ptr = Group(int_or_expr + COLON +
int_or_expr).setParseAction(parse_deref_segmoff)
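# parse sketch: with the rules above, an operand such as
#     [RAX + RBX * 0x2 + 0x10]
# is turned into the expression
#     RAX + (RBX * 0x2) + 0x10
# (an ExprOp tree over ExprId/ExprInt nodes), which is later wrapped in an
# ExprMem once a size prefix such as 'DWORD PTR' is seen.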
PTR = Suppress('PTR')
FAR = Suppress('FAR')
BYTE = Literal('BYTE')
WORD = Literal('WORD')
DWORD = Literal('DWORD')
QWORD = Literal('QWORD')
TBYTE = Literal('TBYTE')
XMMWORD = Literal('XMMWORD')
MEMPREFIX2SIZE = {'BYTE': 8, 'WORD': 16, 'DWORD': 32,
'QWORD': 64, 'TBYTE': 80, 'XMMWORD': 128}
SIZE2MEMPREFIX = dict((x[1], x[0]) for x in MEMPREFIX2SIZE.items())
def parse_deref_mem(s, l, t):
t = t[0]
if len(t) == 2:
s, ptr = t
return ExprMem(ptr, MEMPREFIX2SIZE[s[0]])
elif len(t) == 3:
s, segm, ptr = t
return ExprMem(ExprOp('segm', segm[0], ptr), MEMPREFIX2SIZE[s[0]])
else:
raise ValueError('len(t) > 3')
mem_size = Group(BYTE | DWORD | QWORD | WORD | TBYTE | XMMWORD)
deref_mem = Group(mem_size + PTR + Optional(Group(int_or_expr + COLON))
+ deref_mem_ad).setParseAction(parse_deref_mem)
rmarg = Group(gpregs08.parser |
gpregs08_64.parser |
gpregs16.parser |
gpregs32.parser |
gpregs64.parser |
gpregs_mm.parser |
gpregs_xmm.parser
).setParseAction(getreg)
rmarg |= deref_mem
mem_far = FAR + deref_mem
cl_or_imm = Group(r08_ecx.parser).setParseAction(getreg)
cl_or_imm |= int_or_expr
class r_al(reg_noarg, m_arg):
reg_info = r08_eax
parser = reg_info.parser
class r_ax(reg_noarg, m_arg):
reg_info = r16_eax
parser = reg_info.parser
class r_dx(reg_noarg, m_arg):
reg_info = r16_edx
parser = reg_info.parser
class r_eax(reg_noarg, m_arg):
reg_info = r32_eax
parser = reg_info.parser
class r_rax(reg_noarg, m_arg):
reg_info = r64_eax
parser = reg_info.parser
class r_cl(reg_noarg, m_arg):
reg_info = r08_ecx
parser = reg_info.parser
invmode = {16: 32, 32: 16}
def opmode_prefix(mode):
size, opmode, admode = mode
if size in [16, 32]:
if opmode:
return invmode[size]
else:
return size
elif size == 64:
if opmode:
return 16
else:
return 32
raise NotImplementedError('not fully functional')
def admode_prefix(mode):
size, opmode, admode = mode
if size in [16, 32]:
if admode:
return invmode[size]
else:
return size
elif size == 64:
return 64
raise NotImplementedError('not fully functional')
def v_opmode_info(size, opmode, rex_w, stk):
if size in [16, 32]:
if opmode:
return invmode[size]
else:
return size
elif size == 64:
# Rex has the maximum priority
# Then opmode
# Then stacker
if rex_w == 1:
return 64
elif opmode == 1:
return 16
elif stk:
return 64
else:
return 32
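# worked examples: v_opmode_info(32, 1, None, 0) -> 16 (the 0x66 prefix
# toggles 16 <-> 32); v_opmode_info(64, 0, 1, 0) -> 64 (REX.W wins);
# v_opmode_info(64, 1, 0, 0) -> 16; and the plain 64-bit default is 32.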
def v_opmode(p):
stk = hasattr(p, 'stk')
return v_opmode_info(p.mode, p.opmode, p.rex_w.value, stk)
def v_admode_info(size, admode):
if size in [16, 32]:
if admode:
return invmode[size]
else:
return size
elif size == 64:
if admode == 1:
return 32
return 64
def v_admode(p):
return v_admode_info(p.mode, p.admode)
def offsize(p):
if p.opmode:
return 16
else:
return p.mode
def get_prefix(s):
    g = re.search(r'(\S+)(\s+)', s)
if not g:
return None, s
prefix, b = g.groups()
return prefix, s[len(prefix) + len(b):]
repeat_mn = ["INS", "OUTS",
"MOVSB", "MOVSW", "MOVSD", "MOVSQ",
"SCASB", "SCASW", "SCASD", "SCASQ",
"LODSB", "LODSW", "LODSD", "LODSQ",
"STOSB", "STOSW", "STOSD", "STOSQ",
"CMPSB", "CMPSW", "CMPSD", "CMPSQ",
]
segm2enc = {CS: 1, SS: 2, DS: 3, ES: 4, FS: 5, GS: 6}
enc2segm = dict([(x[1], x[0]) for x in segm2enc.items()])
class group:
def __init__(self):
self.value = None
class additional_info:
def __init__(self):
self.except_on_instr = False
self.g1 = group()
self.g2 = group()
self.vopmode = None
self.stk = False
self.v_opmode = None
self.v_admode = None
self.prefixed = ''
class instruction_x86(instruction):
__slots__ = []
delayslot = 0
def __init__(self, *args, **kargs):
super(instruction_x86, self).__init__(*args, **kargs)
def v_opmode(self):
return self.additional_info.v_opmode
def v_admode(self):
return self.additional_info.v_admode
def dstflow(self):
if self.name in conditional_branch + unconditional_branch:
return True
if self.name.startswith('LOOP'):
return True
return self.name in ['CALL']
def dstflow2label(self, symbol_pool):
if self.additional_info.g1.value & 6 and self.name in repeat_mn:
return
expr = self.args[0]
if isinstance(expr, ExprId):
if not isinstance(expr.name, AsmLabel) and expr not in all_regs_ids:
raise ValueError("ExprId must be a label or a register")
elif isinstance(expr, ExprInt):
ad = expr.arg + int(self.offset)
l = symbol_pool.getby_offset_create(ad)
s = ExprId(l, expr.size)
self.args[0] = s
else:
return
def breakflow(self):
if self.name in conditional_branch + unconditional_branch:
return True
if self.name.startswith('LOOP'):
return True
if self.name.startswith('RET'):
return True
if self.name.startswith('INT'):
return True
if self.name.startswith('SYS'):
return True
return self.name in ['CALL', 'HLT', 'IRET', 'IRETD', 'IRETQ', 'ICEBP']
def splitflow(self):
if self.name in conditional_branch:
return True
if self.name in unconditional_branch:
return False
if self.name.startswith('LOOP'):
return True
if self.name.startswith('INT'):
return True
if self.name.startswith('SYS'):
return True
return self.name in ['CALL']
def setdstflow(self, a):
return
def is_subcall(self):
return self.name in ['CALL']
def getdstflow(self, symbol_pool):
if self.additional_info.g1.value & 6 and self.name in repeat_mn:
ad = int(self.offset)
l = symbol_pool.getby_offset_create(ad)
s = ExprId(l, self.v_opmode())
return [s]
return [self.args[0]]
def get_symbol_size(self, symbol, symbol_pool):
return self.mode
def fixDstOffset(self):
expr = self.args[0]
if self.offset is None:
            raise ValueError('symbol not resolved %s' % expr)
if not isinstance(expr, ExprInt):
log.warning('dynamic dst %r', expr)
return
self.args[0] = ExprInt(int(expr) - self.offset, self.mode)
def get_info(self, c):
self.additional_info.g1.value = c.g1.value
self.additional_info.g2.value = c.g2.value
self.additional_info.stk = hasattr(c, 'stk')
self.additional_info.v_opmode = c.v_opmode()
self.additional_info.v_admode = c.v_admode()
self.additional_info.prefix = c.prefix
self.additional_info.prefixed = getattr(c, "prefixed", "")
def __str__(self):
o = super(instruction_x86, self).__str__()
if self.additional_info.g1.value & 1:
o = "LOCK %s" % o
if self.additional_info.g1.value & 2:
if getattr(self.additional_info.prefixed, 'default', "") != "\xF2":
o = "REPNE %s" % o
if self.additional_info.g1.value & 4:
if getattr(self.additional_info.prefixed, 'default', "") != "\xF3":
o = "REPE %s" % o
return o
def get_args_expr(self):
args = []
for a in self.args:
a = a.replace_expr(replace_regs[self.mode])
args.append(a)
return args
@staticmethod
def arg2str(expr, pos=None):
if isinstance(expr, ExprId) or isinstance(expr, ExprInt):
o = str(expr)
elif ((isinstance(expr, ExprOp) and expr.op == 'far' and
isinstance(expr.args[0], ExprMem)) or
isinstance(expr, ExprMem)):
if isinstance(expr, ExprOp):
prefix, expr = "FAR ", expr.args[0]
else:
prefix = ""
sz = SIZE2MEMPREFIX[expr.size]
segm = ""
if expr.is_mem_segm():
segm = "%s:" % expr.arg.args[0]
expr = expr.arg.args[1]
else:
expr = expr.arg
if isinstance(expr, ExprOp):
s = str(expr).replace('(', '').replace(')', '')
else:
s = str(expr)
o = prefix + sz + ' PTR %s[%s]' % (segm, s)
elif isinstance(expr, ExprOp) and expr.op == 'segm':
o = "%s:%s" % (expr.args[0], expr.args[1])
else:
raise ValueError('check this %r' % expr)
return "%s" % o
class mn_x86(cls_mn):
name = "x86"
prefix_op_size = False
prefix_ad_size = False
regs = regs_module
all_mn = []
all_mn_mode = defaultdict(list)
all_mn_name = defaultdict(list)
all_mn_inst = defaultdict(list)
bintree = {}
num = 0
delayslot = 0
pc = {16: IP, 32: EIP, 64: RIP}
sp = {16: SP, 32: ESP, 64: RSP}
instruction = instruction_x86
max_instruction_len = 15
@classmethod
def getpc(cls, attrib):
return cls.pc[attrib]
@classmethod
def getsp(cls, attrib):
return cls.sp[attrib]
def v_opmode(self):
if hasattr(self, 'stk'):
stk = 1
else:
stk = 0
return v_opmode_info(self.mode, self.opmode, self.rex_w.value, stk)
def v_admode(self):
size, opmode, admode = self.mode, self.opmode, self.admode
if size in [16, 32]:
if admode:
return invmode[size]
else:
return size
elif size == 64:
if admode == 1:
return 32
return 64
def additional_info(self):
info = additional_info()
info.g1.value = self.g1.value
info.g2.value = self.g2.value
info.stk = hasattr(self, 'stk')
info.v_opmode = self.v_opmode()
info.prefixed = ""
if hasattr(self, 'prefixed'):
info.prefixed = self.prefixed.default
return info
@classmethod
def check_mnemo(cls, fields):
pass
@classmethod
def getmn(cls, name):
return name.upper()
@classmethod
def mod_fields(cls, fields):
prefix = [d_g1, d_g2, d_rex_p, d_rex_w, d_rex_r, d_rex_x, d_rex_b]
return prefix + fields
@classmethod
def gen_modes(cls, subcls, name, bases, dct, fields):
dct['mode'] = None
return [(subcls, name, bases, dct, fields)]
@classmethod
def fromstring(cls, s, mode):
pref = 0
prefix, new_s = get_prefix(s)
if prefix == "LOCK":
pref |= 1
s = new_s
elif prefix == "REPNE":
pref |= 2
s = new_s
elif prefix == "REPE":
pref |= 4
s = new_s
c = super(mn_x86, cls).fromstring(s, mode)
c.additional_info.g1.value = pref
return c
@classmethod
def pre_dis(cls, v, mode, offset):
offset_o = offset
pre_dis_info = {'opmode': 0,
'admode': 0,
'g1': 0,
'g2': 0,
'rex_p': 0,
'rex_w': 0,
'rex_r': 0,
'rex_x': 0,
'rex_b': 0,
'prefix': "",
'prefixed': "",
}
while True:
c = v.getbytes(offset)
if c == '\x66':
pre_dis_info['opmode'] = 1
elif c == '\x67':
pre_dis_info['admode'] = 1
elif c == '\xf0':
pre_dis_info['g1'] = 1
elif c == '\xf2':
pre_dis_info['g1'] = 2
elif c == '\xf3':
pre_dis_info['g1'] = 4
elif c == '\x2e':
pre_dis_info['g2'] = 1
elif c == '\x36':
pre_dis_info['g2'] = 2
elif c == '\x3e':
pre_dis_info['g2'] = 3
elif c == '\x26':
pre_dis_info['g2'] = 4
elif c == '\x64':
pre_dis_info['g2'] = 5
elif c == '\x65':
pre_dis_info['g2'] = 6
elif mode == 64 and c in '@ABCDEFGHIJKLMNO':
x = ord(c)
pre_dis_info['rex_p'] = 1
pre_dis_info['rex_w'] = (x >> 3) & 1
pre_dis_info['rex_r'] = (x >> 2) & 1
pre_dis_info['rex_x'] = (x >> 1) & 1
pre_dis_info['rex_b'] = (x >> 0) & 1
offset += 1
break
else:
c = ''
break
pre_dis_info['prefix'] += c
offset += 1
return pre_dis_info, v, mode, offset, offset - offset_o
@classmethod
def get_cls_instance(cls, cc, mode, infos=None):
for opmode in [0, 1]:
for admode in [0, 1]:
c = cc()
c.init_class()
c.reset_class()
c.add_pre_dis_info()
c.dup_info(infos)
c.mode = mode
c.opmode = opmode
c.admode = admode
if not hasattr(c, 'stk') and hasattr(c, "fopmode") and c.fopmode.mode == 64:
c.rex_w.value = 1
yield c
def post_dis(self):
if self.g2.value:
for a in self.args:
if not isinstance(a.expr, ExprMem):
continue
m = a.expr
a.expr = ExprMem(
ExprOp('segm', enc2segm[self.g2.value], m.arg), m.size)
return self
def dup_info(self, infos):
if infos is not None:
self.g1.value = infos.g1.value
self.g2.value = infos.g2.value
def reset_class(self):
super(mn_x86, self).reset_class()
if hasattr(self, "opmode"):
del(self.opmode)
if hasattr(self, "admode"):
del(self.admode)
def add_pre_dis_info(self, pre_dis_info=None):
if pre_dis_info is None:
return True
if hasattr(self, "prefixed") and self.prefixed.default == "\x66":
pre_dis_info['opmode'] = 0
self.opmode = pre_dis_info['opmode']
self.admode = pre_dis_info['admode']
if hasattr(self, 'no_xmm_pref') and\
pre_dis_info['prefix'] and\
pre_dis_info['prefix'][-1] in '\x66\xf2\xf3':
return False
if (hasattr(self, "prefixed") and
not pre_dis_info['prefix'].endswith(self.prefixed.default)):
return False
if (self.rex_w.value is not None and
self.rex_w.value != pre_dis_info['rex_w']):
return False
else:
self.rex_w.value = pre_dis_info['rex_w']
self.rex_r.value = pre_dis_info['rex_r']
self.rex_b.value = pre_dis_info['rex_b']
self.rex_x.value = pre_dis_info['rex_x']
self.rex_p.value = pre_dis_info['rex_p']
if hasattr(self, 'no_rex') and\
(self.rex_r.value or self.rex_b.value or
self.rex_x.value or self.rex_p.value):
return False
self.g1.value = pre_dis_info['g1']
self.g2.value = pre_dis_info['g2']
self.prefix = pre_dis_info['prefix']
return True
def post_asm(self, v):
return v
def gen_prefix(self):
v = ""
rex = 0x40
if self.g1.value is None:
self.g1.value = 0
if self.g2.value is None:
self.g2.value = 0
if self.rex_w.value:
rex |= 0x8
if self.rex_r.value:
rex |= 0x4
if self.rex_x.value:
rex |= 0x2
if self.rex_b.value:
rex |= 0x1
if rex != 0x40 or self.rex_p.value == 1:
v = chr(rex) + v
if hasattr(self, 'no_rex'):
return None
if hasattr(self, 'prefixed'):
v = self.prefixed.default + v
if self.g1.value & 1:
v = "\xf0" + v
if self.g1.value & 2:
if hasattr(self, 'no_xmm_pref'):
return None
v = "\xf2" + v
if self.g1.value & 4:
if hasattr(self, 'no_xmm_pref'):
return None
v = "\xf3" + v
if self.g2.value:
v = {1: '\x2e', 2: '\x36', 3: '\x3e', 4:
'\x26', 5: '\x64', 6: '\x65'}[self.g2.value] + v
# mode prefix
if hasattr(self, "admode") and self.admode:
v = "\x67" + v
if hasattr(self, "opmode") and self.opmode:
if hasattr(self, 'no_xmm_pref'):
return None
v = "\x66" + v
return v
def encodefields(self, decoded):
v = super(mn_x86, self).encodefields(decoded)
prefix = self.gen_prefix()
if prefix is None:
return None
return prefix + v
def getnextflow(self, symbol_pool):
raise NotImplementedError('not fully functional')
def ir_pre_instruction(self):
return [ExprAff(mRIP[self.mode],
ExprInt(self.offset + self.l, mRIP[self.mode].size))]
@classmethod
def filter_asm_candidates(cls, instr, candidates):
cand_same_mode = []
cand_diff_mode = []
out = []
for c, v in candidates:
if (hasattr(c, 'no_xmm_pref') and
(c.g1.value & 2 or c.g1.value & 4 or c.opmode)):
continue
if hasattr(c, "fopmode") and v_opmode(c) != c.fopmode.mode:
continue
if hasattr(c, "fadmode") and v_admode(c) != c.fadmode.mode:
continue
# relative dstflow must not have opmode set
# (affect IP instead of EIP for instance)
if (instr.dstflow() and
instr.name not in ["JCXZ", "JECXZ", "JRCXZ"] and
len(instr.args) == 1 and
isinstance(instr.args[0], ExprInt) and c.opmode):
continue
out.append((c, v))
candidates = out
for c, v in candidates:
if v_opmode(c) == instr.mode:
cand_same_mode += v
for c, v in candidates:
if v_opmode(c) != instr.mode:
cand_diff_mode += v
cand_same_mode.sort(key=len)
cand_diff_mode.sort(key=len)
return cand_same_mode + cand_diff_mode
class bs_modname_size(bs_divert):
prio = 1
def divert(self, i, candidates):
out = []
for candidate in candidates:
cls, name, bases, dct, fields = candidate
fopmode = opmode_prefix(
(dct['mode'], dct['opmode'], dct['admode']))
mode = dct['mode']
size, opmode, admode = dct['mode'], dct['opmode'], dct['admode']
# no mode64 existance in name means no 64bit version of mnemo
if mode == 64:
if mode in self.args['name']:
nfields = fields[:]
f, i = getfieldindexby_name(nfields, 'rex_w')
f = bs("1", l=0, cls=(bs_fbit,), fname="rex_w")
osize = v_opmode_info(size, opmode, 1, 0)
nfields[i] = f
nfields = nfields[:-1]
args = dict(self.args)
ndct = dict(dct)
if osize in self.args['name']:
ndct['name'] = self.args['name'][osize]
out.append((cls, ndct['name'], bases, ndct, nfields))
nfields = fields[:]
nfields = nfields[:-1]
f, i = getfieldindexby_name(nfields, 'rex_w')
f = bs("0", l=0, cls=(bs_fbit,), fname="rex_w")
osize = v_opmode_info(size, opmode, 0, 0)
nfields[i] = f
args = dict(self.args)
ndct = dict(dct)
if osize in self.args['name']:
ndct['name'] = self.args['name'][osize]
out.append((cls, ndct['name'], bases, ndct, nfields))
else:
l = opmode_prefix((dct['mode'], dct['opmode'], dct['admode']))
osize = v_opmode_info(size, opmode, None, 0)
nfields = fields[:-1]
args = dict(self.args)
ndct = dict(dct)
if osize in self.args['name']:
ndct['name'] = self.args['name'][osize]
out.append((cls, ndct['name'], bases, ndct, nfields))
return out
class bs_modname_jecx(bs_divert):
prio = 1
def divert(self, i, candidates):
out = []
for candidate in candidates:
cls, name, bases, dct, fields = candidate
fopmode = opmode_prefix(
(dct['mode'], dct['opmode'], dct['admode']))
mode = dct['mode']
size, opmode, admode = dct['mode'], dct['opmode'], dct['admode']
nfields = fields[:]
nfields = nfields[:-1]
args = dict(self.args)
ndct = dict(dct)
if mode == 64:
if admode:
ndct['name'] = "JECXZ"
else:
ndct['name'] = "JRCXZ"
elif mode == 32:
if admode:
ndct['name'] = "JCXZ"
else:
ndct['name'] = "JECXZ"
elif mode == 16:
if admode:
ndct['name'] = "JECXZ"
else:
ndct['name'] = "JCXZ"
else:
raise ValueError('unhandled mode')
out.append((cls, ndct['name'], bases, ndct, nfields))
return out
class bs_modname_mode(bs_divert):
prio = 1
def divert(self, i, candidates):
out = []
for candidate in candidates:
cls, name, bases, dct, fields = candidate
fopmode = opmode_prefix(
(dct['mode'], dct['opmode'], dct['admode']))
size, opmode, admode = dct['mode'], dct['opmode'], dct['admode']
mode = dct['mode']
l = opmode_prefix((dct['mode'], dct['opmode'], dct['admode']))
osize = v_opmode_info(size, opmode, None, 0)
nfields = fields[:-1]
args = dict(self.args)
ndct = dict(dct)
if mode == 64 or osize == 32:
ndct['name'] = self.args['name'][mode]
else:
ndct['name'] = self.args['name'][16]
out.append((cls, ndct['name'], bases, ndct, nfields))
return out
class x86_imm(imm_noarg):
parser = base_expr
def decodeval(self, v):
return swap_uint(self.l, v)
def encodeval(self, v):
return swap_uint(self.l, v)
class x86_imm_fix_08(imm_noarg):
parser = base_expr
intsize = 8
intmask = (1 << intsize) - 1
def decodeval(self, v):
return self.ival
def encodeval(self, v):
if v != self.ival:
return False
return self.ival
class x86_08(x86_imm):
intsize = 8
intmask = (1 << intsize) - 1
class x86_16(x86_imm):
intsize = 16
intmask = (1 << intsize) - 1
class x86_32(x86_imm):
intsize = 32
intmask = (1 << intsize) - 1
class x86_64(x86_imm):
intsize = 64
intmask = (1 << intsize) - 1
class x86_08_ne(x86_imm):
intsize = 8
intmask = (1 << intsize) - 1
def encode(self):
return True
def decode(self, v):
v = swap_uint(self.l, v)
p = self.parent
admode = p.v_admode()
value = sign_ext(v, self.intsize, admode)
self.expr = ExprInt(value, admode)
return True
class x86_16_ne(x86_08_ne):
intsize = 16
intmask = (1 << intsize) - 1
class x86_32_ne(x86_08_ne):
intsize = 32
intmask = (1 << intsize) - 1
class x86_64_ne(x86_08_ne):
intsize = 64
intmask = (1 << intsize) - 1
class x86_s08to16(x86_imm):
in_size = 8
out_size = 16
def myexpr(self, x):
return ExprInt(x, 16)
def int2expr(self, v):
return self.myexpr(v)
def expr2int(self, e):
if not isinstance(e, ExprInt):
return None
v = int(e)
if v & ~((1 << self.l) - 1) != 0:
return None
return v
def decode(self, v):
v = v & self.lmask
v = self.decodeval(v)
if self.parent.v_opmode() == 64:
self.expr = ExprInt(sign_ext(v, self.in_size, 64), 64)
else:
if (1 << (self.l - 1)) & v:
v = sign_ext(v, self.l, self.out_size)
self.expr = self.myexpr(v)
return True
def encode(self):
if not isinstance(self.expr, ExprInt):
return False
v = int(self.expr)
opmode = self.parent.v_opmode()
out_size = self.out_size
if opmode != self.out_size:
if opmode == 32 and self.out_size == 64:
out_size = opmode
if v == sign_ext(
int(v & ((1 << self.in_size) - 1)), self.in_size, out_size):
pass
else:
# test with rex_w
self.parent.rex_w.value = 1
opmode = self.parent.v_opmode()
out_size = opmode
if (v != sign_ext(
int(v & ((1 << self.in_size) - 1)),
self.in_size, out_size)):
return False
if v != sign_ext(
int(v & ((1 << self.in_size) - 1)), self.in_size, out_size):
return False
v = self.encodeval(v)
self.value = (v & 0xffffffff) & self.lmask
return True
def decodeval(self, v):
return swap_uint(self.l, v)
def encodeval(self, v):
return swap_sint(self.l, v)
class x86_s08to32(x86_s08to16):
in_size = 8
out_size = 32
def myexpr(self, x):
return ExprInt(x, 32)
def decode(self, v):
v = v & self.lmask
v = self.decodeval(v)
if self.parent.rex_w.value == 1:
v = ExprInt(sign_ext(v, self.in_size, 64), 64)
else:
v = ExprInt(sign_ext(v, self.in_size, 32), 32)
self.expr = v
return True
class x86_s08to64(x86_s08to32):
in_size = 8
out_size = 64
def myexpr(self, x):
return ExprInt(x, 64)
class x86_s32to64(x86_s08to32):
in_size = 32
out_size = 64
def myexpr(self, x):
return ExprInt(x, 64)
class bs_eax(m_arg):
reg_info = r_eax_all
rindex = 0
parser = reg_info.parser
def decode(self, v):
p = self.parent
expr = None
if hasattr(p, 'w8') and p.w8.value == 0:
expr = regs08_expr[self.rindex]
else:
expr = size2gpregs[p.v_opmode()].expr[self.rindex]
self.expr = expr
return True
def encode(self):
self.value = 0
p = self.parent
expr = self.expr
osize = p.v_opmode()
if hasattr(p, 'w8'):
if p.w8.value is None:
# XXX TODO: priority in w8 erase?
if expr.size == 8:
p.w8.value = 0
else:
p.w8.value = 1
if hasattr(p, 'w8') and p.w8.value == 0:
return expr == regs08_expr[self.rindex]
elif p.mode in [16, 32]:
return expr == size2gpregs[osize].expr[self.rindex]
elif p.mode == 64:
if expr == size2gpregs[64].expr[self.rindex]:
p.rex_w.value = 1
return True
elif expr == size2gpregs[osize].expr[self.rindex]:
return True
return False
return False
class bs_seg(m_arg):
reg_info = r_eax_all
rindex = 0
parser = reg_info.parser
def decode(self, v):
self.expr = self.reg_info.expr[0]
return True
def encode(self):
self.value = 0
return self.expr == self.reg_info.expr[0]
class bs_edx(bs_eax):
reg_info = r_edx_all
rindex = 2
parser = reg_info.parser
class bs_st(bs_eax):
reg_info = r_st_all
rindex = 0
parser = reg_info.parser
class bs_cs(bs_seg):
reg_info = r_cs_all
rindex = 0
parser = reg_info.parser
class bs_ds(bs_seg):
reg_info = r_ds_all
rindex = 0
parser = reg_info.parser
class bs_es(bs_seg):
reg_info = r_es_all
rindex = 0
parser = reg_info.parser
class bs_ss(bs_seg):
reg_info = r_ss_all
rindex = 0
parser = reg_info.parser
class bs_fs(bs_seg):
reg_info = r_fs_all
rindex = 0
parser = reg_info.parser
class bs_gs(bs_seg):
reg_info = r_gs_all
rindex = 0
parser = reg_info.parser
class x86_reg_st(reg_noarg, m_arg):
reg_info = r_st_all
parser = reg_info.parser
class bs_sib_scale(bs_divert):
bsname = "sib_scale"
def divert(self, i, candidates):
out = []
done = False
for cls, name, bases, dct, fields in candidates:
if (not (admode_prefix(
(dct['mode'], dct['opmode'], dct['admode'])) != 16 and
'rm' in dct and dct['rm'] == 0b100 and
'mod' in dct and dct['mod'] != 0b11)):
ndct = dict(dct)
nfields = fields[:]
nfields[i] = None
ndct[self.args['fname']] = None
out.append((cls, ndct['name'], bases, ndct, nfields))
continue
nfields = fields[:]
args = dict(self.args)
ndct = dict(dct)
f = bs(**args)
nfields[i] = f
ndct[self.args['fname']] = None
out.append((cls, ndct['name'], bases, ndct, nfields))
return out
class bs_sib_index(bs_sib_scale):
pass
class bs_sib_base(bs_sib_scale):
pass
class bs_disp(bs_divert):
def divert(self, i, candidates):
out = []
done = False
for cls, name, bases, dct, fields in candidates:
ndct = dict(dct)
nfields = fields[:]
if (admode_prefix(
(dct['mode'], dct['opmode'], dct['admode'])) == 16):
if 'mod' in dct and dct['mod'] == 0b00 and \
'rm' in dct and dct['rm'] == 0b110:
nfields[i] = bs(
l=16, cls=(x86_16_ne,), fname=self.args['fname'])
ndct[self.args['fname']] = True
out.append((cls, ndct['name'], bases, ndct, nfields))
continue
elif 'mod' in dct and dct['mod'] == 0b01:
nfields[i] = bs(
l=8, cls=(x86_08_ne,), fname=self.args['fname'])
ndct[self.args['fname']] = True
out.append((cls, ndct['name'], bases, ndct, nfields))
continue
elif 'mod' in dct and dct['mod'] == 0b10:
nfields[i] = bs(
l=16, cls=(x86_16_ne,), fname=self.args['fname'])
ndct[self.args['fname']] = True
out.append((cls, ndct['name'], bases, ndct, nfields))
continue
else:
if 'mod' in dct and dct['mod'] == 0b00 and \
'rm' in dct and dct['rm'] == 0b101:
nfields[i] = bs(
l=32, cls=(x86_32_ne,), fname=self.args['fname'])
ndct[self.args['fname']] = True
out.append((cls, ndct['name'], bases, ndct, nfields))
continue
elif 'mod' in dct and dct['mod'] == 0b01:
nfields[i] = bs(
l=8, cls=(x86_08_ne,), fname=self.args['fname'])
ndct[self.args['fname']] = True
out.append((cls, ndct['name'], bases, ndct, nfields))
continue
elif 'mod' in dct and dct['mod'] == 0b10:
nfields[i] = bs(
l=32, cls=(x86_32_ne,), fname=self.args['fname'])
ndct[self.args['fname']] = True
out.append((cls, ndct['name'], bases, ndct, nfields))
continue
nfields[i] = None
ndct[self.args['fname']] = None
out.append((cls, ndct['name'], bases, ndct, nfields))
return out
def getmodrm(c):
return (c >> 6) & 3, (c >> 3) & 7, c & 7
def setmodrm(mod, re, rm):
return ((mod & 3) << 6) | ((re & 7) << 3) | (rm & 7)
def sib(c):
return modrm(c)
db_afs_64 = []
sib_64_s08_ebp = []
def gen_modrm_form():
global db_afs_64, sib_64_s08_ebp
ebp = 5
sib_s08_ebp = [{f_isad: True} for i in range(0x100)]
sib_u32_ebp = [{f_isad: True} for i in range(0x100)]
sib_u32 = [{f_isad: True} for i in range(0x100)]
sib_u64 = []
for rex_x in xrange(2):
o = []
for rex_b in xrange(2):
x = [{f_isad: True} for i in range(0x100)]
o.append(x)
sib_u64.append(o)
sib_u64_ebp = []
for rex_x in xrange(2):
o = []
for rex_b in xrange(2):
x = [{f_isad: True} for i in range(0x100)]
o.append(x)
sib_u64_ebp.append(o)
sib_64_s08_ebp = []
for rex_x in xrange(2):
o = []
for rex_b in xrange(2):
x = [{f_isad: True} for i in range(0x100)]
o.append(x)
sib_64_s08_ebp.append(o)
for sib_rez in [sib_s08_ebp,
sib_u32_ebp,
sib_u32,
sib_64_s08_ebp,
sib_u64_ebp,
sib_u64,
]:
for index in range(0x100):
ss, i, b = getmodrm(index)
if b == 0b101:
if sib_rez == sib_s08_ebp:
sib_rez[index][f_imm] = f_s08
sib_rez[index][ebp] = 1
elif sib_rez == sib_u32_ebp:
sib_rez[index][f_imm] = f_u32
sib_rez[index][ebp] = 1
elif sib_rez == sib_u32:
sib_rez[index][f_imm] = f_u32
elif sib_rez == sib_u64_ebp:
for rex_b in xrange(2):
for rex_x in xrange(2):
sib_rez[rex_x][rex_b][index][f_imm] = f_u32
sib_rez[rex_x][rex_b][index][ebp + 8 * rex_b] = 1
elif sib_rez == sib_u64:
for rex_b in xrange(2):
for rex_x in xrange(2):
sib_rez[rex_x][rex_b][index][f_imm] = f_u32
elif sib_rez == sib_64_s08_ebp:
for rex_b in xrange(2):
for rex_x in xrange(2):
sib_rez[rex_x][rex_b][index][f_imm] = f_s08
sib_rez[rex_x][rex_b][index][ebp + 8 * rex_b] = 1
else:
if sib_rez == sib_s08_ebp:
sib_rez[index][b] = 1
sib_rez[index][f_imm] = f_s08
elif sib_rez == sib_u32_ebp:
sib_rez[index][b] = 1
sib_rez[index][f_imm] = f_u32
elif sib_rez == sib_u32:
sib_rez[index][b] = 1
elif sib_rez == sib_u64_ebp:
for rex_b in xrange(2):
for rex_x in xrange(2):
sib_rez[rex_x][rex_b][index][b + 8 * rex_b] = 1
sib_rez[rex_x][rex_b][index][f_imm] = f_u32
elif sib_rez == sib_u64:
for rex_b in xrange(2):
for rex_x in xrange(2):
sib_rez[rex_x][rex_b][index][b + 8 * rex_b] = 1
elif sib_rez == sib_64_s08_ebp:
for rex_b in xrange(2):
for rex_x in xrange(2):
sib_rez[rex_x][rex_b][index][f_imm] = f_s08
sib_rez[rex_x][rex_b][index][b + 8 * rex_b] = 1
if i == 0b100 and sib_rez in [sib_s08_ebp, sib_u32_ebp, sib_u32]:
continue
if sib_rez in [sib_s08_ebp, sib_u32_ebp, sib_u32]:
tmp = i
if not tmp in sib_rez[index]:
sib_rez[index][tmp] = 0 # 1 << ss
sib_rez[index][tmp] += 1 << ss
else:
for rex_b in xrange(2):
for rex_x in xrange(2):
tmp = i + 8 * rex_x
if i == 0b100 and rex_x == 0:
continue
if not tmp in sib_rez[rex_x][rex_b][index]:
sib_rez[rex_x][rex_b][index][tmp] = 0 # 1 << ss
sib_rez[rex_x][rex_b][index][tmp] += 1 << ss
# 32bit
db_afs_32 = [None for i in range(0x100)]
for i in range(0x100):
index = i
mod, re, rm = getmodrm(i)
if mod == 0b00:
if rm == 0b100:
db_afs_32[index] = sib_u32
elif rm == 0b101:
db_afs_32[index] = {f_isad: True, f_imm: f_u32}
else:
db_afs_32[index] = {f_isad: True, rm: 1}
elif mod == 0b01:
if rm == 0b100:
db_afs_32[index] = sib_s08_ebp
continue
tmp = {f_isad: True, rm: 1, f_imm: f_s08}
db_afs_32[index] = tmp
elif mod == 0b10:
if rm == 0b100:
db_afs_32[index] = sib_u32_ebp
else:
db_afs_32[index] = {f_isad: True, rm: 1, f_imm: f_u32}
elif mod == 0b11:
db_afs_32[index] = {f_isad: False, rm: 1}
# 64bit
db_afs_64 = [None for i in range(0x400)]
for i in range(0x400):
index = i
rex_x = (index >> 9) & 1
rex_b = (index >> 8) & 1
mod, re, rm = getmodrm(i & 0xff)
if mod == 0b00:
if rm == 0b100:
db_afs_64[i] = sib_u64[rex_x][rex_b]
elif rm == 0b101:
db_afs_64[i] = {f_isad: True, f_imm: f_u32, 16: 1}
else:
db_afs_64[i] = {f_isad: True, rm + 8 * rex_b: 1}
elif mod == 0b01:
if rm == 0b100:
db_afs_64[i] = sib_64_s08_ebp[rex_x][rex_b]
continue
tmp = {f_isad: True, rm + 8 * rex_b: 1, f_imm: f_s08}
db_afs_64[i] = tmp
elif mod == 0b10:
if rm == 0b100:
db_afs_64[i] = sib_u64_ebp[rex_x][rex_b]
else:
db_afs_64[i] = {f_isad: True, rm + 8 * rex_b: 1, f_imm: f_u32}
elif mod == 0b11:
db_afs_64[i] = {f_isad: False, rm + 8 * rex_b: 1}
# 16bit
db_afs_16 = [None for i in range(0x100)]
_si = 6
_di = 7
_bx = 3
_bp = 5
for i in range(0x100):
index = i
mod, re, rm = getmodrm(i)
if mod == 0b00:
if rm == 0b100:
db_afs_16[index] = {f_isad: True, _si: 1}
elif rm == 0b101:
db_afs_16[index] = {f_isad: True, _di: 1}
elif rm == 0b110:
db_afs_16[index] = {
f_isad: True, f_imm: f_u16} # {f_isad:True,_bp:1}
elif rm == 0b111:
db_afs_16[index] = {f_isad: True, _bx: 1}
else:
db_afs_16[index] = {f_isad: True,
[_si, _di][rm % 2]: 1,
[_bx, _bp][(rm >> 1) % 2]: 1}
elif mod in [0b01, 0b10]:
if mod == 0b01:
my_imm = f_s08
else:
my_imm = f_u16
if rm == 0b100:
db_afs_16[index] = {f_isad: True, _si: 1, f_imm: my_imm}
elif rm == 0b101:
db_afs_16[index] = {f_isad: True, _di: 1, f_imm: my_imm}
elif rm == 0b110:
db_afs_16[index] = {f_isad: True, _bp: 1, f_imm: my_imm}
elif rm == 0b111:
db_afs_16[index] = {f_isad: True, _bx: 1, f_imm: my_imm}
else:
db_afs_16[index] = {f_isad: True,
[_si, _di][rm % 2]: 1,
[_bx, _bp][(rm >> 1) % 2]: 1,
f_imm: my_imm}
elif mod == 0b11:
db_afs_16[index] = {f_isad: False, rm: 1}
byte2modrm = {}
byte2modrm[16] = db_afs_16
byte2modrm[32] = db_afs_32
byte2modrm[64] = db_afs_64
modrm2byte = {16: defaultdict(list),
32: defaultdict(list),
64: defaultdict(list),
}
for size, db_afs in byte2modrm.items():
for i, modrm in enumerate(db_afs):
if not isinstance(modrm, list):
modrm = modrm.items()
modrm.sort()
modrm = tuple(modrm)
modrm2byte[size][modrm].append(i)
continue
for j, modrm_f in enumerate(modrm):
modrm_f = modrm_f.items()
modrm_f.sort()
modrm_f = tuple(modrm_f)
modrm2byte[size][modrm_f].append((i, j))
return byte2modrm, modrm2byte
byte2modrm, modrm2byte = gen_modrm_form()
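# lookup sketch: byte2modrm[mode][byte] maps a decoded modrm byte (extended by
# the REX.X/REX.B bits in 64-bit mode) to a form dict such as
#     {f_isad: True, 5: 1, f_imm: f_s08}   # [EBP + disp8] in 32-bit mode
# while modrm2byte[mode] inverts the mapping for the assembler, listing the
# modrm (and sib) byte values that can encode a given form.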
# fills @o with {register_index: scale} terms found in @e; returns the
# displacement (an ExprInt) if any, else None
def exprfindmod(e, o=None):
if o is None:
o = {}
if isinstance(e, ExprInt):
return e
if isinstance(e, ExprId):
i = size2gpregs[e.size].expr.index(e)
o[i] = 1
return None
elif isinstance(e, ExprOp):
out = None
if e.op == '+':
for a in e.args:
r = exprfindmod(a, o)
                if out and r:
raise ValueError('multiple displacement!')
out = r
return out
elif e.op == "*":
mul = int(e.args[1])
a = e.args[0]
i = size2gpregs[a.size].expr.index(a)
o[i] = mul
else:
raise ValueError('bad op')
return None
def test_addr_size(ptr, size):
if isinstance(ptr, ExprInt):
return ptr.arg < (1 << size)
else:
return ptr.size == size
SIZE2XMMREG = {64:gpregs_mm,
128:gpregs_xmm}
def parse_mem(expr, parent, w8, sx=0, xmm=0, mm=0):
dct_expr = {}
opmode = parent.v_opmode()
if expr.is_mem_segm() and expr.arg.args[0].is_int():
return None, None, False
if expr.is_mem_segm():
segm = expr.arg.args[0]
ptr = expr.arg.args[1]
else:
segm = None
ptr = expr.arg
dct_expr[f_isad] = True
ad_size = ptr.size
admode = parent.v_admode()
if not test_addr_size(ptr, admode):
return None, None, False
if (w8 == 1 and expr.size != opmode and not sx and
not (hasattr(parent, 'sd') or hasattr(parent, 'wd'))):
return None, None, False
if hasattr(parent, 'wd'):
if expr.size == 16:
parent.wd.value = 1
elif expr.size == 32:
pass
else:
return None, None, False
if (not isinstance(ptr, ExprInt) and
parent.mode == 64 and
ptr.size == 32 and
parent.admode != 1):
return None, None, False
dct_expr = {f_isad: True}
disp = exprfindmod(ptr, dct_expr)
out = []
if disp is None:
# add 0 disp
disp = ExprInt(0, 32)
if disp is not None:
for signed, encoding, cast_size in [(True, f_s08, 8),
(True, f_s16, 16),
(True, f_s32, 32),
(False, f_u08, 8),
(False, f_u16, 16),
(False, f_u32, 32)]:
value = ExprInt(int(disp), cast_size)
if admode < value.size:
if signed:
if int(disp.arg) != sign_ext(int(value), admode, disp.size):
continue
else:
if int(disp.arg) != int(value):
continue
else:
if int(disp.arg) != sign_ext(int(value), value.size, admode):
continue
x1 = dict(dct_expr)
x1[f_imm] = (encoding, value)
out.append(x1)
else:
out = [dct_expr]
return out, segm, True
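# encoding note: parse_mem proposes one candidate form per displacement width
# (s08/s16/s32 and u08/u16/u32) whose value round-trips through sign/zero
# extension at the current address size; gen_cand then tries each form and
# filter_asm_candidates prefers the shortest admissible encoding.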
def expr2modrm(expr, parent, w8, sx=0, xmm=0, mm=0):
dct_expr = {f_isad : False}
if mm or xmm:
if mm and expr.size != 64:
return None, None, False
elif xmm and expr.size != 128:
return None, None, False
if isinstance(expr, ExprId):
selreg = SIZE2XMMREG[expr.size]
if not expr in selreg.expr:
return None, None, False
i = selreg.expr.index(expr)
dct_expr[i] = 1
return [dct_expr], None, True
else:
return parse_mem(expr, parent, w8, sx, xmm, mm)
elif expr.size == 64 and expr not in gpregs_mm.expr:
if hasattr(parent, 'sd'):
parent.sd.value = 1
elif hasattr(parent, 'wd'):
pass
elif hasattr(parent, 'stk'):
pass
else:
parent.rex_w.value = 1
opmode = parent.v_opmode()
if sx == 1:
opmode = 16
if sx == 2:
opmode = 32
if expr.size == 8 and w8 != 0:
return None, None, False
if w8 == 0 and expr.size != 8:
return None, None, False
if not isinstance(expr, ExprMem):
dct_expr[f_isad] = False
if xmm:
if expr in gpregs_xmm.expr:
i = gpregs_xmm.expr.index(expr)
dct_expr[i] = 1
return [dct_expr], None, True
else:
return None, None, False
if mm:
if expr in gpregs_mm.expr:
i = gpregs_mm.expr.index(expr)
dct_expr[i] = 1
return [dct_expr], None, True
else:
return None, None, False
if w8 == 0:
if parent.mode == 64 and expr in gpregs08_64.expr:
r = gpregs08_64
parent.rex_p.value = 1
else:
parent.rex_p.value = 0
parent.rex_x.value = 0
r = size2gpregs[8]
if not expr in r.expr:
return None, None, False
i = r.expr.index(expr)
dct_expr[i] = 1
return [dct_expr], None, True
if opmode != expr.size:
return None, None, False
if not expr in size2gpregs[opmode].expr:
return None, None, False
i = size2gpregs[opmode].expr.index(expr)
if i > 7:
if parent.mode != 64:
return None, None, False
dct_expr[i] = 1
return [dct_expr], None, True
return parse_mem(expr, parent, w8, sx, xmm, mm)
def modrm2expr(modrm, parent, w8, sx=0, xmm=0, mm=0):
o = []
if not modrm[f_isad]:
modrm_k = [x[0] for x in modrm.iteritems() if x[1] == 1]
if len(modrm_k) != 1:
raise ValueError('strange reg encoding %r' % modrm)
modrm_k = modrm_k[0]
if w8 == 0:
opmode = 8
elif sx == 1:
opmode = 16
elif sx == 2:
opmode = 32
else:
opmode = parent.v_opmode()
if xmm:
expr = gpregs_xmm.expr[modrm_k]
elif mm:
expr = gpregs_mm.expr[modrm_k]
elif opmode == 8 and (parent.v_opmode() == 64 or parent.rex_p.value == 1):
expr = gpregs08_64.expr[modrm_k]
else:
expr = size2gpregs[opmode].expr[modrm_k]
return expr
admode = parent.v_admode()
opmode = parent.v_opmode()
for modrm_k, scale in modrm.items():
if isinstance(modrm_k, (int, long)):
expr = size2gpregs[admode].expr[modrm_k]
if scale != 1:
expr = ExprInt(scale, admode) * expr
o.append(expr)
if f_imm in modrm:
if parent.disp.value is None:
return None
o.append(ExprInt(int(parent.disp.expr), admode))
expr = ExprOp('+', *o)
if w8 == 0:
opmode = 8
elif sx == 1:
opmode = 16
elif sx == 2:
opmode = 32
if xmm:
opmode = 128
elif mm:
opmode = 64
expr = ExprMem(expr, size=opmode)
return expr
class x86_rm_arg(m_arg):
parser = rmarg
def fromstring(self, s, parser_result=None):
start, stop = super(x86_rm_arg, self).fromstring(s, parser_result)
p = self.parent
if start is None:
return None, None
s = self.expr.size
return start, stop
def get_modrm(self):
p = self.parent
admode = p.v_admode()
        if admode not in [16, 32, 64]:
            raise ValueError('strange admode %r' % admode)
v = setmodrm(p.mod.value, 0, p.rm.value)
v |= p.rex_b.value << 8
v |= p.rex_x.value << 9
if p.mode == 64:
# XXXx to check
admode = 64
xx = byte2modrm[admode][v]
if isinstance(xx, list):
if not p.sib_scale:
return False
v = setmodrm(p.sib_scale.value,
p.sib_index.value,
p.sib_base.value)
xx = xx[v]
return xx
def decode(self, v):
p = self.parent
xx = self.get_modrm()
self.expr = modrm2expr(xx, p, 1)
return self.expr is not None
def gen_cand(self, v_cand, admode):
if not admode in modrm2byte:
# XXX TODO: 64bit
raise StopIteration
if not v_cand:
raise StopIteration
p = self.parent
o_rex_x = p.rex_x.value
o_rex_b = p.rex_b.value
# add candidate without 0 imm
new_v_cand = []
moddd = False
for v in v_cand:
new_v_cand.append(v)
if f_imm in v and int(v[f_imm][1]) == 0:
v = dict(v)
del(v[f_imm])
new_v_cand.append(v)
moddd = True
v_cand = new_v_cand
out_c = []
for v in v_cand:
disp = None
# patch value in modrm
if f_imm in v:
size, disp = v[f_imm]
disp = int(disp)
v[f_imm] = size
vo = v
v = v.items()
v.sort()
v = tuple(v)
admode = 64 if p.mode == 64 else admode
if not v in modrm2byte[admode]:
continue
xx = modrm2byte[admode][v]
# default case
for x in xx:
if type(x) == tuple:
modrm, sib = x
else:
modrm = x
sib = None
# 16 bit cannot have sib
if (not sib is None) and admode == 16:
continue
rex = modrm >> 8 # 0# XXX HACK REM temporary REX modrm>>8
if rex and admode != 64:
continue
p.rex_x.value = (rex >> 1) & 1
p.rex_b.value = rex & 1
if o_rex_x is not None and p.rex_x.value != o_rex_x:
continue
if o_rex_b is not None and p.rex_b.value != o_rex_b:
continue
mod, re, rm = getmodrm(modrm)
# check re on parent
if re != p.reg.value:
continue
if sib:
s_scale, s_index, s_base = getmodrm(sib)
else:
s_scale, s_index, s_base = None, None, None
p.mod.value = mod
p.rm.value = rm
p.sib_scale.value = s_scale
p.sib_index.value = s_index
p.sib_base.value = s_base
p.disp.value = disp
if disp is not None:
p.disp.l = f_imm2size[vo[f_imm]]
yield True
raise StopIteration
def encode(self):
if isinstance(self.expr, ExprInt):
raise StopIteration
p = self.parent
admode = p.v_admode()
mode = self.expr.size
v_cand, segm, ok = expr2modrm(self.expr, p, 1)
if segm:
p.g2.value = segm2enc[segm]
for x in self.gen_cand(v_cand, admode):
yield x
class x86_rm_mem(x86_rm_arg):
def fromstring(self, s, parser_result=None):
self.expr = None
start, stop = super(x86_rm_mem, self).fromstring(s, parser_result)
if not isinstance(self.expr, ExprMem):
return None, None
return start, stop
class x86_rm_mem_far(x86_rm_arg):
parser = mem_far
def fromstring(self, s, parser_result=None):
self.expr = None
start, stop = super(x86_rm_mem_far, self).fromstring(s, parser_result)
if not isinstance(self.expr, ExprMem):
return None, None
self.expr = ExprOp('far', self.expr)
return start, stop
def decode(self, v):
ret = super(x86_rm_mem_far, self).decode(v)
if not ret:
return ret
if isinstance(self.expr, m2_expr.ExprMem):
self.expr = ExprOp('far', self.expr)
return True
def encode(self):
if not (isinstance(self.expr, m2_expr.ExprOp) and
self.expr.op == 'far'):
raise StopIteration
expr = self.expr.args[0]
if isinstance(expr, ExprInt):
raise StopIteration
p = self.parent
admode = p.v_admode()
mode = expr.size
v_cand, segm, ok = expr2modrm(expr, p, 1)
if segm:
p.g2.value = segm2enc[segm]
for x in self.gen_cand(v_cand, admode):
yield x
class x86_rm_w8(x86_rm_arg):
def decode(self, v):
p = self.parent
xx = self.get_modrm()
self.expr = modrm2expr(xx, p, p.w8.value)
return self.expr is not None
def encode(self):
if isinstance(self.expr, ExprInt):
raise StopIteration
p = self.parent
if p.w8.value is None:
if self.expr.size == 8:
p.w8.value = 0
else:
p.w8.value = 1
v_cand, segm, ok = expr2modrm(self.expr, p, p.w8.value)
if segm:
p.g2.value = segm2enc[segm]
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_sx(x86_rm_arg):
def decode(self, v):
p = self.parent
xx = self.get_modrm()
self.expr = modrm2expr(xx, p, p.w8.value, 1)
return self.expr is not None
def encode(self):
if isinstance(self.expr, ExprInt):
raise StopIteration
p = self.parent
if p.w8.value is None:
if self.expr.size == 8:
p.w8.value = 0
else:
p.w8.value = 1
v_cand, segm, ok = expr2modrm(self.expr, p, p.w8.value, 1)
if segm:
p.g2.value = segm2enc[segm]
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_sxd(x86_rm_arg):
def decode(self, v):
p = self.parent
xx = self.get_modrm()
self.expr = modrm2expr(xx, p, 1, 2)
return self.expr is not None
def encode(self):
if isinstance(self.expr, ExprInt):
raise StopIteration
p = self.parent
v_cand, segm, ok = expr2modrm(self.expr, p, 1, 2)
if segm:
p.g2.value = segm2enc[segm]
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_sd(x86_rm_arg):
out_size = 64
def get_s_value(self):
return self.parent.sd.value
def set_s_value(self, value):
self.parent.sd.value = value
def decode(self, v):
p = self.parent
xx = self.get_modrm()
expr = modrm2expr(xx, p, 1)
if not isinstance(expr, ExprMem):
return False
if self.get_s_value() == 0:
expr = ExprMem(expr.arg, 32)
else:
expr = ExprMem(expr.arg, self.out_size)
self.expr = expr
return self.expr is not None
def encode(self):
if isinstance(self.expr, ExprInt):
raise StopIteration
p = self.parent
if not self.expr.size in [32, 64]:
raise StopIteration
self.set_s_value(0)
v_cand, segm, ok = expr2modrm(self.expr, p, 1)
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_wd(x86_rm_sd):
out_size = 16
def get_s_value(self):
return self.parent.wd.value
def set_s_value(self, value):
self.parent.wd.value = value
def encode(self):
if isinstance(self.expr, ExprInt):
raise StopIteration
p = self.parent
p.wd.value = 0
v_cand, segm, ok = expr2modrm(self.expr, p, 1)
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_08(x86_rm_arg):
msize = 8
def decode(self, v):
p = self.parent
xx = self.get_modrm()
expr = modrm2expr(xx, p, 0)
if not isinstance(expr, ExprMem):
self.expr = expr
return True
self.expr = ExprMem(expr.arg, self.msize)
return self.expr is not None
def encode(self):
if isinstance(self.expr, ExprInt):
raise StopIteration
p = self.parent
v_cand, segm, ok = expr2modrm(self.expr, p, 0, 0, 0, 0)
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_reg_m08(x86_rm_arg):
msize = 8
def decode(self, v):
ret = x86_rm_arg.decode(self, v)
if not ret:
return ret
if not isinstance(self.expr, ExprMem):
return True
self.expr = ExprMem(self.expr.arg, self.msize)
return self.expr is not None
def encode(self):
if isinstance(self.expr, ExprInt):
raise StopIteration
p = self.parent
if isinstance(self.expr, ExprMem):
expr = ExprMem(self.expr.arg, 32)
else:
expr = self.expr
v_cand, segm, ok = expr2modrm(expr, p, 1, 0, 0, 0)
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_reg_m16(x86_rm_reg_m08):
msize = 16
class x86_rm_m64(x86_rm_arg):
msize = 64
def decode(self, v):
p = self.parent
xx = self.get_modrm()
expr = modrm2expr(xx, p, 1)
if not isinstance(expr, ExprMem):
return False
self.expr = ExprMem(expr.arg, self.msize)
return self.expr is not None
def encode(self):
if isinstance(self.expr, ExprInt):
raise StopIteration
p = self.parent
v_cand, segm, ok = expr2modrm(self.expr, p, 0, 0, 0, 1)
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_m80(x86_rm_m64):
msize = 80
def encode(self):
if isinstance(self.expr, ExprInt):
raise StopIteration
if not isinstance(self.expr, ExprMem) or self.expr.size != self.msize:
raise StopIteration
p = self.parent
mode = p.mode
if mode == 64:
mode = 32
self.expr = ExprMem(self.expr.arg, mode)
v_cand, segm, ok = expr2modrm(self.expr, p, 1)
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_m08(x86_rm_arg):
msize = 8
def decode(self, v):
p = self.parent
xx = self.get_modrm()
self.expr = modrm2expr(xx, p, 0)
return self.expr is not None
def encode(self):
if self.expr.size != 8:
raise StopIteration
p = self.parent
mode = p.mode
v_cand, segm, ok = expr2modrm(self.expr, p, 0)
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_m16(x86_rm_m80):
msize = 16
class x86_rm_mm(x86_rm_m80):
msize = 64
is_mm = True
is_xmm = False
def decode(self, v):
p = self.parent
xx = self.get_modrm()
expr = modrm2expr(xx, p, 0, 0, self.is_xmm, self.is_mm)
if isinstance(expr, ExprMem):
if self.msize is None:
return False
if expr.size != self.msize:
expr = ExprMem(expr.arg, self.msize)
self.expr = expr
return True
def encode(self):
expr = self.expr
if isinstance(expr, ExprInt):
raise StopIteration
if isinstance(expr, ExprMem) and expr.size != self.msize:
raise StopIteration
p = self.parent
mode = p.mode
if mode == 64:
mode = 32
if isinstance(expr, ExprMem):
if self.is_xmm:
expr = ExprMem(expr.arg, 128)
elif self.is_mm:
expr = ExprMem(expr.arg, 64)
v_cand, segm, ok = expr2modrm(expr, p, 0, 0, self.is_xmm, self.is_mm)
for x in self.gen_cand(v_cand, p.v_admode()):
yield x
class x86_rm_mm_m64(x86_rm_mm):
msize = 64
is_mm = True
is_xmm = False
class x86_rm_xmm(x86_rm_mm):
msize = 128
is_mm = False
is_xmm = True
class x86_rm_xmm_m32(x86_rm_mm):
msize = 32
is_mm = False
is_xmm = True
class x86_rm_xmm_m64(x86_rm_mm):
msize = 64
is_mm = False
is_xmm = True
class x86_rm_xmm_reg(x86_rm_mm):
msize = None
is_mm = False
is_xmm = True
class x86_rm_mm_reg(x86_rm_mm):
msize = None
is_mm = True
is_xmm = False
class x86_rm_reg_noarg(object):
prio = default_prio + 1
parser = gpreg
def fromstring(self, s, parser_result=None):
if not hasattr(self.parent, 'sx') and hasattr(self.parent, "w8"):
self.parent.w8.value = 1
if parser_result:
e, start, stop = parser_result[self.parser]
if e is None:
return None, None
self.expr = e
if self.expr.size == 8:
if hasattr(self.parent, 'sx') or not hasattr(self.parent, 'w8'):
return None, None
self.parent.w8.value = 0
return start, stop
try:
v, start, stop = self.parser.scanString(s).next()
except StopIteration:
return None, None
self.expr = v[0]
if self.expr.size == 0:
if hasattr(self.parent, 'sx') or not hasattr(self.parent, 'w8'):
return None, None
self.parent.w8.value = 0
return start, stop
def getrexsize(self):
return self.parent.rex_r.value
def setrexsize(self, v):
self.parent.rex_r.value = v
def decode(self, v):
v = v & self.lmask
p = self.parent
opmode = p.v_opmode()
if not hasattr(p, 'sx') and (hasattr(p, 'w8') and p.w8.value == 0):
opmode = 8
r = size2gpregs[opmode]
if p.mode == 64 and self.getrexsize():
v |= 0x8
if p.v_opmode() == 64 or p.rex_p.value == 1:
if not hasattr(p, 'sx') and (hasattr(p, 'w8') and p.w8.value == 0):
r = gpregs08_64
elif p.rex_r.value == 1:
v |= 8
self.expr = r.expr[v]
return True
def encode(self):
if not isinstance(self.expr, ExprId):
return False
if self.expr in gpregs64.expr and not hasattr(self.parent, 'stk'):
self.parent.rex_w.value = 1
opmode = self.parent.v_opmode()
if not hasattr(self.parent, 'sx') and hasattr(self.parent, 'w8'):
self.parent.w8.value = 1
if self.expr.size == 8:
if hasattr(self.parent, 'sx') or not hasattr(self.parent, 'w8'):
return False
self.parent.w8.value = 0
opmode = 8
r = size2gpregs[opmode]
if self.expr in r.expr:
i = r.expr.index(self.expr)
elif (opmode == 8 and self.parent.mode == 64 and
self.expr in gpregs08_64.expr):
i = gpregs08_64.expr.index(self.expr)
self.parent.rex_p.value = 1
else:
log.debug("cannot encode reg %r", self.expr)
return False
if self.parent.v_opmode() == 64:
if i > 7:
self.setrexsize(1)
i -= 8
elif self.parent.mode == 64 and i > 7:
i -= 8
self.setrexsize(1)
self.value = i
if self.value > self.lmask:
log.debug("cannot encode field value %x %x",
self.value, self.lmask)
return False
return True
class x86_rm_reg_mm(x86_rm_reg_noarg, m_arg):
selreg = gpregs_mm
def decode(self, v):
if self.parent.mode == 64 and self.getrexsize():
v |= 0x8
self.expr = self.selreg.expr[v]
return True
def encode(self):
if not isinstance(self.expr, ExprId):
return False
if self.expr not in self.selreg.expr:
return False
i = self.selreg.expr.index(self.expr)
if self.parent.mode == 64 and i > 7:
i -= 8
self.setrexsize(1)
self.value = i
if self.value > self.lmask:
log.debug("cannot encode field value %x %x",
self.value, self.lmask)
return False
return True
class x86_rm_reg_xmm(x86_rm_reg_mm):
selreg = gpregs_xmm
class x86_rm_reg(x86_rm_reg_noarg, m_arg):
pass
class x86_reg(x86_rm_reg):
def getrexsize(self):
return self.parent.rex_b.value
def setrexsize(self, v):
self.parent.rex_b.value = v
class x86_reg_modrm(x86_rm_reg):
def getrexsize(self):
return self.parent.rex_r.value
def setrexsize(self, v):
self.parent.rex_r.value = v
class x86_reg_noarg(x86_rm_reg_noarg):
def getrexsize(self):
return self.parent.rex_b.value
def setrexsize(self, v):
self.parent.rex_b.value = v
class x86_rm_segm(reg_noarg, m_arg):
prio = default_prio + 1
reg_info = segmreg
parser = reg_info.parser
class x86_rm_cr(reg_noarg, m_arg):
prio = default_prio + 1
reg_info = crregs
parser = reg_info.parser
class x86_rm_dr(reg_noarg, m_arg):
prio = default_prio + 1
reg_info = drregs
parser = reg_info.parser
class x86_rm_flt(reg_noarg, m_arg):
prio = default_prio + 1
reg_info = fltregs
parser = reg_info.parser
class bs_fbit(bsi):
def decode(self, v):
# value already decoded in pre_dis_info
return True
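# Shift-count operand that is either the constant 1 or the CL register:
# a bit value of 1 selects CL, a bit value of 0 selects the immediate 1.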
class bs_cl1(bsi, m_arg):
parser = cl_or_imm
def decode(self, v):
if v == 1:
self.expr = regs08_expr[1]
else:
self.expr = ExprInt(1, 8)
return True
def encode(self):
if self.expr == regs08_expr[1]:
self.value = 1
elif isinstance(self.expr, ExprInt) and int(self.expr) == 1:
self.value = 0
else:
return False
return True
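# A SIB byte is only present in 32/64-bit addressing when mod != 0b11 and
# rm == 0b100; return the field length in that case, None otherwise.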
def sib_cond(cls, mode, v):
if admode_prefix((mode, v["opmode"], v["admode"])) == 16:
return None
if v['mod'] == 0b11:
return None
elif v['rm'] == 0b100:
return cls.ll
else:
return None
class bs_cond_scale(bs_cond):
# cond must return field len
ll = 2
@classmethod
def flen(cls, mode, v):
return sib_cond(cls, mode, v)
def encode(self):
if self.value is None:
self.value = 0
self.l = 0
return True
return super(bs_cond_scale, self).encode()
def decode(self, v):
self.value = v
return True
class bs_cond_index(bs_cond_scale):
ll = 3
@classmethod
def flen(cls, mode, v):
return sib_cond(cls, mode, v)
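# Displacement length depends on mod/rm: none, 8-bit, or 16/32-bit, with the
# special cases mod == 0b00 and rm == 0b101 (or sib_base == 0b101) forcing a
# 32-bit displacement.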
class bs_cond_disp(bs_cond):
# cond must return field len
@classmethod
def flen(cls, mode, v):
if admode_prefix((mode, v['opmode'], v['admode'])) == 16:
if v['mod'] == 0b00:
if v['rm'] == 0b110:
return 16
else:
return None
elif v['mod'] == 0b01:
return 8
elif v['mod'] == 0b10:
return 16
return None
# 32, 64
if 'sib_base' in v and v['sib_base'] == 0b101:
if v['mod'] == 0b00:
return 32
elif v['mod'] == 0b01:
return 8
elif v['mod'] == 0b10:
return 32
else:
return None
if v['mod'] == 0b00:
if v['rm'] == 0b101:
return 32
else:
return None
elif v['mod'] == 0b01:
return 8
elif v['mod'] == 0b10:
return 32
else:
return None
def encode(self):
if self.value is None:
self.value = 0
self.l = 0
return True
self.value = swap_uint(self.l, self.value)
return True
def decode(self, v):
admode = self.parent.v_admode()
v = swap_uint(self.l, v)
self.value = v
v = sign_ext(v, self.l, admode)
v = ExprInt(v, admode)
self.expr = v
return True
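# Immediate operand whose encoded length depends on the operand-size mode,
# the w8 bit (8-bit operations) and the se bit (sign-extended 8-bit
# immediate).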
class bs_cond_imm(bs_cond_scale, m_arg):
parser = int_or_expr
max_size = 32
def fromstring(self, s, parser_result=None):
if parser_result:
expr, start, stop = parser_result[self.parser]
else:
try:
expr, start, stop = self.parser.scanString(s).next()
except StopIteration:
expr = None
self.expr = expr
if len(self.parent.args) > 1:
l = self.parent.args[0].expr.size
else:
l = self.parent.v_opmode()
if isinstance(self.expr, ExprInt):
v = int(self.expr)
mask = ((1 << l) - 1)
self.expr = ExprInt(v & mask, l)
if self.expr is None:
log.debug('cannot fromstring int %r', s)
return None, None
return start, stop
@classmethod
def flen(cls, mode, v):
if 'w8' not in v or v['w8'] == 1:
if 'se' in v and v['se'] == 1:
return 8
else:
osize = v_opmode_info(mode, v['opmode'], v['rex_w'], 0)
osize = min(osize, cls.max_size)
return osize
return 8
def getmaxlen(self):
return 32
def encode(self):
if not isinstance(self.expr, ExprInt):
raise StopIteration
arg0_expr = self.parent.args[0].expr
self.parent.rex_w.value = 0
# special case for push
if len(self.parent.args) == 1:
v = int(self.expr)
l = self.parent.v_opmode()
l = min(l, self.max_size)
self.l = l
mask = ((1 << self.l) - 1)
if v != sign_ext(v & mask, self.l, l):
raise StopIteration
self.value = swap_uint(self.l, v & ((1 << self.l) - 1))
yield True
raise StopIteration
# assume 2 args; use first arg to guess op size
if arg0_expr.size == 64:
self.parent.rex_w.value = 1
l = self.parent.v_opmode()
v = int(self.expr)
if arg0_expr.size == 8:
if not hasattr(self.parent, 'w8'):
raise StopIteration
self.parent.w8.value = 0
l = 8
if hasattr(self.parent, 'se'):
self.parent.se.value = 0
elif hasattr(self.parent, 'se'):
if hasattr(self.parent, 'w8'):
self.parent.w8.value = 1
# try to generate signed extended version
if v == sign_ext(v & 0xFF, 8, arg0_expr.size):
self.parent.se.value = 1
self.l = 8
self.value = v & 0xFF
yield True
self.parent.se.value = 0
else:
if hasattr(self.parent, 'w8'):
self.parent.w8.value = 1
if l == 64:
self.l = self.getmaxlen()
else:
self.l = l
mask = ((1 << self.l) - 1)
if v != sign_ext(v & mask, self.l, l):
raise StopIteration
self.value = swap_uint(self.l, v & ((1 << self.l) - 1))
yield True
def decode(self, v):
opmode = self.parent.v_opmode()
v = swap_uint(self.l, v)
self.value = v
l_out = opmode
if hasattr(self.parent, 'w8') and self.parent.w8.value == 0:
l_out = 8
v = sign_ext(v, self.l, l_out)
self.expr = ExprInt(v, l_out)
return True
class bs_cond_imm64(bs_cond_imm):
max_size = 64
def getmaxlen(self):
return 64
@classmethod
def flen(cls, mode, v):
if 'w8' not in v or v['w8'] == 1:
if 'se' in v and v['se'] == 1:
return 8
else:
osize = v_opmode_info(mode, v['opmode'], v['rex_w'], 0)
return osize
else:
return 8
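# Relative branch offset: encoded relative to the end of the instruction, so
# decode() re-adds the instruction length to recover the raw target offset.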
class bs_rel_off(bs_cond_imm):
parser = int_or_expr
def fromstring(self, s, parser_result=None):
if parser_result:
expr, start, stop = parser_result[self.parser]
else:
try:
expr, start, stop = self.parser.scanString(s).next()
except StopIteration:
expr = None
self.expr = expr
l = self.parent.mode
if isinstance(self.expr, ExprInt):
v = int(self.expr)
mask = ((1 << l) - 1)
self.expr = ExprInt(v & mask, l)
return start, stop
@classmethod
def flen(cls, mode, v):
osize = v_opmode_info(mode, v['opmode'], v['rex_w'], 0)
if osize == 16:
return 16
else:
return 32
def encode(self):
if not isinstance(self.expr, ExprInt):
raise StopIteration
arg0_expr = self.parent.args[0].expr
if self.l != 0:
l = self.l
else:
l = self.parent.v_opmode()
self.l = l
l = offsize(self.parent)
        prefix = self.parent.gen_prefix()
        if prefix is None:
            raise StopIteration
        parent_len = len(prefix) * 8 + self.parent.l + self.l
        assert parent_len % 8 == 0
        v = int(self.expr.arg - parent_len / 8)
mask = ((1 << self.l) - 1)
if self.l > l:
raise StopIteration
if v != sign_ext(v & mask, self.l, l):
raise StopIteration
self.value = swap_uint(self.l, v & ((1 << self.l) - 1))
yield True
def decode(self, v):
v = swap_uint(self.l, v)
size = offsize(self.parent)
v = sign_ext(v, self.l, size)
v += self.parent.l
self.expr = ExprInt(v, size)
return True
class bs_s08(bs_rel_off):
parser = int_or_expr
@classmethod
def flen(cls, mode, v):
return 8
def encode(self):
if not isinstance(self.expr, ExprInt):
raise StopIteration
arg0_expr = self.parent.args[0].expr
if self.l != 0:
l = self.l
else:
l = self.parent.v_opmode()
self.l = l
l = offsize(self.parent)
v = int(self.expr)
mask = ((1 << self.l) - 1)
if self.l > l:
raise StopIteration
if v != sign_ext(v & mask, self.l, l):
raise StopIteration
self.value = swap_uint(self.l, v & ((1 << self.l) - 1))
yield True
def decode(self, v):
v = swap_uint(self.l, v)
size = offsize(self.parent)
v = sign_ext(v, self.l, size)
self.expr = ExprInt(v, size)
return True
class bs_rel_off08(bs_rel_off):
@classmethod
def flen(cls, mode, v):
return 8
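# Absolute offset part of a far pointer (ptr16:16 / ptr16:32); 16 or 32 bits
# depending on the operand-size mode.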
class bs_moff(bsi):
@classmethod
def flen(cls, mode, v):
osize = v_opmode_info(mode, v['opmode'], v['rex_w'], 0)
if osize == 16:
return 16
else:
return 32
def encode(self):
if not hasattr(self.parent, "mseg"):
raise StopIteration
m = self.parent.mseg.expr
if not (isinstance(m, ExprOp) and m.op == 'segm'):
raise StopIteration
if not isinstance(m.args[1], ExprInt):
raise StopIteration
l = self.parent.v_opmode()
if l == 16:
self.l = 16
else:
self.l = 32
v = int(m.args[1])
mask = ((1 << self.l) - 1)
if v != sign_ext(v & mask, self.l, l):
raise StopIteration
self.value = swap_uint(self.l, v & ((1 << self.l) - 1))
yield True
def decode(self, v):
opmode = self.parent.v_opmode()
if opmode == 64:
return False
v = swap_uint(self.l, v)
self.value = v
v = sign_ext(v, self.l, opmode)
self.expr = ExprInt(v, opmode)
return True
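# Absolute memory operand (moffs) used by the 0xA0-0xA3 MOV forms: a raw
# address immediate, address-size wide, with no ModRM byte.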
class bs_movoff(m_arg):
parser = deref_mem
def fromstring(self, s, parser_result=None):
if parser_result:
e, start, stop = parser_result[self.parser]
if e is None:
return None, None
if not isinstance(e, ExprMem):
return None, None
self.expr = e
if self.expr is None:
return None, None
return start, stop
try:
v, start, stop = self.parser.scanString(s).next()
except StopIteration:
return None, None
        if not isinstance(v[0], ExprMem):
return None, None
self.expr = v[0]
if self.expr is None:
log.debug('cannot fromstring int %r', s)
return None, None
return start, stop
@classmethod
def flen(cls, mode, v):
if mode == 64:
if v['admode']:
return 32
else:
return 64
asize = v_admode_info(mode, v['admode'])
return asize
def encode(self):
p = self.parent
if not isinstance(self.expr, ExprMem) or not isinstance(self.expr.arg, ExprInt):
raise StopIteration
self.l = p.v_admode()
v = int(self.expr.arg)
mask = ((1 << self.l) - 1)
if v != mask & v:
raise StopIteration
self.value = swap_uint(self.l, v & ((1 << self.l) - 1))
yield True
def decode(self, v):
if self.parent.mode == 64:
if self.parent.admode == 1:
l = 32
else:
l = 64
else:
l = self.parent.v_admode()
v = swap_uint(self.l, v)
self.value = v
v = sign_ext(v, self.l, l)
v = ExprInt(v, l)
size = self.parent.v_opmode()
if self.parent.w8.value == 0:
size = 8
self.expr = ExprMem(v, size)
return True
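# Segment selector immediate of a far pointer; combined at decode time with
# the 'off' field into an ExprOp('segm', seg, off).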
class bs_msegoff(m_arg):
parser = deref_ptr
def fromstring(self, s, parser_result=None):
if parser_result:
e, start, stop = parser_result[self.parser]
if e is None:
return None, None
self.expr = e
if self.expr is None:
return None, None
return start, stop
try:
v, start, stop = self.parser.scanString(s).next()
except StopIteration:
return None, None
self.expr = v[0]
if self.expr is None:
log.debug('cannot fromstring int %r', s)
return None, None
return start, stop
def encode(self):
if not (isinstance(self.expr, ExprOp) and self.expr.op == 'segm'):
raise StopIteration
if not isinstance(self.expr.args[0], ExprInt):
raise StopIteration
if not isinstance(self.expr.args[1], ExprInt):
raise StopIteration
l = self.parent.v_opmode()
v = int(self.expr.args[0])
mask = ((1 << self.l) - 1)
if v != sign_ext(v & mask, self.l, l):
raise StopIteration
self.value = swap_uint(self.l, v & ((1 << self.l) - 1))
yield True
def decode(self, v):
opmode = self.parent.v_opmode()
v = swap_uint(self.l, v)
self.value = v
v = ExprInt(v, 16)
self.expr = ExprOp('segm', v, self.parent.off.expr)
return True
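# Zero-length virtual fields: their values come from prefix bytes parsed in
# pre_dis_info (REX bits, prefix groups), not from the opcode body itself.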
d_rex_p = bs(l=0, cls=(bs_fbit,), fname="rex_p")
d_rex_w = bs(l=0, cls=(bs_fbit,), fname="rex_w")
d_rex_r = bs(l=0, cls=(bs_fbit,), fname="rex_r")
d_rex_x = bs(l=0, cls=(bs_fbit,), fname="rex_x")
d_rex_b = bs(l=0, cls=(bs_fbit,), fname="rex_b")
d_g1 = bs(l=0, cls=(bs_fbit,), fname="g1")
d_g2 = bs(l=0, cls=(bs_fbit,), fname="g2")
d_cl1 = bs(l=1, cls=(bs_cl1,), fname="cl1")
w8 = bs(l=1, fname="w8")
se = bs(l=1, fname="se")
sx = bs(l=0, fname="sx")
sxd = bs(l=0, fname="sx")
xmmreg = bs(l=0, fname="xmmreg")
mmreg = bs(l=0, fname="mmreg")
pref_f2 = bs(l=0, fname="prefixed", default="\xf2")
pref_f3 = bs(l=0, fname="prefixed", default="\xf3")
pref_66 = bs(l=0, fname="prefixed", default="\x66")
no_xmm_pref = bs(l=0, fname="no_xmm_pref")
no_rex = bs(l=0, fname="no_rex")
sib_scale = bs(l=2, cls=(bs_cond_scale,), fname = "sib_scale")
sib_index = bs(l=3, cls=(bs_cond_index,), fname = "sib_index")
sib_base = bs(l=3, cls=(bs_cond_index,), fname = "sib_base")
disp = bs(l=0, cls=(bs_cond_disp,), fname = "disp")
s08 = bs(l=8, cls=(bs_s08, ))
u08 = bs(l=8, cls=(x86_08, m_arg))
u07 = bs(l=7, cls=(x86_08, m_arg))
u16 = bs(l=16, cls=(x86_16, m_arg))
u32 = bs(l=32, cls=(x86_32, m_arg))
s3264 = bs(l=32, cls=(x86_s32to64, m_arg))
u08_3 = bs(l=0, cls=(x86_imm_fix_08, m_arg), ival = 3)
d0 = bs("000", fname='reg')
d1 = bs("001", fname='reg')
d2 = bs("010", fname='reg')
d3 = bs("011", fname='reg')
d4 = bs("100", fname='reg')
d5 = bs("101", fname='reg')
d6 = bs("110", fname='reg')
d7 = bs("111", fname='reg')
sd = bs(l=1, fname="sd")
wd = bs(l=1, fname="wd")
stk = bs(l=0, fname="stk")
class field_size:
prio = default_prio
def __init__(self, d=None):
if d is None:
d = {}
self.d = d
def get(self, opm, adm=None):
return self.d[opm]
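# Constrains the ModRM 'mod' field to memory forms: 0b11 (register-direct)
# is rejected both at encode and decode time.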
class bs_mem(object):
def encode(self):
return self.value != 0b11
def decode(self, v):
self.value = v
return v != 0b11
d_eax = bs(l=0, cls=(bs_eax, ), fname='eax')
d_edx = bs(l=0, cls=(bs_edx, ), fname='edx')
d_st = bs(l=0, cls=(x86_reg_st, ), fname='st')
d_imm = bs(l=0, cls=(bs_cond_imm,), fname="imm")
d_imm64 = bs(l=0, cls=(bs_cond_imm64,), fname="imm")
d_ax = bs(l=0, cls=(r_ax, ), fname='ax')
d_dx = bs(l=0, cls=(r_dx, ), fname='dx')
d_cl = bs(l=0, cls=(r_cl, ), fname='cl')
d_cs = bs(l=0, cls=(bs_cs, ), fname='cs')
d_ds = bs(l=0, cls=(bs_ds, ), fname='ds')
d_es = bs(l=0, cls=(bs_es, ), fname='es')
d_ss = bs(l=0, cls=(bs_ss, ), fname='ss')
d_fs = bs(l=0, cls=(bs_fs, ), fname='fs')
d_gs = bs(l=0, cls=(bs_gs, ), fname='gs')
# Offset must be decoded in last position to have final instruction len
rel_off = bs(l=0, cls=(bs_rel_off,), fname="off", order=-1)
# Offset must be decoded in last position to have final instruction len
rel_off08 = bs(l=8, cls=(bs_rel_off08,), fname="off", order=-1)
moff = bs(l=0, cls=(bs_moff,), fname="off")
msegoff = bs(l=16, cls=(bs_msegoff,), fname="mseg")
movoff = bs(l=0, cls=(bs_movoff,), fname="off")
mod = bs(l=2, fname="mod")
mod_mem = bs(l=2, cls=(bs_mem,), fname="mod")
rmreg = bs(l=3, cls=(x86_rm_reg, ), order =1, fname = "reg")
reg = bs(l=3, cls=(x86_reg, ), order =1, fname = "reg")
reg_modrm = bs(l=3, cls=(x86_reg_modrm, ), order =1, fname = "reg")
regnoarg = bs(l=3, default_val="000", order=1, fname="reg")
segm = bs(l=3, cls=(x86_rm_segm, ), order =1, fname = "reg")
crreg = bs(l=3, cls=(x86_rm_cr, ), order =1, fname = "reg")
drreg = bs(l=3, cls=(x86_rm_dr, ), order =1, fname = "reg")
mm_reg = bs(l=3, cls=(x86_rm_reg_mm, ), order =1, fname = "reg")
xmm_reg = bs(l=3, cls=(x86_rm_reg_xmm, ), order =1, fname = "reg")
fltreg = bs(l=3, cls=(x86_rm_flt, ), order =1, fname = "reg")
rm = bs(l=3, fname="rm")
rm_arg = bs(l=0, cls=(x86_rm_arg,), fname='rmarg')
rm_arg_w8 = bs(l=0, cls=(x86_rm_w8,), fname='rmarg')
rm_arg_sx = bs(l=0, cls=(x86_rm_sx,), fname='rmarg')
rm_arg_sxd = bs(l=0, cls=(x86_rm_sxd,), fname='rmarg')
rm_arg_sd = bs(l=0, cls=(x86_rm_sd,), fname='rmarg')
rm_arg_wd = bs(l=0, cls=(x86_rm_wd,), fname='rmarg')
rm_arg_08 = bs(l=0, cls=(x86_rm_08,), fname='rmarg')
rm_arg_reg_m08 = bs(l=0, cls=(x86_rm_reg_m08,), fname='rmarg')
rm_arg_reg_m16 = bs(l=0, cls=(x86_rm_reg_m16,), fname='rmarg')
rm_arg_m08 = bs(l=0, cls=(x86_rm_m08,), fname='rmarg')
rm_arg_m64 = bs(l=0, cls=(x86_rm_m64,), fname='rmarg')
rm_arg_m80 = bs(l=0, cls=(x86_rm_m80,), fname='rmarg')
rm_arg_m16 = bs(l=0, cls=(x86_rm_m16,), fname='rmarg')
rm_mem = bs(l=0, cls=(x86_rm_mem,), fname='rmarg')
rm_mem_far = bs(l=0, cls=(x86_rm_mem_far,), fname='rmarg')
rm_arg_mm = bs(l=0, cls=(x86_rm_mm,), fname='rmarg')
rm_arg_mm_m64 = bs(l=0, cls=(x86_rm_mm_m64,), fname='rmarg')
rm_arg_mm_reg = bs(l=0, cls=(x86_rm_mm_reg,), fname='rmarg')
rm_arg_xmm = bs(l=0, cls=(x86_rm_xmm,), fname='rmarg')
rm_arg_xmm_m32 = bs(l=0, cls=(x86_rm_xmm_m32,), fname='rmarg')
rm_arg_xmm_m64 = bs(l=0, cls=(x86_rm_xmm_m64,), fname='rmarg')
rm_arg_xmm_reg = bs(l=0, cls=(x86_rm_xmm_reg,), fname='rmarg')
swapargs = bs_swapargs(l=1, fname="swap", mn_mod=range(1 << 1))
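# Zero-length filter fields: they encode/decode nothing but restrict a
# candidate encoding to a given operand-size or address-size mode.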
class bs_op_mode(bsi):
def decode(self, v):
opmode = self.parent.v_opmode()
return opmode == self.mode
class bs_ad_mode(bsi):
def decode(self, v):
admode = self.parent.v_admode()
return admode == self.mode
class bs_op_mode_no64(bsi):
def encode(self):
if self.parent.mode == 64:
return False
return super(bs_op_mode_no64, self).encode()
def decode(self, v):
if self.parent.mode == 64:
return False
opmode = self.parent.v_opmode()
return opmode == self.mode
class bs_op_mode64(bsi):
def encode(self):
if self.parent.mode != 64:
return False
return super(bs_op_mode64, self).encode()
def decode(self, v):
if self.parent.mode != 64:
return False
return True
class bs_op_modeno64(bsi):
def encode(self):
if self.parent.mode == 64:
return False
return super(bs_op_modeno64, self).encode()
def decode(self, v):
if self.parent.mode == 64:
return False
return True
bs_opmode16 = bs(l=0, cls=(bs_op_mode,), mode = 16, fname="fopmode")
bs_opmode32 = bs(l=0, cls=(bs_op_mode,), mode = 32, fname="fopmode")
bs_opmode64 = bs(l=0, cls=(bs_op_mode,), mode = 64, fname="fopmode")
bs_admode16 = bs(l=0, cls=(bs_ad_mode,), mode = 16, fname="fadmode")
bs_admode32 = bs(l=0, cls=(bs_ad_mode,), mode = 32, fname="fadmode")
bs_admode64 = bs(l=0, cls=(bs_ad_mode,), mode = 64, fname="fadmode")
bs_opmode16_no64 = bs(l=0, cls=(bs_op_mode_no64,), mode = 16, fname="fopmode")
bs_opmode32_no64 = bs(l=0, cls=(bs_op_mode_no64,), mode = 32, fname="fopmode")
bs_mode64 = bs(l=0, cls=(bs_op_mode64,))
bs_modeno64 = bs(l=0, cls=(bs_op_modeno64,))
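# 4-bit condition codes shared by Jcc, SETcc and CMOVcc; the commented line
# keeps the alternative mnemonics for the signed comparisons.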
cond_list = ["O", "NO", "B", "AE",
"Z", "NZ", "BE", "A",
"S", "NS", "PE", "NP",
#"L", "NL", "NG", "G"]
"L", "GE", "LE", "G"]
cond = bs_mod_name(l=4, fname='cond', mn_mod=cond_list)
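# Helper building the standard ModRM operand sequence: the modrm fields, the
# optional SIB fields, the displacement, then the decoded r/m argument.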
def rmmod(r, rm_arg_x=rm_arg, modrm=mod):
return [modrm, r, rm, sib_scale, sib_index, sib_base, disp, rm_arg_x]
#
# ModRM byte:    mode | reg | rm
#
# SIB byte:      scale | index | base
#
# Instruction:   Prefix | REX prefix | Opcode | mod/rm | sib | displacement | immediate
#
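# Worked example (illustrative): ModRM byte 0x45 = 01 000 101, i.e.
# mod=0b01, reg=0b000 (EAX), rm=0b101 -> EAX, [EBP + disp8] in 32-bit mode.

# addop() dynamically creates one mn_x86 subclass per encoding; the commented
# class below shows the shape of what it generates.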
def addop(name, fields, args=None, alias=False):
dct = {"fields": fields}
dct["alias"] = alias
if args is not None:
dct['args'] = args
type(name, (mn_x86,), dct)
"""
class ia32_aaa(mn_x86):
fields = [bs8(0x37)]
"""
addop("aaa", [bs8(0x37)])
addop("aas", [bs8(0x3F)])
addop("aad", [bs8(0xd5), u08])
addop("aam", [bs8(0xd4), u08])
addop("adc", [bs("0001010"), w8, d_eax, d_imm])
addop("adc", [bs("100000"), se, w8] + rmmod(d2, rm_arg_w8) + [d_imm])
addop("adc", [bs("000100"), swapargs, w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("add", [bs("0000010"), w8, d_eax, d_imm])
addop("add", [bs("100000"), se, w8] + rmmod(d0, rm_arg_w8) + [d_imm])
addop("add", [bs("000000"), swapargs, w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("and", [bs("0010010"), w8, d_eax, d_imm])
addop("and", [bs("100000"), se, w8] + rmmod(d4, rm_arg_w8) + [d_imm])
addop("and", [bs("001000"), swapargs, w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("bsf", [bs8(0x0f), bs8(0xbc)] + rmmod(rmreg))
addop("bsr", [bs8(0x0f), bs8(0xbd), mod,
rmreg, rm, sib_scale, sib_index, sib_base, disp, rm_arg])
addop("bswap", [bs8(0x0f), bs('11001'), reg])
addop("bt", [bs8(0x0f), bs8(0xa3)] + rmmod(rmreg), [rm_arg, rmreg])
addop("bt", [bs8(0x0f), bs8(0xba)] + rmmod(d4) + [u08])
addop("btc", [bs8(0x0f), bs8(0xbb)] + rmmod(rmreg), [rm_arg, rmreg])
addop("btc", [bs8(0x0f), bs8(0xba)] + rmmod(d7) + [u08])
addop("btr", [bs8(0x0f), bs8(0xb3)] + rmmod(rmreg), [rm_arg, rmreg])
addop("btr", [bs8(0x0f), bs8(0xba)] + rmmod(d6) + [u08])
addop("bts", [bs8(0x0f), bs8(0xab)] + rmmod(rmreg), [rm_arg, rmreg])
addop("bts", [bs8(0x0f), bs8(0xba)] + rmmod(d5) + [u08])
addop("call", [bs8(0xe8), rel_off])
addop("call", [bs8(0xff), stk] + rmmod(d2))
addop("call", [bs8(0xff), stk] + rmmod(d3, rm_arg_x=rm_mem_far, modrm=mod_mem))
addop("call", [bs8(0x9a), bs_modeno64, moff, msegoff])
addop("cbw", [bs8(0x98), bs_opmode16])
addop("cwde", [bs8(0x98), bs_opmode32])
addop("cdqe", [bs8(0x98), bs_opmode64])
addop("clc", [bs8(0xf8)])
addop("cld", [bs8(0xfc)])
addop("cli", [bs8(0xfa)])
addop("clts", [bs8(0x0f), bs8(0x06)])
addop("cmc", [bs8(0xf5)])
addop("cmov", [bs8(0x0f), bs('0100'), cond] + rmmod(rmreg))
addop("cmp", [bs("0011110"), w8, d_eax, d_imm])
addop("cmp", [bs("100000"), se, w8] + rmmod(d7, rm_arg_w8) + [d_imm])
addop("cmp", [bs("001110"), swapargs, w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("cmpsb", [bs8(0xa6)])
addop("cmpsw", [bs8(0xa7), bs_opmode16])
addop("cmpsd", [bs8(0xa7), bs_opmode32])
addop("cmpsq", [bs8(0xa7), bs_opmode64])
addop("cmpxchg", [bs8(0x0f), bs('1011000'), w8]
+ rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("cmpxchg8b", [bs8(0x0f), bs8(0xc7), bs_opmode16] + rmmod(d1, rm_arg_m64))
addop("cmpxchg8b", [bs8(0x0f), bs8(0xc7), bs_opmode32] + rmmod(d1, rm_arg_m64))
addop("cmpxchg16b", [bs8(0x0f), bs8(0xc7), bs_opmode64] + rmmod(d1, rm_arg_m64))
# XXX TODO CMPXCHG8/16
addop("comiss", [bs8(0x0f), bs8(0x2f), no_xmm_pref] +
rmmod(xmm_reg, rm_arg_xmm_m32), [xmm_reg, rm_arg_xmm_m32])
addop("comisd", [bs8(0x0f), bs8(0x2f), pref_66] +
rmmod(xmm_reg, rm_arg_xmm_m64), [xmm_reg, rm_arg_xmm_m64])
addop("cpuid", [bs8(0x0f), bs8(0xa2)])
addop("cwd", [bs8(0x99), bs_opmode16])
addop("cdq", [bs8(0x99), bs_opmode32])
addop("cqo", [bs8(0x99), bs_opmode64])
addop("daa", [bs8(0x27)])
addop("das", [bs8(0x2f)])
addop("dec", [bs('1111111'), w8] + rmmod(d1, rm_arg_w8))
addop("dec", [bs('01001'), reg])
addop("div", [bs('1111011'), w8] + rmmod(d6, rm_arg_w8))
addop("enter", [bs8(0xc8), u16, u08])
# float #####
addop("fwait", [bs8(0x9b)])
addop("f2xm1", [bs8(0xd9), bs8(0xf0)])
addop("fabs", [bs8(0xd9), bs8(0xe1)])
addop("fadd", [bs("11011"), sd, bs("00")] + rmmod(d0, rm_arg_sd))
addop("fadd", [bs("11011"), swapargs, bs("00"),
bs("11000"), d_st, fltreg], [d_st, fltreg])
addop("faddp", [bs8(0xde), bs("11000"), fltreg, d_st])
addop("fiadd", [bs("11011"), wd, bs("10")] + rmmod(d0, rm_arg_wd))
addop("fbld", [bs8(0xdf)] + rmmod(d4, rm_arg_m80))
addop("fbldp", [bs8(0xdf)] + rmmod(d6, rm_arg_m80))
addop("fchs", [bs8(0xd9), bs8(0xe0)])
# addop("fclex", [bs8(0x9b), bs8(0xdb), bs8(0xe2)])
addop("fnclex", [bs8(0xdb), bs8(0xe2)])
addop("fcmovb", [bs8(0xda), bs("11000"), d_st, fltreg])
addop("fcmove", [bs8(0xda), bs("11001"), d_st, fltreg])
addop("fcmovbe", [bs8(0xda), bs("11010"), d_st, fltreg])
addop("fcmovu", [bs8(0xda), bs("11011"), d_st, fltreg])
addop("fcmovnb", [bs8(0xdb), bs("11000"), d_st, fltreg])
addop("fcmovne", [bs8(0xdb), bs("11001"), d_st, fltreg])
addop("fcmovnbe", [bs8(0xdb), bs("11010"), d_st, fltreg])
addop("fcmovnu", [bs8(0xdb), bs("11011"), d_st, fltreg])
addop("fcom", [bs("11011"), sd, bs("00")] + rmmod(d2, rm_arg_sd))
addop("fcom", [bs("11011"), swapargs, bs("00"),
bs("11010"), d_st, fltreg], [d_st, fltreg])
addop("fcomp", [bs("11011"), sd, bs("00")] + rmmod(d3, rm_arg_sd))
addop("fcomp",
[bs("11011"), swapargs, bs("00"), bs("11011"),
d_st, fltreg], [d_st, fltreg])
addop("fcompp", [bs8(0xde), bs8(0xd9)])
addop("fcomi", [bs8(0xdb), bs("11110"), d_st, fltreg])
addop("fcomip", [bs8(0xdf), bs("11110"), d_st, fltreg])
addop("fucomi", [bs8(0xdb), bs("11101"), d_st, fltreg])
addop("fucomip", [bs8(0xdf), bs("11101"), d_st, fltreg])
addop("fcos", [bs8(0xd9), bs8(0xff)])
addop("fdecstp", [bs8(0xd9), bs8(0xf6)])
addop("fdiv", [bs("11011"), sd, bs("00")] + rmmod(d6, rm_arg_sd))
addop("fdiv", [bs8(0xd8), bs("11110"), d_st, fltreg])
addop("fdiv", [bs8(0xdc), bs("11111"), fltreg, d_st])
addop("fdivp", [bs8(0xde), bs("11111"), fltreg, d_st])
addop("fidiv", [bs("11011"), wd, bs("10")] + rmmod(d6, rm_arg_wd))
addop("fdivr", [bs("11011"), sd, bs("00")] + rmmod(d7, rm_arg_sd))
addop("fdivr", [bs8(0xd8), bs("11111"), d_st, fltreg])
addop("fdivr", [bs8(0xdc), bs("11110"), fltreg, d_st])
addop("fdivrp", [bs8(0xde), bs("11110"), fltreg, d_st])
addop("fidivr", [bs("11011"), wd, bs("10")] + rmmod(d7, rm_arg_wd))
addop("ffree", [bs8(0xdd), bs("11000"), fltreg])
addop("ficom", [bs("11011"), wd, bs("10")] + rmmod(d2, rm_arg_wd))
addop("ficomp", [bs("11011"), wd, bs("10")] + rmmod(d3, rm_arg_wd))
addop("fild", [bs("11011"), wd, bs("11")] + rmmod(d0, rm_arg_wd))
addop("fild", [bs8(0xdf)] + rmmod(d5, rm_arg_m64))
addop("fincstp", [bs8(0xd9), bs8(0xf7)])
# addop("finit", [bs8(0x9b), bs8(0xdb), bs8(0xe3)])
addop("fninit", [bs8(0xdb), bs8(0xe3)])
addop("fist", [bs("11011"), wd, bs("11")] + rmmod(d2, rm_arg_wd))
addop("fistp", [bs("11011"), wd, bs("11")] + rmmod(d3, rm_arg_wd))
addop("fistp", [bs8(0xdf)] + rmmod(d7, rm_arg_m64))
addop("fisttp", [bs("11011"), wd, bs("11")] + rmmod(d1, rm_arg_wd))
addop("fisttp", [bs8(0xdd)] + rmmod(d1, rm_arg_m64))
addop("fld", [bs("11011"), sd, bs("01")] + rmmod(d0, rm_arg_sd))
addop("fld", [bs8(0xdb)] + rmmod(d5, rm_arg_m80))
addop("fld", [bs8(0xd9), bs("11000"), fltreg])
addop("fld1", [bs8(0xd9), bs8(0xe8)])
addop("fldl2t", [bs8(0xd9), bs8(0xe9)])
addop("fldl2e", [bs8(0xd9), bs8(0xea)])
addop("fldpi", [bs8(0xd9), bs8(0xeb)])
addop("fldlg2", [bs8(0xd9), bs8(0xec)])
addop("fldln2", [bs8(0xd9), bs8(0xed)])
addop("fldz", [bs8(0xd9), bs8(0xee)])
addop("fldcw", [bs8(0xd9)] + rmmod(d5, rm_arg_m16))
addop("fldenv", [bs8(0xd9)] + rmmod(d4, rm_arg_m80)) # XXX TODO: m14?
addop("fmul", [bs("11011"), sd, bs("00")] + rmmod(d1, rm_arg_sd))
addop("fmul", [bs("11011"), swapargs, bs("00"),
bs("11001"), d_st, fltreg], [d_st, fltreg])
addop("fmulp", [bs8(0xde), bs("11001"), fltreg, d_st])
addop("fimul", [bs("11011"), wd, bs("10")] + rmmod(d1, rm_arg_wd))
addop("fnop", [bs8(0xd9), bs8(0xd0)])
addop("fpatan", [bs8(0xd9), bs8(0xf3)])
addop("fprem", [bs8(0xd9), bs8(0xf8)])
addop("fprem1", [bs8(0xd9), bs8(0xf5)])
addop("fptan", [bs8(0xd9), bs8(0xf2)])
addop("frndint", [bs8(0xd9), bs8(0xfc)])
addop("frstor", [bs8(0xdd)] + rmmod(d4, rm_arg_m80)) # XXX TODO: m94 ?
# addop("fsave", [bs8(0x9b), bs8(0xdd)] + rmmod(d6, rm_arg_m80)) # XXX
# TODO: m94 ?
addop("fnsave", [bs8(0xdd)] + rmmod(d6, rm_arg_m80)) # XXX TODO: m94 ?
addop("fscale", [bs8(0xd9), bs8(0xfd)])
addop("fsin", [bs8(0xd9), bs8(0xfe)])
addop("fsincos", [bs8(0xd9), bs8(0xfb)])
addop("fsqrt", [bs8(0xd9), bs8(0xfa)])
addop("fst", [bs("11011"), sd, bs("01")] + rmmod(d2, rm_arg_sd))
addop("fst", [bs8(0xdd), bs("11010"), fltreg])
addop("fstp", [bs("11011"), sd, bs("01")] + rmmod(d3, rm_arg_sd))
addop("fstp", [bs8(0xdb)] + rmmod(d7, rm_arg_m80))
addop("fstp", [bs8(0xdd), bs("11011"), fltreg])
# addop("fstcw", [bs8(0x9b), bs8(0xd9)] + rmmod(d7, rm_arg_m16))
addop("fnstcw", [bs8(0xd9)] + rmmod(d7, rm_arg_m16))
# addop("fstenv", [bs8(0x9b), bs8(0xd9)] + rmmod(d6, rm_arg_m80)) # XXX
# TODO: m14?
addop("fnstenv", [bs8(0xd9)] + rmmod(d6, rm_arg_m80)) # XXX TODO: m14?
# addop("fstsw", [bs8(0x9b), bs8(0xdd)] + rmmod(d7, rm_arg_m16))
addop("fnstsw", [bs8(0xdd)] + rmmod(d7, rm_arg_m16))
# addop("fstsw", [bs8(0x9b), bs8(0xdf), bs8(0xe0), d_ax])
addop("fnstsw", [bs8(0xdf), bs8(0xe0), d_ax])
addop("fsub", [bs("11011"), sd, bs("00")] + rmmod(d4, rm_arg_sd))
addop("fsub", [bs8(0xd8), bs("11100"), d_st, fltreg])
addop("fsub", [bs8(0xdc), bs("11101"), fltreg, d_st])
addop("fsubp", [bs8(0xde), bs("11101"), fltreg, d_st])
addop("fisub", [bs("11011"), wd, bs("10")] + rmmod(d4, rm_arg_wd))
addop("fsubr", [bs("11011"), sd, bs("00")] + rmmod(d5, rm_arg_sd))
addop("fsubr", [bs8(0xd8), bs("11101"), d_st, fltreg])
addop("fsubr", [bs8(0xdc), bs("11100"), fltreg, d_st])
addop("fsubrp", [bs8(0xde), bs("11100"), fltreg, d_st])
addop("fisubr", [bs("11011"), wd, bs("10")] + rmmod(d5, rm_arg_wd))
addop("ftst", [bs8(0xd9), bs8(0xe4)])
addop("fucom", [bs8(0xdd), bs("11100"), fltreg])
addop("fucomp", [bs8(0xdd), bs("11101"), fltreg])
addop("fucompp", [bs8(0xda), bs8(0xe9)])
addop("fxam", [bs8(0xd9), bs8(0xe5)])
addop("fxch", [bs8(0xd9), bs("11001"), fltreg])
addop("fxrstor", [bs8(0x0f), bs8(0xae)]
+ rmmod(d1, rm_arg_m80)) # XXX TODO m512
addop("fxsave", [bs8(0x0f), bs8(0xae)]
+ rmmod(d0, rm_arg_m80)) # XXX TODO m512
addop("stmxcsr", [bs8(0x0f), bs8(0xae)] + rmmod(d3))
addop("ldmxcsr", [bs8(0x0f), bs8(0xae)] + rmmod(d2))
addop("fxtract", [bs8(0xd9), bs8(0xf4)])
addop("fyl2x", [bs8(0xd9), bs8(0xf1)])
addop("fyl2xp1", [bs8(0xd9), bs8(0xf9)])
addop("hlt", [bs8(0xf4)])
addop("icebp", [bs8(0xf1)])
addop("idiv", [bs('1111011'), w8] + rmmod(d7, rm_arg_w8))
addop("imul", [bs('1111011'), w8] + rmmod(d5, rm_arg_w8))
addop("imul", [bs8(0x0f), bs8(0xaf)] + rmmod(rmreg))
addop("imul", [bs("011010"), se, bs('1')] + rmmod(rmreg) + [d_imm])
addop("in", [bs("1110010"), w8, d_eax, u08])
addop("in", [bs("1110110"), w8, d_eax, d_edx])
addop("inc", [bs('1111111'), w8] + rmmod(d0, rm_arg_w8))
addop("inc", [bs('01000'), reg])
addop("insb", [bs8(0x6c)])
addop("insw", [bs8(0x6d), bs_opmode16])
addop("insd", [bs8(0x6d), bs_opmode32])
addop("insd", [bs8(0x6d), bs_opmode64])
addop("int", [bs8(0xcc), u08_3])
addop("int", [bs8(0xcd), u08])
addop("into", [bs8(0xce)])
addop("invd", [bs8(0x0f), bs8(0x08)])
addop("invlpg", [bs8(0x0f), bs8(0x01)] + rmmod(d7))
addop("iret", [bs8(0xcf), bs_opmode16])
addop("iretd", [bs8(0xcf), bs_opmode32])
addop("iretq", [bs8(0xcf), bs_opmode64])
addop("j", [bs('0111'), cond, rel_off08])
addop("jcxz", [bs8(0xe3), rel_off08, bs_admode16])
addop("jecxz", [bs8(0xe3), rel_off08, bs_admode32])
addop("jrcxz", [bs8(0xe3), rel_off08, bs_admode64])
addop("j", [bs8(0x0f), bs('1000'), cond, rel_off])
addop("jmp", [bs8(0xeb), rel_off08])
addop("jmp", [bs8(0xe9), rel_off])
# TODO XXX replace stk force64?
addop("jmp", [bs8(0xff), stk] + rmmod(d4))
addop("jmp", [bs8(0xea), bs_modeno64, moff, msegoff])
addop("jmp", [bs8(0xff)] + rmmod(d5, rm_arg_x=rm_mem_far, modrm=mod_mem))
addop("lahf", [bs8(0x9f)])
addop("lar", [bs8(0x0f), bs8(0x02)] + rmmod(rmreg))
addop("lea", [bs8(0x8d)] + rmmod(rmreg, rm_arg_x=rm_mem, modrm=mod_mem))
addop("les", [bs8(0xc4)] + rmmod(rmreg, rm_arg_x=rm_mem, modrm=mod_mem))
addop("lds", [bs8(0xc5)] + rmmod(rmreg, rm_arg_x=rm_mem, modrm=mod_mem))
addop("lss", [bs8(0x0f), bs8(0xb2)] + rmmod(rmreg, rm_arg_x=rm_mem, modrm=mod_mem))
addop("lfs", [bs8(0x0f), bs8(0xb4)] + rmmod(rmreg, rm_arg_x=rm_mem, modrm=mod_mem))
addop("lgs", [bs8(0x0f), bs8(0xb5)] + rmmod(rmreg, rm_arg_x=rm_mem, modrm=mod_mem))
addop("lgdt", [bs8(0x0f), bs8(0x01)] + rmmod(d2, modrm=mod_mem))
addop("lidt", [bs8(0x0f), bs8(0x01)] + rmmod(d3, modrm=mod_mem))
addop("lfence", [bs8(0x0f), bs8(0xae), bs8(0xe8)])
addop("leave", [bs8(0xc9), stk])
addop("lodsb", [bs8(0xac)])
addop("lodsw", [bs8(0xad), bs_opmode16])
addop("lodsd", [bs8(0xad), bs_opmode32])
addop("lodsq", [bs8(0xad), bs_opmode64])
addop("loop", [bs8(0xe2), rel_off08])
addop("loope", [bs8(0xe1), rel_off08])
addop("loopne", [bs8(0xe0), rel_off08])
addop("lsl", [bs8(0x0f), bs8(0x03)] + rmmod(rmreg))
addop("monitor", [bs8(0x0f), bs8(0x01), bs8(0xc8)])
addop("mov", [bs("100010"), swapargs, w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("mov", [bs("100011"), swapargs, bs('0')] + rmmod(segm), [rm_arg, segm])
addop("mov", [bs("101000"), swapargs, w8, d_eax, movoff], [d_eax, movoff])
addop("mov", [bs("1011"), w8, reg, d_imm64])
addop("mov", [bs("1100011"), w8] + rmmod(d0, rm_arg_w8) + [d_imm])
addop("mov", [bs8(0x0f), bs("001000"), swapargs, bs('0')]
+ rmmod(crreg), [rm_arg, crreg])
addop("mov", [bs8(0x0f), bs("001000"), swapargs, bs('1')]
+ rmmod(drreg), [rm_arg, drreg])
addop("movsb", [bs8(0xa4)])
addop("movsw", [bs8(0xa5), bs_opmode16])
addop("movsd", [bs8(0xa5), bs_opmode32])
addop("movsq", [bs8(0xa5), bs_opmode64])
addop("movsx", [bs8(0x0f), bs("1011111"), w8, sx] + rmmod(rmreg, rm_arg_sx))
addop("movsxd", [bs8(0x63), sxd, bs_mode64] + rmmod(rmreg, rm_arg_sxd))
addop("movups", [bs8(0x0f), bs("0001000"), swapargs, no_xmm_pref] +
rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
addop("movsd", [bs8(0x0f), bs("0001000"), swapargs, pref_f2]
+ rmmod(xmm_reg, rm_arg_xmm_m64), [xmm_reg, rm_arg_xmm_m64])
addop("movss", [bs8(0x0f), bs("0001000"), swapargs, pref_f3] +
rmmod(xmm_reg, rm_arg_xmm_m32), [xmm_reg, rm_arg_xmm_m32])
addop("movupd", [bs8(0x0f), bs8(0x10), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("movd", [bs8(0x0f), bs('011'), swapargs, bs('1110'), no_xmm_pref] +
rmmod(mm_reg, rm_arg), [mm_reg, rm_arg])
addop("movd", [bs8(0x0f), bs('011'), swapargs, bs('1110'), pref_66, bs_opmode32] +
rmmod(xmm_reg, rm_arg), [xmm_reg, rm_arg])
addop("movq", [bs8(0x0f), bs('011'), swapargs, bs('1110'), pref_66, bs_opmode64] +
rmmod(xmm_reg, rm_arg), [xmm_reg, rm_arg])
addop("movq", [bs8(0x0f), bs('011'), swapargs, bs('1111'), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm_m64), [mm_reg, rm_arg_mm_m64])
addop("movq", [bs8(0x0f), bs8(0x7e), pref_f3] +
rmmod(xmm_reg, rm_arg_xmm_m64), [xmm_reg, rm_arg_xmm_m64])
addop("movq", [bs8(0x0f), bs8(0xd6), pref_66] +
rmmod(xmm_reg, rm_arg_xmm_m64), [rm_arg_xmm_m64, xmm_reg])
addop("movmskps", [bs8(0x0f), bs8(0x50), no_xmm_pref] +
rmmod(reg_modrm, rm_arg_xmm_reg))
addop("addss", [bs8(0x0f), bs8(0x58), pref_f3] + rmmod(xmm_reg, rm_arg_xmm_m32))
addop("addsd", [bs8(0x0f), bs8(0x58), pref_f2] + rmmod(xmm_reg, rm_arg_xmm_m64))
addop("subss", [bs8(0x0f), bs8(0x5c), pref_f3] + rmmod(xmm_reg, rm_arg_xmm_m32))
addop("subsd", [bs8(0x0f), bs8(0x5c), pref_f2] + rmmod(xmm_reg, rm_arg_xmm_m64))
addop("mulss", [bs8(0x0f), bs8(0x59), pref_f3] + rmmod(xmm_reg, rm_arg_xmm_m32))
addop("mulsd", [bs8(0x0f), bs8(0x59), pref_f2] + rmmod(xmm_reg, rm_arg_xmm_m64))
addop("divss", [bs8(0x0f), bs8(0x5e), pref_f3] + rmmod(xmm_reg, rm_arg_xmm_m32))
addop("divsd", [bs8(0x0f), bs8(0x5e), pref_f2] + rmmod(xmm_reg, rm_arg_xmm_m64))
addop("pminsw", [bs8(0x0f), bs8(0xea), no_xmm_pref] + rmmod(mm_reg, rm_arg_mm))
addop("pminsw", [bs8(0x0f), bs8(0xea), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("ucomiss", [bs8(0x0f), bs8(0x2e), no_xmm_pref] + rmmod(xmm_reg, rm_arg_xmm_m32))
addop("ucomisd", [bs8(0x0f), bs8(0x2e), pref_66] + rmmod(xmm_reg, rm_arg_xmm_m64))
addop("maxsd", [bs8(0x0f), bs8(0x5f), pref_f2] + rmmod(xmm_reg, rm_arg_xmm_m64))
addop("maxss", [bs8(0x0f), bs8(0x5f), pref_f3] + rmmod(xmm_reg, rm_arg_xmm_m32))
addop("movzx", [bs8(0x0f), bs("1011011"), w8, sx] + rmmod(rmreg, rm_arg_sx))
addop("mul", [bs('1111011'), w8] + rmmod(d4, rm_arg_w8))
addop("neg", [bs('1111011'), w8] + rmmod(d3, rm_arg_w8))
addop("nop", [bs8(0x0f), bs8(0x1f)] + rmmod(d0, rm_arg)) # XXX TODO m512
addop("not", [bs('1111011'), w8] + rmmod(d2, rm_arg_w8))
addop("or", [bs("0000110"), w8, d_eax, d_imm])
addop("or", [bs("100000"), se, w8] + rmmod(d1, rm_arg_w8) + [d_imm])
addop("or", [bs("000010"), swapargs, w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("out", [bs("1110011"), w8, u08, d_eax])
addop("out", [bs("1110111"), w8, d_edx, d_eax])
addop("outsb", [bs8(0x6e)])
addop("outsw", [bs8(0x6f), bs_opmode16])
addop("outsd", [bs8(0x6f), bs_opmode32])
addop("outsd", [bs8(0x6f), bs_opmode64])
addop("setalc", [bs8(0xD6)])
# addop("pause", [bs8(0xf3), bs8(0x90)])
addop("popw", [bs8(0x8f), stk, bs_opmode16] + rmmod(d0))
addop("popw", [bs("01011"), stk, reg, bs_opmode16])
addop("popw", [bs8(0x1f), stk, d_ds, bs_opmode16])
addop("popw", [bs8(0x07), stk, d_es, bs_opmode16])
addop("popw", [bs8(0x17), stk, d_ss, bs_opmode16])
addop("popw", [bs8(0x0f), stk, bs8(0xa1), d_fs, bs_opmode16])
addop("popw", [bs8(0x0f), stk, bs8(0xa9), d_gs, bs_opmode16])
addop("pop", [bs8(0x8f), stk, bs_opmode32] + rmmod(d0))
addop("pop", [bs("01011"), stk, reg, bs_opmode32])
addop("pop", [bs8(0x1f), stk, d_ds, bs_opmode32])
addop("pop", [bs8(0x07), stk, d_es, bs_opmode32])
addop("pop", [bs8(0x17), stk, d_ss, bs_opmode32])
addop("pop", [bs8(0x0f), stk, bs8(0xa1), d_fs, bs_opmode32])
addop("pop", [bs8(0x0f), stk, bs8(0xa9), d_gs, bs_opmode32])
addop("pop", [bs8(0x8f), stk, bs_opmode64] + rmmod(d0))
addop("pop", [bs("01011"), stk, reg, bs_opmode64])
addop("pop", [bs8(0x1f), stk, d_ds, bs_opmode64])
addop("pop", [bs8(0x07), stk, d_es, bs_opmode64])
addop("pop", [bs8(0x17), stk, d_ss, bs_opmode64])
addop("pop", [bs8(0x0f), stk, bs8(0xa1), d_fs, bs_opmode64])
addop("pop", [bs8(0x0f), stk, bs8(0xa9), d_gs, bs_opmode64])
addop("popa", [bs8(0x61), stk, bs_opmode16])
addop("popad", [bs8(0x61), stk, bs_opmode32])
addop("popfw", [bs8(0x9d), stk, bs_opmode16])
addop("popfd", [bs8(0x9d), stk, bs_opmode32])
addop("popfq", [bs8(0x9d), stk, bs_opmode64])
addop("prefetch0", [bs8(0x0f), bs8(0x18)] + rmmod(d1, rm_arg_m08))
addop("prefetch1", [bs8(0x0f), bs8(0x18)] + rmmod(d2, rm_arg_m08))
addop("prefetch2", [bs8(0x0f), bs8(0x18)] + rmmod(d3, rm_arg_m08))
addop("prefetchnta", [bs8(0x0f), bs8(0x18)] + rmmod(d0, rm_arg_m08))
addop("prefetchw", [bs8(0x0f), bs8(0x0d)] + rmmod(d1, rm_arg_m08))
addop("pushw", [bs8(0xff), stk, bs_opmode16] + rmmod(d6))
addop("pushw", [bs("01010"), stk, reg, bs_opmode16])
addop("pushw", [bs8(0x6a), s08, stk, bs_opmode16])
addop("pushw", [bs8(0x68), d_imm, stk, bs_opmode16])
addop("pushw", [bs8(0x0e), stk, d_cs, bs_opmode16])
addop("pushw", [bs8(0x16), stk, d_ss, bs_opmode16])
addop("pushw", [bs8(0x1e), stk, d_ds, bs_opmode16])
addop("pushw", [bs8(0x06), stk, d_es, bs_opmode16])
addop("pushw", [bs8(0x0f), stk, bs8(0xa0), d_fs, bs_opmode16])
addop("pushw", [bs8(0x0f), stk, bs8(0xa8), d_gs, bs_opmode16])
addop("push", [bs8(0xff), stk, bs_opmode32] + rmmod(d6))
addop("push", [bs("01010"), stk, reg, bs_opmode32])
addop("push", [bs8(0x6a), s08, stk, bs_opmode32])
addop("push", [bs8(0x68), d_imm, stk, bs_opmode32])
addop("push", [bs8(0x0e), stk, d_cs, bs_opmode32])
addop("push", [bs8(0x16), stk, d_ss, bs_opmode32])
addop("push", [bs8(0x1e), stk, d_ds, bs_opmode32])
addop("push", [bs8(0x06), stk, d_es, bs_opmode32])
addop("push", [bs8(0x0f), stk, bs8(0xa0), d_fs, bs_opmode32])
addop("push", [bs8(0x0f), stk, bs8(0xa8), d_gs, bs_opmode32])
addop("push", [bs8(0xff), stk, bs_opmode64] + rmmod(d6))
addop("push", [bs("01010"), stk, reg, bs_opmode64])
addop("push", [bs8(0x6a), s08, stk, bs_opmode64])
addop("push", [bs8(0x68), d_imm, stk, bs_opmode64])
addop("push", [bs8(0x0e), stk, d_cs, bs_opmode64])
addop("push", [bs8(0x16), stk, d_ss, bs_opmode64])
addop("push", [bs8(0x1e), stk, d_ds, bs_opmode64])
addop("push", [bs8(0x06), stk, d_es, bs_opmode64])
addop("push", [bs8(0x0f), stk, bs8(0xa0), d_fs, bs_opmode64])
addop("push", [bs8(0x0f), stk, bs8(0xa8), d_gs, bs_opmode64])
addop("pusha", [bs8(0x60), stk, bs_opmode16_no64])
addop("pushad", [bs8(0x60), stk, bs_opmode32_no64])
addop("pushfw", [bs8(0x9c), stk, bs_opmode16])
addop("pushfd", [bs8(0x9c), stk, bs_opmode32])
addop("pushfq", [bs8(0x9c), stk, bs_opmode64])
addop("rcl", [bs('110100'), d_cl1, w8] +
rmmod(d2, rm_arg_w8), [rm_arg_w8, d_cl1])
addop("rcl", [bs('1100000'), w8] + rmmod(d2, rm_arg_w8) + [u08])
addop("rcr", [bs('110100'), d_cl1, w8] +
rmmod(d3, rm_arg_w8), [rm_arg_w8, d_cl1])
addop("rcr", [bs('1100000'), w8] + rmmod(d3, rm_arg_w8) + [u08])
addop("rol", [bs('110100'), d_cl1, w8]
+ rmmod(d0, rm_arg_w8), [rm_arg_w8, d_cl1])
addop("rol", [bs('1100000'), w8] + rmmod(d0, rm_arg_w8) + [u08])
addop("ror", [bs('110100'), d_cl1, w8]
+ rmmod(d1, rm_arg_w8), [rm_arg_w8, d_cl1])
addop("ror", [bs('1100000'), w8] + rmmod(d1, rm_arg_w8) + [u08])
addop("rdmsr", [bs8(0x0f), bs8(0x32)])
addop("rdpmc", [bs8(0x0f), bs8(0x33)])
addop("rdtsc", [bs8(0x0f), bs8(0x31)])
addop("ret", [bs8(0xc3), stk])
addop("ret", [bs8(0xc2), stk, u16])
addop("retf", [bs8(0xcb), stk])
addop("retf", [bs8(0xca), stk, u16])
addop("rsm", [bs8(0x0f), bs8(0xaa)])
addop("sahf", [bs8(0x9e)])
# XXX typo in doc: /4 instead of /6
addop("sal", [bs('110100'), d_cl1, w8] +
rmmod(d6, rm_arg_w8), [rm_arg_w8, d_cl1])
addop("sal", [bs('1100000'), w8] + rmmod(d6, rm_arg_w8) + [u08])
addop("sar", [bs('110100'), d_cl1, w8] +
rmmod(d7, rm_arg_w8), [rm_arg_w8, d_cl1])
addop("sar", [bs('1100000'), w8] + rmmod(d7, rm_arg_w8) + [u08])
addop("scasb", [bs8(0xae)])
addop("scasw", [bs8(0xaf), bs_opmode16])
addop("scasd", [bs8(0xaf), bs_opmode32])
addop("scasq", [bs8(0xaf), bs_opmode64])
addop("shl", [bs('110100'), d_cl1, w8]
+ rmmod(d4, rm_arg_w8), [rm_arg_w8, d_cl1])
addop("shl", [bs('1100000'), w8] + rmmod(d4, rm_arg_w8) + [u08])
addop("shr", [bs('110100'), d_cl1, w8]
+ rmmod(d5, rm_arg_w8), [rm_arg_w8, d_cl1])
addop("shr", [bs('1100000'), w8] + rmmod(d5, rm_arg_w8) + [u08])
addop("sbb", [bs("0001110"), w8, d_eax, d_imm])
addop("sbb", [bs("100000"), se, w8] + rmmod(d3, rm_arg_w8) + [d_imm])
addop("sbb", [bs("000110"), swapargs, w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("set", [bs8(0x0f), bs('1001'), cond] + rmmod(regnoarg, rm_arg_08))
addop("sgdt", [bs8(0x0f), bs8(0x01)] + rmmod(d0, modrm=mod_mem))
addop("shld", [bs8(0x0f), bs8(0xa4)] +
rmmod(rmreg) + [u08], [rm_arg, rmreg, u08])
addop("shld", [bs8(0x0f), bs8(0xa5)] +
rmmod(rmreg) + [d_cl], [rm_arg, rmreg, d_cl])
addop("shrd", [bs8(0x0f), bs8(0xac)] +
rmmod(rmreg) + [u08], [rm_arg, rmreg, u08])
addop("shrd", [bs8(0x0f), bs8(0xad)] +
rmmod(rmreg) + [d_cl], [rm_arg, rmreg, d_cl])
addop("sidt", [bs8(0x0f), bs8(0x01)] + rmmod(d1, modrm=mod_mem))
addop("sldt", [bs8(0x0f), bs8(0x00)] + rmmod(d0, rm_arg_x=rm_arg_reg_m16))
addop("smsw", [bs8(0x0f), bs8(0x01)] + rmmod(d4))
addop("stc", [bs8(0xf9)])
addop("std", [bs8(0xfd)])
addop("sti", [bs8(0xfb)])
addop("stosb", [bs8(0xaa)])
addop("stosw", [bs8(0xab), bs_opmode16])
addop("stosd", [bs8(0xab), bs_opmode32])
addop("stosq", [bs8(0xab), bs_opmode64])
addop("str", [bs8(0x0f), bs8(0x00)] + rmmod(d1))
addop("sub", [bs("0010110"), w8, d_eax, d_imm])
addop("sub", [bs("100000"), se, w8] + rmmod(d5, rm_arg_w8) + [d_imm])
addop("sub", [bs("001010"), swapargs, w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("syscall", [bs8(0x0f), bs8(0x05)])
addop("sysenter", [bs8(0x0f), bs8(0x34)])
addop("sysexit", [bs8(0x0f), bs8(0x35)])
addop("sysret", [bs8(0x0f), bs8(0x07)])
addop("test", [bs("1010100"), w8, d_eax, d_imm])
addop("test", [bs("1111011"), w8] + rmmod(d0, rm_arg_w8) + [d_imm])
addop("test", [bs("1000010"), w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("ud2", [bs8(0x0f), bs8(0x0b)])
addop("verr", [bs8(0x0f), bs8(0x00)] + rmmod(d4))
addop("verw", [bs8(0x0f), bs8(0x00)] + rmmod(d5))
addop("wbinvd", [bs8(0x0f), bs8(0x09)])
addop("wrmsr", [bs8(0x0f), bs8(0x30)])
addop("xadd", [bs8(0x0f), bs("1100000"), w8]
+ rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("nop", [bs8(0x90), no_rex], alias=True)
addop("xchg", [bs('10010'), d_eax, reg])
addop("xchg", [bs('1000011'), w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("xlat", [bs8(0xd7)])
addop("xor", [bs("0011010"), w8, d_eax, d_imm])
addop("xor", [bs("100000"), se, w8] + rmmod(d6, rm_arg_w8) + [d_imm])
addop("xor", [bs("001100"), swapargs, w8] +
rmmod(rmreg, rm_arg_w8), [rm_arg_w8, rmreg])
addop("xgetbv", [bs8(0x0f), bs8(0x01), bs8(0xd0)])
#### MMX/SSE/AVX operations
#### Categories are the same as those listed at: https://software.intel.com/sites/landingpage/IntrinsicsGuide/
####
### Arithmetic (integers)
###
## Move
# SSE
addop("movapd", [bs8(0x0f), bs("0010100"), swapargs]
+ rmmod(xmm_reg, rm_arg_xmm) + [bs_opmode16], [xmm_reg, rm_arg_xmm])
addop("movaps", [bs8(0x0f), bs("0010100"), swapargs]
+ rmmod(xmm_reg, rm_arg_xmm) + [bs_opmode32], [xmm_reg, rm_arg_xmm])
addop("movaps", [bs8(0x0f), bs("0010100"), swapargs]
+ rmmod(xmm_reg, rm_arg_xmm) + [bs_opmode64], [xmm_reg, rm_arg_xmm])
addop("movdqu", [bs8(0x0f), bs("011"), swapargs, bs("1111"), pref_f3]
+ rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
addop("movdqa", [bs8(0x0f), bs("011"), swapargs, bs("1111"), pref_66]
+ rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
addop("movhpd", [bs8(0x0f), bs("0001011"), swapargs, pref_66] +
rmmod(xmm_reg, rm_arg_m64), [xmm_reg, rm_arg_m64])
addop("movhps", [bs8(0x0f), bs("0001011"), swapargs, no_xmm_pref] +
rmmod(xmm_reg, rm_arg_m64), [xmm_reg, rm_arg_m64])
addop("movlpd", [bs8(0x0f), bs("0001001"), swapargs, pref_66] +
rmmod(xmm_reg, rm_arg_m64), [xmm_reg, rm_arg_m64])
addop("movlps", [bs8(0x0f), bs("0001001"), swapargs, no_xmm_pref] +
rmmod(xmm_reg, rm_arg_m64), [xmm_reg, rm_arg_m64])
addop("movhlps", [bs8(0x0f), bs8(0x12), no_xmm_pref] +
rmmod(xmm_reg, rm_arg_xmm_reg), [xmm_reg, rm_arg_xmm_reg])
addop("movlhps", [bs8(0x0f), bs8(0x16), no_xmm_pref] +
rmmod(xmm_reg, rm_arg_xmm_reg), [xmm_reg, rm_arg_xmm_reg])
addop("movdq2q", [bs8(0x0f), bs8(0xd6), pref_f2] +
rmmod(mm_reg, rm_arg_xmm_reg), [mm_reg, rm_arg_xmm_reg])
## Additions
# SSE
addop("paddb", [bs8(0x0f), bs8(0xfc), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("paddw", [bs8(0x0f), bs8(0xfd), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("paddd", [bs8(0x0f), bs8(0xfe), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("paddq", [bs8(0x0f), bs8(0xd4), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("paddb", [bs8(0x0f), bs8(0xfc), no_xmm_pref] + rmmod(mm_reg, rm_arg_mm))
addop("paddw", [bs8(0x0f), bs8(0xfd), no_xmm_pref] + rmmod(mm_reg, rm_arg_mm))
addop("paddd", [bs8(0x0f), bs8(0xfe), no_xmm_pref] + rmmod(mm_reg, rm_arg_mm))
addop("paddq", [bs8(0x0f), bs8(0xd4), no_xmm_pref] + rmmod(mm_reg, rm_arg_mm))
## Subtractions
# SSE
addop("psubb", [bs8(0x0f), bs8(0xf8), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("psubw", [bs8(0x0f), bs8(0xf9), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("psubd", [bs8(0x0f), bs8(0xfa), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("psubq", [bs8(0x0f), bs8(0xfb), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("psubb", [bs8(0x0f), bs8(0xf8), no_xmm_pref] + rmmod(mm_reg, rm_arg_mm))
addop("psubw", [bs8(0x0f), bs8(0xf9), no_xmm_pref] + rmmod(mm_reg, rm_arg_mm))
addop("psubd", [bs8(0x0f), bs8(0xfa), no_xmm_pref] + rmmod(mm_reg, rm_arg_mm))
addop("psubq", [bs8(0x0f), bs8(0xfb), no_xmm_pref] + rmmod(mm_reg, rm_arg_mm))
### Arithmetic (floating-point)
###
## Additions
# SSE
addop("addps", [bs8(0x0f), bs8(0x58), no_xmm_pref] + rmmod(xmm_reg, rm_arg_xmm))
addop("addpd", [bs8(0x0f), bs8(0x58), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
## Subtractions
# SSE
addop("subps", [bs8(0x0f), bs8(0x5c), no_xmm_pref] + rmmod(xmm_reg, rm_arg_xmm))
addop("subpd", [bs8(0x0f), bs8(0x5c), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
## Multiplications
# SSE
addop("mulps", [bs8(0x0f), bs8(0x59), no_xmm_pref] + rmmod(xmm_reg, rm_arg_xmm))
addop("mulpd", [bs8(0x0f), bs8(0x59), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
## Divisions
# SSE
addop("divps", [bs8(0x0f), bs8(0x5e), no_xmm_pref] + rmmod(xmm_reg, rm_arg_xmm))
addop("divpd", [bs8(0x0f), bs8(0x5e), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
### Logical (floating-point)
###
## XOR
addop("xorps", [bs8(0x0f), bs8(0x57), no_xmm_pref] + rmmod(xmm_reg, rm_arg_xmm))
addop("xorpd", [bs8(0x0f), bs8(0x57), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
## AND
addop("andps", [bs8(0x0f), bs8(0x54), no_xmm_pref] + rmmod(xmm_reg, rm_arg_xmm))
addop("andpd", [bs8(0x0f), bs8(0x54), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
addop("andnps", [bs8(0x0f), bs8(0x55), no_xmm_pref] + rmmod(xmm_reg, rm_arg_xmm))
addop("andnpd", [bs8(0x0f), bs8(0x55), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
## OR
addop("orps", [bs8(0x0f), bs8(0x56), no_xmm_pref] + rmmod(xmm_reg, rm_arg_xmm))
addop("orpd", [bs8(0x0f), bs8(0x56), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
## AND
# MMX
addop("pand", [bs8(0x0f), bs8(0xdb), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm), [mm_reg, rm_arg_mm])
# SSE
addop("pand", [bs8(0x0f), bs8(0xdb), pref_66] +
rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
## ANDN
# MMX
addop("pandn", [bs8(0x0f), bs8(0xdf), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm), [mm_reg, rm_arg_mm])
# SSE
addop("pandn", [bs8(0x0f), bs8(0xdf), pref_66] +
rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
## OR
# MMX
addop("por", [bs8(0x0f), bs8(0xeb), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm), [mm_reg, rm_arg_mm])
# SSE
addop("por", [bs8(0x0f), bs8(0xeb), pref_66] +
rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
## XOR
# MMX
addop("pxor", [bs8(0x0f), bs8(0xef), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
# MMX
addop("pxor", [bs8(0x0f), bs8(0xef), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pshufb", [bs8(0x0f), bs8(0x38), bs8(0x00), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("pshufb", [bs8(0x0f), bs8(0x38), bs8(0x00), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pshufd", [bs8(0x0f), bs8(0x70), pref_66] +
rmmod(xmm_reg, rm_arg_xmm) + [u08])
### Convert
### SS = single precision
### SD = double precision
###
## SS -> SD
##
addop("cvtdq2pd", [bs8(0x0f), bs8(0xe6), pref_f3]
+ rmmod(xmm_reg, rm_arg_xmm_m64))
addop("cvtdq2ps", [bs8(0x0f), bs8(0x5b), no_xmm_pref]
+ rmmod(xmm_reg, rm_arg_xmm))
addop("cvtpd2dq", [bs8(0x0f), bs8(0xe6), pref_f2]
+ rmmod(xmm_reg, rm_arg_xmm))
addop("cvtpd2pi", [bs8(0x0f), bs8(0x2d), pref_66]
+ rmmod(mm_reg, rm_arg_xmm))
addop("cvtpd2ps", [bs8(0x0f), bs8(0x5a), pref_66]
+ rmmod(xmm_reg, rm_arg_xmm))
addop("cvtpi2pd", [bs8(0x0f), bs8(0x2a), pref_66]
+ rmmod(xmm_reg, rm_arg_mm_m64))
addop("cvtpi2ps", [bs8(0x0f), bs8(0x2a), no_xmm_pref]
+ rmmod(xmm_reg, rm_arg_mm_m64))
addop("cvtps2dq", [bs8(0x0f), bs8(0x5b), pref_66]
+ rmmod(xmm_reg, rm_arg_xmm))
addop("cvtps2pd", [bs8(0x0f), bs8(0x5a), no_xmm_pref]
+ rmmod(xmm_reg, rm_arg_xmm_m64))
addop("cvtps2pi", [bs8(0x0f), bs8(0x2d), no_xmm_pref]
+ rmmod(mm_reg, rm_arg_xmm_m64))
addop("cvtsd2si", [bs8(0x0f), bs8(0x2d), pref_f2]
+ rmmod(reg, rm_arg_xmm_m64))
addop("cvtsd2ss", [bs8(0x0f), bs8(0x5a), pref_f2]
+ rmmod(xmm_reg, rm_arg_xmm_m64))
addop("cvtsi2sd", [bs8(0x0f), bs8(0x2a), pref_f2]
+ rmmod(xmm_reg, rm_arg))
addop("cvtsi2ss", [bs8(0x0f), bs8(0x2a), xmmreg, pref_f3]
+ rmmod(xmm_reg, rm_arg))
addop("cvtss2sd", [bs8(0x0f), bs8(0x5a), pref_f3]
+ rmmod(xmm_reg, rm_arg_xmm_m32))
addop("cvtss2si", [bs8(0x0f), bs8(0x2d), pref_f3]
+ rmmod(rmreg, rm_arg_xmm_m32))
addop("cvttpd2pi",[bs8(0x0f), bs8(0x2c), pref_66]
+ rmmod(mm_reg, rm_arg_xmm))
addop("cvttpd2dq",[bs8(0x0f), bs8(0xe6), pref_66]
+ rmmod(xmm_reg, rm_arg_xmm))
addop("cvttps2dq",[bs8(0x0f), bs8(0x5b), pref_f3]
+ rmmod(xmm_reg, rm_arg_xmm))
addop("cvttps2pi",[bs8(0x0f), bs8(0x2c), no_xmm_pref]
+ rmmod(mm_reg, rm_arg_xmm_m64))
addop("cvttsd2si",[bs8(0x0f), bs8(0x2c), pref_f2]
+ rmmod(reg, rm_arg_xmm_m64))
addop("cvttss2si",[bs8(0x0f), bs8(0x2c), pref_f3]
+ rmmod(reg, rm_arg_xmm_m32))
addop("psrlq", [bs8(0x0f), bs8(0x73), no_xmm_pref] +
rmmod(d2, rm_arg_mm) + [u08], [rm_arg_mm, u08])
addop("psrlq", [bs8(0x0f), bs8(0x73), pref_66] +
rmmod(d2, rm_arg_xmm) + [u08], [rm_arg_xmm, u08])
addop("psrlq", [bs8(0x0f), bs8(0xd3), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm), [mm_reg, rm_arg_mm])
addop("psrlq", [bs8(0x0f), bs8(0xd3), pref_66] +
rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
addop("psrld", [bs8(0x0f), bs8(0x72), no_xmm_pref] +
rmmod(d2, rm_arg_mm) + [u08], [rm_arg_mm, u08])
addop("psrld", [bs8(0x0f), bs8(0x72), pref_66] +
rmmod(d2, rm_arg_xmm) + [u08], [rm_arg_xmm, u08])
addop("psrld", [bs8(0x0f), bs8(0xd2), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm), [mm_reg, rm_arg_mm])
addop("psrld", [bs8(0x0f), bs8(0xd2), pref_66] +
rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
addop("psrldq", [bs8(0x0f), bs8(0x73), pref_66] +
rmmod(d3, rm_arg_xmm) + [u08], [rm_arg_xmm, u08])
addop("psrlw", [bs8(0x0f), bs8(0x71), no_xmm_pref] +
rmmod(d2, rm_arg_mm) + [u08], [rm_arg_mm, u08])
addop("psrlw", [bs8(0x0f), bs8(0x71), pref_66] +
rmmod(d2, rm_arg_xmm) + [u08], [rm_arg_xmm, u08])
addop("psrlw", [bs8(0x0f), bs8(0xd1), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm), [mm_reg, rm_arg_mm])
addop("psrlw", [bs8(0x0f), bs8(0xd1), pref_66] +
rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
addop("psllq", [bs8(0x0f), bs8(0x73), no_xmm_pref] +
rmmod(d6, rm_arg_mm) + [u08], [rm_arg_mm, u08])
addop("psllq", [bs8(0x0f), bs8(0x73), pref_66] +
rmmod(d6, rm_arg_xmm) + [u08], [rm_arg_xmm, u08])
addop("psllq", [bs8(0x0f), bs8(0xf3), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm), [mm_reg, rm_arg_mm])
addop("psllq", [bs8(0x0f), bs8(0xf3), pref_66] +
rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
addop("pslld", [bs8(0x0f), bs8(0x72), no_xmm_pref] +
rmmod(d6, rm_arg_mm) + [u08], [rm_arg_mm, u08])
addop("pslld", [bs8(0x0f), bs8(0x72), pref_66] +
rmmod(d6, rm_arg_xmm) + [u08], [rm_arg_xmm, u08])
addop("pslld", [bs8(0x0f), bs8(0xf2), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm), [mm_reg, rm_arg_mm])
addop("pslld", [bs8(0x0f), bs8(0xf2), pref_66] +
rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
addop("psllw", [bs8(0x0f), bs8(0x71), no_xmm_pref] +
rmmod(d6, rm_arg_mm) + [u08], [rm_arg_mm, u08])
addop("psllw", [bs8(0x0f), bs8(0x71), pref_66] +
rmmod(d6, rm_arg_xmm) + [u08], [rm_arg_xmm, u08])
addop("psllw", [bs8(0x0f), bs8(0xf1), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm), [mm_reg, rm_arg_mm])
addop("psllw", [bs8(0x0f), bs8(0xf1), pref_66] +
rmmod(xmm_reg, rm_arg_xmm), [xmm_reg, rm_arg_xmm])
addop("pslldq", [bs8(0x0f), bs8(0x73), pref_66] +
rmmod(d7, rm_arg_xmm) + [u08], [rm_arg_xmm, u08])
addop("pmaxub", [bs8(0x0f), bs8(0xde), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("pmaxub", [bs8(0x0f), bs8(0xde), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pmaxuw", [bs8(0x0f), bs8(0x38), bs8(0x3e), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pmaxud", [bs8(0x0f), bs8(0x38), bs8(0x3f), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pminub", [bs8(0x0f), bs8(0xda), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("pminub", [bs8(0x0f), bs8(0xda), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pminuw", [bs8(0x0f), bs8(0x38), bs8(0x3a), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pminud", [bs8(0x0f), bs8(0x38), bs8(0x3b), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pcmpeqb", [bs8(0x0f), bs8(0x74), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("pcmpeqb", [bs8(0x0f), bs8(0x74), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pcmpeqw", [bs8(0x0f), bs8(0x75), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("pcmpeqw", [bs8(0x0f), bs8(0x75), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pcmpeqd", [bs8(0x0f), bs8(0x76), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("pcmpeqd", [bs8(0x0f), bs8(0x76), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pcmpgtd", [bs8(0x0f), bs8(0x66), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("pcmpgtd", [bs8(0x0f), bs8(0x66), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("punpckhbw", [bs8(0x0f), bs8(0x68), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("punpckhbw", [bs8(0x0f), bs8(0x68), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("punpckhwd", [bs8(0x0f), bs8(0x69), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("punpckhwd", [bs8(0x0f), bs8(0x69), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("punpckhdq", [bs8(0x0f), bs8(0x6a), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("punpckhdq", [bs8(0x0f), bs8(0x6a), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("punpckhqdq", [bs8(0x0f), bs8(0x6d), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("punpcklbw", [bs8(0x0f), bs8(0x60), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("punpcklbw", [bs8(0x0f), bs8(0x60), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("punpcklwd", [bs8(0x0f), bs8(0x61), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("punpcklwd", [bs8(0x0f), bs8(0x61), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("punpckldq", [bs8(0x0f), bs8(0x62), no_xmm_pref] +
rmmod(mm_reg, rm_arg_mm))
addop("punpckldq", [bs8(0x0f), bs8(0x62), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("punpcklqdq", [bs8(0x0f), bs8(0x6c), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("unpckhps", [bs8(0x0f), bs8(0x15), no_xmm_pref] +
rmmod(xmm_reg, rm_arg_xmm))
addop("unpckhpd", [bs8(0x0f), bs8(0x15), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("unpcklps", [bs8(0x0f), bs8(0x14), no_xmm_pref] +
rmmod(xmm_reg, rm_arg_xmm))
addop("unpcklpd", [bs8(0x0f), bs8(0x14), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("pinsrb", [bs8(0x0f), bs8(0x3a), bs8(0x20), pref_66] +
rmmod(xmm_reg, rm_arg_reg_m08) + [u08])
addop("pinsrd", [bs8(0x0f), bs8(0x3a), bs8(0x22), pref_66, bs_opmode32] +
rmmod(xmm_reg, rm_arg) + [u08])
addop("pinsrq", [bs8(0x0f), bs8(0x3a), bs8(0x22), pref_66] +
rmmod(xmm_reg, rm_arg_m64) + [bs_opmode64] + [u08])
addop("pinsrw", [bs8(0x0f), bs8(0xc4), no_xmm_pref] +
rmmod(mm_reg, rm_arg_reg_m16) + [u08])
addop("pinsrw", [bs8(0x0f), bs8(0xc4), pref_66] +
rmmod(xmm_reg, rm_arg_reg_m16) + [u08])
addop("pextrb", [bs8(0x0f), bs8(0x3a), bs8(0x14), pref_66] +
rmmod(xmm_reg, rm_arg_reg_m08) + [u08], [rm_arg_reg_m08, xmm_reg, u08])
addop("pextrd", [bs8(0x0f), bs8(0x3a), bs8(0x16), pref_66, bs_opmode32] +
rmmod(xmm_reg, rm_arg) + [u08], [rm_arg, xmm_reg, u08])
addop("pextrq", [bs8(0x0f), bs8(0x3a), bs8(0x16), pref_66] +
rmmod(xmm_reg, rm_arg_m64) + [bs_opmode64] + [u08], [rm_arg_m64, xmm_reg, u08])
addop("pextrw", [bs8(0x0f), bs8(0x3a), bs8(0x15), pref_66] +
rmmod(xmm_reg, rm_arg_reg_m16) + [u08], [rm_arg_reg_m16, xmm_reg, u08])
addop("pextrw", [bs8(0x0f), bs8(0xc5), no_xmm_pref] +
rmmod(mm_reg, rm_arg_reg_m16) + [u08], [rm_arg_reg_m16, mm_reg, u08])
addop("pextrw", [bs8(0x0f), bs8(0xc5), pref_66] +
rmmod(xmm_reg, rm_arg_reg_m16) + [u08], [rm_arg_reg_m16, xmm_reg, u08])
addop("sqrtpd", [bs8(0x0f), bs8(0x51), pref_66] +
rmmod(xmm_reg, rm_arg_xmm))
addop("sqrtps", [bs8(0x0f), bs8(0x51), no_xmm_pref] +
rmmod(xmm_reg, rm_arg_xmm))
addop("sqrtsd", [bs8(0x0f), bs8(0x51), pref_f2] +
rmmod(xmm_reg, rm_arg_xmm_m64))
addop("sqrtss", [bs8(0x0f), bs8(0x51), pref_f3] +
rmmod(xmm_reg, rm_arg_xmm_m32))
addop("pmovmskb", [bs8(0x0f), bs8(0xd7), no_xmm_pref] +
rmmod(reg_modrm, rm_arg_mm_reg))
addop("pmovmskb", [bs8(0x0f), bs8(0xd7), pref_66] +
rmmod(reg_modrm, rm_arg_xmm_reg))
addop("shufps", [bs8(0x0f), bs8(0xc6), no_xmm_pref] +
rmmod(xmm_reg, rm_arg_xmm) + [u08])
addop("shufpd", [bs8(0x0f), bs8(0xc6), pref_66] +
rmmod(xmm_reg, rm_arg_xmm) + [u08])
mn_x86.bintree = factor_one_bit(mn_x86.bintree)
# mn_x86.bintree = factor_fields_all(mn_x86.bintree)
"""
mod reg r/m
XX XXX XXX
"""
def print_size(e):
print e, e.size
return e
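# Illustrative sketch (an addition, not part of miasm): splitting the ModRM
# byte layout documented above -- mod (2 bits), reg (3 bits), r/m (3 bits).
def split_modrm(byte):
    mod = (byte >> 6) & 0b11
    reg = (byte >> 3) & 0b111
    rm = byte & 0b111
    return mod, reg, rm

# Example: 0xC0 encodes mod=3 (register-direct), reg=0, rm=0.
assert split_modrm(0xC0) == (3, 0, 0)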
|
stephengroat/miasm
|
miasm2/arch/x86/arch.py
|
Python
|
gpl-2.0
| 136,563
|
"""
Django settings for a project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
_ = lambda x: x
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4uem%gj=-or8o&zuk(+!!+-^!r$bhisk8cxuil1=s&ch(in2a9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.sites',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'grappelli.dashboard',
'grappelli',
'django.contrib.admin',
'markitup',
'compressor',
'sorl.thumbnail',
'project',
'project.articles',
'project.feeds',
'project.flat',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'project.context_processors.ajax_template',
'project.articles.context_processors.category_list',
'project.feeds.context_processors.feed_list',
'project.flat.context_processors.flatpage_list',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]
LANGUAGE_CODE = 'eu'
LANGUAGES = [
('eu', 'Euskara'),
]
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/grappelli/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'project/assets')]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
# DEPENDENCIES
THUMBNAIL_EXTENSION = 'png'
JQUERY_URL = 'js/jquery.min.js'
MARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True})
MARKITUP_SET = 'markitup/sets/markdown'
MARKITUP_SKIN = 'markitup/skins/markitup'
MARKITUP_AUTO_PREVIEW = True
OEMBED_DEFAULT_PARSE_HTML = False
COMPRESS_OUTPUT_DIR = 'cache'
COMPRESS_PRECOMPILERS = [
('text/x-sass', 'django_libsass.SassCompiler'),
]
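# Illustrative template usage (an addition; standard django-compressor syntax,
# not part of this settings file). The SASS precompiler above is selected by
# the link tag's type attribute:
#
#   {% load compress static %}
#   {% compress css %}
#   <link rel="stylesheet" type="text/x-sass" href="{% static 'style.sass' %}">
#   {% endcompress %}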
GRAPPELLI_INDEX_DASHBOARD = 'project.dashboard.Dashboard'
GRAPPELLI_ADMIN_TITLE = 'bidasoamedia.info'
# PROJECT
CATEGORY_NUMBER = 5
|
GISAElkartea/bidasoamedia
|
project/settings.py
|
Python
|
agpl-3.0
| 4,791
|
import sys
sys.path.append("/home/oracc/www/cuneifyplus")
import cgi
import os
from collections import OrderedDict
from html import escape
from traceback import format_exc
#from urllib.parse import quote
from cuneify_interface import (
TransliterationNotUnderstood,
UnrecognisedSymbol,
cuneify_line,
ordered_symbol_to_transliterations,
)
from environment import MY_URL, get_cache, get_font_directory
# A mapping from font name to description
FONT_NAMES = OrderedDict(
[
("Santakku", "Cursive Old Babylonian"),
("CuneiformOB", "Monumental Old Babylonian"),
("SantakkuM", "Monumental Old Babylonian"),
("UllikummiA", "Hittite"),
("UllikummiB", "Hittite"),
("UllikummiC", "Hittite"),
("Assurbanipal", "Neo-Assyrian"),
("CuneiformNA", "Neo-Assyrian"),
]
)
FONTS_PATH_NAME = "/fonts"
def _get_input_form(initial="Enter transliteration here..."):
""" Return a form that the user can use to enter some transliterated text """
font_name_selection = "".join(
[
'<option value="{0}">{1} (font: {0})</option>'.format(name, description)
for name, description in FONT_NAMES.items()
]
)
body = """
<form action="{}/cuneify" method="post">
<textarea rows="10" cols="80" name="input"></textarea>
<br /> <br />
<input type="checkbox" name="show_transliteration">Show transliteration with output<br /><br />
<select name="font_name">{}</select>
<input type="submit" name="action" value="Cuneify">
<input type="submit" name="action" value="Create sign list">
</form>""".format(
MY_URL, font_name_selection
)
# TODO Use 'initial' when it can be made to disappear on entry into widget
return body
def _get_cuneify_body(environ, transliteration, show_transliteration, font_name):
""" Return the HTML body contents when we've been given a transliteration, and show in the specified font """
body = ""
with get_cache(environ) as cache:
for line in transliteration.split("\n"):
# Make empty lines appear as breaks in the output
line = line.strip()
if line == "":
body += "<br />"
continue
try:
body += '<span class="{}">{}</span><br />'.format(
font_name.lower(),
cuneify_line(cache, line, show_transliteration).replace(
"\n", "<br />"
),
)
# body += '{}<br />'.format(cuneify_line(cache, line, show_transliteration).replace('\n', '<br />'))
except UnrecognisedSymbol as exception:
body += '<font color="red">Unknown symbol "{}" in "{}"</font><br />'.format(
exception.transliteration, line
)
except TransliterationNotUnderstood:
body += '<font color="red">Possible formatting error in "{}"</font><br />'.format(
line
)
# TODO will need javascript to re-populate the text area, I believe
# body += '<br /><br /><a href="{}?input={}">Go back</a><br />'.format(MY_URL, quote(transliteration))
body += '<br /><br /><a href="{}">Go back</a><br />'.format(MY_URL)
# TODO this can probably be neatened up a little bit
return body
def _get_symbol_list_body(environ, transliteration, font_name):
""" Return the HTML body for the symbol list page """
body = ""
with get_cache(environ) as cache:
(
symbol_to_transliterations,
unrecognised_tokens,
) = ordered_symbol_to_transliterations(
cache, transliteration, return_unrecognised=True
)
for cuneiform_symbol, transliterations in symbol_to_transliterations.items():
line = '<span class="{}">{}</span>: {}<br />'.format(
font_name.lower(), cuneiform_symbol, ", ".join(transliterations)
)
body += line
if len(unrecognised_tokens) > 0:
# Print out unrecognised tokens if there are any
body += '<br /><font color="red">These tokens were unrecognised: {}</font><br />'.format(
", ".join(unrecognised_tokens)
)
# TODO will need javascript to re-populate the text area, I believe
# body += '<br /><br /><a href="{}?input={}">Go back</a><br />'.format(MY_URL, quote(transliteration))
body += '<br /><br /><a href="{}">Go back</a><br />'.format(MY_URL)
# TODO this can probably be neatened up a little bit
return body
def construct_font_response(environ, start_response, path_info):
""" Given a requested path, construct a response with the data from the requested font file """
font_directory = get_font_directory(environ)
font_path = os.path.normpath(path_info.replace(FONTS_PATH_NAME, font_directory))
if not font_path.startswith(font_directory):
raise RuntimeError(
"Requesting font {} that is not in fonts directory {}".format(
font_path, font_directory
)
)
# The response body is just what we get from reading the font.
# TODO we could cache this in memory if reading the font is slow
with open(font_path, "rb") as f:
response_body = f.read()
status = "200 OK"
    if font_path.endswith(".woff"):
        ctype = "application/x-font-woff"
    elif font_path.endswith(".eot"):
        ctype = "application/vnd.ms-fontobject"
    elif font_path.endswith(".ttf"):
        ctype = "application/x-font-ttf"
    else:
        # Fall back to a generic type so ctype is always defined
        ctype = "application/octet-stream"
response_headers = [
("Content-Type", ctype),
("Content-Length", str(len(response_body))),
]
start_response(status, response_headers)
return [response_body]
def application(environ, start_response):
""" Entry point for the application """
# Use the appropriate behaviour here
path_info = environ["PATH_INFO"]
form = cgi.FieldStorage(
fp=environ["wsgi.input"], environ=environ, keep_blank_values=True
)
if path_info.startswith(FONTS_PATH_NAME):
# Return the static font file
return construct_font_response(environ, start_response, path_info)
elif path_info == "/cuneify":
        # Whatever else happens, we always need a non-empty transliteration.
        # Fetch the raw value first: escape(None) would raise, so checking
        # for None after escaping could never work.
        raw_value = form.getvalue("input")
        if raw_value is None or raw_value == "":
            # There is no transliteration, so show the input form again
            body = _get_input_form()
        else:
            transliteration = escape(raw_value)
            # Get the values of the other form inputs
            show_transliteration_value = form.getvalue("show_transliteration")
            show_transliteration = (
                show_transliteration_value is not None
                and show_transliteration_value.lower() == "on"
            )
            font_name = escape(form.getvalue("font_name"))
            action_value = escape(form.getvalue("action"))
            # The type of form submission we make determines what we do now
            if action_value == "Cuneify":
                # We do a transliteration and show the output
                body = _get_cuneify_body(
                    environ, transliteration, show_transliteration, font_name
                )
            elif action_value == "Create sign list":
                # Make a symbol list!
                body = _get_symbol_list_body(environ, transliteration, font_name)
            else:
                raise RuntimeError("Unrecognised action value {}".format(action_value))
else:
body = _get_input_form()
# All the CSS representing font classes
font_info = "\n".join(
[
"""@font-face {{{{
font-family: {1};
src: url(fonts/{1}.woff) format('woff'),
url(fonts/{1}.eot) format('embedded-opentype'),
url(fonts/{1}.ttf) format('truetype');
}}}}
.{0} {{{{
font-family: {1};
}}}}""".format(
font_name.lower(), font_name
)
for font_name in FONT_NAMES
]
)
response_body = (
"""<!doctype html>
<html lang="en">
<head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<style>"""
+ font_info
+ """</style>
</head>
<body>
{}
<br />
<hr>
<br />
Using most browsers, the cuneiform should appear on your screen, as the fonts are embedded in the website.
However, if you wish to copy-and-paste (e.g. into a Word document), you may need to install the fonts in order for the
characters to display correctly. To install the fonts, follow the links below:
<br />
<br />
Santakku — <a href="http://www.hethport.uni-wuerzburg.de/cuneifont/">http://www.hethport.uni-wuerzburg.de/cuneifont/</a> (click on "Old Babylonian Fonts") <br />
CuneiformOB - <a href="http://oracc.museum.upenn.edu/doc/help/visitingoracc/fonts/index.html">http://oracc.museum.upenn.edu/doc/help/visitingoracc/fonts/index.html</a> <br />
SantakkuM — <a href="http://www.hethport.uni-wuerzburg.de/cuneifont/">http://www.hethport.uni-wuerzburg.de/cuneifont/</a> (click on "Old Babylonian Fonts") <br />
UllikummiA — <a href="http://www.hethport.uni-wuerzburg.de/cuneifont/">http://www.hethport.uni-wuerzburg.de/cuneifont/</a> (click on "Hittite Fonts") <br />
UllikummiB — <a href="http://www.hethport.uni-wuerzburg.de/cuneifont/">http://www.hethport.uni-wuerzburg.de/cuneifont/</a> (click on "Hittite Fonts") <br />
UllikummiC — <a href="http://www.hethport.uni-wuerzburg.de/cuneifont/">http://www.hethport.uni-wuerzburg.de/cuneifont/</a> (click on "Hittite Fonts") <br />
Assurbanipal — <a href="http://www.hethport.uni-wuerzburg.de/cuneifont/">http://www.hethport.uni-wuerzburg.de/cuneifont/</a> (click on "Neo-Assyrian Font") <br />
CuneiformNA — <a href="http://oracc.museum.upenn.edu/doc/help/visitingoracc/fonts/index.html">http://oracc.museum.upenn.edu/doc/help/visitingoracc/fonts/index.html</a> <br />
<br />
<br />
Powered by <a href="http://oracc.museum.upenn.edu/saao/knpp/cuneiformrevealed/cuneify/">Cuneify</a>,
by Steve Tinney. Created by Tom Gillam, 2016.
</body></html>"""
)
response_body = response_body.format(body)
response_body = response_body.encode("utf-8")
status = "200 OK"
# ctype = 'text/plain'
ctype = "text/html"
response_headers = [
("Content-Type", ctype),
("Content-Length", str(len(response_body))),
]
start_response(status, response_headers)
return [response_body]
# Below for testing only
#
if __name__ == "__main__":
from wsgiref.simple_server import make_server
httpd = make_server("localhost", 8051, application)
# Wait for a single request, serve it and quit.
httpd.handle_request()
|
oracc/oracc
|
misc/cuneifyplus/wsgi.py
|
Python
|
gpl-2.0
| 10,719
|
# Copyright (C) 2009, 2010 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read back in a dump file and process it"""
import gzip
import os
import sys
import tempfile
from meliae import (
_loader,
loader,
scanner,
tests,
warn,
)
# A simple dump, with a couple of cross references, etc.
# a@5 = 1
# b@4 = 2
# c@6 = 'a str'
# t@7 = (a, b)
# d@2 = {a:b, c:t}
# l@3 = [a, b]
# l.append(l)
# outer@1 = (d, l)
_example_dump = [
'{"address": 1, "type": "tuple", "size": 20, "len": 2, "refs": [2, 3]}',
'{"address": 3, "type": "list", "size": 44, "len": 3, "refs": [3, 4, 5]}',
'{"address": 5, "type": "int", "size": 12, "value": 1, "refs": []}',
'{"address": 4, "type": "int", "size": 12, "value": 2, "refs": []}',
'{"address": 2, "type": "dict", "size": 124, "len": 2, "refs": [4, 5, 6, 7]}',
'{"address": 7, "type": "tuple", "size": 20, "len": 2, "refs": [4, 5]}',
'{"address": 6, "type": "str", "size": 29, "len": 5, "value": "a str"'
', "refs": []}',
'{"address": 8, "type": "module", "size": 60, "name": "mymod", "refs": [2]}',
]
# Note that this doesn't have a complete copy of the references. Namely when
# you subclass object you get a lot of references, and type instances also
# reference other stuff that tends to chain to stuff like 'sys', which ends up
# referencing everything.
_instance_dump = [
'{"address": 1, "type": "MyClass", "size": 32, "refs": [2, 3]}',
'{"address": 3, "type": "type", "size": 452, "name": "MyClass", "refs": []}',
'{"address": 2, "type": "dict", "size": 140, "len": 4'
', "refs": [4, 5, 6, 7, 9, 10, 11, 12]}',
'{"address": 4, "type": "str", "size": 25, "len": 1, "value": "a", "refs": []}',
'{"address": 5, "type": "int", "size": 12, "value": 1, "refs": []}',
'{"address": 6, "type": "str", "size": 25, "len": 1, "value": "c", "refs": []}',
'{"address": 7, "type": "dict", "size": 140, "len": 1, "refs": [8, 6]}',
'{"address": 8, "type": "str", "size": 25, "len": 1, "value": "s", "refs": []}',
'{"address": 9, "type": "str", "size": 25, "len": 1, "value": "b", "refs": []}',
'{"address": 10, "type": "str", "size": 30, "len": 6'
', "value": "string", "refs": []}',
'{"address": 11, "type": "str", "size": 25, "len": 1, "value": "d", "refs": []}',
'{"address": 12, "type": "tuple", "size": 32, "len": 1, "refs": [13]}',
'{"address": 13, "type": "int", "size": 12, "value": 2, "refs": []}',
'{"address": 14, "type": "module", "size": 28, "name": "sys", "refs": [15]}',
'{"address": 15, "type": "dict", "size": 140, "len": 2, "refs": [5, 6, 9, 6]}',
]
_old_instance_dump = [
'{"address": 1, "type": "instance", "size": 36, "refs": [2, 3]}',
'{"address": 3, "type": "dict", "size": 140, "len": 2, "refs": [4, 5, 6, 7]}',
'{"address": 7, "type": "int", "size": 12, "value": 2, "refs": []}',
'{"address": 6, "type": "str", "size": 25, "len": 1, "value": "b", "refs": []}',
'{"address": 5, "type": "int", "size": 12, "value": 1, "refs": []}',
'{"address": 4, "type": "str", "size": 25, "len": 1, "value": "a", "refs": []}',
'{"address": 2, "type": "classobj", "size": 48, "name": "OldStyle"'
', "refs": [8, 43839680, 9]}',
'{"address": 9, "type": "str", "size": 32, "len": 8, "value": "OldStyle"'
', "refs": []}',
'{"address": 8, "type": "tuple", "size": 28, "len": 0, "refs": []}',
]
_intern_dict_dump = [
'{"address": 2, "type": "str", "size": 25, "len": 1, "value": "a", "refs": []}',
'{"address": 3, "type": "str", "size": 25, "len": 1, "value": "b", "refs": []}',
'{"address": 4, "type": "str", "size": 25, "len": 1, "value": "c", "refs": []}',
'{"address": 5, "type": "str", "size": 25, "len": 1, "value": "d", "refs": []}',
'{"address": 6, "type": "dict", "size": 512, "refs": [2, 5, 5, 5, 4, 4, 3, 3]}',
'{"address": 7, "type": "dict", "size": 512, "refs": [6, 6, 5, 5, 4, 4, 3, 3]}',
'{"address": 8, "type": "dict", "size": 512, "refs": [2, 2, 5, 5, 4, 4, 3, 3]}',
]
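def _parse_dump_line(line):
    """Illustrative helper (an addition, not meliae API): every dump entry
    above is one JSON object per line, so a minimal reader is just:
    _parse_dump_line(_example_dump[0])['refs'] == [2, 3]
    """
    import json
    return json.loads(line)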
class TestLoad(tests.TestCase):
def test_load_smoketest(self):
test_dict = {1:2, None:'a string'}
t = tempfile.TemporaryFile(prefix='meliae-')
# On some platforms TemporaryFile returns a wrapper object with 'file'
# being the real object, on others, the returned object *is* the real
# file object
t_file = getattr(t, 'file', t)
scanner.dump_all_referenced(t_file, test_dict)
t_file.seek(0)
manager = loader.load(t_file, show_prog=False)
test_dict_id = id(test_dict)
self.assertTrue(test_dict_id in manager.objs,
'%s not found in %s' % (test_dict_id, manager.objs.keys()))
def test_load_one(self):
objs = loader.load([
'{"address": 1234, "type": "int", "size": 12, "value": 10'
', "refs": []}'], show_prog=False).objs
keys = objs.keys()
self.assertEqual([1234], keys)
obj = objs[1234]
self.assertTrue(isinstance(obj, _loader._MemObjectProxy))
# The address should be exactly the same python object as the key in
# the objs dictionary.
self.assertTrue(keys[0] is obj.address)
def test_load_without_simplejson(self):
objs = loader.load([
'{"address": 1234, "type": "int", "size": 12, "value": 10'
', "refs": []}',
'{"address": 2345, "type": "module", "size": 60, "name": "mymod"'
', "refs": [1234]}',
'{"address": 4567, "type": "str", "size": 150, "len": 126'
', "value": "Test \\\'whoami\\\'\\u000a\\"Your name\\""'
', "refs": []}'
], using_json=False, show_prog=False).objs
keys = sorted(objs.keys())
self.assertEqual([1234, 2345, 4567], keys)
obj = objs[1234]
self.assertTrue(isinstance(obj, _loader._MemObjectProxy))
# The address should be exactly the same python object as the key in
# the objs dictionary.
self.assertTrue(keys[0] is obj.address)
self.assertEqual(10, obj.value)
obj = objs[2345]
self.assertEqual("module", obj.type_str)
self.assertEqual("mymod", obj.value)
obj = objs[4567]
self.assertEqual("Test \\'whoami\\'\\u000a\\\"Your name\\\"", obj.value)
def test_load_example(self):
objs = loader.load(_example_dump, show_prog=False)
def test_load_defaults_to_computing_and_collapsing(self):
manager = loader.load(_instance_dump, show_prog=False, collapse=False)
instance_obj = manager[1]
self.assertEqual([2, 3], instance_obj.children)
manager = loader.load(_instance_dump, show_prog=False)
instance_obj = manager[1]
self.assertEqual([4, 5, 6, 7, 9, 10, 11, 12, 3], instance_obj.children)
def test_load_compressed(self):
# unfortunately NamedTemporaryFile's cannot be re-opened on Windows
fd, name = tempfile.mkstemp(prefix='meliae-')
f = os.fdopen(fd, 'wb')
try:
content = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=f)
for line in _example_dump:
content.write(line + '\n')
content.flush()
content.close()
del content
f.close()
objs = loader.load(name, show_prog=False).objs
objs[1]
finally:
f.close()
os.remove(name)
def test_get_all(self):
om = loader.load(_example_dump, show_prog=False)
the_ints = om.get_all('int')
self.assertEqual(2, len(the_ints))
self.assertEqual([4, 5], sorted([i.address for i in the_ints]))
def test_one(self):
om = loader.load(_example_dump, show_prog=False)
an_int = om[5]
self.assertEqual(5, an_int.address)
self.assertEqual('int', an_int.type_str)
class TestRemoveExpensiveReferences(tests.TestCase):
def test_remove_expensive_references(self):
lines = list(_example_dump)
lines.pop(-1) # Remove the old module
lines.append('{"address": 8, "type": "module", "size": 12'
', "name": "mymod", "refs": [9]}')
lines.append('{"address": 9, "type": "dict", "size": 124'
', "refs": [10, 11]}')
lines.append('{"address": 10, "type": "module", "size": 12'
', "name": "mod2", "refs": [12]}')
lines.append('{"address": 11, "type": "str", "size": 27'
', "value": "boo", "refs": []}')
lines.append('{"address": 12, "type": "dict", "size": 124'
', "refs": []}')
source = lambda:loader.iter_objs(lines)
mymod_dict = list(source())[8]
self.assertEqual([10, 11], mymod_dict.children)
result = list(loader.remove_expensive_references(source))
null_obj = result[0][1]
self.assertEqual(0, null_obj.address)
self.assertEqual('<ex-reference>', null_obj.type_str)
self.assertEqual([11, 0], result[9][1].children)
class TestMemObj(tests.TestCase):
def test_to_json(self):
manager = loader.load(_example_dump, show_prog=False, collapse=False)
objs = manager.objs.values()
objs.sort(key=lambda x:x.address)
expected = [
'{"address": 1, "type": "tuple", "size": 20, "refs": [2, 3]}',
'{"address": 2, "type": "dict", "size": 124, "refs": [4, 5, 6, 7]}',
'{"address": 3, "type": "list", "size": 44, "refs": [3, 4, 5]}',
'{"address": 4, "type": "int", "size": 12, "value": 2, "refs": []}',
'{"address": 5, "type": "int", "size": 12, "value": 1, "refs": []}',
'{"address": 6, "type": "str", "size": 29, "value": "a str", "refs": []}',
'{"address": 7, "type": "tuple", "size": 20, "refs": [4, 5]}',
'{"address": 8, "type": "module", "size": 60, "value": "mymod", "refs": [2]}',
]
self.assertEqual(expected, [obj.to_json() for obj in objs])
class TestObjManager(tests.TestCase):
def test_compute_parents(self):
manager = loader.load(_example_dump, show_prog=False)
objs = manager.objs
self.assertEqual((), objs[1].parents)
self.assertEqual([1, 3], objs[3].parents)
self.assertEqual([3, 7, 8], sorted(objs[4].parents))
self.assertEqual([3, 7, 8], sorted(objs[5].parents))
self.assertEqual([8], objs[6].parents)
self.assertEqual([8], objs[7].parents)
self.assertEqual((), objs[8].parents)
def test_compute_referrers(self):
# Deprecated
logged = []
def log_warn(msg, klass, stacklevel=None):
logged.append((msg, klass, stacklevel))
old_func = warn.trap_warnings(log_warn)
try:
manager = loader.load(_example_dump, show_prog=False)
manager.compute_referrers()
self.assertEqual([('.compute_referrers is deprecated.'
' Use .compute_parents instead.',
DeprecationWarning, 3),
], logged)
objs = manager.objs
finally:
warn.trap_warnings(old_func)
self.assertEqual((), objs[1].parents)
self.assertEqual([1, 3], objs[3].parents)
self.assertEqual([3, 7, 8], sorted(objs[4].parents))
self.assertEqual([3, 7, 8], sorted(objs[5].parents))
self.assertEqual([8], objs[6].parents)
self.assertEqual([8], objs[7].parents)
self.assertEqual((), objs[8].parents)
def test_compute_parents_ignore_repeated(self):
manager = loader.load(_intern_dict_dump, show_prog=False)
str_5 = manager[5]
# Each of these refers to str_5 multiple times, but they should only
# show up 1 time in the parent list.
self.assertEqual([6, 7, 8], sorted(str_5.parents))
def test_compute_parents_no_parents(self):
manager = loader.load(_intern_dict_dump, show_prog=False, max_parents=0)
str_5 = manager[5]
# Each of these refers to str_5 multiple times, but they should only
# show up 1 time in the parent list.
self.assertEqual([], sorted(str_5.parents))
def test_compute_parents_many_parents(self):
content = [
'{"address": 2, "type": "str", "size": 25, "len": 1, "value": "a", "refs": []}',
]
for x in xrange(200):
content.append('{"address": %d, "type": "tuple", "size": 20,'
' "len": 2, "refs": [2, 2]}' % (x+100))
# By default, we only track 100 parents
manager = loader.load(content, show_prog=False)
self.assertEqual(100, manager[2].num_parents)
manager = loader.load(content, show_prog=False, max_parents=0)
self.assertEqual(0, manager[2].num_parents)
manager = loader.load(content, show_prog=False, max_parents=-1)
self.assertEqual(200, manager[2].num_parents)
manager = loader.load(content, show_prog=False, max_parents=10)
self.assertEqual(10, manager[2].num_parents)
def test_compute_total_size(self):
manager = loader.load(_example_dump, show_prog=False)
objs = manager.objs
manager.compute_total_size(objs[8])
self.assertEqual(257, objs[8].total_size)
def test_compute_total_size_missing_ref(self):
lines = list(_example_dump)
        # 999 isn't in the dump; not sure how we get these in real life, but
        # they exist. We should live with references that can't be resolved.
lines[-1] = ('{"address": 8, "type": "tuple", "size": 16, "len": 1'
', "refs": [999]}')
manager = loader.load(lines, show_prog=False)
obj = manager[8]
manager.compute_total_size(obj)
self.assertEqual(16, obj.total_size)
def test_remove_expensive_references(self):
lines = list(_example_dump)
lines.pop(-1) # Remove the old module
lines.append('{"address": 8, "type": "module", "size": 12'
', "name": "mymod", "refs": [9]}')
lines.append('{"address": 9, "type": "dict", "size": 124'
', "refs": [10, 11]}')
lines.append('{"address": 10, "type": "module", "size": 12'
', "name": "mod2", "refs": [12]}')
lines.append('{"address": 11, "type": "str", "size": 27'
', "value": "boo", "refs": []}')
lines.append('{"address": 12, "type": "dict", "size": 124'
', "refs": []}')
manager = loader.load(lines, show_prog=False, collapse=False)
mymod_dict = manager.objs[9]
self.assertEqual([10, 11], mymod_dict.children)
manager.remove_expensive_references()
self.assertTrue(0 in manager.objs)
null_obj = manager.objs[0]
self.assertEqual(0, null_obj.address)
self.assertEqual('<ex-reference>', null_obj.type_str)
self.assertEqual([11, 0], mymod_dict.children)
def test_collapse_instance_dicts(self):
manager = loader.load(_instance_dump, show_prog=False, collapse=False)
# This should collapse all of the references from the instance's dict
# @2 into the instance @1
instance = manager.objs[1]
self.assertEqual(32, instance.size)
self.assertEqual([2, 3], instance.children)
inst_dict = manager.objs[2]
self.assertEqual(140, inst_dict.size)
self.assertEqual([4, 5, 6, 7, 9, 10, 11, 12], inst_dict.children)
mod = manager.objs[14]
self.assertEqual([15], mod.children)
mod_dict = manager.objs[15]
self.assertEqual([5, 6, 9, 6], mod_dict.children)
manager.compute_parents()
tpl = manager.objs[12]
self.assertEqual([2], tpl.parents)
self.assertEqual([1], inst_dict.parents)
self.assertEqual([14], mod_dict.parents)
manager.collapse_instance_dicts()
# The instance dict has been removed
self.assertEqual([4, 5, 6, 7, 9, 10, 11, 12, 3], instance.children)
self.assertEqual(172, instance.size)
self.assertFalse(2 in manager.objs)
self.assertEqual([1], tpl.parents)
self.assertEqual([5, 6, 9, 6], mod.children)
self.assertFalse(15 in manager.objs)
def test_collapse_old_instance_dicts(self):
manager = loader.load(_old_instance_dump, show_prog=False,
collapse=False)
instance = manager.objs[1]
self.assertEqual('instance', instance.type_str)
self.assertEqual(36, instance.size)
self.assertEqual([2, 3], instance.children)
inst_dict = manager[3]
self.assertEqual(140, inst_dict.size)
self.assertEqual([4, 5, 6, 7], inst_dict.children)
manager.compute_parents()
manager.collapse_instance_dicts()
# The instance dict has been removed, and its references moved into the
# instance, further, the type has been updated from generic 'instance'
# to being 'OldStyle'.
self.assertFalse(3 in manager.objs)
self.assertEqual(176, instance.size)
self.assertEqual([4, 5, 6, 7, 2], instance.children)
self.assertEqual('OldStyle', instance.type_str)
def test_expand_refs_as_dict(self):
# TODO: This test fails if simplejson is not installed, because the
# regex extractor does not cast to integers (they stay as
# strings). We could fix the test, or fix the extractor.
manager = loader.load(_instance_dump, show_prog=False, collapse=False)
as_dict = manager.refs_as_dict(manager[15])
self.assertEqual({1: 'c', 'b': 'c'}, as_dict)
manager.compute_parents()
manager.collapse_instance_dicts()
self.assertEqual({1: 'c', 'b': 'c'}, manager.refs_as_dict(manager[14]))
self.assertEqual({'a': 1, 'c': manager[7], 'b': 'string',
'd': manager[12]}, manager.refs_as_dict(manager[1]))
def test_expand_refs_as_list(self):
# TODO: This test fails if simplejson is not installed, because the
# regex extractor does not cast to integers (they stay as
# strings). We could fix the test, or fix the extractor.
manager = loader.load(_instance_dump, show_prog=False)
self.assertEqual([2], manager.refs_as_list(manager[12]))
def test_guess_intern_dict(self):
manager = loader.load(_intern_dict_dump, show_prog=False)
obj = manager.guess_intern_dict()
self.assertEqual(8, obj.address)
def test_summarize_refs(self):
manager = loader.load(_example_dump, show_prog=False)
summary = manager.summarize(manager[8])
# Note that the module is included in the summary
self.assertEqual(['int', 'module', 'str', 'tuple'],
sorted(summary.type_summaries.keys()))
self.assertEqual(257, summary.total_size)
def test_summarize_excluding(self):
manager = loader.load(_example_dump, show_prog=False)
summary = manager.summarize(manager[8], excluding=[4, 5])
# No ints when they are explicitly filtered
self.assertEqual(['module', 'str', 'tuple'],
sorted(summary.type_summaries.keys()))
self.assertEqual(233, summary.total_size)
|
isaacl/meliae
|
meliae/tests/test_loader.py
|
Python
|
gpl-3.0
| 19,580
|
from tests.package.test_perl import TestPerlBase
class TestPerlDBDmysql(TestPerlBase):
"""
package:
DBD-mysql XS
direct dependencies:
DBI XS
"""
config = TestPerlBase.config + \
"""
BR2_PACKAGE_PERL=y
BR2_PACKAGE_PERL_DBD_MYSQL=y
"""
def test_run(self):
self.login()
self.module_test("DBI")
self.module_test("DBD::mysql")
|
masahir0y/buildroot-yamada
|
support/testing/tests/package/test_perl_dbd_mysql.py
|
Python
|
gpl-2.0
| 426
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os, sys
import imp
def opusRun(progressCB,logCB,params):
f, filename, description = imp.find_module('arcgisscripting', ['c:/Python25/Lib/site-packages'])
arcgisscripting = imp.load_module('arcgisscripting', f, filename, description)
gp = arcgisscripting.create()
my_dict = {}
for key, val in params.iteritems():
my_dict[str(key)] = str(val)
    in_features = my_dict['in_features']
    out_features = my_dict['out_features']
    logCB("Executing feature to point\n")
    gp.FeatureToPoint_management(in_features, out_features)
    logCB("Finished feature to point\n")
def opusHelp():
    help = 'This is a very basic feature-to-point tool using the ESRI geoprocessing framework.\n' \
           '\n' \
           'The input and output parameters will accept paths to shapefiles (c:\\test.shp), ' \
           'personal or file geodatabase feature classes (c:\\test.gdb\\test_fc or c:\\test.mdb\\test_fc), ' \
           'or SDE feature classes (Database Connections\\Your Database Connection.sde\\your feature class).\n' \
           '\n' \
           'in_features: path to the input shapefile or feature class\n' \
           'out_features: path to the output point shapefile or feature class\n'
return help
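# Illustrative usage sketch (an addition; paths and callbacks are hypothetical,
# and a real run needs the ArcGIS geoprocessing framework installed):
#
#   def log(msg): print(msg)
#   def progress(pct): pass
#   opusRun(progress, log, {'in_features': 'c:/data.gdb/parcels',
#                           'out_features': 'c:/data.gdb/parcel_points'})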
|
apdjustino/DRCOG_Urbansim
|
src/opus_gui/data_manager/run/tools/esri_feature_to_point.py
|
Python
|
agpl-3.0
| 1,487
|
from django import forms
from django.core.files import File
from aur.models import *
import aur.Package as PKGBUILD
import os
import sys
from registration.backends.default import DefaultBackend
from django.contrib.auth.models import Group
class PackageSearchForm(forms.Form):
# Borrowed from AUR2-BR
def __init__(self, *args, **kwargs):
super(PackageSearchForm, self).__init__(*args, **kwargs)
repository_choices = [('all', 'All')]
repository_choices += [(repository.name.lower(), repository.name)
for repository in Repository.objects.all()]
self.fields['repository'].choices = repository_choices
repository = forms.ChoiceField(initial='all', choices=(), required=False)
query = forms.CharField(max_length=30, label="Keywords", required=False)
searchby = forms.ChoiceField(
initial='name',
required=False,
label="Search By",choices=(
('name', 'Package Name'),
('maintainer', 'Maintainer'),
)
)
lastupdate = forms.DateTimeField(label="Last Update", required=False)
limit = forms.ChoiceField(initial='25', required=False, choices=(
(25, 25),
(50, 50),
(75, 75),
(100, 100),
(150, 150),
))
def get_or_default(self, key):
if not self.is_bound:
return self.fields[key].initial
return self.cleaned_data.get(key) or self.fields[key].initial
def search(self):
if self.is_bound and not self.is_valid():
return None
repository = self.get_or_default('repository')
lastupdate = self.get_or_default('lastupdate')
query = self.get_or_default('query')
# Find the packages by searching the description and package name, or
# maintainer
if query:
if self.get_or_default('searchby') == 'maintainer':
results = Package.objects.filter(maintainers__username__icontains=query)
else:
results = Package.objects.filter(name__icontains=query)
results |= Package.objects.filter(description__icontains=query)
# Split query to search for each word as a tag
for keyword in query.split():
results |= Package.objects.filter(tags__exact=keyword)
else:
results = Package.objects.all()
# Restrict results
if repository != 'all':
results = results.filter(repository__name__iexact=repository)
if lastupdate:
results = results.filter(updated__gte=lastupdate)
return results
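    # Illustrative usage sketch (an addition; `request` is a hypothetical
    # Django request object):
    #
    #   form = PackageSearchForm(request.GET)
    #   packages = form.search()
    #   if packages is not None:
    #       packages = packages[:int(form.get_or_default('limit'))]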
class PackageField(forms.FileField):
widget = forms.widgets.FileInput
def __init__(self, *args, **kwargs):
super(PackageField, self).__init__(*args, **kwargs)
def clean(self, data, initial=None):
import tempfile
import tarfile
file = super(PackageField, self).clean(data, initial)
errors = list()
# Save the uploaded file to disk
directory = tempfile.mkdtemp()
filename = os.path.join(directory, file.name)
fp = open(filename, "wb")
for chunk in file.chunks():
fp.write(chunk)
fp.close()
# Try to parse the PKGBUILD
try:
pkg = PKGBUILD.Package(filename)
except:
raise forms.ValidationError(sys.exc_info()[1])
# Add path of the tarball so we can reference in other places
pkg['filename'] = filename
# Validate PKGBUILD
pkg.validate()
if not pkg.is_valid() or pkg.has_warnings():
errors.extend(pkg.get_errors())
errors.extend(pkg.get_warnings())
# Check if we have everything we need
for arch in pkg['arch']:
try:
Architecture.objects.get(name=arch)
except Architecture.DoesNotExist:
errors.append('architecture %s does not exist' % arch)
if pkg['install']:
try:
tar = tarfile.open(filename)
except tarfile.ReadError:
errors.append('install files are missing')
else:
files = tar.getnames()
for file in pkg['install']:
filepath = os.path.join(pkg['name'], file)
if not filepath in files:
errors.append('install file "%s" is missing' % file)
del files
# Report errors or return the validated package
if errors:
raise forms.ValidationError(errors)
else:
return pkg
class PackageSubmitForm(forms.Form):
repository = forms.ChoiceField(choices=())
package = PackageField(label="PKGBUILD")
# Borrowed from AUR2-BR
def __init__(self, *args, **kwargs):
super(PackageSubmitForm, self).__init__(*args, **kwargs)
repo_choices = [(repo.name.lower(), repo.name) for repo in Repository.objects.all()]
self.fields['repository'].choices = repo_choices
@transaction.commit_manually
def save(self, user):
import hashlib
import tarfile
pkg = self.cleaned_data['package']
tmpdir = os.path.dirname(pkg['filename'])
updating = False
creating = False
try:
package = Package.objects.get(name=pkg['name'])
except Package.DoesNotExist:
package = Package(name=pkg['name'])
creating = True
else:
updating = True
package.version = pkg['version']
package.release = pkg['release']
package.description = pkg['description']
package.url = pkg['url']
package.repository=Repository.objects.get(name__iexact=self.cleaned_data['repository'])
# Save the package so we can reference it
package.save()
if creating:
package.maintainers.add(user)
else:
# TODO: Check if user can upload/overwrite the package
pass
# Check for, and add dependencies
for dependency in pkg['depends']:
# This would be nice, but we don't have access to the official
# repositories
try:
dep = Package.objects.get(name=dependency)
except Package.DoesNotExist:
# Fail silently
pass
else:
package.depends.add(dep)
# Add provides
for provision in pkg['provides']:
object, created = Provision.objects.get_or_create(name=provision)
package.provides.add(object)
# Add licenses
for license in pkg['licenses']:
object, created = License.objects.get_or_create(name=license)
package.licenses.add(object)
# Add architectures
for arch in pkg['arch']:
object = Architecture.objects.get(name=arch)
package.architectures.add(object)
tar = tarfile.open(pkg['filename'], "r")
tmpdir_sources = os.path.join(tmpdir, 'sources')
tar.extractall(tmpdir_sources)
pkgbuild = os.path.join(tmpdir_sources, pkg['name'], 'PKGBUILD')
# Remove all sources. It's easier and cleaner this way.
if updating:
PackageFile.objects.filter(package=pkg['name']).delete()
package.tarball.delete()
# Hash and save PKGBUILD
fp = File(open(pkgbuild, "r"))
source = PackageFile(package=package)
source.filename.save('%(name)s/sources/PKGBUILD', fp)
source.save()
fp.seek(0)
md5hash = hashlib.md5(''.join(fp.readlines()))
hash = PackageHash(hash=md5hash.hexdigest(), file=source, type='md5')
hash.save()
fp.close()
# Save tarball
# TODO: Tar the saved sources instead of using the uploaded one, for
# security
fp = File(open(pkg['filename'], "rb"))
package.tarball.save(os.path.join('%(name)s', os.path.basename(pkg['filename'])), fp)
fp.close()
# Save source files
for index in range(len(pkg['source'])):
source_filename = pkg['source'][index]
source = PackageFile(package=package)
# If it's a local file, save to disk, otherwise record as url
source_file = os.path.join(tmpdir_sources, package.name, source_filename)
if os.path.exists(source_file):
fp = File(open(source_file, "r"))
source.filename.save('%(name)s/sources/' + source_filename, fp)
fp.close()
else:
# TODO: Check that it _is_ a url, otherwise report an error
# that files are missing
source.url = source_filename
source.save()
# Check for, and save, any hashes this file may have
for hash_type in ('md5', 'sha1', 'sha256', 'sha384', 'sha512'):
if pkg[hash_type + 'sums']:
PackageHash(hash=pkg[hash_type + 'sums'][index],
file=source, type=hash_type).save()
# Save install files
for file in pkg['install']:
source = PackageFile(package=package)
source_path = os.path.join(tmpdir_sources, pkg['name'], file)
fp = File(open(source_path, "r"))
source.filename.save('%(name)s/install/' + file, fp)
fp.close()
source.save()
transaction.commit()
# Remove temporary files
for root, dirs, files in os.walk(tmpdir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(tmpdir)
class AddGroupDefaultBackend(DefaultBackend):
def register(self, *args, **kwargs):
new_user = super(AddGroupDefaultBackend, self).register(*args, **kwargs)
new_user.groups.add(Group.objects.get(name = 'User'))
return new_user
|
BackupTheBerlios/aur2
|
archlinux/aur/forms.py
|
Python
|
gpl-2.0
| 10,012
|
## @package tags
# Module caffe2.python.layers.tags
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from caffe2.python import context
@context.define_context(allow_default=True)
class TagContext(object):
"""
Scope driven way to provide tags to the layers.
"""
def __init__(self, tags=None):
# Tags is expected to be list to keep order of adding/removing things
self.tags = tags or []
def add_tags(self, tags):
self.tags.extend(tags)
def remove_tags(self, tags):
assert self.tags[-len(tags):] == tags
self.tags = self.tags[:-len(tags)]
class Tags(object):
# TODO(amalevich): Tags might need to live in their own contexts, add this
# split later
EXCLUDE_FROM_TRAIN = 'exclude_from_train'
EXCLUDE_FROM_EVAL = 'exclude_from_eval'
EXCLUDE_FROM_PREDICTION = 'exclude_from_prediction'
EXCLUDE_FROM_ACCUMULATE_PRED = 'exclude_from_accumulate_pred'
PREPROCESSING = 'preprocessing'
HANDLE_AS_SPARSE_LAYER = 'handle_as_sparse_layer'
GRADIENT_FROM_PS = 'gradient_from_ps'
PREFER_GPU = 'prefer_gpu'
CPU_ONLY = 'cpu_only'
# The following three tags are hints to **distributed training framework**.
"""
Indicates a layer contains a sparse shardable parameter. The parameter
    should be sharded, and operators on those parameters should be run on
distributed parameter servers.
"""
SPARSE_SHARDED = 'sparse_sharded'
"""
Indicates a layer contains a sparse parameters among others, and that the
parameters should not be sharded (i.e. should be placed together on a node).
"""
SPARSE_DONT_SHARD = 'sparse_dont_shard'
"""
Used to manually indicate a component for an operator. Parameters for
all operators with the same component should be colocated on the same
parameter server.
"""
COMPONENT = 'component:'
"""
Valid tag prefixes for distributed training framework.
"""
DT_TAGS = (SPARSE_SHARDED, SPARSE_DONT_SHARD, COMPONENT)
# In certain cases we want to have different schema for training and
    # prediction; for example, in prediction we might need only a
    # subset of the ids present in the original schema. This tag is one of the ways
# to mark operators that will be removed from prediction and should
# override schema for predictors.
PREDICTION_SCHEMA = 'prediction_schema'
def __init__(self, tags):
if not isinstance(tags, list):
tags = [tags]
self.tags = tags
def __enter__(self):
TagContext.current().add_tags(self.tags)
return self
def __exit__(self, type, value, traceback):
TagContext.current().remove_tags(self.tags)
def __call__(self, func):
@six.wraps(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
Tags.TRAIN_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_EVAL,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
Tags.EVAL_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_TRAIN,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
Tags.PREDICTION_ONLY = [Tags.EXCLUDE_FROM_TRAIN, Tags.EXCLUDE_FROM_EVAL,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
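def _tags_usage_example():
    # Illustrative sketch (an addition, using only the API defined above):
    # Tags works as a context manager or as a decorator, pushing its tags
    # onto TagContext for the enclosed scope.
    with Tags(Tags.CPU_ONLY):
        assert Tags.CPU_ONLY in TagContext.current().tags
    assert Tags.CPU_ONLY not in TagContext.current().tags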
|
xzturn/caffe2
|
caffe2/python/layers/tags.py
|
Python
|
apache-2.0
| 3,396
|
from collections import namedtuple
import rpyc
import cv2
from detection.opencv import draw_lines
import time
State = namedtuple("State", "name act default_args")
State.__new__.__defaults__ = tuple([None] * 2) + ({},)
def main_loop(robot, start_state, state_dict, delay=0.02, remote_display=None):
print("Checking states...")
for state in state_dict.values():
if not isinstance(state, State):
            raise Exception("The state " + str(state) + " is not of type State.")
state = start_state
kwargs = state.default_args
if remote_display is None:
cv2display = cv2
else:
print("using remote")
conn = rpyc.classic.connect(remote_display)
_mod = conn.modules["rick.rpc"]
cv2display = _mod.RemoteDisplay()
tstart = time.time()
while True:
print("CURRENT_STATE",state.name)
tend=tstart
time.sleep(max(0, delay-( time.time()-tstart)))
tstart = time.time()
print("elapsed time :",tend-tstart)
#draw_lines(frame)
_, frame = robot.cap.read()
next_state_name, processed_frame, kwargs = state.act(robot,frame, **kwargs)
state = state_dict[next_state_name]
kwargs = {**state.default_args, **kwargs}
cv2display.imshow("frame", processed_frame)
if cv2display.waitKey(1) & 0xFF == 27:
break
robot.cap.release()
cv2display.destroyAllWindows()
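# Illustrative usage sketch (an addition; `my_robot` and the act function are
# hypothetical). Each act callable returns (next_state_name, frame, kwargs):
#
#   def search(robot, frame, **kwargs):
#       return "search", frame, {}
#
#   states = {"search": State("search", search)}
#   main_loop(my_robot, states["search"], states)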
|
TheCamusean/DLRCev3
|
rick/rick/core.py
|
Python
|
mit
| 1,443
|
#! /usr/bin/env python3
import sys
import random
from enumeration import Goal, Status, Action
from entity import Room, Agent, Knowledge, Cave
from knowledge import perceive, tell, update, ask
def print_intro():
print('Hunt the Wumpus')
print('MIT License (MIT)')
print('Copyright (c) 2014 gliderkite\n')
def print_actions():
print('1) Move forward')
print('2) Turn left')
print('3) Turn right')
print('4) Grab')
print('5) Shoot')
def print_perceptions(perceptions):
wumpus, pit, gold = perceptions
if wumpus == Status.Present:
print('You perceived a stench.')
if pit == Status.Present:
print('You perceived a breeze.')
if gold == Status.Present:
print('You perceived a glitter.')
if perceptions == (Status.Absent,) * 3:
print('No perceptions.')
print()
def parse_action(action):
if action == 1:
return Action.Move, (0,)
elif action == 2:
return Action.Turn, -1
elif action == 3:
return Action.Turn, 1
elif action == 4:
return Action.Grab, None
elif action == 5:
return Action.Shoot, None
def print_cave(loc):
print(' __________________')
    for y in range(4):
        for x in range(4):
            print('|_X_|' if (x, y) == loc else '|___|', end='')
        print()
    print()
if __name__ == '__main__':
# init seed
if '-seed' in sys.argv:
seed = int(sys.argv[sys.argv.index('-seed') + 1])
random.seed(seed)
# define entities
cave = Cave()
kb = Knowledge()
agent = Agent()
# display introduction
print_intro()
# run the game
while True:
#print('Cave:\n{}\n'.format(cave))
print('Agent:\n{}'.format(agent))
print_cave(agent.location)
# perceive in current location
perceptions = perceive(cave, agent.location)
if perceptions is None:
print('You died.')
break
#print('Perceptions:\n{}\n'.format(perceptions))
print_perceptions(perceptions)
if '-ai' in sys.argv:
tell(kb, perceptions, agent.location)
#print('Knowledge:\n{}\n'.format(kb))
update(kb, agent.location)
#print('Knowledge updated:\n{}\n'.format(kb))
goal = Goal.SeekGold if not agent.has_gold else Goal.BackToEntry
action = ask(kb, agent.location, agent.direction, goal)
print('Action:\n{} {}\n'.format(*action))
input('Next?')
else:
print_actions()
action = int(input('Choice? '))
print()
action = parse_action(action)
# perform the action
if agent.perform(action, cave, kb):
print('You perceived a scream.\n')
# check if the game is over
if agent.has_gold and agent.location == (0, 0):
print_cave(agent.location)
print('You win!')
break
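# Illustrative invocations (an addition), using the flags parsed above:
#
#   python3 wumpus.py -seed 42        # reproducible cave layout
#   python3 wumpus.py -seed 42 -ai    # let the knowledge base choose actions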
|
gliderkite/wumpus
|
src/wumpus.py
|
Python
|
mit
| 2,699
|
#!/usr/bin/env python
# encoding=utf8
import setuptools
with open('README.md') as fh:
long_description = fh.read()
setuptools.setup(
name='EximHandler',
version='1.1.0',
author='Dan Michael O. Heggø',
author_email='danmichaelo@gmail.com',
description='A logging handler class which sends an email using exim',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/danmichaelo/eximhandler',
packages=['eximhandler'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
],
)
|
danmichaelo/eximhandler
|
setup.py
|
Python
|
unlicense
| 636
|
# -*- coding: utf-8 -*-
"""Setup file for easy installation"""
import sys
import os
from os.path import join, dirname, split
from setuptools import setup
PY3 = os.environ.get('BUILD_VERSION') == '3' or sys.version_info[0] == 3
version = __import__('social').__version__
LONG_DESCRIPTION = """
Python Social Auth is an easy to setup social authentication/registration
mechanism with support for several frameworks and auth providers.
Crafted using base code from django-social-auth, implements a common interface
to define new authentication providers from third parties. And to bring support
for more frameworks and ORMs.
"""
def long_description():
"""Return long description from README.rst if it's present
because it doesn't get installed."""
try:
return open(join(dirname(__file__), 'README.rst')).read()
except IOError:
return LONG_DESCRIPTION
def path_tokens(path):
if not path:
return []
head, tail = split(path)
return path_tokens(head) + [tail]
def get_packages():
    exclude_packages = ('__pycache__',)
packages = []
for path_info in os.walk('social'):
tokens = path_tokens(path_info[0])
        if tokens[-1] not in exclude_packages:
packages.append('.'.join(tokens))
return packages
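# Illustrative note (an addition): path_tokens recursively splits a relative
# path into its components, so get_packages can turn directory walks into
# dotted package names, e.g.
#
#   path_tokens('social/backends/open_id')  # ['social', 'backends', 'open_id']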
requirements_file, tests_requirements_file = {
False: ('requirements.txt', 'social/tests/requirements.txt'),
True: ('requirements-python3.txt', 'social/tests/requirements-python3.txt')
}[PY3]
with open(requirements_file, 'r') as f:
requirements = f.readlines()
with open(tests_requirements_file, 'r') as f:
tests_requirements = [line for line in f.readlines() if '@' not in line]
setup(
name='python-social-auth',
version=version,
author='Matias Aguirre',
author_email='matiasaguirre@gmail.com',
description='Python social authentication made simple.',
license='BSD',
keywords='django, flask, pyramid, webpy, openid, oauth, social auth',
url='https://github.com/omab/python-social-auth',
packages=get_packages(),
long_description=long_description(),
install_requires=requirements,
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Internet',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
],
package_data={
'social/tests': ['social/tests/*.txt']
},
extras_require={
'django': ['social-auth-app-django'],
'django-mongoengine': ['social-auth-app-django-mongoengine'],
'flask': ['social-auth-app-flask', 'social-auth-app-flask-sqlalchemy'],
'flask-mongoengine': ['social-auth-app-flask-mongoengine'],
'flask-peewee': ['social-auth-app-flask-peewee'],
'cherrypy': ['social-auth-app-cherrypy'],
'pyramid': ['social-auth-app-pyramid'],
'tornado': ['social-auth-app-tornado'],
'webpy': ['social-auth-app-webpy']
},
include_package_data=True,
tests_require=tests_requirements,
test_suite='social.tests',
zip_safe=False
)
|
cjltsod/python-social-auth
|
setup.py
|
Python
|
bsd-3-clause
| 3,240
|
# -*- coding: utf-8 -*-
"""
Copyright 2003-2010 Cort Stratton. All rights reserved.
Copyright 2015, 2016 Hanson Robotics
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""This file contains the public interface to the aiml module."""
import AimlParser
import DefaultSubs
import Utils
from PatternMgr import PatternMgr
from WordSub import WordSub
from ConfigParser import ConfigParser
import copy
import glob
import os
import random
import re
import string
import sys
import time
import threading
import xml.sax
import logging
logger = logging.getLogger('hr.chatbot.aiml.kernel')
class Kernel:
# module constants
_globalSessionID = "_global" # key of the global session (duh)
_querySessionID = "_query" # key of the query session (duh)
_maxHistorySize = 10 # maximum length of the _inputs and _responses lists
# maximum number of recursive <srai>/<sr> tags before the response is
# aborted.
_maxRecursionDepth = 100
# special predicate keys
# keys to a queue (list) of recent user input
_inputHistory = "_inputHistory"
# keys to a queue (list) of recent responses.
_outputHistory = "_outputHistory"
# Should always be empty in between calls to respond()
_inputStack = "_inputStack"
def __init__(self):
self._verboseMode = True
self._version = "PyAIML 0.8.6"
self._brain = PatternMgr()
self._respondLock = threading.RLock()
self._textEncoding = "utf-8"
self._trace = []
# set up the sessions
self._sessions = {}
self._addSession(self._globalSessionID)
# Set up the bot predicates
self._botPredicates = {}
self.setBotPredicate("name", "Nameless")
# set up the word substitutors (subbers):
self._subbers = {}
self._subbers['gender'] = WordSub(DefaultSubs.defaultGender)
self._subbers['person'] = WordSub(DefaultSubs.defaultPerson)
self._subbers['person2'] = WordSub(DefaultSubs.defaultPerson2)
self._subbers['normal'] = WordSub(DefaultSubs.defaultNormal)
# set up the element processors
self._elementProcessors = {
"bot": self._processBot,
"condition": self._processCondition,
"date": self._processDate,
"formal": self._processFormal,
"gender": self._processGender,
"get": self._processGet,
"gossip": self._processGossip,
"id": self._processId,
"input": self._processInput,
"javascript": self._processJavascript,
"learn": self._processLearn,
"li": self._processLi,
"lowercase": self._processLowercase,
"person": self._processPerson,
"person2": self._processPerson2,
"random": self._processRandom,
"text": self._processText,
"sentence": self._processSentence,
"set": self._processSet,
"size": self._processSize,
"sr": self._processSr,
"srai": self._processSrai,
"star": self._processStar,
"system": self._processSystem,
"template": self._processTemplate,
"that": self._processThat,
"thatstar": self._processThatstar,
"think": self._processThink,
"topicstar": self._processTopicstar,
"uppercase": self._processUppercase,
"version": self._processVersion,
}
def bootstrap(self, brainFile=None, learnFiles=[], commands=[]):
"""Prepare a Kernel object for use.
If a brainFile argument is provided, the Kernel attempts to
load the brain at the specified filename.
If learnFiles is provided, the Kernel attempts to load the
specified AIML files.
Finally, each of the input strings in the commands list is
passed to respond().
"""
start = time.clock()
if brainFile:
self.loadBrain(brainFile)
# learnFiles might be a string, in which case it should be
# turned into a single-element list.
learns = learnFiles
try:
learns = [learnFiles + ""]
except:
pass
for file in learns:
self.learn(file)
# ditto for commands
cmds = commands
try:
cmds = [commands + ""]
except:
pass
for cmd in cmds:
logger.info(self._respond(cmd, self._globalSessionID))
if self._verboseMode:
logger.info("Kernel bootstrap completed in %.2f seconds" %
(time.clock() - start))
def verbose(self, isVerbose=True):
"""Enable/disable verbose output mode."""
self._verboseMode = isVerbose
def version(self):
"""Return the Kernel's version string."""
return self._version
def numCategories(self):
"""Return the number of categories the Kernel has learned."""
# there's a one-to-one mapping between templates and categories
return self._brain.numTemplates()
def resetBrain(self):
"""Reset the brain to its initial state.
        This is essentially equivalent to:
del(kern)
kern = aiml.Kernel()
"""
del(self._brain)
self.__init__()
def loadBrain(self, filename):
"""Attempt to load a previously-saved 'brain' from the
specified filename.
NOTE: the current contents of the 'brain' will be discarded!
"""
if self._verboseMode:
logger.info("Loading brain from %s..." % filename,)
start = time.clock()
self._brain.restore(filename)
if self._verboseMode:
end = time.clock() - start
logger.info("done (%d categories in %.2f seconds)" %
(self._brain.numTemplates(), end))
def saveBrain(self, filename):
"""Dump the contents of the bot's brain to a file on disk."""
if self._verboseMode:
logger.info("Saving brain to %s..." % filename,)
start = time.clock()
self._brain.save(filename)
if self._verboseMode:
logger.info("done (%.2f seconds)" % (time.clock() - start))
def getPredicate(self, name, sessionID=_globalSessionID):
"""Retrieve the current value of the predicate 'name' from the
specified session.
If name is not a valid predicate in the session, the empty
string is returned.
"""
try:
return self._sessions[sessionID][name]
except KeyError:
return ""
def setPredicate(self, name, value, sessionID=_globalSessionID):
"""Set the value of the predicate 'name' in the specified
session.
If sessionID is not a valid session, it will be created. If
name is not a valid predicate in the session, it will be
created.
"""
self._addSession(
sessionID) # add the session, if it doesn't already exist.
self._sessions[sessionID][name] = value
def getBotPredicate(self, name):
"""Retrieve the value of the specified bot predicate.
If name is not a valid bot predicate, the empty string is returned.
"""
try:
return self._botPredicates[name]
except KeyError:
return ""
def setBotPredicate(self, name, value):
"""Set the value of the specified bot predicate.
If name is not a valid bot predicate, it will be created.
"""
self._botPredicates[name] = value
# Clumsy hack: if updating the bot name, we must update the
# name in the brain as well
if name == "name":
self._brain.setBotName(self.getBotPredicate("name"))
def setTextEncoding(self, encoding):
"""Set the text encoding used when loading AIML files (Latin-1, UTF-8, etc.)."""
self._textEncoding = encoding
def loadSubs(self, filename):
"""Load a substitutions file.
The file must be in the Windows-style INI format (see the
standard ConfigParser module docs for information on this
format). Each section of the file is loaded into its own
substituter.
"""
inFile = file(filename)
parser = ConfigParser()
parser.readfp(inFile, filename)
inFile.close()
for s in parser.sections():
# Add a new WordSub instance for this section. If one already
# exists, delete it.
if self._subbers.has_key(s):
del(self._subbers[s])
self._subbers[s] = WordSub()
# iterate over the key,value pairs and add them to the subber
for k, v in parser.items(s):
self._subbers[s][k] = v
def _addSession(self, sessionID):
"""Create a new session with the specified ID string."""
if self._sessions.has_key(sessionID):
return
# Create the session.
self._sessions[sessionID] = {
# Initialize the special reserved predicates
self._inputHistory: [],
self._outputHistory: [],
self._inputStack: []
}
def _deleteSession(self, sessionID):
"""Delete the specified session."""
if self._sessions.has_key(sessionID):
self._sessions.pop(sessionID)
def getSessionData(self, sessionID=None):
"""Return a copy of the session data dictionary for the
specified session.
If no sessionID is specified, return a dictionary containing
*all* of the individual session dictionaries.
"""
s = None
if sessionID is not None:
try:
s = self._sessions[sessionID]
except KeyError:
s = {}
else:
s = self._sessions
return copy.deepcopy(s)
def learn(self, filename):
"""Load and learn the contents of the specified AIML file.
If filename includes wildcard characters, all matching files
will be loaded and learned.
"""
errors = []
for f in glob.glob(filename):
if self._verboseMode:
logger.info("Loading %s..." % f,)
start = time.clock()
# Load and parse the AIML file.
parser = AimlParser.create_parser()
handler = parser.getContentHandler()
handler.setEncoding(self._textEncoding)
try:
parser.parse(f)
except xml.sax.SAXParseException, msg:
err = "\nFATAL PARSE ERROR in file %s:\n%s\n" % (f, msg)
errors.append(err)
logger.error(err)
continue
# store the pattern/template pairs in the PatternMgr.
for key, tem in handler.categories.items():
self._brain.add(key, tem)
# Parsing was successful.
if self._verboseMode:
logger.info("done (%.2f seconds)" % (time.clock() - start))
return errors
def respond(self, input, sessionID=_globalSessionID, query=False):
"""Return the Kernel's response to the input string."""
if len(input) == 0:
return ""
# ensure that input is a unicode string
try:
input = input.decode(self._textEncoding, 'replace')
except UnicodeError:
pass
except AttributeError:
pass
# prevent other threads from stomping all over us.
self._respondLock.acquire()
# Add the session, if it doesn't already exist
self._addSession(sessionID)
if query:
            # Copy current session data to the new query session;
            # the query session is deleted after use.
sessionData = self.getSessionData(sessionID)
sessionID = self._querySessionID
self._addSession(sessionID)
self._sessions[sessionID].update(sessionData)
self._trace = []
# split the input into discrete sentences
sentences = Utils.sentences(input)
finalResponse = ""
for s in sentences:
# Add the input to the history list before fetching the
# response, so that <input/> tags work properly.
inputHistory = self.getPredicate(self._inputHistory, sessionID)
inputHistory.append(s)
while len(inputHistory) > self._maxHistorySize:
inputHistory.pop(0)
self.setPredicate(self._inputHistory, inputHistory, sessionID)
# Fetch the response
response = self._respond(s, sessionID)
# add the data from this exchange to the history lists
outputHistory = self.getPredicate(self._outputHistory, sessionID)
outputHistory.append(response)
while len(outputHistory) > self._maxHistorySize:
outputHistory.pop(0)
self.setPredicate(self._outputHistory, outputHistory, sessionID)
# append this response to the final response.
finalResponse += (response + " ")
finalResponse = finalResponse.strip()
assert(len(self.getPredicate(self._inputStack, sessionID)) == 0)
if query:
self._deleteSession(self._querySessionID)
logger.debug("Trace: {}".format(self._trace))
# release the lock and return
self._respondLock.release()
try:
return finalResponse.encode(self._textEncoding)
except UnicodeError:
return finalResponse
# This version of _respond() just fetches the response for some input.
# It does not mess with the input and output histories. Recursive calls
# to respond() spawned from tags like <srai> should call this function
# instead of respond().
def _respond(self, input, sessionID):
"""Private version of respond(), does the real work."""
if len(input) == 0:
return ""
# guard against infinite recursion
inputStack = self.getPredicate(self._inputStack, sessionID)
if len(inputStack) > self._maxRecursionDepth:
if self._verboseMode:
err = "WARNING: maximum recursion depth exceeded (input='%s')" % input.encode(
self._textEncoding, 'replace')
logger.warn(err)
return ""
# push the input onto the input stack
inputStack = self.getPredicate(self._inputStack, sessionID)
inputStack.append(input)
self.setPredicate(self._inputStack, inputStack, sessionID)
# run the input through the 'normal' subber
subbedInput = self._subbers['normal'].sub(input)
# fetch the bot's previous response, to pass to the match()
# function as 'that'.
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try:
that = outputHistory[-1]
except IndexError:
that = ""
subbedThat = self._subbers['normal'].sub(that)
# fetch the current topic
topic = self.getPredicate("topic", sessionID)
subbedTopic = self._subbers['normal'].sub(topic)
# Determine the final response.
response = ""
elem = self._brain.match(subbedInput, subbedThat, subbedTopic)
if elem is None:
if self._verboseMode:
err = "No match found for input: %s" % input.encode(
self._textEncoding)
logger.debug(err)
else:
# Process the element into a response string.
_response = self._processElement(elem, sessionID).strip()
response += _response
response += " "
response = response.strip()
# pop the top entry off the input stack.
inputStack = self.getPredicate(self._inputStack, sessionID)
inputStack.pop()
self.setPredicate(self._inputStack, inputStack, sessionID)
return response
def _processElement(self, elem, sessionID):
"""Process an AIML element.
The first item of the elem list is the name of the element's
XML tag. The second item is a dictionary containing any
attributes passed to that tag, and their values. Any further
items in the list are the elements enclosed by the current
element's begin and end tags; they are handled by each
element's handler function.
"""
try:
handlerFunc = self._elementProcessors[elem[0]]
except:
# Oops -- there's no handler function for this element
# type!
if self._verboseMode:
err = "WARNING: No handler found for <%s> element\n" % elem[
0].encode(self._textEncoding, 'replace')
logger.warn(err)
return ""
_response = handlerFunc(elem, sessionID)
if elem[0] == 'template':
trace = {}
trace['doc'] = elem[1]['doc']
trace['loc'] = elem[1]['line']
trace['pattern'] = elem[1]['pattern']
trace['pattern-loc'] = elem[1]['pattern-loc']
self._trace.append(trace)
return _response
######################################################
### Individual element-processing functions follow ###
######################################################
# <bot>
def _processBot(self, elem, sessionID):
"""Process a <bot> AIML element.
Required element attributes:
name: The name of the bot predicate to retrieve.
<bot> elements are used to fetch the value of global,
read-only "bot predicates." These predicates cannot be set
from within AIML; you must use the setBotPredicate() function.
"""
attrName = elem[1]['name']
return self.getBotPredicate(attrName)
# <condition>
def _processCondition(self, elem, sessionID):
"""Process a <condition> AIML element.
Optional element attributes:
name: The name of a predicate to test.
value: The value to test the predicate for.
<condition> elements come in three flavors. Each has different
        attributes, and each handles its contents differently.
The simplest case is when the <condition> tag has both a 'name'
and a 'value' attribute. In this case, if the predicate
'name' has the value 'value', then the contents of the element
are processed and returned.
If the <condition> element has only a 'name' attribute, then
its contents are a series of <li> elements, each of which has
a 'value' attribute. The list is scanned from top to bottom
until a match is found. Optionally, the last <li> element can
have no 'value' attribute, in which case it is processed and
returned if no other match is found.
If the <condition> element has neither a 'name' nor a 'value'
attribute, then it behaves almost exactly like the previous
case, except that each <li> subelement (except the optional
last entry) must now include both 'name' and 'value'
attributes.
"""
attr = None
response = ""
attr = elem[1]
# Case #1: test the value of a specific predicate for a
# specific value.
if attr.has_key('name') and attr.has_key('value'):
val = self.getPredicate(attr['name'], sessionID)
if val == attr['value']:
for e in elem[2:]:
response += self._processElement(e, sessionID)
return response
else:
# Case #2 and #3: Cycle through <li> contents, testing a
# name and value pair for each one.
try:
name = None
if attr.has_key('name'):
name = attr['name']
                # Get the list of <li> elements
listitems = []
for e in elem[2:]:
if e[0] == 'li':
listitems.append(e)
# if listitems is empty, return the empty string
if len(listitems) == 0:
return ""
# iterate through the list looking for a condition that
# matches.
foundMatch = False
for li in listitems:
try:
liAttr = li[1]
# if this is the last list item, it's allowed
# to have no attributes. We just skip it for now.
if len(liAttr.keys()) == 0 and li == listitems[-1]:
continue
# get the name of the predicate to test
liName = name
                        if liName is None:
liName = liAttr['name']
# get the value to check against
liValue = liAttr['value']
# do the test
if liValue == '*' and self.getPredicate(liName, sessionID):
foundMatch = True
response += self._processElement(li, sessionID)
break
if self.getPredicate(liName, sessionID) == liValue:
foundMatch = True
response += self._processElement(li, sessionID)
break
except:
# No attributes, no name/value attributes, no
# such predicate/session, or processing error.
if self._verboseMode:
                            logger.info(
                                "Something amiss -- skipping listitem %s", li)
raise
if not foundMatch:
# Check the last element of listitems. If it has
# no 'name' or 'value' attribute, process it.
try:
li = listitems[-1]
liAttr = li[1]
if not (liAttr.has_key('name') or liAttr.has_key('value')):
response += self._processElement(li, sessionID)
except:
# listitems was empty, no attributes, missing
# name/value attributes, or processing error.
if self._verboseMode:
logger.error("error in default listitem")
raise
except:
# Some other catastrophic cataclysm
if self._verboseMode:
logger.error("catastrophic condition failure")
raise
return response
# <date>
def _processDate(self, elem, sessionID):
"""Process a <date> AIML element.
<date> elements resolve to the current date and time. The
AIML specification doesn't require any particular format for
this information, so I go with whatever's simplest.
"""
return time.asctime()
# <formal>
def _processFormal(self, elem, sessionID):
"""Process a <formal> AIML element.
<formal> elements process their contents recursively, and then
capitalize the first letter of each word of the result.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return string.capwords(response)
# <gender>
def _processGender(self, elem, sessionID):
"""Process a <gender> AIML element.
<gender> elements process their contents, and then swap the
gender of any third-person singular pronouns in the result.
        This substitution is handled by the aiml.WordSub module.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return self._subbers['gender'].sub(response)
# <get>
def _processGet(self, elem, sessionID):
"""Process a <get> AIML element.
Required element attributes:
name: The name of the predicate whose value should be
retrieved from the specified session and returned. If the
predicate doesn't exist, the empty string is returned.
<get> elements return the value of a predicate from the
specified session.
"""
return self.getPredicate(elem[1]['name'], sessionID)
# <gossip>
def _processGossip(self, elem, sessionID):
"""Process a <gossip> AIML element.
<gossip> elements are used to capture and store user input in
an implementation-defined manner, theoretically allowing the
bot to learn from the people it chats with. I haven't
        decided how to define my implementation, so right now
<gossip> behaves identically to <think>.
"""
return self._processThink(elem, sessionID)
# <id>
def _processId(self, elem, sessionID):
""" Process an <id> AIML element.
<id> elements return a unique "user id" for a specific
conversation. In PyAIML, the user id is the name of the
current session.
"""
return sessionID
# <input>
def _processInput(self, elem, sessionID):
"""Process an <input> AIML element.
Optional attribute elements:
index: The index of the element from the history list to
return. 1 means the most recent item, 2 means the one
before that, and so on.
<input> elements return an entry from the input history for
the current session.
"""
inputHistory = self.getPredicate(self._inputHistory, sessionID)
try:
index = int(elem[1]['index'])
except:
index = 1
try:
return inputHistory[-index]
except IndexError:
if self._verboseMode:
err = "No such index %d while processing <input> element.\n" % index
logger.error(err)
return ""
# <javascript>
def _processJavascript(self, elem, sessionID):
"""Process a <javascript> AIML element.
<javascript> elements process their contents recursively, and
then run the results through a server-side Javascript
interpreter to compute the final response. Implementations
are not required to provide an actual Javascript interpreter,
        and right now PyAIML doesn't; <javascript> elements behave
exactly like <think> elements.
"""
return self._processThink(elem, sessionID)
# <learn>
def _processLearn(self, elem, sessionID):
"""Process a <learn> AIML element.
<learn> elements process their contents recursively, and then
treat the result as an AIML file to open and learn.
"""
filename = ""
for e in elem[2:]:
filename += self._processElement(e, sessionID)
self.learn(filename)
return ""
# <li>
def _processLi(self, elem, sessionID):
"""Process an <li> AIML element.
Optional attribute elements:
name: the name of a predicate to query.
value: the value to check that predicate for.
<li> elements process their contents recursively and return
the results. They can only appear inside <condition> and
<random> elements. See _processCondition() and
_processRandom() for details of their usage.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return response
# <lowercase>
def _processLowercase(self, elem, sessionID):
"""Process a <lowercase> AIML element.
<lowercase> elements process their contents recursively, and
then convert the results to all-lowercase.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return string.lower(response)
# <person>
def _processPerson(self, elem, sessionID):
"""Process a <person> AIML element.
<person> elements process their contents recursively, and then
convert all pronouns in the results from 1st person to 2nd
        person, and vice versa. This substitution is handled by the
aiml.WordSub module.
If the <person> tag is used atomically (e.g. <person/>), it is
a shortcut for <person><star/></person>.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
if len(elem[2:]) == 0: # atomic <person/> = <person><star/></person>
response = self._processElement(['star', {}], sessionID)
return self._subbers['person'].sub(response)
# <person2>
def _processPerson2(self, elem, sessionID):
"""Process a <person2> AIML element.
<person2> elements process their contents recursively, and then
convert all pronouns in the results from 1st person to 3rd
        person, and vice versa. This substitution is handled by the
aiml.WordSub module.
If the <person2> tag is used atomically (e.g. <person2/>), it is
a shortcut for <person2><star/></person2>.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
if len(elem[2:]) == 0: # atomic <person2/> = <person2><star/></person2>
response = self._processElement(['star', {}], sessionID)
return self._subbers['person2'].sub(response)
# <random>
def _processRandom(self, elem, sessionID):
"""Process a <random> AIML element.
<random> elements contain zero or more <li> elements. If
none, the empty string is returned. If one or more <li>
elements are present, one of them is selected randomly to be
processed recursively and have its results returned. Only the
chosen <li> element's contents are processed. Any non-<li> contents are
ignored.
"""
listitems = []
for e in elem[2:]:
if e[0] == 'li':
listitems.append(e)
if len(listitems) == 0:
return ""
# select and process a random listitem.
random.shuffle(listitems)
return self._processElement(listitems[0], sessionID)
# <sentence>
def _processSentence(self, elem, sessionID):
"""Process a <sentence> AIML element.
<sentence> elements process their contents recursively, and
then capitalize the first letter of the results.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
try:
response = response.strip()
words = string.split(response, " ", 1)
words[0] = string.capitalize(words[0])
response = string.join(words)
return response
except IndexError: # response was empty
return ""
# <set>
def _processSet(self, elem, sessionID):
"""Process a <set> AIML element.
Required element attributes:
name: The name of the predicate to set.
<set> elements process their contents recursively, and assign the results to a predicate
(given by their 'name' attribute) in the current session. The contents of the element
are also returned.
"""
value = ""
for e in elem[2:]:
value += self._processElement(e, sessionID)
self.setPredicate(elem[1]['name'], value, sessionID)
return value
# <size>
def _processSize(self, elem, sessionID):
"""Process a <size> AIML element.
<size> elements return the number of AIML categories currently
in the bot's brain.
"""
return str(self.numCategories())
# <sr>
def _processSr(self, elem, sessionID):
"""Process an <sr> AIML element.
<sr> elements are shortcuts for <srai><star/></srai>.
"""
star = self._processElement(['star', {}], sessionID)
response = self._respond(star, sessionID)
return response
# <srai>
def _processSrai(self, elem, sessionID):
"""Process a <srai> AIML element.
<srai> elements recursively process their contents, and then
pass the results right back into the AIML interpreter as a new
piece of input. The results of this new input string are
returned.
"""
newInput = ""
for e in elem[2:]:
newInput += self._processElement(e, sessionID)
return self._respond(newInput, sessionID)
# <star>
def _processStar(self, elem, sessionID):
"""Process a <star> AIML element.
Optional attribute elements:
index: Which "*" character in the current pattern should
be matched?
<star> elements return the text fragment matched by the "*"
character in the current input pattern. For example, if the
input "Hello Tom Smith, how are you?" matched the pattern
"HELLO * HOW ARE YOU", then a <star> element in the template
would evaluate to "Tom Smith".
"""
try:
index = int(elem[1]['index'])
except KeyError:
index = 1
# fetch the user's last input
inputStack = self.getPredicate(self._inputStack, sessionID)
input = self._subbers['normal'].sub(inputStack[-1])
# fetch the Kernel's last response (for 'that' context)
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try:
that = self._subbers['normal'].sub(outputHistory[-1])
except:
that = "" # there might not be any output yet
topic = self.getPredicate("topic", sessionID)
response = self._brain.star("star", input, that, topic, index)
return response
# <system>
def _processSystem(self, elem, sessionID):
"""Process a <system> AIML element.
<system> elements process their contents recursively, and then
attempt to execute the results as a shell command on the
server. The AIML interpreter blocks until the command is
complete, and then returns the command's output.
For cross-platform compatibility, any file paths inside
<system> tags should use Unix-style forward slashes ("/") as a
directory separator.
"""
# build up the command string
command = ""
for e in elem[2:]:
command += self._processElement(e, sessionID)
# normalize the path to the command. Under Windows, this
# switches forward-slashes to back-slashes; all system
# elements should use unix-style paths for cross-platform
# compatibility.
#executable,args = command.split(" ", 1)
#executable = os.path.normpath(executable)
#command = executable + " " + args
command = os.path.normpath(command)
# execute the command.
response = ""
try:
out = os.popen(command)
except RuntimeError, msg:
if self._verboseMode:
err = "WARNING: RuntimeError while processing \"system\" element:\n%s\n" % msg.encode(
self._textEncoding, 'replace')
logger.warn(err)
return "There was an error while computing my response. Please inform my botmaster."
# I'm told this works around a potential IOError exception.
time.sleep(0.01)
for line in out:
response += line + "\n"
response = string.join(response.splitlines()).strip()
return response
# <template>
def _processTemplate(self, elem, sessionID):
"""Process a <template> AIML element.
<template> elements recursively process their contents, and
return the results. <template> is the root node of any AIML
response tree.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return response
# text
def _processText(self, elem, sessionID):
"""Process a raw text element.
Raw text elements aren't really AIML tags. Text elements cannot contain
other elements; instead, the third item of the 'elem' list is a text
string, which is immediately returned. They have a single attribute,
automatically inserted by the parser, which indicates whether whitespace
in the text should be preserved or not.
"""
try:
elem[2] + ""
except TypeError:
raise TypeError, "Text element contents are not text"
        # If the whitespace behavior for this element is "default",
# we reduce all stretches of >1 whitespace characters to a single
# space. To improve performance, we do this only once for each
# text element encountered, and save the results for the future.
if elem[1]["xml:space"] == "default":
elem[2] = re.sub("\s+", " ", elem[2])
elem[1]["xml:space"] = "preserve"
return elem[2]
# <that>
def _processThat(self, elem, sessionID):
"""Process a <that> AIML element.
Optional element attributes:
index: Specifies which element from the output history to
return. 1 is the most recent response, 2 is the next most
recent, and so on.
<that> elements (when they appear inside <template> elements)
        are the output equivalent of <input> elements; they return one
of the Kernel's previous responses.
"""
outputHistory = self.getPredicate(self._outputHistory, sessionID)
index = 1
try:
# According to the AIML spec, the optional index attribute
# can either have the form "x" or "x,y". x refers to how
# far back in the output history to go. y refers to which
# sentence of the specified response to return.
index = int(elem[1]['index'].split(',')[0])
except:
pass
try:
return outputHistory[-index]
except IndexError:
if self._verboseMode:
err = "No such index %d while processing <that> element.\n" % index
logger.error(err)
return ""
# <thatstar>
def _processThatstar(self, elem, sessionID):
"""Process a <thatstar> AIML element.
Optional element attributes:
index: Specifies which "*" in the <that> pattern to match.
<thatstar> elements are similar to <star> elements, except
that where <star/> returns the portion of the input string
matched by a "*" character in the pattern, <thatstar/> returns
the portion of the previous input string that was matched by a
"*" in the current category's <that> pattern.
"""
try:
index = int(elem[1]['index'])
except KeyError:
index = 1
# fetch the user's last input
inputStack = self.getPredicate(self._inputStack, sessionID)
input = self._subbers['normal'].sub(inputStack[-1])
# fetch the Kernel's last response (for 'that' context)
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try:
that = self._subbers['normal'].sub(outputHistory[-1])
except:
that = "" # there might not be any output yet
topic = self.getPredicate("topic", sessionID)
response = self._brain.star("thatstar", input, that, topic, index)
return response
# <think>
def _processThink(self, elem, sessionID):
"""Process a <think> AIML element.
<think> elements process their contents recursively, and then
discard the results and return the empty string. They're
useful for setting predicates and learning AIML files without
generating any output.
"""
for e in elem[2:]:
self._processElement(e, sessionID)
return ""
# <topicstar>
def _processTopicstar(self, elem, sessionID):
"""Process a <topicstar> AIML element.
Optional element attributes:
index: Specifies which "*" in the <topic> pattern to match.
<topicstar> elements are similar to <star> elements, except
that where <star/> returns the portion of the input string
matched by a "*" character in the pattern, <topicstar/>
returns the portion of current topic string that was matched
by a "*" in the current category's <topic> pattern.
"""
try:
index = int(elem[1]['index'])
except KeyError:
index = 1
# fetch the user's last input
inputStack = self.getPredicate(self._inputStack, sessionID)
input = self._subbers['normal'].sub(inputStack[-1])
# fetch the Kernel's last response (for 'that' context)
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try:
that = self._subbers['normal'].sub(outputHistory[-1])
except:
that = "" # there might not be any output yet
topic = self.getPredicate("topic", sessionID)
response = self._brain.star("topicstar", input, that, topic, index)
return response
# <uppercase>
def _processUppercase(self, elem, sessionID):
"""Process an <uppercase> AIML element.
<uppercase> elements process their contents recursively, and
return the results with all lower-case characters converted to
upper-case.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return string.upper(response)
# <version>
def _processVersion(self, elem, sessionID):
"""Process a <version> AIML element.
<version> elements return the version number of the AIML
interpreter.
"""
return self.version()
def getTraceDocs(self):
docs = []
for trace in self._trace:
docs.append(
'{doc}, {loc}, {pattern}, {pattern-loc}'.format(**trace))
docs.reverse()
return docs
##################################################
### Self-test functions follow ###
##################################################
def _testTag(kern, tag, input, outputList):
"""Tests 'tag' by feeding the Kernel 'input'. If the result
matches any of the strings in 'outputList', the test passes.
"""
global _numTests, _numPassed
_numTests += 1
logger.info("Testing <" + tag + ">:",)
response = kern.respond(input).decode(kern._textEncoding)
if response in outputList:
logger.info("PASSED")
_numPassed += 1
return True
else:
logger.error("FAILED (response: '%s')" %
response.encode(kern._textEncoding, 'replace'))
return False
if __name__ == "__main__":
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
# Run some self-tests
k = Kernel()
cwd = os.path.dirname(os.path.realpath(__file__))
k.bootstrap(learnFiles=os.path.join(cwd, "self-test.aiml"))
global _numTests, _numPassed
_numTests = 0
_numPassed = 0
_testTag(k, 'bot', 'test bot', ["My name is Nameless"])
k.setPredicate('gender', 'male')
_testTag(k, 'condition test #1',
'test condition name value', ['You are handsome'])
k.setPredicate('gender', 'female')
_testTag(k, 'condition test #2', 'test condition name value', [''])
_testTag(k, 'condition test #3',
'test condition name', ['You are beautiful'])
k.setPredicate('gender', 'robot')
_testTag(k, 'condition test #4',
'test condition name', ['You are genderless'])
_testTag(k, 'condition test #5', 'test condition', ['You are genderless'])
k.setPredicate('gender', 'male')
_testTag(k, 'condition test #6', 'test condition', ['You are handsome'])
# the date test will occasionally fail if the original and "test"
# times cross a second boundary. There's no good way to avoid
# this problem and still do a meaningful test, so we simply
# provide a friendly message to be printed if the test fails.
date_warning = """
NOTE: the <date> test will occasionally report failure even if it
succeeds. So long as the response looks like a date/time string,
there's nothing to worry about.
"""
if not _testTag(k, 'date', 'test date', ["The date is %s" % time.asctime()]):
logger.warn(date_warning)
_testTag(k, 'formal', 'test formal', ["Formal Test Passed"])
_testTag(k, 'gender', 'test gender', [
"He'd told her he heard that her hernia is history"])
_testTag(k, 'get/set', 'test get and set',
["I like cheese. My favorite food is cheese"])
_testTag(k, 'gossip', 'test gossip', ["Gossip is not yet implemented"])
_testTag(k, 'id', 'test id', ["Your id is _global"])
_testTag(k, 'input', 'test input', ['You just said: test input'])
_testTag(k, 'javascript', 'test javascript', [
"Javascript is not yet implemented"])
_testTag(k, 'lowercase', 'test lowercase', [
"The Last Word Should Be lowercase"])
_testTag(k, 'person', 'test person', ['HE is a cool guy.'])
_testTag(k, 'person2', 'test person2', ['YOU are a cool guy.'])
_testTag(k, 'person2 (no contents)',
'test person2 I Love Lucy', ['YOU Love Lucy'])
_testTag(k, 'random', 'test random', [
"response #1", "response #2", "response #3"])
_testTag(k, 'random empty', 'test random empty', ["Nothing here!"])
_testTag(k, 'sentence', "test sentence", [
"My first letter should be capitalized."])
_testTag(k, 'size', "test size", [
"I've learned %d categories" % k.numCategories()])
_testTag(k, 'sr', "test sr test srai", ["srai results: srai test passed"])
_testTag(k, 'sr nested', "test nested sr test srai",
["srai results: srai test passed"])
_testTag(k, 'srai', "test srai", ["srai test passed"])
_testTag(k, 'srai infinite', "test srai infinite", [""])
_testTag(k, 'star test #1', 'You should test star begin',
['Begin star matched: You should'])
_testTag(k, 'star test #2', 'test star creamy goodness middle',
['Middle star matched: creamy goodness'])
_testTag(k, 'star test #3', 'test star end the credits roll',
['End star matched: the credits roll'])
_testTag(k, 'star test #4', 'test star having multiple stars in a pattern makes me extremely happy',
['Multiple stars matched: having, stars in a pattern, extremely happy'])
_testTag(k, 'system', "test system", ["The system says hello!"])
_testTag(k, 'that test #1', "test that", [
"I just said: The system says hello!"])
_testTag(k, 'that test #2', "test that", [
"I have already answered this question"])
_testTag(k, 'thatstar test #1', "test thatstar", ["I say beans"])
_testTag(k, 'thatstar test #2', "test thatstar", ["I just said \"beans\""])
_testTag(k, 'thatstar test #3', "test thatstar multiple",
['I say beans and franks for everybody'])
_testTag(k, 'thatstar test #4', "test thatstar multiple",
['Yes, beans and franks for all!'])
_testTag(k, 'think', "test think", [""])
k.setPredicate("topic", "fruit")
_testTag(k, 'topic', "test topic", [
"We were discussing apples and oranges"])
k.setPredicate("topic", "Soylent Green")
_testTag(k, 'topicstar test #1', 'test topicstar',
["Solyent Green is made of people!"])
k.setPredicate("topic", "Soylent Ham and Cheese")
_testTag(k, 'topicstar test #2', 'test topicstar multiple', [
"Both Soylents Ham and Cheese are made of people!"])
_testTag(k, 'unicode support', u"你好", [u"Hey, you speak Chinese! 你好"])
_testTag(k, 'uppercase', 'test uppercase', [
"The Last Word Should Be UPPERCASE"])
_testTag(k, 'version', 'test version', [
"PyAIML is version %s" % k.version()])
_testTag(k, 'whitespace preservation', 'test whitespace', [
"Extra Spaces\n Rule! (but not in here!) But Here They Do!"])
# Report test results
logger.info("--------------------")
if _numTests == _numPassed:
logger.info("%d of %d tests passed!" % (_numPassed, _numTests))
sys.exit(0)
else:
logger.info("%d of %d tests passed (see above for detailed errors)" % (
_numPassed, _numTests))
sys.exit(1)
# Run an interactive interpreter
# print "\nEntering interactive mode (ctrl-c to exit)"
# while True: print k.respond(raw_input("> "))
|
hansonrobotics/chatbot
|
src/chatbot/aiml/Kernel.py
|
Python
|
mit
| 50,732
|
import logging
import time
from autotest.client.shared import error
from virttest import utils_misc
@error.context_aware
def run(test, params, env):
"""
    Change removable media:
1) Boot VM with QMP/human monitor enabled.
2) Connect to QMP/human monitor server.
3) Eject original cdrom.
4) Eject original cdrom for second time.
5) Insert new image to cdrom.
6) Eject device after add new image by change command.
    7) Insert the original image back into the cdrom.
8) Try to eject non-removable device w/o force option.
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment
"""
qemu_binary = utils_misc.get_qemu_binary(params)
if not utils_misc.qemu_has_option("qmp", qemu_binary):
logging.warn("qemu does not support qmp. Human monitor will be used.")
qmp_used = False
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
logging.info("Wait until device is ready")
time.sleep(10)
if vm.monitor.protocol == "qmp":
qmp_used = True
orig_img_name = params.get("cdrom_cd1")
p_dict = {"file": orig_img_name}
device_name = vm.get_block(p_dict)
if device_name is None:
msg = "Fail to get device using image %s" % orig_img_name
raise error.TestFail(msg)
error.context("Eject original device.")
eject_cmd = "eject device=%s" % device_name
vm.monitor.send_args_cmd(eject_cmd)
logging.info("Wait until device is ejected")
time.sleep(10)
blocks_info = vm.monitor.info("block")
if orig_img_name in str(blocks_info):
raise error.TestFail("Fail to eject cdrom %s. " % orig_img_name)
error.context("Eject original device for second time")
vm.monitor.send_args_cmd(eject_cmd)
new_img_name = params.get("new_img_name")
error.context("Insert new image to device.")
change_cmd = "change device=%s,target=%s" % (device_name, new_img_name)
vm.monitor.send_args_cmd(change_cmd)
logging.info("Wait until device changed")
time.sleep(10)
blocks_info = vm.monitor.info("block")
if new_img_name not in str(blocks_info):
raise error.TestFail("Fail to chang cdrom to %s." % new_img_name)
if qmp_used:
eject_cmd = "eject device=%s, force=True" % device_name
else:
eject_cmd = "eject device=%s" % device_name
error.context("Eject device after add new image by change command")
vm.monitor.send_args_cmd(eject_cmd)
logging.info("Wait until new image is ejected")
time.sleep(10)
blocks_info = vm.monitor.info("block")
if new_img_name in str(blocks_info):
raise error.TestFail("Fail to eject cdrom %s." % orig_img_name)
error.context("Insert %s to device %s" % (orig_img_name, device_name))
change_cmd = "change device=%s,target=%s" % (device_name, orig_img_name)
vm.monitor.send_args_cmd(change_cmd)
logging.info("Wait until device changed")
time.sleep(10)
blocks_info = vm.monitor.info("block")
if orig_img_name not in str(blocks_info):
raise error.TestFail("Fail to change cdrom to %s." % orig_img_name)
error.context("Try to eject non-removable device")
p_dict = {"removable": False}
device_name = vm.get_block(p_dict)
if device_name is None:
raise error.TestFail("Could not find non-removable device")
if params.get("force_eject", "no") == "yes":
if not qmp_used:
eject_cmd = "eject -f %s " % device_name
else:
eject_cmd = "eject device=%s, force=True" % device_name
else:
eject_cmd = "eject device=%s," % device_name
try:
vm.monitor.send_args_cmd(eject_cmd)
except Exception, e:
if "is not removable" not in str(e):
raise error.TestFail(e)
logging.debug("Catch exception message: %s" % e)
logging.info("Wait until device is ejected")
time.sleep(10)
blocks_info = vm.monitor.info("block")
if device_name not in str(blocks_info):
raise error.TestFail("Could remove non-removable device!")
session.close()
|
swapnakrishnan2k/tp-qemu
|
qemu/tests/eject_media.py
|
Python
|
gpl-2.0
| 4,185
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ver 16 - 26 March 2017 -
import time
import mysql.connector
from mysql.connector import errorcode
import string
import datetime
from db import *
import sys
def output(x):
print(str(datetime.datetime.now().time())[:8]+ " "+ str(x))
sys.stdout.flush()
# -- DB Connection ---------------------------
try:
db = mysql.connector.connect(**config)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
output("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
output("Database does not exists")
else:
output(err)
else:
output("Start procedure")
# -- END DB Connection ---------------------------
cur = db.cursor()
cur2 = db.cursor()
sql = "SELECT id FROM tbnode WHERE sendtime = 1 AND tbstatus_id = 1 AND currentstatus = 1 "
cur.execute(sql)
id = list(cur.fetchall())
while True:
for row in id:
try:
output ("Synchronizing time")
sql = "INSERT INTO tbdataout (timekey,type,v0,v1,v2,v3,v4,v5) VALUES (millis(),6," + str(row[0] ) + ",DAY(NOW()),MONTH(NOW()),YEAR(NOW()),HOUR(NOW()),MINUTE(NOW()))"
cur2.execute(sql)
db.commit()
output ("Time synchronized")
except mysql.connector.Error as err:
output ("database error... " + proc + "- " + str(err))
db.commit()
time.sleep(1200)
|
theflorianmaas/dh
|
Python/dhproc/syncTime.py
|
Python
|
mit
| 1,369
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that a module that we import into an SConscript file can itself
easily import the global SCons variables, and a handful of other variables
directly from SCons.Script modules.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
import m1
""")
test.write("m1.py", """\
from SCons.Script import *
SConscript('SConscript')
""")
test.write('SConscript', """\
import m2
import m3
import m4
""")
test.write("m2.py", """\
from SCons.Script import *
Command("file.out", "file.in", Copy("$TARGET", "$SOURCE"))
""")
test.write("m3.py", """\
import SCons.Script
SCons.Script.BuildTask
SCons.Script.CleanTask
SCons.Script.QuestionTask
old_SCons_Script_variables = [
'PrintHelp',
'OptParser',
'keep_going_on_error',
'print_explanations',
'print_includes',
'print_objects',
'print_time',
'memory_stats',
'ignore_errors',
'repositories',
'print_dtree',
'print_tree',
'sconscript_time',
'command_time',
'exit_status',
'profiling',
]
for var in old_SCons_Script_variables:
try:
getattr(SCons.Script, var)
except AttributeError:
pass
else:
raise Exception("unexpected variable SCons.Script.%s" % var)
""")
test.write("m4.py", """\
import SCons.Script.SConscript
SCons.Script.SConscript.Arguments
SCons.Script.SConscript.ArgList
SCons.Script.SConscript.BuildTargets
SCons.Script.SConscript.CommandLineTargets
SCons.Script.SConscript.DefaultTargets
""")
test.write("file.in", "file.in\n")
test.run(arguments = '.')
test.must_match("file.out", "file.in\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Distrotech/scons
|
test/Script-import.py
|
Python
|
mit
| 2,878
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
###############################################################################
# Library to configure the hypothetical pseudocomponents definition
#
# - Widget: Petro pseudocomponent configuration
# - ConfigDialog: Dialog tool for standalone use
###############################################################################
from PyQt5 import QtWidgets
from lib.petro import Petroleo
class Widget(QtWidgets.QWidget):
"""Petro new component configuration"""
def __init__(self, config=None, parent=None):
super(Widget, self).__init__(parent)
layout = QtWidgets.QGridLayout(self)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "Molecular weight")), 1, 1)
self.M = QtWidgets.QComboBox()
for p in Petroleo.METHODS_M:
self.M.addItem(p)
layout.addWidget(self.M, 1, 2)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "Critic properties")), 2, 1)
self.critical = QtWidgets.QComboBox()
for c in Petroleo.METHODS_crit:
self.critical.addItem(c)
layout.addWidget(self.critical, 2, 2)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "Critic volume")), 3, 1)
self.vc = QtWidgets.QComboBox()
for v in Petroleo.METHODS_Vc:
self.vc.addItem(v)
layout.addWidget(self.vc, 3, 2)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "Acentric factor")), 4, 1)
self.f_acent = QtWidgets.QComboBox()
for w in Petroleo.METHODS_w:
self.f_acent.addItem(w)
layout.addWidget(self.f_acent, 4, 2)
layout.addWidget(QtWidgets.QLabel("Z<sub>c</sub>"), 5, 1)
self.Zc = QtWidgets.QComboBox()
for method in Petroleo.METHODS_Zc:
self.Zc.addItem(method)
layout.addWidget(self.Zc, 5, 2)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "Boiling Temperature")), 6, 1)
self.Tb = QtWidgets.QComboBox()
for tb in Petroleo.METHODS_Tb:
self.Tb.addItem(tb)
layout.addWidget(self.Tb, 6, 2)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "Specific Gravity")), 7, 1)
self.SG = QtWidgets.QComboBox()
for sg in Petroleo.METHODS_SG:
self.SG.addItem(sg)
layout.addWidget(self.SG, 7, 2)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "Refractive Index")), 8, 1)
self.n = QtWidgets.QComboBox()
for n in Petroleo.METHODS_n:
self.n.addItem(n)
layout.addWidget(self.n, 8, 2)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "PNA composition")), 9, 1)
self.PNA = QtWidgets.QComboBox()
for method in Petroleo.METHODS_PNA:
self.PNA.addItem(method)
layout.addWidget(self.PNA, 9, 2)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "Destilate curve conversion")), 10, 1)
self.curves = QtWidgets.QComboBox()
self.curves.addItem("Riazi")
self.curves.addItem("Daubert")
layout.addWidget(self.curves, 10, 2)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "Hydrogen %")), 11, 1)
self.H = QtWidgets.QComboBox()
for method in Petroleo.METHODS_H:
self.H.addItem(method)
layout.addWidget(self.H, 11, 2)
layout.addItem(QtWidgets.QSpacerItem(
10, 0, QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding), 15, 1, 1, 3)
if config.has_section("petro"):
self.M.setCurrentIndex(
config.getint("petro", "M"))
self.critical.setCurrentIndex(config.getint("petro", "critical"))
self.vc.setCurrentIndex(config.getint("petro", "vc"))
self.f_acent.setCurrentIndex(
config.getint("petro", "f_acent"))
self.Tb.setCurrentIndex(config.getint("petro", "Tb"))
self.SG.setCurrentIndex(config.getint("petro", "SG"))
self.n.setCurrentIndex(config.getint("petro", "n"))
self.Zc.setCurrentIndex(config.getint("petro", "Zc"))
self.PNA.setCurrentIndex(config.getint("petro", "PNA"))
self.H.setCurrentIndex(config.getint("petro", "H"))
self.curves.setCurrentIndex(config.getint("petro", "curve"))
def value(self, config):
if not config.has_section("petro"):
config.add_section("petro")
config.set("petro", "M",
str(self.M.currentIndex()))
config.set("petro", "critical", str(self.critical.currentIndex()))
config.set("petro", "vc", str(self.vc.currentIndex()))
config.set("petro", "f_acent",
str(self.f_acent.currentIndex()))
config.set("petro", "Tb", str(self.Tb.currentIndex()))
config.set("petro", "SG", str(self.SG.currentIndex()))
config.set("petro", "n", str(self.n.currentIndex()))
config.set("petro", "Zc", str(self.Zc.currentIndex()))
config.set("petro", "PNA", str(self.PNA.currentIndex()))
config.set("petro", "H", str(self.H.currentIndex()))
config.set("petro", "curve", str(self.curves.currentIndex()))
return config
class ConfigDialog(QtWidgets.QDialog):
"""Dialog to config thermal method calculations"""
def __init__(self, config=None, parent=None):
super(ConfigDialog, self).__init__(parent)
self.setWindowTitle(QtWidgets.QApplication.translate(
"pychemqt", "Moody diagram configuration"))
layout = QtWidgets.QVBoxLayout(self)
self.widget = Widget(config)
layout.addWidget(self.widget)
self.buttonBox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
layout.addWidget(self.buttonBox)
def value(self, config):
"""Function result for wizard"""
config = self.widget.value(config)
return config
if __name__ == "__main__":
import os
import sys
from configparser import ConfigParser
app = QtWidgets.QApplication(sys.argv)
conf_dir = os.path.expanduser('~') + "/.pychemqt/"
config = ConfigParser()
config.read(conf_dir+"pychemqtrc")
Dialog = ConfigDialog(config)
Dialog.show()
sys.exit(app.exec_())
|
jjgomera/pychemqt
|
UI/prefPetro.py
|
Python
|
gpl-3.0
| 7,535
|
import wget
import argparse
import os
import tensorflow as tf
import tarfile
import numpy as np
MODEL_DIR = "/tmp/chrome/mobilebert"
MODEL_URL = "https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/mobilebert_squad_savedmodels.tar.gz"
OUTPUT_DIR = "../android/app/src/main/assets"
def prepare_model(opts):
#download the model
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
saved_model_tar = os.path.join(MODEL_DIR, 'mobilebert_squad_saved.tar.gz')
if not os.path.exists(saved_model_tar):
wget.download(MODEL_URL, saved_model_tar)
#extract
tar_file = tarfile.open(saved_model_tar)
tar_file.extractall(path=MODEL_DIR)
#convert to tflite
model_path = os.path.join(MODEL_DIR, "mobilebert_squad_savedmodels/quant_saved_model")
print(model_path)
converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
tflite_model = converter.convert()
with open(os.path.join(opts.out_dir, "text_mobilebert.tflite"), "wb") as f:
f.write(tflite_model)
# def evaluate(opts):
# print("evaluation")
# interpreter = tf.lite.Interpreter(model_path=os.path.join(OUTPUT_DIR, "text_mobilebert.tflite"))
# interpreter.allocate_tensors()
# input_details = interpreter.get_input_details()
# output_details = interpreter.get_output_details()
# print(input_details)
# print(output_details)
# input_shape = input_details[0]['shape']
# input_data = np.array(np.random.random_sample(input_shape), dtype=input_details[0]['dtype'])
# interpreter.set_tensor(input_details[0]['index'], input_data)
# interpreter.invoke()
# output_data = interpreter.get_tensor(output_details[0]['index'])
# print(output_data)
def evaluate_mobilebert(opts):
from mobilebert import run_classifier
task_name = "cola"
vocab_file = "./data/vocab.txt"
config = "3rdparty/googleResearch/mobilebert/config/uncased_L-24_H-128_B-512_A-4_F-4_OPT.json"
output = "./build"
data_dir = "/tmp/chrome/mobilebert/data_cache/"
eval = "True"
cmd = f'python3 -m run_classifier --task_name {task_name}\
--vocab_file {vocab_file}\
--bert_config_file {config}\
--output_dir {output}\
--do_eval {eval}\
--data_dir {data_dir}\
'
os.system(cmd)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--out-dir', help="Output directory of model files", default=OUTPUT_DIR)
parser.add_argument('--prepare', help="Prepare the TFLite model", action="store_true")
parser.add_argument('--evaluate', help="Evaluate TFLite model against dataset", action="store_true")
opts = parser.parse_args()
if opts.prepare:
prepare_model(opts)
if opts.evaluate:
evaluate_mobilebert(opts)
|
googleinterns/chrome-on-device-ml
|
experimental/python/mobilebert_model.py
|
Python
|
apache-2.0
| 2,684
|
import re
import tests.helper as th
def passes(out, err):
return all(
[th.reads(err, '/tests/linkat.test'),
th.reads(err, '/tests/null.test'),
th.writes(err, '/tmp/subdir2/hidden/awesome'),
th.mkdirs(err, '/tmp/subdir2/hidden'),
th.count_writes(err, 1),
th.count_readdir(err, 0),
])
needs_symlinks = False
skip_windows = True
|
droundy/bigbro
|
tests/execveat.py
|
Python
|
gpl-2.0
| 391
|
# -*- coding: utf-8 -*-
import sys
from decimal import Decimal
from itertools import product
from math import gcd
import warnings
import pytest
from pytest import raises as assert_raises
from numpy.testing import (
assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns, assert_array_less,
suppress_warnings)
from numpy import array, arange
import numpy as np
from scipy.fft import fft
from scipy.ndimage.filters import correlate1d
from scipy.optimize import fmin, linear_sum_assignment
from scipy import signal
from scipy.signal import (
correlate, correlation_lags, convolve, convolve2d,
fftconvolve, oaconvolve, choose_conv_method,
hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos,
invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt,
sosfilt_zi, tf2zpk, BadCoefficients, detrend, unique_roots, residue,
residuez)
from scipy.signal.windows import hann
from scipy.signal.signaltools import (_filtfilt_gust, _compute_factors,
_group_poles)
from scipy.signal._upfirdn import _upfirdn_modes
from scipy._lib import _testutils
class _TestConvolve(object):
def test_basic(self):
a = [3, 4, 5, 6, 5, 4]
b = [1, 2, 3]
c = convolve(a, b)
assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12]))
def test_same(self):
a = [3, 4, 5]
b = [1, 2, 3, 4]
c = convolve(a, b, mode="same")
assert_array_equal(c, array([10, 22, 34]))
def test_same_eq(self):
a = [3, 4, 5]
b = [1, 2, 3]
c = convolve(a, b, mode="same")
assert_array_equal(c, array([10, 22, 22]))
def test_complex(self):
x = array([1 + 1j, 2 + 1j, 3 + 1j])
y = array([1 + 1j, 2 + 1j])
z = convolve(x, y)
assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j]))
def test_zero_rank(self):
a = 1289
b = 4567
c = convolve(a, b)
assert_equal(c, a * b)
def test_broadcastable(self):
a = np.arange(27).reshape(3, 3, 3)
b = np.arange(3)
for i in range(3):
b_shape = [1]*3
b_shape[i] = 3
x = convolve(a, b.reshape(b_shape), method='direct')
y = convolve(a, b.reshape(b_shape), method='fft')
assert_allclose(x, y)
def test_single_element(self):
a = array([4967])
b = array([3920])
c = convolve(a, b)
assert_equal(c, a * b)
def test_2d_arrays(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve(a, b)
d = array([[2, 7, 16, 17, 12],
[10, 30, 62, 58, 38],
[12, 31, 58, 49, 30]])
assert_array_equal(c, d)
def test_input_swapping(self):
small = arange(8).reshape(2, 2, 2)
big = 1j * arange(27).reshape(3, 3, 3)
big += arange(27)[::-1].reshape(3, 3, 3)
out_array = array(
[[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j],
[52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j],
[46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j],
[40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]],
[[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j],
[282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j],
[246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j],
[142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]],
[[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j],
[174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j],
[138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j],
[70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]],
[[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j],
[68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j],
[38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j],
[12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]])
assert_array_equal(convolve(small, big, 'full'), out_array)
assert_array_equal(convolve(big, small, 'full'), out_array)
assert_array_equal(convolve(small, big, 'same'),
out_array[1:3, 1:3, 1:3])
assert_array_equal(convolve(big, small, 'same'),
out_array[0:3, 0:3, 0:3])
assert_array_equal(convolve(small, big, 'valid'),
out_array[1:3, 1:3, 1:3])
assert_array_equal(convolve(big, small, 'valid'),
out_array[1:3, 1:3, 1:3])
def test_invalid_params(self):
a = [3, 4, 5]
b = [1, 2, 3]
assert_raises(ValueError, convolve, a, b, mode='spam')
assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft')
assert_raises(ValueError, convolve, a, b, mode='ham', method='direct')
assert_raises(ValueError, convolve, a, b, mode='full', method='bacon')
assert_raises(ValueError, convolve, a, b, mode='same', method='bacon')
class TestConvolve(_TestConvolve):
def test_valid_mode2(self):
# See gh-5897
a = [1, 2, 3, 6, 5, 3]
b = [2, 3, 4, 5, 3, 4, 2, 2, 1]
expected = [70, 78, 73, 65]
out = convolve(a, b, 'valid')
assert_array_equal(out, expected)
out = convolve(b, a, 'valid')
assert_array_equal(out, expected)
a = [1 + 5j, 2 - 1j, 3 + 0j]
b = [2 - 3j, 1 + 0j]
expected = [2 - 3j, 8 - 10j]
out = convolve(a, b, 'valid')
assert_array_equal(out, expected)
out = convolve(b, a, 'valid')
assert_array_equal(out, expected)
def test_same_mode(self):
a = [1, 2, 3, 3, 1, 2]
b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3]
c = convolve(a, b, 'same')
d = array([57, 61, 63, 57, 45, 36])
assert_array_equal(c, d)
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'})
def test_convolve_method(self, n=100):
types = sum([t for _, t in np.sctypes.items()], [])
types = {np.dtype(t).name for t in types}
        # These types include 'bool' and all precisions (int8, float32, etc.)
        # The types removed below raise errors in correlate or fftconvolve.
for dtype in ['complex256', 'complex192', 'float128', 'float96',
'str', 'void', 'bytes', 'object', 'unicode', 'string']:
if dtype in types:
types.remove(dtype)
args = [(t1, t2, mode) for t1 in types for t2 in types
for mode in ['valid', 'full', 'same']]
        # These are random arrays, which makes the test much stronger than
        # testing by convolving two np.ones arrays
np.random.seed(42)
array_types = {'i': np.random.choice([0, 1], size=n),
'f': np.random.randn(n)}
array_types['b'] = array_types['u'] = array_types['i']
array_types['c'] = array_types['f'] + 0.5j*array_types['f']
for t1, t2, mode in args:
x1 = array_types[np.dtype(t1).kind].astype(t1)
x2 = array_types[np.dtype(t2).kind].astype(t2)
results = {key: convolve(x1, x2, method=key, mode=mode)
for key in ['fft', 'direct']}
assert_equal(results['fft'].dtype, results['direct'].dtype)
if 'bool' in t1 and 'bool' in t2:
assert_equal(choose_conv_method(x1, x2), 'direct')
continue
            # Found by experiment: approximately the smallest (rtol, atol)
            # values for which these tests pass.
if any([t in {'complex64', 'float32'} for t in [t1, t2]]):
kwargs = {'rtol': 1.0e-4, 'atol': 1e-6}
elif 'float16' in [t1, t2]:
# atol is default for np.allclose
kwargs = {'rtol': 1e-3, 'atol': 1e-3}
else:
# defaults for np.allclose (different from assert_allclose)
kwargs = {'rtol': 1e-5, 'atol': 1e-8}
assert_allclose(results['fft'], results['direct'], **kwargs)
def test_convolve_method_large_input(self):
        # This is really a test that convolving two large integers agrees
        # between the fft and direct methods while the values are still
        # exactly representable.
for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]:
z = np.array([2**n], dtype=np.int64)
fft = convolve(z, z, method='fft')
direct = convolve(z, z, method='direct')
            # this is where integer precision starts to matter; issue #6076
            # has more detail, and hopefully more tests once it is resolved
if n < 50:
assert_equal(fft, direct)
assert_equal(fft, 2**(2*n))
assert_equal(direct, 2**(2*n))
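    def _fft_precision_demo(self):
        # Editor's sketch (hypothetical helper, not part of the original
        # suite): float64 has a 53-bit significand, so integers beyond 2**53
        # are not all exactly representable. The fft method routes integer
        # data through float64, which is why exactness above is asserted
        # only while the values stay small enough.
        assert np.float64(2**53) == np.float64(2**53 + 1)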
def test_mismatched_dims(self):
# Input arrays should have the same number of dimensions
assert_raises(ValueError, convolve, [1], 2, method='direct')
assert_raises(ValueError, convolve, 1, [2], method='direct')
assert_raises(ValueError, convolve, [1], 2, method='fft')
assert_raises(ValueError, convolve, 1, [2], method='fft')
assert_raises(ValueError, convolve, [1], [[2]])
assert_raises(ValueError, convolve, [3], 2)
class _TestConvolve2d(object):
def test_2d_arrays(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
d = array([[2, 7, 16, 17, 12],
[10, 30, 62, 58, 38],
[12, 31, 58, 49, 30]])
e = convolve2d(a, b)
assert_array_equal(e, d)
def test_valid_mode(self):
e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
f = [[1, 2, 3], [3, 4, 5]]
h = array([[62, 80, 98, 116, 134]])
g = convolve2d(e, f, 'valid')
assert_array_equal(g, h)
# See gh-5897
g = convolve2d(f, e, 'valid')
assert_array_equal(g, h)
    def test_valid_mode_complex(self):
e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j
h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]])
g = convolve2d(e, f, 'valid')
assert_array_almost_equal(g, h)
# See gh-5897
g = convolve2d(f, e, 'valid')
assert_array_equal(g, h)
def test_fillvalue(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
fillval = 1
c = convolve2d(a, b, 'full', 'fill', fillval)
d = array([[24, 26, 31, 34, 32],
[28, 40, 62, 64, 52],
[32, 46, 67, 62, 48]])
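        # Corner check: with fillvalue 1 and a[0, 0] == 1, the first output
        # sample sees an all-ones window, so d[0, 0] == sum(b) == 24.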
assert_array_equal(c, d)
def test_fillvalue_deprecations(self):
# Deprecated 2017-07, scipy version 1.0.0
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, "Casting complex values to real")
r = sup.record(DeprecationWarning, "could not cast `fillvalue`")
convolve2d([[1]], [[1, 2]], fillvalue=1j)
assert_(len(r) == 1)
warnings.filterwarnings(
"error", message="could not cast `fillvalue`",
category=DeprecationWarning)
assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=1j)
with suppress_warnings():
warnings.filterwarnings(
"always", message="`fillvalue` must be scalar or an array ",
category=DeprecationWarning)
assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=[1, 2])
warnings.filterwarnings(
"error", message="`fillvalue` must be scalar or an array ",
category=DeprecationWarning)
assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=[1, 2])
def test_fillvalue_empty(self):
# Check that fillvalue being empty raises an error:
assert_raises(ValueError, convolve2d, [[1]], [[1, 2]],
fillvalue=[])
def test_wrap_boundary(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve2d(a, b, 'full', 'wrap')
d = array([[80, 80, 74, 80, 80],
[68, 68, 62, 68, 68],
[80, 80, 74, 80, 80]])
assert_array_equal(c, d)
def test_sym_boundary(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve2d(a, b, 'full', 'symm')
d = array([[34, 30, 44, 62, 66],
[52, 48, 62, 80, 84],
[82, 78, 92, 110, 114]])
assert_array_equal(c, d)
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'})
class TestConvolve2d(_TestConvolve2d):
def test_same_mode(self):
e = [[1, 2, 3], [3, 4, 5]]
f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
g = convolve2d(e, f, 'same')
h = array([[22, 28, 34],
[80, 98, 116]])
assert_array_equal(g, h)
def test_valid_mode2(self):
# See gh-5897
e = [[1, 2, 3], [3, 4, 5]]
f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
expected = [[62, 80, 98, 116, 134]]
out = convolve2d(e, f, 'valid')
assert_array_equal(out, expected)
out = convolve2d(f, e, 'valid')
assert_array_equal(out, expected)
e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]]
f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]]
expected = [[27 - 1j, 46. + 2j]]
out = convolve2d(e, f, 'valid')
assert_array_equal(out, expected)
# See gh-5897
out = convolve2d(f, e, 'valid')
assert_array_equal(out, expected)
def test_consistency_convolve_funcs(self):
# Compare np.convolve, signal.convolve, signal.convolve2d
a = np.arange(5)
b = np.array([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
assert_almost_equal(np.convolve(a, b, mode=mode),
signal.convolve(a, b, mode=mode))
assert_almost_equal(np.squeeze(
signal.convolve2d([a], [b], mode=mode)),
signal.convolve(a, b, mode=mode))
def test_invalid_dims(self):
assert_raises(ValueError, convolve2d, 3, 4)
assert_raises(ValueError, convolve2d, [3], [4])
assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]])
@pytest.mark.slow
@pytest.mark.xfail_on_32bit("Can't create large array for test")
def test_large_array(self):
# Test indexing doesn't overflow an int (gh-10761)
n = 2**31 // (1000 * np.int64().itemsize)
_testutils.check_free_memory(2 * n * 1001 * np.int64().itemsize / 1e6)
# Create a chequered pattern of 1s and 0s
a = np.zeros(1001 * n, dtype=np.int64)
a[::2] = 1
a = np.lib.stride_tricks.as_strided(a, shape=(n, 1000), strides=(8008, 8))
count = signal.convolve2d(a, [[1, 1]])
fails = np.where(count > 1)
assert fails[0].size == 0
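    def _as_strided_demo(self):
        # Editor's sketch (hypothetical helper, not part of the original
        # suite): how the as_strided call above builds rows without copying.
        # With int64 (8 bytes), a row stride of 8008 bytes advances 1001
        # elements, so consecutive length-1000 rows are separated by a
        # one-element gap in the flat buffer.
        buf = np.arange(12, dtype=np.int64)
        view = np.lib.stride_tricks.as_strided(buf, shape=(3, 4),
                                               strides=(4 * 8, 8))
        assert_array_equal(view, buf.reshape(3, 4))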
class TestFFTConvolve(object):
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_real(self, axes):
a = array([1, 2, 3])
expected = array([1, 4, 10, 12, 9.])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_real_axes(self, axes):
a = array([1, 2, 3])
expected = array([1, 4, 10, 12, 9.])
a = np.tile(a, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_complex(self, axes):
a = array([1 + 1j, 2 + 2j, 3 + 3j])
expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_complex_axes(self, axes):
a = array([1 + 1j, 2 + 2j, 3 + 3j])
expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
a = np.tile(a, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['',
None,
[0, 1],
[1, 0],
[0, -1],
[-1, 0],
[-2, 1],
[1, -2],
[-2, -1],
[-1, -2]])
def test_2d_real_same(self, axes):
a = array([[1, 2, 3],
[4, 5, 6]])
expected = array([[1, 4, 10, 12, 9],
[8, 26, 56, 54, 36],
[16, 40, 73, 60, 36]])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [[1, 2],
[2, 1],
[1, -1],
[-1, 1],
[-2, 2],
[2, -2],
[-2, -1],
[-1, -2]])
def test_2d_real_same_axes(self, axes):
a = array([[1, 2, 3],
[4, 5, 6]])
expected = array([[1, 4, 10, 12, 9],
[8, 26, 56, 54, 36],
[16, 40, 73, 60, 36]])
a = np.tile(a, [2, 1, 1])
expected = np.tile(expected, [2, 1, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['',
None,
[0, 1],
[1, 0],
[0, -1],
[-1, 0],
[-2, 1],
[1, -2],
[-2, -1],
[-1, -2]])
def test_2d_complex_same(self, axes):
a = array([[1 + 2j, 3 + 4j, 5 + 6j],
[2 + 1j, 4 + 3j, 6 + 5j]])
expected = array([
[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
[10j, 44j, 118j, 156j, 122j],
[3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]
])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [[1, 2],
[2, 1],
[1, -1],
[-1, 1],
[-2, 2],
[2, -2],
[-2, -1],
[-1, -2]])
def test_2d_complex_same_axes(self, axes):
a = array([[1 + 2j, 3 + 4j, 5 + 6j],
[2 + 1j, 4 + 3j, 6 + 5j]])
expected = array([
[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
[10j, 44j, 118j, 156j, 122j],
[3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]
])
a = np.tile(a, [2, 1, 1])
expected = np.tile(expected, [2, 1, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_real_same_mode(self, axes):
a = array([1, 2, 3])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected_1 = array([35., 41., 47.])
expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])
if axes == '':
out = fftconvolve(a, b, 'same')
else:
out = fftconvolve(a, b, 'same', axes=axes)
assert_array_almost_equal(out, expected_1)
if axes == '':
out = fftconvolve(b, a, 'same')
else:
out = fftconvolve(b, a, 'same', axes=axes)
assert_array_almost_equal(out, expected_2)
@pytest.mark.parametrize('axes', [1, -1, [1], [-1]])
def test_real_same_mode_axes(self, axes):
a = array([1, 2, 3])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected_1 = array([35., 41., 47.])
expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected_1 = np.tile(expected_1, [2, 1])
expected_2 = np.tile(expected_2, [2, 1])
out = fftconvolve(a, b, 'same', axes=axes)
assert_array_almost_equal(out, expected_1)
out = fftconvolve(b, a, 'same', axes=axes)
assert_array_almost_equal(out, expected_2)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_valid_mode_real(self, axes):
# See gh-5897
a = array([3, 2, 1])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = array([24., 31., 41., 43., 49., 25., 12.])
if axes == '':
out = fftconvolve(a, b, 'valid')
else:
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
if axes == '':
out = fftconvolve(b, a, 'valid')
else:
out = fftconvolve(b, a, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1]])
def test_valid_mode_real_axes(self, axes):
# See gh-5897
a = array([3, 2, 1])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = array([24., 31., 41., 43., 49., 25., 12.])
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_valid_mode_complex(self, axes):
a = array([3 - 1j, 2 + 7j, 1 + 0j])
b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j])
if axes == '':
out = fftconvolve(a, b, 'valid')
else:
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
if axes == '':
out = fftconvolve(b, a, 'valid')
else:
out = fftconvolve(b, a, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_valid_mode_complex_axes(self, axes):
a = array([3 - 1j, 2 + 7j, 1 + 0j])
b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j])
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
out = fftconvolve(b, a, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
def test_valid_mode_ignore_nonaxes(self):
# See gh-5897
a = array([3, 2, 1])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = array([24., 31., 41., 43., 49., 25., 12.])
a = np.tile(a, [2, 1])
b = np.tile(b, [1, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'valid', axes=1)
assert_array_almost_equal(out, expected)
def test_empty(self):
# Regression test for #1745: crashes with 0-length input.
assert_(fftconvolve([], []).size == 0)
assert_(fftconvolve([5, 6], []).size == 0)
assert_(fftconvolve([], [7]).size == 0)
def test_zero_rank(self):
a = array(4967)
b = array(3920)
out = fftconvolve(a, b)
assert_equal(out, a * b)
def test_single_element(self):
a = array([4967])
b = array([3920])
out = fftconvolve(a, b)
assert_equal(out, a * b)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_random_data(self, axes):
np.random.seed(1234)
a = np.random.rand(1233) + 1j * np.random.rand(1233)
b = np.random.rand(1321) + 1j * np.random.rand(1321)
expected = np.convolve(a, b, 'full')
if axes == '':
out = fftconvolve(a, b, 'full')
else:
out = fftconvolve(a, b, 'full', axes=axes)
assert_(np.allclose(out, expected, rtol=1e-10))
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_random_data_axes(self, axes):
np.random.seed(1234)
a = np.random.rand(1233) + 1j * np.random.rand(1233)
b = np.random.rand(1321) + 1j * np.random.rand(1321)
expected = np.convolve(a, b, 'full')
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'full', axes=axes)
assert_(np.allclose(out, expected, rtol=1e-10))
@pytest.mark.parametrize('axes', [[1, 4],
[4, 1],
[1, -1],
[-1, 1],
[-4, 4],
[4, -4],
[-4, -1],
[-1, -4]])
def test_random_data_multidim_axes(self, axes):
a_shape, b_shape = (123, 22), (132, 11)
np.random.seed(1234)
a = np.random.rand(*a_shape) + 1j * np.random.rand(*a_shape)
b = np.random.rand(*b_shape) + 1j * np.random.rand(*b_shape)
expected = convolve2d(a, b, 'full')
a = a[:, :, None, None, None]
b = b[:, :, None, None, None]
expected = expected[:, :, None, None, None]
a = np.rollaxis(a.swapaxes(0, 2), 1, 5)
b = np.rollaxis(b.swapaxes(0, 2), 1, 5)
expected = np.rollaxis(expected.swapaxes(0, 2), 1, 5)
# use 1 for dimension 2 in a and 3 in b to test broadcasting
a = np.tile(a, [2, 1, 3, 1, 1])
b = np.tile(b, [2, 1, 1, 4, 1])
expected = np.tile(expected, [2, 1, 3, 4, 1])
out = fftconvolve(a, b, 'full', axes=axes)
assert_allclose(out, expected, rtol=1e-10, atol=1e-10)
@pytest.mark.slow
@pytest.mark.parametrize(
'n',
list(range(1, 100)) +
list(range(1000, 1500)) +
np.random.RandomState(1234).randint(1001, 10000, 5).tolist())
def test_many_sizes(self, n):
a = np.random.rand(n) + 1j * np.random.rand(n)
b = np.random.rand(n) + 1j * np.random.rand(n)
expected = np.convolve(a, b, 'full')
out = fftconvolve(a, b, 'full')
assert_allclose(out, expected, atol=1e-10)
out = fftconvolve(a, b, 'full', axes=[0])
assert_allclose(out, expected, atol=1e-10)
def fftconvolve_err(*args, **kwargs):
    # Stand-in that is monkeypatched over fftconvolve in the tests below to
    # prove that oaconvolve did its own work instead of silently falling back.
    raise RuntimeError('Fell back to fftconvolve')
def gen_oa_shapes(sizes):
return [(a, b) for a, b in product(sizes, repeat=2)
if abs(a - b) > 3]
def gen_oa_shapes_2d(sizes):
shapes0 = gen_oa_shapes(sizes)
shapes1 = gen_oa_shapes(sizes)
shapes = [ishapes0+ishapes1 for ishapes0, ishapes1 in
zip(shapes0, shapes1)]
    modes = ['full', 'valid', 'same']
    # 'valid' mode needs one input to be at least as large as the other in
    # every dimension, so keep only shape pairs that are ordered consistently.
    return [ishapes+(imode,) for ishapes, imode in product(shapes, modes)
            if imode != 'valid' or
            (ishapes[0] > ishapes[1] and ishapes[2] > ishapes[3]) or
            (ishapes[0] < ishapes[1] and ishapes[2] < ishapes[3])]
def gen_oa_shapes_eq(sizes):
return [(a, b) for a, b in product(sizes, repeat=2)
if a >= b]
class TestOAConvolve(object):
@pytest.mark.slow()
@pytest.mark.parametrize('shape_a_0, shape_b_0',
gen_oa_shapes_eq(list(range(100)) +
list(range(100, 1000, 23)))
)
def test_real_manylens(self, shape_a_0, shape_b_0):
a = np.random.rand(shape_a_0)
b = np.random.rand(shape_b_0)
expected = fftconvolve(a, b)
out = oaconvolve(a, b)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('shape_a_0, shape_b_0',
gen_oa_shapes([50, 47, 6, 4, 1]))
@pytest.mark.parametrize('is_complex', [True, False])
@pytest.mark.parametrize('mode', ['full', 'valid', 'same'])
def test_1d_noaxes(self, shape_a_0, shape_b_0,
is_complex, mode, monkeypatch):
a = np.random.rand(shape_a_0)
b = np.random.rand(shape_b_0)
if is_complex:
a = a + 1j*np.random.rand(shape_a_0)
b = b + 1j*np.random.rand(shape_b_0)
expected = fftconvolve(a, b, mode=mode)
monkeypatch.setattr(signal.signaltools, 'fftconvolve',
fftconvolve_err)
out = oaconvolve(a, b, mode=mode)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [0, 1])
@pytest.mark.parametrize('shape_a_0, shape_b_0',
gen_oa_shapes([50, 47, 6, 4]))
@pytest.mark.parametrize('shape_a_extra', [1, 3])
@pytest.mark.parametrize('shape_b_extra', [1, 3])
@pytest.mark.parametrize('is_complex', [True, False])
@pytest.mark.parametrize('mode', ['full', 'valid', 'same'])
def test_1d_axes(self, axes, shape_a_0, shape_b_0,
shape_a_extra, shape_b_extra,
is_complex, mode, monkeypatch):
ax_a = [shape_a_extra]*2
ax_b = [shape_b_extra]*2
ax_a[axes] = shape_a_0
ax_b[axes] = shape_b_0
a = np.random.rand(*ax_a)
b = np.random.rand(*ax_b)
if is_complex:
a = a + 1j*np.random.rand(*ax_a)
b = b + 1j*np.random.rand(*ax_b)
expected = fftconvolve(a, b, mode=mode, axes=axes)
monkeypatch.setattr(signal.signaltools, 'fftconvolve',
fftconvolve_err)
out = oaconvolve(a, b, mode=mode, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('shape_a_0, shape_b_0, '
'shape_a_1, shape_b_1, mode',
gen_oa_shapes_2d([50, 47, 6, 4]))
@pytest.mark.parametrize('is_complex', [True, False])
def test_2d_noaxes(self, shape_a_0, shape_b_0,
shape_a_1, shape_b_1, mode,
is_complex, monkeypatch):
a = np.random.rand(shape_a_0, shape_a_1)
b = np.random.rand(shape_b_0, shape_b_1)
if is_complex:
a = a + 1j*np.random.rand(shape_a_0, shape_a_1)
b = b + 1j*np.random.rand(shape_b_0, shape_b_1)
expected = fftconvolve(a, b, mode=mode)
monkeypatch.setattr(signal.signaltools, 'fftconvolve',
fftconvolve_err)
out = oaconvolve(a, b, mode=mode)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [[0, 1], [0, 2], [1, 2]])
@pytest.mark.parametrize('shape_a_0, shape_b_0, '
'shape_a_1, shape_b_1, mode',
gen_oa_shapes_2d([50, 47, 6, 4]))
@pytest.mark.parametrize('shape_a_extra', [1, 3])
@pytest.mark.parametrize('shape_b_extra', [1, 3])
@pytest.mark.parametrize('is_complex', [True, False])
def test_2d_axes(self, axes, shape_a_0, shape_b_0,
shape_a_1, shape_b_1, mode,
shape_a_extra, shape_b_extra,
is_complex, monkeypatch):
ax_a = [shape_a_extra]*3
ax_b = [shape_b_extra]*3
ax_a[axes[0]] = shape_a_0
ax_b[axes[0]] = shape_b_0
ax_a[axes[1]] = shape_a_1
ax_b[axes[1]] = shape_b_1
a = np.random.rand(*ax_a)
b = np.random.rand(*ax_b)
if is_complex:
a = a + 1j*np.random.rand(*ax_a)
b = b + 1j*np.random.rand(*ax_b)
expected = fftconvolve(a, b, mode=mode, axes=axes)
monkeypatch.setattr(signal.signaltools, 'fftconvolve',
fftconvolve_err)
out = oaconvolve(a, b, mode=mode, axes=axes)
assert_array_almost_equal(out, expected)
def test_empty(self):
# Regression test for #1745: crashes with 0-length input.
assert_(oaconvolve([], []).size == 0)
assert_(oaconvolve([5, 6], []).size == 0)
assert_(oaconvolve([], [7]).size == 0)
def test_zero_rank(self):
a = array(4967)
b = array(3920)
out = oaconvolve(a, b)
assert_equal(out, a * b)
def test_single_element(self):
a = array([4967])
b = array([3920])
out = oaconvolve(a, b)
assert_equal(out, a * b)
class TestAllFreqConvolves(object):
@pytest.mark.parametrize('convapproach',
[fftconvolve, oaconvolve])
def test_invalid_shapes(self, convapproach):
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
with assert_raises(ValueError,
match="For 'valid' mode, one must be at least "
"as large as the other in every dimension"):
convapproach(a, b, mode='valid')
@pytest.mark.parametrize('convapproach',
[fftconvolve, oaconvolve])
def test_invalid_shapes_axes(self, convapproach):
a = np.zeros([5, 6, 2, 1])
b = np.zeros([5, 6, 3, 1])
with assert_raises(ValueError,
match=r"incompatible shapes for in1 and in2:"
r" \(5L?, 6L?, 2L?, 1L?\) and"
r" \(5L?, 6L?, 3L?, 1L?\)"):
convapproach(a, b, axes=[0, 1])
@pytest.mark.parametrize('a,b',
[([1], 2),
(1, [2]),
([3], [[2]])])
@pytest.mark.parametrize('convapproach',
[fftconvolve, oaconvolve])
def test_mismatched_dims(self, a, b, convapproach):
with assert_raises(ValueError,
match="in1 and in2 should have the same"
" dimensionality"):
convapproach(a, b)
@pytest.mark.parametrize('convapproach',
[fftconvolve, oaconvolve])
def test_invalid_flags(self, convapproach):
with assert_raises(ValueError,
match="acceptable mode flags are 'valid',"
" 'same', or 'full'"):
convapproach([1], [2], mode='chips')
with assert_raises(ValueError,
match="when provided, axes cannot be empty"):
convapproach([1], [2], axes=[])
with assert_raises(ValueError, match="axes must be a scalar or "
"iterable of integers"):
convapproach([1], [2], axes=[[1, 2], [3, 4]])
with assert_raises(ValueError, match="axes must be a scalar or "
"iterable of integers"):
convapproach([1], [2], axes=[1., 2., 3., 4.])
with assert_raises(ValueError,
match="axes exceeds dimensionality of input"):
convapproach([1], [2], axes=[1])
with assert_raises(ValueError,
match="axes exceeds dimensionality of input"):
convapproach([1], [2], axes=[-2])
with assert_raises(ValueError,
match="all axes must be unique"):
convapproach([1], [2], axes=[0, 0])
@pytest.mark.parametrize('dtype', [np.longfloat, np.longcomplex])
def test_longdtype_input(self, dtype):
x = np.random.random((27, 27)).astype(dtype)
y = np.random.random((4, 4)).astype(dtype)
if np.iscomplexobj(dtype()):
x += .1j
y -= .1j
res = fftconvolve(x, y)
assert_allclose(res, convolve(x, y, method='direct'))
assert res.dtype == dtype
class TestMedFilt(object):
def test_basic(self):
f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46],
[50, 50, 50, 50, 50, 0, 72, 77, 68, 66],
[50, 50, 50, 50, 50, 46, 47, 19, 64, 77],
[50, 50, 50, 50, 50, 42, 15, 29, 95, 35],
[50, 50, 50, 50, 50, 46, 34, 9, 21, 66],
[70, 97, 28, 68, 78, 77, 61, 58, 71, 42],
[64, 53, 44, 29, 68, 32, 19, 68, 24, 84],
[3, 33, 53, 67, 1, 78, 74, 55, 12, 83],
[7, 11, 46, 70, 60, 47, 24, 43, 61, 26],
[32, 61, 88, 7, 39, 4, 92, 64, 45, 61]]
d = signal.medfilt(f, [7, 3])
e = signal.medfilt2d(np.array(f, float), [7, 3])
assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0],
[0, 50, 50, 50, 50, 42, 19, 21, 29, 0],
[50, 50, 50, 50, 50, 47, 34, 34, 46, 35],
[50, 50, 50, 50, 50, 50, 42, 47, 64, 42],
[50, 50, 50, 50, 50, 50, 46, 55, 64, 35],
[33, 50, 50, 50, 50, 47, 46, 43, 55, 26],
[32, 50, 50, 50, 50, 47, 46, 45, 55, 26],
[7, 46, 50, 50, 47, 46, 46, 43, 45, 21],
[0, 32, 33, 39, 32, 32, 43, 43, 43, 0],
[0, 7, 11, 7, 4, 4, 19, 19, 24, 0]])
assert_array_equal(d, e)
def test_none(self):
# Ticket #1124. Ensure this does not segfault.
with pytest.warns(UserWarning):
signal.medfilt(None)
        # Expand on this test to guard against a regression with contiguous
        # numpy arrays that have unusual strides. The stride value below would
        # point into the wrong memory if it were used (it does not need to be)
dummy = np.arange(10, dtype=np.float64)
a = dummy[5:6]
a.strides = 16
assert_(signal.medfilt(a, 1) == 5.)
def test_refcounting(self):
# Check a refcounting-related crash
a = Decimal(123)
x = np.array([a, a], dtype=object)
if hasattr(sys, 'getrefcount'):
n = 2 * sys.getrefcount(a)
else:
n = 10
# Shouldn't segfault:
with pytest.warns(UserWarning):
for j in range(n):
signal.medfilt(x)
if hasattr(sys, 'getrefcount'):
assert_(sys.getrefcount(a) < n)
assert_equal(x, [a, a])
class TestWiener(object):
def test_basic(self):
g = array([[5, 6, 4, 3],
[3, 5, 6, 2],
[2, 3, 5, 6],
[1, 6, 9, 7]], 'd')
h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667],
[2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],
[2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],
[1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]])
assert_array_almost_equal(signal.wiener(g), h, decimal=6)
assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6)
padtype_options = ["mean", "median", "minimum", "maximum", "line"]
padtype_options += _upfirdn_modes
class TestResample(object):
def test_basic(self):
# Some basic tests
# Regression test for issue #3603.
        # window.shape must equal sig.shape[0]
sig = np.arange(128)
num = 256
win = signal.get_window(('kaiser', 8.0), 160)
assert_raises(ValueError, signal.resample, sig, num, window=win)
# Other degenerate conditions
assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1)
assert_raises(ValueError, signal.resample_poly, sig, 1, 0)
assert_raises(ValueError, signal.resample_poly, sig, 2, 1, padtype='')
assert_raises(ValueError, signal.resample_poly, sig, 2, 1,
padtype='mean', cval=10)
# test for issue #6505 - should not modify window.shape when axis ≠ 0
sig2 = np.tile(np.arange(160), (2, 1))
signal.resample(sig2, num, axis=-1, window=win)
assert_(win.shape == (160,))
@pytest.mark.parametrize('window', (None, 'hamming'))
@pytest.mark.parametrize('N', (20, 19))
@pytest.mark.parametrize('num', (100, 101, 10, 11))
def test_rfft(self, N, num, window):
        # Make sure the rfft speed-up gives the same result as the plain
        # fft path
x = np.linspace(0, 10, N, endpoint=False)
y = np.cos(-x**2/6.0)
assert_allclose(signal.resample(y, num, window=window),
signal.resample(y + 0j, num, window=window).real)
y = np.array([np.cos(-x**2/6.0), np.sin(-x**2/6.0)])
y_complex = y + 0j
assert_allclose(
signal.resample(y, num, axis=1, window=window),
signal.resample(y_complex, num, axis=1, window=window).real,
atol=1e-9)
def test_input_domain(self):
# Test if both input domain modes produce the same results.
tsig = np.arange(256) + 0j
fsig = fft(tsig)
num = 256
assert_allclose(
signal.resample(fsig, num, domain='freq'),
signal.resample(tsig, num, domain='time'),
atol=1e-9)
@pytest.mark.parametrize('nx', (1, 2, 3, 5, 8))
@pytest.mark.parametrize('ny', (1, 2, 3, 5, 8))
@pytest.mark.parametrize('dtype', ('float', 'complex'))
def test_dc(self, nx, ny, dtype):
x = np.array([1] * nx, dtype)
y = signal.resample(x, ny)
assert_allclose(y, [1] * ny)
@pytest.mark.parametrize('padtype', padtype_options)
def test_mutable_window(self, padtype):
# Test that a mutable window is not modified
impulse = np.zeros(3)
window = np.random.RandomState(0).randn(2)
window_orig = window.copy()
signal.resample_poly(impulse, 5, 1, window=window, padtype=padtype)
assert_array_equal(window, window_orig)
@pytest.mark.parametrize('padtype', padtype_options)
def test_output_float32(self, padtype):
# Test that float32 inputs yield a float32 output
x = np.arange(10, dtype=np.float32)
h = np.array([1, 1, 1], dtype=np.float32)
y = signal.resample_poly(x, 1, 2, window=h, padtype=padtype)
        assert y.dtype == np.float32
@pytest.mark.parametrize(
"method, ext, padtype",
[("fft", False, None)]
+ list(
product(
["polyphase"], [False, True], padtype_options,
)
),
)
def test_resample_methods(self, method, ext, padtype):
# Test resampling of sinusoids and random noise (1-sec)
rate = 100
rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201]
# Sinusoids, windowed to avoid edge artifacts
t = np.arange(rate) / float(rate)
freqs = np.array((1., 10., 40.))[:, np.newaxis]
x = np.sin(2 * np.pi * freqs * t) * hann(rate)
for rate_to in rates_to:
t_to = np.arange(rate_to) / float(rate_to)
y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to)
if method == 'fft':
y_resamps = signal.resample(x, rate_to, axis=-1)
else:
if ext and rate_to != rate:
# Match default window design
g = gcd(rate_to, rate)
up = rate_to // g
down = rate // g
max_rate = max(up, down)
f_c = 1. / max_rate
half_len = 10 * max_rate
window = signal.firwin(2 * half_len + 1, f_c,
window=('kaiser', 5.0))
polyargs = {'window': window, 'padtype': padtype}
else:
polyargs = {'padtype': padtype}
y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1,
**polyargs)
for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs):
if freq >= 0.5 * rate_to:
y_to.fill(0.) # mostly low-passed away
if padtype in ['minimum', 'maximum']:
assert_allclose(y_resamp, y_to, atol=3e-1)
else:
assert_allclose(y_resamp, y_to, atol=1e-3)
else:
assert_array_equal(y_to.shape, y_resamp.shape)
corr = np.corrcoef(y_to, y_resamp)[0, 1]
assert_(corr > 0.99, msg=(corr, rate, rate_to))
# Random data
rng = np.random.RandomState(0)
        x = hann(rate) * np.cumsum(rng.randn(rate))  # low-pass, windowed
for rate_to in rates_to:
# random data
t_to = np.arange(rate_to) / float(rate_to)
y_to = np.interp(t_to, t, x)
if method == 'fft':
y_resamp = signal.resample(x, rate_to)
else:
y_resamp = signal.resample_poly(x, rate_to, rate,
padtype=padtype)
assert_array_equal(y_to.shape, y_resamp.shape)
corr = np.corrcoef(y_to, y_resamp)[0, 1]
assert_(corr > 0.99, msg=corr)
        # More tests of the fft method (scipy master at 0.18.1 failed these)
if method == 'fft':
x1 = np.array([1.+0.j, 0.+0.j])
y1_test = signal.resample(x1, 4)
# upsampling a complex array
y1_true = np.array([1.+0.j, 0.5+0.j, 0.+0.j, 0.5+0.j])
assert_allclose(y1_test, y1_true, atol=1e-12)
x2 = np.array([1., 0.5, 0., 0.5])
y2_test = signal.resample(x2, 2) # downsampling a real array
y2_true = np.array([1., 0.])
assert_allclose(y2_test, y2_true, atol=1e-12)
def test_poly_vs_filtfilt(self):
# Check that up=1.0 gives same answer as filtfilt + slicing
random_state = np.random.RandomState(17)
try_types = (int, np.float32, np.complex64, float, complex)
size = 10000
down_factors = [2, 11, 79]
for dtype in try_types:
x = random_state.randn(size).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * random_state.randn(size)
            # resample_poly assumes zeros outside of signal, whereas filtfilt
            # can only constant-pad. Make them equivalent:
x[0] = 0
x[-1] = 0
for down in down_factors:
h = signal.firwin(31, 1. / down, window='hamming')
yf = filtfilt(h, 1.0, x, padtype='constant')[::down]
# Need to pass convolved version of filter to resample_poly,
# since filtfilt does forward and backward, but resample_poly
# only goes forward
hc = convolve(h, h[::-1])
y = signal.resample_poly(x, 1, down, window=hc)
assert_allclose(yf, y, atol=1e-7, rtol=1e-7)
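    def _zero_phase_taps(self, h):
        # Editor's sketch (hypothetical helper, not part of the original
        # suite): filtfilt applies the FIR filter forward and backward, so
        # away from the edges its net response equals one forward pass with
        # the taps correlated with themselves -- the `hc` that
        # test_poly_vs_filtfilt hands to resample_poly.
        return convolve(h, h[::-1])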
def test_correlate1d(self):
for down in [2, 4]:
for nx in range(1, 40, down):
for nweights in (32, 33):
x = np.random.random((nx,))
weights = np.random.random((nweights,))
y_g = correlate1d(x, weights[::-1], mode='constant')
y_s = signal.resample_poly(
x, up=1, down=down, window=weights)
assert_allclose(y_g[::down], y_s)
class TestCSpline1DEval(object):
def test_basic(self):
y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
x = arange(len(y))
dx = x[1] - x[0]
cj = signal.cspline1d(y)
x2 = arange(len(y) * 10.0) / 10.0
y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0])
# make sure interpolated values are on knot points
assert_array_almost_equal(y2[::10], y, decimal=5)
def test_complex(self):
# create some smoothly varying complex signal to interpolate
x = np.arange(2)
y = np.zeros(x.shape, dtype=np.complex64)
T = 10.0
f = 1.0 / T
y = np.exp(2.0J * np.pi * f * x)
# get the cspline transform
cy = signal.cspline1d(y)
# determine new test x value and interpolate
xnew = np.array([0.5])
ynew = signal.cspline1d_eval(cy, xnew)
assert_equal(ynew.dtype, y.dtype)
class TestOrderFilt(object):
def test_basic(self):
assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1),
[2, 3, 2])
class _TestLinearFilter(object):
def generate(self, shape):
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
return self.convert_dtype(x)
def convert_dtype(self, arr):
if self.dtype == np.dtype('O'):
arr = np.asarray(arr)
out = np.empty(arr.shape, self.dtype)
iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'],
[['readonly'],['writeonly']])
for x, y in iter:
y[...] = self.type(x[()])
return out
else:
return np.array(arr, self.dtype, copy=False)
def test_rank_1_IIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, -0.5])
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
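        # Worked check: the recurrence 0.5*y[n] - 0.5*y[n-1] = x[n] - x[n-1]
        # gives y[n] = y[n-1] + 2 for a unit-slope ramp, hence y_r above.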
assert_array_almost_equal(lfilter(b, a, x), y_r)
def test_rank_1_FIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, 1])
a = self.convert_dtype([1])
y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.])
assert_array_almost_equal(lfilter(b, a, x), y_r)
def test_rank_1_IIR_init_cond(self):
x = self.generate((6,))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([0.5, -0.5])
zi = self.convert_dtype([1, 2])
y_r = self.convert_dtype([1, 5, 9, 13, 17, 21])
zf_r = self.convert_dtype([13, -10])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_1_FIR_init_cond(self):
x = self.generate((6,))
b = self.convert_dtype([1, 1, 1])
a = self.convert_dtype([1])
zi = self.convert_dtype([1, 1])
y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.])
zf_r = self.convert_dtype([9, 5])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_2_IIR_axis_0(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4],
[6, 4, 2]])
y = lfilter(b, a, x, axis=0)
assert_array_almost_equal(y_r2_a0, y)
def test_rank_2_IIR_axis_1(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12],
[18, -16, 18]])
y = lfilter(b, a, x, axis=1)
assert_array_almost_equal(y_r2_a1, y)
def test_rank_2_IIR_axis_0_init_cond(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
zi = self.convert_dtype(np.ones((4,1)))
y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13],
[19, -17, 19]])
zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis]
y, zf = lfilter(b, a, x, axis=1, zi=zi)
assert_array_almost_equal(y_r2_a0_1, y)
assert_array_almost_equal(zf, zf_r)
def test_rank_2_IIR_axis_1_init_cond(self):
x = self.generate((4,3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
zi = self.convert_dtype(np.ones((1,3)))
y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1],
[1, 3, 5], [5, 3, 1]])
zf_r = self.convert_dtype([[-23, -23, -23]])
y, zf = lfilter(b, a, x, axis=0, zi=zi)
assert_array_almost_equal(y_r2_a0_0, y)
assert_array_almost_equal(zf, zf_r)
def test_rank_3_IIR(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
for axis in range(x.ndim):
y = lfilter(b, a, x, axis)
y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
assert_array_almost_equal(y, y_r)
def test_rank_3_IIR_init_cond(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
for axis in range(x.ndim):
zi_shape = list(x.shape)
zi_shape[axis] = 1
zi = self.convert_dtype(np.ones(zi_shape))
zi1 = self.convert_dtype([1])
y, zf = lfilter(b, a, x, axis, zi)
lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
y_r = np.apply_along_axis(lf0, axis, x)
zf_r = np.apply_along_axis(lf1, axis, x)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_3_FIR(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
for axis in range(x.ndim):
y = lfilter(b, a, x, axis)
y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
assert_array_almost_equal(y, y_r)
def test_rank_3_FIR_init_cond(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
for axis in range(x.ndim):
zi_shape = list(x.shape)
zi_shape[axis] = 2
zi = self.convert_dtype(np.ones(zi_shape))
zi1 = self.convert_dtype([1, 1])
y, zf = lfilter(b, a, x, axis, zi)
lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
y_r = np.apply_along_axis(lf0, axis, x)
zf_r = np.apply_along_axis(lf1, axis, x)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_zi_pseudobroadcast(self):
x = self.generate((4, 5, 20))
b,a = signal.butter(8, 0.2, output='ba')
b = self.convert_dtype(b)
a = self.convert_dtype(a)
zi_size = b.shape[0] - 1
# lfilter requires x.ndim == zi.ndim exactly. However, zi can have
# length 1 dimensions.
zi_full = self.convert_dtype(np.ones((4, 5, zi_size)))
zi_sing = self.convert_dtype(np.ones((1, 1, zi_size)))
y_full, zf_full = lfilter(b, a, x, zi=zi_full)
y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing)
assert_array_almost_equal(y_sing, y_full)
assert_array_almost_equal(zf_full, zf_sing)
# lfilter does not prepend ones
assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size))
def test_scalar_a(self):
# a can be a scalar.
x = self.generate(6)
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
y_r = self.convert_dtype([0, 1, 2, 2, 2, 2])
y = lfilter(b, a[0], x)
assert_array_almost_equal(y, y_r)
def test_zi_some_singleton_dims(self):
        # lfilter doesn't really broadcast (no prepending of 1's), but it
        # does do singleton expansion if x and zi have the same ndim. This
        # was broken when only a subset of the axes were singletons (gh-4681).
x = self.convert_dtype(np.zeros((3,2,5), 'l'))
b = self.convert_dtype(np.ones(5, 'l'))
a = self.convert_dtype(np.array([1,0,0]))
zi = np.ones((3,1,4), 'l')
zi[1,:,:] *= 2
zi[2,:,:] *= 3
zi = self.convert_dtype(zi)
zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l'))
y_expected = np.zeros((3,2,5), 'l')
y_expected[:,:,:4] = [[[1]], [[2]], [[3]]]
y_expected = self.convert_dtype(y_expected)
# IIR
y_iir, zf_iir = lfilter(b, a, x, -1, zi)
assert_array_almost_equal(y_iir, y_expected)
assert_array_almost_equal(zf_iir, zf_expected)
# FIR
y_fir, zf_fir = lfilter(b, a[0], x, -1, zi)
assert_array_almost_equal(y_fir, y_expected)
assert_array_almost_equal(zf_fir, zf_expected)
def base_bad_size_zi(self, b, a, x, axis, zi):
b = self.convert_dtype(b)
a = self.convert_dtype(a)
x = self.convert_dtype(x)
zi = self.convert_dtype(zi)
assert_raises(ValueError, lfilter, b, a, x, axis, zi)
def test_bad_size_zi(self):
# rank 1
x1 = np.arange(6)
self.base_bad_size_zi([1], [1], x1, -1, [1])
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1])
self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]])
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]])
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1])
self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]])
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3])
# rank 2
x2 = np.arange(12).reshape((4,3))
# for axis=0 zi.shape should == (max(len(a),len(b))-1, 3)
self.base_bad_size_zi([1], [1], x2, 0, [0])
# for each of these there are 5 cases tested (in this order):
# 1. not deep enough, right # elements
# 2. too deep, right # elements
# 3. right depth, right # elements, transposed
# 4. right depth, too few elements
# 5. right depth, too many elements
self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
# for axis=1 zi.shape should == (4, max(len(a),len(b))-1)
self.base_bad_size_zi([1], [1], x2, 1, [0])
self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
def test_empty_zi(self):
# Regression test for #880: empty array for zi crashes.
x = self.generate((5,))
a = self.convert_dtype([1])
b = self.convert_dtype([1])
zi = self.convert_dtype([])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, x)
assert_equal(zf.dtype, self.dtype)
assert_equal(zf.size, 0)
def test_lfiltic_bad_zi(self):
# Regression test for #3699: bad initial conditions
a = self.convert_dtype([1])
b = self.convert_dtype([1])
# "y" sets the datatype of zi, so it truncates if int
zi = lfiltic(b, a, [1., 0])
zi_1 = lfiltic(b, a, [1, 0])
zi_2 = lfiltic(b, a, [True, False])
assert_array_equal(zi, zi_1)
assert_array_equal(zi, zi_2)
def test_short_x_FIR(self):
        # Regression test for #5116:
        # x shorter than b, with non-None zi, used to fail.
a = self.convert_dtype([1])
b = self.convert_dtype([1, 0, -1])
zi = self.convert_dtype([2, 7])
x = self.convert_dtype([72])
ye = self.convert_dtype([74])
zfe = self.convert_dtype([7, -72])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_short_x_IIR(self):
        # Regression test for #5116:
        # x shorter than b, with non-None zi, used to fail.
a = self.convert_dtype([1, 1])
b = self.convert_dtype([1, 0, -1])
zi = self.convert_dtype([2, 7])
x = self.convert_dtype([72])
ye = self.convert_dtype([74])
zfe = self.convert_dtype([-67, -72])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_do_not_modify_a_b_IIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, -1])
b0 = b.copy()
a = self.convert_dtype([0.5, -0.5])
a0 = a.copy()
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
assert_equal(b, b0)
assert_equal(a, a0)
def test_do_not_modify_a_b_FIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, 0, 1])
b0 = b.copy()
a = self.convert_dtype([2])
a0 = a.copy()
y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.])
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
assert_equal(b, b0)
assert_equal(a, a0)
class TestLinearFilterFloat32(_TestLinearFilter):
dtype = np.dtype('f')
class TestLinearFilterFloat64(_TestLinearFilter):
dtype = np.dtype('d')
class TestLinearFilterFloatExtended(_TestLinearFilter):
dtype = np.dtype('g')
class TestLinearFilterComplex64(_TestLinearFilter):
dtype = np.dtype('F')
class TestLinearFilterComplex128(_TestLinearFilter):
dtype = np.dtype('D')
class TestLinearFilterComplexExtended(_TestLinearFilter):
dtype = np.dtype('G')
class TestLinearFilterDecimal(_TestLinearFilter):
dtype = np.dtype('O')
def type(self, x):
return Decimal(str(x))
class TestLinearFilterObject(_TestLinearFilter):
dtype = np.dtype('O')
type = float
def test_lfilter_bad_object():
# lfilter: object arrays with non-numeric objects raise TypeError.
# Regression test for ticket #1452.
assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0])
assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0])
assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0])
with assert_raises(ValueError, match='common type'):
lfilter([1.], [1., 1.], ['a', 'b', 'c'])
def test_lfilter_notimplemented_input():
# Should not crash, gh-7991
assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5])
@pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short,
                                np.uint, int, np.ulonglong, np.longlong,
np.float32, np.float64, np.longdouble,
Decimal])
class TestCorrelateReal(object):
def _setup_rank1(self, dt):
a = np.linspace(0, 3, 4).astype(dt)
b = np.linspace(1, 2, 2).astype(dt)
y_r = np.array([0, 2, 5, 8, 3]).astype(dt)
return a, b, y_r
def equal_tolerance(self, res_dt):
# default value of keyword
decimal = 6
try:
dt_info = np.finfo(res_dt)
if hasattr(dt_info, 'resolution'):
decimal = int(-0.5*np.log10(dt_info.resolution))
except Exception:
pass
return decimal
def equal_tolerance_fft(self, res_dt):
# FFT implementations convert longdouble arguments down to
# double so don't expect better precision, see gh-9520
if res_dt == np.longdouble:
return self.equal_tolerance(np.double)
else:
return self.equal_tolerance(res_dt)
def test_method(self, dt):
if dt == Decimal:
method = choose_conv_method([Decimal(4)], [Decimal(3)])
assert_equal(method, 'direct')
else:
a, b, y_r = self._setup_rank3(dt)
y_fft = correlate(a, b, method='fft')
y_direct = correlate(a, b, method='direct')
assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype))
assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype))
assert_equal(y_fft.dtype, dt)
assert_equal(y_direct.dtype, dt)
def test_rank1_valid(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'valid')
assert_array_almost_equal(y, y_r[1:4])
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, 'valid')
assert_array_almost_equal(y, y_r[1:4][::-1])
assert_equal(y.dtype, dt)
def test_rank1_same(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'same')
assert_array_almost_equal(y, y_r[:-1])
assert_equal(y.dtype, dt)
def test_rank1_full(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r)
assert_equal(y.dtype, dt)
def _setup_rank3(self, dt):
a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(
dt)
b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(
dt)
y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.],
[46., 432., 1062., 1840., 2672., 1698., 864., 266.],
[134., 736., 1662., 2768., 3920., 2418., 1168., 314.],
[260., 952., 1932., 3056., 4208., 2580., 1240., 332.],
[202., 664., 1290., 1984., 2688., 1590., 712., 150.],
[114., 344., 642., 960., 1280., 726., 296., 38.]],
[[23., 400., 1035., 1832., 2696., 1737., 904., 293.],
[134., 920., 2166., 3680., 5280., 3306., 1640., 474.],
[325., 1544., 3369., 5512., 7720., 4683., 2192., 535.],
[571., 1964., 3891., 6064., 8272., 4989., 2324., 565.],
[434., 1360., 2586., 3920., 5264., 3054., 1312., 230.],
[241., 700., 1281., 1888., 2496., 1383., 532., 39.]],
[[22., 214., 528., 916., 1332., 846., 430., 132.],
[86., 484., 1098., 1832., 2600., 1602., 772., 206.],
[188., 802., 1698., 2732., 3788., 2256., 1018., 218.],
[308., 1006., 1950., 2996., 4052., 2400., 1078., 230.],
[230., 692., 1290., 1928., 2568., 1458., 596., 78.],
[126., 354., 636., 924., 1212., 654., 234., 0.]]],
dtype=dt)
return a, b, y_r
def test_rank3_valid(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b, "valid")
assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, "valid")
assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1])
assert_equal(y.dtype, dt)
def test_rank3_same(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b, "same")
assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])
assert_equal(y.dtype, dt)
def test_rank3_all(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b)
assert_array_almost_equal(y, y_r)
assert_equal(y.dtype, dt)
class TestCorrelate(object):
# Tests that don't depend on dtype
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'})
def test_invalid_params(self):
a = [3, 4, 5]
b = [1, 2, 3]
assert_raises(ValueError, correlate, a, b, mode='spam')
assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft')
assert_raises(ValueError, correlate, a, b, mode='ham', method='direct')
assert_raises(ValueError, correlate, a, b, mode='full', method='bacon')
assert_raises(ValueError, correlate, a, b, mode='same', method='bacon')
def test_mismatched_dims(self):
# Input arrays should have the same number of dimensions
assert_raises(ValueError, correlate, [1], 2, method='direct')
assert_raises(ValueError, correlate, 1, [2], method='direct')
assert_raises(ValueError, correlate, [1], 2, method='fft')
assert_raises(ValueError, correlate, 1, [2], method='fft')
assert_raises(ValueError, correlate, [1], [[2]])
assert_raises(ValueError, correlate, [3], 2)
def test_numpy_fastpath(self):
a = [1, 2, 3]
b = [4, 5]
assert_allclose(correlate(a, b, mode='same'), [5, 14, 23])
a = [1, 2, 3]
b = [4, 5, 6]
assert_allclose(correlate(a, b, mode='same'), [17, 32, 23])
assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12])
assert_allclose(correlate(a, b, mode='valid'), [32])
@pytest.mark.parametrize("mode", ["valid", "same", "full"])
@pytest.mark.parametrize("behind", [True, False])
@pytest.mark.parametrize("input_size", [100, 101, 1000, 1001, 10000, 10001])
def test_correlation_lags(mode, behind, input_size):
# generate random data
rng = np.random.RandomState(0)
in1 = rng.standard_normal(input_size)
offset = int(input_size/10)
# generate offset version of array to correlate with
if behind:
# y is behind x
in2 = np.concatenate([rng.standard_normal(offset), in1])
expected = -offset
else:
# y is ahead of x
in2 = in1[offset:]
expected = offset
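    # Lag convention under test: lags[argmax(correlation)] is the shift for
    # which in1[n] best matches in2[n - lag], so a copy of in1 delayed by
    # `offset` samples peaks at -offset and one running ahead peaks at +offset.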
# cross correlate, returning lag information
correlation = correlate(in1, in2, mode=mode)
lags = correlation_lags(in1.size, in2.size, mode=mode)
# identify the peak
lag_index = np.argmax(correlation)
# Check as expected
assert_equal(lags[lag_index], expected)
# Correlation and lags shape should match
assert_equal(lags.shape, correlation.shape)
@pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble])
class TestCorrelateComplex(object):
# The decimal precision to be used for comparing results.
# This value will be passed as the 'decimal' keyword argument of
# assert_array_almost_equal().
    # Since correlate may choose to use the FFT method, which converts
    # longdoubles to doubles internally, don't expect better precision
    # for longdouble than for double (see gh-9520).
def decimal(self, dt):
if dt == np.clongdouble:
dt = np.cdouble
return int(2 * np.finfo(dt).precision / 3)
def _setup_rank1(self, dt, mode):
np.random.seed(9)
a = np.random.randn(10).astype(dt)
a += 1j * np.random.randn(10).astype(dt)
b = np.random.randn(8).astype(dt)
b += 1j * np.random.randn(8).astype(dt)
y_r = (correlate(a.real, b.real, mode=mode) +
correlate(a.imag, b.imag, mode=mode)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag, mode=mode) +
correlate(a.imag, b.real, mode=mode))
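        # The assembly above follows from correlate conjugating its second
        # argument: with a = ar + 1j*ai and b = br + 1j*bi,
        # corr(a, b) = (corr(ar, br) + corr(ai, bi))
        #              + 1j*(corr(ai, br) - corr(ar, bi)).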
return a, b, y_r
def test_rank1_valid(self, dt):
a, b, y_r = self._setup_rank1(dt, 'valid')
y = correlate(a, b, 'valid')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, 'valid')
assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_rank1_same(self, dt):
a, b, y_r = self._setup_rank1(dt, 'same')
y = correlate(a, b, 'same')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_rank1_full(self, dt):
a, b, y_r = self._setup_rank1(dt, 'full')
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_swap_full(self, dt):
d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt)
k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt)
y = correlate(d, k)
assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j])
def test_swap_same(self, dt):
d = [0.+0.j, 1.+1.j, 2.+2.j]
k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j]
y = correlate(d, k, mode="same")
assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j])
def test_rank3(self, dt):
a = np.random.randn(10, 8, 6).astype(dt)
a += 1j * np.random.randn(10, 8, 6).astype(dt)
b = np.random.randn(8, 6, 4).astype(dt)
b += 1j * np.random.randn(8, 6, 4).astype(dt)
y_r = (correlate(a.real, b.real)
+ correlate(a.imag, b.imag)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)
assert_equal(y.dtype, dt)
def test_rank0(self, dt):
a = np.array(np.random.randn()).astype(dt)
a += 1j * np.array(np.random.randn()).astype(dt)
b = np.array(np.random.randn()).astype(dt)
b += 1j * np.array(np.random.randn()).astype(dt)
y_r = (correlate(a.real, b.real)
+ correlate(a.imag, b.imag)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)
assert_equal(y.dtype, dt)
assert_equal(correlate([1], [2j]), correlate(1, 2j))
assert_equal(correlate([2j], [3j]), correlate(2j, 3j))
assert_equal(correlate([3j], [4]), correlate(3j, 4))
class TestCorrelate2d(object):
def test_consistency_correlate_funcs(self):
# Compare np.correlate, signal.correlate, signal.correlate2d
a = np.arange(5)
b = np.array([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
assert_almost_equal(np.correlate(a, b, mode=mode),
signal.correlate(a, b, mode=mode))
assert_almost_equal(np.squeeze(signal.correlate2d([a], [b],
mode=mode)),
signal.correlate(a, b, mode=mode))
# See gh-5897
if mode == 'valid':
assert_almost_equal(np.correlate(b, a, mode=mode),
signal.correlate(b, a, mode=mode))
assert_almost_equal(np.squeeze(signal.correlate2d([b], [a],
mode=mode)),
signal.correlate(b, a, mode=mode))
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'})
def test_complex_input(self):
assert_equal(signal.correlate2d([[1]], [[2j]]), -2j)
assert_equal(signal.correlate2d([[2j]], [[3j]]), 6)
assert_equal(signal.correlate2d([[3j]], [[4]]), 12j)
class TestLFilterZI(object):
def test_basic(self):
a = np.array([1.0, -1.0, 0.5])
b = np.array([1.0, 0.0, 2.0])
zi_expected = np.array([5.0, -1.0])
zi = lfilter_zi(b, a)
assert_array_almost_equal(zi, zi_expected)
def test_scale_invariance(self):
# Regression test. There was a bug in which b was not correctly
# rescaled when a[0] was nonzero.
b = np.array([2, 8, 5])
a = np.array([1, 1, 8])
zi1 = lfilter_zi(b, a)
zi2 = lfilter_zi(2*b, 2*a)
assert_allclose(zi2, zi1, rtol=1e-12)
class TestFiltFilt(object):
filtfilt_kind = 'tf'
def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None,
method='pad', irlen=None):
if self.filtfilt_kind == 'tf':
b, a = zpk2tf(*zpk)
return filtfilt(b, a, x, axis, padtype, padlen, method, irlen)
elif self.filtfilt_kind == 'sos':
sos = zpk2sos(*zpk)
return sosfiltfilt(sos, x, axis, padtype, padlen)
def test_basic(self):
zpk = tf2zpk([1, 2, 3], [1, 2, 3])
out = self.filtfilt(zpk, np.arange(12))
        assert_allclose(out, np.arange(12), atol=5.28e-11)
def test_sine(self):
rate = 2000
t = np.linspace(0, 1.0, rate + 1)
# A signal with low frequency and a high frequency.
xlow = np.sin(5 * 2 * np.pi * t)
xhigh = np.sin(250 * 2 * np.pi * t)
x = xlow + xhigh
zpk = butter(8, 0.125, output='zpk')
# r is the magnitude of the largest pole.
r = np.abs(zpk[1]).max()
eps = 1e-5
# n estimates the number of steps for the
# transient to decay by a factor of eps.
n = int(np.ceil(np.log(eps) / np.log(r)))
# High order lowpass filter...
y = self.filtfilt(zpk, x, padlen=n)
# Result should be just xlow.
err = np.abs(y - xlow).max()
assert_(err < 1e-4)
# A 2D case.
x2d = np.vstack([xlow, xlow + xhigh])
y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1)
assert_equal(y2d.shape, x2d.shape)
err = np.abs(y2d - xlow).max()
assert_(err < 1e-4)
# Use the previous result to check the use of the axis keyword.
# (Regression test for ticket #1620)
y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0)
assert_equal(y2d, y2dt.T)
def test_axis(self):
# Test the 'axis' keyword on a 3D array.
x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12)
zpk = butter(3, 0.125, output='zpk')
y0 = self.filtfilt(zpk, x, padlen=0, axis=0)
y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1)
assert_array_equal(y0, np.swapaxes(y1, 0, 1))
y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2)
assert_array_equal(y0, np.swapaxes(y2, 0, 2))
def test_acoeff(self):
        if self.filtfilt_kind != 'tf':
            return  # a-coefficient normalization only applies to TF form
# test for 'a' coefficient as single number
out = signal.filtfilt([.5, .5], 1, np.arange(10))
assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14)
def test_gust_simple(self):
if self.filtfilt_kind != 'tf':
pytest.skip('gust only implemented for TF systems')
# The input array has length 2. The exact solution for this case
# was computed "by hand".
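        # Gustafsson's method picks the forward and backward initial
        # conditions so that filtering forward-then-backward matches
        # backward-then-forward; z1 and z2 below are those conditions.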
x = np.array([1.0, 2.0])
b = np.array([0.5])
a = np.array([1.0, -0.5])
y, z1, z2 = _filtfilt_gust(b, a, x)
assert_allclose([z1[0], z2[0]],
[0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]])
assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1],
0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]])
def test_gust_scalars(self):
if self.filtfilt_kind != 'tf':
pytest.skip('gust only implemented for TF systems')
# The filter coefficients are both scalars, so the filter simply
# multiplies its input by b/a. When it is used in filtfilt, the
# factor is (b/a)**2.
x = np.arange(12)
b = 3.0
a = 2.0
y = filtfilt(b, a, x, method="gust")
expected = (b/a)**2 * x
assert_allclose(y, expected)
class TestSOSFiltFilt(TestFiltFilt):
filtfilt_kind = 'sos'
def test_equivalence(self):
"""Test equivalence between sosfiltfilt and filtfilt"""
x = np.random.RandomState(0).randn(1000)
for order in range(1, 6):
zpk = signal.butter(order, 0.35, output='zpk')
b, a = zpk2tf(*zpk)
sos = zpk2sos(*zpk)
y = filtfilt(b, a, x)
y_sos = sosfiltfilt(sos, x)
assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order)
def filtfilt_gust_opt(b, a, x):
"""
An alternative implementation of filtfilt with Gustafsson edges.
This function computes the same result as
`scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays
are accepted. The problem is solved using `fmin` from `scipy.optimize`.
    `_filtfilt_gust` is significantly faster than this implementation.
"""
def filtfilt_gust_opt_func(ics, b, a, x):
"""Objective function used in filtfilt_gust_opt."""
m = max(len(a), len(b)) - 1
z0f = ics[:m]
z0b = ics[m:]
y_f = lfilter(b, a, x, zi=z0f)[0]
y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1]
y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
y_bf = lfilter(b, a, y_b, zi=z0f)[0]
value = np.sum((y_fb - y_bf)**2)
return value
m = max(len(a), len(b)) - 1
zi = lfilter_zi(b, a)
ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi))
result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x),
xtol=1e-10, ftol=1e-12,
maxfun=10000, maxiter=10000,
full_output=True, disp=False)
opt, fopt, niter, funcalls, warnflag = result
if warnflag > 0:
raise RuntimeError("minimization failed in filtfilt_gust_opt: "
"warnflag=%d" % warnflag)
z0f = opt[:m]
z0b = opt[m:]
# Apply the forward-backward filter using the computed initial
# conditions.
y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
y = lfilter(b, a, y_b, zi=z0f)[0]
return y, z0f, z0b
def check_filtfilt_gust(b, a, shape, axis, irlen=None):
# Generate x, the data to be filtered.
np.random.seed(123)
x = np.random.randn(*shape)
# Apply filtfilt to x. This is the main calculation to be checked.
y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen)
# Also call the private function so we can test the ICs.
yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
# filtfilt_gust_opt is an independent implementation that gives the
# expected result, but it only handles 1-D arrays, so use some looping
# and reshaping shenanigans to create the expected output arrays.
xx = np.swapaxes(x, axis, -1)
out_shape = xx.shape[:-1]
yo = np.empty_like(xx)
m = max(len(a), len(b)) - 1
zo1 = np.empty(out_shape + (m,))
zo2 = np.empty(out_shape + (m,))
for indx in product(*[range(d) for d in out_shape]):
yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx])
yo = np.swapaxes(yo, -1, axis)
zo1 = np.swapaxes(zo1, -1, axis)
zo2 = np.swapaxes(zo2, -1, axis)
assert_allclose(y, yo, rtol=1e-9, atol=1e-10)
assert_allclose(yg, yo, rtol=1e-9, atol=1e-10)
assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10)
assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10)
def test_choose_conv_method():
for mode in ['valid', 'same', 'full']:
for ndim in [1, 2]:
n, k, true_method = 8, 6, 'direct'
x = np.random.randn(*((n,) * ndim))
h = np.random.randn(*((k,) * ndim))
method = choose_conv_method(x, h, mode=mode)
assert_equal(method, true_method)
method_try, times = choose_conv_method(x, h, mode=mode, measure=True)
assert_(method_try in {'fft', 'direct'})
assert_(type(times) is dict)
assert_('fft' in times.keys() and 'direct' in times.keys())
n = 10
for not_fft_conv_supp in ["complex256", "complex192"]:
if hasattr(np, not_fft_conv_supp):
x = np.ones(n, dtype=not_fft_conv_supp)
h = x.copy()
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
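        # Integers this large cannot be represented exactly after an FFT
        # round-trip in double precision, so 'direct' should be chosen.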
x = np.array([2**51], dtype=np.int64)
h = x.copy()
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
x = [Decimal(3), Decimal(2)]
h = [Decimal(1), Decimal(4)]
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
def test_filtfilt_gust():
# Design a filter.
z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk')
# Find the approximate impulse response length of the filter.
eps = 1e-10
r = np.max(np.abs(p))
approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
np.random.seed(123)
b, a = zpk2tf(z, p, k)
for irlen in [None, approx_impulse_len]:
signal_len = 5 * approx_impulse_len
# 1-d test case
check_filtfilt_gust(b, a, (signal_len,), 0, irlen)
# 3-d test case; test each axis.
for axis in range(3):
shape = [2, 2, 2]
shape[axis] = signal_len
check_filtfilt_gust(b, a, shape, axis, irlen)
# Test case with length less than 2*approx_impulse_len.
# In this case, `filtfilt_gust` should behave the same as if
# `irlen=None` was given.
length = 2*approx_impulse_len - 50
check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len)
class TestDecimate(object):
def test_bad_args(self):
x = np.arange(12)
assert_raises(TypeError, signal.decimate, x, q=0.5, n=1)
assert_raises(TypeError, signal.decimate, x, q=2, n=0.5)
def test_basic_IIR(self):
x = np.arange(12)
y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round()
assert_array_equal(y, x[::2])
def test_basic_FIR(self):
x = np.arange(12)
y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round()
assert_array_equal(y, x[::2])
def test_shape(self):
# Regression test for ticket #1480.
z = np.zeros((30, 30))
d0 = signal.decimate(z, 2, axis=0, zero_phase=False)
assert_equal(d0.shape, (15, 30))
d1 = signal.decimate(z, 2, axis=1, zero_phase=False)
assert_equal(d1.shape, (30, 15))
def test_phaseshift_FIR(self):
with suppress_warnings() as sup:
sup.filter(BadCoefficients, "Badly conditioned filter")
self._test_phaseshift(method='fir', zero_phase=False)
def test_zero_phase_FIR(self):
with suppress_warnings() as sup:
sup.filter(BadCoefficients, "Badly conditioned filter")
self._test_phaseshift(method='fir', zero_phase=True)
def test_phaseshift_IIR(self):
self._test_phaseshift(method='iir', zero_phase=False)
def test_zero_phase_IIR(self):
self._test_phaseshift(method='iir', zero_phase=True)
def _test_phaseshift(self, method, zero_phase):
rate = 120
rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3
        t_tot = 100  # Need to let antialiasing filters settle
t = np.arange(rate*t_tot+1) / float(rate)
# Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts
freqs = np.array(rates_to) * 0.8 / 2
d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t)
* signal.windows.tukey(t.size, 0.1))
for rate_to in rates_to:
q = rate // rate_to
t_to = np.arange(rate_to*t_tot+1) / float(rate_to)
d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to)
* signal.windows.tukey(t_to.size, 0.1))
# Set up downsampling filters, match v0.17 defaults
if method == 'fir':
n = 30
system = signal.dlti(signal.firwin(n + 1, 1. / q,
window='hamming'), 1.)
elif method == 'iir':
n = 8
wc = 0.8*np.pi/q
system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi))
# Calculate expected phase response, as unit complex vector
if zero_phase is False:
_, h_resps = signal.freqz(system.num, system.den,
freqs/rate*2*np.pi)
h_resps /= np.abs(h_resps)
else:
h_resps = np.ones_like(freqs)
y_resamps = signal.decimate(d.real, q, n, ftype=system,
zero_phase=zero_phase)
# Get phase from complex inner product, like CSD
h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1)
h_resamps /= np.abs(h_resamps)
subnyq = freqs < 0.5*rate_to
# Complex vectors should be aligned, only compare below nyquist
assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0,
atol=1e-3, rtol=1e-3)
def test_auto_n(self):
# Test that our value of n is a reasonable choice (depends on
# the downsampling factor)
sfreq = 100.
n = 1000
t = np.arange(n) / sfreq
# will alias for decimations (>= 15)
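        # (sfreq / 30 ~= 3.33 Hz lies above the post-decimation Nyquist of
        # 100 / 30 / 2 ~= 1.67 Hz, so a poor antialiasing filter would let
        # it alias.)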
x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) * t)
assert_allclose(np.linalg.norm(x), 1., rtol=1e-3)
x_out = signal.decimate(x, 30, ftype='fir')
assert_array_less(np.linalg.norm(x_out), 0.01)
class TestHilbert(object):
def test_bad_args(self):
x = np.array([1.0 + 0.0j])
assert_raises(ValueError, hilbert, x)
x = np.arange(8.0)
assert_raises(ValueError, hilbert, x, N=0)
def test_hilbert_theoretical(self):
# test cases by Ariel Rokem
decimal = 14
pi = np.pi
t = np.arange(0, 2 * pi, pi / 256)
a0 = np.sin(t)
a1 = np.cos(t)
a2 = np.sin(2 * t)
a3 = np.cos(2 * t)
a = np.vstack([a0, a1, a2, a3])
h = hilbert(a)
h_abs = np.abs(h)
h_angle = np.angle(h)
h_real = np.real(h)
# The real part should be equal to the original signals:
assert_almost_equal(h_real, a, decimal)
# The absolute value should be one everywhere, for this input:
assert_almost_equal(h_abs, np.ones(a.shape), decimal)
# For the 'slow' sine - the phase should go from -pi/2 to pi/2 in
# the first 256 bins:
assert_almost_equal(h_angle[0, :256],
np.arange(-pi / 2, pi / 2, pi / 256),
decimal)
# For the 'slow' cosine - the phase should go from 0 to pi in the
# same interval:
assert_almost_equal(
h_angle[1, :256], np.arange(0, pi, pi / 256), decimal)
# The 'fast' sine should make this phase transition in half the time:
assert_almost_equal(h_angle[2, :128],
np.arange(-pi / 2, pi / 2, pi / 128),
decimal)
# Ditto for the 'fast' cosine:
assert_almost_equal(
h_angle[3, :128], np.arange(0, pi, pi / 128), decimal)
# The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia
assert_almost_equal(h[1].imag, a0, decimal)
def test_hilbert_axisN(self):
# tests for axis and N arguments
a = np.arange(18).reshape(3, 6)
# test axis
aa = hilbert(a, axis=-1)
assert_equal(hilbert(a.T, axis=0), aa.T)
# test 1d
assert_almost_equal(hilbert(a[0]), aa[0], 14)
# test N
aan = hilbert(a, N=20, axis=-1)
assert_equal(aan.shape, [3, 20])
assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3])
# the next test is just a regression test,
# no idea whether numbers make sense
a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j,
1.000000000000000e+00 - 2.047794505137069j,
1.999999999999999e+00 - 2.244055555687583j,
3.000000000000000e+00 - 1.262750302935009j,
4.000000000000000e+00 - 1.066489252384493j,
5.000000000000000e+00 + 2.918022706971047j,
8.881784197001253e-17 + 3.845658908989067j,
-9.444121133484362e-17 + 0.985044202202061j,
-1.776356839400251e-16 + 1.332257797702019j,
-3.996802888650564e-16 + 0.501905089898885j,
1.332267629550188e-16 + 0.668696078880782j,
-1.192678053963799e-16 + 0.235487067862679j,
-1.776356839400251e-16 + 0.286439612812121j,
3.108624468950438e-16 + 0.031676888064907j,
1.332267629550188e-16 - 0.019275656884536j,
-2.360035624836702e-16 - 0.1652588660287j,
0.000000000000000e+00 - 0.332049855010597j,
3.552713678800501e-16 - 0.403810179797771j,
8.881784197001253e-17 - 0.751023775297729j,
9.444121133484362e-17 - 0.79252210110103j])
assert_almost_equal(aan[0], a0hilb, 14, 'N regression')
class TestHilbert2(object):
def test_bad_args(self):
# x must be real.
x = np.array([[1.0 + 0.0j]])
assert_raises(ValueError, hilbert2, x)
# x must be rank 2.
x = np.arange(24).reshape(2, 3, 4)
assert_raises(ValueError, hilbert2, x)
# Bad value for N.
x = np.arange(16).reshape(4, 4)
assert_raises(ValueError, hilbert2, x, N=0)
assert_raises(ValueError, hilbert2, x, N=(2, 0))
assert_raises(ValueError, hilbert2, x, N=(2,))
class TestPartialFractionExpansion(object):
@staticmethod
def assert_rp_almost_equal(r, p, r_true, p_true, decimal=7):
r_true = np.asarray(r_true)
p_true = np.asarray(p_true)
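        # Pair each computed (r, p) with an expected one irrespective of
        # ordering by solving a linear assignment problem on the pairwise
        # residue/pole distances.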
distance = np.hypot(abs(p[:, None] - p_true),
abs(r[:, None] - r_true))
rows, cols = linear_sum_assignment(distance)
assert_almost_equal(p[rows], p_true[cols], decimal=decimal)
assert_almost_equal(r[rows], r_true[cols], decimal=decimal)
def test_compute_factors(self):
factors, poly = _compute_factors([1, 2, 3], [3, 2, 1])
assert_equal(len(factors), 3)
assert_almost_equal(factors[0], np.poly([2, 2, 3]))
assert_almost_equal(factors[1], np.poly([1, 1, 1, 3]))
assert_almost_equal(factors[2], np.poly([1, 1, 1, 2, 2]))
assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3]))
factors, poly = _compute_factors([1, 2, 3], [3, 2, 1],
include_powers=True)
assert_equal(len(factors), 6)
assert_almost_equal(factors[0], np.poly([1, 1, 2, 2, 3]))
assert_almost_equal(factors[1], np.poly([1, 2, 2, 3]))
assert_almost_equal(factors[2], np.poly([2, 2, 3]))
assert_almost_equal(factors[3], np.poly([1, 1, 1, 2, 3]))
assert_almost_equal(factors[4], np.poly([1, 1, 1, 3]))
assert_almost_equal(factors[5], np.poly([1, 1, 1, 2, 2]))
assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3]))
def test_group_poles(self):
unique, multiplicity = _group_poles(
[1.0, 1.001, 1.003, 2.0, 2.003, 3.0], 0.1, 'min')
assert_equal(unique, [1.0, 2.0, 3.0])
assert_equal(multiplicity, [3, 2, 1])
def test_residue_general(self):
        # Tests are taken from issue #4464; note that scipy orders poles
        # by increasing absolute value, opposite to MATLAB.
r, p, k = residue([5, 3, -2, 7], [-4, 0, 8, 3])
assert_almost_equal(r, [1.3320, -0.6653, -1.4167], decimal=4)
assert_almost_equal(p, [-0.4093, -1.1644, 1.5737], decimal=4)
assert_almost_equal(k, [-1.2500], decimal=4)
r, p, k = residue([-4, 8], [1, 6, 8])
assert_almost_equal(r, [8, -12])
assert_almost_equal(p, [-2, -4])
assert_equal(k.size, 0)
r, p, k = residue([4, 1], [1, -1, -2])
assert_almost_equal(r, [1, 3])
assert_almost_equal(p, [-1, 2])
assert_equal(k.size, 0)
r, p, k = residue([4, 3], [2, -3.4, 1.98, -0.406])
self.assert_rp_almost_equal(
r, p, [-18.125 - 13.125j, -18.125 + 13.125j, 36.25],
[0.5 - 0.2j, 0.5 + 0.2j, 0.7])
assert_equal(k.size, 0)
r, p, k = residue([2, 1], [1, 5, 8, 4])
self.assert_rp_almost_equal(r, p, [-1, 1, 3], [-1, -2, -2])
assert_equal(k.size, 0)
r, p, k = residue([3, -1.1, 0.88, -2.396, 1.348],
[1, -0.7, -0.14, 0.048])
assert_almost_equal(r, [-3, 4, 1])
assert_almost_equal(p, [0.2, -0.3, 0.8])
assert_almost_equal(k, [3, 1])
r, p, k = residue([1], [1, 2, -3])
assert_almost_equal(r, [0.25, -0.25])
assert_almost_equal(p, [1, -3])
assert_equal(k.size, 0)
r, p, k = residue([1, 0, -5], [1, 0, 0, 0, -1])
self.assert_rp_almost_equal(r, p,
[1, 1.5j, -1.5j, -1], [-1, -1j, 1j, 1])
assert_equal(k.size, 0)
r, p, k = residue([3, 8, 6], [1, 3, 3, 1])
self.assert_rp_almost_equal(r, p, [1, 2, 3], [-1, -1, -1])
assert_equal(k.size, 0)
r, p, k = residue([3, -1], [1, -3, 2])
assert_almost_equal(r, [-2, 5])
assert_almost_equal(p, [1, 2])
assert_equal(k.size, 0)
r, p, k = residue([2, 3, -1], [1, -3, 2])
assert_almost_equal(r, [-4, 13])
assert_almost_equal(p, [1, 2])
assert_almost_equal(k, [2])
r, p, k = residue([7, 2, 3, -1], [1, -3, 2])
assert_almost_equal(r, [-11, 69])
assert_almost_equal(p, [1, 2])
assert_almost_equal(k, [7, 23])
r, p, k = residue([2, 3, -1], [1, -3, 4, -2])
self.assert_rp_almost_equal(r, p, [4, -1 + 3.5j, -1 - 3.5j],
[1, 1 - 1j, 1 + 1j])
        assert_equal(k.size, 0)
def test_residue_leading_zeros(self):
# Leading zeros in numerator or denominator must not affect the answer.
r0, p0, k0 = residue([5, 3, -2, 7], [-4, 0, 8, 3])
r1, p1, k1 = residue([0, 5, 3, -2, 7], [-4, 0, 8, 3])
r2, p2, k2 = residue([5, 3, -2, 7], [0, -4, 0, 8, 3])
r3, p3, k3 = residue([0, 0, 5, 3, -2, 7], [0, 0, 0, -4, 0, 8, 3])
assert_almost_equal(r0, r1)
assert_almost_equal(r0, r2)
assert_almost_equal(r0, r3)
assert_almost_equal(p0, p1)
assert_almost_equal(p0, p2)
assert_almost_equal(p0, p3)
assert_almost_equal(k0, k1)
assert_almost_equal(k0, k2)
assert_almost_equal(k0, k3)
    def test_residue_degenerate(self):
# Several tests for zero numerator and denominator.
r, p, k = residue([0, 0], [1, 6, 8])
assert_almost_equal(r, [0, 0])
assert_almost_equal(p, [-2, -4])
assert_equal(k.size, 0)
r, p, k = residue(0, 1)
assert_equal(r.size, 0)
assert_equal(p.size, 0)
assert_equal(k.size, 0)
with pytest.raises(ValueError, match="Denominator `a` is zero."):
residue(1, 0)
def test_residuez_general(self):
r, p, k = residuez([1, 6, 6, 2], [1, -(2 + 1j), (1 + 2j), -1j])
self.assert_rp_almost_equal(r, p, [-2+2.5j, 7.5+7.5j, -4.5-12j],
[1j, 1, 1])
assert_almost_equal(k, [2j])
r, p, k = residuez([1, 2, 1], [1, -1, 0.3561])
self.assert_rp_almost_equal(r, p,
[-0.9041 - 5.9928j, -0.9041 + 5.9928j],
[0.5 + 0.3257j, 0.5 - 0.3257j],
decimal=4)
assert_almost_equal(k, [2.8082], decimal=4)
r, p, k = residuez([1, -1], [1, -5, 6])
assert_almost_equal(r, [-1, 2])
assert_almost_equal(p, [2, 3])
assert_equal(k.size, 0)
r, p, k = residuez([2, 3, 4], [1, 3, 3, 1])
self.assert_rp_almost_equal(r, p, [4, -5, 3], [-1, -1, -1])
assert_equal(k.size, 0)
r, p, k = residuez([1, -10, -4, 4], [2, -2, -4])
assert_almost_equal(r, [0.5, -1.5])
assert_almost_equal(p, [-1, 2])
assert_almost_equal(k, [1.5, -1])
r, p, k = residuez([18], [18, 3, -4, -1])
self.assert_rp_almost_equal(r, p,
[0.36, 0.24, 0.4], [0.5, -1/3, -1/3])
assert_equal(k.size, 0)
r, p, k = residuez([2, 3], np.polymul([1, -1/2], [1, 1/4]))
assert_almost_equal(r, [-10/3, 16/3])
assert_almost_equal(p, [-0.25, 0.5])
assert_equal(k.size, 0)
r, p, k = residuez([1, -2, 1], [1, -1])
assert_almost_equal(r, [0])
assert_almost_equal(p, [1])
assert_almost_equal(k, [1, -1])
r, p, k = residuez(1, [1, -1j])
assert_almost_equal(r, [1])
assert_almost_equal(p, [1j])
assert_equal(k.size, 0)
r, p, k = residuez(1, [1, -1, 0.25])
assert_almost_equal(r, [0, 1])
assert_almost_equal(p, [0.5, 0.5])
assert_equal(k.size, 0)
r, p, k = residuez(1, [1, -0.75, .125])
assert_almost_equal(r, [-1, 2])
assert_almost_equal(p, [0.25, 0.5])
assert_equal(k.size, 0)
r, p, k = residuez([1, 6, 2], [1, -2, 1])
assert_almost_equal(r, [-10, 9])
assert_almost_equal(p, [1, 1])
assert_almost_equal(k, [2])
r, p, k = residuez([6, 2], [1, -2, 1])
assert_almost_equal(r, [-2, 8])
assert_almost_equal(p, [1, 1])
assert_equal(k.size, 0)
r, p, k = residuez([1, 6, 6, 2], [1, -2, 1])
assert_almost_equal(r, [-24, 15])
assert_almost_equal(p, [1, 1])
assert_almost_equal(k, [10, 2])
r, p, k = residuez([1, 0, 1], [1, 0, 0, 0, 0, -1])
self.assert_rp_almost_equal(r, p,
[0.2618 + 0.1902j, 0.2618 - 0.1902j,
0.4, 0.0382 - 0.1176j, 0.0382 + 0.1176j],
[-0.8090 + 0.5878j, -0.8090 - 0.5878j,
1.0, 0.3090 + 0.9511j, 0.3090 - 0.9511j],
decimal=4)
assert_equal(k.size, 0)
def test_residuez_trailing_zeros(self):
# Trailing zeros in numerator or denominator must not affect the
# answer.
r0, p0, k0 = residuez([5, 3, -2, 7], [-4, 0, 8, 3])
r1, p1, k1 = residuez([5, 3, -2, 7, 0], [-4, 0, 8, 3])
r2, p2, k2 = residuez([5, 3, -2, 7], [-4, 0, 8, 3, 0])
r3, p3, k3 = residuez([5, 3, -2, 7, 0, 0], [-4, 0, 8, 3, 0, 0, 0])
assert_almost_equal(r0, r1)
assert_almost_equal(r0, r2)
assert_almost_equal(r0, r3)
assert_almost_equal(p0, p1)
assert_almost_equal(p0, p2)
assert_almost_equal(p0, p3)
assert_almost_equal(k0, k1)
assert_almost_equal(k0, k2)
assert_almost_equal(k0, k3)
def test_residuez_degenerate(self):
r, p, k = residuez([0, 0], [1, 6, 8])
assert_almost_equal(r, [0, 0])
assert_almost_equal(p, [-2, -4])
assert_equal(k.size, 0)
r, p, k = residuez(0, 1)
assert_equal(r.size, 0)
assert_equal(p.size, 0)
assert_equal(k.size, 0)
with pytest.raises(ValueError, match="Denominator `a` is zero."):
residuez(1, 0)
with pytest.raises(ValueError,
match="First coefficient of determinant `a` must "
"be non-zero."):
residuez(1, [0, 1, 2, 3])
def test_inverse_unique_roots_different_rtypes(self):
# This test was inspired by github issue 2496.
r = [3 / 10, -1 / 6, -2 / 15]
p = [0, -2, -5]
k = []
b_expected = [0, 1, 3]
a_expected = [1, 7, 10, 0]
# With the default tolerance, the rtype does not matter
# for this example.
for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'):
b, a = invres(r, p, k, rtype=rtype)
assert_allclose(b, b_expected)
assert_allclose(a, a_expected)
b, a = invresz(r, p, k, rtype=rtype)
assert_allclose(b, b_expected)
assert_allclose(a, a_expected)
def test_inverse_repeated_roots_different_rtypes(self):
r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]
p = [0, -2, -2, -5]
k = []
b_expected = [0, 0, 1, 3]
b_expected_z = [-1/6, -2/3, 11/6, 3]
a_expected = [1, 9, 24, 20, 0]
for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'):
b, a = invres(r, p, k, rtype=rtype)
assert_allclose(b, b_expected, atol=1e-14)
assert_allclose(a, a_expected)
b, a = invresz(r, p, k, rtype=rtype)
assert_allclose(b, b_expected_z, atol=1e-14)
assert_allclose(a, a_expected)
def test_inverse_bad_rtype(self):
r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]
p = [0, -2, -2, -5]
k = []
with pytest.raises(ValueError, match="`rtype` must be one of"):
invres(r, p, k, rtype='median')
with pytest.raises(ValueError, match="`rtype` must be one of"):
invresz(r, p, k, rtype='median')
def test_invresz_one_coefficient_bug(self):
# Regression test for issue in gh-4646.
r = [1]
p = [2]
k = [0]
b, a = invresz(r, p, k)
assert_allclose(b, [1.0])
assert_allclose(a, [1.0, -2.0])
def test_invres(self):
b, a = invres([1], [1], [])
assert_almost_equal(b, [1])
assert_almost_equal(a, [1, -1])
b, a = invres([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], [])
assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j])
assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j])
b, a = invres([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3])
assert_almost_equal(b, [1, -1 - 1j, 1 - 2j, 0.5 - 3j, 10])
assert_almost_equal(a, [1, -3 - 1j, 4])
b, a = invres([-1, 2, 1j, 3 - 1j, 4, -2],
[-1, 2 - 1j, 2 - 1j, 3, 3, 3], [])
assert_almost_equal(b, [4 - 1j, -28 + 16j, 40 - 62j, 100 + 24j,
-292 + 219j, 192 - 268j])
assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j,
108 - 54j, -81 + 108j])
b, a = invres([-1, 1j], [1, 1], [1, 2])
assert_almost_equal(b, [1, 0, -4, 3 + 1j])
assert_almost_equal(a, [1, -2, 1])
def test_invresz(self):
b, a = invresz([1], [1], [])
assert_almost_equal(b, [1])
assert_almost_equal(a, [1, -1])
b, a = invresz([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], [])
assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j])
assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j])
b, a = invresz([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3])
assert_almost_equal(b, [2.5, -3 - 1j, 1 - 2j, -1 - 3j, 12])
assert_almost_equal(a, [1, -3 - 1j, 4])
b, a = invresz([-1, 2, 1j, 3 - 1j, 4, -2],
[-1, 2 - 1j, 2 - 1j, 3, 3, 3], [])
assert_almost_equal(b, [6, -50 + 11j, 100 - 72j, 80 + 58j,
-354 + 228j, 234 - 297j])
assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j,
108 - 54j, -81 + 108j])
b, a = invresz([-1, 1j], [1, 1], [1, 2])
assert_almost_equal(b, [1j, 1, -3, 2])
assert_almost_equal(a, [1, -2, 1])
def test_inverse_scalar_arguments(self):
b, a = invres(1, 1, 1)
assert_almost_equal(b, [1, 0])
assert_almost_equal(a, [1, -1])
b, a = invresz(1, 1, 1)
assert_almost_equal(b, [2, -1])
assert_almost_equal(a, [1, -1])
class TestVectorstrength(object):
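    # Each event time t maps to the phase 2*pi*t/period on the unit circle;
    # vector strength is the length of the mean resultant vector and the
    # phase is its angle, which is why the targets below are scaled by
    # 2 * np.pi.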
def test_single_1dperiod(self):
events = np.array([.5])
period = 5.
targ_strength = 1.
targ_phase = .1
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_single_2dperiod(self):
events = np.array([.5])
period = [1, 2, 5.]
targ_strength = [1.] * 3
targ_phase = np.array([.5, .25, .1])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_array_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_equal_1dperiod(self):
events = np.array([.25, .25, .25, .25, .25, .25])
period = 2
targ_strength = 1.
targ_phase = .125
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_equal_2dperiod(self):
events = np.array([.25, .25, .25, .25, .25, .25])
period = [1, 2, ]
targ_strength = [1.] * 2
targ_phase = np.array([.25, .125])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_spaced_1dperiod(self):
events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
period = 1
targ_strength = 1.
targ_phase = .1
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_spaced_2dperiod(self):
events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
period = [1, .5]
targ_strength = [1.] * 2
targ_phase = np.array([.1, .2])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_partial_1dperiod(self):
events = np.array([.25, .5, .75])
period = 1
targ_strength = 1. / 3.
targ_phase = .5
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_partial_2dperiod(self):
events = np.array([.25, .5, .75])
period = [1., 1., 1., 1.]
targ_strength = [1. / 3.] * 4
targ_phase = np.array([.5, .5, .5, .5])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_opposite_1dperiod(self):
events = np.array([0, .25, .5, .75])
period = 1.
targ_strength = 0
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
def test_opposite_2dperiod(self):
events = np.array([0, .25, .5, .75])
period = [1.] * 10
targ_strength = [0.] * 10
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
def test_2d_events_ValueError(self):
events = np.array([[1, 2]])
period = 1.
assert_raises(ValueError, vectorstrength, events, period)
def test_2d_period_ValueError(self):
events = 1.
period = np.array([[1]])
assert_raises(ValueError, vectorstrength, events, period)
def test_zero_period_ValueError(self):
events = 1.
period = 0
assert_raises(ValueError, vectorstrength, events, period)
def test_negative_period_ValueError(self):
events = 1.
period = -1
assert_raises(ValueError, vectorstrength, events, period)
def cast_tf2sos(b, a):
"""Convert TF2SOS, casting to complex128 and back to the original dtype."""
# tf2sos does not support all of the dtypes that we want to check, e.g.:
#
# TypeError: array type complex256 is unsupported in linalg
#
# so let's cast, convert, and cast back -- should be fine for the
# systems and precisions we are testing.
dtype = np.asarray(b).dtype
b = np.array(b, np.complex128)
a = np.array(a, np.complex128)
return tf2sos(b, a).astype(dtype)
def assert_allclose_cast(actual, desired, rtol=1e-7, atol=0):
"""Wrap assert_allclose while casting object arrays."""
if actual.dtype.kind == 'O':
dtype = np.array(actual.flat[0]).dtype
actual, desired = actual.astype(dtype), desired.astype(dtype)
assert_allclose(actual, desired, rtol, atol)
@pytest.mark.parametrize('func', (sosfilt, lfilter))
def test_nonnumeric_dtypes(func):
x = [Decimal(1), Decimal(2), Decimal(3)]
b = [Decimal(1), Decimal(2), Decimal(3)]
a = [Decimal(1), Decimal(2), Decimal(3)]
x = np.array(x)
assert x.dtype.kind == 'O'
desired = lfilter(np.array(b, float), np.array(a, float), x.astype(float))
if func is sosfilt:
actual = sosfilt([b + a], x)
else:
actual = lfilter(b, a, x)
assert all(isinstance(x, Decimal) for x in actual)
assert_allclose(actual.astype(float), desired.astype(float))
# Degenerate cases
if func is lfilter:
args = [1., 1.]
else:
args = [tf2sos(1., 1.)]
with pytest.raises(NotImplementedError,
match='input type .* not supported'):
func(*args, x=['foo'])
with pytest.raises(ValueError, match='must be at least 1-D'):
func(*args, x=1.)
@pytest.mark.parametrize('dt', 'fdgFDGO')
class TestSOSFilt(object):
# The test_rank* tests are pulled from _TestLinearFilter
def test_rank1(self, dt):
x = np.linspace(0, 5, 6).astype(dt)
b = np.array([1, -1]).astype(dt)
a = np.array([0.5, -0.5]).astype(dt)
# Test simple IIR
y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(dt)
        sos = cast_tf2sos(b, a)
        assert_array_almost_equal(sosfilt(sos, x), y_r)
# Test simple FIR
b = np.array([1, 1]).astype(dt)
        # NOTE: relative to _TestLinearFilter, a pole at zero is added here:
a = np.array([1, 0]).astype(dt)
y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(dt)
assert_array_almost_equal(sosfilt(cast_tf2sos(b, a), x), y_r)
b = [1, 1, 0]
a = [1, 0, 0]
x = np.ones(8)
sos = np.concatenate((b, a))
sos.shape = (1, 6)
y = sosfilt(sos, x)
assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2])
def test_rank2(self, dt):
shape = (4, 3)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
x = x.astype(dt)
b = np.array([1, -1]).astype(dt)
a = np.array([0.5, 0.5]).astype(dt)
y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]],
dtype=dt)
y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12],
[18, -16, 18]], dtype=dt)
y = sosfilt(cast_tf2sos(b, a), x, axis=0)
assert_array_almost_equal(y_r2_a0, y)
y = sosfilt(cast_tf2sos(b, a), x, axis=1)
assert_array_almost_equal(y_r2_a1, y)
def test_rank3(self, dt):
shape = (4, 3, 2)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
b = np.array([1, -1]).astype(dt)
a = np.array([0.5, 0.5]).astype(dt)
# Test last axis
y = sosfilt(cast_tf2sos(b, a), x)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j]))
def test_initial_conditions(self, dt):
b1, a1 = signal.butter(2, 0.25, 'low')
b2, a2 = signal.butter(2, 0.75, 'low')
b3, a3 = signal.butter(2, 0.75, 'low')
b = np.convolve(np.convolve(b1, b2), b3)
a = np.convolve(np.convolve(a1, a2), a3)
sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3]))
x = np.random.rand(50).astype(dt)
# Stopping filtering and continuing
y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6))
y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]]
assert_allclose_cast(y_true, lfilter(b, a, x))
y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2)))
y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]]
assert_allclose_cast(y_true, y_sos)
# Use a step function
zi = sosfilt_zi(sos)
x = np.ones(8, dt)
y, zf = sosfilt(sos, x, zi=zi)
assert_allclose_cast(y, np.ones(8))
assert_allclose_cast(zf, zi)
# Initial condition shape matching
x.shape = (1, 1) + x.shape # 3D
assert_raises(ValueError, sosfilt, sos, x, zi=zi)
zi_nd = zi.copy()
zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1])
assert_raises(ValueError, sosfilt, sos, x,
zi=zi_nd[:, :, :, [0, 1, 1]])
y, zf = sosfilt(sos, x, zi=zi_nd)
assert_allclose_cast(y[0, 0], np.ones(8))
assert_allclose_cast(zf[:, 0, 0, :], zi)
def test_initial_conditions_3d_axis1(self, dt):
# Test the use of zi when sosfilt is applied to axis 1 of a 3-d input.
# Input array is x.
x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3))
x = x.astype(dt)
# Design a filter in ZPK format and convert to SOS
zpk = signal.butter(6, 0.35, output='zpk')
sos = zpk2sos(*zpk)
nsections = sos.shape[0]
# Filter along this axis.
axis = 1
# Initial conditions, all zeros.
shp = list(x.shape)
shp[axis] = 2
shp = [nsections] + shp
z0 = np.zeros(shp)
# Apply the filter to x.
yf, zf = sosfilt(sos, x, axis=axis, zi=z0)
# Apply the filter to x in two stages.
y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0)
y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1)
# y should equal yf, and z2 should equal zf.
y = np.concatenate((y1, y2), axis=axis)
assert_allclose_cast(y, yf, rtol=1e-10, atol=1e-13)
assert_allclose_cast(z2, zf, rtol=1e-10, atol=1e-13)
# let's try the "step" initial condition
zi = sosfilt_zi(sos)
zi.shape = [nsections, 1, 2, 1]
zi = zi * x[:, 0:1, :]
y = sosfilt(sos, x, axis=axis, zi=zi)[0]
# check it against the TF form
b, a = zpk2tf(*zpk)
zi = lfilter_zi(b, a)
zi.shape = [1, zi.size, 1]
zi = zi * x[:, 0:1, :]
y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0]
assert_allclose_cast(y, y_tf, rtol=1e-10, atol=1e-13)
def test_bad_zi_shape(self, dt):
# The shape of zi is checked before using any values in the
# arguments, so np.empty is fine for creating the arguments.
x = np.empty((3, 15, 3), dt)
sos = np.zeros((4, 6))
zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3)
with pytest.raises(ValueError, match='should be all ones'):
sosfilt(sos, x, zi=zi, axis=1)
sos[:, 3] = 1.
with pytest.raises(ValueError, match='Invalid zi shape'):
sosfilt(sos, x, zi=zi, axis=1)
def test_sosfilt_zi(self, dt):
sos = signal.butter(6, 0.2, output='sos')
zi = sosfilt_zi(sos)
y, zf = sosfilt(sos, np.ones(40, dt), zi=zi)
assert_allclose_cast(zf, zi, rtol=1e-13)
# Expected steady state value of the step response of this filter:
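        # (each biquad's DC gain is H(1) = sum(b) / sum(a); the cascade
        # gain is the product over sections)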
ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1))
assert_allclose_cast(y, ss, rtol=1e-13)
# zi as array-like
_, zf = sosfilt(sos, np.ones(40, dt), zi=zi.tolist())
assert_allclose_cast(zf, zi, rtol=1e-13)
class TestDeconvolve(object):
def test_basic(self):
# From docstring example
original = [0, 1, 0, 0, 1, 1, 0, 0]
impulse_response = [2, 1]
recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0]
recovered, remainder = signal.deconvolve(recorded, impulse_response)
assert_allclose(recovered, original)
class TestDetrend(object):
def test_basic(self):
detrended = detrend(array([1, 2, 3]))
detrended_exact = array([0, 0, 0])
assert_array_almost_equal(detrended, detrended_exact)
def test_copy(self):
x = array([1, 1.2, 1.5, 1.6, 2.4])
copy_array = detrend(x, overwrite_data=False)
inplace = detrend(x, overwrite_data=True)
assert_array_almost_equal(copy_array, inplace)
class TestUniqueRoots(object):
def test_real_no_repeat(self):
p = [-1.0, -0.5, 0.3, 1.2, 10.0]
unique, multiplicity = unique_roots(p)
assert_almost_equal(unique, p, decimal=15)
assert_equal(multiplicity, np.ones(len(p)))
def test_real_repeat(self):
p = [-1.0, -0.95, -0.89, -0.8, 0.5, 1.0, 1.05]
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min')
assert_almost_equal(unique, [-1.0, -0.89, 0.5, 1.0], decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max')
assert_almost_equal(unique, [-0.95, -0.8, 0.5, 1.05], decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg')
assert_almost_equal(unique, [-0.975, -0.845, 0.5, 1.025], decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
def test_complex_no_repeat(self):
p = [-1.0, 1.0j, 0.5 + 0.5j, -1.0 - 1.0j, 3.0 + 2.0j]
unique, multiplicity = unique_roots(p)
assert_almost_equal(unique, p, decimal=15)
assert_equal(multiplicity, np.ones(len(p)))
def test_complex_repeat(self):
p = [-1.0, -1.0 + 0.05j, -0.95 + 0.15j, -0.90 + 0.15j, 0.0,
0.5 + 0.5j, 0.45 + 0.55j]
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min')
assert_almost_equal(unique, [-1.0, -0.95 + 0.15j, 0.0, 0.45 + 0.55j],
decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max')
assert_almost_equal(unique,
[-1.0 + 0.05j, -0.90 + 0.15j, 0.0, 0.5 + 0.5j],
decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg')
assert_almost_equal(
unique, [-1.0 + 0.025j, -0.925 + 0.15j, 0.0, 0.475 + 0.525j],
decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
def test_gh_4915(self):
p = np.roots(np.convolve(np.ones(5), np.ones(5)))
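        # np.ones(5) is the polynomial 1 + x + ... + x**4, whose roots are
        # the four nontrivial 5th roots of unity; convolving it with itself
        # squares the polynomial, so each root appears with multiplicity 2.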
true_roots = [-(-1)**(1/5), (-1)**(4/5), -(-1)**(3/5), (-1)**(2/5)]
unique, multiplicity = unique_roots(p)
        unique = np.sort(unique)
        assert_almost_equal(unique, true_roots, decimal=7)
assert_equal(multiplicity, [2, 2, 2, 2])
def test_complex_roots_extra(self):
unique, multiplicity = unique_roots([1.0, 1.0j, 1.0])
assert_almost_equal(unique, [1.0, 1.0j], decimal=15)
assert_equal(multiplicity, [2, 1])
unique, multiplicity = unique_roots([1, 1 + 2e-9, 1e-9 + 1j], tol=0.1)
assert_almost_equal(unique, [1.0, 1e-9 + 1.0j], decimal=15)
assert_equal(multiplicity, [2, 1])
def test_single_unique_root(self):
p = np.random.rand(100) + 1j * np.random.rand(100)
unique, multiplicity = unique_roots(p, 2)
assert_almost_equal(unique, [np.min(p)], decimal=15)
assert_equal(multiplicity, [100])
|
nmayorov/scipy
|
scipy/signal/tests/test_signaltools.py
|
Python
|
bsd-3-clause
| 130,387
|