code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
import os, json
from shutil import copyfile
from opendm import io
from opendm import log
from opendm import system
from opendm import context
from opendm import types
from opendm import gsd
from opendm.dem import commands, utils
from opendm.cropper import Cropper
from opendm import pseudogeo
from opendm.tiles.tiler import generate_dem_tiles
class ODMDEMStage(types.ODM_Stage):
    """Pipeline stage that derives elevation rasters (DSM and/or DTM) from
    the reconstruction's point cloud, with optional SMRF ground
    classification, cropping, euclidean-distance maps, pseudo-georeferencing
    and tile generation."""

    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        # Default DEM input: the georeferenced LAZ point cloud.
        dem_input = tree.odm_georeferencing_model_laz
        pc_model_found = io.file_exists(dem_input)
        ignore_resolution = False
        pseudo_georeference = False

        if not reconstruction.is_georeferenced():
            # Special case to clear previous run point cloud
            # (NodeODM will generate a fake georeferenced laz during postprocessing
            # with non-georeferenced datasets). odm_georeferencing_model_laz should
            # not be here! Perhaps we should improve this.
            if io.file_exists(tree.odm_georeferencing_model_laz) and self.rerun():
                os.remove(tree.odm_georeferencing_model_laz)

            # Fall back to the filtered (ungeoreferenced) point cloud.
            log.ODM_WARNING("Not georeferenced, using ungeoreferenced point cloud...")
            dem_input = tree.path("odm_filterpoints", "point_cloud.ply")
            pc_model_found = io.file_exists(dem_input)
            ignore_resolution = True
            pseudo_georeference = True

        # Cap the user-requested DEM resolution by the estimated ground
        # sampling distance (units appear to be cm/pixel given the /100.0
        # conversions below — TODO confirm against gsd.cap_resolution).
        resolution = gsd.cap_resolution(args.dem_resolution, tree.opensfm_reconstruction,
                                        gsd_error_estimate=-3,
                                        ignore_gsd=args.ignore_gsd,
                                        ignore_resolution=ignore_resolution,
                                        has_gcp=reconstruction.has_gcp())

        log.ODM_INFO('Classify: ' + str(args.pc_classify))
        log.ODM_INFO('Create DSM: ' + str(args.dsm))
        log.ODM_INFO('Create DTM: ' + str(args.dtm))
        log.ODM_INFO('DEM input file {0} found: {1}'.format(dem_input, str(pc_model_found)))

        # define paths and create working directories
        odm_dem_root = tree.path('odm_dem')
        if not io.dir_exists(odm_dem_root):
            system.mkdir_p(odm_dem_root)

        if args.pc_classify and pc_model_found:
            # Marker file makes classification idempotent across re-runs.
            pc_classify_marker = os.path.join(odm_dem_root, 'pc_classify_done.txt')

            if not io.file_exists(pc_classify_marker) or self.rerun():
                log.ODM_INFO("Classifying {} using Simple Morphological Filter".format(dem_input))
                commands.classify(dem_input,
                                  args.smrf_scalar,
                                  args.smrf_slope,
                                  args.smrf_threshold,
                                  args.smrf_window,
                                  verbose=args.verbose
                                  )

                # Record the parameters used so the marker doubles as a log.
                with open(pc_classify_marker, 'w') as f:
                    f.write('Classify: smrf\n')
                    f.write('Scalar: {}\n'.format(args.smrf_scalar))
                    f.write('Slope: {}\n'.format(args.smrf_slope))
                    f.write('Threshold: {}\n'.format(args.smrf_threshold))
                    f.write('Window: {}\n'.format(args.smrf_window))

        progress = 20
        self.update_progress(progress)

        if args.pc_rectify:
            commands.rectify(dem_input, args.debug)

        # Do we need to process anything here?
        if (args.dsm or args.dtm) and pc_model_found:
            dsm_output_filename = os.path.join(odm_dem_root, 'dsm.tif')
            dtm_output_filename = os.path.join(odm_dem_root, 'dtm.tif')

            if (args.dtm and not io.file_exists(dtm_output_filename)) or \
                (args.dsm and not io.file_exists(dsm_output_filename)) or \
                self.rerun():

                # A DSM is also produced when a DTM euclidean map is requested.
                products = []

                if args.dsm or (args.dtm and args.dem_euclidean_map): products.append('dsm')
                if args.dtm: products.append('dtm')

                # Gap-filling search radii: start at half the output resolution
                # and double at each step.
                radius_steps = [(resolution / 100.0) / 2.0]
                for _ in range(args.dem_gapfill_steps - 1):
                    radius_steps.append(radius_steps[-1] * 2) # 2 is arbitrary, maybe there's a better value?

                for product in products:
                    commands.create_dem(
                            dem_input,
                            product,
                            output_type='idw' if product == 'dtm' else 'max',
                            radiuses=list(map(str, radius_steps)),
                            gapfill=args.dem_gapfill_steps > 0,
                            outdir=odm_dem_root,
                            resolution=resolution / 100.0,
                            decimation=args.dem_decimation,
                            verbose=args.verbose,
                            max_workers=args.max_concurrency,
                            keep_unfilled_copy=args.dem_euclidean_map
                        )

                    dem_geotiff_path = os.path.join(odm_dem_root, "{}.tif".format(product))
                    bounds_file_path = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')

                    if args.crop > 0:
                        # Crop DEM
                        Cropper.crop(bounds_file_path, dem_geotiff_path, utils.get_dem_vars(args), keep_original=not args.optimize_disk_space)

                    if args.dem_euclidean_map:
                        unfilled_dem_path = io.related_file_path(dem_geotiff_path, postfix=".unfilled")

                        if args.crop > 0:
                            # Crop unfilled DEM
                            Cropper.crop(bounds_file_path, unfilled_dem_path, utils.get_dem_vars(args), keep_original=not args.optimize_disk_space)

                        # NOTE(review): ".euclideand" looks like a typo for
                        # ".euclidean" — confirm downstream consumers before renaming.
                        commands.compute_euclidean_map(unfilled_dem_path,
                                            io.related_file_path(dem_geotiff_path, postfix=".euclideand"),
                                            overwrite=True)

                    if pseudo_georeference:
                        # Attach a fake CRS so downstream tools accept the raster.
                        pseudogeo.add_pseudo_georeferencing(dem_geotiff_path)

                    if args.tiles:
                        generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency)

                    progress += 30
                    self.update_progress(progress)
            else:
                log.ODM_WARNING('Found existing outputs in: %s' % odm_dem_root)
        else:
            log.ODM_WARNING('DEM will not be generated')
|
OpenDroneMap/OpenDroneMap
|
stages/odm_dem.py
|
Python
|
gpl-3.0
| 6,644
|
from __future__ import unicode_literals
import sys
from datetime import date
from django.apps import apps
from django.contrib.auth import management
from django.contrib.auth.checks import check_user_model
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import (
changepassword, createsuperuser,
)
from django.contrib.auth.models import (
AbstractBaseUser, Group, Permission, User,
)
from django.contrib.contenttypes.models import ContentType
from django.core import checks, exceptions
from django.core.management import call_command
from django.core.management.base import CommandError
from django.db import models
from django.test import (
SimpleTestCase, TestCase, mock, override_settings, override_system_checks,
)
from django.test.utils import isolate_apps
from django.utils import six
from django.utils.encoding import force_str
from django.utils.translation import ugettext_lazy as _
from .models import (
CustomUser, CustomUserNonUniqueUsername, CustomUserWithFK, Email,
)
def mock_inputs(inputs):
    """
    Decorator to temporarily replace input/getpass to allow interactive
    createsuperuser.

    ``inputs`` maps lowercase prompt substrings to canned responses; the
    special key 'password' feeds the getpass replacement and may be a
    callable (called once per prompt) for multi-step password scenarios.
    """
    def inner(test_func):
        def wrapped(*args):
            class mock_getpass:
                @staticmethod
                def getpass(prompt=b'Password: ', stream=None):
                    if six.PY2:
                        # getpass on Windows only supports prompt as bytestring (#19807)
                        assert isinstance(prompt, six.binary_type)
                    # A callable lets tests return different passwords on
                    # successive prompts (e.g. mismatch-then-match flows).
                    if callable(inputs['password']):
                        return inputs['password']()
                    return inputs['password']

            def mock_input(prompt):
                # prompt should be encoded in Python 2. This line will raise an
                # Exception if prompt contains unencoded non-ASCII on Python 2.
                prompt = str(prompt)
                # Prompts must have been fully evaluated (no lazy proxies).
                assert str('__proxy__') not in prompt
                response = ''
                # First key found as a substring of the prompt wins.
                for key, val in inputs.items():
                    if force_str(key) in prompt.lower():
                        response = val
                        break
                return response

            # Patch the createsuperuser module's references, run the test,
            # and always restore the originals — even if the test raises.
            old_getpass = createsuperuser.getpass
            old_input = createsuperuser.input
            createsuperuser.getpass = mock_getpass
            createsuperuser.input = mock_input
            try:
                test_func(*args)
            finally:
                createsuperuser.getpass = old_getpass
                createsuperuser.input = old_input
        return wrapped
    return inner
class MockTTY(object):
    """Stand-in for stdin that always claims to be a terminal.

    Interactive management commands check ``stdin.isatty()`` before
    prompting; pass an instance of this class (together with mock_inputs)
    to exercise their interactive code paths.
    """

    def isatty(self):
        # Always report "attached to a TTY" so prompts are not skipped.
        return True
class GetDefaultUsernameTestCase(TestCase):
    """Tests for ``management.get_default_username()``."""

    def setUp(self):
        # Save the real implementation so each test can stub it freely.
        self.old_get_system_username = management.get_system_username

    def tearDown(self):
        management.get_system_username = self.old_get_system_username

    def test_actual_implementation(self):
        # The real system username must come back as text, not bytes.
        self.assertIsInstance(management.get_system_username(), six.text_type)

    def test_simple(self):
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), 'joe')

    def test_existing(self):
        # If a user with the system username already exists, no default is
        # suggested — unless the DB check is explicitly disabled.
        User.objects.create(username='joe')
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), '')
        self.assertEqual(
            management.get_default_username(check_db=False), 'joe')

    def test_i18n(self):
        # 'Julia' with accented 'u':
        management.get_system_username = lambda: 'J\xfalia'
        self.assertEqual(management.get_default_username(), 'julia')
@override_settings(AUTH_PASSWORD_VALIDATORS=[
    {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
])
class ChangepasswordManagementCommandTestCase(TestCase):
    """Tests for the ``changepassword`` management command.

    The numeric-password validator is enabled so validation-failure paths
    can be exercised.
    """

    def setUp(self):
        self.user = User.objects.create_user(username='joe', password='qwerty')
        self.stdout = six.StringIO()
        self.stderr = six.StringIO()

    def tearDown(self):
        self.stdout.close()
        self.stderr.close()

    @mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty')
    def test_that_changepassword_command_changes_joes_password(self, mock_get_pass):
        "Executing the changepassword management command should change joe's password"
        self.assertTrue(self.user.check_password('qwerty'))

        call_command('changepassword', username='joe', stdout=self.stdout)
        command_output = self.stdout.getvalue().strip()

        self.assertEqual(
            command_output,
            "Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
        )
        self.assertTrue(User.objects.get(username="joe").check_password("not qwerty"))

    @mock.patch.object(changepassword.Command, '_get_pass', side_effect=lambda *args: str(args))
    def test_that_max_tries_exits_1(self, mock_get_pass):
        """
        A CommandError should be thrown by handle() if the user enters in
        mismatched passwords three times.
        """
        # side_effect returns str(args), so the two prompts never match.
        with self.assertRaises(CommandError):
            call_command('changepassword', username='joe', stdout=self.stdout, stderr=self.stderr)

    @mock.patch.object(changepassword.Command, '_get_pass', return_value='1234567890')
    def test_password_validation(self, mock_get_pass):
        """
        A CommandError should be raised if the user enters in passwords which
        fail validation three times.
        """
        abort_msg = "Aborting password change for user 'joe' after 3 attempts"
        with self.assertRaisesMessage(CommandError, abort_msg):
            call_command('changepassword', username='joe', stdout=self.stdout, stderr=self.stderr)
        self.assertIn('This password is entirely numeric.', self.stderr.getvalue())

    @mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty')
    def test_that_changepassword_command_works_with_nonascii_output(self, mock_get_pass):
        """
        #21627 -- Executing the changepassword management command should allow
        non-ASCII characters from the User object representation.
        """
        # 'Julia' with accented 'u':
        User.objects.create_user(username='J\xfalia', password='qwerty')
        call_command('changepassword', username='J\xfalia', stdout=self.stdout)
class MultiDBChangepasswordManagementCommandTestCase(TestCase):
    """``changepassword`` behavior when multiple databases are configured."""
    multi_db = True

    @mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty')
    def test_that_changepassword_command_with_database_option_uses_given_db(self, mock_get_pass):
        """
        changepassword --database should operate on the specified DB.
        """
        # Create the user on the 'other' database only.
        user = User.objects.db_manager('other').create_user(username='joe', password='qwerty')
        self.assertTrue(user.check_password('qwerty'))

        out = six.StringIO()
        call_command('changepassword', username='joe', database='other', stdout=out)
        command_output = out.getvalue().strip()

        self.assertEqual(
            command_output,
            "Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
        )
        # The change must be visible on 'other', where the user lives.
        self.assertTrue(User.objects.using('other').get(username="joe").check_password('not qwerty'))
@override_settings(
    SILENCED_SYSTEM_CHECKS=['fields.W342'],  # ForeignKey(unique=True)
    AUTH_PASSWORD_VALIDATORS=[{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}],
)
class CreatesuperuserManagementCommandTestCase(TestCase):
    """Tests for the ``createsuperuser`` management command, covering
    non-interactive and interactive modes, swappable user models, FK
    fields, TTY detection and password validation."""

    def test_basic_usage(self):
        "Check the operation of the createsuperuser management command"
        # We can use the management command to create a superuser
        new_io = six.StringIO()
        call_command(
            "createsuperuser",
            interactive=False,
            username="joe",
            email="joe@somewhere.org",
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = User.objects.get(username="joe")
        self.assertEqual(u.email, 'joe@somewhere.org')

        # created password should be unusable
        self.assertFalse(u.has_usable_password())

    @mock_inputs({
        'password': "nopasswd",
        'u\u017eivatel': 'foo',  # username (cz)
        'email': 'nolocale@somewhere.org'})
    def test_non_ascii_verbose_name(self):
        # Temporarily give the username field a non-ASCII verbose_name so the
        # interactive prompt contains non-ASCII text.
        username_field = User._meta.get_field('username')
        old_verbose_name = username_field.verbose_name
        username_field.verbose_name = _('u\u017eivatel')
        new_io = six.StringIO()
        try:
            call_command(
                "createsuperuser",
                interactive=True,
                stdout=new_io,
                stdin=MockTTY(),
            )
        finally:
            username_field.verbose_name = old_verbose_name

        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')

    def test_verbosity_zero(self):
        # We can suppress output on the management command
        new_io = six.StringIO()
        call_command(
            "createsuperuser",
            interactive=False,
            username="joe2",
            email="joe2@somewhere.org",
            verbosity=0,
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, '')
        u = User.objects.get(username="joe2")
        self.assertEqual(u.email, 'joe2@somewhere.org')
        self.assertFalse(u.has_usable_password())

    def test_email_in_username(self):
        # An email address is a valid username value.
        new_io = six.StringIO()
        call_command(
            "createsuperuser",
            interactive=False,
            username="joe+admin@somewhere.org",
            email="joe@somewhere.org",
            stdout=new_io
        )
        u = User._default_manager.get(username="joe+admin@somewhere.org")
        self.assertEqual(u.email, 'joe@somewhere.org')
        self.assertFalse(u.has_usable_password())

    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
    def test_swappable_user(self):
        "A superuser can be created when a custom User model is in use"
        # We can use the management command to create a superuser
        # We skip validation because the temporary substitution of the
        # swappable User model messes with validation.
        new_io = six.StringIO()
        call_command(
            "createsuperuser",
            interactive=False,
            email="joe@somewhere.org",
            date_of_birth="1976-04-01",
            stdout=new_io,
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = CustomUser._default_manager.get(email="joe@somewhere.org")
        self.assertEqual(u.date_of_birth, date(1976, 4, 1))

        # created password should be unusable
        self.assertFalse(u.has_usable_password())

    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
    def test_swappable_user_missing_required_field(self):
        "A Custom superuser won't be created when a required field isn't provided"
        # We can use the management command to create a superuser
        # We skip validation because the temporary substitution of the
        # swappable User model messes with validation.
        new_io = six.StringIO()
        with self.assertRaises(CommandError):
            call_command(
                "createsuperuser",
                interactive=False,
                username="joe@somewhere.org",
                stdout=new_io,
                stderr=new_io,
            )
        self.assertEqual(CustomUser._default_manager.count(), 0)

    @override_settings(
        AUTH_USER_MODEL='auth_tests.CustomUserNonUniqueUsername',
        AUTHENTICATION_BACKENDS=['my.custom.backend'],
    )
    def test_swappable_user_username_non_unique(self):
        # With a non-default auth backend, a non-unique USERNAME_FIELD is
        # allowed, so the same username can be created twice.
        @mock_inputs({
            'username': 'joe',
            'password': 'nopasswd',
        })
        def createsuperuser():
            new_io = six.StringIO()
            call_command(
                "createsuperuser",
                interactive=True,
                email="joe@somewhere.org",
                stdout=new_io,
                stdin=MockTTY(),
            )
            command_output = new_io.getvalue().strip()
            self.assertEqual(command_output, 'Superuser created successfully.')

        for i in range(2):
            createsuperuser()

        users = CustomUserNonUniqueUsername.objects.filter(username="joe")
        self.assertEqual(users.count(), 2)

    def test_skip_if_not_in_TTY(self):
        """
        If the command is not called from a TTY, it should be skipped and a
        message should be displayed (#7423).
        """
        class FakeStdin(object):
            """A fake stdin object that has isatty() return False."""
            def isatty(self):
                return False

        out = six.StringIO()
        call_command(
            "createsuperuser",
            stdin=FakeStdin(),
            stdout=out,
            interactive=True,
        )

        self.assertEqual(User._default_manager.count(), 0)
        self.assertIn("Superuser creation skipped", out.getvalue())

    def test_passing_stdin(self):
        """
        You can pass a stdin object as an option and it should be
        available on self.stdin.
        If no such option is passed, it defaults to sys.stdin.
        """
        sentinel = object()
        command = createsuperuser.Command()
        call_command(
            command,
            stdin=sentinel,
            stdout=six.StringIO(),
            stderr=six.StringIO(),
            interactive=False,
            verbosity=0,
            username='janet',
            email='janet@example.com',
        )
        self.assertIs(command.stdin, sentinel)

        command = createsuperuser.Command()
        call_command(
            command,
            stdout=six.StringIO(),
            stderr=six.StringIO(),
            interactive=False,
            verbosity=0,
            username='joe',
            email='joe@example.com',
        )
        self.assertIs(command.stdin, sys.stdin)

    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithFK')
    def test_fields_with_fk(self):
        # REQUIRED_FIELDS that are ForeignKeys accept the related pk.
        new_io = six.StringIO()
        group = Group.objects.create(name='mygroup')
        email = Email.objects.create(email='mymail@gmail.com')
        call_command(
            'createsuperuser',
            interactive=False,
            username=email.pk,
            email=email.email,
            group=group.pk,
            stdout=new_io,
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = CustomUserWithFK._default_manager.get(email=email)
        self.assertEqual(u.username, email)
        self.assertEqual(u.group, group)

        # A pk that resolves to no related instance is a CommandError.
        non_existent_email = 'mymail2@gmail.com'
        msg = 'email instance with email %r does not exist.' % non_existent_email
        with self.assertRaisesMessage(CommandError, msg):
            call_command(
                'createsuperuser',
                interactive=False,
                username=email.pk,
                email=non_existent_email,
                stdout=new_io,
            )

    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithFK')
    def test_fields_with_fk_interactive(self):
        new_io = six.StringIO()
        group = Group.objects.create(name='mygroup')
        email = Email.objects.create(email='mymail@gmail.com')

        # Prompt keys include the FK's "(model.field)" hint text.
        @mock_inputs({
            'password': 'nopasswd',
            'username (email.id)': email.pk,
            'email (email.email)': email.email,
            'group (group.id)': group.pk,
        })
        def test(self):
            call_command(
                'createsuperuser',
                interactive=True,
                stdout=new_io,
                stdin=MockTTY(),
            )

            command_output = new_io.getvalue().strip()
            self.assertEqual(command_output, 'Superuser created successfully.')
            u = CustomUserWithFK._default_manager.get(email=email)
            self.assertEqual(u.username, email)
            self.assertEqual(u.group, group)

        test(self)

    def test_password_validation(self):
        """
        Creation should fail if the password fails validation.
        """
        new_io = six.StringIO()

        # Returns '1234567890' the first two times it is called, then
        # 'password' subsequently.
        def bad_then_good_password(index=[0]):
            # Mutable default deliberately carries call count across calls.
            index[0] += 1
            if index[0] <= 2:
                return '1234567890'
            return 'password'

        @mock_inputs({
            'password': bad_then_good_password,
            'username': 'joe1234567890',
        })
        def test(self):
            call_command(
                "createsuperuser",
                interactive=True,
                stdin=MockTTY(),
                stdout=new_io,
                stderr=new_io,
            )
            self.assertEqual(
                new_io.getvalue().strip(),
                "This password is entirely numeric.\n"
                "Superuser created successfully."
            )

        test(self)

    def test_validation_mismatched_passwords(self):
        """
        Creation should fail if the user enters mismatched passwords.
        """
        new_io = six.StringIO()

        # The first two passwords do not match, but the second two do match and
        # are valid.
        entered_passwords = ["password", "not password", "password2", "password2"]

        def mismatched_passwords_then_matched():
            return entered_passwords.pop(0)

        @mock_inputs({
            'password': mismatched_passwords_then_matched,
            'username': 'joe1234567890',
        })
        def test(self):
            call_command(
                "createsuperuser",
                interactive=True,
                stdin=MockTTY(),
                stdout=new_io,
                stderr=new_io,
            )
            self.assertEqual(
                new_io.getvalue().strip(),
                "Error: Your passwords didn't match.\n"
                "Superuser created successfully."
            )

        test(self)

    def test_validation_blank_password_entered(self):
        """
        Creation should fail if the user enters blank passwords.
        """
        new_io = six.StringIO()

        # The first two passwords are empty strings, but the second two are
        # valid.
        entered_passwords = ["", "", "password2", "password2"]

        def blank_passwords_then_valid():
            return entered_passwords.pop(0)

        @mock_inputs({
            'password': blank_passwords_then_valid,
            'username': 'joe1234567890',
        })
        def test(self):
            call_command(
                "createsuperuser",
                interactive=True,
                stdin=MockTTY(),
                stdout=new_io,
                stderr=new_io,
            )
            self.assertEqual(
                new_io.getvalue().strip(),
                "Error: Blank passwords aren't allowed.\n"
                "Superuser created successfully."
            )

        test(self)
class MultiDBCreatesuperuserTestCase(TestCase):
    """``createsuperuser`` behavior when multiple databases are configured."""
    multi_db = True

    def test_createsuperuser_command_with_database_option(self):
        """
        createsuperuser --database should operate on the specified DB.
        """
        new_io = six.StringIO()
        call_command(
            'createsuperuser',
            interactive=False,
            username='joe',
            email='joe@somewhere.org',
            database='other',
            stdout=new_io,
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        # The new superuser must exist on the 'other' database.
        user = User.objects.using('other').get(username='joe')
        self.assertEqual(user.email, 'joe@somewhere.org')
class CustomUserModelValidationTestCase(SimpleTestCase):
    """System-check (auth.E001/E002/E003/W004) validation of custom user
    models declared with misconfigured USERNAME_FIELD/REQUIRED_FIELDS."""

    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserNonListRequiredFields')
    @override_system_checks([check_user_model])
    @isolate_apps('auth_tests', kwarg_name='apps')
    def test_required_fields_is_list(self, apps):
        """REQUIRED_FIELDS should be a list."""
        class CustomUserNonListRequiredFields(AbstractBaseUser):
            username = models.CharField(max_length=30, unique=True)
            date_of_birth = models.DateField()

            USERNAME_FIELD = 'username'
            REQUIRED_FIELDS = 'date_of_birth'  # invalid: plain string

        errors = checks.run_checks(app_configs=apps.get_app_configs())
        expected = [
            checks.Error(
                "'REQUIRED_FIELDS' must be a list or tuple.",
                obj=CustomUserNonListRequiredFields,
                id='auth.E001',
            ),
        ]
        self.assertEqual(errors, expected)

    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserBadRequiredFields')
    @override_system_checks([check_user_model])
    @isolate_apps('auth_tests', kwarg_name='apps')
    def test_username_not_in_required_fields(self, apps):
        """USERNAME_FIELD should not appear in REQUIRED_FIELDS."""
        class CustomUserBadRequiredFields(AbstractBaseUser):
            username = models.CharField(max_length=30, unique=True)
            date_of_birth = models.DateField()

            USERNAME_FIELD = 'username'
            REQUIRED_FIELDS = ['username', 'date_of_birth']  # invalid overlap

        errors = checks.run_checks(apps.get_app_configs())
        expected = [
            checks.Error(
                "The field named as the 'USERNAME_FIELD' for a custom user model "
                "must not be included in 'REQUIRED_FIELDS'.",
                obj=CustomUserBadRequiredFields,
                id='auth.E002',
            ),
        ]
        self.assertEqual(errors, expected)

    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserNonUniqueUsername')
    @override_system_checks([check_user_model])
    def test_username_non_unique(self):
        """
        A non-unique USERNAME_FIELD should raise an error only if we use the
        default authentication backend. Otherwise, a warning should be raised.
        """
        errors = checks.run_checks()
        expected = [
            checks.Error(
                "'CustomUserNonUniqueUsername.username' must be "
                "unique because it is named as the 'USERNAME_FIELD'.",
                obj=CustomUserNonUniqueUsername,
                id='auth.E003',
            ),
        ]
        self.assertEqual(errors, expected)

        # A custom backend downgrades the error to a warning.
        with self.settings(AUTHENTICATION_BACKENDS=['my.custom.backend']):
            errors = checks.run_checks()
            expected = [
                checks.Warning(
                    "'CustomUserNonUniqueUsername.username' is named as "
                    "the 'USERNAME_FIELD', but it is not unique.",
                    hint='Ensure that your authentication backend(s) can handle non-unique usernames.',
                    obj=CustomUserNonUniqueUsername,
                    id='auth.W004',
                )
            ]
            self.assertEqual(errors, expected)
class PermissionTestCase(TestCase):
    """Tests for permission creation via ``create_permissions`` and the
    validation of custom permission definitions on a model's Meta."""

    def setUp(self):
        # Snapshot Permission._meta attributes mutated by the tests.
        self._original_permissions = Permission._meta.permissions[:]
        self._original_default_permissions = Permission._meta.default_permissions
        self._original_verbose_name = Permission._meta.verbose_name

    def tearDown(self):
        Permission._meta.permissions = self._original_permissions
        Permission._meta.default_permissions = self._original_default_permissions
        Permission._meta.verbose_name = self._original_verbose_name
        ContentType.objects.clear_cache()

    def test_duplicated_permissions(self):
        """
        Test that we show proper error message if we are trying to create
        duplicate permissions.
        """
        auth_app_config = apps.get_app_config('auth')

        # check duplicated default permission
        Permission._meta.permissions = [
            ('change_permission', 'Can edit permission (duplicate)')]
        msg = (
            "The permission codename 'change_permission' clashes with a "
            "builtin permission for model 'auth.Permission'."
        )
        with self.assertRaisesMessage(CommandError, msg):
            create_permissions(auth_app_config, verbosity=0)

        # check duplicated custom permissions
        Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
            ('other_one', 'Some other permission'),
            ('my_custom_permission', 'Some permission with duplicate permission code'),
        ]
        msg = "The permission codename 'my_custom_permission' is duplicated for model 'auth.Permission'."
        with self.assertRaisesMessage(CommandError, msg):
            create_permissions(auth_app_config, verbosity=0)

        # should not raise anything
        Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
            ('other_one', 'Some other permission'),
        ]
        create_permissions(auth_app_config, verbosity=0)

    def test_default_permissions(self):
        auth_app_config = apps.get_app_config('auth')

        permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
        Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
        ]
        create_permissions(auth_app_config, verbosity=0)

        # add/change/delete permission by default + custom permission
        self.assertEqual(Permission.objects.filter(
            content_type=permission_content_type,
        ).count(), 4)

        Permission.objects.filter(content_type=permission_content_type).delete()
        Permission._meta.default_permissions = []
        create_permissions(auth_app_config, verbosity=0)

        # custom permission only since default permissions is empty
        self.assertEqual(Permission.objects.filter(
            content_type=permission_content_type,
        ).count(), 1)

    def test_verbose_name_length(self):
        # An over-long verbose_name makes the generated permission name invalid.
        auth_app_config = apps.get_app_config('auth')

        permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
        Permission.objects.filter(content_type=permission_content_type).delete()
        Permission._meta.verbose_name = "some ridiculously long verbose name that is out of control" * 5

        msg = "The verbose_name of auth.permission is longer than 244 characters"
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            create_permissions(auth_app_config, verbosity=0)

    def test_custom_permission_name_length(self):
        # Custom permission names are capped at 255 characters.
        auth_app_config = apps.get_app_config('auth')
        ContentType.objects.get_by_natural_key('auth', 'permission')
        custom_perm_name = 'a' * 256
        Permission._meta.permissions = [
            ('my_custom_permission', custom_perm_name),
        ]
        try:
            msg = (
                "The permission name %s of auth.permission is longer than "
                "255 characters" % custom_perm_name
            )
            with self.assertRaisesMessage(exceptions.ValidationError, msg):
                create_permissions(auth_app_config, verbosity=0)
        finally:
            Permission._meta.permissions = []
|
yephper/django
|
tests/auth_tests/test_management.py
|
Python
|
bsd-3-clause
| 28,594
|
# -*- coding: utf-8 -*-
"""
top_rmq.utils
~~~~~
Implements RMQConn object for publishing and consuming messages to/from
RabbitMQ queues.
:copyright: (c) 2017 Peter Schutt
"""
import configparser
import json
import logging
import pathlib
import ssl
import backoff
import pika
LOGGER = logging.getLogger(__name__)
def backoff_hdlr(d):
    """Debug-log one backoff retry event.

    ``d`` is the details dict supplied by the backoff library
    (wait, tries, target, args, kwargs).
    """
    message = (f"Backing off {d['wait']:0.1f} seconds after {d['tries']} "
               f"tries calling function {d['target']} with args {d['args']} "
               f"and kwargs {d['kwargs']}")
    LOGGER.debug(message)
class RMQConn:
""" Class that holds a connection to a RabbitMQ message queue and
provides methods to connect, publish/consume and disconnect from
a RabbitMQ server.
Implements a context manager for easy connections and tidy closing.
"""
def __init__(self, cnf_name, cnf_loc=None):
""" Create RMQConn instance.
:param cnf_identifier: Identifier string that designates desired
configuration in configuration file.
:param cnf_loc: (optional) Specify location of the configuration \
file for the application.
(default=pathlib.Path.home()}/.top_rmq/rmq.cnf)
"""
self._conn = None
cnf_loc = cnf_loc or f"{pathlib.Path.home()}/.toputils/rmq.local"
cnf = configparser.ConfigParser()
cnf.read(cnf_loc)
self.params = cnf[cnf_name]
self.conn_params_obj = self._get_conn_params()
self.ex_params = self._get_exchange_params()
self.b_params = self._get_bind_params()
self.q_params = self._get_queue_params()
self.pub_params = self._get_publish_params()
def _get_conn_params(self):
cred_params = {
"username": self.params.get("username", "guest"),
"password": self.params.get("password", "guest"),
}
cred_params = {k: v for k, v in cred_params.items() if v is not None}
creds = pika.credentials.PlainCredentials(**cred_params)
conn_params = {
"host": self.params.get("host"),
"port": self.params.get("port"),
"virtual_host": self.params.get("virtual_host"),
"channel_max": self.params.get("channel_max"),
"frame_max": self.params.get("frame_max"),
"heartbeat": self.params.get("heartbeat"),
"ssl": self.params.getboolean("ssl"),
"connection_attempts": self.params.get("connection_attempts"),
"retry_delay": self.params.get("retry_delay"),
"socket_timeout": self.params.get("socket_timeout"),
"locale": self.params.get("locale"),
"backpressure_detection": self.params.getboolean(
"backpressure_detection"
),
"blocked_connection_timeout": self.params.get(
"blocked_connection_timeout"
),
"client_properties": self.params.get("client_properties")
}
if conn_params["ssl"]:
conn_params["port"] = 5671
conn_params["ssl_options"] = {
"keyfile": self.params.get("keyfile"),
"certfile": self.params.get("certfile"),
"ca_certs": self.params.get("ca_certs"),
"ssl_version": ssl.PROTOCOL_TLSv1,
"cert_reqs": ssl.CERT_REQUIRED
}
conn_params = {
k: v for k, v in conn_params.items() if v is not None
}
for key in ("port", "channel_max", "frame_max", "heartbeat",
"connection_attempts", "blocked_connection_timeout"):
try:
conn_params[key] = int(conn_params[key])
except KeyError:
pass
for key in ("retry_delay", "socket_timeout"):
try:
conn_params[key] = float(conn_params[key])
except KeyError:
pass
conn_params["credentials"] = creds
return pika.ConnectionParameters(**conn_params)
def _get_exchange_params(self):
ex_params = {
"exchange": self.params.get("exchange"),
"exchange_type": self.params.get("exchange_type"),
"passive": self.params.getboolean("ex_passive"),
"durable": self.params.getboolean("ex_durable"),
"auto_delete": self.params.getboolean("ex_auto_delete"),
"internal": self.params.getboolean("internal"),
"nowait": self.params.getboolean("ex_nowait"),
"arguments": self.params.get("ex_arguments")
}
ex_params = {k: v for k, v in ex_params.items() if v is not None}
try:
ex_params["arguments"] = json.loads(ex_params["arguments"])
except KeyError:
pass
return ex_params
def _get_queue_params(self):
q_params = {
"queue": self.params.get("queue"),
"passive": self.params.getboolean("q_passive"),
"durable": self.params.getboolean("q_durable"),
"exclusive": self.params.getboolean("exclusive"),
"auto_delete": self.params.getboolean("q_auto_delete"),
"nowait": self.params.getboolean("q_nowait"),
"arguments": self.params.get("q_arguments")
}
q_params = {k: v for k, v in q_params.items() if v is not None}
try:
q_params["arguments"] = json.loads(q_params["arguments"])
except KeyError:
pass
return q_params
def _get_publish_params(self):
pub_params = {
"routing_key": self.params.get("routing_key"),
"mandatory": self.params.getboolean("mandatory"),
"immediate": self.params.getboolean("immediate")
}
pub_params = {k: v for k, v in pub_params.items() if v is not None}
properties = {
"content_type": self.params.get("content_type"),
"content_encoding": self.params.get("content_encoding"),
"headers": self.params.get("headers"),
"delivery_mode": self.params.get("delivery_mode"),
"priority": self.params.get("priority")
}
properties = {k: v for k, v in properties.items() if v is not None}
try:
properties['delivery_mode'] = int(properties['delivery_mode'])
except KeyError:
pass
pub_params["properties"] = pika.BasicProperties(**{
k: v for k, v in properties.items() if v is not None
})
return pub_params
def _get_bind_params(self):
b_params = {
"nowait": self.params.get("b_nowait"),
"arguments": self.params.get("b_arguments")
}
b_params = {k: v for k, v in b_params.items() if v is not None}
try:
b_params["arguments"] = json.loads(b_params["arguments"])
except KeyError:
pass
return b_params
    @backoff.on_exception(backoff.expo,
                          pika.exceptions.ConnectionClosed,
                          max_tries=5,
                          on_backoff=backoff_hdlr)
    def connect(self):
        """Connect to RabbitMQ and set up the messaging topology.

        Opens a blocking connection, creates a channel with publisher
        confirms enabled, declares the configured queue and exchange, and
        binds the queue to the exchange under the configured routing key.
        Retries with exponential backoff (up to 5 attempts) when the
        broker closes the connection.
        """
        self._conn = pika.BlockingConnection(self.conn_params_obj)
        self._channel = self._conn.channel()
        self._channel.queue_declare(**self.q_params)
        # enable publisher confirms so publish() can report delivery status
        self._channel.confirm_delivery()
        self._channel.exchange_declare(**self.ex_params)
        # bind only after both queue and exchange have been declared
        self._channel.queue_bind(
            queue=self.q_params.get("queue"),
            exchange=self.ex_params.get("exchange"),
            routing_key=self.pub_params.get("routing_key"),
            **self.b_params
        )
def close(self):
"""Close channel and connection if they have been established."""
if self._channel is not None:
try:
self._channel.close()
except (pika.exceptions.ChannelClosed,
pika.exceptions.ChannelAlreadyClosing):
pass
if self._conn is not None:
self._conn.close()
    def __enter__(self):
        """Context-manager entry: open the connection/channel, return self."""
        self.connect()
        return self
    def __exit__(self, exception_type, exception_value, traceback):
        """Context-manager exit: always release channel and connection."""
        self.close()
    @backoff.on_exception(backoff.expo,
                          pika.exceptions.NackError,
                          max_tries=5,
                          on_backoff=backoff_hdlr)
    def publish(self, msg):
        """Publish *msg* to the configured exchange and log the outcome.

        Retries with exponential backoff (up to 5 attempts) when the
        broker nacks the message.

        NOTE(review): treating ``basic_publish`` as returning a truthy
        confirmation is pika<1.0 behaviour; in pika>=1.0 it returns None
        and raises on failure — confirm which pika version is pinned.
        """
        if self._channel.basic_publish(
            exchange=self.ex_params.get("exchange"),
            body=msg,
            **self.pub_params
        ):
            LOGGER.info('Message publish was confirmed')
        else:
            LOGGER.warning('Message could not be confirmed')
|
5uper5hoot/toputils
|
toputils/rmq/utils.py
|
Python
|
mit
| 8,845
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
"""
Interface for VW's active learning mode, which must be communicated with
over a socked.
Derived in great part from
https://github.com/JohnLangford/vowpal_wabbit/blob/master/utl/active_interactor.py
by Michael J.T. O'Kelly, 2014-04-11
"""
import socket
import time
import pexpect
DEFAULT_PORT = 26542
CONNECTION_WAIT = 0.1 # Time between socket connection attempts
MAX_CONNECTION_ATTEMPTS = 50
def get_active_default_settings():
    """Return the default option dict for running VW in active-learning mode.

    Enables active mode on the default port and discards predictions.
    """
    return {
        'active': True,
        'port': DEFAULT_PORT,
        'predictions': '/dev/null',
    }
class ActiveVWProcess():
    """Spawn and talk to a VW (Vowpal Wabbit) process in active learning mode.

    Implements a subset of the pexpect.spawn() interface (sendline,
    expect_exact, before, close) so that it can be a drop-in replacement
    for the VW.vw_process member. All communication happens over a TCP
    socket to the spawned process, not over its stdin/stdout.
    """
    # Bytes received from the socket but not yet consumed by _recvline().
    # bytes are immutable, so this class-level default is safe: each
    # instance rebinds its own _buffer on first use.
    _buffer = b''
    def __init__(self, command, port=DEFAULT_PORT):
        """'command' is assumed to have the necessary options for use with this
        class, which should be guaranteed in the calling context."""
        # Launch the VW process, which we will communicate with only
        # via its socket
        self.vw_process = pexpect.spawn(command)
        time.sleep(5) #the spawned process is not instantly ready for connections
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        connection_tries = 0
        # Poll until the spawned process starts listening.
        # NOTE(review): if every attempt fails we fall through silently and
        # later socket calls will raise — consider raising here instead.
        while connection_tries < MAX_CONNECTION_ATTEMPTS:
            try:
                self.sock.connect(('127.0.0.1', port))
                break  # Quit this loop once successful
            except socket.error:
                connection_tries += 1
                time.sleep(CONNECTION_WAIT)
        self.before = None
    def sendline(self, line):
        # Append the newline and encode before sending over the socket.
        line = line + '\n' # This would have been added automatically by pexpect
        if not isinstance(line, bytes):
            line = line.encode('UTF-8')
        self.sock.sendall(line)
    def _recvline(self):
        # Return the next newline-terminated line (newline stripped),
        # keeping any extra received bytes buffered for later calls.
        # If the peer closes the connection (recv returns b''), whatever
        # remains in the buffer is returned as the final "line".
        if b'\n' in self._buffer:
            line, _, self._buffer = self._buffer.partition(b'\n')
            return line
        while True:
            more = self.sock.recv(4096)
            self._buffer += more
            if not more:
                rv = self._buffer
                self._buffer = b''
                return rv
            if b'\n' in more:
                line, _, self._buffer = self._buffer.partition(b'\n')
                return line
    def expect_exact(self, *args, **kwargs):
        """This does not attempt to duplicate the expect_exact API,
        but just sets self.before to the latest response line."""
        response = self._recvline()
        self.before = response.strip()
    def close(self):
        # Release both the socket and the spawned VW process.
        self.sock.close()
        self.vw_process.close()
|
MrMathias/wabbit_wappa
|
wabbit_wappa/active_learner.py
|
Python
|
mit
| 3,003
|
# file test_existdb/test_query.py
#
# Copyright 2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import unittest
from eulxml import xmlmap
from eulexistdb.db import ExistDB
from eulexistdb.exceptions import DoesNotExist
from eulexistdb.exceptions import ReturnedMultiple
from eulexistdb.query import QuerySet, Xquery, XmlQuery
from test_existdb.test_db import EXISTDB_SERVER_URL
from localsettings import EXISTDB_SERVER_URL, EXISTDB_SERVER_USER, \
EXISTDB_SERVER_PASSWORD, EXISTDB_TEST_COLLECTION
class QuerySubModel(xmlmap.XmlObject):
    """XmlObject mapping for the <sub> element inside the test fixtures."""
    subname = xmlmap.StringField("subname")
    ssc = xmlmap.NodeField('subsubclass', xmlmap.XmlObject)
class QueryTestModel(xmlmap.XmlObject):
    """XmlObject mapping for the <root> fixture documents queried in tests."""
    ROOT_NAMESPACES = {'ex': 'http://example.com/'}
    id = xmlmap.StringField('@id')
    name = xmlmap.StringField('name')
    description = xmlmap.StringField('description')
    # deliberately odd element name; used by the exists-filter tests
    wnn = xmlmap.IntegerField('wacky_node_name')
    sub = xmlmap.NodeField("sub", QuerySubModel)
    # ORed xpath: first present of name, description, or @id
    or_field = xmlmap.StringField('name|description|@id')
    # xpath-function field: first character of name
    substring = xmlmap.StringField('substring(name, 1, 1)')
    # namespaced element (ex: prefix declared in ROOT_NAMESPACES)
    nsfield = xmlmap.StringField('ex:field')
    years = xmlmap.StringListField('year')
COLLECTION = EXISTDB_TEST_COLLECTION
FIXTURE_ONE = '''
<root id="one" xmlns:ex='http://example.com/'>
<name>one</name>
<description>this one has one one
</description>
<wacky_node_name>42</wacky_node_name>
<sub>
<subname>la</subname>
</sub>
<ex:field>namespaced</ex:field>
<year>2001</year>
<year>2000</year>
</root>
'''
FIXTURE_TWO = '''
<root id="abc">
<name>two</name>
<description>this one only has two</description>
<year>1990</year>
<year>1999</year>
<year>2013</year>
</root>
'''
FIXTURE_THREE = '''
<root id="xyz">
<name>three</name>
<description>third!</description>
<year>2010</year>
</root>
'''
FIXTURE_FOUR = '''
<root id="def">
<name>four</name>
<description>This one contains "quote" and &!</description>
</root>
'''
NUM_FIXTURES = 4
def load_fixtures(db):
    """Create the test collection and load the four fixture documents
    as f1.xml through f4.xml."""
    db.createCollection(COLLECTION, True)
    fixtures = (FIXTURE_ONE, FIXTURE_TWO, FIXTURE_THREE, FIXTURE_FOUR)
    for index, fixture in enumerate(fixtures, start=1):
        db.load(fixture, '%s/f%d.xml' % (COLLECTION, index))
class ExistQueryTest(unittest.TestCase):
    """Integration tests for QuerySet filtering, sorting, slicing, and
    field-return behavior against a live eXist-db instance.

    Fixtures are loaded fresh in setUp and removed in tearDown, so every
    test runs against the same four known documents.

    Note: the deprecated ``assert_`` alias (removed in Python 3.12) has
    been replaced with ``assertTrue`` throughout.
    """
    def setUp(self):
        self.db = ExistDB(server_url=EXISTDB_SERVER_URL,
            username=EXISTDB_SERVER_USER, password=EXISTDB_SERVER_PASSWORD)
        load_fixtures(self.db)
        self.qs = QuerySet(using=self.db, xpath='/root', collection=COLLECTION, model=QueryTestModel)
    def tearDown(self):
        self.db.removeCollection(COLLECTION)
        # release any queryset sessions before test user account
        # is removed in module teardown
        del self.qs
    def test_count(self):
        load_fixtures(self.db)
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "queryset count returns number of fixtures")
    def test_getitem(self):
        qs = self.qs.order_by('id')     # adding sort order to test reliably
        self.assertEqual("abc", qs[0].id)
        self.assertEqual("def", qs[1].id)
        self.assertEqual("one", qs[2].id)
        self.assertEqual("xyz", qs[3].id)
        # test getting single item beyond initial set
        qs = self.qs.order_by('id')
        # load initial result cache
        self.assertEqual("abc", qs[0].id)
        # retrieve individual items beyond the current cache
        self.assertEqual("one", qs[2].id)
        self.assertEqual("xyz", qs[3].id)
    def test_getitem_typeerror(self):
        self.assertRaises(TypeError, self.qs.__getitem__, "foo")
    def test_getitem_indexerror(self):
        self.assertRaises(IndexError, self.qs.__getitem__, -1)
        self.assertRaises(IndexError, self.qs.__getitem__, 23)
    def test_getslice(self):
        # local renamed from `slice` to avoid shadowing the builtin
        sliced = self.qs.order_by('id')[0:2]
        self.assertTrue(isinstance(sliced, QuerySet))
        self.assertTrue(isinstance(sliced[0], QueryTestModel))
        self.assertEqual(2, sliced.count())
        self.assertEqual(2, len(sliced))
        self.assertEqual('abc', sliced[0].id)
        self.assertEqual('def', sliced[1].id)
        self.assertRaises(IndexError, sliced.__getitem__, 2)
        sliced = self.qs.order_by('id')[1:3]
        self.assertEqual('def', sliced[0].id)
        self.assertEqual('one', sliced[1].id)
        sliced = self.qs.order_by('id')[3:5]
        self.assertEqual(1, sliced.count())
        self.assertEqual('xyz', sliced[0].id)
        self.assertRaises(IndexError, sliced.__getitem__, 1)
        # test slicing with unspecified bounds
        sliced = self.qs.order_by('id')[:2]
        self.assertEqual(2, sliced.count())
        self.assertEqual('def', sliced[1].id)
        sliced = self.qs.order_by('id')[1:]
        self.assertEqual(3, sliced.count())
        self.assertEqual('one', sliced[1].id)
        self.assertEqual('xyz', sliced[2].id)
    def test_filter(self):
        fqs = self.qs.filter(contains="two")
        self.assertEqual(1, fqs.count(), "count returns 1 when filtered - contains 'two'")
        self.assertEqual("two", fqs[0].name, "name matches filter")
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
    def test_filter_field(self):
        fqs = self.qs.filter(name="one")
        self.assertEqual(1, fqs.count(), "count returns 1 when filtered on name = 'one' (got %s)"
                         % self.qs.count())
        self.assertEqual("one", fqs[0].name, "name matches filter")
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
    def test_filter_xmlquery(self):
        fqs = self.qs.filter(name=XmlQuery(term="one"))
        self.assertEqual(1, fqs.count(),
            "count returns 1 when filtered on name = <query><term>one</term></query> (got %s)"
            % self.qs.count())
        self.assertEqual("one", fqs[0].name, "name matches filter")
    def test_filter_field_xpath(self):
        fqs = self.qs.filter(id="abc")
        self.assertEqual(1, fqs.count(), "count returns 1 when filtered on @id = 'abc' (got %s)"
                         % self.qs.count())
        self.assertEqual("two", fqs[0].name, "name returned is correct for id filter")
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
    def test_filter_field_contains(self):
        fqs = self.qs.filter(name__contains="o")
        self.assertEqual(3, fqs.count(),
            "should get 3 matches for filter on name contains 'o' (got %s)" % fqs.count())
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
    def test_filter_field_contains_special(self):
        fqs = self.qs.filter(description__contains=' "quote" ')
        self.assertEqual(1, fqs.count(),
            "should get 1 match for filter on desc contains ' \"quote\" ' (got %s)" % fqs.count())
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
        fqs = self.qs.filter(description__contains=' &!')
        self.assertEqual(1, fqs.count(),
            "should get 1 match for filter on desc contains ' &!' (got %s)" % fqs.count())
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
    def test_filter_field_startswith(self):
        fqs = self.qs.filter(name__startswith="o")
        self.assertEqual(1, fqs.count(),
            "should get 1 match for filter on name starts with 'o' (got %s)" % fqs.count())
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
    def test_filter_subobject_field(self):
        fqs = self.qs.filter(sub__subname="la")
        self.assertEqual(1, fqs.count(),
            "should get 1 match for filter on sub_subname = 'la' (got %s)" % fqs.count())
    def test_filter_in(self):
        fqs = self.qs.filter(id__in=['abc', 'xyz', 'qrs'])
        self.assertEqual(
            2, fqs.count(),
            "should get 2 matches for filter on id in list (got %s)" % fqs.count())
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
        fqs = self.qs.filter(document_name__in=['f1.xml', 'f2.xml'])
        self.assertEqual(
            2, fqs.count(),
            "should get 2 matches for filter on document name in list (got %s)" % fqs.count())
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
        # filtering on a special field - should still be able to return/access it via only
        fqs = self.qs.filter(document_name__in=['f1.xml', 'f2.xml']) \
                     .only('id', 'document_name').order_by('document_name')
        self.assertEqual(
            2, fqs.count(),
            "should get 2 matches for filter on document name in list (got %s)" % fqs.count())
        self.assertEqual('f1.xml', fqs[0].document_name)
        fqs = self.qs.filter(document_name__in=['f1.xml', 'f2.xml']) \
                     .also('id', 'document_name').order_by('document_name')
        self.assertEqual(
            2, fqs.count(),
            "should get 2 matches for filter on document name in list (got %s)" % fqs.count())
        self.assertEqual('f1.xml', fqs[0].document_name)
    def test_filter_exists(self):
        fqs = self.qs.filter(id__exists=True)
        self.assertEqual(4, fqs.count(),
                         "filter on id exists=true returns all documents")
        fqs = self.qs.filter(id__exists=False)
        self.assertEqual(0, fqs.count(),
                         "filter on id exists=false returns no documents")
        fqs = self.qs.filter(wnn__exists=False)
        self.assertEqual(3, fqs.count(),
                         "filter on wacky node name exists=false returns 3 documents")
    def test_or_filter(self):
        fqs = self.qs.or_filter(id='abc', name='four').only('id')
        self.assertEqual(
            2, fqs.count(),
            "should get 2 matches for OR filter on id='abc' or name='four' (got %s)" % fqs.count())
        ids = [obj.id for obj in fqs.all()]
        self.assertTrue('abc' in ids, 'id "abc" in list of ids when OR filter includes id="abc"')
        self.assertTrue('def' in ids, 'id "def" in list of ids when OR filter includes name="four')
    def test_exclude(self):
        fqs = self.qs.exclude(id='abc', name='one').only('id')
        self.assertEqual(
            2, fqs.count(),
            "should get 2 matches for exclude filter on id='abc' or name='one' (got %s)" % fqs.count())
        ids = [obj.id for obj in fqs.all()]
        self.assertTrue('abc' not in ids, 'id "abc" should not be in list of ids when exclude id="abc"')
    def test_filter_gtelte(self):
        # < <= > >=
        # subclass to add a numeric field to test with
        class CountQueryTestModel(QueryTestModel):
            name_count = xmlmap.IntegerField('count(name)')
        qs = QuerySet(using=self.db, xpath='/root', collection=COLLECTION,
                      model=CountQueryTestModel)
        # each fixture has one and only one name
        self.assertEqual(0, qs.filter(name_count__gt=1).count())
        self.assertEqual(4, qs.filter(name_count__gte=1).count())
        self.assertEqual(4, qs.filter(name_count__lte=1).count())
        self.assertEqual(0, qs.filter(name_count__lt=1).count())
    def test_filter_document_path(self):
        # get full test path to first document
        item = self.qs.filter(name='one').only('document_name', 'collection_name').get()
        path = '%s/%s' % (item.collection_name, item.document_name)
        #
        fqs = self.qs.filter(document_path=path, name='one')
        self.assertEqual(1, fqs.count())
        fqs = self.qs.filter(document_path=path, name='two')
        self.assertEqual(0, fqs.count())
    def test_get(self):
        result = self.qs.get(contains="two")
        self.assertTrue(isinstance(result, QueryTestModel), "get() with contains returns single result")
        self.assertEqual(result.name, "two", "result returned by get() has correct data")
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
    def test_get_toomany(self):
        self.assertRaises(ReturnedMultiple, self.qs.get, contains="one")
    def test_get_nomatch(self):
        self.assertRaises(DoesNotExist, self.qs.get, contains="fifty-four")
    def test_get_byname(self):
        result = self.qs.get(name="one")
        self.assertTrue(isinstance(result, QueryTestModel), "get() with contains returns single result")
        self.assertEqual(result.name, "one", "result returned by get() has correct data")
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
    def test_filter_get(self):
        result = self.qs.filter(contains="one").filter(name="two").get()
        self.assertTrue(isinstance(result, QueryTestModel))
        self.assertEqual("two", result.name, "filtered get() returns correct data")
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
    def test_reset(self):
        self.qs.filter(contains="two")
        self.qs.reset()
        self.assertEqual(NUM_FIXTURES, self.qs.count(), "main queryset remains unchanged by filter")
    def test_order_by(self):
        # element
        fqs = self.qs.order_by('name')
        self.assertEqual('four', fqs[0].name)
        self.assertEqual('one', fqs[1].name)
        self.assertEqual('three', fqs[2].name)
        self.assertEqual('two', fqs[3].name)
        self.assertTrue('order by ' not in self.qs.query.getQuery(), "main queryset unchanged by order_by()")
        # attribute
        fqs = self.qs.order_by('id')
        self.assertEqual('abc', fqs[0].id)
        self.assertEqual('def', fqs[1].id)
        self.assertEqual('one', fqs[2].id)
        self.assertEqual('xyz', fqs[3].id)
        # reverse sorting
        fqs = self.qs.order_by('-name')
        self.assertEqual('four', fqs[3].name)
        self.assertEqual('two', fqs[0].name)
        fqs = self.qs.order_by('-id')
        self.assertEqual('abc', fqs[3].id)
        self.assertEqual('xyz', fqs[0].id)
        # case-insensitive sorting - upper-case description should not sort first
        fqs = self.qs.order_by('~description')
        self.assertTrue(fqs[0].description.startswith('third'))
        self.assertTrue(fqs[1].description.startswith('This one contains'))
        # reverse case-insensitive sorting - flags in either order
        fqs = self.qs.order_by('~-description')
        self.assertTrue(fqs[3].description.startswith('third'))
        fqs = self.qs.order_by('-~description')
        self.assertTrue(fqs[3].description.startswith('third'))
    def test_order_by_raw(self):
        fqs = self.qs.order_by_raw('min(%(xq_var)s/year)')
        self.assertTrue('1990' in fqs[0].years)
        self.assertTrue('2001' in fqs[1].years)
        self.assertTrue('2010' in fqs[2].years)
        self.assertEqual([], fqs[3].years)
        fqs = self.qs.order_by_raw('min(%(xq_var)s/year)', ascending=False)
        self.assertEqual([], fqs[0].years)
        self.assertTrue('2010' in fqs[1].years)
        self.assertTrue('2001' in fqs[2].years)
        self.assertTrue('1990' in fqs[3].years)
    def test_only(self):
        self.qs.only('name')
        self.assertTrue('element name {' not in self.qs.query.getQuery(), "main queryset unchanged by only()")
        fqs = self.qs.filter(id='one').only('name', 'id', 'sub', 'or_field')
        self.assertTrue(isinstance(fqs[0], QueryTestModel))  # actually a Partial type derived from this
        # attributes that should be present
        self.assertNotEqual(fqs[0].id, None)
        self.assertNotEqual(fqs[0].sub, None)
        self.assertNotEqual(fqs[0].sub.subname, None)
        self.assertNotEqual(fqs[0].or_field, None)
        # attribute not returned
        self.assertEqual(fqs[0].description, None)
        self.assertEqual('one', fqs[0].id)
        self.assertEqual('one', fqs[0].name)
        self.assertEqual('la', fqs[0].sub.subname)
        self.assertEqual('one', fqs[0].or_field)   # = name (first of ORed fields present)
        fqs = self.qs.filter(id='one').only('wnn')
        self.assertTrue(hasattr(fqs[0], "wnn"))
        self.assertEqual(42, fqs[0].wnn)
        # nested field return
        fqs = self.qs.filter(id='one').only('name', 'id', 'sub__subname')
        self.assertEqual('la', fqs[0].sub.subname)
        # xpath function return
        fqs = self.qs.filter(id='one').only('substring')
        self.assertEqual('o', fqs[0].substring)
        # sub-subclass
        fqs = self.qs.filter(id='one').only('sub__ssc')
        self.assertTrue(isinstance(fqs[0], QueryTestModel))
    def test_only_hash(self):
        fqs = self.qs.only('hash')
        # no filters, should return all 4 test objects
        for result in fqs:
            # each return object should have a 40-character SHA-1 hash checksum
            self.assertEqual(40, len(result.hash),
                             'xquery result should have 40-character checksum, got %s' % result.hash)
    def test_document_name(self):
        fqs = self.qs.filter(id='one').only('document_name')
        # document_name attribute should be present
        self.assertNotEqual(fqs[0].document_name, None)
        self.assertEqual(fqs[0].document_name, "f1.xml")
        fqs = self.qs.filter(id='one').also('document_name')
        self.assertNotEqual(fqs[0].document_name, None)
        self.assertEqual(fqs[0].document_name, "f1.xml")
    def test_collection_name(self):
        fqs = self.qs.filter(id='one').only('collection_name')
        self.assertEqual(fqs[0].collection_name, '/db' + COLLECTION)
        fqs = self.qs.filter(id='one').also('collection_name')
        self.assertEqual(fqs[0].collection_name, '/db' + COLLECTION)
    def test_only_lastmodified(self):
        fqs = self.qs.only('last_modified')
        # no filters, should return all 4 test objects
        for result in fqs:
            self.assertTrue(isinstance(result.last_modified, datetime))
    def test_iter(self):
        for q in self.qs:
            self.assertTrue(isinstance(q, QueryTestModel))
    def test_slice_iter(self):
        i = 0
        for q in self.qs[1:2]:
            i += 1
        self.assertEqual(1, i)
    def test_also(self):
        class SubqueryTestModel(xmlmap.XmlObject):
            name = xmlmap.StringField('.')
            parent_id = xmlmap.StringField('parent::root/@id')
        qs = QuerySet(using=self.db, collection=COLLECTION, model=SubqueryTestModel, xpath='//name')
        name = qs.also('parent_id').get(name__exact='two')
        self.assertEqual('abc', name.parent_id,
                         "parent id set correctly when returning at name level with also parent_id specified; should be 'abc', got '"
                         + name.parent_id + "'")
    def test_also_subfield(self):
        class SubqueryTestModel(xmlmap.XmlObject):
            subname = xmlmap.StringField('subname')
            parent = xmlmap.NodeField('parent::root', QueryTestModel)
        qs = QuerySet(using=self.db, collection=COLLECTION, model=SubqueryTestModel, xpath='//sub')
        name = qs.also('parent__id', 'parent__wnn').get(subname__exact='la')
        self.assertEqual('la', name.subname)
        self.assertEqual('one', name.parent.id)
        self.assertEqual(42, name.parent.wnn)
    def test_also_raw(self):
        class SubqueryTestModel(QueryTestModel):
            myid = xmlmap.StringField('@id')
        qs = QuerySet(using=self.db, collection=COLLECTION, model=SubqueryTestModel, xpath='/root')
        qs = qs.filter(id='abc').also_raw(myid='string(%(xq_var)s//name/ancestor::root/@id)')
        self.assertEqual('abc', qs[0].myid)
        # filtered version of the queryset with raw
        obj = qs.filter(name='two').get()
        self.assertEqual('abc', obj.myid)
        # multiple parameters
        obj = qs.filter(id='abc').also_raw(id='string(%(xq_var)s/@id)',
                                           name='normalize-space(%(xq_var)s//name)').get(id='abc')
        self.assertEqual('abc', obj.id)
        self.assertEqual('two', obj.name)
    def test_only_raw(self):
        qs = self.qs.only_raw(id='xs:string(%(xq_var)s//name/ancestor::root/@id)').filter(name='two')
        self.assertEqual('abc', qs[0].id)
        # filtered version
        obj = qs.get()
        self.assertEqual('abc', obj.id)
        # when combined with regular only, other fields come back correctly
        qs = self.qs.only('name', 'description', 'substring')
        obj = qs.only_raw(id='xs:string(%(xq_var)s//name/ancestor::root/@id)').get(id='abc')
        self.assertEqual('two', obj.name)
        self.assertEqual('t', obj.substring)
        self.assertEqual('this one only has two', obj.description)
        self.assertEqual('abc', obj.id)
        # subfield
        obj = qs.only_raw(sub__subname='normalize-space(%(xq_var)s//subname)').get(id='one')
        self.assertEqual('la', obj.sub.subname)
        # multiple parameters
        obj = self.qs.filter(id='abc').only_raw(id='string(%(xq_var)s/@id)',
                                                name='normalize-space(%(xq_var)s//name)').get(id='abc')
        self.assertEqual('abc', obj.id)
        self.assertEqual('two', obj.name)
        # list field - multiple return values
        class MyQueryTest(QueryTestModel):
            name = xmlmap.StringListField('name')
        qs = QuerySet(using=self.db, xpath='/root', collection=COLLECTION, model=MyQueryTest)
        # return one object but find all the names in the test collection
        obj = qs.filter(id='abc').only_raw(name='collection("/db%s")//name' % COLLECTION).get(id='abc')
        # 4 names in test fixtures - should come back as a list of those 4 names
        self.assertEqual(4, len(obj.name))
    def test_getDocument(self):
        obj = self.qs.getDocument("f1.xml")
        self.assertTrue(isinstance(obj, QueryTestModel),
                        "object returned by getDocument is instance of QueryTestModel")
        self.assertEqual("one", obj.name)
    def test_distinct(self):
        qs = QuerySet(using=self.db, collection=COLLECTION, xpath='//name')
        vals = qs.distinct()
        self.assertTrue('one' in vals)
        self.assertTrue('two' in vals)
        self.assertTrue('three' in vals)
        self.assertTrue('four' in vals)
        self.assertTrue('abc' not in vals)
    def test_namespaces(self):
        # filter on a field with a namespace
        fqs = self.qs.filter(nsfield='namespaced').all()
        self.assertEqual('namespaced', fqs[0].nsfield)
class ExistQueryTest__FullText(unittest.TestCase):
    """Integration tests that require eXist full-text (Lucene) indexing.

    When full-text indexing is enabled, eXist must index files when they
    are loaded to the db, which makes these tests *significantly* slower;
    any tests that require full-text queries belong here.

    Note: the deprecated ``assert_`` alias (removed in Python 3.12) has
    been replaced with ``assertTrue`` throughout.
    """
    # sample lucene configuration for testing full-text queries
    FIXTURE_INDEX = '''
    <collection xmlns="http://exist-db.org/collection-config/1.0">
        <index>
            <lucene>
                <analyzer class="org.apache.lucene.analysis.standard.StandardAnalyzer"/>
                <text qname="description"/>
                <text qname="root"/>
            </lucene>
        </index>
    </collection>
    '''
    def setUp(self):
        self.db = ExistDB(server_url=EXISTDB_SERVER_URL,
            username=EXISTDB_SERVER_USER, password=EXISTDB_SERVER_PASSWORD)
        # create index for collection - should be applied to newly loaded files
        self.db.loadCollectionIndex(COLLECTION, self.FIXTURE_INDEX)
        load_fixtures(self.db)
        self.qs = QuerySet(using=self.db, xpath='/root',
                           collection=COLLECTION, model=QueryTestModel)
    def tearDown(self):
        self.db.removeCollection(COLLECTION)
        self.db.removeCollectionIndex(COLLECTION)
    def test_filter_fulltext_terms(self):
        fqs = self.qs.filter(description__fulltext_terms='only two')
        self.assertEqual(1, fqs.count(),
            "should get 1 match for fulltext_terms search on = 'only two' (got %s)" % fqs.count())
    def test_filter_fulltext_options(self):
        qs = QuerySet(using=self.db, xpath='/root',
                      collection=COLLECTION, model=QueryTestModel,
                      fulltext_options={'default-operator': 'and'})
        # search for terms present in fixtures - but not both present in one doc
        fqs = qs.filter(description__fulltext_terms='only third')
        # for now, just confirm that the option is passed through to query
        self.assertTrue('<default-operator>and</default-operator>' in fqs.query.getQuery())
        # TODO: test this properly!
        # query options not supported in current version of eXist
        # self.assertEqual(0, fqs.count())
    def test_order_by__fulltext_score(self):
        fqs = self.qs.filter(description__fulltext_terms='one').order_by('-fulltext_score')
        self.assertEqual('one', fqs[0].name)    # one appears 3 times, should be first
    def test_only__fulltext_score(self):
        fqs = self.qs.filter(description__fulltext_terms='one').only('fulltext_score', 'name')
        self.assertTrue(isinstance(fqs[0], QueryTestModel))  # actually a Partial type derived from this
        # fulltext score attribute should be present
        self.assertNotEqual(fqs[0].fulltext_score, None)
        self.assertTrue(float(fqs[0].fulltext_score) > 0.5)  # full-text score should be a float
    def test_fulltext_highlight(self):
        fqs = self.qs.filter(description__fulltext_terms='only two')
        # result from fulltext search - by default, xml should have exist:match tags
        self.assertTrue('<exist:match' in fqs[0].serialize())
        fqs = self.qs.filter(description__fulltext_terms='only two', highlight=False)
        # with highlighting disabled, should not have exist:match tags
        self.assertTrue('<exist:match' not in fqs[0].serialize())
        # order of args in the same filter should not matter
        fqs = self.qs.filter(highlight=False, description__fulltext_terms='only two')
        # with highlighting disabled, should not have exist:match tags
        self.assertTrue('<exist:match' not in fqs[0].serialize())
        # separate filters should also work
        fqs = self.qs.filter(description__fulltext_terms='only two').filter(highlight=False)
        # with highlighting disabled, should not have exist:match tags
        self.assertTrue('<exist:match' not in fqs[0].serialize())
    def test_highlight(self):
        fqs = self.qs.filter(highlight='supercalifragilistic')
        self.assertEqual(4, fqs.count(),
                         "highlight filter returns all documents even though search term is not present")
        fqs = self.qs.filter(highlight='one').order_by('id')
        self.assertTrue('<exist:match' in fqs[0].serialize())
    def test_match_count(self):
        fqs = self.qs.filter(id='one', highlight='one').only('match_count')
        self.assertEqual(fqs[0].match_count, 4, "4 matched words should be found")
    def test_using(self):
        fqs = self.qs.using('new-collection')
        # using should update the collection on the xquery object
        self.assertEqual('new-collection', fqs.query.collection)
class XqueryTest(unittest.TestCase):
def test_defaults(self):
xq = Xquery()
self.assertEquals('/node()', xq.getQuery())
def test_xpath(self):
xq = Xquery(xpath='/path/to/el')
self.assertEquals('/path/to/el', xq.getQuery())
def test_coll(self):
xq = Xquery(collection='myExistColl')
self.assertEquals('collection("/db/myExistColl")/node()', xq.getQuery())
xq = Xquery(xpath='/root/el', collection='/coll/sub')
self.assertEquals('collection("/db/coll/sub")/root/el', xq.getQuery())
def test_set_collection(self):
# initialize with no collection
xq = Xquery(xpath='/el')
xq.set_collection('coll')
self.assertEquals('collection("/db/coll")/el', xq.getQuery())
# initialize with one collection, then switch
xq = Xquery(collection='coll1')
xq.set_collection('coll2')
self.assertEquals('collection("/db/coll2")/node()', xq.getQuery())
# leading slash is ok too
xq.set_collection('/coll3')
self.assertEquals('collection("/db/coll3")/node()', xq.getQuery())
# set to None
xq.set_collection(None)
self.assertEquals('/node()', xq.getQuery())
def test_document(self):
xq = Xquery(xpath='/el', document="/db/coll/file.xml")
self.assertEquals('doc("/db/coll/file.xml")/el', xq.getQuery())
# document takes precedence over collection
xq.set_collection('coll') # should be ignored
self.assertEquals('doc("/db/coll/file.xml")/el', xq.getQuery())
def test_sort(self):
xq = Xquery(collection="mycoll")
xq.xq_var = '$n'
xq.sort('@id')
self.assert_('order by $n/@id ascending' in xq.getQuery())
self.assert_('collection("/db/mycoll")' in xq.getQuery())
# prep_xpath function should clean up more complicated xpaths
xq.sort('name|@id')
self.assert_('order by $n/name|$n/@id' in xq.getQuery())
# sort descending
xq.sort('@id', ascending=False)
self.assert_('order by $n/@id descending' in xq.getQuery())
# sort case-insensitive
xq.sort('@id', case_insensitive=True)
self.assert_('order by fn:lower-case($n/@id) ascending' in xq.getQuery())
# case-insensitive and descending
xq.sort('@id', case_insensitive=True, ascending=False)
self.assert_('order by fn:lower-case($n/@id) descending' in xq.getQuery())
def test_filters(self):
xq = Xquery(xpath='/el')
xq.add_filter('.', 'contains', 'dog')
self.assertEquals('/el[contains(., "dog")]', xq.getQuery())
# filters are additive
xq.add_filter('.', 'startswith', 'S')
self.assertEquals('/el[contains(., "dog")][starts-with(., "S")]', xq.getQuery())
def test_filters_fulltext(self):
xq = Xquery(xpath='/el')
xq.add_filter('.', 'fulltext_terms', 'dog')
self.assertEquals('/el[ft:query(., "dog")]', xq.getQuery())
def test_fulltext_options(self):
# pass in options for a full-text query
xq = Xquery(xpath='/el', fulltext_options={'default-operator': 'and'})
xq.add_filter('.', 'fulltext_terms', 'dog')
self.assert_('<default-operator>and</default-operator>' in xq.getQuery())
self.assert_('/el[ft:query(., "dog", $ft_options)]', xq.getQuery())
def test_filters_highlight(self):
xq = Xquery(xpath='/el')
xq.add_filter('.', 'highlight', 'dog star')
self.assertEquals('util:expand((/el[ft:query(., "dog star")]|/el))',
xq.getQuery())
def test_filter_escaping(self):
xq = Xquery(xpath='/el')
xq.add_filter('.', 'contains', '"&')
self.assertEquals('/el[contains(., """&")]', xq.getQuery())
def test_filter_in(self):
xq = Xquery(xpath='/el')
xq.add_filter('@id', 'in', ['a', 'b', 'c'])
self.assertEquals('/el[@id="a" or @id="b" or @id="c"]', xq.getQuery())
# filter on a 'special' field - requires let & where statements
xq = Xquery(xpath='/el')
xq.add_filter('document_name', 'in', ['a.xml', 'b.xml'])
self.assert_('let $document_name' in xq.getQuery())
self.assert_('where $document_name="a.xml" or $document_name="b.xml"'
in xq.getQuery())
def test_filter_exists(self):
xq = Xquery(xpath='/el')
xq.add_filter('@id', 'exists', True)
self.assertEquals('/el[@id]', xq.getQuery())
xq = Xquery(xpath='/el')
xq.add_filter('@id', 'exists', False)
self.assertEquals('/el[not(@id)]', xq.getQuery())
def test_filter_gtlt(self):
xq = Xquery(xpath='/el')
xq.add_filter('@id', 'gt', 5)
self.assert_('el[@id > 5]' in xq.getQuery())
xq = Xquery(xpath='/el')
xq.add_filter('@id', 'gte', 5)
self.assert_('/el[@id >= 5]' in xq.getQuery())
xq.add_filter('@id', 'lt', '10')
self.assert_('el[@id >= 5]' in xq.getQuery())
self.assert_('[@id < "10"]' in xq.getQuery())
xq.add_filter('@id', 'lte', 3)
self.assert_('[@id <= 3]' in xq.getQuery())
def test_or_filters(self):
xq = Xquery(xpath='/el')
xq.add_filter('.', 'contains', 'dog', mode='OR')
xq.add_filter('.', 'startswith', 'S', mode='OR')
self.assertEquals('/el[contains(., "dog") or starts-with(., "S")]',
xq.getQuery())
def test_not_filters(self):
xq = Xquery(xpath='/el')
xq.add_filter('.', 'contains', 'dog', mode='NOT')
self.assertEquals('/el[not(contains(., "dog"))]', xq.getQuery())
xq = Xquery(xpath='/el')
xq.add_filter('.', 'contains', 'dog', mode='NOT')
xq.add_filter('.', 'startswith', 'S', mode='NOT')
self.assertEquals('/el[not(contains(., "dog")) and not(starts-with(., "S"))]',
xq.getQuery())
def test_return_only(self):
xq = Xquery(xpath='/el')
xq.xq_var = '$n'
xq.return_only({'myid': '@id', 'some_name': 'name',
'first_letter': 'substring(@n,1,1)'})
xq_return = xq._constructReturn()
self.assert_('return <el>' in xq_return)
self.assert_('<field>{$n/@id}</field>' in xq_return)
self.assert_('<field>{$n/name}</field>' in xq_return)
self.assert_('<field>{substring($n/@n,1,1)}</field>' in xq_return)
self.assert_('</el>' in xq_return)
xq = Xquery(xpath='/some/el/notroot')
xq.return_only({'id': '@id'})
self.assert_('return <notroot>' in xq._constructReturn())
# case where node test can't be the return element
xq = Xquery(xpath='/foo/bar/node()')
xq.return_only({'myid': '@id'})
xq_return = xq._constructReturn()
self.assert_('return <node>' in xq_return)
xq = Xquery(xpath='/foo/bar/*')
xq.return_only({'myid': '@id'})
xq_return = xq._constructReturn()
self.assert_('return <node>' in xq_return)
def test_return_only__fulltext_score(self):
xq = Xquery(xpath='/el')
xq.xq_var = '$n'
xq.return_only({'fulltext_score': ''})
self.assert_('let $fulltext_score := ft:score($n)' in xq.getQuery())
self.assert_('<fulltext_score>{$fulltext_score}</fulltext_score>' in xq._constructReturn())
def test_return_also(self):
xq = Xquery(xpath='/el')
xq.xq_var = '$n'
xq.return_also({'myid': '@id', 'some_name': 'name'})
self.assert_('{$n}' in xq._constructReturn())
self.assert_('<field>{$n/@id}</field>' in xq._constructReturn())
def test_return_also__fulltext_score(self):
xq = Xquery(xpath='/el')
xq.xq_var = '$n'
xq.return_also({'fulltext_score': ''})
self.assert_('let $fulltext_score := ft:score($n)' in xq.getQuery())
self.assert_('<fulltext_score>{$fulltext_score}</fulltext_score>' in xq._constructReturn())
def test_return_also__highlight(self):
xq = Xquery(xpath='/el')
xq.xq_var = '$n'
xq.return_also({'fulltext_score': ''})
xq.add_filter('.', 'highlight', 'dog star')
self.assert_('(/el[ft:query(., "dog star")]|/el)' in xq.getQuery())
def test_return_also_raw(self):
xq = Xquery(xpath='/el')
xq.xq_var = '$n'
xq._raw_prefix = 'r_'
xq.return_also({'myid': 'count(util:expand(%(xq_var)s/@id))'}, raw=True)
self.assert_('<r_myid>{count(util:expand($n/@id))}</r_myid>' in xq._constructReturn())
xq = Xquery(xpath='/el')
xq.xq_var = '$n'
xq._raw_prefix = 'r_'
xq.return_also({'myid': '@id'}, raw=True)
self.assert_('<r_myid>{@id}</r_myid>' in xq._constructReturn())
def test_set_limits(self):
# subsequence with xpath
xq = Xquery(xpath='/el')
xq.xq_var = '$n'
xq.set_limits(low=0, high=4)
self.assertEqual('subsequence(/el, 1, 4)', xq.getQuery())
# subsequence with FLWR query
xq.return_only({'name': 'name'})
self.assert_('subsequence(for $n in' in xq.getQuery())
# additive limits
xq = Xquery(xpath='/el')
xq.set_limits(low=2, high=10)
xq.set_limits(low=1, high=5)
self.assertEqual('subsequence(/el, 4, 4)', xq.getQuery())
# no high specified
xq = Xquery(xpath='/el')
xq.set_limits(low=10)
self.assertEqual('subsequence(/el, 11, )', xq.getQuery())
# no low
xq = Xquery(xpath='/el')
xq.set_limits(high=15)
self.assertEqual('subsequence(/el, 1, 15)', xq.getQuery())
    def test_clear_limits(self):
        """Clearing limits restores the plain xpath query."""
        xq = Xquery(xpath='/el')
        xq.set_limits(low=2, high=5)
        xq.clear_limits()
        self.assertEqual('/el', xq.getQuery())
    def test_distinct(self):
        """distinct() wraps the xpath in distinct-values()."""
        # distinct-values
        xq = Xquery(xpath='/el')
        xq.distinct()
        self.assertEqual('distinct-values(/el)', xq.getQuery())
    def test_prep_xpath(self):
        """prep_xpath rewrites xpaths relative to the xquery variable,
        inside function calls and across |-unions, optionally wrapping the
        result in a <field> return element."""
        xq = Xquery()
        xq.xq_var = '$n'
        # handle attributes
        self.assertEqual('<field>{$n/@id}</field>', xq.prep_xpath('@id', return_field=True))
        self.assertEqual('<field>{$n/../@id}</field>', xq.prep_xpath('../@id', return_field=True))
        self.assertEqual('<field>{$n/parent::root/@id}</field>', xq.prep_xpath('parent::root/@id', return_field=True))
        # handle regular nodes
        self.assertEqual('<field>{$n/title}</field>', xq.prep_xpath('title', return_field=True))
        # function call - regular node
        self.assertEqual('substring($n/title,1,1)', xq.prep_xpath('substring(title,1,1)'))
        # function call - abbreviated step
        self.assertEqual('substring($n/.,1,1)', xq.prep_xpath('substring(.,1,1)'))
        # xpath with OR - absolute paths
        self.assertEqual('<field>{$n/name|$n/title}</field>', xq.prep_xpath('/name|/title', return_field=True))
        # xpath with OR - relative paths
        self.assertEqual('<field>{$n/name|$n/title}</field>', xq.prep_xpath('name|title', return_field=True))
        # xpath with OR - mixed absolute and relative paths
        self.assertEqual('<field>{$n/name|$n/title}</field>', xq.prep_xpath('/name|title', return_field=True))
        # multiple ORs
        self.assertEqual('<field>{$n/name|$n/title|$n/@year}</field>',
                         xq.prep_xpath('/name|/title|@year', return_field=True))
        # .//node inside a function call
        self.assertEqual('<field>{normalize-space($n/.//name)}</field>',
                         xq.prep_xpath('normalize-space($n/.//name)', return_field=True))
        # node|node inside a function call
        self.assertEqual('fn:lower-case($n/name|$n/title)',
                         xq.prep_xpath('fn:lower-case(name|title)'))
        # node|node inside a nested function call
        self.assertEqual('fn:lower-case(normalize-space($n/name|$n/title))',
                         xq.prep_xpath('fn:lower-case(normalize-space(name|title))'))
def test_namespaces(self):
xq = Xquery(xpath='/foo:el', namespaces={'foo': 'urn:foo#'})
ns_declaration = '''declare namespace foo='urn:foo#';'''
# xpath-only xquery should have namespace declaration
self.assert_(ns_declaration in xq.getQuery())
# full FLOWR xquery should also have declaration
xq.return_only({'id': '@id'})
self.assert_(ns_declaration in xq.getQuery())
|
emory-libraries/eulexistdb
|
test/test_existdb/test_query.py
|
Python
|
apache-2.0
| 40,539
|
"""
This module contains all commands for searching the database for events with given criteria. The most important function here is searchNordic and most other functions and classes are here just to support it.
Functions and Classes
---------------------
"""
import numpy as np
from datetime import date
from datetime import datetime
from datetime import timedelta
from datetime import time
from nordb.core import usernameUtilities
from nordb.database import sql2nordic
# Maps each searchable attribute to the list of Python types accepted as
# search values for it (used by the Command subclasses for validation).
SEARCH_TYPES = {
    "origin_date":[date],
    "origin_time":[time],
    "epicenter_latitude":[float],
    "epicenter_longitude":[float],
    "magnitude_1":[float],
    "solution_type":[str],
    "distance_indicator":[str],
    "event_desc_id":[str],
    "event_id":[int],
    "depth":[float]
    }

# Maps each searchable attribute to the database table that holds it,
# used when building the "table.column" reference in SQL fragments.
SEARCH_TYPE_HEADERS = {
    "origin_time":"nordic_header_main",
    "origin_date":"nordic_header_main",
    "epicenter_latitude":"nordic_header_main",
    "epicenter_longitude":"nordic_header_main",
    "magnitude_1":"nordic_header_main",
    "solution_type":"nordic_event",
    "distance_indicator":"nordic_header_main",
    "event_desc_id":"nordic_header_main",
    "event_id":"nordic_header_main",
    "depth":"nordic_header_main"
    }
class NordicSearch:
    """
    Class for searching events from database with multiple criteria.
    """
    def __init__(self):
        # Criterion objects (ExactlyValue, BetweenValues, OverValue, UnderValue)
        self.criteria = []

    def getCriteriaString(self):
        """
        Get all criteria in a formatted string for printing purposes.

        :returns: multi-line string, one criterion per line
        """
        criteria_string = ""
        for crit in self.criteria:
            if crit.command_type == 1:      # exact value
                criteria_string += "   {0}: {1}\n".format(crit.search_type, crit.getValue()[0])
            elif crit.command_type == 2:    # between two values
                criteria_string += "   {0}: {1}-{2}\n".format(crit.search_type, crit.getValue()[0], crit.getValue()[1])
            elif crit.command_type == 3:    # over a value
                criteria_string += "   {0}: {1}-> \n".format(crit.search_type, crit.getValue()[0])
            else:                           # under a value
                criteria_string += "   {0}: <-{1} \n".format(crit.search_type, crit.getValue()[0])
        return criteria_string

    def getCriteriaAmount(self):
        """
        Return the amount of criteria in the NordicSearch
        """
        return len(self.criteria)

    def clear(self):
        """
        Clear all criteria from NordicSearch object
        """
        self.criteria = []

    def addSearchExactly(self, search_type, search_val):
        """
        Add SearchExactly criteria to the NordicSearch object.

        :param str search_type: attribute to compare (a key of SEARCH_TYPES)
        :param int,float,datetime,str search_val: Value to which the search type will be compared to
        """
        self.criteria.append(ExactlyValue(search_type, search_val))

    def addSearchBetween(self, search_type, search_val_low, search_val_upp):
        """
        Add SearchBetween criteria to the NordicSearch object.

        :param str search_type: attribute to compare (a key of SEARCH_TYPES)
        :param int,float,datetime,str search_val_low: Lower value to which the search type will be compared to
        :param int,float,datetime,str search_val_upp: Upper value to which the search type will be compared to
        """
        self.criteria.append(BetweenValues(search_type, search_val_low, search_val_upp))

    def addSearchOver(self, search_type, search_val):
        """
        Add SearchOver criteria to the NordicSearch object.

        :param str search_type: attribute to compare (a key of SEARCH_TYPES)
        :param int,float,datetime,str search_val: Value to which the search type will be compared to
        """
        self.criteria.append(OverValue(search_type, search_val))

    def addSearchUnder(self, search_type, search_val):
        """
        Add SearchUnder criteria to the NordicSearch object.

        :param str search_type: attribute to compare (a key of SEARCH_TYPES)
        :param int,float,datetime,str search_val: Value to which the search type will be compared to
        """
        self.criteria.append(UnderValue(search_type, search_val))

    def getSearchQueryAndValues(self):
        """
        Build the combined "AND ..." SQL fragment and its parameter values
        from all criteria.

        :returns: (query string fragment, list of parameter values)
        """
        query_str = ""
        query_vals = []
        for query in self.criteria:
            query_str += "AND " + query.getQuery()
            query_vals.extend(query.getValue())
        return query_str, query_vals

    def searchEventIdAndDate(self, db_conn = None):
        """
        Search for all event ids and their dates that fit to the criteria given to the NordicSearch and return them

        :param db_conn: optional open database connection to reuse
        :returns: a list of (event id, origin_date, origin_time) rows
        """
        if db_conn is None:
            conn = usernameUtilities.log2nordb()
        else:
            conn = db_conn
        query = ( "SELECT "
                  "     id, origin_date, origin_time "
                  "FROM "
                  "   (SELECT "
                  "        DISTINCT ON (nordic_event.id) nordic_event.id AS id, "
                  "        nordic_header_main.origin_time AS origin_time, "
                  "        nordic_header_main.origin_date AS origin_date, "
                  "        nordic_event.root_id AS root_id "
                  "    FROM "
                  "        nordic_event, nordic_header_main "
                  "    WHERE "
                  "        nordic_event.id = nordic_header_main.event_id "
                )
        query_str, query_vals = self.getSearchQueryAndValues()
        query += query_str
        query += ") AS subq ORDER BY root_id"

        cur = conn.cursor()
        cur.execute(query, query_vals)
        ans = cur.fetchall()

        # Only close connections this method opened itself.
        if db_conn is None:
            conn.close()

        # fetchall() already returns [] when nothing matched; the old
        # "if len(ans) == 0: return []" branch was redundant.
        return ans

    def searchEvents(self, db_conn = None):
        """
        Search for all the events that fit to the criteria given to the NordicSearch and return them.

        :param db_conn: optional open database connection to reuse
        :returns: array of NordicEvent objects
        """
        if db_conn is None:
            conn = usernameUtilities.log2nordb()
        else:
            conn = db_conn

        event_ids = self.searchEventIds(db_conn = conn)
        events = sql2nordic.getNordic(event_ids, db_conn = conn)

        if db_conn is None:
            conn.close()
        return events

    def searchEventIds(self, db_conn = None):
        """
        Search for all event ids that fit to the criteria given to the NordicSearch and return them.

        :param db_conn: optional open database connection to reuse
        :returns: a list of event ids
        """
        # searchEventIdAndDate returns (id, date, time) rows; keep only ids.
        rows = self.searchEventIdAndDate(db_conn=db_conn)
        return [row[0] for row in rows]

    def searchEventRoots(self, db_conn = None):
        """
        Search for event root ids that have events that fit to the criteria given to the NordicSearch and return them.

        :param db_conn: optional open database connection to reuse
        :returns: a list of event root ids
        """
        if db_conn is None:
            conn = usernameUtilities.log2nordb()
        else:
            conn = db_conn

        query = (
                "SELECT "
                "   DISTINCT nordic_event.root_id "
                "FROM "
                "   nordic_event, nordic_header_main "
                "WHERE "
                "   nordic_event.id = nordic_header_main.event_id "
                )
        query_str, query_vals = self.getSearchQueryAndValues()
        query += query_str

        # BUG FIX: a second connection was unconditionally opened here
        # (conn = usernameUtilities.log2nordb()), which ignored a caller's
        # db_conn and leaked the connection selected above.
        cur = conn.cursor()
        cur.execute(query, query_vals)
        ans = cur.fetchall()

        if db_conn is None:
            conn.close()

        return ans
class Command:
    """
    Base class for a single search criterion.

    :ivar int command_type: Type of command (1 exact, 2 between, 3 over, 4 under).
    :ivar str search_type: Name of the searched attribute (a key of SEARCH_TYPES).
    """
    # SQL comparison templates keyed by command_type; {0} is "table.column".
    _QUERY_TEMPLATES = {
        1: " {0} = %s ",
        2: " {0} >= %s AND {0} <= %s ",
        3: " {0} >= %s ",
        4: " {0} <= %s ",
    }

    def __init__(self, command_type, search_type):
        if search_type not in SEARCH_TYPES.keys():
            raise Exception("Not a valid search type! ({0})".format(search_type))
        # Range-style comparisons (everything but exact) make no sense for
        # string-valued fields.
        string_fields = ["solution_type", "distance_indicator", "event_desc_id"]
        if command_type != 1 and search_type in string_fields:
            raise Exception("Cannot search between string values! ({0})".format(search_type))
        self.command_type = command_type
        self.search_type = search_type

    def getQuery(self):
        """
        Return the SQL fragment for this criterion (overridden by subclasses).
        """
        return None

    def getValue(self):
        """
        Return the criterion's value(s) as a tuple (overridden by subclasses).
        """
        return None

    def createQuery(self, value):
        """
        Build the parameterized SQL comparison for this criterion.

        :param value: unused; kept for backward compatibility with subclasses
        :returns: SQL fragment with %s placeholders, or None for an
                  unknown command_type
        """
        column = "{0}.{1}".format(SEARCH_TYPE_HEADERS[self.search_type], self.search_type)
        template = self._QUERY_TEMPLATES.get(self.command_type)
        if template is None:
            return None
        return template.format(column)
class ExactlyValue(Command):
    """
    Criterion matching values exactly equal to the given value.

    :ivar int command_type: Type of command; always 1 here.
    :ivar int,float,datetime value: Reference value for the comparison.
    """
    def __init__(self, search_type, value):
        Command.__init__(self, 1, search_type)
        allowed_types = SEARCH_TYPES[search_type]
        if type(value) not in allowed_types:
            raise Exception("Given search value is not a correct type! (Given: {0}, Required: {1})".format(type(value), allowed_types))
        self.value = value

    def getQuery(self):
        return self.createQuery(self.value)

    def getValue(self):
        return (self.value,)
class BetweenValues(Command):
    """
    Criterion matching values falling between two given bounds (inclusive).

    :ivar int command_type: Type of command; always 2 here.
    :ivar int,float,datetime value_lower: Lower bound of the comparison.
    :ivar int,float,datetime value_upper: Upper bound of the comparison.
    """
    def __init__(self, search_type, value_lower, value_upper):
        Command.__init__(self, 2, search_type)
        allowed_types = SEARCH_TYPES[search_type]
        if type(value_lower) not in allowed_types:
            raise Exception("Given lower search value is not a correct type! (Given: {0}, Required: {1})".format(type(value_lower), allowed_types))
        if type(value_upper) not in allowed_types:
            raise Exception("Given upper search value is not a correct type! (Given: {0}, Required: {1})".format(type(value_upper), allowed_types))
        self.value_lower = value_lower
        self.value_upper = value_upper

    def getQuery(self):
        return self.createQuery(self.value_lower)

    def getValue(self):
        return self.value_lower, self.value_upper
class OverValue(Command):
    """
    Criterion matching values greater than or equal to the given value.

    :ivar int command_type: Type of command; always 3 here.
    :ivar int,float,datetime value: Reference value for the comparison.
    """
    def __init__(self, search_type, value):
        Command.__init__(self, 3, search_type)
        allowed_types = SEARCH_TYPES[search_type]
        if type(value) not in allowed_types:
            raise Exception("Given search value is not a correct type! (Given: {0}, Required: {1})".format(type(value), allowed_types))
        self.value = value

    def getQuery(self):
        return self.createQuery(self.value)

    def getValue(self):
        return (self.value,)
class UnderValue(Command):
    """
    Criterion matching values lower than or equal to the given value.

    :ivar int command_type: Type of command; always 4 here.
    :ivar int,float,datetime value: Reference value for the comparison.
    """
    def __init__(self, search_type, value):
        Command.__init__(self, 4, search_type)
        allowed_types = SEARCH_TYPES[search_type]
        if type(value) not in allowed_types:
            raise Exception("Given search value is not a correct type! (Given: {0}, Required: {1})".format(type(value), allowed_types))
        self.value = value

    def getQuery(self):
        return self.createQuery(self.value)

    def getValue(self):
        return (self.value,)
def searchSameEvents(nordic_event):
    """
    Function for searching and returning all events that are the same compared to the event given by the user.

    :param NordicEvent nordic_event: Event for which the search is done for
    :returns: List of :class:`NordicEvent` that are identical to the event
    """
    main_header = nordic_event.main_h[0]
    search = NordicSearch()
    # Add an exact-match criterion for every main-header field that is set.
    for field in ("origin_date", "origin_time", "epicenter_latitude",
                  "epicenter_longitude", "magnitude_1"):
        field_value = getattr(main_header, field)
        if field_value is not None:
            search.addSearchExactly(field, field_value)
    return search.searchEvents()
def searchSimilarEvents(nordic_event, time_diff = 20.0, latitude_diff = 0.2, longitude_diff = 0.2, magnitude_diff = 0.5):
    """
    Function for searching and returning all events that are considered similar to the event given by user.

    Default conditions for similarity:
        - Events must occur 20 seconds maximum apart from each other
        - Events must be 0.2 deg maximum apart from each other in latitude and longitude
        - Events must have magnitude difference of 0.5 maximum

    :param NordicEvent nordic_event: Event for which the search is done for
    :param float time_diff: maximum time difference in seconds
    :param float latitude_diff: maximum latitude difference in degrees
    :param float longitude_diff: maximum longitude difference in degrees
    :param float magnitude_diff: maximum magnitude difference
    :returns: Array of :class:`NordicEvent` that fit to the search criteria
    """
    m_header = nordic_event.main_h[0]
    search = NordicSearch()

    # Cannot build a time window without both date and time.
    if m_header.origin_date is None or m_header.origin_time is None:
        return []

    origin = datetime.combine(m_header.origin_date, m_header.origin_time)
    window = timedelta(seconds = time_diff)
    earliest = origin - window
    latest = origin + window

    search.addSearchBetween("origin_date", earliest.date(), latest.date())
    # NOTE(review): date and time are compared independently, so a window
    # crossing midnight matches fewer events than intended — kept identical
    # to the original behavior here; confirm before changing.
    search.addSearchBetween("origin_time", earliest.time(), latest.time())

    for field, max_diff in (("epicenter_latitude", latitude_diff),
                            ("epicenter_longitude", longitude_diff),
                            ("magnitude_1", magnitude_diff)):
        field_value = getattr(m_header, field)
        if field_value is not None:
            search.addSearchBetween(field, field_value - max_diff, field_value + max_diff)

    return search.searchEvents()
def searchEvents(latitude = None, longitude = None, distance = 100.0,
                 magnitude = -9.0, magnitude_diff = 2.0,
                 date=None, date_diff=-9.0):
    """
    Search all events close to a given point.

    :param float latitude: latitude coordinate of the point
    :param float longitude: longitude coordinate of the point (requires latitude)
    :param float distance: distance from the point in kilometers
    :param float magnitude: magnitude of the event
    :param float magnitude_diff: maximum allowed magnitude difference of the event. Set negative value for searching exactly for a magnitude
    :param date date: date of the event
    :param float date_diff: maximum allowed date difference from date in days. Set negative value for searching exactly at the date
    :returns: events matching the criteria (see NordicSearch.searchEvents)
    """
    search = NordicSearch()
    if latitude is not None:
        # ~110.574 km per degree of latitude; half the distance on each side.
        lat_diff = (0.5*distance) / 110.574
        search.addSearchBetween("epicenter_latitude", latitude-lat_diff, latitude+lat_diff)
    if longitude is not None:
        # BUG FIX: previously crashed with a TypeError when longitude was
        # given without latitude (np.cos(np.deg2rad(None))); fail clearly.
        if latitude is None:
            raise ValueError("latitude is required when searching by longitude")
        # Longitude degrees shrink with cos(latitude); ~111.32 km/deg at equator.
        lon_diff = (0.5*distance) / (float(np.cos(np.deg2rad(latitude))) * 111.32)
        search.addSearchBetween("epicenter_longitude", longitude-lon_diff, longitude+lon_diff)
    if magnitude > 0.0:
        if magnitude_diff < 0:
            search.addSearchExactly("magnitude_1", magnitude)
        else:
            search.addSearchBetween("magnitude_1",
                                    magnitude-magnitude_diff,
                                    magnitude+magnitude_diff)
    if date is not None:
        if date_diff < 0:
            # Exact-date search expressed as a degenerate between-range.
            search.addSearchBetween("origin_date", date, date)
        else:
            search.addSearchBetween("origin_date",
                                    date-timedelta(days=date_diff),
                                    date+timedelta(days=date_diff))
    return search.searchEvents()
|
MrCubanfrog/NorDB
|
nordb/database/nordicSearch.py
|
Python
|
mit
| 17,679
|
import os
import sys
import typing
from decimal import Decimal
from knowit import VIDEO_EXTENSIONS
# typing.Literal only exists from Python 3.8 on; fall back to plain str.
if sys.version_info < (3, 8):
    OS_FAMILY = str
else:
    OS_FAMILY = typing.Literal['windows', 'macos', 'unix']

# Mapping from os family to a tuple of options (locations or names).
# FIX: Tuple[str] means exactly a 1-tuple; Tuple[str, ...] is the
# variable-length tuple these maps actually hold.
OPTION_MAP = typing.Dict[str, typing.Tuple[str, ...]]
def recurse_paths(
    paths: typing.Union[str, typing.Iterable[str]]
) -> typing.List[str]:
    """Return a de-duplicated list of video files found in *paths*."""
    # A single string may hold several paths, comma- or whitespace-separated.
    if isinstance(paths, str):
        separator = ',' if ',' in paths else None
        paths = [part.strip() for part in paths.split(separator)]

    found = []
    for path in paths:
        if os.path.isfile(path):
            found.append(path)
        if os.path.isdir(path):
            for root, _directories, filenames in os.walk(path):
                found.extend(
                    os.path.join(root, filename)
                    for filename in filenames
                    if os.path.splitext(filename)[1] in VIDEO_EXTENSIONS
                )

    # Deduplicate while preserving order, since mediainfo is rather slow.
    return list(dict.fromkeys(found))
def to_dict(
    obj: typing.Any,
    classkey: typing.Optional[typing.Type] = None
) -> typing.Union[str, dict, list]:
    """Transform an object to dict."""
    # Branch order matters: str and dict are both iterable, so they must be
    # handled before the generic __iter__ case.
    if isinstance(obj, str):
        return obj
    if isinstance(obj, dict):
        return {key: to_dict(val, classkey) for key, val in obj.items()}
    if hasattr(obj, '_ast'):
        return to_dict(obj._ast())
    if hasattr(obj, '__iter__'):
        return [to_dict(item, classkey) for item in obj]
    if hasattr(obj, '__dict__'):
        converted = {
            key: to_dict(val, classkey)
            for key, val in obj.__dict__.items()
            if not callable(val) and not key.startswith('_')
        }
        result = {key: val for key, val in converted.items() if val is not None}
        if classkey is not None and hasattr(obj, '__class__'):
            result[classkey] = obj.__class__.__name__
        return result
    return obj
def detect_os() -> OS_FAMILY:
    """Detect os family: windows, macos or unix."""
    windows_os_names = ('nt', 'dos', 'os2', 'ce')
    if os.name in windows_os_names:
        return 'windows'
    return 'macos' if sys.platform == 'darwin' else 'unix'
def define_candidate(
    locations: OPTION_MAP,
    names: OPTION_MAP,
    os_family: typing.Optional[OS_FAMILY] = None,
    suggested_path: typing.Optional[str] = None,
) -> typing.Generator[str, None, None]:
    """Select family-specific options and generate possible candidates."""
    family = os_family or detect_os()
    # The caller's suggestion is tried before the family's known locations.
    candidate_locations = (suggested_path, ) + locations[family]
    yield from build_candidates(candidate_locations, names[family])
def build_candidates(
    locations: typing.Iterable[typing.Optional[str]],
    names: typing.Iterable[str],
) -> typing.Generator[str, None, None]:
    """Build candidate names."""
    for location in locations:
        # Skip empty/None locations (e.g. an absent suggested_path).
        if not location:
            continue
        if location == '__PATH__':
            # Sentinel: expand bare names against the environment PATH.
            yield from build_path_candidates(names)
            continue
        if os.path.isfile(location):
            yield location
            continue
        if not os.path.isdir(location):
            continue
        for candidate_name in names:
            candidate = os.path.join(location, candidate_name)
            if os.path.isfile(candidate):
                yield candidate
def build_path_candidates(
    names: typing.Iterable[str],
    os_family: typing.Optional[OS_FAMILY] = None,
) -> typing.Generator[str, None, None]:
    """Build candidate names on environment PATH."""
    family = os_family or detect_os()
    if family != 'windows':
        # On POSIX the OS resolves bare names against PATH itself.
        yield from names
        return
    # On Windows, join every PATH entry with every candidate name.
    for directory in os.environ['PATH'].split(';'):
        for name in names:
            yield os.path.join(directory, name)
def round_decimal(value: Decimal, min_digits=0, max_digits: typing.Optional[int] = None):
    """Round *value* to a sensible number of decimal places.

    Values with at most *min_digits* decimal places are padded/rounded to
    exactly *min_digits*; longer values are returned unchanged unless
    *max_digits* caps the number of decimal places.
    """
    exponent = value.normalize().as_tuple().exponent
    # Non-negative exponent means an integral value (e.g. Decimal('100')).
    if exponent >= 0:
        return round(value, min_digits)

    decimal_places = abs(exponent)
    if decimal_places <= min_digits:
        return round(value, min_digits)
    # BUG FIX: 'if max_digits:' treated max_digits=0 (round to integer) the
    # same as "no cap"; test for None explicitly.
    if max_digits is not None:
        return round(value, min(max_digits, decimal_places))
    return value
|
ratoaq2/knowit
|
knowit/utils.py
|
Python
|
mit
| 4,268
|
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fail if the C extension module doesn't exist.
Only really intended to be used by internal build scripts.
"""
import sys
sys.path[0:0] = [""]
import bson # noqa: E402
import pymongo # noqa: E402
if not pymongo.has_c() or not bson.has_c():
sys.exit("could not load C extensions")
|
mongodb/mongo-python-driver
|
tools/fail_if_no_c.py
|
Python
|
apache-2.0
| 871
|
from time import sleep
def foo(N, message):
    """Sleep *N* seconds, print *message*, and return it (queue test helper)."""
    sleep(N)
    # FIX: print(...) works on both Python 2 and 3; the bare print statement
    # is a syntax error on Python 3.
    print(message)
    return message
|
fikipollo/PySiQ
|
test/test_functions.py
|
Python
|
mit
| 95
|
# -*- coding: utf-8 -*-
#
# This file is part of CERN Open Data Portal.
# Copyright (C) 2017 CERN.
#
# CERN Open Data Portal is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Open Data Portal is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Open Data Portal; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Default configuration for CERN Open Data theme."""
from __future__ import absolute_import, print_function
def _(x):
    """Identity function used to mark strings for translation extraction."""
    return x
# Default language and timezone
BABEL_DEFAULT_LANGUAGE = 'en'
BABEL_DEFAULT_TIMEZONE = 'Europe/Zurich'
# Languages offered by the portal UI: (language code, display name)
I18N_LANGUAGES = [
    ('en', _('English')),
]

# Jinja templates; the first two are overridden by this theme, the last two
# fall back to the invenio_theme defaults.
BASE_TEMPLATE = 'cernopendata_theme/page.html'
HEADER_TEMPLATE = 'cernopendata_theme/header.html'
COVER_TEMPLATE = 'invenio_theme/page_cover.html'
SETTINGS_TEMPLATE = 'invenio_theme/settings/content.html'

# Theme
THEME_SITENAME = _('CERN Open Data Portal')
THEME_LOGO = 'img/cernopendata.svg'
|
RaoOfPhysics/opendata.cern.ch
|
cernopendata/modules/theme/config.py
|
Python
|
gpl-2.0
| 1,661
|
import unittest
import process_umd as umd
import common
class Test(unittest.TestCase):
    # Input spreadsheet export and canopy-density threshold shared by all tests.
    path = 'data/country_admin_export_clean.xlsx - Admin_2013.csv'
    thresh = 50
    # NOTE(review): these load and process the CSV at class-definition
    # (import) time, not per-test.
    ts = umd.main(path, thresh, national=False)
    raw = common.load(path)
    # National-level variants; not referenced by the tests below — confirm
    # whether they are still needed.
    ts_nat = umd.main(path, thresh, national=True)
    raw_nat = common.load(path)

    def test_loss(self):
        """Check that loss is calculated properly for a given threshold."""
        acre = self.ts.query("region == 'Acre'")
        df = self.raw[self.raw['name'] == 'Brazil_Acre']
        result = list(acre.query('year == 2003')['loss'])
        # Loss at threshold 50 aggregates the 75% and 100% density bands.
        expected = list(df['loss_75_2003'] + df['loss_100_2003'])
        self.assertEqual(result, expected)

    def test_gain(self):
        """Check that gain field is properly generated."""
        acre = self.ts.query("region == 'Acre'")
        df = self.raw[self.raw['name'] == 'Brazil_Acre']
        result = list(acre[acre.year == 2003]['gain'])
        # gain0012 covers 2000-2012, hence the division by 12 to annualize.
        expected = list(df.gain0012 / 12.)
        self.assertEqual(result, expected)
|
wri/harvest-hansen
|
test_process_umd.py
|
Python
|
apache-2.0
| 1,045
|
# Time: O(n)
# Space: O(h)
# You need to find the largest value in each row of a binary tree.
#
# Example:
# Input:
#
# 1
# / \
# 3 2
# / \ \
# 5 3 9
#
# Output: [1, 3, 9]
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def largestValues(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        # Preorder DFS carrying the current depth; the first node to reach a
        # new depth seeds that row's maximum, later nodes may raise it.
        row_max = []

        def visit(node, depth):
            if node is None:
                return
            if depth == len(row_max):
                row_max.append(node.val)
            elif node.val > row_max[depth]:
                row_max[depth] = node.val
            visit(node.left, depth + 1)
            visit(node.right, depth + 1)

        visit(root, 0)
        return row_max
# Time: O(n)
# Space: O(n)
class Solution2(object):
    def largestValues(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        # Iterative level-order traversal: take the max of each level, then
        # collect that level's (truthy) children for the next pass.
        maxima = []
        level = [root]
        while any(level):
            maxima.append(max(node.val for node in level))
            next_level = []
            for node in level:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            level = next_level
        return maxima
|
kamyu104/LeetCode
|
Python/find-largest-value-in-each-tree-row.py
|
Python
|
mit
| 1,405
|
# [1] https://doi.org/10.1063/1.5082885
# Minimum dynamic path
# Unke, 2019
from collections import namedtuple
import operator
import re
import numpy as np
from pysisyphus.constants import AU2KJPERMOL
from pysisyphus.dynamics.driver import md, MDResult
from pysisyphus.dynamics.helpers import (
dump_coords,
get_mb_velocities_for_geom,
temperature_for_kinetic_energy,
)
from pysisyphus.helpers_pure import highlight_text
def parse_raw_term_func(raw_term_func):
    """
    Parse a termination-function string like "0,1<2.5" into a callable.

    The string is "<i>,<j><op><ref>": two atom indices, a comparison
    operator (<, >, <=, >=, ==) and a reference distance. The returned
    callable takes coords3d and reports whether dist(i, j) compares true
    against the reference.

    Returns None (after printing a message) when the string cannot be parsed.
    """
    funcs = {
        "<": operator.lt,
        ">": operator.gt,
        "<=": operator.le,
        ">=": operator.ge,
        "==": operator.eq,
    }

    def comp_closure(indices, op, ref_value):
        a_ind, b_ind = indices
        func = funcs[op]

        def comp_func(coords3d):
            a = coords3d[a_ind]
            b = coords3d[b_ind]
            dist = np.linalg.norm(a - b)
            return func(dist, ref_value)

        return comp_func

    operator_re = re.compile("([<>=]+)")
    mobj = operator_re.split(raw_term_func)
    # BUG FIX: re.split() never returns None, so the old 'mobj is None'
    # check was dead and malformed input crashed with ValueError on the
    # unpack below. A well-formed string splits into exactly
    # (indices, operator, reference value).
    if len(mobj) != 3:
        print(f"Could not parse term_func '{raw_term_func}!'")
        return None
    indices, op, ref_value = mobj
    ref_value = float(ref_value)
    indices = [int(ind) for ind in indices.split(",")]
    return comp_closure(indices, op, ref_value)
def parse_raw_term_funcs(raw_term_funcs):
    """Parse a mapping of raw termination-function strings into callables,
    silently dropping entries that fail to parse."""
    parsed = {}
    for key, raw_func in raw_term_funcs.items():
        term_func = parse_raw_term_func(raw_func)
        if term_func:
            parsed[key] = term_func
    return parsed
def run_md(geom, dt, steps, v0=None, term_funcs=None, external=False):
    """
    Run a molecular dynamics trajectory for *geom*.

    Dispatches to the calculator's own run_md() when *external* is True and
    the calculator supports it; otherwise uses the internal md() driver.

    :param geom: geometry with an attached calculator
    :param dt: timestep (presumably fs, given the t*1e-3 -> ps conversion
               below — TODO confirm against md())
    :param steps: number of MD steps
    :param v0: initial velocities, or None
    :param term_funcs: mapping of termination functions (internal MD only)
    :param external: prefer the calculator's own MD implementation
    :returns: MDResult
    """
    if external and hasattr(geom.calculator, "run_md"):
        t = dt * steps
        t_ps = t * 1e-3
        md_kwargs = {
            "atoms": geom.atoms,
            "coords": geom.coords,
            "t": t,
            "dt": dt,
            "velocities": v0,
            "dump": dt,
        }
        print("Running MD with external calculator implementation.")
        if term_funcs is not None:
            # External MD cannot evaluate per-step termination conditions.
            print("Termination functions are not supported in external MD!")
        geoms = geom.calculator.run_md(**md_kwargs)
        # Wrap the external trajectory in the same MDResult shape the
        # internal driver produces; fields it cannot provide stay None.
        md_result = MDResult(
            coords=[geom.coords for geom in geoms],
            t_ps=t_ps,
            step=int(t / dt - 1),
            terminated=None,
            T=None,
            E_tot=None,
        )
    else:
        md_kwargs = {
            "v0": v0,
            "steps": steps,
            "dt": dt,
            "term_funcs": term_funcs,
            "verbose": False,
            "remove_com_v": False,
        }
        print("Running MD with internal implementation.")
        md_result = md(geom, **md_kwargs)

    return md_result
# Result container for the minimum-dynamic-path run.
# FIX: the typename now matches the variable name; it previously read
# "MDResult", which shadowed the imported MDResult class name in reprs.
MDPResult = namedtuple(
    "MDPResult",
    "ascent_xs md_init_plus md_init_minus "
    "md_fin_plus md_fin_minus "
    "md_fin_plus_term md_fin_minus_term",
)
def mdp(
    geom,
    steps,
    dt,
    term_funcs=None,
    steps_init=None,
    E_excess=0.0,
    displ_length=0.1,
    epsilon=5e-4,
    ascent_alpha=0.05,
    max_ascent_steps=25,
    max_init_trajs=10,
    dump=True,
    seed=None,
    external_md=False,
):
    """Minimum dynamic path (MDP) calculation.

    Starting from a transition state `geom` (first Hessian eigenvalue must
    be negative), two MD trajectories with opposite initial velocities are
    propagated so they fall into the two basins of attraction connected by
    the TS.

    Parameters
    ----------
    geom
        Transition-state geometry providing energy/gradient/hessian.
    steps : int
        Number of MD steps of the final trajectories.
    dt : float
        MD timestep (must be > 0).
    term_funcs : dict, optional
        Mapping name -> callable or raw string; raw strings are parsed via
        parse_raw_term_func(). A trajectory stops when a function fires.
    steps_init : int, optional
        Steps of the short test trajectories; defaults to steps // 10.
    E_excess : float, optional
        Excess energy above the TS, split evenly between E_pot and E_kin.
    displ_length : float, optional
        Initial displacement along the transition vector (E_excess == 0).
    epsilon : float, optional
        Length of the random perpendicular displacement seeding the
        steepest-ascent walk (E_excess > 0).
    ascent_alpha : float, optional
        Steepest-ascent step length.
    max_ascent_steps, max_init_trajs : int, optional
        Iteration limits for the ascent and the initialization trajectories.
    dump : bool, optional
        Write .trj files of the trajectories.
    seed : int, optional
        RNG seed; a random one is drawn and printed when omitted.
    external_md : bool, optional
        Prefer the calculator's own MD implementation when available.

    Returns
    -------
    MDPResult
    """
    # Sanity checks and forcing some types
    dt = float(dt)
    assert dt > 0.0
    steps = int(steps)

    if steps_init is None:
        steps_init = steps // 10
        print(f"No 'steps_init' provided! Using {steps_init}")
    E_excess = float(E_excess)
    assert E_excess >= 0.0
    displ_length = float(displ_length)
    assert displ_length >= 0.0

    if term_funcs is None:
        term_funcs = {}
    # Accept ready-made callables as well as raw strings that still have to
    # be parsed into comparison functions.
    for k, v in term_funcs.items():
        if callable(v):
            continue
        elif isinstance(v, str):
            term_funcs[k] = parse_raw_term_func(v)
        else:
            raise Exception(f"Invalid term function '{k}: {v}' encountered!")

    print(highlight_text("Minimum dynamic path calculation"))

    if seed is None:
        # 2**32 - 1
        seed = np.random.randint(4294967295)
    np.random.seed(seed)
    print(f"Using seed {seed} to initialize the random number generator.\n")

    E_TS = geom.energy
    E_tot = E_TS + E_excess
    # Distribute E_excess evenly on E_pot and E_kin
    E_pot_diff = 0.5 * E_excess
    E_pot_desired = E_TS + E_pot_diff
    print(f"E_TS={E_TS:.6f} au")

    # Determine transition vector from the imaginary mode of the Hessian.
    w, v = np.linalg.eigh(geom.hessian)
    assert w[0] < -1e-8
    trans_vec = v[:, 0]

    # Disable removal of translation/rotation for analytical potentials
    remove_com_v = remove_rot_v = geom.cart_coords.size > 3

    if E_excess == 0.0:
        print("MDP without excess energy.")
        # Without excess energy we have to do an initial displacement along
        # the transition vector to get a non-vanishing gradient.
        initial_displacement = displ_length * trans_vec
        x0_plus = geom.coords + initial_displacement
        x0_minus = geom.coords - initial_displacement

        v0_zero = np.zeros_like(geom.coords)
        md_kwargs = {
            "v0": v0_zero.copy(),
            # Bug fix: run_md() expects the number of steps; the original
            # passed the total time as "t", which raised a TypeError.
            "steps": steps,
            "dt": dt,
            "term_funcs": term_funcs,
            "external": external_md,
        }

        geom.coords = x0_plus
        md_fin_plus = run_md(geom, **md_kwargs)
        geom.coords = x0_minus
        md_fin_minus = run_md(geom, **md_kwargs)

        if dump:
            dump_coords(geom.atoms, md_fin_plus.coords, "mdp_plus.trj")
            dump_coords(geom.atoms, md_fin_minus.coords, "mdp_minus.trj")

        mdp_result = MDPResult(
            ascent_xs=None,
            md_init_plus=None,
            md_init_minus=None,
            md_fin_plus=md_fin_plus,
            md_fin_minus=md_fin_minus,
            # Bug fix: the original omitted these two required fields, so
            # constructing MDPResult raised a TypeError on this code path.
            md_fin_plus_term=md_fin_plus.terminated,
            md_fin_minus_term=md_fin_minus.terminated,
        )
        return mdp_result

    print(f"E_excess={E_excess:.6f} au, ({E_excess*AU2KJPERMOL:.1f} kJ/mol)")
    print(f"E_pot,desired=E_TS + {E_pot_diff*AU2KJPERMOL:.1f} kJ/mol")
    print()

    # Generate random vector perpendicular to transition vector
    perp_vec = np.random.rand(*trans_vec.shape)
    # Zero last element if we have an analytical surface
    if perp_vec.size == 3:
        perp_vec[2] = 0
    # Orthogonalize against trans_vec, then normalize.
    perp_vec = perp_vec - (perp_vec @ trans_vec) * trans_vec
    perp_vec /= np.linalg.norm(perp_vec)

    # Initial displacement from x_TS to x, generating a point with
    # non-vanishing gradient.
    x = geom.coords + epsilon * perp_vec
    geom.coords = x

    # Do steepest ascent until E_tot is reached
    E_pot = geom.energy
    ascent_xs = list()
    for i in range(max_ascent_steps):
        ascent_xs.append(geom.coords.copy())
        ascent_converged = E_pot >= E_pot_desired
        if ascent_converged:
            break

        gradient = geom.gradient
        E_pot = geom.energy

        direction = gradient / np.linalg.norm(gradient)
        step = ascent_alpha * direction
        new_coords = geom.coords + step
        geom.coords = new_coords
    assert ascent_converged, "Steepest ascent didn't converge!"
    assert (E_tot - E_pot) > 0.0, (
        "Potential energy after steepest ascent is greater than the desired "
        f"total energy ({E_pot:.6f} > {E_tot:.6f}). Maybe try a smaller epsilon? "
        f"The current value Ɛ={epsilon:.6f} may be too big!"
    )

    ascent_xs = np.array(ascent_xs)
    if dump:
        dump_coords(geom.atoms, ascent_xs, "mdp_ee_ascent.trj")
    x0 = geom.coords.copy()

    print(highlight_text("Running initialization trajectories", level=1))
    for i in range(max_init_trajs):
        # Determine random velocities for the given kinetic energy.
        E_kin = E_tot - E_pot
        T = temperature_for_kinetic_energy(len(geom.atoms), E_kin)
        v0 = get_mb_velocities_for_geom(
            geom, T, remove_com_v=remove_com_v, remove_rot_v=remove_rot_v
        ).flatten()
        # Zero last element if we have an analytical surface
        if v0.size == 3:
            v0[2] = 0

        # Run initial MD to check if both trajectories run towards different
        # basins of attraction.
        # First MD with positive v0
        md_init_kwargs = {
            "v0": v0.copy(),
            "steps": steps_init,
            "dt": dt,
            "external": external_md,
        }
        geom.coords = x0.copy()
        md_init_plus = run_md(geom, **md_init_kwargs)
        # Second MD with negative v0
        geom.coords = x0.copy()
        md_init_kwargs["v0"] = -v0.copy()
        md_init_minus = run_md(geom, **md_init_kwargs)
        dump_coords(geom.atoms, md_init_plus.coords, f"mdp_ee_init_plus_{i:02d}.trj")
        dump_coords(geom.atoms, md_init_minus.coords, f"mdp_ee_init_minus_{i:02d}.trj")

        # Check if both MDs run into different basins of attraction.
        # We (try to) do this by calculating the overlap between the
        # transition vector and the normalized vector defined by the
        # difference between x0 and the endpoint of the respective
        # test trajectory. Both overlaps should have different signs.
        end_plus = md_init_plus.coords[-1]
        pls = end_plus - x0
        pls /= np.linalg.norm(pls)
        end_minus = md_init_minus.coords[-1]
        minus = end_minus - x0
        minus /= np.linalg.norm(minus)
        p = trans_vec @ pls
        m = trans_vec @ minus
        init_trajs_converged = np.sign(p) != np.sign(m)
        if init_trajs_converged:
            print("Trajectories ran into different basins. Breaking.")
            break
    if dump:
        dump_coords(geom.atoms, md_init_plus.coords, "mdp_ee_init_plus.trj")
        dump_coords(geom.atoms, md_init_minus.coords, "mdp_ee_init_minus.trj")
    assert init_trajs_converged
    print(f"Ran 2*{i+1} initialization trajectories.")
    print()

    # Run actual trajectories, using the supplied termination functions if possible.
    print(highlight_text("Running actual full trajectories.", level=1))

    def print_status(terminated, step):
        # Report why/when a trajectory ended.
        if terminated:
            msg = f"\tTerminated by '{terminated}' in step {step}."
        else:
            msg = "\tMax time steps reached!"
        print(msg)

    # "Production"/Final MDs
    md_fin_kwargs = {
        "v0": v0.copy(),
        "steps": steps,
        "dt": dt,
        "term_funcs": term_funcs,
        "external": external_md,
    }
    # MD with positive v0.
    geom.coords = x0.copy()
    md_fin_plus = run_md(geom, **md_fin_kwargs)
    print_status(md_fin_plus.terminated, md_fin_plus.step)
    # MD with negative v0.
    geom.coords = x0.copy()
    md_fin_kwargs["v0"] = -v0
    md_fin_minus = run_md(geom, **md_fin_kwargs)
    print_status(md_fin_minus.terminated, md_fin_minus.step)

    md_fin_plus_term = md_fin_plus.terminated
    md_fin_minus_term = md_fin_minus.terminated
    if dump:
        dump_coords(geom.atoms, md_fin_plus.coords, "mdp_ee_fin_plus.trj")
        dump_coords(geom.atoms, md_fin_minus.coords, "mdp_ee_fin_minus.trj")

    mdp_result = MDPResult(
        ascent_xs=ascent_xs,
        md_init_plus=md_init_plus,
        md_init_minus=md_init_minus,
        md_fin_plus=md_fin_plus,
        md_fin_minus=md_fin_minus,
        md_fin_plus_term=md_fin_plus_term,
        md_fin_minus_term=md_fin_minus_term,
    )
    return mdp_result
|
eljost/pysisyphus
|
pysisyphus/dynamics/mdp.py
|
Python
|
gpl-3.0
| 11,338
|
import os
# Paths are relative -- presumably resolved from the test's working
# directory two levels below the project root; verify against the runner.
SHARE_DIR = '../../share'
# Directory holding configuration templates shipped under share/.
TEMPLATE_DIR = os.path.join(SHARE_DIR, 'template')
CONFIG_DIR = '../../conf'
|
StratusLab/client
|
api/code/src/test/python/TestDefaults.py
|
Python
|
apache-2.0
| 114
|
import sys
import io
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
from pathlib import Path
def read_version(package):
    """Extract ``__version__`` from ``<package>/__init__.py`` without importing it.

    Returns the version string, or None when no ``__version__ = `` line is
    found.
    """
    with (Path(package) / '__init__.py').open() as fd:
        for line in fd:
            if line.startswith('__version__ = '):
                # Strip either quote style (the original only handled
                # single quotes).
                return line.split()[-1].strip().strip("'\"")
    return None
# Resolved once at import time from the package's __init__.py; used below
# as the sdist/wheel version.
version = read_version('kube_aws_autoscaler')
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates to pytest with coverage."""

    # NOTE(review): the declared option maps to the `cov_html` attribute,
    # yet initialize_options also sets `self.cov`, which no declared option
    # feeds -- presumably leftover; confirm before removing.
    user_options = [('cov-html=', None, 'Generate junit html report')]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.cov = None
        # Default pytest invocation: package coverage with a terminal
        # report, verbose output.
        self.pytest_args = ['--cov', 'kube_aws_autoscaler', '--cov-report', 'term-missing', '-v']
        self.cov_html = False

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Add an HTML coverage report when --cov-html was passed.
        if self.cov_html:
            self.pytest_args.extend(['--cov-report', 'html'])
        # Always restrict collection to the tests/ directory.
        self.pytest_args.extend(['tests'])

    def run_tests(self):
        # Imported locally so setup.py loads without pytest installed.
        import pytest
        errno = pytest.main(self.pytest_args)
        sys.exit(errno)
def readme():
    """Return the contents of README.rst decoded as UTF-8."""
    # Use a context manager so the file handle is closed deterministically
    # (the original left it open until garbage collection).
    with io.open('README.rst', encoding='utf-8') as fd:
        return fd.read()
# Needed only to run the test suite; also exposed via extras_require so
# `pip install .[tests]` pulls them in.
tests_require = [
    'pytest',
    'pytest-cov'
]

setup(
    name='kube-aws-autoscaler',
    packages=find_packages(),
    # Version is read from the package's __init__.py (see read_version).
    version=version,
    description='Kubernetes AWS Autoscaler',
    long_description=readme(),
    author='Henning Jacobs',
    url='https://github.com/hjacobs/kube-aws-autoscaler',
    keywords='kubernetes operations aws autoscaler',
    license='GNU General Public License v3 (GPLv3)',
    tests_require=tests_require,
    extras_require={'tests': tests_require},
    # `python setup.py test` runs the PyTest command defined above.
    cmdclass={'test': PyTest},
    test_suite='tests',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Topic :: System :: Clustering',
        'Topic :: System :: Monitoring',
    ],
    entry_points={'console_scripts': ['kube-aws-autoscaler = kube_aws_autoscaler.main:main']}
)
|
hjacobs/kube-aws-autoscaler
|
setup.py
|
Python
|
gpl-3.0
| 2,248
|
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.campaigns.models.campaigns import CatalogCampaign
from ._base import BaseCampaignForm
class CatalogCampaignForm(BaseCampaignForm):
    """Admin form for CatalogCampaign objects."""

    class Meta(BaseCampaignForm.Meta):
        model = CatalogCampaign
        # Hide "filters" and "coupon" on top of the base exclusions --
        # presumably because catalog campaigns do not use basket filters or
        # coupons; confirm against the CatalogCampaign model.
        exclude = BaseCampaignForm.Meta.exclude + ["filters", "coupon"]
|
hrayr-artunyan/shuup
|
shuup/campaigns/admin_module/forms/_catalog.py
|
Python
|
agpl-3.0
| 508
|
class SecurityConfig:
    """Flask-Security default settings (SECURITY_* keys)."""

    # Password hashing scheme.
    SECURITY_PASSWORD_HASH = 'bcrypt'
    # NOTE(review): hard-coded salt checked into source. bcrypt generates
    # its own salt, so this value is unused there, but confirm it is
    # overridden for any scheme that actually consumes the salt.
    SECURITY_PASSWORD_SALT = 'useless'
    # Request parameter carrying the auth token.
    SECURITY_TOKEN_AUTHENTICATION_KEY = 'token'
    # No confirmation e-mail on registration.
    SECURITY_SEND_REGISTER_EMAIL = False
    # Allow self-registration and password recovery.
    SECURITY_REGISTERABLE = True
    SECURITY_RECOVERABLE = True
|
piotrdubiel/scribeserver
|
config/defaults.py
|
Python
|
mit
| 255
|
# -*- coding: utf-8 -*-
# @Author: Gillett Hernandez
# @Date: 2017-08-21 14:45:40
# @Last Modified by: Gillett Hernandez
# @Last Modified time: 2017-09-29 05:04:09
from collections import Counter
from euler_funcs import get_primes, prime_factorization, divisor_pairs_pf, divisor_pairs, isint, timed, is_prime, true_large_prime_sieve
def d_upper_limit(n0, N):
    """Largest step d for which single_expression(n0, d) can still be <= N.

    Solves 3*d**2 + 2*n0*d - n0**2 = N for d (positive root). Converted
    from a name-bound lambda (PEP 8 E731) without changing behavior.
    """
    return ((4 * n0 ** 2 + 3 * N) ** 0.5 - n0) / 3


def single_expression(n0, d):
    """Closed form of n = x**2 - y**2 - z**2 for the arithmetic progression
    z = n0, y = n0 + d, x = n0 + 2*d."""
    return 3 * d ** 2 + 2 * n0 * d - n0 ** 2
def lowest_n0_with_lowest_N_gt_limit(limit):
    """Smallest n0 whose minimal reachable N exceeds `limit`.

    Derived from n > (3*limit - 1) / 4, taken as integer floor.
    """
    numerator = 3 * limit - 1
    return numerator // 4
def invert(cache, keys=(1, 10)):
    """Invert a {n: solution_count} mapping into {count: [n, ...]}.

    Only counts contained in `keys` are kept. The default was changed from
    a mutable list literal to an equivalent tuple to avoid the shared
    mutable-default pitfall; behavior is unchanged.
    """
    inverted = {}
    for n, count in cache.items():
        if count in keys:
            # setdefault replaces the manual "create list then append"
            # branching of the original.
            inverted.setdefault(count, []).append(n)
    return inverted
@timed
def fix_n0(Nlimit, lookingfor=(1, 10), debug=False):
    """Count, for every n <= Nlimit, the representations of
    n = x**2 - y**2 - z**2 with z = n0, y = n0 + d, x = n0 + 2*d
    (an arithmetic progression of common difference d).

    Returns {n: representation_count}. `lookingfor` is kept for interface
    compatibility but is unused here (filtering happens in invert()); its
    default was changed from a mutable list to an equivalent tuple.
    """
    cache = {}
    n0limit = lowest_n0_with_lowest_N_gt_limit(Nlimit)
    if debug:
        # Bug fix: the original referenced an undefined name `upper`,
        # raising a NameError whenever debug=True. d_upper_limit is
        # presumably what was meant -- TODO confirm the intended bound.
        print(f"n0limit = {1+n0limit}, n0 in [{n0limit//3}..{1+int(d_upper_limit(n0limit, Nlimit))}]")
    for n0 in range(1, 1 + n0limit):
        dlimit = int(d_upper_limit(n0, Nlimit))
        if n0 // 3 - 1 > 1 + dlimit:
            break
        for d in range(n0 // 3 - 1, 3 + dlimit):
            # Arithmetic-progression representation; the equivalent closed
            # form is n = 3*d**2 + 2*n0*d - n0**2 (the original kept it in
            # a dead `else` branch under `if True:`, removed here).
            z = n0
            y = z + d
            x = y + d
            n = x ** 2 - y ** 2 - z ** 2
            if n > Nlimit:
                break
            if n <= 0:
                continue
            if n not in cache:
                cache[n] = 0
            cache[n] += 1
            if debug:
                print(f"{x}^2 - {y}^2 - {z}^2 = {n}. d={d}")
    if debug:
        print("\n\n\n")
    return cache
def report_n(limit):
    """Return (#n with exactly one representation, #n with exactly ten).

    NOTE: the fallback `[0]` has length 1, so a missing key 10 reports 1
    rather than 0 -- preserved from the original implementation.
    """
    inverted = invert(fix_n0(limit))
    ones = len(inverted[1])
    tens = len(inverted.get(10, [0]))
    return ones, tens
def main():
    # Exploratory driver for Project Euler 136: compares the number of n
    # with exactly one representation against a prime-counting formula.
    N = 10000
    cache = fix_n0(N)
    primes = get_primes(limit=N)
    ic = invert(cache)
    l = list(sorted(ic[1]))
    # for e in filter(lambda e: e % 16 == 0,l):
    #     print(solutions(e, primes, True))
    # for e in filter(lambda e: e % 4 == 0 and e % 16 != 0,l):
    #     print(solutions(e, primes, True))
    # NOTE(review): the leading `break` disables this whole verification
    # loop -- presumably left in deliberately after the conjecture was
    # confirmed; remove the loop or the break once decided.
    for i in range(1, N):
        break
        if i not in cache:
            continue
        l = cache[i]
        # sol = solutions(i,primes)
        pf = prime_factorization(i, primes)
        if l == 1:
            # Conjecture being checked: n has exactly one representation
            # iff n is an odd prime p = 3 (mod 4), or 4p, or 16p.
            if is_prime(i, primes):
                # print(f"p check, {i%4}")
                continue
            elif 2 in pf and pf[2] == 2:
                if i//4 == 1:
                    continue
                assert is_prime(i//4, primes), i//4
                # print("4|n and p check")
                continue
            elif 2 in pf and pf[2] == 4:
                if i//16 == 1:
                    continue
                assert is_prime(i//16, primes), i//16
                # print("16|p and p check")
                continue
            else:
                print(pf)
                break
        # print(pf)
        # print(solutions(i, primes))
        # print(" ".join(f"({a % 4}, {b % 4})" for a,b in list(divisor_pairs_pf(pf))))
        # print("\n"*2)
    # Cross-check: count of single-solution n should equal the sum of the
    # three prime classes below.
    lp1 = len(list(filter(lambda e: e % 4 == 3, primes)))
    lp2 = len(list(filter(lambda e: e < N//4, primes)))
    lp3 = len(list(filter(lambda e: e < N//16, primes)))
    print(f"number of Ns with only 1 solution = {len(ic[1])}")
    print(f"(number of primes = 3 mod 4 below N = {lp1}, + number of primes below N//4 = {lp2}, + number of primes below N//16 = {lp3}) = {lp1+lp2+lp3}")
    print("THEY'RE EQUAL")
    # primes = true_large_prime_sieve(50000000)
    # control = get_primes(limit=1+int(50000000**0.5))
    # print(len(primes))
    # with open("primes_to_50000000.txt", "w") as fd:
    #     for p in primes:
    #         # if not is_prime(p, control):
    #         #     print(p)
    #         #     continue
    #         fd.write(str(p) + "\n")
    # Reads a pre-computed prime list (generated by the commented-out
    # sieve above); the file must exist next to the script.
    with open("primes_to_50000000.txt", "r") as fd:
        primes = [int(l) for l in fd.read().split("\n") if l != ""]
    # primes10 = get_primes(limit=10000000)
    # for p in primes:
    #     if p not in primes10:
    #         print(f"{p} generated by tlps1 not prime")
    #     if p > 10000000:
    #         break
    # return
    N = 50000000
    print(len(primes))
    # Apply the confirmed formula at the real problem size.
    p1 = list(filter(lambda e: e % 4 == 3, primes))
    p2 = list(filter(lambda e: e <= N//4, primes))
    p3 = list(filter(lambda e: e <= N//16, primes))
    # last20p2 = p2[-5:]
    # last20p3 = p3[-5:]
    c = 0
    # for p in last20p2:
    #     sol = solutions(4*p,primes)
    #     l = len(sol)
    #     if l == 1:
    #         print(p*4)
    #     else:
    #         print(f"nope, {p*4}")
    #     if 4*p >= N:
    #         print("woops")
    #         c += 1
    # for p in last20p3:
    #     sol = solutions(16*p,primes)
    #     l = len(sol)
    #     if l == 1:
    #         print(p*16)
    #     else:
    #         print(f"nope, {p*16}")
    #     if 16*p >= N:
    #         print("woops")
    #         c += 1
    ls = [len(pl) for pl in [p1,p2,p3]]
    # Final answer: total count of the three prime classes.
    print(sum(ls) - c)
    # print(len(primes))
    # elif l > 1:
    #     print(pf)
    # for e in filter(lambda e: e % 4 != 0,l):
    #     pairs = list(divisor_pairs(e, primes))
    #     if not is_prime(e, primes):
    #         print(f"{e} is not prime, pairs = {pairs}")
# Script entry point.
if __name__ == '__main__':
    main()
|
gillett-hernandez/project-euler
|
Python/problem_136.py
|
Python
|
mit
| 5,669
|
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name
# pylint flaggs import errors, as the bot doesn't know have openshift-tools libs
#pylint: disable=import-error
"""
ops-metric-client: Script that sends metrics to through metric_sender.
This script will send metrics to Zagg & Hawk according to which client is active.
This script will send:
heartbeat (registration information)
single metric.
By default this script reads /etc/openshift_tools/metric_sender.yaml
A different config file can be set from the cli
Examples
# Send a heartbeat (looks in a config file for specifics)
ops-metric-client --send-heartbeat
# Send a single metric (generic interface, send any adhoc metrics)
ops-metric-client -s hostname.example.com -k zbx.item.name -o someval
# Send a dynamic low level discovery with macros
# low level dynamic objects require:
# - discovery key: This is what was setup and defined in Zabbix in the disovery rule
# - macro string: This is the variable that will be used to setup the item and trigger
# - macro name: This is the name of object. This is a comma seperated list of names
ops-metric-client -s --discovery-key filesys --macro-string #FILESYS --macro-names /,/var,/home
"""
import json
import argparse
from openshift_tools.monitoring.metric_sender import MetricSender, MetricSenderHeartbeat
import yaml
class OpsMetricClient(object):
    """Send heartbeats, single metrics and low-level-discovery items
    through MetricSender, driven by CLI arguments and a YAML config file.
    """

    def __init__(self):
        self.metric_sender = None  # configured in config_metric_sender()
        self.args = None           # argparse.Namespace from parse_args()
        self.config = None         # parsed YAML config dict
        self.heartbeat = None

    def run(self):
        """Main function to run the script: parse CLI + config, then send
        whichever payloads the arguments requested in one batch."""
        self.parse_args()
        self.parse_config(self.args.config_file)
        self.config_metric_sender()

        if self.args.send_heartbeat:
            self.add_heartbeat()

        if self.args.key and self.args.value:
            self.add_metric()

        if self.args.discovery_key and self.args.macro_string and self.args.macro_names:
            self.add_dynamic_metric()

        self.metric_sender.send_metrics()

    @staticmethod
    def adjust_type(val, metric_type):
        """
        :param val: string representing metric value
        :param metric_type: string or numeric
        :return: if metric_type isn't string - value converted to float or int according to json parsing
        """
        if metric_type == 'string':
            return val
        try:
            # json.loads turns "5" -> 5 and "1.5" -> 1.5; anything that is
            # not valid JSON is passed through unchanged.
            return json.loads(val)
        except ValueError:
            return val

    def parse_args(self):
        """ parse the args from the cli """
        parser = argparse.ArgumentParser(description='metric sender')
        parser.add_argument('--send-heartbeat', help="send heartbeat metric to zagg", action="store_true")
        group = parser.add_mutually_exclusive_group()
        group.add_argument('-s', '--host',
                           help='specify host name as registered in Zabbix')
        group.add_argument('--synthetic', default=False, action='store_true',
                           help='send as cluster-wide synthetic host')
        parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
        parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
        parser.add_argument('-c', '--config-file', help='ops-metric-client config file',
                            default='/etc/openshift_tools/metric_sender.yaml')
        parser.add_argument('-m', '--metric', choices=['numeric', 'string'],
                            default='numeric',
                            help='use specific metrics type [numeric, string]')

        key_value_group = parser.add_argument_group('Sending a Key-Value Pair')
        key_value_group.add_argument('-k', '--key', help='metric key')
        key_value_group.add_argument('-o', '--value', help='metric value')
        key_value_group.add_argument('-t', '--tags', help='list of space delimited key tags: units=byte ...', nargs='*')

        low_level_discovery_group = parser.add_argument_group('Sending a Low Level Discovery Item')
        low_level_discovery_group.add_argument('--discovery-key', help='discovery key')
        low_level_discovery_group.add_argument('--macro-string', help='macro string')
        low_level_discovery_group.add_argument('--macro-names', help='comma separated list of macro names')

        self.args = parser.parse_args()

    def parse_config(self, config_file):
        """ parse config file """
        # Bug fix: the original used the Python-2-only file() builtin and
        # never closed the handle; it also used yaml.load without a safe
        # loader, which can execute arbitrary Python from the config file.
        # safe_load suffices for plain key/value configs like this one.
        with open(config_file) as fd:
            self.config = yaml.safe_load(fd)

    def config_metric_sender(self):
        """ configure the metric_sender """
        # Host precedence: explicit -s, then --synthetic cluster-wide host
        # from the config, then the config's own host name.
        if self.args.host:
            host = self.args.host
        elif self.args.synthetic:
            host = self.config['synthetic_clusterwide']['host']['name']
        else:
            host = self.config['host']['name']

        metric_verbose = self.args.verbose
        metric_debug = self.args.debug

        # Tolerate "True"/"False" strings (e.g. from wrapper scripts).
        if isinstance(metric_verbose, str):
            metric_verbose = (metric_verbose == 'True')

        if isinstance(metric_debug, str):
            metric_debug = (metric_debug == 'True')

        self.metric_sender = MetricSender(host=host, verbose=metric_verbose, debug=metric_debug,
                                          config_file=self.args.config_file)

    def add_heartbeat(self):
        """ create a heartbeat metric """
        if self.args.synthetic:
            heartbeat = MetricSenderHeartbeat(templates=self.config['synthetic_clusterwide']['heartbeat']['templates'],
                                              hostgroups=self.config['heartbeat']['hostgroups'])
        else:
            heartbeat = MetricSenderHeartbeat(templates=self.config['heartbeat']['templates'],
                                              hostgroups=self.config['heartbeat']['hostgroups'])
        self.metric_sender.add_heartbeat(heartbeat)

    def add_metric(self):
        """ send key/value pair """
        # Get tags from command line args ("units=byte" -> {'units': 'byte'})
        tags = dict([i.split("=")[0], i.split("=")[1]] for i in self.args.tags) if self.args.tags else {}

        self.metric_sender.add_metric({self.args.key : self.adjust_type(self.args.value, self.args.metric)},
                                      key_tags=tags)

    def add_dynamic_metric(self):
        """ send zabbix low level discovery item to zagg """
        self.metric_sender.add_dynamic_metric(self.args.discovery_key,
                                              self.args.macro_string,
                                              self.args.macro_names.split(','))
if __name__ == "__main__":
OMC = OpsMetricClient()
OMC.run()
|
twiest/openshift-tools
|
scripts/monitoring/ops-metric-client.py
|
Python
|
apache-2.0
| 6,739
|
import unittest
import mock
from pulp.client.commands.repo.cudl import CreateRepositoryCommand, DeleteRepositoryCommand
from pulp.client.commands.repo.sync_publish import PublishStatusCommand, RunPublishRepositoryCommand
from pulp.client.extensions.core import PulpCli
from pulp_openstack.extensions.admin import pulp_cli
from pulp_openstack.extensions.admin import images
from pulp_openstack.extensions.admin.repo_list import ListOpenstackRepositoriesCommand
from pulp_openstack.extensions.admin.upload import UploadOpenstackImageCommand
class TestInitialize(unittest.TestCase):
    """Smoke test for the admin CLI tree built by pulp_cli.initialize()."""

    def test_structure(self):
        """initialize() should create the expected sections and commands."""
        context = mock.MagicMock()
        context.config = {
            'filesystem': {'upload_working_dir': '/a/b/c'},
            'output': {'poll_frequency_in_seconds': 3}
        }
        context.cli = PulpCli(context)

        # create the tree of commands and sections
        pulp_cli.initialize(context)

        # verify that sections exist and have the right commands.
        # assertIsInstance gives clearer failure messages than
        # assertTrue(isinstance(...)).
        openstack_section = context.cli.root_section.subsections['openstack']
        repo_section = openstack_section.subsections['repo']
        self.assertIsInstance(repo_section.commands['create'], CreateRepositoryCommand)
        self.assertIsInstance(repo_section.commands['delete'], DeleteRepositoryCommand)
        self.assertIsInstance(repo_section.commands['list'], ListOpenstackRepositoriesCommand)
        self.assertIsInstance(repo_section.commands['copy'], images.ImageCopyCommand)
        self.assertIsInstance(repo_section.commands['remove'], images.ImageRemoveCommand)

        upload_section = repo_section.subsections['uploads']
        self.assertIsInstance(upload_section.commands['upload'], UploadOpenstackImageCommand)

        # Both publish targets expose the same status/run command pair
        # (the original duplicated these four assertions verbatim).
        for publisher in ('http', 'glance'):
            publish_section = repo_section.subsections['publish'].subsections[publisher]
            self.assertIsInstance(publish_section.commands['status'], PublishStatusCommand)
            self.assertIsInstance(publish_section.commands['run'], RunPublishRepositoryCommand)
|
pulp/pulp_openstack
|
extensions_admin/test/unit/extensions/admin/test_pulp_cli.py
|
Python
|
gpl-2.0
| 2,372
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class CaCertificatesMozilla(Package):
    """The Mozilla CA certificate store in PEM format"""

    homepage = "https://curl.se/docs/caextract.html"
    url      = "https://curl.se/ca/cacert-2021-04-13.pem"

    maintainers = ['haampie']

    # Each release is a single dated .pem bundle; expand=False because the
    # download is a plain file, not an archive.
    version('2021-07-05', sha256='a3b534269c6974631db35f952e8d7c7dbf3d81ab329a232df575c2661de1214a', expand=False)
    version('2021-05-25', sha256='3a32ad57e7f5556e36ede625b854057ac51f996d59e0952c207040077cbe48a9', expand=False)
    version('2021-04-13', sha256='533610ad2b004c1622a40622f86ced5e89762e1c0e4b3ae08b31b240d863e91f', expand=False)
    version('2021-01-19', sha256='e010c0c071a2c79a76aa3c289dc7e4ac4ed38492bfda06d766a80b707ebd2f29', expand=False)
    version('2020-12-08', sha256='313d562594ebd07846ad6b840dd18993f22e0f8b3f275d9aacfae118f4f00fb7', expand=False)
    version('2020-10-14', sha256='bb28d145ed1a4ee67253d8ddb11268069c9dafe3db25a9eee654974c4e43eee5', expand=False)
    version('2020-07-22', sha256='2782f0f8e89c786f40240fc1916677be660fb8d8e25dede50c9f6f7b0c2c2178', expand=False)
    version('2020-06-24', sha256='726889705b00f736200ed7999f7a50021b8735d53228d679c4e6665aa3b44987', expand=False)
    version('2020-01-01', sha256='adf770dfd574a0d6026bfaa270cb6879b063957177a991d453ff1d302c02081f', expand=False)
    version('2019-11-27', sha256='0d98a1a961aab523c9dc547e315e1d79e887dea575426ff03567e455fc0b66b4', expand=False)
    version('2019-10-16', sha256='5cd8052fcf548ba7e08899d8458a32942bf70450c9af67a0850b4c711804a2e4', expand=False)
    version('2019-08-28', sha256='38b6230aa4bee062cd34ee0ff6da173250899642b1937fc130896290b6bd91e3', expand=False)

    # Make spack checksum work
    def url_for_version(self, version):
        # Versions are release dates embedded directly in the file name.
        return "https://curl.se/ca/cacert-{0}.pem".format(version)

    def setup_dependent_package(self, module, dep_spec):
        """Returns the absolute path to the bundled certificates"""
        self.spec.pem_path = join_path(self.prefix.share, 'cacert.pem')

    # Install the pem file as share/cacert.pem
    def install(self, spec, prefix):
        share = join_path(prefix, 'share')
        mkdir(share)
        install("cacert-{0}.pem".format(spec.version),
                join_path(share, "cacert.pem"))
|
LLNL/spack
|
var/spack/repos/builtin/packages/ca-certificates-mozilla/package.py
|
Python
|
lgpl-2.1
| 2,432
|
# -*- coding: utf-8 -*-
"""
This module executes vsgen unittests (i.e. all tests in the current folder). It exists as an alernative to the command line interface::
> python -m unittest discover --start-directory . --pattern test*.py
For more testing options see the unittest documentation available at https://docs.python.org/3.5/library/unittest.html.
This module exposes an __main__ entry point useful for test development (usually from an Python IDE) and not recommeded for normal test execution.
"""
import os
import sys
def main(argv=None):
    """
    Test main script.

    Parses `argv` (a full argv vector; element 0 is skipped), then either
    loads the named tests or discovers them and runs the suite.

    Returns 0 when every test passed, 1 on any failure or error.
    """
    import argparse
    import unittest

    # Bug fix: the original used a mutable default argument (argv=[]), a
    # classic Python pitfall. None keeps main() with no args behaving
    # exactly as before (an empty option list is parsed).
    if argv is None:
        argv = []

    parser = argparse.ArgumentParser(description='Executes the vsgen unit tests.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-n', '--testname', help='Specifies the test name to execute. This must be the fully qualified \'dotted\' path of the form \'package.module.class.function\' (e.g. \'tests.unit.test_feature.TestClass.test_function\'). If not provided all tests resolved from the internal test discovery process are executed.', action='append')
    parser.add_argument('-f', '--testpattern', help='Specifies the test file pattern to execute during test discovery. If not provided all tests resolved from the internal test discovery process are executed.', default='test*.py')
    parser.add_argument('-p', '--testpath', help='Specifies the test path for test discovery. If not provided, the internal test discovery uses the current directory.', default=os.path.dirname(os.path.realpath(__file__)))
    args = parser.parse_args(argv[1:])

    loader = unittest.TestLoader()
    if args.testname:
        # Explicit dotted names take precedence over discovery.
        testsuite = loader.loadTestsFromNames(args.testname)
    else:
        testsuite = loader.discover(args.testpath, args.testpattern)

    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(testsuite)
    return 0 if not result.failures and not result.errors else 1
if __name__ == '__main__':
    # To use this package as an application we need to correct the sys.path
    # so the parent package is importable when run directly from tests/.
    module_path = os.path.dirname(os.path.realpath(__file__))
    package_path = os.path.normpath(os.path.join(module_path, '..'))
    if package_path not in sys.path:
        sys.path.append(package_path)

    sys.exit(main(sys.argv))
|
dbarsam/python-vsgen-ptvs
|
tests/__main__.py
|
Python
|
mit
| 2,306
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
from openerp import tools
from email.header import decode_header
from openerp import SUPERUSER_ID, api
from openerp.osv import osv, orm, fields
from openerp.tools import html_email_clean
from openerp.tools.translate import _
from HTMLParser import HTMLParser
_logger = logging.getLogger(__name__)
# Mako is an optional dependency: payment-acquirer templates need it, but
# this module must still import without it (only a warning is logged).
try:
    from mako.template import Template as MakoTemplate
except ImportError:
    _logger.warning("payment_acquirer: mako templates not available, payment acquirer will not work!")
""" Some tools for parsing / creating email fields """
def decode(text):
    """Return the unicode() string conversion of the given encoded SMTP header text."""
    if not text:
        return
    # decode_header() yields (raw_bytes, charset) pairs; carriage returns
    # are stripped first as they confuse the parser.
    parts = decode_header(text.replace('\r', ''))
    return ''.join(tools.ustr(raw, charset) for raw, charset in parts)
class MLStripper(HTMLParser):
    """HTMLParser subclass that collects only the text content it is fed."""

    def __init__(self):
        # NOTE(review): HTMLParser.__init__ is deliberately not called; the
        # classic Python 2 recipe relies on reset() performing the parser
        # setup -- confirm before porting to Python 3.
        self.reset()
        self.fed = []  # accumulated text fragments

    def handle_data(self, d):
        # Called by the parser for every text node; markup is dropped.
        self.fed.append(d)

    def get_data(self):
        return ''.join(self.fed)
def strip_tags(html):
    """Return `html` with all markup removed, keeping only the text content."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
class mail_message(osv.Model):
""" Messages model: system notification (replacing res.log notifications),
comments (OpenChatter discussion) and incoming emails. """
_name = 'mail.message'
_description = 'Message'
_inherit = ['ir.needaction_mixin']
_order = 'id desc'
_rec_name = 'record_name'
_message_read_limit = 30
_message_read_fields = ['id', 'parent_id', 'model', 'res_id', 'body', 'subject', 'date', 'to_read', 'email_from',
'type', 'vote_user_ids', 'attachment_ids', 'author_id', 'partner_ids', 'record_name']
_message_record_name_length = 18
_message_read_more_limit = 1024
    def default_get(self, cr, uid, fields, context=None):
        # protection for `default_type` values leaking from menu action context (e.g. for invoices)
        # NOTE(review): the membership test compares a string against the
        # (value, label) tuples of the selection, so it is True for any
        # string -- presumably the keys were meant; confirm upstream.
        if context and context.get('default_type') and context.get('default_type') not in self._columns['type'].selection:
            context = dict(context, default_type=None)
        return super(mail_message, self).default_get(cr, uid, fields, context=context)
    def _get_to_read(self, cr, uid, ids, name, arg, context=None):
        """ Compute if the message is unread by the current user. """
        # Default every requested id to False (read); flip to True when an
        # unread notification addressed to the user's partner exists.
        res = dict((id, False) for id in ids)
        partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        notif_obj = self.pool.get('mail.notification')
        notif_ids = notif_obj.search(cr, uid, [
            ('partner_id', 'in', [partner_id]),
            ('message_id', 'in', ids),
            ('is_read', '=', False),
        ], context=context)
        for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
            res[notif.message_id.id] = True
        return res
    def _search_to_read(self, cr, uid, obj, name, domain, context=None):
        """ Search for messages to read by the current user. Condition is
            inversed because we search unread message on a is_read column. """
        # domain[0][2] holds the searched boolean; `not` inverts it to
        # match the is_read semantics of mail.notification.
        return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.is_read', '=', not domain[0][2])]
    def _get_starred(self, cr, uid, ids, name, arg, context=None):
        """ Compute if the message is starred by the current user. """
        # (Docstring fixed: it previously said "unread", copied from
        # _get_to_read.) Same pattern as _get_to_read but on `starred`.
        res = dict((id, False) for id in ids)
        partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        notif_obj = self.pool.get('mail.notification')
        notif_ids = notif_obj.search(cr, uid, [
            ('partner_id', 'in', [partner_id]),
            ('message_id', 'in', ids),
            ('starred', '=', True),
        ], context=context)
        for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
            res[notif.message_id.id] = True
        return res
def _search_starred(self, cr, uid, obj, name, domain, context=None):
""" Search for starred messages by the current user."""
return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.starred', '=', domain[0][2])]
_columns = {
'type': fields.selection([
('email', 'Email'),
('comment', 'Comment'),
('notification', 'System notification'),
], 'Type', size=12,
help="Message type: email for email message, notification for system "\
"message, comment for other messages such as user replies"),
'email_from': fields.char('From',
help="Email address of the sender. This field is set when no matching partner is found for incoming emails."),
'reply_to': fields.char('Reply-To',
help='Reply email address. Setting the reply_to bypasses the automatic thread creation.'),
'same_thread': fields.boolean('Same thread',
help='Redirect answers to the same discussion thread.'),
'author_id': fields.many2one('res.partner', 'Author', select=1,
ondelete='set null',
help="Author of the message. If not set, email_from may hold an email address that did not match any partner."),
'author_avatar': fields.related('author_id', 'image_small', type="binary", string="Author's Avatar"),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
'notified_partner_ids': fields.many2many('res.partner', 'mail_notification',
'message_id', 'partner_id', 'Notified partners',
help='Partners that have a notification pushing this message in their mailboxes'),
'attachment_ids': fields.many2many('ir.attachment', 'message_attachment_rel',
'message_id', 'attachment_id', 'Attachments'),
'parent_id': fields.many2one('mail.message', 'Parent Message', select=True,
ondelete='set null', help="Initial thread message."),
'child_ids': fields.one2many('mail.message', 'parent_id', 'Child Messages'),
'model': fields.char('Related Document Model', size=128, select=1),
'res_id': fields.integer('Related Document ID', select=1),
'record_name': fields.char('Message Record Name', help="Name get of the related document."),
'notification_ids': fields.one2many('mail.notification', 'message_id',
string='Notifications', auto_join=True,
help='Technical field holding the message notifications. Use notified_partner_ids to access notified partners.'),
'subject': fields.char('Subject'),
'date': fields.datetime('Date'),
'message_id': fields.char('Message-Id', help='Message unique identifier', select=1, readonly=1, copy=False),
'body': fields.html('Contents', help='Automatically sanitized HTML contents'),
'to_read': fields.function(_get_to_read, fnct_search=_search_to_read,
type='boolean', string='To read',
help='Current user has an unread notification linked to this message'),
'starred': fields.function(_get_starred, fnct_search=_search_starred,
type='boolean', string='Starred',
help='Current user has a starred notification linked to this message'),
'subtype_id': fields.many2one('mail.message.subtype', 'Subtype',
ondelete='set null', select=1,),
'vote_user_ids': fields.many2many('res.users', 'mail_vote',
'message_id', 'user_id', string='Votes',
help='Users that voted for this message'),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing mail server', readonly=1),
}
def _needaction_domain_get(self, cr, uid, context=None):
return [('to_read', '=', True)]
def _get_default_from(self, cr, uid, context=None):
this = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
if this.alias_name and this.alias_domain:
return '%s <%s@%s>' % (this.name, this.alias_name, this.alias_domain)
elif this.email:
return '%s <%s>' % (this.name, this.email)
raise osv.except_osv(_('Invalid Action!'), _("Unable to send email, please configure the sender's email address or alias."))
def _get_default_author(self, cr, uid, context=None):
return self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
_defaults = {
'type': 'email',
'date': fields.datetime.now,
'author_id': lambda self, cr, uid, ctx=None: self._get_default_author(cr, uid, ctx),
'body': '',
'email_from': lambda self, cr, uid, ctx=None: self._get_default_from(cr, uid, ctx),
'same_thread': True,
}
#------------------------------------------------------
# Vote/Like
#------------------------------------------------------
def vote_toggle(self, cr, uid, ids, context=None):
''' Toggles vote. Performed using read to avoid access rights issues.
Done as SUPERUSER_ID because uid may vote for a message he cannot modify. '''
for message in self.read(cr, uid, ids, ['vote_user_ids'], context=context):
new_has_voted = not (uid in message.get('vote_user_ids'))
if new_has_voted:
self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(4, uid)]}, context=context)
else:
self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(3, uid)]}, context=context)
return new_has_voted or False
#------------------------------------------------------
# download an attachment
#------------------------------------------------------
def download_attachment(self, cr, uid, id_message, attachment_id, context=None):
""" Return the content of linked attachments. """
# this will fail if you cannot read the message
message_values = self.read(cr, uid, [id_message], ['attachment_ids'], context=context)[0]
if attachment_id in message_values['attachment_ids']:
attachment = self.pool.get('ir.attachment').browse(cr, SUPERUSER_ID, attachment_id, context=context)
if attachment.datas and attachment.datas_fname:
return {
'base64': attachment.datas,
'filename': attachment.datas_fname,
}
return False
#------------------------------------------------------
# Notification API
#------------------------------------------------------
    @api.cr_uid_ids_context
    def set_message_read(self, cr, uid, msg_ids, read, create_missing=True, context=None):
        """ Set messages as (un)read. Technically, the notifications related
            to uid are set to (un)read. If for some msg_ids there are missing
            notifications (i.e. due to load more or thread parent fetching),
            they are created.

            :param bool read: set notification as (un)read
            :param bool create_missing: create notifications for missing entries
                (i.e. when acting on displayed messages not notified)
            :return int: number of notifications marked (un)read
        """
        notification_obj = self.pool.get('mail.notification')
        # resolve the current user's partner as superuser (no access issue)
        user_pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
        if not create_missing:
            # only touch notifications whose read state actually changes
            domain += [('is_read', '=', not read)]
        notif_ids = notification_obj.search(cr, uid, domain, context=context)
        # all message have notifications: already set them as (un)read
        if len(notif_ids) == len(msg_ids) or not create_missing:
            notification_obj.write(cr, uid, notif_ids, {'is_read': read}, context=context)
            return len(notif_ids)
        # some messages do not have notifications: find which one, create notification, update read status
        notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
        to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
        for msg_id in to_create_msg_ids:
            # new notifications are created directly with the target state
            notification_obj.create(cr, uid, {'partner_id': user_pid, 'is_read': read, 'message_id': msg_id}, context=context)
        notification_obj.write(cr, uid, notif_ids, {'is_read': read}, context=context)
        return len(notif_ids)
    @api.cr_uid_ids_context
    def set_message_starred(self, cr, uid, msg_ids, starred, create_missing=True, context=None):
        """ Set messages as (un)starred. Technically, the notifications related
            to uid are set to (un)starred.

            :param bool starred: set notification as (un)starred
            :param bool create_missing: create notifications for missing entries
                (i.e. when acting on displayed messages not notified)
            :return bool: the ``starred`` value that was applied
        """
        notification_obj = self.pool.get('mail.notification')
        # resolve the current user's partner as superuser (no access issue)
        user_pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
        if not create_missing:
            # only touch notifications whose starred state actually changes
            domain += [('starred', '=', not starred)]
        values = {
            'starred': starred
        }
        if starred:
            # starring a message also marks it unread (back in the inbox)
            values['is_read'] = False
        notif_ids = notification_obj.search(cr, uid, domain, context=context)
        # all message have notifications: already set them as (un)starred
        if len(notif_ids) == len(msg_ids) or not create_missing:
            notification_obj.write(cr, uid, notif_ids, values, context=context)
            return starred
        # some messages do not have notifications: find which one, create notification, update starred status
        notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
        to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
        for msg_id in to_create_msg_ids:
            notification_obj.create(cr, uid, dict(values, partner_id=user_pid, message_id=msg_id), context=context)
        notification_obj.write(cr, uid, notif_ids, values, context=context)
        return starred
#------------------------------------------------------
# Message loading for web interface
#------------------------------------------------------
    def _message_read_dict_postprocess(self, cr, uid, messages, message_tree, context=None):
        """ Post-processing on values given by message_read. This method will
            handle partners in batch to avoid doing numerous queries.

            :param list messages: list of message dicts, as produced by
                _message_read_dict; updated in place
            :param dict message_tree: {msg.id: msg browse record}
            :return bool: True
        """
        res_partner_obj = self.pool.get('res.partner')
        ir_attachment_obj = self.pool.get('ir.attachment')
        # current user's partner id, resolved as superuser
        pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        # 1. Aggregate partners (author_id and partner_ids) and attachments
        partner_ids = set()
        attachment_ids = set()
        for key, message in message_tree.iteritems():
            if message.author_id:
                partner_ids |= set([message.author_id.id])
            if message.subtype_id and message.notified_partner_ids:  # take notified people of message with a subtype
                partner_ids |= set([partner.id for partner in message.notified_partner_ids])
            elif not message.subtype_id and message.partner_ids:  # take specified people of message without a subtype (log)
                partner_ids |= set([partner.id for partner in message.partner_ids])
            if message.attachment_ids:
                attachment_ids |= set([attachment.id for attachment in message.attachment_ids])
        # Read partners as SUPERUSER -> display the names like classic m2o even if no access
        partners = res_partner_obj.name_get(cr, SUPERUSER_ID, list(partner_ids), context=context)
        partner_tree = dict((partner[0], partner) for partner in partners)
        # 2. Attachments as SUPERUSER, because could receive msg and attachments for doc uid cannot see
        attachments = ir_attachment_obj.read(cr, SUPERUSER_ID, list(attachment_ids), ['id', 'datas_fname', 'name', 'file_type_icon'], context=context)
        attachments_tree = dict((attachment['id'], {
            'id': attachment['id'],
            'filename': attachment['datas_fname'],
            'name': attachment['name'],
            'file_type_icon': attachment['file_type_icon'],
        }) for attachment in attachments)
        # 3. Update message dictionaries
        for message_dict in messages:
            message_id = message_dict.get('id')
            message = message_tree[message_id]
            if message.author_id:
                author = partner_tree[message.author_id.id]
            else:
                # no matching partner: fall back on the raw email address
                author = (0, message.email_from)
            partner_ids = []
            if message.subtype_id:
                partner_ids = [partner_tree[partner.id] for partner in message.notified_partner_ids
                        if partner.id in partner_tree]
            else:
                partner_ids = [partner_tree[partner.id] for partner in message.partner_ids
                        if partner.id in partner_tree]
            attachment_ids = []
            for attachment in message.attachment_ids:
                if attachment.id in attachments_tree:
                    attachment_ids.append(attachments_tree[attachment.id])
            message_dict.update({
                'is_author': pid == author[0],
                'author_id': author,
                'partner_ids': partner_ids,
                'attachment_ids': attachment_ids,
                'user_pid': pid
                })
        return True
def _message_read_dict(self, cr, uid, message, parent_id=False, context=None):
""" Return a dict representation of the message. This representation is
used in the JS client code, to display the messages. Partners and
attachments related stuff will be done in post-processing in batch.
:param dict message: mail.message browse record
"""
# private message: no model, no res_id
is_private = False
if not message.model or not message.res_id:
is_private = True
# votes and favorites: res.users ids, no prefetching should be done
vote_nb = len(message.vote_user_ids)
has_voted = uid in [user.id for user in message.vote_user_ids]
try:
if parent_id:
max_length = 300
else:
max_length = 100
body_short = html_email_clean(message.body, remove=False, shorten=True, max_length=max_length)
except Exception:
body_short = '<p><b>Encoding Error : </b><br/>Unable to convert this message (id: %s).</p>' % message.id
_logger.exception(Exception)
return {'id': message.id,
'type': message.type,
'subtype': message.subtype_id.name if message.subtype_id else False,
'body': message.body,
'body_short': body_short,
'model': message.model,
'res_id': message.res_id,
'record_name': message.record_name,
'subject': message.subject,
'date': message.date,
'to_read': message.to_read,
'parent_id': parent_id,
'is_private': is_private,
'author_id': False,
'author_avatar': message.author_avatar,
'is_author': False,
'partner_ids': [],
'vote_nb': vote_nb,
'has_voted': has_voted,
'is_favorite': message.starred,
'attachment_ids': [],
}
def _message_read_add_expandables(self, cr, uid, messages, message_tree, parent_tree,
message_unload_ids=[], thread_level=0, domain=[], parent_id=False, context=None):
""" Create expandables for message_read, to load new messages.
1. get the expandable for new threads
if display is flat (thread_level == 0):
fetch message_ids < min(already displayed ids), because we
want a flat display, ordered by id
else:
fetch message_ids that are not childs of already displayed
messages
2. get the expandables for new messages inside threads if display
is not flat
for each thread header, search for its childs
for each hole in the child list based on message displayed,
create an expandable
:param list messages: list of message structure for the Chatter
widget to which expandables are added
:param dict message_tree: dict [id]: browse record of this message
:param dict parent_tree: dict [parent_id]: [child_ids]
:param list message_unload_ids: list of message_ids we do not want
to load
:return bool: True
"""
def _get_expandable(domain, message_nb, parent_id, max_limit):
return {
'domain': domain,
'nb_messages': message_nb,
'type': 'expandable',
'parent_id': parent_id,
'max_limit': max_limit,
}
if not messages:
return True
message_ids = sorted(message_tree.keys())
# 1. get the expandable for new threads
if thread_level == 0:
exp_domain = domain + [('id', '<', min(message_unload_ids + message_ids))]
else:
exp_domain = domain + ['!', ('id', 'child_of', message_unload_ids + parent_tree.keys())]
ids = self.search(cr, uid, exp_domain, context=context, limit=1)
if ids:
# inside a thread: prepend
if parent_id:
messages.insert(0, _get_expandable(exp_domain, -1, parent_id, True))
# new threads: append
else:
messages.append(_get_expandable(exp_domain, -1, parent_id, True))
# 2. get the expandables for new messages inside threads if display is not flat
if thread_level == 0:
return True
for message_id in message_ids:
message = message_tree[message_id]
# generate only for thread header messages (TDE note: parent_id may be False is uid cannot see parent_id, seems ok)
if message.parent_id:
continue
# check there are message for expandable
child_ids = set([child.id for child in message.child_ids]) - set(message_unload_ids)
child_ids = sorted(list(child_ids), reverse=True)
if not child_ids:
continue
# make groups of unread messages
id_min, id_max, nb = max(child_ids), 0, 0
for child_id in child_ids:
if not child_id in message_ids:
nb += 1
if id_min > child_id:
id_min = child_id
if id_max < child_id:
id_max = child_id
elif nb > 0:
exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
idx = [msg.get('id') for msg in messages].index(child_id) + 1
# messages.append(_get_expandable(exp_domain, nb, message_id, False))
messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))
id_min, id_max, nb = max(child_ids), 0, 0
else:
id_min, id_max, nb = max(child_ids), 0, 0
if nb > 0:
exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
idx = [msg.get('id') for msg in messages].index(message_id) + 1
# messages.append(_get_expandable(exp_domain, nb, message_id, id_min))
messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))
return True
    @api.cr_uid_context
    def message_read(self, cr, uid, ids=None, domain=None, message_unload_ids=None,
                     thread_level=0, context=None, parent_id=False, limit=None):
        """ Read messages from mail.message, and get back a list of structured
            messages to be displayed as discussion threads. If IDs is set,
            fetch these records. Otherwise use the domain to fetch messages.
            After having fetch messages, their ancestors will be added to obtain
            well formed threads, if uid has access to them.

            After reading the messages, expandable messages are added in the
            message list (see ``_message_read_add_expandables``). It consists
            in messages holding the 'read more' data: number of messages to
            read, domain to apply.

            :param list ids: optional IDs to fetch
            :param list domain: optional domain for searching ids if ids not set
            :param list message_unload_ids: optional ids we do not want to fetch,
                because i.e. they are already displayed somewhere
            :param int parent_id: context of parent_id
                - if parent_id reached when adding ancestors, stop going further
                  in the ancestor search
                - if set in flat mode, ancestor_id is set to parent_id
            :param int limit: number of messages to fetch, before adding the
                ancestors and expandables
            :return list: list of message structure for the Chatter widget
        """
        assert thread_level in [0, 1], 'message_read() thread_level should be 0 (flat) or 1 (1 level of thread); given %s.' % thread_level
        domain = domain if domain is not None else []
        message_unload_ids = message_unload_ids if message_unload_ids is not None else []
        if message_unload_ids:
            domain += [('id', 'not in', message_unload_ids)]
        limit = limit or self._message_read_limit
        message_tree = {}
        message_list = []
        parent_tree = {}
        # no specific IDS given: fetch messages according to the domain, add their parents if uid has access to
        if ids is None:
            ids = self.search(cr, uid, domain, context=context, limit=limit)
        # fetch parent if threaded, sort messages
        for message in self.browse(cr, uid, ids, context=context):
            message_id = message.id
            if message_id in message_tree:
                continue
            message_tree[message_id] = message
            # find parent_id
            if thread_level == 0:
                tree_parent_id = parent_id
            else:
                # climb the ancestor chain until the thread root (or parent_id)
                tree_parent_id = message_id
                parent = message
                while parent.parent_id and parent.parent_id.id != parent_id:
                    parent = parent.parent_id
                    tree_parent_id = parent.id
                if not parent.id in message_tree:
                    message_tree[parent.id] = parent
            # newest messages first
            parent_tree.setdefault(tree_parent_id, [])
            if tree_parent_id != message_id:
                parent_tree[tree_parent_id].append(self._message_read_dict(cr, uid, message_tree[message_id], parent_id=tree_parent_id, context=context))
        if thread_level:
            # sort children by id and prepend the thread header message
            for key, message_id_list in parent_tree.iteritems():
                message_id_list.sort(key=lambda item: item['id'])
                message_id_list.insert(0, self._message_read_dict(cr, uid, message_tree[key], context=context))
        # create final ordered message_list based on parent_tree
        parent_list = parent_tree.items()
        parent_list = sorted(parent_list, key=lambda item: max([msg.get('id') for msg in item[1]]) if item[1] else item[0], reverse=True)
        message_list = [message for (key, msg_list) in parent_list for message in msg_list]
        # get the child expandable messages for the tree
        self._message_read_dict_postprocess(cr, uid, message_list, message_tree, context=context)
        self._message_read_add_expandables(cr, uid, message_list, message_tree, parent_tree,
            thread_level=thread_level, message_unload_ids=message_unload_ids, domain=domain, parent_id=parent_id, context=context)
        return message_list
#------------------------------------------------------
# mail_message internals
#------------------------------------------------------
def init(self, cr):
cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'mail_message_model_res_id_idx'""")
if not cr.fetchone():
cr.execute("""CREATE INDEX mail_message_model_res_id_idx ON mail_message (model, res_id)""")
def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
doc_ids = doc_dict.keys()
allowed_doc_ids = self.pool[doc_model].search(cr, uid, [('id', 'in', doc_ids)], context=context)
return set([message_id for allowed_doc_id in allowed_doc_ids for message_id in doc_dict[allowed_doc_id]])
def _find_allowed_doc_ids(self, cr, uid, model_ids, context=None):
model_access_obj = self.pool.get('ir.model.access')
allowed_ids = set()
for doc_model, doc_dict in model_ids.iteritems():
if not model_access_obj.check(cr, uid, doc_model, 'read', False):
continue
allowed_ids |= self._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
return allowed_ids
    def _search(self, cr, uid, args, offset=0, limit=None, order=None,
                context=None, count=False, access_rights_uid=None):
        """ Override that adds specific access rights of mail.message, to remove
            ids uid could not see according to our custom rules. Please refer
            to check_access_rule for more details about those rules.

            After having received ids of a classic search, keep only:
            - if author_id == pid, uid is the author, OR
            - a notification (id, pid) exists, uid has been notified, OR
            - uid have read access to the related document is model, res_id
            - otherwise: remove the id
        """
        # Rules do not apply to administrator
        if uid == SUPERUSER_ID:
            return super(mail_message, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
                context=context, count=count, access_rights_uid=access_rights_uid)
        # Perform a super with count as False, to have the ids, not a counter
        ids = super(mail_message, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
            context=context, count=False, access_rights_uid=access_rights_uid)
        if not ids and count:
            return 0
        elif not ids:
            return ids
        pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        author_ids, partner_ids, allowed_ids = set([]), set([]), set([])
        model_ids = {}
        # classify each candidate message: authored / notified / document-linked
        messages = super(mail_message, self).read(cr, uid, ids, ['author_id', 'model', 'res_id', 'notified_partner_ids'], context=context)
        for message in messages:
            if message.get('author_id') and message.get('author_id')[0] == pid:
                author_ids.add(message.get('id'))
            elif pid in message.get('notified_partner_ids'):
                partner_ids.add(message.get('id'))
            elif message.get('model') and message.get('res_id'):
                model_ids.setdefault(message.get('model'), {}).setdefault(message.get('res_id'), set()).add(message.get('id'))
        # document-linked messages are allowed if the related doc is readable
        allowed_ids = self._find_allowed_doc_ids(cr, uid, model_ids, context=context)
        final_ids = author_ids | partner_ids | allowed_ids
        if count:
            return len(final_ids)
        else:
            # re-construct a list based on ids, because set did not keep the original order
            id_list = [id for id in ids if id in final_ids]
            return id_list
    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """ Access rules of mail.message:
            - read: if
                - author_id == pid, uid is the author, OR
                - mail_notification (id, pid) exists, uid has been notified, OR
                - uid have read access to the related document if model, res_id
                - otherwise: raise
            - create: if
                - no model, no res_id, I create a private message OR
                - pid in message_follower_ids if model, res_id OR
                - mail_notification (parent_id.id, pid) exists, uid has been notified of the parent, OR
                - uid have write or create access on the related document if model, res_id, OR
                - otherwise: raise
            - write: if
                - author_id == pid, uid is the author, OR
                - uid has write or create access on the related document if model, res_id
                - otherwise: raise
            - unlink: if
                - uid has write or create access on the related document if model, res_id
                - otherwise: raise
        """
        def _generate_model_record_ids(msg_val, msg_ids):
            """ Group related documents by model for the given message ids.

                :param msg_val: {msg_id: {'model': .., 'res_id': .., ...}}
                :param msg_ids: message ids to consider
                :return: {'model': set(res_ids)}
            """
            model_record_ids = {}
            for id in msg_ids:
                vals = msg_val.get(id, {})
                if vals.get('model') and vals.get('res_id'):
                    model_record_ids.setdefault(vals['model'], set()).add(vals['res_id'])
            return model_record_ids

        if uid == SUPERUSER_ID:
            # administrator bypasses all message access rules
            return
        if isinstance(ids, (int, long)):
            ids = [ids]
        not_obj = self.pool.get('mail.notification')
        fol_obj = self.pool.get('mail.followers')
        partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=None).partner_id.id
        # Read mail_message.ids to have their values
        message_values = dict.fromkeys(ids, {})
        # direct SQL read: avoids recursing through the read() override
        cr.execute('SELECT DISTINCT id, model, res_id, author_id, parent_id FROM "%s" WHERE id = ANY (%%s)' % self._table, (ids,))
        for id, rmod, rid, author_id, parent_id in cr.fetchall():
            message_values[id] = {'model': rmod, 'res_id': rid, 'author_id': author_id, 'parent_id': parent_id}
        # Author condition (READ, WRITE, CREATE (private)) -> could become an ir.rule ?
        author_ids = []
        if operation == 'read' or operation == 'write':
            author_ids = [mid for mid, message in message_values.iteritems()
                if message.get('author_id') and message.get('author_id') == partner_id]
        elif operation == 'create':
            # a message without model/res_id is a private message: always creatable
            author_ids = [mid for mid, message in message_values.iteritems()
                if not message.get('model') and not message.get('res_id')]
        # Parent condition, for create (check for received notifications for the created message parent)
        notified_ids = []
        if operation == 'create':
            parent_ids = [message.get('parent_id') for mid, message in message_values.iteritems()
                if message.get('parent_id')]
            not_ids = not_obj.search(cr, SUPERUSER_ID, [('message_id.id', 'in', parent_ids), ('partner_id', '=', partner_id)], context=context)
            not_parent_ids = [notif.message_id.id for notif in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
            notified_ids += [mid for mid, message in message_values.iteritems()
                if message.get('parent_id') in not_parent_ids]
        # Notification condition, for read (check for received notifications and create (in message_follower_ids)) -> could become an ir.rule, but not till we do not have a many2one variable field
        other_ids = set(ids).difference(set(author_ids), set(notified_ids))
        model_record_ids = _generate_model_record_ids(message_values, other_ids)
        if operation == 'read':
            not_ids = not_obj.search(cr, SUPERUSER_ID, [
                ('partner_id', '=', partner_id),
                ('message_id', 'in', ids),
            ], context=context)
            notified_ids = [notification.message_id.id for notification in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
        elif operation == 'create':
            # being a follower of the related document allows posting on it
            for doc_model, doc_ids in model_record_ids.items():
                fol_ids = fol_obj.search(cr, SUPERUSER_ID, [
                    ('res_model', '=', doc_model),
                    ('res_id', 'in', list(doc_ids)),
                    ('partner_id', '=', partner_id),
                ], context=context)
                fol_mids = [follower.res_id for follower in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context)]
                notified_ids += [mid for mid, message in message_values.iteritems()
                    if message.get('model') == doc_model and message.get('res_id') in fol_mids]
        # CRUD: Access rights related to the document
        other_ids = other_ids.difference(set(notified_ids))
        model_record_ids = _generate_model_record_ids(message_values, other_ids)
        document_related_ids = []
        for model, doc_ids in model_record_ids.items():
            model_obj = self.pool[model]
            mids = model_obj.exists(cr, uid, list(doc_ids))
            # models may define their own message access check; fall back on
            # the generic mail.thread implementation otherwise
            if hasattr(model_obj, 'check_mail_message_access'):
                model_obj.check_mail_message_access(cr, uid, mids, operation, context=context)
            else:
                self.pool['mail.thread'].check_mail_message_access(cr, uid, mids, operation, model_obj=model_obj, context=context)
            document_related_ids += [mid for mid, message in message_values.iteritems()
                if message.get('model') == model and message.get('res_id') in mids]
        # Calculate remaining ids: if not void, raise an error
        other_ids = other_ids.difference(set(document_related_ids))
        if not other_ids:
            return
        raise orm.except_orm(_('Access Denied'),
                             _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                             (self._description, operation))
def _get_record_name(self, cr, uid, values, context=None):
""" Return the related document name, using name_get. It is done using
SUPERUSER_ID, to be sure to have the record name correctly stored. """
if not values.get('model') or not values.get('res_id') or values['model'] not in self.pool:
return False
return self.pool[values['model']].name_get(cr, SUPERUSER_ID, [values['res_id']], context=context)[0][1]
def _get_reply_to(self, cr, uid, values, context=None):
""" Return a specific reply_to: alias of the document through message_get_reply_to
or take the email_from
"""
model, res_id, email_from = values.get('model'), values.get('res_id'), values.get('email_from')
ctx = dict(context, thread_model=model)
return self.pool['mail.thread'].message_get_reply_to(cr, uid, [res_id], default=email_from, context=ctx)[res_id]
def _get_message_id(self, cr, uid, values, context=None):
if values.get('same_thread', True) is False:
message_id = tools.generate_tracking_message_id('reply_to')
elif values.get('res_id') and values.get('model'):
message_id = tools.generate_tracking_message_id('%(res_id)s-%(model)s' % values)
else:
message_id = tools.generate_tracking_message_id('private')
return message_id
    def create(self, cr, uid, values, context=None):
        """ Override: complete missing values (email_from, message_id,
            reply_to, record_name), create the message, then trigger the
            notification process. """
        context = dict(context or {})
        default_starred = context.pop('default_starred', False)

        if 'email_from' not in values:  # needed to compute reply_to
            values['email_from'] = self._get_default_from(cr, uid, context=context)
        if 'message_id' not in values:
            values['message_id'] = self._get_message_id(cr, uid, values, context=context)
        if 'reply_to' not in values:
            values['reply_to'] = self._get_reply_to(cr, uid, values, context=context)
        if 'record_name' not in values and 'default_record_name' not in context:
            values['record_name'] = self._get_record_name(cr, uid, values, context=context)
        newid = super(mail_message, self).create(cr, uid, values, context)
        # push the message to followers / recipients (and possibly send emails)
        self._notify(cr, uid, newid, context=context,
                     force_send=context.get('mail_notify_force_send', True),
                     user_signature=context.get('mail_notify_user_signature', True))
        # TDE FIXME: handle default_starred. Why not setting an inv on starred ?
        # Because starred will call set_message_starred, that looks for notifications.
        # When creating a new mail_message, it will create a notification to a message
        # that does not exist, leading to an error (key not existing). Also this
        # this means unread notifications will be created, yet we can not assure
        # this is what we want.
        if default_starred:
            self.set_message_starred(cr, uid, [newid], True, context=context)
        return newid
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
""" Override to explicitely call check_access_rule, that is not called
by the ORM. It instead directly fetches ir.rules and apply them. """
self.check_access_rule(cr, uid, ids, 'read', context=context)
res = super(mail_message, self).read(cr, uid, ids, fields=fields, context=context, load=load)
return res
def unlink(self, cr, uid, ids, context=None):
# cascade-delete attachments that are directly attached to the message (should only happen
# for mail.messages that act as parent for a standalone mail.mail record).
self.check_access_rule(cr, uid, ids, 'unlink', context=context)
attachments_to_delete = []
for message in self.browse(cr, uid, ids, context=context):
for attach in message.attachment_ids:
if attach.res_model == self._name and (attach.res_id == message.id or attach.res_id == 0):
attachments_to_delete.append(attach.id)
if attachments_to_delete:
self.pool.get('ir.attachment').unlink(cr, uid, attachments_to_delete, context=context)
return super(mail_message, self).unlink(cr, uid, ids, context=context)
#------------------------------------------------------
# Messaging API
#------------------------------------------------------
def _notify(self, cr, uid, newid, context=None, force_send=False, user_signature=True):
    """ Compute the recipients of message ``newid`` and create their
        notifications via mail.notification._notify.

        Recipients are the union of:
        - followers of the related document subscribed to this message's
          subtype (only when subtype, model and res_id are all set);
        - the message's explicit partner_ids.
        The author is excluded, except when the message is posted on the
        author's own res.partner wall.

        :param newid: id of the freshly created mail.message
        :param force_send: forwarded to mail.notification._notify
        :param user_signature: forwarded to mail.notification._notify
    """
    notification_obj = self.pool.get('mail.notification')
    message = self.browse(cr, uid, newid, context=context)
    partners_to_notify = set([])
    # all followers of the mail.message document have to be added as partners and notified if a subtype is defined (otherwise: log message)
    if message.subtype_id and message.model and message.res_id:
        fol_obj = self.pool.get("mail.followers")
        # browse as SUPERUSER because rules could restrict the search results
        fol_ids = fol_obj.search(
            cr, SUPERUSER_ID, [
                ('res_model', '=', message.model),
                ('res_id', '=', message.res_id),
            ], context=context)
        # keep only the followers subscribed to this message's subtype
        partners_to_notify |= set(
            fo.partner_id.id for fo in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context)
            if message.subtype_id.id in [st.id for st in fo.subtype_ids]
        )
    # remove me from notified partners, unless the message is written on my own wall
    if message.subtype_id and message.author_id and message.model == "res.partner" and message.res_id == message.author_id.id:
        partners_to_notify |= set([message.author_id.id])
    elif message.author_id:
        partners_to_notify -= set([message.author_id.id])
    # all partner_ids of the mail.message have to be notified regardless of the above (even the author if explicitly added!)
    if message.partner_ids:
        partners_to_notify |= set([p.id for p in message.partner_ids])
    # notify
    notification_obj._notify(
        cr, uid, newid, partners_to_notify=list(partners_to_notify), context=context,
        force_send=force_send, user_signature=user_signature
    )
    # re-read the record: _notify may have updated notified_partner_ids
    message.refresh()
    # An error appear when a user receive a notification without notifying
    # the parent message -> add a read notification for the parent
    if message.parent_id:
        # all notified_partner_ids of the mail.message have to be notified for the parented messages
        partners_to_parent_notify = set(message.notified_partner_ids).difference(message.parent_id.notified_partner_ids)
        for partner in partners_to_parent_notify:
            notification_obj.create(cr, uid, {
                'message_id': message.parent_id.id,
                'partner_id': partner.id,
                'is_read': True,
            }, context=context)
|
barachka/odoo
|
addons/mail/mail_message.py
|
Python
|
agpl-3.0
| 47,038
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Nova instances.
"""
import logging
from django import http
from django import template
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core import validators
from django.shortcuts import redirect, render_to_response
from django.utils.translation import ugettext as _
from django_openstack import api
from django_openstack import forms
from novaclient import exceptions as novaclient_exceptions
LOG = logging.getLogger('django_openstack.dash.views.keypairs')
class DeleteKeypair(forms.SelfHandlingForm):
keypair_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
LOG.info('Deleting keypair "%s"' % data['keypair_id'])
api.keypair_delete(request, data['keypair_id'])
messages.info(request, _('Successfully deleted keypair: %s')
% data['keypair_id'])
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in DeleteKeypair")
messages.error(request,
_('Error deleting keypair: %s') % e.message)
return redirect(request.build_absolute_uri())
class CreateKeypair(forms.SelfHandlingForm):
name = forms.CharField(max_length="20", label="Keypair Name",
validators=[validators.RegexValidator('\w+')])
def handle(self, request, data):
try:
LOG.info('Creating keypair "%s"' % data['name'])
keypair = api.keypair_create(request, data['name'])
response = http.HttpResponse(mimetype='application/binary')
response['Content-Disposition'] = \
'attachment; filename=%s.pem' % keypair.name
response.write(keypair.private_key)
return response
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in CreateKeyPair")
messages.error(request,
_('Error Creating Keypair: %s') % e.message)
return redirect(request.build_absolute_uri())
class ImportKeypair(forms.SelfHandlingForm):
name = forms.CharField(max_length="20", label="Keypair Name",
validators=[validators.RegexValidator('\w+')])
public_key = forms.CharField(label='Public Key', widget=forms.Textarea)
def handle(self, request, data):
try:
LOG.info('Importing keypair "%s"' % data['name'])
api.keypair_import(request, data['name'], data['public_key'])
messages.success(request, _('Successfully imported public key: %s')
% data['name'])
return redirect('dash_keypairs', request.user.tenant_id)
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in ImportKeypair")
messages.error(request,
_('Error Importing Keypair: %s') % e.message)
return redirect(request.build_absolute_uri())
@login_required
def index(request, tenant_id):
delete_form, handled = DeleteKeypair.maybe_handle(request)
if handled:
return handled
try:
keypairs = api.keypair_list(request)
except novaclient_exceptions.ClientException, e:
keypairs = []
LOG.exception("ClientException in keypair index")
messages.error(request, _('Error fetching keypairs: %s') % e.message)
return render_to_response('django_openstack/dash/keypairs/index.html', {
'keypairs': keypairs,
'delete_form': delete_form,
}, context_instance=template.RequestContext(request))
@login_required
def create(request, tenant_id):
    """Render the keypair creation page, or finish a submitted creation."""
    form, handled = CreateKeypair.maybe_handle(request)
    if handled:
        return handled
    context = {'create_form': form}
    return render_to_response('django_openstack/dash/keypairs/create.html',
                              context,
                              context_instance=template.RequestContext(request))
@login_required
def import_keypair(request, tenant_id):
    """Render the keypair import page, or finish a submitted import."""
    form, handled = ImportKeypair.maybe_handle(request)
    if handled:
        return handled
    context = {'create_form': form}
    return render_to_response('django_openstack/dash/keypairs/import.html',
                              context,
                              context_instance=template.RequestContext(request))
|
ohnoimdead/horizon
|
django-openstack/django_openstack/dash/views/keypairs.py
|
Python
|
apache-2.0
| 5,132
|
"""
Views for the PyAMF web based unit test runner.
"""
import logging, time
from django.shortcuts import render_to_response
from django.http import HttpResponseNotFound, HttpResponse
import pyamf
import simplejson
from punit import models
def frontpage(request):
    """Render the test-runner landing page listing every available test."""
    version_string = '.'.join(str(part) for part in pyamf.__version__)
    context = {
        'tests': models.get_all_tests(),
        'expected': models.expected_failures,
        'pyamf_version': version_string,
    }
    return render_to_response('punit/index.html', context)
def run_test_method(request, module, test_case, method):
    """Run one test method and report its timing and outcome as JSON."""
    test_name = '%s.%s.%s' % (module, test_case, method)
    started = time.time()
    try:
        result = models.run_test(module, test_case, method)
    except AttributeError:
        # Unknown module/case/method combination.
        return HttpResponseNotFound()
    finished = time.time()
    payload = simplejson.dumps({
        'test': test_name,
        'start': started,
        'stop': finished,
        'passed': result.wasSuccessful(),
        'failures': result.failures,
        'errors': result.errors,
        'expected_failure': test_name in models.expected_failures
    })
    return HttpResponse(content=payload, mimetype='application/x-javascript')
def all_tests(request):
    """Return the full test inventory plus expected/skipped sets as JSON."""
    payload = simplejson.dumps({
        'tests': models.get_all_tests(),
        'expected': models.expected_failures,
        'skipped': models.skipped_tests
    })
    return HttpResponse(mimetype='application/x-javascript', content=payload)
|
cardmagic/PyAMF
|
doc/tutorials/examples/actionscript/google_appengine/punit/views.py
|
Python
|
mit
| 1,440
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    # Shared documentation fragment for the Hetzner Cloud (hcloud) modules;
    # Ansible merges this YAML into each module's DOCUMENTATION block.
    DOCUMENTATION = '''
options:
    api_token:
        description:
            - This is the API Token for the Hetzner Cloud.
        required: True
        type: str
    endpoint:
        description:
            - This is the API Endpoint for the Hetzner Cloud.
        default: https://api.hetzner.cloud/v1
        type: str
requirements:
  - hcloud-python >= 1.0.0
seealso:
  - name: Documentation for Hetzner Cloud API
    description: Complete reference for the Hetzner Cloud API.
    link: https://docs.hetzner.cloud/
'''
|
alxgu/ansible
|
lib/ansible/plugins/doc_fragments/hcloud.py
|
Python
|
gpl-3.0
| 744
|
from tsp.graph import Graph
__all__ = ['Graph', 'main']
import matplotlib.pyplot as plot
import numpy as np
import time
from argparse import ArgumentParser
from progressbar import ProgressBar
from tsp.data import DataLoader
from tsp.ga import GAFactory
from tsp.gui import GUI
def main():
    """Parse command-line options, load the TSP instance and launch the runs."""
    parser = ArgumentParser(description="Solving the TSP through GAs")
    parser.add_argument('data_file', metavar='D', nargs=1,
                        help='The data file to load')
    parser.add_argument('-p', '--preprocess', dest='preprocess', default=False,
                        help='Preprocess the graph')
    parser.add_argument('--selection', dest='selector', default='default',
                        help='Selection Scheme')
    parser.add_argument('--crossover', dest='crossover', default='default',
                        help='Crossover Scheme')
    parser.add_argument('--mutator', dest='mutator', default='default',
                        help='Mutation Scheme')
    parser.add_argument('--population', dest='population', default=100,
                        help='Population Size')
    parser.add_argument('--mutation-rate', dest='mutation_rate', default=0.01,
                        help='Mutation Rate')
    parser.add_argument('--crossover-rate', dest='crossover_rate', default=0.6,
                        help='Crossover Rate')
    parser.add_argument("--generations", dest="generations",
                        help="Number of generations to run for.")
    parser.add_argument("--average", dest="average", help="Average over X runs")
    args = parser.parse_args()
    # Load the problem instance, build the GUI, and hand off to the runner.
    d = DataLoader()
    g = d.load(args.data_file[0], preprocess=args.preprocess)
    gui = GUI(g)
    # NOTE(review): --generations and --average have no defaults, so omitting
    # either makes int(None) fail here -- TODO confirm this is intended.
    run(int(args.generations), int(args.average), args, g, gui)
def run(total_generations, average_over, args, g, gui):
    """Repeat the experiment ``average_over`` times and plot averaged curves."""
    print "Starting experiments. Averaging over {} runs.".format(average_over)
    best = []
    average = []
    progress = ProgressBar()
    gui.draw_map(True)
    for i in progress(range(average_over)):
        (run_best, run_avg) = do_run(total_generations, args, g, gui)
        best.append(run_best)
        average.append(run_avg)
    print "Best result: {}".format(min([min(i) for i in best]))
    # Average the per-generation fitness curves across all runs.
    plot.plot(np.mean(best, axis=0), "b-", label="Best Fitness")
    plot.plot(np.mean(average, axis=0), "r-", label="Average Fitness")
    plot.legend()
    # A throwaway GA instance is built only to get a descriptive title string.
    ga = GAFactory.getGA(args, g)
    plot.title("{}; {} nodes.".format(str(ga), g.dimension))
    plot.xlabel("Generations")
    # TODO make this a bit more solid.
    # plot.ylim(ymin=9352, ymax=100000) # 129
    # plot.ylim(ymin=6656, ymax=30000) #dj34
    plot.ylabel("Route Length (fitness)")
    plot.show()
    # plot.savefig('report/img/results/{}n{}{}.png'.format(ga.file_path(), g.dimension, ga.file_name()), papertype='a4')
def do_run(total_generations, args, g, gui):
    """Run a single GA experiment for ``total_generations`` generations.

    :param total_generations: number of ga.step() iterations to perform
    :param args: parsed command-line namespace forwarded to GAFactory
    :param g: the loaded TSP graph
    :param gui: GUI instance, refreshed with the final population
    :returns: (best, average) pair of per-generation fitness lists
    """
    ga = GAFactory.getGA(args, g)
    # Removed an unused local (`f = ga.population[0]`) left over from debugging.
    average = []
    best = []
    for i in xrange(total_generations):
        ga.step()
        # assumes population[0] is the current fittest individual -- TODO confirm
        best.append(ga.population[0].score)
        average.append(np.mean([x.score for x in ga.population]))
    gui.update_map(ga.population)
    return (best, average)
# Script entry point: only run the experiment when executed directly.
if __name__ == "__main__":
    main()
|
SoftlySplinter/sem6120-assign2
|
tsp/__init__.py
|
Python
|
unlicense
| 3,091
|
from parsimonious import NodeVisitor
from parsimonious import Grammar
class DocketVisitor(NodeVisitor):
    """Walks a parsed court-docket tree and re-emits it as an XML-ish string,
    wrapping each recognized region in <docket>/<page>/<caption>/... tags."""

    def generic_visit(self, node, vc):
        # Default for unnamed rules: concatenate whatever the children produced.
        return self.stringify_list(vc)

    def visit_docket(self, node, vc):
        return " <docket> %s </docket> " % self.stringify_list(vc)

    def visit_page(self, node, vc):
        return " <page> %s </page> " % self.stringify_list(vc)

    def visit_caption(self, node, vc):
        return " <caption> %s </caption> " % self.stringify_list(vc)

    def visit_commonwealth_line(self, node, vc):
        return " <state> %s </state> " % node.text

    def visit_defendant_line(self, node, vc):
        return " <defendant> %s </defendant> " % self.stringify_list(vc)

    def visit_docket_number(self, node, vc):
        """Tag the docket number, keeping any leading "label:" prefix outside
        the tag; when no ":" is present the whole string is tagged."""
        docket = self.stringify_list(vc)
        try:
            index = docket.index(":")
            docket = "%s <docket_number> %s </docket_number> " % (docket[0:index+1], docket[index+1:])
        except ValueError:  # was a bare except; str.index only raises ValueError
            # Fixed a "% s" format-string typo so the spacing matches every
            # other tag emitted by this visitor.
            docket = " <docket_number> %s </docket_number> " % docket
        return docket

    def visit_body(self, node, vc):
        return " <body> %s </body> " % self.stringify_list(vc)

    def visit_footer(self, node, vc):
        return " <footer> %s </footer> " % self.stringify_list(vc)

    def visit_section(self, node, vc):
        # The first child is the section header; reuse its text as the name.
        section = self.stringify_list(vc)
        section_name = vc[0]
        return " <section name='%s'> %s </section> " % (section_name.strip(), section)

    def visit_section_header(self, node, vc):
        return node.text

    def visit_new_line(self, node, vc):
        return node.text

    def visit_content(self, node, vc):
        return node.text

    # Private helper (renamed parameter: it previously shadowed builtin `list`).
    def stringify_list(self, parts):
        """Concatenate a list of string fragments into one string."""
        return "".join(parts)
# PEG grammar (parsimonious syntax) describing the page structure of a
# Philadelphia County criminal docket: a header with docket number and
# caption, an optional body of named sections, a footer starting at the
# "CPCMS 9082" line, and form-feed page breaks.
grammar = r"""
docket = page+
page = header body? footer page_break?
header = ws* ~"COURT OF COMMON PLEAS OF PHILADELPHIA COUNTY"i new_line line docket_number line line line line caption
docket_number = content new_line?
caption = commonwealth_line line line defendant_line
commonwealth_line = ws* ~"Commonwealth of Pennsylvania"i ws* new_line
defendant_line = content new_line
body = ((section !start_of_footer)* section) /
((line !start_of_footer)* line) /
(line body)
section = section_header (line !start_of_footer)+ line
section_header = (ws* ~"CASE INFORMATION"i new_line) /
(ws* ~"STATUS INFORMATION"i new_line) /
(ws* ~"calendar events"i new_line) /
(ws* ~"defendant information"i new_line) /
(ws* ~"case participants"i new_line) /
(ws* ~"bail information"i new_line) /
(ws* ~"charges"i new_line) /
(ws* ~"disposition sentencing/penalties"i new_line) /
(ws* ~"commonwealth information"i ws* ~"attorney information"i new_line) /
(ws* ~"entries"i new_line) /
(ws* ~"payment plan summary"i new_line) /
(ws* ~"case financial information"i new_line)
start_of_footer = (ws* ~"CPCMS 9082" content new_line)
footer = start_of_footer line+
line = content new_line?
content = ~"[a-z0-9`\ \"=_\.,\-\(\)\'\$\?\*%;:#&\[\]/@§]*"i
new_line = "\n"
page_break = "\f"
ws = " "
"""
|
NateV/GrammarDev
|
grammar_dev/grammars/Sectionize.py
|
Python
|
gpl-2.0
| 3,580
|
import pyshark
import collections
import time
import operator
import logging
import serial
import struct
import socket
### LOGGING AND SERIAL SETUP ###################################################
# File logging only; WARNING and above land in pysharktest.log (overwritten
# each run via filemode='w').
logging.basicConfig(level=logging.WARNING,
                    format='%(asctime)s %(levelname)s %(message)s',
                    filename='pysharktest.log',
                    filemode='w')
logging.debug('A debug message')
#logging.info('Some information')
logging.warning('A shot across the bows')
### SOCKET #########################################################################
# Best-effort connection to a local receiver; failures are printed, not fatal,
# so the analysis below still runs without a listener.
s = socket.socket()         # Create a socket object
host = socket.gethostname() # Get local machine name
port = 5000                 # Reserve a port for your service.
try:
    s.connect((host, port))
    time.sleep(2)
    s.send(b"pyshark says what up!\n")
except socket.error as e:
    print(e, s)
### DATA #########################################################################
# Summary-only capture: each packet exposes source/destination/protocol/info.
capture = pyshark.FileCapture('steam2.pcapng', only_summaries=True)
def makeDicts():
    """Flatten every captured packet summary into a plain dict of the four
    summary fields we care about."""
    return [
        {"source": pkt.source,
         "destination": pkt.destination,
         "protocol": pkt.protocol,
         "info": pkt.info}
        for pkt in capture
    ]
### UTILITY FUNCTIONS #########################################################################
def sortMaxtoMin(allTheStuff, keyToUse, cN):
    """Sort incoming data from most active to least, return the top cN items.

    :param allTheStuff: list of dicts, one per packet
    :param keyToUse: dict key to tally, e.g. "source" or "protocol"
    :param cN: how many of the busiest entries to return; values <= 0 or
        larger than the number of distinct entries return everything
    :returns: list of (value, count) tuples, most frequent first
    """
    rawSources = [item[keyToUse] for item in allTheStuff]
    print(len(rawSources))  # kept from the original: quick visual sanity check
    # Counter.most_common already sorts by count descending with ties in
    # first-seen order, matching the original manual tally + stable sort
    # (which also filtered on the always-true condition count > 0).
    counts = collections.Counter(rawSources)
    if cN <= 0 or cN > len(counts):
        cN = len(counts)
    return counts.most_common(cN)
def formatPrinter(title, listOfThings):
    """Print a titled block of (label, count) pairs followed by a divider."""
    print(title)
    for label, count in listOfThings:
        print("{0}: {1}".format(label, count))
    print("----------------")
    print(" ")
def sendToSocket(title, listOfThings):
    """Print and push each (label, count) pair to the module-level socket ``s``.

    Sleeps between sends, presumably to pace the receiver -- TODO confirm.
    Raises if the socket connection made at import time failed.
    """
    print(title)
    time.sleep(2)
    for i in listOfThings:
        time.sleep(1)
        print("{0}: {1}".format(i[0], i[1]))
        toSend = "{0}: {1}\n".format(i[0], i[1])
        s.send(toSend.encode())
# Crunch the capture once, then print the aggregated views and push the
# protocol tally to the socket receiver.
allTheStuff = makeDicts()
protocols = sortMaxtoMin(allTheStuff, "protocol", 10)  ## sourceList, the key you're looking for, how many reps above?
activeIP = sortMaxtoMin(allTheStuff, "source", 5)
infoTest = sortMaxtoMin(allTheStuff, "info", 5)
formatPrinter("Active IPs", activeIP)
formatPrinter("Common Protocols", protocols)
formatPrinter("info", infoTest)
sendToSocket("Protocols", protocols)
### THE SHIT I AIN'T USING #########################################################################
def getProtocols():
    """Tally how often each of a fixed set of protocols appears in the capture,
    most frequent first."""
    prots = [
        "SSDP",
        "TCP",
        "TLSv1.2",
        "DNS",
        "QUIC",
        "MDNS",
        "UDP",
        "CDP",
        "ARP",
        "IGMPv2",
        "ICMPv6",
        "DHCPv6",
        "NETBios",
        "NBNS"
    ]
    seen = [pkt.protocol for pkt in capture]
    tally = {name: seen.count(name) for name in prots}
    return sorted(tally.items(), key=operator.itemgetter(1), reverse=True)
## trying to make data into a dict
def getData(protocol):
    """Return source/destination/info dicts for packets of one protocol."""
    return [
        {"source": pkt.source, "destination": pkt.destination, "info": pkt.info}
        for pkt in capture
        if pkt.protocol == protocol
    ]
def getTheInfo(infoItem):
    """Print each packet's info line, then a per-label occurrence count for a
    fixed set of known info strings."""
    options = ["Server Hello",
               "Application Data",
               "Change Cipher Spec",
               "Encrypted Handshake Message",
               "Server Key Exchange",
               "Client Key Exchange",
               "Encrypted Alert",
               "New Session Ticket",
               "Hello Request",
               "Ignored Unknown Record",
               "Certificate",
               "M-SEARCH * HTTP/1.1 ",
               "[TCP Keep-Alive]",
               "[TCP Dup ACK 5230#1]",
               "[TCP segment of a reassembled PDU]",
               "[TCP Window Update]",
               "[TCP Retransmission]",
               "[TCP Keep-Alive ACK] "
               ]
    allTheInfo = []
    for entry in infoItem:
        print(entry["info"])
        allTheInfo.append(entry["info"])
    for label in options:
        print(label, ": ", allTheInfo.count(label))
"""
cap0 = capture[0]
print(dir(cap0))
print(cap0.info)
print(cap0.length)
print(cap0.protocol)
print(cap0.no)
print(cap0.source)
print(cap0.destination)
print(cap0.summary_line)
print(cap0.time)
"""
## datasets to work with
"""
ipSSDP = getData("SSDP") #this is always search
ipTCP = getData("TCP") # this has a lot of itneresting things
ipTLSV = getData("TLSv1.2") #weird
ipDNS = getData("DNS") #hmm
ipQUIC = getData("QUIC") # kind of boring on the high level
ipMDNS = getData("MDNS")
print("Protocols")
print("----------------")
print("SSDP: ",len(ipSSDP))
print("TCP: ",len(ipTCP))
print("TLSV: ",len(ipTLSV))
print("DNS: ",len(ipDNS))
print("QUIC: ",len(ipQUIC))
print("MDNS: ", len(ipMDNS))
print("----------------")
"""
|
sharkwheels/Independet_study_2017
|
week2-wireshark/pyshark_test.py
|
Python
|
mit
| 5,054
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Creates the InboxMessage model, which mirrors a RapidPro inbox message
    # (rapidpro_message_id) and links it to a local Contact and Org.

    dependencies = [
        ('orgs', '0013_auto_20150715_1831'),
        ('contacts', '0004_auto_20150324_1024'),
        ('msgs', '0004_message_pollrun'),
    ]

    operations = [
        migrations.CreateModel(
            name='InboxMessage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('rapidpro_message_id', models.IntegerField()),
                ('text', models.CharField(max_length=640, null=True)),
                ('archived', models.BooleanField(default=False)),
                ('created_on', models.DateTimeField(null=True)),
                ('delivered_on', models.DateTimeField(null=True)),
                ('sent_on', models.DateTimeField(null=True)),
                ('contact_from', models.ForeignKey(related_name='inbox_messages', to='contacts.Contact')),
                ('org', models.ForeignKey(related_name='inbox_messages', verbose_name='Organization', to='orgs.Org')),
            ],
        ),
    ]
|
xkmato/tracpro
|
tracpro/msgs/migrations/0005_inboxmessage.py
|
Python
|
bsd-3-clause
| 1,185
|
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HDSStream, HLSStream, HTTPStream
# Endpoint returning the JSON media description for a given media id.
MEDIA_URL = "http://www.ardmediathek.de/play/media/{0}"
# Player SWF, passed as pvswf for HDS manifest verification.
SWF_URL = "http://www.ardmediathek.de/ard/static/player/base/flash/PluginFlash.swf"
# Query string required on Akamai HDS (f4m) URLs.
HDCORE_PARAMETER = "?hdcore=3.3.0"
# Maps the site's numeric "_quality" codes to stream names.
QUALITY_MAP = {
    "auto": "auto",
    3: "544p",
    2: "360p",
    1: "288p",
    0: "144p"
}
# Matches ardmediathek.de/tv and mediathek.daserste.de URLs.
_url_re = re.compile(r"http(s)?://(?:(\w+\.)?ardmediathek.de/tv|mediathek.daserste.de/)")
# Extracts the numeric media id from a /play/media or /play/config path.
_media_id_re = re.compile(r"/play/(?:media|config)/(\d+)")
# Shape of the JSON document served by MEDIA_URL.
_media_schema = validate.Schema({
    "_mediaArray": [{
        "_mediaStreamArray": [{
            validate.optional("_server"): validate.text,
            "_stream": validate.any(validate.text, [validate.text]),
            "_quality": validate.any(int, validate.text)
        }]
    }]
})
# Shape of a SMIL playlist: base URL and CDN name from <head/meta>, plus the
# src attribute of every <body/seq/video> element.
_smil_schema = validate.Schema(
    validate.union({
        "base": validate.all(
            validate.xml_find("head/meta"),
            validate.get("base"),
            validate.url(scheme="http")
        ),
        "cdn": validate.all(
            validate.xml_find("head/meta"),
            validate.get("cdn")
        ),
        "videos": validate.all(
            validate.xml_findall("body/seq/video"),
            [validate.get("src")]
        )
    })
)
class ard_mediathek(Plugin):
    """Streamlink plugin extracting HDS/HLS/SMIL/HTTP streams from ARD
    Mediathek / Das Erste video pages."""

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    def _get_http_streams(self, info):
        """Yield (name, HTTPStream) pairs for plain progressive URLs."""
        name = QUALITY_MAP.get(info["_quality"], "vod")
        urls = info["_stream"]
        if not isinstance(urls, list):
            urls = [urls]
        for url in urls:
            yield name, HTTPStream(self.session, url)

    def _get_hds_streams(self, info):
        # HDS manifests need the hdcore parameter appended.
        url = info["_stream"] + HDCORE_PARAMETER
        return HDSStream.parse_manifest(self.session, url, pvswf=SWF_URL).items()

    def _get_hls_streams(self, info):
        return HLSStream.parse_variant_playlist(self.session, info["_stream"]).items()

    def _get_smil_streams(self, info):
        """Resolve a SMIL playlist into HDS streams, one manifest per video."""
        res = self.session.http.get(info["_stream"])
        smil = self.session.http.xml(res, "SMIL config", schema=_smil_schema)
        for video in smil["videos"]:
            url = "{0}/{1}{2}".format(smil["base"], video, HDCORE_PARAMETER)
            streams = HDSStream.parse_manifest(self.session, url, pvswf=SWF_URL,
                                               is_akamai=smil["cdn"] == "akamai")
            for stream in streams.items():
                yield stream

    def _get_streams(self):
        """Find the media id on the page, fetch its JSON description and
        dispatch every listed stream URL to the matching parser."""
        res = self.session.http.get(self.url)
        match = _media_id_re.search(res.text)
        if match:
            media_id = match.group(1)
        else:
            return
        self.logger.debug("Found media id: {0}", media_id)
        res = self.session.http.get(MEDIA_URL.format(media_id))
        media = self.session.http.json(res, schema=_media_schema)
        for media in media["_mediaArray"]:
            for stream in media["_mediaStreamArray"]:
                stream_ = stream["_stream"]
                if isinstance(stream_, list):
                    if not stream_:
                        continue
                    stream_ = stream_[0]
                if stream_.endswith(".f4m"):
                    parser = self._get_hds_streams
                    parser_name = "HDS"
                elif stream_.endswith(".smil"):
                    parser = self._get_smil_streams
                    parser_name = "SMIL"
                elif stream_.endswith(".m3u8"):
                    parser = self._get_hls_streams
                    parser_name = "HLS"
                elif stream_.startswith("http"):
                    parser = self._get_http_streams
                    parser_name = "HTTP"
                else:
                    # BUG FIX: an unrecognized stream URL previously left
                    # `parser` unbound on the first iteration (NameError) or
                    # silently reused the previous iteration's parser.
                    continue
                try:
                    for s in parser(stream):
                        yield s
                except IOError as err:
                    self.logger.error("Failed to extract {0} streams: {1}",
                                      parser_name, err)
__plugin__ = ard_mediathek
|
back-to/streamlink
|
src/streamlink/plugins/ard_mediathek.py
|
Python
|
bsd-2-clause
| 4,132
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from datetime import datetime
class RuswarpDriver(webdriver.PhantomJS):
    """PhantomJS driver specialised for scraping the Ruswarp data-viewer page."""

    def parse_data_value(self, value_txt):
        """
        Return the value coerced to be a float, bool or left as a string.
        Try to convert to a number, and then the less common boolean.
        """
        try:
            return float(value_txt)
        except ValueError:
            if value_txt == 'false':
                return False
            if value_txt == 'true':
                return True
            return value_txt

    def parse_data_rows(self, rows):
        """ Generator that yields data name, value pairs from a iter of rows """
        for row in rows:
            name_el, value_el, _ = row.find_elements_by_css_selector('td')
            name = name_el.text
            value_txt = value_el.text
            # Skip rows with an empty name or empty value cell. BUG FIX: the
            # emptiness check now runs on the raw text BEFORE coercion --
            # previously a parsed value of False or 0.0 was falsy and the
            # row was wrongly dropped.
            if not name or not value_txt:
                continue
            yield (name, self.parse_data_value(value_txt),)

    def get_data(self):
        """ Return dict containing all the tag names and their values """
        # Make the containing div visible, otherwise the values won't render
        self.execute_script("document.getElementById('DataViewerFrame').style.display = 'block'")
        table = self.find_element_by_id('propertyTable')
        rows = table.find_elements_by_css_selector('tbody tr')
        data = {
            name: value for name, value in self.parse_data_rows(rows)
        }
        # Record a timestamp
        data['time'] = datetime.utcnow()
        return data

    def wait_for_element(self, element_id):
        """ Wait for a given DOM id to exist (up to 10 seconds). """
        wait = WebDriverWait(self, 10)
        wait.until(EC.presence_of_element_located((By.ID, element_id)))
|
craigloftus/ruswarp-scraper
|
driver.py
|
Python
|
gpl-2.0
| 2,048
|
# -*- coding: utf-8 -*-
"""
Choices of ``critica.apps.issues`` application.
"""
from django.utils.translation import ugettext_lazy as _
# Status
# ------------------------------------------------------------------------------
# Integer status codes for issues, stored on the model.
STATUS_PUBLISHED = 1
STATUS_COMPLETE = 2
STATUS_NEW = 3
# Field choices pairing each code with its lazily translated label.
STATUS_CHOICES = (
    (STATUS_PUBLISHED, _('Published')),
    (STATUS_COMPLETE, _('Complete')),
    (STATUS_NEW, _('New')),
)
|
brunobord/critica
|
apps/issues/choices.py
|
Python
|
gpl-3.0
| 414
|
# Copyright 2011-2013, Damian Johnson
# Copyright 2013, Sean Robinson
# See LICENSE for licensing information
"""
Handlers for text configuration files. Configurations are simple string to
string mappings, with the configuration files using the following rules...
* the key/value is separated by a space
* anything after a "#" is ignored as a comment
* excess whitespace is trimmed
* empty lines are ignored
* multi-line values can be defined by following the key with lines starting
with a '|'
For instance...
::
# This is my sample config
user.name Galen
user.password yabba1234 # here's an inline comment
user.notes takes a fancy to pepperjack cheese
blankEntry.example
msg.greeting
|Multi-line message exclaiming of the
|wonder and awe that is pepperjack!
... would be loaded as...
::
config = {
"user.name": "Galen",
"user.password": "yabba1234",
"user.notes": "takes a fancy to pepperjack cheese",
"blankEntry.example": "",
"msg.greeting": "Multi-line message exclaiming of the\\nwonder and awe that is pepperjack!",
}
Configurations are managed via the :class:`~stem.util.conf.Config` class. The
:class:`~stem.util.conf.Config` can be used directly with its
:func:`~stem.util.conf.Config.get` and :func:`~stem.util.conf.Config.set`
methods, but usually modules will want a local dictionary with just the
configurations that it cares about.
To do this use the :func:`~stem.util.conf.config_dict` function. For example...
::
import getpass
from stem.util import conf, connection
def config_validator(key, value):
if key == "timeout":
# require at least a one second timeout
return max(1, value)
elif key == "endpoint":
if not connection.is_valid_ipv4_address(value):
raise ValueError("'%s' isn't a valid IPv4 address" % value)
elif key == "port":
if not connection.is_valid_port(value):
raise ValueError("'%s' isn't a valid port" % value)
elif key == "retries":
# negative retries really don't make sense
return max(0, value)
CONFIG = conf.config_dict("ssh_login", {
"username": getpass.getuser(),
"password": "",
"timeout": 10,
"endpoint": "263.12.8.0",
"port": 22,
"reconnect": False,
"retries": 3,
}, config_validator)
There's several things going on here so lets take it step by step...
* The :func:`~stem.util.conf.config_dict` provides a dictionary that's bound
to a given configuration. If the "ssh_login" configuration changes
then so will the contents of CONFIG.
* The dictionary we're passing to :func:`~stem.util.conf.config_dict` provides
two important pieces of information: default values and their types. See the
Config's :func:`~stem.util.conf.Config.get` method for how these type
inferences work.
* The config_validator is a hook we're adding to make sure CONFIG only gets
values we think are valid. In this case it ensures that our timeout value
is at least one second, and rejects endpoints or ports that are invalid.
Now lets say our user has the following configuration file...
::
username waddle_doo
password jabberwocky
timeout -15
port 9000000
retries lots
reconnect true
logging debug
... and we load it as follows...
::
>>> from from stem.util import conf
>>> our_config = conf.get_config("ssh_login")
>>> our_config.load("/home/atagar/user_config")
>>> print CONFIG
{
"username": "waddle_doo",
"password": "jabberwocky",
"timeout": 1,
"endpoint": "263.12.8.0",
"port": 22,
"reconnect": True,
"retries": 3,
}
Here's an explanation of what happened...
* the username, password, and reconnect attributes took the values in the
configuration file
* the 'config_validator' we added earlier allows for a minimum timeout of one
and rejected the invalid port (with a log message)
* we weren't able to convert the retries' "lots" value to an integer so it kept
its default value and logged a warning
* the user didn't supply an endpoint so that remained unchanged
* our CONFIG didn't have a 'logging' attribute so it was ignored
**Module Overview:**
::
config_dict - provides a dictionary that's kept in sync with our config
get_config - singleton for getting configurations
parse_enum_csv - helper function for parsing configuration entries for enums
Config - Custom configuration
|- load - reads a configuration file
|- save - writes the current configuration to a file
|- clear - empties our loaded configuration contents
|- add_listener - notifies the given listener when an update occurs
|- clear_listeners - removes any attached listeners
|- keys - provides keys in the loaded configuration
|- set - sets the given key/value pair
|- unused_keys - provides keys that have never been requested
|- get - provides the value for a given key, with type inference
+- get_value - provides the value for a given key as a string
"""
import threading
from stem.util import log
CONFS = {} # mapping of identifier to singleton instances of configs
class _SyncListener(object):
    """Keeps a config_dict mapping in sync with its backing Config.

    Registered as a Config listener: on every update it re-reads the changed
    key (with the current value as the type-inferring default) and optionally
    passes the new value through an interceptor before storing it.
    """

    def __init__(self, config_dict, interceptor):
        self.config_dict = config_dict
        self.interceptor = interceptor

    def update(self, config, key):
        if key not in self.config_dict:
            return  # not a key we mirror
        current = self.config_dict[key]
        replacement = config.get(key, current)
        if replacement == current:
            return  # no change
        if self.interceptor:
            intercepted = self.interceptor(key, replacement)
            if intercepted:
                replacement = intercepted
        self.config_dict[key] = replacement
def config_dict(handle, conf_mappings, handler = None):
    """
    Makes a dictionary that stays synchronized with a configuration.

    This takes a dictionary of 'config_key => default_value' mappings and
    changes the values to reflect our current configuration. This will leave
    the previous values alone if...

    * we don't have a value for that config_key
    * we can't convert our value to be the same type as the default_value

    If a handler is provided then this is called just prior to assigning new
    values to the config_dict. The handler function is expected to accept the
    (key, value) for the new values and return what we should actually insert
    into the dictionary. If this returns None then the value is updated as
    normal.

    For more information about how we convert types see our
    :func:`~stem.util.conf.Config.get` method.

    **The dictionary you get from this is managed by the
    :class:`~stem.util.conf.Config` class and should be treated as being
    read-only.**

    :param str handle: unique identifier for a config instance
    :param dict conf_mappings: config key/value mappings used as our defaults
    :param functor handler: function referred to prior to assigning values
    """

    selected_config = get_config(handle)
    # _SyncListener.update keeps conf_mappings in step with the Config instance
    selected_config.add_listener(_SyncListener(conf_mappings, handler).update)
    return conf_mappings
def get_config(handle):
    """
    Singleton constructor for configuration file instances. If a configuration
    already exists for the handle then it's returned. Otherwise a fresh instance
    is constructed.

    :param str handle: unique identifier used to access this config instance

    :returns: :class:`~stem.util.conf.Config` mapped to the given handle
    """

    # idiomatic membership test ('handle not in' rather than 'not handle in')
    if handle not in CONFS:
        CONFS[handle] = Config()

    return CONFS[handle]
def parse_enum(key, value, enumeration):
    """
    Provides the enumeration value for a given key. This is a case insensitive
    lookup and raises an exception if the enum key doesn't exist.

    :param str key: configuration key being looked up
    :param str value: value to be parsed
    :param stem.util.enum.Enum enumeration: enumeration the values should be in

    :returns: enumeration value

    :raises: **ValueError** if the **value** isn't among the enumeration keys
    """

    # delegate to the csv parser, insisting on exactly one entry
    matches = parse_enum_csv(key, value, enumeration, 1)
    return matches[0]
def parse_enum_csv(key, value, enumeration, count = None):
    """
    Parses a given value as being a comma separated listing of enumeration keys,
    returning the corresponding enumeration values. This is intended to be a
    helper for config handlers. The checks this does are case insensitive.

    The **count** attribute can be used to make assertions based on the number of
    values. This can be...

    * None to indicate that there's no restrictions.
    * An int to indicate that we should have this many values.
    * An (int, int) tuple to indicate the range that values can be in. This range
      is inclusive and either can be None to indicate the lack of a lower or
      upper bound.

    :param str key: configuration key being looked up
    :param str value: value to be parsed
    :param stem.util.enum.Enum enumeration: enumeration the values should be in
    :param int,tuple count: validates that we have this many items

    :returns: list with the enumeration values

    :raises: **ValueError** if the count assertion fails or the **value** entries
      don't match the enumeration keys
    """

    values = [val.upper().strip() for val in value.split(',')]

    if values == ['']:
        return []

    if count is None:
        pass  # no count validation checks to do
    elif isinstance(count, int):
        if len(values) != count:
            raise ValueError("Config entry '%s' is expected to be %i comma separated values, got '%s'" % (key, count, value))
    elif isinstance(count, tuple) and len(count) == 2:
        minimum, maximum = count

        if minimum is not None and len(values) < minimum:
            raise ValueError("Config entry '%s' must have at least %i comma separated values, got '%s'" % (key, minimum, value))

        if maximum is not None and len(values) > maximum:
            raise ValueError("Config entry '%s' can have at most %i comma separated values, got '%s'" % (key, maximum, value))
    else:
        # BUG FIX: the message previously ended with a stray apostrophe
        # ("... (%s)'"), producing a malformed error string.
        raise ValueError("The count must be None, an int, or two value tuple. Got '%s' (%s)" % (count, type(count)))

    result = []
    enum_keys = [k.upper() for k in enumeration.keys()]
    enum_values = list(enumeration)

    for val in values:
        if val in enum_keys:
            result.append(enum_values[enum_keys.index(val)])
        else:
            raise ValueError("The '%s' entry of config entry '%s' wasn't in the enumeration (expected %s)" % (val, key, ', '.join(enum_keys)))

    return result
class Config(object):
    """
    Handler for easily working with custom configurations, providing persistence
    to and from files. All operations are thread safe.

    **Example usage:**

    User has a file at '/home/atagar/myConfig' with...

    ::

      destination.ip 1.2.3.4
      destination.port blarg

      startup.run export PATH=$PATH:~/bin
      startup.run alias l=ls

    And they have a script with...

    ::

      from stem.util import conf

      # Configuration values we'll use in this file. These are mappings of
      # configuration keys to the default values we'll use if the user doesn't
      # have something different in their config file (or it doesn't match this
      # type).

      ssh_config = conf.config_dict("ssh_login", {
        "login.user": "atagar",
        "login.password": "pepperjack_is_awesome!",
        "destination.ip": "127.0.0.1",
        "destination.port": 22,
        "startup.run": [],
      })

      # Makes an empty config instance with the handle of 'ssh_login'. This is
      # a singleton so other classes can fetch this same configuration from
      # this handle.

      user_config = conf.get_config("ssh_login")

      # Loads the user's configuration file, warning if this fails.

      try:
        user_config.load("/home/atagar/myConfig")
      except IOError as exc:
        print("Unable to load the user's config: %s" % exc)

      # This replaces the contents of ssh_config with the values from the user's
      # config file if...
      #
      # * the key is present in the config file
      # * we're able to convert the configuration file's value to the same type
      #   as what's in the mapping (see the Config.get() method for how these
      #   type inferences work)
      #
      # For instance in this case...
      #
      # * the login values are left alone because they aren't in the user's
      #   config file
      #
      # * the 'destination.port' is also left with the value of 22 because we
      #   can't turn "blarg" into an integer
      #
      # The other values are replaced, so ssh_config now becomes...
      #
      # {"login.user": "atagar",
      #  "login.password": "pepperjack_is_awesome!",
      #  "destination.ip": "1.2.3.4",
      #  "destination.port": 22,
      #  "startup.run": ["export PATH=$PATH:~/bin", "alias l=ls"]}
      #
      # Information for what values fail to load and why are reported to
      # 'stem.util.log'.
    """

    def __init__(self):
        self._path = None  # location we last loaded from or saved to
        self._contents = {}  # configuration key/value pairs
        self._raw_contents = []  # raw contents read from configuration file
        self._listeners = []  # functors to be notified of config changes

        # used for both _contents and _raw_contents access
        self._contents_lock = threading.RLock()

        # keys that have been requested (used to provide unused config contents)
        self._requested_keys = set()

    def load(self, path = None):
        """
        Reads in the contents of the given path, adding its configuration values
        to our current contents.

        :param str path: file path to be loaded, this uses the last loaded path if
          not provided

        :raises:
          * **IOError** if we fail to read the file (it doesn't exist, insufficient
            permissions, etc)
          * **ValueError** if no path was provided and we've never been provided one
        """

        if path:
            self._path = path
        elif not self._path:
            raise ValueError("Unable to load configuration: no path provided")

        with open(self._path, "r") as config_file:
            read_contents = config_file.readlines()

        with self._contents_lock:
            self._raw_contents = read_contents
            # consume the raw lines front-to-back so multi-line entries can
            # pull their continuation lines off the front of the queue
            remainder = list(self._raw_contents)

            while remainder:
                line = remainder.pop(0)

                # strips any commenting or excess whitespace
                comment_start = line.find("#")

                if comment_start != -1:
                    line = line[:comment_start]

                line = line.strip()

                # parse the key/value pair
                if line:
                    try:
                        key, value = line.split(" ", 1)
                        value = value.strip()
                    except ValueError:
                        log.debug("Config entry '%s' is expected to be of the format 'Key Value', defaulting to '%s' -> ''" % (line, line))
                        key, value = line, ""

                    if not value:
                        # this might be a multi-line entry, try processing it as such
                        multiline_buffer = []

                        while remainder and remainder[0].lstrip().startswith("|"):
                            content = remainder.pop(0).lstrip()[1:]  # removes '\s+|' prefix
                            content = content.rstrip("\n")  # trailing newline
                            multiline_buffer.append(content)

                        if multiline_buffer:
                            self.set(key, "\n".join(multiline_buffer), False)
                            continue

                    self.set(key, value, False)

    def save(self, path = None):
        """
        Saves configuration contents to disk. If a path is provided then it
        replaces the configuration location that we track.

        :param str path: location to be saved to

        :raises: **ValueError** if no path was provided and we've never been provided one
        """

        if path:
            self._path = path
        elif not self._path:
            raise ValueError("Unable to save configuration: no path provided")

        with self._contents_lock:
            with open(self._path, 'w') as output_file:
                for entry_key in sorted(self.keys()):
                    for entry_value in self.get_value(entry_key, multiple = True):
                        # check for multi line entries
                        if "\n" in entry_value:
                            # write continuation lines in the '|' format load() parses
                            entry_value = "\n|" + entry_value.replace("\n", "\n|")

                        output_file.write('%s %s\n' % (entry_key, entry_value))

    def clear(self):
        """
        Drops the configuration contents and reverts back to a blank, unloaded
        state.
        """

        with self._contents_lock:
            self._contents.clear()
            self._raw_contents = []
            self._requested_keys = set()

    def add_listener(self, listener, backfill = True):
        """
        Registers the function to be notified of configuration updates. Listeners
        are expected to be functors which accept (config, key).

        :param functor listener: function to be notified when our configuration is changed
        :param bool backfill: calls the function with our current values if **True**
        """

        with self._contents_lock:
            self._listeners.append(listener)

            if backfill:
                for key in self.keys():
                    listener(self, key)

    def clear_listeners(self):
        """
        Removes all attached listeners.
        """

        self._listeners = []

    def keys(self):
        """
        Provides all keys in the currently loaded configuration.

        :returns: **list** of strings for the configuration keys we've loaded
        """

        return self._contents.keys()

    def unused_keys(self):
        """
        Provides the configuration keys that have never been provided to a caller
        via :func:`~stem.util.conf.config_dict` or the
        :func:`~stem.util.conf.Config.get` and
        :func:`~stem.util.conf.Config.get_value` methods.

        :returns: **set** of configuration keys we've loaded but have never been requested
        """

        return set(self.keys()).difference(self._requested_keys)

    def set(self, key, value, overwrite = True):
        """
        Appends the given key/value configuration mapping, behaving the same as if
        we'd loaded this from a configuration file.

        :param str key: key for the configuration mapping
        :param str,list value: value we're setting the mapping to
        :param bool overwrite: replaces the previous value if **True**, otherwise
          the values are appended
        """

        with self._contents_lock:
            if isinstance(value, str):
                if not overwrite and key in self._contents:
                    self._contents[key].append(value)
                else:
                    self._contents[key] = [value]

                for listener in self._listeners:
                    listener(self, key)
            elif isinstance(value, (list, tuple)):
                if not overwrite and key in self._contents:
                    self._contents[key] += value
                else:
                    self._contents[key] = value

                for listener in self._listeners:
                    listener(self, key)
            else:
                raise ValueError("Config.set() only accepts str, list, or tuple. Provided value was a '%s'" % type(value))

    def get(self, key, default = None):
        """
        Fetches the given configuration, using the key and default value to
        determine the type it should be. Recognized inferences are:

        * **default is a boolean => boolean**

          * values are case insensitive
          * provides the default if the value isn't "true" or "false"

        * **default is an integer => int**

          * provides the default if the value can't be converted to an int

        * **default is a float => float**

          * provides the default if the value can't be converted to a float

        * **default is a list => list**

          * string contents for all configuration values with this key

        * **default is a tuple => tuple**

          * string contents for all configuration values with this key

        * **default is a dictionary => dict**

          * values without "=>" in them are ignored
          * values are split into key/value pairs on "=>" with extra whitespace
            stripped

        :param str key: config setting to be fetched
        :param default object: value provided if no such key exists or fails to be converted

        :returns: given configuration value with its type inferred with the above rules
        """

        is_multivalue = isinstance(default, (list, tuple, dict))
        val = self.get_value(key, default, is_multivalue)

        if val == default:
            return val  # don't try to infer undefined values

        if isinstance(default, bool):
            if val.lower() == "true":
                val = True
            elif val.lower() == "false":
                val = False
            else:
                log.debug("Config entry '%s' is expected to be a boolean, defaulting to '%s'" % (key, str(default)))
                val = default
        elif isinstance(default, int):
            try:
                val = int(val)
            except ValueError:
                log.debug("Config entry '%s' is expected to be an integer, defaulting to '%i'" % (key, default))
                val = default
        elif isinstance(default, float):
            try:
                val = float(val)
            except ValueError:
                log.debug("Config entry '%s' is expected to be a float, defaulting to '%f'" % (key, default))
                val = default
        elif isinstance(default, list):
            pass  # nothing special to do (already a list)
        elif isinstance(default, tuple):
            val = tuple(val)
        elif isinstance(default, dict):
            valMap = {}
            for entry in val:
                if "=>" in entry:
                    entryKey, entryVal = entry.split("=>", 1)
                    valMap[entryKey.strip()] = entryVal.strip()
                else:
                    log.debug("Ignoring invalid %s config entry (expected a mapping, but \"%s\" was missing \"=>\")" % (key, entry))
            val = valMap

        return val

    def get_value(self, key, default = None, multiple = False):
        """
        This provides the current value associated with a given key.

        :param str key: config setting to be fetched
        :param object default: value provided if no such key exists
        :param bool multiple: provides back a list of all values if **True**,
          otherwise this returns the last loaded configuration value

        :returns: **str** or **list** of string configuration values associated
          with the given key, providing the default if no such key exists
        """

        with self._contents_lock:
            if key in self._contents:
                self._requested_keys.add(key)

                if multiple:
                    return self._contents[key]
                else:
                    # non-multiple lookups give the most recently loaded value
                    return self._contents[key][-1]
            else:
                message_id = "stem.util.conf.missing_config_key_%s" % key
                log.log_once(message_id, log.TRACE, "config entry '%s' not found, defaulting to '%s'" % (key, default))
                return default
|
arlolra/stem
|
stem/util/conf.py
|
Python
|
lgpl-3.0
| 22,101
|
import pytest
from midi.midi import Message
from midi.types import NoteOff, SysEx
from midi.utils import get_status_value
@pytest.fixture
def note_off_msg():
    """A NoteOff message (note 10, velocity 100) wrapped for channel 1."""
    note_off = NoteOff(10, 100)
    return Message(note_off, 1)
@pytest.fixture
def sysex_msg():
    """A SysEx message with 4 bytes of random data with manufacturer ID #35."""
    # 0x12, 0xac, 0x9a, 0x8d are the four arbitrary data bytes
    sysex = SysEx(35, 0x12, 0xac, 0x9a, 0x8d)
    return Message(sysex)
def test_message_content(note_off_msg):
    """Checks byte layout, content views, and attribute access of a NoteOff message."""
    assert len(note_off_msg) == 3
    assert note_off_msg[0] == 128  # status byte of NoteOff sent on channel 1
    assert note_off_msg[1] == 10
    assert note_off_msg[2] == 100
    assert note_off_msg.content == [128, 10, 100]
    assert note_off_msg.bytes_content == [bytes([c])
                                          for c in note_off_msg.content]
    # NoteOff exposes note/velocity accessors but no controller attribute
    assert not hasattr(note_off_msg, 'control_number')
    assert hasattr(note_off_msg, 'note_number')
    assert hasattr(note_off_msg, 'velocity')
    assert note_off_msg.note_number == note_off_msg[1]
    assert note_off_msg.velocity == note_off_msg[2]
def test_sysex_content(sysex_msg):
    """Checks framing and manufacturer ID of a SysEx message."""
    assert len(sysex_msg) == 7  # ID + 4 bytes of data + start byte + end byte
    assert sysex_msg[0] == 0xf0  # SysEx start byte
    assert sysex_msg[1] == 35
    assert sysex_msg[-1] == 0xf7  # SysEx end byte
|
edouardtheron/py-midi
|
tests/test_message_object.py
|
Python
|
gpl-3.0
| 1,316
|
import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
@pytest.fixture
def driver(request):
    """Chrome WebDriver configured to capture all browser console log levels."""
    # NOTE(review): this mutates the shared DesiredCapabilities.CHROME class
    # dict rather than a copy — confirm that's acceptable for this suite.
    d = DesiredCapabilities.CHROME
    d['loggingPrefs'] = {'browser': 'ALL'}  # collect every console log level
    wd = webdriver.Chrome(desired_capabilities=d)
    request.addfinalizer(wd.quit)  # quit the browser when the test finishes
    return wd
def test_browser_log_checkin(driver):
    """
    Logs into the litecart admin panel, opens product entries from the catalog,
    and prints any browser console log messages that were captured.
    """
    driver.get("http://localhost/litecart/admin/")
    driver.implicitly_wait(5)
    driver.find_element_by_name("username").send_keys("admin")
    driver.find_element_by_name("password").send_keys("admin")
    driver.find_element_by_name("login").click()

    # open Catalog of goods
    driver.get("http://localhost/litecart/admin/?app=catalog&doc=catalog&category_id=1")
    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, "a.button")))

    # BUG FIX: the :not() selector was missing its closing parenthesis
    # ("a:not([title=Edit]"), which is an invalid CSS selector.
    elements = driver.find_elements_by_css_selector("table.dataTable a:not([title=Edit])")

    for i in range(3, len(elements)):
        elements[i].click()
        driver.find_element_by_css_selector("button i.fa.fa-times").click()

    # print messages collected by the browser
    for entry in driver.get_log('browser'):
        print(entry)

    # When the chrome driver is used, console log messages appear in the list
    # of browser messages printed above.
|
olga121/Selenium_Webdriver
|
test_brouser_log_checking.py
|
Python
|
apache-2.0
| 1,792
|
#MenuTitle: Un-Round Corners
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
__doc__="""
Removes corners of outlines of the selected letters (current master only).
1. It doesn't do perfect job at curved segments, but does keep the original in the background. Please fine-tune by hand.
2. If it creates really funny results, try changing start node position.
3. It might miss corners with big round radius. You can change the roundness at line 16.
4. The script assumes there is no extreme point in the corner.
"""
import GlyphsApp
thisFont = Glyphs.font # frontmost font
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs
roundness = 30
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
def line(p1, p2):
    """Returns (A, B, C) coefficients of the line A*x + B*y = C through p1 and p2."""
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    return y1 - y2, x2 - x1, x2 * y1 - x1 * y2
def intersection(L1, L2):
    """Returns the (x, y) intersection of two lines from line(), or False if parallel."""
    det = L1[0] * L2[1] - L1[1] * L2[0]

    if det == 0:
        return False  # parallel (or coincident) lines

    det_x = L1[2] * L2[1] - L1[1] * L2[2]
    det_y = L1[0] * L2[2] - L1[2] * L2[0]
    return det_x / det, det_y / det
def nudge(oncurveMv, offcurve1, offcurve2, oncurveSt, offsetX, offsetY):
    # Moves the on-curve node oncurveMv by (offsetX, offsetY) and shifts the two
    # off-curve handles proportionally, so the curve segment toward the static
    # on-curve node oncurveSt keeps its overall shape.
    # NOTE(review): if distanceX (or distanceY) is zero while the corresponding
    # handle distance isn't, the divisions below raise ZeroDivisionError —
    # confirm callers guarantee the nodes aren't aligned that way.
    distanceX = oncurveMv.x - oncurveSt.x
    distanceX1 = oncurveMv.x - offcurve1.x
    distanceX2 = offcurve2.x - oncurveSt.x
    if distanceX1 != 0:
        # scale the first handle's shift by its relative distance to the move
        offcurve1.x += (1-distanceX1/distanceX)*offsetX
    else:
        offcurve1.x += offsetX
    if distanceX2 != 0:
        offcurve2.x += (distanceX2/distanceX)*offsetX
    distanceY = oncurveMv.y - oncurveSt.y
    distanceY1 = oncurveMv.y - offcurve1.y
    distanceY2 = offcurve2.y - oncurveSt.y
    if distanceY1 != 0:
        offcurve1.y += (1-distanceY1/distanceY)*offsetY
    else:
        offcurve1.y += offsetY
    if distanceY2 != 0:
        offcurve2.y += (distanceY2/distanceY)*offsetY
    # finally move the on-curve node itself
    oncurveMv.x += offsetX
    oncurveMv.y += offsetY
def sharpen(path, n):
    # Replaces the rounded corner starting at node index n with a sharp corner,
    # removing the small connecting curve segment. Returns True when nodes were
    # removed (the caller shrinks its node count by 3), falsy otherwise.
    if path.nodes[n+4]:
        # the six nodes spanning the candidate corner
        node0 = path.nodes[n-1]
        node1 = path.nodes[n]
        node2 = path.nodes[n+1]
        node3 = path.nodes[n+2]
        node4 = path.nodes[n+3]
        node5 = path.nodes[n+4]
        # node2 and 3 should be the offcurve points in question.
        # if it starts from straight segment to straight
        if node0.type != GSOFFCURVE and node1.type == GSLINE and node2.type == GSOFFCURVE and node3.type == GSOFFCURVE and node4.type != GSOFFCURVE and node5.type == GSLINE:
            # only treat it as a corner if the rounding is small enough
            if abs(node1.x - node4.x) <= roundness and abs(node1.y - node4.y) <= roundness:
                # intersect the two straight segments to find the sharp corner
                L1 = line(node0.position, node1.position)
                L2 = line(node4.position, node5.position)
                R = intersection(L1, L2)
                path.removeNodeAtIndex_(n+1)
                path.removeNodeAtIndex_(n+1)
                path.removeNodeAtIndex_(n+1)
                node1.position = R
                node1.type = GSLINE
                node1.connection = GSSHARP
                return True
        # if it starts from curved segment to straight
        elif node0.type == GSOFFCURVE and node1.type != GSOFFCURVE and node2.type == GSOFFCURVE and node3.type == GSOFFCURVE and node4.type != GSOFFCURVE and node5.type == GSLINE:
            if abs(node1.x - node4.x) <= roundness and abs(node1.y - node4.y) <= roundness:
                L1 = line(node0.position, node1.position)
                L2 = line(node4.position, node5.position)
                R = intersection(L1, L2)
                path.removeNodeAtIndex_(n+1)
                path.removeNodeAtIndex_(n+1)
                path.removeNodeAtIndex_(n+1)
                # shift the preceding curve so its endpoint lands on the corner
                offsetX = R[0]-path.nodes[n].x
                offsetY = R[1]-path.nodes[n].y
                nudge(path.nodes[n], path.nodes[n-1], path.nodes[n-2], path.nodes[n-3], offsetX, offsetY)
                path.nodes[n].type = GSCURVE
                path.nodes[n].connection = GSSHARP
                return True
        # if it starts from straight segment to curve
        elif node0.type == GSCURVE and node1.type == GSLINE and node2.type == GSOFFCURVE and node3.type == GSOFFCURVE and node4.type == GSCURVE and node5.type == GSOFFCURVE:
            if abs(node1.x - node4.x) <= roundness and abs(node1.y - node4.y) <= roundness:
                L1 = line(node0.position, node1.position)
                L2 = line(node4.position, node5.position)
                R = intersection(L1, L2)
                path.nodes[n-1].type = GSCURVE
                path.nodes[n-1].connection = GSSHARP
                path.removeNodeAtIndex_(n)
                path.removeNodeAtIndex_(n)
                path.removeNodeAtIndex_(n)
                path.nodes[n].type = GSLINE
                path.nodes[n].connection = GSSHARP
                # shift the following curve so its startpoint lands on the corner
                offsetX = R[0]-path.nodes[n].x
                offsetY = R[1]-path.nodes[n].y
                nudge(path.nodes[n], path.nodes[n+1], path.nodes[n+2], path.nodes[n+3], offsetX, offsetY)
                return True
        else:
            # not a recognized corner configuration
            return False
def unRound( thisLayer ):
    # Sharpens every small rounded corner on the layer's paths, keeping the
    # original outline in the background for manual comparison.
    try:
        thisLayer.setBackground_(thisLayer)  # preserve the original as background
        for thisPath in thisLayer.paths:
            nodeTotal = len(thisPath.nodes)
            for i in range(len(thisPath.nodes)):
                if i >= nodeTotal-1: #it's weird, but this is the way to avoid first node error.
                    sharpen(thisPath, -1)
                    break
                elif sharpen(thisPath, i):
                    # sharpen() removed 3 nodes, so shrink our working count
                    nodeTotal -= 3
    except Exception as e:
        Glyphs.showMacroWindow()
        print("Un-Round Corners Error (unRound): %s" % e)
# Process every selected layer, wrapping each glyph's changes in an undo group.
for thisLayer in listOfSelectedLayers:
    thisGlyph = thisLayer.parent
    thisGlyph.beginUndo()  # begin undo grouping
    # BUG FIX: removed the dead bare expression 'thisGlyph.name' that had no effect
    unRound( thisLayer )
    thisGlyph.endUndo()  # end undo grouping

thisFont.enableUpdateInterface()  # re-enables UI updates in Font View
|
Tosche/Glyphs-Scripts
|
Path/Un-Round Corners.py
|
Python
|
apache-2.0
| 5,144
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_vswitch
short_description: Add a VMware Standard Switch to an ESXi host
description:
- Add a VMware Standard Switch to an ESXi host
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
switch_name:
description:
- vSwitch name to add
required: True
nic_name:
description:
- vmnic name to attach to vswitch
required: True
number_of_ports:
description:
- Number of port to configure on vswitch
default: 128
required: False
mtu:
description:
- MTU to configure on vswitch
required: False
state:
description:
- Add or remove the switch
default: 'present'
choices:
- 'present'
- 'absent'
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
Example from Ansible playbook
- name: Add a VMware vSwitch
local_action:
module: vmware_vswitch
hostname: esxi_hostname
username: esxi_username
password: esxi_password
switch_name: vswitch_name
nic_name: vmnic_name
mtu: 9000
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def find_vswitch_by_name(host, vswitch_name):
    """Returns the host's standard vSwitch named *vswitch_name*, or None if absent."""
    return next(
        (vss for vss in host.config.network.vswitch if vss.name == vswitch_name),
        None,
    )
class VMwareHostVirtualSwitch(object):
    """Manages the presence of a standard virtual switch on an ESXi host."""

    def __init__(self, module):
        self.host_system = None
        self.content = None
        self.vss = None
        self.module = module
        self.switch_name = module.params['switch_name']
        self.number_of_ports = module.params['number_of_ports']
        self.nic_name = module.params['nic_name']
        self.mtu = module.params['mtu']
        self.state = module.params['state']
        self.content = connect_to_api(self.module)

    def process_state(self):
        """Dispatches to the handler matching the desired and current state."""
        try:
            # maps desired state -> current state -> handler
            vswitch_states = {
                'absent': {
                    'present': self.state_destroy_vswitch,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'update': self.state_update_vswitch,
                    'present': self.state_exit_unchanged,
                    'absent': self.state_create_vswitch,
                }
            }

            vswitch_states[self.state][self.check_vswitch_configuration()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    # Source from
    # https://github.com/rreubenur/pyvmomi-community-samples/blob/patch-1/samples/create_vswitch.py

    def state_create_vswitch(self):
        """Creates the vSwitch, bonding the requested vmnic to it."""
        vss_spec = vim.host.VirtualSwitch.Specification()
        vss_spec.numPorts = self.number_of_ports
        vss_spec.mtu = self.mtu
        vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=[self.nic_name])
        self.host_system.configManager.networkSystem.AddVirtualSwitch(vswitchName=self.switch_name, spec=vss_spec)
        self.module.exit_json(changed=True)

    def state_exit_unchanged(self):
        """Reports that no change was needed."""
        self.module.exit_json(changed=False)

    def state_destroy_vswitch(self):
        """Removes the vSwitch after detaching the port groups that use it."""
        config = vim.host.NetworkConfig()

        for portgroup in self.host_system.configManager.networkSystem.networkInfo.portgroup:
            if portgroup.spec.vswitchName == self.vss.name:
                portgroup_config = vim.host.PortGroup.Config()
                portgroup_config.changeOperation = "remove"
                portgroup_config.spec = vim.host.PortGroup.Specification()
                # BUG FIX: spec.name was assigned twice in a row; the duplicate
                # assignment has been removed.
                portgroup_config.spec.name = portgroup.spec.name
                portgroup_config.spec.vlanId = portgroup.spec.vlanId
                portgroup_config.spec.vswitchName = portgroup.spec.vswitchName
                portgroup_config.spec.policy = vim.host.NetworkPolicy()
                config.portgroup.append(portgroup_config)

        self.host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify")
        self.host_system.configManager.networkSystem.RemoveVirtualSwitch(self.vss.name)
        self.module.exit_json(changed=True)

    def state_update_vswitch(self):
        """Placeholder for reconfiguring an existing vSwitch."""
        self.module.exit_json(changed=False, msg="Currently not implemented.")

    def check_vswitch_configuration(self):
        """Returns 'present' or 'absent' depending on whether the switch exists."""
        host = get_all_objs(self.content, [vim.HostSystem])

        if not host:
            self.module.fail_json(msg="Unable to find host")

        # BUG FIX: list() keeps this working on Python 3, where dict views
        # aren't indexable (host.keys()[0] raises TypeError there).
        self.host_system = list(host.keys())[0]
        self.vss = find_vswitch_by_name(self.host_system, self.switch_name)

        if self.vss is None:
            return 'absent'
        else:
            return 'present'
def main():
    """Ansible entry point: builds the argument spec and applies the desired state."""
    argument_spec = vmware_argument_spec()
    # module-specific parameters on top of the shared vmware connection args
    argument_spec.update(dict(switch_name=dict(required=True, type='str'),
                              nic_name=dict(required=True, type='str'),
                              number_of_ports=dict(required=False, type='int', default=128),
                              mtu=dict(required=False, type='int', default=1500),
                              state=dict(default='present', choices=['present', 'absent'], type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    host_virtual_switch = VMwareHostVirtualSwitch(module)
    host_virtual_switch.process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
ramondelafuente/ansible-modules-extras
|
cloud/vmware/vmware_vswitch.py
|
Python
|
gpl-3.0
| 6,816
|
#!/usr/bin/python
'''
lanzar el script escribiendo esta linea dentro de la carpeta con los jp4s:
ls *.jpg | parallel -j 8 python jp4-elphel-exr.py {}
'''
import os, sys, subprocess

# Converts each Elphel JP4 image given on the command line to an EXR, going
# through a temporary DNG. Intended to be driven by GNU parallel (see the
# usage note in the module docstring above).
# BUG FIX: previously iterated sys.argv[0:], which includes the script path
# itself as the first "input file"; only the real arguments are processed now.
for infile in sys.argv[1:]:
    f, e = os.path.splitext(infile)

    # develop the JP4 into a DNG
    command = "elphel_dng 100 "+f+e+" "+f+".dng"
    subprocess.call(command,shell=True)

    # carry the original EXIF tags over to the DNG
    command = "exiftool -tagsFromFile "+f+e+" "+f+".dng"
    subprocess.call(command,shell=True)

    # normalize exposure metadata on the DNG
    command = "exiftool -ISO=100 -FocalLength=4.5 -ExposureTime=0.04 -ApertureValue=2.0 -overwrite_original "+f+".dng"
    subprocess.call(command,shell=True)

    # dcraw -T -4 -q 1 -a -m 2
    # |
    # `-> you have to configure this inside qtpfsgui preferences
    command = "qtpfsgui "+f+".dng -s "+f+".exr"
    subprocess.call(command,shell=True)

    # clean up the intermediate files
    command = "rm "+f+".dng_original"
    subprocess.call(command,shell=True)
    command = "rm "+f+".dng"
    subprocess.call(command,shell=True)
|
kinoraw/nautilus-scripts
|
old/elphel/jp4-elphel-exr.py
|
Python
|
gpl-3.0
| 952
|
"""
Copyright 2007 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
from __future__ import absolute_import
from os import path
from gi.repository import Gtk
from . import Constants, Utils, Dialogs
class FileDialogHelper(Gtk.FileChooserDialog, object):
    """
    A wrapper class for the gtk file chooser dialog.
    Implement a file chooser dialog with only necessary parameters.
    """

    # subclasses override these to configure the dialog
    title = ""
    action = Gtk.FileChooserAction.OPEN
    filter_label = ""
    filter_ext = ""

    def __init__(self, parent, current_file_path):
        """
        FileDialogHelper constructor.
        Create a save or open dialog with cancel and ok buttons.
        Use standard settings: no multiple selection, local files only, and the * filter.

        Args:
            parent: the transient parent window of this dialog
            current_file_path: the file path to start from (may be empty)
        """
        # pick the stock OK button label matching the dialog action
        ok_stock = {
            Gtk.FileChooserAction.OPEN: "gtk-open",
            Gtk.FileChooserAction.SAVE: "gtk-save",
        }[self.action]

        Gtk.FileChooserDialog.__init__(
            self, title=self.title, action=self.action, transient_for=parent
        )
        self.add_buttons(
            "gtk-cancel", Gtk.ResponseType.CANCEL, ok_stock, Gtk.ResponseType.OK
        )
        self.set_select_multiple(False)
        self.set_local_only(True)

        self.parent = parent
        # fall back to the default "untitled" flow graph location when no path given
        self.current_file_path = current_file_path or path.join(
            Constants.DEFAULT_FILE_PATH,
            Constants.NEW_FLOGRAPH_TITLE + Constants.FILE_EXTENSION,
        )

        # NOTE(review): this uses the raw argument rather than
        # self.current_file_path, so an empty argument yields
        # path.dirname('') == '' here — confirm that's intended.
        self.set_current_folder(path.dirname(current_file_path))  # current directory
        self.setup_filters()

    def setup_filters(self, filters=None):
        # Installs the file filters (subclass filter plus "All Files").
        # NOTE(review): set_default starts as True, so the set_filter branch
        # below can never execute — looks like it was meant to start as False;
        # confirm against upstream before changing.
        set_default = True
        filters = filters or (
            [(self.filter_label, self.filter_ext)] if self.filter_label else []
        )
        filters.append(("All Files", ""))

        for label, ext in filters:
            if not label:
                continue
            f = Gtk.FileFilter()
            f.set_name(label)
            f.add_pattern("*" + ext)
            self.add_filter(f)
            if not set_default:
                self.set_filter(f)
                set_default = True

    def run(self):
        """Get the filename and destroy the dialog."""
        response = Gtk.FileChooserDialog.run(self)
        filename = self.get_filename() if response == Gtk.ResponseType.OK else None
        self.destroy()
        return filename
class SaveFileDialog(FileDialogHelper):
    """A dialog box to save or open flow graph files. This is a base class, do not use."""

    action = Gtk.FileChooserAction.SAVE

    def __init__(self, parent, current_file_path):
        super(SaveFileDialog, self).__init__(parent, current_file_path)
        # suggest the current file's base name with this dialog's extension
        self.set_current_name(
            path.splitext(path.basename(self.current_file_path))[0] + self.filter_ext
        )
        self.set_create_folders(True)
        self.set_do_overwrite_confirmation(True)
class OpenFileDialog(FileDialogHelper):
    """A dialog box to save or open flow graph files. This is a base class, do not use."""

    action = Gtk.FileChooserAction.OPEN

    def show_missing_message(self, filename):
        """Shows a warning dialog for a selected file that does not exist."""
        Dialogs.MessageDialogWrapper(
            self.parent,
            Gtk.MessageType.WARNING,
            Gtk.ButtonsType.CLOSE,
            "Cannot Open!",
            # BUG FIX: the format string had lost its '{filename}' placeholder,
            # so .format(filename=...) was a no-op and the dialog never showed
            # which file was missing.
            "File <b>{filename}</b> Does not Exist!".format(
                filename=Utils.encode(filename)
            ),
        ).run_and_destroy()

    def get_filename(self):
        """
        Run the dialog and get the filename.
        If this is a save dialog and the file name is missing the extension, append the file extension.
        If the file name with the extension already exists, show a overwrite dialog.
        If this is an open dialog, return a list of filenames.

        Returns:
            the complete file path
        """
        filenames = Gtk.FileChooserDialog.get_filenames(self)

        for filename in filenames:
            if not path.exists(filename):
                self.show_missing_message(filename)
                return None  # rerun
        return filenames
class OpenFlowGraph(OpenFileDialog):
    """File-open dialog specialized for flow graph files."""

    title = "Open a Flow Graph from a File..."
    filter_label = "Flow Graph Files"
    filter_ext = Constants.FILE_EXTENSION

    def __init__(self, parent, current_file_path=""):
        super(OpenFlowGraph, self).__init__(parent, current_file_path)
        # Unlike the base dialog, several flow graphs may be opened at once.
        self.set_select_multiple(True)
class OpenQSS(OpenFileDialog):
    """File-open dialog for selecting a QSS stylesheet theme."""

    title = "Open a QSS theme..."
    filter_label = "QSS Themes"
    filter_ext = ".qss"
class SaveFlowGraph(SaveFileDialog):
    """File-save dialog specialized for flow graph files."""

    title = "Save a Flow Graph to a File..."
    filter_label = "Flow Graph Files"
    filter_ext = Constants.FILE_EXTENSION
class SaveConsole(SaveFileDialog):
    """File-save dialog for dumping the console contents to a text file."""

    title = "Save Console to a File..."
    # BUG FIX: label read "Test Files"; the filter is for plain-text
    # (.txt) files, so "Text Files" is the intended label.
    filter_label = "Text Files"
    filter_ext = ".txt"
class SaveScreenShot(SaveFileDialog):
    """File-save dialog for flow graph screenshots (PDF, PNG or SVG)."""

    title = "Save a Flow Graph Screen Shot..."
    filters = [("PDF Files", ".pdf"), ("PNG Files", ".png"), ("SVG Files", ".svg")]
    filter_ext = ".pdf"  # the default

    def __init__(self, parent, current_file_path=""):
        super(SaveScreenShot, self).__init__(parent, current_file_path)
        self.config = Gtk.Application.get_default().config
        # Extra checkbox to toggle a transparent screenshot background,
        # pre-set from the persisted config value.
        self._button = button = Gtk.CheckButton(label="Background transparent")
        self._button.set_active(self.config.screen_shot_background_transparent())
        self.set_extra_widget(button)

    def setup_filters(self, filters=None):
        # Always install the fixed PDF/PNG/SVG filter set; the argument
        # is intentionally ignored.
        super(SaveScreenShot, self).setup_filters(self.filters)

    def show_missing_message(self, filename):
        """Tell the user that the chosen file's extension is not supported."""
        Dialogs.MessageDialogWrapper(
            self.parent,
            Gtk.MessageType.ERROR,
            Gtk.ButtonsType.CLOSE,
            "Can not Save!",
            # BUG FIX: the format string had no placeholder, so the
            # .format(filename=...) call was a no-op and the filename
            # was never shown in the message.
            "File Extension of <b>{filename}</b> not supported!".format(
                filename=Utils.encode(filename)
            ),
        ).run_and_destroy()

    def run(self):
        """Loop until a filename with a supported extension is chosen or cancelled.

        Returns:
            tuple of (filename or None, background-transparent flag)
        """
        valid_exts = {ext for label, ext in self.filters}
        filename = None
        while True:
            response = Gtk.FileChooserDialog.run(self)
            if response != Gtk.ResponseType.OK:
                filename = None
                break
            filename = self.get_filename()
            if path.splitext(filename)[1] in valid_exts:
                break
            # Unsupported extension: warn and show the chooser again.
            self.show_missing_message(filename)

        # Persist the checkbox state before tearing the dialog down.
        bg_transparent = self._button.get_active()
        self.config.screen_shot_background_transparent(bg_transparent)
        self.destroy()
        return filename, bg_transparent
|
skoslowski/gnuradio
|
grc/gui/FileDialogs.py
|
Python
|
gpl-3.0
| 6,746
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: drop the 'StudyPrivate' model's table."""
    # Deleting model 'StudyPrivate'
    table_name = u'feed_studyprivate'
    db.delete_table(table_name)
def backwards(self, orm):
    """Reverse the migration: recreate the 'StudyPrivate' model's table."""
    # Adding model 'StudyPrivate'
    columns = (
        ('created_at', self.gf('django.db.models.fields.DateTimeField')()),
        ('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('updated_at', self.gf('django.db.models.fields.DateTimeField')()),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='studyprivate_related', null=True, to=orm['auth.User'], blank=True)),
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('study', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feed.Study'])),
    )
    db.create_table(u'feed_studyprivate', columns)
    db.send_create_signal(u'feed', ['StudyPrivate'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'feed.ageunit': {
'Meta': {'ordering': "['label']", 'object_name': 'AgeUnit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ageunit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anatomicallocation': {
'Meta': {'ordering': "['label']", 'object_name': 'AnatomicalLocation'},
'category': ('django.db.models.fields.IntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anatomicallocation_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ontology_term': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['feed.MuscleOwl']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anteriorposterioraxis': {
'Meta': {'object_name': 'AnteriorPosteriorAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anteriorposterioraxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behavior': {
'Meta': {'ordering': "['label']", 'object_name': 'Behavior'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'behavior_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behaviorowl': {
'Meta': {'object_name': 'BehaviorOwl'},
'bfo_part_of_some': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'has_parts'", 'symmetrical': 'False', 'to': u"orm['feed.BehaviorOwl']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_is_class': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'has_subClass_descendants'", 'symmetrical': 'False', 'to': u"orm['feed.BehaviorOwl']"}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.channel': {
'Meta': {'object_name': 'Channel'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channel_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rate': ('django.db.models.fields.IntegerField', [], {}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.channellineup': {
'Meta': {'ordering': "['position']", 'object_name': 'ChannelLineup'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Channel']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channellineup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.depthaxis': {
'Meta': {'object_name': 'DepthAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'depthaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.developmentstage': {
'Meta': {'ordering': "['label']", 'object_name': 'DevelopmentStage'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'developmentstage_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.dorsalventralaxis': {
'Meta': {'object_name': 'DorsalVentralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dorsalventralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.electrodetype': {
'Meta': {'ordering': "['label']", 'object_name': 'ElectrodeType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'electrodetype_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgchannel': {
'Meta': {'object_name': 'EmgChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'emg_amplification': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'emg_filtering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Emgfiltering']"}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.EmgSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.emgfiltering': {
'Meta': {'ordering': "['label']", 'object_name': 'Emgfiltering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'emgfiltering_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgsensor': {
'Meta': {'ordering': "['id']", 'object_name': 'EmgSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'electrode_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ElectrodeType']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.emgsetup': {
'Meta': {'object_name': 'EmgSetup', '_ormbases': [u'feed.Setup']},
'preamplifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.eventchannel': {
'Meta': {'object_name': 'EventChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.eventsetup': {
'Meta': {'object_name': 'EventSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.experiment': {
'Meta': {'object_name': 'Experiment'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiment_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impl_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'subj_age': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subj_ageunit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AgeUnit']", 'null': 'True', 'blank': 'True'}),
'subj_devstage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DevelopmentStage']"}),
'subj_tooth': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']"}),
'subject_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.forcechannel': {
'Meta': {'object_name': 'ForceChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ForceSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.forcesensor': {
'Meta': {'object_name': 'ForceSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.forcesetup': {
'Meta': {'object_name': 'ForceSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.illustration': {
'Meta': {'object_name': 'Illustration'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'illustration_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']", 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.kinematicschannel': {
'Meta': {'object_name': 'KinematicsChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.KinematicsSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.kinematicssensor': {
'Meta': {'object_name': 'KinematicsSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.kinematicssetup': {
'Meta': {'object_name': 'KinematicsSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.mediallateralaxis': {
'Meta': {'object_name': 'MedialLateralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'mediallateralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.muscleowl': {
'Meta': {'object_name': 'MuscleOwl'},
'bfo_part_of_some': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'has_parts'", 'symmetrical': 'False', 'to': u"orm['feed.MuscleOwl']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_is_class': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'has_subClass_descendants'", 'symmetrical': 'False', 'to': u"orm['feed.MuscleOwl']"}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.pressurechannel': {
'Meta': {'object_name': 'PressureChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.PressureSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.pressuresensor': {
'Meta': {'object_name': 'PressureSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.pressuresetup': {
'Meta': {'object_name': 'PressureSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.proximaldistalaxis': {
'Meta': {'object_name': 'ProximalDistalAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proximaldistalaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.restraint': {
'Meta': {'ordering': "['label']", 'object_name': 'Restraint'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'restraint_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sensor': {
'Meta': {'object_name': 'Sensor'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sensor_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loc_ap': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnteriorPosteriorAxis']", 'null': 'True', 'blank': 'True'}),
'loc_dv': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DorsalVentralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_ml': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MedialLateralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_pd': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ProximalDistalAxis']", 'null': 'True', 'blank': 'True'}),
'loc_side': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Side']"}),
'location_freetext': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.session': {
'Meta': {'ordering': "['position']", 'object_name': 'Session'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.Channel']", 'through': u"orm['feed.ChannelLineup']", 'symmetrical': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_anesthesia_sedation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_restraint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Restraint']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.setup': {
'Meta': {'object_name': 'Setup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'setup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sampling_rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.side': {
'Meta': {'ordering': "['label']", 'object_name': 'Side'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'side_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sonochannel': {
'Meta': {'object_name': 'SonoChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'crystal1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals1_related'", 'to': u"orm['feed.SonoSensor']"}),
'crystal2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals2_related'", 'to': u"orm['feed.SonoSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.sonosensor': {
'Meta': {'object_name': 'SonoSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.sonosetup': {
'Meta': {'object_name': 'SonoSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'}),
'sonomicrometer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.strainchannel': {
'Meta': {'object_name': 'StrainChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.StrainSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.strainsensor': {
'Meta': {'object_name': 'StrainSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.strainsetup': {
'Meta': {'object_name': 'StrainSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.study': {
'Meta': {'ordering': "['title']", 'object_name': 'Study'},
'approval': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'approval_secured': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'study_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'funding': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'funding_agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lab': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pi': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'resources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.subject': {
'Meta': {'object_name': 'Subject'},
'breed': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subject_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'taxon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Taxon']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.taxon': {
'Meta': {'ordering': "['genus']", 'object_name': 'Taxon'},
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'taxon_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.trial': {
'Meta': {'object_name': 'Trial'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'behavior_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'behavior_primary': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Behavior']"}),
'behavior_secondary': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'behaviorowl_primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_in_trials'", 'null': 'True', 'to': u"orm['feed.BehaviorOwl']"}),
'behaviorowl_secondary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'secondary_in_trials'", 'null': 'True', 'to': u"orm['feed.BehaviorOwl']"}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trial_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'estimated_duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'food_property': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_size': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_treatment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {}),
'waveform_picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'feed.unit': {
'Meta': {'ordering': "['technique', 'label']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'unit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['feed']
|
Squishymedia/feedingdb
|
src/feeddb/feed/migrations/0073_auto__del_studyprivate.py
|
Python
|
gpl-3.0
| 40,612
|
from django.apps import AppConfig
class FileserverConfig(AppConfig):
    """Django application configuration for the ``fileserver`` app."""
    name = 'fileserver'
|
opentelega/OpenTelega
|
fileserver/apps.py
|
Python
|
mit
| 95
|
# dr14_t.meter: compute the DR14 value of the given audiofiles
# Copyright (C) 2011 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
from dr14tmeter.audio_math import *
from dr14tmeter.out_messages import *
import math
import time
try:
import matplotlib
import matplotlib.pyplot as pyplot
import matplotlib.mlab as mlab
except:
____foo = None
try:
    class MyTimeFormatter(matplotlib.ticker.Formatter):
        """Axis tick formatter rendering a position as mm:ss or mm:ss.mmm.

        One axis unit corresponds to ``utime`` seconds.
        """

        def __init__(self, utime=1.0, milli_sec=False):
            # utime: seconds per axis unit; milli_sec: append milliseconds.
            self.utime = utime
            self.milli_sec = milli_sec

        def __call__(self, x, pos=None):
            total = self.utime * x
            minutes = int(total / 60)
            seconds = int(total - minutes * 60)
            if self.milli_sec:
                millis = int(1000 * (total - int(total)))
                return "%02d:%02d.%03d" % (minutes, seconds, millis)
            return "%02d:%02d" % (minutes, seconds)
except:
    class MyTimeFormatter:
        """Placeholder used when matplotlib could not be imported."""

        def __init__(self):
            raise
|
magicgoose/dr14_t.meter
|
dr14tmeter/my_time_formatter.py
|
Python
|
gpl-3.0
| 1,668
|
from django.db import models
# Create your models here.
from aldryn_apphooks_config.fields import AppHookConfigField
from aldryn_apphooks_config.managers import AppHookConfigManager
from faq.cms_appconfig import FaqConfig
class Entry(models.Model):
    """A single FAQ entry (question/answer pair) scoped to an apphook config."""
    # Apphook configuration (namespace) this entry belongs to; field type
    # is provided by aldryn_apphooks_config.
    app_config = AppHookConfigField(FaqConfig)
    question = models.TextField(blank=True, default='')
    answer = models.TextField()
    # Namespace-aware manager from aldryn_apphooks_config.
    objects = AppHookConfigManager()

    def __unicode__(self):
        return self.question

    class Meta:
        verbose_name_plural = 'entries'
|
amitbend/Django-CMS-tutorial-project-django-1.8
|
faq/models.py
|
Python
|
gpl-2.0
| 538
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/MSCommon/arch.py 4577 2009/12/27 19:43:56 scons"
__doc__ = """Module to define supported Windows chip architectures.
"""
import os
class ArchDefinition:
    """
    A class for defining architecture-specific settings and logic.

    `arch` is the canonical architecture name; `synonyms` lists the other
    names under which the same architecture may be requested.
    """
    def __init__(self, arch, synonyms=None):
        # Fixed: the default used to be a mutable `[]`, which Python
        # evaluates once, so every instance built without an explicit
        # synonym list shared (and could mutate) the same list object.
        self.arch = arch
        self.synonyms = [] if synonyms is None else synonyms
# Architectures supported by the MSVC tooling, with the synonyms under
# which each may be requested.
SupportedArchitectureList = [
    ArchDefinition(  # fixed: was `ArchitectureDefinition`, an undefined name
        'x86',
        ['i386', 'i486', 'i586', 'i686'],
    ),
    ArchDefinition(
        'x86_64',
        ['AMD64', 'amd64', 'em64t', 'EM64T', 'x86_64'],
    ),
    ArchDefinition(
        'ia64',
        ['IA64'],
    ),
]

# Map every canonical name AND every synonym to its ArchDefinition so
# lookups work regardless of which spelling the caller used.
SupportedArchitectureMap = {}
for a in SupportedArchitectureList:
    SupportedArchitectureMap[a.arch] = a
    for s in a.synonyms:
        SupportedArchitectureMap[s] = a
|
barnone/EigenD
|
tools/packages/SCons/Tool/MSCommon/arch.py
|
Python
|
gpl-3.0
| 2,011
|
from django.db import models
from django.utils import simplejson
# Create your models here.
class Log(models.Model):
    """A single client-side interaction event recorded by the app."""
    # Participant ID. Serves as a session ID.
    pid = models.CharField(max_length=16)
    # Task ID. Serves as a unique task ID.
    tid = models.IntegerField(default=0)
    # Which module is the action from?
    module = models.CharField(max_length=64)
    # Which action has been made by the client?
    action = models.CharField(max_length=64)
    # Details about the action (parameters, numbers, position, etc.)
    message = models.CharField(max_length=256)
    # URL parameters
    params = models.CharField(max_length=256)
    # video ID
    video = models.CharField(max_length=32)
    is_admin = models.BooleanField(default=False)
    added_at = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        # Fixed: previously returned self.username, which is not a field
        # on this model and raised AttributeError whenever the object was
        # rendered (e.g. in the admin). The participant id is the closest
        # identifying field.
        return self.pid
# def toJSON(self):
# return simplejson.dumps(self, default=dthandler, sort_keys=True)
|
pmitros/LectureScapeBlock
|
lecturescape/lecturescape/app/models.py
|
Python
|
agpl-3.0
| 977
|
import os
import sys
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
# Derive a var/ directory next to the Python installation; media and
# static files are collected beneath it. NOTE: creating the directory
# here is an import-time side effect of loading settings.
PYTHON_BIN = os.path.dirname(sys.executable)
VAR_ROOT = os.path.join(os.path.dirname(PYTHON_BIN), 'var')
if not os.path.exists(VAR_ROOT):
    os.mkdir(VAR_ROOT)

#==============================================================================
# I18N
#==============================================================================

TIME_ZONE = 'Europe/Berlin'
LANGUAGE_CODE = 'de-de'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True

#==============================================================================
# Static File Handling
#==============================================================================

MEDIA_ROOT = os.path.join(VAR_ROOT, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(VAR_ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

#==============================================================================
# Application
#==============================================================================

# DEBUG is off by default; override in an environment-specific settings file.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'pagination.middleware.PaginationMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Extend Django's default context processors rather than replacing them.
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.request',
)
ROOT_URLCONF = 'django_de.urls'
WSGI_APPLICATION = 'django_de.wsgi.application'
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.markup',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'south',
    'pagination',
    'django_de',
    'django_de.news',
    'django_de.events',
    'django_de.versions',
)

#==============================================================================
# Logging
#==============================================================================

# Errors on requests are mailed to ADMINS (only when DEBUG is False);
# a console handler is defined but not attached to any logger by default.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'simple': {
            'format': '[%(levelname)s] %(name)s %(message)s'
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

#==============================================================================
# Caches
#==============================================================================

# In-process memory cache; suitable for development, not shared between
# processes.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
|
django-de/django-de-v3
|
django_de/global_settings.py
|
Python
|
bsd-3-clause
| 3,699
|
# -*- coding:utf8 -*-
# Author: shizhenyu96@gamil.com
# github: https://github.com/imndszy
from . import admin
from flask import render_template, request, redirect, url_for
from app.admin.functions import admin_login_required
from app.models import Activity
@admin.route('/')
@admin_login_required
def index():
    """Render the admin landing page (login required)."""
    return render_template('admin/admin_index.html')
@admin.route('/publish')
@admin_login_required
def publish():
    """Render the activity publishing page (login required)."""
    return render_template('admin/admin.html')
@admin.route('/login')
def login():
    """Render the admin login form (no login required)."""
    return render_template('admin/login.html')
# Activity detail page
@admin.route('/admin_detail/<int:acid>')
@admin_login_required
def detail(acid):
    """Render the admin detail page for activity `acid`.

    Redirects to the admin index when the id is falsy or no matching
    activity exists.
    """
    if acid:
        activity = Activity.query.filter_by(acid=acid).first()
        # Fixed: a missing activity used to raise AttributeError on
        # `activity.return_dict()`; redirect to the index instead.
        if activity is not None:
            sth = activity.return_dict()
            if sth.get('actual_stus') is None:
                sth['actual_stus'] = 0
            return render_template('admin/admin_detail.html', title=sth['subject'],
                                   introduce=sth['introduce'],
                                   number=str(sth.get('actual_stus')) + '/' + str(sth['required_stus']),
                                   acid=acid, ac_place=activity.ac_place, ac_start=activity.start_time)
    return redirect(url_for('admin.index'))
|
StarInworld/voluntary
|
app/admin/views.py
|
Python
|
mit
| 1,236
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
|
rapidpro/ureport
|
ureport/__init__.py
|
Python
|
agpl-3.0
| 255
|
from flavorsync.parser.parser import Parser
from flask import json
class JSONParser(Parser):
    """Parser that serializes flavor model objects into a JSON string."""

    def from_model(self, data):
        # `default` falls back to the instance __dict__ for nested objects
        # that json cannot serialize natively.
        return json.dumps(data.to_dict(), default=lambda o: o.__dict__)
|
Fiware/ops.Flavor-sync
|
flavorsync/parser/json/json_parser.py
|
Python
|
apache-2.0
| 197
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import time
import sys
import os
import threading
import traceback
import json
import Queue
from network import Network
from util import print_msg
from simple_config import SimpleConfig
class NetworkProxy(threading.Thread):
    """Client side of the daemon (Python 2 code).

    Connects to a running NetworkServer over a local TCP socket, sends
    newline-delimited JSON requests and dispatches responses to
    registered callbacks.
    """

    def __init__(self, config = {}):
        # NOTE(review): mutable default argument; callers passing no
        # config share one dict. It is only read here, but worth fixing.
        threading.Thread.__init__(self)
        self.daemon = True
        self.config = SimpleConfig(config) if type(config) == type({}) else config
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.daemon_port = config.get('daemon_port', 8000)
        self.message_id = 0               # monotonically increasing request id
        self.unanswered_requests = {}     # id -> (method, params, callback)
        self.subscriptions = {}           # callback -> list of subscribed messages
        self.debug = False
        self.lock = threading.Lock()

    def start(self, start_daemon=False):
        """Connect to the daemon port; optionally fork a daemon if none is
        listening. Returns True on success, False otherwise."""
        daemon_started = False
        while True:
            try:
                self.socket.connect(('', self.daemon_port))
                threading.Thread.start(self)
                return True
            except socket.error:
                if not start_daemon:
                    return False
                elif not daemon_started:
                    print "Starting daemon [%s]"%self.config.get('server')
                    daemon_started = True
                    # Double fork + setsid so the daemon detaches from the
                    # controlling terminal.
                    pid = os.fork()
                    if (pid == 0): # The first child.
                        os.chdir("/")
                        os.setsid()
                        os.umask(0)
                        pid2 = os.fork()
                        if (pid2 == 0): # Second child
                            server = NetworkServer(self.config)
                            try:
                                server.main_loop()
                            except KeyboardInterrupt:
                                print "Ctrl C - Stopping server"
                                sys.exit(1)
                            sys.exit(0)
                else:
                    # Daemon was forked; wait briefly for it to listen.
                    time.sleep(0.1)

    def parse_json(self, message):
        """Split one newline-terminated JSON object off the front of
        `message`; returns (parsed_or_None, remaining_buffer)."""
        s = message.find('\n')
        if s==-1:
            return None, message
        j = json.loads( message[0:s] )
        return j, message[s+1:]

    def run(self):
        # read responses and trigger callbacks
        message = ''
        while True:
            try:
                data = self.socket.recv(1024)
            except:
                data = ''
            if not data:
                # Socket closed (or recv failed): stop the reader thread.
                break
            message += data
            while True:
                response, message = self.parse_json(message)
                if response is not None:
                    self.process(response)
                else:
                    break
        print "NetworkProxy: exiting"

    def process(self, response):
        # runs callbacks
        if self.debug: print "<--", response
        msg_id = response.get('id')
        with self.lock:
            method, params, callback = self.unanswered_requests.pop(msg_id)
        result = response.get('result')
        callback(None, {'method':method, 'params':params, 'result':result, 'id':msg_id})

    def subscribe(self, messages, callback):
        # detect if it is a subscription
        with self.lock:
            if self.subscriptions.get(callback) is None:
                self.subscriptions[callback] = []
            for message in messages:
                if message not in self.subscriptions[callback]:
                    self.subscriptions[callback].append(message)
        self.do_send( messages, callback )

    def do_send(self, messages, callback):
        """return the ids of the requests that we sent"""
        out = ''
        ids = []
        for m in messages:
            method, params = m
            request = json.dumps( { 'id':self.message_id, 'method':method, 'params':params } )
            self.unanswered_requests[self.message_id] = method, params, callback
            ids.append(self.message_id)
            if self.debug: print "-->", request
            self.message_id += 1
            out += request + '\n'
        while out:
            # send() may be partial; loop until the buffer is drained.
            sent = self.socket.send( out )
            out = out[sent:]
        return ids

    def synchronous_get(self, requests, timeout=100000000):
        """Send `requests` and block until every response arrives; results
        are returned in the same order as the requests."""
        queue = Queue.Queue()
        ids = self.do_send(requests, lambda i,x: queue.put(x))
        id2 = ids[:]
        res = {}
        while ids:
            r = queue.get(True, timeout)
            _id = r.get('id')
            if _id in ids:
                ids.remove(_id)
                res[_id] = r.get('result')
        out = []
        for _id in id2:
            out.append(res[_id])
        return out

    # Convenience wrappers around synchronous_get for single requests.
    def get_servers(self):
        return self.synchronous_get([('network.get_servers',[])])[0]

    def get_header(self, height):
        return self.synchronous_get([('network.get_header',[height])])[0]

    def get_local_height(self):
        return self.synchronous_get([('network.get_local_height',[])])[0]

    def is_connected(self):
        return self.synchronous_get([('network.is_connected',[])])[0]

    def is_up_to_date(self):
        return self.synchronous_get([('network.is_up_to_date',[])])[0]

    def main_server(self):
        return self.synchronous_get([('network.main_server',[])])[0]

    def stop(self):
        return self.synchronous_get([('daemon.shutdown',[])])[0]

    def trigger_callback(self, cb):
        # No-op: proxy clients do not receive Network trigger callbacks.
        pass
class ClientThread(threading.Thread):
    """Daemon-side handler for one connected client (Python 2 code).

    Reads newline-delimited JSON messages from the client socket, routes
    them to the Network object, and writes responses back on the same
    socket.
    """

    def __init__(self, server, network, socket):
        threading.Thread.__init__(self)
        self.server = server
        self.daemon = True
        self.s = socket
        # Short timeout so the read loop can interleave sending queued
        # responses with blocking on recv.
        self.s.settimeout(0.1)
        self.network = network
        self.queue = Queue.Queue()            # responses pending delivery
        self.unanswered_requests = {}         # network id -> client id
        self.debug = False

    def run(self):
        message = ''
        while True:
            # Flush any pending responses before (re)blocking on recv.
            self.send_responses()
            try:
                data = self.s.recv(1024)
            except socket.timeout:
                continue
            if not data:
                break
            message += data
            while True:
                cmd, message = self.parse_json(message)
                if not cmd:
                    break
                self.process(cmd)
        #print "client thread terminating"

    def parse_json(self, message):
        """Split one newline-terminated JSON object off the front of
        `message`; returns (parsed_or_None, remaining_buffer)."""
        n = message.find('\n')
        if n==-1:
            return None, message
        j = json.loads( message[0:n] )
        return j, message[n+1:]

    def process(self, request):
        """Dispatch one client request: 'network.*' methods call the
        Network object directly, 'daemon.shutdown' stops the server, and
        everything else is forwarded to the network interface."""
        if self.debug: print "<--", request
        method = request['method']
        params = request['params']
        _id = request['id']
        if method.startswith('network.'):
            out = {'id':_id}
            try:
                f = getattr(self.network, method[8:])
            except AttributeError:
                out['error'] = "unknown method"
            try:
                out['result'] = f(*params)
            except BaseException as e:
                out['error'] =str(e)
            self.queue.put(out)
            return
        if method == 'daemon.shutdown':
            self.server.running = False
            self.queue.put({'id':_id, 'result':True})
            return
        def cb(i,r):
            # Translate the interface's request id back to the client's id.
            _id = r.get('id')
            if _id is not None:
                my_id = self.unanswered_requests.pop(_id)
                r['id'] = my_id
            self.queue.put(r)
        new_id = self.network.interface.send([(method, params)], cb) [0]
        self.unanswered_requests[new_id] = _id

    def send_responses(self):
        """Drain the response queue, writing each item to the socket."""
        while True:
            try:
                r = self.queue.get_nowait()
            except Queue.Empty:
                break
            out = json.dumps(r) + '\n'
            while out:
                # send() may be partial; loop until drained.
                n = self.s.send(out)
                out = out[n:]
            if self.debug: print "-->", r
class NetworkServer:
    """Daemon side: owns the Network instance and accepts local client
    connections, spawning a ClientThread per connection (Python 2 code).
    """

    def __init__(self, config):
        network = Network(config)
        if not network.start(wait=True):
            print_msg("Not connected, aborting.")
            sys.exit(1)
        self.network = network
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.daemon_port = config.get('daemon_port', 8000)
        self.server.bind(('', self.daemon_port))
        self.server.listen(5)
        # 1s accept timeout lets main_loop poll for idle shutdown.
        self.server.settimeout(1)
        self.running = False
        # Shut down after this many seconds without a client connection.
        self.timeout = config.get('daemon_timeout', 60)

    def main_loop(self):
        """Accept clients until `running` is cleared or no client has
        connected for `timeout` seconds."""
        self.running = True
        t = time.time()
        while self.running:
            try:
                connection, address = self.server.accept()
            except socket.timeout:
                if time.time() - t > self.timeout:
                    break
                continue
            t = time.time()
            client = ClientThread(self, self.network, connection)
            client.start()
if __name__ == '__main__':
    # Stand-alone entry point: run the daemon in the foreground with a
    # hard-coded default server.
    import simple_config
    config = simple_config.SimpleConfig({'verbose':True, 'server':'ecdsa.net:50002:s'})
    server = NetworkServer(config)
    try:
        server.main_loop()
    except KeyboardInterrupt:
        print "Ctrl C - Stopping server"
        sys.exit(1)
|
bitxbay/BitXBay
|
lib/daemon.py
|
Python
|
gpl-3.0
| 10,196
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets custom targeting values for the given predefined custom
targeting key.
To create custom targeting values, run
create_custom_targeting_keys_and_values.py. To determine which custom
targeting keys exist, run get_all_custom_targeting_keys_and_values.py."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
CUSTOM_TARGETING_KEY_ID = 'INSERT_CUSTOM_TARGETING_KEY_ID_HERE'
def main(client, key_id):
    """Fetch and print every custom targeting value under `key_id`,
    paging through results with the DFP filter statement."""
    # Initialize appropriate service.
    custom_targeting_service = client.GetService(
        'CustomTargetingService', version='v201411')
    # Bind the key id into the PQL query below.
    values = [{
        'key': 'keyId',
        'value': {
            'xsi_type': 'NumberValue',
            'value': key_id
        }
    }]
    query = 'WHERE customTargetingKeyId = :keyId'
    statement = dfp.FilterStatement(query, values)
    # Get custom targeting values by statement.
    while True:
        response = custom_targeting_service.getCustomTargetingValuesByStatement(
            statement.ToStatement())
        if 'results' in response:
            # Display results.
            for value in response['results']:
                print ('Custom targeting value with id \'%s\', name \'%s\', and display'
                       ' name \'%s\' was found.'
                       % (value['id'], value['name'], value['displayName']))
            # Advance to the next page of results.
            statement.offset += dfp.SUGGESTED_PAGE_LIMIT
        else:
            break
    print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
    # Initialize client object from the stored credentials file.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client, CUSTOM_TARGETING_KEY_ID)
|
cctaylor/googleads-python-lib
|
examples/dfp/v201411/custom_targeting_service/get_custom_targeting_values_by_statement.py
|
Python
|
apache-2.0
| 2,247
|
#
# Copyright (c) 2015 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import pprint
from typing import Any, Mapping, Optional
from xml.etree.ElementTree import ParseError
from flask import abort, redirect, request
from saml2 import BINDING_HTTP_POST, BINDING_HTTP_REDIRECT
from saml2.client import Saml2Client
from saml2.ident import decode
from saml2.response import LogoutResponse, UnsolicitedResponse
from werkzeug.wrappers import Response
from eduid_userdb import UserDB
from eduid_userdb.exceptions import MultipleUsersReturned, UserDoesNotExist
from eduid_userdb.user import User
from eduid_common.api.app import EduIDBaseApp
from eduid_common.api.utils import verify_relay_state
from eduid_common.session import EduidSession, session
from .cache import IdentityCache, OutstandingQueriesCache, StateCache
from .utils import SPConfig, get_saml_attribute
logger = logging.getLogger(__name__)
class BadSAMLResponse(Exception):
    """Raised when a SAML response cannot be verified or parsed."""
def get_authn_ctx(session_info):
    """
    Get the SAML2 AuthnContext of the currently logged in users session.

    session_info is a dict like

        {'authn_info': [('http://www.swamid.se/policy/assurance/al1',
                         ['https://dev.idp.eduid.se/idp.xml'])],
         ...
        }

    :param session_info: The SAML2 session_info
    :return: The first AuthnContext
    :rtype: string | None
    """
    try:
        return session_info['authn_info'][0][0]
    except (KeyError, IndexError):
        # Fixed: an *empty* 'authn_info' list used to raise an uncaught
        # IndexError; treat it the same as a missing key.
        return None
def get_authn_request(
    saml2_config: SPConfig,
    session: EduidSession,
    came_from: str,
    selected_idp: str,
    force_authn: bool = False,
    sign_alg: Optional[str] = None,
    digest_alg: Optional[str] = None,
):
    """
    Prepare a SAML2 authentication request for the selected IdP.

    The outstanding request id is remembered in the session so the
    response can later be matched in get_authn_response().

    :param saml2_config: pysaml2 SP configuration
    :param session: current eduID session
    :param came_from: URL to return to; sent as RelayState
    :param selected_idp: entity id of the IdP to authenticate against
    :param force_authn: request re-authentication even with an SSO session
    :param sign_alg: signature algorithm for the request, if any
    :param digest_alg: digest algorithm for the request, if any
    :return: the pysaml2 'info' structure (headers for the redirect)
    """
    kwargs = {
        'force_authn': str(force_authn).lower(),
    }
    logger.debug(f'Authn request args: {kwargs}')

    client = Saml2Client(saml2_config)
    try:
        (session_id, info) = client.prepare_for_authenticate(
            entityid=selected_idp,
            relay_state=came_from,
            binding=BINDING_HTTP_REDIRECT,
            sigalg=sign_alg,
            digest_alg=digest_alg,
            **kwargs,
        )
    except TypeError:
        # prepare_for_authenticate raises TypeError when no IdP can be
        # determined from the arguments/config.
        logger.error('Unable to know which IdP to use')
        raise

    # Record the outstanding query so the response can be validated later.
    oq_cache = OutstandingQueriesCache(session)
    oq_cache.set(session_id, came_from)
    return info
def get_authn_response(saml2_config: SPConfig, session: EduidSession, raw_response) -> Mapping:
    """
    Check a SAML response and return the 'session_info' pysaml2 dict.

    Example session_info:

    {'authn_info': [('urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport', [],
                     '2019-06-17T00:00:01Z')],
     'ava': {'eduPersonPrincipalName': ['eppn@eduid.se'],
             'eduidIdPCredentialsUsed': ['...']},
     'came_from': 'https://dashboard.eduid.se/profile/personaldata',
     'issuer': 'https://login.idp.eduid.se/idp.xml',
     'name_id': <saml2.saml.NameID object>,
     'not_on_or_after': 156000000,
     'session_index': 'id-foo'}

    :raises BadSAMLResponse: if the response fails verification or parsing
    :raises UnsolicitedResponse: if the response matches no outstanding query
    """
    client = Saml2Client(saml2_config, identity_cache=IdentityCache(session))

    # Outstanding queries are the requests we sent; the response must
    # match one of them to be accepted.
    oq_cache = OutstandingQueriesCache(session)
    outstanding_queries = oq_cache.outstanding_queries()

    try:
        # process the authentication response
        response = client.parse_authn_request_response(raw_response, BINDING_HTTP_POST, outstanding_queries)
    except AssertionError:
        logger.error('SAML response is not verified')
        raise BadSAMLResponse(
            """SAML response is not verified. May be caused by the response
            was not issued at a reasonable time or the SAML status is not ok.
            Check the IDP datetime setup"""
        )
    except ParseError as e:
        logger.error('SAML response is not correctly formatted: {!r}'.format(e))
        raise BadSAMLResponse(
            """SAML response is not correctly formatted and therefore the
            XML document could not be parsed.
            """
        )
    except UnsolicitedResponse as e:
        logger.exception('Unsolicited SAML response')
        # Extra debug to try and find the cause for some of these that seem to be incorrect
        logger.debug(f'Session: {session}')
        logger.debug(f'Outstanding queries cache: {oq_cache}')
        logger.debug(f'Outstanding queries: {outstanding_queries}')
        raise e

    if response is None:
        logger.error('SAML response is None')
        raise BadSAMLResponse("SAML response has errors. Please check the logs")

    # The query has been answered; drop it from the outstanding cache.
    session_id = response.session_id()
    oq_cache.delete(session_id)

    session_info = response.session_info()
    logger.debug('Session info:\n{!s}\n\n'.format(pprint.pformat(session_info)))
    return session_info
def authenticate(session_info: Mapping[str, Any], strip_suffix: Optional[str], userdb: UserDB) -> Optional[User]:
    """
    Locate a user using the identity found in the SAML assertion.

    :param session_info: Session info received by pysaml2 client
    :param strip_suffix: SAML scope to strip from the end of the eppn
    :param userdb: In what database to look for the user
    :returns: User, if found
    """
    if session_info is None:
        raise TypeError('Session info is None')

    attribute_values = get_saml_attribute(session_info, 'eduPersonPrincipalName')
    if not attribute_values:
        logger.error('Could not find attribute eduPersonPrincipalName in the SAML assertion')
        return None

    saml_user = attribute_values[0]

    # A scoped eduPersonPrincipalName (e.g. ending in "@example.com") may need
    # its scope removed before the database lookup.
    if strip_suffix and saml_user.endswith(strip_suffix):
        saml_user = saml_user[: -len(strip_suffix)]

    logger.debug(f'Looking for user with eduPersonPrincipalName == {repr(saml_user)}')

    try:
        return userdb.get_user_by_eppn(saml_user)
    except UserDoesNotExist:
        logger.error(f'No user with eduPersonPrincipalName = {repr(saml_user)} found')
    except MultipleUsersReturned:
        logger.error(f'There are more than one user with eduPersonPrincipalName == {repr(saml_user)}')
    return None
def saml_logout(sp_config: SPConfig, user: User, location: str) -> Response:
    """
    SAML Logout Request initiator.
    This function initiates the SAML2 Logout request
    using the pysaml2 library to create the LogoutRequest.

    :param sp_config: pysaml2 service-provider configuration
    :param user: the user being logged out (used for logging only)
    :param location: URL to redirect the user to after logout
    """
    if '_saml2_session_name_id' not in session:
        # No SAML NameID in the session: nothing to tell the IdP, so just drop
        # the local session and redirect.
        logger.warning(f'The session does not contain the subject id for user {user}')
        session.invalidate()
        logger.info(f'Invalidated session for {user}')
        logger.info(f'Redirection user to {location} for logout')
        return redirect(location)
    # Since we have a subject_id, call the IdP using SOAP to do a global logout
    state = StateCache(session)  # _saml2_state in the session
    identity = IdentityCache(session)  # _saml2_identities in the session
    client = Saml2Client(sp_config, state_cache=state, identity_cache=identity)
    _subject_id = decode(session['_saml2_session_name_id'])
    logger.info(f'Initiating global logout for {_subject_id}')
    logouts = client.global_logout(_subject_id)
    logger.debug(f'Logout response: {logouts}')
    # Invalidate session, now that Saml2Client is done with the information within.
    session.invalidate()
    logger.info(f'Invalidated session for {user}')
    # NOTE(review): only the first entry of the logout result is inspected —
    # assumes a single IdP per session; confirm multi-IdP sessions cannot occur.
    loresponse = list(logouts.values())[0]
    # loresponse is a dict for REDIRECT binding, and LogoutResponse for SOAP binding
    if isinstance(loresponse, LogoutResponse):
        if loresponse.status_ok():
            location = verify_relay_state(request.form.get('RelayState', location), location)
            return redirect(location)
        else:
            logger.error(f'The logout response was not OK: {loresponse}')
            abort(500)
    # REDIRECT binding: extract the Location header from the prepared response.
    headers_tuple = loresponse[1]['headers']
    location = headers_tuple[0][1]
    logger.info(f'Redirecting {user} to {location} after successful logout')
    return redirect(location)
|
SUNET/eduid-common
|
src/eduid_common/authn/eduid_saml2.py
|
Python
|
bsd-3-clause
| 9,629
|
# Copyright (C) 2014 Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides a simple interface for loading a shared library via ctypes,
allowing it to be specified in an OS-independent way and searched for preferentially
according to the paths that pkg-config specifies.
"""
from __future__ import print_function
import os, fnmatch, ctypes, sys, subprocess
from ctypes.util import find_library
from collections import deque
try:
from subprocess import getoutput
except ImportError:
from commands import getoutput
def pkg_config(pkg_libraries):
    """Use pkg-config to query for the location of libraries, library directories,
       and header directories

       Arguments:
           pkg_libraries(list): A list of packages as strings

       Returns:
           libraries(list), library_dirs(list), include_dirs(list)

       Note: exits the whole process (sys.exit(1)) if any requested package
       is unknown to pkg-config.
    """
    libraries=[]
    library_dirs=[]
    include_dirs=[]
    # Check that we have the packages
    for pkg in pkg_libraries:
        if os.system('pkg-config --exists %s 2>/dev/null' % pkg) == 0:
            pass
        else:
            print("Could not find library {0}".format(pkg))
            sys.exit(1)
    # Get the pkg-config flags
    if len(pkg_libraries)>0 :
        # PKG_CONFIG_ALLOW_SYSTEM_CFLAGS explicitly lists system paths.
        # On system-wide LAL installs, this is needed for swig to find lalswig.i
        for token in getoutput("PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config --libs --cflags %s" % ' '.join(pkg_libraries)).split():
            # -l<name>, -L<dir> and -I<dir> tokens are sorted into the three lists
            if token.startswith("-l"):
                libraries.append(token[2:])
            elif token.startswith("-L"):
                library_dirs.append(token[2:])
            elif token.startswith("-I"):
                include_dirs.append(token[2:])
    return libraries, library_dirs, include_dirs
def pkg_config_header_strings(pkg_libraries):
    """Return '-I<dir>' compiler flags for the given pkg-config packages."""
    _, _, header_dirs = pkg_config(pkg_libraries)
    return ["-I" + header_dir for header_dir in header_dirs]
def pkg_config_check_exists(package):
    """Return True if pkg-config knows about 'package', False otherwise."""
    # pkg-config exits with status 0 when the package is known.
    status = os.system('pkg-config --exists {0} 2>/dev/null'.format(package))
    return status == 0
def pkg_config_libdirs(packages):
    """
    Returns a list of all library paths that pkg-config says should be included when
    linking against the list of packages given as 'packages'. An empty return list means
    that the package may be found in the standard system locations, irrespective of
    pkg-config.

    :raises ValueError: if any package is not on the pkg-config search path.
    """
    # don't try calling pkg-config if NO_PKGCONFIG is set in environment
    if os.environ.get("NO_PKGCONFIG", None):
        return []
    # if calling pkg-config fails, don't continue and don't try again.
    # Fixed: the devnull handle was previously never closed (fd leak), and a
    # bare 'except:' hid unrelated errors; catch only what a missing/broken
    # pkg-config binary can raise.
    try:
        with open(os.devnull, 'w') as FNULL:
            subprocess.check_call(["pkg-config", "--version"], stdout=FNULL, close_fds=True)
    except (OSError, subprocess.CalledProcessError):
        print("PyCBC.libutils: pkg-config call failed, setting NO_PKGCONFIG=1",
              file=sys.stderr)
        os.environ['NO_PKGCONFIG'] = "1"
        return []
    # First, check that we can call pkg-config on each package in the list
    for pkg in packages:
        if not pkg_config_check_exists(pkg):
            raise ValueError("Package {0} cannot be found on the pkg-config search path".format(pkg))
    libdirs = []
    for token in getoutput("PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 pkg-config --libs-only-L {0}".format(' '.join(packages))).split():
        if token.startswith("-L"):
            libdirs.append(token[2:])
    return libdirs
def get_libpath_from_dirlist(libname, dirs):
    """
    Search the directories in 'dirs' (in order) for a library named 'libname',
    where 'libname' omits any prefix such as 'lib' and any suffix such as
    '.so', '.dylib' or a version number.  Within the first directory that
    contains at least one match, the lexicographically last matching filename
    is returned as a full path.  Posix, mac and cygwin/windows DLL naming
    conventions are covered without attempting to detect the running OS, so a
    directory holding both '.so' and '.dylib' files gives unspecified results.
    Returns None when nothing matches anywhere.
    """
    patterns = ('lib' + libname + '.so*',
                'lib' + libname + '.dylib*',
                libname + '.dll',
                'cyg' + libname + '-*.dll')
    remaining = deque(dirs)
    while remaining:
        candidate_dir = remaining.popleft()
        try:
            entries = os.listdir(candidate_dir)
        except OSError:
            # Unreadable or missing directory: just move on to the next one.
            continue
        matches = [entry for entry in entries
                   if any(fnmatch.fnmatch(entry, pat) for pat in patterns)]
        # Several versioned copies may match; keep the highest-numbered
        # (lexicographically last) one.
        if matches:
            return os.path.join(candidate_dir, max(matches))
    return None
def get_ctypes_library(libname, packages, mode=None):
    """
    Load 'libname' (given without 'lib' prefix, extension or version suffix)
    as a ctypes.CDLL.  Candidate directories are taken first from
    LD_LIBRARY_PATH, then from pkg-config output for 'packages'; if neither
    yields a hit, ctypes.util.find_library is consulted as a fallback.
    If 'mode' is given it is passed through to CDLL.  Returns None when the
    library cannot be located at all.
    """
    search_dirs = []
    # First try to get from LD_LIBRARY_PATH
    if "LD_LIBRARY_PATH" in os.environ:
        search_dirs.extend(os.environ["LD_LIBRARY_PATH"].split(":"))
    # Next try to append via pkg_config
    try:
        search_dirs.extend(pkg_config_libdirs(packages))
    except ValueError:
        # Unknown package: fall through to the system search path.
        pass
    # An empty search_dirs simply yields None here.
    path = get_libpath_from_dirlist(libname, search_dirs)
    if path is None:
        # Not a full path, but something CDLL can resolve by itself.
        path = find_library(libname)
    if path is None:
        return None
    if mode is None:
        return ctypes.CDLL(path)
    return ctypes.CDLL(path, mode=mode)
|
stevereyes01/pycbc
|
pycbc/libutils.py
|
Python
|
gpl-3.0
| 7,325
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 13 12:27:48 2014
@author: Sebastian Held <sebastian.held@gmx.de>
"""
"""
Dieses Programm erzeugt eine iCalendar Datei, die die Müll-Abholtage enthält.
Auf https://www.mywastewatcher.de/abfallkalender/ die eigene Straße einstellen und
eine csv-Datei erzeugen lassen (Abfallkalender_Hamminkeln.csv).
Die csv-Datei beginnt wie folgt:
```
Farbe;Abfallart;Abfuhrtag;Wochentag;V;Bemerkung
rot;SC;04.01.2017;Mittwoch;;
grau;RA;05.01.2017;Donnerstag;;
```
Aufruf des Programmes:
python AbfallkalenderHamminkeln.py gridAbfuhrtermine.csv
es wird die Datei gridAbfuhrtermine.csv.ics erzeugt.
Diese Datei kann auf eine ownCloud hochgeladen werden und dann per Klick in
den Kalender importiert werden.
"""
import sys
import uuid
# tested with icalendar-3.8.4 from http://icalendar.readthedocs.org
from icalendar import Calendar, Event
from datetime import datetime, timedelta
import csv
def main():
    """Convert the waste-collection CSV given as argv[1] into an iCalendar
    file written next to it as '<input>.ics', then exit with status 0.

    Malformed rows (including the header line) are skipped silently, as the
    input file is best-effort data exported from a web site.
    """
    # create calendar
    cal = Calendar()
    cal.add('version','2.0')
    cal.add('prodid','-//Ingenieurbüro Held//AbfallkalenderHamminkeln_Python_import//DE')
    # Fixed: use context managers so files are always closed, and narrow the
    # previous bare 'except:' clauses (which also swallowed SystemExit and
    # KeyboardInterrupt) to the errors malformed rows can actually raise.
    with open(sys.argv[1], mode='rt', encoding='iso-8859-1') as f:
        reader = csv.reader(f, delimiter=';', quoting=csv.QUOTE_NONE)
        for row in reader:
            try:
                # Dates appear both as dd.mm.yy and dd.mm.yyyy.
                try:
                    datum = datetime.strptime(row[2],'%d.%m.%y')
                except ValueError:
                    datum = datetime.strptime(row[2],'%d.%m.%Y')
                event = Event()
                event.add('dtstamp',datetime.now())
                event.add('dtstart',datum.date())
                # ownCloud is broken, it needs a dtend property
                event.add('dtend',(datum + timedelta(days=1)).date())
                event.add('summary','Müll ' + row[0])
                # rfc5545 corrects the specification to require uid
                event.add('uid',uuid.uuid4().hex)
                event.add('description',row[5].strip())
                cal.add_component(event)
            except (ValueError, IndexError):
                # Header line or malformed row: skip it (best effort).
                pass
    # write to disk
    with open(sys.argv[1]+'.ics', 'wb') as out:
        out.write(cal.to_ical())
    sys.exit(0)
if __name__ == '__main__':
main()
|
sibbi77/Abfallkalender
|
AbfallkalenderHamminkeln.py
|
Python
|
gpl-3.0
| 2,309
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.core.tasks.task import Task
class NoopExecTask(Task):
  """A base class for tasks which do nothing but produce some product_type(s).
  Useful when scheduling a specific goal, as one can install subclasses of this which produce a
  known product_type into that goal, then depend on those products elsewhere.
  Generally tasks depend on a specific product or products, as opposed to a given goal, and do
  not need this, but some tasks, eg "compile changed targets" just know they want the "compile"
  goal to be run, rather than a specific product, eg jvm classfiles.
  """
  def execute(self):
    # Deliberately does nothing: scheduling this task is the whole point.
    pass
class NoopCompile(NoopExecTask):
  """A no-op that provides a product type that can be used to force scheduling."""
  @classmethod
  def product_types(cls):
    # Other tasks can require 'ran_compile' to force the 'compile' goal to run.
    return ['ran_compile']
class NoopTest(NoopExecTask):
  """A no-op that provides a product type that can be used to force scheduling."""
  @classmethod
  def product_types(cls):
    # Other tasks can require 'ran_tests' to force the test goal to run.
    return ['ran_tests']
|
megaserg/pants
|
src/python/pants/backend/core/tasks/noop.py
|
Python
|
apache-2.0
| 1,306
|
import os
import sys
import string
import zipfile
import tempfile
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from bento.private.bytecode import \
bcompile
from bento.core.utils import \
pprint, ensure_dir
from bento._config \
import \
IPKG_PATH
from bento.core import \
PackageMetadata
from bento.conv import \
to_distutils_meta
from bento.installed_package_description import \
InstalledPkgDescription, iter_files
from bento.commands.errors \
import \
UsageException
from bento.commands.core import \
Command
from bento.commands.egg_utils import \
EggInfo, egg_info_dirname
from bento.commands.wininst_utils import \
wininst_filename, create_exe
import bento.compat.api as compat
class BuildWininstCommand(Command):
    # bentomaker subcommand that builds a windows installer from the
    # installed-package description produced by an earlier build step.
    long_descr = """\
Purpose: build wininst
Usage: bentomaker build_wininst [OPTIONS]"""
    short_descr = "build wininst."
    def run(self, ctx):
        # Parse command-line options; only --help is acted upon here.
        argv = ctx.get_command_arguments()
        p = ctx.options_context.parser
        o, a = p.parse_args(argv)
        if o.help:
            p.print_help()
            return
        # Load the installed-package description from the build tree and
        # hand it to create_wininst.
        n = ctx.build_node.make_node(IPKG_PATH)
        ipkg = InstalledPkgDescription.from_file(n.abspath())
        create_wininst(ipkg, src_root_node=ctx.build_node, build_node=ctx.build_node)
def create_wininst(ipkg, src_root_node, build_node, egg_info=None, wininst=None):
    # Build a windows installer executable from an installed-package description.
    #
    # ipkg: InstalledPkgDescription of the built package
    # src_root_node / build_node: source- and build-tree node objects
    # egg_info: EggInfo; derived from ipkg when not given
    # wininst: output filename; derived from the package fullname when not given
    #
    # The package contents plus egg-info metadata are zipped into a temp
    # archive, which create_exe then embeds into the installer executable.
    meta = PackageMetadata.from_ipkg(ipkg)
    if egg_info is None:
        egg_info = EggInfo.from_ipkg(ipkg)
    # XXX: do this correctly, maybe use same as distutils ?
    if wininst is None:
        wininst = wininst_filename(os.path.join("dist", meta.fullname))
    ensure_dir(wininst)
    egg_info_dir = os.path.join("PURELIB", egg_info_dirname(meta.fullname))
    fid, arcname = tempfile.mkstemp(prefix="zip")
    zid = compat.ZipFile(arcname, "w", compat.ZIP_DEFLATED)
    try:
        for filename, cnt in egg_info.iter_meta(build_node):
            zid.writestr(os.path.join(egg_info_dir, filename), cnt)
        # Map bento path variables onto the sections a wininst installer
        # understands (SCRIPTS/PURELIB/DATA); everything unknown goes to DATA.
        wininst_paths = compat.defaultdict(lambda: r"DATA\share\$pkgname")
        wininst_paths.update({"bindir": "SCRIPTS", "sitedir": "PURELIB",
                              "gendatadir": "$sitedir"})
        d = {}
        for k in ipkg._path_variables:
            d[k] = wininst_paths[k]
        ipkg.update_paths(d)
        file_sections = ipkg.resolve_paths(src_root_node.abspath())
        def write_content(source, target, kind):
            # 'kind' is unused here; kept for the iter_files call signature below.
            zid.write(source, target)
        for kind, source, target in iter_files(file_sections):
            write_content(source, target, kind)
    finally:
        zid.close()
        os.close(fid)
    create_exe(ipkg, arcname, wininst)
|
abadger/Bento
|
bento/commands/build_wininst.py
|
Python
|
bsd-3-clause
| 2,781
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the Room model: a named room belonging to an Office."""
    dependencies = [
        ('pppcemr', '0008_auto_20150901_1317'),
    ]
    operations = [
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('name', models.CharField(max_length=10)),
                ('office', models.ForeignKey(to='pppcemr.Office')),
            ],
        ),
    ]
|
sstebbins/pppcpro
|
pppcemr/migrations/0009_room.py
|
Python
|
agpl-3.0
| 595
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import htpc
import cherrypy
import jsonrpclib
import logging
from ts import norbits
from ts import yts
from ts import ka
from ts import getstrike
from ts import ptp
from ts import rarbg
from cherrypy.lib.auth2 import require
class Torrentsearch(object):
    """HTPC-Manager module that queries multiple torrent trackers/indexers
    (BTN, Norbits, PTP, rarbg, YTS, KAT, Strike) and aggregates the results.

    Registers its settings fields with htpc.MODULES on construction and
    exposes cherrypy endpoints for the search page, the search itself and
    the list of configured download clients.
    """
    def __init__(self):
        self.logger = logging.getLogger('modules.torrentsearch')
        self.rb = rarbg.Rarbg()
        htpc.MODULES.append({
            'name': 'Torrents',
            'id': 'torrentsearch',
            'fields': [
                {'type': 'bool', 'label': 'Enable', 'name': 'torrentsearch_enable'},
                {'type': 'text', 'label': 'Menu name', 'name': 'torrentsearch_name'},
                {'type': 'bool', 'label': 'Enable BTN', 'name': 'torrents_btn_enabled'},
                {'type': 'password', 'label': 'BTN apikey', 'name': 'torrentsearch_btn_apikey'},
                {'type': 'bool', 'label': 'Norbits', 'name': 'torrents_norbits_enabled'},
                {'type': 'text', 'label': 'Norbits username', 'name': 'torrents_norbits_username'},
                {'type': 'password', 'label': 'Norbits passkey', 'name': 'torrents_norbits_passkey'},
                {'type': 'bool', 'label': 'PTP', 'name': 'torrents_ptp_enabled'},
                {'type': 'text', 'label': 'PTP username', 'name': 'torrents_ptp_username'},
                {'type': 'password', 'label': 'PTP password', 'name': 'torrents_ptp_password'},
                {'type': 'password', 'label': 'PTP passkey', 'name': 'torrents_ptp_passkey'},
                {'type': 'bool', 'label': 'Rarbg', 'name': 'torrents_rarbg_enabled'},
                {'type': 'bool', 'label': 'YTS', 'name': 'torrents_yts_enabled'},
                {'type': 'bool', 'label': 'KAT', 'name': 'torrents_ka_enabled'},
                {'type': 'bool', 'label': 'Strike', 'name': 'torrents_getstrike_enabled', 'desc': 'DTH tracker'},
            ]
        })
    @cherrypy.expose()
    @require()
    def index(self, query='', **kwargs):
        """Render the torrent search page."""
        return htpc.LOOKUP.get_template('torrentsearch.html').render(query=query, scriptname='torrentsearch', torrentproviders=self.torrentproviders())
    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def search(self, query=None, provider='all'):
        """Search one provider (or every enabled one) for 'query' and return
        the combined result list as JSON."""
        self.logger.debug(query)
        self.logger.debug(provider)
        r = []
        if provider == 'all':
            if htpc.settings.get('torrents_btn_enabled'):
                r += self.btn(query)
            if htpc.settings.get('torrents_norbits_enabled'):
                r += self.search_norbits(query, 'all')
            if htpc.settings.get('torrents_yts_enabled'):
                r += self.search_yts(query)
            if htpc.settings.get('torrents_ka_enabled'):
                r += self.search_ka(query)
            if htpc.settings.get('torrents_getstrike_enabled'):
                r += self.search_getstrike(query, 'all')
            if htpc.settings.get('torrents_ptp_enabled'):
                r += self.search_ptp(query, 'movie')
            if htpc.settings.get('torrents_rarbg_enabled'):
                r += self.search_rarbg(query, None)
        elif provider == 'btn':
            if htpc.settings.get('torrents_btn_enabled'):
                r += self.btn(query)
        elif provider == 'rarbg':
            if htpc.settings.get('torrents_rarbg_enabled'):
                r += self.search_rarbg(query, None)
        elif provider == 'yts':
            if htpc.settings.get('torrents_yts_enabled'):
                r += self.search_yts(query)
        elif provider == 'getstrike':
            if htpc.settings.get('torrents_getstrike_enabled'):
                r += self.search_getstrike(query, 'all')
        elif provider == 'kat':
            # Removed a leftover profane debug statement that used to be here.
            if htpc.settings.get('torrents_ka_enabled'):
                r += self.search_ka(query)
        # Added: norbits and PTP are advertised by torrentproviders() but had
        # no single-provider branch, so searching them individually returned
        # nothing. Returning [] for disabled providers is unchanged.
        elif provider == 'norbits':
            if htpc.settings.get('torrents_norbits_enabled'):
                r += self.search_norbits(query, 'all')
        elif provider == 'ptp':
            if htpc.settings.get('torrents_ptp_enabled'):
                r += self.search_ptp(query, 'movie')
        self.logger.debug('Found %s torrents in total' % len(r))
        return r
    def btn(self, query=None):
        """Search broadcasthe.net via its JSON-RPC API; returns a (possibly
        empty) list of torrent dicts with BrowseURL/Provider added."""
        result = None
        try:
            btn = jsonrpclib.Server('http://api.btnapps.net')
            result = btn.getTorrents(htpc.settings.get('torrentsearch_btn_apikey', ''), query, 999)
        except Exception as e:
            self.logger.error("Failed to fetch search results from BTN %s" % e)
            return []
        search_results = []
        try:
            if result:
                if 'torrents' in result:
                    for k, v in result['torrents'].iteritems():
                        v["BrowseURL"] = 'https://broadcasthe.net/torrents.php?id=%s&torrentid=%s' % (v['GroupID'], v['TorrentID'])
                        v["Provider"] = "btn"
                        search_results.append(v)
                    return search_results
                else:
                    return search_results
            else:
                return search_results
        except Exception as e:
            self.logger.error("Failed to fetch search results from BTN %s" % e)
            return []
    def torrentproviders(self):
        """Return the names of all providers that are both enabled and fully
        configured (credentials present where required)."""
        torrentproviders = []
        # Fixed: this used to read 'torrents_btnapikey', but the setting is
        # registered (and read in btn()) as 'torrentsearch_btn_apikey', so
        # BTN never showed up in the provider list.
        if htpc.settings.get('torrentsearch_btn_apikey') and htpc.settings.get('torrents_btn_enabled') == 1:
            torrentproviders.append('BTN')
        if (htpc.settings.get('torrents_norbits_enabled') == 1 and
                htpc.settings.get('torrents_norbits_passkey') and htpc.settings.get('torrents_norbits_username')):
            torrentproviders.append('norbits')
        if htpc.settings.get('torrents_yts_enabled') == 1:
            torrentproviders.append('YTS')
        if htpc.settings.get('torrents_ka_enabled') == 1:
            torrentproviders.append('KAT')
        if htpc.settings.get('torrents_getstrike_enabled') == 1:
            torrentproviders.append('GetStrike')
        if (htpc.settings.get('torrents_ptp_enabled') == 1 and htpc.settings.get('torrents_ptp_passkey')
                and htpc.settings.get('torrents_ptp_username') and htpc.settings.get('torrents_ptp_password')):
            torrentproviders.append('PTP')
        if htpc.settings.get('torrents_rarbg_enabled') == 1:
            torrentproviders.append('rarbg')
        return torrentproviders
    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def getclients(self):
        """Return the configured download clients (active or not) with the
        URL path each one accepts torrents on."""
        l = []
        qbt = {}
        trans = {}
        utor = {}
        delu = {}
        if htpc.settings.get('qbittorrent_enable', ''):
            qbt['title'] = 'qBittorrent'
            qbt['active'] = 1
            qbt['path'] = 'qbittorrent/to_client/'
            l.append(qbt)
        else:
            qbt['title'] = 'qBittorrent'
            qbt['active'] = 0
            # NOTE(review): the inactive path differs from the active one
            # ('command' vs 'to_client') unlike the other clients — confirm
            # whether that is intentional.
            qbt['path'] = 'qbittorrent/command/'
            l.append(qbt)
        if htpc.settings.get('transmission_enable', ''):
            trans['title'] = 'transmission'
            trans['active'] = 1
            trans['path'] = 'transmission/to_client/'
            l.append(trans)
        else:
            trans['title'] = 'transmission'
            trans['active'] = 0
            trans['path'] = 'transmission/to_client/'
            l.append(trans)
        if htpc.settings.get('deluge_enable', ''):
            delu['title'] = 'Deluge'
            delu['active'] = 1
            delu['path'] = 'deluge/to_client'
            l.append(delu)
        else:
            delu['title'] = 'Deluge'
            delu['active'] = 0
            delu['path'] = 'deluge/to_client'
            l.append(delu)
        if htpc.settings.get('utorrent_enable', ''):
            utor['title'] = 'uTorrent'
            utor['active'] = 1
            utor['path'] = 'utorrent/to_client/'
            l.append(utor)
        else:
            utor['title'] = 'uTorrent'
            utor['active'] = 0
            utor['path'] = 'utorrent/to_client/'
            l.append(utor)
        return l
    def search_norbits(self, q, cat):
        """Search the Norbits tracker."""
        results = norbits.search(q, cat)
        return results
    def search_yts(self, q, cat=None):
        """Search YTS (movies)."""
        return yts.YTS().search(q, cat)
    def search_ka(self, q, cat="all"):
        """Search KickassTorrents."""
        return ka.search(q, cat)
    def search_getstrike(self, q, cat):
        """Search the Strike tracker."""
        return getstrike.search(q, cat)
    def search_ptp(self, q, cat):
        """Search PassThePopcorn."""
        return ptp.search(q, cat)
    def search_rarbg(self, q, cat):
        """Search rarbg via the shared Rarbg client instance."""
        return self.rb.search(q, cat)
|
clausqr/HTPC-Manager
|
modules/torrentsearch.py
|
Python
|
mit
| 8,337
|
# coding=utf-8
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import traceback
import urllib
import re
import datetime
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard import db
from sickbeard import classes
from sickbeard.show_name_helpers import allPossibleShowNames, sanitizeSceneName
from unidecode import unidecode
class KATProvider(generic.TorrentProvider):
    """Search provider for KickAssTorrents (kat.cr), driven by its RSS feeds."""
    def __init__(self):
        generic.TorrentProvider.__init__(self, "KickAssTorrents")
        self.supportsBacklog = True
        self.public = True
        self.enabled = False
        self.confirmed = False
        self.ratio = None
        self.minseed = None
        self.minleech = None
        self.cache = KATCache(self)
        self.urls = {'base_url': 'https://kat.cr/'}
        self.url = self.urls['base_url']
    def isEnabled(self):
        # True when the user enabled this provider in settings.
        return self.enabled
    def imageName(self):
        # Icon filename shown in the provider list.
        return 'kat.png'
    def _get_season_search_strings(self, ep_obj):
        # Build {'Season': [...]} query strings for a whole-season search.
        search_string = {'Season': []}
        for show_name in set(allPossibleShowNames(self.show)):
            ep_string = sanitizeSceneName(show_name) + ' '
            if ep_obj.show.air_by_date or ep_obj.show.sports:
                # Air-by-date/sports shows: search by year only.
                ep_string += str(ep_obj.airdate).split('-')[0]
                search_string['Season'].append(ep_string)
            elif ep_obj.show.anime:
                ep_string += "%02d" % ep_obj.scene_absolute_number
                search_string['Season'].append(ep_string)
            else:
                ep_string = '%s S%02d -S%02dE category:tv' % (sanitizeSceneName(show_name), ep_obj.scene_season, ep_obj.scene_season) #1) showName SXX -SXXE
                search_string['Season'].append(ep_string)
                ep_string = '%s "Season %d" -Ep* category:tv' % (sanitizeSceneName(show_name), ep_obj.scene_season) # 2) showName "Season X"
                search_string['Season'].append(ep_string)
        return [search_string]
    def _get_episode_search_strings(self, ep_obj, add_string=''):
        # Build {'Episode': [...]} query strings for a single-episode search;
        # add_string (e.g. 'PROPER|REPACK') is appended when given.
        search_string = {'Episode': []}
        for show_name in set(allPossibleShowNames(self.show)):
            ep_string = sanitizeSceneName(show_name) + ' '
            if self.show.air_by_date:
                ep_string += str(ep_obj.airdate).replace('-', ' ')
            elif self.show.sports:
                ep_string += str(ep_obj.airdate).replace('-', ' ') + '|' + ep_obj.airdate.strftime('%b')
            elif self.show.anime:
                ep_string += "%02d" % ep_obj.scene_absolute_number
            else:
                ep_string += sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
                                                                   'episodenumber': ep_obj.scene_episode} + '|' + \
                             sickbeard.config.naming_ep_type[0] % {'seasonnumber': ep_obj.scene_season,
                                                                   'episodenumber': ep_obj.scene_episode} + ' category:tv'
            if add_string:
                ep_string += ' ' + add_string
            search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
        return [search_string]
    def _get_size(self, item):
        # item is the 7-tuple built in _doSearch; size may be falsy.
        title, url, id, seeders, leechers, size, pubdate = item
        return size or -1
    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
        # Run the RSS searches for each mode in search_params and return a
        # list of (title, url, id, seeders, leechers, size, pubdate) tuples,
        # filtered by min seeders/leechers and (optionally) verified status.
        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                if isinstance(search_string, unicode):
                    search_string = unidecode(search_string)
                if mode != 'RSS':
                    searchURL = self.url + 'usearch/%s/?field=seeders&sorder=desc&rss=1' % urllib.quote_plus(search_string)
                else:
                    # RSS mode: latest TV uploads, no query string.
                    searchURL = self.url + 'tv/?field=time_add&sorder=desc&rss=1'
                logger.log(u"Search string: " + searchURL, logger.DEBUG)
                try:
                    entries = self.cache.getRSSFeed(searchURL)['entries']
                    for item in entries or []:
                        try:
                            link = item['link']
                            id = item['guid']
                            title = item['title']
                            url = item['torrent_magneturi']
                            verified = bool(int(item['torrent_verified']) or 0)
                            seeders = int(item['torrent_seeds'])
                            leechers = int(item['torrent_peers'])
                            size = int(item['torrent_contentlength'])
                        except (AttributeError, TypeError, KeyError):
                            continue
                        if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
                            continue
                        if self.confirmed and not verified:
                            logger.log(u"KAT Provider found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
                            continue
                        if not title or not url:
                            continue
                        # Feeds expose the publish date under several keys;
                        # try them in turn, falling back to 'now'.
                        try:
                            pubdate = datetime.datetime(*item['published_parsed'][0:6])
                        except AttributeError:
                            try:
                                pubdate = datetime.datetime(*item['updated_parsed'][0:6])
                            except AttributeError:
                                try:
                                    pubdate = datetime.datetime(*item['created_parsed'][0:6])
                                except AttributeError:
                                    try:
                                        pubdate = datetime.datetime(*item['date'][0:6])
                                    except AttributeError:
                                        pubdate = datetime.datetime.today()
                        item = title, url, id, seeders, leechers, size, pubdate
                        items[mode].append(item)
                except Exception, e:
                    logger.log(u"Failed to parsing " + self.name + " Traceback: " + traceback.format_exc(),
                               logger.ERROR)
            #For each search mode sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)
            results += items[mode]
        return results
    def _get_title_and_url(self, item):
        # Normalize the (title, url) pair out of a result tuple.
        title, url, id, seeders, leechers, size, pubdate = item
        if title:
            title = self._clean_title_from_provider(title)
        if url:
            url = url.replace('&amp;', '&')
        return (title, url)
    def findPropers(self, search_date=datetime.datetime.today()):
        # Look for PROPER/REPACK releases of episodes downloaded/snatched
        # since search_date.
        # NOTE(review): the mutable default 'search_date' is evaluated once at
        # import time, so it stays at process start-up time — confirm callers
        # always pass an explicit date.
        results = []
        myDB = db.DBConnection()
        sqlResults = myDB.select(
            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate, s.indexer FROM tv_episodes AS e' +
            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
        )
        if not sqlResults:
            return []
        for sqlshow in sqlResults:
            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
            if self.show:
                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
                for item in self._doSearch(searchString[0]):
                    title, url = self._get_title_and_url(item)
                    pubdate = item[6]
                    results.append(classes.Proper(title, url, pubdate, self.show))
        return results
    def seedRatio(self):
        # User-configured seed ratio (None = use global default).
        return self.ratio
class KATCache(tvcache.TVCache):
    """RSS cache for KATProvider with a minimum poll interval."""
    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)
        # only poll KickAss every 10 minutes max
        # NOTE(review): the comment above says 10 but the value below is 20
        # (minutes); the code is authoritative — confirm which was intended.
        self.minTime = 20
    def _getRSSData(self):
        # 'RSS' mode fetches the latest TV uploads feed (see _doSearch).
        search_params = {'RSS': ['rss']}
        return {'entries': self.provider._doSearch(search_params)}
# Module-level provider instance.
provider = KATProvider()
|
bbbenja/SickRage
|
sickbeard/providers/kat.py
|
Python
|
gpl-3.0
| 9,357
|
from foam.sfa.rspecs.elements.element import Element
class OpenFlowSwitch(Element):
    """RSpec element describing an OpenFlow switch (datapath)."""
    # Attribute names carried by this RSpec element.
    fields = [
        'component_id',
        'component_manager_id',
        'dpid',
        'port',
    ]
|
dana-i2cat/felix
|
ofam/src/src/foam/sfa/rspecs/elements/openflow_switch.py
|
Python
|
apache-2.0
| 182
|
'''
Damped scroll effect
====================
.. versionadded:: 1.7.0
This damped scroll effect will use the
:attr:`~kivy.effects.scroll.ScrollEffect.overscroll` to calculate the scroll
value, and slows going back to the upper or lower limit.
'''
__all__ = ('DampedScrollEffect',)
from kivy.effects.scroll import ScrollEffect
from kivy.properties import NumericProperty, BooleanProperty
from kivy.metrics import sp
class DampedScrollEffect(ScrollEffect):
    '''DampedScrollEffect class. See the module documentation for more
    information.
    '''
    edge_damping = NumericProperty(0.25)
    '''Edge damping.
    :attr:`edge_damping` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.25
    '''
    spring_constant = NumericProperty(2.0)
    '''Spring constant.
    :attr:`spring_constant` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 2.0
    '''
    min_overscroll = NumericProperty(.5)
    '''An overscroll less than this amount will be normalized to 0.
    .. versionadded:: 1.8.0
    :attr:`min_overscroll` is a :class:`~kivy.properties.NumericProperty` and
    defaults to .5.
    '''
    round_value = BooleanProperty(True)
    '''If True, when the motion stops, :attr:`value` is rounded to the nearest
    integer.
    .. versionadded:: 1.8.0
    :attr:`round_value` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to True.
    '''
    def update_velocity(self, dt):
        # At rest and inside the limits: optionally snap to an integer and stop.
        if abs(self.velocity) <= self.min_velocity and self.overscroll == 0:
            self.velocity = 0
            # why does this need to be rounded? For now refactored it.
            if self.round_value:
                self.value = round(self.value)
            return
        # Friction force, normalized to the standard frame time.
        total_force = self.velocity * self.friction * dt / self.std_dt
        if abs(self.overscroll) > self.min_overscroll:
            # Past the edge: add extra damping plus a spring pulling back.
            total_force += self.velocity * self.edge_damping
            total_force += self.overscroll * self.spring_constant
        else:
            self.overscroll = 0
        # Remember which limit the spring is returning us toward, so the
        # motion can be clamped there instead of oscillating past it.
        stop_overscroll = ''
        if not self.is_manual:
            if self.overscroll > 0 and self.velocity < 0:
                stop_overscroll = 'max'
            elif self.overscroll < 0 and self.velocity > 0:
                stop_overscroll = 'min'
        self.velocity = self.velocity - total_force
        if not self.is_manual:
            self.apply_distance(self.velocity * dt)
            if stop_overscroll == 'min' and self.value > self.min:
                self.value = self.min
                self.velocity = 0
                return
            if stop_overscroll == 'max' and self.value < self.max:
                self.value = self.max
                self.velocity = 0
                return
        self.trigger_velocity_update()
    def on_value(self, *args):
        scroll_min = self.min
        scroll_max = self.max
        # min/max may be handed over inverted; normalize before comparing.
        if scroll_min > scroll_max:
            scroll_min, scroll_max = scroll_max, scroll_min
        # Track how far past either limit we are (0 while inside the limits).
        if self.value < scroll_min:
            self.overscroll = self.value - scroll_min
        elif self.value > scroll_max:
            self.overscroll = self.value - scroll_max
        else:
            self.overscroll = 0
        self.scroll = self.value
    def on_overscroll(self, *args):
        self.trigger_velocity_update()
    def apply_distance(self, distance):
        os = abs(self.overscroll)
        if os:
            # The deeper into overscroll, the more the movement is damped.
            distance /= 1. + os / sp(200.)
        super(DampedScrollEffect, self).apply_distance(distance)
|
matham/kivy
|
kivy/effects/dampedscroll.py
|
Python
|
mit
| 3,511
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import algolia
# Package definition; version is sourced from the package itself so it is
# declared in exactly one place.
setup(
    name='django-algolia',
    version=algolia.__version__,
    description='Synchronize your models with the Algolia API for easier and faster searches',
    url='https://github.com/Kmaschta/django-algolia',
    license='BSD',
    packages=find_packages(),
    install_requires=[
        # NOTE(review): the exact pin to Django 1.6 blocks installation under
        # any other Django release -- confirm whether a range was intended.
        'Django==1.6',
        'algoliasearch>=1.5.2',
    ],
    author='Kmaschta',
    author_email='kmaschta@gmail.com',
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Framework :: Django',
        'Framework :: Django :: 1.6',
        'License :: OSI Approved :: BSD License',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
Kmaschta/django-algolia
|
setup.py
|
Python
|
bsd-2-clause
| 984
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvalacarta - XBMC Plugin
# Canal para antena 3
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
import urlparse,re
import urllib, urllib2
from core import logger
from core import scrapertools
from core.item import Item
from core import jsontools
# Extra debug output toggle for this channel module.
DEBUG = False
# Channel identifier stamped on every Item this module creates.
CHANNELNAME = "a3media"
import hmac
def isGeneric():
    """Tell the plugin framework this channel uses the generic interface."""
    generic = True
    return generic
def mainlist(item):
    """Build the channel's top-level menu from the atresplayer nav bar."""
    logger.info("[a3media.py] mainlist")
    '''
    <nav class="list clearfix">
    <a href="http://www.atresplayer.com/television/series/">Series</a>
    <a href="http://www.atresplayer.com/television/programas/">Programas</a>
    <a href="http://www.atresplayer.com/television/deportes/">Deportes</a>
    <a href="http://www.atresplayer.com/television/noticias/">Noticias</a>
    <a href="http://www.atresplayer.com/television/documentales/">Documentales</a>
    <a href="http://www.atresplayer.com/television/series-infantiles/">Infantil</a>
    <a href="http://www.atresplayer.com/television/webseries/">Webseries</a>
    <a href="http://www.atresplayer.com/television/especial/">Más Contenido</a>
    </nav>
    '''
    url="http://www.atresplayer.com/"
    data = scrapertools.cachePage(url)
    logger.info(data)
    # Isolate the navigation block, then scrape its section links.
    patron = '<nav class="list clearfix">(.*?)</nav>'
    bloque = scrapertools.get_match(data,patron)
    itemlist = []
    if str(bloque)!="":
        patron = '<a[^h]+href="([^"]+)">([^<]+)</a>'
        matches = re.compile(patron,re.DOTALL).findall(bloque)
        # "Destacados" (featured) entry comes first, scraped from the homepage.
        itemlist.append( Item(channel=CHANNELNAME, title="Destacados" , action="programas" , extra="dest", url=url, folder=True) )
        for scrapedurl, scrapedtitle in matches:
            itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="programas" , url=scrapedurl, folder=True) )
        # Alphabetical index backed by the JSON search endpoint.
        itemlist.append( Item(channel=CHANNELNAME, title="A.....Z" , action="alfabetico" , url="http://www.atresplayer.com/buscador/sections.json", folder=True) )
    return itemlist
def alfabetico(item):
    """List all shows alphabetically from the sections.json endpoint."""
    logger.info("[a3media.py] temporadas")
    data = scrapertools.cachePage(item.url)
    logger.info(data)
    '''
    {"href":"television/series-infantiles/academia-gladiadores/","img":"/clipping/2013/06/26/00721/703.jpg","titulo":"Academia de Gladiadores","letra":"A","descripcion":"ATRESPLAYER TV. Vídeos de ACADEMIA DE GLADIADORES.","cadena":"atres"}
    '''
    # JSON is scraped with a regex rather than parsed (fields are positional).
    patron = '{"href":"([^"]+)","img":"([^"]+)","titulo":"([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []
    for url,img,scrapedtitle in matches:
        # href/img in the JSON are site-relative; prefix the host.
        scrapedurl = "http://www.atresplayer.com/" + url
        scrapedthumbnail = "http://www.atresplayer.com" + img
        scrapedplot = ""
        # Add to the item list
        itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="temporadas" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , folder=True) )
    return itemlist
def temporadas(item):
    """List the seasons of a show; fall back to sub-programs or episodes."""
    logger.info("[a3media.py] temporadas")
    data = scrapertools.cachePage(item.url)
    logger.info(data)
    '''
    <div class="fn_sinopsis_lay fn_slide_lay hide">
    <p class="mar-b_5">La acción se desarrolla a partir de octubre de 1961. Tras la doble boda entre Mauro e Inés y Daniel y Belén, muchos de los personajes deciden rehacer sus vidas lejos de Madrid. Mauro e Inés, junto con Tomás, se trasladan a Barcelona. Pedrito, en contra de su voluntad, se marcha finalmente con Felisa a Suiza para reencontrarse con su familia, abandonando a una desconsolada Dorita. Daniel y Belén cruzarán finalmente el charco hasta Colombia, dejando el hostal La Estrella en manos de Manolita. El eje central de todos los personajes, nuevos y antiguos, continúa girando alrededor de la Plaza de los Frutos, escenario principal de la ficción, y de El Asturiano, un bar clásico que se adapta a los tiempos que corren y que servirá de punto de encuentro entre los personajes y lugar de unión entre las distintas tramas.</p>
    </div>
    '''
    # Best effort: extract the synopsis; missing synopsis is not an error.
    patron = '<p class="mar-b_5">(.*?)</p>'
    try:
        plot = scrapertools.get_match(data,patron)
        item.plot = plot
    except:
        pass
    '''
    <ul class="fn_lay hide">
    <li><a class="item chapter_b mar-b_5" href="http://www.atresplayer.com/television/series/amar-es-para-siempre/temporada-2/">Temporada 2</a></li>
    <li><a class="item chapter_b mar-b_5" href="http://www.atresplayer.com/television/series/amar-es-para-siempre/temporada-1/">Temporada 1</a></li>
    </ul>
    '''
    patron = '<li><a class="item chapter_b mar-b_5" href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []
    scrapedplot=""
    for scrapedurl, scrapedtitle in matches:
        # Add to the item list
        itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="episodios" , url=scrapedurl, thumbnail=item.thumbnail, plot=item.plot , folder=True) )
    # No season links found: the page is either a container of sub-programs
    # or a single-season show with episodes directly on it.
    if len(itemlist) == 0:
        if not '<div class="mod_carrousel_compuesto clearfix">' in data: ## there are sub-programs
            itemlist = programas(item)
        else: ## no seasons on this page
            itemlist = episodios(item)
    return itemlist
def programas(item):
    """List the programs of a section (or the homepage featured block)."""
    logger.info("[a3media.py] programas")
    data = scrapertools.cachePage(item.url)
    logger.info(data)
    # For "Destacados" narrow the page down to the featured grid first.
    if item.extra == "dest":
        try:
            patron = '<div class="grid_4">(.*?)<div class="grid_12">'
            match = scrapertools.get_match(data,patron)
            data = match
        except:
            pass
    '''
    <div class="mod_promo antena3 ">
    <a title="El tiempo entre costuras" href="http://www.atresplayer.com/television/series/el-tiempo-entre-costuras/">
    <img title="El tiempo entre costuras" src="/clipping/2013/10/21/00568/702.jpg" alt="El tiempo entre costuras"/>
    </a>
    '''
    patron = '<div class="mod_promo [^"]+">[^<]+'
    patron += '<a title="([^"]+)" href="([^"]+)">[^<]+'
    patron += '<img.*?src="([^"]+)"'
    # Featured items carry an extra subtitle line that we also capture.
    if item.extra == "dest":
        patron += '.*?<span class="segunda-linea fn_ellipsis">([^<]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []
    for match in matches:
        scrapedtitle = match[0]
        scrapedurl = match[1]
        scrapedthumbnail = "http://www.atresplayer.com" + match[2]
        scrapedplot = ""
        if item.extra == "dest":
            # Featured entries point straight at a playable video.
            scrapedtitle = scrapedtitle + " "+match[3]
            accion = "play"
        else:
            accion = "temporadas"
        # Add to the item list
        itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action=accion , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , folder=True) )
    return itemlist
def episodios(item):
    """List the episodes of a season via its carousel.json companion URL."""
    logger.info("[a3media.py] episodios")
    data = scrapertools.cachePage(item.url)
    logger.info(data)
    # Best effort: extract the synopsis from the HTML page.
    patron = '<p class="mar-b_5">(.*?)</p>'
    try:
        plot = scrapertools.get_match(data,patron)
        item.plot = plot
    except:
        pass
    # The episode list itself lives in a JSON document next to the page.
    data = scrapertools.cachePage(item.url+"carousel.json")
    logger.info(data)
    '''
    {"title":"42 (31-10-13)","hrefHtml":"http://www.atresplayer.com/television/series/amar-es-para-siempre/temporada-2/capitulo-42-31-10-13_2013103000399.html","srcImage":"/clipping/2013/10/30/00042/703.jpg","icono":"","textButton":"Ver contenido"}
    '''
    patron = '{"title":"([^"]+)","hrefHtml":"([^"]+)","srcImage":"([^"]+)","icono":([^,]+),"textButton":"[^"]+"}'
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []
    for scrapedtitle, scrapedurl, scrapedthumbnail, icono in matches:
        scrapedthumbnail = "http://www.atresplayer.com" + scrapedthumbnail
        icono=icono.replace('"','')
        # A non-empty "icono" marks premium/unavailable entries; skip those.
        if icono=="":
            # Add to the item list
            itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="play" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=item.plot , folder=False) )
    return itemlist
def play(item):
    """Resolve the media URL for an episode through the pydowntv API."""
    logger.info("[a3media.py] play")
    itemlist = []
    data = scrapertools.cache_page("http://www.pydowntv.com/api/"+item.url)
    json_object = jsontools.load_json(data)
    logger.info("json_object="+repr(json_object))
    # First URL of the first video entry is the playable stream.
    itemlist.append( Item(url=json_object["videos"][0]["url_video"][0]) )
    return itemlist
def getApiTime():
    """Return the atresplayer server clock as epoch seconds (Python 2 long)."""
    stime = scrapertools.cachePage("http://servicios.atresplayer.com/api/admin/time")
    # Endpoint returns milliseconds; convert to seconds.
    return long(stime) / 1000L
def d(s, s1):
    """Build a signed token "value|expiry|hmac" for the atresplayer API.

    The expiry is server time + 3000s; the signature covers value+expiry
    keyed with s1 (see e()).
    """
    l = 3000L + getApiTime()
    s2 = e(s+str(l), s1)
    return "%s|%s|%s" % (s, str(l), s2)
def e(s, s1):
    """Return the hex HMAC (default MD5 digest) of message s with key s1."""
    return hmac.new(s1, s).hexdigest()
def load_json(data):
    """Parse a JSON string, preferring bundled simplejson over stdlib json.

    All string values are re-encoded to UTF-8 bytes (Python 2). On total
    failure the exception info is logged and None is returned implicitly.
    """
    # callback to transform json string values to utf8
    def to_utf8(dct):
        rdct = {}
        for k, v in dct.items() :
            if isinstance(v, (str, unicode)) :
                rdct[k] = v.encode('utf8', 'ignore')
            else :
                rdct[k] = v
        return rdct
    try :
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        return json_data
    except:
        # Bundled simplejson missing or failed: fall back to stdlib json.
        try:
            import json
            json_data = json.loads(data, object_hook=to_utf8)
            return json_data
        except:
            import sys
            for line in sys.exc_info():
                logger.error("%s" % line)
|
titienmiami/mmc.repository
|
plugin.video.tvalacarta/tvalacarta/channels/a3media.py
|
Python
|
gpl-2.0
| 9,184
|
"""
Real life example:
Fitting data from an X-ray reflectometer
The sample is composed of a thin
silver nano-particle layer on a silicon
substrate. The substrate is covered with
SiO2 layer. The nano-particle layer has negligible density
and does not considerably affect
the observed reflectometry picture.
The following parameters of the experiment
are fitted:
1. beam intensity
2. footprint correction factor
3. beam angular divergence
4. Material concentration in the SiO2 layer
5. Thickness of SiO2 layer
6. Sample roughness
Fitting is done in two steps:
First the whole range of experimental data is fitted,
then the data related to the instrument is fixed and
on the second step only the right-hand part of
experimental data (i.e. the part of the reflectometry curve
associated with bigger incident angles)
is concerned for fitting. At the second
stage only the sample parameters are fitted,
since only they affect the shape of the reflectometry
curve at bigger incident angles.
"""
from matplotlib import pyplot as plt
import numpy as np
from os import path
import bornagain as ba
from scipy.optimize import differential_evolution
def get_real_data(filename="mg6a_Merged.txt.gz"):
    """
    Loads real data files and merges them once.
    Returns a Nx3 array (N - the number of experimental data entries)
    with first column being coordinates,
    second one being values,
    and the third one being weights to restore intensity values from experiment
    """
    # Load once and memoize on the function object itself; subsequent calls
    # return a copy so callers cannot mutate the cached array.
    if not hasattr(get_real_data, "data"):
        filepath = path.join(path.dirname(path.realpath(__file__)), filename)
        real_data = np.loadtxt(filepath, usecols=(0, 1, 3), skiprows=1)
        # translating axis values from double incident angle (degrees)
        # to incident angle (radians)
        real_data[:, 0] *= np.pi / 360
        get_real_data.data = real_data
    return get_real_data.data.copy()
def get_real_data_axis(start, end):
    """
    Get axis coordinates of the experimental data
    :param start: first bin to extract
    :param end: last bin to extract
    :return: 1D array with axis coordinates
    """
    data = get_real_data()
    return data[start:end, 0]
def get_real_data_values(start, end):
    """
    Get experimental data values as a 1D array
    :param start: first bin to extract
    :param end: last bin to extract
    :return: 1D array with experimental data values
    """
    data = get_real_data()
    return data[start:end, 1]
def get_weights(start, end):
    """
    Get weights to restore genuine intensity of experimental instrument
    :param start: first bin to extract
    :param end: last bin to extract
    :return: 1D array with weights to restore beam intensity
    """
    data = get_real_data()
    return data[start:end, 2]
def create_simulation(arg_dict, bin_start, bin_end):
    """
    Creates and returns specular simulation
    """
    # Cu K-alpha wavelength; beam divergence is modeled as a Gaussian over
    # 30 samples within +/- 3 sigma.
    wavelength = 1.54 * ba.angstrom
    alpha_distr = ba.RangedDistributionGaussian(30, 3)
    footprint = ba.FootprintFactorGaussian(arg_dict["footprint_factor"])
    # Scan over exactly the experimental angular axis for the chosen bins.
    scan = ba.AngularSpecScan(wavelength,
                              get_real_data_axis(bin_start, bin_end))
    scan.setAbsoluteAngularResolution(alpha_distr, arg_dict["divergence"])
    scan.setFootprintFactor(footprint)
    simulation = ba.SpecularSimulation()
    simulation.setScan(scan)
    simulation.setBeamIntensity(arg_dict["intensity"])
    return simulation
def buildSample(arg_dict):
    """
    Creates sample and returns it
    """
    # defining materials; the fitted "concentration" scales the SiO2
    # reference delta/beta values.
    m_air = ba.HomogeneousMaterial("Air", 0.0, 0.0)
    m_si_o2 = ba.HomogeneousMaterial("SiO2",
                                     8.57040868e-06 * arg_dict["concentration"],
                                     1.11016654e-07 * arg_dict["concentration"])
    m_si = ba.HomogeneousMaterial("Si", 7.57211137e-06, 1.72728178e-07)
    # roughness (same fitted value applied to both interfaces)
    r_si = ba.LayerRoughness(arg_dict["roughness"], 0, 0)
    # layers
    air_layer = ba.Layer(m_air)
    oxide_layer = ba.Layer(m_si_o2, arg_dict["thickness"])
    substrate_layer = ba.Layer(m_si)
    # assembling multilayer: air / SiO2 / Si substrate
    multi_layer = ba.MultiLayer()
    multi_layer.addLayer(air_layer)
    multi_layer.addLayerWithTopRoughness(oxide_layer, r_si)
    multi_layer.addLayerWithTopRoughness(substrate_layer, r_si)
    return multi_layer
def run_simulation(arg_dict, bin_start=0, bin_end=-1):
    """
    Runs simulation and returns its result
    """
    simulation = create_simulation(arg_dict, bin_start, bin_end)
    simulation.setSample(buildSample(arg_dict))
    simulation.runSimulation()
    return simulation.result()
def chi_2(real_data, sim_data, weights):
    """
    Compute the chi-squared metric between experiment and simulation.

    :param real_data: 1D array of experimental intensities
    :param sim_data: 1D array of simulated intensities (same length)
    :param weights: 1D array of per-bin weights restoring beam intensity
    :return: float, sum of (real - sim)^2 / sim over all bins
    """
    sim_data_upsc = np.multiply(weights, sim_data)
    # BUGFIX: the original used `sim_data_upsc is 0`, an identity check that
    # is always False for an ndarray (and indexed nothing), so zero bins were
    # never replaced and the division below produced inf/nan. Use an
    # element-wise mask to guard against division by zero.
    sim_data_upsc[sim_data_upsc == 0] = 1e-30
    real_data_upsc = np.multiply(weights, real_data)
    diff = real_data_upsc - sim_data_upsc
    return np.sum(np.divide(np.multiply(diff, diff), sim_data_upsc))
def create_par_dict(*arg):
    """
    Pack positional fit parameters into a named dictionary.

    Order: intensity, footprint_factor, divergence, concentration,
    thickness, roughness.
    """
    names = ['intensity', 'footprint_factor', 'divergence',
             'concentration', 'thickness', 'roughness']
    return {name: arg[i] for i, name in enumerate(names)}
def objective_primary(args):
    """
    Objective function for preliminary stage of optimization
    """
    # Skip the first 15 bins (lowest angles), use everything else.
    bin_start = 15  # first bin in the experimental data to calculate
    bin_end = -1  # last bin in the experimental data to calculate
    arg_dict = create_par_dict(*args)
    sim_result = run_simulation(arg_dict, bin_start, bin_end)
    sim_data = sim_result.array()
    return chi_2(get_real_data_values(bin_start, bin_end),
                 sim_data, get_weights(bin_start, bin_end))
def objective_fine(args, intensity, footprint_factor, divergence):
    """
    Objective function for tuning the right-hand side of experimental data

    Instrument parameters (intensity, footprint, divergence) are fixed;
    only the sample parameters in *args* are varied.
    """
    # Only the high-angle tail of the data is fitted at this stage.
    bin_start = 404  # first bin in the experimental data to calculate
    bin_end = -1  # last bin in the experimental data to calculate
    arg_dict = create_par_dict(intensity, footprint_factor, divergence, *args)
    sim_result = run_simulation(arg_dict, bin_start, bin_end)
    sim_data = sim_result.array()
    return chi_2(get_real_data_values(bin_start, bin_end),
                 sim_data, get_weights(bin_start, bin_end))
def run_fitting():
    """
    Runs fitting and returns its result

    Two-stage differential evolution: first all six parameters over the
    full data range, then only the sample parameters over the high-angle
    tail with the instrument parameters frozen.
    """
    # running preliminary optimization on the total range of experimental data.
    bounds = [(1e6, 1e8),  # beam intensity
              (0.0, 0.1),  # beam-to-sample width ratio
              (0.0, 0.08 * ba.deg),  # beam_divergence
              (0.0, 1.0),  # oxide_concentration
              (0.0, 2.0 * ba.nm),  # oxide_thickness
              (0.0, 2.0 * ba.nm)]  # roughness
    print("Start preliminary fitting of experimental data:\n")
    preliminary_result = differential_evolution(objective_primary, bounds,
                                                maxiter=20, popsize=60,
                                                mutation=(0.5, 1.5),
                                                disp=True, tol=1e-5)
    # Second stage: sample parameters only.
    bounds = [(0.0, 1.0),  # oxide_concentration
              (0.0, 2.0 * ba.nm),  # oxide_thickness
              (0.0, 2.0 * ba.nm)]  # roughness
    fixed_args = (preliminary_result.x[0],  # beam intensity
                  preliminary_result.x[1],  # beam-to-sample width ratio
                  preliminary_result.x[2]  # beam divergence
                  )
    print("\nStart fitting big incident angle part of experimental data:\n")
    fine_tuning_result = differential_evolution(objective_fine, bounds,
                                                fixed_args, maxiter=20,
                                                popsize=40, mutation=(0.5, 1.5),
                                                disp=True, tol=1e-5)
    result = create_par_dict(*fixed_args, *fine_tuning_result.x)
    print("\nFitting result:")
    print(result, "\n")
    return result
def plot_result(sim_result, ref_result, bin_start=0, bin_end=-1):
    """
    Plots the graphs of obtained simulation data

    Overlays experiment, fitted simulation and a reference simulation on a
    log-intensity axis; blocks until the plot window is closed.
    """
    sim_data = sim_result.array()
    ref_data = ref_result.array()
    # Experimental axis is stored in radians; convert to degrees for display.
    plt.semilogy(get_real_data_axis(bin_start, bin_end) * 180 / np.pi,
                 get_real_data_values(bin_start, bin_end),
                 sim_result.axis(), sim_data,
                 ref_result.axis(), ref_data)
    xlabel = ba.get_axes_labels(sim_result, ba.AxesUnits.DEFAULT)[0]
    ylabel = "Intensity"
    plt.xlabel(xlabel, fontsize=16)
    plt.ylabel(ylabel, fontsize=16)
    plt.legend(['Experimental data', 'Simulation', 'Reference'],
               loc='upper right', fontsize=16)
    plt.show()
if __name__ == '__main__':
    fit_data = run_fitting()
    # Reference parameter set (presumably from a previous converged run);
    # plotted alongside the fresh fit for visual comparison.
    ref_data = create_par_dict(3.78271438e+06,  # beam intensity
                               9.58009763e-04,  # beam-to-sample width ratio
                               2.30471294e-04,  # beam angular divergence
                               0.58721753,  # oxide concentration
                               1.25559347,  # oxide thickness
                               0.19281863)  # roughness
    plot_result(run_simulation(fit_data), run_simulation(ref_data))
|
DmitryYurov/BornAgain
|
Examples/python/fitting/ex03_ExtendedExamples/specular/RealLifeReflectometryFitting.py
|
Python
|
gpl-3.0
| 9,258
|
# Keep a handle on the builtin ``object`` before the name is shadowed by
# the ``pixie.vm.object`` module imported just below.
py_object = object
import pixie.vm.object as object
from pixie.vm.object import affirm, runtime_error
from pixie.vm.primitives import nil, false
from rpython.rlib.rarithmetic import r_uint
from rpython.rlib.listsort import TimSort
from rpython.rlib.jit import elidable_promote, promote
from rpython.rlib.objectmodel import we_are_translated
import rpython.rlib.jit as jit
import pixie.vm.rt as rt
# Symbolic names of the VM's bytecodes; the loop below binds each name at
# module level to its opcode index as an r_uint (LOAD_CONST == 0, ADD == 1,
# and so on), so interpreter code can reference opcodes by name.
BYTECODES = ["LOAD_CONST",
             "ADD",
             "EQ",
             "INVOKE",
             "TAIL_CALL",
             "DUP_NTH",
             "RETURN",
             "COND_BR",
             "JMP",
             "CLOSED_OVER",
             "MAKE_CLOSURE",
             "SET_VAR",
             "POP",
             "DEREF_VAR",
             "INSTALL",
             "LOOP_RECUR",
             "ARG",
             "PUSH_SELF",
             "POP_UP_N",
             "MAKE_MULTI_ARITY",
             "MAKE_VARIADIC",
             "YIELD",
             "PUSH_NS"]
for x in range(len(BYTECODES)):
    globals()[BYTECODES[x]] = r_uint(x)
@jit.unroll_safe
def resize_list(lst, new_size):
    """'Resizes' a list, via reallocation and copy"""
    # The explicit while-loop (rather than slicing) keeps this RPython- and
    # JIT-friendly; unroll_safe lets the JIT unroll the copy.
    affirm(len(lst) < new_size, u"New list must be larger than old list")
    new_list = [None] * new_size
    i = r_uint(0)
    while i < len(lst):
        new_list[i] = lst[i]
        i += 1
    return new_list
@jit.unroll_safe
def list_copy(from_lst, from_loc, to_list, to_loc, count):
    """Copy ``count`` items between lists in place; returns ``to_list``."""
    from_loc = r_uint(from_loc)
    to_loc = r_uint(to_loc)
    count = r_uint(count)
    i = r_uint(0)
    while i < count:
        to_list[to_loc + i] = from_lst[from_loc + i]
        i += 1
    return to_list
@jit.unroll_safe
def slice_to_end(from_list, start_pos):
    """Return a new list holding ``from_list[start_pos:]``."""
    start_pos = r_uint(start_pos)
    items_to_copy = len(from_list) - start_pos
    new_lst = [None] * items_to_copy
    list_copy(from_list, start_pos, new_lst, 0, items_to_copy)
    return new_lst
@jit.unroll_safe
def slice_from_start(from_list, count, extra=r_uint(0)):
    """Return ``from_list[:count]`` with ``extra`` trailing None slots."""
    new_lst = [None] * (count + extra)
    list_copy(from_list, 0, new_lst, 0, count)
    return new_lst
# class TailCall(object.Object):
# _type = object.Type("TailCall")
# __immutable_fields_ = ["_f", "_args"]
# def __init__(self, f, args):
# self._f = f
# self._args = args
#
# def run(self):
# return self._f._invoke(self._args)
class BaseCode(object.Object):
    """Common base for everything invokable in the VM (fns, vars, closures).

    Tracks a name, macro flag and metadata; subclasses implement the actual
    invocation and bytecode access.
    """
    _immutable_fields_ = ["_meta"]
    def __init__(self):
        assert isinstance(self, BaseCode)
        self._name = u"unknown"
        self._is_macro = False
        self._meta = nil
    def meta(self):
        return self._meta
    def with_meta(self, meta):
        # Subclasses return a copy carrying the new metadata.
        assert false, "not implemented"
    def name(self):
        return self._name
    def set_macro(self):
        self._is_macro = True
    def is_macro(self):
        assert isinstance(self, BaseCode)
        return self._is_macro
    def get_consts(self):
        raise NotImplementedError()
    def get_bytecode(self):
        raise NotImplementedError()
    @elidable_promote()
    def stack_size(self):
        # Default: no interpreter stack needed (native code).
        return 0
    def invoke_with(self, args, this_fn):
        # By default ignore this_fn; Closure/Code override to thread self.
        return self.invoke(args)
def join_last(words, sep):
    """
    Joins by commas and uses 'sep' on last word.
    Eg. join_last(['dog', 'cat', 'rat'] , 'and') = 'dog, cat and rat'
    """
    count = len(words)
    if count == 1:
        return words[0]
    if count == 2:
        return words[0] + u" " + sep + u" " + words[1]
    return u", ".join(words[0:-1]) + u" " + sep + u" " + words[-1]
class MultiArityFn(BaseCode):
    """A function with several fixed arities plus an optional variadic tail.

    ``_arities`` maps argument count -> code object; ``_rest_fn`` handles
    calls with at least ``_required_arity`` arguments.
    """
    _type = object.Type(u"pixie.stdlib.MultiArityFn")
    _immutable_fields_ = ["_arities[*]", "_required_arity", "_rest_fn"]
    def type(self):
        return MultiArityFn._type
    def __init__(self, name, arities, required_arity=0, rest_fn=None, meta=nil):
        BaseCode.__init__(self)
        self._name = name
        self._arities = arities
        self._required_arity = required_arity
        self._rest_fn = rest_fn
        self._meta = meta
    def with_meta(self, meta):
        return MultiArityFn(self._name, self._arities, self._required_arity, self._rest_fn, meta)
    @elidable_promote()
    def get_fn(self, arity):
        """Pick the implementation for ``arity`` or raise an arity error."""
        f = self._arities.get(arity, None)
        if f is not None:
            return f
        if self._rest_fn is not None and arity >= self._required_arity:
            return self._rest_fn
        # No match: build a sorted, human-readable list of accepted arities
        # for the error message. (NB: local ``sorted`` shadows the builtin.)
        acc = []
        sorted = TimSort(self.get_arities())
        sorted.sort()
        for x in sorted.list:
            acc.append(unicode(str(x)))
        if self._rest_fn:
            acc.append(unicode(str(self._rest_fn.required_arity())) + u"+")
        runtime_error(u"Wrong number of arguments " + unicode(str(arity)) + u" for function '" + unicode(self._name) + u"'. Expected " + join_last(acc, u"or"),
                      u"pixie.stdlib/InvalidArityException")
    def get_arities(self):
        return self._arities.keys()
    def invoke(self, args):
        return self.invoke_with(args, self)
    def invoke_with(self, args, self_fn):
        # Dispatch on the actual argument count.
        return self.get_fn(len(args)).invoke_with(args, self_fn)
class NativeFn(BaseCode):
    """Wrapper for a native function"""
    _type = object.Type(u"pixie.stdlib.NativeFn")
    def __init__(self, doc=None):
        BaseCode.__init__(self)
    def type(self):
        return NativeFn._type
    def invoke(self, args):
        return self.inner_invoke(args)
    def inner_invoke(self, args):
        # Subclasses provide the actual native implementation.
        raise NotImplementedError()
    def invoke_with(self, args, this_fn):
        return self.invoke(args)
class Code(BaseCode):
    """Interpreted code block. Contains consts and """
    _type = object.Type(u"pixie.stdlib.Code")
    _immutable_fields_ = ["_arity", "_consts[*]", "_bytecode", "_stack_size", "_meta"]
    def type(self):
        return Code._type
    def __init__(self, name, arity, bytecode, consts, stack_size, debug_points, meta=nil):
        BaseCode.__init__(self)
        self._arity = arity
        self._bytecode = bytecode
        self._consts = consts
        self._name = name
        self._stack_size = stack_size
        self._debug_points = debug_points
        self._meta = meta
    def with_meta(self, meta):
        # Immutable update: share everything, swap the metadata.
        return Code(self._name, self._arity, self._bytecode, self._consts, self._stack_size, self._debug_points, meta=meta)
    def get_debug_points(self):
        return self._debug_points
    def invoke(self, args):
        # Fixed arity: exact argument count or a runtime arity error.
        if len(args) == self.get_arity():
            return self.invoke_with(args, self)
        else:
            runtime_error(u"Invalid number of arguments " + unicode(str(len(args)))
                          + u" for function '" + unicode(str(self._name)) + u"'. Expected "
                          + unicode(str(self.get_arity())),
                          u":pixie.stdlib/InvalidArityException")
    def invoke_with(self, args, this_fn):
        # Run the bytecode; on error record this frame in the stack trace.
        try:
            return interpret(self, args, self_obj=this_fn)
        except object.WrappedException as ex:
            ex._ex._trace.append(object.PixieCodeInfo(self._name))
            raise
    @elidable_promote()
    def get_arity(self):
        return self._arity
    @elidable_promote()
    def get_consts(self):
        return self._consts
    @elidable_promote()
    def get_bytecode(self):
        return self._bytecode
    @elidable_promote()
    def stack_size(self):
        return self._stack_size
    @elidable_promote()
    def get_base_code(self):
        return self
class VariadicCode(BaseCode):
    """Wraps a Code object so trailing arguments are collected into an array.

    The wrapped code receives ``_required_arity`` fixed arguments plus one
    extra argument holding the rest as an array.
    """
    _immutable_fields_ = ["_required_arity", "_code", "_meta"]
    _type = object.Type(u"pixie.stdlib.VariadicCode")
    def type(self):
        return VariadicCode._type
    def __init__(self, code, required_arity, meta=nil):
        BaseCode.__init__(self)
        self._required_arity = r_uint(required_arity)
        self._code = code
        self._meta = meta
    def with_meta(self, meta):
        return VariadicCode(self._code, self._required_arity, meta)
    def name(self):
        return None
    def required_arity(self):
        return self._required_arity
    def invoke(self, args):
        return self.invoke_with(args, self)
    def invoke_with(self, args, self_fn):
        from pixie.vm.array import array
        argc = len(args)
        if self._required_arity == 0:
            # Everything is rest: pass all args as one array.
            return self._code.invoke_with([array(args)], self_fn)
        if argc == self._required_arity:
            # Exactly the fixed args: append an empty rest array.
            new_args = resize_list(args, len(args) + 1)
            new_args[len(args)] = array([])
            return self._code.invoke_with(new_args, self_fn)
        elif argc > self._required_arity:
            # Split: fixed prefix + one slot holding the rest as an array.
            start = slice_from_start(args, self._required_arity, 1)
            rest = slice_to_end(args, self._required_arity)
            start[self._required_arity] = array(rest)
            return self._code.invoke_with(start, self_fn)
        # Fewer than the required fixed arguments: hard error.
        affirm(False, u"Got " + unicode(str(argc)) + u" arg(s) need at least " + unicode(str(self._required_arity)))
class Closure(BaseCode):
    """A Code object paired with its captured (closed-over) values.

    Delegates consts/bytecode/stack-size queries to the wrapped Code.
    """
    _type = object.Type(u"pixie.stdlib.Closure")
    _immutable_fields_ = ["_closed_overs[*]", "_code", "_meta"]
    def type(self):
        return Closure._type
    def __init__(self, code, closed_overs, meta=nil):
        BaseCode.__init__(self)
        affirm(isinstance(code, Code), u"Code argument to Closure must be an instance of Code")
        self._code = code
        self._closed_overs = closed_overs
        self._meta = meta
    def with_meta(self, meta):
        return Closure(self._code, self._closed_overs, meta)
    def name(self):
        return None
    def invoke(self, args):
        return self.invoke_with(args, self)
    def invoke_with(self, args, self_fn):
        # Run the wrapped code; record its name in the trace on error.
        try:
            return interpret(self, args, self_obj=self_fn)
        except object.WrappedException as ex:
            code = self._code
            assert isinstance(code, Code)
            ex._ex._trace.append(object.PixieCodeInfo(code._name))
            raise
    def get_closed_over(self, idx):
        return self._closed_overs[idx]
    def get_consts(self):
        return self._code.get_consts()
    def get_bytecode(self):
        return self._code.get_bytecode()
    def stack_size(self):
        return self._code.stack_size()
    def get_closed_overs(self):
        return self._closed_overs
    def get_base_code(self):
        return self._code.get_base_code()
    def get_debug_points(self):
        return self._code.get_debug_points()
class Undefined(object.Object):
    """Sentinel type marking a Var that has no root value yet."""
    _type = object.Type(u"pixie.stdlib.Undefined")
    def type(self):
        return Undefined._type

# The single shared sentinel instance.
undefined = Undefined()
class DynamicVars(py_object):
    """Stack of dynamic binding frames, stored as a cons list of hashmaps.

    The head of the list is the current frame; pushing copies the head so
    inner bindings shadow outer ones until popped.
    """
    def __init__(self):
        self._vars = rt.cons(rt.hashmap(), nil)
    def push_binding_frame(self):
        # New frame starts as a copy (share) of the current one.
        self._vars = rt.cons(rt.first(self._vars), self._vars)
    def pop_binding_frame(self):
        self._vars = rt.next(self._vars)
    def current_frame(self):
        return rt.first(self._vars)
    def get_current_frames(self):
        return self._vars
    def set_current_frames(self, vars):
        self._vars = vars
    def get_var_value(self, var, not_found):
        return rt._val_at(self.current_frame(), var, not_found)
    def set_var_value(self, var, val):
        # Replace the current frame with one where ``var`` maps to ``val``.
        cur_frame = self.current_frame()
        self.pop_binding_frame()
        self._vars = rt.cons(rt._assoc(cur_frame, var, val), self._vars)
class Var(BaseCode):
    """A mutable, optionally dynamic (per-binding-frame) value cell.

    ``_rev`` is bumped on every mutation so the ``@elidable_promote``-cached
    accessors (keyed on the rev) remain sound for the JIT.
    """
    _type = object.Type(u"pixie.stdlib.Var")
    _immutable_fields_ = ["_rev?"]
    def type(self):
        return Var._type
    def __init__(self, ns, name):
        BaseCode.__init__(self)
        self._ns = ns
        self._name = name
        self._rev = 0
        self._root = undefined  # sentinel: no root value set yet
        self._dynamic = False
    def set_root(self, o):
        """Set the var's root (global) value; returns self."""
        affirm(o is not None, u"Invalid var set")
        self._rev += 1
        self._root = o
        return self
    def set_value(self, val):
        """Set the value in the current dynamic binding frame; returns self."""
        affirm(self._dynamic, u"Can't set the value of a non-dynamic var")
        _dynamic_vars.set_var_value(self, val)
        return self
    def set_dynamic(self):
        self._dynamic = True
        self._rev += 1
    def get_dynamic_value(self):
        # Falls back to the root value when no frame binds this var.
        return _dynamic_vars.get_var_value(self, self._root)
    @elidable_promote()
    def _is_dynamic(self, rev):
        return self._dynamic
    def is_dynamic(self):
        return self._is_dynamic(self._rev)
    @elidable_promote()
    def get_root(self, rev):
        return self._root
    def deref(self):
        """Return the var's current value (dynamic frame or root)."""
        if self.is_dynamic():
            if we_are_translated():
                return self.get_dynamic_value()
            else:
                ## NOT RPYTHON
                # BUGFIX: dict.has_key() is deprecated in Python 2 and gone
                # in Python 3; the ``in`` operator is the equivalent check.
                if "_dynamic_vars" in globals():
                    return self.get_dynamic_value()
                else:
                    return self.get_root(self._rev)
        else:
            val = self.get_root(self._rev)
            affirm(val is not undefined, u"Var " + self._name + u" is undefined")
            return val
    def is_defined(self):
        return self._root is not undefined
    def invoke_with(self, args, this_fn):
        return self.invoke(args)
    def invoke(self, args):
        # Calling a var calls whatever it currently holds.
        return self.deref().invoke(args)
class bindings(py_object):
    """Context manager installing pairs of (var, value) dynamic bindings.

    Usage: ``with bindings(var1, val1, var2, val2): ...`` -- pushes one
    frame on entry, sets each pair, pops the frame on exit.
    """
    def __init__(self, *args):
        self._args = list(args)
    def __enter__(self):
        _dynamic_vars.push_binding_frame()
        # args alternate var, value.
        for x in range(0, len(self._args), 2):
            self._args[x].set_value(self._args[x + 1])
    def __exit__(self, exc_type, exc_val, exc_tb):
        _dynamic_vars.pop_binding_frame()
class Refer(py_object):
    """Record of a namespace referral: which symbols (or all) are visible."""
    def __init__(self, ns, refer_syms=None, refer_all=False):
        self._namespace = ns
        # BUGFIX: the original default was a shared mutable list ([]), which
        # every Refer created without explicit syms would alias. Use a None
        # sentinel and allocate a fresh list per instance instead.
        if refer_syms is None:
            refer_syms = []
        self._refer_syms = refer_syms
        self._refer_all = refer_all
class Namespace(object.Object):
    """A named registry of Vars plus referrals to other namespaces."""
    _type = object.Type(u"pixie.stdlib.Namespace")
    def type(self):
        return Namespace._type
    def __init__(self, name):
        self._registry = {}   # var name -> Var
        self._name = name
        self._refers = {}     # alias -> Refer
    def intern_or_make(self, name):
        """Return the Var named ``name``, creating it if needed."""
        assert name is not None
        affirm(isinstance(name, unicode), u"Var names must be unicode")
        v = self._registry.get(name, None)
        if v is None:
            v = Var(self._name, name)
            self._registry[name] = v
        return v
    def add_refer(self, ns, as_nm=None, refer_all=False):
        """Make ``ns`` visible from this namespace under alias ``as_nm``."""
        assert isinstance(ns, Namespace)
        if as_nm is not None:
            assert isinstance(as_nm, unicode)
        if as_nm is None:
            as_nm = ns._name
        self._refers[as_nm] = Refer(ns, refer_all=refer_all)
    def add_refer_symbol(self, sym, var):
        """Bind ``var`` directly under the name of ``sym``, warning on clash."""
        assert isinstance(self, Namespace)
        name = rt.name(sym)
        prev_binding = self._registry.get(name, None)
        if prev_binding is not None:
            print rt.name(rt.str(rt.wrap(u"Warning: "), sym, rt.wrap(u" already refers to "), prev_binding))
        self._registry[name] = var
        return var
    def include_stdlib(self):
        # Every namespace implicitly refers all of pixie.stdlib.
        stdlib = _ns_registry.find_or_make(u"pixie.stdlib")
        self.add_refer(stdlib, refer_all=True)
    def resolve_ns(self, ns_alias):
        """Resolve an alias: local refers first, then the global registry."""
        refer = self._refers.get(ns_alias, None)
        resolved_ns = None
        if refer is not None:
            resolved_ns = refer._namespace
        if resolved_ns is None:
            resolved_ns = _ns_registry.get(ns_alias, None)
        if resolved_ns is None:
            affirm(False, u"Unable to resolve namespace: " + ns_alias + u" inside namespace " + self._name)
        return resolved_ns
    def resolve(self, s, use_refers=True):
        """Resolve symbol ``s`` to a Var, searching refers when allowed."""
        import pixie.vm.symbol as symbol
        affirm(isinstance(s, symbol.Symbol), u"Must resolve symbols")
        ns = rt.namespace(s)
        name = rt.name(s)
        # A namespace-qualified symbol resolves against that namespace.
        if ns is not None:
            resolved_ns = self.resolve_ns(ns)
        else:
            resolved_ns = self
        assert isinstance(resolved_ns, Namespace)
        var = resolved_ns._registry.get(name, None)
        if var is None and use_refers:
            # Not found locally: search referred namespaces (one level deep).
            for refer_nm in self._refers:
                refer = self._refers[refer_nm]
                if name in refer._refer_syms or refer._refer_all:
                    var = refer._namespace.resolve(symbol.Symbol(name), False)
                    if var is not None:
                        return var
            return None
        return var
    def get(self, name, default):
        return self._registry.get(name, default)
class NamespaceRegistry(py_object):
    """Global name -> Namespace map with get-or-create semantics."""
    def __init__(self):
        self._registry = {}
    def find_or_make(self, name):
        #affirm(isinstance(name, unicode), u"Namespace names must be unicode")
        v = self._registry.get(name, None)
        if v is None:
            v = Namespace(name)
            self._registry[name] = v
        return v
    def get(self, name, default):
        return self._registry.get(name, default)
# Process-wide namespace registry.
_ns_registry = NamespaceRegistry()

def intern_var(ns, name=None):
    """Intern a Var; with one argument, interns it in the unnamed namespace."""
    if name is None:
        name = ns
        ns = u""
    return _ns_registry.find_or_make(ns).intern_or_make(name)
def get_var_if_defined(ns, name, els=None):
    """Look up a Var without creating it; return ``els`` when absent."""
    w_ns = _ns_registry.get(ns, None)
    if w_ns is None:
        return els
    return w_ns.get(name, els)
class DefaultProtocolFn(NativeFn):
    """Fallback invoked when a polymorphic fn has no override for a type.

    Always raises a runtime error naming the type, fn and protocol.
    """
    def __init__(self, pfn):
        BaseCode.__init__(self)
        self._pfn = pfn
    def invoke(self, args):
        # Dispatch type is taken from the first argument.
        tp = args[0].type()
        assert isinstance(tp, object.Type)
        pfn = self._pfn
        if isinstance(pfn, PolymorphicFn):
            protocol = pfn._protocol
        elif isinstance(pfn, DoublePolymorphicFn):
            protocol = pfn._protocol
        else:
            assert False
        assert isinstance(protocol, Protocol)
        affirm(False, u"No override for " + tp._name + u" on " + self._pfn._name + u" in protocol " + protocol._name)
class Protocol(object.Object):
    """A named group of polymorphic fns plus the set of types satisfying it.

    _rev is bumped on every mutation so the JIT-elidable satisfies lookup
    can be memoized safely against a revision number.
    """
    _type = object.Type(u"pixie.stdlib.Protocol")
    _immutable_fields_ = ["_rev?"]
    def type(self):
        return Protocol._type
    def __init__(self, name):
        self._name = name
        self._polyfns = {}
        self._satisfies = {}
        self._rev = 0
    def add_method(self, pfn):
        self._polyfns[pfn] = pfn
    def add_satisfies(self, tp):
        self._satisfies[tp] = tp
        self._rev += 1
    @elidable_promote()
    def _get_satisfies(self, tp, rev):
        # rev is unused in the body: it only keys the elidable memoization so
        # cached answers are invalidated whenever the protocol changes.
        return tp in self._satisfies
    def satisfies(self, tp):
        return self._get_satisfies(tp, self._rev)
class PolymorphicFn(BaseCode):
    """A fn dispatching on the type of its first argument.

    Dispatch walks the type's parent chain and any extended protocols;
    results are memoized in _fn_cache, which is invalidated (together with
    a _rev bump, for the JIT-elidable lookup) on every mutation.
    """
    _type = object.Type(u"pixie.stdlib.PolymorphicFn")
    def type(self):
        return PolymorphicFn._type
    _immutable_fields_ = ["_rev?"]
    def __init__(self, name, protocol):
        BaseCode.__init__(self)
        self._name = name
        self._dict = {}
        # stored separately to allow ordered extending (e.g. more general protocols later)
        self._protos = []
        self._rev = 0
        self._protocol = protocol
        self._default_fn = DefaultProtocolFn(self)
        self._fn_cache = {}
        protocol.add_method(self)
    def extend(self, tp, fn):
        """Register *fn* as the implementation for type (or protocol) *tp*."""
        self._dict[tp] = fn
        if isinstance(tp, Protocol):
            self._protos.append(tp)
        self._rev += 1
        self._fn_cache = {}
        self._protocol.add_satisfies(tp)
        ## We have to special case this so that the GC doesn't go nuts trying to do a ton during
        ## collection.
        self.maybe_mark_finalizer(tp)
    def maybe_mark_finalizer(self, tp):
        ## Gets overridden in stdlib
        pass
    def _find_parent_fn(self, tp):
        ## Search the entire object tree to find the function to execute
        assert isinstance(tp, object.Type)
        find_tp = tp
        while True:
            # Exact type match wins over protocol matches at each level.
            result = self._dict.get(find_tp, None)
            if result is not None:
                return result
            for proto in self._protos:
                if proto.satisfies(find_tp):
                    return self._dict[proto]
            find_tp = find_tp._parent
            if find_tp is None:
                break
        return self._default_fn
    def set_default_fn(self, fn):
        self._default_fn = fn
        self._rev += 1
        self._fn_cache = {}
    @elidable_promote()
    def get_protocol_fn(self, tp, rev):
        # rev keys the elidable memoization; the cache itself lives in
        # _fn_cache and is rebuilt lazily after every invalidation.
        fn = self._fn_cache.get(tp, None)
        if fn is None:
            fn = self._find_parent_fn(tp)
            self._fn_cache[tp] = fn
        return promote(fn)
    def invoke(self, args):
        affirm(len(args) >= 1, u"Wrong number of args")
        a = args[0].type()
        fn = self.get_protocol_fn(a, self._rev)
        try:
            return fn.invoke(args)
        except object.WrappedException as ex:
            # Annotate the pixie-level stack trace before re-raising.
            ex._ex._trace.append(object.PolymorphicCodeInfo(self._name, args[0].type()))
            raise
class DoublePolymorphicFn(BaseCode):
    """A function that is polymorphic on the first two arguments.

    Dispatch is a two-level dict lookup (type of arg0, then type of arg1),
    falling back to _default_fn; _rev keys the JIT-elidable lookup.
    """
    _type = object.Type(u"pixie.stdlib.DoublePolymorphicFn")
    def type(self):
        # BUGFIX: previously returned DefaultProtocolFn._type, so instances
        # reported the wrong pixie type; return this class's own Type.
        return DoublePolymorphicFn._type
    _immutable_fields_ = ["_rev?"]
    def __init__(self, name, protocol):
        BaseCode.__init__(self)
        self._name = name
        self._dict = {}
        self._rev = 0
        self._protocol = protocol
        self._default_fn = DefaultProtocolFn(self)
        protocol.add_method(self)
    def extend2(self, tp1, tp2, fn):
        """Register *fn* for the (tp1, tp2) pair of first/second arg types."""
        d1 = self._dict.get(tp1, None)
        if d1 is None:
            d1 = {}
            self._dict[tp1] = d1
        d1[tp2] = fn
        self._rev += 1
        self._protocol.add_satisfies(tp1)
    def set_default_fn(self, fn):
        self._default_fn = fn
        self._rev += 1
    @elidable_promote()
    def get_fn(self, tp1, tp2, _rev):
        # _rev keys the elidable memoization against mutation.
        d1 = self._dict.get(tp1, None)
        if d1 is None:
            return self._default_fn
        fn = d1.get(tp2, self._default_fn)
        return promote(fn)
    def invoke(self, args):
        affirm(len(args) >= 2, u"DoublePolymorphicFunctions take at least two args")
        a = args[0].type()
        b = args[1].type()
        fn = self.get_fn(a, b, self._rev)
        return fn.invoke(args)
# class ElidableFn(object.Object):
# _type = object.Type(u"pixie.stdlib.ElidableFn")
# __immutable_fields__ = ["_boxed_fn"]
# def type(self):
# return ElidableFn._type
#
# def __init__(self, boxed_fn):
# self._boxed_fn = boxed_fn
#
# @elidable
# def _elidable_invoke_0(self, fn):
# return self._boxed_fn.invoke([])
#
# @elidable
# def _elidable_invoke_1(self, fn, arg0):
# return self._boxed_fn.invoke([arg0])
#
# @elidable
# def _elidable_invoke_2(self, fn, arg0, arg1):
# return self._boxed_fn.invoke([arg0, arg1])
#
#
# def invoke(self, args):
# largs = jit.promote(len(args))
# fn = self._boxed_fn.promote()
# if largs == 0:
# return self._elidable_invoke_0(fn).promote()
# elif largs == 1:
# return self._elidable_invoke_1(fn, args[0].promote()).promote()
# elif largs == 2:
# return self._elidable_invoke_2(fn, args[0].promote(), args[1].promote()).promote()
# affirm(False, u"Too many args to Elidable Fn")
def munge(s):
    """Translate pixie identifier characters into a Python-safe name.

    '-' becomes '_', '?' becomes '_QMARK_' and '!' becomes '_BANG_'.
    """
    out = s
    for ch, repl in (("-", "_"), ("?", "_QMARK_"), ("!", "_BANG_")):
        out = out.replace(ch, repl)
    return out
import inspect
def defprotocol(ns, name, methods):
    """Define a protocol in the given namespace with the given name and methods, vars will
    be created in the namespace for the protocol and methods. This function will dump
    variables for the created protocols/methods in the globals() where this function is called."""
    ns = unicode(ns)
    name = unicode(name)
    # Python 2 map() returns a list, so methods can be iterated below.
    methods = map(unicode, methods)
    # Frame hack: grab the *caller's* module globals so the protocol and
    # method objects appear as module-level names where defprotocol is used.
    gbls = inspect.currentframe().f_back.f_globals
    proto = Protocol(name)
    intern_var(ns, name).set_root(proto)
    gbls[munge(name)] = proto
    for method in methods:
        poly = PolymorphicFn(method, proto)
        intern_var(ns, method).set_root(poly)
        gbls[munge(method)] = poly
def assert_type(x, tp):
    """Affirm that *x* is an instance of *tp*, then hand it back unchanged."""
    is_expected = isinstance(x, tp)
    affirm(is_expected, u"Fatal Error, this should never happen")
    return x
## PYTHON FLAGS
CO_VARARGS = 0x4
def wrap_fn(fn, tp=object.Object):
    """Converts a native Python function into a pixie function.

    Functions whose name ends in "__args" are treated as variadic and get
    the raw argument list; otherwise the generated wrapper arity-checks the
    call against the function's declared argument count (0-4 supported).
    ``tp`` is currently unused in the body; it is kept for interface
    compatibility with callers such as ``extend``.
    """
    docstring = unicode(fn.__doc__) if fn.__doc__ else u""
    def as_native_fn(f):
        return type("W" + fn.__name__, (NativeFn,), {"inner_invoke": f, "_doc": docstring})()
    def as_variadic_fn(f):
        # BUGFIX: strip the "__args" suffix from the generated class name.
        # The previous slice ([:len("__args")]) kept only the first six
        # characters of the name instead of dropping the suffix.
        return type("W" + fn.__name__[:-len("__args")], (NativeFn,), {"inner_invoke": f, "_doc": docstring})()
    code = fn.func_code
    if fn.__name__.endswith("__args"):
        return as_variadic_fn(lambda self, args: fn(args))
    fn_name = unicode(getattr(fn, "__real_name__", fn.__name__))
    if code.co_flags & CO_VARARGS:
        raise Exception("Variadic functions not supported by wrap")
    else:
        argc = code.co_argcount
        if argc == 0:
            def wrapped_fn(self, args):
                affirm(len(args) == 0, u"Expected 0 arguments to " + fn_name)
                try:
                    return fn()
                except object.WrappedException as ex:
                    ex._ex._trace.append(object.NativeCodeInfo(fn_name))
                    raise
            return as_native_fn(wrapped_fn)
        if argc == 1:
            def wrapped_fn(self, args):
                affirm(len(args) == 1, u"Expected 1 arguments to " + fn_name)
                try:
                    return fn(args[0])
                except object.WrappedException as ex:
                    ex._ex._trace.append(object.NativeCodeInfo(fn_name))
                    raise
            return as_native_fn(wrapped_fn)
        if argc == 2:
            def wrapped_fn(self, args):
                affirm(len(args) == 2, u"Expected 2 arguments to " + fn_name)
                try:
                    return fn(args[0], args[1])
                except object.WrappedException as ex:
                    ex._ex._trace.append(object.NativeCodeInfo(fn_name))
                    raise
            return as_native_fn(wrapped_fn)
        if argc == 3:
            def wrapped_fn(self, args):
                affirm(len(args) == 3, u"Expected 3 arguments to " + fn_name)
                try:
                    return fn(args[0], args[1], args[2])
                except object.WrappedException as ex:
                    ex._ex._trace.append(object.NativeCodeInfo(fn_name))
                    raise
            return as_native_fn(wrapped_fn)
        if argc == 4:
            def wrapped_fn(self, args):
                affirm(len(args) == 4, u"Expected 4 arguments to " + fn_name)
                try:
                    return fn(args[0], args[1], args[2], args[3])
                except object.WrappedException as ex:
                    ex._ex._trace.append(object.NativeCodeInfo(fn_name))
                    raise
            return as_native_fn(wrapped_fn)
        assert False, "implement more"
def extend(pfn, tp1, tp2=None):
    """Decorator: extend the polymorphic fn *pfn* to the given pixie Type.

    Accepts either a pixie Type directly, or a python class carrying a
    ``_type`` attribute. The decorated function is wrapped via wrap_fn;
    when *tp2* is supplied, registration targets a DoublePolymorphicFn.
    """
    if isinstance(tp1, type):
        assert_tp = tp1
        tp1 = tp1._type
    else:
        assert_tp = object.Object
    def extend_inner(fn):
        wrapped = wrap_fn(fn, assert_tp)
        if tp2 is None:
            pfn.extend(tp1, wrapped)
        else:
            pfn.extend2(tp1, tp2, wrapped)
        return pfn
    return extend_inner
def as_var(ns, name=None):
    """Decorator: intern a var and set its root to the decorated function.

    With one argument the namespace defaults to pixie.stdlib and the
    argument is the var name. Non-Object callables are wrapped via wrap_fn
    before being installed as the var's root.
    """
    if name is None:
        name, ns = ns, "pixie.stdlib"
    if not isinstance(name, unicode):
        name = unicode(name)
    if not isinstance(ns, unicode):
        ns = unicode(ns)
    var = intern_var(ns, name)
    def with_fn(fn):
        fn.__real_name__ = name
        wrapped = fn if isinstance(fn, object.Object) else wrap_fn(fn)
        var.set_root(wrapped)
        return wrapped
    return with_fn
def returns(type):
    """Tags a var as for unwrapping in rt. When rt imports this var it will be
    automatically converted to this type."""
    def tag(fn):
        fn._returns = type
        return fn
    return tag
class bindings(py_object):
    """Context manager that pushes a dynamic-binding frame.

    Constructor args alternate var, value, var, value, ...; each var gets
    its value inside the frame, and the frame is popped (restoring prior
    values) on exit.
    """
    def __init__(self, *args):
        self._args = list(args)
    def __enter__(self):
        _dynamic_vars.push_binding_frame()
        # Pairwise walk: args[x] is a dynamic var, args[x + 1] its new value.
        for x in range(0, len(self._args), 2):
            self._args[x].set_value(self._args[x + 1])
    def __exit__(self, exc_type, exc_val, exc_tb):
        _dynamic_vars.pop_binding_frame()
def init():
    """Create the module-global DynamicVars store used by `bindings`."""
    globals()["_dynamic_vars"] = DynamicVars()
|
kidaa/pixie
|
pixie/vm/code.py
|
Python
|
lgpl-3.0
| 28,946
|
from utils.commands import ErrorHandlingCommand
from django.conf import settings
from utils.amazon import default_s3_store
from videos.models import Video, VIDEO_TYPE_FLV, VIDEO_TYPE_HTML5
import urllib
import os
import commands
import sys
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.db.models import ObjectDoesNotExist
VIDEO_UPLOAD_PATH = getattr(settings, 'VIDEO_UPLOAD_PATH', \
os.path.join(settings.MEDIA_ROOT, 'videos'))
VIDEO_THUMBNAILS_FOLDER = getattr(settings, 'VIDEO_THUMBNAILS_PATH', 'videos/thumbnails/')
THUMBNAILS_PATH = os.path.join(settings.MEDIA_ROOT, VIDEO_THUMBNAILS_FOLDER)
class Command(ErrorHandlingCommand):
def handle(self, *args, **options):
print 'Run load thumbnail command'
self.verbosity = int(options.get('verbosity', 1))
self.s3_store = self.init_s3()
if not os.path.exists(VIDEO_UPLOAD_PATH):
os.makedirs(VIDEO_UPLOAD_PATH)
if not os.path.exists(THUMBNAILS_PATH):
os.makedirs(THUMBNAILS_PATH)
qs = Video.objects.filter(thumbnail='', videourl__original=True, videourl__type__in=[VIDEO_TYPE_FLV, VIDEO_TYPE_HTML5])
for video in qs:
self.print_to_console(u'Handling %s' % video.__unicode__())
try:
video_url = video.videourl_set.filter(original=True)[:1].get()
except ObjectDoesNotExist:
continue
path = self.get_file_path(video, video_url)
if not os.path.exists(path):
self.print_to_console(u'Saving...')
urllib.urlretrieve(video_url.url, path)
self.print_to_console(u'Video saved.')
else:
self.print_to_console(u'File exist.')
self.get_thumbnail(video, path)
self.print_to_console(u'-----------------')
#--- Save original thumbnails to S3 Store ---
self.print_to_console(u'Save original thumbnails to S3 Store...')
qs = Video.objects.exclude(thumbnail='').filter(s3_thumbnail='')
for video in qs:
self.print_to_console(u'Handling %s' % video.__unicode__())
name = video.thumbnail.strip('/').split('/')[-1]
cf = ContentFile(urllib.urlopen(video.thumbnail).read())
video.s3_thumbnail.save('%s/%s' % (video.video_id, name), cf, True)
def init_s3(self):
if not default_s3_store:
raise ImproperlyConfigured('Have not settings for thumbnails uploading to S3 Store.')
return default_s3_store
def get_thumbnail(self, video, path):
self.print_to_console(u'Get thumbnail...')
grabimage = "ffmpeg -y -i %s -vframes 1 -ss 00:00:%s -an -vcodec png -f rawvideo %s"
thumbnailfilename = "%s.png" % video.video_id
thumbnailpath = os.path.normpath(os.path.join(THUMBNAILS_PATH, thumbnailfilename))
grab_result = 'Command is not runned yet'
try:
grab_result = commands.getoutput(grabimage % (path, 10, thumbnailpath))
if not os.path.exists(thumbnailpath):
raise Exception('Error in converting: %s' % grab_result)
if not os.path.getsize(thumbnailpath):
grab_result = commands.getoutput(grabimage % (path, 5, thumbnailpath))
self.print_to_console(u'Saving in S3 Store...')
cf = ContentFile(open(thumbnailpath, 'rb').read())
video.s3_thumbnail.save(thumbnailfilename, cf, True)
video.thumbnail = video.s3_thumbnail.url
video.save()
os.remove(thumbnailpath)
os.remove(path)
except:
if settings.DEBUG:
raise
self.handle_error(sys.exc_info())
def get_file_path(self, video, video_url):
type = video_url.url.split('.')[-1]
name = '%s.%s' % (video.video_id, type)
return os.path.join(VIDEO_UPLOAD_PATH, name)
|
ofer43211/unisubs
|
apps/videos/management/commands/load_thumbnails.py
|
Python
|
agpl-3.0
| 4,203
|
__author__ = 'joon'
import sys
sys.path.insert(0, 'src')
sys.path.insert(0, 'lib')
sys.path.insert(0, 'ResearchTools')
from imports.basic_modules import *
from imports.ResearchTools import *
from imports.libmodules import *
def config_generate(control, conf, EXP_PHASE):
    """Build the guide-generation configuration.

    Validates that the seed ('g_*'), saliency ('s_*') and top-level test
    dataset/datatype settings agree, derives the seed and saliency
    sub-configurations, and computes a compact control token by removing
    entries that match the defaults below.

    Returns (control, control_token, control_seed, control_seed_token,
    control_saliency, control_saliency_token, conf).
    """
    assert (EXP_PHASE == 'guide-generate')
    # The three views of the test dataset/datatype must be consistent.
    assert (control['g_test_dataset'] == control['s_test_dataset'])
    assert (control['test_dataset'] == control['s_test_dataset'])
    assert (control['g_test_datatype'] == control['s_test_datatype'])
    assert (control['test_datatype'] == control['s_test_datatype'])
    control_seed = subcontrol(control, 'g')
    control_saliency = subcontrol(control, 's')
    # Local imports: seed/saliency configs are only needed in this phase.
    from seed.config import config_test as config_test_seed
    from saliency.config import config_test as config_test_saliency
    control_seed, _, control_seed_token, _ = config_test_seed(control_seed, conf, 'seed-test')
    control_saliency, control_saliency_token, _ = config_test_saliency(control_saliency, conf, 'saliency-test')
    defaults = dict(
        # seed
        g_init='VGG_ILSVRC_16_layers',
        g_dataset='voc12train_aug',
        g_datatype='Segmentation',
        g_base_lr=0.001,
        g_batch_size=15,
        g_balbatch='clsbal',
        g_test_iter=8000,
        g_test_dataset='voc12train_aug',
        g_test_datatype='Segmentation',
        g_test_ranking='none',
        g_test_interpord=1,
        g_test_gtcls='use',
        # SAL
        s_net='DeepLabv2_ResNet',
        s_dataset='MSRA',
        s_datatype='NP',
        s_test_dataset='voc12train_aug',
        s_test_datatype='Segmentation',
        gtcls='use',
        seedthres=50,
        salthres=50,
        test_dataset='voc12train_aug',
        test_datatype='Segmentation',
    )
    # Drop default-valued entries so the token only names the deviations.
    control_token = control.copy()
    for ky in defaults:
        if control_token[ky] == defaults[ky]:
            control_token.pop(ky)
    conf['EXP_PHASE'] = EXP_PHASE
    load_pascal_conf(control, conf)
    pprint.pprint(conf)
    pprint.pprint(control)
    return control, control_token, control_seed, control_seed_token, control_saliency, control_saliency_token, conf
|
coallaoh/GuidedLabelling
|
src/guide_generation/config.py
|
Python
|
mit
| 2,151
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A word-counting workflow."""
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import re
from past.builtins import unicode
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
class WordExtractingDoFn(beam.DoFn):
  """Parse each line of input text into words."""
  def __init__(self):
    # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
    # super(WordExtractingDoFn, self).__init__()
    beam.DoFn.__init__(self)
    self.words_counter = Metrics.counter(self.__class__, 'words')
    self.word_lengths_counter = Metrics.counter(self.__class__, 'word_lengths')
    self.word_lengths_dist = Metrics.distribution(
        self.__class__, 'word_len_dist')
    self.empty_line_counter = Metrics.counter(self.__class__, 'empty_lines')

  def process(self, element):
    """Return the words of one line of text, updating word/line metrics.

    Args:
      element: the element being processed (a line of text).

    Returns:
      The list of words found on the line (empty for a blank line).
    """
    line = element.strip()
    if not line:
      self.empty_line_counter.inc(1)
    tokens = re.findall(r'[\w\']+', line, re.UNICODE)
    for token in tokens:
      self.words_counter.inc()
      self.word_lengths_counter.inc(len(token))
      self.word_lengths_dist.update(len(token))
    return tokens
def run(argv=None, save_main_session=True):
  """Main entry point; defines and runs the wordcount pipeline.

  Args:
    argv: command-line arguments to parse (defaults to sys.argv).
    save_main_session: if True, pickles the __main__ session so DoFns can
      use module-level imports when executed on remote workers.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--input',
      dest='input',
      default='gs://dataflow-samples/shakespeare/kinglear.txt',
      help='Input file to process.')
  parser.add_argument(
      '--output',
      dest='output',
      required=True,
      help='Output file to write results to.')
  known_args, pipeline_args = parser.parse_known_args(argv)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
  p = beam.Pipeline(options=pipeline_options)
  # Read the text file[pattern] into a PCollection.
  lines = p | 'read' >> ReadFromText(known_args.input)
  # Count the occurrences of each word.
  def count_ones(word_ones):
    # (word, iterable-of-1s) -> (word, total occurrences)
    (word, ones) = word_ones
    return (word, sum(ones))
  counts = (
      lines
      | 'split' >>
      (beam.ParDo(WordExtractingDoFn()).with_output_types(unicode))
      | 'pair_with_one' >> beam.Map(lambda x: (x, 1))
      | 'group' >> beam.GroupByKey()
      | 'count' >> beam.Map(count_ones))
  # Format the counts into a PCollection of strings.
  def format_result(word_count):
    (word, count) = word_count
    return '%s: %d' % (word, count)
  output = counts | 'format' >> beam.Map(format_result)
  # Write the output using a "Write" transform that has side effects.
  # pylint: disable=expression-not-assigned
  output | 'write' >> WriteToText(known_args.output)
  result = p.run()
  result.wait_until_finish()
  # Do not query metrics when creating a template which doesn't run
  if (not hasattr(result, 'has_job')    # direct runner
      or result.has_job):               # not just a template creation
    empty_lines_filter = MetricsFilter().with_name('empty_lines')
    query_result = result.metrics().query(empty_lines_filter)
    if query_result['counters']:
      empty_lines_counter = query_result['counters'][0]
      logging.info('number of empty lines: %d', empty_lines_counter.result)
    word_lengths_filter = MetricsFilter().with_name('word_len_dist')
    query_result = result.metrics().query(word_lengths_filter)
    if query_result['distributions']:
      word_lengths_dist = query_result['distributions'][0]
      logging.info('average word length: %d', word_lengths_dist.result.mean)
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
|
iemejia/incubator-beam
|
sdks/python/apache_beam/examples/wordcount.py
|
Python
|
apache-2.0
| 5,046
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop RoboUser's secondary-major fields and refresh the help_text and
    constraints on the cell and magnetic fields.

    Auto-generated Django migration; edit with care -- it must stay
    consistent with the recorded migration history.
    """
    dependencies = [
        ('robocrm', '0019_auto_20141021_1157'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='robouser',
            name='sec_major_one',
        ),
        migrations.RemoveField(
            model_name='robouser',
            name='sec_major_two',
        ),
        migrations.AlterField(
            model_name='robouser',
            name='cell',
            field=models.DecimalField(help_text='Cell Phone # if you wish to provide it to Officers', blank=True, decimal_places=0, null=True, max_digits=10),
        ),
        migrations.AlterField(
            model_name='robouser',
            name='magnetic',
            field=models.CharField(help_text='9 Character Magnetic Card ID(found on Student ID)', max_length=9, null=True, blank=True),
        ),
    ]
|
CMU-Robotics-Club/roboticsclub.org
|
robocrm/migrations/0020_auto_20141027_0145.py
|
Python
|
mit
| 976
|
from oscar.apps.catalogue import config
class CatalogueConfig(config.CatalogueConfig):
    """App config overriding Oscar's catalogue app with the demo's fork."""
    # Points Django at the local forked app package instead of Oscar's.
    name = 'demo.apps.catalogue'
|
pgovers/oscar-wagtail-demo
|
demo/apps/catalogue/config.py
|
Python
|
mit
| 122
|
"""
Views used by XQueue certificate generation.
"""
import json
import logging
from django.contrib.auth.models import User
from django.db import transaction
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from opaque_keys.edx.keys import CourseKey
import dogstats_wrapper as dog_stats_api
from capa.xqueue_interface import XQUEUE_METRIC_NAME
from certificates.api import generate_user_certificates
from certificates.models import (
CertificateStatuses,
ExampleCertificate,
GeneratedCertificate,
certificate_status_for_student
)
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.json_request import JsonResponse, JsonResponseBadRequest
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@csrf_exempt
def request_certificate(request):
    """Request the on-demand creation of a certificate for some user, course.

    A request doesn't imply a guarantee that such a creation will take place.
    We intentionally use the same machinery as is used for doing certification
    at the end of a course run, so that we can be sure users get graded and
    then if and only if they pass, do they get a certificate issued.

    Returns a JSON body {'add_status': <status>} for authenticated POSTs;
    anonymous POSTs get {'add_status': 'ERRORANONYMOUSUSER'}.
    """
    if request.method == "POST":
        if request.user.is_authenticated():
            username = request.user.username
            # NOTE(review): re-fetches the User by username even though
            # request.user is that user -- presumably to obtain a fresh
            # model instance; confirm before simplifying.
            student = User.objects.get(username=username)
            course_key = CourseKey.from_string(request.POST.get('course_id'))
            course = modulestore().get_course(course_key, depth=2)
            status = certificate_status_for_student(student, course_key)['status']
            # Only (re)generate when no usable certificate exists yet.
            if status in [CertificateStatuses.unavailable, CertificateStatuses.notpassing, CertificateStatuses.error]:
                log_msg = u'Grading and certification requested for user %s in course %s via /request_certificate call'
                log.info(log_msg, username, course_key)
                status = generate_user_certificates(student, course_key, course=course)
            return HttpResponse(json.dumps({'add_status': status}), content_type='application/json')
        return HttpResponse(json.dumps({'add_status': 'ERRORANONYMOUSUSER'}), content_type='application/json')
@csrf_exempt
def update_certificate(request):
    """
    Will update GeneratedCertificate for a new certificate or
    modify an existing certificate entry.

    See models.py for a state diagram of certificate states

    This view should only ever be accessed by the xqueue server
    """
    status = CertificateStatuses
    if request.method == "POST":
        xqueue_body = json.loads(request.POST.get('xqueue_body'))
        xqueue_header = json.loads(request.POST.get('xqueue_header'))
        try:
            course_key = CourseKey.from_string(xqueue_body['course_id'])
            # The lms_key in the header must match the key the LMS stored
            # when it submitted the generation task.
            cert = GeneratedCertificate.eligible_certificates.get(
                user__username=xqueue_body['username'],
                course_id=course_key,
                key=xqueue_header['lms_key'])
        except GeneratedCertificate.DoesNotExist:
            log.critical(
                'Unable to lookup certificate\n'
                'xqueue_body: %s\n'
                'xqueue_header: %s',
                xqueue_body,
                xqueue_header
            )
            return HttpResponse(json.dumps({
                'return_code': 1,
                'content': 'unable to lookup key'
            }), content_type='application/json')
        if 'error' in xqueue_body:
            cert.status = status.error
            if 'error_reason' in xqueue_body:
                # Hopefully we will record a meaningful error
                # here if something bad happened during the
                # certificate generation process
                #
                # example:
                # (aamorm BerkeleyX/CS169.1x/2012_Fall)
                # <class 'simples3.bucket.S3Error'>:
                # HTTP error (reason=error(32, 'Broken pipe'), filename=None) :
                # certificate_agent.py:175
                cert.error_reason = xqueue_body['error_reason']
        else:
            # Success path: only generating -> downloadable and
            # deleting -> deleted are valid transitions here.
            if cert.status == status.generating:
                cert.download_uuid = xqueue_body['download_uuid']
                cert.verify_uuid = xqueue_body['verify_uuid']
                cert.download_url = xqueue_body['url']
                cert.status = status.downloadable
            elif cert.status in [status.deleting]:
                cert.status = status.deleted
            else:
                log.critical(
                    'Invalid state for cert update: %s', cert.status
                )
                return HttpResponse(
                    json.dumps({
                        'return_code': 1,
                        'content': 'invalid cert status'
                    }),
                    content_type='application/json'
                )
        dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[
            u'action:update_certificate',
            u'course_id:{}'.format(cert.course_id)
        ])
        cert.save()
        # NOTE(review): this return is inside the POST branch; a non-POST
        # request falls through and the view returns None -- confirm this
        # endpoint is only ever hit with POST by the xqueue server.
        return HttpResponse(json.dumps({'return_code': 0}),
                            content_type='application/json')
@csrf_exempt
@require_POST
def update_example_certificate(request):
    """Callback from the XQueue that updates example certificates.

    Example certificates are used to verify that certificate
    generation is configured correctly for a course.

    Unlike other certificates, example certificates
    are not associated with a particular user or displayed
    to students.

    For this reason, we need a different end-point to update
    the status of generated example certificates.

    Arguments:
        request (HttpRequest)

    Returns:
        HttpResponse (200): Status was updated successfully.
        HttpResponse (400): Invalid parameters.
        HttpResponse (403): Rate limit exceeded for bad requests.
        HttpResponse (404): Invalid certificate identifier or access key.

    """
    log.info(u"Received response for example certificate from XQueue.")
    rate_limiter = BadRequestRateLimiter()
    # Check the parameters and rate limits
    # If these are invalid, return an error response.
    if rate_limiter.is_rate_limit_exceeded(request):
        log.info(u"Bad request rate limit exceeded for update example certificate end-point.")
        return HttpResponseForbidden("Rate limit exceeded")
    if 'xqueue_body' not in request.POST:
        log.info(u"Missing parameter 'xqueue_body' for update example certificate end-point")
        rate_limiter.tick_bad_request_counter(request)
        return JsonResponseBadRequest("Parameter 'xqueue_body' is required.")
    if 'xqueue_header' not in request.POST:
        log.info(u"Missing parameter 'xqueue_header' for update example certificate end-point")
        rate_limiter.tick_bad_request_counter(request)
        return JsonResponseBadRequest("Parameter 'xqueue_header' is required.")
    try:
        xqueue_body = json.loads(request.POST['xqueue_body'])
        xqueue_header = json.loads(request.POST['xqueue_header'])
    except (ValueError, TypeError):
        log.info(u"Could not decode params to example certificate end-point as JSON.")
        rate_limiter.tick_bad_request_counter(request)
        return JsonResponseBadRequest("Parameters must be JSON-serialized.")
    # Attempt to retrieve the example certificate record
    # so we can update the status.
    try:
        # The XQueue 'username' field carries the example certificate's uuid.
        uuid = xqueue_body.get('username')
        access_key = xqueue_header.get('lms_key')
        cert = ExampleCertificate.objects.get(uuid=uuid, access_key=access_key)
    except ExampleCertificate.DoesNotExist:
        # If we are unable to retrieve the record, it means the uuid or access key
        # were not valid. This most likely means that the request is NOT coming
        # from the XQueue. Return a 404 and increase the bad request counter
        # to protect against a DDOS attack.
        log.info(u"Could not find example certificate with uuid '%s' and access key '%s'", uuid, access_key)
        rate_limiter.tick_bad_request_counter(request)
        raise Http404
    if 'error' in xqueue_body:
        # If an error occurs, save the error message so we can fix the issue.
        error_reason = xqueue_body.get('error_reason')
        cert.update_status(ExampleCertificate.STATUS_ERROR, error_reason=error_reason)
        log.warning(
            (
                u"Error occurred during example certificate generation for uuid '%s'. "
                u"The error response was '%s'."
            ), uuid, error_reason
        )
    else:
        # If the certificate generated successfully, save the download URL
        # so we can display the example certificate.
        download_url = xqueue_body.get('url')
        if download_url is None:
            rate_limiter.tick_bad_request_counter(request)
            log.warning(u"No download URL provided for example certificate with uuid '%s'.", uuid)
            return JsonResponseBadRequest(
                "Parameter 'download_url' is required for successfully generated certificates."
            )
        else:
            cert.update_status(ExampleCertificate.STATUS_SUCCESS, download_url=download_url)
            log.info("Successfully updated example certificate with uuid '%s'.", uuid)
    # Let the XQueue know that we handled the response
    return JsonResponse({'return_code': 0})
|
lduarte1991/edx-platform
|
lms/djangoapps/certificates/views/xqueue.py
|
Python
|
agpl-3.0
| 9,680
|
import os

# --- paths ---
DIR = os.path.abspath(os.path.dirname(__file__))
IMGDIR = os.path.join(DIR, 'imgs')
DATADIR = os.path.join(DIR, 'data')
# Ask for the participant name at import time; data is logged to data/<name>.
LOGFILENAME = raw_input("Participant: ")
LOGFILE = os.path.join(DATADIR, LOGFILENAME)

# --- display ---
DISPTYPE = 'pygame'
DISPSIZE = (1024, 768)

# --- validation points: 3x3 grid at 15%/50%/85% of the screen ---
PXY = [0.15, 0.5, 0.85]
CALIBPOINTS = [(int(px * DISPSIZE[0]), int(py * DISPSIZE[1]))
               for px in PXY for py in PXY]

# --- images: every file found in imgs/ ---
IMAGES = [os.path.join(IMGDIR, imgname) for imgname in os.listdir(IMGDIR)]

# --- light-dark trial counts ---
PUPTRIALS = 50
SACTRIALS = 50

# --- timing (presumably milliseconds -- confirm against the display/tracker API) ---
ITI = 1000
POINTTIME = 2000
IMGTIME = 10000
BASELINETIME = 200
PUPTRIALTIME = 2500

# --- trackers ---
TRACKERTYPE = 'eyelink'
DUMMYMODE = False
|
esdalmaijer/EyeTribe_test
|
experiment/constants.py
|
Python
|
gpl-3.0
| 726
|
from django.conf.urls import patterns, url
# NOTE(review): django.conf.urls.patterns() with string view names is
# deprecated since Django 1.8 and removed in 1.10 -- this module only works
# on older Django versions. Also note 'missatges' routes to the 'about'
# view; presumably a placeholder -- confirm.
urlpatterns = patterns('fpuf.utils.views',
    url(r'^about/$', 'about', name ="varis_about_about" ) ,
    url(r'^missatges/$', 'about', name ="varis_missatges_veure" ) ,
    url(r'^condicions/$', 'condicions', name ="varis_condicions_condicions" ) ,
)
|
ctrl-alt-d/fpuf
|
fpuf/utils/urls.py
|
Python
|
gpl-3.0
| 381
|
"""
libguestfs tools test utility functions.
"""
import logging
import signal
import os
import re
from autotest.client import os_dep, utils
from autotest.client.shared import error
import aexpect
import propcan
class LibguestfsCmdError(Exception):
    """
    Error of libguestfs-tool command.

    Carries the failure details and renders them via str().
    """
    def __init__(self, details=''):
        Exception.__init__(self)
        self.details = details

    def __str__(self):
        return str(self.details)
def lgf_cmd_check(cmd):
    """
    To check whether the cmd is supported on this host.

    :param cmd: the cmd to use a libguest tool.
    :return: None if the cmd is not exist, otherwise return its path.
    :raise: LibguestfsCmdError when cmd is not a known libguestfs tool.
    """
    # frozenset: O(1) membership test and makes the known-command set immutable.
    libguestfs_cmds = frozenset([
        'libguestfs-test-tool', 'guestfish', 'guestmount',
        'virt-alignment-scan', 'virt-cat', 'virt-copy-in',
        'virt-copy-out', 'virt-df', 'virt-edit',
        'virt-filesystems', 'virt-format', 'virt-inspector',
        'virt-list-filesystems', 'virt-list-partitions',
        'virt-ls', 'virt-make-fs', 'virt-rescue',
        'virt-resize', 'virt-sparsify', 'virt-sysprep',
        'virt-tar', 'virt-tar-in', 'virt-tar-out',
        'virt-win-reg', 'virt-inspector2'])
    if cmd not in libguestfs_cmds:
        raise LibguestfsCmdError(
            "Command %s is not supported by libguestfs yet." % cmd)
    try:
        # os_dep.command raises ValueError when the binary is not installed.
        return os_dep.command(cmd)
    except ValueError:
        logging.warning("You have not installed %s on this host.", cmd)
        return None
def lgf_command(cmd, ignore_status=True, debug=False, timeout=60):
    """
    Interface of libguestfs tools' commands.

    :param cmd: Command line to execute.
    :param ignore_status: when False, a non-zero exit raises via utils.run.
    :param debug: log the command line, exit status and output.
    :param timeout: seconds allowed for the command to run.
    :return: CmdResult object.
    :raise: LibguestfsCmdError if non-zero exit status
            and ignore_status=False
    """
    if debug:
        logging.debug("Running command %s in debug mode.", cmd)
    # Raise exception if ignore_status is False
    try:
        ret = utils.run(cmd, ignore_status=ignore_status,
                        verbose=debug, timeout=timeout)
    except error.CmdError, detail:
        # Re-wrap so callers only need to catch LibguestfsCmdError.
        raise LibguestfsCmdError(detail)
    if debug:
        logging.debug("status: %s", ret.exit_status)
        logging.debug("stdout: %s", ret.stdout.strip())
        logging.debug("stderr: %s", ret.stderr.strip())
    # Return CmdResult instance when ignore_status is True
    return ret
class LibguestfsBase(propcan.PropCanBase):
    """
    Base class of libguestfs tools.

    Properties (managed through propcan's __dict_set__/__dict_get__):
    ignore_status, debug, timeout, uri and lgf_exec (tool binary path).
    """
    __slots__ = ['ignore_status', 'debug', 'timeout', 'uri', 'lgf_exec']
    def __init__(self, lgf_exec="/bin/true", ignore_status=True,
                 debug=False, timeout=60, uri=None):
        init_dict = {}
        init_dict['ignore_status'] = ignore_status
        init_dict['debug'] = debug
        init_dict['timeout'] = timeout
        init_dict['uri'] = uri
        init_dict['lgf_exec'] = lgf_exec
        super(LibguestfsBase, self).__init__(init_dict)
    def set_ignore_status(self, ignore_status):
        """
        Enforce setting ignore_status as a boolean.
        """
        if bool(ignore_status):
            self.__dict_set__('ignore_status', True)
        else:
            self.__dict_set__('ignore_status', False)
    def set_debug(self, debug):
        """
        Accessor method for 'debug' property that logs message on change
        """
        if not self.INITIALIZED:
            # During construction: store silently, no transition logging.
            self.__dict_set__('debug', debug)
        else:
            current_setting = self.__dict_get__('debug')
            desired_setting = bool(debug)
            if not current_setting and desired_setting:
                self.__dict_set__('debug', True)
                logging.debug("Libguestfs debugging enabled")
            # current and desired could both be True
            if current_setting and not desired_setting:
                self.__dict_set__('debug', False)
                logging.debug("Libguestfs debugging disabled")
    def set_timeout(self, timeout):
        """
        Accessor method for 'timeout' property, timeout should be digit
        """
        # NOTE(review): `type(timeout) is int` deliberately excludes bool
        # and long; non-int values are coerced via int(str(...)) below.
        if type(timeout) is int:
            self.__dict_set__('timeout', timeout)
        else:
            try:
                timeout = int(str(timeout))
                self.__dict_set__('timeout', timeout)
            except ValueError:
                # Invalid value: keep the previous timeout, just log.
                logging.debug("Set timeout failed.")
    def get_uri(self):
        """
        Accessor method for 'uri' property that must exist
        """
        # self.get() would call get_uri() recursivly
        try:
            return self.__dict_get__('uri')
        except KeyError:
            return None
# There are two ways to call guestfish:
# 1. The Guestfish classes provided below (shell session)
# 2. The guestfs module provided in the system libguestfs package
class Guestfish(LibguestfsBase):

    """
    Execute guestfish, using a new guestfish shell each time.
    """

    __slots__ = []

    def __init__(self, disk_img=None, ro_mode=False,
                 libvirt_domain=None, inspector=False,
                 uri=None, mount_options=None, run_mode="interactive"):
        """
        Initialize guestfish command with options.

        :param disk_img: if it is not None, use option '-a disk'.
        :param ro_mode: only for disk_img. add option '--ro' if it is True.
        :param libvirt_domain: if it is not None, use option '-d domain'.
        :param inspector: guestfish mounts vm's disks automatically
        :param uri: guestfish's connect uri
        :param mount_options: Mount the named partition or logical volume
                              on the given mountpoint.
        :param run_mode: "remote" starts a listening server ('--listen');
                         "interactive" builds a normal command line.
        :raise LibguestfsCmdError: if guestfish is not installed.
        :raise AssertionError: if run_mode is not 'remote'/'interactive'.
        """
        guestfs_exec = "guestfish"
        if lgf_cmd_check(guestfs_exec) is None:
            raise LibguestfsCmdError
        if run_mode not in ['remote', 'interactive']:
            raise AssertionError("run_mode should be remote or interactive")
        # In remote mode only '--listen' is passed; disk/domain options are
        # expected to be issued later through the remote session.
        if run_mode == "remote":
            guestfs_exec += " --listen"
        else:
            if uri:
                guestfs_exec += " -c '%s'" % uri
            if disk_img:
                guestfs_exec += " -a '%s'" % disk_img
            if libvirt_domain:
                guestfs_exec += " -d '%s'" % libvirt_domain
            if ro_mode:
                guestfs_exec += " --ro"
            if inspector:
                guestfs_exec += " -i"
            if mount_options is not None:
                guestfs_exec += " --mount %s" % mount_options
        super(Guestfish, self).__init__(guestfs_exec)

    def complete_cmd(self, command):
        """
        Execute built-in command in a complete guestfish command
        (Not a guestfish session).
        command: guestfish [--options] [commands]

        :param command: guestfish built-in command text to append.
        :return: CmdResult from lgf_command.
        :raise LibguestfsCmdError: if no command was passed.
        """
        guestfs_exec = self.__dict_get__('lgf_exec')
        ignore_status = self.__dict_get__('ignore_status')
        debug = self.__dict_get__('debug')
        timeout = self.__dict_get__('timeout')
        if command:
            guestfs_exec += " %s" % command
            return lgf_command(guestfs_exec, ignore_status, debug, timeout)
        else:
            raise LibguestfsCmdError("No built-in command was passed.")
class GuestfishSession(aexpect.ShellSession):

    """
    A shell session of guestfish.
    """

    # Check output against list of known error-status strings
    ERROR_REGEX_LIST = ['libguestfs: error:\s*']

    def __init__(self, guestfs_exec=None, a_id=None, prompt=r"><fs>\s*"):
        """
        Initialize guestfish session server, or client if id set.

        :param guestfs_exec: path to guestfish executable
        :param a_id: ID of an already running server, if accessing a running
                     server, or None if starting a new one.
        :param prompt: Regular expression describing the shell's prompt line.
        """
        # aexpect tries to auto close session because no clients connected yet
        super(GuestfishSession, self).__init__(guestfs_exec, a_id,
                                               prompt=prompt,
                                               auto_close=False)

    def cmd_status_output(self, cmd, timeout=60, internal_timeout=None,
                          print_func=None):
        """
        Send a guestfish command and return its exit status and output.

        :param cmd: guestfish command to send
                    (must not contain newline characters)
        :param timeout: The duration (in seconds) to wait for the prompt to
                        return
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                           (should take a string parameter)
        :return: A tuple (status, output) where status is the exit status and
                 output is the output of cmd
        :raise ShellTimeoutError: Raised if timeout expires
        :raise ShellProcessTerminatedError: Raised if the shell process
                                            terminates while waiting for output
        :raise ShellStatusError: Raised if the exit status cannot be obtained
        :raise ShellError: Raised if an unknown error occurs
        """
        out = self.cmd_output(cmd, timeout, internal_timeout, print_func)
        # guestfish gives no numeric status; synthesize one by scanning the
        # output for known libguestfs error markers.
        for line in out.splitlines():
            if self.match_patterns(line, self.ERROR_REGEX_LIST) is not None:
                return 1, out
        return 0, out

    def cmd_result(self, cmd, ignore_status=False):
        """Mimic utils.run()"""
        exit_status, stdout = self.cmd_status_output(cmd)
        stderr = ''  # no way to retrieve this separately
        result = utils.CmdResult(cmd, stdout, stderr, exit_status)
        if not ignore_status and exit_status:
            raise error.CmdError(cmd, result,
                                 "Guestfish Command returned non-zero exit status")
        return result
class GuestfishRemote(object):
"""
Remote control of guestfish.
"""
# Check output against list of known error-status strings
ERROR_REGEX_LIST = ['libguestfs: error:\s*']
def __init__(self, guestfs_exec=None, a_id=None):
"""
Initialize guestfish session server, or client if id set.
:param guestfs_cmd: path to guestfish executable
:param a_id: guestfish remote id
"""
if a_id is None:
try:
ret = utils.run(guestfs_exec, ignore_status=False,
verbose=True, timeout=60)
except error.CmdError, detail:
raise LibguestfsCmdError(detail)
self.a_id = re.search("\d+", ret.stdout.strip()).group()
else:
self.a_id = a_id
def get_id(self):
return self.a_id
def cmd_status_output(self, cmd, ignore_status=None, verbose=None, timeout=60):
"""
Send a guestfish command and return its exit status and output.
:param cmd: guestfish command to send(must not contain newline characters)
:param timeout: The duration (in seconds) to wait for the prompt to return
:return: A tuple (status, output) where status is the exit status
and output is the output of cmd
:raise LibguestfsCmdError: Raised if commands execute failed
"""
guestfs_exec = "guestfish --remote=%s " % self.a_id
cmd = guestfs_exec + cmd
try:
ret = utils.run(cmd, ignore_status=ignore_status,
verbose=verbose, timeout=timeout)
except error.CmdError, detail:
raise LibguestfsCmdError(detail)
for line in self.ERROR_REGEX_LIST:
if re.search(line, ret.stdout.strip()):
raise LibguestfsCmdError(detail)
logging.debug("command: %s", cmd)
logging.debug("stdout: %s", ret.stdout.strip())
return 0, ret.stdout.strip()
def cmd(self, cmd, ignore_status=False):
"""Mimic utils.run()"""
exit_status, stdout = self.cmd_status_output(cmd)
stderr = '' # no way to retrieve this separately
result = utils.CmdResult(cmd, stdout, stderr, exit_status)
if not ignore_status and exit_status:
raise error.CmdError(cmd, result,
"Guestfish Command returned non-zero exit status")
return result
def cmd_result(self, cmd, ignore_status=False):
"""Mimic utils.run()"""
exit_status, stdout = self.cmd_status_output(cmd)
stderr = '' # no way to retrieve this separately
result = utils.CmdResult(cmd, stdout, stderr, exit_status)
if not ignore_status and exit_status:
raise error.CmdError(cmd, result,
"Guestfish Command returned non-zero exit status")
return result
class GuestfishPersistent(Guestfish):

    """
    Execute operations using persistent guestfish session.
    """

    __slots__ = ['session_id', 'run_mode']

    # Help detect leftover sessions
    SESSION_COUNTER = 0

    def __init__(self, disk_img=None, ro_mode=False,
                 libvirt_domain=None, inspector=False,
                 uri=None, mount_options=None, run_mode="interactive"):
        """
        Build the guestfish command line (via Guestfish) and open a
        persistent session, verifying it responds when interactive.

        :raise aexpect.ShellStatusError: if the new session does not
                                         answer the 'is-config' probe.
        """
        super(GuestfishPersistent, self).__init__(disk_img, ro_mode,
                                                  libvirt_domain, inspector,
                                                  uri, mount_options, run_mode)
        self.__dict_set__('run_mode', run_mode)

        if self.get('session_id') is None:
            # set_uri does not call when INITIALIZED = False
            # and no session_id passed to super __init__
            self.new_session()

        # Check whether guestfish session is prepared.
        guestfs_session = self.open_session()
        if run_mode != "remote":
            # 'is-config' is a cheap no-op query; any sane session answers it.
            status, output = guestfs_session.cmd_status_output('is-config', timeout=60)
            if status != 0:
                logging.debug("Persistent guestfish session is not responding.")
                raise aexpect.ShellStatusError(self.lgf_exec, 'is-config')
    def close_session(self):
        """
        If a persistent session exists, close it down.

        Tries the polite route first (guestfish 'quit'); if the process
        does not terminate, falls back to aexpect close(), escalating to
        SIGTERM. Keeps SESSION_COUNTER and the stored session_id in sync.
        """
        try:
            run_mode = self.get('run_mode')
            existing = self.open_session()
            # except clause exits function
            # Try to end session with inner command 'quit'
            try:
                existing.cmd("quit")
            # It should jump to exception followed normally
            except aexpect.ShellProcessTerminatedError:
                self.__class__.SESSION_COUNTER -= 1
                self.__dict_del__('session_id')
                return  # guestfish session was closed normally
            # Close with 'quit' did not respond
            # So close with aexpect functions
            if run_mode != "remote":
                if existing.is_alive():
                    # try nicely first
                    existing.close()
                    if existing.is_alive():
                        # Be mean, incase it's hung
                        existing.close(sig=signal.SIGTERM)
                # Keep count:
                self.__class__.SESSION_COUNTER -= 1
                self.__dict_del__('session_id')
        except LibguestfsCmdError:
            # Allow other exceptions to be raised
            pass  # session was closed already
    def new_session(self):
        """
        Open new session, closing any existing

        Chooses GuestfishRemote or GuestfishSession depending on
        run_mode and records the new session id on the instance.
        """
        # Accessors may call this method, avoid recursion
        # Must exist, can't be None
        guestfs_exec = self.__dict_get__('lgf_exec')
        self.close_session()
        # Always create new session
        run_mode = self.get('run_mode')
        if run_mode == "remote":
            new_session = GuestfishRemote(guestfs_exec)
        else:
            new_session = GuestfishSession(guestfs_exec)
        # Keep count
        self.__class__.SESSION_COUNTER += 1
        session_id = new_session.get_id()
        self.__dict_set__('session_id', session_id)
    def open_session(self):
        """
        Return session with session_id in this class.

        :return: a GuestfishRemote or GuestfishSession client attached
                 to the stored session id.
        :raise LibguestfsCmdError: if no session id is stored or the
                                   session can no longer be attached.
        """
        try:
            session_id = self.__dict_get__('session_id')
            run_mode = self.get('run_mode')
            if session_id:
                try:
                    if run_mode == "remote":
                        return GuestfishRemote(a_id=session_id)
                    else:
                        return GuestfishSession(a_id=session_id)
                except aexpect.ShellStatusError:
                    # session was already closed
                    self.__dict_del__('session_id')
                    raise LibguestfsCmdError(
                        "Open session '%s' failed." % session_id)
        except KeyError:
            raise LibguestfsCmdError("No session id.")
    # Inner command for guestfish should be executed in a guestfish session
    def inner_cmd(self, command):
        """
        Execute inner command of guestfish in a pesistent session.

        :param command: inner command to be executed.
        :return: CmdResult from the session's cmd_result().
        """
        session = self.open_session()
        # Allow to raise error by default.
        ignore_status = self.__dict_get__('ignore_status')
        return session.cmd_result(command, ignore_status=ignore_status)
def add_drive(self, filename):
"""
add-drive - add an image to examine or modify
This function is the equivalent of calling "add_drive_opts" with no
optional parameters, so the disk is added writable, with the format
being detected automatically.
"""
return self.inner_cmd("add-drive %s" % filename)
def add_drive_opts(self, filename, readonly=False, format=None,
iface=None, name=None):
"""
add-drive-opts - add an image to examine or modify.
This function adds a disk image called "filename" to the handle.
"filename" may be a regular host file or a host device.
"""
cmd = "add-drive-opts %s" % filename
if readonly:
cmd += " readonly:true"
else:
cmd += " readonly:false"
if format:
cmd += " format:%s" % format
if iface:
cmd += " iface:%s" % iface
if name:
cmd += " name:%s" % name
return self.inner_cmd(cmd)
def add_drive_ro(self, filename):
"""
add-ro/add-drive-ro - add a drive in snapshot mode (read-only)
This function is the equivalent of calling "add_drive_opts" with the
optional parameter "GUESTFS_ADD_DRIVE_OPTS_READONLY" set to 1, so the
disk is added read-only, with the format being detected automatically.
"""
return self.inner_cmd("add-drive-ro %s" % filename)
def add_domain(self, domain, libvirturi=None, readonly=False, iface=None,
live=False, allowuuid=False, readonlydisk=None):
"""
domain/add-domain - add the disk(s) from a named libvirt domain
This function adds the disk(s) attached to the named libvirt domain
"dom". It works by connecting to libvirt, requesting the domain and
domain XML from libvirt, parsing it for disks, and calling
"add_drive_opts" on each one.
"""
cmd = "add-domain %s" % domain
if libvirturi:
cmd += " libvirturi:%s" % libvirturi
if readonly:
cmd += " readonly:true"
else:
cmd += " readonly:false"
if iface:
cmd += " iface:%s" % iface
if live:
cmd += " live:true"
if allowuuid:
cmd += " allowuuid:true"
if readonlydisk:
cmd += " readonlydisk:%s" % readonlydisk
return self.inner_cmd(cmd)
    def run(self):
        """
        run/launch - launch the qemu subprocess

        Internally libguestfs is implemented by running a virtual machine
        using qemu.
        """
        return self.inner_cmd("launch")

    def df(self):
        """
        df - report file system disk space usage

        This command runs the "df" command to report disk space used.
        """
        return self.inner_cmd("df")

    def df_h(self):
        """
        df-h - report file system disk space usage (human readable)

        This command runs the "df -h" command to report disk space used in
        human-readable format.
        """
        return self.inner_cmd("df-h")

    def dd(self, src, dest):
        """
        dd - copy from source to destination using dd

        This command copies from one source device or file "src" to another
        destination device or file "dest". Normally you would use this to copy
        to or from a device or partition, for example to duplicate a filesystem.
        """
        return self.inner_cmd("dd %s %s" % (src, dest))

    def copy_size(self, src, dest, size):
        """
        copy-size - copy size bytes from source to destination using dd

        This command copies exactly "size" bytes from one source device or file
        "src" to another destination device or file "dest".
        """
        return self.inner_cmd("copy-size %s %s %s" % (src, dest, size))

    def list_partitions(self):
        """
        list-partitions - list the partitions

        List all the partitions detected on all block devices.
        """
        return self.inner_cmd("list-partitions")
    def mount(self, device, mountpoint):
        """
        mount - mount a guest disk at a position in the filesystem

        Mount a guest disk at a position in the filesystem.
        """
        return self.inner_cmd("mount %s %s" % (device, mountpoint))

    def mount_ro(self, device, mountpoint):
        """
        mount-ro - mount a guest disk, read-only

        This is the same as the "mount" command, but it mounts the
        filesystem with the read-only (*-o ro*) flag.
        """
        return self.inner_cmd("mount-ro %s %s" % (device, mountpoint))

    def mount_options(self, options, device, mountpoint):
        """
        mount-options - mount a guest disk with mount options

        Same as "mount", but the first argument is a comma-separated
        list of mount options passed through to the guest mount.
        """
        return self.inner_cmd("mount-options %s %s %s" % (options, device, mountpoint))

    def mounts(self):
        """
        mounts - show mounted filesystems

        This returns the list of currently mounted filesystems.
        """
        return self.inner_cmd("mounts")

    def mountpoints(self):
        """
        mountpoints - show mountpoints

        This call is similar to "mounts".
        That call returns a list of devices.
        """
        return self.inner_cmd("mountpoints")
def do_mount(self, mountpoint):
"""
do_mount - Automaticly mount
Mount a lvm or physical partation to '/'
"""
partition_type = self.params.get("partition_type")
if partition_type == "lvm":
vg_name = self.params.get("vg_name", "vol_test")
lv_name = self.params.get("lv_name", "vol_file")
device = "/dev/%s/%s" % (vg_name, lv_name)
logging.info("mount lvm partition...%s" % device)
elif partition_type == "physical":
pv_name = self.params.get("pv_name", "/dev/sdb")
device = pv_name + "1"
logging.info("mount physical partition...%s" % device)
self.mount(device, mountpoint)
    def read_file(self, path):
        """
        read-file - read a file

        This call returns the contents of the file "path" as a buffer.
        """
        return self.inner_cmd("read-file %s" % path)

    def cat(self, path):
        """
        cat - list the contents of a file

        Return the contents of the file named "path".
        """
        return self.inner_cmd("cat %s" % path)

    def write(self, path, content):
        """
        write - create a new file

        This call creates a file called "path". The content of the file
        is the string "content" (which can contain any 8 bit data).
        """
        return self.inner_cmd("write '%s' '%s'" % (path, content))

    def write_append(self, path, content):
        """
        write-append - append content to end of file

        This call appends "content" to the end of file "path".
        If "path" does not exist, then a new file is created.
        """
        return self.inner_cmd("write-append '%s' '%s'" % (path, content))
    def inspect_os(self):
        """
        inspect-os - inspect disk and return list of operating systems found

        This function uses other libguestfs functions and certain heuristics to
        inspect the disk(s) (usually disks belonging to a virtual machine),
        looking for operating systems.
        """
        return self.inner_cmd("inspect-os")

    def inspect_get_roots(self):
        """
        inspect-get-roots - return list of operating systems found by
        last inspection

        This function is a convenient way to get the list of root devices.
        """
        return self.inner_cmd("inspect-get-roots")

    def inspect_get_arch(self, root):
        """
        inspect-get-arch - get architecture of inspected operating system

        This returns the architecture of the inspected operating system.
        """
        return self.inner_cmd("inspect-get-arch %s" % root)

    def inspect_get_distro(self, root):
        """
        inspect-get-distro - get distro of inspected operating system

        This returns the distro (distribution) of the inspected
        operating system.
        """
        return self.inner_cmd("inspect-get-distro %s" % root)

    def inspect_get_filesystems(self, root):
        """
        inspect-get-filesystems - get filesystems associated with inspected
        operating system

        This returns a list of all the filesystems that we think are associated
        with this operating system.
        """
        return self.inner_cmd("inspect-get-filesystems %s" % root)

    def inspect_get_hostname(self, root):
        """
        inspect-get-hostname - get hostname of the operating system

        This function returns the hostname of the operating system as found by
        inspection of the guest's configuration files.
        """
        return self.inner_cmd("inspect-get-hostname %s" % root)

    def inspect_get_major_version(self, root):
        """
        inspect-get-major-version - get major version of inspected operating
        system

        This returns the major version number of the inspected
        operating system.
        """
        return self.inner_cmd("inspect-get-major-version %s" % root)

    def inspect_get_minor_version(self, root):
        """
        inspect-get-minor-version - get minor version of inspected operating
        system

        This returns the minor version number of the inspected operating system.
        """
        return self.inner_cmd("inspect-get-minor-version %s" % root)

    def inspect_get_mountpoints(self, root):
        """
        inspect-get-mountpoints - get mountpoints of inspected operating system

        This returns a hash of where we think the filesystems associated with
        this operating system should be mounted.
        """
        return self.inner_cmd("inspect-get-mountpoints %s" % root)

    def list_filesystems(self):
        """
        list-filesystems - list filesystems

        This inspection command looks for filesystems on partitions, block
        devices and logical volumes, returning a list of devices containing
        filesystems and their type.
        """
        return self.inner_cmd("list-filesystems")
    def list_devices(self):
        """
        list-devices - list the block devices

        List all the block devices.
        """
        return self.inner_cmd("list-devices")

    def tar_out(self, directory, tarfile):
        """
        tar-out - pack directory into tarfile

        This command packs the contents of "directory" and downloads it
        to local file "tarfile".
        """
        return self.inner_cmd("tar-out %s %s" % (directory, tarfile))

    def tar_in(self, tarfile, directory):
        """
        tar-in - unpack tarfile to directory

        This command uploads and unpacks local file "tarfile"
        (an *uncompressed* tar file) into "directory".
        """
        return self.inner_cmd("tar-in %s %s" % (tarfile, directory))

    def tar_in_opts(self, tarfile, directory, compress=None):
        """
        tar-in-opts - unpack tarfile to directory

        This command uploads and unpacks local file "tarfile"
        (a *compressed* tar file) into "directory".
        """
        # 'compress' names the compression algorithm, e.g. gzip/bzip2.
        if compress:
            return self.inner_cmd("tar-in-opts %s %s compress:%s" % (tarfile, directory, compress))
        else:
            return self.inner_cmd("tar-in-opts %s %s" % (tarfile, directory))
    def file_architecture(self, filename):
        """
        file-architecture - detect the architecture of a binary file

        This detects the architecture of the binary "filename", and returns it
        if known.
        """
        return self.inner_cmd("file-architecture %s" % filename)

    def filesize(self, file):
        """
        filesize - return the size of the file in bytes

        This command returns the size of "file" in bytes.
        """
        return self.inner_cmd("filesize %s" % file)

    def stat(self, path):
        """
        stat - get file information

        Returns file information for the given "path".
        """
        return self.inner_cmd("stat %s" % path)

    def lstat(self, path):
        """
        lstat - get file information for a symbolic link

        Returns file information for the given "path", without
        following symbolic links.
        """
        return self.inner_cmd("lstat %s" % path)

    def lstatlist(self, path, names):
        """
        lstatlist - lstat on multiple files

        This call allows you to perform the "lstat" operation on multiple files,
        where all files are in the directory "path". "names" is the list of
        files from this directory.
        """
        return self.inner_cmd("lstatlist %s %s" % (path, names))

    def umask(self, mask):
        """
        umask - set file mode creation mask (umask)

        This function sets the mask used for creating new files and device nodes
        to "mask & 0777".
        """
        return self.inner_cmd("umask %s" % mask)

    def get_umask(self):
        """
        get-umask - get the current umask

        Return the current umask. By default the umask is 022 unless it has been
        set by calling "umask".
        """
        return self.inner_cmd("get-umask")
    def mkdir(self, path):
        """
        mkdir - create a directory

        Create a directory named "path".
        """
        return self.inner_cmd("mkdir %s" % path)

    def mkdir_mode(self, path, mode):
        """
        mkdir-mode - create a directory with a particular mode

        This command creates a directory, setting the initial permissions of the
        directory to "mode".
        """
        return self.inner_cmd("mkdir-mode %s %s" % (path, mode))

    def mknod(self, mode, devmajor, devminor, path):
        """
        mknod - make block, character or FIFO devices

        This call creates block or character special devices, or named pipes
        (FIFOs).
        """
        return self.inner_cmd("mknod %s %s %s %s" % (mode, devmajor, devminor, path))

    def rm_rf(self, path):
        """
        rm-rf - remove a file or directory recursively

        Remove the file or directory "path", recursively removing the contents
        if its a directory. This is like the "rm -rf" shell command.
        """
        return self.inner_cmd("rm-rf %s" % path)

    def copy_out(self, remote, localdir):
        """
        copy-out - copy remote files or directories out of an image

        "copy-out" copies remote files or directories recursively out of the
        disk image, placing them on the host disk in a local directory called
        "localdir" (which must exist).
        """
        return self.inner_cmd("copy-out %s %s" % (remote, localdir))

    def copy_in(self, local, remotedir):
        """
        copy-in - copy local files or directories into an image

        "copy-in" copies local files or directories recursively into the disk
        image, placing them in the directory called "/remotedir" (which must
        exist).
        """
        # NOTE: a "/" is prepended to remotedir, so callers pass the guest
        # directory name without the leading slash.
        return self.inner_cmd("copy-in %s /%s" % (local, remotedir))
    def chmod(self, mode, path):
        """
        chmod - change file mode

        Change the mode (permissions) of "path" to "mode". Only numeric modes
        are supported.
        """
        return self.inner_cmd("chmod %s %s" % (mode, path))

    def chown(self, owner, group, path):
        """
        chown - change file owner and group

        Change the file owner to "owner" and group to "group".
        """
        return self.inner_cmd("chown %s %s %s" % (owner, group, path))

    def lchown(self, owner, group, path):
        """
        lchown - change file owner and group

        Change the file owner to "owner" and group to "group". This is like
        "chown" but if "path" is a symlink then the link itself is changed, not
        the target.
        """
        return self.inner_cmd("lchown %s %s %s" % (owner, group, path))

    def du(self, path):
        """
        du - estimate file space usage

        This command runs the "du -s" command to estimate file space usage for
        "path".
        """
        return self.inner_cmd("du %s" % path)

    def file(self, path):
        """
        file - determine file type

        This call uses the standard file(1) command to determine the type or
        contents of the file.
        """
        return self.inner_cmd("file %s" % path)

    def rm(self, path):
        """
        rm - remove a file

        Remove the single file "path".
        """
        return self.inner_cmd("rm %s" % path)
    def is_file(self, path, followsymlinks=None):
        """
        is-file - test if a regular file

        This returns "true" if and only if there is a regular file with the
        given "path" name.
        """
        cmd = "is-file %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_file_opts(self, path, followsymlinks=None):
        """
        is-file-opts - test if a regular file

        This returns "true" if and only if there is a regular file with the
        given "path" name.
        An alias of command is-file.
        """
        cmd = "is-file-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_blockdev(self, path, followsymlinks=None):
        """
        is-blockdev - test if block device

        This returns "true" if and only if there is a block device with the
        given "path" name.
        """
        cmd = "is-blockdev %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_blockdev_opts(self, path, followsymlinks=None):
        """
        is-blockdev-opts - test if block device

        This returns "true" if and only if there is a block device with the
        given "path" name.
        An alias of command is-blockdev.
        """
        cmd = "is-blockdev-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_chardev(self, path, followsymlinks=None):
        """
        is-chardev - test if character device

        This returns "true" if and only if there is a character device with the
        given "path" name.
        """
        cmd = "is-chardev %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_chardev_opts(self, path, followsymlinks=None):
        """
        is-chardev-opts - test if character device

        This returns "true" if and only if there is a character device with the
        given "path" name.
        An alias of command is-chardev.
        """
        cmd = "is-chardev-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_dir(self, path, followsymlinks=None):
        """
        is-dir - test if a directory

        This returns "true" if and only if there is a directory with the given
        "path" name. Note that it returns false for other objects like files.
        """
        cmd = "is-dir %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_dir_opts(self, path, followsymlinks=None):
        """
        is-dir-opts - test if a directory

        This returns "true" if and only if there is a directory with the
        given "path" name.
        An alias of command is-dir.
        """
        cmd = "is-dir-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_fifo(self, path, followsymlinks=None):
        """
        is-fifo - test if FIFO (named pipe)

        This returns "true" if and only if there is a FIFO (named pipe) with
        the given "path" name.
        """
        cmd = "is-fifo %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_fifo_opts(self, path, followsymlinks=None):
        """
        is-fifo-opts - test if FIFO (named pipe)

        This returns "true" if and only if there is a FIFO (named pipe) with
        the given "path" name.
        An alias of command is-fifo.
        """
        cmd = "is-fifo-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)
    def is_lv(self, device):
        """
        is-lv - test if device is a logical volume

        This command tests whether "device" is a logical volume, and returns
        true iff this is the case.
        """
        return self.inner_cmd("is-lv %s" % device)

    def is_socket(self, path, followsymlinks=None):
        """
        is-socket - test if socket

        This returns "true" if and only if there is a Unix domain socket with
        the given "path" name.
        """
        cmd = "is-socket %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_socket_opts(self, path, followsymlinks=None):
        """
        is-socket-opts - test if socket

        This returns "true" if and only if there is a Unix domain socket with
        the given "path" name.
        An alias of command is-socket.
        """
        cmd = "is-socket-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_symlink(self, path):
        """
        is-symlink - test if symbolic link

        This returns "true" if and only if there is a symbolic link with the
        given "path" name.
        """
        return self.inner_cmd("is-symlink %s" % path)

    def is_whole_device(self, device):
        """
        is-whole-device - test if a whole block device

        This returns "true" if and only if "device" refers to a whole block
        device. That is, not a partition or a logical device.
        """
        return self.inner_cmd("is-whole-device %s" % device)

    def is_zero(self, path):
        """
        is-zero - test if a file contains all zero bytes

        This returns true iff the file exists and the file is empty or it
        contains all zero bytes.
        """
        return self.inner_cmd("is-zero %s" % path)

    def is_zero_device(self, device):
        """
        is-zero-device - test if a device contains all zero bytes

        This returns true iff the device exists and contains all zero bytes.
        Note that for large devices this can take a long time to run.
        """
        return self.inner_cmd("is-zero-device %s" % device)
def cp(self, src, dest):
"""
cp - copy a file
This copies a file from "src" to "dest" where "dest" is either a
destination filename or destination directory.
"""
return self.inner_cmd("cp %s %s" % (src, dest))
def exists(self, path):
"""
exists - test if file or directory exists
This returns "true" if and only if there is a file, directory (or
anything) with the given "path" name
"""
return self.inner_cmd("exists %s" % path)
def cp_a(self, src, dest):
"""
cp-a - copy a file or directory recursively
This copies a file or directory from "src" to "dest" recursively using
the "cp -a" command.
"""
return self.inner_cmd("cp-a %s %s" % (src, dest))
def equal(self, file1, file2):
"""
equal - test if two files have equal contents
This compares the two files "file1" and "file2" and returns true if
their content is exactly equal, or false otherwise.
"""
return self.inner_cmd("equal %s %s" % (file1, file2))
def fill(self, c, len, path):
"""
fill - fill a file with octets
This command creates a new file called "path". The initial content of
the file is "len" octets of "c", where "c" must be a number in the range
"[0..255]".
"""
return self.inner_cmd("fill %s %s %s" % (c, len, path))
def fill_dir(self, dir, nr):
"""
fill-dir - fill a directory with empty files
This function, useful for testing filesystems, creates "nr" empty files
in the directory "dir" with names 00000000 through "nr-1" (ie. each file
name is 8 digits long padded with zeroes).
"""
return self.inner_cmd("fill-dir %s %s" % (dir, nr))
def fill_pattern(self, pattern, len, path):
"""
fill-pattern - fill a file with a repeating pattern of bytes
This function is like "fill" except that it creates a new file of length
"len" containing the repeating pattern of bytes in "pattern". The
pattern is truncated if necessary to ensure the length of the file is
exactly "len" bytes.
"""
return self.inner_cmd("fill-pattern %s %s %s" % (pattern, len, path))
def strings(self, path):
"""
strings - print the printable strings in a file
This runs the strings(1) command on a file and returns the list of
printable strings found.
"""
return self.inner_cmd("strings %s" % path)
def head(self, path):
"""
head - return first 10 lines of a file
This command returns up to the first 10 lines of a file as a list of
strings.
"""
return self.inner_cmd("head %s" % path)
def head_n(self, nrlines, path):
"""
head-n - return first N lines of a file
If the parameter "nrlines" is a positive number, this returns the first
"nrlines" lines of the file "path".
"""
return self.inner_cmd("head-n %s %s" % (nrlines, path))
def tail(self, path):
    """Return up to the last 10 lines of "path" as a list of strings."""
    cmd = "tail %s" % path
    return self.inner_cmd(cmd)
def pread(self, path, count, offset):
    """Read "count" bytes of "path" starting at byte "offset" (guestfish pread)."""
    cmd = "pread %s %s %s" % (path, count, offset)
    return self.inner_cmd(cmd)
def hexdump(self, path):
    """Return the canonical "hexdump -C" dump of "path"."""
    cmd = "hexdump %s" % path
    return self.inner_cmd(cmd)
def more(self, filename):
    """View "filename" via the guestfish more command."""
    cmd = "more %s" % filename
    return self.inner_cmd(cmd)
def download(self, remotefilename, filename):
    """Download guest file "remotefilename" to local file "filename"."""
    cmd = "download %s %s" % (remotefilename, filename)
    return self.inner_cmd(cmd)
def download_offset(self, remotefilename, filename, offset, size):
    """Download "size" bytes of "remotefilename" starting at "offset" into local "filename"."""
    cmd = "download-offset %s %s %s %s" % (remotefilename, filename, offset, size)
    return self.inner_cmd(cmd)
def upload(self, filename, remotefilename):
    """Upload local file "filename" to "remotefilename" on the guest filesystem."""
    cmd = "upload %s %s" % (filename, remotefilename)
    return self.inner_cmd(cmd)
def upload_offset(self, filename, remotefilename, offset):
    """Upload local file "filename" to "remotefilename", writing from byte "offset"."""
    cmd = "upload-offset %s %s %s" % (filename, remotefilename, offset)
    return self.inner_cmd(cmd)
def fallocate(self, path, len):
    """Preallocate zero-filled file "path" of "len" bytes, overwriting any existing file."""
    # NOTE: `len` shadows the builtin but is kept for keyword-argument compatibility.
    cmd = "fallocate %s %s" % (path, len)
    return self.inner_cmd(cmd)
def fallocate64(self, path, len):
    """64-bit variant of fallocate: preallocate zero-filled "path" of "len" bytes."""
    cmd = "fallocate64 %s %s" % (path, len)
    return self.inner_cmd(cmd)
def part_init(self, device, parttype):
    """Create an empty partition table on "device"; "parttype" is usually "msdos" or "gpt"."""
    cmd = "part-init %s %s" % (device, parttype)
    return self.inner_cmd(cmd)
def part_add(self, device, prlogex, startsect, endsect):
    """Add a partition ("prlogex": primary/logical/extended) to "device"; run part_init first."""
    return self.inner_cmd(
        "part-add %s %s %s %s" % (device, prlogex, startsect, endsect))
def part_del(self, device, partnum):
    """Delete partition number "partnum" on "device" (guestfish part-del).

    For MBR partitioning, deleting an extended partition also deletes
    any logical partitions it contains.
    """
    # Consistency fix: use the hyphenated command name like every other
    # wrapper here (guestfish accepts both spellings, hyphens are canonical).
    return self.inner_cmd("part-del %s %s" % (device, partnum))
def part_set_bootable(self, device, partnum, bootable):
    """Set the bootable flag of partition "partnum" (1-based) on "device"."""
    cmd = "part-set-bootable %s %s %s" % (device, partnum, bootable)
    return self.inner_cmd(cmd)
def part_set_mbr_id(self, device, partnum, idbyte):
    """Set the MBR type (ID) byte of partition "partnum" to "idbyte" (usually hex, no 0x)."""
    cmd = "part-set-mbr-id %s %s %s" % (device, partnum, idbyte)
    return self.inner_cmd(cmd)
def part_set_name(self, device, partnum, name):
    """Set the name of partition "partnum" (1-based) on "device"."""
    cmd = "part-set-name %s %s %s" % (device, partnum, name)
    return self.inner_cmd(cmd)
def part_to_dev(self, partition):
    """Strip the partition number from "partition" (e.g. /dev/sdb1 -> /dev/sdb)."""
    cmd = "part-to-dev %s" % partition
    return self.inner_cmd(cmd)
def part_to_partnum(self, partition):
    """Return the partition number of "partition" (e.g. /dev/sdb1 -> 1).

    The named partition must exist, for example as a string returned
    from "list_partitions".
    """
    # Consistency fix: use the hyphenated command name like every other
    # wrapper here (guestfish accepts both spellings, hyphens are canonical).
    return self.inner_cmd("part-to-partnum %s" % partition)
def checksum(self, csumtype, path):
    """Compute the "csumtype" (MD5/SHAx/CRC) checksum of file "path"."""
    cmd = "checksum %s %s" % (csumtype, path)
    return self.inner_cmd(cmd)
def checksum_device(self, csumtype, device):
    """Compute the "csumtype" checksum of the whole contents of "device"."""
    cmd = "checksum-device %s %s" % (csumtype, device)
    return self.inner_cmd(cmd)
def checksums_out(self, csumtype, directory, sumsfile):
    """Checksum every regular file under "directory", writing the list to local "sumsfile"."""
    cmd = "checksums-out %s %s %s" % (csumtype, directory, sumsfile)
    return self.inner_cmd(cmd)
def is_config(self):
    """Return true if the handle is in the "CONFIG" state."""
    return self.inner_cmd("is-config")
def is_ready(self):
    """Return true if the handle is in the "READY" state (accepting commands)."""
    return self.inner_cmd("is-ready")
def part_list(self, device):
    """Parse the partition table on "device" and return the partitions found."""
    cmd = "part-list %s" % device
    return self.inner_cmd(cmd)
def mkfs(self, fstype, device):
    """Create a filesystem of type "fstype" (e.g. "ext3") on "device"."""
    cmd = "mkfs %s %s" % (fstype, device)
    return self.inner_cmd(cmd)
def mkfs_opts(self, fstype, device, opts):
    """Create a filesystem on "device" passing extra optional arguments "opts".

    "opts" is appended verbatim, so it should use guestfish optional-argument
    syntax such as "blocksize:4096".
    """
    cmd = "mkfs %s %s %s" % (fstype, device, opts)
    return self.inner_cmd(cmd)
def part_disk(self, device, parttype):
    """Partition the whole disk: part-init plus one primary partition covering "device"."""
    cmd = "part-disk %s %s" % (device, parttype)
    return self.inner_cmd(cmd)
def part_get_bootable(self, device, partnum):
    """Return true if partition "partnum" on "device" has the bootable flag set."""
    cmd = "part-get-bootable %s %s" % (device, partnum)
    return self.inner_cmd(cmd)
def part_get_mbr_id(self, device, partnum):
    """Return the MBR type (ID) byte of partition "partnum" on "device"."""
    cmd = "part-get-mbr-id %s %s" % (device, partnum)
    return self.inner_cmd(cmd)
def part_get_parttype(self, device):
    """Return the partition table type (format) in use on "device"."""
    cmd = "part-get-parttype %s" % device
    return self.inner_cmd(cmd)
def fsck(self, fstype, device):
    """Run the filesystem checker on "device" of filesystem type "fstype"."""
    cmd = "fsck %s %s" % (fstype, device)
    return self.inner_cmd(cmd)
def blockdev_getss(self, device):
    """Return the sector size of "device" (usually 512, can be larger)."""
    cmd = "blockdev-getss %s" % device
    return self.inner_cmd(cmd)
def blockdev_getsz(self, device):
    """Return the size of "device" in 512-byte sectors, regardless of actual sector size."""
    cmd = "blockdev-getsz %s" % device
    return self.inner_cmd(cmd)
def blockdev_getbsz(self, device):
    """Return the block size of "device"."""
    cmd = "blockdev-getbsz %s" % device
    return self.inner_cmd(cmd)
def blockdev_getsize64(self, device):
    """Return the total size of "device" in bytes."""
    cmd = "blockdev-getsize64 %s" % device
    return self.inner_cmd(cmd)
def blockdev_setbsz(self, device, blocksize):
    """Set the block size of "device" to "blocksize"."""
    cmd = "blockdev-setbsz %s %s" % (device, blocksize)
    return self.inner_cmd(cmd)
def blockdev_getro(self, device):
    """Return a boolean: true if "device" is read-only, false otherwise."""
    cmd = "blockdev-getro %s" % device
    return self.inner_cmd(cmd)
def blockdev_setro(self, device):
    """Set block device "device" to read-only."""
    cmd = "blockdev-setro %s" % device
    return self.inner_cmd(cmd)
def blockdev_setrw(self, device):
    """Set block device "device" to read-write."""
    cmd = "blockdev-setrw %s" % device
    return self.inner_cmd(cmd)
def blockdev_flushbufs(self, device):
    """Ask the kernel to flush internal buffers associated with "device"."""
    cmd = "blockdev-flushbufs %s" % device
    return self.inner_cmd(cmd)
def blockdev_rereadpt(self, device):
    """Reread the partition table on "device"."""
    cmd = "blockdev-rereadpt %s" % device
    return self.inner_cmd(cmd)
def canonical_device_name(self, device):
    """Return the canonical device name for "device" (useful for display to users)."""
    cmd = "canonical-device-name %s" % device
    return self.inner_cmd(cmd)
def device_index(self, device):
    """Return the index of "device" (e.g. /dev/sdb) in the list of devices."""
    cmd = "device-index %s" % device
    return self.inner_cmd(cmd)
def disk_format(self, filename):
    """Detect and return the format of disk image "filename" (may be a host device)."""
    cmd = "disk-format %s" % filename
    return self.inner_cmd(cmd)
def disk_has_backing_file(self, filename):
    """Return whether disk image "filename" has a backing file."""
    cmd = "disk-has-backing-file %s" % filename
    return self.inner_cmd(cmd)
def disk_virtual_size(self, filename):
    """Return the virtual size in bytes of disk image "filename"."""
    cmd = "disk-virtual-size %s" % filename
    return self.inner_cmd(cmd)
def max_disks(self):
    """Return the maximum number of disks that may be added to a handle."""
    return self.inner_cmd("max-disks")
def nr_devices(self):
    """Return the number of whole block devices (disks) that were added."""
    return self.inner_cmd("nr-devices")
def scrub_device(self, device):
    """Securely wipe "device" by writing patterns over it to hinder data retrieval."""
    cmd = "scrub-device %s" % device
    return self.inner_cmd(cmd)
def scrub_file(self, file):
    """Securely wipe "file" by writing patterns over it to hinder data retrieval."""
    cmd = "scrub-file %s" % file
    return self.inner_cmd(cmd)
def scrub_freespace(self, dir):
    """Scrub free space on the partition containing "dir".

    Creates "dir", fills it with files until the filesystem is full,
    scrubs those files as "scrub_file" does, then deletes them.
    """
    cmd = "scrub-freespace %s" % dir
    return self.inner_cmd(cmd)
def md_create(self, name, device, missingbitmap=None, nrdevices=None,
              spare=None, chunk=None, level=None):
    """Create a Linux md (RAID) device "name" on "device" (guestfish md-create).

    Optional arguments are appended as guestfish "key:value" pairs when set.
    """
    cmd = "md-create %s %s" % (name, device)
    # Append optional args in the order guestfish documents them.
    for key, value in (("missingbitmap", missingbitmap),
                       ("nrdevices", nrdevices),
                       ("spare", spare),
                       ("chunk", chunk),
                       ("level", level)):
        if value:
            cmd += " %s:%s" % (key, value)
    return self.inner_cmd(cmd)
def list_md_devices(self):
    """List all Linux md (RAID) devices."""
    return self.inner_cmd("list-md-devices")
def md_stop(self, md):
    """Deactivate (stop) the MD array "md" without destroying or zeroing it."""
    cmd = "md-stop %s" % md
    return self.inner_cmd(cmd)
def md_stat(self, md):
    """Return the list of underlying devices making up software RAID array "md"."""
    cmd = "md-stat %s" % md
    return self.inner_cmd(cmd)
def md_detail(self, md):
    """Return metadata for MD device "md" (exposes 'mdadm -DY <md>' output)."""
    cmd = "md-detail %s" % md
    return self.inner_cmd(cmd)
def sfdisk(self, device, cyls, heads, sectors, lines):
    """Create partitions on "device" via sfdisk(8).

    Deprecated: prefer the "part-add" call in new code.
    """
    cmd = "sfdisk %s %s %s %s %s" % (device, cyls, heads, sectors, lines)
    return self.inner_cmd(cmd)
def sfdisk_l(self, device):
    """Show the partition table on "device" in human-readable sfdisk(8) output.

    Deprecated: prefer the "part-list" call; the output is not meant to be parsed.
    """
    cmd = "sfdisk-l %s" % device
    return self.inner_cmd(cmd)
def sfdiskM(self, device, lines):
    """Create partitions on "device" with sizes given in megabytes only.

    Simplified sfdisk interface without cyls/heads/sectors.
    Deprecated: prefer the "part-add" call in new code.
    """
    cmd = "sfdiskM %s %s" % (device, lines)
    return self.inner_cmd(cmd)
def sfdisk_N(self, device, partnum, cyls, heads, sectors, line):
    """Modify single partition "partnum" (1-based) on "device" via sfdisk(8).

    Usually pass 0 for cyls/heads/sectors.
    Deprecated: prefer the "part-add" call in new code.
    """
    cmd = "sfdisk-N %s %s %s %s %s %s" % (device, partnum, cyls,
                                          heads, sectors, line)
    return self.inner_cmd(cmd)
def sfdisk_disk_geometry(self, device):
    """Show the disk geometry of "device" as read from the partition table.

    After a resize of the underlying block device this can differ from the
    kernel's idea of the geometry.
    """
    cmd = "sfdisk-disk-geometry %s" % device
    return self.inner_cmd(cmd)
def sfdisk_kernel_geometry(self, device):
    """Show the kernel's idea of the geometry of "device"."""
    cmd = "sfdisk-kernel-geometry %s" % device
    return self.inner_cmd(cmd)
def pvcreate(self, physvols):
    """Create an LVM physical volume on "physvols"."""
    cmd = "pvcreate %s" % physvols
    return self.inner_cmd(cmd)
def pvs(self):
    """List all detected LVM physical volumes (equivalent of pvs(8))."""
    return self.inner_cmd("pvs")
def pvs_full(self):
    """List all detected LVM physical volumes with all fields (pvs(8) "full" form)."""
    return self.inner_cmd("pvs-full")
def pvresize(self, device):
    """Resize (expand or shrink) the LVM PV on "device" to match the device's new size."""
    cmd = "pvresize %s" % device
    return self.inner_cmd(cmd)
def pvresize_size(self, device, size):
    """Like "pvresize" but with an explicit new size in bytes."""
    cmd = "pvresize-size %s %s" % (device, size)
    return self.inner_cmd(cmd)
def pvremove(self, device):
    """Wipe the LVM physical volume "device" so LVM no longer recognises it.

    Uses pvremove, which refuses to wipe PVs still containing volume
    groups — remove those first.
    """
    cmd = "pvremove %s" % device
    return self.inner_cmd(cmd)
def pvuuid(self, device):
    """Return the UUID of the LVM physical volume "device"."""
    cmd = "pvuuid %s" % device
    return self.inner_cmd(cmd)
def vgcreate(self, volgroup, physvols):
    """Create LVM volume group "volgroup" from the non-empty PV list "physvols"."""
    cmd = "vgcreate %s %s" % (volgroup, physvols)
    return self.inner_cmd(cmd)
def vgs(self):
    """List all detected LVM volume groups."""
    return self.inner_cmd("vgs")
def vgs_full(self):
    """List all detected LVM volume groups with all fields (vgs(8) "full" form)."""
    return self.inner_cmd("vgs-full")
def vgrename(self, volgroup, newvolgroup):
    """Rename volume group "volgroup" to "newvolgroup"."""
    cmd = "vgrename %s %s" % (volgroup, newvolgroup)
    return self.inner_cmd(cmd)
def vgremove(self, vgname):
    """Remove the LVM volume group "vgname" (for example "VG")."""
    cmd = "vgremove %s" % vgname
    return self.inner_cmd(cmd)
def vgscan(self):
    """Rescan all block devices and rebuild the list of LVM PVs, VGs and LVs."""
    return self.inner_cmd("vgscan")
def vguuid(self, vgname):
    """Return the UUID of the LVM volume group "vgname"."""
    cmd = "vguuid %s" % vgname
    return self.inner_cmd(cmd)
def vg_activate(self, activate, volgroups):
    """Activate (or, if "activate" is false, deactivate) all LVs in "volgroups"."""
    cmd = "vg-activate %s %s" % (activate, volgroups)
    return self.inner_cmd(cmd)
def vg_activate_all(self, activate):
    """Activate (or, if "activate" is false, deactivate) all LVs in all volume groups."""
    cmd = "vg-activate-all %s" % activate
    return self.inner_cmd(cmd)
def vglvuuids(self, vgname):
    """Return the UUIDs of all logical volumes created in volume group "vgname"."""
    cmd = "vglvuuids %s" % vgname
    return self.inner_cmd(cmd)
def vgpvuuids(self, vgname):
    """Return the UUIDs of all physical volumes that volume group "vgname" resides on."""
    cmd = "vgpvuuids %s" % vgname
    return self.inner_cmd(cmd)
def lvcreate(self, logvol, volgroup, mbytes):
    """Create LVM logical volume "logvol" of "mbytes" megabytes on "volgroup"."""
    cmd = "lvcreate %s %s %s" % (logvol, volgroup, mbytes)
    return self.inner_cmd(cmd)
def lvuuid(self, device):
    """Return the UUID of the LVM logical volume "device"."""
    cmd = "lvuuid %s" % device
    return self.inner_cmd(cmd)
def lvm_canonical_lv_name(self, lvname):
    """Convert alternative LV naming schemes for "lvname" to the canonical name."""
    cmd = "lvm-canonical-lv-name %s" % lvname
    return self.inner_cmd(cmd)
def lvremove(self, device):
    """Remove the LVM logical volume at path "device" (e.g. "/dev/VG/LV")."""
    cmd = "lvremove %s" % device
    return self.inner_cmd(cmd)
def lvresize(self, device, mbytes):
    """Resize (expand or shrink) the LVM logical volume "device" to "mbytes"."""
    cmd = "lvresize %s %s" % (device, mbytes)
    return self.inner_cmd(cmd)
def lvs(self):
    """List all detected LVM logical volumes."""
    return self.inner_cmd("lvs")
def lvs_full(self):
    """List all detected LVM logical volumes with all fields (lvs(8) "full" form)."""
    return self.inner_cmd("lvs-full")
def lvm_clear_filter(self):
    """Undo "lvm_set_filter" so LVM sees every block device.

    Also clears the LVM cache and performs a volume group scan.
    """
    return self.inner_cmd("lvm-clear-filter")
def lvm_remove_all(self):
    """Remove all LVM logical volumes, volume groups and physical volumes."""
    return self.inner_cmd("lvm-remove-all")
def lvm_set_filter(self, device):
    """Restrict the LVM device filter so only the listed devices are visible to LVM."""
    cmd = "lvm-set-filter %s" % device
    return self.inner_cmd(cmd)
def lvresize_free(self, lv, percent):
    """Expand LV "lv" to fill "percent"% of the VG's remaining free space.

    percent=100 expands the logical volume as much as possible.
    """
    cmd = "lvresize-free %s %s" % (lv, percent)
    return self.inner_cmd(cmd)
def lvrename(self, logvol, newlogvol):
    """Rename logical volume "logvol" to "newlogvol"."""
    cmd = "lvrename %s %s" % (logvol, newlogvol)
    return self.inner_cmd(cmd)
def vfs_type(self, mountable):
    """Return the Linux VFS (filesystem) type of the filesystem on "mountable"."""
    cmd = "vfs-type %s" % mountable
    return self.inner_cmd(cmd)
def touch(self, path):
    """Update timestamps on "path", or create it as a zero-length file (like touch(1))."""
    cmd = "touch %s" % path
    return self.inner_cmd(cmd)
def umount_all(self):
    """Unmount all mounted filesystems (some internal mounts are left untouched)."""
    return self.inner_cmd("umount-all")
def ls(self, directory):
    """List files in "directory" (relative to root, no cwd); hides '.'/'..' but shows dotfiles."""
    cmd = "ls %s" % directory
    return self.inner_cmd(cmd)
def ll(self, directory):
    """List files in "directory" (relative to root, no cwd) in 'ls -la' format."""
    cmd = "ll %s" % directory
    return self.inner_cmd(cmd)
def sync(self):
    """Sync disks so any writes are flushed through to the underlying disk image."""
    return self.inner_cmd("sync")
def debug(self, subcmd, extraargs):
    """Expose internals of guestfsd (the guestfs daemon inside the hypervisor)."""
    cmd = "debug %s %s" % (subcmd, extraargs)
    return self.inner_cmd(cmd)
# libguestfs module functions follow #####
def libguest_test_tool_cmd(qemuarg=None, qemudirarg=None,
                           timeoutarg=None, ignore_status=True,
                           debug=False, timeout=60):
    """Execute the libguestfs-test-tool command.

    :param qemuarg: the qemu option
    :param qemudirarg: the qemudir option
    :param timeoutarg: the timeout option
    :return: a CmdResult object
    :raise: LibguestfsCmdError when ignore_status is False and the command fails
    """
    cmd = "libguestfs-test-tool"
    for flag, value in (("--qemu '%s'", qemuarg),
                        ("--qemudir '%s'", qemudirarg),
                        ("--timeout %s", timeoutarg)):
        if value is not None:
            cmd += " " + flag % value
    return lgf_command(cmd, ignore_status, debug, timeout)
def virt_edit_cmd(disk_or_domain, file_path, is_disk=False, options=None,
                  extra=None, expr=None, connect_uri=None, ignore_status=True,
                  debug=False, timeout=60):
    """Build and execute a virt-edit command.

    :param disk_or_domain: an image path (is_disk=True) or a domain name
    :param file_path: the file inside the image to be edited
    :param options: extra virt-edit options appended verbatim
    :param extra: additional command suffix appended verbatim
    :param expr: non-interactive edit expression passed via -e
    :return: result of running the virt-edit command
    """
    cmd = "virt-edit"
    if connect_uri is not None:
        cmd += " -c %s" % connect_uri
    # -a selects a disk image, -d selects a libvirt domain.
    cmd += (" -a %s" if is_disk else " -d %s") % disk_or_domain
    cmd += " %s" % file_path
    for part in (options, extra):
        if part is not None:
            cmd += " %s" % part
    if expr is not None:
        cmd += " -e '%s'" % expr
    return lgf_command(cmd, ignore_status, debug, timeout)
def virt_clone_cmd(original, newname=None, autoclone=False, **dargs):
    """Clone an existing virtual machine image with virt-clone.

    :param original: name of the original guest to be cloned
    :param newname: name of the new guest virtual machine instance
    :param autoclone: generate a new guest name and paths for new storage
    :param dargs: standardized keywords (files, nonsparse, mac,
                  ignore_status, debug, timeout, ...)
    """
    def _append_storage(cmd, options):
        """Append storage-related options (--file/--nonsparse)."""
        # "files" should be a list of destination image paths.
        for path in options.get("files", []):
            cmd += " --file '%s'" % path
        if options.get("nonsparse") is not None:
            cmd += " --nonsparse"
        return cmd

    def _append_network(cmd, options):
        """Append network-related options (--mac)."""
        mac = options.get("mac")
        if mac is not None:
            cmd += " --mac '%s'" % mac
        return cmd

    cmd = "virt-clone --original '%s'" % original
    if newname is not None:
        cmd += " --name '%s'" % newname
    if autoclone is True:
        cmd += " --auto-clone"
    # Many more options can be added if necessary.
    cmd = _append_storage(cmd, dargs)
    cmd = _append_network(cmd, dargs)
    return lgf_command(cmd,
                       dargs.get("ignore_status", True),
                       dargs.get("debug", False),
                       dargs.get("timeout", 60))
def virt_sparsify_cmd(indisk, outdisk, compress=False, convert=None,
                      format=None, ignore_status=True, debug=False,
                      timeout=60):
    """Make a virtual machine disk sparse with virt-sparsify.

    :param indisk: the source disk to be sparsified
    :param outdisk: the destination disk
    :param compress: pass --compress when True
    :param convert: output format for --convert
    :param format: input format for --format
    """
    pieces = ["virt-sparsify"]
    if compress is True:
        pieces.append("--compress")
    if format is not None:
        pieces.append("--format '%s'" % format)
    pieces.append("'%s'" % indisk)
    if convert is not None:
        pieces.append("--convert '%s'" % convert)
    pieces.append("'%s'" % outdisk)
    # More options can be added if necessary.
    return lgf_command(" ".join(pieces), ignore_status, debug, timeout)
def virt_resize_cmd(indisk, outdisk, **dargs):
    """Resize a virtual machine disk with virt-resize.

    :param indisk: the source disk to be resized
    :param outdisk: the destination disk
    :param dargs: standardized keywords (resize, resized_size, expand,
                  shrink, ignore, delete, ignore_status, debug, timeout)
    """
    cmd = "virt-resize"
    resize = dargs.get("resize")
    if resize is not None:
        cmd += " --resize %s=%s" % (resize, dargs.get("resized_size", "0"))
    # Simple pass-through options sharing the "--name value" shape.
    for opt in ("expand", "shrink", "ignore", "delete"):
        value = dargs.get(opt)
        if value is not None:
            cmd += " --%s %s" % (opt, value)
    cmd += " %s %s" % (indisk, outdisk)
    return lgf_command(cmd,
                       dargs.get("ignore_status", True),
                       dargs.get("debug", False),
                       dargs.get("timeout", 60))
def virt_list_partitions_cmd(disk_or_domain, long=False, total=False,
                             human_readable=False, ignore_status=True,
                             debug=False, timeout=60):
    """List the partitions contained in a VM or disk image via virt-list-partitions.

    :param disk_or_domain: a disk or a domain to be inspected
    """
    cmd = "virt-list-partitions %s" % disk_or_domain
    for enabled, flag in ((long, " --long"),
                          (total, " --total"),
                          (human_readable, " --human-readable")):
        if enabled is True:
            cmd += flag
    return lgf_command(cmd, ignore_status, debug, timeout)
def guestmount(disk_or_domain, mountpoint, inspector=False,
               readonly=False, **dargs):
    """Mount a guest filesystem on the host using FUSE and libguestfs.

    :param disk_or_domain: a disk or a domain to be mounted
        (set is_disk=True in dargs to mount a disk image)
    :param mountpoint: the host mountpoint for the filesystems
    :param inspector: mount all filesystems automatically (-i)
    :param readonly: mount read-only (--ro)
    """
    cmd = "guestmount"
    # If you need to mount a disk, set is_disk to True.
    if dargs.get("is_disk", False) is True:
        cmd += " -a %s" % disk_or_domain
    else:
        cmd += " -d %s" % disk_or_domain
    if inspector is True:
        cmd += " -i"
    if readonly is True:
        cmd += " --ro"
    # Explicit -m mountpoints requested by the caller.
    for special in dargs.get("special_mountpoints", []):
        cmd += " -m %s" % special
    cmd += " %s" % mountpoint
    return lgf_command(cmd,
                       dargs.get("ignore_status", True),
                       dargs.get("debug", False),
                       dargs.get("timeout", 60))
def virt_filesystems(disk_or_domain, **dargs):
    """
    virt-filesystems - List filesystems, partitions, block devices,
    LVM in a virtual machine or disk image

    :param disk_or_domain: a disk or a domain to be mounted
                           If you need to mount a disk, set is_disk to True in dargs
    :param dargs: boolean display selectors (all, filesystems, extra,
                  partitions, block_devices, logical_volumes, volume_groups,
                  physical_volumes, long_format, human_readable) plus
                  ignore_status, debug, timeout and is_disk.
    :return: result of lgf_command (a CmdResult object)
    """
    def get_display_type(cmd, options):
        # Map each boolean option in dargs to the command-line flag it
        # enables.  Bug fix: virt-filesystems takes hyphenated option
        # names (e.g. --block-devices); the previous underscore spellings
        # (--block_devices etc.) are rejected by the tool.
        flag_map = (
            ("all", " --all"),
            ("filesystems", " --filesystems"),
            ("extra", " --extra"),
            ("partitions", " --partitions"),
            ("block_devices", " --block-devices"),
            ("logical_volumes", " --logical-volumes"),
            ("volume_groups", " --volume-groups"),
            ("physical_volumes", " --physical-volumes"),
            ("long_format", " --long"),
            ("human_readable", " -h"),
        )
        for key, flag in flag_map:
            if options.get(key, False) is True:
                cmd += flag
        return cmd

    cmd = "virt-filesystems"
    # If you need to mount a disk, set is_disk to True
    is_disk = dargs.get("is_disk", False)
    ignore_status = dargs.get("ignore_status", True)
    debug = dargs.get("debug", False)
    timeout = dargs.get("timeout", 60)
    if is_disk is True:
        cmd += " -a %s" % disk_or_domain
    else:
        cmd += " -d %s" % disk_or_domain
    cmd = get_display_type(cmd, dargs)
    return lgf_command(cmd, ignore_status, debug, timeout)
def virt_list_partitions(disk_or_domain, long=False, total=False,
                         human_readable=False, ignore_status=True,
                         debug=False, timeout=60):
    """
    Run "virt-list-partitions" to list the partitions contained in a
    virtual machine or disk image.

    :param disk_or_domain: a disk or a domain to be mounted
    """
    parts = ["virt-list-partitions %s" % disk_or_domain]
    if long is True:
        parts.append("--long")
    if total is True:
        parts.append("--total")
    if human_readable is True:
        parts.append("--human-readable")
    return lgf_command(" ".join(parts), ignore_status, debug, timeout)
def virt_list_filesystems(disk_or_domain, format=None, long=False,
                          all=False, ignore_status=True, debug=False,
                          timeout=60):
    """
    Run "virt-list-filesystems" to list the filesystems contained in a
    virtual machine or disk image.

    :param disk_or_domain: a disk or a domain to be mounted
    """
    cmd = "virt-list-filesystems %s" % disk_or_domain
    if format is not None:
        cmd = "%s --format %s" % (cmd, format)
    if long is True:
        cmd = cmd + " --long"
    if all is True:
        cmd = cmd + " --all"
    return lgf_command(cmd, ignore_status, debug, timeout)
def virt_df(disk_or_domain, ignore_status=True, debug=False, timeout=60):
    """
    Run "virt-df" to display free space on virtual machine filesystems.
    """
    return lgf_command("virt-df %s" % disk_or_domain,
                       ignore_status, debug, timeout)
def virt_sysprep_cmd(disk_or_domain, options=None,
                     extra=None, ignore_status=True,
                     debug=False, timeout=600):
    """
    Execute virt-sysprep command to reset or unconfigure a virtual machine.

    :param disk_or_domain: a img path or a domain name.
    :param options: the options of virt-sysprep.
    :return: a CmdResult object.
    """
    # A plain file on disk is attached with "-a"; anything else is treated
    # as a libvirt domain name ("-d").
    source_flag = "-a" if os.path.isfile(disk_or_domain) else "-d"
    cmd = "virt-sysprep %s %s" % (source_flag, disk_or_domain)
    for part in (options, extra):
        if part is not None:
            cmd += " %s" % part
    return lgf_command(cmd, ignore_status, debug, timeout)
def virt_cat_cmd(disk_or_domain, file_path, options=None, ignore_status=True,
                 debug=False, timeout=60):
    """
    Execute virt-cat command to print guest's file detail.

    :param disk_or_domain: a img path or a domain name.
    :param file_path: the file to print detail
    :param options: the options of virt-cat.
    :return: a CmdResult object.
    """
    # disk_or_domain and file_path are necessary parameters.
    # A plain file on disk is attached with "-a"; otherwise "-d" (domain).
    source_flag = "-a" if os.path.isfile(disk_or_domain) else "-d"
    cmd = "virt-cat %s %s '%s'" % (source_flag, disk_or_domain, file_path)
    if options is not None:
        cmd += " %s" % options
    return lgf_command(cmd, ignore_status, debug, timeout)
def virt_tar_in(disk_or_domain, tar_file, destination, is_disk=False,
                ignore_status=True, debug=False, timeout=60):
    """
    "virt-tar-in" unpacks an uncompressed tarball into a virtual machine
    disk image or named libvirt domain.
    """
    # Disk images are attached with "-a"; libvirt domains with "-d".
    target = (" -a %s" if is_disk is True else " -d %s") % disk_or_domain
    cmd = "virt-tar-in%s %s %s" % (target, tar_file, destination)
    return lgf_command(cmd, ignore_status, debug, timeout)
def virt_tar_out(disk_or_domain, directory, tar_file, is_disk=False,
                 ignore_status=True, debug=False, timeout=60):
    """
    "virt-tar-out" packs a virtual machine disk image directory into a tarball.
    """
    if is_disk is True:
        source = " -a %s" % disk_or_domain
    else:
        source = " -d %s" % disk_or_domain
    cmd = "virt-tar-out" + source + " %s %s" % (directory, tar_file)
    return lgf_command(cmd, ignore_status, debug, timeout)
def virt_copy_in(disk_or_domain, file, destination, is_disk=False,
                 ignore_status=True, debug=False, timeout=60):
    """
    "virt-copy-in" copies files and directories from the local disk into a
    virtual machine disk image or named libvirt domain.

    #TODO: expand file to files
    """
    source_flag = "-a" if is_disk is True else "-d"
    cmd = "virt-copy-in %s %s %s %s" % (source_flag, disk_or_domain,
                                        file, destination)
    return lgf_command(cmd, ignore_status, debug, timeout)
def virt_copy_out(disk_or_domain, file_path, localdir, is_disk=False,
                  ignore_status=True, debug=False, timeout=60):
    """
    "virt-copy-out" copies files and directories out of a virtual machine
    disk image or named libvirt domain.
    """
    pieces = ["virt-copy-out"]
    if is_disk is True:
        pieces.append("-a %s" % disk_or_domain)
    else:
        pieces.append("-d %s" % disk_or_domain)
    pieces.append("%s %s" % (file_path, localdir))
    return lgf_command(" ".join(pieces), ignore_status, debug, timeout)
def virt_format(disk, filesystem=None, image_format=None, lvm=None,
                partition=None, wipe=False, ignore_status=False,
                debug=False, timeout=60):
    """
    Virt-format takes an existing disk file (or it can be a host partition,
    LV etc), erases all data on it, and formats it as a blank disk.
    """
    cmd = "virt-format -a %s" % disk
    # Options that carry a value are emitted as "--opt=value" when set.
    valued_options = (("--filesystem", filesystem),
                      ("--format", image_format),
                      ("--lvm", lvm),
                      ("--partition", partition))
    for option, value in valued_options:
        if value is not None:
            cmd += " %s=%s" % (option, value)
    if wipe is True:
        cmd += " --wipe"
    return lgf_command(cmd, ignore_status, debug, timeout)
def virt_inspector(disk_or_domain, is_disk=False, ignore_status=True,
                   debug=False, timeout=60):
    """
    virt-inspector2 examines a virtual machine or disk image and tries to
    determine the version of the operating system and other information
    about the virtual machine.
    """
    # virt-inspector has been replaced by virt-inspector2 in RHEL7, so
    # prefer the newer binary when lgf_cmd_check finds it.
    cmd = lgf_cmd_check("virt-inspector2")
    if cmd is None:
        cmd = "virt-inspector"
    # If you need to mount a disk, set is_disk to True
    cmd += (" -a %s" if is_disk is True else " -d %s") % disk_or_domain
    return lgf_command(cmd, ignore_status, debug, timeout)
|
rbian/virt-test
|
virttest/utils_libguestfs.py
|
Python
|
gpl-2.0
| 91,195
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from past.builtins import basestring
import importlib
import logging
import os
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.utils import formatdate
from airflow import configuration
def send_email(to, subject, html_content, files=None, dryrun=False, cc=None, bcc=None, mime_subtype='mixed'):
    """
    Send email using backend specified in EMAIL_BACKEND.
    """
    # EMAIL_BACKEND is a dotted path "package.module.callable"; split off
    # the attribute name and import the module dynamically.
    module_path, backend_name = configuration.get('email', 'EMAIL_BACKEND').rsplit('.', 1)
    backend = getattr(importlib.import_module(module_path), backend_name)
    return backend(to, subject, html_content, files=files, dryrun=dryrun,
                   cc=cc, bcc=bcc, mime_subtype=mime_subtype)
def send_email_smtp(to, subject, html_content, files=None, dryrun=False, cc=None, bcc=None, mime_subtype='mixed'):
    """
    Send an email with html content

    >>> send_email('test@example.com', 'foo', '<b>Foo</b> bar', ['/dev/null'], dryrun=True)
    """
    smtp_mail_from = configuration.get('smtp', 'SMTP_MAIL_FROM')
    to = get_email_address_list(to)

    msg = MIMEMultipart(mime_subtype)
    msg['Subject'] = subject
    msg['From'] = smtp_mail_from
    msg['To'] = ", ".join(to)
    recipients = to
    if cc:
        cc = get_email_address_list(cc)
        msg['CC'] = ", ".join(cc)
        recipients = recipients + cc
    if bcc:
        # don't add bcc in header
        bcc = get_email_address_list(bcc)
        recipients = recipients + bcc
    msg['Date'] = formatdate(localtime=True)
    msg.attach(MIMEText(html_content, 'html'))

    # Attach every requested file as an application MIME part.
    for fname in files or []:
        basename = os.path.basename(fname)
        with open(fname, "rb") as attachment:
            msg.attach(MIMEApplication(
                attachment.read(),
                Content_Disposition='attachment; filename="%s"' % basename,
                Name=basename
            ))
    send_MIME_email(smtp_mail_from, recipients, msg, dryrun)
def send_MIME_email(e_from, e_to, mime_msg, dryrun=False):
    """
    Deliver a prepared MIME message over SMTP.

    :param e_from: envelope sender address
    :param e_to: list of recipient addresses
    :param mime_msg: a MIME message object (sent via as_string())
    :param dryrun: if True, skip the SMTP transaction entirely
    """
    SMTP_HOST = configuration.get('smtp', 'SMTP_HOST')
    SMTP_PORT = configuration.getint('smtp', 'SMTP_PORT')
    SMTP_USER = configuration.get('smtp', 'SMTP_USER')
    SMTP_PASSWORD = configuration.get('smtp', 'SMTP_PASSWORD')
    SMTP_STARTTLS = configuration.getboolean('smtp', 'SMTP_STARTTLS')
    SMTP_SSL = configuration.getboolean('smtp', 'SMTP_SSL')
    if not dryrun:
        s = smtplib.SMTP_SSL(SMTP_HOST, SMTP_PORT) if SMTP_SSL else smtplib.SMTP(SMTP_HOST, SMTP_PORT)
        try:
            if SMTP_STARTTLS:
                s.starttls()
            if SMTP_USER and SMTP_PASSWORD:
                s.login(SMTP_USER, SMTP_PASSWORD)
            s.sendmail(e_from, e_to, mime_msg.as_string())
            # Bug fix: log only after sendmail succeeded; previously the
            # "sent" message was logged before sending, so failures were
            # misreported as successes.
            logging.info("Sent an alert email to " + str(e_to))
        finally:
            # Bug fix: always close the connection, even when starttls/
            # login/sendmail raises (the original leaked the socket).
            s.quit()
def get_email_address_list(address_string):
    """
    Normalise *address_string* into a list of addresses.

    A single string may hold several addresses separated by commas or
    semicolons (commas take precedence); non-string values (e.g. an
    already-built list) are returned unchanged.
    """
    if not isinstance(address_string, basestring):
        return address_string
    for separator in (',', ';'):
        if separator in address_string:
            return address_string.split(separator)
    return [address_string]
|
kerzhner/airflow
|
airflow/utils/email.py
|
Python
|
apache-2.0
| 3,917
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Oğuz Kırat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import win32file, win32api
import os, wmi
class win32_PartitionUtils:
    """Enumerate physical disks and DVD drives via the Win32 API / WMI."""

    def __init__(self):
        # Maps a disk index (or drive letter, for DVD drives) to metadata.
        self.drives = {}

    def win32_get_total_size(self, drive='C:'):
        """
        Return the total capacity of *drive* in megabytes.

        Bug fixes: the method was missing its ``self`` parameter (so it
        raised TypeError when called on an instance) and always queried
        'C:' regardless of the *drive* argument.
        """
        r = win32file.GetDiskFreeSpace(drive)
        # capacity = totalClusters*sectPerCluster*bytesPerSector as MegaByte
        capacity = r[3] * r[0] * r[1] / (1024 ** 2)
        return capacity

    def win32_detect_removable_drives(self):
        """Populate self.drives with one entry per physical disk (via WMI)."""
        self.drives = {}
        c = wmi.WMI()
        for pm in c.Win32_DiskDrive():
            self.drives[pm.index] = ["Disk " + str(pm.index) + ":" + str(pm.Model),
                                     pm.Model, pm.Name, pm.Partitions,
                                     str(pm.Size)[:-9], pm.DeviceID, pm.index,
                                     pm.InterfaceType, pm.MediaType]

    def win32_detect_dvd_drives(self):
        """Scan all drive letters and record CD/DVD drives in self.drives."""
        for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
            try:
                drive = letter + ':'
                if win32file.GetDriveType(drive) == win32file.DRIVE_CDROM:  # will be changed
                    r = win32file.GetDiskFreeSpace(drive)
                    # capacity = totalClusters*sectPerCluster*bytesPerSector as MegaByte
                    capacity = r[3] * r[0] * r[1] / (1024 ** 2)
                    # print() form works on both Python 2 and 3 for a
                    # single argument (was a py2-only print statement).
                    print(capacity)
                    print(r)
                    self.drives[drive] = {
                        'label': 'label',
                        'mount': drive,
                        'size': capacity,
                        'device': drive,
                        'is_mount': 1  # will be edited
                    }
            except Exception:
                # Narrowed from a bare except; drive letters without media
                # raise win32 errors and are simply skipped.
                pass
|
okrt/usbtransfer
|
wintools.py
|
Python
|
gpl-2.0
| 2,310
|
import unittest
import json
from app import app
class TestCase(unittest.TestCase):
    """Integration tests for the Flask app's /result.json endpoint."""
    def setUp(self):
        # Run the app in testing mode against a fixed data file so the
        # responses are reproducible.
        app.config['TESTING'] = True
        app.config["TESTING_DATAFILE"] = "testing/testdata.json"
        self.app = app.test_client()
    def tearDown(self):
        # No per-test cleanup required.
        pass
    def test_empty_query(self):
        # No query parameters: response must match the stored expectation.
        response = self.app.get("/result.json")
        expected = json.loads(open("testing/expectations/noparam.expected").read())
        received = json.loads(response.data)
        self.assertItemsEqual(received,expected)
    def test_select_nonexistent_AS(self):
        # Selecting an AS absent from the test data yields empty results
        # and an all-zero "total" row.
        received= json.loads(self.app.get("/result.json?ases=AS3320").data)
        expected = json.loads(
            """
            {"total": {"fp": "", "index": null, "as_no": "", "nick": "", "cc": "", "p_exit": 0.0, "adv_bw": 0.0, "guard": "", "link": true, "p_guard": 0.0, "p_middle": 0.0, "exit": "", "as_info": "", "cw": 0.0, "as_name": ""}, "results": [], "excluded": null}
            """
        )
        self.assertItemsEqual(received,expected)
    def test_select_AS_by_number(self):
        # The bare number "7922" must match AS7922 (prefix is optional).
        received = json.loads(self.app.get("/result.json?ases=7922").data)
        expected = json.loads(
            """
            {"total": {"fp": "", "index": null, "as_no": "", "nick": "(total in selection)", "cc": "", "p_exit": 0.0018185999999999999, "adv_bw": 0.005388199999999999, "guard": "", "link": true, "p_guard": 0.0, "p_middle": 0.0034887, "exit": "", "as_info": "", "cw": 0.0017691999999999999, "as_name": ""}, "results": [{"fp": "CE9CC720B9300FC7E041CCC2B749F283AB5EE1C2", "index": 1, "as_no": "AS7922", "nick": "Tornearse", "cc": "US", "p_exit": 0.0018185999999999999, "adv_bw": 0.005388199999999999, "guard": "-", "link": null, "p_guard": 0.0, "p_middle": 0.0034887, "exit": "Exit", "as_info": "AS7922 Comcast Cable Communications, Inc.", "cw": 0.0017691999999999999, "as_name": "Comcast Cable Communications, Inc."}], "excluded": null}
            """)
        self.assertItemsEqual(received,expected)
    def test_select_AS1234567890(self):
        # An impossibly large AS number behaves like a nonexistent AS.
        received = json.loads(self.app.get("/result.json?ases=AS1234567890").data)
        expected = json.loads(
            """
            {"total": {"fp": "", "index": null, "as_no": "", "nick": "", "cc": "", "p_exit": 0.0, "adv_bw": 0.0, "guard": "", "link": true, "p_guard": 0.0, "p_middle": 0.0, "exit": "", "as_info": "", "cw": 0.0, "as_name": ""}, "results": [], "excluded": null}
            """
        )
        self.assertItemsEqual(received,expected)
    def test_select_AS_with_label(self):
        # "AS7922" (with the AS prefix) must return the same data as the
        # bare-number query above.
        received = json.loads(self.app.get("/result.json?ases=AS7922").data)
        expected = json.loads(
            """
            {"total": {"fp": "", "index": null, "as_no": "", "nick": "(total in selection)", "cc": "", "p_exit": 0.0018185999999999999, "adv_bw": 0.005388199999999999, "guard": "", "link": true, "p_guard": 0.0, "p_middle": 0.0034887, "exit": "", "as_info": "", "cw": 0.0017691999999999999, "as_name": ""}, "results": [{"fp": "CE9CC720B9300FC7E041CCC2B749F283AB5EE1C2", "index": 1, "as_no": "AS7922", "nick": "Tornearse", "cc": "US", "p_exit": 0.0018185999999999999, "adv_bw": 0.005388199999999999, "guard": "-", "link": null, "p_guard": 0.0, "p_middle": 0.0034887, "exit": "Exit", "as_info": "AS7922 Comcast Cable Communications, Inc.", "cw": 0.0017691999999999999, "as_name": "Comcast Cable Communications, Inc."}], "excluded": null}
            """)
        self.assertItemsEqual(received,expected)
    def test_limit_dataset_size(self):
        # "top=5" must truncate the result list to exactly five entries.
        received = json.loads(self.app.get("/result.json?top=5").data)
        self.assertEqual(len(received['results']),5)
        expected = json.loads(open("testing/expectations/top5.expected").read())
        self.assertItemsEqual(received,expected)
# Bug fix: the guard compared against '__main' (missing trailing
# underscores), so the test suite never ran when executed directly.
if __name__ == '__main__':
    unittest.main()
|
nusenu/tor-compass
|
testing/app_tests.py
|
Python
|
mit
| 3,590
|
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showlegend", parent_name="scatter", **kwargs):
super(ShowlegendValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatter/_showlegend.py
|
Python
|
mit
| 452
|
from optimus.pages.views.base import PageViewBase
from optimus.conf.registry import settings # noqa: F401
class IndexView(PageViewBase):
"""
Sample page for index
"""
title = "My project"
template_name = "index.html"
# Default destination include the language code
destination = "index_{language_code}.html"
|
sveetch/Optimus
|
optimus/starters/basic/{{cookiecutter.package_name}}/project/views/index.py
|
Python
|
mit
| 340
|
#!/usr/bin/env python
def get_host_from_uri(uri):
    """
    Extract the bare host name from a libvirt connection URI.

    examples: qemu+ssh://root@hail.cloud.example.com/system
              test:///default
              qemu+ssh://192.168.122.6/system

    Returns 'localhost' when the URI carries no host part.
    """
    # Strip the scheme, the path, and any user@ prefix, in that order.
    authority = uri.split("//")[-1].split("/")[0]
    host = authority.split("@")[-1]
    return 'localhost' if host == '' else host
class FilterModule(object):
    """Filter plugin exposing the get_host_from_uri network filter."""

    def filters(self):
        # Ansible looks filters up by name in this mapping.
        filter_map = {
            'get_host_from_uri': get_host_from_uri,
        }
        return filter_map
|
herlo/linchpin
|
linchpin/provision/filter_plugins/get_host_from_uri.py
|
Python
|
gpl-3.0
| 497
|
#!/usr/bin/python
import telepot
import pprint
import time
import sys
import os
import json
import requests
import ConfigParser
import datetime
import traceback
import sqlite3
# "Domotibot" e' un bot di telegram che permette la gestione di alcune funzionalita' all'interno
# di una rete domestica. Viene usata la libreria "telepot" per le connessioni alle API di Telegram
#
# Programma di Francesco Tucci
# Versione 1.04 del 17/04/2016
#
# Il programma e' rilasciato con licenza GPL v.3
#
# genero un timestamp per l'inserimento nel file di log all'inizio di ogni riga
# ritorna il timestamp nel formato dd-mm-aaaa hh:mm:ss
def adesso():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    momento = datetime.datetime.fromtimestamp(time.time())
    return momento.strftime('%Y-%m-%d %H:%M:%S')
# qualche nota sul file di log:
# - il timestamp nel formato dd-mm-aaaa hh:mm:ss messo all'inizio della riga e' generato dalla funzione adesso()
# - dopo il timestamp metto tre caratteri che riguardano il tipo si messaggio
# 0 = [INF] informazione
# 1 = [AVV] avviso
# 2 = [ERR] errore
# cosi' posso filtrare il log alla ricerca di errori senza vedere tutti i messaggi meno gravi
def logga(livello, messaggio):
    """
    Append one line to the log file.

    The line starts with the timestamp from adesso(), followed by a
    severity tag so errors can be grepped without the noise:
        0 -> [INF] information
        1 -> [AVV] warning
        anything else -> [ERR] error

    :param livello: severity level (see above)
    :param messaggio: text to record
    """
    tag_per_livello = {0: " [INF]", 1: " [AVV]"}
    riga = adesso() + tag_per_livello.get(livello, " [ERR]") + " " + messaggio
    # Open in append mode so existing history is preserved.
    log = open(ConfigSectionMap("Sistema")['log'], "a")
    log.write(riga + "\n")
    log.close()
# funzione per la memorizzazione di tutti i parametri nel file di configrazione
# per poter accedere al parametro basta usare il comando
# x = ConfigSectionMap("nome_sezione)['nome_parametro']
# ritorna un array con tutti i valori della sezione richiesta
def ConfigSectionMap(section):
    """
    Return every option of *section* from the module-level Config parser
    as a dict.  Access pattern:

        x = ConfigSectionMap("section_name")['option_name']

    Options that fail to read are stored as None.
    """
    dict1 = {}
    options = Config.options(section)
    for option in options:
        try:
            dict1[option] = Config.get(section, option)
            if dict1[option] == -1:
                print("skip: %s" % option)
        except Exception:
            # Bug fix: narrowed from a bare "except:", which also
            # swallowed KeyboardInterrupt/SystemExit.
            print("exception on %s!" % option)
            dict1[option] = None
    return dict1
# scrivo un file di semaforo con data e ora di accensione, per evitare la falsa rilevazione
# tutte le volte che il sistema si accende
def scrivi_semaforo():
    """
    Write a semaphore file holding the power-on date/time, to avoid false
    motion detections every time the system boots.
    """
    # Bug fixes: the original wrote the literal string "adesso" instead of
    # the adesso() timestamp, and called ".close" without parentheses so
    # the file was never explicitly closed.  A context manager handles
    # both closing and flushing.
    with open(ConfigSectionMap("Sistema")['log_file_accensione'], "w") as quando_acceso:
        quando_acceso.write(adesso())
# per verificare se il processo di motion e' attivo
# ritorna il numero di occorrenze del processo "motion" attive nel sistema
# (attenzione, se si sta lavorando con il file di configurazione di motion aperto il conteggio e' falsato)
def verifica_motion():
    """
    Return the number of 'motion' occurrences in the process list.

    (Note: the count is skewed if e.g. an editor has the motion config
    file open, since matching is done on the raw `ps -Af` text.)
    """
    snapshot = os.popen("ps -Af").read()
    return snapshot.count('motion')
# cosa fare quando si riceve un messaggio
def handle(msg):
    """
    Dispatch one incoming Telegram message.

    Authorizes the sender against the configured user list, then matches
    the lower-cased text against the supported commands: /ciao,
    "tvcc on"/"tvcc off"/"tvcc?", "now", "temp?", "watt?".  Relies on the
    module-level ``bot`` object and on ConfigSectionMap for configuration.
    """
    # Fields of the incoming message.
    id_utente = msg['from']['id']            # sender id, used for authorization
    nome_utente = msg['from']['first_name']  # first name, used in friendly replies
    if 'last_name' in msg:
        cognome_utente = msg['from']['last_name']
    else:
        cognome_utente = ""
    id_chat = msg['chat']['id']              # chat the reply goes to
    # Lower-case the text to simplify command matching.
    testo = msg['text'].lower()
    # Debug: record exactly what reached the bot.
    logga(0, "L'utente " + nome_utente + " (" + str(id_utente) + ") ha scritto <<" + testo + ">> nella chat " + str(id_chat))
    # Keyboard payloads: reset to the standard keyboard / show our custom one.
    hide_keyboard = {'hide_keyboard': True}
    show_keyboard = {'keyboard': [['TVcc ON', 'TVcc OFF', 'TVcc?', 'Now'], ['Temp?', 'Watt?']]}
    # Only the configured users (utente_1..utente_5, "no" = unused slot)
    # may issue commands.
    utenti_abilitati = [ConfigSectionMap("Sistema")['utente_%d' % n] for n in range(1, 6)]
    utente_abilitato = any(u != "no" and id_utente == int(u)
                           for u in utenti_abilitati)
    if not utente_abilitato:
        bot.sendMessage(id_utente, "Spiacente, bot non attivo")
        # Warn the administrator that an unauthorized user wrote to the bot.
        messaggio = "Attenzione l'utente " + nome_utente + " " + cognome_utente + " (id " + str(id_utente) + ") ha scritto questo: <<" + testo + ">>"
        bot.sendMessage(ConfigSectionMap("Sistema")['utente_1'], messaggio)
        logga(1, "Messaggio da utente non autorizzato! --> " + nome_utente + " " + cognome_utente + " (id " + str(id_utente) + ") ha scritto questo: <<" + testo + ">>")
        return
    # Entry-point command.
    if testo == "/ciao" or testo == "/ciao@[nomedelbot]":
        messaggio = "Ciao " + nome_utente + ", cosa posso fare per te?"
        # Reply with the custom keyboard attached.
        bot.sendMessage(id_chat, messaggio, reply_markup=show_keyboard)
    # Start the video-surveillance system.
    elif testo == "tvcc on":
        logga(0, "Accendo motion come da richiesta")
        motion_on = verifica_motion()
        if motion_on == 0:
            # Launch motion, give it time to come up, then re-check.
            os.system("/home/pi/motion-mmal/motion")
            scrivi_semaforo()
            time.sleep(5)
            motion_on = verifica_motion()
            if motion_on == 0:
                messaggio = "Sistema TVcc NON attivato, potrebbe esserci un problema"
                bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
                logga(2, "Motion non si e' avviato")
            else:
                messaggio = "Sistema TVcc attivato con successo!"
                bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
                logga(0, "Motion regolarmente attivo")
        else:
            # Already running: nothing to do.
            messaggio = "Il Sistema TVcc era gia' attivo, non ho fatto nulla."
            bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
            logga(1, "Motion non attivato, era gia' attivo")
    # Stop the video-surveillance system.
    elif testo == 'tvcc off':
        logga(0, "Spengo motion come richiesto")
        motion_on = verifica_motion()
        if motion_on != 0:
            os.system("pkill motion")
            time.sleep(5)
            motion_on = verifica_motion()
            # Confirm the process is really gone.
            if motion_on == 0:
                messaggio = "Confermo che il sistema TVcc e' stato disattivato"
                bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
                logga(0, "Motion disattivato")
            else:
                messaggio = "Non sono riuscito a spegnere il sistema TVcc!"
                bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
                logga(3, "Motion ancora attivo, ci sono " + str(motion_on) + " processi attivi")
        else:
            messaggio = "Il sistema TVcc era gia' disattivato, non ho fatto niente."
            bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
            logga(0, "Motion gia' spento, nessun intervento")
    # Report the surveillance system status.
    elif testo == 'tvcc?':
        motion_on = verifica_motion()
        logga(0, "Verifica dello stato di motion")
        if motion_on == 0:
            messaggio = "Il sistema TVcc e' DISATTIVATO"
            bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
            logga(0, "motion spento")
        else:
            messaggio = "Il sistema TVcc e' ATTIVO"
            bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
            logga(0, "motion acceso")
    # Take a snapshot (only allowed while the system is off).
    elif testo == 'now':
        motion_on = verifica_motion()
        if motion_on != 0:
            messaggio = "Posso fare la foto solo con il sistema TVcc non attivo, se ti serve farla adesso devi prima disattivarlo"
            bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
            logga(1, "Richiesta foto istantanea non ammissibile con motion attivo")
        else:
            os.system("raspistill -w 1600 -h 1200 -ex verylong -t 1 -o /home/pi/Pictures/SingoloClick.jpg")
            path_image = '/home/pi/Pictures/SingoloClick.jpg'
            foto = open(path_image, 'rb')
            messaggio = "Ecco la foto che mi hai chiesto"
            bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
            bot.sendPhoto(id_chat, foto)
            os.system("rm /home/pi/Pictures/SingoloClick.jpg")
            logga(0, "Scattata la foto istantanea")
    # Temperature report for the last 24 hours.
    elif testo == "temp?":
        connessione = None
        try:
            connessione = sqlite3.connect('/[cartella]/[database].db')
            cursore = connessione.cursor()
            # Latest stored reading.
            cursore.execute("SELECT ID, strftime('%H:%M', Timestamp, 'localtime'), Luogo, Temp, Umid FROM Temperature WHERE Luogo = 1 ORDER BY ID DESC LIMIT 1;")
            righe = cursore.fetchall()
            for row in righe:
                messaggio = "*Ultima lettura delle ore " + str(row[1]) + "*\nT: " + str(row[3]) + " - Umid.: " + str(row[4]) + "%\n\n"
            # 24h maximum.
            cursore.execute("SELECT MAX(Temp), strftime('%d/%m %H:%M', Timestamp, 'localtime') from temperature where timestamp >= datetime('now','-1 day');")
            righe = cursore.fetchall()
            for row in righe:
                messaggio = messaggio + "*Nelle ultime 24 ore:*\n"
                messaggio = messaggio + "Massima: " + str(row[0]) + " (" + str(row[1]) + ")\n"
            # 24h minimum.
            cursore.execute("SELECT MIN(Temp), strftime('%d/%m %H:%M', Timestamp, 'localtime') from temperature where timestamp >= datetime('now','-1 day');")
            righe = cursore.fetchall()
            for row in righe:
                messaggio = messaggio + "Minima: " + str(row[0]) + " (" + str(row[1]) + ")\n"
            bot.sendMessage(id_chat, messaggio, parse_mode='Markdown', reply_markup=hide_keyboard)
        except sqlite3.Error as e:
            # Modernized from the Python-2-only "except X, e" syntax.
            return "Error %s:" % e.args[0]
        finally:
            if connessione:
                connessione.commit()
                connessione.close()
    # Power-consumption report for the last 24 hours.
    elif testo == "watt?":
        connessione = None
        try:
            connessione = sqlite3.connect('/[cartella]/[database].db')
            cursore = connessione.cursor()
            # Latest stored reading.
            cursore.execute("SELECT ID, strftime('%H:%M', Timestamp, 'localtime'), Luogo, Consumo FROM Corrente WHERE Luogo = 1 ORDER BY ID DESC LIMIT 1;")
            righe = cursore.fetchall()
            for row in righe:
                messaggio = "*Ultima lettura delle ore " + str(row[1]) + "*\nPotenza: " + str(row[3]) + " W\n\n"
            # 24h maximum.
            cursore.execute("SELECT MAX(Consumo), strftime('%d/%m %H:%M', Timestamp, 'localtime') from Corrente where timestamp >= datetime('now','-1 day');")
            righe = cursore.fetchall()
            for row in righe:
                messaggio = messaggio + "*Nelle ultime 24 ore:*\n"
                messaggio = messaggio + "Picco massimo: " + str(row[0]) + " W (" + str(row[1]) + ")\n"
            # 24h minimum.
            cursore.execute("SELECT MIN(Consumo), strftime('%d/%m %H:%M', Timestamp, 'localtime') from Corrente where timestamp >= datetime('now','-1 day');")
            righe = cursore.fetchall()
            for row in righe:
                messaggio = messaggio + "Consumo minimo: " + str(row[0]) + " W (" + str(row[1]) + ")\n"
            # 24h average.
            cursore.execute("SELECT AVG(Consumo), strftime('%d/%m %H:%M', Timestamp, 'localtime') from Corrente where timestamp >= datetime('now','-1 day');")
            righe = cursore.fetchall()
            for row in righe:
                messaggio = messaggio + "Consumo medio: " + str(int(row[0])) + " W\n"
            bot.sendMessage(id_chat, messaggio, parse_mode='Markdown', reply_markup=hide_keyboard)
        except sqlite3.Error as e:
            return "Error %s:" % e.args[0]
        finally:
            if connessione:
                connessione.commit()
                connessione.close()
    else:
        messaggio = "Ciao " + nome_utente + ", per interagire con me scrivi '/ciao' e segui le istruzioni"
        bot.sendMessage(id_chat, messaggio, reply_markup=hide_keyboard)
        # Bug fix: this line called the undefined name "loggo", raising
        # NameError on every unrecognized message.
        logga(1, "Messaggio non riconosciuto")
# Main entry point of the script (top-level code; there is no 'main' in Python);
# this runs only when the system boots.
# Read the configuration file to fetch all of the system's operating parameters.
# Ideally this file would live in /etc/tvcc.conf to follow Linux conventions;
# it is kept in the working directory for convenience.
Config = ConfigParser.ConfigParser()
Config.read("/home/pi/domotica_tucci/tvcc.conf")
# Log that the system has started.
logga(0, "Sistema avviato")
# On startup, notify the group chat — but first verify that the Internet
# connection is up by trying to fetch the public IP.
# Exception handling added for boots with no network available (27-01-2016).
connesso = False
while connesso == False:
    try:
        req = requests.get("http://httpbin.org/ip")
        connesso = True
        logga(0, "Internet c'e")
    except Exception, err:
        logga(3, "Manca Internet " + str(traceback.format_exc()))
        time.sleep(30)
# Keep retrying until the request returns HTTP 200.
while req.status_code != 200:
    time.sleep(30)
    req = requests.get("http://httpbin.org/ip")
    logga(2, "http status code: " + str(req.status_code))
# If the request returned HTTP 200 the response is good, so proceed.
if req.status_code == 200:
    # change the HTTP response body into a JSON type
    text = json.loads(req.text)
    # retrieve value by key using dict
    ip = text['origin']
    logga(0, "IP pubblico: " + ip)
# Obtain the LAN IP address (the WLAN lookup is left commented out).
indirizzo_eth0 = os.popen("/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'").read()
indirizzo_eth0 = indirizzo_eth0.replace('\n', '')
#indirizzo_wlan0 = os.popen("/sbin/ifconfig wlan00 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'").read()
logga (0, "IP di LAN: " + indirizzo_eth0)
# Create the bot and connect to Telegram using the unique API key
# issued by The BotFather (respect the family).
# Exception handling added to cope with missing connectivity or the
# Telegram servers being down (27/01/2016).
connessione_telegram = False
while connessione_telegram == False:
    try:
        bot = telepot.Bot(ConfigSectionMap("Sistema")['id_bot'])
        utente = bot.getMe()
        logga(0, "Connessione a Telegram avvenuta! ID utente del bot: " + str(utente['id']))
        connessione_telegram = True
    except Exception, err:
        logga(3, "Connessione a Telegram fallita o caduta")
        logga(3, traceback.format_exc())
        time.sleep(30)
# Send the informational startup messages to the chat.
messaggio = "Ciao, sono stato appena riavviato, per sicurezza adesso avvio la videosorveglianza e ti mostro alcune informazioni\n"
messaggio = messaggio + "IP pubblico del tuo sistema: " + ip + "\n"
messaggio = messaggio + "IP di LAN: " + indirizzo_eth0
bot.sendMessage(ConfigSectionMap("Sistema")['id_chat'], messaggio)
# Start Motion (the CCTV daemon) unless the 'off' switch was given on the
# command line.
# NOTE(review): indentation was lost in this copy; the else below is
# reconstructed as belonging to the outer len(sys.argv) check (so a plain
# no-argument boot starts Motion) — confirm against the original file.
if len(sys.argv) >=2:
    if sys.argv[1] == 'off':
        messaggio = "Mi hai chiesto di non avviare la TVcc, quindi non l'ho fatto"
        bot.sendMessage(ConfigSectionMap("Sistema")['id_chat'], messaggio)
        logga(0, "motion non attivato per switch OFF a riga di comando")
else:
    os.system("/home/pi/motion-mmal/motion")
    logga(0, "Comando avvio motion al boot del sistema")
    scrivi_semaforo()
    # Give Motion time to start.
    time.sleep(5)
    # Check whether the 'motion' process actually came up.
    motion_on = verifica_motion()
    # Report failure to start...
    if motion_on == 0:
        messaggio = "ATTENZIONE, il sistema TVcc non ha risposto al comando di attivazione!"
        bot.sendMessage(ConfigSectionMap("Sistema")['id_chat'], messaggio)
        logga(2, "Motion non e' partito")
    # ...or confirm that it is running.
    else:
        messaggio = "Confermo che il sistema TVcc e' adesso attivo"
        bot.sendMessage(ConfigSectionMap("Sistema")['id_chat'], messaggio)
        logga(0, "Motion e' partito al boot del sistema")
# Listen for incoming commands (handle is the message callback).
try:
    bot.notifyOnMessage(handle)
except Exception, err:
    logga(3, "Connessione a Telegram fallita o caduta")
    logga(3, traceback.format_exc())
# From here on, idle forever so the process stays alive for the
# home-automation / sensor side of the system, which warns on problems.
while 1:
    time.sleep(10)
|
cesco78/domotica-telegram
|
tvcc.py
|
Python
|
gpl-3.0
| 20,611
|
class GenericClassFactory(object):
    """Factory for the registration and creation of classes by string ID.

    Usage:
        # Create singleton factory object
        example_factory = GenericClassFactory(YourAbstractClass)
        def get_user_io_factory():
            return example_factory
        example_factory.register_algorithm(id_class='example_id',
                                           class_obj=ChildOfYourAbstractClass)
    """

    def __init__(self, abstract_class_type):
        # Maps registered ID -> class object.
        self._algorithms = {}
        # Base class that every registered class must inherit from.
        self._abstract_class_type = abstract_class_type

    def register_algorithm(self, id_class, class_obj):
        """
        Register an algorithm in the factory under the given ID.
        Algorithm has to be a subclass of self._abstract_class_type

        :param id_class: ID for the algorithm
        :type id_class: str
        :param class_obj: the algorithm class
        :type class_obj: class that inherits from self._abstract_class_type
        :raises TypeError: if class_obj does not subclass the factory base class
        :raises KeyError: if id_class is already registered
        """
        # BUG FIX: the original used `assert ("message")`, which asserts a
        # non-empty (always truthy) string and therefore never fired; both
        # error conditions were silently ignored. Raise real exceptions.
        if not issubclass(class_obj, self._abstract_class_type):
            raise TypeError(
                "Algo is not subclass of " + str(self._abstract_class_type))
        # dict.has_key() was removed in Python 3; use the `in` operator.
        if id_class in self._algorithms:
            raise KeyError("Algorithm ID already in use: " + str(id_class))
        self._algorithms[id_class] = class_obj

    def create_algorithm(self, id_class, **kwargs):
        """
        Initialize the algorithm with the given ID.

        :param id_class: the id of the impl that should be created
        :type id_class: str
        :return: a specific instance of self._abstract_class_type
        :raises LookupError: if no class is registered under id_class
        """
        if id_class not in self._algorithms:
            raise LookupError("Cannot find class_id: " + id_class)
        return self._algorithms[id_class](**kwargs)
|
cehberlin/photobooth
|
utils.py
|
Python
|
lgpl-3.0
| 1,723
|
# portage: news management code
# Copyright 2006-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
__all__ = ["NewsManager", "NewsItem", "DisplayRestriction",
"DisplayProfileRestriction", "DisplayKeywordRestriction",
"DisplayInstalledRestriction",
"count_unread_news", "display_news_notifications"]
import io
import logging
import os as _os
import re
from portage import OrderedDict
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
from portage.const import NEWS_LIB_PATH
from portage.util import apply_secpass_permissions, ensure_dirs, \
grabfile, normalize_path, write_atomic, writemsg_level
from portage.data import portage_gid
from portage.dep import isvalidatom
from portage.localization import _
from portage.locks import lockfile, unlockfile
from portage.output import colorize
from portage.exception import InvalidLocation, OperationNotPermitted, \
PermissionDenied
class NewsManager(object):
    """
    This object manages GLEP 42 style news items.  It will cache news items
    that have previously shown up and notify users when there are relevant news
    items that apply to their packages that the user has not previously read.

    Creating a news manager requires:

    root - typically ${ROOT} see man make.conf and man emerge for details
    news_path - path to news items; usually $REPODIR/metadata/news
    unread_path - path to the news.repoid.unread file; this helps us track news items
    """

    def __init__(self, portdb, vardb, news_path, unread_path, language_id='en'):
        self.news_path = news_path
        self.unread_path = unread_path
        self.language_id = language_id
        self.config = vardb.settings
        self.vdb = vardb
        self.portdb = portdb
        # GLEP 42 says:
        #   All news item related files should be root owned and in the
        #   portage group with the group write (and, for directories,
        #   execute) bits set.  News files should be world readable.
        self._uid = int(self.config["PORTAGE_INST_UID"])
        self._gid = portage_gid
        self._file_mode = 0o0064
        self._dir_mode = 0o0074
        self._mode_mask = 0o0000
        portdir = portdb.porttree_root
        profiles_base = os.path.join(portdir, 'profiles') + os.path.sep
        profile_path = None
        if portdb.settings.profile_path:
            profile_path = normalize_path(
                os.path.realpath(portdb.settings.profile_path))
            if profile_path.startswith(profiles_base):
                # Store the path relative to the repository's profiles/
                # directory so it can be compared against the values of
                # Display-If-Profile headers.
                profile_path = profile_path[len(profiles_base):]
        self._profile_path = profile_path

    def _unread_filename(self, repoid):
        # Path of the per-repository file listing unread item names.
        return os.path.join(self.unread_path, 'news-%s.unread' % repoid)

    def _skip_filename(self, repoid):
        # Path of the per-repository file listing items already processed,
        # so they are not re-parsed on subsequent scans.
        return os.path.join(self.unread_path, 'news-%s.skip' % repoid)

    def _news_dir(self, repoid):
        # Directory holding the news items of the given repository.
        repo_path = self.portdb.getRepositoryPath(repoid)
        if repo_path is None:
            raise AssertionError(_("Invalid repoID: %s") % repoid)
        return os.path.join(repo_path, self.news_path)

    def updateItems(self, repoid):
        """
        Figure out which news items from NEWS_PATH are both unread and relevant to
        the user (according to the GLEP 42 standards of relevancy).  Then add these
        items into the news.repoid.unread file.
        """
        # Ensure that the unread path exists and is writable.
        try:
            ensure_dirs(self.unread_path, uid=self._uid, gid=self._gid,
                mode=self._dir_mode, mask=self._mode_mask)
        except (OperationNotPermitted, PermissionDenied):
            # Unprivileged callers silently skip the update.
            return
        if not os.access(self.unread_path, os.W_OK):
            return
        news_dir = self._news_dir(repoid)
        try:
            # Listing is done on encoded (filesystem) names; decoding of
            # each entry is handled individually below.
            news = _os.listdir(_unicode_encode(news_dir,
                encoding=_encodings['fs'], errors='strict'))
        except OSError:
            return
        skip_filename = self._skip_filename(repoid)
        unread_filename = self._unread_filename(repoid)
        # Hold the lock for the whole read-modify-write cycle.
        unread_lock = lockfile(unread_filename, wantnewlockfile=1)
        try:
            try:
                unread = set(grabfile(unread_filename))
                unread_orig = unread.copy()
                skip = set(grabfile(skip_filename))
                skip_orig = skip.copy()
            except PermissionDenied:
                return
            for itemid in news:
                try:
                    itemid = _unicode_decode(itemid,
                        encoding=_encodings['fs'], errors='strict')
                except UnicodeDecodeError:
                    # Undecodable name: log it (with replacement chars) and
                    # move on to the next item.
                    itemid = _unicode_decode(itemid,
                        encoding=_encodings['fs'], errors='replace')
                    writemsg_level(
                        _("!!! Invalid encoding in news item name: '%s'\n") % \
                        itemid, level=logging.ERROR, noiselevel=-1)
                    continue
                if itemid in skip:
                    continue
                filename = os.path.join(news_dir, itemid,
                    itemid + "." + self.language_id + ".txt")
                if not os.path.isfile(filename):
                    continue
                item = NewsItem(filename, itemid)
                if not item.isValid():
                    continue
                # NOTE(review): indentation was lost in this copy; the skip
                # update is reconstructed inside the relevance branch, matching
                # upstream portage (irrelevant items may become relevant later,
                # e.g. after installing a package) — confirm.
                if item.isRelevant(profile=self._profile_path,
                    config=self.config, vardb=self.vdb):
                    unread.add(item.name)
                    skip.add(item.name)
            # Only rewrite the state files when something actually changed.
            if unread != unread_orig:
                write_atomic(unread_filename,
                    "".join("%s\n" % x for x in sorted(unread)))
                apply_secpass_permissions(unread_filename,
                    uid=self._uid, gid=self._gid,
                    mode=self._file_mode, mask=self._mode_mask)
            if skip != skip_orig:
                write_atomic(skip_filename,
                    "".join("%s\n" % x for x in sorted(skip)))
                apply_secpass_permissions(skip_filename,
                    uid=self._uid, gid=self._gid,
                    mode=self._file_mode, mask=self._mode_mask)
        finally:
            unlockfile(unread_lock)

    def getUnreadItems(self, repoid, update=False):
        """
        Determine if there are unread relevant items in news.repoid.unread.
        If there are unread items return their number.
        If update is specified, updateNewsItems( repoid ) will be called to
        check for new items.
        """
        if update:
            self.updateItems(repoid)
        unread_filename = self._unread_filename(repoid)
        unread_lock = None
        try:
            unread_lock = lockfile(unread_filename, wantnewlockfile=1)
        except (InvalidLocation, OperationNotPermitted, PermissionDenied):
            # Locking may be impossible for unprivileged users; fall back
            # to an unlocked read.
            pass
        try:
            try:
                return len(grabfile(unread_filename))
            except PermissionDenied:
                return 0
        finally:
            if unread_lock:
                unlockfile(unread_lock)
_formatRE = re.compile("News-Item-Format:\s*([^\s]*)\s*$")
_installedRE = re.compile("Display-If-Installed:(.*)\n")
_profileRE = re.compile("Display-If-Profile:(.*)\n")
_keywordRE = re.compile("Display-If-Keyword:(.*)\n")
class NewsItem(object):
    """
    This class encapsulates a GLEP 42 style news item.
    It's purpose is to wrap parsing of these news items such that portage can determine
    whether a particular item is 'relevant' or not.  This requires parsing the item
    and determining 'relevancy restrictions'; these include "Display if Installed" or
    "display if arch: x86" and so forth.

    Creation of a news item involves passing in the path to the particular news item.
    """

    def __init__(self, path, name):
        """
        For a given news item we only want if it path is a file.
        """
        self.path = path
        self.name = name
        # Parsing is deferred until isValid()/isRelevant() is first called.
        self._parsed = False
        self._valid = True

    def isRelevant(self, vardb, config, profile):
        """
        This function takes a dict of keyword arguments; one should pass in any
        objects need to do to lookups (like what keywords we are on, what profile,
        and a vardb so we can look at installed packages).
        Each restriction will pluck out the items that are required for it to match
        or raise a ValueError exception if the required object is not present.

        Restrictions of the form Display-X are OR'd with like-restrictions;
        otherwise restrictions are AND'd.  any_match is the ORing and
        all_match is the ANDing.
        """
        if not self._parsed:
            self.parse()
        # An item with no restrictions is relevant to everyone.
        if not len(self.restrictions):
            return True
        kwargs = \
            { 'vardb' : vardb,
            'config' : config,
            'profile' : profile }
        # Restrictions are grouped by type (see parse()): OR within a group,
        # AND across groups.
        all_match = True
        for values in self.restrictions.values():
            any_match = False
            for restriction in values:
                if restriction.checkRestriction(**kwargs):
                    any_match = True
            if not any_match:
                all_match = False
        return all_match

    def isValid(self):
        # Validity is determined as a side effect of parsing.
        if not self._parsed:
            self.parse()
        return self._valid

    def parse(self):
        # Read the item with filesystem-encoded path and content encoding
        # taken from portage's global settings; bad bytes are replaced.
        f = io.open(_unicode_encode(self.path,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'], errors='replace')
        lines = f.readlines()
        f.close()
        # Maps restriction type (id() of the restriction class) to the list
        # of parsed restriction instances of that type.
        self.restrictions = {}
        invalids = []
        for i, line in enumerate(lines):
            # Optimization to ignore regex matches on lines that
            # will never match
            format_match = _formatRE.match(line)
            # Only News-Item-Format 1.0 is understood; anything else makes
            # the whole item invalid.
            if format_match is not None and format_match.group(1) != '1.0':
                invalids.append((i + 1, line.rstrip('\n')))
                break
            if not line.startswith('D'):
                continue
            restricts = { _installedRE : DisplayInstalledRestriction,
                _profileRE : DisplayProfileRestriction,
                _keywordRE : DisplayKeywordRestriction }
            for regex, restriction in restricts.items():
                match = regex.match(line)
                if match:
                    restrict = restriction(match.groups()[0].strip())
                    if not restrict.isValid():
                        invalids.append((i + 1, line.rstrip("\n")))
                    else:
                        self.restrictions.setdefault(
                            id(restriction), []).append(restrict)
                    continue
        if invalids:
            # Mark the item invalid and report every offending line.
            self._valid = False
            msg = []
            msg.append(_("Invalid news item: %s") % (self.path,))
            for lineno, line in invalids:
                msg.append(_("  line %d: %s") % (lineno, line))
            writemsg_level("".join("!!! %s\n" % x for x in msg),
                level=logging.ERROR, noiselevel=-1)
        self._parsed = True
class DisplayRestriction(object):
    """Abstract base class for news item display ('relevancy') restrictions.

    GLEP 42 news items may carry restrictions limiting when an item is
    important; an item is displayed when its restrictions are satisfied.
    """

    def isValid(self):
        """Whether the restriction itself is well formed (default: yes)."""
        return True

    def checkRestriction(self, **kwargs):
        """Return True when this restriction is satisfied; subclasses override."""
        raise NotImplementedError('Derived class should override this method')
class DisplayProfileRestriction(DisplayRestriction):
    """Restrict display of a news item to users running a given profile."""

    def __init__(self, profile):
        self.profile = profile

    def checkRestriction(self, **kwargs):
        # Satisfied only when the caller's active profile matches exactly.
        return self.profile == kwargs['profile']
class DisplayKeywordRestriction(DisplayRestriction):
    """Restrict display of a news item to users running a given keyword."""

    def __init__(self, keyword):
        self.keyword = keyword

    def checkRestriction(self, **kwargs):
        # Compare against the ARCH value from the user's configuration.
        return kwargs['config']['ARCH'] == self.keyword
class DisplayInstalledRestriction(DisplayRestriction):
    """Restrict display of a news item to users with a given atom installed."""

    def __init__(self, atom):
        self.atom = atom

    def isValid(self):
        # Usable only when the atom parses as a valid dependency atom.
        return isvalidatom(self.atom)

    def checkRestriction(self, **kwargs):
        # Satisfied when the installed-package database matches the atom.
        return bool(kwargs['vardb'].match(self.atom))
def count_unread_news(portdb, vardb, repos=None, update=True):
    """
    Returns a dictionary mapping repos to integer counts of unread news items.
    By default, this will scan all repos and check for new items that have
    appeared since the last scan.

    @param portdb: a portage tree database
    @type portdb: pordbapi
    @param vardb: an installed package database
    @type vardb: vardbapi
    @param repos: names of repos to scan (None means to scan all available repos)
    @type repos: list or None
    @param update: check for new items (default is True)
    @type update: boolean
    @rtype: dict
    @return: dictionary mapping repos to integer counts of unread news items
    """
    NEWS_PATH = os.path.join("metadata", "news")
    UNREAD_PATH = os.path.join(vardb.settings['EROOT'], NEWS_LIB_PATH, "news")
    news_counts = OrderedDict()
    if repos is None:
        repos = portdb.getRepositories()
    permission_msgs = set()
    for repo in repos:
        try:
            manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
            # BUG FIX: the original hard-coded update=True here, silently
            # ignoring the caller's `update` argument.
            count = manager.getUnreadItems(repo, update=update)
        except PermissionDenied as e:
            # NOTE: The NewsManager typically handles permission errors by
            # returning silently, so PermissionDenied won't necessarily be
            # raised even if we do trigger a permission error above.
            msg = _unicode_decode("Permission denied: '%s'\n") % (e,)
            # Report each distinct permission problem only once.
            if msg not in permission_msgs:
                permission_msgs.add(msg)
                writemsg_level(msg, level=logging.ERROR, noiselevel=-1)
            news_counts[repo] = 0
        else:
            news_counts[repo] = count
    return news_counts
def display_news_notifications(news_counts):
    """
    Display a notification for unread news items, using a dictionary mapping
    repos to integer counts, like that returned from count_unread_news().
    """
    printed_header = False
    for repo_name, unread in news_counts.items():
        # Guard clause: repos without unread items produce no output.
        if unread <= 0:
            continue
        if not printed_header:
            printed_header = True
            print()
        print(colorize("WARN", " * IMPORTANT:"), end=' ')
        print("%s news items need reading for repository '%s'." % (unread, repo_name))
    # Print the trailing hint only when at least one repo was reported.
    if printed_header:
        print(colorize("WARN", " *"), end=' ')
        print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
        print()
|
clickbeetle/portage-cb
|
pym/portage/news.py
|
Python
|
gpl-2.0
| 13,120
|
def zero():
    """Placeholder: print an empty line."""
    # FIX: parenthesized so the file also parses under Python 3;
    # behavior under Python 2 is unchanged (prints one empty line).
    print("")

def one():
    """Placeholder: print an empty line."""
    print("")
# data function information
# funcs is """like""" a matrix of """functions"""
# def is like a function
# NOTE(review): calc_circle_area, draw_circle_area and two are not defined
# anywhere in this file, so evaluating this dict raises NameError at import
# time — confirm where these helpers were meant to come from.
funcs = {
    'circle' : { 'area' : calc_circle_area, 'draw' : draw_circle_area},
    'square' : one,
    2 : two
}
# funcs is like "procedural generation"
# Work for September 20th, 2016
def make_student(name, age):
    """Build a student record as a dict with 'name' and 'age' keys."""
    # BUG FIX: the original line ended with a stray '.', a syntax error.
    return {'name' : name, 'age' : age}
# bryant = {'name' : 'Bryant', 'age' : 17}
# bruno = {'name' : 'Bruno', 'age' : 17}
bryant = make_student("Bryant", 17)
bruno = make_student("Bruno", 17)
# I didn't quite understand the purpose of dictionaries and the def function,
# but now I remember the ultimate goal of coding: save them lines, playa.
students = [bryant, bruno]

def calc_year_born(age):
    """Return the birth year for a student of the given age."""
    # NOTE(review): always returns 1998 regardless of age — presumably a
    # placeholder; confirm the intended formula (e.g. current year - age).
    return 1998

for s in students:
    year_born = calc_year_born(s['age'])
    # FIX: parenthesized Python-2 print statement so the file also parses
    # under Python 3; output under Python 2 is unchanged.
    print("%s was born in %d" % (s['name'], year_born))
# What kind of geometric crap did chris just do?
|
FlippantSol/CIS-121
|
file.py
|
Python
|
mit
| 1,005
|
def main(request, response):
    """wptserve handler: serve a CSP test page with `style-src *` headers.

    Reads the test server's host/port settings from config.json, sets the
    Content-Security-Policy headers on the response, and returns an HTML
    document that loads one cross-origin and one same-origin stylesheet and
    asserts which styles are applied.
    """
    import simplejson as json
    # BUG FIX: the original used the Python-2-only file() builtin and never
    # closed the handle; open() inside a context manager works on 2 and 3.
    with open('config.json') as f:
        source = f.read()
    s = json.JSONDecoder().decode(source)
    # Cross-origin base URL used by the external stylesheet link below.
    # (The original also computed an unused url2 from ports['http'][0].)
    url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
    _CSP = "style-src *"
    # Set the header under its standard and legacy vendor-prefixed names.
    response.headers.set("Content-Security-Policy", _CSP)
    response.headers.set("X-Content-Security-Policy", _CSP)
    response.headers.set("X-WebKit-CSP", _CSP)
    return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
        Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
  <head>
    <title>CSP Test: csp_style-src_asterisk</title>
    <link rel="author" title="Intel" href="http://www.intel.com/"/>
    <link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#style-src"/>
    <meta name="flags" content=""/>
    <meta name="assert" content="style-src *"/>
    <meta charset="utf-8"/>
    <script src="../resources/testharness.js"></script>
    <script src="../resources/testharnessreport.js"></script>
    <link rel="stylesheet" type="text/css" href='""" + url1 + """/tests/csp/support/w3c/canvas-index.css'/>
    <link rel="stylesheet" type="text/css" href="support/blue-100x100.css"/>
    <style>
      #test-green {
        background-color: green;
      }
    </style>
  </head>
  <body>
    <div id="log"></div>
    <div id="test-blue"></div>
    <div id="test-green"></div>
    <h3>ext-css:""" + url1 + """/tests/csp/support/w3c/canvas-index.css</h3>
    <script>
      test(function() {
        var div = document.querySelector("h3");
        var fix = getComputedStyle(div)["display"];
        assert_equals(fix, "inline", "style setted incorrectly");
      }, document.title + "_allowed_ext");
      test(function() {
        var div = document.querySelector("#test-blue");
        var fix = getComputedStyle(div)["backgroundColor"];
        assert_equals(fix, "rgb(0, 0, 255)", "style setted incorrectly");
      }, document.title + "_allowed_int");
      test(function() {
        var div = document.querySelector("#test-green");
        var fix = getComputedStyle(div)["backgroundColor"];
        assert_not_equals(fix, "rgb(0, 128, 0)", "style setted incorrectly");
      }, document.title + "_blocked_inline");
    </script>
  </body>
</html> """
|
kaixinjxq/web-testing-service
|
wts/tests/csp/csp_style-src_asterisk.py
|
Python
|
bsd-3-clause
| 3,755
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C)
# 2004-2011: Pexego Sistemas Informáticos. (http://pexego.es)
# 2013: Top Consultant Software Creations S.L.
# (http://www.topconsultant.es/)
# 2014-2015: Serv. Tecnol. Avanzados - Pedro M. Baeza
# (http://www.serviciosbaeza.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest for the Spanish AEAT "Modelo 349" declaration.
{
    "name": "Modelo 349 AEAT",
    "version": "8.0.2.2.0",
    # Adjacent string literals are concatenated into one author string.
    "author": "Pexego, "
              "Top Consultant, "
              "Serv. Tecnol. Avanzados - Pedro M. Baeza, "
              "Antiun Ingeniería S.L.,"
              "Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "category": 'Localisation/Accounting',
    'website': 'https://github.com/OCA/l10n-spain',
    # Addons that must be installed before this one.
    "depends": [
        "account",
        "account_invoice_currency",
        "account_refund_original",
        "l10n_es",
        "l10n_es_aeat",
    ],
    # XML/CSV data files loaded on installation (views, wizard, report,
    # security rules and access lists).
    'data': [
        "wizard/export_mod349_to_boe.xml",
        "views/account_fiscal_position_view.xml",
        "views/account_invoice_view.xml",
        "views/mod349_view.xml",
        "report/mod349_report.xml",
        "security/ir.model.access.csv",
        "security/mod_349_security.xml",
    ],
    # Hook executed right after installation; presumably backfills invoice
    # operation keys on existing records — confirm in the addon's code.
    'post_init_hook': '_assign_invoice_operation_keys',
    'installable': True,
}
|
diagramsoftware/l10n-spain
|
l10n_es_aeat_mod349/__openerp__.py
|
Python
|
agpl-3.0
| 2,121
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the multi-processing analysis process."""
import os
import time
import unittest
from plaso.analysis import interface as analysis_interface
from plaso.containers import sessions
from plaso.engine import configurations
from plaso.multi_process import analysis_process
from plaso.multi_process import plaso_queue
from plaso.multi_process import zeromq_queue
from tests import test_lib as shared_test_lib
from tests.multi_process import test_lib
class TestAnalysisPlugin(analysis_interface.AnalysisPlugin):
    """Minimal no-op analysis plugin used by the process tests."""

    NAME = 'test_plugin'

    # pylint: disable=arguments-renamed
    # pylint: disable=unused-argument
    def CompileReport(self, mediator):
        """Compiles a report of the analysis.

        Invoked once every event copy has been delivered to the plugin, so
        that the final report can be assembled. This stub produces nothing.

        Args:
          mediator (AnalysisMediator): mediates interactions between
              analysis plugins and other components, such as storage and dfvfs.
        """
        return None

    def ExamineEvent(self, mediator, event, event_data, event_data_stream):
        """Analyzes an event. This stub ignores every event.

        Args:
          mediator (AnalysisMediator): mediates interactions between analysis
              plugins and other components, such as storage and dfvfs.
          event (EventObject): event.
          event_data (EventData): event data.
          event_data_stream (EventDataStream): event data stream.
        """
        return None
class AnalysisProcessTest(test_lib.MultiProcessingTestCase):
    """Tests the multi-processing analysis process."""

    # pylint: disable=protected-access

    # Timeout, in seconds, applied to the ZeroMQ queues used by these tests.
    _QUEUE_TIMEOUT = 5

    def testInitialization(self):
        """Tests the initialization."""
        with shared_test_lib.TempDirectory() as temp_directory:
            configuration = configurations.ProcessingConfiguration()
            configuration.task_storage_path = temp_directory
            test_process = analysis_process.AnalysisProcess(
                None, None, None, None, configuration, name='TestAnalysis')
            self.assertIsNotNone(test_process)

    def testGetStatus(self):
        """Tests the _GetStatus function."""
        with shared_test_lib.TempDirectory() as temp_directory:
            configuration = configurations.ProcessingConfiguration()
            configuration.task_storage_path = temp_directory
            test_process = analysis_process.AnalysisProcess(
                None, None, None, None, configuration, name='TestAnalysis')
            status_attributes = test_process._GetStatus()
            self.assertIsNotNone(status_attributes)
            self.assertEqual(status_attributes['identifier'], 'TestAnalysis')
            # No plugin has run, so no report count is available yet.
            self.assertIsNone(status_attributes['number_of_produced_reports'])

    # TODO: add test with analysis mediator.

    def testMain(self):
        """Tests the _Main function."""
        # The process pulls events from a queue connected to this push queue.
        output_event_queue = zeromq_queue.ZeroMQPushBindQueue(
            name='test output event queue', timeout_seconds=self._QUEUE_TIMEOUT)
        output_event_queue.Open()
        input_event_queue = zeromq_queue.ZeroMQPullConnectQueue(
            name='test input event queue', delay_open=True,
            port=output_event_queue.port,
            timeout_seconds=self._QUEUE_TIMEOUT)
        session = sessions.Session()
        analysis_plugin = TestAnalysisPlugin()
        with shared_test_lib.TempDirectory() as temp_directory:
            # Set up the processed for the task storage file generated by the
            # analysis plugin.
            os.mkdir(os.path.join(temp_directory, 'processed'))
            configuration = configurations.ProcessingConfiguration()
            configuration.task_storage_path = temp_directory
            test_process = analysis_process.AnalysisProcess(
                input_event_queue, None, session, analysis_plugin, configuration,
                name='TestAnalysis')
            test_process._FOREMAN_STATUS_WAIT = 1
            test_process.start()
            # Pushing QueueAbort tells the analysis process to shut down.
            output_event_queue.PushItem(plaso_queue.QueueAbort(), block=False)
            output_event_queue.Close(abort=True)
            # Sleep for 1 second to allow the analysis process to terminate.
            # Before the temporary directory is removed.
            time.sleep(1)

    # TODO: add test for _ProcessEvent.

    def testSignalAbort(self):
        """Tests the SignalAbort function."""
        with shared_test_lib.TempDirectory() as temp_directory:
            configuration = configurations.ProcessingConfiguration()
            configuration.task_storage_path = temp_directory
            test_process = analysis_process.AnalysisProcess(
                None, None, None, None, configuration, name='TestAnalysis')
            test_process.SignalAbort()


if __name__ == '__main__':
    unittest.main()
|
log2timeline/plaso
|
tests/multi_process/analysis_process.py
|
Python
|
apache-2.0
| 4,590
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
class sale_journal_invoice_type(osv.osv):
    """Configurable invoice type driving how sale invoicing is grouped."""
    _name = 'sale_journal.invoice.type'
    _description = 'Invoice Types'
    _columns = {
        'name': fields.char('Invoice Type', size=64, required=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the invoice type without removing it."),
        'note': fields.text('Note'),
        # 'simple' creates one invoice per document, 'grouped' merges them.
        'invoicing_method': fields.selection([('simple', 'Non grouped'), ('grouped', 'Grouped')], 'Invoicing method', required=True),
    }
    _defaults = {
        'active': True,
        'invoicing_method': 'simple'
    }

# Instantiating the class registers the model in the ORM (old OpenERP API).
sale_journal_invoice_type()
#==============================================
# sale journal inherit
#==============================================
class res_partner(osv.osv):
    """Extend partners with a default invoicing type."""
    _inherit = 'res.partner'
    _columns = {
        # 'property' many2one to the invoice type applied by default when
        # invoicing this partner.
        'property_invoice_type': fields.property(
            'sale_journal.invoice.type',
            type = 'many2one',
            relation = 'sale_journal.invoice.type',
            string = "Invoicing Type",
            view_load = True,
            group_name = "Accounting Properties",
            help = "This invoicing type will be used, by default, for invoicing the current partner."),
    }

res_partner()
class picking(osv.osv):
    """Carry the sale order's invoice type onto stock pickings (read-only)."""
    _inherit = "stock.picking"
    _columns = {
        'invoice_type_id': fields.many2one('sale_journal.invoice.type', 'Invoice Type', readonly=True)
    }

picking()
class sale(osv.osv):
    """Extend sale orders with an invoice type and propagate it downstream."""
    _inherit = "sale.order"
    _columns = {
        'invoice_type_id': fields.many2one('sale_journal.invoice.type', 'Invoice Type')
    }

    def _prepare_order_picking(self, cr, uid, order, context=None):
        # Copy the order's invoice type into the picking values prepared by
        # the parent implementation (False when the order has none).
        result = super(sale,self)._prepare_order_picking(cr, uid, order, context=context)
        result.update(invoice_type_id=order.invoice_type_id and order.invoice_type_id.id or False)
        return result

    def onchange_partner_id(self, cr, uid, ids, part):
        # Default the order's invoice type from the selected partner's
        # property_invoice_type.
        result = super(sale, self).onchange_partner_id(cr, uid, ids, part)
        if part:
            itype = self.pool.get('res.partner').browse(cr, uid, part).property_invoice_type
            if itype:
                # NOTE(review): assumes the parent onchange always returns a
                # dict containing a 'value' key — confirm.
                result['value']['invoice_type_id'] = itype.id
        return result

sale()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
crmccreary/openerp_server
|
openerp/addons/sale_journal/sale_journal.py
|
Python
|
agpl-3.0
| 3,321
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ReplicationControllerSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, min_ready_seconds=None, replicas=None, selector=None, template=None):
        """
        V1ReplicationControllerSpec - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
          and the value is attribute type.
        :param dict attributeMap: The key is attribute name
          and the value is json key in definition.
        """
        # Declared attribute types; to_dict() walks these keys.
        self.swagger_types = {
            'min_ready_seconds': 'int',
            'replicas': 'int',
            'selector': 'dict(str, str)',
            'template': 'V1PodTemplateSpec'
        }
        # Python attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'min_ready_seconds': 'minReadySeconds',
            'replicas': 'replicas',
            'selector': 'selector',
            'template': 'template'
        }
        self._min_ready_seconds = min_ready_seconds
        self._replicas = replicas
        self._selector = selector
        self._template = template

    @property
    def min_ready_seconds(self):
        """
        Minimum number of seconds for which a newly created pod should be ready
        without any of its containers crashing, for it to be considered
        available. Defaults to 0 (pod is available as soon as it is ready).

        :return: The min_ready_seconds of this V1ReplicationControllerSpec.
        :rtype: int
        """
        return self._min_ready_seconds

    @min_ready_seconds.setter
    def min_ready_seconds(self, min_ready_seconds):
        """
        Sets the min_ready_seconds of this V1ReplicationControllerSpec.

        :param min_ready_seconds: The min_ready_seconds of this V1ReplicationControllerSpec.
        :type: int
        """
        self._min_ready_seconds = min_ready_seconds

    @property
    def replicas(self):
        """
        Number of desired replicas; a pointer to distinguish explicit zero from
        unspecified. Defaults to 1.

        :return: The replicas of this V1ReplicationControllerSpec.
        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """
        Sets the replicas of this V1ReplicationControllerSpec.

        :param replicas: The replicas of this V1ReplicationControllerSpec.
        :type: int
        """
        self._replicas = replicas

    @property
    def selector(self):
        """
        Label query over pods that should match the Replicas count. If empty,
        defaults to the labels on the pod template.

        :return: The selector of this V1ReplicationControllerSpec.
        :rtype: dict(str, str)
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """
        Sets the selector of this V1ReplicationControllerSpec.

        :param selector: The selector of this V1ReplicationControllerSpec.
        :type: dict(str, str)
        """
        self._selector = selector

    @property
    def template(self):
        """
        Pod template used to create pods when insufficient replicas are
        detected; takes precedence over a TemplateRef.

        :return: The template of this V1ReplicationControllerSpec.
        :rtype: V1PodTemplateSpec
        """
        return self._template

    @template.setter
    def template(self, template):
        """
        Sets the template of this V1ReplicationControllerSpec.

        :param template: The template of this V1ReplicationControllerSpec.
        :type: V1PodTemplateSpec
        """
        self._template = template

    def to_dict(self):
        """
        Returns the model properties as a dict, serializing nested models.
        """
        serialized = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """
        Returns the string representation of the model.
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()

    def __eq__(self, other):
        """
        True when both objects are the same model type with equal state.
        """
        return (isinstance(other, V1ReplicationControllerSpec)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """
        True when the objects are not equal.
        """
        return not (self == other)
|
djkonro/client-python
|
kubernetes/client/models/v1_replication_controller_spec.py
|
Python
|
apache-2.0
| 7,495
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "AnimTools",
"author": "Piotr Trochim <ptrochim@gmail.com>",
"version": (0, 1),
"blender": (2, 75),
"location": "Tool bar > Animation tab > AnimTools",
"description": "A set of animation tools",
"warning": "",
"category": "Animation",
}
if "bpy" in locals():
import importlib
if "extract_motion" in locals():
importlib.reload(extract_motion)
import bpy
from bpy.props import *
from . import extract_motion
#
# GUI
#
class ExtractMotionGUI(bpy.types.Panel):
    """3D-View tool-shelf panel exposing the AnimTools operators."""

    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_category = "Animation"
    bl_label = 'Animation Tools'

    @classmethod
    def poll(cls, context):
        # Only show the panel when an object is active.
        return context.object is not None

    def draw_header(self, context):
        # Header checkbox bound to the active object's selection state.
        self.layout.prop(context.object, "select", text="")

    def draw(self, context):
        row = self.layout.row()
        row.operator('anim.extract_motion_animtools', text="Extract Motion")
        # << Register other animation filters here
# << Register other animation filters here
##################################################
# Plugin registration
##################################################
def register():
    """Add-on entry point: register every class in this module with Blender."""
    bpy.utils.register_module(__name__)
def unregister():
    """Add-on exit point: unregister every class in this module from Blender."""
    bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
|
paksas/anim_tools
|
anim_tools/__init__.py
|
Python
|
gpl-2.0
| 2,208
|
#!/usr/bin/env python
#
# Copyright 2011 Markus Pielmeier
#
# This file is part of tagfs-gui.
#
# tagfs-gui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs-gui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs-gui. If not, see <http://www.gnu.org/licenses/>.
#
from distutils.core import setup, Command
import sys
import os
from os.path import (
basename,
dirname,
abspath,
splitext,
join as pjoin
)
from glob import glob
from unittest import TestLoader, TextTestRunner
projectDir = dirname(abspath(__file__))
srcDir = pjoin(projectDir, 'src')
modulesDir = pjoin(srcDir, 'modules')
testDir = pjoin(srcDir, 'test')
def printEnv():
    # Dump the effective module/test directories and sys.path for debugging.
    # NOTE: Python 2 print statements -- this setup.py targets Python 2.
    print "..using:"
    print " modulesDir:", modulesDir
    print " testDir:", testDir
    print " sys.path:", sys.path
def setUpTagFsGuiSysPath():
    """Prepend the project's test and module directories to sys.path."""
    # Same final order as two insert(0, ...) calls: testDir first, modulesDir second.
    sys.path[:0] = [testDir, modulesDir]
class AbstractCommand(Command):
    # Shared base for this project's custom distutils commands; subclasses
    # only need to implement run().
    def initialize_options(self):
        # Remember the launch directory and pick a default verbosity for tools
        # (e.g. the unittest TextTestRunner) started by subclasses.
        self._cwd = os.getcwd()
        self._verbosity = 2
    def finalize_options(self):
        # No derived options to compute.
        pass
class test(AbstractCommand):
    # Custom `python setup.py test` command: discovers test*.py modules in
    # testDir and runs them with the stdlib unittest runner.
    # NOTE: contains Python 2 print statements.
    description = 'run tests'
    user_options = []
    def run(self):
        import re
        # Case-insensitive match for files named test*.py (any directory prefix).
        testPyMatcher = re.compile('(.*/)?test[^/]*[.]py', re.IGNORECASE)
        tests = [splitext(basename(f))[0] for f in glob(pjoin(testDir, '*.py')) if testPyMatcher.match(f)]
        setUpTagFsGuiSysPath()
        printEnv()
        print " tests:", tests
        # configure logging
        # TODO not sure how to enable this... it's a bit complicate to enable
        # logging only for 'make mt' and disable it then for
        # 'python setup.py test'. 'python setup.py test' is such a gabber...
        #if 'DEBUG' in os.environ:
        # from tagfs import log_config
        # log_config.setUpLogging()
        suite = TestLoader().loadTestsFromNames(tests)
        TextTestRunner(verbosity = self._verbosity).run(suite)
class DemoEditGui(AbstractCommand):
    # Custom `python setup.py demo_gtagfs_edit` command: launches the tag edit
    # GUI against the bundled demo data set.
    description = 'launch gtagfs-edit demo'
    user_options = []
    def run(self):
        setUpTagFsGuiSysPath()
        printEnv()
        # Import deferred until the project dirs are on sys.path.
        from tagfs_gui import edit
        edit.main([sys.argv[0], pjoin('etc', 'demo', '2008-12-25 - holiday india')])
# Distutils package description; wires in the custom 'test' and
# 'demo_gtagfs_edit' commands defined above.
setup(
    cmdclass = {
        'test': test,
        'demo_gtagfs_edit': DemoEditGui
    },
    name = 'tagfs-gui',
    version = '0.0.1',
    description = 'GUI framework for tagfs tag editors.',
    long_description = '',
    author = 'Markus Pielmeier',
    author_email = 'markus.pielmeier@googlemail.com',
    license = 'GPLv3',
    requires = [],
    classifiers = [
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python'
    ],
    data_files = [
        (pjoin('share', 'tagfs-gui'), ['AUTHORS', 'README', 'COPYING', pjoin('src', 'glade', 'tagEditDialog.glade')])
    ],
    scripts = [pjoin('src', 'bin', 'gtagfs-edit')],
    packages = ['tagfs_gui'],
    package_dir = {'': pjoin('src', 'modules')}
)
|
marook/tagfs-gui
|
setup.py
|
Python
|
gpl-3.0
| 3,636
|
#! /usr/bin/env python
# Copyright (C) 2014 Aldebaran
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# io_export_visual.py
# Authors: Mikael Arguedas [mikael.arguedas@gmail.com]
#
# This script parses a blender file from Aldebaran robotics, export all the materials of the scene,
# It also export all the meshes to COLLADA files (.dae)
from __future__ import print_function
import bpy
import os
import sys
# Script body: runs inside Blender, invoked as
#   blender <file> --python io_export_visual.py -- <mesh_dir> <name>
argv = sys.argv
argv = argv[argv.index("--") + 1:] # get all args after "--"
# Initialize the blacklist:
# If you don't want to export some meshes, either add their name to this list
# or suffix them with a keyword and add the keyword to this list
blacklist = ["skip"]
# Get the folder where the meshes will be saved
mesh_dir = argv[0]
name = argv[1]
# Keep a copy of user selection
bpy.ops.object.select_by_type(type="MESH")
sel_obs = bpy.context.selected_objects[:]
# Export all the materials from the scene.
# NOTE(review): 'bpy.ops.ogre.export' requires the blender2ogre add-on to be
# enabled; it is not part of stock Blender -- confirm the deployment enables it.
bpy.ops.ogre.export(filepath=os.path.join(mesh_dir, name + '.scene'),
                    EX_COPY_SHADER_PROGRAMS=False,
                    EX_MATERIALS = True,
                    EX_SEP_MATS=False,
                    EX_SWAP_AXIS='xyz',
                    EX_MESH_OVERWRITE=False,
                    EX_MESH=False,
                    EX_SELONLY=False,
                    EX_SCENE=False,
                    EX_EXPORT_HIDDEN=False,
                    EX_FORCE_CAMERA=False,
                    EX_FORCE_LAMPS=False,
                    EX_ARM_ANIM=False,
                    EX_SHAPE_ANIM=False,
                    EX_ARRAY=False,
                    EX_optimiseAnimations=False)
# Browse the object list and export all the meshes as COLLADA files(.dae)
for ob in bpy.data.objects:
    # A mesh is skipped when any blacklist keyword occurs in its name.
    skip_mesh = False
    for keyword in blacklist:
        if ob.name.find(keyword) != -1:
            skip_mesh = True
    # Skip non-mesh objects or all children meshes we don't want to export
    if ob.type != 'MESH' or skip_mesh == True:
        continue
    # Clear selection
    bpy.ops.object.select_all(action="DESELECT")
    # Select single object (unhide first so the exporter sees it)
    ob.hide = False
    ob.select = True
    # Export single object to COLLADA file (.dae)
    # NOTE(review): COLLADA export is normally exposed as
    # bpy.ops.wm.collada_export; 'bpy.context.scene.collada_export' looks
    # suspicious -- verify against the Blender version this targets.
    bpy.context.scene.collada_export(filepath=os.path.join(mesh_dir, ob.name + ".dae"), selected = True, include_uv_textures=True, include_children=True)
# Restore user selection
bpy.ops.object.select_all(action="DESELECT")
for ob in sel_obs:
    ob.select = True
    bpy.context.scene.objects.active = ob
print("%s meshes exported." % len(sel_obs))
bpy.ops.wm.quit_blender()
|
LCAS/spqrel_tools
|
ros_ws/src/naoqi_bridge/naoqi_tools/scripts/blender/io_export_visual.py
|
Python
|
mit
| 2,743
|
# create files for chart-02-X
# with these choices
# metric in median-root-median-squared-errors
# model in q50 (quantile-50)
# ndays in 30 60 ... 360
# predictors in act actlog ct ctlog
# responses in price logprice
# usetax in yes no
# year in 2008
# invocations and files created
# python chart-02X.py makefile -> src/chart-02X.makefile
# python chart-02X.py data -> data/working/chart-02X.data
# python chart-02X.py txt -> data/working/chart-02X.txt
# python chart-02X.py txtY -> data/working/chart-02X-Y.txt
import sys
from Bunch import Bunch
from chart_02_template import chart
def main():
    """Build the chart-02 huber100/2008 variant and hand off to the template."""
    spec_kwargs = dict(
        metric='median-of-root-median-squared-errors',
        title='Median of Root Median Squared Errors',
        model='huber100',  # huber loss with eps = 100000
        # '30', '60', ..., '360' days of training data
        training_periods=[str(30 * k) for k in range(1, 13)],
        feature_sets=['act', 'actlog', 'ct', 'ctlog'],
        responses=['price', 'logprice'],
        usetax=['yes', 'no'],
        year='2008',
    )
    chart(specs=Bunch(**spec_kwargs), argv=sys.argv)
if __name__ == '__main__':
main()
|
rlowrance/re-local-linear
|
chart-02-huber100-2008-act-ct-median-median.py
|
Python
|
mit
| 1,276
|
###
# Copyright (c) 2015, Andrew Phillips
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('TheCatApi')
except ImportError:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x: x
class TheCatApi(callbacks.Plugin):
    """Provides access to http://thecatapi.com/"""
    threaded = True

    # A cat id is strictly alphanumeric.
    idRe = re.compile('^[a-zA-Z0-9]+$')
    # Minimal XML extraction from the API response.
    xmlCatIdRe = re.compile('<id>([a-zA-Z0-9]+)</id>')
    xmlCatUrlRe = re.compile('<source_url>(.+)</source_url>')

    def _fetchXml(self, cmd, args):
        """Fetch `cmd` from the configured API base URL and return raw XML.

        `args` is a list of pre-encoded 'key=value' query fragments.
        """
        urlBase = self.registryValue('urlBase')
        api = self.registryValue('apiId')
        url = urlBase + cmd + '?format=xml'
        if api:
            url = url + '&api=%s' % api
        if len(args) != 0:
            url = url + '&' + '&'.join(args)
        return utils.web.getUrl(url)

    def _formatCat(self, result):
        """Format the cat id and source URL from `result` XML for IRC output."""
        catId = 'Unknown'
        catIdMatches = self.xmlCatIdRe.search(result)
        if catIdMatches:
            catId = catIdMatches.group(1)
        catUrl = 'Unknown'
        catUrlMatches = self.xmlCatUrlRe.search(result)
        if catUrlMatches:
            catUrl = catUrlMatches.group(1)
        return 'Cat "%s": %s' % (catId, catUrl)

    def cat(self, irc, msg, args, catId):
        """[id]
        Returns cat matching [id] or a random cat.
        """
        result = ''
        # BUG FIX: was `if catId is '':` -- identity comparison against a
        # string literal is unreliable; test emptiness instead.
        if not catId:
            result = self._fetchXml("/images/get", [])
        else:
            # BUG FIX: match() was called with no argument (TypeError at
            # runtime); validate the supplied id.
            if self.idRe.match(catId):
                result = self._fetchXml("/images/get", ['image_id=' + catId])
        irc.reply(self._formatCat(result))
    cat = wrap(cat, [additional(('somethingWithoutSpaces', 'cat id'), '')])

    def catgif(self, irc, msg, args):
        """
        Returns a cat with a type of gif
        """
        result = self._fetchXml("/images/get", ['type=gif'])
        irc.reply(self._formatCat(result))
    catgif = wrap(catgif)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
theasp/supybot-thecatapi
|
TheCatApi/plugin.py
|
Python
|
gpl-2.0
| 3,844
|
#!/usr/bin/python3
from tkinter import *
from tkinter.messagebox import *
fenetre = Tk()
fenetre.title("The Enigma Machine")
fenetre.configure(background='white')
fenetre.geometry("550x800")
class AutoScrollbar(Scrollbar):
    """Grid-managed scrollbar that hides itself when the full range is visible."""

    def set(self, lo, hi):
        # Remove from the grid when everything fits; re-show otherwise.
        if float(lo) <= 0.0 and float(hi) >= 1.0:
            self.tk.call("grid", "remove", self)
        else:
            self.grid()
        Scrollbar.set(self, lo, hi)

    def pack(self, **kw):
        # BUG FIX: `raise(TclError, "msg")` raised a tuple, which is a
        # TypeError in Python 3 -- raise a proper exception instance.
        raise TclError("can't pack this widget")

    def place(self, **kw):
        # BUG FIX: same raise-tuple problem, plus the message said "grid"
        # (copy-paste) for the place manager.
        raise TclError("can't place this widget")
vscrollbar = AutoScrollbar(fenetre)
vscrollbar.grid(row=0, column=1, sticky=N+S)
hscrollbar = AutoScrollbar(fenetre, orient=HORIZONTAL)
hscrollbar.grid(row=1, column=0, sticky=E+W)
canvas = Canvas(fenetre,
yscrollcommand=vscrollbar.set,
xscrollcommand=hscrollbar.set)
canvas.grid(row=0, column=0, sticky=N+S+E+W)
vscrollbar.config(command=canvas.yview)
hscrollbar.config(command=canvas.xview)
# make the canvas expandable
fenetre.grid_rowconfigure(0, weight=1)
fenetre.grid_columnconfigure(0, weight=1)
#
# create canvas contents
frame = Frame(canvas, background="white", borderwidth=0)
#enigma_image
image = PhotoImage(file="enigma.gif")
Label(frame, image=image, background="white", borderwidth=0).pack(padx=10, pady=10, side=TOP)
#help_button
def help():
    # Quick-start dialog for the Help button. NOTE: intentionally shadows the
    # builtin help() -- the name is referenced by the button's command= below.
    showinfo("The Enigma Machine Quick Start", "Hello World!\nThis is a quick tutorial on how to use this app!\nFirst, you need to choose the order of the rotors.\nThen you need to set the rotors' position\nYou can finally write your message and encrypt it by pressing the Return key!\nThat's it, you've just encrypt your first enigma message!\n Have fun!")
helpButton = Button(frame, text ="Help! Quick Start", command = help, background="white")
helpButton.pack(padx=5, pady=5)
#spinboxes_choose_rotors
frameRotor = Frame(frame, background='white')
var4 = StringVar()
spinbox = Spinbox(frameRotor, values = ("rotor1=[J,G,D,Q,O,X,U,S,C,A,M,I,F,R,V,T,P,N,E,W,K,B,L,Z,Y,H]",
"rotor2=[N,T,Z,P,S,F,B,O,K,M,W,R,C,J,D,I,V,L,A,E,Y,U,X,H,G,Q]",
"rotor3=[J,V,I,U,B,H,T,C,D,Y,A,K,E,Q,Z,P,O,S,G,X,N,R,M,W,F,L]"), textvariable=var4, width=44)
var4.set("rotor1=[J,G,D,Q,O,X,U,S,C,A,M,I,F,R,V,T,P,N,E,W,K,B,L,Z,Y,H]")
spinbox.grid(row=0, column=1)
var5 = StringVar()
spinbox = Spinbox(frameRotor, values = ("rotor1=[J,G,D,Q,O,X,U,S,C,A,M,I,F,R,V,T,P,N,E,W,K,B,L,Z,Y,H]",
"rotor2=[N,T,Z,P,S,F,B,O,K,M,W,R,C,J,D,I,V,L,A,E,Y,U,X,H,G,Q]",
"rotor3=[J,V,I,U,B,H,T,C,D,Y,A,K,E,Q,Z,P,O,S,G,X,N,R,M,W,F,L]"), textvariable=var5, width=44)
var5.set("rotor2=[N,T,Z,P,S,F,B,O,K,M,W,R,C,J,D,I,V,L,A,E,Y,U,X,H,G,Q]")
spinbox.grid(row=1, column=1)
var6 = StringVar()
spinbox = Spinbox(frameRotor, values = ("rotor1=[J,G,D,Q,O,X,U,S,C,A,M,I,F,R,V,T,P,N,E,W,K,B,L,Z,Y,H]",
"rotor2=[N,T,Z,P,S,F,B,O,K,M,W,R,C,J,D,I,V,L,A,E,Y,U,X,H,G,Q]",
"rotor3=[J,V,I,U,B,H,T,C,D,Y,A,K,E,Q,Z,P,O,S,G,X,N,R,M,W,F,L]"), textvariable=var6, width=44)
var6.set("rotor3=[J,V,I,U,B,H,T,C,D,Y,A,K,E,Q,Z,P,O,S,G,X,N,R,M,W,F,L]")
spinbox.grid(row=2, column=1)
var7 = StringVar()
spinbox = Spinbox(frameRotor, values = ("reflec=[Y,R,U,H,Q,S,L,D,P,X,N,G,O,K,M,I,E,B,F,Z,C,W,V,J,A,T]"), textvariable=var7, width=44)
var7.set("reflec=[Y,R,U,H,Q,S,L,D,P,X,N,G,O,K,M,I,E,B,F,Z,C,W,V,J,A,T]")
spinbox.grid(row=3, column=1)
rotorn1 = Label(frameRotor, text='Slot n°=1:', padx=10, pady=5, background="white")
rotorn1.grid(row=0, column=0)
rotorn2 = Label(frameRotor, text='Slot n°=2:', padx=10, pady=5, background="white")
rotorn2.grid(row=1, column=0)
rotorn3 = Label(frameRotor, text='Slot n°=3:', padx=10, pady=5, background="white")
rotorn3.grid(row=2, column=0)
reflectorn = Label(frameRotor, text='Reflector:', padx=10, pady=5, background="white")
reflectorn.grid(row=3, column=0)
frameRotor.pack()
#frame_to_set_rotor_position
frame1 = Frame(frame, borderwidth=0, relief=FLAT, background='white')
frame1.pack(side=TOP, padx=10, pady=10)
def update1(x):
    """Scale callback: show rotor 1's position (1-26) as a letter in lab1."""
    idx = int(x) - 1
    lab1.configure(text='position : {}'.format('ABCDEFGHIJKLMNOPQRSTUVWXYZ'[idx]))
def update2(x):
    """Scale callback: show rotor 2's position (1-26) as a letter in lab2."""
    idx = int(x) - 1
    lab2.configure(text='position : {}'.format('ABCDEFGHIJKLMNOPQRSTUVWXYZ'[idx]))
def update3(x):
    """Scale callback: show rotor 3's position (1-26) as a letter in lab3."""
    idx = int(x) - 1
    lab3.configure(text='position : {}'.format('ABCDEFGHIJKLMNOPQRSTUVWXYZ'[idx]))
rotor1lab = Label(frame1, text='Rotor 1', padx=10, pady=5, background="white")
rotor1lab.grid(row=0, column=0)
rotor2lab = Label(frame1, text='Rotor 2', padx=10, pady=5, background="white")
rotor2lab.grid(row=0, column=1)
rotor3lab = Label(frame1, text='Rotor 3', padx=10, pady=5, background="white")
rotor3lab.grid(row=0, column=2)
#scales_choose_position
var1 = DoubleVar()
scale = Scale(frame1, from_=1, to=26, variable = var1, cursor='dot', showvalue=0, command=update1, length= 100, background="white")
scale.grid(row=1, column=0, padx=60, pady=10)
var2 = DoubleVar()
scale = Scale(frame1, from_=1, to=26, variable = var2, cursor='dot', showvalue=0, command=update2, length= 100, background="white")
scale.grid(row=1, column=1, padx=60, pady=10)
var3 = DoubleVar()
scale = Scale(frame1, from_=1, to=26, variable = var3, cursor='dot', showvalue=0, command=update3, length= 100, background="white")
scale.grid(row=1, column=2, padx=60, pady=10)
lab1 = Label(frame1, background="white")
lab1.grid(row=2, column=0)
lab2 = Label(frame1, background="white")
lab2.grid(row=2, column=1)
lab3 = Label(frame1, background="white")
lab3.grid(row=2, column=2)
#function_code
def code(event=None):
    # Encode/decode the entry text with the simulated Enigma machine and append
    # the result to the results listbox. Bound both to the Return key (receives
    # an event) and to the "(de)code" button (no event).
    # Initial rotor offsets from the three position scales (1-26).
    a = int(var1.get())
    b = int(var2.get())
    c = int(var3.get())
    def rotationRotor(liste1):
        # Step a rotor by one: move its first contact to the end (in place).
        liste1.append(liste1[0])
        del liste1[0]
        return liste1
    def estValide(liste1):
        # True when the (space-stripped) input is non-empty and all letters.
        # NOTE: reads alphabetList, which is assigned later in this function
        # but before estValide is called.
        if liste1 == []:
            return False
        for elt in liste1:
            if alphabetList.count(elt.upper()) < 1:
                return False
        return True
    sortie = entryvar.get()
    # Spinbox strings look like "rotorN=[...]"; character [5] is the rotor
    # number N, used to select the wiring below.
    var4str = var4.get()
    var4list = list(var4str)
    var5str = var5.get()
    var5list = list(var5str)
    var6str = var6.get()
    var6list = list(var6str)
    if var4list[5] == '1':
        rotor1 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
    elif var4list[5] == '2':
        rotor1 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
    elif var4list[5] == '3':
        rotor1 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
    if var5list[5] == '1':
        rotor2 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
    elif var5list[5] == '2':
        rotor2 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
    elif var5list[5] == '3':
        rotor2 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
    if var6list[5] == '1':
        rotor3 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
    elif var6list[5] == '2':
        rotor3 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
    elif var6list[5] == '3':
        rotor3 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
    # 27 entries: trailing space lets estValide accept spaces in the input.
    alphabetList = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',' ']
    # Letter -> 1-based alphabet position.
    alphabetDict = {'G': 7, 'U': 21, 'T': 20, 'L': 12, 'Y': 25, 'Q': 17, 'V': 22, 'J': 10, 'O': 15, 'W': 23, 'N': 14, 'R': 18, 'Z': 26, 'S': 19, 'X': 24, 'A': 1, 'M': 13, 'E': 5, 'D': 4, 'I': 9, 'F': 6, 'P': 16, 'B': 2, 'H': 8, 'K': 11, 'C': 3}
    #rotor1 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
    #rotor2 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
    #rotor3 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
    reflector = ['Y', 'R', 'U', 'H', 'Q', 'S', 'L', 'D', 'P', 'X', 'N', 'G', 'O', 'K', 'M', 'I', 'E', 'B', 'F', 'Z', 'C', 'W', 'V', 'J', 'A', 'T']
    # Apply the initial offsets by pre-stepping each rotor.
    for loop1 in range(a):
        rotationRotor(rotor1)
    for loop2 in range(b):
        rotationRotor(rotor2)
    for loop3 in range(c):
        rotationRotor(rotor3)
    sortieListe = list(sortie)
    print(sortieListe)
    # Spaces are validated (via alphabetList) but dropped before encoding.
    sortieListe = [x for x in sortieListe if x != " "]
    print(sortieListe)
    if not estValide(sortieListe):
        value = StringVar()
        value.set('Please enter only letters and spaces!')
        liste.insert(END, value.get())
        liste.itemconfig(END, {'bg':'red'})
        liste.see("end")
    elif (var4list[5] == var5list[5] == var6list[5]) or (var4list[5] == var5list[5]) or (var4list[5] == var6list[5]) or (var5list[5] == var6list[5]):
        # Each rotor wiring may be mounted in only one slot.
        value = StringVar()
        value.set('You can only use a rotor once!')
        liste.insert(END, value.get())
        liste.itemconfig(END, {'bg':'red'})
        liste.see("end")
    else:
        s = []
        for i in range(0,len(sortieListe),1):
            # Forward pass through the three rotors, bounce off the reflector,
            # then trace back through the rotors by index lookup.
            # NOTE: a/b/c are reused here as scratch names, shadowing the
            # initial offsets read above (which are no longer needed).
            a = alphabetDict[sortieListe[i].upper()]
            b = rotor1[a-1]
            c = alphabetDict[b]
            d = rotor2[c-1]
            e = alphabetDict[d]
            f = rotor3[e-1]
            g = alphabetDict[f]
            h = reflector[g-1]
            j = rotor3.index(h)
            k = alphabetList[j]
            l = rotor2.index(k)
            m = alphabetList[l]
            n = rotor1.index(m)
            o = alphabetList[n]
            s.append(o)
            # Odometer-style stepping: rotor1 every letter ((i+1) % 1 is
            # always 0), rotor2 every 26 letters, rotor3 every 26*26 letters.
            if (i+1) % 1 == 0:
                rotationRotor(rotor1)
            if (i+1) % 26 == 0:
                rotationRotor(rotor2)
            if (i+1) % 676 == 0:
                rotationRotor(rotor3)
        value = StringVar()
        value.set(''.join(s))
        liste.insert(END, value.get())
        liste.see("end")
#text_entry
entryvar = StringVar()
entry = Entry(frame, textvariable = entryvar, width=50, background="white")
entry.focus_set()
entry.bind("<Return>", code)
entry.pack(padx=10, pady=10)
#clear_listbox
def clear():
    # Button callback: empty the results listbox.
    liste.delete(0, END)
#button_to_(de)code
b1 = Button(frame, text="(de)code", width=10, command=code, background="white")
b1.pack()
#store_results
f1 = Frame(frame)
s1 = Scrollbar(f1)
liste = Listbox(f1, height=5, width=50, borderwidth=0, background='white')
s1.config(command = liste.yview)
liste.config(yscrollcommand = s1.set)
liste.pack(side = LEFT, fill = Y, padx=5, pady=5)
s1.pack(side = RIGHT, fill = Y)
f1.pack()
#button_to_clear_listbox
b2 = Button(frame, text="clear list", width=10, command=clear, background='white')
b2.pack(padx=5, pady=5)
#credits
credits = Label(frame, text = "coded with <3 by omnitrogen",background="white")
credits.pack(side = BOTTOM, padx=10, pady=10)
#quit_button
quitButton = Button(frame, text="quit", width=10, command=frame.quit, background='white')
quitButton.pack(side = BOTTOM)
canvas.create_window(0, 0, anchor=NW, window=frame)
frame.update_idletasks()
canvas.config(scrollregion=canvas.bbox("all"))
mainloop()
|
omnitrogen/enigma
|
gui/enigma-gui-resizable-window.py
|
Python
|
mit
| 11,660
|
#!/usr/bin/python
'''
Created on 2016-05-17
@author: jasonszang
'''
import numpy as np
from deeplearning.nlp.lm.lm_util import *
from deeplearning.util.onehot import VocabularyDict
TEXT = 'when there is a will there is a way .\nknowledge is power .'
ORDER = 5
def test_preprocess_text_ngram():
    # Smoke test: build a vocabulary padded with ORDER-1 start markers and an
    # end marker, then print the n-gram preprocessing of TEXT.
    # NOTE: Python 2 print statement -- this test module targets Python 2.
    vocabulary = VocabularyDict()
    vocabulary.set_prepend(['<S>'] * (ORDER - 1))
    vocabulary.set_append(['</S>'])
    print preprocess_text_ngram(TEXT, ORDER, vocabulary, True)
def test_categorical_generator_train():
    # Smoke test: print the first 10 batches from the shuffling train-time
    # categorical generator (batch size 4, 2 epochs per shuffle).
    # NOTE: Python 2 syntax (print statements, gen.next()).
    #X = np.array([[1,1],[2,2],[3,3],[4,4],[5,5],[6,6]])
    #y = np.array([1,2,3,4,5,6])
    vocabulary = VocabularyDict()
    vocabulary.set_prepend(['<S>'] * (ORDER - 1))
    vocabulary.set_append(['</S>'])
    X, y = preprocess_text_ngram(TEXT, ORDER, vocabulary, True)
    # +1 because max_index is the highest used index, not the vocabulary size.
    max_index = vocabulary.onehot_dict.max_index + 1
    gen = get_categorical_generator_train(X, y, max_index, 4, 2)
    for i in range(10):
        print gen.next()
        print '---'
def test_categorical_generator_eval():
    # Smoke test: print the first 10 batches from the non-shuffling eval-time
    # categorical generator (batch size 5).
    # NOTE: Python 2 syntax (print statements, gen.next()).
    vocabulary = VocabularyDict()
    vocabulary.set_prepend(['<S>'] * (ORDER - 1))
    vocabulary.set_append(['</S>'])
    X, y = preprocess_text_ngram(TEXT, ORDER, vocabulary, True)
    # +1 because max_index is the highest used index, not the vocabulary size.
    max_index = vocabulary.onehot_dict.max_index + 1
    gen = get_categorical_generator_eval(X, y, max_index, 5)
    for i in range(10):
        print gen.next()
        print '---'
if __name__ == '__main__':
#test_preprocess_text_ngram()
#test_categorical_generator_train()
test_categorical_generator_eval()
|
jasonszang/DeepLearning
|
tests/test_lm_util.py
|
Python
|
apache-2.0
| 1,529
|
from quicktions import Fraction
from . import (
_update,
deprecated,
enumerate,
format,
get,
illustrators,
io,
iterate,
iterpitches,
lyconst,
lyenv,
makers,
mutate,
persist,
string,
wf,
)
from ._version import __version__, __version_info__
from .bind import Wrapper, annotate, attach, detach
from .bundle import LilyPondFormatBundle, SlotContributions
from .configuration import (
Configuration,
list_all_classes,
list_all_functions,
yield_all_modules,
)
from .contextmanagers import (
ContextManager,
FilesystemState,
ForbidUpdate,
NullContextManager,
ProgressIndicator,
RedirectedStreams,
TemporaryDirectory,
TemporaryDirectoryChange,
Timer,
)
from .cyclictuple import CyclicTuple
from .duration import Duration, Multiplier, NonreducedFraction, Offset
from .dynamic import Dynamic
from .enums import (
Center,
Comparison,
Down,
Exact,
HorizontalAlignment,
Left,
Less,
Middle,
More,
Right,
Up,
VerticalAlignment,
)
from .exceptions import (
AssignabilityError,
ImpreciseMetronomeMarkError,
LilyPondParserError,
MissingMetronomeMarkError,
ParentageError,
PersistentIndicatorError,
SchemeParserFinishedError,
UnboundedTimeIntervalError,
WellformednessError,
)
from .format import lilypond
from .get import Lineage
from .illustrators import illustrate
from .indicators import (
Arpeggio,
Articulation,
BarLine,
BeamCount,
BendAfter,
BreathMark,
Clef,
ColorFingering,
Fermata,
Glissando,
KeyCluster,
KeySignature,
LaissezVibrer,
MarginMarkup,
MetronomeMark,
Mode,
Ottava,
RehearsalMark,
Repeat,
RepeatTie,
StaffChange,
StaffPosition,
StartBeam,
StartGroup,
StartHairpin,
StartMarkup,
StartPhrasingSlur,
StartPianoPedal,
StartSlur,
StartTextSpan,
StartTrillSpan,
StemTremolo,
StopBeam,
StopGroup,
StopHairpin,
StopPhrasingSlur,
StopPianoPedal,
StopSlur,
StopTextSpan,
StopTrillSpan,
Tie,
TimeSignature,
)
from .instruments import (
Accordion,
AltoFlute,
AltoSaxophone,
AltoTrombone,
AltoVoice,
BaritoneSaxophone,
BaritoneVoice,
BassClarinet,
BassFlute,
BassSaxophone,
BassTrombone,
BassVoice,
Bassoon,
Cello,
ClarinetInA,
ClarinetInBFlat,
ClarinetInEFlat,
Contrabass,
ContrabassClarinet,
ContrabassFlute,
ContrabassSaxophone,
Contrabassoon,
EnglishHorn,
Flute,
FrenchHorn,
Glockenspiel,
Guitar,
Harp,
Harpsichord,
Instrument,
Marimba,
MezzoSopranoVoice,
Oboe,
Percussion,
Piano,
Piccolo,
SopraninoSaxophone,
SopranoSaxophone,
SopranoVoice,
StringNumber,
TenorSaxophone,
TenorTrombone,
TenorVoice,
Trumpet,
Tuba,
Tuning,
Vibraphone,
Viola,
Violin,
Xylophone,
)
from .io import graph, show
from .label import ColorMap
from .lilypondfile import Block, LilyPondFile
from .lyproxy import (
LilyPondContext,
LilyPondEngraver,
LilyPondGrob,
LilyPondGrobInterface,
)
from .makers import LeafMaker, NoteMaker
from .markups import Markup
from .math import Infinity, NegativeInfinity
from .meter import Meter, MeterList, MetricAccentKernel
from .metricmodulation import MetricModulation
from .obgc import OnBeatGraceContainer, on_beat_grace_container
from .overrides import (
IndexedTweakManager,
IndexedTweakManagers,
Interface,
LilyPondLiteral,
LilyPondOverride,
LilyPondSetting,
OverrideInterface,
SettingInterface,
TweakInterface,
override,
setting,
tweak,
)
from .parentage import Parentage
from .parsers import parser
from .parsers.base import Parser
from .parsers.parse import parse
from .pattern import Pattern, PatternTuple
from .pcollections import (
IntervalClassSegment,
IntervalClassSet,
IntervalSegment,
IntervalSet,
PitchClassSegment,
PitchClassSet,
PitchRange,
PitchSegment,
PitchSet,
Segment,
Set,
TwelveToneRow,
)
from .pitch import (
Accidental,
Interval,
IntervalClass,
NamedInterval,
NamedIntervalClass,
NamedInversionEquivalentIntervalClass,
NamedPitch,
NamedPitchClass,
NumberedInterval,
NumberedIntervalClass,
NumberedInversionEquivalentIntervalClass,
NumberedPitch,
NumberedPitchClass,
Octave,
Pitch,
PitchClass,
PitchTyping,
)
from .ratio import NonreducedRatio, Ratio
from .score import (
AfterGraceContainer,
BeforeGraceContainer,
Chord,
Cluster,
Component,
Container,
Context,
DrumNoteHead,
Leaf,
MultimeasureRest,
Note,
NoteHead,
NoteHeadList,
Rest,
Score,
Skip,
Staff,
StaffGroup,
TremoloContainer,
Tuplet,
Voice,
)
from .select import LogicalTie, Selection
from .setclass import SetClass
from .spanners import (
beam,
glissando,
hairpin,
horizontal_bracket,
ottava,
phrasing_slur,
piano_pedal,
slur,
text_spanner,
tie,
trill_spanner,
)
from .tag import Line, Tag, activate, deactivate
from .timespan import OffsetCounter, Timespan, TimespanList
from .typedcollections import TypedCollection, TypedFrozenset, TypedList, TypedTuple
from .typings import (
DurationSequenceTyping,
DurationTyping,
IntegerPair,
IntegerSequence,
Number,
NumberPair,
PatternTyping,
Prototype,
RatioSequenceTyping,
RatioTyping,
Strings,
)
from .verticalmoment import (
VerticalMoment,
iterate_leaf_pairs,
iterate_pitch_pairs,
iterate_vertical_moments,
)
# Convenience aliases: re-export Pattern's alternate constructors at package
# level (presumably classmethods on Pattern — confirm against .pattern module).
index = Pattern.index
index_all = Pattern.index_all
index_first = Pattern.index_first
index_last = Pattern.index_last
# Public API of the package, kept in strict ASCII-sorted order so additions
# diff cleanly.  Fixed two entries that were out of order ("get", "lilypond").
# NOTE(review): several names here (e.g. "Configuration", "Expression",
# "Fraction", "Wrapper") are bound by imports earlier in this module, outside
# the span shown — confirm each listed name is actually imported.
__all__ = [
    "Accidental",
    "Accordion",
    "AfterGraceContainer",
    "AltoFlute",
    "AltoSaxophone",
    "AltoTrombone",
    "AltoVoice",
    "Arpeggio",
    "Articulation",
    "AssignabilityError",
    "BarLine",
    "BaritoneSaxophone",
    "BaritoneVoice",
    "BassClarinet",
    "BassFlute",
    "BassSaxophone",
    "BassTrombone",
    "BassVoice",
    "Bassoon",
    "BeamCount",
    "BeforeGraceContainer",
    "BendAfter",
    "Block",
    "BreathMark",
    "Cello",
    "Center",
    "Chord",
    "ClarinetInA",
    "ClarinetInBFlat",
    "ClarinetInEFlat",
    "Clef",
    "Cluster",
    "ColorFingering",
    "ColorMap",
    "Comparison",
    "Component",
    "Configuration",
    "Container",
    "Context",
    "ContextManager",
    "Contrabass",
    "ContrabassClarinet",
    "ContrabassFlute",
    "ContrabassSaxophone",
    "Contrabassoon",
    "CyclicTuple",
    "Down",
    "DrumNoteHead",
    "Duration",
    "DurationSequenceTyping",
    "DurationTyping",
    "Dynamic",
    "EnglishHorn",
    "Exact",
    "Expression",
    "Fermata",
    "FilesystemState",
    "Flute",
    "ForbidUpdate",
    "Fraction",
    "FrenchHorn",
    "Glissando",
    "Glockenspiel",
    "Guitar",
    "Harp",
    "Harpsichord",
    "HorizontalAlignment",
    "ImpreciseMetronomeMarkError",
    "IndexedTweakManager",
    "IndexedTweakManagers",
    "Infinity",
    "Instrument",
    "IntegerPair",
    "IntegerSequence",
    "Interface",
    "Interval",
    "IntervalClass",
    "IntervalClassSegment",
    "IntervalClassSet",
    "IntervalSegment",
    "IntervalSet",
    "KeyCluster",
    "KeySignature",
    "LaissezVibrer",
    "Leaf",
    "LeafMaker",
    "Left",
    "Less",
    "LilyPondContext",
    "LilyPondEngraver",
    "LilyPondFile",
    "LilyPondFormatBundle",
    "LilyPondGrob",
    "LilyPondGrobInterface",
    "LilyPondLiteral",
    "LilyPondOverride",
    "LilyPondParserError",
    "LilyPondSetting",
    "Line",
    "Lineage",
    "LogicalTie",
    "MarginMarkup",
    "Marimba",
    "Markup",
    "Meter",
    "MeterList",
    "MetricAccentKernel",
    "MetricModulation",
    "MetronomeMark",
    "MezzoSopranoVoice",
    "Middle",
    "MissingMetronomeMarkError",
    "Mode",
    "More",
    "MultimeasureRest",
    "Multiplier",
    "NamedInterval",
    "NamedIntervalClass",
    "NamedInversionEquivalentIntervalClass",
    "NamedPitch",
    "NamedPitchClass",
    "NegativeInfinity",
    "NonreducedFraction",
    "NonreducedRatio",
    "Note",
    "NoteHead",
    "NoteHeadList",
    "NoteMaker",
    "NullContextManager",
    "Number",
    "NumberPair",
    "NumberedInterval",
    "NumberedIntervalClass",
    "NumberedInversionEquivalentIntervalClass",
    "NumberedPitch",
    "NumberedPitchClass",
    "Oboe",
    "Octave",
    "Offset",
    "OffsetCounter",
    "OnBeatGraceContainer",
    "Ottava",
    "OverrideInterface",
    "Parentage",
    "ParentageError",
    "Parser",
    "Pattern",
    "PatternTuple",
    "PatternTyping",
    "Percussion",
    "PersistentIndicatorError",
    "Piano",
    "Piccolo",
    "Pitch",
    "PitchClass",
    "PitchClassSegment",
    "PitchClassSet",
    "PitchRange",
    "PitchSegment",
    "PitchSet",
    "PitchTyping",
    "ProgressIndicator",
    "Prototype",
    "Ratio",
    "RatioSequenceTyping",
    "RatioTyping",
    "RedirectedStreams",
    "RehearsalMark",
    "Repeat",
    "RepeatTie",
    "Rest",
    "Right",
    "SchemeParserFinishedError",
    "Score",
    "Segment",
    "Selection",
    "Set",
    "SetClass",
    "SettingInterface",
    "Skip",
    "SlotContributions",
    "SopraninoSaxophone",
    "SopranoSaxophone",
    "SopranoVoice",
    "Staff",
    "StaffChange",
    "StaffGroup",
    "StaffPosition",
    "StartBeam",
    "StartGroup",
    "StartHairpin",
    "StartMarkup",
    "StartPhrasingSlur",
    "StartPianoPedal",
    "StartSlur",
    "StartTextSpan",
    "StartTrillSpan",
    "StemTremolo",
    "StopBeam",
    "StopGroup",
    "StopHairpin",
    "StopPhrasingSlur",
    "StopPianoPedal",
    "StopSlur",
    "StopTextSpan",
    "StopTrillSpan",
    "StringNumber",
    "Strings",
    "Tag",
    "TemporaryDirectory",
    "TemporaryDirectoryChange",
    "TenorSaxophone",
    "TenorTrombone",
    "TenorVoice",
    "Tie",
    "TimeSignature",
    "Timer",
    "Timespan",
    "TimespanList",
    "TremoloContainer",
    "Trumpet",
    "Tuba",
    "Tuning",
    "Tuplet",
    "TweakInterface",
    "TwelveToneRow",
    "TypedCollection",
    "TypedFrozenset",
    "TypedList",
    "TypedTuple",
    "UnboundedTimeIntervalError",
    "Up",
    "VerticalAlignment",
    "VerticalMoment",
    "Vibraphone",
    "Viola",
    "Violin",
    "Voice",
    "WellformednessError",
    "Wrapper",
    "Xylophone",
    "__version__",
    "__version_info__",
    "_update",
    "activate",
    "annotate",
    "attach",
    "beam",
    "deactivate",
    "deprecated",
    "detach",
    "enumerate",
    "format",
    "get",
    "glissando",
    "graph",
    "hairpin",
    "horizontal_bracket",
    "illustrate",
    "illustrators",
    "index",
    "index_all",
    "index_first",
    "index_last",
    "io",
    "iterate",
    "iterate_leaf_pairs",
    "iterate_pitch_pairs",
    "iterate_vertical_moments",
    "iterpitches",
    "label",
    "lilypond",
    "list_all_classes",
    "list_all_functions",
    "lyconst",
    "lyenv",
    "makers",
    "mutate",
    "on_beat_grace_container",
    "ottava",
    "override",
    "parse",
    "parser",
    "persist",
    "phrasing_slur",
    "piano_pedal",
    "select",
    "setting",
    "show",
    "slur",
    "string",
    "text_spanner",
    "tie",
    "trill_spanner",
    "tweak",
    "wf",
    "yield_all_modules",
]
|
Abjad/abjad
|
abjad/__init__.py
|
Python
|
gpl-3.0
| 11,596
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Xin Dou'
__email__ = "master2dou@gmail.com"
import xml.etree.ElementTree as ET
from wechat.responser.response import Response
class MusicResponse(Response):
    """WeChat XML reply of MsgType "music" built from a music item."""

    def __init__(self, from_user_name, to_user_name, music_item):
        # Base class builds the common reply envelope (root_element etc.).
        Response.__init__(self, from_user_name, to_user_name)
        self.music_item = music_item
        msg_type = ET.SubElement(self.root_element, "MsgType")
        msg_type.text = self.create_cdata("music")
        self._parser_music()

    def _parser_music(self):
        # Append the <Music> node and its CDATA-wrapped children, one per
        # (tag, attribute) pair of the supplied music item.
        music_node = ET.SubElement(self.root_element, "Music")
        item = self.music_item
        field_pairs = (
            ("Title", item.title),
            ("Description", item.description),
            ("MusicUrl", item.music_url),
            ("HQMusicUrl", item.hq_music_url),
            ("ThumbMediaId", item.thumb_media_id),
        )
        for tag_name, payload in field_pairs:
            child = ET.SubElement(music_node, tag_name)
            child.text = self.create_cdata(payload)
|
douxin/python-wechat
|
responser/musicresponse.py
|
Python
|
mit
| 1,404
|
'''
COM Bridge for SOAP Services
============================
This soaplib extension provides a simple COM bridge for accessing SOAP web
services, especially soaplib web services, via a COM-compliant language. It
was developed primarily for use with standard Windows VBScript/VBA.
To use the COM bridge to access a web service, you must follow the following
steps:
1. Register the COM bridge with Windows by executing this module from
the command-line:
python soaplib\ext\comproxy.py
You should see output stating that the object was registered.
2. Place service stubs for the web service onto the PYTHONPATH so that
soaplib can create a service client for that web service. This is
usually as easy as creating a soaplib web service object with empty
methods that describes the web service you want to access, along with
any special types. For example:
class Title(ClassSerializer):
class types:
titleID = Integer
name = String
description = String
class Person(ClassSerializer):
class types:
personID = Integer
firstName = String
lastName = String
birthdate = DateTime
titles = Array(Title)
class PeopleService(SimpleWSGISoapApp):
@soapmethod(Person, _returns=Integer)
def addPerson(self, person): pass
@soapmethod(Integer, _returns=Person)
def getPerson(self, personID): pass
3. From your COM-compliant language, create an instance of the soaplib
client object, then tell it about your web service:
Set client = CreateObject("SoapLib.ServiceClient")
uri = "http://webservicehost:port/"
service_import_path = "services.people.PeopleService"
client.SetServiceInfo uri, service_import_path
4. Once you have a client object instantiated, you can use it to create
instances of any complex types, and call remote methods:
' instantiate a person object and two title objects
Set person = client.CreateObject("services.people.Person")
Set titleOne = client.CreateObject("services.people.Title")
Set titleTwo = client.CreateObject("services.people.Title")
' set some attributes on the first title
titleOne.name = "Team Lead"
titleOne.description = "Development Team Leader"
                ' set some attributes on the second title
titleTwo.name = "Smart Guy"
titleTwo.description = "All-Around Smart Guy"
' set some attributes on the person, including a date/time
' and an Array of complex types
person.firstName = "Jonathan"
person.lastName = "LaCour"
person.birthdate = Now()
person.titles = Array(titleOne, titleTwo)
' call the web service to add this person to the database
personID = client.addPerson(person)
' fetch the person back again, using the ID
Set theperson = client.getPerson(personID)
' echo the results
WScript.Echo "Retrieved person: " & theperson.personID
WScript.Echo "First name: " & theperson.firstName
WScript.Echo "Last name: " & theperson.lastName
WScript.Echo "Birthdate: " & theperson.birthdate
titles = theperson.titles
For i = 0 to UBound(titles)
Set title = titles(i)
WScript.Echo "Title: " & title.name & ": " & title.description
Next
In the future, we would like to make this easier to use by being able to just
pass the URI to the WSDL for the service into the client, rather than having
to create Python stubs.
'''
from warnings import warn

# Emitted once at import time: this COM bridge is experimental and must not
# be relied on in production.
warn('This module is under active development and should not be used '
     'in a production scenario')
from win32com.server.exception import COMException
from win32com.server import util
from soaplib.client import make_service_client
from soaplib.serializers.clazz import ClassSerializer
from soaplib.serializers.primitive import DateTime
from datetime import datetime
import winerror
import types
import time
# COM object wrapping and unwrapping utility functions
def coerce_date_time(dt):
    """Convert a COM-marshalled date string into a ``datetime``.

    ``dt`` is coerced to ``str`` and parsed with the ``%m/%d/%y %H:%M:%S``
    format (two-digit year).  The original code sliced ``[0:5]``, which
    parsed the seconds field but silently discarded it; the slice is now
    ``[0:6]`` so the resulting ``datetime`` keeps the parsed seconds.

    Raises ``ValueError`` if the string does not match the format.
    """
    return datetime(*time.strptime(str(dt), '%m/%d/%y %H:%M:%S')[0:6])
def unwrap_complex_type(param, param_type):
    """Recursively unwrap a COM-proxied parameter into its soaplib value.

    ``param`` is a ``PyIDispatch`` proxy produced by ``util.wrap``;
    ``param_type`` is the soaplib serializer class describing its members.
    The unwrapped object is mutated in place and returned.
    """
    param = util.unwrap(param)
    for membername, membertype in param_type.soap_members.items():
        member = getattr(param, membername)
        if type(member).__name__ == 'PyIDispatch':
            # Nested complex member: still a COM proxy, unwrap recursively.
            member = unwrap_complex_type(member, membertype)
        elif membertype is DateTime:
            # COM date/time values arrive as strings; coerce to datetime.
            member = coerce_date_time(member)
        elif type(member) in [types.ListType, types.TupleType]:
            # Array member (Python 2 type constants): unwrap each
            # COM-proxied element against the array's element serializer.
            newmember = []
            for item in member:
                if type(item).__name__ == 'PyIDispatch':
                    item = unwrap_complex_type(item, membertype.serializer)
                newmember.append(item)
            member = newmember
        setattr(param, membername, member)
    return param
def wrap_complex_type(data, data_type):
    """Recursively wrap a soaplib result for return through COM.

    Complex members (and complex elements of list/tuple members) are
    wrapped depth-first; finally the object itself is wrapped into a COM
    ``PyIDispatch`` proxy via ``util.wrap`` and returned.  ``data`` is
    mutated in place before wrapping.
    """
    for membername, membertype in data_type.soap_members.items():
        member = getattr(data, membername)
        if isinstance(member, ClassSerializer):
            member = wrap_complex_type(member, membertype)
        elif type(member) in [types.ListType, types.TupleType]:
            # Array member (Python 2 type constants): wrap each complex
            # element using its own concrete class.
            newmember = []
            for item in member:
                if isinstance(item, ClassSerializer):
                    item = wrap_complex_type(item, item.__class__)
                newmember.append(item)
            member = newmember
        setattr(data, membername, member)
    data = util.wrap(data)
    return data
class WebServiceClient:
    """Dynamic COM server that proxies calls to a soaplib service client.

    Registered under the ProgID ``SoapLib.ServiceClient``.  Because the
    registration uses win32com's DynamicPolicy, every COM method invocation
    is routed through ``_dynamic_`` below.
    """

    _reg_progid_ = 'SoapLib.ServiceClient'
    _reg_clsid_ = '{BAC77389-8687-4A8A-9DD0-2E4409FEF900}'
    # DynamicPolicy: COM dispatch goes through _dynamic_ instead of a vtable.
    _reg_policy_spec_ = 'DynamicPolicy'

    def SetServiceInfo(self, serviceURI, serviceName):
        # Import the service class from its dotted path and build a soaplib
        # client bound to the given URI.  Any failure (bad path, bad URI) is
        # reported to COM as a single generic error.
        try:
            parts = serviceName.split('.')
            item = __import__('.'.join(parts[:-1]))
            for part in parts[1:]:
                item = getattr(item, part)
            self.client_type = item()
            self.client = make_service_client(str(serviceURI),
                                              self.client_type)
        except:
            # NOTE(review): bare except hides the real import/URI error.
            raise COMException('No such service', winerror.DISP_E_BADVARTYPE)

    def CreateObject(self, typename):
        # Instantiate a (complex) type by dotted path and hand the caller a
        # COM proxy for it, so VBScript/VBA can set attributes on it.
        try:
            parts = typename.split('.')
            item = __import__('.'.join(parts[:-1]))
            for part in parts[1:]:
                item = getattr(item, part)
            return util.wrap(item())
        except:
            raise COMException('No such type', winerror.DISP_E_BADVARTYPE)

    def _dynamic_(self, name, lcid, wFlags, args):
        """Entry point for all COM invocations under DynamicPolicy."""
        # Look up the requested method. First, check to see if the
        # method is present on ourself (utility functions), then
        # check to see if it exists on the client service.
        is_service_method = False
        item = getattr(self, name, None)
        if item is None and hasattr(self, 'client'):
            item = getattr(self.client, name)
            is_service_method = True
        if item is None:
            raise COMException('No attribute of that name.',
                               winerror.DISP_E_MEMBERNOTFOUND)
        # Figure out what parameters this web service call accepts,
        # and what it returns, so that we can properly wrap the objects
        # on the way in and unwrap them on the way out.
        if is_service_method:
            all_methods = self.client_type.methods()
            method_descriptor = [method for method in all_methods
                                 if method.name == name][0]
            return_type = method_descriptor.outMessage.params[0][1]
            parameter_types = [parameter[1] for parameter in
                               method_descriptor.inMessage.params]
            # Now that we have this data, go ahead and unwrap any
            # wrapped parameters recursively.
            newargs = []
            for param_type, param in zip(parameter_types, args):
                if (hasattr(param_type, '__bases__') and
                        ClassSerializer in param_type.__bases__):
                    param = unwrap_complex_type(param, param_type)
                elif param_type is DateTime:
                    param = coerce_date_time(param)
                newargs.append(param)
            args = newargs
        # Call the supplied method
        result = apply(item, args)
        # Now wrap the return value, recursively.
        # NOTE(review): return_type is only bound when is_service_method is
        # True; a local utility method returning a ClassSerializer would
        # raise NameError here — confirm that is impossible by design.
        if isinstance(result, ClassSerializer):
            result = wrap_complex_type(result, return_type)
        # Return our data
        return result
if __name__ == '__main__':
    # Register this class as a COM server with Windows
    # (run: python soaplib\ext\comproxy.py).
    import win32com.server.register
    win32com.server.register.UseCommandLine(WebServiceClient)
|
gsson/soaplib
|
soaplib/ext/comproxy.py
|
Python
|
lgpl-2.1
| 9,149
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.