prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Tijme Gommers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS | OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import requests
from acstis.Payloads import Payloads
from acstis.helpers.BrowserHelper import BrowserHelper
from acstis.a | ctions.TraverseUrlAction import TraverseUrlAction
from acstis.actions.FormDataAction import FormDataAction
from acstis.actions.QueryDataAction import QueryDataAction
from nyawc.http.Handler import Handler as HTTPHandler
class Scanner:
"""The Scanner scans specific queue items on sandbox escaping/bypassing.
Attributes:
scanned_hashes list(str): A list of scanned queue item hashes.
__actions list(:class:`acstis.actions.BaseAction`): The actions to perform on the queue item.
__driver (:class:`acstis.Driver`): Used to check if we should stop scanning.
__verify_payload (bool): Verify if the payload was executed.
__queue_item (:class:`nyawc.QueueItem`): The queue item to perform actions on.
__session (obj): A Python requests session.
"""
scanned_hashes = []
def __init__(self, driver, angular_version, verify_payload, queue_item):
"""Initialize a scanner for the given queue item.
Args:
driver (:class:`acstis.Driver`): Used to check if we should stop scanning.
angular_version (str): The AngularJS version of the given queue_item (e.g. `1.4.2`).
verify_payload (bool): Verify if the payload was executed.
queue_item (:class:`nyawc.QueueItem`): The queue item to scan.
"""
self.__driver = driver
self.__verify_payload = verify_payload
self.__queue_item = queue_item
self.__session = requests.Session()
self.__session.mount('http://', requests.adapters.HTTPAdapter(max_retries=2))
self.__session.mount('https://', requests.adapters.HTTPAdapter(max_retries=2))
self.__actions = [
TraverseUrlAction(Payloads.get_for_version(angular_version)),
FormDataAction(Payloads.get_for_version(angular_version)),
QueryDataAction(Payloads.get_for_version(angular_version))
]
def get_vulnerable_items(self):
"""Get a list of vulnerable queue items, if any.
Returns:
list(:class:`nyawc.QueueItem`): A list of vulnerable queue items.
"""
results = []
for action in self.__actions:
if self.__driver.stopping:
break
items = action.get_action_items(self.__queue_item)
for item in items:
if self.__driver.stopping:
break
if item.get_hash() in self.scanned_hashes:
continue
self.scanned_hashes.append(item.get_hash())
if self.__is_item_vulnerable(item):
results.append(item)
return results
def __is_item_vulnerable(self, queue_item):
"""Check if the given queue item is vulnerable by executing it using the HttpHandler and checking if the payload is in scope.
Args:
queue_item (:class:`nyawc.QueueItem`): The queue item to check.
Returns:
bool: True if vulnerable, false otherwise.
"""
try:
HTTPHandler(None, queue_item)
except Exception:
return False
if not queue_item.response.headers.get("content-type") or not "html" in queue_item.response.headers.get("content-type"):
return False
if not queue_item.get_soup_response():
return False
if not self.__should_payload_execute(queue_item):
return False
if self.__verify_payload:
if not self.__verify_queue_item(queue_item.verify_item):
return False
return True
def __should_payload_execute(self, queue_item):
"""Run static checks to see if the payload should be executed.
Args:
queue_item (:class:`nyawc.QueueItem`): The queue item to check.
Returns:
bool: True if payload should execute, false otherwise.
"""
soup = queue_item.get_soup_response()
ng_app_soup = soup.select("[ng-app]")
if not ng_app_soup:
return False
for non_bindable in ng_app_soup[0].select("[ng-non-bindable]"):
non_bindable.decompose()
in_scope_html = str(ng_app_soup[0])
if queue_item.payload["value"] in in_scope_html:
return True
return False
def __verify_queue_item(self, queue_item):
"""Verify if the browser opened a new window.
Args:
queue_item (:class:`nyawc.QueueItem`): The queue item to check.
Returns:
bool: True if the payload worked, false otherwise.
"""
browser = BrowserHelper.request(queue_item)
return browser and len(browser.window_handles) >= 2
|
import zeeguu_core
from zeeguu_core.model import Article, Language, LocalizedTopic
session = zeeguu_core.db.session
counter = 0
languages = Language.available_languages()
languages = [Language.find('da')]
for language in languages:
articles = Article.query.filter(Article.language == language).order_by(Article.id.desc()).all()
loc_topics = LocalizedTopic.all_for_language(language)
total_articles = len(articles)
for article in articles:
counter += 1
print(f"{article.title}")
print(f"{article.url.as_string()}")
for loc_topic in loc_topics:
if loc_topic.matches_article(article):
article.add_topic(loc_topic.topic)
print(f" #{loc_topic.topic_translated}")
print("")
session.add(article)
if counter % 1000 == 0:
percentage = (100 * counter / total_articles) / 100
print(f"{counter} dorticl | es done ({percentage}%). last article id: {article.id}. Comitting... ")
session.commit( | )
percentage = (100 * counter / total_articles) / 100
print(f"{counter} dorticles done ({percentage}%). last article id: {article.id}. Comitting... ")
session.commit()
|
from tictactoe import game, player
import unittest
from unittest import mock
class GameTest(unittest.TestCase):
def setUp(self):
self.num_of_players = 2
self.width = 3
self.height = 3
self.game = game.Game(2, 3, 3)
def test_init(self):
self.assertEqual(self.game.board, None)
self.assertEqual(self.game.width, self.width)
self.assertEqual(self.game.height, self.height)
self.assertEqual(self.game.num_of_players, self.num_of_players)
self.assertEqual(self.game.players, [])
self.assertEqual(self.game.round_counter, 0)
self.assertEqual(self.game.on_turn, 0)
def test_setup(self):
input_seq = ['L | uke', 'x', 'Leia', 'o']
with mock.patch('builtins.input', side_effect=input_seq):
self.game.setup()
expected = [('Luke', 'x'), ('Leia', 'o')]
for e, p in zip(expected, self.game.players):
self.assertEqual(p.name, e[0])
self.assertEqual(p.symbol, e[1])
def test_play_round(self):
# setup
input_seq = ['Luke', 'x', 'Leia', 'o']
with mock.patch('builtins.input', side_effect=input_seq): |
self.game.setup()
input_seq = ['2', '5', '3', '1', '9', '6', '7', '4']
with mock.patch('builtins.input', side_effect=input_seq):
self.game.play_round()
finished, winner = self.game.board.finished()
self.assertTrue(finished)
self.assertEqual(winner, 1)
expected_board = [[1, 0, 0], [1, 1, 1], [0, None, 0]]
self.assertEqual(self.game.board.grid, expected_board)
|
' | ''
'''
from rest_framework import serializers
import models
class PluginSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Plugin
fields = ('id', 'name', )
class ScoredServiceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.ScoredService
fields = ('id', 'name', 'plugin', 'checks', 'services')
class CheckSerializer(serializers.HyperlinkedModelSer | ializer):
class Meta:
model = models.Check
fields = ('id', 'key', 'value', 'scored_service')
class TeamSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Team
fields = ('id', 'name', 'services')
class ServiceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Service
fields = ('id', 'scored_service', 'address', 'port', 'team', 'credentials', 'results')
read_only_fields = ('results', )
class CredentialSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Credential
fields = ('id', 'username', 'password', 'service')
class ResultSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Result
fields = ('id', 'status', 'service', 'explanation')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 12:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('account', '0005_user_last_activity'),
]
operations = [
migrations.CreateModel(
name='Notifications',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)) | ,
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='notifications', serialize=False, to=settings.AUTH_USER_MODEL)),
('account_expires', models.BooleanField(default=False, help_text='Accounts are deleted if they | are not used for a year. Warn me a week before mine would be deleted.')),
('gpg_expires', models.BooleanField(default=False, help_text='Warn me a week before any of my GPG keys is about to expire.')),
],
options={
'abstract': False,
},
),
]
|
}
self.module = module
self._connect()
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.zone = None
self.vm = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
def _connect(self):
api_key = self.module.params.get('api_key')
api_secret = self.module.params.get('secret_key')
api_url = self.module.params.get('api_url')
api_http_method = self.module.params.get('api_http_method')
api_timeout = self.module.params.get('api_timeout')
if api_key and api_secret and api_url:
self.cs = CloudStack(
endpoint=api_url,
key=api_key,
secret=api_secret,
timeout=api_timeout,
method=api_http_method
)
else:
self.cs = CloudStack(**read_config())
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
# TODO: for backward compatibility only, remove if not used anymore
def _has_changed(self, want_dict, current_dict, only_keys=None):
return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
def has_changed(self, want_dict, current_dict, only_keys=None):
for key, value in want_dict.iteritems():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue;
# Skip None values
if value is None:
continue;
if key in current_dict:
# API returns string for int in some cases, just to make sure
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, str):
current_dict[key] = str(current_dict[key])
# Only need to detect a singe change, not every item
if value != current_dict[key]:
return True
return False
def _get_by_key(self, key=None, my_dict={}):
if key:
if key in my_dict:
return my_dict[key]
self.module.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id'] ]:
self.project = p
return self._get_by_key(key, self.project)
self.module.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.module.fail_json(msg="IP address param 'ip_address' is required")
args = {}
args['ipaddress'] = ip_address
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
ip_addresses = self.cs.listPublicIpAddresses(**args)
if not ip_addresses:
self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm(self, key=None):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.module.fail_json(msg="Virtual machine param 'vm' is required")
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
vms = self.cs.listVirtualMachines(**args)
if vms:
for v in vms['virtualmachine']:
if vm in [ v['name'], v['displayname'], v['id'] ]:
self.vm = v
return self._get_by_key(key, self.vm)
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
zones = self.cs.listZones()
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone in [ z['name'], z['id'] ]:
self.zone = z
return self._get_by_key(key, self.z | one)
self.module.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
return self._get_by_key(key, self.zone)
os_type = self | .module.params.get('os_type')
if not os_type:
return None
os_types = self.cs.listOsTypes()
if os_types:
for o in os_types['ostype']:
if os_type in [ o['description'], o['id'] ]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.module.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.cs.listHypervisors()
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.module.fail_json(msg="Account must be specified with Domain")
args = {}
args['name'] = account
args['domainid'] = self.get_domain(key='id')
args['listall'] = True
accounts = self.cs.listAccounts(**args)
if accounts:
self.account = accounts['account'][0]
return self._get_by_key(key, self.account)
self.module.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
return None
args = {}
args['listall'] = True
domains = self.cs.listDomains(**args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
self.domain = d
return self._get_by_key(key, self.domain)
self.module.fail_json(msg="Domain '%s' not found" % domain)
def get_tags(self, resource=None):
existing_tags = self.cs.listTags(resourceid=resource['id'])
if existing_ |
import logging
from borgmatic.borg.flags import make_flags, make_flags_from_arguments
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
# A hack to convince Borg to exclude archives ending in ".checkpoint". This assumes that a
# non-checkpoint archive name ends in a digit (e.g. from a timestamp).
BORG_EXCLUDE_CHECKPOINTS_GLOB = '*[0123456789]'
def resolve_archive_name(repository, archive, storage_config, local_path='borg', remote_path=None):
'''
Given a local or remote repository path, an archive name, a storage config dict, a local Borg
path, and a remote Borg path, simply return the archive name. But if the archive name is
"latest", then instead introspect the repository for the latest successful (non-checkpoint)
archive, and return its name.
Raise ValueError if "latest" is given but there are no archives in the repository.
'''
if archive != "latest":
return archive
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'list')
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ make_flags('remote-path', remote_path)
+ make_flags('lock-wait', lock_wait)
+ make_flags('glob-archives', BORG_EXCLUDE_CHECKPOINTS_GLOB)
+ make_flags('last', 1)
+ ('- | -short', repository)
)
output = execute_command(full_command, output_log_level=None, borg_local_path=local_path)
try:
latest_archive = output.strip().splitlines()[-1]
except IndexError:
r | aise ValueError('No archives found in the repository')
logger.debug('{}: Latest archive is {}'.format(repository, latest_archive))
return latest_archive
def list_archives(repository, storage_config, list_arguments, local_path='borg', remote_path=None):
'''
Given a local or remote repository path, a storage config dict, and the arguments to the list
action, display the output of listing Borg archives in the repository or return JSON output. Or,
if an archive name is given, listing the files in that archive.
'''
lock_wait = storage_config.get('lock_wait', None)
if list_arguments.successful:
list_arguments.glob_archives = BORG_EXCLUDE_CHECKPOINTS_GLOB
full_command = (
(local_path, 'list')
+ (
('--info',)
if logger.getEffectiveLevel() == logging.INFO and not list_arguments.json
else ()
)
+ (
('--debug', '--show-rc')
if logger.isEnabledFor(logging.DEBUG) and not list_arguments.json
else ()
)
+ make_flags('remote-path', remote_path)
+ make_flags('lock-wait', lock_wait)
+ make_flags_from_arguments(
list_arguments, excludes=('repository', 'archive', 'paths', 'successful')
)
+ (
'::'.join((repository, list_arguments.archive))
if list_arguments.archive
else repository,
)
+ (tuple(list_arguments.paths) if list_arguments.paths else ())
)
return execute_command(
full_command,
output_log_level=None if list_arguments.json else logging.WARNING,
borg_local_path=local_path,
)
|
import datetime
import io
import boto3
import mock
import pytest
import requests
import testfixtures
from botocore.exceptions import ClientError
from opentracing.ext import tags
from opentracing_instrumentation.client_hooks import boto3 as boto3_hooks
DYNAMODB_ENDPOINT_URL = 'http://localhost:4569'
S3_ENDPOINT_URL = 'http://localhost:4572'
DYNAMODB_CONFIG = {
'endpoint_url': DYNAMODB_ENDPOINT_URL,
'aws_access_key_id': '-',
'aws_secret_access_key': '-',
'region_name': 'us-east-1',
}
S3_CONFIG = dict(DYNAMODB_CONFIG, endpoint_url=S3_ENDPOINT_URL)
def create_users_table(dynamodb):
dynamodb.create_table(
TableName='users',
KeySchema=[{
'AttributeName': 'username',
'KeyType': 'HASH'
}],
AttributeDefinitions=[{
'AttributeName': 'username',
'AttributeType': 'S'
}],
ProvisionedThroughput={
'ReadCapacityUnits': 9,
'WriteCapacityUnits': 9
}
)
@pytest.fixture
def dynamodb_mock():
import moto
with moto.mock_dynamodb2():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
create_users_table(dynamodb)
yield dynamodb
@pytest.fixture
def dynamodb():
dynamodb = boto3.resource('dynamodb', **DYNAMODB_CONFIG)
try:
dynamodb.Table('users').delete()
except ClientError as error:
# you can not just use ResourceNotFoundException class
# to catch an error since it doesn't exist until it's raised
if error.__class__.__name__ != 'ResourceNotFoundException':
raise
create_users_table(dynamodb)
# waiting until the table exists
dynamodb.meta.client.get_waiter('table_exists').wait(TableName='users')
return dynamodb
@pytest.fixture
def s3_mock():
import moto
with moto.mock_s3():
s3 = boto3.client('s3', region_name='us-east-1')
yield s3
@pytest.fixture
def s3():
return boto3.client('s3', **S3_CONFIG)
@pytest.fixture(autouse=True)
def patch_boto3():
boto3_hooks.install_patches()
try:
yield
| finally:
boto3_hooks.reset_patches()
def assert_last_span(kind, service_name, operation, tracer, response=N | one):
span = tracer.recorder.get_spans()[-1]
request_id = response and response['ResponseMetadata'].get('RequestId')
assert span.operation_name == 'boto3:{}:{}:{}'.format(
kind, service_name, operation
)
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
assert span.tags.get(tags.COMPONENT) == 'boto3'
assert span.tags.get('boto3.service_name') == service_name
if request_id:
assert span.tags.get('aws.request_id') == request_id
def _test_dynamodb(dynamodb, tracer):
users = dynamodb.Table('users')
response = users.put_item(Item={
'username': 'janedoe',
'first_name': 'Jane',
'last_name': 'Doe',
})
assert_last_span('resource', 'dynamodb', 'put_item', tracer, response)
response = users.get_item(Key={'username': 'janedoe'})
user = response['Item']
assert user['first_name'] == 'Jane'
assert user['last_name'] == 'Doe'
assert_last_span('resource', 'dynamodb', 'get_item', tracer, response)
try:
dynamodb.Table('test').delete_item(Key={'username': 'janedoe'})
except ClientError as error:
response = error.response
assert_last_span('resource', 'dynamodb', 'delete_item', tracer, response)
response = users.creation_date_time
assert isinstance(response, datetime.datetime)
assert_last_span('resource', 'dynamodb', 'describe_table', tracer)
def _test_s3(s3, tracer):
fileobj = io.BytesIO(b'test data')
bucket = 'test-bucket'
response = s3.create_bucket(Bucket=bucket)
assert_last_span('client', 's3', 'create_bucket', tracer, response)
response = s3.upload_fileobj(fileobj, bucket, 'test.txt')
assert_last_span('client', 's3', 'upload_fileobj', tracer, response)
def is_service_running(endpoint_url, expected_status_code):
try:
# feel free to suggest better solution for this check
response = requests.get(endpoint_url, timeout=1)
return response.status_code == expected_status_code
except requests.exceptions.ConnectionError:
return False
def is_dynamodb_running():
return is_service_running(DYNAMODB_ENDPOINT_URL, 502)
def is_s3_running():
return is_service_running(S3_ENDPOINT_URL, 200)
def is_moto_presented():
try:
import moto
return True
except ImportError:
return False
@pytest.mark.skipif(not is_dynamodb_running(),
reason='DynamoDB is not running or cannot connect')
def test_boto3_dynamodb(thread_safe_tracer, dynamodb):
_test_dynamodb(dynamodb, thread_safe_tracer)
@pytest.mark.skipif(not is_moto_presented(),
reason='moto module is not presented')
def test_boto3_dynamodb_with_moto(thread_safe_tracer, dynamodb_mock):
_test_dynamodb(dynamodb_mock, thread_safe_tracer)
@pytest.mark.skipif(not is_s3_running(),
reason='S3 is not running or cannot connect')
def test_boto3_s3(s3, thread_safe_tracer):
_test_s3(s3, thread_safe_tracer)
@pytest.mark.skipif(not is_moto_presented(),
reason='moto module is not presented')
def test_boto3_s3_with_moto(s3_mock, thread_safe_tracer):
_test_s3(s3_mock, thread_safe_tracer)
@testfixtures.log_capture()
def test_boto3_s3_missing_func_instrumentation(capture):
class Patcher(boto3_hooks.Boto3Patcher):
S3_FUNCTIONS_TO_INSTRUMENT = 'missing_func',
Patcher().install_patches()
capture.check(('root', 'WARNING', 'S3 function missing_func not found'))
@mock.patch.object(boto3_hooks, 'patcher')
def test_set_custom_patcher(default_patcher):
patcher = mock.Mock()
boto3_hooks.set_patcher(patcher)
assert boto3_hooks.patcher is not default_patcher
assert boto3_hooks.patcher is patcher
boto3_hooks.install_patches()
boto3_hooks.reset_patches()
patcher.install_patches.assert_called_once()
patcher.reset_patches.assert_called_once()
|
import unittest
from dosbox.filesystem.directory import *
class DirectoryTestCase(unittest.TestCase):
def setUp(self):
self.root_dir = Directory("root")
self.sub_dir1 = Directory("subdir1")
def test_path(self):
self.root_dir.add(self.sub_dir1)
self.assertEqual(self.sub_d | ir1.parent, self.root_dir)
self.assertEqual(self.sub_dir1.path, "root\subdir1")
def test_add_remove(self):
subdir = Directory("subdir")
self.root_dir.add(subdir)
self.assertEqual(subdir.parent, self.root_dir)
self.root_dir.remove(subdir)
self.assertEqual( | subdir.parent, None)
def test_number_of_contained_file_system_item(self):
return NotImplemented |
# -*- coding: utf-8 -*-
# Scrapy settings for saymedia project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'saymedia'
SPIDER_MODULES = ['saymedia.spiders']
NEWSPIDER_MODULE = 'saymedia.spiders'
ROBOTSTXT_OBEY = True
DOWNLOADER_MI | DDLEWARES = {
'saymedia.middleware.ErrorConverterMiddleware': 1,
# 'saymedia.middleware.MysqlDownloaderM | iddleware': 1,
'saymedia.middleware.OriginHostMiddleware': 2,
'saymedia.middleware.TimerDownloaderMiddleware': 998,
}
SPIDER_REPORTS = {
'xml': 'saymedia.reports.XmlReport',
'firebase': 'saymedia.reports.FirebaseReport',
}
SPIDER_MIDDLEWARES = {
'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': None,
}
ITEM_PIPELINES = {
'saymedia.pipelines.DatabaseWriterPipeline': 0,
}
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'SEO Spider (+http://www.saymedia.com)'
DATABASE = {
'USER': 'YOUR_DATABASE_USER',
'PASS': 'YOUR_DATABASE_PASS',
}
FIREBASE_URL = "YOUR_FIREBASE_URL"
try:
# Only used in development environments
from .local_settings import *
except ImportError:
pass |
import _plotly_utils.basevalidators
class HistfuncValidator(_plotly_ | utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="histfunc", parent_name="histogram2d", **kwargs):
super(HistfuncValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", ["count", "sum", "avg", "min", " | max"]),
**kwargs
)
|
from django.http import HttpResponse, JsonResponse
from pa3_web.models import Subscriber
|
#
# Example of a subscription client
#
def delete_subscriber(phone_number):
[sub.delete() for sub in Subscriber.objects.filter(protocol='sms',
identifier=phone_number)]
return HttpResponse( | 200)
def notify(subscriber):
# Send Notifying SMS
return |
#!/usr/bin/env python
import subprocess, os, sys, argparse
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="First target directory for evaluation")
parser.add_argument("directories", nargs='+', help="All other directories to be evaluated")
parser.add_argument("-o", "--output", help="Output destination. If none specified, defaults to STDOUT")
args = parser.parse_args()
def fileSponge(dirs, outputDir):
| commonList = findIdentical(dirs).rstrip()
outputCommon(commonList, outputDir)
def findIdentical(dirs):
prev = None
for index in dirs:
if prev is None:
prev = index
else:
diff = "diff --brief -Nrs %s %s" % (prev, index)
egrepPattern = "^Files .+ and .+ are identical$"
awkPattern = "(Files | and | are identical)"
diffProcess = subprocess.Popen(diff.split(), | stdout=subprocess.PIPE)
egrepProcess = subprocess.Popen(["egrep", egrepPattern], stdout=subprocess.PIPE, stdin=diffProcess.stdout)
awkProcess = subprocess.Popen(["awk", "-F", awkPattern, "{print($2, \"==\", $3)}"], stdout=subprocess.PIPE, stdin=egrepProcess.stdout)
(out, err) = awkProcess.communicate()
return out
def outputCommon(commonList, outputDir):
if outputDir is not None:
options = "-av"
exclusions = "--exclude='*'"
srcPath = "./"
destPath = "%s/" % (outputDir)
targetFiles = isolateTargetFiles(commonList)
inclusions = "--files-from=./commonFiles.txt"#generateRsyncInclusionString(targetFiles)
writeInclusionListToDisk(targetFiles)
rsync = "rsync %s %s %s %s" % (options, inclusions, srcPath, destPath)
print rsync
rsyncProcess = subprocess.call(rsync.split())
else:
print("Identical files:\n%s" % (commonList))
def isolateTargetFiles(commonList):
targetFiles = []
for line in commonList.split('\n'):
targetFiles.append(line.split()[0])
return targetFiles
def generateRsyncInclusionString(targetFiles):
inclusions = ''
for item in targetFiles:
inclusions += " --include='./%s'" % (item)
return inclusions
def writeInclusionListToDisk(targetFiles):
outfile = open('commonFiles.txt', 'w')
for item in targetFiles:
outfile.write("%s\n" % item)
def usage():
dirList = []
outputDir = None
if args.output:
outputDir = args.output or None
if args.directory:
dirList = args.directory.split()
if args.directories:
dirList += args.directories
fileSponge(dirList, outputDir)
usage() |
"""Taking screenshots inside tests!
If you want to take a screenshot inside your test, just do it like this:
.. code-block:: python
def test_my_test(take_screenshot):
# do something
take_screenshot("Particular name for the screenshot")
# do something else
"""
import fauxfactory
import pytest
from cfme.fixtures.artifactor_plugin import fire_art_test_hook
from cfme.fixtures.pytest_store import store
from cfme.utils.browser import take_screenshot as take_browser_screenshot
from cfme.utils.log import logger
@pytest.fixture(scope="function")
def take_screenshot(request):
item = request.node
def _take_screenshot(name):
| logger.info(f"Taking a screenshot named {name}")
ss, ss_error = take_browser_screenshot()
g_id = fauxfactory.gen_alpha(length=6)
if ss:
fire_art_test_hook(
item, 'filedump',
| description=f"Screenshot {name}", file_type="screenshot", mode="wb",
contents_base64=True, contents=ss, display_glyph="camera",
group_id=f"fix-screenshot-{g_id}", slaveid=store.slaveid)
if ss_error:
fire_art_test_hook(
item, 'filedump',
description=f"Screenshot error {name}", mode="w", contents_base64=False,
contents=ss_error, display_type="danger",
group_id=f"fix-screenshot-{g_id}", slaveid=store.slaveid)
return _take_screenshot
|
import os
import datetime
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'invest.settings')
import django
django.setup()
from myportfolio.models import Investor, Portfolio, AssetClass, STOCKS, BONDS,\
ALTERNATIVES, Security, Transaction, Account
def populate():
investor1 = add_investor(name='David Lim',
username='Dave',
email='dave@gmail.com')
p1 = add_portfolio(owner=investor1,
name='Retirement',
obj='Save for retirement',
risk_tolerance='I am comfortable with 80/20 stock bond ratio',
time_frame=30,
stock_bond_ratio=4,
asset_allocation={})
a1 = add_assetclass(owner=investor1,
name='US',
asset_type=STOCKS)
a2 = add_assetclass(owner=investor1,
name='EU',
asset_type=STOCKS)
a3 = add_assetclass(owner=investor1,
name='Global Bonds',
asset_type=BONDS)
p1.target_asset_allocation[a1.id] = 0.3
p1.target_asset_allocation[a2.id] = 0.3
p1.target_asset_allocation[a3.id] = 0.4
p1.save()
s1 = add_security(asset_class=a1,
name='Vanguard Total Stock ETF',
symbol='VTI',
isin='QW1234456',
currency='USD',
exchange='NYSE',
expense_ratio_percent=0.1,
last_trade_price=100.05)
ac1 = add_account(owner=investor1,
name='SCB SGD',
description='SGD trading account')
ac2 = add_account(owner=investor1,
name='SCB USD',
description='USD trading account')
ac2 = add_account(owner=investor1,
name='SCB GBP',
description='GBP trading account')
t1 = add_transaction(portfolio=p1,
security=s1,
account=ac2,
date=datetime.date(2016, 5, 3),
price=100.0,
quantity=10)
t2 = add_transaction(portfolio=p1,
security=s1,
account=ac2,
date=datetime.date(2016, 5, 18),
price=108.0,
quantity=5)
investor2 = add_investor(name='Diana',
username='Rose',
email='rose@gmail.com')
p2 = add_portfolio(owner=investor2,
name='New house',
obj='Save for new house',
risk_tolerance='I am comfortable with 50/50 stock bond ratio',
time_frame=15,
stock_bond_ratio=1,
asset_allocation={})
a4 = add_assetcla | ss( | owner=investor2,
name='World',
asset_type=STOCKS)
a5 = add_assetclass(owner=investor2,
name='REIT',
asset_type=ALTERNATIVES)
a6 = add_assetclass(owner=investor2,
name='Global Bonds',
asset_type=BONDS)
p2.target_asset_allocation[a4.id] = 0.5
p2.target_asset_allocation[a5.id] = 0.1
p2.target_asset_allocation[a6.id] = 0.4
p2.save()
for i in Investor.objects.all():
print ('{} - {} - {}'.format(i.name, i.username, i.email))
for ac in Account.objects.filter(owner=i):
print ('{} - {}'.format(ac.name, ac.description))
for p in Portfolio.objects.filter(owner=i):
print (' {} - {} - {} - {}'.format(p.name, p.objective, p.time_frame, p.target_asset_allocation))
for a in AssetClass.objects.filter(owner=i):
print (' {}. {} - {}'.format(a.id, a.name, a.type))
for s in Security.objects.filter(asset_class=a):
print (' {} {}'.format(s.name, s.symbol))
for t in Transaction.objects.filter(security=s):
print (' {} {} {} {} {}'.format(t.security, t.account, t.date, t.price, t.quantity))
def add_investor(name, username, email):
    """Return the Investor matching the given identity fields, creating it if absent."""
    investor, _created = Investor.objects.get_or_create(
        name=name, username=username, email=email)
    return investor
def add_portfolio(owner, name, obj, risk_tolerance, time_frame, stock_bond_ratio, asset_allocation):
    """Fetch or create a Portfolio and refresh its descriptive fields.

    Lookup keys are (owner, time_frame, target_stock_bond_ratio); the
    remaining fields are overwritten on every call.

    NOTE: the returned object is NOT saved -- the caller is responsible
    for calling ``p.save()`` (as the populate script does).
    """
    p = Portfolio.objects.get_or_create(owner=owner,
                    time_frame=time_frame,
                    target_stock_bond_ratio=stock_bond_ratio,
                    )[0]
    # `owner` is already fixed by get_or_create; re-assigning it was redundant.
    p.name = name
    p.objective = obj
    p.risk_tolerance = risk_tolerance
    p.target_asset_allocation = asset_allocation
    return p
def add_assetclass(owner, name, asset_type):
    """Return the AssetClass for (owner, name, asset_type), creating it if needed."""
    asset_class, _created = AssetClass.objects.get_or_create(
        owner=owner,
        name=name,
        type=asset_type,
    )
    return asset_class
def add_security(asset_class,
                 name,
                 symbol,
                 isin,
                 currency,
                 exchange,
                 expense_ratio_percent,
                 last_trade_price):
    """Return the Security with the given attributes, creating it if needed."""
    lookup = dict(asset_class=asset_class,
                  name=name,
                  symbol=symbol,
                  isin=isin,
                  currency=currency,
                  exchange=exchange,
                  expense_ratio_percent=expense_ratio_percent,
                  last_trade_price=last_trade_price)
    security, _created = Security.objects.get_or_create(**lookup)
    return security
def add_account(owner, name, description):
    """Return the Account for (owner, name, description), creating it if needed."""
    account, _created = Account.objects.get_or_create(
        owner=owner, name=name, description=description)
    return account
def add_transaction(portfolio, security, account,
                    date, price, quantity):
    """Return the Transaction with the given details, creating it if needed."""
    transaction, _created = Transaction.objects.get_or_create(
        portfolio=portfolio,
        security=security,
        account=account,
        date=date,
        price=price,
        quantity=quantity,
    )
    return transaction
# Script entry point: build the demo dataset when executed directly.
if __name__ == '__main__':
    populate()
|
from django.apps import AppConfig
class Rest | ateConfig( | AppConfig):
name = 'restate'
|
import click
from parsec.cli | import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('delete_group_user')
@click.argument("group_id", type=str)
@click.argument("user_id", type=str)
@pass_context
@custom_exception
@json_output
def cli(ctx, group_id, user_id):
    """Remove a user from the given group.

Output:

    The user which was removed
    """
    # Delegate to the groups API client; decorators handle errors/JSON output.
    removed_user = ctx.gi.groups.delete_group_user(group_id, user_id)
    return removed_user
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST | IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build and return the Creature object for this NPC template.

    NOTE(review): this file is autogenerated; hand edits belong only
    between the BEGIN/END MODIFICATIONS markers below.
    """
    result = Creature()
    result.template = "object/mobile/shared_dressed_doak_sif.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","human_base_male")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
from a10sdk.common.A10BaseClass import A10BaseClass
class DisablePartitionName(A10BaseClass):
    """Class Description::
    .
    Class disable-partition-name supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`
    :param disable_partition_name: {"description": "Disable partition name in logs", "partition-visibility": "shared", "default": 0, "type": "number", "format": "flag", "optional": true}
    :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/logging/disable-partition-name`.
    """

    def __init__(self, **kwargs):
        # Fixed metadata used by the A10 base class for REST CRUD calls.
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "disable-partition-name"
        self.a10_url = "/axapi/v3/logging/disable-partition-name"
        # Default (empty) attribute values; see the class docstring schema.
        self.DeviceProxy = ""
        self.disable_partition_name = ""
        self.uuid = ""
        # Apply any caller-supplied attribute overrides.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an | "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def make_name(name: st | r) -> str:
# Sample function parameter name in delete_specialist_pool_sample
name = name
return name
|
# Copyright 2006 James Tauber and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas.ui.ButtonBase import ButtonBase
from pyjamas.ui import Event
# Module-level counter used to hand out unique DOM ids for checkbox inputs
# (emulates a static class variable; see CheckBox.getUniqueID).
_CheckBox_unique_id = 0

class CheckBox(ButtonBase):
    """A checkbox widget with an optional text or HTML label.

    The widget is a <span> wrapping an <input type=checkbox> plus a <label>
    whose ``htmlFor`` points at the input, so clicking the label toggles
    the box.
    """
    def __init__(self, label=None, asHTML=False, **kwargs):
        # Python 2 idiom (has_key) kept: this codebase targets Python 2.
        if not kwargs.has_key('StyleName'): kwargs['StyleName']="gwt-CheckBox"
        if label:
            if asHTML:
                kwargs['HTML'] = label
            else:
                kwargs['Text'] = label
        self.initElement(DOM.createInputCheck(), **kwargs)

    def initElement(self, element, **kwargs):
        """Wire the input and label elements inside a wrapping span."""
        self.inputElem = element
        self.labelElem = DOM.createLabel()
        ButtonBase.__init__(self, DOM.createSpan(), **kwargs)
        # Focus/click events are handled by the inner input, not the span.
        self.unsinkEvents(Event.FOCUSEVENTS| Event.ONCLICK)
        DOM.sinkEvents(self.inputElem, Event.FOCUSEVENTS | Event.ONCLICK | DOM.getEventsSunk(self.inputElem))
        DOM.appendChild(self.getElement(), self.inputElem)
        DOM.appendChild(self.getElement(), self.labelElem)
        # Pair label and input so label clicks toggle the checkbox.
        uid = "check%d" % self.getUniqueID()
        DOM.setAttribute(self.inputElem, "id", uid)
        DOM.setAttribute(self.labelElem, "htmlFor", uid)

    # emulate static
    def getUniqueID(self):
        global _CheckBox_unique_id
        _CheckBox_unique_id += 1
        return _CheckBox_unique_id

    def getHTML(self):
        return DOM.getInnerHTML(self.labelElem)

    def getName(self):
        return DOM.getAttribute(self.inputElem, "name")

    def getText(self):
        return DOM.getInnerText(self.labelElem)

    def setChecked(self, checked):
        # Set both the live and default properties so the state survives
        # detach/re-attach (see onDetach).
        DOM.setBooleanAttribute(self.inputElem, "checked", checked)
        DOM.setBooleanAttribute(self.inputElem, "defaultChecked", checked)

    def isChecked(self):
        # While detached only "defaultChecked" reflects the intended state.
        if self.isAttached():
            propName = "checked"
        else:
            propName = "defaultChecked"
        return DOM.getBooleanAttribute(self.inputElem, propName)

    def isEnabled(self):
        return not DOM.getBooleanAttribute(self.inputElem, "disabled")

    def setEnabled(self, enabled):
        DOM.setBooleanAttribute(self.inputElem, "disabled", not enabled)

    def setFocus(self, focused):
        # BUG FIX: 'self' was missing from the signature, so any call
        # bound the instance to 'focused' and the body's self references
        # raised NameError.
        # NOTE(review): `Focus` is not imported in this module's visible
        # imports -- confirm `from pyjamas.ui import Focus` exists upstream.
        if focused:
            Focus.focus(self.inputElem)
        else:
            Focus.blur(self.inputElem)

    def setHTML(self, html):
        DOM.setInnerHTML(self.labelElem, html)

    def setName(self, name):
        DOM.setAttribute(self.inputElem, "name", name)

    def setTabIndex(self, index):
        Focus.setTabIndex(self.inputElem, index)

    def setText(self, text):
        DOM.setInnerText(self.labelElem, text)

    def onDetach(self):
        # Persist the live checked state into defaultChecked before the
        # element leaves the document, so isChecked() stays correct.
        self.setChecked(self.isChecked())
        ButtonBase.onDetach(self)
|
ame of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
`DataObject` is a class of object that provides coding between object
attributes and dictionaries, suitable for serialization to and from
simple dictionary-based representations.
`DataObject` contains the mechanism for converting between dictionaries and
objects. These conversions are performed with aid of `Field` instances
declared on `DataObject` subclasses. `Field` classes reside in the
`remoteobjects.field` module.
"""
from copy import deepcopy
import logging
import remoteobjects.fields
# Registry of DataObject subclasses, keyed by bare class name.
classes_by_name = {}
# Registry of DataObject subclasses, keyed by a constant-field value.
classes_by_constant_field = {}

def find_by_name(name):
    """Return the DataObject subclass registered under `name`.

    `name` is a bare class name with no module prefix.  Raises `KeyError`
    when no class by that name has been registered.
    """
    return classes_by_name[name]
class DataObjectMetaclass(type):
    """Metaclass for `DataObject` classes.
    This metaclass installs all `remoteobjects.fields.Property` instances
    declared as attributes of the new class, including all `Field` and `Link`
    instances.
    This metaclass also makes the new class findable through the
    `dataobject.find_by_name()` function.
    """
    def __new__(cls, name, bases, attrs):
        """Creates and returns a new `DataObject` class with its declared
        fields and name."""
        fields = {}
        new_fields = {}
        new_properties = {}
        # Inherit all the parent DataObject classes' fields.
        for base in bases:
            if isinstance(base, DataObjectMetaclass):
                fields.update(base.fields)
        # Move all the class's attributes that are Fields to the fields set.
        for attrname, field in attrs.items():
            if isinstance(field, remoteobjects.fields.Property):
                new_properties[attrname] = field
                # Only true Fields participate in dict coding; other
                # Properties (e.g. Links) are installed but not encoded.
                if isinstance(field, remoteobjects.fields.Field):
                    new_fields[attrname] = field
            elif attrname in fields:
                # Throw out any parent fields that the subclass defined as
                # something other than a Field.
                del fields[attrname]
        fields.update(new_fields)
        attrs['fields'] = fields
        obj_cls = super(DataObjectMetaclass, cls).__new__(cls, name, bases, attrs)
        for field, value in new_properties.items():
            obj_cls.add_to_class(field, value)
        # Register the new class so Object fields can resolve forward
        # references to it (see find_by_name()).
        classes_by_name[name] = obj_cls
        # Tell this class's fields what this class is, so they can find their
        # forward references later.
        for field in new_properties.values():
            field.of_cls = obj_cls
        return obj_cls
    def add_to_class(cls, name, value):
        # Prefer the property's own install hook; fall back to a plain
        # setattr for properties that don't implement install().
        try:
            value.install(name, cls)
        except (NotImplementedError, AttributeError):
            setattr(cls, name, value)
class DataObject(object):
"""An object that can be decoded from or encoded as a dictionary.
DataObject subclasses should be declared with their different data
attributes defined as instances of fields from the `remoteobjects.fields`
module. For example:
>>> from remoteobjects import dataobject, fields
>>> class Asset(dataobject.DataObject):
... name = fields.Field()
... updated = fields.Datetime()
... author = fields.Object('Author')
...
A DataObject's fields then provide the coding between live DataObject
instances and dictionaries.
"""
__metaclass__ = DataObjectMetaclass
def __init__(self, **kwargs):
"""Initializes a new `DataObject` with the given field values."""
self.api_data = {}
self.__dict__.update(kwargs)
def __eq__(self, other):
"""Returns whether two `DataObject` instances are equivalent.
If the `DataObject` instances are of the same type and contain the
same data in all their fields, the objects are equivalent.
"""
if type(self) != type(other):
return False
for k, v in self.fields.iteritems():
if isinstance(v, remoteobjects.fields.Field):
if getattr(self, k) != getattr(other, k):
return False
return True
def __ne__(self, other):
"""Returns whether two `DataObject` instances are different.
`DataObject` instances are different if they are not equivalent as
determined through `__eq__()`.
"""
return not self == | other
@classmethod
def statefields(cls):
return cls.fields.keys() + ['api_data']
def __getstate__(self):
return dict((k, self.__dict__[k]) for k in self.statefields()
if k in self.__dict__)
def get(self, attr, *args):
return getattr(self, attr, *args)
def __ite | r__(self):
for key in self.fields.keys():
yield key
def to_dict(self):
"""Encodes the DataObject to a dictionary."""
# Start with the last set of data we got from the API
data = deepcopy(self.api_data)
# Now replace the data with what's actually in our object
for field_name, field in self.fields.iteritems():
value = getattr(self, field.attrname, None)
if value is not None:
data[field.api_name] = field.encode(value)
else:
data[field.api_name] = None
# Now delete any fields that ended up being None
# since we should exclude them in the resulting dict.
for k in data.keys():
if data[k] is None:
del data[k]
return data
@classmethod
def from_dict(cls, data):
"""Decodes a dictionary into a new `DataObject` instance."""
self = cls()
self.update_from_dict(data)
return self
def update_from_dict(self, data):
"""Adds the content of a dictionary to this DataObject.
Parameter `data` is the dictionary from which to update the object.
Use this only when receiving newly updated or partial content for a
DataObject; that is, when the data is from the outside data source and
needs decoded through the object's fields. Data from "inside" your
application should be added to an object manually by setting the
object's attributes. Data that constitutes a new object should be
turned into another object with `from_dict()`.
"""
if not isinstance(data, dict):
raise TypeError
# Clear any local instance field data
for k in self.fields.iterkeys():
if k in self.__dict__:
del self.__dict__[k]
self.api_data = data
@classmethod
def subclass_with_constant_field(cls, fieldname, value):
"""Returns the closest subclass of this class that has a `Constant`
field with the given value.
Use this method in combination with the `fields.Constant` field class
to find the most appropriate subclass of `cls` based on a content
field. For example, if you have an ``Asset`` class, but want to
declare subclasses with special behavior based on the ``kind`` field
of the ``Asset`` instances, declare ``kind |
from collections import OrderedDict
from rest_framework.fields import Field
from ..models import SourceImageIOError
class ImageRenditionField(Field):
    """
    Serialises details of a rendition generated from the source image with
    the configured filter spec.
    Example:
        "thumbnail": {
            "url": "/media/images/myimage.max-165x165.jpg",
            "width": 165,
            "height": 100,
            "alt": "Image alt text"
        }
    If there is an error with the source image, the dict will only contain a
    single key, "error", indicating this error:
        "thumbnail": {
            "error": "SourceImageIOError"
        }
    """

    def __init__(self, filter_spec, *args, **kwargs):
        self.filter_spec = filter_spec
        super().__init__(*args, **kwargs)

    def to_representation(self, image):
        try:
            rendition = image.get_rendition(self.filter_spec)
            payload = [
                ("url", rendition.url),
                ("width", rendition.width),
                ("height", rendition.height),
                ("alt", rendition.alt),
            ]
            return OrderedDict(payload)
        except SourceImageIOError:
            # Source file missing/unreadable: report the error instead.
            return OrderedDict([("error", "SourceImageIOError")])
|
# Copyright (c) 2014 Dark Secret Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl | icable law or agreed to in writing, software
# distributed un | der the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
class InvalidVersion(Exception):
    """Raised when a record's version has no registered handler."""
    pass
class OutOfSync(Exception):
    """Raised when an expected record marker is not found in the stream."""
    pass
class EndOfFile(Exception):
    """Raised when the stream ends in the middle of a record."""
    pass
# Magic marker written at the beginning of every record.
BOR_MAGIC_NUMBER = 0x69867884

class Version0(object):
    """Reads and writes the version-independent record preamble.

    Preamble layout (identical for all versions):
        i = 0x69867884 magic number
        h = format version
    """
    def __init__(self):
        self.preamble_schema = "ih"
        self.preamble_size = struct.calcsize(self.preamble_schema)

    def make_preamble(self, version):
        """Pack the magic marker and `version` into preamble bytes."""
        return struct.pack(self.preamble_schema, BOR_MAGIC_NUMBER, version)

    def _check_eof(self, expected, actual):
        # A short read means the stream ended mid-record.
        if actual < expected:
            raise EndOfFile()

    def load_preamble(self, file_handle):
        """Read a preamble from `file_handle` and return its version number."""
        blob = file_handle.read(self.preamble_size)
        self._check_eof(self.preamble_size, len(blob))
        magic, version = struct.unpack(self.preamble_schema, blob)
        if magic != BOR_MAGIC_NUMBER:
            raise OutOfSync("Expected Beginning of Record marker")
        return version
class Version1(Version0):
    """Packs/unpacks version-1 records: a metadata dict plus a raw
    notification blob.

    NOTE(review): this is Python 2 code -- it relies on the `unicode`
    builtin, `dict.iteritems()`, integer `/` division and slicing a
    `range()` list, none of which work unchanged on Python 3.
    """
    # Version 1 SCHEMA
    # ----------------
    # i = metadata block length
    # i = raw notification block length
    # i = 0x00000000 EOR
    # Metadata dict block
    # i = number of strings (N) - key/value = 2 strings
    # N * i = length of key followed by length of value
    # N * (*s) = key followed by value
    # Raw notification block
    # i = length of raw data block
    # *s = raw data
    # EXAMPLE
    # --------
    # With above Event and Metadata
    #
    # Header schema: "iii"
    # Metadata length: 119
    # Raw notification length: 201
    # Metadata = 6 strings (3 key-value pairs)
    # Metadata schema: "iiiiiii6s14s10s31s10s20s"
    #                         ------ key/value
    #                               ------ key/value
    #                                    ----- key/value
    #                   ------ length of the 6 strings
    #                  - 12 entries (6 string sizes + 6 strings)
    # Raw notification: "i197s"
    #                     ---- json notification
    #                    - 197
    def __init__(self):
        super(Version1, self).__init__()
        # Header: metadata length, raw block length, 0 end-of-record marker.
        self.header_schema = "iii"
        self.header_size = struct.calcsize(self.header_schema)
    def _encode(self, s):
        # Normalize unicode to UTF-8 bytes so struct can pack it (Python 2).
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s
    def pack(self, notification, metadata):
        """Serialize one record; returns (preamble, header, metadata, raw)
        byte blocks in write order."""
        nsize = len(notification)
        raw_block_schema = "i%ds" % nsize
        raw_block = struct.pack(raw_block_schema, nsize, notification)
        metadata_items = ["i"]  # appended with N "%ds"'s
        # First int is 2x the string count ("12 entries" in the example
        # above); unpack() divides it by 2 to recover the length-int count.
        metadata_values = [len(metadata) * 4]  # [n]=key, [n+1]=value
        # First pass: lengths of every key and value.
        for key, value in metadata.iteritems():
            key = self._encode(key)
            value = self._encode(value)
            metadata_items.append("i")
            metadata_items.append("i")
            metadata_values.append(len(key))
            metadata_values.append(len(value))
        # Second pass: the key/value strings themselves (dict iteration
        # order is stable between the two passes within one call).
        for key, value in metadata.iteritems():
            key = self._encode(key)
            value = self._encode(value)
            metadata_items.append("%ds" % len(key))
            metadata_values.append(key)
            metadata_items.append("%ds" % len(value))
            metadata_values.append(value)
        metadata_schema = "".join(metadata_items)
        metadata = struct.pack(metadata_schema, *metadata_values)
        header = struct.pack(self.header_schema,
                             struct.calcsize(metadata_schema),
                             struct.calcsize(raw_block_schema), 0)
        preamble = self.make_preamble(1)
        return (preamble, header, metadata, raw_block)
    def unpack(self, file_handle):
        """Read one version-1 record (preamble already consumed); returns
        (metadata_dict, raw_notification)."""
        header_bytes = file_handle.read(self.header_size)
        self._check_eof(self.header_size, len(header_bytes))
        header = struct.unpack(self.header_schema, header_bytes)
        if header[2] != 0:
            raise OutOfSync("Didn't find 0 EOR marker.")
        metadata_bytes = file_handle.read(header[0])
        self._check_eof(header[0], len(metadata_bytes))
        num_strings = struct.unpack_from("i", metadata_bytes)
        offset = struct.calcsize("i")
        # Python 2 integer division: number of length-ints to read.
        lengths = num_strings[0] / 2
        lengths_schema = "i" * lengths
        key_value_sizes = struct.unpack_from(lengths_schema, metadata_bytes,
                                             offset=offset)
        key_value_schema_list = ["%ds" % sz for sz in key_value_sizes]
        key_value_schema = "".join(key_value_schema_list)
        offset += struct.calcsize(lengths_schema)
        key_values = struct.unpack_from(key_value_schema, metadata_bytes,
                                        offset=offset)
        # Pair up (key, value) from the flat string tuple.
        metadata = dict((key_values[n], key_values[n + 1])
                        for n in range(len(key_values))[::2])
        raw = file_handle.read(header[1])
        self._check_eof(header[1], len(raw))
        raw_len = struct.unpack_from("i", raw)
        offset = struct.calcsize("i")
        jnot = struct.unpack_from("%ds" % raw_len[0], raw, offset=offset)
        return (metadata, jnot[0])
# Registry of wire-format handlers, keyed by version number.
VERSIONS = {1: Version1()}
# Version used when packing new records.
CURRENT_VERSION = 1
def get_version_handler(version=CURRENT_VERSION):
    """Return the handler registered for `version`.

    Raises `InvalidVersion` when no handler is registered.
    """
    handler = VERSIONS.get(version)
    if not handler:
        raise InvalidVersion()
    return handler
def pack_notification(notification, metadata, version=CURRENT_VERSION):
    """Serialize `notification` + `metadata` with the handler for `version`."""
    handler = get_version_handler(version)
    return handler.pack(notification, metadata)
def unpack_notification(file_handle):
    """Read one record from `file_handle`, dispatching on the preamble version."""
    version = Version0().load_preamble(file_handle)
    return get_version_handler(version).unpack(file_handle)
|
ed if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'UW-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'UW-logo-32x32.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template n | ames.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
| # If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tupelodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# The following comes from
# https://github.com/rtfd/readthedocs.org/issues/416
#
# Raw strings so the backslashes reach LaTeX verbatim: '\D' is not a
# recognized Python escape and only worked by accident (it now emits
# a warning and may become an error in future Python versions).
'preamble': "".join((
    r'\DeclareUnicodeCharacter{00A0}{ }',     # NO-BREAK SPACE
    r'\DeclareUnicodeCharacter{251C}{+}',     # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    r'\DeclareUnicodeCharacter{2514}{+}',     # BOX DRAWINGS LIGHT UP AND RIGHT
)),
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Tupelo.tex', u'Tupelo Documentation',
u'Stuart Maclean', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'UW-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tupelo', u'Tupelo Documentation',
[u'Stuart Maclean'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Tupelo', u'Tupelo Documentation',
u'Stuart Maclean', 'Tupelo', 'Tupelo Disk Store',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Pin the branch name used to construct intersphinx documentation URLs.
os.environ['GITBRANCH'] = "develop"
if os.environ.get('DOCSURL') is None:
    #os.environ['DOCSURL'] = "file://{}".format(os.environ.get('GIT'))
    # Default to the internal dev docs server when DOCSURL is not set in
    # the environment (CI/local builds can override it).
    os.environ['DOCSURL'] = "http://u12-dev-svr-1.prisem.washington.edu:8080/docs/{}/html".format(
        os.environ['GITBRANCH'])
intersphinx_cache_limit = -1   # days to keep the cached inventories (0 == forever)
intersphinx_mapping = {
'dimsocd': ("{}/dims-ocd".format(os.environ['DOCSURL']), None),
'dimsad': ("{}/dims-ad".format(os.environ['DOCSURL']), None),
'dimssr': ("{}/dims-sr".format(os.environ['DOCSURL']), None),
'dimsdevguide': ("{}/dims-devguide".format(os.environ['DOCSURL']), None),
'dimspacker': ("{}/dims-packer".format(os.environ['DOCSURL']), None),
'd |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
#
# This program is free | software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Fre | e Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import followup
import account_move_line
import account_followup
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-02 15:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migra | tion(migrations.Migration):
dependencies = [
('sponsors', '0005_auto_20160530_1255'),
| ]
operations = [
migrations.AddField(
model_name='sponsor',
name='conference',
field=models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017')], default='pycontw-2017', verbose_name='conference'),
),
]
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libhio(AutotoolsPackage):
    """libHIO is a flexible, high-performance parallel IO package developed
    at LANL. libHIO supports IO to either a conventional PFS or to Cray
    DataWarp with management of Cray DataWarp space and stage-in and
    stage-out from and to the PFS.
    """
    homepage = "https://github.com/hpc/libhio"
    url      = "https://github.com/hpc/libhio/releases/download/hio.1.4.1.0/libhio-1.4.1.0.tar.bz2"
    #
    # We don't include older versions since they are missing features
    # needed by current and future consumers of libhio
    #
    version('1.4.1.0', '6ef566fd8cf31fdcd05fab01dd3fae44')
    #
    # main users of libhio through spack will want to use the HDF5 plugin,
    # so make the hdf5 variant a default
    #
    variant('hdf5', default=True, description='Enable HDF5 support')
    depends_on("json-c")
    depends_on("bzip2")
    depends_on("pkgconfig", type="build")
    depends_on('mpi')
    #
    # libhio depends on hdf5+mpi if hdf5 is being used since it
    # autodetects the presence of an MPI and/or uses mpicc by default to build
    depends_on('hdf5+mpi', when='+hdf5')
    #
    # wow, we need to patch libhio
    #
    patch('0001-configury-fix-a-problem-with-bz2-configury.patch', when="@1.4.1.0")
    patch('0001-hdf5-make-docs-optional.patch', when="@1.4.1.0")
    def autoreconf(self, spec, prefix):
        """Regenerate the configure script (the shipped one predates our patches)."""
        autoreconf = which('autoreconf')
        autoreconf('-ifv')
    def configure_args(self):
        """Build ./configure arguments: external bzip2, plus HDF5 when enabled."""
        spec = self.spec
        args = []
        args.append('--with-external_bz2={0}'.format(spec['bzip2'].prefix))
        if '+hdf5' in spec:
            args.append('--with-hdf5={0}'.format(spec['hdf5'].prefix))
        return args
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Unit tests for all SQL implementations of spectrum libraries.
"""
from os import path as os_path
import uuid
import unittest
import numpy as np
import fourgp_speclib
class TestSpectrumLibrarySQL(object):
    """
    Mixin adding a standard battery of tests to any SQL-based SpectrumLibrary
    unittest class. The host class must provide ``self._lib``.
    """

    def test_refresh(self):
        """
        Check that we can refresh database connection.
        """
        self._lib.refresh_database()

    def test_spectrum_retrieval(self):
        """
        Check that a single spectrum stored in the SpectrumLibrary can be
        retrieved again intact.
        """
        # Build a random spectrum to store
        n_points = 50
        spectrum_in = fourgp_speclib.Spectrum(wavelengths=np.arange(n_points),
                                              values=np.random.random(n_points),
                                              value_errors=np.random.random(n_points),
                                              metadata={"origin": "unit-test"})
        # Store it in the spectrum library
        self._lib.insert(spectrum_in, "dummy_filename")
        # Read it back as a SpectrumArray
        search_results = self._lib.search()
        spectrum_array = self._lib.open(filenames=search_results[0]['filename'])
        # Pull the single spectrum back out of the array
        spectrum_out = spectrum_array.extract_item(0)
        # The round trip should preserve the spectrum
        self.assertEqual(spectrum_out, spectrum_in)

    def test_search_illegal_metadata(self):
        """
        Check that searching on a metadata field which was never stored raises.
        """
        n_points = 50
        spectrum_in = fourgp_speclib.Spectrum(wavelengths=np.arange(n_points),
                                              values=np.random.random(n_points),
                                              value_errors=np.random.random(n_points),
                                              metadata={"origin": "unit-test"})
        self._lib.insert(spectrum_in, "dummy_filename")
        # The field <x_value> does not exist, so this search must fail
        with self.assertRaises(AssertionError):
            self._lib.search(x_value=23)

    def test_search_1d_numerical_range(self):
        """
        Check that we can search for spectra on a simple metadata numerical range constraint.
        """
        # Insert ten random spectra, tagged x_value = 0..9
        n_points = 50
        inserted_values = list(range(10))
        for value in inserted_values:
            spectrum = fourgp_speclib.Spectrum(wavelengths=np.arange(n_points),
                                               values=np.random.random(n_points),
                                               value_errors=np.random.random(n_points),
                                               metadata={"origin": "unit-test",
                                                         "x_value": value})
            self._lib.insert(spectrum, "x_{}".format(value))
        # Query for spectra whose x_value lies strictly inside the range
        lower, upper = 4.5, 8.5
        expected = [value for value in inserted_values if lower < value < upper]
        matches = self._lib.search(x_value=[lower, upper])
        spectrum_ids = [str(item["specId"]) for item in matches]
        found = [entry['x_value']
                 for entry in self._lib.get_metadata(ids=spectrum_ids)]
        # Check that we got back exactly the in-range spectra
        self.assertEqual(found, expected)

    def test_search_1d_numerical_value(self):
        """
        Check that we can search for spectra on a simple metadata numerical point-value constraint.
        """
        # Insert ten random spectra, tagged x_value = 0..9
        n_points = 50
        for value in range(10):
            spectrum = fourgp_speclib.Spectrum(wavelengths=np.arange(n_points),
                                               values=np.random.random(n_points),
                                               value_errors=np.random.random(n_points),
                                               metadata={"origin": "unit-test",
                                                         "x_value": value})
            self._lib.insert(spectrum, "x_{}".format(value))
        # Exactly one spectrum has x_value == 5
        matches = self._lib.search(x_value=5)
        spectrum_ids = [str(item["specId"]) for item in matches]
        found = [entry['x_value']
                 for entry in self._lib.get_metadata(ids=spectrum_ids)]
        self.assertEqual(found, [5])

    def test_search_1d_string_range(self):
        """
        Check that we can search for spectra on a simple metadata string range constraint.
        """
        # Insert twelve random spectra tagged with 3-letter alphabet slices
        alphabet = "abcdefghijklmnopqrstuvwxyz"
        n_points = 50
        for index in range(12):
            spectrum = fourgp_speclib.Spectrum(wavelengths=np.arange(n_points),
                                               values=np.random.random(n_points),
                                               value_errors=np.random.random(n_points),
                                               metadata={"origin": "unit-test",
                                                         "x_value": alphabet[index:index + 3]})
            self._lib.insert(spectrum, "x_{}".format(index))
        # Query for x_value lexically between "dxx" and "h"
        matches = self._lib.search(x_value=["dxx", "h"])
        filenames = [str(item["filename"]) for item in matches]
        found = sorted(str(entry["x_value"])
                       for entry in self._lib.get_metadata(filenames=filenames))
        self.assertEqual(["efg", "fgh", "ghi"], found)

    def test_search_1d_string_value(self):
        """
        Check that we can search for spectra on a simple metadata string point-value constraint.
        """
        # Insert ten random spectra tagged with 3-letter alphabet slices
        alphabet = "abcdefghijklmnopqrstuvwxyz"
        n_points = 50
        for index in range(10):
            spectrum = fourgp_speclib.Spectrum(wavelengths=np.arange(n_points),
                                               values=np.random.random(n_points),
                                               value_errors=np.random.random(n_points),
                                               metadata={"origin": "unit-test",
                                                         "x_value": alphabet[index:index + 3]})
            self._lib.insert(spectrum, "x_{}".format(index))
        # Exactly one spectrum carries the tag "def"
        matches = self._lib.search(x_value="def")
        filenames = [str(item["filename"]) for item in matches]
        found = sorted(str(entry["x_value"])
                       for entry in self._lib.get_metadata(filenames=filenames))
        self.assertEqual(found, ["def"])
|
d import tasks_v2beta2
>>>
>>> client = tasks_v2beta2.CloudTasksClient()
>>>
>>> name = client.queue_path('[PROJECT]', '[LOCATION]', '[QUEUE]')
>>>
>>> response = client.resume_queue(name)
Args:
name (str): Required. The queue name. For example:
``projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID``
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry client library requests. If ``None`` is specified,
requests will be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the client library request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to | the client library method.
Returns:
A :class:`~google.cloud.tasks_v2beta2.types.Queue` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.ex | ceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "resume_queue" not in self._inner_api_calls:
self._inner_api_calls[
"resume_queue"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.resume_queue,
default_retry=self._method_configs["ResumeQueue"].retry,
default_timeout=self._method_configs["ResumeQueue"].timeout,
client_info=self._client_info,
)
request = cloudtasks_pb2.ResumeQueueRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["resume_queue"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_iam_policy(
self,
resource,
options_=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the access control policy for a ``Queue``. Returns an empty policy
if the resource exists and does not have a policy set.
Authorization requires the following `Google
IAM <https://cloud.google.com/iam>`__ permission on the specified
resource parent:
- ``cloudtasks.queues.getIamPolicy``
Example:
>>> from google.cloud import tasks_v2beta2
>>>
>>> client = tasks_v2beta2.CloudTasksClient()
>>>
>>> resource = client.queue_path('[PROJECT]', '[LOCATION]', '[QUEUE]')
>>>
>>> response = client.get_iam_policy(resource)
Args:
resource (str): REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this field.
options_ (Union[dict, ~google.cloud.tasks_v2beta2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to
``GetIamPolicy``. This field is only used by Cloud IAM.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.tasks_v2beta2.types.GetPolicyOptions`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry client library requests. If ``None`` is specified,
requests will be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the client library request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the client library method.
Returns:
A :class:`~google.cloud.tasks_v2beta2.types.Policy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_iam_policy" not in self._inner_api_calls:
self._inner_api_calls[
"get_iam_policy"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_iam_policy,
default_retry=self._method_configs["GetIamPolicy"].retry,
default_timeout=self._method_configs["GetIamPolicy"].timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.GetIamPolicyRequest(
resource=resource, options=options_
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("resource", resource)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_iam_policy"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_iam_policy(
self,
resource,
policy,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the access control policy for a ``Queue``. Replaces any existing
policy.
Note: The Cloud Console does not check queue-level IAM permissions yet.
Project-level permissions are required to use the Cloud Console.
Authorization requires the following `Google
IAM <https://cloud.google.com/iam>`__ permission on the specified
resource parent:
- ``cloudtasks.queues.setIamPolicy``
Example:
>>> from google.cloud import tasks_v2beta2
>>>
>>> client = tasks_v2beta2.CloudTasksClient()
>>>
>>> resource = client.queue_path('[PROJECT]', '[LOCATION]', '[QUEUE]')
>>>
>>> # TODO: Initialize `policy`:
>>> policy = {}
>>>
>>> response = client.set_iam_policy(resource, policy)
Args:
resource (str): REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this field.
policy (Union[dict, ~google.cloud.tasks_v2beta2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.tasks_v2beta2.types.Policy`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry client library re |
agebox
from PDFManager.PDFMangerFacade import PDFMangerFacade
class PDFManager_UI:
    def __init__(self):
        """Build the main window: widgets, grid layout, and initial state."""
        # self.i tracks the last insert position in the tree view; -1 == empty
        self.i= -1;
        # Absolute paths of the PDFs currently listed
        self.files=[]
        self.root = Tk()
        self.root.title('PDFManager')
        self.root.wm_iconbitmap("ico.ico") # window icon
        self.frame = Frame(self.root,height=2,bd=2,relief=SUNKEN,bg='black',)
        self.root.resizable(False, False) # disable window resizing
        # center the window on the screen
        larghezza = self.root.winfo_screenwidth() # screen width in pixels
        altezza = self.root.winfo_screenheight() # screen height in pixels
        WIDTH = self.root.winfo_reqwidth()
        HEIGHT = self.root.winfo_reqheight()
        x = larghezza//2 - WIDTH
        y = altezza//2 - HEIGHT
        self.root.geometry("%dx%d+%d+%d" % (421,342 , x, y))
        # Action buttons (labels are Italian: merge/split/fuse/watermark/...)
        self.button_merge = Button(self.root, text = 'Unisci', command=self.__unisci__)
        self.button_stitching = Button(self.root,text = 'Dividi',command=self.dividi)
        self.button_split = Button(self.root, text = 'Fusione', command=self.__fusione__)
        self.button_watermark = Button(self.root, text = 'Filigrana', command=self.__filigrana__)
        self.button_encript = Button(self.root, text = 'Cripta', command=self.__cripta__)
        self.button_rotate = Button(self.root, text='Ruota', command=self.__ruota__)
        self.button_clear =Button(self.root, text='Rimuovi tutto', command=self.__svuota__)
        # Password entry for the encrypt action
        self.password = Entry(self.root)
        # Rotation angle selector (read-only so only listed values are possible)
        self.combo_rotate = ttk.Combobox(self.root,state='readonly')
        self.combo_rotate['values'] = (0,90,180,270)
        lblPass = Label(self.root,text='Password :',anchor=E)
        lblGradi = Label(self.root,text='Gradi :',anchor=E)
        self.button_add = Button(self.root, text='Aggiungi PDF', command=self.__aggiungi__)
        self.button_delete = Button(self.root, text='Rimuovi selezionato', command=self.__rimuovi__)
        # Tree view listing file name and page count for each loaded PDF
        self.list_file = ttk.Treeview(self.root)
        self.list_file['columns'] =('NumeroPagine')
        self.list_file.heading("#0",text='NomeFile')
        self.list_file.column('#0',anchor=W)
        self.list_file.heading('NumeroPagine',text = 'Numero pagine')
        self.list_file.column('NumeroPagine',anchor='center',width=100)
        # Grid layout
        self.button_add.grid(row=0, column= 0,columnspan=2,sticky=(W,E))
        self.button_delete.grid(row=1,column=0,columnspan=2,sticky=(W,E))
        self.button_clear.grid(row = 2,column=0,columnspan=2,sticky=(W,E))
        self.list_file.grid(row=0,column=2,columnspan=3,rowspan=3)
        self.frame.grid(row=3,column=0,columnspan=5,sticky=(W,E),pady=5)
        self.button_merge.grid(row=4,column=0,columnspan=2,sticky=(W,E))
        self.button_stitching.grid(row=4,column=3,columnspan=2,sticky=(W,E))
        self.button_split.grid(row=5,column=0,columnspan=2,sticky=(W,E))
        self.button_watermark.grid(row=5,column=3,columnspan=2,sticky=(W,E))
        self.button_encript.grid(row=6,column=0,columnspan=2,sticky=(W,E))
        lblPass.grid(row=6,column=2)
        self.password.grid(row=6,column=3,columnspan=2,sticky=(W,E))
        self.button_rotate.grid(row=7,column=0,columnspan=2,sticky=(W,E))
        lblGradi.grid(row=7,column=2)
        self.combo_rotate.grid(row=7,column=3,columnspan=2,sticky=(W,E))
        # All action buttons start disabled until at least one PDF is added
        self.button_stitching.config(state=DISABLED)
        self.button_encript.config(state=DISABLED)
        self.button_watermark.config(state=DISABLED)
        self.button_merge.config(state=DISABLED)
        self.button_split.config(state=DISABLED)
        self.button_rotate.config(state=DISABLED)
def __aggiungi__(self):
filelist = filedialog.askopenfilenames(filetypes=[("PDF file",".pdf")])
for file in filelist:
if(file in self.files):
continue
self.i = self.i+1
self.files.append(file)
split = file.split("/").pop()
self.list_file.insert("",self.i,text=split,values=(PDFMangerFacade.pagescount(file)))
self.__controlla__()
def __rimuovi__(self):
try:
pos = self.list_file.selection()[0]
posizione = self.list_file.index(pos)
del(self.files[posizione])
self.list_file.delete(pos)
self.i= self.i-1
print(self.files)
except IndexError:
messagebox.showwarning("Attenzione","Nessun elemento selezionato")
self.__controlla__()
def __unisci__(self):
try:
name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
if(name.endswith('.pdf') == False):
name = name+'.pdf'
PDFMangerFacade.merge(*self.files, filenameOut=name)
except Exception as e:
messagebox.showwarning("Attenzione",e)
def __svuota__(self):
self.files = []
self.list_file.delete(*self.list_file.get_children())
def dividi(self):
try:
pos = self.list_file.selection()[0]
posizione = self.list_file.index(pos)
phat = filedialog.askdirectory()
prefisso = (self.files[posizione].split("/").pop()).split('.')[0]
PDFMangerFacade.stitching(self.files[posizione], phat + '/' + prefisso)
except IndexError:
messagebox.showwarning("Attenzione","Elemento non selezionato")
def __fusione__(self):
try:
name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
PDFMangerFacade.splitting(*self.files,filenameOut = name)
except IndexError as e:
messagebox.showwarning("Attenzione",e)
def __filigrana__(self):
try:
pos = self.list_file.selection()[0]
posizione = self.list_file.index(pos)
print(self.files[posizione])
name_filigrana = filedialog.askopenfilename(filetypes=[("PDF file",".pdf")])
name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
PDFMangerFacade.watermark(self.files[posizione], name_filigrana, name)
except IndexError:
messagebox.showwarning("Attenzione","Elemento non selezionato.")
def __cripta__(self):
try:
pos = self.list_file.selection()[0]
posizione = self.list_file.index(pos)
password = self.password.get()
if(password == ""):
messagebox.showwarning("Attenzione","Inserire una password.")
return
name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
PDFMangerFacade.encrypt(self.files[posizione], password, name);
self.password.delete(0,'end')
except IndexError:
messagebox.showwarning("Attenzione","Elemento non selezionato.")
def __ruota__(self):
try:
pos = self.list_file.selection()[0]
posizione = self.list_file.index(pos)
gradi = int(self.combo_rotate.get())
name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
PDFMangerFacade.rotatePage(self.files[posizione],name,gradi);
except IndexError:
messagebox.showwarning("Attenzione","Elemento non selezionato.")
except ValueError:
messagebox.showwarning("Attenzione","Selezionare il grado di rotazione.")
def start(self):
self.root.mainloop()
def __controlla__(self):
if((self.i+1) == 0):
self.button_stitching.config(state=DISABLED)
self.button_encript.config(state=DISABLED)
self.button_watermark.config(state=DISABLED)
self.button_merge.config(state=DISABLED)
self.button_split.config(state=DISABLED)
self.button_rotate.config(state=DISABLED)
if((self.i+1) ==1):
self.button_stitching.config(state=NORMAL)
self.button_encript.config(state=NORMAL)
self.button_watermark.config(state=NORMAL)
self.button_merge.config(state=DISABLED)
self.button_split.config(state=DISABLED)
self.button_rotate.config(state=NORMAL)
if((self.i+1) >1):
|
# thesquirrel.org
#
# Copyright (C) 2015 Flying Squirrel Community Space
#
# thesquirrel.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# thesquirrel.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with thesquirrel.org. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from datetime import date, time
import mock
from django.test import TestCase
from thesquirrel.factories import *
from ..factories import *
from ..forms import (EventForm, EventRepeatForm, EventRepeatExcludeForm,
CompositeEventForm)
from ..models import EventRepeat, EventRepeatExclude
class EventFormTest(TestCase):
    """Tests for the basic EventForm."""

    def test_start_date_must_be_after_end_date(self):
        # NOTE(review): despite the method name, this verifies that a form
        # whose end_time precedes its start_time fails validation.
        form_data = {
            'title': 'test-title',
            'description': 'test-description',
            'date': '1/1/2015',
            'start_time': '18:30',
            'end_time': '16:30',
        }
        assert not EventForm(data=form_data).is_valid()

    def test_save(self):
        form_data = {
            'title': 'test-title',
            'description': 'test-description',
            'location': 'Library',
            'bottomliner': 'Santa',
            'date': '1/1/2015',
            'start_time': '18:30',
            'end_time': '19:30',
        }
        form = EventForm(data=form_data)
        assert form.is_valid()
        form.save()
class EventRepeatFormTest(TestCase):
    """Tests for EventRepeatForm (a repeat rule attached to an event).

    Cleanup: removed an unused ``form = self.make_form()`` local in
    ``test_empty_type_labels``; it only created a stray factory object.
    """

    def make_form(self, update=False, number=1):
        """Build an unbound form; with update=True, bind an existing repeat."""
        if update:
            event = EventFactory(with_repeat=True)
            instance = event.repeat_set.all().get()
        else:
            event = EventFactory()
            instance = None
        return EventRepeatForm(number, instance=instance)

    def make_form_with_data(self, update=False, no_days=False,
                            empty_type=False, number=1):
        """Build a bound form with valid data, tweakable via the flags."""
        if update:
            event = EventFactory(with_repeat=True)
            instance = event.repeat_set.all().get()
        else:
            event = EventFactory()
            instance = None
        data = {
            'type': '1M' if not empty_type else '',
            'start_date': '1/1/2015',
            'we': True if not no_days else False,
            'end_date': '2/1/2015',
            'start_time': '16:30',
            'end_time': '18:30',
        }
        return EventRepeatForm(number, instance=instance, data=data)

    def test_save(self):
        form = self.make_form_with_data()
        assert form.is_valid()
        event = EventFactory()
        repeat = form.save(event)
        assert repeat.event == event

    def test_one_weekday_required(self):
        form = self.make_form_with_data(no_days=True)
        assert not form.is_valid()

    def test_empty_type_doesnt_create_new(self):
        form = self.make_form_with_data(empty_type=True)
        assert form.is_valid()
        event = EventFactory()
        form.save(event)
        assert not event.repeat_set.all().exists()

    def test_empty_type_deletes_existing(self):
        form = self.make_form_with_data(update=True, empty_type=True)
        assert form.is_valid()
        event = EventFactory()
        form.save(event)
        assert not event.repeat_set.all().exists()

    def check_empty_type_label(self, form, correct_label):
        """Assert that the '' choice of the type field carries correct_label."""
        empty_type_label = None
        for value, label in form.fields['type'].choices:
            if value == '':
                empty_type_label = label
                break
        assert empty_type_label is not None
        assert empty_type_label == correct_label

    def test_empty_type_labels(self):
        self.check_empty_type_label(self.make_form(), u'No repeat')
        self.check_empty_type_label(self.make_form(update=True),
                                    u'Delete repeat')

    def test_headings(self):
        assert self.make_form().heading == 'Repeat'
        assert self.make_form(number=2).heading == 'Repeat #2'
class EventRepeatExcludeFormTest(TestCase):
    """Tests for the form that excludes specific dates from a repeat."""

    def test_create_excludes(self):
        event = EventFactory(with_repeat=True, with_exclude=True)
        exclude_form = EventRepeatExcludeForm(data={
            'dates': ['2/4/2015', '2/5/2015'],
        })
        assert exclude_form.is_valid()
        exclude_form.save(event)

    def test_invalid_value(self):
        exclude_form = EventRepeatExcludeForm(data={
            'dates': ['invalid-date'],
        })
        assert not exclude_form.is_valid()
class CompositeEventFormTest(TestCase):
    """Tests for CompositeEventForm, which aggregates several subforms."""

    def test_initial_excludes(self):
        event = EventFactory(with_repeat=True, with_exclude=True)
        composite = CompositeEventForm(event)
        expected_dates = [e.date for e in event.excludes.all()]
        assert composite.exclude_form.initial['dates'] == expected_dates

    def mock_out_subforms(self, composite_form):
        """Replace each subform with a mock whose is_valid() returns True."""
        def mock_subform():
            return mock.Mock(
                is_valid=mock.Mock(return_value=True),
            )

        composite_form.event_form = mock_subform()
        composite_form.exclude_form = mock_subform()
        # Replace list members in place so the lists themselves are reused
        for index, _ in enumerate(composite_form.repeat_forms):
            composite_form.repeat_forms[index] = mock_subform()
        for index, _ in enumerate(composite_form.update_repeat_forms):
            composite_form.update_repeat_forms[index] = mock_subform()
        return composite_form

    def test_is_valid(self):
        event = EventFactory(with_repeat=True)
        composite = self.mock_out_subforms(CompositeEventForm(event))
        assert composite.is_valid()
        assert composite.event_form.is_valid.called
        for subform in composite.repeat_forms:
            assert subform.is_valid.called
        for subform in composite.update_repeat_forms:
            assert subform.is_valid.called

    def test_is_valid_return_false(self):
        event = EventFactory(with_repeat=True)
        composite = self.mock_out_subforms(CompositeEventForm(event))
        composite.event_form.is_valid.return_value = False
        assert not composite.is_valid()
        # Even though event_form.is_valid() returns False, is_valid must
        # still be called on every subform so each ErrorDict is generated.
        assert composite.event_form.is_valid.called
        for subform in composite.repeat_forms:
            assert subform.is_valid.called
        for subform in composite.update_repeat_forms:
            assert subform.is_valid.called

    def test_save(self):
        event = EventFactory(with_repeat=True)
        composite = self.mock_out_subforms(CompositeEventForm(event))
        saved_event = composite.event_form.save.return_value
        assert composite.save() == saved_event
        for subform in composite.repeat_forms:
            assert subform.save.call_args
        for subform in composite.update_repeat_forms:
            assert subform.save.call_args
|
#!/usr/bin/env python
import os
import sys


def _main(argv):
    """Run Django's command-line utility with the openwisp2 settings."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'openwisp2.settings')
    # Import lazily so merely importing this module has no Django side effects
    from django.core.management import execute_from_command_line
    execute_from_command_line(argv)


if __name__ == '__main__':
    _main(sys.argv)
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
| # SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPickleshare(PythonPackage):
    """Tiny 'shelve'-like database with concurrency support"""
    # Source distribution on PyPI
    pypi = "pickleshare/pickleshare-0.7.4.tar.gz"
    version('0.7.5', sha256='87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca')
    version('0.7.4', sha256='84a9257227dfdd6fe1b4be1319096c20eb85ff1e82c7932f36efccfe1b09737b')
    depends_on('python@2.7:2.8,3:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    # pathlib2 backport is only needed on Pythons without stdlib pathlib
    depends_on('py-pathlib2', type=('build', 'run'), when='^python@2.6:2.8,3.2:3.3')
|
i])/interval
else:
datum = (float(cur[i]) - float(prev[i]))/interval
data.append(datum)
ds = DiskStat(name=disk, reads_completed=data[0],
reads_merged=data[1],
sectors_read=data[2],
ms_reading=data[3],
writes_completed=data[4],
writes_merged=data[5],
sectors_written=data[6],
ms_writing=data[7],
ios_progress=data[8],
ms_ios=data[9],
weighted_ios=data[10],
ts=ts)
self._save_wrapper(ds)
return cur_stats
def loadavg(self, last_ts):
now = time.mktime(time.gmtime())
if (now - last_ts < 30):
return last_ts
stats_file = '/proc/loadavg'
with open(stats_file) as sfo, open('/proc/uptime') as ufo:
line = sfo.readline()
fields = line.split()
thread_fields = fields[3].split('/')
idle_seconds = int(float(ufo.readline().split()[1]))
ts = datetime.utcnow().replace(tzinfo=utc)
la = LoadAvg(load_1=fields[0], load_5=fields[1], load_15=fields[2],
active_threads=thread_fields[0],
total_threads=thread_fields[1], latest_pid=fields[4],
idle_seconds=idle_seconds, ts=ts)
self._save_wrapper(la)
return now
def meminfo(self):
stats_file = '/proc/meminfo'
(total, free, buffers, cached, swap_total, swap_free, active, inactive,
dirty,) = (None,) * 9
with open(stats_file) as sfo:
for l in sfo.readlines():
if (re.match('MemTotal:', l) is not None):
total = int(l.split()[1])
elif (re.match('MemFree:', l) is not None):
free = int(l.split()[1])
elif (re.match('Buffers:', l) is not None):
buffers = int(l.split()[1])
elif (re.match('Cached:', l) is not None):
cached = int(l.split()[1])
elif (re.match('SwapTotal:', l) is not None):
swap_total = int(l.split()[1])
elif (re.match('SwapFree:', l) is not None):
swap_free = int(l.split()[1])
elif (re.match('Active:', l) is not None):
active = int(l.split()[1])
elif (re.match('Inactive:', l) is not None):
inactive = int(l.split()[1])
elif (re.match('Dirty:', l) is not None):
dirty = int(l.split()[1])
| break # no need to look at lines after dirty.
ts = datetime.utcnow().replace(tzinfo=utc)
mi = MemInfo(total=total, free=free, buffers=buffers, cached=cached,
swap_total=swap_total, swap_free=swap_free, active=active,
inactive=inactive, dirty=dirty, ts=ts)
self._save_wrapper(mi)
    def vmstat(self):
        """Placeholder for /proc/vmstat collection; not implemented yet."""
        stats_file = '/proc/vmstat'
        pass
def network_stats(self, prev_stats, interval):
interfaces = [i.name for i in NetworkInterface.objects.all()]
cur_stats = {}
with open('/proc/net/dev') as sfo:
sfo.readline()
sfo.readline()
for l in sfo.readlines():
fields = l.split()
if (fields[0][:-1] not in interfaces):
continue
cur_stats[fields[0][:-1]] = fields[1:]
ts = datetime.utcnow().replace(tzinfo=utc)
if (isinstance(prev_stats, dict)):
for interface in cur_stats.keys():
if (interface in prev_stats):
data = map(lambda x, y: float(x)/interval if x < y else
(float(x) - float(y))/interval,
cur_stats[interface], prev_stats[interface])
ns = NetStat(device=interface, kb_rx=data[0],
packets_rx=data[1], errs_rx=data[2],
drop_rx=data[3], fifo_rx=data[4],
frame=data[5], compressed_rx=data[6],
multicast_rx=data[7], kb_tx=data[8],
packets_tx=data[9], errs_tx=data[10],
drop_tx=data[11], fifo_tx=data[12],
colls=data[13], carrier=data[14],
compressed_tx=data[15], ts=ts)
self._save_wrapper(ns)
return cur_stats
def pools_usage(self, last_ts):
"""
This info is not from proc atm, but will eventually be.
"""
# collect usage only if the data is more than 30 seconds old
now = time.mktime(time.gmtime())
if (now - last_ts < 30):
return last_ts
ts = datetime.utcnow().replace(tzinfo=utc)
for p in Pool.objects.all():
total_reclaimable = 0
try:
# get usage of all shares in this pool
pool_device = Disk.objects.filter(pool=p)[0].name
share_map = {}
snap_map = {}
for share in Share.objects.filter(pool=p):
share_map[share.qgroup] = share.name
for snap in Snapshot.objects.filter(share=share):
snap_map[snap.qgroup] = snap.real_name
usaged = shares_usage(p, pool_device, share_map, snap_map)
for s in usaged.keys():
try:
total_reclaimable += (
Share.objects.get(name=s).size - usaged[s][1])
except:
pass
su = None
try:
su = ShareUsage.objects.filter(name=s).latest('id')
if ((ts - su.ts).total_seconds() > 90):
su = None
except Exception, e:
e_msg = ('Unable to get latest share usage object '
'for share(%s). A new one will be created.'
% s)
logger.error(e_msg)
# we check for changed in both referenced and exclusive
# usage because in rare cases it's possible for only one
# to change.
if ((su is None or su.r_usage != usaged[s][0] or
su.e_usage != usaged[s][1])):
su = ShareUsage(name=s, r_usage=usaged[s][0],
e_usage=usaged[s][1], ts=ts)
else:
su.ts = ts
su.count = su.count + 1
self._save_wrapper(su)
except Exception, e:
logger.debug('command exception while getting shares usage '
'for pool: %s' % (p.name))
logger.exception(e)
try:
usage = pool_usage('/%s/%s' % (settings.MNT_PT, p.name))
total_free = usage[2] # free + reclaimable
pu = None
try:
pu = PoolUsage.objects.filter(pool=p.name).latest('id')
if ((ts - pu.ts).total_seconds() > 90):
pu = None
except Exception, e:
e_msg = ('Unable to get latest pool usage object for '
'pool(%s). A new one will be created.' % p.name)
logger.error(e_msg)
if ((pu is None or
p.size - (pu.free + pu.reclaimable) != usage[1])):
pu = PoolUsage(pool=p.name,
|
from django.contrib import admin
from django.utils.translati | on import ugettext_lazy as _
from example.apps.things.models import Thing
class ThingAdmin(admin.ModelAdmin):
    """Django admin options for the example Thing model."""
    fieldsets = (
        (None, {
            'fields': ('name', 'slug', 'image', 'description'),
        }),
        # Timestamps live in a collapsible section of the change form
        (_(u'Dates'), {
            'fields': ('created', 'modified'),
            'classes': ('collapse', ),
        }),
    )
    list_display = ('name', 'slug')
    list_filter = ('created', 'modified')
    # Auto-fill the slug from the name while typing in the admin form
    prepopulated_fields = {'slug': ('name', )}
    # Timestamps are managed automatically, so show them read-only
    readonly_fields = ('created', 'modified')
    search_fields = ('name', 'slug')
admin.site.register(Thing, ThingAdmin)
|
init_config, agentConfig, instances)
# Initialize a HTTP opener with Unix socket support
socket_timeout = int(init_config.get('socket_timeout', 0)) or DEFAULT_SOCKET_TIMEOUT
UnixHTTPConnection.socket_timeout = socket_timeout
self.url_opener = urllib2.build_opener(UnixSocketHandler())
# Locate cgroups directories
self._mountpoints = {}
self._cgroup_filename_pattern = None
docker_root = init_config.get('docker_root', '/')
for metric in CGROUP_METRICS:
self._mountpoints[metric["cgroup"]] = self._find_cgroup(metric["cgroup"], docker_root)
self._last_event_collection_ts = defaultdict(lambda: None)
    def check(self, instance):
        """Run one collection cycle for a Docker instance.

        Collects image counts, per-container metrics (from the API payload
        and cgroup stat files), and Docker API events; image and event
        collection are individually toggled by instance flags.
        """
        # Report image metrics
        if _is_affirmative(instance.get('collect_images_stats', True)):
            self._count_images(instance)
        # Get the list of containers and the index of their names
        containers, ids_to_names = self._get_and_count_containers(instance)
        # Report container metrics from cgroups
        skipped_container_ids = self._report_containers_metrics(containers, instance)
        # Send events from Docker API
        if _is_affirmative(instance.get('collect_events', True)):
            # Skip events for containers that were filtered out above.
            self._process_events(instance, ids_to_names, skipped_container_ids)
# Containers
def _count_images(self, instance):
# It's not an important metric, keep going if it fails
try:
tags = instance.get("tags", [])
active_images = len(self._get_images(instance, get_all=False))
all_images = len(self._get_images(instance, get_all=True))
self.gauge("docker.images.available", active_images, tags=tags)
self.gauge("docker.images.intermediate", (all_images - active_images), tags=tags)
except Exception, e:
self.warning("Failed to count Docker images. Exception: {0}".format(e))
def _get_and_count_containers(self, instance):
tags = instance.get("tags", [])
with_size = _is_affirmative(instance.get('collect_container_size', False))
service_check_name = 'docker.service_up'
try:
running_containers = self._get_containers(instance, with_size=with_size)
all_containers = self._get_containers(instance, get_all=True)
except (socket.timeout, urllib2.URLError), e:
self.service_check(service_check_name, AgentCheck.CRITICAL,
message="Unable to list Docker containers: {0}".format(e), tags=tags)
raise Exception("Failed to collect the list of containers. Exception: {0}".format(e))
self.service_check(service_check_name, AgentCheck.OK, tags=tags)
running_containers_ids = set([container['Id'] for container in running_containers])
for container in all_containers:
container_tags = list(tags)
for key in DOCKER_TAGS:
tag = self._make_tag(key, container[key], instance)
if tag:
container_tags.append(tag)
if container['Id'] in running_containers_ids:
self.set("docker.containers.running", container['Id'], tags=container_tags)
else:
self.set("docker.containers.stopped", container['Id'], tags=container_tags)
# The index of the names is used to generate and format events
ids_to_names = {}
for container in all_containers:
ids_to_names[container['Id']] = container['Names'][0].lstrip("/")
return running_containers, ids_to_names
def _prepare_filters(self, instance):
# The reasoning is to check exclude first, so we can skip if there is no exclude
if not instance.get("exclude"):
return False
# Compile regex
instance["exclude_patterns"] = [re.compile(rule) for rule in instance.get("exclude", [])]
instance["include_patterns"] = [re.compile(rule) for rule in instance.get("include", [])]
return True
def _is_container_excluded(self, instance, tags):
if self._tags_match_patterns(tags, instance.get("exclude_patterns")):
if self._tags_match_patterns(tags, instance.get("include_patterns")):
return False
return True
return False
def _tags_match_patterns(self, tags, filters):
for rule in filters:
for tag in tags:
if re.match(rule, tag):
return True
return False
    def _report_containers_metrics(self, containers, instance):
        """Report per-container metrics from the API payload and cgroups.

        Returns the list of container ids excluded by the include/exclude
        tag filters so that event processing can skip them as well.
        """
        skipped_container_ids = []
        collect_uncommon_metrics = _is_affirmative(instance.get("collect_all_metrics", False))
        tags = instance.get("tags", [])
        # Pre-compile regex to include/exclude containers
        use_filters = self._prepare_filters(instance)
        for container in containers:
            container_tags = list(tags)
            for name in container["Names"]:
                container_tags.append(self._make_tag("name", name.lstrip("/"), instance))
            for key in DOCKER_TAGS:
                tag = self._make_tag(key, container[key], instance)
                if tag:
                    container_tags.append(tag)
            # Check if the container is included/excluded via its tags
            if use_filters and self._is_container_excluded(instance, container_tags):
                skipped_container_ids.append(container['Id'])
                continue
            # Metrics taken straight from the container-list API payload.
            for key, (dd_key, metric_type) in DOCKER_METRICS.iteritems():
                if key in container:
                    getattr(self, metric_type)(dd_key, int(container[key]), tags=container_tags)
            # Metrics read from the container's cgroup stat files.
            for cgroup in CGROUP_METRICS:
                stat_file = self._get_cgroup_file(cgroup["cgroup"], container['Id'], cgroup['file'])
                stats = self._parse_cgroup_file(stat_file)
                if stats:
                    for key, (dd_key, metric_type, common_metric) in cgroup['metrics'].iteritems():
                        # Uncommon metrics are only sent when explicitly enabled.
                        if key in stats and (common_metric or collect_uncommon_metrics):
                            getattr(self, metric_type)(dd_key, int(stats[key]), tags=container_tags)
        if use_filters:
            self.log.debug("List of excluded containers: {0}".format(skipped_container_ids))
        return skipped_container_ids
def _make_tag(self, key, value, instance):
tag_name = key.lower()
if tag_name == "command" an | d not instance.get("tag_by_command", False):
return None
if instance.get("new_tag_names", False):
tag_name = self._new_tags_conversion(tag_name)
return "%s:%s" % (tag_name, value.strip())
def _new_tags_conversion(self, tag):
# Prefix tags to avoid conflict with AWS tags
return NEW_TAGS_MAP.get(tag, tag)
# Events
    def _process_events(self, instance, ids_to_names, skipped_container_ids):
        """Fetch Docker API events, aggregate them per image, and report them.

        Network timeouts are tolerated: events are simply missing for this
        run and a warning is emitted.
        """
        try:
            api_events = self._get_events(instance)
            aggregated_events = self._pre_aggregate_events(api_events, skipped_container_ids)
            events = self._format_events(aggregated_events, ids_to_names)
            self._report_events(events)
        except (socket.timeout, urllib2.URLError):
            self.warning('Timeout during socket connection. Events will be missing.')
def _pre_aggregate_events(self, api_events, skipped_container_ids):
# Aggregate events, one per image. Put newer events first.
events = defaultdict(list)
for event in api_events:
# Skip events related to filtered containers
if event['id'] in skipped_container_ids:
self.log.debug("Excluded event: container {0} status changed to {1}".format(
event['id'], event['status']))
continue
# Known bug: from may be missing
if 'from' in event:
events[event['from']].insert(0, event)
return events
def _format_events(self, aggregated_events, ids_to_names):
events = []
for image_name, event_group in aggregated_events.iteritems():
|
5,
doc='Maximum energy for fit. Default=0.5')
self.declareProperty(name='BinReductionF | actor', defaultValue=10.0,
doc='Decrease total number of spectrum points by this ratio through merging of '
'intensities from neighbouring bins. Default=1')
self.declareProperty(ITableWorkspaceProperty('ParameterWorkspace', '',
direction=Direction.Output,
optional=PropertyMode.Op | tional),
doc='Table workspace for saving TransformToIqt properties')
self.declareProperty(MatrixWorkspaceProperty('OutputWorkspace', '',
direction=Direction.Output,
optional=PropertyMode.Optional),
doc='Output workspace')
self.declareProperty(name='DryRun', defaultValue=False,
doc='Only calculate and output the parameters')
    def PyExec(self):
        """Execute the algorithm: always compute the parameter table, then
        run the transform and attach logs unless DryRun is set."""
        self._setup()
        self._calculate_parameters()
        if not self._dry_run:
            self._transform()
            self._add_logs()
        else:
            # Keep the progress reporting consistent even when skipping work.
            skip_prog = Progress(self, start=0.3, end=1.0, nreports=2)
            skip_prog.report('skipping transform')
            skip_prog.report('skipping add logs')
            logger.information('Dry run, will not run TransformToIqt')
        self.setProperty('ParameterWorkspace', self._parameter_table)
        self.setProperty('OutputWorkspace', self._output_workspace)
    def _setup(self):
        """
        Gets algorithm properties and caches them on the instance.
        """
        from IndirectCommon import getWSprefix
        self._sample = self.getPropertyValue('SampleWorkspace')
        self._resolution = self.getPropertyValue('ResolutionWorkspace')
        self._e_min = self.getProperty('EnergyMin').value
        self._e_max = self.getProperty('EnergyMax').value
        self._number_points_per_bin = self.getProperty('BinReductionFactor').value
        self._parameter_table = self.getPropertyValue('ParameterWorkspace')
        if self._parameter_table == '':
            # Default the table name from the sample workspace prefix.
            self._parameter_table = getWSprefix(self._sample) + 'TransformToIqtParameters'
        self._output_workspace = self.getPropertyValue('OutputWorkspace')
        if self._output_workspace == '':
            # Default the output name from the sample workspace prefix.
            self._output_workspace = getWSprefix(self._sample) + 'iqt'
        self._dry_run = self.getProperty('DryRun').value
def validateInputs(self):
"""
Validate input properties.
"""
issues = dict()
e_min = self.getProperty('EnergyMin').value
e_max = self.getProperty('EnergyMax').value
# Check for swapped energy values
if e_min > e_max:
energy_swapped = 'EnergyMin is greater than EnergyMax'
issues['EnergyMin'] = energy_swapped
issues['EnergyMax'] = energy_swapped
return issues
    def _calculate_parameters(self):
        """
        Calculates the TransformToIqt parameters and saves in a table workspace.

        Derives the output binning from the cropped energy range, reads the
        instrument resolution from the IPF (falling back to a default), and
        stores everything in the ParameterWorkspace table.
        """
        workflow_prog = Progress(self, start=0.0, end=0.3, nreports=8)
        workflow_prog.report('Croping Workspace')
        CropWorkspace(InputWorkspace=self._sample,
                      OutputWorkspace='__TransformToIqt_sample_cropped',
                      Xmin=self._e_min,
                      Xmax=self._e_max)
        workflow_prog.report('Calculating table properties')
        x_data = mtd['__TransformToIqt_sample_cropped'].readX(0)
        # Bin boundaries -> number of bins is one less than the X length.
        number_input_points = len(x_data) - 1
        num_bins = int(number_input_points / self._number_points_per_bin)
        self._e_width = (abs(self._e_min) + abs(self._e_max)) / num_bins
        workflow_prog.report('Attemping to Access IPF')
        try:
            workflow_prog.report('Access IPF')
            instrument = mtd[self._sample].getInstrument()
            analyserName = instrument.getStringParameter('analyser')[0]
            analyser = instrument.getComponentByName(analyserName)
            # Prefer the analyser component's resolution; fall back to the
            # top-level instrument parameter when the component is absent.
            if analyser is not None:
                logger.debug('Found %s component in instrument %s, will look for resolution there'
                             % (analyserName, instrument))
                resolution = analyser.getNumberParameter('resolution')[0]
            else:
                logger.debug('No %s component found on instrument %s, will look for resolution in top level instrument'
                             % (analyserName, instrument))
                resolution = instrument.getNumberParameter('resolution')[0]
            logger.information('Got resolution from IPF: %f' % resolution)
            workflow_prog.report('IPF resolution obtained')
        except (AttributeError, IndexError):
            workflow_prog.report('Resorting to Default')
            resolution = 0.0175
            logger.warning('Could not get resolution from IPF, using default value: %f' % (resolution))
        # Number of output bins the resolution curve spans.
        resolution_bins = int(round((2 * resolution) / self._e_width))
        if resolution_bins < 5:
            logger.warning('Resolution curve has <5 points. Results may be unreliable.')
        workflow_prog.report('Creating Parameter table')
        param_table = CreateEmptyTableWorkspace(OutputWorkspace=self._parameter_table)
        workflow_prog.report('Populating Parameter table')
        param_table.addColumn('int', 'SampleInputBins')
        param_table.addColumn('float', 'BinReductionFactor')
        param_table.addColumn('int', 'SampleOutputBins')
        param_table.addColumn('float', 'EnergyMin')
        param_table.addColumn('float', 'EnergyMax')
        param_table.addColumn('float', 'EnergyWidth')
        param_table.addColumn('float', 'Resolution')
        param_table.addColumn('int', 'ResolutionBins')
        param_table.addRow([number_input_points, self._number_points_per_bin, num_bins,
                            self._e_min, self._e_max, self._e_width,
                            resolution, resolution_bins])
        workflow_prog.report('Deleting temp Workspace')
        DeleteWorkspace('__TransformToIqt_sample_cropped')
        self.setProperty('ParameterWorkspace', param_table)
def _add_logs(self):
sample_logs = [('iqt_sample_workspace', self._sample),
('iqt_resolution_workspace', self._resolution),
('iqt_binning', '%f,%f,%f' % (self._e_min, self._e_width, self._e_max))]
log_alg = self.createChildAlgorithm(name='AddSampleLogMultiple', startProgress=0.8,
endProgress=1.0, enableLogging=True)
log_alg.setProperty('Workspace', self._output_workspace)
log_alg.setProperty('LogNames', [item[0] for item in sample_logs])
log_alg.setProperty('LogValues', [item[1] for item in sample_logs])
log_alg.execute()
def _transform(self):
"""
Run TransformToIqt.
"""
from IndirectCommon import CheckHistZero, CheckHistSame, CheckAnalysers
trans_prog = Progress(self, start=0.3, end=0.8, nreports=15)
try:
CheckAnalysers(self._sample, self._resolution)
except ValueError:
# A genuine error the shows that the two runs are incompatible
raise
except:
# Checking could not be performed due to incomplete or no instrument
logger.warning('Could not check for matching analyser and reflection')
# Process resolution data
num_res_hist = CheckHistZero(self._resolution)[0]
if num_res_hist > 1:
CheckHistSame(self._sample, 'Sample', self._resolution, 'Resolution')
# Float conversion to str differs in precision between python 2 and 3, this gives consistent results
rebin_param = '{:.14f},{:.14f},{:.14f}'.format(self._e_min, self._e_width, self._e_max)
trans_prog.report('Rebinning Workspace')
Rebin(InputWorkspace=self._sample,
OutputWorkspace='__sam_data',
Params=rebin |
# -*- coding: utf-8 -*-
from openerp.osv import osv, fields
from openerp.tools.translate import _
import logging
from datetime import dat | etime
from openerp.osv.fields import datetime as datetime_field
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from unidecode import unidecode
import types
class pq_thang_luong(osv.osv):
    """Salary-scale ("thang luong") model.

    Every create/write/unlink clears the whole Redis cache so other
    consumers never read stale scale data.
    """
    _name = 'pq.thang.luong'
    _description = 'Thang Luong'
    _columns = {
        # Display name of the scale (label: "Name").
        'name': fields.char('Tên', size=128, required=True),
        # Multiplier applied by the scale (label: "Ratio").
        'ty_le': fields.float('Tỷ lệ', digits=(16,2)),
        # Creation timestamp (label: "Created at"), set by the server.
        'create_date': fields.datetime('Ngày giờ tạo', readonly=True),
        # Creating user (label: "Created by"), defaulted from uid below.
        'user_id': fields.many2one('res.users', string="Người tạo", readonly=True),
    }
    _defaults = {
        'ty_le': lambda *x: 1,
        'user_id': lambda self, cr, uid, context = None: uid,
    }
    _sql_constraints = [
    ]
    def create(self, cr, uid, vals, context=None):
        # Invalidate the cache before delegating to the standard create.
        self.pool.get('pq.redis').clear_all(cr, uid)
        return super(pq_thang_luong, self).create(cr, uid, vals, context)
    def write(self, cr, uid, ids, vals, context=None):
        # Invalidate the cache before delegating to the standard write.
        self.pool.get('pq.redis').clear_all(cr, uid)
        return super(pq_thang_luong, self).write(cr, uid, ids, vals, context)
    def unlink(self, cr, uid, ids, context=None):
        # Invalidate the cache before delegating to the standard unlink.
        self.pool.get('pq.redis').clear_all(cr, uid)
        return super(pq_thang_luong, self).unlink(cr, uid, ids, context)
pq_thang_luong()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
riable belongs to dataset1 or dataset2.
dataset_pool holds available datasets.
"""
if not isinstance(name, VariableName):
variable_name = VariableName(name)
else:
variable_name = name
short_name = variable_name.get_alias()
if (short_name in self.get_attribute_names()) and (self.are_dependent_variables_up_to_date(
variable_name, version=version)):
return version #nothing to be done
dataset_name = variable_name.get_dataset_name()
if dataset_name == self.get_dataset_name():
new_version = self._compute_one_variable(variable_name, dataset_pool, resources)
else:
owner_dataset, index = self.get_owner_dataset_and_index(dataset_name)
if owner_dataset is None:
self._raise_error(StandardError, "Cannot find variable '%s'\nin either dataset or in the interaction set." %
variable_name.get_expression())
owner_dataset.compute_variables([variable_name], dataset_pool, resources=resources, quiet=True)
new_version = self.add_attribute(data = owner_dataset.get_attribute_by_index(variable_name, index),
name = variable_name, metadata = AttributeType.COMPUTED)
attribute_box = owner_dataset._get_attribute_box(variable_name)
variable = attribute_box.get_variable_instance()
my_attribute_box = self._get_attribute_box(variable_name)
my_attribute_box.set_variable_instance(variable)
return new_version
def get_owner_dataset_and_index(self, dataset_name):
if dataset_name == self.dataset1.get_dataset_name():
return (self.dataset1, self.get_2d_index_of_dataset1())
elif dataset_name == self.dataset2.get_dataset_name():
return (self.dataset2, self.get_2d_index())
return (None, None)
    def are_dependent_variables_up_to_date(self, variable_name, version):
        """ Return True if the version of this variable corresponds to versions of all
        dependent variables, otherwise False. That is, if any of the dependent variables
        must be recomputed, the method returns False.
        """
        short_name = variable_name.get_alias()
        # Primary attributes carry their own version; no dependencies to walk.
        if short_name in self.get_primary_attribute_names():
            return self.is_version(short_name, version)
        dataset_name = variable_name.get_dataset_name()
        owner_name = variable_name.get_dataset_name()
        # Route the check to whichever component dataset owns the variable;
        # variables owned by neither belong to this interaction set itself.
        if owner_name == self.dataset1.get_dataset_name():
            owner_dataset = self.dataset1
        elif owner_name == self.dataset2.get_dataset_name():
            owner_dataset = self.dataset2
        else:
            owner_dataset = self
        if not(dataset_name == owner_dataset.get_dataset_name()):
            self._raise_mismatch_dataset_name_error(variable_name)
        if owner_dataset is self:
            attribute_box = owner_dataset._get_attribute_box(variable_name)
            if attribute_box is None:
                # Never computed here, so it cannot be up to date.
                return False
            variable = attribute_box.get_variable_instance()
            # res is a collection of per-dependency booleans; up to date only
            # when none of them is False.
            res = variable.are_dependent_variables_up_to_date(version)
            return not(False in res)
        # Delegate to the owning component dataset.
        return owner_dataset.are_dependent_variables_up_to_date(variable_name, version)
def _prepare_dataset_pool_for_variable(self, dataset_pool=None, resources=None):
dataset_pool, compute_resources = Dataset._prepare_dataset_pool_for_variable(self, dataset_pool, resources)
dataset1_name = "dataset1"
dataset2_name = "dataset2"
dataset1 = self.get_dataset(1)
dataset2 = self.get_dataset(2)
if dataset1 <> None:
dataset1_name=dataset1.get_dataset_name()
if dataset2 <> None:
dataset2_name=dataset2.get_dataset_name()
dataset_pool.add_datasets_if_not_included({dataset1_name: dataset1, dataset2_name: dataset2})
return dataset_pool, compute_resources
    def get_n(self):
        """Return size of dataset 1 (the first interaction dimension).
        """
        return self.dataset1.size()
    def get_m(self):
        """Return size of dataset 2 (the second interaction dimension).
        """
        return self.dataset2.size()
def get_reduced_n(self):
if self.index1 == None:
return self.get_n()
if isinstance(self.index1, ndarray):
return self.index1.shape[0]
return self.get_n()
def get_reduced_m(self):
if self.index2 == None:
return self.get_m()
if isinstance(self.index2, ndarray):
if self.index2.ndim == 1:
return self.index2.shape[0]
else:
return self.index2.shape[1]
return self.get_m()
    def size(self):
        """Return [(reduced_n, reduced_m), (n, m)]: the index-restricted
        shape followed by the full shape of the interaction set."""
        return [(self.get_reduced_n(), self.get_reduced_m()), (self.get_n(), self.get_m())]
def get_dataset(self, nr):
if (nr == 1):
return self.dataset1
if (nr == 2):
return self.dataset2
return None
def get_dataset_named(self, name):
if name==self.dataset1.get_dataset_name():
return self.dataset1
if name==self.dataset2.get_dataset_name():
return self.dataset2
raise ValueError, 'trying to get an interaction set component named %s but it does not exist' % name
def get_index(self, nr):
if (nr == 1):
return self.index1
if (nr == 2):
| return self.index2
return None
def attribute_sum(self, name):
"""Return the sum of values of the given attribute.
"""
return (ma.ravel(self.get_attribute(name))).sum()
def attribute_average(self, name):
"""Return the value of the given attribute averaged over the dataset.
"""
return ma.average(ma.ravel(self.get_attribute(name)))
def summary(self, names, resources=None):
| """Print a marginal summary of the attributes given in the list 'names'.
"""
print "Summary\t\tsum\t\taverage"
print "------------------------------------------------"
if not isinstance(names,list):
names = [names]
for item in names:
if not (item.get_alias() in self.get_attribute_names()):
self.compute_variables([item], resources=resources)
print item + "\t" + str(self.attribute_sum(item.alias))\
+ "\t" + str(round(self.attribute_average(item.get_alias(),5)))
def get_2d_dataset_attribute(self, name):
""" Return a 2D array of the attribute given by 'name'. It is assumed
to be an attribute of dataset2.
The method should serve the purpose of preparing 1D arrays for computing
intraction operations (between dataset1 and dataset2) by transfering them to the corresponding 2D array.
The resulting array is of size n x m, where m is either the attribute length of dataset2,
or, if index2 is a 1D array, its length, or, if index2 is a 2D array,
the number of columns. n is size of dataset1 or of index1 if given.
If index2 is None, all values of the given attribute are repeated n times.
"""
dataset = self.get_dataset(2)
index = self.get_2d_index()
return dataset.get_attribute_by_index(name, index)
def get_2d_index(self):
n = self.get_reduced_n()
m = self.get_reduced_m()
if self.index2 == None:
index = indices((n,m))[1]
elif isinstance(self.index2, ndarray):
if self.index2.ndim == 1: # one-dim array
index = repeat(reshape(self.index2,(1,self.index2.shape[0])), n, 0)
else:
index = self.index2
else:
self._raise_error(StandardError, "'index2' has incompatible type. It should be a numpy array or None.")
if (index.shape[0] <> n) or (index.shape[1] <> m):
self._raise_error(StandardError, "'index2' has wrong dimensions.") |
from django.apps import AppConfig
|
class JcvrbaseappConfig(AppConfig):
    """Django application configuration for the jcvrbaseapp app."""
    name = 'jcvrbaseapp'  # dotted module path used by Django's app registry
|
from common.utility.utils import FileUtils
default_resource_path = '/Users/Fernando/Develop/downloader'
def get_image(image_hash, resource_path=default_resource_path):
    """
    Download a huaban image by image hash code, in both the original and
    the 236px-wide sizes.
    Such as get_image('3058ff7398b8b725f436c6c7d56f60447468034d2347b-fGd8hd')
    :param image_hash: Image hash code.
    :param resource_path: Directory to save images under; defaults to
        default_resource_path so existing callers are unaffected.
    :return: None
    """
    # Download normal auto size image.
    url_normal = f'http://img.hb.aicdn.com/{image_hash}'
    FileUtils.save_file(url_normal, f'{resource_path}/normal/{image_hash}.jpg')
    # Download 236px width size image.
    url_fw236 = f'http://img.hb.aicdn.com/{image_hash}_fw236'
    FileUtils.save_file(url_fw236, f'{resource_path}/fw236/{image_hash}.jpg')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_yadi
----------------------------------
T | ests for `yadi` module.
"""
import unittest
from yadi | import yadi
class TestYadi(unittest.TestCase):
    """Placeholder test case for the yadi module."""
    def setUp(self):
        # No fixtures required yet.
        pass
    def test_something(self):
        # TODO: add real assertions against yadi's public API.
        pass
    def tearDown(self):
        pass
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
# File created February 29, 2012
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "William Walters"
__email__ = "William.A.Walters@colorado.edu"
from os.path import join, basename
from skbio.parse.sequences import parse_fasta
from skbio.sequence import DNA
from qiime.split_libraries import local_align_primer_seq
from qiime.check_id_map import process_id_map
def get_rev_primer_seqs(mapping_fp):
    """ Parses mapping file to get dictionary of SampleID:Rev primer

    Returns {SampleID: [reverse-complemented primer, ...]}; primers are
    reverse-complemented so they can be aligned against the read direction.
    Raises ValueError on duplicate SampleIDs or invalid primer sequences and
    KeyError when the mapping file lacks a 'ReversePrimer' column.

    mapping_fp: mapping filepath
    """
    hds, mapping_data, run_description, errors, warnings = \
        process_id_map(mapping_fp, has_barcodes=False,
                       disable_primer_check=True)
    if errors:
        for curr_err in errors:
            if curr_err.startswith("Duplicate SampleID"):
                raise ValueError('Errors were found with mapping file, ' +
                                 'please run validate_mapping_file.py to ' +
                                 'identify problems.')
    # create dict of dicts with SampleID:{each header:mapping data}
    id_map = {}
    for curr_data in mapping_data:
        id_map[curr_data[0]] = {}
    for header in range(len(hds)):
        for curr_data in mapping_data:
            id_map[curr_data[0]][hds[header]] = curr_data[header]
    reverse_primers = {}
    for curr_id in id_map.keys():
        try:
            # Multiple comma-separated primers per sample are supported.
            reverse_primers[curr_id] =\
                [str(DNA(curr_rev_primer).rc()) for curr_rev_primer in
                 id_map[curr_id]['ReversePrimer'].split(',')]
        except KeyError:
            raise KeyError("Reverse primer not found in mapping file, " +
                           "please include a 'ReversePrimer' column.")
    # Check for valid reverse primers
    # Will have been detected as warnings from mapping file
    # NOTE(review): this scans `errors`, though the comment above says the
    # problem surfaces as warnings — confirm the intended source list.
    for curr_err in errors:
        if curr_err.startswith("Invalid DNA sequence detected"):
            raise ValueError("Problems found with reverse primers, please " +
                             "check mapping file with validate_mapping_file.py")
    return reverse_primers
def get_output_filepaths(output_dir,
                         fasta_fp):
    """ Returns output fasta filepath and log filepath

    fasta_fp: fasta filepath
    output_dir: output directory
    """
    fasta_extensions = ['.fa', '.fasta', '.fna']
    curr_fasta_out = basename(fasta_fp)
    # Strip a recognized extension from the END of the name only. The old
    # str.replace() approach removed the substring anywhere, so stripping
    # '.fa' mangled '.fasta' files ('seqs.fasta' -> 'seqssta').
    for fasta_extension in fasta_extensions:
        if curr_fasta_out.endswith(fasta_extension):
            curr_fasta_out = curr_fasta_out[:-len(fasta_extension)]
            break
    curr_fasta_out += "_rev_primer_truncated.fna"
    output_fp = join(output_dir, curr_fasta_out)
    log_fp = join(output_dir, "rev_primer_truncation.log")
    return output_fp, log_fp
def truncate_rev_primers(fasta_f,
                         output_fp,
                         reverse_primers,
                         truncate_option='truncate_only',
                         primer_mismatches=2):
    """ Locally aligns reverse primers, truncates or removes seqs

    Returns a dict of counters summarizing what happened, for the log file.

    fasta_f: open file of fasta file
    output_fp: open filepath to write truncated fasta to
    reverse_primers: dictionary of SampleID:reverse primer sequence
    truncate_option: either truncate_only, truncate_remove
    primer_mismatches: number of allowed primer mismatches
    """
    log_data = {
        'sample_id_not_found': 0,
        'reverse_primer_not_found': 0,
        'total_seqs': 0,
        'seqs_written': 0
    }
    for label, seq in parse_fasta(fasta_f):
        curr_label = label.split('_')[0]
        log_data['total_seqs'] += 1
        # Check fasta label for valid SampleID, if not found, just write seq
        try:
            curr_rev_primer = reverse_primers[curr_label]
        except KeyError:
            log_data['sample_id_not_found'] += 1
            output_fp.write('>%s\n%s\n' % (label, seq))
            log_data['seqs_written'] += 1
            continue
        # Align every candidate primer and keep the best (fewest-mismatch)
        # hit. NOTE(review): ties on mismatch count keep the last-seen
        # primer's index — confirm that is acceptable.
        mm_tests = {}
        for rev_primer in curr_rev_primer:
            rev_primer_mm, rev_primer_index =\
                local_align_primer_seq(rev_primer, seq)
            mm_tests[rev_primer_mm] = rev_primer_index
        rev_primer_mm = min(mm_tests.keys())
        rev_primer_index = mm_tests[rev_primer_mm]
        if rev_primer_mm > primer_mismatches:
            # Primer not found: drop the sequence under truncate_remove,
            # otherwise write it through untruncated.
            if truncate_option == "truncate_remove":
                log_data['reverse_primer_not_found'] += 1
            else:
                log_data['reverse_primer_not_found'] += 1
                log_data['seqs_written'] += 1
                output_fp.write('>%s\n%s\n' % (label, seq))
        else:
            # Check for zero seq length after truncation, will not write seq
            if rev_primer_index > 0:
                log_data['seqs_written'] += 1
                output_fp.write('>%s\n%s\n' % (label, seq[0:rev_primer_index]))
    return log_data
def write_log_file(log_data,
                   log_f):
    """ Writes log file

    log_data: dictionary of details about reverse primer removal
    log_f: open filepath to write log details
    """
    # Render every line up front, then write them in order.
    lines = [
        "Details for removal of reverse primers\n",
        "Original fasta filepath: %s\n" % log_data['fasta_fp'],
        "Total seqs in fasta: %d\n" % log_data['total_seqs'],
        "Mapping filepath: %s\n" % log_data['mapping_fp'],
        "Truncation option: %s\n" % log_data['truncate_option'],
        "Mismatches allowed: %d\n" % log_data['primer_mismatches'],
        "Total seqs written: %d\n" % log_data['seqs_written'],
        "SampleIDs not found: %d\n" % log_data['sample_id_not_found'],
        "Reverse primers not found: %d\n" % log_data['reverse_primer_not_found'],
    ]
    for line in lines:
        log_f.write(line)
def truncate_reverse_primer(fasta_fp,
                            mapping_fp,
                            output_dir=".",
                            truncate_option='truncate_only',
                            primer_mismatches=2):
    """ Main program function for finding, removing reverse primer seqs

    Reads the reverse primers from the mapping file, truncates them out of
    every sequence in the fasta file, and writes the result plus a log.

    fasta_fp: fasta filepath
    mapping_fp: mapping filepath
    output_dir: output directory
    truncate_option: truncation option, either truncate_only, truncate_remove
    primer_mismatches: Number is mismatches allowed in reverse primer"""
    # "U" = universal-newline mode (legacy Python 2 idiom).
    reverse_primers = get_rev_primer_seqs(open(mapping_fp, "U"))
    output_fp, log_fp = get_output_filepaths(output_dir, fasta_fp)
    log_data = truncate_rev_primers(open(fasta_fp, "U"),
                                    open(
                                        output_fp, "w"), reverse_primers, truncate_option,
                                    primer_mismatches)
    # Record the run parameters alongside the truncation counters.
    log_data['fasta_fp'] = fasta_fp
    log_data['mapping_fp'] = mapping_fp
    log_data['truncate_option'] = truncate_option
    log_data['primer_mismatches'] = primer_mismatches
    write_log_file(log_data, open(log_fp, "w"))
|
# -*- coding: utf-8 -*-
"""
Unit tests for student optouts from course email
"""
import json
from mock import patch, Mock
from django.core import mail
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.conf import settings
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from student.models import CourseEnrollment
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
class TestOptoutCourseEmails(ModuleStoreTestCase):
    """
    Test that optouts are referenced in sending course email.
    """
    def setUp(self):
        # Create a course (with a non-ASCII title to exercise encoding), an
        # instructor, and one enrolled student who starts logged in.
        super(TestOptoutCourseEmails, self).setUp()
        course_title = u"ẗëṡẗ title イ乇丂イ ᄊ乇丂丂ムg乇 キo尺 ムレレ тэѕт мэѕѕаБэ"
        self.course = CourseFactory.create(display_name=course_title)
        self.instructor = AdminFactory.create()
        self.student = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        # load initial content (since we don't run migrations as part of tests):
        call_command("loaddata", "course_email_template.json")
        self.client.login(username=self.student.username, password="test")
        self.send_mail_url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.success_content = {
            'course_id': self.course.id.to_deprecated_string(),
            'success': True,
        }
    def navigate_to_email_view(self):
        """Navigate to the instructor dash's email view"""
        # Pull up email view on instructor dashboard
        url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        email_section = '<div class="vert-left send-email" id="section-send-email">'
        # If this fails, it is likely because ENABLE_INSTRUCTOR_EMAIL is set to False
        self.assertTrue(email_section in response.content)
    @patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
    def test_optout_course(self):
        """
        Make sure student does not receive course email after opting out.
        """
        url = reverse('change_email_settings')
        # This is a checkbox, so on the post of opting out (that is, an Un-check of the box),
        # the Post that is sent will not contain 'receive_emails'
        response = self.client.post(url, {'course_id': self.course.id.to_deprecated_string()})
        self.assertEquals(json.loads(response.content), {'success': True})
        # Switch to the instructor and send a course-wide email.
        self.client.logout()
        self.client.login(username=self.instructor.username, password="test")
        self.navigate_to_email_view()
        test_email = {
            'action': 'Send email',
            'send_to': 'all',
            'subject': 'test subject for all',
            'message': 'test message for all'
        }
        response = self.client.post(self.send_mail_url, test_email)
        self.assertEquals(json.loads(response.content), self.success_content)
        # Assert that self.student.email not in mail.to, outbox should be empty
        self.assertEqual(len(mail.outbox), 0)
    @patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
    def test_optin_course(self):
        """
        Make sure student receives course email after opting in.
        """
        url = reverse('change_email_settings')
        # Checkbox checked: 'receive_emails' present means opt back in.
        response = self.client.post(url, {'course_id': self.course.id.to_deprecated_string(), 'receive_emails': 'on'})
        self.assertEquals(json.loads(response.content), {'success': True})
        self.client.logout()
        self.assertTrue(CourseEnrollment.is_enrolled(self.student, self.course.id))
        # Switch to the instructor and send a course-wide email.
        self.client.login(username=self.instructor.username, password="test")
        self.navigate_to_email_view()
        test_email = {
            'action': 'Send email',
            'send_to': 'all',
            'subject': 'test subject for all',
            'message': 'test message for all'
        }
        response = self.client.post(self.send_mail_url, test_email)
        self.assertEquals(json.loads(response.content), self.success_content)
        # Assert that self.student.email in mail.to
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(len(mail.outbox[0].to), 1)
        self.assertEquals(mail.outbox[0].to[0], self.student.email)
|
"""Represent the :class:`.Trophy` class."""
from typing import TYPE_CHECKING, Any, Dict, Union
from .base import PRAWBase
if TYPE_CHECKING: # pragma: no cover
import praw
class Trophy(PRAWBase):
    """Represent a trophy.

    End users should not instantiate this class directly. :meth:`.Redditor.trophies`
    can be used to get a list of the redditor's trophies.

    .. include:: ../../typical_attributes.rst

    =============== ===================================================
    Attribute       Description
    =============== ===================================================
    ``award_id``    The ID of the trophy (sometimes ``None``).
    ``description`` The description of the trophy (sometimes ``None``).
    ``icon_40``     The URL of a 41x41 px icon for the trophy.
    ``icon_70``     The URL of a 71x71 px icon for the trophy.
    ``name``        The name of the trophy.
    ``url``         A relevant URL (sometimes ``None``).
    =============== ===================================================

    """

    def __init__(self, reddit: "praw.Reddit", _data: Dict[str, Any]):
        """Initialize a :class:`.Trophy` instance.

        :param reddit: An instance of :class:`.Reddit`.
        :param _data: The structured data, assumed to be a dict and key ``"name"``
            must be provided.
        """
        assert isinstance(_data, dict) and "name" in _data
        super().__init__(reddit, _data=_data)

    def __eq__(self, other: Union["Trophy", Any]) -> bool:
        """Check if two Trophies are equal."""
        if not isinstance(other, self.__class__):
            # Fall back to the base-class comparison for foreign types.
            return super().__eq__(other)
        return self.name == other.name

    def __str__(self) -> str:
        """Return the name of the trophy."""
        return self.name  # pylint: disable=no-member

    def __repr__(self) -> str:
        """Return an object initialization representation of the instance."""
        return f"{self.__class__.__name__}(name={self.name!r})"
|
ourse_id = entry.course_id
task_input = json.loads(entry.task_input)
module_state_key = task_input.get('problem_url')
student_ident = task_input['student'] if 'student' in task_input else None
fmt = 'Starting to update problem modules as task "{task_id}": course "{course_id}" problem "{state_key}": nothing {action} yet'
TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, action=action_name))
# add task_id to xmodule_instance_args, so that it can be output with tracking info:
if xmodule_instance_args is not None:
xmodule_instance_args['task_id'] = task_id
# Now that we have an entry we can try to catch failures:
task_progress = None
try:
# Check that the task_id submitted in the InstructorTask matches the current task
# that is running.
request_task_id = _get_current_task().request.id
if task_id != request_task_id:
fmt = 'Requested task "{task_id}" did not match actual task "{actual_id}"'
message = fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, actual_id=request_task_id)
TASK_LOG.error(message)
raise UpdateProblemModuleStateError(message)
# Now do the work:
with dog_stats_api.timer('instructor_tasks.module.time.overall', tags=['action:{name}'.format(name=action_name)]):
task_progress = _perform_module_state_update(course_id, module_state_key, student_ident, update_fcn,
action_name, filter_fcn, xmodule_instance_args)
# If we get here, we assume we've succeeded, so update the InstructorTask entry in anticipation.
# But we do this within the try, in case creating the task_output causes an exception to be
# raised.
entry.task_output = InstructorTask.create_output_for_success(task_progress)
entry.task_state = SUCCESS
entry.save_now()
except Exception:
# try to write out the failure to the entry before failing
_, exception, traceback = exc_info()
traceback_string = format_exc(traceback) if traceback is not None else ''
TASK_LOG.warning("background task (%s) failed: %s %s", task_id, exception, traceback_string)
entry.task_output = InstructorTask.create_output_for_failure(exception, traceback_string)
entry.task_state = FAILURE
entry.save_now()
raise
# log and exit, returning task_progress info as task result:
fmt = 'Finishing task "{task_id}": course "{course_id}" problem "{state_key}": final: {progress}'
TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, progress=task_progress))
return task_progress
def _get_task_id_from_xmodule_args(xmodule_instance_args):
    """Return the 'task_id' entry of `xmodule_instance_args`, falling back to UNKNOWN_TASK_ID."""
    if xmodule_instance_args is None:
        return UNKNOWN_TASK_ID
    return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID)
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
                                  grade_bucket_type=None):
    """
    Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.

    `xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
    These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
    the need for a Request object when instantiating an xmodule instance.
    """
    # reconstitute the problem's corresponding XModule:
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)

    # Pull request-related tracking info out of the args passthrough; both
    # values default sensibly when no args were supplied.
    if xmodule_instance_args is None:
        request_info = {}
        xqueue_callback_url_prefix = ''
    else:
        request_info = xmodule_instance_args.get('request_info', {})
        xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '')

    # Supplement the request info with task-specific identification.
    task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}

    def make_track_function():
        '''
        Make a tracking function that logs what happened.

        For insertion into ModuleSystem, and used by CapaModule, which will
        provide the event_type (as string) and event (as dict) as arguments.
        The request_info and task_info (and page) are provided here.
        '''
        return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')

    return get_module_for_descriptor_internal(student, module_descriptor, field_data_cache, course_id,
                                              make_track_function(), xqueue_callback_url_prefix,
                                              grade_bucket_type=grade_bucket_type)
@transaction.autocommit
def rescore_problem_module_state(module_descriptor, student_module, xmodule_instance_args=None):
    '''
    Takes an XModule descriptor and a corresponding StudentModule object, and
    performs rescoring on the student's problem submission.

    Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
    In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
    or if the module doesn't support rescoring.

    Returns True if problem was successfully rescored for the given student, and False
    if problem encountered some kind of error in rescoring.
    '''
    # unpack the StudentModule:
    course_id = student_module.course_id
    student = student_module.student
    module_state_key = student_module.module_state_key

    instance = _get_module_instance_for_task(
        course_id, student, module_descriptor, xmodule_instance_args, grade_bucket_type='rescore')

    if instance is None:
        # Either permissions just changed, or someone is trying to be clever
        # and load something they shouldn't have access to.
        msg = "No module {loc} for student {student}--access denied?".format(
            loc=module_state_key, student=student)
        TASK_LOG.debug(msg)
        raise UpdateProblemModuleStateError(msg)

    if not hasattr(instance, 'rescore_problem'):
        # This should also not happen, since it should be already checked in the caller,
        # but check here to be sure.
        raise UpdateProblemModuleStateError("Specified problem does not support rescoring.")

    result = instance.rescore_problem()
    if 'success' not in result:
        # don't consider these fatal, but false means that the individual call didn't complete:
        TASK_LOG.warning(u"error processing rescore call for course {course}, problem {loc} and student {student}: "
                         "unexpected response {msg}".format(msg=result, course=course_id, loc=module_state_key, student=student))
        return False
    if result['success'] not in ['correct', 'incorrect']:
        TASK_LOG.warning(u"error processing rescore call for course {course}, problem {loc} and student {student}: "
                         "{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
        return False
    TASK_LOG.debug(u"successfully processed rescore call for course {course}, problem {loc} and student {student}: "
                   "{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
    return True
@transaction.autocommit
def reset_attempts_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
"""
Resets problem attempts to zero for specified `student_module`.
Always returns true, indicating success, if it doesn't raise an exception due to database error.
"""
problem_state = json.loads(student_module.state) if student_module.state else {}
if 'attempts' in problem_state:
|
#!/usr/bin/python
from pygame import mixer
from threading import Timer
from random import randint
from xml.etree import ElementTree as XmlEt
import argparse
from utils import LOGGER
from sounds import SoundPool
# @brief constrain - constrains x to interval [mi, ma]
def constrain(x, mi, ma):
    # clamp from below first, then from above (same order as the
    # original min(ma, max(mi, x)) expression)
    lower_clamped = max(mi, x)
    return min(ma, lower_clamped)
# @class AmbientSound
# @brief wrapper class around class sounds.Sound, handles updates
#        of volume and stores ambient sound configuration
class AmbientSound(object):
    # @brief constructor
    # @param[in] sound - the sound object to wrap around
    # @param[in] base - the base volume of the sound
    # @param[in] drift - the volume drift of the sound
    # @param[in] rate_min - the minimal update rate of the sound
    # @param[in] rate_max - the maximal update rate of the sound
    def __init__(self, sound, base, drift, rate_min, rate_max):
        self.sound = sound
        self.base = base
        self.drift = drift
        self.rate_min = rate_min
        self.rate_max = rate_max
        # random initial sign (+1 or -1) so newRate() starts in a random direction
        self.rate = 1 - 2 * randint(0, 1)
        # reject configurations whose volume range would leave [0.0, 1.0]
        if base - drift < 0.0 or base + drift > 1.0:
            raise ValueError("Volume base +/- drift exceeds boundaries [0.0,1.0]")
        # pick the first real rate and start at the base volume
        self.newRate()
        self.sound.setVolume(self.base)

    # @brief newRate - sets new random rate with opposite sign than before
    def newRate(self):
        magnitude = float(randint(self.rate_min, self.rate_max))
        self.rate = -magnitude if self.rate > 0 else magnitude

    # @brief adaptVolume - adapts the sound volume by 'drift/rate'
    def adaptVolume(self):
        vol = self.sound.getVolume() + self.drift / self.rate
        min_vol = self.base - self.drift
        max_vol = self.base + self.drift
        # on reaching either bound, clamp and reverse direction
        if not (min_vol < vol < max_vol):
            vol = constrain(vol, min_vol, max_vol)
            self.newRate()
        self.sound.setVolume(vol)

    # @brief play - starts sound playing
    def play(self):
        self.sound.play()

    # @brief stop - stops sound playing
    def stop(self):
        self.sound.stop()
# @class Ambient
# @brief an ambient consisting of different sound files
class Ambient(object):
    # @brief constructor
    # @param[in] configfile - the configuration file of the ambient
    # @param[in] spool - the sound pool the ambient should use; a fresh
    #                    SoundPool is created when omitted
    #
    # BUG FIX: the default used to be 'spool=SoundPool()'. Python evaluates
    # default arguments once at definition time, so every Ambient created
    # without an explicit pool silently shared one SoundPool instance.
    def __init__(self, configfile, spool=None):
        if spool is None:
            spool = SoundPool()
        # load configuration file
        with open(configfile, "r") as f:
            data = f.read()
        root = XmlEt.fromstring(data).find("Ambient")
        # set the name of the ambient
        self.name = root.get("name")
        LOGGER.logInfo("Ambient '{}'".format(self.name))
        # set the update rate from the volatility (inverse), capped at 5s
        self.urate = 1.0 / float(root.get("volatility"))
        self.urate = constrain(self.urate, 0.0, 5.0)
        # flags: sounds instantiated yet / playback currently active
        self.loaded = False
        self.running = False
        # store only the (file, base, drift) configuration here; actual
        # sound objects are created lazily in __load() on first start()
        self.sounds = list()
        self.spool = spool
        for soundcfg in root.findall("Sound"):
            sfile = soundcfg.get("file")
            base = float(soundcfg.get("base"))
            drift = float(soundcfg.get("drift"))
            self.sounds.append((sfile, base, drift))
            LOGGER.logInfo("'{}': [{}] +/- ({})".format(sfile, base, drift))

    # @brief __load - loads the actual ambient, delayed until it is started
    def __load(self):
        sounds = list()
        for soundcfg in self.sounds:
            sfile, base, drift = soundcfg
            # load sound from sound pool and initialize it
            sound = self.spool.get(sfile)
            sounds.append(AmbientSound(sound, base, drift, 4, 16))
        # reset sounds, original only stored configuration
        self.sounds = sounds
        self.loaded = True

    # @brief __update - internal update function, adapts the volumes of all
    #        sounds
    #
    # Note: If ambient is running this function schedules itself with
    #       period 'self.urate'
    #
    def __update(self):
        if not self.running:
            return
        LOGGER.logDebug("'{}' update".format(self.name))
        for sound in self.sounds:
            sound.adaptVolume()
        Timer(self.urate, self.__update).start()

    # @brief getName - returns the configured name of the ambient
    def getName(self):
        return self.name

    # @brief start - starts playback of ambient
    def start(self):
        if not self.loaded:
            self.__load()
        LOGGER.logInfo("'{}' start".format(self.name))
        for sound in self.sounds:
            sound.play()
        # indicate start
        self.running = True
        self.__update()

    # @brief stop - stops playback of ambient
    def stop(self):
        if not self.loaded:
            return
        LOGGER.logInfo("'{}' stop".format(self.name))
        for sound in self.sounds:
            sound.stop()
        # indicate stop
        self.running = False
# @class AmbientControl
# @brief Handles a set of configured ambients
class AmbientControl(object):
    # @brief constructor
    # @param[in] configfile - a pyAmbient configuration file
    def __init__(self, configfile):
        # BUG FIX: mixer.get_init() returns a (frequency, format, channels)
        # tuple when initialized and None otherwise; comparing that result
        # with '== True' was always False, so this guard never fired.
        if mixer.get_init() is not None:
            raise RuntimeError("pygame.mixer already initialized, abort")
        LOGGER.logDebug("initialize pygame.mixer")
        # set parameters of mixer before init, TODO check values again
        mixer.pre_init(44100, -16, 2, 2048)
        mixer.init()
        # load configuration file
        with open(configfile, "r") as f:
            data = f.read()
        root = XmlEt.fromstring(data)
        # setup ambient dictionary, keyed by the configured ambient ID
        self.ambients = dict()
        for elem in root.findall("AmbientConfig"):
            self.ambients[elem.get("id")] = Ambient(elem.get("file"))
        # set current ambient to none
        self.ambient = None

    # @brief getAmbients - get the configured ambients
    def getAmbients(self):
        return self.ambients

    # @brief get - get the current ambient, None if none selected
    def get(self):
        return self.ambient

    # @brief switch - switches to ambient with given ID
    # @param[in] ambient_id - ID of the ambient to switch to
    def switch(self, ambient_id):
        # stop whatever is currently playing before switching
        if self.ambient is not None:
            self.ambient.stop()
        # switch to new ambient
        self.ambient = self.ambients[ambient_id]
        LOGGER.logInfo("Switched to ambient '{}'".format(self.ambient.getName()))
if __name__ == "__main__":
    # command-line entry point: load a config, pick an ambient, play it
    parser = argparse.ArgumentParser(description="pyAmbient")
    parser.add_argument("-c", "--config", dest="config", required=True,
                        help="the pyAmbient configuration file to load")
    parser.add_argument("-a", "--ambient", dest="ambient", required=True,
                        help="the ambient ID of the ambient to start")
    parser.add_argument("-d", "--debug", dest="debug", required=False,
                        help="if to log debug information", default=False, action="store_true")
    args = parser.parse_args()
    # log level 0 = debug, 1 = info
    LOGGER.setLevel(0 if args.debug else 1)
    controller = AmbientControl(args.config)
    controller.switch(args.ambient)
    controller.get().start()
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and l | imitations
# under the License.
#
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from tacker._i18n import _
from tacker.agent.linux import utils as linux_utils
from tacker.common import log
from tacker.vnfm.monitor_drivers import abstract_driver
LOG = logging.getLogger(__name_ | _)
# Tunables for the ICMP health check, registered under the
# [monitor_ping] configuration group below.
OPTS = [
    cfg.IntOpt('count', default=5,
               help=_('Number of ICMP packets to send')),
    cfg.FloatOpt('timeout', default=5,
                 help=_('Number of seconds to wait for a response')),
    cfg.FloatOpt('interval', default=1,
                 help=_('Number of seconds to wait between packets')),
    cfg.IntOpt('retry', default=1,
               help=_('Number of ping retries'))
]
cfg.CONF.register_opts(OPTS, 'monitor_ping')
def config_opts():
    """Return this module's option list grouped under 'monitor_ping' (for config generators)."""
    return [('monitor_ping', OPTS)]
class VNFMonitorPing(abstract_driver.VNFMonitorAbstractDriver):
    """Tacker VNF monitor driver that health-checks a VNF via ICMP ping."""

    def get_type(self):
        """Return the driver type token referenced by monitoring policies."""
        return 'ping'

    def get_name(self):
        """Return the driver's registered name."""
        return 'ping'

    def get_description(self):
        """Return a human-readable driver description."""
        return 'Tacker VNFMonitor Ping Driver'

    def monitor_url(self, plugin, context, vnf):
        """Return the VNF's 'monitor_url' value, or '' when absent."""
        LOG.debug('monitor_url %s', vnf)
        return vnf.get('monitor_url', '')

    def _is_pingable(self, mgmt_ip="", count=None, timeout=None,
                     interval=None, retry=None, **kwargs):
        """Check whether *mgmt_ip* is reachable by pinging.

        Uses linux utils to execute the ping (ICMP ECHO) command; ping6 is
        used automatically for IPv6 addresses. Any parameter left unset
        falls back to the corresponding [monitor_ping] config option
        (defaults: count=5, timeout=5, interval=1, retry=1).

        :param mgmt_ip: IP address to check
        :return: True on a successful ping run, the string 'failure' once
            all retries are exhausted.
        """
        cmd_ping = 'ping6' if netaddr.valid_ipv6(mgmt_ip) else 'ping'
        if not count:
            count = cfg.CONF.monitor_ping.count
        if not timeout:
            timeout = cfg.CONF.monitor_ping.timeout
        if not interval:
            interval = cfg.CONF.monitor_ping.interval
        if not retry:
            retry = cfg.CONF.monitor_ping.retry
        # Stringify numeric options so the exec helper always receives a
        # uniform argv of strings (count/timeout/interval arrive as
        # int/float from the config defaults above).
        ping_cmd = [cmd_ping,
                    '-c', str(count),
                    '-W', str(timeout),
                    '-i', str(interval),
                    mgmt_ip]
        for _attempt in range(int(retry)):
            try:
                linux_utils.execute(ping_cmd, check_exit_code=True)
                return True
            except RuntimeError:
                LOG.warning("Cannot ping ip address: %s", mgmt_ip)
        return 'failure'

    @log.log
    def monitor_call(self, vnf, kwargs):
        """Run the ping check for *vnf*; no-op when no mgmt IP is known."""
        if not kwargs['mgmt_ip']:
            return
        return self._is_pingable(**kwargs)
|
14.10f}'.format(atom[2]),
'{:14.10f}'.format(atom[3]),
'\n'
)))
f.write('\n')
for atom in elements_in_structure:
f.write('{0} {1}\n'.format(atom,co.CHELPG_RADII[atom]))
f.write('\n')
def run_gaussian(self):
"""
This runs the gaussian job. I have this labeled differently than our
typical "run" functions because I don't want to use this function until
after we have calculated and collected partial charge data.
"""
logger.log(5, 'RUNNING: {}'.format(self.filename))
self._index_output_log = []
current_directory = os.getcwd()
os.chdir(self.directory)
if os.path.isfile(self.name_log):
os.remove(self.name_log)
if os.path.isfile(self.name_chk):
os.remove(self.name_chk)
sp.call('g09 {}'.format(self.filename), shell=True)
os.chdir(current_directory)
class GaussFormChk(File):
    """
    Used to retrieve data from Gaussian formatted checkpoint files.

    Parses atomic numbers, masses and coordinates, the Cartesian gradient
    block, and the lower-triangular force-constant matrix, which is
    expanded into a full symmetric Hessian in MacroModel units.
    """
    def __init__(self, path):
        super(GaussFormChk, self).__init__(path)
        self.atoms = []       # filled in by read_self()
        # Not sure these should really be called the eigenvalues.
        self.evals = None
        self.low_tri = None   # flat lower triangle of the Hessian
        self._hess = None     # cached full Hessian (lazy)
    @property
    def hess(self):
        """Full symmetric Hessian; parses the file on first access."""
        if self._hess is None:
            self.read_self()
        return self._hess
    def read_self(self):
        """Parse atoms, gradient and Hessian out of the checkpoint text."""
        logger.log(5, 'READING: {}'.format(self.filename))
        # BUG FIX: the file handle was previously opened inline and never
        # closed; use a context manager so it is released deterministically.
        with open(self.path, 'r') as handle:
            text = handle.read()
        stuff = re.search(
            'Atomic numbers\s+I\s+N=\s+(?P<num_atoms>\d+)'
            '\n\s+(?P<anums>.*?)'
            'Nuclear charges.*?Current cartesian coordinates.*?\n(?P<coords>.*?)'
            'Force Field'
            '.*?Real atomic weights.*?\n(?P<masses>.*?)'
            'Atom fragment info.*?Cartesian Gradient.*?\n(?P<evals>.*?)'
            'Cartesian Force Constants.*?\n(?P<hess>.*?)'
            'Dipole Moment',
            text, flags=re.DOTALL)
        anums = [int(x) for x in stuff.group('anums').split()]
        masses = [float(x) for x in stuff.group('masses').split()]
        coords = [float(x) for x in stuff.group('coords').split()]
        # regroup the flat coordinate list into (x, y, z) triples
        coords = [coords[i:i+3] for i in range(0, len(coords), 3)]
        for anum, mass, coord in zip(anums, masses, coords):
            self.atoms.append(
                Atom(
                    atomic_num = anum,
                    coords = coord,
                    exact_mass = mass)
            )
        logger.log(5, ' -- Read {} atoms.'.format(len(self.atoms)))
        self.evals = np.array(
            [float(x) for x in stuff.group('evals').split()], dtype=float)
        logger.log(5, ' -- Read {} eigenvectors.'.format(len(self.evals)))
        self.low_tri = np.array(
            [float(x) for x in stuff.group('hess').split()], dtype=float)
        one_dim = len(anums) * 3
        # expand the flat lower triangle into a full symmetric matrix
        self._hess = np.empty([one_dim, one_dim], dtype=float)
        self._hess[np.tril_indices_from(self._hess)] = self.low_tri
        self._hess += np.tril(self._hess, -1).T
        # Convert to MacroModel units.
        self._hess *= co.HESSIAN_CONVERSION
        logger.log(5, ' -- Read {} Hessian.'.format(self._hess.shape))
class GaussLog(File):
"""
Used to retrieve data from Gaussian log files.
If you are extracting frequencies/Hessian data from this file, use
the keyword NoSymmetry when running the Gaussian calculation.
"""
def __init__(self, path):
super(GaussLog, self).__init__(path)
self._evals = None
self._evecs = None
self._structures = None
self._esp_rms = None
@property
def evecs(self):
if self._evecs is None:
self.read_out()
return self._evecs
@property
def evals(self):
if self._evals is None:
self.read_out()
return self._evals
@property
def structures(self):
if self._structures is None:
# self.read_out()
self.read_archive()
return self._structures
@property
def esp_rms(self):
if self._esp_rms is None:
self._esp_rms = -1
self.read_out()
return self._esp_rms
def read_out(self):
"""
Read force constant and eigenvector data from a frequency
calculation.
"""
logger.log(5, 'READING: {}'.format(self.filename))
self._evals = []
self._evecs = []
self._structures = []
force_constants = []
evecs = []
with open(self.path, 'r') as f:
# The keyword "harmonic" shows up before the section we're
# interested in. It can show up multiple times depending on the
# options in the Gaussian .com file.
past_first_harm = False
# High precision mode, turned on by including "freq=hpmodes" in the
# Gaussian .com file.
hpmodes = False
file_iterator = iter(f)
# This while loop breaks when the end of the file is reached, or
# if the high quality modes have been read already.
while True:
try:
line = next(file_iterator)
except:
# End of file.
break
if 'Charges from ESP fit' in line:
pattern = re.compile('RMS=\s+({0})'.format(co.RE_FLOAT))
match = pattern.search(line)
self._esp_rms = float(match.group(1))
# Gathering some geometric information.
elif 'orientation:' in line:
self._structures.append(Structure())
next(file_iterator)
next(file_iterator)
next(file_iterator)
next(file_iterator)
line = next(file_iterator)
while not '---' in line:
cols = line.split()
self._structures[-1].atoms.append(
Atom(atomic_num=int(cols[1]),
x=float(cols[3]),
y=float(cols[4]),
z=float(cols[5])))
line = next(file_iterator)
logger.log(5, ' -- Found {} atoms.'.format(
len(self._structures[-1].atoms)))
elif 'Harmonic' in line:
# The high quality eigenvectors come before the low quality
# ones. If you see "Harmonic" again, it means you're at the
# low quality ones now, so break.
if past_first_harm:
break
else:
past_first_harm = True
elif 'Frequencies' in line:
# We're going to keep reusing these.
# We accumulate sets of eigevectors and eigenvalues, add
# them to self._evecs and self._evals, and then reuse this
# for the next set.
del(force_constants[:])
del(evecs[:])
# Values inside line look like:
# "Frequencies --- xxxx.xxxx xxxx.xxxx"
# That's why we remove the 1st two columns. This is
# consistent with and without "hpmodes".
# For "hpmodes" option, there are 5 of these frequencies.
# Without "hpmodes", there are 3.
# Thus the eigenvectors and eigenvalues will come in sets of
# either 5 or 3.
cols = line.split()
for frequency in map(float, cols[2:]):
# Has 1. or -1. depending on the sign of the frequency.
if frequency < 0.:
force_constants.append(-1.)
else:
|
from unittest.mock import patch
from superdesk.tests import TestCase
from apps.publish.enqueue.enqueu | e_service import EnqueueService
class NoTakesEnqueueTestCase(TestCase):
    """Tests for EnqueueService.resend when no takes packages are involved."""

    def setUp(self):
        super().setUp()
        # minimal fixtures: one product, one digital subscriber, one desk
        self.product_ids = self.app.data.insert("products", [{"name": "all"}])
        self.subscriber_ids = self.app.data.insert(
            "subscribers",
            [{"name": "digi", "subscriber_type": "digital", "is_targetable": True, "products": self.product_ids}],
        )
        self.desk_ids = self.app.data.insert("desks", [{"name": "sports"}])
        self.service = EnqueueService()

    def test_resend_no_takes(self):
        """resend() should forward the doc to subscribers and the content API."""
        doc = {"_id": "test"}
        subscribers = list(self.app.data.find_all("subscribers"))
        subscriber_codes = self.service._get_subscriber_codes(subscribers)
        with patch.object(self.service, "_resend_to_subscribers") as resend, \
                patch.object(self.service, "publish_content_api") as content_api:
            self.service.resend(doc, subscribers)
            resend.assert_called_with(doc, subscribers, subscriber_codes, {})
            content_api.assert_called_with(doc, [])
|
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DI | SCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF | SUCH DAMAGE.
#
''' The setup script is the center of all activity in building,
distributing, and installing modules using the Distutils. The
main purpose of the setup script is to describe your module
distribution to the Distutils, so that the various commands
that operate on your modules do the right thing.
'''
import os
from glob import glob
from setuptools import setup, find_packages
from rphm import __version__, __author__
def find_modules(pkg):
    ''' Find the modules that belong in this package: the package
    directory itself plus every subdirectory beneath it. '''
    found = [pkg]
    for root, subdirs, _files in os.walk(pkg):
        found.extend(os.path.join(root, sub) for sub in subdirs)
    return found
# Install relative to the active virtualenv when present, else system root.
INSTALL_ROOT = os.getenv('VIRTUAL_ENV', '')
# EOS persistent storage location (survives switch reloads).
CONF_PATH = INSTALL_ROOT + '/persist/sys'
INSTALL_REQUIREMENTS = [
    'jsonrpclib'
]
TEST_REQUIREMENTS = [
    'mock'
]
setup(
    name='rphm',
    version=__version__,
    description='EOS extension to generate SNMP traps based on counter thresholds',
    long_description=open('README.md').read(),
    author=__author__,
    author_email='eosplus-dev@arista.com',
    url='http://eos.arista.com',
    license='BSD-3',
    install_requires=INSTALL_REQUIREMENTS,
    tests_require=TEST_REQUIREMENTS,
    packages=find_modules('rphm'),
    scripts=glob('bin/*'),
    data_files=[
        (CONF_PATH, ['conf/rphm.conf'])
    ]
)
|
#!/usr/bin/python
# coding: utf-8
import copy
import json
from lcg import LCG
class Game(object):
    """One contest problem: board geometry, units and the seeds to solve."""

    def __init__(self, json_file):
        super(Game, self).__init__()
        with open(json_file) as f:
            json_data = json.load(f)
        self.ID = json_data["id"]
        self.units = [Unit(json_unit) for json_unit in json_data["units"]]
        self.width = json_data["width"]
        self.height = json_data["height"]
        self.filled = [json_cell2tuple(json_c) for json_c in json_data["filled"]]
        self.sourceLength = json_data["sourceLength"]
        self.sourceSeeds = json_data["sourceSeeds"]
        self.solutions = []

    def solve(self, ss_nr=0):
        """Play every source seed; returns all solutions as a JSON string."""
        for ss in self.sourceSeeds:
            commands = ""
            board = Board(self.width, self.height, self.filled)
            source = self.generate_source(ss, self.sourceLength)
            while not board.is_finished() and source:
                board.spawn(source.pop(0))
                if board.is_finished():
                    continue
                # drive the active unit until it locks
                while board.unit is not None:
                    commands += board.move_ei()
                    commands += board.move_down()
            self.solutions.append({
                "problemId": self.ID,
                "seed": ss,
                "tag": "Algo v3.1",
                "solution": commands,
            })
        return json.dumps(self.solutions)

    def generate_source(self, seed, sourceLength):
        """Expand *seed* deterministically into a sequence of unit copies."""
        rng = LCG(seed)
        return [copy.deepcopy(self.units[rng.next() % len(self.units)])
                for _ in range(sourceLength)]

    def __str__(self):
        return "Game(ID:%s)" % self.ID
class Unit(object):
    """A game piece: a pivot plus member cells, at an offset `pos`."""

    MV_VALID = 0    # Valid move
    MV_LOCKED = 1   # Move would cause a lock
    MV_INVALID = 2  # Invalid move, can't move there (out of the board etc.)

    def __init__(self, json_unit):
        super(Unit, self).__init__()
        self.pivot = json_cell2tuple(json_unit["pivot"])
        self.members = [json_cell2tuple(json_m) for json_m in json_unit["members"]]
        self.pos = Cell(0, 0)

    def get_topmost(self):
        """Member with the smallest y (first one wins on ties)."""
        return min(self.members, key=lambda m: m.y)

    def get_leftmost(self):
        """Member with the smallest x (first one wins on ties)."""
        return min(self.members, key=lambda m: m.x)

    def get_rightmost(self):
        """Member with the largest x (first one wins on ties)."""
        return max(self.members, key=lambda m: m.x)

    def can_be_placed(self, board):
        """Classify placement at the current pos: valid, locking, or off-board."""
        for m in self.members:
            try:
                cell = board.get(self.pos.x + m.x, self.pos.y + m.y)
            except IndexError:
                return Unit.MV_INVALID
            if cell in [1, 2, 3]:
                return Unit.MV_LOCKED
        return Unit.MV_VALID

    def move_e(self):
        self.pos.x = self.pos.x - 1

    def move_w(self):
        self.pos.x = self.pos.x + 1

    def move_se(self):
        # hex grid: x shifts only on even rows when moving south-east
        if self.pos.y % 2 == 0:
            self.pos.x = self.pos.x - 1
        self.pos.y = self.pos.y + 1

    def move_sw(self):
        # hex grid: x shifts only on odd rows when moving south-west
        if self.pos.y % 2 == 1:
            self.pos.x = self.pos.x + 1
        self.pos.y = self.pos.y + 1

    def turn_cw(self):
        # rotation not implemented
        pass

    def turn_ccw(self):
        # rotation not implemented
        pass

    def __str__(self):
        return "Unit(pivot:%s, members:%s)" % (self.pivot, self.members)
class Cell(object):
    """A board coordinate: column x, row y."""

    def __init__(self, x, y):
        super(Cell, self).__init__()
        self.x, self.y = x, y

    def __str__(self):
        return "(%s, %s)" % (self.x, self.y)
def json_cell2tuple(json_cell):
    """Convert a JSON cell dict ({'x': .., 'y': ..}) into a Cell."""
    x, y = json_cell["x"], json_cell["y"]
    return Cell(x, y)
class Board(object):
def __init__(self, width, height, filled):
super(Board, self).__init__()
self.width = width
self.height = height
self._board = [[0] * height for x in range(width)]
for full in filled:
self._board[full.x][full.y] = 1
self.unit = None
self.finished = False
def spawn(self, unit):
tm = unit.get_topmost()
lm = unit.get_leftmost()
rm = unit.get_rightmost()
pad_top = 0
pad_left = (self.width - (rm.x - lm.x + 1)) / 2
unit.pos = Cell(pad_left, pad_top)
if unit.can_be_placed(self) == Unit.MV_VALID:
self.unit = unit
else:
self.finished = True
def lock(self):
if self.unit:
for m in self.unit.members:
self._board[m.x + self.unit.pos.x][m.y + self.unit.pos.y] = 1
self.unit = None
self.clear_rows()
    def clear_rows(self):
        """Clear every completely filled row, shifting the rows above it down."""
        # UGLY AS HELL
        # Scan bottom-up; the inner 'while' re-checks the same row because
        # the shift may drop another full row into position y.
        for y in range(self.height)[::-1]:
            while self.row_is_filled(y):
                for x in range(self.width):
                    self._board[x][y] = 0
                # Shift all rows above y down by one (copy row yy -> yy+1).
                # NOTE(review): row 0 is copied into row 1 but never cleared
                # itself, which looks like it duplicates the top row after a
                # shift — confirm against game rules.
                for yy in range(y)[::-1]:
                    for x in range(self.width):
                        self._board[x][yy + 1] = self.get(x, yy)
def row_is_filled(self, row):
summ = 0
for x in range(self.width):
summ += self.get(x, row)
if summ >= self.width:
return True
return False
    def is_finished(self):
        """Return whether the game has ended (a unit could not be spawned)."""
        return self.finished
    def get_adjacent(self, x, y):
        """Stub: neighbor lookup is not implemented; always returns []."""
        return []
def get_fill_level(self, col):
for y in range(self.height):
if self.get(col, y) in [1]:
return y
return self.height - 1
    def get(self, x, y):
        """Return the raw cell value at column *x*, row *y* (0 = empty)."""
        return self._board[x][y]
def __str__(self):
board_copy = copy.deepcopy(self._board)
if self.unit:
for m in self.unit.members:
board_copy[m.x + self.unit.pos.x][m.y + self.unit.pos.y] = 2
buf = []
for y in range(self.height):
line = ""
if y % 2 == 1:
line = " "
for x in range(self.width):
line = line + str(board_copy[x][y]) + " "
buf.append(line)
return "\n".join(buf)
# TODO: refactor movement code
def move_e(self):
if self.unit is None:
return ""
unit_copy = copy.deepcopy(self.unit)
unit_copy.move_e()
cbp = unit_copy.can_be_placed(self)
if cbp == Unit.MV_VALID:
self.unit.move_e()
return "e"
elif cbp == Unit.MV_LOCKED:
self.lock()
return "c"
else:
return ""
def move_w(self):
if self.unit is None:
return ""
unit_copy = copy.deepcopy(self.unit)
unit_copy.move_w()
cbp = unit_copy.can_be_placed(self)
if cbp == Unit.MV_VALID:
self.unit.move_w()
return "!"
elif cbp == Unit.MV_LOCKED:
self.lock()
return "!"
else:
return ""
def move_se(self):
if self.unit is None:
return ""
unit_copy = copy.deepcopy(self.unit)
unit_copy.move_se()
cbp = unit_copy.can_be_placed(self)
if cbp == Unit.MV_VALID:
self.unit.move_se()
return "m"
else:
self.lock()
return "n"
def move_sw(self):
if self.unit is None:
return ""
unit_copy = copy.deepcopy(self.unit)
unit_copy.move_sw()
cbp = unit_copy.can_be_placed(self)
if cbp == Unit.MV_VALID:
self.unit.move_sw()
return "i"
else:
self.lock()
return "j"
# Macro movements
def move_down(self):
commands = ""
while self.unit is not None:
if self.unit.pos.y |
not produce safe or "
"verifiable files." % link)
self.need_warn_unverified = True
continue
all_locations.append(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates"
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
for v in self._link_package_versions(link, search_name):
yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
    def _link_package_versions(self, link, search_name):
        """
        Return an iterable of triples (pkg_resources_version_key,
        link, python_version) that can be extracted from the given
        link.
        Meant to be overridden by subclasses, not called by clients.
        """
        platform = get_platform()
        version = None
        # Prefer the explicit "#egg=" fragment; otherwise parse the filename.
        if link.egg_fragment:
            egg_info = link.egg_fragment
        else:
            egg_info, ext = link.splitext()
            if not ext:
                # logged_links de-duplicates the debug output per link.
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; not a file' % link)
                    self.logged_links.add(link)
                return []
            if egg_info.endswith('.tar'):
                # Special double-extension case:
                egg_info = egg_info[:-4]
                ext = '.tar' + ext
            if ext not in self._known_extensions():
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
                    self.logged_links.add(link)
                return []
            if "macosx10" in link.path and ext == '.zip':
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; macosx10 one' % (link))
                    self.logged_links.add(link)
                return []
            if ext == wheel_ext:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    logger.debug('Skipping %s because the wheel filename is invalid' % link)
                    return []
                if wheel.name.lower() != search_name.lower():
                    logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
                    return []
                if not wheel.supported():
                    logger.debug('Skipping %s because it is not compatible with this Python' % link)
                    return []
                # This is a dirty hack to prevent installing Binary Wheels from
                # PyPI unless it is a Windows or Mac Binary Wheel. This is
                # paired with a change to PyPI disabling uploads for the
                # same. Once we have a mechanism for enabling support for binary
                # wheels on linux that deals with the inherent problems of
                # binary distribution this can be removed.
                comes_from = getattr(link, "comes_from", None)
                if ((
                        not platform.startswith('win')
                        and not platform.startswith('macosx')
                    )
                        and comes_from is not None
                        and urlparse.urlparse(comes_from.url).netloc.endswith(
                            "pypi.python.org")):
                    if not wheel.supported(tags=supported_tags_noarch):
                        logger.debug(
                            "Skipping %s because it is a pypi-hosted binary "
                            "Wheel on an unsupported platform" % link
                        )
                        return []
                version = wheel.version
        # Fall back to parsing "<name>-<version>" out of the filename stem.
        if not version:
            version = self._egg_info_matches(egg_info, search_name, link)
        if version is None:
            logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
            return []
        # Policy filters: externally-hosted and unverifiable links are
        # skipped unless explicitly allowed, and a warning flag is raised.
        if (link.internal is not None
                and not link.internal
                and not normalize_name(search_name).lower() in self.allow_external
                and not self.allow_all_external):
            # We have a link that we are sure is external, so we should skip
            # it unless we are allowing externals
            logger.debug("Skipping %s because it is externally hosted." % link)
            self.need_warn_external = True
            return []
        if (link.verifiable is not None
                and not link.verifiable
                and not (normalize_name(search_name).lower()
                         in self.allow_unverified)):
            # We have a link that we are sure we cannot verify it's integrity,
            # so we should skip it unless we are allowing unsafe installs
            # for this requirement.
            logger.debug("Skipping %s because it is an insecure and "
                         "unverifiable file." % link)
            self.need_warn_unverified = True
            return []
        # Strip a trailing "-pyX.Y" marker and reject mismatched versions.
        match = self._py_version_re.search(version)
        if match:
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != sys.version[:3]:
                logger.debug('Skipping %s because Python version is incorrect' % link)
                return []
        logger.debug('Found link %s, version: %s' % (link, version))
        return [(pkg_resources.parse_version(version),
                 link,
                 version)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
    def _get_page(self, link, req):
        """Fetch `link` as an HTMLPage using this finder's cache and session."""
        return HTMLPage.get_page(link, req,
            cache=self.cache,
            session=self.session,
            )
class PageCache(object):
    """In-memory cache of fetched HTML pages, per-URL failure weights,
    and archive-URL determinations."""

    # A URL is considered dead once it accumulates this much failure weight.
    failure_limit = 3

    def __init__(self):
        self._failures = {}
        self._pages = {}
        self._archives = {}

    def too_many_failures(self, url):
        """True once `url` has reached the failure limit."""
        return self._failures.get(url, 0) >= self.failure_limit

    def get_page(self, url):
        """Return the cached page for `url`, or None when absent."""
        return self._pages.get(url)

    def is_archive(self, url):
        """True if `url` was previously marked as an archive."""
        return self._archives.get(url, False)

    def set_is_archive(self, url, value=True):
        """Record whether `url` points at an archive file."""
        self._archives[url] = value

    def add_page_failure(self, url, level):
        """Add `level` to the failure weight recorded for `url`."""
        current = self._failures.get(url, 0)
        self._failures[url] = current + level

    def add_page(self, urls, page):
        """Cache `page` under every URL in `urls`."""
        for url in urls:
            self._pages[url] = page
class HTMLPage(object):
"""Represents one page, along with its URL"""
## FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home.html\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
_href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)
def __init__(self, content, url, headers=None, trusted= |
from django.conf import settings
from django.core.cache import caches
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.management.base import BaseCommand, CommandError
from django.db import (
DEFAULT_DB_ALIAS, connections, models, router, transaction,
)
from django.db.utils import DatabaseError
from django.utils.encoding import force_text
class Command(BaseCommand):
    help = "Creates the tables needed to use the SQL cache backend."
    requires_system_checks = False
    def add_arguments(self, parser):
        # Positional table names are legacy; normally CACHES is consulted.
        parser.add_argument('args', metavar='table_name', nargs='*',
            help='Optional table names. Otherwise, settings.CACHES is used to '
            'find cache tables.')
        parser.add_argument('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Nominates a database onto which the cache tables will be '
            'installed. Defaults to the "default" database.')
        parser.add_argument('--dry-run', action='store_true', dest='dry_run',
            help='Does not create the table, just prints the SQL that would '
            'be run.')
    def handle(self, *tablenames, **options):
        db = options.get('database')
        self.verbosity = int(options.get('verbosity'))
        dry_run = options.get('dry_run')
        if len(tablenames):
            # Legacy behavior, tablename specified as argument
            for tablename in tablenames:
                self.create_table(db, tablename, dry_run)
        else:
            # Create a table for every database-backed cache in settings.
            for cache_alias in settings.CACHES:
                cache = caches[cache_alias]
                if isinstance(cache, BaseDatabaseCache):
                    self.create_table(db, cache._table, dry_run)
    def create_table(self, database, tablename, dry_run):
        # Build and execute the CREATE TABLE (and index) DDL for one cache
        # table; no-op if routing forbids it or the table already exists.
        cache = BaseDatabaseCache(tablename, {})
        if not router.allow_migrate_model(database, cache.cache_model_class):
            return
        connection = connections[database]
        if tablename in connection.introspection.table_names():
            if self.verbosity > 0:
                self.stdout.write("Cache table '%s' already exists." % tablename)
            return
        fields = (
            # "key" is a reserved word in MySQL, so use "cache_key" instead.
            models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True),
            models.TextField(name='value'),
            models.DateTimeField(name='expires', db_index=True),
        )
        table_output = []
        index_output = []
        qn = connection.ops.quote_name
        for f in fields:
            # Render each column as "name type [NOT] NULL [PRIMARY KEY|UNIQUE]".
            field_output = [qn(f.name), f.db_type(connection=connection)]
            field_output.append("%sNULL" % ("NOT " if not f.null else ""))
            if f.primary_key:
                field_output.append("PRIMARY KEY")
            elif f.unique:
                field_output.append("UNIQUE")
            if f.db_index:
                unique = "UNIQUE " if f.unique else ""
                index_output.append("CREATE %sINDEX %s ON %s (%s);" %
                    (unique, qn('%s_%s' % (tablename, f.name)), qn(tablename),
                    qn(f.name)))
            table_output.append(" ".join(field_output))
        full_statement = ["CREATE TABLE %s (" % qn(tablename)]
        for i, line in enumerate(table_output):
            full_statement.append('    %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
        full_statement.append(');')
        full_statement = "\n".join(full_statement)
        if dry_run:
            # Print the DDL instead of executing it.
            self.stdout.write(full_statement)
            for statement in index_output:
                self.stdout.write(statement)
            return
        # Savepoints only when the backend can roll back DDL.
        with transaction.atomic(using=database,
                                savepoint=connection.features.can_rollback_ddl):
            with connection.cursor() as curs:
                try:
                    curs.execute(full_statement)
                except DatabaseError as e:
                    raise CommandError(
                        "Cache table '%s' could not be created.\nThe error was: %s." %
                        (tablename, force_text(e)))
                for statement in index_output:
                    curs.execute(statement)
        if self.verbosity > 1:
            self.stdout.write("Cache table '%s' created." % tablename)
|
after* there are files:
addon = Addon.objects.get(pk=addon.id)
addon.update(status=addon_status)
return addon
class TestQueue(TestCase):
    """Tests common attributes and coercions that each view must support."""
    __test__ = False # this is an abstract test case
    # Subclasses must provide: Queue (the view model), channel, and
    # new_addon()/new_search_ext() fixture factories.
    def test_latest_version(self):
        # Three versions with distinct created dates; the queue row must
        # report the newest one.
        addon = self.new_addon()
        v1 = addon.find_latest_version(self.channel)
        v1.update(created=self.days_ago(2))
        v1.all_files[0].update(status=amo.STATUS_PUBLIC)
        version_factory(addon=addon, version='2.0', created=self.days_ago(1),
                        channel=self.channel,
                        file_kw={'status': amo.STATUS_PUBLIC})
        version_factory(addon=addon, version='3.0', created=self.days_ago(0),
                        channel=self.channel,
                        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
        row = self.Queue.objects.get()
        assert row.latest_version == '3.0'
    def test_addons_disabled_by_user_are_hidden(self):
        self.new_addon(version=u'0.1').update(disabled_by_user=True)
        assert list(self.Queue.objects.all()) == []
    def test_addons_disabled_by_admin_are_hidden(self):
        self.new_addon(version=u'0.1').update(status=amo.STATUS_DISABLED)
        assert list(self.Queue.objects.all()) == []
    def test_reviewed_files_are_hidden(self):
        self.new_addon(name='Unreviewed')
        addon_factory(name='Already Reviewed')
        assert sorted(q.addon_name for q in self.Queue.objects.all()) == (
            ['Unreviewed'])
    def test_search_extensions(self):
        self.new_search_ext('Search Tool', '0.1')
        row = self.Queue.objects.get()
        assert row.addon_name == u'Search Tool'
        assert row.addon_type_id == amo.ADDON_SEARCH
    def test_count_all(self):
        # Create two new addons and give each another version.
        version_factory(addon=self.new_addon(), version=u'2.0',
                        channel=self.channel)
        version_factory(addon=self.new_addon(), version=u'2.0',
                        channel=self.channel)
        assert self.Queue.objects.all().count() == 2
class TestPendingQueue(TestQueue):
    __test__ = True
    Queue = ViewPendingQueue
    channel = amo.RELEASE_CHANNEL_LISTED
    def new_addon(self, name=u'Pending', version=u'1.0'):
        """Creates an approved addon with two listed versions, one approved,
        the second awaiting review."""
        addon = addon_factory(
            name=name,
            version_kw={'version': u'0.0.1', 'channel': self.channel,
                        'created': self.days_ago(1)})
        version_factory(
            addon=addon, version=version, channel=self.channel,
            file_kw={'status': amo.STATUS_AWAITING_REVIEW,
                     'is_restart_required': False})
        return addon
    def new_search_ext(self, name, version, **kw):
        return create_search_ext(name, version,
                                 amo.STATUS_PUBLIC, amo.STATUS_AWAITING_REVIEW,
                                 channel=self.channel, **kw)
    def test_waiting_time(self):
        self.new_addon()
        Version.objects.update(created=datetime.utcnow())
        row = self.Queue.objects.all()[0]
        assert row.waiting_time_days == 0
        # Time zone will be off, hard to test this.
        assert row.waiting_time_hours is not None
    # Each test below sets exactly one reviewer-visible flag and asserts
    # the queue row reports that flag (and only that flag).
    def test_flags_needs_admin_code_review(self):
        AddonReviewerFlags.objects.create(
            addon=self.new_addon(), needs_admin_code_review=True)
        q = self.Queue.objects.get()
        assert q.flags == [
            ('needs-admin-code-review', 'Needs Admin Code Review')]
    def test_flags_info_request(self):
        self.new_addon().find_latest_version(self.channel).update(
            has_info_request=True)
        q = self.Queue.objects.get()
        assert q.flags == [('info', 'More Information Requested')]
    def test_flags_reviewer_comment(self):
        self.new_addon().find_latest_version(self.channel).update(
            has_reviewer_comment=True)
        q = self.Queue.objects.get()
        assert q.flags == [('reviewer', 'Contains Reviewer Comment')]
    def test_flags_jetpack(self):
        self.new_addon().find_latest_version(self.channel).all_files[0].update(
            jetpack_version='1.8')
        q = self.Queue.objects.get()
        assert q.flags == [('jetpack', 'Jetpack Add-on')]
    def test_flags_is_restart_required(self):
        self.new_addon().find_latest_version(self.channel).all_files[0].update(
            is_restart_required=True)
        q = self.Queue.objects.get()
        assert q.flags == [('is_restart_required', 'Requires Restart')]
    def test_flags_sources_provided(self):
        self.new_addon().find_latest_version(self.channel).update(
            source='/some/source/file')
        q = self.Queue.objects.get()
        assert q.flags == [('sources-provided', 'Sources provided')]
    def test_flags_webextension(self):
        self.new_addon().find_latest_version(self.channel).all_files[0].update(
            is_webextension=True)
        queue = self.Queue.objects.get()
        assert queue.flags == [('webextension', 'WebExtension')]
    def test_no_flags(self):
        self.new_addon()
        q = self.Queue.objects.get()
        assert q.flags == []
class TestFullReviewQueue(TestQueue):
    __test__ = True
    Queue = ViewFullReviewQueue
    channel = amo.RELEASE_CHANNEL_LISTED
    def new_addon(self, name=u'Nominated', version=u'1.0',
                  addon_status=amo.STATUS_NOMINATED,
                  file_status=amo.STATUS_AWAITING_REVIEW):
        """Create a nominated addon with one version awaiting review."""
        addon = addon_factory(
            name=name, status=addon_status,
            version_kw={'version': version, 'channel': self.channel},
            file_kw={'status': file_status})
        return addon
    def new_search_ext(self, name, version, **kw):
        return create_search_ext(name, version,
                                 amo.STATUS_NOMINATED,
                                 amo.STATUS_AWAITING_REVIEW,
                                 channel=self.channel, **kw)
    def test_waiting_time(self):
        # Full review waits are measured from the nomination timestamp.
        self.new_addon()
        Version.objects.update(nomination=datetime.utcnow())
        row = self.Queue.objects.all()[0]
        assert row.waiting_time_days == 0
        # Time zone will be off, hard to test this.
        assert row.waiting_time_hours is not None
class TestUnlistedAllList(TestCase):
Queue = ViewUnlistedAllList
channel = amo.RELEASE_CHANNEL_UNLISTED
fixtures = ['base/users']
def new_addon(self, name=u'Unlisted', version=u'1.0',
addon_status=amo.STATUS_NULL,
file_status=amo.STATUS_PUBLIC):
addon = addon_factory(
name=name, status=addon_status,
version_kw={'version': version, 'channel': self.channel},
file_kw={'status': file_status})
return addon
def test_all_addon | s_are_in_q(self):
self.new_addon('Public', addon_status= | amo.STATUS_PUBLIC,
file_status=amo.STATUS_PUBLIC)
self.new_addon('Nominated', addon_status=amo.STATUS_NOMINATED,
file_status=amo.STATUS_AWAITING_REVIEW)
self.new_addon('Deleted', addon_status=amo.STATUS_PUBLIC,
file_status=amo.STATUS_PUBLIC).delete()
assert sorted(q.addon_name for q in self.Queue.objects.all()) == (
['Deleted', 'Nominated', 'Public'])
def test_authors(self):
addon = self.new_addon()
bert = user_factory(username='bert')
ernie = user_factory(username='ernie')
AddonUser.objects.create(addon=addon, user=bert)
AddonUser.objects.create(addon=addon, user=ernie)
row = self.Queue.objects.all()[0]
self.assertSetEqual(set(row.authors),
{(ernie.id, 'ernie'), (bert.id, 'bert')})
def test_last_reviewed_version(self):
today = datetime.today().date()
addon = self.new_addon(version='1.0')
v2 = version_factory(addon=addon, version='2.0', channel=self.channel)
log = Acti |
# -*- coding: utf-8 -*-
stopwords = """
| A French stop word list. Comments begin with vertical bar. Each stop
| word is at the start of a line.
au | a + le
aux | a + les
avec | with
ce | this
ces | these
dans | with
de | of
des | de + les
du | | de + le
elle | she
en | `of them' etc
et | and
eux | them
il | he
je | I
la | the
le | the
leur | their
lui | | him
ma | my (fem)
mais | but
me | me
même | same; as in moi-même (myself) etc
mes | me (pl)
moi | me
mon | my (masc)
ne | not
nos | our (pl)
notre | our
nous | we
on | one
ou | where
par | by
pas | not
pour | for
qu | que before vowel
que | that
qui | who
sa | his, her (fem)
se | oneself
ses | his (pl)
son | his, her (masc)
sur | on
ta | thy (fem)
te | thee
tes | thy (pl)
toi | thee
ton | thy (masc)
tu | thou
un | a
une | a
vos | your (pl)
votre | your
vous | you
| single letter forms
c | c'
d | d'
j | j'
l | l'
à | to, at
m | m'
n | n'
s | s'
t | t'
y | there
| forms of être (not including the infinitive):
été
étée
étées
étés
étant
étante
étants
étantes
suis
es
est
sommes
êtes
sont
serai
seras
sera
serons
serez
seront
serais
serait
serions
seriez
seraient
étais
était
étions
étiez
étaient
fus
fut
fûmes
fûtes
furent
sois
soit
soyons
soyez
soient
fusse
fusses
fût
fussions
fussiez
fussent
| forms of avoir (not including the infinitive):
ayant
ayante
ayantes
ayants
eu
eue
eues
eus
ai
as
avons
avez
ont
aurai
auras
aura
aurons
aurez
auront
aurais
aurait
aurions
auriez
auraient
avais
avait
avions
aviez
avaient
eut
eûmes
eûtes
eurent
aie
aies
ait
ayons
ayez
aient
eusse
eusses
eût
eussions
eussiez
eussent
"""
|
e_name, get_leave_period
from erpnext.hr.doctype.leave_ledger_entry.leave_ledger_entry import expire_allocation, create_leave_ledger_entry
# Validation errors raised by leave-allocation checks. Each subclasses
# frappe.ValidationError so frappe.throw() surfaces them to the user.
class OverlapError(frappe.ValidationError): pass
class BackDatedAllocationError(frappe.ValidationError): pass
class OverAllocationError(frappe.ValidationError): pass
class LessAllocationError(frappe.ValidationError): pass
class ValueMultiplierError(frappe.ValidationError): pass
class LeaveAllocation(Document):
    def validate(self):
        # Order matters: totals are computed (set_total_leaves_allocated)
        # before they are validated.
        self.validate_period()
        self.validate_new_leaves_allocated_value()
        self.validate_allocation_overlap()
        self.validate_back_dated_allocation()
        self.set_total_leaves_allocated()
        self.validate_total_leaves_allocated()
        self.validate_lwp()
        set_employee_name(self)
        self.validate_leave_allocation_days()
    def validate_leave_allocation_days(self):
        # Reject the allocation when the leave-period total would exceed the
        # leave type's max_leaves_allowed.
        company = frappe.db.get_value("Employee", self.employee, "company")
        leave_period = get_leave_period(self.from_date, self.to_date, company)
        max_leaves_allowed = frappe.db.get_value("Leave Type", self.leave_type, "max_leaves_allowed")
        # NOTE(review): max_leaves_allowed may be None when unset on the
        # Leave Type; `None > 0` raises TypeError on Python 3 -- confirm.
        if max_leaves_allowed > 0:
            leave_allocated = 0
            if leave_period:
                leave_allocated = get_leave_allocation_for_period(self.employee, self.leave_type,
                    leave_period[0].from_date, leave_period[0].to_date)
            leave_allocated += self.new_leaves_allocated
            if leave_allocated > max_leaves_allowed:
                frappe.throw(_("Total allocated leaves are more days than maximum allocation of {0} leave type for employee {1} in the period")
                    .format(self.leave_type, self.employee))
    def on_submit(self):
        self.create_leave_ledger_entry()
        # expire all unused leaves in the ledger on creation of carry forward allocation
        allocation = get_previous_allocation(self.from_date, self.leave_type, self.employee)
        if self.carry_forward and allocation:
            expire_allocation(allocation)
    def on_cancel(self):
        # submit=False reverses the ledger entries created on submit.
        self.create_leave_ledger_entry(submit=False)
        if self.carry_forward:
            self.set_carry_forwarded_leaves_in_previous_allocation(on_cancel=True)
    def validate_period(self):
        if date_diff(self.to_date, self.from_date) <= 0:
            frappe.throw(_("To date cannot be before from date"))
    def validate_lwp(self):
        # Leave-without-pay types are tracked per application, not allocated.
        if frappe.db.get_value("Leave Type", self.leave_type, "is_lwp"):
            frappe.throw(_("Leave Type {0} cannot be allocated since it is leave without pay").format(self.leave_type))
    def validate_new_leaves_allocated_value(self):
        """validate that leave allocation is in multiples of 0.5"""
        if flt(self.new_leaves_allocated) % 0.5:
            frappe.throw(_("Leaves must be allocated in multiples of 0.5"), ValueMultiplierError)
    def validate_allocation_overlap(self):
        # A submitted allocation for the same employee/leave type must not
        # overlap this date range.
        leave_allocation = frappe.db.sql("""
            select name from `tabLeave Allocation`
            where employee=%s and leave_type=%s and docstatus=1
            and to_date >= %s and from_date <= %s""",
            (self.employee, self.leave_type, self.from_date, self.to_date))
        if leave_allocation:
            frappe.msgprint(_("{0} already allocated for Employee {1} for period {2} to {3}")
                .format(self.leave_type, self.employee, formatdate(self.from_date), formatdate(self.to_date)))
            frappe.throw(_('Reference') + ': <a href="#Form/Leave Allocation/{0}">{0}</a>'
                .format(leave_allocation[0][0]), OverlapError)
    def validate_back_dated_allocation(self):
        # Disallow creating an allocation that predates one whose balance
        # was already carried forward.
        future_allocation = frappe.db.sql("""select name, from_date from `tabLeave Allocation`
            where employee=%s and leave_type=%s and docstatus=1 and from_date > %s
            and carry_forward=1""", (self.employee, self.leave_type, self.to_date), as_dict=1)
        if future_allocation:
            frappe.throw(_("Leave cannot be allocated before {0}, as leave balance has already been carry-forwarded in the future leave allocation record {1}")
                .format(formatdate(future_allocation[0].from_date), future_allocation[0].name),
                BackDatedAllocationError)
    def set_total_leaves_allocated(self):
        # total = carry-forwarded unused leaves + newly allocated leaves,
        # capped afterwards by the leave type's maximum.
        self.unused_leaves = get_carry_forwarded_leaves(self.employee,
            self.leave_type, self.from_date, self.carry_forward)
        self.total_leaves_allocated = flt(self.unused_leaves) + flt(self.new_leaves_allocated)
        self.limit_carry_forward_based_on_max_allowed_leaves()
        if self.carry_forward:
            self.set_carry_forwarded_leaves_in_previous_allocation()
        if not self.total_leaves_allocated \
            and not frappe.db.get_value("Leave Type", self.leave_type, "is_earned_leave") \
            and not frappe.db.get_value("Leave Type", self.leave_type, "is_compensatory"):
            frappe.throw(_("Total leaves allocated is mandatory for Leave Type {0}")
                .format(self.leave_type))
    def limit_carry_forward_based_on_max_allowed_leaves(self):
        max_leaves_allowed = frappe.db.get_value("Leave Type", self.leave_type, "max_leaves_allowed")
        if max_leaves_allowed and self.total_leaves_allocated > flt(max_leaves_allowed):
            self.total_leaves_allocated = flt(max_leaves_allowed)
            self.unused_leaves = max_leaves_allowed - flt(self.new_leaves_allocated)
    def set_carry_forwarded_leaves_in_previous_allocation(self, on_cancel=False):
        ''' Set carry forwarded leaves in previous allocation '''
        previous_allocation = get_previous_allocation(self.from_date, self.leave_type, self.employee)
        if on_cancel:
            self.unused_leaves = 0.0
        if previous_allocation:
            frappe.db.set_value("Leave Allocation", previous_allocation.name,
                'carry_forwarded_leaves_count', self.unused_leaves)
    def validate_total_leaves_allocated(self):
        # Adding a day to include To Date in the difference
        date_difference = date_diff(self.to_date, self.from_date) + 1
        if date_difference < self.total_leaves_allocated:
            frappe.throw(_("Total allocated leaves are more than days in the period"), OverAllocationError)
    def create_leave_ledger_entry(self, submit=True):
        # Carry-forwarded leaves get their own ledger entry, expiring either
        # after the configured number of days or at the allocation's to_date.
        if self.unused_leaves:
            expiry_days = frappe.db.get_value("Leave Type", self.leave_type, "expire_carry_forwarded_leaves_after_days")
            end_date = add_days(self.from_date, expiry_days - 1) if expiry_days else self.to_date
            args = dict(
                leaves=self.unused_leaves,
                from_date=self.from_date,
                to_date= min(getdate(end_date), getdate(self.to_date)),
                is_carry_forward=1
            )
            create_leave_ledger_entry(self, args, submit)
        args = dict(
            leaves=self.new_leaves_allocated,
            from_date=self.from_date,
            to_date=self.to_date,
            is_carry_forward=0
        )
        create_leave_ledger_entry(self, args, submit)
def get_previous_allocation(from_date, leave_type, employee):
    ''' Returns document properties of previous allocation '''
    # Latest submitted allocation for this employee/leave type that ended
    # strictly before `from_date` (None when there is none).
    return frappe.db.get_value("Leave Allocation",
        filters={
            'to_date': ("<", from_date),
            'leave_type': leave_type,
            'employee': employee,
            'docstatus': 1
        },
        order_by='to_date DESC',
        fieldname=['name', 'from_date', 'to_date', 'employee', 'leave_type'], as_dict=1)
def get_leave_allocation_for_period(employee, leave_type, from_date, to_date):
    """Return the total leaves allocated to `employee` for `leave_type`
    over every submitted allocation overlapping [from_date, to_date]."""
    allocation_records = frappe.db.sql("""
        select employee, leave_type, from_date, to_date, total_leaves_allocated
        from `tabLeave Allocation`
        where employee=%(employee)s and leave_type=%(leave_type)s
            and docstatus=1
            and (from_date between %(from_date)s and %(to_date)s
                or to_date between %(from_date)s and %(to_date)s
                or (from_date < %(from_date)s and to_date > %(to_date)s))
    """, {
        "from_date": from_date,
        "to_date": to_date,
        "employee": employee,
        "leave_type": leave_type
    }, as_dict=1)
    # Sum over an empty result is 0, matching the original accumulator.
    return sum(record.total_leaves_allocated for record in allocation_records)
@frappe.whitelist()
def get_carry_forwarded_leaves(employee, leave_type, date, carry_forward=None):
''' Returns carry forwarded leaves for the given employee '''
unused_leaves = 0.0
previous_allocation = get_previous_allocation(date, leave_type, employee)
if carry_forward and previous_allocation:
validate_carry_forward(leave_type)
unused_leaves = get_unused_leaves(employee, leave_type,
previous_allocation.from_date, previous_allocation.to_date)
if unused_leaves:
max_carry_forwarded_leaves = frappe.db.get_value("Leave Type",
leave_type, "maximum_carry_forwarded_leaves")
if max_carry_forwarded_leaves and unused_leaves > flt(max_carry_forwarded_leaves):
unused_l |
from PyQt5.QtCore import QThread, pyqtSignal
from API.CurseAPI import CurseAPI, CurseFile, CurseModpack
from PyQt5.QtWidgets import *
from GUI.Strings import Strings
# Shared localisation helpers: translate(key) resolves a UI string.
strings = Strings()
translate = strings.get
class FileDownloaderWindow(QWidget):
    # Progress-bar dialog that downloads one file on a background thread.
    def __init__(self, file: str, curse: CurseAPI, path: str, fname=False, callback=False):
        """Show a progress window and start downloading `file` to `path`.

        `fname` optionally fixes the destination file name; `callback`,
        when set, is invoked once the download completes.
        """
        super().__init__()
        self.callback = callback
        self.setWindowTitle(translate("downloading.update"))
        self.layout = QVBoxLayout(self)
        self.progress = QProgressBar()
        self.layout.addWidget(self.progress)
        self.show()
        # NOTE(review): FileDownloaderThread is itself a QThread but is moved
        # to a second QThread below -- the usual pattern uses a plain QObject
        # worker; confirm this is intentional.
        self.downloader = FileDownloaderThread(file, curse, path, fname)
        self.downloader.done.connect(self.download_done)
        self.downloader.update.connect(self.progress.setValue)
        self.download_thread = QThread()
        self.downloader.moveToThread(self.download_thread)
        self.download_thread.started.connect(self.downloader.download)
        self.download_thread.start()
    def download_done(self):
        # Fire the optional completion callback, then tear the window down.
        if self.callback:
            self.callback()
        self.close()
        self.destroy()
class FileDownloaderThread(QThread):
    # Emitted when the download finishes.
    done = pyqtSignal()
    # Emitted with the current progress percentage.
    update = pyqtSignal(int, name="ping")
    def __init__(self, file: str, curse: CurseAPI, path: str, fname: str):
        super().__init__()
        self.file = file
        self.path = path
        self.fname = fname
        self.curse = curse
    def download(self):
        """Run the blocking download, forwarding progress via `update`."""
        self.curse.download_file(self.file, self.path, self.fname, self.update.emit)
        self.done.emit()
class ModDownloaderWindow(QWidget):
    # Progress-bar dialog for installing a single mod into an instance.
    def __init__(self, file: CurseFile, curse: CurseAPI, instance, initmods):
        """Show a progress window and install `file` into `instance`.

        `initmods` is called after installation to refresh the mod list.
        """
        super().__init__()
        self.initmods = initmods
        self.setWindowTitle(translate("downloading.mod").format(file.name))
        self.layout = QVBoxLayout(self)
        self.progress = QProgressBar()
        self.layout.addWidget(self.progress)
        self.show()
        self.downloader = ModDownloaderThread(file, curse, instance)
        self.downloader.done.connect(self.download_done)
        self.downloader.update.connect(self.progress.setValue)
        self.download_thread = QThread()
        self.downloader.moveToThread(self.download_thread)
        self.download_thread.started.connect(self.downloader.download)
        self.download_thread.start()
    def download_done(self):
        # NOTE(review): QThread.terminate() is documented as unsafe; since the
        # worker has already emitted `done`, these calls look unnecessary.
        self.downloader.terminate()
        self.download_thread.terminate()
        self.initmods()
        self.close()
        self.destroy()
class ModDownloaderThread(QThread):
    # Emitted when the install finishes.
    done = pyqtSignal()
    # Emitted with the current progress percentage.
    update = pyqtSignal(int, name="ping")
    def __init__(self, file: CurseFile, curse: CurseAPI, instance):
        super().__init__()
        self.file = file
        self.curse = curse
        self.instance = instance
    def download(self):
        """Install the mod into the instance, then signal completion."""
        self.instance.install_mod(self.file, self.curse, True, self.update.emit)
        self.done.emit()
        # NOTE(review): terminating self from within the worker is unsafe
        # per Qt docs; the thread would finish on its own.
        self.terminate()
class PackDownloaderWindow(QWidget):
    # Two-bar progress dialog (overall + per-file) for modpack installs.
    def __init__(self, file: CurseFile, curse: CurseAPI, pack: CurseModpack):
        """Show progress bars and install modpack `pack` from `file`."""
        super().__init__()
        self.setWindowTitle(translate("downloading.pack").format(pack.project.title))
        self.layout = QVBoxLayout(self)
        self.label = QLabel()
        self.layout.addWidget(self.label)
        self.progress = QProgressBar()
        self.layout.addWidget(self.progress)
        self.prog2 = QProgressBar()
        self.layout.addWidget(self.prog2)
        self.show()
        self.downloader = PackDownloaderThread(file, curse, pack)
        self.downloader.done.connect(self.download_done)
        self.downloader.bar1.connect(self.progress.setValue)
        self.downloader.bar2.connect(self.prog2.setValue)
        self.downloader.setLabel.connect(self.label.setText)
        self.download_thread = QThread()
        self.downloader.moveToThread(self.download_thread)
        self.download_thread.started.connect(self.downloader.download)
        self.download_thread.start()
    def download_done(self):
        # NOTE(review): QThread.terminate() is documented as unsafe; the
        # worker already emitted `done`, so these calls look unnecessary.
        self.downloader.terminate()
        self.download_thread.terminate()
        self.close()
        self.destroy()
class PackDownloaderThread(QThread):
    # Emitted when the whole pack install finishes.
    done = pyqtSignal()
    # Status text for the window's label.
    setLabel = pyqtSignal(str, name="label")
    # Overall and per-file progress percentages.
    bar1 = pyqtSignal(int, name="bar1")
    bar2 = pyqtSignal(int, name="bar2")
    def __init__(self, file: CurseFile, curse: CurseAPI, pack: CurseModpack):
        super().__init__()
        self.file = file
        self.curse = curse
        self.pack = pack
    def download(self):
        """Install the pack, forwarding label/progress updates, then signal done."""
        self.pack.install(self.file, self.setLabel.emit, self.bar1.emit, self.bar2.emit)
        self.done.emit()
        # NOTE(review): terminating self from within the worker is unsafe
        # per Qt docs; the thread would finish on its own.
        self.terminate()
|
""" Protocol Buffer Breaking Change Detector
This tool is used to detect "breaking changes" in protobuf files, to
ensure proper backwards-compatibility in protobuf API updates. The tool
can check for breaking changes of a single API by taking 2 .proto file
paths as input (before and after) and outputting a bool `is_breaking`.
The breaking change detector creates a temporary directory, copies in
each file to compute a protobuf "state", computes a diff of the "before"
and "after" states, and runs the diff against a set of rules to determine
if there was a breaking change.
The tool is currently implemented with buf (https://buf.build/)
"""
from pathlib import Path
from typing import List
from tools.api_proto_breaking_change_detector.buf_utils import check_breaking, pull_buf_deps
from tools.api_proto_breaking_change_detector.detector_errors import ChangeDetectorError
class ProtoBreakingChangeDetector(object):
    """Abstract breaking change detector interface"""
    def run_detector(self) -> None:
        """Run the breaking change detector to detect rule violations
        This method should populate the detector's internal data such
        that `is_breaking` does not require any additional invocations
        to the breaking change detector.
        """
        # Abstract: concrete detectors (e.g. BufWrapper) override this.
        pass
    def is_breaking(self) -> bool:
        """Return True if breaking changes were detected in the given protos"""
        # Abstract: must be called only after run_detector().
        pass
    def get_breaking_changes(self) -> List[str]:
        """Return a list of strings containing breaking changes output by the tool"""
        # Abstract: concrete detectors override this.
        pass
class BufWrapper(ProtoBreakingChangeDetector):
    """Breaking change detector implemented with buf"""

    def __init__(
            self,
            path_to_changed_dir: str,
            git_ref: str,
            git_path: str,
            subdir: str = None,
            buf_path: str = None,
            config_file_loc: str = None,
            additional_args: List[str] = None) -> None:
        """Initialize the configuration of buf

        This function sets up any necessary config without actually
        running buf against any proto files.

        BufWrapper takes a path to a directory containing proto files
        as input, and it checks if these proto files break any changes
        from a given initial state.

        The initial state is input as a git ref. The constructor expects
        a git ref string, as well as an absolute path to a .git folder
        for the repository.

        Args:
            path_to_changed_dir {str} -- absolute path to a directory containing proto files in the after state
            buf_path {str} -- path to the buf binary (default: "buf")
            git_ref {str} -- git reference to use for the initial state of the protos (typically a commit hash)
            git_path {str} -- absolute path to .git folder for the repository of interest
            subdir {str} -- subdirectory within git repository from which to search for .proto files (default: None, e.g. stay in root)
            additional_args {List[str]} -- additional arguments passed into the buf binary invocations
            config_file_loc {str} -- absolute path to buf.yaml configuration file (if not provided, uses default buf configuration)

        Raises:
            ValueError: if path_to_changed_dir is not a directory under the cwd
            ChangeDetectorError: if git_path does not exist
        """
        if not Path(path_to_changed_dir).is_dir():
            raise ValueError(f"path_to_changed_dir {path_to_changed_dir} is not a valid directory")

        # buf runs relative to the cwd, so the protos must live beneath it
        if Path.cwd() not in Path(path_to_changed_dir).parents:
            raise ValueError(
                f"path_to_changed_dir {path_to_changed_dir} must be a subdirectory of the cwd ({ Path.cwd() })"
            )

        if not Path(git_path).exists():
            raise ChangeDetectorError(f'path to .git folder {git_path} does not exist')

        self._path_to_changed_dir = path_to_changed_dir
        self._additional_args = additional_args
        self._buf_path = buf_path or "buf"
        self._config_file_loc = config_file_loc
        self._git_ref = git_ref
        self._git_path = git_path
        self._subdir = subdir
        # (exit_code, stdout_lines, stderr_lines) tuple set by run_detector()
        self._final_result = None

        pull_buf_deps(
            self._buf_path,
            self._path_to_changed_dir,
            config_file_loc=self._config_file_loc,
            additional_args=self._additional_args)

    def run_detector(self) -> None:
        """Invoke `buf breaking` and cache its (code, stdout, stderr) result."""
        self._final_result = check_breaking(
            self._buf_path,
            self._path_to_changed_dir,
            git_ref=self._git_ref,
            git_path=self._git_path,
            subdir=self._subdir,
            config_file_loc=self._config_file_loc,
            additional_args=self._additional_args)

    def is_breaking(self) -> bool:
        """Return True if the last buf run reported breaking changes.

        Raises:
            ChangeDetectorError: if run_detector() was not invoked first,
                or if buf wrote anything to stderr.
        """
        if not self._final_result:
            raise ChangeDetectorError("Must invoke run_detector() before checking if is_breaking()")
        final_code, final_out, final_err = self._final_result
        final_out, final_err = '\n'.join(final_out), '\n'.join(final_err)

        if final_err != "":
            raise ChangeDetectorError(f"Error from buf: {final_err}")

        # buf signals breaking changes via a nonzero exit code and/or
        # violation messages on stdout.
        if final_code != 0:
            return True
        if final_out != "":
            return True
        return False

    def get_breaking_changes(self) -> List[str]:
        """Return the non-empty breaking-change lines from the last buf run.

        Raises:
            ChangeDetectorError: if run_detector() was not invoked first.
        """
        if not self._final_result:
            raise ChangeDetectorError(
                "Must invoke run_detector() before get_breaking_changes()")
        _, final_out, _ = self._final_result
        # Materialize a real list (the declared return type); previously a
        # lazy `filter` object was returned, which is single-use and not a
        # List[str] as annotated.
        return [line for line in final_out if len(line) > 0] if self.is_breaking() else []
|
rr.write("\nSearching with values %s result=%s\n" %
(search_params, current_resultset))
sleep(sleeptime)
if current_resultset == None:
continue
if current_resultset == [] and empty_results < CFG_BIBMATCH_FUZZY_EMPTY_RESULT_LIMIT:
# Allows some empty results
empty_results += 1
else:
# Intersect results with previous results depending on current operator
if result_hitset == None:
| result_hitset = current_resultset
if current_operator == '+':
r | esult_hitset = list(set(result_hitset) & set(current_resultset))
elif current_operator == '-':
result_hitset = list(set(result_hitset) - set(current_resultset))
elif current_operator == '|':
result_hitset = list(set(result_hitset) | set(current_resultset))
else:
# We did not hit a break in the for-loop: we were allowed to search.
if result_hitset and len(result_hitset) > CFG_BIBMATCH_SEARCH_RESULT_MATCH_LIMIT:
if (verbose > 1):
sys.stderr.write("\nToo many results... %d " % (len(result_hitset)))
elif result_hitset:
# This was a fuzzy match
query_out = " ".join(["%s %s" % (op, qu) for op, qu in fuzzy_query_list])
if validate:
# We can run validation
CFG_BIBMATCH_LOGGER.info("Matching of record %d: Fuzzy query (%s) found %d records: %s" % \
(bibmatch_recid,
query_out,
len(result_hitset),
str(result_hitset)))
exact_matches = []
fuzzy_matches = []
try:
exact_matches, fuzzy_matches = validate_matches(bibmatch_recid=bibmatch_recid, \
record=record, \
server=server, \
result_recids=result_hitset, \
collections=collections, \
verbose=verbose, \
ascii_mode=ascii_mode)
except BibMatchValidationError, e:
sys.stderr.write("ERROR: %s\n" % (str(e),))
if len(exact_matches) > 0:
if (verbose > 8):
sys.stderr.write("Match validated\n")
matched_results.append((exact_matches, query_out))
break
elif len(fuzzy_matches) > 0:
if (verbose > 8):
sys.stderr.write("Match validated fuzzily\n")
fuzzy_results.append((fuzzy_matches, query_out))
else:
if (verbose > 8):
sys.stderr.write("Match could not be validated\n")
else:
# No validation
if len(result_hitset) == 1 and complete:
fuzzy_results.append((result_hitset, query_out))
if (verbose > 8):
sys.stderr.write("Fuzzy: %s\n" % (result_hitset,))
else:
# We treat the result as ambiguous (uncertain) when:
# - query is not complete
# - more then one result
ambiguous_results.append((result_hitset, query_out))
if (verbose > 8):
sys.stderr.write("Ambiguous\n")
return [matched_results, ambiguous_results, fuzzy_results]
def transform_input_to_marcxml(filename=None, file_input=""):
    """
    Takes the filename or input of text-marc and transforms it
    to MARCXML.

    @param filename: path of a text-marc file to transform. When not given,
        a temporary file is created from file_input and removed afterwards
        (previously the temporary file leaked).
    @param file_input: raw text-marc content, used only when no filename
        is given.
    @return: the MARCXML written by transform_file, as a string.
    """
    tmp_created = False
    if not filename:
        # Create temporary file to read from
        tmp_fd, filename = mkstemp()
        os.write(tmp_fd, file_input)
        os.close(tmp_fd)
        tmp_created = True
    try:
        # Redirect output, transform, restore old references
        old_stdout = sys.stdout
        new_stdout = StringIO()
        sys.stdout = new_stdout
        transform_file(filename)
    finally:
        sys.stdout = old_stdout
        if tmp_created:
            # mkstemp leaves deletion to the caller; clean up our temp file.
            os.remove(filename)
    return new_stdout.getvalue()
def bibrecs_has_errors(bibrecs):
    """
    Utility function to check a list of parsed BibRec objects, directly
    from the output of bibrecord.create_records(), for any
    badly parsed records.

    If an error-code is present in the result the function will return True,
    otherwise False.
    """
    # A status code of 0 marks a record that failed to parse.
    return any(err_code == 0 for _record, err_code, _errs in bibrecs)
def main():
"""
Record matches database content when defined search gives
exactly one record in the result set. By default the match is
done on the title field.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "0123hVm:fq:c:nv:o:b:i:r:tazx:",
[
"print-new",
"print-match",
"print-ambiguous",
"print-fuzzy",
"help",
"version",
"mode=",
"field=",
"query-string=",
"config=",
"no-process",
"verbose=",
"operator=",
"batch-output=",
"input=",
"remote=",
"text-marc-output",
"alter-recid",
"clean",
"collection=",
"user=",
"no-fuzzy",
"no-validation",
"ascii"
])
except getopt.GetoptError as e:
usage()
match_results = []
qrystrs = [] # list of query strings
print_mode = 0 # default match mode to print new records
noprocess = 0 # dump result in stdout?
operator = "and"
verbose = 1 # 0..be quiet
records = []
batch_output = "" # print stuff in files
f_input = "" # read from where, if param "i"
server_url = CFG_SITE_SECURE_URL # url to server performing search, local by default
modify = 0 # alter output with matched record identifiers
textmarc_output = 0 # output in MARC instead of MARCXML
field = ""
search_mode = None # activates a mode, uses advanced search instead of simple
sleeptime = CFG_BIBMATCH_LOCAL_SLEEPTIME # the amount of time to sleep between queries, changes on remote queries
clean = False # should queries be sanitized?
collections = [] # only search certain collections?
user = ""
password = ""
validate = True # should matches be validate?
fuzzy = True |
"""
Installs and configures MySQL
"""
import uuid
import logging
from packstack.installer import validators
from packstack.installer import utils
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
# Controller object will be initialized from main flow
controller = None

# Plugin name
PLUGIN_NAME = "OS-MySQL"
# Colored variant of the plugin name, used for console output
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')

logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
    """Register the MySQL parameter group with the Packstack controller.

    Called by the Packstack main flow; stores the controller in the
    module-level `controller` and adds the MYSQL group (host, admin
    user, admin password) to it.
    """
    global controller
    controller = controllerObject
    logging.debug("Adding MySQL OpenStack configuration")
    paramsList = [
        # Host on which the MySQL server will be installed / reached.
        {"CMD_OPTION": "mysql-host",
         "USAGE": "The IP address of the server on which to install MySQL",
         "PROMPT": "Enter the IP address of the MySQL server",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_ssh],
         "DEFAULT_VALUE": utils.get_localhost_ip(),
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_MYSQL_HOST",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
        # Admin account name; USE_DEFAULT=True so the prompt is skipped.
        {"CMD_OPTION": "mysql-user",
         "USAGE": "Username for the MySQL admin user",
         "PROMPT": "Enter the username for the MySQL admin user",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "root",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_MYSQL_USER",
         "USE_DEFAULT": True,
         "NEED_CONFIRM": False,
         "CONDITION": False},
        # Admin password; defaults to a random 16-char hex token and is
        # masked/confirmed when entered interactively.
        {"CMD_OPTION": "mysql-pw",
         "USAGE": "Password for the MySQL admin user",
         "PROMPT": "Enter the password for the MySQL admin user",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": uuid.uuid4().hex[:16],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_MYSQL_PW",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},
    ]

    # PRE_CONDITION always yields 'yes' == PRE_CONDITION_MATCH, so the
    # group is unconditionally active.
    groupDict = {"GROUP_NAME": "MYSQL",
                 "DESCRIPTION": "MySQL Config parameters",
                 "PRE_CONDITION": lambda x: 'yes',
                 "PRE_CONDITION_MATCH": "yes",
                 "POST_CONDITION": False,
                 "POST_CONDITION_MATCH": True}
    controller.addGroup(groupDict, paramsList)
def initSequences(controller):
    """Register the MySQL installation sequence with the controller."""
    steps = [
        {
            'title': 'Adding MySQL manifest entries',
            'functions': [createmanifest],
        },
    ]
    controller.addSequence("Installing MySQL", [], [], steps)
def createmanifest(config):
    """Generate the mysql puppet manifest and firewall rules for it.

    Builds <host>_mysql.pp from the install/noinstall template plus one
    per-service template for each enabled OpenStack module, collects the
    hosts that must be allowed through the firewall on port 3306, and
    appends everything via appendManifestFile.
    """
    if config['CONFIG_MYSQL_INSTALL'] == 'y':
        install = True
        suffix = 'install'
    else:
        install = False
        suffix = 'noinstall'

    # In case we are not installing MySQL server, mysql* manifests have
    # to be run from Keystone host
    host = install and config['CONFIG_MYSQL_HOST'] \
           or config['CONFIG_KEYSTONE_HOST']
    manifestfile = "%s_mysql.pp" % host
    manifestdata = [getManifestTemplate('mysql_%s.pp' % suffix)]

    def append_for(module, suffix):
        # Modules have to be appended to the existing mysql.pp
        # otherwise pp will fail for some of them saying that
        # Mysql::Config definition is missing.
        template = "mysql_%s_%s.pp" % (module, suffix)
        manifestdata.append(getManifestTemplate(template))

    append_for("keystone", suffix)
    hosts = set()
    for mod in ['nova', 'cinder', 'glance', 'neutron', 'heat']:
        if config['CONFIG_%s_INSTALL' % mod.upper()] == 'y':
            append_for(mod, suffix)
            # Check which modules are enabled so we can allow their
            # hosts on the firewall
            if mod != 'nova' and mod != 'neutron':
                hosts.add(config.get('CONFIG_%s_HOST' % mod.upper()).strip())
            elif mod == 'neutron':
                hosts.add(config.get('CONFIG_NEUTRON_SERVER_HOST').strip())
            elif config['CONFIG_NOVA_INSTALL'] != 'n':
                # In the remote case that we have lots of nova hosts
                hosts.add(config.get('CONFIG_NOVA_API_HOST').strip())
                hosts.add(config.get('CONFIG_NOVA_CERT_HOST').strip())
                hosts.add(config.get('CONFIG_NOVA_VNCPROXY_HOST').strip())
                hosts.add(config.get('CONFIG_NOVA_CONDUCTOR_HOST').strip())
                hosts.add(config.get('CONFIG_NOVA_SCHED_HOST').strip())
                if config['CONFIG_NEUTRON_INSTALL'] != 'y':
                    # NOTE(review): split_hosts is not imported in the
                    # visible header — presumably imported elsewhere in
                    # this module; confirm it resolves at runtime.
                    dbhosts = split_hosts(config['CONFIG_NOVA_NETWORK_HOSTS'])
                    hosts |= dbhosts
                for host in config.get('CONFIG_NOVA_COMPUTE_HOSTS').split(','):
                    hosts.add(host.strip())

    # Firewall variables consumed by the firewall.pp template.
    config['FIREWALL_ALLOWED'] = ",".join(["'%s'" % i for i in hosts])
    config['FIREWALL_SERVICE_NAME'] = "mysql"
    config['FIREWALL_PORTS'] = "'3306'"
    manifestdata.append(getManifestTemplate("firewall.pp"))

    appendManifestFile(manifestfile, "\n".join(manifestdata), 'pre')
|
# Copyright (C) 2013 Go | ogle Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
| # notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.api import users
from google.appengine.ext import webapp, db
from google.appengine.ext.webapp import template
from handlers.updatebase import UpdateBase
from loggers.recordbotevent import RecordBotEvent
from loggers.recordpatchevent import RecordPatchEvent
from model.attachment import Attachment
from model.queuestatus import QueueStatus
class UpdateStatus(UpdateBase):
    """Handler for the update-status endpoint: GET renders the manual
    update form, POST records a new QueueStatus entry."""

    def get(self):
        # Serve the manual status-update form.
        self.response.out.write(template.render("templates/updatestatus.html", None))

    def _queue_status_from_request(self):
        """Build a QueueStatus entity from the incoming request fields."""
        status = QueueStatus()

        # FIXME: I think this can be removed, no one uses it.
        current_user = users.get_current_user()
        if current_user:
            status.author = current_user

        status.queue_name = self.request.get("queue_name")
        status.bot_id = self.request.get("bot_id")
        status.active_bug_id = self._int_from_request("bug_id")
        status.active_patch_id = self._int_from_request("patch_id")
        status.message = self.request.get("status")
        status.results_file = db.Blob(str(self.request.get("results_file")))
        return status

    def post(self):
        """Persist the status, record bot/patch activity, echo the new id."""
        queue_status = self._queue_status_from_request()
        queue_status.put()
        RecordBotEvent.record_activity(queue_status.queue_name, queue_status.bot_id)
        if queue_status.active_patch_id:
            RecordPatchEvent.updated(queue_status.active_patch_id, queue_status.queue_name, queue_status.message, queue_status.bot_id)
        self.response.out.write(queue_status.key().id())
|
from JumpScale import j
"""
Provides the Params object and the ParamsFactory that is used in the Q-Tree
"""
class ParamsFactory:
    """
    This factory can create new Params objects
    """

    def __init__(self):
        self.__jslocation__ = "j.data.params"

    def get(self, dictObject=None):
        """
        Create and return a new Params object

        @param dictObject: when a dict is given it is adopted as the new
            Params' attribute dict. Defaults to None, which yields an
            empty Params.
        @return: a new Params object
        @rtype: Params
        """
        # BUG FIX: the previous `dictObject={}` default was a single dict
        # shared across all calls; because Params adopts the dict as its
        # __dict__, every default-constructed Params leaked state into
        # every other one. Build a fresh dict per call instead.
        return Params({} if dictObject is None else dictObject)

    def isParams(self, p):
        """
        Return if the argument object is an instance of Params

        @param p: object to check
        @type p: object
        @return: Whether or not `p` is a Params instance
        @rtype: boolean
        """
        return isinstance(p, Params)
class Params:
    """Attribute-bag used throughout the Q-Tree: keys live directly in
    __dict__, so they are reachable both as attributes and via the
    mapping-style helpers below."""

    def __init__(self, dictObject=None):
        # NOTE: the given dict is adopted as __dict__ (not copied), so
        # later mutations of the dict and of this Params are the same.
        if dictObject is not None:
            self.__dict__ = dictObject

    def merge(self, otherParams):
        """Copy every key of `otherParams` into this Params (overwrites)."""
        self.__dict__.update(otherParams.__dict__)

    def get(self, key, defaultvalue=None):
        """Dict-style get: return the value for `key` or `defaultvalue`."""
        return self.__dict__.get(key, defaultvalue)

    def __contains__(self, key):
        return key in self.__dict__

    def __getitem__(self, key):
        return self.__dict__[key]

    def expandParamsAsDict(self, **kwargs):
        """
        adds paramsExtra, tags & params from requestContext if it exists
        return as dict
        for each item given as named argument check it is already in dict and if not add
        e.g. args=self.expandParamsAsDict(id=1,name="test")
        will return a dict with id & name and these values unless if they were set in the params already
        can further use it as follows:
        params.result=infomgr.getInfoWithHeaders(**args)
        full example:
        #############
        args=params.expandParamsAsDict(maxvalues=100,id=None,start="-3d",stop=None)
        args["start"]=j.data.time.getEpochAgo(args["start"])
        args["stop"]=j.data.time.getEpochFuture(args["stop"])
        params.result=j.apps.system.infomgr.extensions.infomgr.addInfo(**args)
        """
        params = self
        # NOTE(review): getDict() returns __dict__ itself (no copy), so the
        # update/pop calls below also mutate this Params instance — looks
        # intentional but confirm against callers.
        params2 = params.getDict()
        if "paramsExtra" in params and params.paramsExtra is not None:
            params2.update(params.paramsExtra)
        if "requestContext" in params and params.requestContext is not None:
            params2.update(params.requestContext.params)
        # assumes tags, when set, is an object exposing getDict() — TODO confirm
        if "tags" in params and params2["tags"] != "":
            params2.update(params2["tags"].getDict())
        # The merged meta-keys themselves are stripped from the result.
        for item in ["requestContext", "tags", "paramsExtra"]:
            if item in params:
                params2.pop(item)
        if len(kwargs) == 0:
            return params2
        # With kwargs given: return only the requested keys that exist.
        result = {}
        for key in list(kwargs.keys()):
            if key in params2:
                result[key] = params2[key]
        return result

    def expandParams(self, **kwargs):
        """
        adds paramsExtra, tags & params from requestContext if it exists
        returns params but not needed because params just get modified to have all these extra arguments/params as properties
        set default as params to this method e.g.
        expandParams(id=10,hight=100)
        """
        def getArgs(d):
            # Prefix reserved names with arg_ so they cannot clobber the
            # page/macro machinery attributes of this Params.
            r = {}
            reserved = ["name", "doc", "macro",
                        "macrostr", "cmdstr", "page", "tags"]
            for key in list(d.keys()):
                if key in reserved:
                    r["arg_%s" % key] = d[key]
                else:
                    r[key] = d[key]
            return r
        if "paramsExtra" in self and self.paramsExtra is not None:
            self.setDict(getArgs(self.paramsExtra))
        # self.pop("paramsExtra")
        if "requestContext" in self and self.requestContext is not None:
            self.setDict(getArgs(self.requestContext.params))
        # self.pop("requestContext")
        # assumes tags, when set, exposes getDict() — TODO confirm
        if "tags" in self and self.tags != "":
            self.setDict(getArgs(self.tags.getDict()))
        # self.pop("tags")
        # kwargs act as defaults: only set when not already present.
        for argname in list(kwargs.keys()):
            if argname not in self.__dict__:
                self.__dict__[argname] = kwargs[argname]
        return self

    def getTag(self, name, default=None):
        """Return tag `name` from self.tags, or `default` when the tag is
        missing, falsy, or an unexpanded '$$' macro placeholder."""
        tags = getattr(self, 'tags', None)
        if not tags:
            return default
        tags = tags.getDict()
        tag = tags.get(name)
        # '$$...' values are unexpanded macro placeholders, treat as unset.
        if tag and j.data.text.toStr(tag).startswith('$$'):
            return default
        if not tag:
            return default
        return tag

    def pop(self, key):
        """Remove `key` if present; silently ignore a missing key."""
        if key in self:
            self.__dict__.pop(key)

    def has_key(self, key):
        # Python2-style API kept for backward compatibility.
        return key in self.__dict__

    def getDict(self):
        """Return the underlying attribute dict (NOT a copy)."""
        return self.__dict__

    def setDict(self, dictObject):
        """Merge `dictObject` into the underlying attribute dict."""
        self.__dict__.update(dictObject)

    def extend(self, params):
        """
        Update this Params object with the contents of the argument Params
        object

        @param params: the Params or dict object to update from
        @type params: dict or Params
        @raise TypeError: if the argument is not a dict or Params object
        """
        if isinstance(params, Params):
            d = params.__dict__
        elif isinstance(params, dict):
            d = params
        else:
            raise TypeError("Argument params is of an unknown type %s" %
                            type(params))
        self.__dict__.update(d)

    # def __dir__(self):
    #     return sorted(dir(super(Params, self)) + self.__dict__.keys())

    def __repr__(self):
        parts = ["PARAMS:"]
        for key, value in list(self.__dict__.items()):
            parts.append("    %s:%s" % (key, value))
        return "\n".join(parts)

    def __str__(self):
        return self.__repr__()
|
import mahotas as mh
from sklearn import cross_validation
from sklearn.linear_model.logistic import LogisticRegression
import numpy as np
from glob import glob
from edginess import edginess_sobe | l
# basedir = 'simple-dataset'
basedir = 'simple-dataset/'


def features_for(im):
    """Return the mean Haralick texture features for the image at path `im`."""
    im = mh.imread(im, as_grey=True).astype(np.uint8)
    return mh.features.haralick(im).mean(0)


features = []
sobels = []
labels = []
images = glob('{}/*.jpg'.format(basedir))
for im in images:
    features.append(features_for(im))
    sobels.append(edginess_sobel(mh.imread(im, as_grey=True)))
    # Class label = filename with the trailing '00.jpg' (index + extension)
    # stripped, e.g. 'building00.jpg' -> 'building'.
    labels.append(im[:-len('00.jpg')])
features = np.array(features)
labels = np.array(labels)

n = features.shape
nl = labels.shape
print('features=' + str(n))
print(str(features))
print('labels=' + str(nl))
print(str(labels))

scores = cross_validation.cross_val_score(LogisticRegression(), features, labels, cv=5)
print('Accuracy (5 fold x-val) with Logistic Regression [std features]: {}%'.format(
    0.1 * round(1000 * scores.mean())))

# Append the sobel edginess value as one extra feature column.
# (Previously the per-fold scores were reduced with .mean() here AND again
# in the print below — redundant and fragile; keep the raw fold scores.)
scores = cross_validation.cross_val_score(
    LogisticRegression(), np.hstack([np.atleast_2d(sobels).T, features]), labels, cv=5)
print('Accuracy (5 fold x-val) with Logistic Regression [std features + sobel]: {}%'.format(
    0.1 * round(1000 * scores.mean())))
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Builds applications in debug mode:
- Copies the module directories into their destinations.
- Copies app.html as-is.
"""
from os import path
from os.path import join
import os
import shutil
import sys
import modular_build
def main(argv):
    """Parse --input_path/--output_path/--build_stamp from argv, mirror the
    input into the output location, then touch the build stamp file.

    Prints usage and re-raises when any required flag (or its value) is
    missing.
    """
    try:
        input_path_flag_index = argv.index('--input_path')
        input_path = argv[input_path_flag_index + 1]
        output_path_flag_index = argv.index('--output_path')
        output_path = argv[output_path_flag_index + 1]
        build_stamp_index = argv.index('--build_stamp')
        build_stamp_path = argv[build_stamp_index + 1]
    except (ValueError, IndexError):
        # ValueError: a flag is absent; IndexError: a flag has no value.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit before re-raising.)
        print('Usage: %s app_1 app_2 ... app_N --input_path <input_path> --output_path <output_path>' % argv[0])
        raise
    symlink_dir_or_copy(input_path, output_path)
    with open(build_stamp_path, 'w') as file:
        file.write('stamp')
def symlink_dir_or_copy(src, dest):
    """Symlink `dest` to `src` when the platform supports symlinks,
    otherwise copy the directory contents entry by entry."""
    if not hasattr(os, 'symlink'):
        # No symlink support: copy each entry of src into dest.
        for filename in os.listdir(src):
            entry = join(os.getcwd(), src, filename)
            if os.path.isdir(entry):
                copy_dir(entry, join(dest, filename))
            else:
                copy_file(entry, join(dest, filename), safe=True)
        return

    # Remove whatever currently occupies dest, then link it to src.
    if path.exists(dest):
        if os.path.islink(dest):
            os.unlink(dest)
        else:
            shutil.rmtree(dest)
    os.symlink(join(os.getcwd(), src), dest)
def copy_file(src, dest, safe=False):
    """Copy ``src`` to ``dest``.

    With ``safe`` set, a pre-existing ``dest`` is deleted first so the
    copy never writes through a stale file.
    """
    stale_dest = safe and path.exists(dest)
    if stale_dest:
        os.remove(dest)
    shutil.copy(src, dest)
def copy_dir(src, dest):
    """Recursively copy the directory tree at ``src`` to ``dest``,
    replacing any existing ``dest`` tree."""
    if path.exists(dest):
        shutil.rmtree(dest)
    for src_dir, _dirs, files in os.walk(src):
        rel = path.relpath(src_dir, src)
        target_dir = path.normpath(join(dest, rel))
        # dest was removed above, so every directory is created fresh.
        os.makedirs(target_dir)
        for name in files:
            copy_file(join(os.getcwd(), src_dir, name), join(target_dir, name))
if __name__ == '__main__':
    # main() returns None on success, which sys.exit treats as status 0.
    sys.exit(main(sys.argv))
|
lity(QtCore.Qt.NonModal)
frmIntrodDatos.resize(975, 591)
frmIntrodDatos.setWindowTitle(QtGui.QApplication.translate("frmIntrodDatos", "TopoDelProp. Introducción de datos de: ", None, QtGui.QApplication.UnicodeUTF8))
frmIntrodDatos.setModal(False)
self.tableWidget = QtGui.QTableWidget(frmIntrodDatos)
self.tableWidget.setGeometry(QtCore.QRect(10, 40, 741, 371))
self.tableWidget.setToolTip(QtGui.QApplication.translate("frmIntrodDatos", "Pinche sobre el campo a rellenar", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setAutoScroll(True)
self.tableWidget.setAutoScrollMargin(0)
self.tableWidget.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.tableWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.tableWidget.setCornerButtonEnabled(True)
self.tableWidget.setRowCount(5)
self.tableWidget.setColumnCount(2)
self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
self.tableWidget.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidget.horizontalHeader().setStretchLastSection(False)
self.tableWidget.verticalHeader().setVisible(True)
self.tableWidget.verticalHeader().setCascadingSectionResizes(False)
self.tableWidget.verticalHeader().setHighlightSections(True)
self.listWidget = QtGui.QListWidget(frmIntrodDatos)
self.listWidget.setGeometry(QtCore.QRect(750, 70, 221, 341))
self.listWidget.setToolTip(QtGui.QApplication.translate("frmIntrodDatos", "Seleccione el valor a introducir en la tabla", None, QtGui.QApplication.UnicodeUTF8))
self.listWidget.setStatusTip(_fromUtf8(""))
self.listWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.listWidget.setAutoScroll(True)
self.listWidget.setAlternatingRowColors(True)
self.listWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerItem)
self.listWidget.setObjectName(_fromUtf8("listWidget"))
self.bttGuardar = QtGui.QPushButton(frmIntrodDatos)
self.bttGuardar.setGeometry(QtCore.QRect(730, 420, 131, 31))
self.bttGuardar.setToolTip(QtGui.QApplication.translate("frmIntrodDatos", "Guarda los cambios del elemento", None, QtGui.QApplication.UnicodeUTF8))
self.bttGuardar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Guardar cambios", None, QtGui.QApplication.UnicodeUTF8))
self.bttGuardar.setObjectName(_fromUtf8("bttGuardar"))
self.bttTerminar = QtGui.QPushButton(frmIntrodDatos)
self.bttTerminar.setGeometry(QtCore.QRect(860, 420, 111, 31))
self.bttTerminar.setToolTip(QtGui.QApplication.translate("frmIntrodDatos", "Termina la introduccion de datos del elemento", None, QtGui.QApplication.UnicodeUTF8))
self.bttTerminar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Terminar", None, QtGui.QApplication.UnicodeUTF8))
self.bttTerminar.setObjectName(_fromUtf8("bttTerminar"))
self.label = QtGui.QLabel(frmIntrodDatos)
self.label.setGeometry(QtCore.QRect(10, 10, 221, 16))
self.label.setText(QtGui.QApplication.translate("frmIntrodDatos", "Datos del elemento. Elija un campo:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.lbLista = QtGui.QLabel(frmIntrodDatos)
self.lbLista.setGeometry(QtCore.QRect(750, 20, 201, 16))
self.lbLista.setText(QtGui.QApplication.translate("frmIntrodDatos", "Valores del campo seleccionado:", None, QtGui.QApplication.UnicodeUTF8))
self.lbLista.setObjectName(_fromUtf8("lbLista"))
self.lbEstado = QtGui.QLabel(frmIntrodDatos)
self.lbEstado.setGeometry(QtCore.QRect(10, 490, 951, 101))
self.lbEstado.setText(_fromUtf8(""))
self.lbEstado.setObjectName(_fromUtf8("lbEstado"))
self.tbId_trabajo = QtGui.QLineEdit(frmIntrodDatos)
self.tbId_trabajo.setEnabled(False)
self.tbId_trabajo.setGeometry(QtCore.QRect(120, 420, 121, 22))
self.tbId_trabajo.setReadOnly(False)
self.tbId_trab | ajo.setObjectName(_fromUtf8("tbId_trabajo"))
self.tbSrc_trabajo = QtGui.QLineEdit(frmIntrodDatos)
self.tbSrc_trabajo.setEnabled(False)
self.tbSrc_trabajo.setGeometry(QtCore.QRect(120, 440, 121, 2 | 2))
self.tbSrc_trabajo.setReadOnly(False)
self.tbSrc_trabajo.setObjectName(_fromUtf8("tbSrc_trabajo"))
self.label_2 = QtGui.QLabel(frmIntrodDatos)
self.label_2.setGeometry(QtCore.QRect(20, 420, 81, 16))
self.label_2.setText(QtGui.QApplication.translate("frmIntrodDatos", "ID del trabajo:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(frmIntrodDatos)
self.label_3.setGeometry(QtCore.QRect(20, 440, 101, 16))
self.label_3.setText(QtGui.QApplication.translate("frmIntrodDatos", "SRC del trabajo:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.bttEditar = QtGui.QPushButton(frmIntrodDatos)
self.bttEditar.setGeometry(QtCore.QRect(640, 420, 91, 31))
self.bttEditar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Editar", None, QtGui.QApplication.UnicodeUTF8))
self.bttEditar.setObjectName(_fromUtf8("bttEditar"))
self.bttBuscar = QtGui.QPushButton(frmIntrodDatos)
self.bttBuscar.setGeometry(QtCore.QRect(490, 420, 71, 31))
self.bttBuscar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Buscar", None, QtGui.QApplication.UnicodeUTF8))
self.bttBuscar.setObjectName(_fromUtf8("bttBuscar"))
self.bttDescargar = QtGui.QPushButton(frmIntrodDatos)
self.bttDescargar.setGeometry(QtCore.QRect(270, 420, 121, 31))
self.bttDescargar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Descargar archivo", None, QtGui.QApplication.UnicodeUTF8))
self.bttDescargar.setObjectName(_fromUtf8("bttDescargar"))
self.bttNuevo = QtGui.QPushButton(frmIntrodDatos)
self.bttNuevo.setGeometry(QtCore.QRect(560, 420, 81, 31))
self.bttNuevo.setText(QtGui.QApplication.translate("frmIntrodDatos", "Nuevo", None, QtGui.QApplication.UnicodeUTF8))
self.bttNuevo.setObjectName(_fromUtf8("bttNuevo"))
self.txtFiltrar = QtGui.QLineEdit(frmIntrodDatos)
self.txtFiltrar.setGeometry(QtCore.QRect(750, 40, 221, 31))
self.txtFiltrar.setObjectName(_fromUtf8("txtFiltrar"))
self.bttBorrar = QtGui.QPushButton(frmIntrodDatos)
self.bttBorrar.setGeometry(QtCore.QRect(390, 420, 101, 31))
self.bttBorrar.setToolTip(QtGui.QApplication.translate("frmIntrodDatos", "Borra este registro", None, QtGui.QApplication.UnicodeUTF8))
self.bttBorrar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Borrar ", None, QtGui.QApplication.UnicodeUTF8))
self.bttBorrar.setObjectName(_fromUtf8("bttBorrar"))
self.tbMunicipio = QtGui.QLineEdit(frmIntrodDatos)
self.tbMunicipio.setEnabled(False)
self.tbMunicipio.setGeometry(QtCore.QRect(120, 460, 381, 22))
self.tbMunicipio.setReadOnly(False)
self.tbMunicipio.setObjectName(_fromUtf8("tbMunicipio"))
self.label_4 = QtGui.QLabel(frmIntrodDatos)
self.label_4.setGeometry(QtCore.QRect(20, 460, 101, 16))
self.label_4.setText(QtGui.QApplication.translate("frmIntrodDatos", "Municipio:", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.retranslateUi(frmIntrodDatos)
QtCore.QMetaObject.connectSlotsByName(frmIntrodDatos)
    def retranslateUi(self, frmIntrodDatos):
        # All user-visible strings are already set via
        # QApplication.translate() inside setupUi, so there is nothing
        # left to retranslate here (pyuic-generated stub).
        pass
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
frmIntrodDatos = QtGui.QDialog |
eserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PeerExpressRout | eCircuitCo | nnectionsOperations(object):
"""PeerExpressRouteCircuitConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, service configuration and the
        (de)serializers used by this operation group.

        Autogenerated by AutoRest; edits are lost on regeneration.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PeerExpressRouteCircuitConnection"
"""Gets the specified Peer Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the peer express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeerExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.PeerExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PeerExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PeerExpressRouteCircuitConnectionListResult"]
"""Gets all global reach peer connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PeerExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.PeerExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameter |
if __name__ == '__main__':
    import os
    import sys
    # First CLI argument: the debugger port the test harness listens on.
    port = int(sys.argv[1])
    # Make the project root importable when this file is run as a script.
    root_dirname = os.path.dirname(os.path.dirname(__file__))
    if root_dirname not in sys.path:
        sys.path.append(root_dirname)
    print('before pydevd.settrace')
    # Built-in breakpoint() forwards kwargs to sys.breakpointhook; a custom
    # sitecustomize.py presumably installs a hook accepting ``port`` that
    # attaches pydevd -- TODO confirm the harness provides that hook.
    breakpoint(port=port)  # Set up through custom sitecustomize.py
    print('after pydevd.settrace')
    # NOTE(review): 'SUCEEDED' is misspelled, but the harness presumably
    # matches this exact sentinel string -- do not "fix" it casually.
    print('TEST SUCEEDED!')
| |
# -*- coding: utf-8 -*-
# Copyright 2019 OpenSynergy Indonesia
# Copyright 2022 PT. Simetri Sinergi Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# pylint: disable=locally-disabled, manife | st-required-author
{
"name": "Employee Job Family From Contract",
"version": "8.0.1.0.0",
"category": "Human Resource",
"website": "https://simetri-sinergi.id",
"author": "OpenSynergy Indonesia, PT. Simetri Sinergi Indonesia",
"license": "AGPL-3",
"installable": True,
"depends": [
"hr_emplo | yee_data_from_contract",
"hr_job_family_modelling",
],
"data": [
"views/hr_contract_views.xml",
],
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import random
sys.path.append('.')
from twisted.internet import re | actor
from twisted.python import log
from . import driver
from . import multiplexer
from . import record_layer
from . import updater
from . import dsl
from . import conf
EVENT_LOOP_FREQUENCY_S = 0.01
AUTOUPDATE_DELAY = 5
class Client(object):
    """Marionette client: multiplexes application streams over a driver
    speaking the configured ``(format_name, format_version)`` protocol."""
    def __init__(self, format_name, format_version):
        self.multiplexer_outgoing_ = multiplexer.BufferOutgoing()
        self.multiplexer_incoming_ = multiplexer.BufferIncoming()
        self.multiplexer_incoming_.addCallback(self.process_cell)
        self.streams_ = {}
        # Stream IDs are random non-zero 32-bit values.
        self.stream_counter_ = random.randint(1, 2 ** 32 - 1)
        self.set_driver(format_name, format_version)
        self.reload_ = False
        # The first update check runs once, shortly after startup.
        reactor.callLater(AUTOUPDATE_DELAY, self.check_for_update)
    def set_driver(self, format_name, format_version=None):
        """(Re)create the client driver for *format_name*.

        When *format_version* is None the latest known version is used.
        """
        self.format_name_ = format_name
        if format_version is None:  # was ``== None``; identity test is idiomatic
            self.format_version_ = dsl.get_latest_version(
                'client', format_name)
        else:
            self.format_version_ = format_version
        self.driver_ = driver.ClientDriver("client")
        self.driver_.set_multiplexer_incoming(self.multiplexer_incoming_)
        self.driver_.set_multiplexer_outgoing(self.multiplexer_outgoing_)
        self.driver_.setFormat(self.format_name_, self.format_version_)
    def get_format(self):
        """Return the active format as a ``name:version`` string."""
        return str(self.format_name_) + ':' + str(self.format_version_)
    def execute(self, reactor):
        """Event-loop tick: run the driver while active, otherwise reset it
        (reloading formats from disk first when a reload was requested)."""
        if self.driver_.isRunning():
            self.driver_.execute(reactor)
        else:
            if self.reload_:
                self.set_driver(self.format_name_)
                self.reload_ = False
            self.driver_.reset()
        reactor.callLater(EVENT_LOOP_FREQUENCY_S, self.execute, reactor)
    def process_cell(self, cell_obj):
        """Dispatch an incoming cell's payload to its stream's queue."""
        payload = cell_obj.get_payload()
        if payload:
            stream_id = cell_obj.get_stream_id()
            try:
                self.streams_[stream_id].srv_queue.put(payload)
            except (KeyError, AttributeError):
                # Narrowed from a bare ``except:``: KeyError when the stream
                # was already terminated, AttributeError when it was created
                # without a srv_queue. Best-effort: log and drop the payload.
                log.msg("Client.process_cell: Caught KeyError exception for stream_id :%d"
                        % (stream_id))
        return
    def start_new_stream(self, srv_queue=None):
        """Create, register and return a new multiplexed stream."""
        stream = multiplexer.MarionetteStream(
            self.multiplexer_incoming_,
            self.multiplexer_outgoing_,
            self.stream_counter_,
            srv_queue)
        stream.host = self
        self.streams_[self.stream_counter_] = stream
        self.stream_counter_ = random.randint(1, 2 ** 32 - 1)
        return stream
    def terminate(self, stream_id):
        """Forget the stream registered under *stream_id*."""
        del self.streams_[stream_id]
    # call this function if you want to reload formats from disk
    # at the next possible time
    def reload_driver(self):
        self.reload_ = True
    def check_for_update(self):
        """Run one autoupdate pass when enabled in the configuration."""
        # uncomment the following line to check for updates every N seconds
        # instead of just on startup
        # reactor.callLater(N, self.check_for_update, reactor)
        if conf.get("general.autoupdate"):
            self.do_update(self.reload_driver)
    def do_update(self, callback):
        """Fetch new formats from the update server, then invoke *callback*.

        Could be replaced with code that updates from a different source
        (e.g., local computations).
        """
        update_server = conf.get("general.update_server")
        # BUG FIX: the local used to be named ``updater``, shadowing the
        # ``updater`` module and raising UnboundLocalError on this line.
        format_updater = updater.FormatUpdater(
            update_server, use_marionette=True, callback=callback)
        return format_updater.do_update()
|
_order_line or [])))
return res
_columns = {
'website_order_line': fields.one2many(
'sale.order.line', 'order_id',
string='Order Lines displayed on Website', readonly=True,
help='Order Lines to be displayed on the website. They should not be used for computation purpose.',
),
'cart_quantity': fields.function(_cart_qty, type='integer', string='Cart Quantity'),
'payment_acquirer_id': fields.many2one('payment.acquirer', 'Payment Acquirer', on_delete='set null', copy=False),
'payment_tx_id': fields.many2one('payment.transaction', 'Transaction', on_delete='set null', copy=False),
}
    def _get_errors(self, cr, uid, order, context=None):
        # Checkout-validation hook: return a list of blocking errors for
        # *order*; the base implementation reports none. Presumably meant
        # to be overridden by extension modules.
        return []
def _get_website_data(self, cr, uid, order, context):
return {
'partner': order.partner_id.id,
'order': order
}
    def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
        # Find the order line(s) of this order holding *product_id*
        # (optionally narrowed to a specific *line_id*); search is done as
        # superuser since website visitors lack direct line access.
        # NOTE(review): the ``return`` sits inside the loop, so only the
        # first order in ``ids`` is considered -- presumably callers always
        # pass a single id.
        for so in self.browse(cr, uid, ids, context=context):
            domain = [('order_id', '=', so.id), ('product_id', '=', product_id)]
            if line_id:
                domain += [('id', '=', line_id)]
            return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
    def _website_product_id_change(self, cr, uid, ids, order_id, product_id, qty=0, line_id=None, context=None):
        # Build the values dict for creating/updating a website cart line by
        # replaying the standard product_id_change onchange as superuser.
        so = self.pool.get('sale.order').browse(cr, uid, order_id, context=context)
        values = self.pool.get('sale.order.line').product_id_change(cr, SUPERUSER_ID, [],
            pricelist=so.pricelist_id.id,
            product=product_id,
            partner_id=so.partner_id.id,
            fiscal_position=so.fiscal_position.id,
            qty=qty,
            context=context
        )['value']
        if line_id:
            # Updating an existing line: keep its (possibly customised) name.
            line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context)
            values['name'] = line.name
        else:
            # NOTE(review): description_sale may be False, which renders as
            # the literal string "False" in the name -- confirm if intended.
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            values['name'] = "%s\n%s" % (product.display_name, product.description_sale)
        values['product_id'] = product_id
        values['order_id'] = order_id
        if values.get('tax_id') != None:
            # Wrap the tax ids in an ORM (6, 0, ids) replace command.
            values['tax_id'] = [(6, 0, values['tax_id'])]
        return values
    def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
        """ Add or set product quantity, add_qty can be negative """
        sol = self.pool.get('sale.order.line')
        quantity = 0
        for so in self.browse(cr, uid, ids, context=context):
            if line_id != False:
                # Try to reuse an existing line for this product.
                line_ids = so._cart_find_product_line(product_id, line_id, context=context, **kwargs)
                if line_ids:
                    line_id = line_ids[0]
            # Create line if no line with product_id can be located
            if not line_id:
                values = self._website_product_id_change(cr, uid, ids, so.id, product_id, qty=1, context=context)
                line_id = sol.create(cr, SUPERUSER_ID, values, context=context)
                if add_qty:
                    # The new line already carries qty=1; don't count it twice.
                    add_qty -= 1
            # compute new quantity
            if set_qty:
                quantity = set_qty
            elif add_qty != None:
                quantity = sol.browse(cr, SUPERUSER_ID, line_id, context=context).product_uom_qty + (add_qty or 0)
            # Remove zero or negative lines
            if quantity <= 0:
                sol.unlink(cr, SUPERUSER_ID, [line_id], context=context)
            else:
                # update line
                values = self._website_product_id_change(cr, uid, ids, so.id, product_id, qty=quantity, line_id=line_id, context=context)
                values['product_uom_qty'] = quantity
                sol.write(cr, SUPERUSER_ID, [line_id], values, context=context)
        return {'line_id': line_id, 'quantity': quantity}
    def _cart_accessories(self, cr, uid, ids, context=None):
        # Suggest up to 3 random accessory products: accessories of the
        # products shown in the website cart, minus products already ordered.
        # NOTE(review): returns during the first iteration, so only the
        # first order in ``ids`` is handled.
        for order in self.browse(cr, uid, ids, context=context):
            s = set(j.id for l in (order.website_order_line or []) for j in (l.product_id.accessory_product_ids or []))
            s -= set(l.product_id.id for l in order.order_line)
            product_ids = random.sample(s, min(len(s),3))
            return self.pool['product.product'].browse(cr, uid, product_ids, context=context)
class website(orm.Model):
_inherit = 'website'
_columns = {
'pricelist_id': fields.related('user_id','partner_id','property_product_pricelist',
type='many2one', relation='product.pricelist', string='Default Pricelist'),
'currency_id': fields.related('pricelist_id','currency_id',
type='many2one', relation='res.currency', string='Default Currency'),
}
    def sale_product_domain(self, cr, uid, ids, context=None):
        # Base search domain for products sold on the website; presumably
        # extended by overriding modules (categories, brands, ...).
        return [("sale_ok", "=", True)]
def sale_get_order(self, cr, uid, ids, force_create=False, code=None, update_pricelist=None, context=None):
sale_order_obj = self.pool['sale.order']
sale_order_id = request.session.get('sale_order_id')
sale_order = None
# create so if needed
if not sale_order_id and (force_create or code):
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
for w in self.browse(cr, uid, ids):
values = {
'user_id': w.user_id.id,
'partner_id': partner.id,
'pricelist_id': partner.property_product_pricelist.id,
'section_id': self.pool.get('ir.model.data').get_object_reference(cr, uid, 'website', 'salesteam_website_sales')[1],
}
sale_order_id = sale_order_obj.create(cr, SUPERUSER_ID, values, context=context)
values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner.id, context=context)['value']
sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)
request.session['sale_order_id'] = sale_order_id
if sale_order_id:
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order_id, context=context)
if not sale_order.exists():
request.session['sale_order_id'] = None
return None
# check for change of pricelist with a coupon
if code and | code != sale_order.pricelist_id.code:
pricelist_ids = self.pool['product.pricelist'].search(cr, SUPERUSER_ID, [('code', '=', code)], context=context)
if pricelist_ids:
pricelist_id = pricelist_ids[0]
request.session['sale_order_code_pricelist_id'] = pricelist_id
update_pricelist = True
pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pri | celist.id
# check for change of partner_id ie after signup
if sale_order.partner_id.id != partner.id and request.website.partner_id.id != partner.id:
flag_pricelist = False
if pricelist_id != sale_order.pricelist_id.id:
flag_pricelist = True
fiscal_position = sale_order.fiscal_position and sale_order.fiscal_position.id or False
values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [sale_order_id], partner.id, context=context)['value']
if values.get('fiscal_position'):
order_lines = map(int,sale_order.order_line)
values.update(sale_order_obj.onchange_fiscal_position(cr, SUPERUSER_ID, [],
values['fiscal_position'], [[6, 0, order_lines]], context=context)['value'])
values['partner_ |
def get_loss(self, loss_function, output, target, aggregation=None):
from lasagne.objectives import Objective
input_layer = self.input_layer(output)
obj = Objective(input_layer, loss_function)
return obj.get_loss(target=target, aggregation=aggregation)
@pytest.fixture
def get_masked_loss(self, loss_function, output, target, mask,
aggregation=None):
from lasagne.objectives import MaskedObjective
input_layer = self.input_layer(output)
obj = MaskedObjective(input_layer, loss_function)
return obj.get_loss(target=target, mask=mask,
aggregation=aggregation)
    def test_mse(self):
        """Mean-squared-error under every supported aggregation mode, with
        and without a (1-D broadcast or full 2-D) mask."""
        from lasagne.objectives import mse
        output = np.array([
            [1.0, 0.0, 3.0, 0.0],
            [-1.0, 0.0, -1.0, 0.0],
        ])
        target = np.zeros((2, 4))
        # Row mask (broadcast over columns) and its expanded 2-D equivalent.
        mask = np.array([[1.0], [0.0]])
        mask_2d = np.array([[1.0, 1.0, 1.0, 1.0],
                            [0.0, 0.0, 0.0, 0.0]])
        # Sqr-error sum = 1**2 + (-1)**2 + (-1)**2 + 3**2 = 12
        # Mean is 1.5
        result = self.get_loss(mse, output, target, aggregation='mean')
        assert result.eval() == 1.5
        result = self.get_loss(mse, output, target, aggregation='sum')
        assert result.eval() == 12
        # Masked error sum is 1**2 + 3**2
        result_with_mask = self.get_masked_loss(mse, output, target,
                                                mask, aggregation='sum')
        assert result_with_mask.eval() == 10
        result_with_mask = self.get_masked_loss(mse, output, target,
                                                mask_2d, aggregation='sum')
        assert result_with_mask.eval() == 10
        # 'mean' divides by the number of elements (8), not by the mask sum.
        result_with_mask = self.get_masked_loss(mse, output, target,
                                                mask, aggregation='mean')
        assert result_with_mask.eval() == 10/8.0
        result_with_mask = self.get_masked_loss(mse, output, target,
                                                mask_2d, aggregation='mean')
        assert result_with_mask.eval() == 10/8.0
        # Default (None) aggregation behaves like 'mean' here.
        result_with_mask = self.get_masked_loss(mse, output, target,
                                                mask, aggregation=None)
        assert result_with_mask.eval() == 10/8.0
        result_with_mask = self.get_masked_loss(mse, output, target,
                                                mask_2d, aggregation=None)
        assert result_with_mask.eval() == 10/8.0
        # 'normalized_sum' divides by the mask sum (1 vs. 4 here).
        result_with_mask = self.get_masked_loss(mse, output, target, mask,
                                                aggregation='normalized_sum')
        assert result_with_mask.eval() == 10
        result_with_mask = self.get_masked_loss(mse, output, target, mask_2d,
                                                aggregation='normalized_sum')
        assert result_with_mask.eval() == 10/4.0
    def test_binary_crossentropy(self):
        """Binary cross-entropy under every aggregation mode; outputs are
        powers of e so the per-element losses are exact integers."""
        from lasagne.objectives import binary_crossentropy
        output = np.array([
            [np.e ** -2]*4,
            [np.e ** -1]*4,
        ])
        target = np.ones((2, 4))
        mask = np.array([[0.0], [1.0]])
        mask_2d = np.array([[0.0]*4,
                            [1.0]*4])
        # Cross entropy sum is (2*4) + (1*4) = 12
        # Mean is 1.5
        result = self.get_loss(binary_crossentropy, output, target,
                               aggregation='mean')
        assert result.eval() == 1.5
        result = self.get_loss(binary_crossentropy, output, target,
                               aggregation='sum')
        assert result.eval() == 12
        # Masked cross entropy sum is 1*4*1 = 4
        result_with_mask = self.get_masked_loss(binary_crossentropy,
                                                output, target, mask,
                                                aggregation='sum')
        assert result_with_mask.eval() == 4
        result_with_mask = self.get_masked_loss(binary_crossentropy,
                                                output, target, mask_2d,
                                                aggregation='sum')
        assert result_with_mask.eval() == 4
        # 'mean' divides by all 8 elements: 4/8 = 0.5.
        result_with_mask = self.get_masked_loss(binary_crossentropy,
                                                output, target, mask,
                                                aggregation='mean')
        assert result_with_mask.eval() == 1/2.0
        result_with_mask = self.get_masked_loss(binary_crossentropy,
                                                output, target, mask_2d,
                                                aggregation='mean')
        assert result_with_mask.eval() == 1/2.0
        # 'normalized_sum' divides by the mask sum (1 vs. 4).
        result_with_mask = self.get_masked_loss(binary_crossentropy,
                                                output, target, mask,
                                                aggregation='normalized_sum')
        assert result_with_mask.eval() == 4
        result_with_mask = self.get_masked_loss(binary_crossentropy,
                                                output, target, mask_2d,
                                                aggregation='normalized_sum')
        assert result_with_mask.eval() == 1
    def test_categorical_crossentropy(self):
        """Categorical cross-entropy with both index (1-hot) and dense 2-D
        targets, plus a per-row mask; probabilities are chosen so each row's
        NLL is an exact integer (1, 2, 3)."""
        from lasagne.objectives import categorical_crossentropy
        output = np.array([
            [1.0, 1.0-np.e**-1, np.e**-1],
            [1.0-np.e**-2, np.e**-2, 1.0],
            [1.0-np.e**-3, 1.0, np.e**-3]
        ])
        target_1hot = np.array([2, 1, 2])
        target_2d = np.array([
            [0.0, 0.0, 1.0],
            [0.0, 1.0, 0.0],
            [0.0, 0.0, 1.0],
        ])
        mask_1hot = np.array([0, 1, 1])
        # Multinomial NLL sum is 1 + 2 + 3 = 6
        # Mean is 2
        result = self.get_loss(categorical_crossentropy, output, target_1hot,
                               aggregation='mean')
        assert result.eval() == 2
        result = self.get_loss(categorical_crossentropy, output, target_1hot,
                               aggregation='sum')
        assert result.eval() == 6
        # Multinomial NLL sum is (0*0 + 1*0 + 1*1) + (2*0 + 2*1 + 0*0)
        # + (3*0 + 0*0 + 3*1) = 6
        # Mean is 2
        result = self.get_loss(categorical_crossentropy, output, target_2d,
                               aggregation='mean')
        assert result.eval() == 2
        result = self.get_loss(categorical_crossentropy, output, target_2d,
                               aggregation='sum')
        assert result.eval() == 6
        # Masked NLL sum is 2 + 3 = 5
        result_with_mask = self.get_masked_loss(categorical_crossentropy,
                                                output, target_1hot,
                                                mask_1hot,
                                                aggregation='sum')
        assert result_with_mask.eval() == 5
        # Masked NLL sum is 2 + 3 = 5; 'mean' divides by 3 rows.
        result_with_mask = self.get_masked_loss(categorical_crossentropy,
                                                output, target_2d, mask_1hot,
                                                aggregation='mean')
        assert abs(result_with_mask.eval() - 5.0/3.0) < 1.0e-9
        # Masked NLL sum is 2 + 3 = 5; normalized by mask sum (2).
        result_with_mask = self.get_masked_loss(categorical_crossentropy,
                                                output, target_2d, mask_1hot,
                                                aggregation='normalized_sum')
        assert result_with_mask.eval() == 5.0/2.0
def test_objective(self):
from lasagne.objectives import Objective
from lasagne.layers.input import Layer, InputLayer
input_layer = mock.Mock(InputLayer((None,)), output_shape=(None,))
layer = mock.Mock(Layer(input_layer), output_shape=(None,))
layer.input_layer = input_layer
loss_function = mock.Mock()
input, target, kwarg1 = theano.tensor.vector(), object(), object()
objective = Objective(layer, loss_function)
result = objective.get_loss(input, target, 'mean', kwarg1=kwarg1)
# We expect that the layer's `get |
# coding: utf8
from ...symbols import (
ADJ, DET, NOUN, NUM, PRON, PROPN, PUNCT, VERB, POS
)
from ...lemmatizer import Lemmatizer
class RussianLemmatizer(Lemmatizer):
    """spaCy lemmatizer for Russian backed by pymorphy2's MorphAnalyzer."""
    # Analyzer shared by all instances (expensive to construct); built once.
    _morph = None
    def __init__(self):
        super(RussianLemmatizer, self).__init__()
        try:
            from pymorphy2 import MorphAnalyzer
        except ImportError:
            raise ImportError(
                'The Russian lemmatizer requires the pymorphy2 library: '
                'try to fix it with "pip install pymorphy2==0.8"')
        if RussianLemmatizer._morph is None:
            RussianLemmatizer._morph = MorphAnalyzer()
    def __call__(self, string, univ_pos, morphology=None):
        """Return candidate lemmas for *string* given its universal POS tag
        and, optionally, its morphological features."""
        univ_pos = self.normalize_univ_pos(univ_pos)
        if univ_pos == 'PUNCT':
            return [PUNCT_RULES.get(string, string)]
        if univ_pos not in ('ADJ', 'DET', 'NOUN', 'NUM', 'PRON', 'PROPN', 'VERB'):
            # Skip unchangeable pos
            return [string.lower()]
        analyses = self._morph.parse(string)
        filtered_analyses = []
        for analysis in analyses:
            if not analysis.is_known:
                # Skip suggested parse variant for unknown word for pymorphy
                continue
            analysis_pos, _ = oc2ud(str(analysis.tag))
            # NOUN and PROPN are treated as interchangeable here.
            if analysis_pos == univ_pos \
                    or (analysis_pos in ('NOUN', 'PROPN') and univ_pos in ('NOUN', 'PROPN')):
                filtered_analyses.append(analysis)
        if not len(filtered_analyses):
            return [string.lower()]
        if morphology is None or (len(morphology) == 1 and POS in morphology):
            # No (useful) features requested: return all distinct normal forms.
            return list(set([analysis.normal_form for analysis in filtered_analyses]))
        # Features that must agree between the requested morphology and the
        # pymorphy analysis, depending on the part of speech.
        if univ_pos in ('ADJ', 'DET', 'NOUN', 'PROPN'):
            features_to_compare = ['Case', 'Number', 'Gender']
        elif univ_pos == 'NUM':
            features_to_compare = ['Case', 'Gender']
        elif univ_pos == 'PRON':
            features_to_compare = ['Case', 'Number', 'Gender', 'Person']
        else:  # VERB
            features_to_compare = ['Aspect', 'Gender', 'Mood', 'Number', 'Tense', 'VerbForm', 'Voice']
        # Second filtering pass: drop analyses whose features conflict with
        # the requested morphology (for-else keeps non-conflicting ones).
        analyses, filtered_analyses = filtered_analyses, []
        for analysis in analyses:
            _, analysis_morph = oc2ud(str(analysis.tag))
            for feature in features_to_compare:
                if (feature in morphology and feature in analysis_morph
                        and morphology[feature] != analysis_morph[feature]):
                    break
            else:
                filtered_analyses.append(analysis)
        if not len(filtered_analyses):
            return [string.lower()]
        return list(set([analysis.normal_form for analysis in filtered_analyses]))
    @staticmethod
    def normalize_univ_pos(univ_pos):
        # Accept either a POS string or a spaCy symbol id; return the
        # canonical upper-case string, or None for unsupported symbols.
        if isinstance(univ_pos, str):
            return univ_pos.upper()
        symbols_to_str = {
            ADJ: 'ADJ',
            DET: 'DET',
            NOUN: 'NOUN',
            NUM: 'NUM',
            PRON: 'PRON',
            PROPN: 'PROPN',
            PUNCT: 'PUNCT',
            VERB: 'VERB'
        }
        if univ_pos in symbols_to_str:
            return symbols_to_str[univ_pos]
        return None
    def is_base_form(self, univ_pos, morphology=None):
        # TODO
        raise NotImplementedError
    def det(self, string, morphology=None):
        # Convenience wrapper: lemmatize as a determiner.
        return self(string, 'det', morphology)
    def num(self, string, morphology=None):
        # Convenience wrapper: lemmatize as a numeral.
        return self(string, 'num', morphology)
    def pron(self, string, morphology=None):
        # Convenience wrapper: lemmatize as a pronoun.
        return self(string, 'pron', morphology)
    def lookup(self, string):
        # Fast path: only return the normal form when the parse is
        # unambiguous; otherwise leave the string untouched.
        analyses = self._morph.parse(string)
        if len(analyses) == 1:
            return analyses[0].normal_form
        return string
def oc2ud(oc_tag):
    """Translate a pymorphy2 (OpenCorpora) tag string into Universal
    Dependencies annotations.

    Returns a ``(pos, morphology)`` tuple where *pos* is a UD part-of-speech
    tag (``'X'`` when no POS gram is recognised) and *morphology* maps UD
    feature names to values.
    """
    gram_map = {
        '_POS': {
            'ADJF': 'ADJ',
            'ADJS': 'ADJ',
            'ADVB': 'ADV',
            'Apro': 'DET',
            'COMP': 'ADJ',  # Can also be an ADV - unchangeable
            'CONJ': 'CCONJ',  # Can also be a SCONJ - both unchangeable ones
            'GRND': 'VERB',
            'INFN': 'VERB',
            'INTJ': 'INTJ',
            'NOUN': 'NOUN',
            'NPRO': 'PRON',
            'NUMR': 'NUM',
            'NUMB': 'NUM',
            'PNCT': 'PUNCT',
            'PRCL': 'PART',
            'PREP': 'ADP',
            'PRTF': 'VERB',
            'PRTS': 'VERB',
            'VERB': 'VERB',
        },
        'Animacy': {
            'anim': 'Anim',
            'inan': 'Inan',
        },
        'Aspect': {
            'impf': 'Imp',
            'perf': 'Perf',
        },
        'Case': {
            'ablt': 'Ins',
            'accs': 'Acc',
            'datv': 'Dat',
            'gen1': 'Gen',
            'gen2': 'Gen',
            'gent': 'Gen',
            'loc2': 'Loc',
            'loct': 'Loc',
            'nomn': 'Nom',
            'voct': 'Voc',
        },
        'Degree': {
            'COMP': 'Cmp',
            'Supr': 'Sup',
        },
        'Gender': {
            'femn': 'Fem',
            'masc': 'Masc',
            'neut': 'Neut',
        },
        'Mood': {
            'impr': 'Imp',
            'indc': 'Ind',
        },
        'Number': {
            'plur': 'Plur',
            'sing': 'Sing',
        },
        'NumForm': {
            'NUMB': 'Digit',
        },
        'Person': {
            '1per': '1',
            '2per': '2',
            '3per': '3',
            'excl': '2',
            'incl': '1',
        },
        'Tense': {
            'futr': 'Fut',
            'past': 'Past',
            'pres': 'Pres',
        },
        'Variant': {
            'ADJS': 'Brev',
            'PRTS': 'Brev',
        },
        'VerbForm': {
            'GRND': 'Conv',
            'INFN': 'Inf',
            'PRTF': 'Part',
            'PRTS': 'Part',
            'VERB': 'Fin',
        },
        'Voice': {
            'actv': 'Act',
            'pssv': 'Pass',
        },
        'Abbr': {
            'Abbr': 'Yes'
        }
    }
    pos = 'X'
    morphology = dict()
    unmatched = set()
    # pymorphy tags separate grams with commas plus one space; normalise
    # to a single comma-separated list before scanning.
    for gram in oc_tag.replace(' ', ',').split(','):
        matched = False
        # Categories are scanned in sorted order so the outcome is
        # deterministic when a gram occurs in several of them (e.g. 'COMP'
        # is both a Degree and a POS gram, and both mappings are applied).
        for category, mapping in sorted(gram_map.items()):
            if gram in mapping:
                matched = True
                if category == '_POS':
                    pos = mapping[gram]
                else:
                    morphology[category] = mapping[gram]
        if not matched:
            unmatched.add(gram)
    # Second pass over grams with no direct UD equivalent.
    for gram in unmatched:
        if gram in ('Name', 'Patr', 'Surn', 'Geox', 'Orgn'):
            pos = 'PROPN'
        elif gram == 'Auxt':
            pos = 'AUX'
        elif gram == 'Pltm':
            morphology['Number'] = 'Ptan'
    return pos, morphology
# Guillemet quotation marks are normalised to ASCII double quotes when
# lemmatizing PUNCT tokens (see RussianLemmatizer.__call__).
PUNCT_RULES = {
    "«": "\"",
    "»": "\""
}
|
xtField')()),
('answer', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal('smartgrid_design', ['DesignerTextPromptQuestion'])
# Adding model 'DesignerQuestionChoice'
db.create_table('smartgrid_design_designerquestionchoice', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid_design.DesignerTextPromptQuestion'])),
('action', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid_design.DesignerAction'])),
('choice', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('smartgrid_design', ['DesignerQuestionChoice'])
# Adding model 'DesignerLevel'
db.create_table('smartgrid_design_designerlevel', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, db_index=True)),
('priority', self.gf('django.db.models.fields.IntegerField')(default=1)),
('unlock_condition', self.gf('django.db.models.fields.CharField')(max_length=400, null=True, blank=True)),
('unlock_condition_text', self.gf('django.db.models.fields.CharField')(max_length=400, null=True, blank=True)),
))
db.send_create_signal('smartgrid_design', ['DesignerLevel'])
# Adding model 'DesignerCategory'
db.create_table('smartgrid_design_designercategory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_lengt | h=50, null=True, db_index=True)),
('priority', self.gf('django.db.models.fields.IntegerField')(default=1)),
))
db.send_create_signal('smartgrid_ | design', ['DesignerCategory'])
# Adding model 'DesignerAction'
db.create_table('smartgrid_design_designeraction', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True, blank=True)),
('video_id', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('video_source', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
('embedded_widget', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')()),
('type', self.gf('django.db.models.fields.CharField')(max_length=20)),
('level', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid_design.DesignerLevel'], null=True, blank=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid_design.DesignerCategory'], null=True, blank=True)),
('priority', self.gf('django.db.models.fields.IntegerField')(default=1000)),
('pub_date', self.gf('django.db.models.fields.DateField')(default=datetime.date(2013, 3, 19))),
('expire_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('unlock_condition', self.gf('django.db.models.fields.CharField')(max_length=400, null=True, blank=True)),
('unlock_condition_text', self.gf('django.db.models.fields.CharField')(max_length=400, null=True, blank=True)),
('related_resource', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
('social_bonus', self.gf('django.db.models.fields.IntegerField')(default=0)),
('point_value', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('smartgrid_design', ['DesignerAction'])
# Adding model 'Activity'
db.create_table('smartgrid_design_activity', (
('designeraction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['smartgrid_design.DesignerAction'], unique=True, primary_key=True)),
('expected_duration', self.gf('django.db.models.fields.IntegerField')()),
('point_range_start', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('point_range_end', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('confirm_type', self.gf('django.db.models.fields.CharField')(default='text', max_length=20)),
('confirm_prompt', self.gf('django.db.models.fields.TextField')(blank=True)),
('admin_note', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('smartgrid_design', ['Activity'])
# Adding model 'Commitment'
db.create_table('smartgrid_design_commitment', (
('designeraction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['smartgrid_design.DesignerAction'], unique=True, primary_key=True)),
('commitment_length', self.gf('django.db.models.fields.IntegerField')(default=5)),
))
db.send_create_signal('smartgrid_design', ['Commitment'])
# Adding model 'Event'
db.create_table('smartgrid_design_event', (
('designeraction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['smartgrid_design.DesignerAction'], unique=True, primary_key=True)),
('expected_duration', self.gf('django.db.models.fields.IntegerField')()),
('event_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('event_location', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('event_max_seat', self.gf('django.db.models.fields.IntegerField')(default=1000)),
))
db.send_create_signal('smartgrid_design', ['Event'])
# Adding model 'Filler'
db.create_table('smartgrid_design_filler', (
('designeraction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['smartgrid_design.DesignerAction'], unique=True, primary_key=True)),
))
db.send_create_signal('smartgrid_design', ['Filler'])
def backwards(self, orm):
# Deleting model 'DesignerTextPromptQuestion'
db.delete_table('smartgrid_design_designertextpromptquestion')
# Deleting model 'DesignerQuestionChoice'
db.delete_table('smartgrid_design_designerquestionchoice')
# Deleting model 'DesignerLevel'
db.delete_table('smartgrid_design_designerlevel')
# Deleting model 'DesignerCategory'
db.delete_table('smartgrid_design_designercategory')
# Deleting model 'DesignerAction'
db.delete_table('smartgrid_design_designeraction')
# Deleting model 'Activity'
db.delete_table('smartgrid_design_activity')
# Deleting model 'Commitment'
db.delete_table('smartgrid_design_commitment')
# Deleting model 'Event'
db.delete_table('smartgrid_design_event')
# Deleting model 'Filler'
db.delete_table('smartgrid_design_filler')
models = {
'smartgrid_design.activity': {
'Meta': {'ordering': "('level', 'category', 'priority')", 'object_name': 'Activity', '_ormbases': ['smartgrid_design.DesignerAction']},
'admin_note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'confirm_prompt': ('django. |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Stock Reservation',
'summary': 'Stock reservations on products',
'version': '0.2',
'author': "Camptocamp,Odoo Community Association (OCA)",
'category': 'Warehouse',
'license': 'AGPL-3',
'complexity': 'normal',
'images': [],
'website': "http://www.camptocamp.com",
'description': """
Stock Reservation
=================
Allows to create stock reservations on products.
Each reservation can have a validity date, once passed, the reserva | tion
is automatically lifted.
The reserved products are substracted from the virtual stock. It means
that if you reserved a quantity of products which bring the virtual
stock below the minimum, the orderpoint will be triggered and new
purchase orders will be generated. It also implies th | at the max may be
exceeded if the reservations are canceled.
Contributors
------------
* Guewen Baconnier <guewen.baconnier@camptocamp.com>
* Yannick Vaucher <yannick.vaucher@camptocamp.com>
""",
'depends': ['stock',
],
'demo': [],
'data': ['view/stock_reserve.xml',
'view/product.xml',
'data/stock_data.xml',
'security/ir.model.access.csv',
],
'auto_install': False,
'test': ['test/stock_reserve.yml',
],
'installable': True,
}
|
# Extend the base project settings: import everything needed from
# __settings__ and register the local 'post' app.
import __settings__
from __settings__ import INSTALLED_APPS
# Fail fast if the base settings module is not the one we expect.
assert hasattr(__settings__, 'BASE_DIR'), 'BASE_DIR required'
# NOTE(review): tuple += rebinds INSTALLED_APPS in *this* module only; it does
# not modify __settings__.INSTALLED_APPS. Presumably this module is itself the
# settings entry point the framework loads -- confirm.
INSTALLED_APPS += (
    'post',
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name,len-as-condition
from functools import partial
from rebulk.pattern import StringPattern
from ..validators import chars_before, chars_after, chars_surround, validators
# Separator characters accepted around a match, and the three validator
# flavours built from them: before the match, after it, or on both sides.
chars = ' _.'
left = partial(chars_before, chars)
right = partial(chars_after, chars)
surrounding = partial(chars_surround, chars)
def test_left_chars():
    """A match must be preceded by a separator char or the input start."""
    cases = [
        ("xxxwordxxx", 0),
        ("xxx_wordxxx", 1),
        ("wordxxx", 1),
    ]
    for text, expected in cases:
        found = list(StringPattern("word", validator=left).matches(text))
        assert len(found) == expected
def test_right_chars():
    """A match must be followed by a separator char or the input end."""
    cases = [
        ("xxxwordxxx", 0),
        ("xxxword.xxx", 1),
        ("xxxword", 1),
    ]
    for text, expected in cases:
        found = list(StringPattern("word", validator=right).matches(text))
        assert len(found) == expected
def test_surrounding_chars():
    """A match needs separators (or input boundaries) on *both* sides."""
    cases = [
        ("xxxword xxx", 0),
        ("xxx.wordxxx", 0),
        ("xxx word_xxx", 1),
        ("word", 1),
    ]
    for text, expected in cases:
        found = list(StringPattern("word", validator=surrounding).matches(text))
        assert len(found) == expected
def test_chain():
    """validators(left, right) chained behaves like `surrounding`."""
    chained = validators(left, right)
    cases = [
        ("xxxword xxx", 0),
        ("xxx.wordxxx", 0),
        ("xxx word_xxx", 1),
        ("word", 1),
    ]
    for text, expected in cases:
        found = list(StringPattern("word", validator=chained).matches(text))
        assert len(found) == expected
|
from iSoft.entity.model import db, FaQuery
import math
import json
from iSoft.model.AppReturnDTO import AppReturnDTO
from iSoft.core.Fun import Fun
import re
class QueryDal(FaQuery):
    """Data-access layer for FaQuery records: CRUD helpers plus execution
    of the ad-hoc SQL queries configured in each record."""

    def __init__(self):
        # Intentionally does not call FaQuery.__init__; instances only act
        # as a carrier object for Fun.model_save.
        pass

    def _clean_cfg_json(self, raw):
        """Normalize a QUERY_CFG_JSON string into parseable JSON.

        Strips CR/LF/spaces, removes the JavaScript
        'onComponentInitFunction' callbacks (not valid JSON) and the
        dangling commas that the removal leaves behind.
        Shared by query_Save and query_GetSqlByCode (previously duplicated).
        """
        json_str = re.sub(r'\r|\n| ', "", raw)
        json_str = re.sub(r'"onComponentInitFunction"((.|\n)+?)},', "", json_str)
        json_str = re.sub(r',},', ",", json_str)
        return json_str

    def query_findall(self, pageIndex, pageSize, criterion, where):
        """Paged listing of FaQuery records; returns (rows, AppReturnDTO)."""
        relist, is_succ = Fun.model_findall(FaQuery, pageIndex, pageSize,
                                            criterion, where)
        return relist, is_succ

    def query_Save(self, in_dict, saveKeys):
        """Validate the column config JSON, then persist the record.

        Returns (record, AppReturnDTO); (None, failure DTO) when
        QUERY_CFG_JSON does not parse. The former bare `except:` is
        narrowed to ValueError (json.JSONDecodeError is a subclass).
        """
        try:
            json.loads(self._clean_cfg_json(in_dict["QUERY_CFG_JSON"]))
        except ValueError:
            return None, AppReturnDTO(False, "列配置信息有误")
        relist, is_succ = Fun.model_save(FaQuery, self, in_dict, saveKeys)
        return relist, is_succ

    def query_delete(self, key):
        """Delete the record identified by `key`; returns AppReturnDTO."""
        is_succ = Fun.model_delete(FaQuery, key)
        return is_succ

    def query_single(self, key):
        """Fetch one record by primary key; returns (record, AppReturnDTO)."""
        relist, is_succ = Fun.model_single(FaQuery, key)
        return relist, is_succ

    def query_singleByCode(self, code):
        """Fetch one record by its unique CODE; returns (record, AppReturnDTO)."""
        db_ent = FaQuery.query.filter(FaQuery.CODE == code).first()
        if db_ent is None:
            return db_ent, AppReturnDTO(False, "代码不存在")
        return db_ent, AppReturnDTO(True)

    # 查看数据
    def query_queryByCode(self, code, pageIndex, pageSize, criterion, where):
        """Run the configured query `code` and return one page of rows.

        On success msg.Msg carries the total row count. Bug fix: a
        pageIndex < 1 now resets pageIndex to 1 (the original mistakenly
        reset pageSize instead, leaving an invalid page index).
        """
        sql, cfg, msg = self.query_GetSqlByCode(code, criterion, where)
        if not msg.IsSuccess:
            return sql, msg
        # Count total rows first so the caller can page.
        relist = db.session.execute(sql)
        num = relist.rowcount
        relist.close()
        if pageIndex < 1:
            pageIndex = 1
        if pageSize < 1:
            pageSize = 10
        # 最大页码 -- ceiling of total / page size
        max_page = math.ceil(num / pageSize)
        if pageIndex > max_page:
            # Past the last page: no rows, but report the total count.
            return None, AppReturnDTO(True, num)
        pageSql = "{0} LIMIT {1},{2}".format(sql, (pageIndex - 1) * pageSize,
                                             pageSize)
        allData, msg = Fun.sql_to_dict(pageSql)
        if msg.IsSuccess:
            msg.Msg = num
        return allData, msg

    def query_GetSqlByCode(self, code, criterion, where):
        """
        根据查询代码运算出查询的SQL
        用于导出数据,并统一管理配置的SQL
        返回SQL和配置 -- returns (sql, parsed config, AppReturnDTO).
        """
        db_ent = FaQuery.query.filter(FaQuery.CODE == code).first()
        if db_ent is None:
            return "", "", AppReturnDTO(False, "代码不存在")
        sql = db_ent.QUERY_CONF
        orderArr = []
        for order in criterion:
            orderArr.append("T.%(Key)s %(Value)s" % order)
        whereArr = []
        for search in where:
            if search["Type"] == "like":
                whereArr.append("T.%(Key)s like ('%%%(Value)s%%')" % search)
            else:
                whereArr.append("T.%(Key)s %(Type)s %(Value)s " % search)
        # NOTE(review): criterion/where values are interpolated directly into
        # SQL -- injection risk if they can come from untrusted input.
        # Parameterized queries are not possible for identifiers here; the
        # caller must whitelist Key/Type/Value. Flagged, not changed.
        sql = "SELECT * FROM ({0}) T{1}{2}".format(
            sql,
            " WHERE " + " AND ".join(whereArr) if len(whereArr) > 0 else "",
            " ORDER BY " + " , ".join(orderArr) if len(orderArr) > 0 else "",
        )
        return sql, json.loads(self._clean_cfg_json(db_ent.QUERY_CFG_JSON)), \
            AppReturnDTO(True)
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
import math
import scipy.special as sps
# Parameters of the normal distribution sampled by drawSampleNormal.
mean = 0
variance = 1
sigma = math.sqrt(variance)
def drawSampleNormal(sampleSize):
    """Draw `sampleSize` samples from N(mean, sigma^2), plot a normalized
    histogram with the analytic pdf overlaid, and save the figure to
    normal_<sampleSize>_samples.png.

    Fixes vs. original:
    - savefig() is called *before* show(): show() can leave an empty canvas,
      so the saved file used to be blank.
    - hist(..., density=True): the `normed` keyword was removed from
      matplotlib.
    - the Gaussian pdf is computed directly; mlab.normpdf was removed.
    """
    samples = np.random.normal(mean, sigma, sampleSize)
    count, bins, ignored = plt.hist(samples, 80, density=True)
    pdf = np.exp(-((bins - mean) ** 2) / (2 * sigma ** 2)) / (sigma * math.sqrt(2 * math.pi))
    plt.plot(bins, pdf)
    plt.savefig("normal_" + str(sampleSize) + "_samples.png")
    plt.show()
    plt.clf()
drawSampleNormal(20)
drawSampleNormal(50)
drawSampleNormal(100)
drawSampleNorm | al(500)
# Gamma distribution parameters: shape (alpha) and scale (beta), matching
# the pdf computed inside drawSampleGamma.
alpha = 7.5
beta = 10
def drawSampleGamma(sampleSize):
    """Draw `sampleSize` samples from Gamma(shape=alpha, scale=beta), plot a
    normalized histogram with the analytic pdf overlaid, and save the figure
    to gamma_<sampleSize>_samples.png.

    Fixes vs. original: savefig() now runs before show() (show() can leave
    an empty canvas, making the saved file blank), and density=True replaces
    the removed `normed` keyword.
    """
    samples = np.random.gamma(alpha, beta, sampleSize)
    count, bins, ignored = plt.hist(samples, 80, density=True)
    # Gamma pdf with shape alpha and scale beta, evaluated at the bin edges.
    pdf = bins**(alpha-1)*(np.exp(-bins/beta) / (sps.gamma(alpha)*beta**alpha))
    plt.plot(bins, pdf, linewidth=2, color='r')
    plt.savefig("gamma_" + str(sampleSize) + "_samples.png")
    plt.show()
    plt.clf()
# Produce gamma-distribution histograms for increasing sample sizes.
drawSampleGamma(20)
drawSampleGamma(50)
drawSampleGamma(100)
drawSampleGamma(500)
from io import BytesIO
import os
import pickle
from tempfile import mkstemp
import unittest
from unittest.mock import patch, Mock
import warnings
from Orange.widgets.settings import SettingsHandler, Setting, SettingProvider
class SettingHandlerTestCase(unittest.TestCase):
    """Tests for SettingsHandler: creation, defaults persistence (pickle
    round-trip), widget/component initialization, and fast_save default
    propagation between sibling widget classes."""

    @patch('Orange.widgets.settings.SettingProvider', create=True)
    def test_create(self, SettingProvider):
        """:type SettingProvider: unittest.mock.Mock"""
        with patch.object(SettingsHandler, 'read_defaults'):
            handler = SettingsHandler.create(SimpleWidget)
            self.assertEqual(handler.widget_class, SimpleWidget)
            # create needs to create a SettingProvider which traverses
            # the widget definition and collects all settings and read
            # all settings and for widget class
            SettingProvider.assert_called_once_with(SimpleWidget)
            SettingsHandler.read_defaults.assert_called_once_with()

    def test_create_uses_template_if_provided(self):
        # Attributes of the template handler must be copied onto the new
        # handler, not shared with it.
        template = SettingsHandler()
        template.read_defaults = lambda: None
        template.a = 'a'
        template.b = 'b'
        handler = SettingsHandler.create(SimpleWidget, template)
        self.assertEqual(handler.a, 'a')
        self.assertEqual(handler.b, 'b')
        # create should copy the template
        handler.b = 'B'
        self.assertEqual(template.b, 'b')

    def test_read_defaults(self):
        # Defaults are unpickled from the handler's settings file.
        default_settings = {'a': 5, 'b': {1: 5}}
        fd, settings_file = mkstemp(suffix='.ini')
        with open(settings_file, 'wb') as f:
            pickle.dump(default_settings, f)
        os.close(fd)
        handler = SettingsHandler()
        handler._get_settings_filename = lambda: settings_file
        handler.read_defaults()
        self.assertEqual(handler.defaults, default_settings)
        os.remove(settings_file)

    def test_write_defaults(self):
        # Defaults are pickled into the handler's settings file.
        fd, settings_file = mkstemp(suffix='.ini')
        handler = SettingsHandler()
        handler.defaults = {'a': 5, 'b': {1: 5}}
        handler._get_settings_filename = lambda: settings_file
        handler.write_defaults()
        with open(settings_file, 'rb') as f:
            default_settings = pickle.load(f)
        os.close(fd)
        self.assertEqual(handler.defaults, default_settings)
        os.remove(settings_file)

    def test_initialize_widget(self):
        # Widget data (dict or pickled bytes) is merged over the defaults
        # before being handed to the provider.
        handler = SettingsHandler()
        handler.defaults = {'default': 42, 'setting': 1}
        handler.provider = provider = Mock()
        provider.get_provider.return_value = provider
        widget = SimpleWidget()

        def reset_provider():
            provider.get_provider.return_value = None
            provider.reset_mock()
            provider.get_provider.return_value = provider

        # No data
        handler.initialize(widget)
        provider.initialize.assert_called_once_with(widget, {'default': 42,
                                                            'setting': 1})
        # Dictionary data
        reset_provider()
        handler.initialize(widget, {'setting': 5})
        provider.initialize.assert_called_once_with(widget, {'default': 42,
                                                            'setting': 5})
        # Pickled data
        reset_provider()
        handler.initialize(widget, pickle.dumps({'setting': 5}))
        provider.initialize.assert_called_once_with(widget, {'default': 42,
                                                            'setting': 5})

    def test_initialize_component(self):
        # Component initialization passes the data through unmerged.
        handler = SettingsHandler()
        handler.defaults = {'default': 42}
        provider = Mock()
        handler.provider = Mock(get_provider=Mock(return_value=provider))
        widget = SimpleWidget()
        # No data
        handler.initialize(widget)
        provider.initialize.assert_called_once_with(widget, None)
        # Dictionary data
        provider.reset_mock()
        handler.initialize(widget, {'setting': 5})
        provider.initialize.assert_called_once_with(widget, {'setting': 5})
        # Pickled data
        provider.reset_mock()
        handler.initialize(widget, pickle.dumps({'setting': 5}))
        provider.initialize.assert_called_once_with(widget, {'setting': 5})

    @patch('Orange.widgets.settings.SettingProvider', create=True)
    def test_initialize_with_no_provider(self, SettingProvider):
        """:type SettingProvider: unittest.mock.Mock"""
        handler = SettingsHandler()
        handler.provider = Mock(get_provider=Mock(return_value=None))
        provider = Mock()
        SettingProvider.return_value = provider
        widget = SimpleWidget()
        # initializing an undeclared provider should display a warning
        with warnings.catch_warnings(record=True) as w:
            handler.initialize(widget)
            self.assertEqual(1, len(w))
        SettingProvider.assert_called_once_with(SimpleWidget)
        provider.initialize.assert_called_once_with(widget, None)

    def test_fast_save(self):
        # fast_save updates the bound handler's known_settings default but
        # must not touch the Setting descriptor shared by other classes.
        handler = SettingsHandler()
        handler.read_defaults = lambda: None
        handler.bind(SimpleWidget)
        widget = SimpleWidget()
        handler.fast_save(widget, 'component.int_setting', 5)
        self.assertEqual(
            handler.known_settings['component.int_setting'].default, 5)
        self.assertEqual(Component.int_setting.default, 42)
        # Saving an unknown name must be a silent no-op.
        handler.fast_save(widget, 'non_setting', 4)

    def test_fast_save_siblings_spill(self):
        # Defaults saved through one subclass's handler must not leak into
        # a sibling subclass bound to its own handler.
        handler_mk1 = SettingsHandler()
        handler_mk1.read_defaults = lambda: None
        handler_mk1.bind(SimpleWidgetMk1)
        widget_mk1 = SimpleWidgetMk1()
        handler_mk1.fast_save(widget_mk1, "setting", -1)
        handler_mk1.fast_save(widget_mk1, "component.int_setting", 1)
        self.assertEqual(
            handler_mk1.known_settings['setting'].default, -1)
        self.assertEqual(
            handler_mk1.known_settings['component.int_setting'].default, 1)
        handler_mk1.initialize(widget_mk1, data=None)
        handler_mk1.provider.providers["component"].initialize(
            widget_mk1.component, data=None)
        self.assertEqual(widget_mk1.setting, -1)
        self.assertEqual(widget_mk1.component.int_setting, 1)
        handler_mk2 = SettingsHandler()
        handler_mk2.read_defaults = lambda: None
        handler_mk2.bind(SimpleWidgetMk2)
        widget_mk2 = SimpleWidgetMk2()
        handler_mk2.initialize(widget_mk2, data=None)
        handler_mk2.provider.providers["component"].initialize(
            widget_mk2.component, data=None)
        self.assertEqual(widget_mk2.setting, 42,
                         "spils defaults into sibling classes")
        self.assertEqual(Component.int_setting.default, 42)
        self.assertEqual(widget_mk2.component.int_setting, 42,
                         "spils defaults into sibling classes")
class Component:
    """Minimal settings component fixture: one int Setting, default 42."""
    int_setting = Setting(42)
class SimpleWidget:
    """Minimal widget fixture: one Setting, one plain attribute, and a
    Component declared through a SettingProvider."""
    setting = Setting(42)
    non_setting = 5
    component = SettingProvider(Component)

    def __init__(self):
        # Shadow the class-level provider declaration with a live instance,
        # as real widgets do.
        self.component = Component()
class SimpleWidgetMk1(SimpleWidget):
    """Sibling subclass used to verify defaults do not spill across classes."""
    pass
class SimpleWidgetMk2(SimpleWidget):
    """Second sibling subclass for the defaults-spill test."""
    pass
class WidgetWithNoProviderDeclared:
    """Fixture holding a Component without a SettingProvider declaration;
    initializing it should trigger the missing-provider warning."""
    def __init__(self):
        self.undeclared_component = Component()
|
+xml', QtCore.QByteArray(xml.encode('UTF-8')))
QtGui.QApplication.clipboard().setMimeData(md)
else:
with open(fileName, 'wb') as fh:
fh.write(asUnicode(xml).encode('utf-8'))
# Document header prepended to every generated SVG; generateSvg appends the
# <defs> section, the item markup, and the closing </svg> tag.
xmlHeader = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.2" baseProfile="tiny">
<title>pyqtgraph SVG export</title>
<desc>Generated with Qt and pyqtgraph</desc>
"""
def generateSvg(item, options=None):
    """Return a complete SVG document (str) rendering *item* and its subtree.

    Parameters:
        item: a QGraphicsScene or QGraphicsItem to export.
        options: optional dict of export options passed through to
            _generateItemSvg. Bug fix: the mutable default `options={}`
            is replaced with a None sentinel (backward compatible).

    Export mode is reset on every item in the tree even if rendering fails.
    """
    global xmlHeader
    if options is None:
        options = {}
    try:
        node, defs = _generateItemSvg(item, options=options)
    finally:
        ## reset export mode for all items in the tree
        if isinstance(item, QtGui.QGraphicsScene):
            items = list(item.items())
        else:
            items = [item]
        # Extending `items` while iterating is deliberate: it walks the whole
        # subtree, appending each item's children as they are visited.
        for i in items:
            items.extend(i.childItems())
        for i in items:
            if hasattr(i, 'setExportMode'):
                i.setExportMode(False)
    cleanXml(node)
    defsXml = "<defs>\n"
    for d in defs:
        defsXml += d.toprettyxml(indent='    ')
    defsXml += "</defs>\n"
    return xmlHeader + defsXml + node.toprettyxml(indent='    ') + "\n</svg>\n"
def _generateItemSvg(item, nodes=None, root=None, options={}):
    """Render *item* (and, recursively, its children) into an SVG <g> element.

    Returns (g1, defs): g1 is the xml Element for this item's group, defs a
    list of <defs> child elements harvested from Qt's output. Returns None
    for hidden items. `nodes` maps unique element ids to their elements
    across the whole recursion.

    NOTE(review): `options={}` is a mutable default argument shared across
    calls; it appears to be read-only here, but confirm before relying on it.
    """
    ## This function is intended to work around some issues with Qt's SVG generator
    ## and SVG in general.
    ## 1) Qt SVG does not implement clipping paths. This is absurd.
    ##    The solution is to let Qt generate SVG for each item independently,
    ##    then glue them together manually with clipping.
    ##
    ##    The format Qt generates for all items looks like this:
    ##
    ##    <g>
    ##        <g transform="matrix(...)">
    ##            one or more of: <path/> or <polyline/> or <text/>
    ##        </g>
    ##        <g transform="matrix(...)">
    ##            one or more of: <path/> or <polyline/> or <text/>
    ##        </g>
    ##        . . .
    ##    </g>
    ##
    ## 2) There seems to be wide disagreement over whether path strokes
    ##    should be scaled anisotropically.
    ##    see: http://web.mit.edu/jonas/www/anisotropy/
    ##    Given that both inkscape and illustrator seem to prefer isotropic
    ##    scaling, we will optimize for those cases.
    ##
    ## 3) Qt generates paths using non-scaling-stroke from SVG 1.2, but
    ##    inkscape only supports 1.1.
    ##
    ## Both 2 and 3 can be addressed by drawing all items in world coordinates.
    profiler = debug.Profiler()
    if nodes is None:  ## nodes maps all node IDs to their XML element.
                       ## this allows us to ensure all elements receive unique names.
        nodes = {}
    if root is None:
        root = item
    ## Skip hidden items
    if hasattr(item, 'isVisible') and not item.isVisible():
        return None
    ## If this item defines its own SVG generator, use that.
    if hasattr(item, 'generateSvg'):
        return item.generateSvg(nodes)
    ## Generate SVG text for just this item (exclude its children; we'll handle them later)
    tr = QtGui.QTransform()
    if isinstance(item, QtGui.QGraphicsScene):
        # Scenes have no paint of their own: emit an empty group and recurse
        # into the top-level items.
        xmlStr = "<g>\n</g>\n"
        doc = xml.parseString(xmlStr)
        childs = [i for i in item.items() if i.parentItem() is None]
    elif item.__class__.paint == QtGui.QGraphicsItem.paint:
        # Item did not override paint(): nothing to render, empty group.
        xmlStr = "<g>\n</g>\n"
        doc = xml.parseString(xmlStr)
        childs = item.childItems()
    else:
        childs = item.childItems()
        tr = itemTransform(item, item.scene())
        ## offset to corner of root item
        if isinstance(root, QtGui.QGraphicsScene):
            rootPos = QtCore.QPoint(0,0)
        else:
            rootPos = root.scenePos()
        tr2 = QtGui.QTransform()
        tr2.translate(-rootPos.x(), -rootPos.y())
        tr = tr * tr2
        # Paint the single item through QSvgGenerator into an in-memory buffer.
        arr = QtCore.QByteArray()
        buf = QtCore.QBuffer(arr)
        svg = QtSvg.QSvgGenerator()
        svg.setOutputDevice(buf)
        dpi = QtGui.QDesktopWidget().logicalDpiX()
        svg.setResolution(dpi)
        p = QtGui.QPainter()
        p.begin(svg)
        if hasattr(item, 'setExportMode'):
            item.setExportMode(True, {'painter': p})
        try:
            p.setTransform(tr)
            item.paint(p, QtGui.QStyleOptionGraphicsItem(), None)
        finally:
            p.end()
        ## Can't do this here--we need to wait until all children have painted as well.
        ## this is taken care of in generateSvg instead.
        #if hasattr(item, 'setExportMode'):
            #item.setExportMode(False)
        if QT_LIB in ['PySide', 'PySide2']:
            xmlStr = str(arr)
        else:
            xmlStr = bytes(arr).decode('utf-8')
        doc = xml.parseString(xmlStr.encode('utf-8'))
    try:
        ## Get top-level group for this item
        g1 = doc.getElementsByTagName('g')[0]
        ## get list of sub-groups
        g2 = [n for n in g1.childNodes if isinstance(n, xml.Element) and n.tagName == 'g']
        # NOTE(review): when no <defs> tag exists, `defs` stays a (possibly
        # empty) NodeList rather than a list -- confirm downstream extend()
        # use is safe in that case.
        defs = doc.getElementsByTagName('defs')
        if len(defs) > 0:
            defs = [n for n in defs[0].childNodes if isinstance(n, xml.Element)]
    except:
        print(doc.toxml())
        raise
    profiler('render')
    ## Get rid of group transformation matrices by applying
    ## transformation to inner coordinates
    correctCoordinates(g1, defs, item, options)
    profiler('correct')
    ## decide on a name for this item -- first free ClassName_%d id.
    baseName = item.__class__.__name__
    i = 1
    while True:
        name = baseName + "_%d" % i
        if name not in nodes:
            break
        i += 1
    nodes[name] = g1
    g1.setAttribute('id', name)
    ## If this item clips its children, we need to take care of that.
    childGroup = g1  ## add children directly to this node unless we are clipping
    if not isinstance(item, QtGui.QGraphicsScene):
        ## See if this item clips its children
        if int(item.flags() & item.ItemClipsChildrenToShape) > 0:
            ## Generate svg for just the path
            path = QtGui.QGraphicsPathItem(item.mapToScene(item.shape()))
            item.scene().addItem(path)
            try:
                pathNode = _generateItemSvg(path, root=root, options=options)[0].getElementsByTagName('path')[0]
                # assume <defs> for this path is empty.. possibly problematic.
            finally:
                item.scene().removeItem(path)
            ## and for the clipPath element
            clip = name + '_clip'
            clipNode = g1.ownerDocument.createElement('clipPath')
            clipNode.setAttribute('id', clip)
            clipNode.appendChild(pathNode)
            g1.appendChild(clipNode)
            childGroup = g1.ownerDocument.createElement('g')
            childGroup.setAttribute('clip-path', 'url(#%s)' % clip)
            g1.appendChild(childGroup)
    profiler('clipping')
    ## Add all child items as sub-elements. Sorted by z-value so stacking
    ## order is approximated in document order.
    childs.sort(key=lambda c: c.zValue())
    for ch in childs:
        csvg = _generateItemSvg(ch, nodes, root, options=options)
        if csvg is None:
            continue
        cg, cdefs = csvg
        childGroup.appendChild(cg)  ### this isn't quite right--some items draw below their parent (good enough for now)
        defs.extend(cdefs)
    profiler('children')
    return g1, defs
def correctCoordinates(node, defs, item, options):
# TODO: correct gradient coordinates inside defs
## Remove transformation matrices from <g> tags by applying matrix to coordinates inside.
## Each item is represented by a single top-level group with one or more groups inside.
## Each inner group contains one or more drawing primitives, possibly of different types.
groups = node.getElementsByTagName('g')
## Since we leave text unchanged, groups which combine text and non-text primitives must be split apart.
## (if at some point we start correcting text transforms as well, then it should be safe to remove this)
groups2 = []
|
# -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.keyboardext import VirtualKeyBoardExt
# Absolute path to MediaPortal's additions registry (plugin metadata XML).
CONFIG = "/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/additions/additions.xml"
class toSearchForPorn(MPScreen):
    """Enigma2 screen managing a persistent list of search phrases (stored in
    the watchlist file "mp_2s4p") that can be run against any installed
    searchable adult-content plugin. Python 2 code."""

    def __init__(self, session):
        self.plugin_path = mp_globals.pluginPath
        self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
        # Load the configured skin, falling back to the default skin dir.
        path = "%s/%s/defaultGenreScreenCover.xml" % (self.skin_path, config.mediaportal.skin.value)
        if not fileExists(path):
            path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreenCover.xml"
        with open(path, "r") as f:
            self.skin = f.read()
            f.close()  # NOTE(review): redundant -- the with-block already closes f
        MPScreen.__init__(self, session)
        self["actions"] = ActionMap(["MP_Actions"], {
            "ok" : self.keyOK,
            "cancel" : self.keyCancel,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft,
            "red" : self.keyRed,
            "green" : self.keyGreen,
            "yellow" : self.keyYellow
        }, -1)
        self['title'] = Label("2Search4Porn")
        self['name'] = Label("Your Search Requests")
        self['ContentTitle'] = Label("Annoyed, typing in your search-words for each Porn-Site again and again?")
        self['F1'] = Label(_("Delete"))
        self['F2'] = Label(_("Add"))
        self['F3'] = Label(_("Edit"))
        self.keyLocked = True
        self.suchString = ''
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml
        self.onLayoutFinish.append(self.Searches)

    def Searches(self):
        # Load saved phrases from the watchlist file, creating it if missing.
        self.genreliste = []
        self['liste'] = self.ml
        if not fileExists(config.mediaportal.watchlistpath.value+"mp_2s4p"):
            open(config.mediaportal.watchlistpath.value+"mp_2s4p","w").close()
        if fileExists(config.mediaportal.watchlistpath.value+"mp_2s4p"):
            fobj = open(config.mediaportal.watchlistpath.value+"mp_2s4p","r")
            for line in fobj:
                self.genreliste.append((line, None))
            fobj.close()
            self.ml.setList(map(self._defaultlistcenter, self.genreliste))
            self.keyLocked = False

    def SearchAdd(self):
        # Open the on-screen keyboard to enter a new phrase.
        suchString = ""
        self.session.openWithCallback(self.SearchAdd1, VirtualKeyBoardExt, title = (_("Enter Search")), text = suchString, is_dialog=True)

    def SearchAdd1(self, suchString):
        # Keyboard callback: append the new phrase (ignore empty/cancelled).
        if suchString is not None and suchString != "":
            self.genreliste.append((suchString,None))
            self.ml.setList(map(self._defaultlistcenter, self.genreliste))

    def SearchEdit(self):
        # Edit the selected phrase in the on-screen keyboard.
        if len(self.genreliste) > 0:
            suchString = self['liste'].getCurrent()[0][0].rstrip()
            self.session.openWithCallback(self.SearchEdit1, VirtualKeyBoardExt, title = (_("Enter Search")), text = suchString, is_dialog=True)

    def SearchEdit1(self, suchString):
        # Keyboard callback: replace the phrase at the selected position.
        if suchString is not None and suchString != "":
            pos = self['liste'].getSelectedIndex()
            self.genreliste.pop(pos)
            self.genreliste.insert(pos,(suchString,None))
            self.ml.setList(map(self._defaultlistcenter, self.genreliste))

    def SearchCallback(self, suchString):
        # Run the phrase: open the site-selection screen.
        if suchString is not None and suchString != "":
            self.session.open(toSearchForPornBrowse,suchString)

    def keyOK(self):
        if self.keyLocked:
            return
        if len(self.genreliste) > 0:
            self.SearchCallback(self['liste'].getCurrent()[0][0].rstrip())

    def keyRed(self):
        # Delete the selected phrase.
        if self.keyLocked:
            return
        if len(self.genreliste) > 0:
            self.genreliste.pop(self['liste'].getSelectedIndex())
            self.ml.setList(map(self._defaultlistcenter, self.genreliste))

    def keyGreen(self):
        # Add a new phrase.
        if self.keyLocked:
            return
        self.SearchAdd()

    def keyYellow(self):
        # Edit the selected phrase.
        if self.keyLocked:
            return
        self.SearchEdit()

    def keyCancel(self):
        # Persist the sorted phrase list on exit; delete the file when empty.
        if self.keyLocked:
            return
        self.genreliste.sort(key=lambda t : t[0].lower())
        fobj_out = open(config.mediaportal.watchlistpath.value+"mp_2s4p","w")
        x = len(self.genreliste)
        if x > 0:
            for c in range(x):
                writeback = self.genreliste[c][0].rstrip()+"\n"
                fobj_out.write(writeback)
            fobj_out.close()
        else:
            # NOTE(review): fobj_out is still open when the file is removed
            # here -- works on Linux, but the handle leaks. Flagged only.
            os.remove(config.mediaportal.watchlistpath.value+"mp_2s4p")
        self.close()
class toSearchForPornBrowse(MPScreen):
    """Screen listing all searchable porn-site plugins registered in
    additions.xml and dispatching the chosen search phrase to the selected
    plugin's search screen. Python 2 code.

    NOTE(review): loadsites/keyOK build and run code with exec()/eval() from
    strings read out of additions.xml -- write access to that file means
    arbitrary code execution. Flagged, not changed here.
    """

    def __init__(self, session, suchString):
        self.suchString = suchString
        self.plugin_path = mp_globals.pluginPath
        self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
        # Load the configured skin, falling back to the default skin dir.
        path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
        if not fileExists(path):
            path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml"
        with open(path, "r") as f:
            self.skin = f.read()
            f.close()  # NOTE(review): redundant -- the with-block already closes f
        MPScreen.__init__(self, session)
        self["actions"] = ActionMap(["MP_Actions"], {
            "ok" : self.keyOK,
            "cancel" : self.keyCancel,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft
        }, -1)
        self['title'] = Label("2Search4Porn")
        self['ContentTitle'] = Label("Select Site")
        self['name'] = Label(_("Selection:"))
        self.keyLocked = True
        self.pornscreen = None
        self.genreliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml
        self.onLayoutFinish.append(self.loadsites)

    def loadsites(self):
        # Collect every enabled, searchable porn addition from the registry.
        conf = xml.etree.cElementTree.parse(CONFIG)
        for x in conf.getroot():
            if x.tag == "set" and x.get("name") == 'additions':
                root = x
        for x in root:
            if x.tag == "plugin":
                if x.get("type") == "mod":
                    if x.get("confcat") == "porn" and x.get("search") == "1":
                        gz = x.get("gz")
                        # Skip grey-zone entries unless enabled in config.
                        if not config.mediaportal.showgrauzone.value and gz == "1":
                            pass
                        else:
                            mod = eval("config.mediaportal." + x.get("confopt") + ".value")
                            if mod:
                                exec("self.genreliste.append((\""+x.get("name").replace("&","&")+"\", None))")
        self.genreliste.sort(key=lambda t : t[0].lower())
        self.keyLocked = False
        self.ml.setList(map(self._defaultlistcenter, self.genreliste))

    def keyOK(self):
        # Import the selected plugin module and open its search screen,
        # replacing spaces in the phrase with the plugin's delimiter.
        if self.keyLocked:
            return
        auswahl = self['liste'].getCurrent()[0][0]
        self.suchString = self.suchString.rstrip()
        conf = xml.etree.cElementTree.parse("/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/additions/additions.xml")
        for x in conf.getroot():
            if x.tag == "set" and x.get("name") == 'additions':
                root = x
        for x in root:
            if x.tag == "plugin":
                if x.get("type") == "mod":
                    if x.get("confcat") == "porn" and x.get("search") == "1":
                        if auswahl == x.get("name").replace("&","&"):
                            modfile = x.get("modfile")
                            modfile = "Plugins.Extensions.MediaPortal.additions.%s.%s" % (modfile.split(".")[0], modfile.split(".")[1])
                            exec("from "+modfile+" import *")
                            exec("self.suchString = self.suchString.replace(\" \",\""+x.get("delim")+"\")")
                            exec("Name = \"2Search4Porn - %s\" % (self.suchString)")
                            exec("Link = \""+x.get("searchurl").replace("&","&")+"\" % (self.suchString)")
                            print "Name: "+ Name
                            print "Link: "+ Link
                            exec("self.session.open("+x.get("searchscreen")+", Link, Name"+x.get("searchparam").replace(""","\"")+")")
import webipy
import numpy as np
import matplotlib.pyplot as plt |
import pylab
import pandas as pd
pylab.rcParams['figure.figsize'] = (15, 11)
|
@webipy.exports
def plot(x, n=4):
    """
    Demo of scatter plot on a polar axis: int(x) random points whose marker
    area grows radially and whose colour tracks the angle.
    """
    num_points = int(x)
    radii = 2 * np.random.rand(num_points)
    angles = 2 * np.pi * np.random.rand(num_points)
    areas = 200 * radii ** 2 * np.random.rand(num_points)
    axis = plt.subplot(111, polar=True)
    axis.scatter(angles, radii, c=angles, s=areas, cmap=plt.cm.hsv)
@webipy.exports
def sine(x):
    """
    Plot sine and cosine over [-pi, pi] sampled at int(x) points (rendered
    via mpld3) and return the sampled values as a DataFrame.
    """
    import mpld3
    mpld3.enable_notebook()
    xs = np.linspace(-np.pi, np.pi, int(x), endpoint=True)
    cos_vals = np.cos(xs)
    sin_vals = np.sin(xs)
    axis = plt.subplot(111)
    axis.plot(xs, cos_vals)
    axis.plot(xs, sin_vals)
    return pd.DataFrame({'X': xs, 'sine': sin_vals, 'cos': cos_vals})
@webipy.exports
def sine1(x):
    """
    Plot sine and cosine over [-pi, pi] sampled at int(x) points; bokeh
    variant (the conversion call itself is currently disabled).
    """
    from bokeh import mpl
    xs = np.linspace(-np.pi, np.pi, int(x), endpoint=True)
    cos_vals = np.cos(xs)
    sin_vals = np.sin(xs)
    axis = plt.subplot(111)
    axis.plot(xs, cos_vals)
    axis.plot(xs, sin_vals)
    # mpl.to_bokeh()
@webipy.exports
def bool_params(non_bool1, non_bool2=3, x=True, y=False):
    # Demo of webipy parameter handling: echo the non-boolean and boolean
    # arguments (Python 2 print statements; output format is part of the demo).
    print "non_bools"
    print "non_bool1:", non_bool1, "non_bool2", non_bool2
    print "bools"
    print "x:", x, "y:", y
|
#!/usr/bin/env python
import os
import sys
# Separator between the required numeric prefix and the rest of a symbol name.
PREFIX_DELIMITER = '_'
def enumerate_symbols(symbols_folder_path):
    """Return base names (extension stripped) of every .svg file in the
    folder, in os.listdir order."""
    split_names = (os.path.splitext(f) for f in os.listdir(symbols_folder_path))
    return [base for base, ext in split_names if ext == ".svg"]
def check_symbols(symbols):
    """Validate that every symbol name carries a unique numeric prefix.

    Each name must look like "<number><PREFIX_DELIMITER><rest>"; raises
    ValueError on a missing/non-numeric prefix or on a duplicate number.
    """
    seen_numbers = set()
    for symbol in symbols:
        delim_pos = symbol.find(PREFIX_DELIMITER)
        prefix = symbol[:delim_pos]
        if delim_pos < 0 or not prefix.isdigit():
            raise ValueError('Symbol ' + symbol + ' must have a numeric prefix')
        number = int(prefix)
        if number in seen_numbers:
            raise ValueError('Symbol ' + symbol + ' has duplicated numeric prefix')
        seen_numbers.add(number)
if __name__ == '__main__':
    # Usage: script <path_to_omim/data/styles> [<target_path>]
    if len(sys.argv) < 2:
        print('Usage: {0} <path_to_omim/data/styles> [<target_path>]'.format(sys.argv[0]))
        sys.exit(-1)
    # The styles root is expected to contain a 'clear' subfolder.
    path_to_styles = os.path.join(sys.argv[1], 'clear')
    if not os.path.isdir(path_to_styles):
        print('Invalid path to styles folder')
        sys.exit(-1)
    # Optional second argument: directory for the output file.
    target_path = ''
    if len(sys.argv) >= 3:
        target_path = sys.argv[2]
    output_name = os.path.join(target_path, 'local_ads_symbols.txt');
    if os.path.exists(output_name):
        os.remove(output_name)
    # Both style flavours must ship exactly the same set of ad symbols.
    paths = ['style-clear', 'style-night']
    symbols = []
    for folder_path in paths:
        s = enumerate_symbols(os.path.join(path_to_styles, folder_path, 'symbols-ad'))
        if len(symbols) != 0:
            # Compare against the previous folder's (sorted) symbol set.
            symbols.sort()
            s.sort()
            if symbols != s:
                raise ValueError('Different symbols set in folders' + str(paths))
        else:
            symbols = s
    # Enforce unique numeric prefixes before writing the final list.
    check_symbols(symbols)
    with open(output_name, "w") as text_file:
        for symbol in symbols:
            text_file.write(symbol + '\n')
|
C': 'weight', 'nodeA': 'weight'},
'nodeN': {}}),
]
# (start_container, node1, node2, expected_container) cases for add_edge().
GRAPHS_ADD_EDGE = [
    ({'nodeA': {'nodeB': 'weight'},
      'nodeB': {'nodeA': 'weight'}},
     "nodeX",
     "nodeY",
     {'nodeA': {'nodeB': 'weight'},
      'nodeB': {'nodeA': 'weight'},
      'nodeX': {'nodeY': 'weight'},
      'nodeY': {}}),
    ({'nodeA': {'nodeB': 'weight'},
      'nodeB': {'nodeA': 'weight'}},
     'nodeA',
     'nodeB',
     {'nodeA': {'nodeB': 'weight'},
      'nodeB': {'nodeA': 'weight'}}),
    ({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
      'nodeB': {'nodeA': 'weight'},
      'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}},
     'nodeB',
     'nodeC',
     {'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
      'nodeB': {'nodeA': 'weight', 'nodeC': 'weight'},
      'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}}),
    ]
# (start_container, node_to_delete, expected_container) cases for del_node().
GRAPHS_DEL_NODE = [
    ({'nodeA': {'nodeB': 'weight'},
      'nodeB': {'nodeA': 'weight'},
      'nodeX': {'nodeY': 'weight'},
      'nodeY': {}},
     'nodeA',
     {'nodeB': {},
      'nodeX': {'nodeY': 'weight'},
      'nodeY': {}}),
    ({'nodeA': {'nodeB': 'weight'},
      'nodeB': {'nodeA': 'weight'}},
     'nodeB',
     {'nodeA': {}}),
    ]
# (start_container, node1, node2, expected_container) cases for del_edge().
GRAPHS_DEL_EDGE = [
    ({'nodeA': {'nodeB': 'weight'},
      'nodeB': {}},
     'nodeA',
     'nodeB',
     {'nodeA': {},
      'nodeB': {}}),
    ({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
      'nodeB': {},
      'nodeC': {}},
     'nodeA',
     'nodeB',
     {'nodeA': {'nodeC': 'weight'},
      'nodeB': {},
      'nodeC': {}})
    ]
# (container, node, expected_neighbor_list) cases for neighbors().
NEIGHBORS = [
    ({'nodeA': {},
      'nodeB': {'nodeA': 'weight'}},
     'nodeB',
     ['nodeA']),
    ({'nodeA': {},
      'nodeB': {'nodeA': 'weight'}},
     'nodeA',
     []),
    ({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
      'nodeB': {'nodeA': 'weight'},
      'nodeC': {'nodeA': 'weight'}},
     'nodeA',
     ['nodeB', 'nodeC']),
    ]
# (container, n1, n2, expected_bool) cases for adjacent(); direction matters.
ADJACENT = [
    ({'nodeA': {'nodeB': 'weight'},
      'nodeB': {}},
     'nodeA',
     'nodeB',
     True),
    ({'nodeA': {'nodeB': 'weight'},
      'nodeB': {}},
     'nodeB',
     'nodeA',
     False),
    ]
# (container, n1, n2) cases where adjacent() must raise KeyError
# because at least one endpoint is missing from the graph.
ADJACENT_NODES_GONE = [
    ({'nodeA': {'nodeB': 'weight'},
      'nodeB': {}},
     'nodeX', 'nodeB'),
    ({'nodeA': {'nodeB': 'weight'},
      'nodeB': {}},
     'nodeX', 'nodeY'),
    ({'nodeA': {'nodeB': 'weight'},
      'nodeB': {}},
     'nodeA', 'nodeY'),
    ]
# (container, start_node, expected_visit_order) for breadth_first_traversal().
NODE_TRAVERSAL_BREADTH = [
    ({'A': {'B': 'weight', 'C': 'weight'},
      'B': {'A': 'weight', 'D': 'weight', 'E': 'weight'},
      'C': {'A': 'weight', 'F': 'weight', 'G': 'weight'},
      'D': {'B': 'weight', 'H': 'weight'},
      'E': {'B': 'weight'},
      'F': {'C': 'weight'},
      'G': {'C': 'weight'},
      'H': {'D': 'weight'}},
     'A',
     ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']),
    ({'A': {'B': 'weight', 'C': 'weight'},
      'B': {'C': 'weight', 'D': 'weight'},
      'C': {},
      'D': {}},
     'A',
     ['A', 'B', 'C', 'D']),
    ({'a': {}}, 'a', ['a']),
    ]
# (container, start_node, expected_visit_order) cases, presumably for a
# depth-first traversal -- the consuming test is not visible in this chunk.
NODE_TRAVERSAL_DEPTH = [
    ({'A': {'B': 'weight', 'E': 'weight'},
      "B": {'C': 'weight', 'D': 'weight'},
      'E': {},
      'C': {},
      'D': {}},
     'A',
     ['A', 'E', 'B', 'D', 'C']),
    ({'A': {'B': 'weight', 'E': 'weight'},
      "B": {'C': 'weight', 'D': 'weight'},
      'E': {},
      'C': {'A': 'weight', 'E': 'weight'},
      'D': {}},
     'A',
     ['A', 'E', 'B', 'D', 'C']),
    ({'a': {'b': 'weight', 'g': 'weight'},
      'b': {'c': 'weight'},
      'g': {'h': 'weight', 'j': 'weight'},
      'c': {'d': 'weight'},
      'h': {'i': 'weight'},
      'j': {'k': 'weight'},
      'd': {'e': 'weight', 'f': 'weight'},
      'i': {},
      'k': {},
      'e': {},
      'f': {}},
     'a',
     ['a', 'g', 'j', 'k', 'h', 'i', 'b', 'c', 'd', 'f', 'e']),
    ({'a': {}}, 'a', ['a']),
    ]
# (container, n1, n2, expected_weight) cases, presumably for an edge-weight
# lookup -- the consuming test is not visible in this chunk.
GET_WEIGHT = [
    ({'A': {'B': 'weight1', 'E': 'weight2'},
      "B": {'C': 'weight3', 'D': 'weight4'},
      'E': {},
      'C': {},
      'D': {}},
     'A',
     'B',
     'weight1',),
    ({'A': {'B': 'weight1', 'E': 'weight2'},
      "B": {'C': 'weight3', 'D': 'weight4'},
      'E': {},
      'C': {},
      'D': {}},
     'B',
     'C',
     'weight3',),
    ({'A': {'B': 'weight1', 'E': 'weight2'},
      "B": {'C': 'weight3', 'D': 'weight4'},
      'E': {},
      'C': {},
      'D': {}},
     'B',
     'D',
     'weight4',),
    ]
@pytest.fixture(scope='function')
def graph_fixture():
    """Provide a fresh, empty Graph instance for each test.

    BUG FIX: scope='function' was previously a *parameter of the fixture
    function*, where pytest treats it as a fixture request, not as the
    fixture's scope; it must be passed to the pytest.fixture decorator.
    """
    from graph import Graph
    return Graph()
@pytest.mark.parametrize(("built_graph", "node", "expected"), GRAPHS_DEL_NODE)
def test_del_node_exists(graph_fixture, built_graph, node, expected):
    """del_node() removes the node and every edge that points at it."""
    graph_fixture._container = built_graph
    graph_fixture.del_node(node)
    assert expected == graph_fixture._container
@pytest.mark.parametrize(("built_graph", "node_list", "edge_list"), GRAPHS)
def test_nodes(graph_fixture, built_graph, node_list, edge_list):
    """nodes() reports every node of the graph, order-insensitively."""
    graph_fixture._container = built_graph
    assert set(node_list) == set(graph_fixture.nodes())
@pytest.mark.parametrize(("built_graph", "node_list", "edge_list"), GRAPHS)
def test_edges(graph_fixture, built_graph, node_list, edge_list):
    """edges() reports every edge of the graph, order-insensitively."""
    graph_fixture._container = built_graph
    found = graph_fixture.edges()
    assert set(found) == set(edge_list)
@pytest.mark.parametrize(("built_graph", "new_node", "expected"),
                         GRAPHS_FOR_NODE_INSERT)
def test_add_node(graph_fixture, built_graph, new_node, expected):
    """add_node() inserts the node without disturbing existing entries."""
    graph_fixture._container = built_graph
    graph_fixture.add_node(new_node)
    assert expected == graph_fixture._container
@pytest.mark.parametrize(("built_graph", "n1", "n2", "expected"),
                         GRAPHS_ADD_EDGE)
def test_add_edge(graph_fixture, built_graph, n1, n2, expected):
    """add_edge() yields exactly the expected container afterwards."""
    graph_fixture._container = built_graph
    graph_fixture.add_edge(n1, n2)
    assert expected == graph_fixture._container
def test_del_node_not_exists(graph_fixture):
    """Deleting a node that is not in the graph raises KeyError."""
    graph_fixture._container = {'nodeA': {'nodeA': 'weight'}, 'nodeB': {}}
    with pytest.raises(KeyError):
        graph_fixture.del_node('nodeX')
@pytest.mark.parametrize(("built_graph", "node1", "node2", "expected"),
                         GRAPHS_DEL_EDGE)
def test_del_edge(graph_fixture, built_graph, node1, node2, expected):
    """del_edge() removes only the node1 -> node2 edge."""
    graph_fixture._container = built_graph
    graph_fixture.del_edge(node1, node2)
    assert expected == graph_fixture._container
def test_del_edge_not_exists(graph_fixture):
    """Deleting a missing edge raises ValueError."""
    graph_fixture._container = {'nodeA': {}}
    with pytest.raises(ValueError):
        graph_fixture.del_edge('nodeA', 'nodeB')
def test_has_node_true(graph_fixture):
    """has_node() is truthy for a node present in the container."""
    graph_fixture._container = {'nodeA': {}}
    assert graph_fixture.has_node('nodeA')
def test_has_node_false(graph_fixture):
    """has_node() is falsy for a node absent from the container."""
    graph_fixture._container = {'nodeA': {}}
    assert not graph_fixture.has_node('nodeB')
@pytest.mark.parametrize(("built_graph", 'node', 'expected'), NEIGHBORS)
def test_neighbors(graph_fixture, built_graph, node, expected):
    """neighbors() returns exactly the nodes reachable by one edge."""
    graph_fixture._container = built_graph
    assert set(expected) == set(graph_fixture.neighbors(node))
def test_neighbors_none(graph_fixture):
    """neighbors() raises KeyError for a node that is not in the graph."""
    graph_fixture._container = {'nodeA': {}}
    with pytest.raises(KeyError):
        graph_fixture.neighbors('nodeB')
@pytest.mark.parametrize(('built_graph', 'n1', 'n2', 'expected'), ADJACENT)
def test_adjacent(graph_fixture, built_graph, n1, n2, expected):
    """adjacent() honours edge direction: True only for an n1 -> n2 edge."""
    graph_fixture._container = built_graph
    assert expected == graph_fixture.adjacent(n1, n2)
@pytest.mark.parametrize(('built_graph', 'n1', 'n2'), ADJACENT_NODES_GONE)
def test_adjacent_not_exists(graph_fixture, built_graph, n1, n2):
    """adjacent() raises KeyError when either endpoint is missing."""
    graph_fixture._container = built_graph
    with pytest.raises(KeyError):
        graph_fixture.adjacent(n1, n2)
@pytest.mark.parametrize(('built_graph', 'node', 'expected'), NODE_TRAVERSAL_BREADTH)
def test_traverse_breadth(graph_fixture, built_graph, node, expected):
    """Breadth-first traversal visits nodes level by level from the start."""
    graph_fixture._container = built_graph
    assert expected == graph_fixture.breadth_first_traversal(node)
def test_empty_graph_breadth(graph_fixture):
graph_fixture._container = {}
with pytest.raises(IndexError):
graph_fixture.breadth_first_traversal('X' |
s_key(nonce):
msgcount[redundancy][nonce]["s"] += 1
else:
msgcount[redundancy][nonce] = {}
msgcount[redundancy][nonce]["s"] = 1
msgcount[redundancy][nonce]["r"] = 0
msgcount[redundancy][nonce]["rs"] = 0
for logcontent in ['RI','DRD']:
file = open("%(directory)s%(typePrefix)s_da%(da)s_r%(redundancy)s_%(logcontent)s.data"%vars())
for line in file:
if line[0:2] == logcontent or line[0:3] == logcontent:
info = line.split(' ')
for x in info:
if x[0:2] == "Ho":
nonce = x.split(':')[1]
if(msgcount[redundancy].has_key(nonce)):
msgcount[redundancy][nonce]["r"] += 1
else:
print logcontent, redundancy, nonce
for nonce in msgcount[redundancy]:
msgcount[redundancy][nonce]['rs'] = float(msgcount[redundancy][nonce]['r']) / float(msgcount[redundancy][nonce]['s'])
msg = sorted(msgcount[redundancy].iteritems(), key=lambda s: s[1]['rs'])
for x in range(len(msg)):
ratiotemp[msg[x][1]["rs"]] = float(x+1) / len(msg);
ratio[redundancy] = sorted(ratiotemp.iteritems())
ratiotemp.clear()
length = max(len(ratio[1]),len(ratio[2]),len(ratio[3]),len(ratio[4]),len(ratio[5]))
for j in range(length):
for i in [1,2,3,4,5]:
if(len(ratio[i])<=j):
writefile.write("null null")
else:
writefile.write(str(ratio[i][j][0])+" "+str(ratio[i][j][1])+ " ")
writefile.write("\n")
def RecvToSendRatioHopnonce(typePrefix, directory):
writefile = open("%(typePrefix)s-rsratio-hopnonce.data"%vars())
writefile.write("#Suggest Filename: %(typePrefix)s-rsratio.data\n#Data for drawing each package in different Amount/Redundancy\n"%vars())
writefile.write("#MPM100 ratio MPM200 ratio MPM300 ratio MPM400 ratio MPM500 ratio MPM600 ratio NoLimit ratio\n")
writefile.write("0 0 0 0 0 0 0 0 0 0 0 0 0 0\n")
backofftime = 2.5 # may subject to change by the data amount wanted to observe
msgcount = {}
ratiotemp = {}
ratio = {}
for mms in [100,200,300,400,500,600,-1]:
msgcount[mms] = {}
for logcontent in ['SI','SD']:
file = open("%(directory)s%(typePrefix)s_mb%(backofftime)s_mms%(mms)s_%(logcontent)s.data"%vars())
for line in file:
if line[0:2] == logcontent:
info = line.split(' ')
for x in info:
if x[0:2] == "Ho":
nonce = x.split(':')[1]
if msgcount[mms].has_key(nonce):
msgcount[mms][nonce]["s"] += 1
else:
msgcount[mms][nonce] = {}
msgcount[mms][nonce]["s"] = 1
msgcount[mms][nonce]["r"] = 0
msgcount[mms][nonce]["rs"] = 0
for logcontent in ['RI','DRD']:
file = open("%(directory)s%(typePrefix)s_mb%(backofftime)s_mms%(mms)s_%(logcontent)s.data"%vars())
for line in file:
if line[0:2] == logcontent or line[0:3] == logcontent:
info = line.split(' ')
for x in info:
if x[0:2] == "Ho":
nonce = x.split(':')[1]
if(msgcount[mms].has_key(nonce)):
msgcount[mms][nonce]["r"] += 1
else:
print logcontent, mms, nonce
for nonce in msgcount[mms]:
msgcount[mms][nonce]['rs'] = float(msgcount[mms][nonce]['r']) / float(msgcount[mms][nonce]['s'])
msg = sorted(msgcount[mms].iteritems(), key=lambda s: s[1]['rs'])
for x in range(len(msg)):
ratiotemp[msg[x][1]["rs"]] = float(x+1) / len(msg);
ratio[mms] = sorted(ratiotemp.iteritems())
ratiotemp.clear()
length = max(len(ratio[100]),len(ratio[200]),len(ratio[300]),len(ratio[400]),len(ratio[500]),len(ratio[-1]))
for j in range(length):
for i in [100,200,300,400,500,600,-1]:
if(len(ratio[i])<=j):
writefile.write("null null")
else:
writefile.write(str(ratio[i][j][0])+" "+str(ratio[i][j][1]))
writefile.write("\n")
#Get recall and latency
def RecallAndLatency(typePrefix, directory):
    """Write recall and latency tables parsed from the per-run result files.

    One row per data amount (10000..50000); within each row, one column
    pair per redundancy level (1..5), taken from the last line of each
    "<prefix>_da<amount>_r<redundancy>_0.data" file.

    NOTE(review): the '# row: max_backoff ...' header lines written below
    do not match the loops (which iterate amount/redundancy) -- presumably
    copied from another report; confirm before relying on the headers.
    """
    recallf = open("./%(typePrefix)s-recall.data"%vars(), "w")
    latencyf = open("./%(typePrefix)s-latency.data"%vars(), "w")
    recallf.write("#Data for recall of the %(typePrefix)s\n"%vars())
    latencyf.write("#Data for latency of the %(typePrefix)s\n"%vars())
    recallf.write("# row: max_backoff(0 0.5 1 1.5 2 2.5 3)\n")
    recallf.write("# col: max_message_size(-1, 200, 400, 600, 800, 1000)\n")
    recallf.write("#MaxBackoff No Limits 100 200 300 400 500\n")
    latencyf.write("# row: max_backoff(0 0.5 1 1.5 2 2.5 3)\n")
    latencyf.write("# col: max_message_size(-1, 200, 400, 600, 800, 1000)\n")
    latencyf.write("#MaxBackoff No Limits 100 200 300 400 500\n")
    for amount in [10000,20000,30000,40000,50000]:
        recallf.write(str(amount)+" ")
        latencyf.write(str(amount)+" ")
        for redundancy in [1,2,3,4,5]:
            # Last line of the result file: column 0 = latency, column 1 =
            # delivered count; recall = delivered / amount.
            file = open("%(directory)s%(typePrefix)s_da%(amount)s_r%(redundancy)s_0.data"%vars())
            line = file.readlines()[-1].split()
            recallf.write(str(float(line[1])/amount)+" ")
            latencyf.write(line[0]+" ")
            file.close()
        recallf.write("\n")
        latencyf.write("\n")
    recallf.close()
    latencyf.close()
    # os.system("gnuplot collision-avoidance-recall.gp")
    # os.system("gnuplot collision-avoidance-latency.gp")
def RSRHeatmap(typePrefix, directory):
amount = 50000
redundancy = 1
sendList = []
recvList = []
ratiolist = []
for i in xrange(100):
sendList.append([])
recvList.append([])
ratiolist.append(0)
for logcontent in ['SI','SD']:
file = open("%(directory)s%(typePrefix)s_da%(amount)s_r%(redundancy)s_%(logcontent)s.data"%vars())
for line in file:
if(line[0:2] == logcontent):
info = line.split(" ")
hopnonce = 0
for x in info:
if x[0:2] == "Ho":
hopnonce = int(x.split(":")[1])
if hopnonce != 0:
sendList[int(info[1])].append(hopnonce)
file.close()
for logcontent in ['RI','DRD']:
file = open("%(directory)s%(typePrefix)s_da%(amount)s_r%(re | dundancy)s_%(logcontent)s.data"%vars())
for line in file:
if(line[0:2] == logcontent or line[0:3] == logcontent):
info = line.split(" ")
hopnonce = 0
for x in info:
if x[0:2] == "Ho":
hopnonce = int(x.split(":")[1])
if hopnonce != 0:
recvList[int(info[1])].append(hopnonce) |
file.close()
for i in xrange(100):
for x in sendList[i]:
recv = 0
for ki in [-11,-10,-9,-1,1,9,10,11]:
if (i+ki >99 or i+ki<0):
continue
elif(i %10 == 0 and (ki == -1 or ki == -11 or ki == 9)):
continue
elif(i % 10 == 9 and (ki == 1 or ki == 11 or ki == -9)):
continue
recv += recvList[i+ki].count(x)
ratiolist[i] += recv
ratiolist[i] /= float(len(sendList[i]))
writefile = open("./ |
"""
Routines to compute RMSD of all PROT_IND_ files
These routines were developed by:
Rodrigo Antonio Faccioli - rodrigo.faccioli@usp.br / rodrigo.faccioli@gmail.com
Leandro Oliveira Bortot - leandro.bortot@usp.br / leandro.obt@gmail.com
"""
import os
import sys
from collections import OrderedDict
native = "1VII.pdb"
path_gromacs ="/home/faccioli/Programs/gmx-4.6.5/no_mpi/bin/"
main_command = "ech | o C-alpha C-alpha | @PATH_GROMACS@./g_rms -f @PROT@ -s @NATIVE@ -o temporary_rmsd.xvg 2>/dev/null"
def get_PROT_IND_files_pdb(mypath):
    """Return the full paths of every file under *mypath* (recursively)
    whose name contains "PROT_IND_"."""
    return [os.path.join(root, name)
            for root, _dirs, names in os.walk(mypath)
            for name in names
            if "PROT_IND_" in name]
def main():
    """Run g_rms for every PROT_IND_ pdb under sys.argv[1] and write the
    per-file RMSD values, sorted ascending, to all_rmsd.txt."""
    pdb_path = sys.argv[1]
    rmsd_by_name = {}
    for pdb in get_PROT_IND_files_pdb(pdb_path):
        # Fill in the command template and let GROMACS produce the xvg file.
        command = (main_command
                   .replace("@PATH_GROMACS@", path_gromacs)
                   .replace("@PROT@", pdb)
                   .replace("@NATIVE@", native))
        os.system(command)
        # Skip xvg comment lines ('@' / '#'); keep the last data line's value.
        xvg = open("temporary_rmsd.xvg", "r")
        for line in xvg.readlines():
            if line.find("@") < 0 and line.find("#") < 0:
                rmsd_by_name[os.path.basename(pdb)] = float(str(line).split()[1])
        xvg.close()
        os.remove("temporary_rmsd.xvg")
    # Persist the values sorted by RMSD.
    ordered = OrderedDict(sorted(rmsd_by_name.items(), key=lambda item: item[1]))
    out = open("all_rmsd.txt", "w")
    for name, value in ordered.items():
        out.write(str(name) + "\t" + str(value) + "\n")
    out.close()
# Script entry point; runs unconditionally, even on import (no __main__ guard).
main()
="transfer_mode", state="copy_to_scratch", type="input")
else:
# determine if the file is a root file according to its name
rootFile = self.isRootFileName(lfn)
if prodDBlockToken == 'local' or not rootFile:
directIn = False
tolog("Direct access mode has been switched off for this file (will be transferred with the copy tool)")
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
elif rootFile:
tolog("Found root file according to file name: %s (will not be transferred in direct reading mode)" % (lfn))
report['relativeStart'] = None
report['transferStart'] = None
self.__sendReport('FOUND_ROOT', report)
if useFileStager:
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="file_stager", type="input")
else:
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="remote_io", type="input")
return error.ERR_DIRECTIOFILE, pilotErrorDiag
else:
tolog("Normal file transfer")
# Now need to find the service class associated with the file.
# If we find a clear indication of a space token in the file path
# then this is easy. However, if we don't, then use stager_qry to
# interrogate each possible service class. If this fails then use
# atlasFarm in desperation.
serviceClass = None
for pathMatch, svcClass in castorConfig['svcClassMap'].iteritems():
if loc_pfn.find(pathMatch) >= 0:
tolog('Matched path element %s - service class is %s' % (pathMatch, svcClass))
serviceClass = svcClass
break
else:
tolog('Path element %s for service class %s - no match' % (pathMatch, svcClass))
# For testing the fallback, then we need to hobble ourselves by unsetting serviceClass:
#tolog('Automatic service class was: %s' % serviceClass)
#tolog('Unsetting service class for fallback testing')
#serviceClass = None
if serviceClass == None:
tolog("Warning: Failed to find service class hint in SURL.")
for tryMe in castorConfig['svcClassList']:
os.environ['STAGE_SVCCLASS'] = tryMe
tolog('Trying service class %s for file' % tryMe)
err, output = commands.getstatusoutput('stager_qry -M %s' % loc_pfn)
if err != 0:
tolog('WARNING: Unexpected status from stager_qry: %d\n%s' % (err, output))
else:
if output.find('STAGED') >= 0:
tolog('Found file in service class %s' % tryMe)
serviceClass = tryMe
break
else:
tolog('File not found in service class %s' % tryMe)
if serviceClass == None:
tolog('WARNING: Failed to find file in any expected service class - will set STAGE_SVCCLASS to %s' % castorConfig['svcClassDefault'])
serviceClass = castorConfig['svcClassDefault']
tolog('Setting STAGE_SVCCLASS to %s' % serviceClass)
os.environ['STAGE_SVCCLASS'] = serviceClass
dest_path = os.path.join(path, lfn)
_cmd_str = '%s/usr/bin/rfcp %s %s' % (_setup_str, loc_pfn, dest_path)
tolog("Executing command: %s" % (_cmd_str))
report['transferStart'] = time()
# execute
timeout = 3600
try:
s, telapsed, cout, cerr = timed_command(_cmd_str, timeout)
except Exception, e:
pilotErrorDiag = 'timed_command() threw an exception: %s' % (e)
tolog("!!WARNING!!1111!! %s" % (pilotErrorDiag))
s = 1
o = str(e)
telapsed = timeout
else:
# improve output parsing, keep stderr and stdout separate
o = cout + cerr
tolog("Elapsed time: %d" % (telapsed))
report['validateStart'] = time()
if s != 0:
o = o.replace('\n', ' ')
pilotErrorDiag = "rfcp failed: %d, %s" % (s, o)
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
check_syserr(s, o)
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_path)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
ec = error.ERR_STAGEINFAILED
if o.find("No such file or directory") >= 0:
if loc_pfn.find("DBRelease") >= 0:
pilotErrorDiag = "Missing DBRelease file: %s" % (loc_pfn)
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
ec = error.ERR_MISSDBREL
else:
pilotErrorDiag = "No such file or directory: %s" % (loc_pfn)
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
ec = error.ERR_NOSUCHFILE
self.__sendReport('RFCP_FAIL', report)
elif is_timeout(s):
pilotErrorDiag = "rfcp get was timed out after %d seconds" % (telapsed)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
self.__sendReport('GET_TIMEOUT', report)
ec = error.ERR_GETTIMEOUT
return ec, pilotErrorDiag
else:
tolog("Copy command finished")
if fsize == 0:
try:
fsize = str(os.path.getsize(loc_pfn))
except OSError, e:
pilotErrorDiag | = "Could not get file size: %s" % str(e)
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
self.__sendReport('FS_FAIL', report)
# remove the local file before any get retry is attempted
_status = self.rem | oveLocal(dest_path)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
return error.ERR_FAILEDSIZELOCAL, pilotErrorDiag
loc_filename = lfn
dest_file = os.path.join(path, loc_filename)
# get the checksum type (md5sum or adler32)
if fchecksum != 0 and fchecksum != "":
csumtype = self.getChecksumType(fchecksum)
else:
csumtype = "default"
# get remote file size and checksum
ec, pilotErrorDiag, dstfsize, dstfchecksum = self.getLocalFileInfo(dest_file, csumtype=csumtype)
if ec != 0:
self.__sendReport('LOCAL_FILE_INFO_FAIL', report)
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_path)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
return ec, pilotErrorDiag
# get remote file size and checksum
if dstfsize != fsize:
pilotErrorDiag = "Remote and local file sizes do not match for %s (%s != %s)" %\
(os.path.basename(gpfn), str(dstfsize), str(fsize))
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
self.__sendReport('FS_MISMATCH', report)
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_path)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
return error.ERR_GETWRONGSIZE, pilotErrorDiag
# compare remote and local file checksum
if fchecksum != 0 and dstfchecksum != fchecksum and not self.isDummyChecksum(fchecksum):
pilotErrorDiag = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" %\
(csumtype, os.path.basename(gpfn), dstfchecksum, fchecksum)
tolog( |
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
import json
from intranet.models import User, Project, Part, STATE_CREATED
class Test(TestCase):
    """API smoke tests for auth, project, part and imputation endpoints."""

    @classmethod
    def setUpClass(cls):
        # NOTE(review): super().setUpClass() is not called, matching the
        # original code -- confirm against the Django version in use.
        cls.c = Client()
        User.objects.all().delete()
        # Create the shared test user.
        user = User(
            username = 'user1',
            first_name = 'first',
            last_name = 'last',
            email = 'user@test.es'
        )
        user.set_password('dummy')
        user.save()
        cls.user = user
        # BUG FIX: several tests read self.token_auth, but the login that
        # sets it was commented out, so they failed with AttributeError.
        # Log in once here and share the token across tests.
        response = cls.c.post(reverse('auth-login'),
                              {'username': cls.user.username, 'password': 'dummy'})
        json_response = json.loads(response.content)
        cls.token_auth = json_response['token_auth']
        cls.project = Project(
            name = 'project 1',
            description = 'description project 1',
        )
        cls.project.save()
        cls.part = Part(
            month = 6,  # was the octal-style literal 06 (same value, py3-safe)
            year = 2011,
            employee = cls.user,
            state = 1,
        )
        cls.part.save()
        # NOTE(review): test_imputation_list expects one existing imputation,
        # but this fixture stayed commented out (Imputation is not imported
        # at the top of the file) -- confirm intent before enabling it.
        #self.imputation = Imputation(
        #    part = self.part,
        #    day = 13,
        #    hours = 5,
        #    project = self.project,
        #)
        #self.imputation.save()

    def test_login_logout_ok(self):
        """A valid login returns a token which can then be used to log out."""
        self.c = Client()
        response = self.c.post(reverse('auth-login'), {'username': self.user.username, 'password':'dummy'})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)
        token_auth = json_response['token_auth']
        self.c = Client()
        response = self.c.get(reverse('auth-logout'), {'token_auth': token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)

    def test_logout_invalid(self):
        """Logging out without a token is rejected."""
        self.c = Client()
        response = self.c.get(reverse('api:logout'))
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], False)

    def test_project_list(self):
        """The project list contains the single fixture project."""
        self.c = Client()
        response = self.c.get(reverse('api:project-list'), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['projects']), 1)

    def test_part_list(self):
        """The part list contains the single fixture part."""
        self.c = Client()
        response = self.c.get(reverse('api:part-list'), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['parts']), 1)

    def test_imputation_list(self):
        """The imputation list reflects the fixtures (see setUpClass NOTE)."""
        self.c = Client()
        response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['imputations']), 1)

    def test_imputation_create(self):
        """Create, filter and delete an imputation through the API."""
        self.c = Client()
        response = self.c.post(reverse('api:imputation-add'), {'project': self.project.id, 'day':3, 'hours':5, 'part':self.part.id, 'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)
        # was json_response.has_key('id'): py2-only dict method, same result
        self.assertEqual('id' in json_response, True)
        id_imp = json_response['id']
        #Invalid part
        response = self.c.post(reverse('api:imputation-add'), {'project': self.project.id, 'day':3, 'hours':5, 'part':222, 'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], False)
        #Invalid day
        response = self.c.post(reverse('api:imputation-add'), {'token_auth': self.token_auth, 'day':33, 'part':self.part.id, 'project': self.project.id})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], False)
        response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth, 'day':3, 'part':self.part.id, 'project': self.project.id})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['imputations']), 1)
        response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth, 'day':1, 'part':self.part.id, 'project': self.project.id})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['imputations']), 0)
        #Delete
        response = self.c.get(reverse('api:imputation-delete', args=[id_imp]), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)
        response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth, 'day':3, 'part':self.part.id, 'project': self.project.id})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['imputations']), 0)

    def test_part_creation(self):
        """Create a part, verify its initial state, then delete it."""
        self.c = Client()
        response = self.c.post(reverse('api:part-add'), {'month': 3, 'year':2008, 'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)
        # was json_response.has_key('id'): py2-only dict method, same result
        self.assertEqual('id' in json_response, True)
        id_part = json_response['id']
        response = self.c.get(reverse('api:part-list'), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['parts']), 2)
        parts = json_response['parts']
        for part in parts:
            if part['id'] == id_part:
                self.assertEqual(part['state'], STATE_CREATED)
        response = self.c.get(reverse('api:part-delete', args=[id_part]), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)
        response = self.c.get(reverse('api:part-list'), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['parts']), 1)
|
# Voitto - a simple yet efficient double ledger bookkeeping system
# Copyright (C) 2010 Santtu Pajukanta <santtu@pajukanta.fi>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
A Tappio ledger contains an account tree and a list of events. These are
encapsulated in the Document class.
Relationships between the classes in pseudo-UML:
Document 1 --> * Account
Document 1 --> * Event 1 --> * Entry 1 --> Account
"""
from datetime import date
import voitto
# Defaults used when constructing a Document without explicit metadata.
DEFAULT_IDENTITY = "Tappio"
DEFAULT_VERSION = "Voitto " + voitto.__version__
# Default accounting period: calendar year 2010.
DEFAULT_BEGIN = date(2010, 1, 1)
DEFAULT_END = date(2010, 12, 31)
DEFAULT_INDENT = " "
class Document(object):
    """Encapsulates a Tappio ledger: metadata, an account forest and events.

    The accounts attribute is a forest of three trees whose meanings follow
    the Finnish accounting system: the first tree is always "vastaavaa"
    (assets), the second "vastattavaa" (liabilities) and the third "tulos"
    (earnings).
    """

    def __init__(self, identity=DEFAULT_IDENTITY, version=DEFAULT_VERSION,
                 name="", begin=DEFAULT_BEGIN, end=DEFAULT_END, accounts=None,
                 events=None):
        # Fresh lists per instance; None sentinels avoid the shared
        # mutable-default pitfall while keeping caller-supplied lists as-is.
        if accounts is None:
            accounts = []
        if events is None:
            events = []
        self.identity = identity
        self.version = version
        self.name = name
        self.begin = begin
        self.end = end
        self.accounts = accounts
        self.events = events
class Account(object | ):
def __init__(self, number=None, name="", subaccounts=None, vat_type=None, vat_percent=None):
self.number = number
self.name = name
self.vat_type = vat_type
self.vat_percent = vat_percent
self.subaccounts = subaccounts if subaccounts is not None else []
class Event(object):
    """A single bookkeeping event: a numbered, dated set of entries."""

    def __init__(self, number, date, description="", entries=None):
        # A fresh entry list per instance when none is supplied.
        if entries is None:
            entries = []
        self.number = number
        self.date = date
        self.description = description
        self.entries = entries
class Entry(object):
    """A single ledger line: an amount in cents booked against an account."""

    def __init__(self, account_number, cents):
        self.account_number = account_number
        self.cents = cents
|
': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mrn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'proband': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['samples.Person']", 'through': "orm['samples.Relation']", 'symmetrical': 'False'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'samples.project': {
'Meta': {'unique_together': "(('name',),)", 'object_name': 'Project', 'db_table': "'project'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'samples.relation': {
'Meta': {'ordering': "('person', '-generation')", 'object_name': 'Relation', 'db_table': "'relation'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'generation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'family'", 'to': "orm['samples.Person']"}),
'relative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relative_of'", 'to': "orm['samples.Person']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'samples.result': {
'Meta': {'unique_together': "(('sample', 'variant'),)", 'object_name': 'Result', 'db_table': "'sample_result'"},
'base_counts': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'baseq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'coverage_alt': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'coverage_ref': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'downsampling': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fisher_strand': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'genotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Genotype']", 'null': 'True', 'blank': 'True'}),
'genotype_quality': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'haplotype_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'homopolymer_run': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_dbsnp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mq': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mq0': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phred_scaled_likelihood': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'quality': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'quality_by_depth': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'raw_read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'read_pos_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sample': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['samples.Sample']"}),
'spanning_deletions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'strand_bias': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"})
},
'samples.sample': {
'Meta': {'ordering': "('project', 'batch', 'label')", 'unique_together': "(('batch', 'name', 'version'),)", 'object_name': 'Sample', 'db_table': "'sample'"},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Batch']"}),
'bio_sample': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}) | ,
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True' | }),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'samples'", 'null': 'True', 'to': "orm['samples.Person']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Project']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version': ('django.db.models.fields.IntegerField', [], {})
},
'variants.variant': {
'Meta': {'unique_together': "(('chr', 'pos', 'ref', 'alt'),)", 'object_name': 'Variant', 'db_table': "'variant'"},
'alt': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'db_table': "'variant_pubmed'", 'symmetrical': 'False'}),
'chr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Chromosome']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'liftover': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), |
"""
Django settings for quixotic_webapp project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# DB parameters in this file
from . import parameters
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it into the
# parameters module or an environment variable before deploying.
SECRET_KEY = 'yh3$*6egtz79m@0(g!0txzr2rt2#xg852ne9cre&a3=twv#oc('
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is True while ALLOWED_HOSTS lists EC2 hosts — confirm
# this file is not the production configuration.
# DEBUG = False
DEBUG = True
ALLOWED_HOSTS = [
    u'ec2-54-173-30-19.compute-1.amazonaws.com',
    u'54.173.30.19',
]
# Application definition
INSTALLED_APPS = [
    # Project app first, then the Django contrib stack.
    'quixotic_api.apps.QuixoticApiConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # django-cors-headers; configured via CORS_ORIGIN_WHITELIST below.
    'corsheaders',
]
MIDDLEWARE = [
    # Per django-cors-headers docs, CorsMiddleware is placed before any
    # middleware that can generate responses.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'quixotic_webapp.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        # Look up templates inside each installed app's templates/ dir.
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'quixotic_webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Credentials come from the local parameters module; no PORT key is set,
# so the backend's default MySQL port is used.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': parameters.DB_NAME,
        'USER': parameters.DB_USER,
        'PASSWORD': parameters.DB_PASSWORD,
        'HOST': parameters.DB_HOST,
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Origins allowed by django-cors-headers.
# BUG FIX: the original value was ('localhost:8888') — parentheses around a
# single string do NOT make a tuple, so the setting was a plain string and
# django-cors-headers would iterate it character by character.  A one-element
# tuple requires a trailing comma.
CORS_ORIGIN_WHITELIST = (
    'localhost:8888',
)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from typing import Any, Optional
from pendulum import DateTime
from airflow.timetables.base import DagRunInfo, TimeRestriction, Timetable
from airflow.timetables.schedules import CronSchedule, Delta, DeltaSchedule, Schedule
class _DataIntervalTimetable(Timetable):
    """Base class for timetables built around periodic data intervals.

    Subclasses provide ``_schedule`` (e.g. cron- or timedelta-based); a
    DagRun is scheduled at the end of each interval the schedule yields.
    """

    _schedule: Schedule

    def __eq__(self, other: Any) -> bool:
        """Two interval timetables compare equal iff their schedules do."""
        if isinstance(other, _DataIntervalTimetable):
            return self._schedule == other._schedule
        return NotImplemented

    def validate(self) -> None:
        """Delegate validation to the underlying schedule."""
        self._schedule.validate()

    def next_dagrun_info(
        self,
        last_automated_dagrun: Optional[DateTime],
        restriction: TimeRestriction,
    ) -> Optional[DagRunInfo]:
        """Return the next run's data interval, or None if none fits."""
        earliest = restriction.earliest
        if not restriction.catchup:
            # Without catchup, jump straight to the latest allowed moment.
            earliest = self._schedule.skip_to_latest(earliest)
        if last_automated_dagrun is not None:
            # Subsequent run: the new interval starts where the previous
            # interval ended.
            start = self._schedule.get_next(last_automated_dagrun)
        elif earliest is None:
            # First run with no earliest bound: nothing can be scheduled.
            return None
        else:
            # First run: snap the earliest allowed time onto the schedule
            # and retrospectively build a data interval for it.
            start = self._schedule.align(earliest)
        if restriction.latest is not None and start > restriction.latest:
            return None
        return DagRunInfo.interval(start=start, end=self._schedule.get_next(start))
class CronDataIntervalTimetable(_DataIntervalTimetable):
    """Data-interval timetable driven by a cron expression.

    Backs ``schedule_interval=<cron>`` where ``<cron>`` is a five- or
    six-segment expression, or one of ``cron_presets``.  ``@once`` is not
    supported here — use ``OnceTimetable`` for that.
    """

    def __init__(self, cron: str, timezone: datetime.tzinfo) -> None:
        self._schedule = CronSchedule(cron, timezone)
class DeltaDataIntervalTimetable(_DataIntervalTimetable):
    """Data-interval timetable driven by a fixed time delta.

    Backs ``schedule_interval=<delta>`` where ``<delta>`` is either a
    ``datetime.timedelta`` or a ``dateutil.relativedelta.relativedelta``.
    """

    def __init__(self, delta: Delta) -> None:
        self._schedule = DeltaSchedule(delta)
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
import time
import pytest
import os
import telebot
from telebot import types
from telebot import util
# Skip the live-API tests unless BOTH TOKEN and CHAT_ID are configured.
# BUG FIX: the original expression was
#     should_skip = 'TOKEN' and 'CHAT_ID' not in os.environ
# which, by operator precedence, evaluates the truthy literal 'TOKEN' and
# then only tests CHAT_ID — `in` does not distribute over `and`.
should_skip = not all(key in os.environ for key in ('TOKEN', 'CHAT_ID'))
if not should_skip:
    TOKEN = os.environ['TOKEN']
    CHAT_ID = os.environ['CHAT_ID']
@pytest.mark.skipif(should_skip, reason="No environment variables configured")
class TestTeleBot:
    """Tests for telebot.TeleBot.

    The handler/listener tests run offline against a bot constructed with
    an empty token; the send_* tests call the live Telegram Bot API and
    therefore require TOKEN and CHAT_ID in the environment (otherwise the
    whole class is skipped by the decorator above).
    """
    def test_message_listener(self):
        # The update listener receives all processed messages in one batch.
        msg_list = []
        for x in range(100):
            msg_list.append(self.create_text_message('Message ' + str(x)))
        def listener(messages):
            assert len(messages) == 100
        tb = telebot.TeleBot('')
        tb.set_update_listener(listener)
    def test_message_handler(self):
        tb = telebot.TeleBot('')
        msg = self.create_text_message('/help')
        @tb.message_handler(commands=['help', 'start'])
        def command_handler(message):
            message.text = 'got'
        tb.process_new_messages([msg])
        # Message processing happens asynchronously; give it a moment.
        time.sleep(1)
        assert msg.text == 'got'
    def test_message_handler_reg(self):
        bot = telebot.TeleBot('')
        msg = self.create_text_message(r'https://web.telegram.org/')
        # NOTE(review): the regexp below is a non-raw string, so \w, \d, \+
        # and \. are invalid escape sequences (DeprecationWarning on modern
        # Python) — consider switching to a raw string literal.
        @bot.message_handler(regexp='((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)')
        def command_url(message):
            msg.text = 'got'
        bot.process_new_messages([msg])
        time.sleep(1)
        assert msg.text == 'got'
    def test_message_handler_lambda(self):
        bot = telebot.TeleBot('')
        msg = self.create_text_message(r'lambda_text')
        @bot.message_handler(func=lambda message: r'lambda' in message.text)
        def command_url(message):
            msg.text = 'got'
        bot.process_new_messages([msg])
        time.sleep(1)
        assert msg.text == 'got'
    def test_message_handler_lambda_fail(self):
        # Negative case: the filter does not match, so the text is unchanged.
        bot = telebot.TeleBot('')
        msg = self.create_text_message(r'text')
        @bot.message_handler(func=lambda message: r'lambda' in message.text)
        def command_url(message):
            msg.text = 'got'
        bot.process_new_messages([msg])
        time.sleep(1)
        assert not msg.text == 'got'
    def test_message_handler_reg_fail(self):
        # Negative case: no scheme prefix, so the URL regexp does not match.
        bot = telebot.TeleBot('')
        msg = self.create_text_message(r'web.telegram.org/')
        @bot.message_handler(regexp='((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)')
        def command_url(message):
            msg.text = 'got'
        bot.process_new_messages([msg])
        time.sleep(1)
        assert not msg.text == 'got'
    def test_send_message_with_markdown(self):
        tb = telebot.TeleBot(TOKEN)
        markdown = """
        *bold text*
        _italic text_
        [text](URL)
        """
        ret_msg = tb.send_message(CHAT_ID, markdown, parse_mode="Markdown")
        assert ret_msg.message_id
    def test_send_file(self):
        # Send by file object first, then re-send by file_id.
        file_data = open('../examples/detailed_example/kitten.jpg', 'rb')
        tb = telebot.TeleBot(TOKEN)
        ret_msg = tb.send_document(CHAT_ID, file_data)
        assert ret_msg.message_id
        ret_msg = tb.send_document(CHAT_ID, ret_msg.document.file_id)
        assert ret_msg.message_id
    def test_send_video(self):
        file_data = open('./test_data/test_video.mp4', 'rb')
        tb = telebot.TeleBot(TOKEN)
        ret_msg = tb.send_video(CHAT_ID, file_data)
        assert ret_msg.message_id
    def test_send_video_more_params(self):
        file_data = open('./test_data/test_video.mp4', 'rb')
        tb = telebot.TeleBot(TOKEN)
        ret_msg = tb.send_video(CHAT_ID, file_data, 1)
        assert ret_msg.message_id
    def test_send_file_exception(self):
        # Sending None as the document must raise.
        tb = telebot.TeleBot(TOKEN)
        try:
            tb.send_document(CHAT_ID, None)
            assert False
        except Exception as e:
            print(e)
            assert True
    def test_send_photo(self):
        file_data = open('../examples/detailed_example/kitten.jpg', 'rb')
        tb = telebot.TeleBot(TOKEN)
        ret_msg = tb.send_photo(CHAT_ID, file_data)
        assert ret_msg.message_id
        ret_msg = tb.send_photo(CHAT_ID, ret_msg.photo[0].file_id)
        assert ret_msg.message_id
    def test_send_audio(self):
        file_data = open('./test_data/record.mp3', 'rb')
        tb = telebot.TeleBot(TOKEN)
        ret_msg = tb.send_audio(CHAT_ID, file_data, 1, 'eternnoir', 'pyTelegram')
        assert ret_msg.content_type == 'audio'
        assert ret_msg.audio.performer == 'eternnoir'
        assert ret_msg.audio.title == 'pyTelegram'
    def test_send_voice(self):
        file_data = open('./test_data/record.ogg', 'rb')
        tb = telebot.TeleBot(TOKEN)
        ret_msg = tb.send_voice(CHAT_ID, file_data)
        assert ret_msg.voice.mime_type == 'audio/ogg'
    def test_get_file(self):
        file_data = open('./test_data/record.ogg', 'rb')
        tb = telebot.TeleBot(TOKEN)
        ret_msg = tb.send_voice(CHAT_ID, file_data)
        file_id = ret_msg.voice.file_id
        file_info = tb.get_file(file_id)
        assert file_info.file_id == file_id
    def test_send_message(self):
        text = 'CI Test Message'
        tb = telebot.TeleBot(TOKEN)
        ret_msg = tb.send_message(CHAT_ID, text)
        assert ret_msg.message_id
    def test_forward_message(self):
        text = 'CI forward_message Test Message'
        tb = telebot.TeleBot(TOKEN)
        msg = tb.send_message(CHAT_ID, text)
        ret_msg = tb.forward_message(CHAT_ID, CHAT_ID, msg.message_id)
        assert ret_msg.forward_from
    def test_reply_to(self):
        text = 'CI reply_to Test Message'
        tb = telebot.TeleBot(TOKEN)
        msg = tb.send_message(CHAT_ID, text)
        ret_msg = tb.reply_to(msg, text + ' REPLY')
        assert ret_msg.reply_to_message.message_id == msg.message_id
    def test_register_for_reply(self):
        text = 'CI reply_to Test Message'
        tb = telebot.TeleBot(TOKEN)
        msg = tb.send_message(CHAT_ID, text, reply_markup=types.ForceReply())
        reply_msg = tb.reply_to(msg, text + ' REPLY')
        def process_reply(message):
            assert msg.message_id == message.reply_to_message.message_id
        tb.register_for_reply(msg, process_reply)
        tb.process_new_messages([reply_msg])
    def test_send_location(self):
        tb = telebot.TeleBot(TOKEN)
        lat = 26.3875591
        lon = -161.2901042
        ret_msg = tb.send_location(CHAT_ID, lat, lon)
        # Compare truncated values; the API may round coordinates.
        assert int(ret_msg.location.longitude) == int(lon)
        assert int(ret_msg.location.latitude) == int(lat)
    def create_text_message(self, text):
        # Build a minimal offline Message; a types.User doubles as the chat
        # object here — presumably sufficient for handler tests; confirm
        # against telebot.types.Message.
        params = {'text': text}
        chat = types.User(11, 'test')
        return types.Message(1, None, None, chat, 'text', params)
    def test_is_string_unicode(self):
        s1 = u'string'
        assert util.is_string(s1)
    def test_is_string_string(self):
        s1 = 'string'
        assert util.is_string(s1)
    def test_not_string(self):
        i1 = 10
        assert not util.is_string(i1)
|
# Copy | right (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import random
from tests.ggrc import TestCase
from freezegun import freeze_time
from datetime import datetime
from mock import patch
import os
from | ggrc import notification
from ggrc.models import NotificationConfig, Notification, Person
from tests.ggrc_workflows.generator import WorkflowsGenerator
from tests.ggrc.api_helper import Api
from tests.ggrc.generator import GgrcGenerator
# Fix the RNG on CI so failing runs can be reproduced locally.
if os.environ.get('TRAVIS', False):
    random.seed(1)  # so we can reproduce the tests if needed
class TestEnableAndDisableNotifications(TestCase):
    """ This class contains simple one time workflow tests that are not
    in the gsheet test grid.

    It verifies that the "Email_Digest" notification setting enables,
    disables, and (for notify_on_change workflows) force-overrides the
    digest emails returned by notification.get_todays_notifications().
    """
    def setUp(self):
        TestCase.setUp(self)
        self.api = Api()
        self.wf_generator = WorkflowsGenerator()
        self.ggrc_generator = GgrcGenerator()
        # Start every test from an empty notification table.
        Notification.query.delete()
        self.random_objects = self.ggrc_generator.generate_random_objects(2)
        _, self.user = self.ggrc_generator.generate_person(user_role="gGRC Admin")
        self.create_test_cases()
    def init_decorator(init):
        # Wraps Notification.__init__ so that created_at reflects the
        # current (possibly freezegun-frozen) time at creation.
        def new_init(self, *args, **kwargs):
            init(self, *args, **kwargs)
            if hasattr(self, "created_at"):
                self.created_at = datetime.now()
        return new_init
    # NOTE(review): this monkeypatch runs once when the class body is
    # evaluated and alters the Notification model globally, not per-test.
    Notification.__init__ = init_decorator(Notification.__init__)
    @patch("ggrc.notification.email.send_email")
    def test_default_notificaitons_settings(self, mock_mail):
        # With no explicit setting, digest notifications default to enabled.
        with freeze_time("2015-02-01 13:39:20"):
            _, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
            response, wf = self.wf_generator.activate_workflow(wf)
            self.assert200(response)
            user = Person.query.get(self.user.id)
        with freeze_time("2015-01-01 13:39:20"):
            _, notif_data = notification.get_todays_notifications()
            self.assertNotIn(user.email, notif_data)
        with freeze_time("2015-01-29 13:39:20"):
            _, notif_data = notification.get_todays_notifications()
            self.assertIn(user.email, notif_data)
    @patch("ggrc.notification.email.send_email")
    def test_disabled_notifications(self, mock_mail):
        # Disabling Email_Digest suppresses the user's notifications.
        with freeze_time("2015-02-01 13:39:20"):
            _, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
            response, wf = self.wf_generator.activate_workflow(wf)
            self.assert200(response)
            self.ggrc_generator.generate_notification_setting(
                self.user.id, "Email_Digest", False)
            user = Person.query.get(self.user.id)
        with freeze_time("2015-01-01 13:39:20"):
            _, notif_data = notification.get_todays_notifications()
            self.assertNotIn(user.email, notif_data)
        with freeze_time("2015-01-29 13:39:20"):
            _, notif_data = notification.get_todays_notifications()
            self.assertNotIn(user.email, notif_data)
    @patch("ggrc.notification.email.send_email")
    def test_enabled_notifications(self, mock_mail):
        # Explicitly enabling Email_Digest keeps notifications flowing.
        with freeze_time("2015-02-01 13:39:20"):
            _, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
            response, wf = self.wf_generator.activate_workflow(wf)
            self.assert200(response)
        with freeze_time("2015-01-29 13:39:20"):
            user = Person.query.get(self.user.id)
            _, notif_data = notification.get_todays_notifications()
            self.assertIn(user.email, notif_data)
            self.ggrc_generator.generate_notification_setting(
                self.user.id, "Email_Digest", True)
            user = Person.query.get(self.user.id)
            _, notif_data = notification.get_todays_notifications()
            self.assertIn(user.email, notif_data)
    @patch("ggrc.notification.email.send_email")
    def test_forced_notifications(self, mock_mail):
        # A workflow with notify_on_change sends digests regardless.
        with freeze_time("2015-02-01 13:39:20"):
            _, wf = self.wf_generator.generate_workflow(self.quarterly_wf_forced)
            response, wf = self.wf_generator.activate_workflow(wf)
            self.assert200(response)
            user = Person.query.get(self.user.id)
        with freeze_time("2015-01-29 13:39:20"):
            _, notif_data = notification.get_todays_notifications()
            self.assertIn(user.email, notif_data)
            self.ggrc_generator.generate_notification_setting(
                self.user.id, "Email_Digest", True)
            user = Person.query.get(self.user.id)
            _, notif_data = notification.get_todays_notifications()
            self.assertIn(user.email, notif_data)
    @patch("ggrc.notification.email.send_email")
    def test_force_one_wf_notifications(self, mock_mail):
        # With digests disabled, only the forced workflow still notifies.
        with freeze_time("2015-02-01 13:39:20"):
            _, wf_forced = self.wf_generator.generate_workflow(self.quarterly_wf_forced)
            response, wf_forced = self.wf_generator.activate_workflow(wf_forced)
            _, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
            response, wf = self.wf_generator.activate_workflow(wf)
            self.assert200(response)
            user = Person.query.get(self.user.id)
        with freeze_time("2015-01-29 13:39:20"):
            _, notif_data = notification.get_todays_notifications()
            self.assertIn(user.email, notif_data)
            self.assertIn("cycle_starts_in", notif_data[user.email])
            self.assertIn(wf_forced.id, notif_data[user.email]["cycle_starts_in"])
            self.assertIn(wf.id, notif_data[user.email]["cycle_starts_in"])
            self.ggrc_generator.generate_notification_setting(
                self.user.id, "Email_Digest", False)
            user = Person.query.get(self.user.id)
            _, notif_data = notification.get_todays_notifications()
            self.assertIn(user.email, notif_data)
            self.assertIn("cycle_starts_in", notif_data[user.email])
            self.assertIn(wf_forced.id, notif_data[user.email]["cycle_starts_in"])
            self.assertNotIn(wf.id, notif_data[user.email]["cycle_starts_in"])
    def create_test_cases(self):
        # Workflow payloads shared by the tests above.
        def person_dict(person_id):
            return {
                "href": "/api/people/%d" % person_id,
                "id": person_id,
                "type": "Person"
            }
        # Quarterly workflow with notify_on_change=True (forced digests).
        self.quarterly_wf_forced = {
            "title": "quarterly wf forced notification",
            "notify_on_change": True,
            "description": "",
            "owners": [person_dict(self.user.id)],
            "frequency": "quarterly",
            "task_groups": [{
                "title": "tg_1",
                "contact": person_dict(self.user.id),
                "task_group_tasks": [{
                    "contact": person_dict(self.user.id),
                    "description": self.wf_generator.random_str(100),
                    "relative_start_day": 5,
                    "relative_start_month": 2,
                    "relative_end_day": 25,
                    "relative_end_month": 2,
                },
                ],
            },
            ]
        }
        # Same workflow without forced notifications.
        self.quarterly_wf = {
            "title": "quarterly wf 1",
            "description": "",
            "owners": [person_dict(self.user.id)],
            "frequency": "quarterly",
            "task_groups": [{
                "title": "tg_1",
                "contact": person_dict(self.user.id),
                "task_group_tasks": [{
                    "contact": person_dict(self.user.id),
                    "description": self.wf_generator.random_str(100),
                    "relative_start_day": 5,
                    "relative_start_month": 2,
                    "relative_end_day": 25,
                    "relative_end_month": 2,
                },
                ],
            },
            ]
        }
|
ck to location only for event {}".format(event.key.id()))
geocode_result = cls.google_maps_geocode_async(event.location).get_result()
if geocode_result:
location_info = cls.construct_location_info_async(geocode_result[0]).get_result()
else:
logging.warning("Event {} location failed!".format(event.key.id()))
# Update event
event.normalized_location = cls.build_normalized_location(location_info)
@classmethod
def get_event_location_info(cls, event):
    """
    Search for different combinations of venue, venue_address, city,
    state_prov, postalcode, and country in attempt to find the correct
    location associated with the event.

    Returns a (location_info, score) tuple where score is in [0, 1]
    (1.0 meaning a perfect match; see compute_event_location_score).
    """
    # Possible queries for location that will match yield results
    if event.venue_address:
        possible_queries = [event.venue_address.replace('\n', ' ')]
    else:
        possible_queries = []
    if event.venue:
        possible_queries.append(event.venue)
    if event.venue_address:
        split_address = event.venue_address.split('\n')
        # Venue takes up at most 2 lines. Isolate address
        possible_queries.append(' '.join(split_address[1:]))
        possible_queries.append(' '.join(split_address[2:]))
    # Geocode for lat/lng; without coordinates we cannot score candidates.
    lat_lng = cls.get_lat_lng(event.location)
    if not lat_lng:
        return {}, 0
    # Try to find place based on possible queries
    best_score = 0
    best_location_info = {}
    # NOTE(review): this list is never used in this method — dead code?
    nearbysearch_results_candidates = []  # More trustworthy candidates are added first
    for j, query in enumerate(possible_queries):
        # Try both nearby search and text search for each query.
        nearbysearch_places = cls.google_maps_placesearch_async(query, lat_lng)
        textsearch_places = cls.google_maps_placesearch_async(query, lat_lng, textsearch=True)
        for results_future in [nearbysearch_places, textsearch_places]:
            for i, place in enumerate(results_future.get_result()[:5]):
                location_info = cls.construct_location_info_async(place).get_result()
                score = cls.compute_event_location_score(query, location_info, lat_lng)
                score *= pow(0.7, j) * pow(0.7, i)  # discount by ranking
                if score == 1:
                    # Perfect match — stop searching early.
                    return location_info, score
                elif score > best_score:
                    best_location_info = location_info
                    best_score = score
    return best_location_info, best_score
@classmethod
def compute_event_location_score(cls, query_name, location_info, lat_lng):
    """Score a candidate place against an event; 1.0 is a perfect match.

    Not checking for absolute equality in case of existing data errors.
    Returns 0 for places farther than 100 km away or that are not a
    point of interest / premise.
    """
    # TODO FIX: Hacky special case for weird event. 2017-01-18 -fangeugene
    if 'Shenzhen' in query_name and location_info['name'] != 'Shenzhen University Town Sports Center':
        return 0
    # Haversine distance between the event and the candidate place.
    EARTH_RADIUS_KM = 6373.0  # approximate radius of earth in km
    lat_a, lon_a = (math.radians(coord) for coord in lat_lng)
    lat_b = math.radians(location_info['lat'])
    lon_b = math.radians(location_info['lng'])
    half_chord = (
        math.sin((lat_b - lat_a) / 2) ** 2
        + math.cos(lat_a) * math.cos(lat_b) * math.sin((lon_b - lon_a) / 2) ** 2
    )
    angle = 2 * math.atan2(math.sqrt(half_chord), math.sqrt(1 - half_chord))
    if EARTH_RADIUS_KM * angle > 100:
        return 0
    # Only trust real points of interest / premises; compare the query
    # against both name and address, dampened by a cube root.
    if not {'point_of_interest', 'premise'}.intersection(set(location_info.get('types', ''))):
        return 0
    return pow(max(
        cls.get_similarity(query_name, location_info['name']),
        cls.get_similarity(query_name, location_info['formatted_address'])), 1.0 / 3)
@classmethod
def update_team_location(cls, team):
    """Populate team.normalized_location by geocoding team.location.

    Place-search-based resolution is currently disabled (commented out
    below); the method always falls back to plain geocoding of the
    team's location string, then to city/country.
    """
    if not team.location:
        return
    # # Try with and without textsearch, pick best
    # location_info, score = cls.get_team_location_info(team)
    # if score < 0.7:
    #     logging.warning("Using textsearch for {}".format(team.key.id()))
    #     location_info2, score2 = cls.get_team_location_info(team, textsearch=True)
    #     if score2 > score:
    #         location_info = location_info2
    #         score = score2
    # # Log performance
    # text = "Team {} location score: {}".format(team.key.id(), score)
    # if score < 0.8:
    #     logging.warning(text)
    # else:
    #     logging.info(text)
    # # Don't trust anything below a certain threshold Super strict for now.
    # if score < 0.9:
    #     logging.warning("Location score too low for team {}".format(team.key.id()))
    #     location_info = {}
    location_info = {}  # Force imprecise locations
    # Fallback to location only
    if not location_info:
        # logging.warning("Falling back to location only for team {}".format(team.key.id()))
        geocode_result = cls.google_maps_geocode_async(team.location).get_result()
        if geocode_result:
            location_info = cls.construct_location_info_async(geocode_result[0], auto_fill=False).get_result()
    # Fallback to city, country
    if not location_info:
        logging.warning("Falling back to city/country only for team {}".format(team.key.id()))
        city_country = u'{} {}'.format(
            team.city if team.city else '',
            team.country if team.country else '')
        geocode_result = cls.google_maps_geocode_async(city_country).get_result()
        if geocode_result:
            location_info = cls.construct_location_info_async(geocode_result[0], auto_fill=False).get_result()
        else:
            logging.warning("Team {} location failed!".format(team.key.id()))
    # Update team (empty location_info yields a Location with null fields).
    team.normalized_location = cls.build_normalized_location(location_info)
@classmethod
def build_normalized_location(cls, location_info):
    """Convert a location_info dict into a Location model instance.

    lat_lng is only populated when both coordinates are present; every
    other field is copied via .get() and may be None.
    """
    coords = None
    if 'lat' in location_info and 'lng' in location_info:
        coords = ndb.GeoPt(location_info['lat'], location_info['lng'])
    optional_fields = (
        'name', 'formatted_address', 'street_number', 'street', 'city',
        'state_prov', 'state_prov_short', 'country', 'country_short',
        'postal_code', 'place_id', 'place_details',
    )
    field_values = {field: location_info.get(field) for field in optional_fields}
    return Location(lat_lng=coords, **field_values)
@classmethod
def get_team_location_info(cls, team, textsearch=False):
"""
Search for different combinations of team name (which should include
high school or title sponsor) with city, state_prov, postalcode, and country
in attempt to find the correct location associated with the team.
"""
# Find possible schools/title sponsors
possible_names = []
MAX_SPLIT = 3 # Filters out long names that are unlikely
if team.name:
# Guessing sponsors/school by splitting name by '/' or '&'
split1 = re.split('&', team.name)
split2 = re.split('/', team.name)
if split1 and \
split1[-1].count('&') < MAX_SPLIT and split1[-1].count('/') < MAX_SPLIT:
possible_names.append(split1[-1])
if split2 and split2[-1] not in possible_names and \
split2[-1].count('&') < MAX_SPLIT and split2[-1].count('/') < MAX_SPLIT:
possible_names.append(split2[-1])
|
#!/usr/bin/env python | 3
delineator = "//"
hashtag = "#"
# generate poems from a file
# out: list of poe | m lines
def generate_poems(filename):
g = []
# get to the first poem in the file
with open(filename, 'r') as f:
for line in f:
line = line.rstrip()
if line.startswith( delineator ) and g:
yield g
g = []
if line:
g.append(line)
yield g
# convert a list of strings
# into a poem dictionary
# convert a list of strings
# into a poem dictionary
def to_dictionary(poem_lines):
    """Turn a poem's raw lines into a dict with 'title', 'content', 'tags'.

    The delineator line supplies the title, hashtag lines are parsed into
    individual tags, and every other line is body content.
    """
    poem = {'content': [], 'tags': []}
    tag_lines = []
    for raw in poem_lines:
        if raw.startswith(delineator):
            poem['title'] = raw.lstrip(delineator).strip()
        elif raw.startswith(hashtag):
            tag_lines.append(raw)
        else:
            # keep the line untouched to preserve indentation
            poem['content'].append(raw)
    # Split each tag line on the hashtag marker and collect the tags.
    for raw in tag_lines:
        pieces = [p for p in raw.split(hashtag) if p]
        poem['tags'].extend(piece.strip() for piece in pieces)
    return poem
|
import os

# Target architecture / CPU: RT-Thread on the Atmel AT91SAM9G45 (ARM926EJ-S).
ARCH = 'arm'
CPU = 'arm926'

# toolchains options
# Default toolchain; may be overridden via the RTT_CC environment variable.
# Recognised values: 'gcc', 'keil', 'iar'.
CROSS_TOOL = 'gcc'

#------- toolchains path -------------------------------------------------------
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')

if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = r'D:\arm-2013.11\bin'
elif CROSS_TOOL == 'keil':
    PLATFORM = 'armcc'
    EXEC_PATH = 'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
    PLATFORM = 'iar'
    EXEC_PATH = 'C:/Program Files (x86)/IAR Systems/Embedded Workbench 7.2'
else:
    # Fail fast with a clear message; previously an unknown RTT_CC left
    # PLATFORM/EXEC_PATH undefined and crashed later with a NameError.
    raise ValueError('Unsupported CROSS_TOOL %r; expected gcc, keil or iar'
                     % CROSS_TOOL)

# The toolchain install location can also be overridden from the environment.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

#BUILD = 'debug'
BUILD = 'release'

CORE = 'arm926ej-s'
MAP_FILE = 'rtthread_at91sam9g45.map'
LINK_FILE = 'link_scripts/at91sam9g45_ram'
TARGET_NAME = 'rtthread.bin'

#------- GCC settings ----------------------------------------------------------
if PLATFORM == 'gcc':
    # toolchains
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    CXX = PREFIX + 'g++'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'elf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'

    DEVICE = ' -mcpu=arm926ej-s'
    CFLAGS = DEVICE
    AFLAGS = '-c'+ DEVICE + ' -x assembler-with-cpp'
    AFLAGS += ' -Iplatform'
    LFLAGS = DEVICE
    LFLAGS += ' -Wl,--gc-sections,-cref,-Map=' + MAP_FILE
    LFLAGS += ' -T ' + LINK_FILE + '.ld'
    CPATH = ''
    LPATH = ''

    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'

    # Produce the raw binary image and print section sizes after linking.
    POST_ACTION = OBJCPY + ' -O binary $TARGET ' + TARGET_NAME + '\n'
    POST_ACTION += SIZE + ' $TARGET\n'

#------- Keil settings ---------------------------------------------------------
elif PLATFORM == 'armcc':
    # toolchains
    CC = 'armcc'
    AS = 'armasm'
    AR = 'armar'
    LINK = 'armlink'
    TARGET_EXT = 'axf'
    EXEC_PATH += '/arm/armcc/bin/'

    DEVICE = ' --cpu=' + CORE
    CFLAGS = DEVICE + ' --apcs=interwork --diag_suppress=870'
    AFLAGS = DEVICE + ' -Iplatform'
    LFLAGS = DEVICE + ' --strict'
    LFLAGS += ' --info sizes --info totals --info unused --info veneers'
    LFLAGS += ' --list ' + MAP_FILE
    LFLAGS += ' --scatter ' + LINK_FILE + '.scat'

    if BUILD == 'debug':
        CFLAGS += ' -g -O0'
        AFLAGS += ' -g'
    else:
        CFLAGS += ' -O2'

    # Convert the image to a raw binary and show its compressed-size report.
    POST_ACTION = 'fromelf --bin $TARGET --output ' + TARGET_NAME + ' \n'
    POST_ACTION += 'fromelf -z $TARGET\n'

#------- IAR settings ----------------------------------------------------------
elif PLATFORM == 'iar':
    # toolchains
    CC = 'iccarm'
    AS = 'iasmarm'
    AR = 'iarchive'
    LINK = 'ilinkarm'
    TARGET_EXT = 'out'

    DEVICE = CORE

    CFLAGS = '--cpu=' + DEVICE
    CFLAGS += ' --diag_suppress Pa050'
    CFLAGS += ' --no_cse'
    CFLAGS += ' --no_unroll'
    CFLAGS += ' --no_inline'
    CFLAGS += ' --no_code_motion'
    CFLAGS += ' --no_tbaa'
    CFLAGS += ' --no_clustering'
    CFLAGS += ' --no_scheduling'
    CFLAGS += ' --endian=little'
    CFLAGS += ' -e'
    CFLAGS += ' --fpu=none'
    CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
    CFLAGS += ' --silent'

    AFLAGS = '--cpu '+ DEVICE
    AFLAGS += ' -s+'
    AFLAGS += ' -w+'
    AFLAGS += ' -r'
    AFLAGS += ' --fpu none'
    AFLAGS += ' -S'
    AFLAGS += ' -Iplatform'

    if BUILD == 'debug':
        CFLAGS += ' --debug'
        CFLAGS += ' -On'
    else:
        CFLAGS += ' -Oh'

    LFLAGS = '--config ' + LINK_FILE +'.icf'
    LFLAGS += ' --entry __iar_program_start'
    LFLAGS += ' --map ' + MAP_FILE
    LFLAGS += ' --silent'

    EXEC_PATH = EXEC_PATH + '/arm/bin/'
    POST_ACTION = 'ielftool --silent --bin $TARGET ' + TARGET_NAME
|
# -*- coding: utf-8 -*-
from __fut | ure__ import unicode_literals, absolute_import
def check_junior(sender, instance, created, **kwargs):
    """Demote a junior user once they have accumulated two or more entries.

    Intended as a post-save signal receiver: only freshly created entries
    belonging to a junior user trigger the count and the demotion.
    """
    # from .models import Entry  # avoid circular import
    if not created:
        return
    user = instance.user
    if not user.junior:
        return
    entry_count = sender.objects.filter(user=user).count()
    if entry_count >= 2:
        user.junior = False
        user.save()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.