| code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
The BioThings Explorer project.
.. moduleauthor:: Jiwen Xin <kevinxin@scripps.edu>
"""
|
biothings/biothings_explorer
|
__init__.py
|
Python
|
apache-2.0
| 118
|
# -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015-Today Julius Network Solutions SARL <contact@julius.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import sms_authentication
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ncliam/serverpos
|
openerp/custom_modules/website_sms_authentication_base_phone/models/__init__.py
|
Python
|
agpl-3.0
| 1,081
|
from .bgplvmpanama import BGPLVM_PANAMA
from .var_dtc_fixed_cov import VarDTCFixedCov
|
mzwiessele/applygpy
|
applygpy/bgplvmpanama/__init__.py
|
Python
|
bsd-3-clause
| 85
|
# Python - 3.6.0
plural = lambda n: n != 1
|
RevansChen/online-judge
|
Codewars/8kyu/plural/Python/solution1.py
|
Python
|
mit
| 44
|
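The kata above reduces English pluralization to a single comparison: every count except exactly 1 takes the plural form. A few illustrative checks follow (my own sketch, not part of the submitted solution):

```python
# Sanity checks for the one-line kata solution above; per the Codewars
# description, any count other than exactly 1 is plural.
plural = lambda n: n != 1

assert plural(0)        # "0 items"
assert not plural(1)    # "1 item"
assert plural(1.5)      # "1.5 items"
assert plural(100)      # "100 items"
```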
#!python
"""Bootstrap ensetuptools installation
If you want to use ensetuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of ensetuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade ensetuptools.
"""
import os
import sys
try: from hashlib import md5
except ImportError: from md5 import md5
# Setup some global vars
DEFAULT_VERSION = "1.0.0"
DEFAULT_URL = "http://code.enthought.com/src"
md5_data = {
'ensetuptools-1.0.0-py2.5.egg': 'e036fb78ae852d0f4dd417cc93784568',
}
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download ensetuptools and make it available on sys.path
`version` should be a valid ensetuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where ensetuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of ensetuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("ensetuptools>="+version)
return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of ensetuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U ensetuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "ensetuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires ensetuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
)
from time import sleep
sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb")
dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade ensetuptools"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
res = setuptools.__version__.split('-')
if len(res) < 2 or res[1][0] != 's':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before running this script."
)
sys.exit(2)
req = "ensetuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "ensetuptools version %s or greater has been installed." % version
print '(Run "ez_ensetuptools.py -U ensetuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb')
src = f.read()
f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile, 'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
|
cournape/ensetuptools
|
ez_ensetuptools.py
|
Python
|
bsd-3-clause
| 7,483
|
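As the module docstring above explains, a project ships this bootstrap file next to its setup.py and calls use_setuptools() before anything imports setuptools. A minimal sketch of such a setup.py (the package name and version are placeholders; the docstring says ez_setup, while this copy is named ez_ensetuptools.py, so the import below assumes the shipped file name):

```python
# Hypothetical setup.py living alongside ez_ensetuptools.py, following the
# bootstrap pattern described in the module docstring above.
from ez_ensetuptools import use_setuptools
use_setuptools()  # downloads the ensetuptools egg first if it is missing

from setuptools import setup

setup(
    name="example-package",  # placeholder
    version="0.1",           # placeholder
)
```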
# -*- coding: utf-8 -*-
from google.appengine.ext import ndb
from src.plugins.user import UserModel as _UserModel
class UserModel(_UserModel):
authorized_query_id = ndb.StringProperty(repeated=True)
# authorized_table_name = ndb.StringProperty(repeated=True)
report_id = ndb.StringProperty(repeated=True)
verified = ndb.BooleanProperty(default=False)
|
rororo12/bq-square
|
src/user.py
|
Python
|
mit
| 369
|
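Since this subclasses an App Engine ndb model, instances round-trip through the datastore with the standard ndb calls. A hedged sketch (key name and field values are invented, and it assumes a configured App Engine context):

```python
# Hypothetical usage of the UserModel above inside a running App Engine app.
user = UserModel(
    id="user@example.com",             # invented key name
    authorized_query_id=["q1", "q2"],  # repeated StringProperty -> list
    report_id=["r1"],
    verified=True,
)
user.put()

# Equality filters on repeated properties match any element of the list.
verified = UserModel.query(UserModel.verified == True).fetch()
```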
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class MembershipRequest(models.Model):
_inherit = "membership.request"
petition_registration_id = fields.Many2one(
comodel_name="petition.registration",
string="Associated Petition Signatory",
help="The membership request came from a petition signature.",
readonly=True,
)
@api.depends("petition_registration_id")
def _compute_force_autoval(self):
super()._compute_force_autoval()
for record in self:
if record.petition_registration_id:
record.force_autoval = record.petition_registration_id.force_autoval
def validate_request(self):
"""
If the membership request is coming from a petition registration
and if the partner on the petition registration is not mentioned,
then we associate the partner from the membership request
to the petition registration.
"""
super().validate_request()
if (
self.petition_registration_id
and self.state == "validate"
and self.partner_id
and not self.petition_registration_id.partner_id
):
self.petition_registration_id.partner_id = self.partner_id
|
mozaik-association/mozaik
|
mozaik_petition_membership_request_involvement/models/membership_request.py
|
Python
|
agpl-3.0
| 1,353
|
import gensim
import os
from gensim import corpora
from gensim import utils
class DtmCorpus(corpora.textcorpus.TextCorpus):
def get_texts(self):
return self.input
def __len__(self):
return len(self.input)
if __name__ == '__main__':
corpus, time_seq = utils.unpickle('gensim/test/test_data/dtm_test')
dtm_home = os.environ.get('DTM_HOME', "C:/Users/Artyom/SkyDrive/TopicModels/dtm-master/")
dtm_path = os.path.join(dtm_home, 'bin', 'dtm') if dtm_home else None
model = gensim.models.DtmModel(dtm_path, corpus, time_seq, num_topics=2, id2word=corpus.dictionary)
topics = model.show_topics(topics=2, times=2, topn=10)
|
bartvm/gensim
|
gensim/test/test_dtm.py
|
Python
|
gpl-3.0
| 681
|
import sys
import h5py
import matplotlib.pyplot as plt
import numpy
def usage(exit_val=1):
print 'plot_hdf5.py <hdf5_file> <start_channel> [<end_channel>]'
sys.exit(exit_val)
if len(sys.argv) != 3 and len(sys.argv) != 4:
usage()
try:
f = sys.argv[1]
ch_s = int(sys.argv[2])
if len(sys.argv) == 4:
ch_end = int(sys.argv[3])
else:
ch_end = ch_s
nchannels = ch_end - ch_s + 1
if nchannels > 32:
print 'specify at most 32 channels'
raise ValueError
except:
usage()
h5f = h5py.File(f)
for dset in h5f:
break
dset = h5f[dset]
print 'file:', f, 'channels: %d--%d' % (ch_s, ch_end)
assert dset.dtype == numpy.dtype([('ph_flags', '|u1'),
('samp_index', '<u4'),
('chip_live', '<u4'),
('samples', '<u2', (1120,))])
samp_index = 1
samples = 3
chdata = []
idxs = []
for data in dset:
idxs.append(data[samp_index])
chdata.append(data[samples][ch_s:ch_end + 1])
plt.figure(1)
maxwidth = 8
if nchannels < maxwidth:
nrows = 1
ncols = nchannels
else:
nrows = nchannels / maxwidth
ncols = maxwidth
print 'nrows:',nrows,'ncols:',ncols
for fignum in xrange(1, nrows * ncols + 1):
plt.subplot(nrows, ncols, fignum)
plt.plot(idxs, [chd[fignum - 1] for chd in chdata])
plt.show()
|
leaflabs/leafysd
|
util/plot_hdf5.py
|
Python
|
gpl-2.0
| 1,376
|
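For reference, the script is driven entirely by its command-line arguments, as its usage() string documents; hypothetical invocations (the HDF5 file name is invented):

```python
# Hypothetical invocations matching usage() above (file name invented):
#   python plot_hdf5.py capture.h5 0      # plot channel 0 only
#   python plot_hdf5.py capture.h5 0 7    # plot channels 0 through 7
```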
# Copyright 2013, Big Switch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.exceptions import ValidationError # noqa
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.l3routers.extensions.routerrules\
import rulemanager
LOG = logging.getLogger(__name__)
class RuleCIDRField(forms.IPField):
"""Extends IPField to allow ('any','external') keywords and requires CIDR
"""
def __init__(self, *args, **kwargs):
kwargs['mask'] = True
super(RuleCIDRField, self).__init__(*args, **kwargs)
def validate(self, value):
keywords = ['any', 'external']
if value in keywords:
self.ip = value
else:
if '/' not in value:
raise ValidationError(_("Input must be in CIDR format"))
super(RuleCIDRField, self).validate(value)
class AddRouterRule(forms.SelfHandlingForm):
source = RuleCIDRField(label=_("Source CIDR"),
widget=forms.TextInput())
destination = RuleCIDRField(label=_("Destination CIDR"),
widget=forms.TextInput())
action = forms.ChoiceField(label=_("Action"))
nexthops = forms.MultiIPField(label=_("Optional: Next Hop "
"Addresses (comma delimited)"),
widget=forms.TextInput(), required=False)
router_id = forms.CharField(label=_("Router ID"),
widget=forms.TextInput(attrs={'readonly':
'readonly'}))
failure_url = 'horizon:project:l3routers:detail'
def __init__(self, request, *args, **kwargs):
super(AddRouterRule, self).__init__(request, *args, **kwargs)
self.fields['action'].choices = [('permit', _('Permit')),
('deny', _('Deny'))]
def handle(self, request, data, **kwargs):
try:
if 'rule_to_delete' in request.POST:
rulemanager.remove_rules(request,
[request.POST['rule_to_delete']],
router_id=data['router_id'])
except Exception:
exceptions.handle(request, _('Unable to delete router rule.'))
try:
if 'nexthops' not in data:
data['nexthops'] = ''
if data['source'] == '0.0.0.0/0':
data['source'] = 'any'
if data['destination'] == '0.0.0.0/0':
data['destination'] = 'any'
rule = {'action': data['action'],
'source': data['source'],
'destination': data['destination'],
'nexthops': data['nexthops'].split(',')}
rulemanager.add_rule(request,
router_id=data['router_id'],
newrule=rule)
msg = _('Router rule added')
LOG.debug(msg)
messages.success(request, msg)
return True
except Exception as e:
msg = _('Failed to add router rule %s') % e
LOG.info(msg)
messages.error(request, msg)
redirect = reverse(self.failure_url, args=[data['router_id']])
exceptions.handle(request, msg, redirect=redirect)
|
Juniper/contrail-horizon
|
openstack_dashboard/dashboards/project/l3routers/extensions/routerrules/forms.py
|
Python
|
apache-2.0
| 4,105
|
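The interesting detail in handle() above is the normalization step: a 0.0.0.0/0 CIDR is rewritten to the 'any' keyword and comma-delimited next hops are split into a list before the rule reaches rulemanager.add_rule(). A standalone sketch of that transformation (values invented):

```python
# Hypothetical form data, normalized the same way handle() does above.
data = {
    "action": "permit",
    "source": "0.0.0.0/0",          # will be rewritten to 'any'
    "destination": "10.0.0.0/8",
    "nexthops": "10.0.0.1,10.0.0.2",
}
if data["source"] == "0.0.0.0/0":
    data["source"] = "any"
if data["destination"] == "0.0.0.0/0":
    data["destination"] = "any"
rule = {
    "action": data["action"],
    "source": data["source"],
    "destination": data["destination"],
    "nexthops": data["nexthops"].split(","),
}
assert rule["source"] == "any"
assert rule["nexthops"] == ["10.0.0.1", "10.0.0.2"]
```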
"""
Tests for django admin commands in the verify_student module
Lots of imports from verify_student's model tests, since they cover similar ground
"""
from django.conf import settings
from django.core.management import call_command
from mock import patch
from testfixtures import LogCapture
from common.test.utils import MockS3BotoMixin
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification, SSPVerificationRetryConfig
from lms.djangoapps.verify_student.tests import TestVerificationBase
from lms.djangoapps.verify_student.tests.test_models import (
FAKE_SETTINGS,
mock_software_secure_post,
mock_software_secure_post_error
)
from common.djangoapps.student.tests.factories import UserFactory # lint-amnesty, pylint: disable=import-error, unused-import, useless-suppression
LOGGER_NAME = 'retry_photo_verification'
# Lots of patching to stub in our own settings, and HTTP posting
@patch.dict(settings.VERIFY_STUDENT, FAKE_SETTINGS)
@patch('lms.djangoapps.verify_student.models.requests.post', new=mock_software_secure_post)
class TestVerifyStudentCommand(MockS3BotoMixin, TestVerificationBase):
"""
Tests for django admin commands in the verify_student module
"""
def test_retry_failed_photo_verifications(self):
"""
Tests that the task used to find "must_retry" SoftwareSecurePhotoVerifications
and re-submit them executes successfully
"""
# set up some fake data to use...
self.create_upload_and_submit_attempt_for_user()
with patch('lms.djangoapps.verify_student.models.requests.post', new=mock_software_secure_post_error):
self.create_upload_and_submit_attempt_for_user()
self.create_upload_and_submit_attempt_for_user()
# check to make sure we had two successes and two failures; otherwise we've got problems elsewhere
assert SoftwareSecurePhotoVerification.objects.filter(status='submitted').count() == 1
assert SoftwareSecurePhotoVerification.objects.filter(status='must_retry').count() == 2
with self.immediate_on_commit():
call_command('retry_failed_photo_verifications')
attempts_to_retry = SoftwareSecurePhotoVerification.objects.filter(status='must_retry')
assert not attempts_to_retry
def add_test_config_for_retry_verification(self):
"""Setups verification retry configuration."""
config = SSPVerificationRetryConfig.current()
config.arguments = '--verification-ids 1 2 3'
config.enabled = True
config.save()
def test_args_from_database(self):
"""Test management command arguments injected from config model."""
# Nothing in the database, should default to disabled
with LogCapture(LOGGER_NAME) as log:
call_command('retry_failed_photo_verifications', '--args-from-database')
log.check_present(
(
LOGGER_NAME, 'WARNING',
'SSPVerificationRetryConfig is disabled or empty, but --args-from-database was requested.'
),
)
self.add_test_config_for_retry_verification()
with patch('lms.djangoapps.verify_student.models.requests.post', new=mock_software_secure_post_error):
self.create_upload_and_submit_attempt_for_user()
with LogCapture(LOGGER_NAME) as log:
with self.immediate_on_commit():
call_command('retry_failed_photo_verifications')
log.check_present(
(
LOGGER_NAME, 'INFO',
'Attempting to retry {0} failed PhotoVerification submissions'.format(1)
),
)
with LogCapture(LOGGER_NAME) as log:
with self.immediate_on_commit():
call_command('retry_failed_photo_verifications', '--args-from-database')
log.check_present(
(
LOGGER_NAME, 'INFO',
'Fetching retry verification ids from config model'
),
)
|
stvstnfrd/edx-platform
|
lms/djangoapps/verify_student/management/commands/tests/test_verify_student.py
|
Python
|
agpl-3.0
| 4,175
|
from gettext import gettext as _
from pulp.common.error_codes import Error
DEB0001 = Error('DEB0001',
_('Create local repository at: %(path)s failed. Reason: %(reason)s'),
['path', 'reason'])
|
pombredanne/pulp_deb
|
common/pulp_deb/common/errors.py
|
Python
|
gpl-2.0
| 228
|
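Error here is pulp's (code, message, required_fields) record, so the declared fields are exactly what the message template needs. A hedged rendering of DEB0001 (path and reason invented; the attribute access assumes pulp's namedtuple-style Error):

```python
# Hypothetical rendering of DEB0001 above; assumes pulp's Error exposes
# .code and .message (it is a namedtuple-style record in pulp.common).
detail = DEB0001.message % {
    "path": "/var/lib/pulp/repos/deb",  # invented
    "reason": "permission denied",      # invented
}
print(DEB0001.code, detail)
```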
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import specialist_pool
from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
from google.cloud.aiplatform_v1.types import specialist_pool_service
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import SpecialistPoolServiceGrpcTransport
from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport
class SpecialistPoolServiceClientMeta(type):
"""Metaclass for the SpecialistPoolService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[SpecialistPoolServiceTransport]]
_transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport
_transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[SpecialistPoolServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta):
"""A service for creating and managing Customer SpecialistPools.
When customers start Data Labeling jobs, they can reuse/create
Specialist Pools to bring their own Specialists to label the
data. Customers can add/remove Managers for the Specialist Pool
on Cloud console, then Managers will get email notifications to
manage Specialists and tasks on CrowdCompute console.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpecialistPoolServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpecialistPoolServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> SpecialistPoolServiceTransport:
"""Returns the transport used by the client instance.
Returns:
SpecialistPoolServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def specialist_pool_path(project: str, location: str, specialist_pool: str,) -> str:
"""Returns a fully-qualified specialist_pool string."""
return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(
project=project, location=location, specialist_pool=specialist_pool,
)
@staticmethod
def parse_specialist_pool_path(path: str) -> Dict[str, str]:
"""Parses a specialist_pool path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/specialistPools/(?P<specialist_pool>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, SpecialistPoolServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the specialist pool service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, SpecialistPoolServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, SpecialistPoolServiceTransport):
# transport is a SpecialistPoolServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
def create_specialist_pool(
self,
request: specialist_pool_service.CreateSpecialistPoolRequest = None,
*,
parent: str = None,
specialist_pool: gca_specialist_pool.SpecialistPool = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Creates a SpecialistPool.
Args:
request (google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest):
The request object. Request message for
[SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].
parent (str):
Required. The parent Project name for the new
SpecialistPool. The form is
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
Required. The SpecialistPool to
create.
This corresponds to the ``specialist_pool`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
labeling jobs. It includes a group of specialist
managers who are responsible for managing the
labelers in this pool as well as customers' data
labeling jobs associated with this pool. Customers
create specialist pool as well as start data labeling
jobs on Cloud, managers and labelers work with the
jobs using CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, specialist_pool])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.CreateSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest):
request = specialist_pool_service.CreateSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if specialist_pool is not None:
request.specialist_pool = specialist_pool
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
gca_specialist_pool.SpecialistPool,
metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata,
)
# Done; return the response.
return response
def get_specialist_pool(
self,
request: specialist_pool_service.GetSpecialistPoolRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> specialist_pool.SpecialistPool:
r"""Gets a SpecialistPool.
Args:
request (google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest):
The request object. Request message for
[SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool].
name (str):
Required. The name of the SpecialistPool resource. The
form is
``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.SpecialistPool:
SpecialistPool represents customers'
own workforce to work on their data
labeling jobs. It includes a group of
specialist managers who are responsible
for managing the labelers in this pool
as well as customers' data labeling jobs
associated with this pool.
Customers create specialist pool as well
as start data labeling jobs on Cloud,
managers and labelers work with the jobs
using CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.GetSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest):
request = specialist_pool_service.GetSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_specialist_pools(
self,
request: specialist_pool_service.ListSpecialistPoolsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSpecialistPoolsPager:
r"""Lists SpecialistPools in a Location.
Args:
request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest):
The request object. Request message for
[SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].
parent (str):
Required. The name of the SpecialistPool's parent
resource. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager:
Response message for
[SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.ListSpecialistPoolsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest):
request = specialist_pool_service.ListSpecialistPoolsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListSpecialistPoolsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_specialist_pool(
self,
request: specialist_pool_service.DeleteSpecialistPoolRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a SpecialistPool as well as all Specialists
in the pool.
Args:
request (google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest):
The request object. Request message for
[SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool].
name (str):
Required. The resource name of the SpecialistPool to
delete. Format:
``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.DeleteSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest):
request = specialist_pool_service.DeleteSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def update_specialist_pool(
self,
request: specialist_pool_service.UpdateSpecialistPoolRequest = None,
*,
specialist_pool: gca_specialist_pool.SpecialistPool = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Updates a SpecialistPool.
Args:
request (google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest):
The request object. Request message for
[SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool].
specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
Required. The SpecialistPool which
replaces the resource on the server.
This corresponds to the ``specialist_pool`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask applies to
the resource.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
labeling jobs. It includes a group of specialist
managers who are responsible for managing the
labelers in this pool as well as customers' data
labeling jobs associated with this pool. Customers
create specialist pool as well as start data labeling
jobs on Cloud, managers and labelers work with the
jobs using CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([specialist_pool, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.UpdateSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest):
request = specialist_pool_service.UpdateSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if specialist_pool is not None:
request.specialist_pool = specialist_pool
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("specialist_pool.name", request.specialist_pool.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
gca_specialist_pool.SpecialistPool,
metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("SpecialistPoolServiceClient",)
|
sasha-gitg/python-aiplatform
|
google/cloud/aiplatform_v1/services/specialist_pool_service/client.py
|
Python
|
apache-2.0
| 37,232
|
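A hedged sketch of driving the client defined above: build a resource name with the path helper, then fetch the pool (project, location, and pool ID are placeholders; credentials are resolved from the environment):

```python
# Hypothetical usage of SpecialistPoolServiceClient as defined above.
from google.cloud.aiplatform_v1.services.specialist_pool_service.client import (
    SpecialistPoolServiceClient,
)

client = SpecialistPoolServiceClient()  # resolves default credentials

name = SpecialistPoolServiceClient.specialist_pool_path(
    project="my-project",      # placeholder
    location="us-central1",    # placeholder
    specialist_pool="12345",   # placeholder
)
pool = client.get_specialist_pool(name=name)
print(pool.name)
```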
'''
Created on Jun 25, 2014
@author: Jeremy May
'''
import pickle
from PyQt4 import QtGui
class SettingsDialog(QtGui.QDialog):
def __init__(self, parent=None):
'''
Constructor
'''
super(SettingsDialog, self).__init__(parent)
self.parent = parent
self.settingsData = self.loadSettings()
self.setWindowTitle("Settings")
if parent:
self.setModal(True)
else:
self.setModal(False)
self.initUi()
def initUi(self):
layout = QtGui.QVBoxLayout()
# Show notifications selection box
notifyBox = self.createNotifySettingsBox()
layout.addWidget(notifyBox)
# Cancel/Apply/Ok
actionLayout = self.createActionButtons()
layout.addLayout(actionLayout)
self.setLayout(layout)
def onApply(self):
self.settingsData.save()
self.applyBtn.setEnabled(False)
pass
def onOK(self):
self.settingsData.save()
if self.isModal():
self.accept()
else:
self.close()
pass
def onOptionChanged(self):
self.applyBtn.setEnabled(True)
def loadSettings(self):
return SettingsData.fromPickle()
def createNotifySettingsBox(self):
notifyBox = QtGui.QGroupBox("Event Notifications")
notifyBoxLayout = QtGui.QVBoxLayout()
connectChk = QtGui.QCheckBox("Phone Connect")
connectChk.setToolTip("Show connect notification when a phone connects to the program.")
connectChk.setChecked(self.settingsData.getConnect())
notifyBoxLayout.addWidget(connectChk)
disconnectChk = QtGui.QCheckBox("Phone Disconnect")
disconnectChk.setToolTip("Show disconnect notification when a phone disconnects from the program.")
disconnectChk.setChecked(self.settingsData.getDisconnect())
notifyBoxLayout.addWidget(disconnectChk)
incomingChk = QtGui.QCheckBox("Incoming Call")
incomingChk.setToolTip("Show incoming call notification when a call is being received.")
incomingChk.setChecked(self.settingsData.getIncoming())
notifyBoxLayout.addWidget(incomingChk)
missedChk = QtGui.QCheckBox("Missed Call")
missedChk.setToolTip("Show missed call notification when a call is missed.")
missedChk.setChecked(self.settingsData.getMissed())
notifyBoxLayout.addWidget(missedChk)
notifyBox.setLayout(notifyBoxLayout)
return notifyBox
def createActionButtons(self):
actionLayout = QtGui.QHBoxLayout()
cancelBtn = QtGui.QPushButton("Cancel")
if self.isModal():
cancelBtn.clicked.connect(self.reject)
else:
cancelBtn.clicked.connect(self.close)
actionLayout.addWidget(cancelBtn)
self.applyBtn = QtGui.QPushButton("Apply")
self.applyBtn.clicked.connect(self.onApply)
self.applyBtn.setEnabled(False)
actionLayout.addWidget(self.applyBtn)
okBtn = QtGui.QPushButton("OK")
okBtn.clicked.connect(self.onOK)
actionLayout.addWidget(okBtn)
return actionLayout
class SettingsData(object):
def __init__(self):
self.notify = { "connect" : True,
"disconnect" : True,
"incoming" : True,
"missed" : True }
pass
def getConnect(self):
return self.notify.get("connect")
def getDisconnect(self):
return self.notify.get("disconnect")
def getIncoming(self):
return self.notify.get("incoming")
def getMissed(self):
return self.notify.get("missed")
def shouldDisplayEvent(self, event):
# Create easy ref enum EventType
EventType = event.EventType
# Determine type and return toggle
e_type = event.getEventType()
if e_type == EventType.INCOMING_CALL:
return self.getIncoming()
elif e_type == EventType.MISSED_CALL:
return self.getMissed()
elif e_type == EventType.CLIENT_CONNECT:
return self.getConnect()
elif e_type == EventType.SHUTDOWN:
return self.getDisconnect()
else:
return False
def save(self):
pickleFile = None
try:
pickleFile = open("setting.dat", 'wb')
except Exception:
return False
if pickleFile:
pickle.dump(self, pickleFile)
pickleFile.close()
else:
return False
return True
@classmethod
def fromPickle(cls):
settingData = cls()
pickleFile = None
try:
pickleFile = open("setting.dat", 'rb')
except Exception:
pass
if pickleFile:
settingData = pickle.load(pickleFile)
pickleFile.close()
return settingData
|
Kenishi/DroidNavi
|
pyqt-ui/src/pytelelog_pyqt/components/settings.py
|
Python
|
gpl-2.0
| 5,225
|
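SettingsData persists itself with pickle to a setting.dat file in the working directory, and fromPickle() falls back to defaults when the file is absent. A quick round trip (standalone illustration, assuming the SettingsData class above is in scope):

```python
# Hypothetical round trip through the pickle persistence shown above;
# writes setting.dat into the current working directory.
data = SettingsData()
data.notify["missed"] = False
assert data.save()                    # True on a successful write

restored = SettingsData.fromPickle()  # reloads from setting.dat
assert restored.getMissed() is False
assert restored.getConnect() is True
```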
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-18 16:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('report_ia', '0028_auto_20170117_1916'),
]
operations = [
migrations.CreateModel(
name='Nieuws',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bericht', models.TextField()),
('club', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('groep', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='auth.Group')),
],
options={
'verbose_name_plural': 'Berichten',
},
),
]
|
jefke-glider/gliding
|
ato/report_ia/migrations/0029_nieuws.py
|
Python
|
mit
| 1,082
|
#!/usr/local/bin/python
import os
# import ycm_core
# return the filename in the path without extension
def findFileName(path, ext):
name = ''
for projFile in os.listdir(path):
# cocoapods will generate _Pods.xcodeproj as well
if projFile.endswith(ext) and not projFile.startswith('_Pods'):
name= projFile[:-len(ext):]
return name
# WARNING!! No trailing '/' at the end
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def findProjectName(working_directory):
projectName = findFileName(working_directory, '.xcodeproj')
if len(projectName) <= 0:
# cocoapod projects
projectName = findFileName(working_directory, '.podspec')
return projectName
flags = [
# TODO: find the correct cache path automatically
'-D__IPHONE_OS_VERSION_MIN_REQUIRED=80000',
'-miphoneos-version-min=9.3',
'-arch', 'arm64',
'-fblocks',
'-fmodules',
'-fobjc-arc',
'-fobjc-exceptions',
'-fexceptions',
'-isystem',
'/Library/Developer/CommandLineTools/usr/include/c++/v1', # for c++ headers <string>, <iostream> definition
'-x',
'objective-c',
'-Wno-#pragma-messages',
'-Wno-#warnings',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks',
# '-I/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks/Foundation.framework/Headers',
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
# '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1'
# '-I/Library/Developer/CommandLineTools/usr/include',
#custom definition, include subfolders
'-ProductFrameworkInclude', # include the framework in the products(in derivedData) folder
'-I./Example/'+findProjectName(DirectoryOfThisScript()), # new cocoapods directory
'-ISUB./Pod/Classes', # old cocoapods directory
'-ISUB./'+findProjectName(DirectoryOfThisScript()), # new cocoapods directory
# use headers in framework instead
#'-ISUB./Example/Pods', # new cocoapods directory
# '-F/Users/Lono/Library/Developer/Xcode/DerivedData/Scrapio-dliwlpgcvwijijcdxarawwtrfuuh/Build/Products/Debug-iphonesimulator/Kiwi/',
# '-include',
# './Example/Tests/Tests-Prefix.pch', # test project prefix header
'-isysroot', '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk'
# '-fencode-extended-block-signature', #libclang may report error on this
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/7.0.2/include', # let IncludeClangInXCToolChain handle it
# include-pch will make YouCompleteMe show 'no errors founded'
# '-include-pch',
# './Example/Tests/Tests-Prefix.pch', # test project prefix header
# modules failed trials
# '-fmodule-implementation-of',
# '-fimplicit-module-maps',
# '-F/Users/Lono/Library/Developer/Xcode/DerivedData/Scrapio-dliwlpgcvwijijcdxarawwtrfuuh/Build/Products/Debug-iphonesimulator/CocoaLumberjack',
# '-Wnon-modular-include-in-framework-module',
]
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# if os.path.exists( compilation_database_folder ):
# database = ycm_core.CompilationDatabase( compilation_database_folder )
# else:
# we don't use compilation database
database = None
def Subdirectories(directory):
res = []
for path, subdirs, files in os.walk(directory):
for name in subdirs:
item = os.path.join(path, name)
res.append(item)
return res
def sorted_ls(path):
mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime
return list(sorted(os.listdir(path), key=mtime))
def IncludeClangInXCToolChain(flags, working_directory):
if not working_directory:
return list( flags )
new_flags = list(flags)
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/7.0.2/include',
path = '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/'
clangPath = sorted_ls(path)[::-1] # newest file first
includePath = ''
    if (len(clangPath) > 0):
        includePath = os.path.join('', *[path, clangPath[0], 'include'])
        new_flags.append('-I'+includePath)
return new_flags
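# Example (hypothetical toolchain layout): if the newest folder under
# .../XcodeDefault.xctoolchain/usr/lib/clang/ is '9.0.0', this appends
# '-I<...>/usr/lib/clang/9.0.0/include' to the flags.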
def FindDerivedDataPath( derivedDataPath, projectName ):
simulatorPaths = ['Build/Intermediates/CodeCoverage/Products/Debug-iphonesimulator/', # if you enable CodeCoverage, the framework of test target will be put in coverage folder, strange
'Build/Products/Debug-iphonesimulator/']
# search ~/Library/Developer/Xcode/DerivedData/ to find <project_name>-dliwlpgcvwijijcdxarawwtrfuuh
derivedPath = sorted_ls(derivedDataPath)[::-1] # newest file first
for productPath in derivedPath:
if productPath.lower().startswith( projectName.lower() ):
for simulatorPath in simulatorPaths:
projectPath = os.path.join('', *[derivedDataPath, productPath, simulatorPath])
if (len(projectPath) > 0) and os.path.exists(projectPath):
                    return projectPath # the latest product is what we want (really?)
return ''
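# Example (hypothetical DerivedData content, a minimal sketch): for projectName
# 'MyApp' and a folder 'MyApp-abcdefgh', this returns
# '<derivedDataPath>/MyApp-abcdefgh/Build/Products/Debug-iphonesimulator/'.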
def IncludeFlagsOfFrameworkHeaders( flags, working_directory ):
if not working_directory:
return flags
new_flags = []
path_flag = '-ProductFrameworkInclude'
derivedDataPath = os.path.expanduser('~/Library/Developer/Xcode/DerivedData/')
# find the project name
projectName = findProjectName(working_directory)
if len(projectName) <= 0:
return flags
# add all frameworks in the /Build/Products/Debug-iphonesimulator/xxx/xxx.framework
for flag in flags:
if not flag.startswith( path_flag ):
new_flags.append(flag)
continue
projectPath = FindDerivedDataPath( derivedDataPath, projectName )
if (len(projectPath) <= 0) or not os.path.exists(projectPath):
continue
# iterate through all frameworks folders /Debug-iphonesimulator/xxx/xxx.framework
for frameworkFolder in os.listdir(projectPath):
frameworkPath = os.path.join('', projectPath, frameworkFolder)
if not os.path.isdir(frameworkPath):
continue
            # framework folder '-F/Debug-iphonesimulator/<framework-name>'
            # solves the <Kiwi/KiwiConfigurations.h> not found problem
            new_flags.append('-F'+frameworkPath)
            # the framework name might be different from the folder name
            # we need to iterate all frameworks
for frameworkFile in os.listdir(frameworkPath):
if frameworkFile.endswith('framework'):
# include headers '-I/Debug-iphonesimulator/xxx/yyy.framework/Headers'
                    # allows you to use #import "Kiwi.h". NOT REQUIRED, but I am too lazy to change existing code
new_flags.append('-I' + os.path.join('', frameworkPath, frameworkFile,'Headers'))
return new_flags
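# Example (hypothetical DerivedData layout, a minimal sketch): a
# '-ProductFrameworkInclude' flag is replaced by
# '-F<...>/Debug-iphonesimulator/Kiwi' plus
# '-I<...>/Debug-iphonesimulator/Kiwi/Kiwi.framework/Headers' for each
# framework found in the products folder.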
def IncludeFlagsOfSubdirectory( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_include_subdir = False
path_flags = [ '-ISUB']
for flag in flags:
# include the directory of flag as well
new_flag = [flag.replace('-ISUB', '-I')]
if make_next_include_subdir:
make_next_include_subdir = False
for subdir in Subdirectories(os.path.join(working_directory, flag)):
new_flag.append('-I')
new_flag.append(subdir)
for path_flag in path_flags:
if flag == path_flag:
make_next_include_subdir = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
for subdir in Subdirectories(os.path.join(working_directory, path)):
new_flag.append('-I' + subdir)
break
        new_flags = new_flags + new_flag
return new_flags
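# Example (a minimal sketch): '-ISUB./Pod/Classes' expands to
# '-I./Pod/Classes' plus one '-I<subdir>' entry for every directory found
# under ./Pod/Classes.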
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
#add include subfolders as well
flags = IncludeFlagsOfSubdirectory( flags, working_directory )
#include framework header in derivedData/.../Products
flags = IncludeFlagsOfFrameworkHeaders( flags, working_directory )
#include libclang header in xctoolchain
flags = IncludeClangInXCToolChain( flags, working_directory )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
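# Example (a minimal sketch): with working_directory '/proj', the pair
# ['-I', 'include'] becomes ['-I', '/proj/include'] and '-Iinclude' becomes
# '-I/proj/include'; flags that are already absolute are left untouched.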
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
    # The compile_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
import time
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
# try:
# final_flags.remove( '-stdlib=libc++' )
# except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
# update .clang for chromatica every 5min TODO: very dirty
chromatica_file = DirectoryOfThisScript() + '/.clang'
if (not os.path.exists(chromatica_file)) or (time.time() - os.stat(chromatica_file).st_mtime > 5*60):
parsed_flags = IncludeFlagsOfSubdirectory( final_flags, DirectoryOfThisScript() )
        escaped = [flag for flag in parsed_flags if " " not in flag] # chromatica doesn't handle spaces in flags
f = open(chromatica_file, 'w') # truncate the current file
f.write('flags='+' '.join(escaped))
f.close()
return {
'flags': final_flags,
'do_cache': True
}
# if __name__ == "__main__":
# print (FlagsForFile(""))
# flags = [
# '-D__IPHONE_OS_VERSION_MIN_REQUIRED=70000',
# '-x',
# 'objective-c',
# '-ProductFrameworkInclude',
# '-ProductFrameworkInclude',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-ISUB./Pods/Headers/Public',
# '-MMD',
# ]
# print IncludeClangInXCToolChain(flags, DirectoryOfThisScript())
# print IncludeFlagsOfFrameworkHeaders( flags, DirectoryOfThisScript() )
# # res = subdirectory( DirectoryOfThisScript())
# flags = [
# '-D__IPHONE_OS_VERSION_MIN_REQUIRED=70000',
# '-x',
# 'objective-c',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-ISUB./Pods/Headers/Public',
# '-MMD',
# ]
# print (IncludeFlagsOfSubdirectory( flags, DirectoryOfThisScript() ))
# res = IncludeFlagsOfSubdirectory( flags, DirectoryOfThisScript() )
# escaped = []
# for flag in res:
# if " " not in flag:
# escaped.append(flag)
# print ' '.join(escaped)
|
haifengkao/ReactiveCache
|
.ycm_extra_conf.py
|
Python
|
mit
| 13,022
|
import unittest
import fakeredis
from sixpack.models import Alternative, Experiment
class TestAlternativeModel(unittest.TestCase):
unit = True
def setUp(self):
self.redis = fakeredis.FakeStrictRedis()
self.client_id = 381
def test_key(self):
exp = Experiment('show-something', ['yes', 'no'], redis=self.redis)
alt = Alternative('yes', exp, redis=self.redis)
key = alt.key()
self.assertEqual(key, 'sxp:show-something:yes')
def test_is_valid(self):
valid = Alternative.is_valid('1')
self.assertTrue(valid)
unicode_valid = Alternative.is_valid(u'valid')
self.assertTrue(unicode_valid)
def test_is_not_valid(self):
not_valid = Alternative.is_valid(1)
self.assertFalse(not_valid)
not_valid = Alternative.is_valid(':123:name')
self.assertFalse(not_valid)
not_valid = Alternative.is_valid('_123name')
self.assertFalse(not_valid)
not_valid = Alternative.is_valid('&123name')
self.assertFalse(not_valid)
def test_is_control(self):
exp = Experiment('trololo', ['yes', 'no'], redis=self.redis)
exp.save()
alt = Alternative('yes', exp, redis=self.redis)
self.assertTrue(alt.is_control())
exp.delete()
def test_experiment(self):
exp = Experiment('trololo', ['yes', 'no'], redis=self.redis)
exp.save()
alt = Alternative('yes', exp, redis=self.redis)
self.assertTrue(alt.is_control())
def test_participant_count(self):
pass
# self.redis.bitcount.return_value = 0
# alt = Alternative('yes', 'show-something', self.redis)
# count = alt.participant_count()
# key = _key("participation:{0}:{1}".format(alt.experiment_name, alt.name))
# self.redis.bitcount.assert_called_once_with(key)
# self.assertTrue(isinstance(count, Number))
# self.redis.reset_mock()
def test_conversion_count(self):
pass
# self.redis.reset_mock()
# self.redis.bitcount.return_value = 0
# alt = Alternative('yes', 'show-something', self.redis)
# count = alt.completed_count()
# key = _key("c:{0}/1:{1}".format(alt.experiment_name, alt.name))
# self.redis.bitcount.assert_called_once_with(key)
# self.assertTrue(isinstance(count, Number))
# self.redis.reset_mock()
# TODO Test this
def test_record_participation(self):
pass
# alt = Alternative('yes', 'show-something', self.redis)
# alt.record_participation(self.client_id)
# key = _key("participation:{0}:{1}".format(alt.experiment_name, alt.name))
# self.redis.setbit.assert_called_once_with(key, self.client_id, 1)
def test_record_conversion(self):
pass
# client = Client('xyz', self.redis)
# alt = Alternative('yes', 'show-something', self.redis)
# alt.record_conversion(client)
# key = _key("conversion:{0}:{1}".format(alt.experiment_name, alt.name))
# self.redis.setbit.assert_called_once_with(key, self.client_id, 1)
|
nickveenhof/sixpack
|
sixpack/test/alternative_model_test.py
|
Python
|
bsd-2-clause
| 3,143
|
# coding: utf-8
# Copyright 2014 Globo.com Player authors. All rights reserved.
# Use of this source code is governed by a MIT License
# license that can be found in the LICENSE file.
import sys
PYTHON_MAJOR_VERSION = sys.version_info
import os
import posixpath
try:
import urlparse as url_parser
import urllib2
urlopen = urllib2.urlopen
except ImportError:
import urllib.parse as url_parser
from urllib.request import urlopen as url_opener
urlopen = url_opener
from m3u8.model import M3U8, Playlist, IFramePlaylist, Media, Segment
from m3u8.parser import parse, is_url, ParseError
__all__ = ('M3U8', 'Playlist', 'IFramePlaylist', 'Media',
'Segment', 'loads', 'load', 'parse', 'ParseError')
def loads(content):
'''
    Given a string with m3u8 content, returns an M3U8 object.
    Raises ValueError if the content is invalid.
'''
return M3U8(content)
def load(uri):
'''
    Retrieves the content from a given URI and returns an M3U8 object.
    Raises ValueError if the content is invalid, or IOError if the request fails.
'''
if is_url(uri):
return _load_from_uri(uri)
else:
return _load_from_file(uri)
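# Example usage (hypothetical URI, a minimal sketch; assumes the playlist
# parses into segments):
#   playlist = load('http://example.com/stream/index.m3u8')
#   for segment in playlist.segments:
#       print(segment.uri)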
# Support for python3 inspired by https://github.com/szemtiv/m3u8/
def _load_from_uri(uri):
resource = urlopen(uri)
base_uri = _parsed_url(_url_for(uri))
if PYTHON_MAJOR_VERSION < (3,):
content = _read_python2x(resource)
else:
content = _read_python3x(resource)
return M3U8(content, base_uri=base_uri)
def _url_for(uri):
return urlopen(uri).geturl()
def _parsed_url(url):
parsed_url = url_parser.urlparse(url)
prefix = parsed_url.scheme + '://' + parsed_url.netloc
base_path = posixpath.normpath(parsed_url.path + '/..')
return url_parser.urljoin(prefix, base_path)
def _read_python2x(resource):
return resource.read().strip()
def _read_python3x(resource):
return resource.read().decode(resource.headers.get_content_charset(failobj="utf-8"))
def _load_from_file(uri):
with open(uri) as fileobj:
raw_content = fileobj.read().strip()
base_uri = os.path.dirname(uri)
return M3U8(raw_content, base_uri=base_uri)
|
cristina0botez/m3u8
|
m3u8/__init__.py
|
Python
|
mit
| 2,171
|
# Natural Language Toolkit: IPI PAN Corpus Reader
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Konrad Goluchowski <kodie@mimuw.edu.pl>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import functools
from nltk import compat
from nltk.corpus.reader.util import StreamBackedCorpusView, concat
from nltk.corpus.reader.api import CorpusReader
def _parse_args(fun):
@functools.wraps(fun)
def decorator(self, fileids=None, **kwargs):
kwargs.pop('tags', None)
if not fileids:
fileids = self.fileids()
return fun(self, fileids, **kwargs)
return decorator
class IPIPANCorpusReader(CorpusReader):
"""
Corpus reader designed to work with corpus created by IPI PAN.
See http://korpus.pl/en/ for more details about IPI PAN corpus.
The corpus includes information about text domain, channel and categories.
You can access possible values using ``domains()``, ``channels()`` and
    ``categories()``. You can also use this metadata to filter files, e.g.:
    ``fileids(channels='prasa')``, ``fileids(categories='publicystyczny')``.
The reader supports methods: words, sents, paras and their tagged versions.
    You can get the part of speech instead of the full tag by passing the
    "simplify_tags=True" parameter, e.g.: ``tagged_sents(simplify_tags=True)``.
    You can also get all disambiguated tags by specifying the parameter
    "one_tag=False", e.g.: ``tagged_paras(one_tag=False)``.
    You can get all tags that were assigned by a morphological analyzer by
    specifying the parameter "disamb_only=False", e.g. ``tagged_words(disamb_only=False)``.
The IPIPAN Corpus contains tags indicating if there is a space between two
tokens. To add special "no space" markers, you should specify parameter
"append_no_space=True", e.g. ``tagged_words(append_no_space=True)``.
    As a result, wherever there should be no space between two tokens, a new
    pair ('', 'no-space') will be inserted (for tagged data) and just '' for
    methods without tags.
The corpus reader can also try to append spaces between words. To enable this
option, specify parameter "append_space=True", e.g. ``words(append_space=True)``.
As a result either ' ' or (' ', 'space') will be inserted between tokens.
    By default, xml entities like &quot; and &amp; are replaced by the
    corresponding characters. You can turn off this feature by specifying the
    parameter "replace_xmlentities=False", e.g. ``words(replace_xmlentities=False)``.
"""
def __init__(self, root, fileids):
CorpusReader.__init__(self, root, fileids, None, None)
def raw(self, fileids=None):
if not fileids:
fileids = self.fileids()
filecontents = []
for fileid in self._list_morph_files(fileids):
with open(fileid, 'r') as infile:
filecontents.append(infile.read())
return ''.join(filecontents)
def channels(self, fileids=None):
if not fileids:
fileids = self.fileids()
return self._parse_header(fileids, 'channel')
def domains(self, fileids=None):
if not fileids:
fileids = self.fileids()
return self._parse_header(fileids, 'domain')
def categories(self, fileids=None):
if not fileids:
fileids = self.fileids()
return [self._map_category(cat)
for cat in self._parse_header(fileids, 'keyTerm')]
def fileids(self, channels=None, domains=None, categories=None):
if channels is not None and domains is not None and \
categories is not None:
raise ValueError('You can specify only one of channels, domains '
'and categories parameter at once')
if channels is None and domains is None and \
categories is None:
return CorpusReader.fileids(self)
if isinstance(channels, compat.string_types):
channels = [channels]
if isinstance(domains, compat.string_types):
domains = [domains]
if isinstance(categories, compat.string_types):
categories = [categories]
if channels:
return self._list_morph_files_by('channel', channels)
elif domains:
return self._list_morph_files_by('domain', domains)
else:
return self._list_morph_files_by('keyTerm', categories,
map=self._map_category)
@_parse_args
def sents(self, fileids=None, **kwargs):
return concat([self._view(fileid,
mode=IPIPANCorpusView.SENTS_MODE, tags=False, **kwargs)
for fileid in self._list_morph_files(fileids)])
@_parse_args
def paras(self, fileids=None, **kwargs):
return concat([self._view(fileid,
mode=IPIPANCorpusView.PARAS_MODE, tags=False, **kwargs)
for fileid in self._list_morph_files(fileids)])
@_parse_args
def words(self, fileids=None, **kwargs):
return concat([self._view(fileid, tags=False, **kwargs)
for fileid in self._list_morph_files(fileids)])
@_parse_args
def tagged_sents(self, fileids=None, **kwargs):
return concat([self._view(fileid, mode=IPIPANCorpusView.SENTS_MODE,
**kwargs)
for fileid in self._list_morph_files(fileids)])
@_parse_args
def tagged_paras(self, fileids=None, **kwargs):
return concat([self._view(fileid, mode=IPIPANCorpusView.PARAS_MODE,
**kwargs)
for fileid in self._list_morph_files(fileids)])
@_parse_args
def tagged_words(self, fileids=None, **kwargs):
return concat([self._view(fileid, **kwargs)
for fileid in self._list_morph_files(fileids)])
def _list_morph_files(self, fileids):
return [f for f in self.abspaths(fileids)]
def _list_header_files(self, fileids):
return [f.replace('morph.xml', 'header.xml')
for f in self._list_morph_files(fileids)]
def _parse_header(self, fileids, tag):
values = set()
for f in self._list_header_files(fileids):
values_list = self._get_tag(f, tag)
for v in values_list:
values.add(v)
return list(values)
def _list_morph_files_by(self, tag, values, map=None):
fileids = self.fileids()
ret_fileids = set()
for f in fileids:
fp = self.abspath(f).replace('morph.xml', 'header.xml')
values_list = self._get_tag(fp, tag)
for value in values_list:
if map is not None:
value = map(value)
if value in values:
ret_fileids.add(f)
return list(ret_fileids)
def _get_tag(self, f, tag):
tags = []
with open(f, 'r') as infile:
header = infile.read()
tag_end = 0
while True:
tag_pos = header.find('<'+tag, tag_end)
if tag_pos < 0: return tags
tag_end = header.find('</'+tag+'>', tag_pos)
tags.append(header[tag_pos+len(tag)+2:tag_end])
def _map_category(self, cat):
pos = cat.find('>')
if pos == -1:
return cat
else:
return cat[pos+1:]
def _view(self, filename, **kwargs):
tags = kwargs.pop('tags', True)
mode = kwargs.pop('mode', 0)
simplify_tags = kwargs.pop('simplify_tags', False)
one_tag = kwargs.pop('one_tag', True)
disamb_only = kwargs.pop('disamb_only', True)
append_no_space = kwargs.pop('append_no_space', False)
append_space = kwargs.pop('append_space', False)
replace_xmlentities = kwargs.pop('replace_xmlentities', True)
if len(kwargs) > 0:
raise ValueError('Unexpected arguments: %s' % kwargs.keys())
if not one_tag and not disamb_only:
raise ValueError('You cannot specify both one_tag=False and '
'disamb_only=False')
if not tags and (simplify_tags or not one_tag or not disamb_only):
raise ValueError('You cannot specify simplify_tags, one_tag or '
'disamb_only with functions other than tagged_*')
return IPIPANCorpusView(filename,
tags=tags, mode=mode, simplify_tags=simplify_tags,
one_tag=one_tag, disamb_only=disamb_only,
append_no_space=append_no_space,
append_space=append_space,
replace_xmlentities=replace_xmlentities
)
class IPIPANCorpusView(StreamBackedCorpusView):
WORDS_MODE = 0
SENTS_MODE = 1
PARAS_MODE = 2
def __init__(self, filename, startpos=0, **kwargs):
StreamBackedCorpusView.__init__(self, filename, None, startpos, None)
self.in_sentence = False
self.position = 0
self.show_tags = kwargs.pop('tags', True)
self.disamb_only = kwargs.pop('disamb_only', True)
self.mode = kwargs.pop('mode', IPIPANCorpusView.WORDS_MODE)
self.simplify_tags = kwargs.pop('simplify_tags', False)
self.one_tag = kwargs.pop('one_tag', True)
self.append_no_space = kwargs.pop('append_no_space', False)
self.append_space = kwargs.pop('append_space', False)
self.replace_xmlentities = kwargs.pop('replace_xmlentities', True)
def read_block(self, stream):
sentence = []
sentences = []
space = False
no_space = False
tags = set()
lines = self._read_data(stream)
while True:
# we may have only part of last line
if len(lines) <= 1:
self._seek(stream)
lines = self._read_data(stream)
if lines == ['']:
assert not sentences
return []
line = lines.pop()
self.position += len(line) + 1
if line.startswith('<chunk type="s"'):
self.in_sentence = True
elif line.startswith('<chunk type="p"'):
pass
elif line.startswith('<tok'):
if self.append_space and space and not no_space:
self._append_space(sentence)
space = True
no_space = False
orth = ""
tags = set()
elif line.startswith('</chunk'):
if self.in_sentence:
self.in_sentence = False
self._seek(stream)
if self.mode == self.SENTS_MODE:
return [sentence]
elif self.mode == self.WORDS_MODE:
if self.append_space:
self._append_space(sentence)
return sentence
else:
sentences.append(sentence)
elif self.mode == self.PARAS_MODE:
self._seek(stream)
return [sentences]
elif line.startswith('<orth'):
orth = line[6:-7]
if self.replace_xmlentities:
                    orth = orth.replace('&quot;', '"').replace('&amp;', '&')
elif line.startswith('<lex'):
if not self.disamb_only or line.find('disamb=') != -1:
tag = line[line.index('<ctag')+6 : line.index('</ctag') ]
tags.add(tag)
elif line.startswith('</tok'):
if self.show_tags:
if self.simplify_tags:
tags = [t.split(':')[0] for t in tags]
if not self.one_tag or not self.disamb_only:
sentence.append((orth, tuple(tags)))
else:
sentence.append((orth, tags.pop()))
else:
sentence.append(orth)
elif line.startswith('<ns/>'):
if self.append_space:
no_space = True
if self.append_no_space:
if self.show_tags:
sentence.append(('', 'no-space'))
else:
sentence.append('')
elif line.startswith('</cesAna'):
pass
def _read_data(self, stream):
self.position = stream.tell()
buff = stream.read(4096)
lines = buff.split('\n')
lines.reverse()
return lines
def _seek(self, stream):
stream.seek(self.position)
def _append_space(self, sentence):
if self.show_tags:
sentence.append((' ', 'space'))
else:
sentence.append(' ')
|
adazey/Muzez
|
libs/nltk/corpus/reader/ipipan.py
|
Python
|
gpl-3.0
| 13,048
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2012 Unknown <diogo@arch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import re
from os.path import basename, splitext, join
from os import sep
from collections import OrderedDict
class PartitionException(Exception):
def __init__(self, value):
self.message = value
def __str__(self):
return repr(self.message)
class InvalidPartitionFile(Exception):
def __init__(self, value):
self.message = value
def __str__(self):
return repr(self.message)
class Partitions(object):
"""Alignment partitions interface for `Alignment` and `AlignmentList`.
The Partitions class is used to define partitions for `Alignment`
and `AlignmentList` objects and associate substitution models to
each partition. After instantiating, partitions may be set in two ways:
- Partition files: Being Nexus charset blocks and RAxML partition files
currently supported
- Tuple-like objects: Containing the ranges and names of the partitions
Attributes
----------
partition_length : int
Length of the total partitions.
partitions : OrderedDict
Storage of partition names (key) and their range (values).
partitions_index : list
The index (starting point) for each partition, including codon
partitions.
partitions_alignments : OrderedDict
Storage of the partition names (key) and their corresponding
alignment files (values).
alignments_range : OrderedDict
Storage of the alignment names (key) and their range (values).
models : OrderedDict
Storage of partition names (key) and their models (values).
merged_files : dict
Storage of the original range (values) of every alignment file (key).
counter : int
Indicator of where the last partition ended.
partition_format : str
Format of the original partition file, if any.
"""
_models = {"mrbayes": {}}
# =========================================================================
# MrBayes models
# =========================================================================
"""
MrBayes substitution models are stored in the dictionary
_models["mrbayes"]. The keys of the dictionary are the name of the
substitution models (usually in capital letters) and the values will
contain the instructions to specific such model in a list. Each element
of the list corresponds to one line
"""
# GTR
_models["mrbayes"]["GTR"] = {"lset": "nst=6",
"prset": "statefreqpr=dirichlet(1,1,1,1)"}
# SYM
_models["mrbayes"]["SYM"] = {"lset": "nst=6",
"prset": "statefreqpr=fixed(equal)"}
# HKY
_models["mrbayes"]["HKY"] = {"lset": "nst=2",
"prset": "statefreqpr=dirichlet(1,1,1,1)"}
# K2P
_models["mrbayes"]["K2P"] = {"lset": "nst=2",
"prset": "statefreqpr=fixed(equal)"}
# F81
_models["mrbayes"]["F81"] = {"lset": "nst=1",
"prset": "statefreqpr=dirichlet(1,1,1,1)"}
# JC
_models["mrbayes"]["JC"] = {"lset": "nst=1",
"prset": "statefreqpr=fixed(equal)"}
def __init__(self):
self.partition_length = 0
"""
The length of the locus may be necessary when partitions are defined
in the input files using the "." notation, meaning the entire locus.
Therefore, to convert this notation into workable integers, the size
of the locus must be provided using the set_length method.
"""
self.partitions = OrderedDict()
"""
partitions will contain the name and range of the partitions for a given
alignment object. Both gene and codon partitions will be stored in this
attribute, but gene partitions are the main entries. An example of
different stored partitions is::
partitions = {"partitionA": ((0, 856), False),
"partitionB": ((857, 1450), [857,858,859] }
"partitionA" is a simple gene partition ranging from 0 to 856, while
"partitionB" is an assembly of codon partitions. The third element of
the tuple is destined to codon partitions. If there are none, it should
be False. If there are codon partitions, a list should be provided with
the desired initial codons. In the example above, "partitionB" has
actually 3 partitions starting at the first, second and third sequence
nucleotide of the main partition.
"""
self.partitions_index = []
"""
partitions_index will remember the index of all added partitions. This
attribute was created because codon models are added to the same parent
partitions, thus losing their actual index. This is important for
Nexus files, where models are applied to the index of the partition.
This will simply store the partition names, which can be accessed using
their index, or searched to return their index. To better support codon
partitions, each entry in the partitions_index will consist in a list,
in which the first element is the partition name, and the second element
is the index of the subpartition. An example would be::
partitions_index = [["partA", 0], ["partA", 1], ["partA", 2],
["partB", 0]]
in which, partA has 3 codon partitions, and partB has only one partition
"""
self.partitions_alignments = OrderedDict()
"""
The partitions_alignments attribute will associate the partition with
the corresponding alignment files. For single alignment partitions,
this will provide information on the file name. For multiple
alignments, besides the information of the file names, it will
associate which alignments are contained in a given partition and
support multi alignment partitions. An example would be::
partitions_alignments = {"PartitionA": ["FileA.fas"],
"PartitionB": ["FileB.fas", "FileC.fas"]}
"""
self.partitions_type = OrderedDict()
"""
Stores the sequence type of each partition.
"""
self.alignments_range = OrderedDict()
"""
"""
self.models = OrderedDict()
"""
The self.models attribute will contain the same key list as
self.partitions and will associate the substitution models to each
partitions. For each partition, the format should be as follows::
models["partA"] = [[[..model_params..]],[..model_names..],
["12", "3"]]
The first element is a list that may contain the substitution model
parameters for up to three subpartitions, the second element is also
a list with the corresponding names of the substitution models and
the third list will store any links between models. It is important
that the links list contains only strings and not integers.
"""
self.merged_files = {}
"""
This attribute will keep a record of the original ranges of every file
that was merged. This is useful to split partitions according to files
or to undo any changes. Each entry should be::
{"alignment_file1": (0, 1234), "alignment_file2": (3444, 6291)}
"""
self.counter = 0
"""
The counter attribute will be used as an indication of where the last
partition ends when one or more partitions are added
"""
self.partition_format = None
def __iter__(self):
"""Iterator behavior for `Partitions`.
        The class iterator iterates over the `partitions` storage, yielding
        (name, range) tuples as stored internally (with python's 0-based
        offsets).
        Returns
        -------
        _ : iter
Iterator of `partitions.items()`.
"""
return iter(self.partitions.items())
def reset(self, keep_alignments_range=False, cur=None):
"""Clears partitions and attributes
Clears partitions and resets object to __init__ state. The original
alignment range can be retained by setting the `keep_alignments_range`
argument to True.
Parameters
----------
keep_alignments_range : bool
If True, the `alignments_range` attribute will not be reset.
"""
self.partitions = OrderedDict()
self.partitions_index = []
self.partitions_alignments = OrderedDict()
self.models = OrderedDict()
self.counter = 0
if not keep_alignments_range:
self.alignments_range = OrderedDict()
self.partitions_type = OrderedDict()
def _sort_partitions(self):
part_start = {}
for name, lrange in self.partitions.items():
if isinstance(lrange[0][0], int):
part_start[name] = lrange[0][0]
else:
part_start[name] = lrange[0][0][0]
self.partitions = OrderedDict(sorted(
self.partitions.items(),
key=lambda x: part_start[x[0]]))
self.partitions_alignments = OrderedDict(sorted(
self.partitions_alignments.items(),
key=lambda x: part_start[x[0]]))
self.models = OrderedDict(sorted(
self.models.items(),
key=lambda x: part_start[x[0]]))
def iter_files(self):
"""Iterates over `partitions_alignments.items()`.
Returns
-------
_ : iter
Iterator of `partitions_alignments.items()`.
"""
return iter(self.partitions_alignments.items())
def set_length(self, length):
"""Set total length of current locus (over all partitions).
Sets the length of the locus. This may be important to convert certain
partition defining nomenclature, such as using the "." to indicate
whole length of the alignment
Parameters
----------
length : int
Integer that will be set as `partition_length`.
"""
self.partition_length = length
#===========================================================================
# Parsers
#===========================================================================
@staticmethod
def _get_file_format(partition_file):
"""Guesses the format of the partition file (Nexus or RAxML's).
Returns
-------
partition_format : str
Format of the partition file ("nexus" or "raxml").
"""
file_handle = open(partition_file)
# Skips first empty lines, if any
header = file_handle.readline()
while header.startswith("\n"):
header = next(file_handle)
fields = header.split()
if fields[0].lower() == "charset":
partition_format = "nexus"
else:
partition_format = "raxml"
return partition_format
def read_from_file(self, partitions_file, no_aln_check=False):
"""Parses partitions from file
This method parses a file containing partitions. It supports
partitions files similar to RAxML's and NEXUS charset blocks. The
        NEXUS file, however, must only contain the charset block. Substitution
        model information is not parsed by this method; when present (e.g.,
        the model field of RAxML partition lines), it is ignored, so models
        must be set separately to keep the Partitions object consistent.
Parameters
----------
partitions_file : str
Path to partitions file.
no_aln_check : bool
Checks consistency with previously set partitions. Set to True
to disable this check (usually when reading a partition file
for a mock/empty Partition object).
Raises
------
PartitionException
When one partition definition cannot be parsed.
"""
# Get the format of the partition file
self.partition_format = self._get_file_format(partitions_file)
part_file = open(partitions_file)
# In order to support unsorted partition ranges, the complete
# partition set will be stored temporary in memory. Even very large
# partition files should result in relatively small data structures.
# Once this variable is populated, it will be sorted according to the
# first element of the range.
temp_ranges = []
# TODO: Add support for codon partitions in raxml format
if self.partition_format == "raxml":
for p, line in enumerate(part_file):
# Ignore empty lines
if line.strip() == "":
continue
# A wrongly formatted raxml partition file may be provided, in
# which case an IndexError exception will be raised. This will
# handle that exception
try:
fields = line.split(",", 1)
# Get partition name as string
partition_name = fields[1].split("=")[0].strip()
# Get partition range as list of int
pr_temp = fields[1].split("=")[1]
try:
partition_range = [
[int(i) - 1 for i in x.strip().split("-")]
for x in pr_temp.strip().split(",")
]
except ValueError as e:
                        # A ValueError may be raised when there is a "."
                        # notation in the partition range. If so, convert
                        # the "." to the sequence length. If no sequence
                        # length has been provided, raise another exception
pr = pr_temp.strip().split("-")
if pr[1] == ".":
if self.partition_length:
partition_range = [[int(pr[0]) - 1,
self.partition_length - 1]]
else:
return PartitionException(
"The length of the locus must be "
"provided when partitions are "
"defined using '.' notation to "
"mean full length")
else:
raise e
# Check which alignment file contains the current partition
if self.alignments_range:
try:
file_name = \
[x for x, y in self.alignments_range.items() if
y[0] <= partition_range[0][0] < y[1]]
except IndexError:
file_name = None
else:
file_name = None
temp_ranges.append([partition_name, file_name,
partition_range])
except (IndexError, ValueError):
return InvalidPartitionFile(
"Badly formatted partitions file in line {} "
"with:\n\n{}".format(p + 1, line))
elif self.partition_format == "nexus":
for line in part_file:
# Ignore empty lines
if line.strip() != "":
try:
res = self.read_from_nexus_string(line,
return_res=True)
except PartitionException as e:
return e
if res:
temp_ranges.append(res)
# Sort partition ranges according to the first element of the range
temp_ranges.sort(key=lambda part: part[2][0][0])
for _, file_name, _ in temp_ranges:
try:
seq_type = set([self.partitions_type[x] for x in file_name])
if len(seq_type) > 1:
return InvalidPartitionFile(
"The range of the defined partitions must have the "
"same sequence type (e.g. protein or nucleotide)")
except TypeError:
pass
# if not no_aln_check:
# if temp_ranges[-1][2][-1] != self.partitions.values()[-1][0][1]:
# return InvalidPartitionFile(
# "The complete range of the partition file does not match"
# " the current alignment set")
# Resets previous partitions (except alignments_range)
self.reset(keep_alignments_range=True)
for name, file_name, part_range in temp_ranges:
# Add information to partitions storage
try:
self.add_partition(name,
locus_range=part_range,
file_name=file_name)
except InvalidPartitionFile as e:
print(e)
return e
def read_from_nexus_string(self, nx_string, file_name=None,
return_res=False):
"""Parses a single nexus string with partition definition.
Parameters
----------
nx_string : str
String with partition definition
file_name : str, optional
String with name of the file corresponding to the partition.
return_res : bool
If True, it will only return the parsed partition information.
If False, it will add the parsed partition to the `Partitions`
object.
"""
try:
fields = nx_string.split("=")
partition_name = fields[0].split()[1].strip()
# If this list has 2 elements, it should be a simple gene partition
# If it has 3 elements, it should be a codon partition
partition_full = re.split(r"[-\\]", fields[1].strip().
replace(";", "").replace("/", "\\"))
# If partition is defined using "." notation to mean full length
if partition_full[1] == ".":
if self.partition_length:
partition_range = [[int(partition_full[0]) - 1,
self.partition_length - 1]]
else:
raise PartitionException("The length of the locus must be "
"provided when partitions are "
"defined using '.' notation to "
"mean full length")
else:
partition_range = [[int(partition_full[0]) - 1,
int(partition_full[1]) - 1]]
# Check which alignment file contains the current partition
if self.alignments_range:
try:
file_name = \
[x for x, y in self.alignments_range.items() if
y[0] <= partition_range[0][0] < y[1]]
except IndexError:
file_name = None
else:
file_name = None
if return_res:
return [partition_name, file_name, partition_range]
else:
self.add_partition(partition_name, locus_range=partition_range,
file_name=file_name)
# If, for some reason, the current line cannot be interpreted as a
# charset line, ignore it.
except (IndexError, ValueError):
if return_res:
return None
else:
pass
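    # Example (hypothetical charset line, a minimal sketch):
    #   parts.read_from_nexus_string("charset gene1 = 1-500;")
    #   # registers partition "gene1" with the 0-based range [[0, 499]]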
def get_partition_names(self):
"""Returns a list with the name of the partitions
Returns
-------
names : list
List with names of the partitions. When a parent
partition has multiple codon partitions, it returns a partition
name for every codon starting position present.
"""
names = []
for part, vals in self.partitions.items():
if vals[1]:
names.extend([part + "_%s" % (x[0] + 1) for x in vals[1]])
else:
names.append(part)
return names
def is_single(self):
"""Returns whether the current `Partitions` has single or multiple
partitions.
Returns
-------
_ : bool
Returns True is there is only a single partition defined,
and False if there are multiple partitions.
"""
if len(self.partitions) == 1:
if not [x for x in self.partitions.values()][0][1]:
return True
else:
return False
else:
return False
def is_contiguous(self):
"""Returns whether the current partitions have a contiguous range
Returns
-------
_ : bool
Returns True if all partitions have a contiguous range. Else,
False
"""
for i in self.partitions.values():
# If any of the current partitions is a list of tuples instead
# of a single tuple, the partitions are not contiguous
if not isinstance(i[0][0], int):
return False
return True
def _find_parent(self, max_range):
"""Finds the parent partition from a specified range.
Finds a parent partition of a codon partition.
Parameters
----------
max_range : int
The maximum range of the codon partition.
Returns
-------
part : str
The name of the parent partition, from the `partitions` attribute.
"""
for part, vals in self.partitions.items():
lrange = vals[0][0]
if lrange[1] == max_range:
return part
def add_partition(self, name, length=None, locus_range=None, codon=False,
use_counter=False, file_name=None, model_cls=None,
auto_correct_name=True, seq_type=None):
"""Adds a new partition.
Adds a new partition providing the length or the range of current
alignment. If both are provided, the length takes precedence.The range
of the partition should be in python index, that is, the first position
should be 0 and not 1.
Parameters
----------
name : str
Name of the partition.
length : int, optional
Length of the alignment.
locus_range : list or tuple, optional
Range of the partition.
codon : list
If the codon partitions are already defined, provide the
starting points in list format, e.g: [1,2,3].
use_counter : bool
If True, `locus_range` will be updated according to the `counter`
attribute.
file_name : str
Name of the alignment file.
model_cls :
Specified the substitution model that will be set in `models`.
auto_correct_name : bool
If set to True, when a partition name already exist, add a counter
to the end of the name.
Notes
-----
IMPORTANT NOTE on self.model: The self.model attribute was designed
in a way that allows the storage of different substitution models
inside the same partition name. This is useful for codon partitions that
share the same parent partition name. So, for example, a parent
partition named "PartA" with 3 codon partitions can have a different
model for each one like this::
self.models["PartA"] = [[[..model1_params..], [..model2_params..],
[..model3_params..]], [GTR, GTR, GTR], ["1", "2", "3"]]
"""
# Check for duplicate names in partitions
if name in self.partitions:
            if auto_correct_name:
                c = 1
                while "{}_{}".format(name, c) in self.partitions:
                    c += 1
                name = "{}_{}".format(name, c)
else:
raise PartitionException("Partition name %s is already in "
"partition table" % name)
# When length is provided
if length:
# Add to or update alignments_range attribute. This will store the
# original range of the alignment
if file_name and (isinstance(file_name, unicode) or
isinstance(file_name, str)):
if file_name in self.alignments_range:
current_range = [self.counter, self.counter + (length - 1)]
# If start position is earlier than before, update
if current_range[0] < self.alignments_range[file_name][0]:
self.alignments_range[file_name][0] = current_range[0]
# If stop position if later than before, update
if current_range[1] > self.alignments_range[file_name][1]:
self.alignments_range[file_name][1] = current_range[1]
else:
self.alignments_range[file_name] = [
self.counter, self.counter + (length - 1)]
# Add partition to index list
self.partitions_index.append([name, 0])
# Add partition to alignment list
if isinstance(file_name, list):
self.partitions_alignments[name] = file_name
else:
try:
self.partitions_alignments[name].append(
file_name if file_name else name)
except KeyError:
self.partitions_alignments[name] = [
file_name if file_name else name]
# Create empty model attribute for a single partition
if model_cls:
self.models[name] = model_cls
else:
self.models[name] = [[[]], [None], []]
self.partitions[name] = [[[self.counter,
self.counter + (length - 1)]], codon]
self.counter += length
self.partition_length += length
# When a list/tuple range is provided
elif locus_range:
if use_counter:
locus_range[0] = [
self.counter,
self.counter + locus_range[0][1] - locus_range[0][0]
]
for p, x in enumerate(locus_range[1:]):
locus_range[p + 1] = [self.counter + x[1] - x[0]]
# Add to or update alignments_range attribute. This will store the
# original range of the alignment
if file_name and len(locus_range) == 1 and \
(isinstance(file_name, unicode) or
isinstance(file_name, str)):
if file_name in self.alignments_range:
if locus_range[0][0] < self.alignments_range[file_name][0][0]:
self.alignments_range[file_name][0][0] = locus_range[0][0]
if locus_range[0][1] > self.alignments_range[file_name][0][1]:
self.alignments_range[file_name][0][1] = locus_range[0][1]
else:
self.alignments_range[file_name] = locus_range
# Find the parent partition
parent_partition = self._find_parent(locus_range[0][1])
# If the maximum range of the current partition is already included
# in some other partition, and no codon partitions were provided
# using the "codon" argument, then it should be an undefined codon
# partition and should be added to an existing partition
if locus_range[0][1] <= self.counter and not codon and \
len(locus_range) == 1 and parent_partition:
# if not parent_partition:
# raise InvalidPartitionFile(
# "Could not find parent partition of {}. Check the"
# " ranges of your partitions to ensure no range "
# "overlaps".format(name))
# If no codon partition is present in the parent partition,
# create one
if not self.partitions[parent_partition][1]:
# Add partition to index list
self.partitions_index.append([parent_partition, 1])
# Create empty model attribute for two partitions
self.models[parent_partition] = [[[], []], [None, None], []]
parent_start = self.partitions[parent_partition][0][0]
self.partitions[parent_partition][1] = [parent_start,
locus_range[0]]
else:
# Create empty model attribute for additional partitions
self.models[parent_partition][0].append([])
self.models[parent_partition][1].append(None)
# Add partition to index list
self.partitions_index.append([parent_partition, 2])
self.partitions[parent_partition][1].append(locus_range[0])
# If the start of the current partition is already within the range
# of a previous partitions, raise an exception
elif locus_range[-1][0] < self.counter and parent_partition:
raise InvalidPartitionFile(
"Badly formatted partition with range [{}-{}] starts "
"inside the range of a previous partitions ({})".format(
locus_range[0], locus_range[1], self.counter))
# Else, create the new partition. If codon is provided, the codon
# information is automatically added
else:
if model_cls:
self.models[name] = model_cls
else:
# Create empty model attribute for a single partition
self.models[name] = [[[]], [None], []]
if codon:
self.partitions_index = [[name, x] for x in codon]
else:
# Add partition to index list
self.partitions_index.append([name, 0])
if isinstance(file_name, list):
self.partitions_alignments[name] = file_name
else:
try:
self.partitions_alignments[name].append(
file_name if file_name else name)
except KeyError:
self.partitions_alignments[name] = [
file_name if file_name else name]
self.partitions[name] = [locus_range,
codon]
self.counter = locus_range[-1][1] + 1
self.partition_length = locus_range[-1][1] + 1
fl_name = file_name if file_name else name
if isinstance(fl_name, list):
for fl in fl_name:
self.partitions_type[fl] = seq_type
else:
self.partitions_type[fl_name] = seq_type
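    # Example (a minimal sketch): defining two contiguous gene partitions
    #   parts = Partitions()
    #   parts.add_partition("geneA", length=500, file_name="geneA.fas")
    #   parts.add_partition("geneB", length=300, file_name="geneB.fas")
    #   # parts.partitions == {"geneA": [[[0, 499]], False],
    #   #                      "geneB": [[[500, 799]], False]}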
def _remove_routine(self, part_name):
"""
        Routine that removes a partition based on its name. It can be used
when calling the remove_partition method with the partition_name
argument, or with the file_name argument when the partition only
contains that file name
"""
if not isinstance(part_name, list):
part_name = [part_name]
# Remove partition from partition_index
self.partitions_index = [
x for x in self.partitions_index if x[0] not in part_name]
for p in part_name:
# Remove partitions_alignments
del self.partitions_alignments[p]
# Remove models
del self.models[p]
# Remove from partitions
self.partitions = self._rm_part(part_name)
def _rm_part(self, nm):
"""
Remove a partition from self.partitions and update the ranges of
the remaining partitions
"""
for i in nm:
del self.partitions[i]
new_dic = self.sort_partitions()
return new_dic
def remove_partition(self, partition_name=None, file_name=None,
file_list=None, ns=None):
"""Removes partitions.
Removes a partitions by a given partition or file name. This will
handle any necessary changes on the remaining partitions. The changes
will be straightforward for most attributes, such as partitions_index,
partitions_alignments and models, but it will require a re-structuring
of partitions because the ranges of the subsequent partitions will
have to be adjusted.
Parameters
----------
partition_name : str
Name of the partition.
file_name : str
Name of the alignment file.
ns : multiprocesssing.Manager.Namespace
A Namespace object used to communicate with the main thread
in TriFusion.
"""
if partition_name:
# Raise exception if partition name does not exist
if partition_name not in self.partitions:
raise PartitionException("%s is not a partition name" %
partition_name)
self._remove_routine(partition_name)
if file_list:
part_list = []
update_parts = []
for part, fl in self.partitions_alignments.items():
if len(fl) == 1:
if fl[0] in file_list:
part_list.append(part)
continue
als = [True if x in file_list else False for x in fl]
if als:
if all(als):
part_list.append(part)
elif any((x for x in fl if x in file_list)):
part_list.append(part)
update_parts += [x for x in fl if x not in file_list]
            # Note: this will remove a partition containing multiple
            # alignments if at least one of them is in the file_list argument
self.update_deleted_partition(update_parts)
self._remove_routine(part_list)
if file_name:
# Set file_found to True, when there is a match. If no match is
# found, raise a PartitionException at the end of the loop.
file_found = False
for part, file_list in self.partitions_alignments.items():
if file_name in file_list:
file_found = True
# If the partitions consists only of the provided file,
# Remove the entire partition
if len(file_list) == 1:
self._remove_routine(part)
# If the partition contains other files, then only remove
# the current file from the partition
else:
self.partitions_alignments[part].remove(file_name)
if not file_found:
raise PartitionException("%s file does not belong to any"
"partition" % file_name)
def change_name(self, old_name, new_name):
"""Changes name of a partition.
Parameters
----------
old_name : str
Original partition name.
new_name : str
New partition name.
"""
self.partitions[new_name] = self.partitions.pop(old_name)
self.partitions_alignments[new_name] = \
self.partitions_alignments.pop(old_name)
self.models[new_name] = self.models.pop(old_name)
def merge_partitions(self, partition_list, name):
"""Merges multiple partitions into a single one.
Parameters
----------
partition_list : list
List with partition names to be merged.
name : str
Name of new partition
"""
def merger(ranges):
"""
Generator that merges ranges in a list of tuples. For example,
if ranges is [(1, 234), (235, 456), (560, 607), (607,789)]
this generator will yield [(1, 456), (560, 789)]
"""
previous = 0
last_start = 0
for st, en in ranges:
if not previous:
last_start = st
previous = en
elif st - 1 == previous:
previous = en
else:
yield last_start, previous
previous = en
last_start = st
yield last_start, en
def flatter(s):
"""
Creates a flat iterator of tuples. If s is [[(1,2), (2,3)], (4,5)]
this will yield ((1,2), (2,3), (4,5))
"""
for i in s:
if isinstance(i, tuple):
yield i
else:
for j in i:
yield j
# Get new range
new_range = [x for x in merger(flatter((y[0] for x, y in
self.partitions.items()
if x in partition_list)))]
# Add entries for new partition
self.partitions[name] = [new_range[0] if len(new_range) == 1 else
new_range, False]
self.partitions_alignments[name] = list(set([i for x, y in
self.partitions_alignments.items()
if x in partition_list for i in y]))
self.models[name] = [[[]], [None], []]
# Delete previous partitions and update merged dict
for p in partition_list:
if len(self.partitions_alignments[p]) == 1:
self.merged_files[self.partitions_alignments[p][0]] = \
self.partitions[p][0]
del self.partitions[p]
del self.partitions_alignments[p]
del self.models[p]
self._sort_partitions()
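    # Example (a minimal sketch, continuing the add_partition example):
    #   parts.merge_partitions(["geneA", "geneB"], "merged")
    #   # "merged" now spans the single contiguous range (0, 799)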
@staticmethod
    def _test_range_overlap(ref, r2):
r2 = r2[0]
# Complete inclusion of r2 in ref
if r2[0] >= ref[0] and r2[1] <= ref[1]:
return True
# Partial overlap at minimum value
if r2[1] > ref[0] > r2[0]:
return True
# Partial overlap at maximum value
if r2[1] > ref[1] > r2[0]:
return True
def split_partition(self, name, new_range=None, new_names=None):
"""Splits one partition into two.
Splits a partitions with `name` into two with the tuple list provided
by `new_range`. If new_range is None, This will split the partition
by its alignment files instead.
Parameters
----------
name : str
Name of the partition to be split.
new_range : list or tuple, optional
List of two tuples, containing the ranges of the new partitions.
new_names : list, optional
The names of the new partitions.
"""
if new_range:
# Add new partitions
for n, r in zip(new_names, new_range):
self.partitions[n] = [[r], False]
# Create new partitions_alignments. Keep the original alignment
# file for both
self.models[n] = [[[]], [None], []]
if len(self.partitions_alignments[name]) == 1:
self.partitions_alignments[n] = [
self.partitions_alignments[name][0]]
else:
self.partitions_alignments[n] = []
for aln in self.partitions_alignments[name]:
                        if self._test_range_overlap(
                                r, self.merged_files[aln]):
self.partitions_alignments[n].append(aln)
else:
for aln in self.partitions_alignments[name]:
# Get original range of alignment file
_new_range = self.merged_files[aln]
n_range = _new_range[0] if len(_new_range) == 1 else \
_new_range
# Add new partitions
aln_name = basename(aln)
self.partitions[aln_name] = [[n_range], False]
self.partitions_alignments[aln_name] = [aln]
self.models[aln_name] = [[[]], [None], []]
# Delete original partition
del self.partitions[name]
del self.partitions_alignments[name]
del self.models[name]
self._sort_partitions()
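    # Example (a minimal sketch): splitting the merged partition back in two
    #   parts.split_partition("merged", new_range=[(0, 499), (500, 799)],
    #                         new_names=["geneA", "geneB"])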
def get_sequence_type(self, name):
ref_fl = self.partitions_alignments[name][0]
return self.partitions_type[ref_fl]
def sort_partitions(self, part_list=None, sort_types=False):
"""Sorts partitions according to a list of partition names
Parameters
----------
part_list : list
List with partition names.
"""
if sort_types:
part_storage = []
for nm in self.partitions:
ref_aln = self.partitions_alignments[nm][0]
seq_type = self.partitions_type[ref_aln]
part_storage.append((nm, 0 if seq_type == "DNA" else 1))
part_storage = sorted(part_storage, key=lambda i: i[1])
lst = [x[0] for x in part_storage]
else:
lst = part_list if part_list else self.partitions.items()
new_dic = OrderedDict()
counter = 0
for p, res in enumerate(lst):
if sort_types:
vals = self.partitions[res]
nm = res
else:
nm = res[0]
vals = res[1]
# Check if the starting position of the next partition is the
# same as the counter. If so, add the vals to the new dict.
# Else, correct the ranges based on the counter
if vals[0][0][0] == counter:
new_dic[nm] = vals
counter = vals[0][0][1] + 1
else:
                # Get length of the partition
part_len = vals[0][0][1] - vals[0][0][0]
# Create corrected range
part_range = [counter, counter + part_len]
# Correct codon position start if any
if vals[1]:
codon = [counter, counter + 1, counter + 2]
else:
codon = False
new_dic[nm] = [[part_range], codon]
counter = counter + part_len + 1
return new_dic
def update_deleted_partition(self, file_list):
for fl in file_list:
new_range = self.merged_files[fl]
aln_name = basename(fl)
self.partitions[aln_name] = [new_range, False]
self.partitions_alignments[aln_name] = [fl]
self.models[aln_name] = [[[]], [None], []]
# ==========================================================================
# Model handling
# ==========================================================================
def parse_nexus_model(self, string):
"""Parses a substitution model defined in a prset and/or lset command.
Parameters
----------
string : str
String with the prset or lset command.
"""
string = string.lower()
# Find out which partitions the current parameters apply to. If
# detected, it should be something like "applyto=(1,2)"
applyto = re.findall(r"applyto=\(.*\)", string)
# Find parameters
nst = re.findall(r"nst=[0-9]", string)
statefreqpr = re.findall(r"statefreqpr=.*\)", string)
# Collect params
params = [x[0] for x in [nst, statefreqpr] if x]
if applyto:
if applyto == ["applyto=(all)"]:
for partition in self.partitions:
self.models[partition][0] += params
else:
# Get target partitions
part_index = [int(x) for x in
re.split("[()]", applyto[0])[1].split(",")]
for i in part_index:
part = self.partitions_index[i - 1]
# Get partition name
part_name = part[0]
# Get subpartition index. 0 if single partition, other if
# multiple subpartition
part_subpart = part[1]
self.models[part_name][0][part_subpart] += params
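    # A hedged illustration of the parsing above (hypothetical lset line):
    # for string = "lset applyto=(1,3) nst=6", the regexes yield
    # applyto == ["applyto=(1,3)"] and params == ["nst=6"], and "nst=6" is
    # appended to the model parameters of partitions 1 and 3.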
def get_model_name(self, params):
"""Given a list of parameters, return the name of the model
Parameters
----------
params : list
List of prset/lset parameters
Returns
-------
model : str or None
            Returns the name of the matching model, or None if no match
            is found.
"""
for model, p in self._models["mrbayes"].items():
if params == p:
return model
else:
return None
def set_model(self, partition, models, links=None, apply_all=False):
"""Sets substitution model for a given partition.
Parameters
----------
partition : str
Partition name.
models : list
            Model names for each of the three codon positions. If there
            are no codon partitions, provide a single-element list.
links : list
Provide potential links between codon models. For
example, if codon 1 and 2 are to be linked, it should be:
links=["12", "3"]
apply_all : bool
If True, the current model will be applied to all partitions.
"""
# Get list with partitions to be changed
if apply_all:
plist = [x for x in self.partitions]
else:
plist = [partition]
# Get the sequence type of the provided partition. If apply_all
# is set to True, only the partitions with the same seq type will
# be changed
seq_type = self.get_sequence_type(partition)
# Replace "No model" string with None
models = [None if x == "No model" else x for x in models]
# Set model to the whole partition
if len(models) == 1:
# If the current partition was previously defined as having codon
# partitions, revert it
for p in plist:
if self.get_sequence_type(p) == seq_type:
if self.partitions[p][1]:
self.partitions[p][1] = False
self.models[p][1] = models
if any(self.models[p][2]):
self.models[p][2] = []
# Set codon models
else:
for p in plist:
if self.get_sequence_type(p) == seq_type:
# Change the partition in self.partitions to have codon
# partitions
# This handles the case where the partition has a contiguous
# range.
if isinstance(self.partitions[p][0], tuple):
st_idx = self.partitions[p][0][0]
# This handles the case where the partition has a
# non-contiguous range.
else:
st_idx = self.partitions[p][0][0][0]
self.partitions[p][1] = [st_idx + x for x in range(3)]
self.models[p][1] = models
self.models[p][2] = links
def write_to_file(self, output_format, output_file, model="LG"):
"""Writes partitions to a file.
Writes the Partitions object into an output file according to the
output_format. The supported output formats are RAxML and Nexus. The
`model` option is for the RAxML format only.
Parameters
----------
output_format : str
Output format of partitions file. Can be either "nexus" or
"raxml".
output_file : str
Path to output file.
model : str
Name of the model for the partitions. "raxml" format only.
"""
if output_format == "raxml":
outfile_handle = open(output_file + ".part.File", "w")
for part, rge in self.partitions.items():
_partition_range = []
for x in rge[0]:
_partition_range.append("{}-{}".format(x[0] + 1,
x[1] + 1))
partition_range = ",".join(_partition_range)
outfile_handle.write("%s, %s = %s\n" % (model,
part,
partition_range))
outfile_handle.close()
elif output_format == "nexus":
outfile_handle = open(output_file + ".charset", "w")
for part, rge in self.partitions.items():
for x in rge[0]:
outfile_handle.write(
"charset %s = %s-%s;\n" % (
part, x[0] + 1, x[1] + 1
)
)
outfile_handle.close()
return 0
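# A minimal standalone sketch of the two formats written above (hypothetical
# partition names and ranges; it reuses the same format strings but does not
# require a populated Partitions object):
def _example_partition_lines():
    parts = {"gene1": [[(0, 499)], False], "gene2": [[(500, 899)], False]}
    raxml, nexus = [], []
    for part, rge in parts.items():
        rng = ",".join("{}-{}".format(x[0] + 1, x[1] + 1) for x in rge[0])
        raxml.append("LG, %s = %s" % (part, rng))  # e.g. "LG, gene1 = 1-500"
        for x in rge[0]:
            nexus.append("charset %s = %s-%s;" % (part, x[0] + 1, x[1] + 1))
    return raxml, nexus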
class Zorro(object):
"""
Class that handles the concatenation of zorro weights.
Parameters
----------
alignment_list : trifusion.process.sequence.AlignmentList
AlignmentList object.
suffix : str
Suffix of the zorro weight files, based on the corresponding
input alignments.
zorro_dir : str
Path to directory where zorro weight files are stored.
"""
def __init__(self, alignment_list, suffix="_zorro.out", zorro_dir=None):
        self.weight_values = []
self.suffix = suffix
for file_path in [x.path for x in alignment_list.alignments.values()]:
# If zorro_dir is provided, use the specified path
if zorro_dir:
zorro_file = splitext(basename(file_path))[0]
zorro_file = "{}{}.txt".format(join(zorro_dir, zorro_file),
suffix)
# If zorro_dir is not provided, use the same path as the input
# alignment
else:
zorro_file = file_path.split(".")[0] + self.suffix + ".txt"
            # (the zorro weight file is assumed to sit alongside its
            # corresponding alignment file)
zorro_handle = open(zorro_file)
            self.weight_values += [int(round(float(weight.strip()))) for
                                   weight in zorro_handle]
def write_to_file(self, output_file):
""" Creates a concatenated file with the zorro weights for the
corresponding alignment files."""
outfile = output_file + "_zorro.out"
outfile_handle = open(outfile, "w")
        for weight in self.weight_values:
            outfile_handle.write("%s\n" % weight)
outfile_handle.close()
__author__ = "Diogo N. Silva"
|
ODiogoSilva/TriFusion
|
trifusion/process/data.py
|
Python
|
gpl-3.0
| 52,874
|
from os.path import exists
from cornice.service import Service
from pkg_resources import get_distribution
from ..config import path, get_logger
log = get_logger(__name__)
app_info = Service(
name='appinfo',
path=path(''),
renderer='json',
accept='application/json')
@app_info.get()
def get_app_info(request):
result = dict(
version=get_distribution('senic_hub').version,
bin_path=request.registry.settings['bin_path'],
onboarded=is_hub_onboarded(request)
)
return result
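# A hedged example of the JSON payload produced above (all values are
# illustrative only):
# {"version": "0.1.0", "bin_path": "/usr/bin", "onboarded": false}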
def is_hub_onboarded(request):
nuimo_app_config_path = request.registry.settings['nuimo_app_config_path']
devices_path = request.registry.settings['devices_path']
homeassistant_config_path = request.registry.settings['homeassistant_config_path']
nuimo_mac_address_filepath = request.registry.settings['nuimo_mac_address_filepath']
return (exists(nuimo_app_config_path) and
exists(devices_path) and
exists(homeassistant_config_path) and
exists(nuimo_mac_address_filepath))
|
grunskis/senic-hub
|
senic_hub/backend/views/appinfo.py
|
Python
|
mit
| 1,062
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import httpretty
import json
import sure
from pyeqs import QuerySet, Filter
from pyeqs.dsl import Term, Sort, ScriptScore
from tests.helpers import homogeneous
@httpretty.activate
def test_create_queryset_with_host_string():
"""
Create a queryset with a host given as a string
"""
    # When I create a queryset
t = QuerySet("localhost", index="bar")
# And I have records
response = {
"took": 1,
"hits": {
"total": 1,
"max_score": 1,
"hits": [
{
"_index": "bar",
"_type": "baz",
"_id": "1",
"_score": 10,
"_source": {
"foo": "bar"
},
"sort": [
1395687078000
]
}
]
}
}
httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search",
body=json.dumps(response),
content_type="application/json")
# When I run a query
results = t[0:1]
# Then I see the response.
len(results).should.equal(1)
@httpretty.activate
def test_create_queryset_with_host_dict():
"""
Create a queryset with a host given as a dict
"""
    # When I create a queryset
connection_info = {"host": "localhost", "port": 8080}
t = QuerySet(connection_info, index="bar")
# And I have records
good_response = {
"took": 1,
"hits": {
"total": 1,
"max_score": 1,
"hits": [
{
"_index": "bar",
"_type": "baz",
"_id": "1",
"_score": 10,
"_source": {
"foo": "bar"
},
"sort": [
1395687078000
]
}
]
}
}
bad_response = {
"took": 1,
"hits": {
"total": 0,
"max_score": None,
"hits": []
}
}
httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search",
body=json.dumps(bad_response),
content_type="application/json")
httpretty.register_uri(httpretty.GET, "http://localhost:8080/bar/_search",
body=json.dumps(good_response),
content_type="application/json")
# When I run a query
results = t[0:1]
# Then I see the response.
len(results).should.equal(1)
results[0]["_source"]["foo"].should.equal("bar")
@httpretty.activate
def test_create_queryset_with_host_list():
"""
Create a queryset with a host given as a list
"""
    # When I create a queryset
connection_info = [{"host": "localhost", "port": 8080}]
t = QuerySet(connection_info, index="bar")
# And I have records
good_response = {
"took": 1,
"hits": {
"total": 1,
"max_score": 1,
"hits": [
{
"_index": "bar",
"_type": "baz",
"_id": "1",
"_score": 10,
"_source": {
"foo": "bar"
},
"sort": [
1395687078000
]
}
]
}
}
bad_response = {
"took": 1,
"hits": {
"total": 0,
"max_score": None,
"hits": []
}
}
httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search",
body=json.dumps(bad_response),
content_type="application/json")
httpretty.register_uri(httpretty.GET, "http://localhost:8080/bar/_search",
body=json.dumps(good_response),
content_type="application/json")
# When I run a query
results = t[0:1]
# Then I see the response.
len(results).should.equal(1)
results[0]["_source"]["foo"].should.equal("bar")
|
Yipit/pyeqs
|
tests/unit/test_connection.py
|
Python
|
mit
| 4,100
|
"""Package contains implementations of processes."""
|
qbahn/grortir
|
grortir/main/model/processes/__init__.py
|
Python
|
mit
| 53
|
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('webhooks/', include('webhooks.urls')),
path('admin/', admin.site.urls),
]
|
saulario/pruebas
|
pipedrive/pipedrive/urls.py
|
Python
|
gpl-3.0
| 176
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
from tornado import gen
from tornado.testing import AsyncTestCase, gen_test
from tornado.test.util import unittest, skipBefore33, skipBefore35, exec_test
try:
from tornado.platform.asyncio import asyncio
except ImportError:
asyncio = None
else:
from tornado.platform.asyncio import AsyncIOLoop, to_asyncio_future
# This is used in dynamically-evaluated code, so silence pyflakes.
to_asyncio_future
@unittest.skipIf(asyncio is None, "asyncio module not present")
class AsyncIOLoopTest(AsyncTestCase):
def get_new_ioloop(self):
io_loop = AsyncIOLoop()
asyncio.set_event_loop(io_loop.asyncio_loop)
return io_loop
def test_asyncio_callback(self):
# Basic test that the asyncio loop is set up correctly.
asyncio.get_event_loop().call_soon(self.stop)
self.wait()
@gen_test
def test_asyncio_future(self):
# Test that we can yield an asyncio future from a tornado coroutine.
# Without 'yield from', we must wrap coroutines in ensure_future,
# which was introduced during Python 3.4, deprecating the prior "async".
if hasattr(asyncio, 'ensure_future'):
ensure_future = asyncio.ensure_future
else:
ensure_future = asyncio.async
x = yield ensure_future(
asyncio.get_event_loop().run_in_executor(None, lambda: 42))
self.assertEqual(x, 42)
@skipBefore33
@gen_test
def test_asyncio_yield_from(self):
# Test that we can use asyncio coroutines with 'yield from'
# instead of asyncio.async(). This requires python 3.3 syntax.
namespace = exec_test(globals(), locals(), """
@gen.coroutine
def f():
event_loop = asyncio.get_event_loop()
x = yield from event_loop.run_in_executor(None, lambda: 42)
return x
""")
result = yield namespace['f']()
self.assertEqual(result, 42)
@skipBefore35
def test_asyncio_adapter(self):
# This test demonstrates that when using the asyncio coroutine
# runner (i.e. run_until_complete), the to_asyncio_future
# adapter is needed. No adapter is needed in the other direction,
# as demonstrated by other tests in the package.
@gen.coroutine
def tornado_coroutine():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(42)
native_coroutine_without_adapter = exec_test(globals(), locals(), """
async def native_coroutine_without_adapter():
return await tornado_coroutine()
""")["native_coroutine_without_adapter"]
native_coroutine_with_adapter = exec_test(globals(), locals(), """
async def native_coroutine_with_adapter():
return await to_asyncio_future(tornado_coroutine())
""")["native_coroutine_with_adapter"]
# Use the adapter, but two degrees from the tornado coroutine.
native_coroutine_with_adapter2 = exec_test(globals(), locals(), """
async def native_coroutine_with_adapter2():
return await to_asyncio_future(native_coroutine_without_adapter())
""")["native_coroutine_with_adapter2"]
# Tornado supports native coroutines both with and without adapters
self.assertEqual(
self.io_loop.run_sync(native_coroutine_without_adapter),
42)
self.assertEqual(
self.io_loop.run_sync(native_coroutine_with_adapter),
42)
self.assertEqual(
self.io_loop.run_sync(native_coroutine_with_adapter2),
42)
# Asyncio only supports coroutines that yield asyncio-compatible
# Futures (which our Future is since 5.0).
self.assertEqual(
asyncio.get_event_loop().run_until_complete(
native_coroutine_without_adapter()),
42)
self.assertEqual(
asyncio.get_event_loop().run_until_complete(
native_coroutine_with_adapter()),
42)
self.assertEqual(
asyncio.get_event_loop().run_until_complete(
native_coroutine_with_adapter2()),
42)
|
legnaleurc/tornado
|
tornado/test/asyncio_test.py
|
Python
|
apache-2.0
| 4,795
|
import unittest
from datetime import date
import pandas as pd
import numpy as np
from dateutil import relativedelta
from excel_helper import ExcelParameterLoader, ParameterRepository, growth_coefficients
class ExcelParameterLoaderTestCase(unittest.TestCase):
def test_parameter_getvalue_random(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test.xlsx', excel_handler='xlrd').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('e')
settings = {'sample_size': 3, 'times': pd.date_range('2016-01-01', '2017-01-01', freq='MS'),
'sample_mean_value': False}
n = np.mean(p())
assert n > 0.7
    def test_parameter_getvalue_random_a(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test.xlsx', excel_handler='xlrd').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
settings = {'sample_size': 3, 'times': pd.date_range('2016-01-01', '2017-01-01', freq='MS'),
'sample_mean_value': False}
n = np.mean(p())
assert n > 0.7
def test_parameter_getvalue_with_settings_mean(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test.xlsx', excel_handler='xlrd').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('e')
settings = {'sample_size': 3, 'times': pd.date_range('2016-01-01', '2017-01-01', freq='MS'),
'sample_mean_value': True}
n = np.mean(p(settings))
assert n > 0.7
def test_load_xlwings(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test.xlsx', excel_handler='xlwings').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
assert p() in [4, 2]
def test_load_xlsx2csv(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test.xlsx', excel_handler='xlsx2csv').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
assert p() in [4, 2]
def test_load_xlrd(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test.xlsx', excel_handler='xlrd').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
assert p() in [4, 2]
def test_load_xlrd_formula(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test.xlsx', excel_handler='xlrd').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('e')
val = p()
print(val)
assert val > 0.7
def test_load_by_sheetname(self):
        defs = list(ExcelParameterLoader(filename='./test_excelparameterloader.xlsx').load_parameter_definitions(
            sheet_name='Sheet1').values())
for i, name in enumerate(['a', 'b', 'c']):
assert defs[i]['variable'] == name
def test_column_order(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx').load_into_repo(sheet_name='shuffle_col_order',
repository=repository)
p = repository.get_parameter('z')
assert p.name == 'z'
assert p.tags == 'x'
def test_choice_single_param(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('choice_var')
assert p() == .9
def test_choice_two_params(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
assert p() in [1, 2]
def test_multiple_choice(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('multiple_choice')
assert p() in [1, 2, 3]
def test_choice_time(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx',
times=pd.date_range('2009-01-01', '2015-05-01', freq='MS'), size=10
).load_into_repo(sheet_name='Sheet1', repository=repository)
p = repository.get_parameter('choice_var')
val = p()
# print(val)
assert (val == .9).all()
def test_choice_two_params_with_time(self):
loader = ExcelParameterLoader(filename='./test_excelparameterloader.xlsx',
times=pd.date_range('2009-01-01', '2009-03-01', freq='MS'), size=10)
repository = ParameterRepository()
loader.load_into_repo(sheet_name='Sheet1', repository=repository)
tag_param_dict = repository.find_by_tag('user')
keys = tag_param_dict.keys()
print(keys)
assert 'a' in keys
repository['a']()
print(tag_param_dict['a'])
def test_uniform(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('b')
val = p()
assert (val >= 2) & (val <= 4)
def test_uniform_time(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx',
times=pd.date_range('2009-01-01', '2015-05-01', freq='MS'), size=10
).load_into_repo(sheet_name='Sheet1', repository=repository)
p = repository.get_parameter('b')
val = p()
print(val)
print(type(val))
assert (val >= 2).all() & (val <= 4).all()
def test_uniform_mean(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('b')
val = p({'sample_mean_value': True, 'sample_size': 5})
print(val)
assert (val == 3).all()
    def test_parameter_getvalue_with_settings_mean_growth(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx', excel_handler='xlrd').load_into_repo(
sheet_name='Sheet1', repository=repository)
p = repository.get_parameter('uniform_dist_growth')
settings = {'sample_size': 1, 'sample_mean_value': True, 'use_time_series': True,
'times': pd.date_range('2009-01-01', '2010-01-01', freq='MS')}
val = p(settings)
print(val)
n = np.mean(val)
assert n > 0.7
def test_uniform_mean_time(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx',
times=pd.date_range('2009-01-01', '2015-05-01', freq='MS'),
size=10,
sample_mean_value=True
).load_into_repo(sheet_name='Sheet1', repository=repository)
p = repository.get_parameter('b')
val = p()
# print(val)
# print(type(val))
assert (val == 3).all()
    def test_triangular(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('c')
res = p()
assert (res < 10.) & (res > 3.)
    def test_triangular_time(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test_excelparameterloader.xlsx',
times=pd.date_range('2009-01-01', '2015-05-01', freq='MS'), size=10
).load_into_repo(sheet_name='Sheet1', repository=repository)
p = repository.get_parameter('c')
res = p()
assert (res < 10.).all() & (res > 3.).all()
def test_normal(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test.xlsx', excel_handler='xlwings').load_into_repo(sheet_name='Sheet1',
repository=repository)
# print('\n')
p = repository['e']
val = p()
print(val)
def test_triangular_timeseries(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='./test.xlsx').load_into_repo(sheet_name='Sheet1', repository=repository)
p = repository.get_parameter('c')
settings = {
'use_time_series': True,
'times': pd.date_range('2009-01-01', '2015-05-01',
freq='MS'),
'sample_size': 10,
# 'cagr': 0,
# 'sample_mean_value': True
}
res = p(settings)
print(res)
assert (res < 10.).all() & (res > 3.).all()
def test_formulas_fix_row(self):
repository = ParameterRepository()
ExcelParameterLoader(filename='/Users/csxds/workspaces/bbc/ngmodel/data/tmp/public_model_params.xlsx',
times=pd.date_range('2009-01-01', '2015-05-01', freq='MS'), size=10,
).load_into_repo(sheet_name='iplayer', repository=repository)
p = repository.get_parameter('requests_Tablet_Cell')
res = p()
print(res.mean())
assert (res > 0).all()
def test_formulas_fix_row_ms_excel_online(self):
repository = ParameterRepository()
# ExcelParameterLoader(filename='/Users/csxds/Downloads/public_model_params.xlsx-4.xlsx',
ExcelParameterLoader(filename='/Users/csxds/workspaces/bbc/ngmodel/data/tmp/public_model_params.xlsx',
times=pd.date_range('2009-01-01', '2015-05-01', freq='MS'), size=10,
).load_into_repo(sheet_name='iplayer', repository=repository)
p = repository.get_parameter('requests_Tablet_Cell_3')
res = p()
print(res.mean())
assert (res > 0).all()
def test_formulas_ref_sheet_by_name(self):
repository = ParameterRepository()
# ExcelParameterLoader(filename='/Users/csxds/Downloads/public_model_params.xlsx-4.xlsx',
ExcelParameterLoader(filename='/Users/csxds/workspaces/bbc/ngmodel/data/tmp/public_model_params.xlsx',
times=pd.date_range('2009-01-01', '2015-05-01', freq='MS'), size=10,
excel_handler='xlwings'
).load_into_repo(sheet_name='Distribution', repository=repository)
p = repository.get_parameter('embodied_carbon_intensity_per_dv')
res = p()
print(res.mean())
assert (res > 0).all()
if __name__ == '__main__':
unittest.main()
class TestCAGRCalculation(unittest.TestCase):
def test_identitical_month(self):
"""
If start and end are identical, we expect an array of one row of ones of sample size
:return:
"""
samples = 3
alpha = 1 # 100 percent p.a.
ref_date = date(2009, 1, 1)
start_date = date(2009, 1, 1)
end_date = date(2009, 1, 1)
a = growth_coefficients(start_date, end_date, ref_date, alpha, samples)
assert np.all(a == np.ones((samples, 1)))
def test_one_year(self):
"""
        If start and end are one year apart, we expect a first row of ones
        of sample size for the ref month, followed by rows with the monthly
        CAGR factor applied
:return:
"""
samples = 3
alpha = 1 # 100 percent p.a.
ref_date = date(2009, 1, 1)
start_date = date(2009, 1, 1)
end_date = date(2010, 1, 1)
a = growth_coefficients(start_date, end_date, ref_date, alpha, samples)
print(a)
assert np.all(a[0] == np.ones((samples, 1)))
assert np.all(a[1] == np.ones((samples, 1)) * pow(1 + alpha, 1. / 12))
    def test_one_year_half_growth(self):
        """
        If start and end are one year apart, we expect a first row of ones
        of sample size for the ref month, followed by rows with the monthly
        CAGR factor applied
:return:
"""
samples = 3
        alpha = 0.5  # 50 percent p.a.
ref_date = date(2009, 1, 1)
start_date = date(2009, 1, 1)
end_date = date(2010, 1, 1)
a = growth_coefficients(start_date, end_date, ref_date, alpha, samples)
print(a)
assert np.all(a[0] == np.ones((samples, 1)))
assert np.all(a[1] == np.ones((samples, 1)) * pow(1 + alpha, 1. / 12))
def test_negative_growth(self):
"""
        If start and end are one year apart with negative growth, we expect
        a first row of ones of sample size for the ref month and a final row
        with the full year's decline (1 + alpha) applied
:return:
"""
samples = 3
        alpha = -0.1  # -10 percent p.a.
ref_date = date(2009, 1, 1)
start_date = date(2009, 1, 1)
end_date = date(2010, 1, 1)
a = growth_coefficients(start_date, end_date, ref_date, alpha, samples)
print(a)
assert np.all(a[0] == np.ones((samples, 1)))
        assert np.all(a[-1] == np.ones((samples, 1)) * (1 + alpha))
def test_refdate_after_start(self):
"""
If the ref date is greater than the start, we expect an array of one row of ones of sample size for the ref month
and rows with (1-CAGR)^t applied
:return:
"""
samples = 3
alpha = 0.1 # 10 percent p.a.
ref_date = date(2009, 2, 1)
start_date = date(2009, 1, 1)
end_date = date(2009, 2, 1)
a = growth_coefficients(start_date, end_date, ref_date, alpha, samples)
assert a.shape == (2, samples)
# first row has negative coefficients
assert np.all(a[0] == np.ones((samples, 1)) * pow(1 - alpha, 1. / 12))
# second row has ref values
assert np.all(a[-1] == np.ones((samples, 1)))
def test_refdate_between_start_and_end(self):
"""
If the ref date is greater than the start, we expect an array of one row of ones of sample size for the ref month
and rows with (1-CAGR)^t applied
:return:
"""
samples = 3
alpha = 0.1 # 10 percent p.a.
ref_date = date(2009, 3, 1)
start_date = date(2009, 1, 1)
end_date = date(2009, 6, 1)
delta = relativedelta.relativedelta(end_date, start_date)
total_months = delta.months + delta.years * 12 + 1
ref_delta = relativedelta.relativedelta(ref_date, start_date)
ref_row_idx = ref_delta.months + ref_delta.years * 12
a = growth_coefficients(start_date, end_date, ref_date, alpha, samples)
# print a
assert a.shape == (total_months, samples)
# the ref_row_idx has all ones
assert np.all(a[ref_row_idx] == np.ones((samples, 1)))
# the row before ref_row_idx has negative coefficients
assert np.all(a[ref_row_idx - 1] == np.ones((samples, 1)) * pow(1 - alpha, 1. / 12))
# the row after ref_row_idx has positive coefficients
assert np.all(a[ref_row_idx + 1] == np.ones((samples, 1)) * pow(1 + alpha, 1. / 12))
# the last row has positive coefficients
assert np.all(a[-1] == np.ones((samples, 1)) * pow(1 + alpha, float(total_months - 1 - ref_row_idx) / 12))
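# A worked sanity check of the monthly factor these tests assert against
# (pure arithmetic, independent of growth_coefficients): with alpha = 1.0
# (100% p.a.), one month of growth is (1 + 1.0) ** (1 / 12.) ~= 1.0595, and
# twelve such months compound back to exactly 2.0.
def _monthly_growth_factor(alpha, months=1):
    return pow(1.0 + alpha, months / 12.0)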
|
dschien/PyExcelModelingHelper
|
tests/test_excel_loader.py
|
Python
|
mit
| 17,168
|
#! /usr/bin/env python
'''
vcfreq.py: Convert frequency to V/oct signal
Copyright (c) 2020 Bill Gribble <grib@billgribble.com>
'''
from ..processor import Processor
from ..mfp_app import MFPApp
from ..bang import Uninit
class VCFreq(Processor):
doc_tooltip_obj = "Convert frequency (Hz) to V/oct signal"
doc_tooltip_inlet = ["Signal input", "Reference frequency (A4, default=440)"]
doc_tooltip_outlet = ["Signal output"]
A4_C0_RATIO = 26.908692
DEFAULT_C0 = 16.35159375
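    # A4_C0_RATIO is the frequency ratio between A4 (440 Hz) and C0
    # (DEFAULT_C0): 440 / 16.35159375 ~= 26.908692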
def __init__(self, init_type, init_args, patch, scope, name):
Processor.__init__(self, 2, 1, init_type, init_args, patch, scope, name)
initargs, kwargs = self.parse_args(init_args)
if len(initargs):
self.base_freq = float(initargs[0])/self.A4_C0_RATIO
else:
self.base_freq = self.DEFAULT_C0
self.hot_inlets = [0, 1]
self.dsp_inlets = [0]
self.dsp_outlets = [0]
self.dsp_init("vcfreq~", base_freq=self.base_freq)
def trigger(self):
if self.inlets[0] is not Uninit:
val = float(self.inlets[0])
self.dsp_obj.setparam("_sig_0", val)
if self.inlets[1] is not Uninit:
            val = float(self.inlets[1])/self.A4_C0_RATIO
self.dsp_obj.setparam("base_freq", val)
def register():
MFPApp().register("vcfreq~", VCFreq)
|
bgribble/mfp
|
mfp/builtins/vcfreq.py
|
Python
|
gpl-2.0
| 1,367
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Logit", sigma = 0.0, exog_count = 100, ar_order = 12);
|
antoinecarme/pyaf
|
tests/artificial/transf_Logit/trend_MovingAverage/cycle_12/ar_12/test_artificial_128_Logit_MovingAverage_12_12_100.py
|
Python
|
bsd-3-clause
| 267
|
from clockwork import clockwork
from pprint import pprint
api = clockwork.API('4d377b576ea0eff6f4a0be8248e57c5e12b27798')
def send_sms(number, msg):
message = clockwork.SMS(
to = number,
message = msg)
response = api.send(message)
if response.success:
print (response.id)
else:
print ("there was an error")
print (response.error_code)
pprint (dir(response))
|
parisandmilo/Ko-lect-FoC2015
|
python-backend/api_modules/sms.py
|
Python
|
mit
| 418
|
import datetime
import logging
from django.conf import settings
from django.core.exceptions import NON_FIELD_ERRORS
from google.appengine.api.datastore import Key, Delete, MAX_ALLOWABLE_QUERIES
from google.appengine.datastore.datastore_rpc import TransactionOptions
from google.appengine.ext import db
from .unique_utils import unique_identifiers_from_entity
from .utils import key_exists
from djangae.db.backends.appengine.dbapi import IntegrityError, NotSupportedError
DJANGAE_LOG = logging.getLogger("djangae")
def has_active_unique_constraints(model_or_instance):
"""
Returns true if the model/instance has unique fields or unique_together fields and unique
constraint checking is enabled on the model
"""
django_opts = getattr(model_or_instance, "_meta", None)
# If there are no unique fields on the model, return false
if not django_opts.unique_together and not any(x.unique for x in django_opts.fields):
return False
opts = getattr(model_or_instance, "Djangae", None)
if opts:
if hasattr(opts, "disable_constraint_checks"):
if opts.disable_constraint_checks:
return False
else:
return True
return not getattr(settings, "DJANGAE_DISABLE_CONSTRAINT_CHECKS", False)
class KeyProperty(db.Property):
"""A property that stores a datastore.Key reference to another object.
Think of this as a Django GenericForeignKey which returns only the PK value, not the whole
object, or a db.ReferenceProperty which can point to any model kind, and only returns the Key.
"""
def validate(self, value):
if value is None or isinstance(value, Key):
return value
raise ValueError("KeyProperty only accepts datastore.Key or None")
class UniqueMarker(db.Model):
instance = KeyProperty()
created = db.DateTimeProperty(required=True, auto_now_add=True)
@staticmethod
def kind():
return "_djangae_unique_marker"
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def acquire_identifiers(identifiers, entity_key):
return _acquire_identifiers(identifiers, entity_key)
def _acquire_identifiers(identifiers, entity_key):
    # This must always be in a cross-group transaction, because even if there's only 1 identifier,
# in the case where that identifier already exists, we then check if its `instance` exists
assert entity_key
namespace = entity_key.namespace() or None
identifier_keys = [
Key.from_path(UniqueMarker.kind(), identifier, namespace=namespace) for identifier in identifiers
]
existing_markers = UniqueMarker.get(identifier_keys)
markers_to_create = []
markers = []
for identifier_key, existing_marker in zip(identifier_keys, existing_markers):
        # Backwards compatibility: we used to create the markers first in an independent transaction
# and then create the entity and update the `instance` on the markers. This meant that it
# was possible that the independent marker creation transaction finished first and the outer
# transaction failed, causing stale markers to be left behind. We no longer do it this way
# but we still want to ignore any old stale markers, hence if instance is None we overwrite.
now = datetime.datetime.utcnow()
if not existing_marker or existing_marker.instance is None:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
elif existing_marker.instance != entity_key and key_exists(existing_marker.instance):
fields_and_values = identifier_key.name().split("|")
table_name = fields_and_values[0]
fields_and_values = fields_and_values[1:]
fields = [x.split(":")[0] for x in fields_and_values]
raise IntegrityError("Unique constraint violation for kind {} on fields: {}".format(table_name, ", ".join(fields)))
elif existing_marker.instance != entity_key:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
else:
# The marker is ours anyway
markers.append(existing_marker)
db.put(markers_to_create)
return markers + markers_to_create
def get_markers_for_update(model, old_entity, new_entity):
"""
Given an old entity state, and the new state, updates the identifiers
appropriately. Should be called before saving the new_state
"""
old_ids = set(unique_identifiers_from_entity(model, old_entity, ignore_pk=True))
new_ids = set(unique_identifiers_from_entity(model, new_entity, ignore_pk=True))
to_release = old_ids - new_ids
to_acquire = new_ids - old_ids
return to_acquire, to_release
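# A minimal illustration of the set arithmetic above (hypothetical identifier
# strings in the "kind|field:value" shape, not real marker keys):
def _example_marker_diff():
    old_ids = {"user|email:a@example.com", "user|username:alice"}
    new_ids = {"user|email:b@example.com", "user|username:alice"}
    # the shared identifier is untouched; only the differences change hands
    return new_ids - old_ids, old_ids - new_ids  # (to_acquire, to_release)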
def update_instance_on_markers(entity, markers):
# TODO: fix me!
def update(marker, instance):
marker = UniqueMarker.get(marker.key())
if not marker:
return
marker.instance = instance
marker.put()
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_all():
instance = entity.key()
for marker in markers:
update(marker, instance)
update_all()
def acquire(model, entity):
"""
Given a model and entity, this tries to acquire unique marker locks for the instance. If
the locks already exist then an IntegrityError will be thrown.
"""
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
return acquire_identifiers(identifiers, entity.key())
def release_markers(markers):
""" Delete the given UniqueMarker objects. """
    # Note that these should all be from the same Django model instance, and therefore there
    # should be a maximum of 25 of them (the datastore's cross-group transaction limit)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(markers) > 1)
def txn():
Delete([marker.key() for marker in markers])
txn()
def release_identifiers(identifiers, namespace):
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(identifiers) > 1)
def txn():
_release_identifiers(identifiers, namespace)
txn()
def _release_identifiers(identifiers, namespace):
keys = [Key.from_path(UniqueMarker.kind(), x, namespace=namespace) for x in identifiers]
Delete(keys)
def release(model, entity):
""" Delete the UniqueMarker objects for the given entity. """
if not has_active_unique_constraints(model):
return
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
# Key.from_path expects None for an empty namespace, but Key.namespace() returns ''
namespace = entity.key().namespace() or None
release_identifiers(identifiers, namespace=namespace)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_identifiers(to_acquire, to_release, key):
""" A combination of acquire_identifiers and release_identifiers in a combined transaction. """
_acquire_identifiers(to_acquire, key)
_release_identifiers(to_release, key.namespace() or None)
class UniquenessMixin(object):
""" Mixin overriding the methods checking value uniqueness.
For models defining unique constraints this mixin should be inherited from.
When iterable (list or set) fields are marked as unique it must be used.
This is a copy of Django's implementation, save for the part marked by the comment.
"""
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
continue
if f.primary_key and not self._state.adding:
continue
##########################################################################
# This is a modification to Django's native implementation of this method;
# we conditionally build a __in lookup if the value is an iterable.
lookup = str(field_name)
if isinstance(lookup_value, (list, set, tuple)):
lookup = "%s__overlap" % lookup
lookup_kwargs[lookup] = lookup_value
##########################################################################
# / end of changes
if len(unique_check) != len(lookup_kwargs):
continue
#######################################################
# Deal with long __in lookups by doing multiple queries in that case
# This is a bit hacky, but we really have no choice due to App Engine's
# 30 multi-query limit. This also means we can't support multiple list fields in
# a unique combination
#######################################################
if len([x for x in lookup_kwargs if x.endswith("__in")]) > 1:
raise NotSupportedError("You cannot currently have two list fields in a unique combination")
# Split IN queries into multiple lookups if they are too long
lookups = []
for k, v in lookup_kwargs.iteritems():
if (k.endswith("__in") or k.endswith("__overlap")) and len(v) > MAX_ALLOWABLE_QUERIES:
v = list(v)
while v:
new_lookup = lookup_kwargs.copy()
new_lookup[k] = v[:30]
v = v[30:]
lookups.append(new_lookup)
break
else:
# Otherwise just use the one lookup
lookups = [lookup_kwargs]
for lookup_kwargs in lookups:
qs = model_class._default_manager.filter(**lookup_kwargs).values_list("pk", flat=True)
model_class_pk = self._get_pk_val(model_class._meta)
result = list(qs)
if not self._state.adding and model_class_pk is not None:
                    # If we are saving an instance, we ignore its PK in the result
try:
result.remove(model_class_pk)
except ValueError:
pass
if result:
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
break
return errors
|
kirberich/djangae
|
djangae/db/constraints.py
|
Python
|
bsd-3-clause
| 11,037
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# ElevProf0.py
# Created on: 2015-11-09 15:56:06.00000
# (generated by ArcGIS/ModelBuilder)
# Usage: ElevProf0 <SectionLine> <Distance> <Route_Identifier_Field__2_> <Expression> <NED10m1>
# Description:
# Generates Elevation Profiles for section lines and points where section lines are.
# ---------------------------------------------------------------------------
# Set the necessary product code
# import arcinfo
# Import arcpy module
import arcpy
# Script arguments
SectionLine = arcpy.GetParameterAsText(0)
if SectionLine == '#' or not SectionLine:
SectionLine = "C:\\PROJECTS\\UMAR\\AHGW_UMAR.mdb\\Subsurface\\SectionLine" # provide a default value if unspecified
Distance = arcpy.GetParameterAsText(1)
if Distance == '#' or not Distance:
Distance = "10 Meters" # provide a default value if unspecified
Route_Identifier_Field__2_ = arcpy.GetParameterAsText(2)
if Route_Identifier_Field__2_ == '#' or not Route_Identifier_Field__2_:
Route_Identifier_Field__2_ = "SName" # provide a default value if unspecified
Expression = arcpy.GetParameterAsText(3)
if Expression == '#' or not Expression:
Expression = "!Z! *10" # provide a default value if unspecified
NED10m1 = arcpy.GetParameterAsText(4)
if NED10m1 == '#' or not NED10m1:
NED10m1 = "C:\\PROJECTS\\UMAR\\CV_ASR.gdb\\NED10m1" # provide a default value if unspecified
# Local variables:
Output_Feature_Class__3_ = SectionLine
SectionLine_SimplifyLine_Pnt = SectionLine
Output_Feature_Class__6_ = "C:\\PROJECTS\\UMAR\\Default.gdb\\SectionLine_FeatureVerticesT1"
SectionLine_FeatureVerticesT1 = Output_Feature_Class__6_
SectionLine_FeatureVerticesT1__3_ = SectionLine_FeatureVerticesT1
Measure_Factor = "1"
SectionRoute = "C:\\PROJECTS\\UMAR\\Default.gdb\\SectionRoute"
Output_Event_Table_Properties = "RID POINT MEAS"
SectionLine_SimplifyLine = "C:\\PROJECTS\\UMAR\\Default.gdb\\SectionLine_SimplifyLine"
SectionLine_SimplifyLine_Fea = "C:\\PROJECTS\\UMAR\\Default.gdb\\SectionLine_SimplifyLine_Fea"
SectionLine_SimplifyLine_Fea__3_ = SectionLine_SimplifyLine_Fea
SectionLine_SimplifyLine_Fea__2_ = SectionLine_SimplifyLine_Fea__3_
Output_Event_Table_Properties__2_ = "RID POINT MEAS"
ElevationPoints = "C:\\PROJECTS\\UMAR\\Default.gdb\\ElevationPoints"
ElevationPoints__2_ = ElevationPoints
ElevationPoints__3_ = ElevationPoints__2_
ElevationPoints_Layer = "ElevationPoints_Layer"
ElevationProfile = "C:\\PROJECTS\\UMAR\\Default.gdb\\ElevationProfile"
SectionBends = "C:\\PROJECTS\\UMAR\\Default.gdb\\SectionBends"
SectionBends__2_ = SectionBends
SectionBends__3_ = SectionBends__2_
SectionBends_Layer = "SectionBends_Layer"
SectionBendPoints = "C:\\PROJECTS\\UMAR\\Default.gdb\\SectionBendPoints"
# Process: Densify
arcpy.Densify_edit(SectionLine, "DISTANCE", Distance, "0.1 Meters", "10")
# Process: Feature Vertices To Points (2)
arcpy.FeatureVerticesToPoints_management(Output_Feature_Class__3_, Output_Feature_Class__6_, "ALL")
# Process: Add Surface Information
arcpy.AddSurfaceInformation_3d(Output_Feature_Class__6_, NED10m1, "Z", "BILINEAR", "", "1", "0", "NO_FILTER")
# Process: Add XY Coordinates (2)
arcpy.AddXY_management(SectionLine_FeatureVerticesT1)
# Process: Create Routes
arcpy.CreateRoutes_lr(SectionLine, Route_Identifier_Field__2_, SectionRoute, "LENGTH", "", "", "UPPER_LEFT", Measure_Factor, "0", "IGNORE", "INDEX")
# Process: Locate Features Along Routes
arcpy.LocateFeaturesAlongRoutes_lr(SectionLine_FeatureVerticesT1__3_, SectionRoute, Route_Identifier_Field__2_, "0 Meters", ElevationPoints, Output_Event_Table_Properties, "FIRST", "DISTANCE", "ZERO", "FIELDS", "M_DIRECTON")
# Process: Simplify Line
arcpy.SimplifyLine_cartography(SectionLine, SectionLine_SimplifyLine, "POINT_REMOVE", "10 Meters", "FLAG_ERRORS", "NO_KEEP", "NO_CHECK")
# Process: Feature Vertices To Points
arcpy.FeatureVerticesToPoints_management(SectionLine_SimplifyLine, SectionLine_SimplifyLine_Fea, "ALL")
# Process: Add Surface Information (2)
arcpy.AddSurfaceInformation_3d(SectionLine_SimplifyLine_Fea, NED10m1, "Z", "BILINEAR", "", "1", "0", "NO_FILTER")
# Process: Add XY Coordinates
arcpy.AddXY_management(SectionLine_SimplifyLine_Fea__3_)
# Process: Locate Features Along Routes (2)
arcpy.LocateFeaturesAlongRoutes_lr(SectionLine_SimplifyLine_Fea__2_, SectionRoute, Route_Identifier_Field__2_, "0 Meters", SectionBends, Output_Event_Table_Properties__2_, "FIRST", "DISTANCE", "ZERO", "FIELDS", "M_DIRECTON")
# Process: Add Field
arcpy.AddField_management(ElevationPoints, "SectionY", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field
arcpy.CalculateField_management(ElevationPoints__2_, "SectionY", Expression, "PYTHON", "")
# Process: Make XY Event Layer
arcpy.MakeXYEventLayer_management(ElevationPoints__3_, "MEAS", "SectionY", ElevationPoints_Layer, "PROJCS['NAD_1983_UTM_Zone_12N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-111.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]];-5120900 -9998100 10000;-100000 10000;-100000 10000;0.001;0.001;0.001;IsHighPrecision", "")
# Process: Points To Line
arcpy.PointsToLine_management(ElevationPoints_Layer, ElevationProfile, "RID", "MEAS", "NO_CLOSE")
# Process: Add Field (2)
arcpy.AddField_management(SectionBends, "SectionY", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (2)
arcpy.CalculateField_management(SectionBends__2_, "SectionY", Expression, "PYTHON", "")
# Process: Make XY Event Layer (2)
arcpy.MakeXYEventLayer_management(SectionBends__3_, "MEAS", "SectionY", SectionBends_Layer, "PROJCS['NAD_1983_UTM_Zone_12N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-111.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]];-5120900 -9998100 10000;-100000 10000;-100000 10000;0.001;0.001;0.001;IsHighPrecision", "")
# Process: Copy Features
arcpy.CopyFeatures_management(SectionBends_Layer, SectionBendPoints, "", "0", "0", "0")
|
inkenbrandt/ArcPy
|
CrossSectionTool/ElevProf0.py
|
Python
|
gpl-2.0
| 6,568
|
__author__ = "Marie E. Rognes (meg@simula.no)"
__copyright__ = "Copyright (C) 2012 Marie Rognes"
__license__ = "Distribute at will"
"""
Schematic drawing (starts with 1 springs, starts with 0 dashpots)
| A10 --- A00 |
----- | | --------
| A11 |
Standard linear solid (SLS) viscoelastic model:
A_E^0 \dot \sigma_0 + A_V^0 \sigma_0 = strain(u)
A_E^1 \dot \sigma_1 = strain(v)
\sigma = \sigma_0 + \sigma_1
\div \sigma = gx
\skew \sigma = 0
NB: Mesh in mm, remember that Pa = N/m^2 = kg/(m s^2) = g/(mm s^2)
Give bc and Lame parameters in kPa -> displacements in mm, velocities
in mm/s, stresses in kPa
"""
import sys
import pylab
from dolfin import *
from dolfin import div as d
penalty_beta = 10**8 # NB: Sensitive to this for values less than 10^6
dirname = "test-results"
# Vectorized div
def div(v):
return as_vector((d(v[0]), d(v[1]), d(v[2])))
# Vectorized skew
def skw(tau):
s = 2*skew(tau) # FIXME: Why did I put a 2 here?
return as_vector((s[0][1], s[0][2], s[1][2]))
# Compliance tensors (Semi-arbitrarily chosen values and units)
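# Each A* below is the standard isotropic compliance (the inverse of the
# Hooke stiffness tensor) for the stated (mu, lamda) pair:
# A(tau) = 1/(2*mu) * (tau - lamda/(2*mu + 3*lamda) * tr(tau) * I)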
def A00(tau):
"Maxwell dashpot (eta)"
mu = Constant(3.7466 * 10) # kPa
lamda = Constant(10**4) # kPa
foo = 1.0/(2*mu)*(tau - lamda/(2*mu + 3*lamda)*tr(tau)*Identity(3))
return foo
def A10(tau):
"Maxwell spring (A2)"
mu = Constant(4.158)
lamda = Constant(10**3) # kPa
foo = 1.0/(2*mu)*(tau - lamda/(2*mu + 3*lamda)*tr(tau)*Identity(3))
return foo
def A11(tau):
"Elastic spring (A1)"
mu = Constant(2.39) # kPa
lamda = Constant(10**3) # kPa
foo = 1.0/(2*mu)*(tau - lamda/(2*mu + 3*lamda)*tr(tau)*Identity(3))
return foo
def get_box():
"Use this for simple testing."
n = 2
mesh = Box(0., 0., 0., 20., 20., 100., 2*n, 2*n, 10*n)
# Mark all facets by 0, exterior facets by 1, and then top and
# bottom by 2
boundaries = FacetFunction("uint", mesh)
boundaries.set_all(0)
on_bdry = AutoSubDomain(lambda x, on_boundary: on_boundary)
top = AutoSubDomain(lambda x, on_boundary: near(x[2], 100.))
bottom = AutoSubDomain(lambda x, on_boundary: near(x[2], 0.0))
on_bdry.mark(boundaries, 1)
top.mark(boundaries, 2)
bottom.mark(boundaries, 2)
return (mesh, boundaries)
def get_spinal_cord():
"Mesh generated by Martin Alnaes using VMTK"
#mesh = Mesh("../mesh_edgelength4.xml.gz") # Coarse mesh
mesh = Mesh("mesh_edgelength2.xml.gz")
boundaries = mesh.domains().facet_domains(mesh)
for (i, a) in enumerate(boundaries.array()):
if a > 10:
boundaries.array()[i] = 0
if a == 3:
boundaries.array()[i] = 2
return (mesh, boundaries)
def crank_nicolson_step(Z, z_, k_n, g, v_D_mid, ds):
# Define trial and test functions
(sigma0, sigma1, v, gamma) = TrialFunctions(Z)
(tau0, tau1, w, eta) = TestFunctions(Z)
# Extract previous components
(sigma0_, sigma1_, v_, gamma_) = split(z_)
# Define midpoint values for brevity
def avg(q, q_):
return 0.5*(q + q_)
sigma0_mid = avg(sigma0, sigma0_)
sigma1_mid = avg(sigma1, sigma1_)
v_mid = avg(v, v_)
gamma_mid = avg(gamma, gamma_)
# Define form
n = FacetNormal(Z.mesh())
F = (inner(inv(k_n)*A10(sigma0 - sigma0_), tau0)*dx
+ inner(A00(sigma0_mid), tau0)*dx
+ inner(inv(k_n)*A11(sigma1 - sigma1_), tau1)*dx
+ inner(div(tau0 + tau1), v_mid)*dx
+ inner(skw(tau0 + tau1), gamma_mid)*dx
+ inner(div(sigma0_mid + sigma1_mid), w)*dx
+ inner(skw(sigma0_mid + sigma1_mid), eta)*dx
- inner(0.5*v_, (tau0 + tau1)*n)*ds(1)
- inner(v_D_mid, (tau0 + tau1)*n)*ds(2) # Velocity on dO_D
)
# Tricky to enforce Dirichlet boundary conditions on varying sums
# of components (same deal as for slip for Stokes for
# instance). Use penalty instead
beta = Constant(penalty_beta)
h = tetrahedron.volume
F_penalty = 0.5*(beta*inv(h)*inner((tau0 + tau1)*n,
(sigma0 + sigma1)*n - g)*ds(1))
F = F + F_penalty
return F
def bdf2_step(Z, z_, z__, k_n, g, v_D, ds):
# Define trial and test functions
(sigma0, sigma1, v, gamma) = TrialFunctions(Z)
(tau0, tau1, w, eta) = TestFunctions(Z)
# Extract previous components
(sigma0_, sigma1_, v_, gamma_) = split(z_)
(sigma0__, sigma1__, v__, gamma__) = split(z__)
# Define complete form
n = FacetNormal(Z.mesh())
F = (inner(inv(k_n)*A10(1.5*sigma0 - 2.*sigma0_ + 0.5*sigma0__), tau0)*dx
+ inner(A00(sigma0), tau0)*dx
+ inner(inv(k_n)*A11(1.5*sigma1 - 2.*sigma1_ + 0.5*sigma1__), tau1)*dx
+ inner(div(tau0 + tau1), v)*dx
+ inner(skw(tau0 + tau1), gamma)*dx
+ inner(div(sigma0 + sigma1), w)*dx
+ inner(skw(sigma0 + sigma1), eta)*dx
- inner(v_D, (tau0 + tau1)*n)*ds(2)
)
# Enforce essential bc on stress by penalty
beta = Constant(penalty_beta)
h = tetrahedron.volume
F_penalty = beta*inv(h)*inner((tau0 + tau1)*n,
(sigma0 + sigma1)*n - g)*ds(1)
F = F + F_penalty
return F
# Quick testing for box:
(mesh, boundaries) = get_box()
p = Expression("0.05*sin(2*pi*t)*1.0/(100)*x[2]", t=0)
# Semi-realistic stuff:
#(mesh, boundaries) = get_spinal_cord()
#p = Expression("0.05*sin(2*pi*t)*(1.0/(171 - 78)*(x[2] - 78))", t=0) # kPa
# Define function spaces
S = VectorFunctionSpace(mesh, "BDM", 1)
V = VectorFunctionSpace(mesh, "DG", 0)
Q = VectorFunctionSpace(mesh, "DG", 0)
CG1 = VectorFunctionSpace(mesh, "CG", 1)
Z = MixedFunctionSpace([S, S, V, Q])
def main(ic, T=1.0, dt=0.01):
# dk = half the timestep
dk = dt/2.0
parameters["form_compiler"]["optimize"] = True
parameters["form_compiler"]["cpp_optimize"] = True
ds = Measure("ds")[boundaries]
# Define functions for previous timestep (z_), half-time (z_star)
# and current (z)
z_ = Function(ic)
z_star = Function(Z)
z = Function(Z)
# Boundary conditions
v_D_mid = Function(V) # Velocity condition at half time
v_D = Function(V) # Velocity condition at time
# Boundary traction (pressure originating from CSF flow)
n = FacetNormal(mesh)
g = - p*n
F_cn = crank_nicolson_step(Z, z_, Constant(dk), g, v_D_mid, ds)
(a_cn, L_cn) = system(F_cn)
A_cn = assemble(a_cn)
cn_solver = LUSolver(A_cn)
cn_solver.parameters["reuse_factorization"] = True
F_bdf = bdf2_step(Z, z_star, z_, Constant(dk), g, v_D, ds)
(a_bdf, L_bdf) = system(F_bdf)
A_bdf = assemble(a_bdf)
bdf_solver = LUSolver(A_bdf)
bdf_solver.parameters["reuse_factorization"] = True
progress = Progress("Time-iteration", int(T/dt))
t = dk
iteration = 1
while (t <= T):
# Half-time step:
# Update source(s)
p.t = t
# Assemble right-hand side for CN system
b = assemble(L_cn)
# Solve Crank-Nicolson system
cn_solver.solve(z_star.vector(), b)
# Increase time
t += dk
# Next-time step:
# Update sources
p.t = t
# Assemble right-hand side for BDF system
b = assemble(L_bdf)
# Solve BDF system
bdf_solver.solve(z.vector(), b)
# Update time and variables
t += dk
z_.assign(z)
progress += 1
iteration += 1
return z_
if __name__ == "__main__":
# Adjust behaviour at will:
T = 0.05
dt = 0.01
set_log_level(PROGRESS)
ic = Function(Z)
ic_copy = Function(ic)
# Play forward run
info_blue("Running forward ... ")
z = main(ic, T=T, dt=dt)
|
pf4d/dolfin-adjoint
|
tests_dolfin/viscoelasticity/timings/unannotated.py
|
Python
|
lgpl-3.0
| 7,692
|
import pytest
from unittest.mock import patch
from unittest.mock import Mock, call
import socket
import chatbot.chatbot
import chatbot.responder
class TestChatbot:
def construct(self, sock, Responder):
self.tested = chatbot.chatbot.Chatbot( sock )
Responder.assert_called_once_with()
@patch('chatbot.responder.Responder')
def test_construction(self, Responder):
sock = Mock()
self.construct(sock, Responder)
@patch('chatbot.responder.Responder')
def test_request_response_loop(self, Responder):
sock = Mock()
responder = Mock()
Responder.side_effect = [ responder ]
self.construct(sock, Responder)
class EndTestException(Exception): pass
REQUESTS = [f'request {i}' for i in range(10)]
RESPONSES = [f'response {i}' for i in range(10)]
responder.process.side_effect = RESPONSES
sock.recv.side_effect = REQUESTS + [EndTestException]
with pytest.raises(EndTestException):
self.tested.go()
sock.recv.assert_has_calls( [ call(4096) ] * 10 )
responder.process.assert_has_calls( [ call(request) for request in REQUESTS ] )
sock.send.assert_has_calls( [ call( response ) for response in RESPONSES ] )
|
haarcuba/testix
|
chatbot/test/test_chatbot_with_unittest_mock.py
|
Python
|
mit
| 1,274
|
import unittest2
import json
from consts.award_type import AwardType
from datafeeds.usfirst_event_awards_parser_02 import UsfirstEventAwardsParser_02
def convert_to_comparable(data):
"""
Converts jsons to dicts so that elements can be more easily compared
"""
if type(data) == list:
return [convert_to_comparable(e) for e in data]
elif type(data) == dict:
to_return = {}
for key, value in data.items():
to_return[key] = convert_to_comparable(value)
return to_return
elif type(data) == str or type(data) == unicode:
try:
return json.loads(data)
except ValueError:
return data
else:
return data
class TestUsfirstEventAwardsParser_02(unittest2.TestCase):
def test_parse_regional_2002(self):
with open('test_data/usfirst_html/usfirst_event_awards_2002sj.html', 'r') as f:
awards, _ = UsfirstEventAwardsParser_02.parse(f.read())
# Check number of parsed awards
num_awards = 0
for award in awards:
num_awards += len(award['recipient_json_list'])
self.assertEqual(num_awards, 26)
self.assertEqual(len(awards), 20)
awards = convert_to_comparable(awards)
# Test Team Award
team_award = {
'name_str': u"Regional Chairmans Award",
'award_type_enum': AwardType.CHAIRMANS,
'team_number_list': [192],
'recipient_json_list': [{'team_number': 192, 'awardee': None}],
}
self.assertTrue(team_award in awards)
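        # Test Individual Award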
individual_award = {
'name_str': "#1 Seed",
'award_type_enum': AwardType.NUM_1_SEED,
'team_number_list': [254],
'recipient_json_list': [{'team_number': 254, 'awardee': None}],
}
self.assertTrue(individual_award in awards)
# Test Multi Team Award
multi_team_award = {
'name_str': "Regional Winner",
'award_type_enum': AwardType.WINNER,
'team_number_list': [254, 60, 359],
'recipient_json_list': [{'team_number': 254, 'awardee': None},
{'team_number': 60, 'awardee': None},
{'team_number': 359, 'awardee': None}],
}
self.assertTrue(multi_team_award in awards)
|
nwalters512/the-blue-alliance
|
tests/test_usfirst_event_awards_parser_02.py
|
Python
|
mit
| 2,367
|
class Vertex(object):
"""A Vertex is a node in a graph."""
def __init__(self, label=''):
self.label = label
def __repr__(self):
"""Returns a string representation of this object that can
be evaluated as a Python expression."""
return 'Vertex(%s)' % repr(self.label)
__str__ = __repr__
"""The str and repr forms of this object are the same."""
class Edge(tuple):
"""An Edge is a list of two vertices."""
def __new__(cls, *vs):
"""The Edge constructor takes two vertices."""
if len(vs) != 2:
raise ValueError, 'Edges must connect exactly two vertices.'
return tuple.__new__(cls, vs)
def __repr__(self):
"""Return a string representation of this object that can
be evaluated as a Python expression."""
return 'Edge(%s, %s)' % (repr(self[0]), repr(self[1]))
__str__ = __repr__
"""The str and repr forms of this object are the same."""
class Graph(dict):
"""A Graph is a dictionary of dictionaries. The outer
dictionary maps from a vertex to an inner dictionary.
The inner dictionary maps from other vertices to edges.
For vertices a and b, graph[a][b] maps
to the edge that connects a->b, if it exists."""
def __init__(self, vs=[], es=[]):
"""Creates a new graph.
vs: list of vertices;
es: list of edges.
"""
for v in vs:
self.add_vertex(v)
for e in es:
self.add_edge(e)
def add_vertex(self, v):
"""Add a vertex to the graph."""
self[v] = {}
def add_edge(self, e):
"""Adds and edge to the graph by adding an entry in both directions.
If there is already an edge connecting these Vertices, the
new edge replaces it.
"""
v, w = e
self[v][w] = e
self[w][v] = e
def get_edge(self, v, w):
"""Returns the edge with given two vertices
if it exists and None otherwise.
"""
try:
return self[v][w]
except KeyError:
return None
def remove_edge(self, e):
"""Removes all refierences to the given edge from the graph.
"""
v, w = e
self[v].pop(w)
self[w].pop(v)
def vertices(self):
"""Returns a list of the vertices in a graph.
"""
return self.keys()
def edges(self):
"""Returns a list of edges in a graph.
"""
ret = set()
for d in self.itervalues():
ret.update(d.itervalues())
return list(ret)
def out_vertices(self, v):
"""Returns a list of adjacent vertices.
"""
return self[v].keys()
def out_edges(self, v):
"""Returns a list of edges connected to the given vertex.
"""
return self[v].values()
def add_all_edges(self):
"""Makes a complete graph by adding edges between all pairs of vertices.
"""
vs = self.vertices()
for i, v in enumerate(vs):
for j, w in enumerate(vs):
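                # break (rather than continue) stops at the diagonal, so
                # each unordered pair of vertices is added exactly once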
if i == j: break
self.add_edge(Edge(v, w))
def main(script, *args):
v = Vertex('v')
#print v
w = Vertex('w')
y = Vertex('y')
#print w
e = Edge(v, w)
#print e
g = Graph([v,w, y], [e])
#print g
g.add_all_edges()
print g
if __name__ == '__main__':
import sys
main(*sys.argv)
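# A minimal usage sketch (added for illustration; not part of the original
# exercise code). It exercises the dict-of-dicts structure described in the
# Graph docstring: g[a][b] is the Edge connecting a and b, stored under both
# orderings.
def _demo_graph():
    a, b = Vertex('a'), Vertex('b')
    g = Graph([a, b], [Edge(a, b)])
    # the same Edge object is reachable in both directions
    assert g.get_edge(a, b) is g.get_edge(b, a)
    # unconnected vertex pairs yield None
    c = Vertex('c')
    g.add_vertex(c)
    assert g.get_edge(a, c) is None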
|
hacpai/show-me-the-code
|
Data Structure/0004/Graph.py
|
Python
|
gpl-2.0
| 3,466
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import socket
from lineup import Step, Queue
from lineup.framework import Node
from redis import StrictRedis
class DummyStep(Step):
def consume(self, instructions):
self.produce({'cool': instructions})
def test_queue_adopt_producer_step():
("Queue#adopt_producer should take a step and know about its id and")
# Given a dummy manager
manager = Node()
# Given a Queue with maximum size of 10
queue = Queue('test-queue', maxsize=10)
q1 = Queue('name1')
q2 = Queue('name2')
# And a running step (so it gains a thread id)
step = DummyStep(q1, q2, manager)
step.start()
# When that queue adopts the step as a producer
consumers, producers = queue.adopt_producer(step)
# And the redis key should contain the step id
hostname = socket.gethostname()
pid = os.getpid()
tid = step.ident
value = '{hostname}|{pid}|{tid}|tests.functional.test_queue.DummyStep|lineup.framework.Node'.format(**locals())
redis = StrictRedis()
members = redis.smembers('lineup:test-queue:producers')
members.should.contain(value)
queue.deactivate()
q1.deactivate()
q2.deactivate()
def test_put_waits_to_consume():
("Queue#put should wait until someone consumes")
# Given a dummy manager
manager = Node()
consume = Queue('consume')
produce = Queue('produce')
step = DummyStep(consume, produce, manager)
step.start()
consume.put({'foo': 'Bar'})
produce.get().should.equal({'cool': {'foo': 'Bar'}})
consume.deactivate()
produce.deactivate()
|
pombredanne/lineup
|
tests/functional/test_queue.py
|
Python
|
mit
| 1,663
|
# -*- coding: utf-8 -*-
#
# pynest_example_template.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
r"""Template demonstrating how to create examples for PyNEST
----------------------------------------------------------------
[[ Titles should be one line and state what the example does.
It should begin with a verb in the present tense and include type of model
and/or method]]
[[ Extended summary - a detailed explanation of your example. Try to answer the
following questions. ]]
[[ What does this script do? What is the purpose? ]]
This template demonstrates how to create an example Python script for
NEST.
Copy this file and replace the sample text with a description of your
example script.
Make sure to remove any text that is irrelevant for your example!
The format is based on `NumPy style docstring
<https://numpydoc.readthedocs.io/en/latest/format.html>`_ and uses
reStructuredText markup. Please review the syntax rules if you are
unfamiliar with either reStructuredText or NumPy style docstrings.
Your example should contain a complete code-block that begins with all
necessary imports and ends with code that displays the output.
Below is a more concrete example of what an extended summary could look like:
This script simulates a neuron driven by an excitatory and an inhibitory
population of neurons firing a Poisson spike train.
Optimization is performed using the `bisection` method from Scipy,
which simulates the network repeatedly.
The aim of this example script is to find a firing rate for the inhibitory
population that will make the neuron fire at the same rate as the excitatory
population.
[[ What kind of output is expected? ]]
The output shows the target neuron's membrane potential as a function of time.
[[ Does this example have a real world application or use case?
Are there particular applications or areas of research that would benefit
from this example? You can reference relevant papers that this example
may be based on]]
The model used here is applicable to neurorobotics, particularly cases of ...
[[ If applicable, state any prerequisite the reader needs to have installed or
configured that is not standard ]]
Please ensure that you have configured MUSIC to be ON in your NEST
configuration:
``cmake -Dwith-music=[ON</path/to/music>]``
[[ If applicable, mention the literature reference for this example.
Note the syntax of the citation. And don't forget to add a "References"
section! ]]
The model used here corresponds to the formulation presented in
Sander et al. [1]_ and the bisection method developed in
Gewaltig and Diesmann [2]_.
[[ See Also section - Include a couple of related examples, models,
or functions. ]]
See Also
---------
:ref:`Intrinsic current subthreshold <label_name>`
:doc:`some other doc </path/to/filename>`
Notes
------
[[ Additional information can be included here regarding background theory,
relevant mathematical equations etc. ]]
The value of :math:`\omega` is X.
Time-averaged from the spiking simulation:
[[ Note the syntax used for displaying equations uses reStructuredText
directive with LaTeX math formulae ]]
.. math::
X(e^{j\omega } ) = x(n)e^{ - j\omega n}
* you can use the asterisk for bullet items
* bullet points are usually more easily read than paragraphs
References
----------
[[ Note the format of the reference. No bold nor italics is used. Last name
of author(s) followed by year, title in sentence case and full name of
journal followed by volume and page range. Include the doi if
applicable. ]]
.. [1] Sander M., et al. (2011). Biology of the sauropod dinosaurs: The
evolution of gigantism. Biological Reviews. 86(1):117-155.
https://doi.org/10.1111/j.1469-185x.2010.00137.x
.. [2] Gewaltig M-O, Diesmann M (2007). NEST (Neural Simulation Tool).
Scholarpedia 2(4):1430.
[[ Include your name in the author section, so we know who contributed.
Author(s) should be comma separated with first name as initials followed
by last name ]]
:Authors: D Adams, N Gaiman
"""
import nest # [[ begin code section with imports]]
import scipy
###############################################################################
# [[After the initial docstring above, all following comment blocks must begin
# with a line of hashes and each line of a block must begin with a hash.
# This will allow us to generate nice looking examples for the website! ]]
#
# The excitatory `poisson_generator` (`noise[0]`) and the voltmeter are
# configured using `SetStatus`, which expects a list of node handles and
# a list of parameter dictionaries.
# The rate of the inhibitory Poisson generator is set later.
# Note that we do not need to set parameters for the neuron and the
# spike recorder, since they have satisfactory defaults.
nest.SetStatus(noise, [{"rate": n_ex * r_ex}, {"rate": n_in * r_in}])
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})
complete code ...
##############################################################################
# Finally, we plot the target neuron's membrane potential as a function of time
nest.voltage_trace.from_device(voltmeter) # [[ end with output ]]
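###############################################################################
# [[ Hedged illustration, not part of the original template: one minimal,
# self-contained shape such an example could take, assuming the NEST 2.x API
# used above (`SetStatus`, `voltage_trace`) plus `Create`, `Connect` and
# `Simulate`. Model names and parameter values here are placeholders. ]]
#
# import nest
# import nest.voltage_trace
#
# neuron = nest.Create("iaf_psc_alpha")
# noise = nest.Create("poisson_generator", 2)
# voltmeter = nest.Create("voltmeter")
# nest.SetStatus(noise, [{"rate": 80000.0}, {"rate": 15000.0}])
# nest.Connect(noise, neuron, syn_spec={"weight": [[1.2, -1.0]], "delay": 1.0})
# nest.Connect(voltmeter, neuron)
# nest.Simulate(1000.0)
# nest.voltage_trace.from_device(voltmeter)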
|
sdiazpier/nest-simulator
|
doc/userdoc/contribute/templates/pynest_example_template.py
|
Python
|
gpl-2.0
| 5,837
|
from gpio_swig import *
fpga_filename = 'std_2rxint_2tx_dig.rbf'
|
trnewman/VT-USRP-daughterboard-drivers_python
|
gr-gpio/src/python/gpio.py
|
Python
|
gpl-3.0
| 66
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
# Enthought library imports.
from pyface.tasks.api import Task, TaskLayout, PaneItem
# Local imports.
from enaml_panes import DummyTaskPane, DummyDockPane
class EnamlTask(Task):
""" A simple task for demonstrating the use of Enaml in Tasks.
"""
#### Task interface #######################################################
id = 'example.enaml_task'
name = 'Enaml Demo'
###########################################################################
# 'Task' interface.
###########################################################################
def _default_layout_default(self):
return TaskLayout(
left=PaneItem('example.dummy_dock_pane'))
def create_central_pane(self):
""" Create the central pane: the script editor.
"""
return DummyTaskPane()
def create_dock_panes(self):
""" Create the file browser and connect to its double click event.
"""
return [DummyDockPane()]
|
pankajp/pyface
|
examples/tasks/enaml/enaml_task.py
|
Python
|
bsd-3-clause
| 1,211
|
import re
from typing import Dict, Optional
from discord.ext import commands
from discordbot.command import MtgContext, roughly_matches
from magic import fetcher
from shared import fetch_tools
@commands.command(aliases=['res', 'pdm'])
async def resources(ctx: MtgContext, *, args: Optional[str]) -> None:
"""Useful pages related to `args`. Examples: 'tournaments', 'card Naturalize', 'deckcheck', 'league'."""
results = {}
if args is None:
args = ''
if len(args) > 0:
results.update(resources_resources(args))
results.update(site_resources(args))
s = ''
if len(results) == 0:
s = 'PD resources: <{url}>'.format(url=fetcher.decksite_url('/resources/'))
elif len(results) > 10:
s = '{author}: Too many results, please be more specific.'.format(author=ctx.author.mention)
else:
for url, text in results.items():
s += '{text}: <{url}>\n'.format(text=text, url=url)
await ctx.send(s)
def site_resources(args: str) -> Dict[str, str]:
results = {}
match = re.match('^s? ?([0-9]*|all) +', args)
if match:
season_prefix = 'seasons/' + match.group(1)
args = args.replace(match.group(0), '', 1).strip()
else:
season_prefix = ''
if ' ' in args:
area, detail = args.split(' ', 1)
else:
area, detail = args, ''
if area == 'archetype':
area = 'archetypes'
if area == 'card':
area = 'cards'
if area == 'person':
area = 'people'
sitemap = fetcher.sitemap()
matches = [endpoint for endpoint in sitemap if endpoint.startswith('/{area}/'.format(area=area))]
if len(matches) > 0:
detail = '{detail}/'.format(
detail=fetch_tools.escape(detail, True)) if detail else ''
url = fetcher.decksite_url('{season_prefix}/{area}/{detail}'.format(
season_prefix=season_prefix, area=fetch_tools.escape(area), detail=detail))
results[url] = args
return results
def resources_resources(args: str) -> Dict[str, str]:
results = {}
words = args.split()
for title, items in fetcher.resources().items():
for text, url in items.items():
asked_for_this_section_only = len(
words) == 1 and roughly_matches(title, words[0])
asked_for_this_section_and_item = len(words) == 2 and roughly_matches(
title, words[0]) and roughly_matches(text, words[1])
asked_for_this_item_only = len(
words) == 1 and roughly_matches(text, words[0])
the_whole_thing_sounds_right = roughly_matches(
text, ' '.join(words))
the_url_matches = roughly_matches(url, ' '.join(words))
if asked_for_this_section_only or asked_for_this_section_and_item or asked_for_this_item_only or the_whole_thing_sounds_right or the_url_matches:
results[url] = text
return results
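# Hedged sketch of the season-token parsing in site_resources above
# (regex behavior only; the sample input is hypothetical):
#
# >>> import re
# >>> re.match('^s? ?([0-9]*|all) +', 's 14 cards Naturalize').group(1)
# '14'
#
# A leading 's 14 ' (or '14 ', or 'all ') is stripped off and becomes the
# 'seasons/14' URL prefix before the area/detail lookup.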
|
PennyDreadfulMTG/Penny-Dreadful-Discord-Bot
|
discordbot/commands/resources.py
|
Python
|
gpl-3.0
| 2,935
|
# -*- coding: utf-8 -*-
"""Module to plot netCDF files (interactively)
This module is attempted to handle netCDF files with the use of
python package netCDF4 and to plot them with the use of python
package matplotlib.
Requirements (versions this package has been tested with):
- matplotlib, version 1.3.1
- mpl_toolkits.basemap, version 1.07
- netCDF4, version 1.1.3
- six
- Python 2.7
The main class for usage is the Maps class. A helper function
for the formatoption keywords is show_fmtkeys, which displays the
possible formatoption keywords.
Please look into nc2map/demo (or nc2map.demo) for demonstration scripts.
If you find any bugs, please do not hesitate to contact the authors.
This is nc2map version 0.0beta, so there might be some bugs.
"""
import os
import logging
import datetime as dt
from _maps import Maps
from _maps_manager import MapsManager
from _cbar_manager import CbarManager
from _basemap import Basemap
from nc2map.formatoptions import (
get_fmtkeys, show_fmtkeys, get_fmtdocs, show_fmtdocs, get_fnames,
get_unique_vals, close_shapes)
from ._cmap_ops import show_colormaps, get_cmap
from _axes_wrapper import wrap_subplot, subplots, multiple_subplots
from .warning import warn, critical, disable_warnings
try:
from _cdo import Cdo
except ImportError:
pass
__version__ = "0.00b"
__author__ = "Philipp Sommer (philipp.sommer@studium.uni-hamburg.de)"
def setup_logging(default_path=os.path.dirname(__file__) + '/logging.yaml',
default_level=logging.INFO,
env_key='LOG_CFG'):
"""Setup logging configuration
Input:
- default_path: Default path of the yaml logging configuration file
(Default: logging.yaml in nc2map source directory)
- default_level: default level if default_path does not exist
(Default: logging.INFO)
- env_key: environment variable specifying a different logging file than
default_path (Default: LOG_CFG)
Function taken from
http://victorlin.me/posts/2012/08/26/good-logging-practice-in-python
"""
import logging.config
import yaml
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = yaml.load(f.read())
for handler in config.get('handlers', {}).values():
try:
handler['filename'] = '%s/.%s' % (os.path.expanduser('~'),
handler['filename'])
except KeyError:
pass
logging.config.dictConfig(config)
else:
path = None
logging.basicConfig(level=default_level)
return path
path = setup_logging()
logger = logging.getLogger(__name__)
logger.debug(
"%s: Initializing nc2map, version %s",
dt.datetime.now().isoformat(), __version__)
logger.debug("Logging configuration file: %s", path)
def get_default_shapefile():
"""Returns the default shape file used by lonlatbox and shapes formatoption
keywords"""
from .defaults import shapes
return shapes['boundaryfile']
def load(filename, readers=None, ax=None, max_ax=None, ask=True,
force_reader=False, force_ax=False, plot=True):
"""Create a Maps object from a pickle file or dictionary
Input:
- filename: String or dictionary. If string, it must be the path
to a pickle file (e.g. created by the Maps.save method). If
dictionary it must be a dictionary as created from Maps.save
method.
- readers: List of readers to use.
It may happen that not all readers were saved (i.e. they are None
in the filename dictionary) because the initialization settings
could not be determined during the saving of the Maps instance. Or
maybe you want to use your own readers (see force_reader keyword).
In that case give an iterable containing nc2map.reader.ArrayReader
instances to use instead.
- ax: List of subplots to use.
It may happen that not all figure settings were saved (i.e. they
are None in the filename dictionary) because the initialization
settings could not be determined during the saving of the Maps
instance. Or maybe you want to use your own figure settings (see
force_ax keyword).
In that case give an iterable containing subplots
instances to use instead.
- max_ax: Integer. Determines the maximal number of subplots per
figure. Does not have an effect if force_ax is True.
- ask: True/False. If True and the reader could not be determined, you
will be asked for the filename
- force_reader: True/False. If True, use only the readers specified by
the readers keyword
- force_ax: True/False. If True, use only the subplots specified by the
ax keyword.
- plot: True/False. Make plots of all MapBase and SimplePlot instances
at the end or not
"""
import pickle
from numpy import ravel
from copy import deepcopy
import readers as rd
import mapos
logger = logging.getLogger(__name__)
logger.debug('Loading Maps settings...')
try:
readers = iter(readers)
except TypeError:
logger.debug('readers is not iterable, I assume it is None...')
readers = iter([])
try:
ax = iter(ax)
except TypeError:
logger.debug('ax is not iterable, I assume it is None...')
ax = iter([])
try:
logger.debug(' Try pickle load...')
with open(filename) as f:
idict = pickle.load(f)
except TypeError:
logger.debug(' Failed. --> Assume dictionary', exc_info=True)
idict = filename
rd_dict = idict['readers']
fig_dict = idict['figures']
maps_dict = deepcopy(idict.get('maps', {}))
shared_dict = idict.get('share', {})
lines_dict = deepcopy(idict.get('lines', {}))
cbars = deepcopy(idict.get('cbars', []))
logger.debug(' Open readers...')
for reader, val in rd_dict.items():
logger.debug(' Open reader %s', reader)
if val is None or force_reader:
try:
rd_dict[reader] = next(readers)
except StopIteration:
warn("Could not open reader %s!" % reader)
if ask:
default_reader = "NCReader"
fname = raw_input(
"Please insert the path to the NetCDF file(s) or "
"nothing to continue. Multiple files should be "
"separated by commas\n")
if not fname:
continue
fname = fname.split(',')
fname = fname if len(fname) > 1 else fname[0]
rtype = raw_input(
"Please specify which reader class to use (NCReader "
"or MFNCReader). without options: %s)\n" % (
default_reader))
if not rtype:
rtype = default_reader
rd_dict[reader] = getattr(rd, rtype)(fname)
continue
try:
rd_dict[reader] = getattr(rd, val[0])(*val[1], **val[2])
except TypeError: # assume a reader
pass
logger.debug(' Open figures...')
for fig, val in fig_dict.items():
logger.debug(' Open figure %s', fig)
if val is None or force_ax:
warn("Could not open figure %s!" % fig)
continue
fig_axes = ravel(subplots(val[0][0], val[0][1], *val[1], **val[2])[1])
fig_dict[fig] = fig_axes[slice(0, max_ax)]
# delete excessive axes
if len(fig_dict[fig]) < len(fig_axes):
figo = fig_axes[0].get_figure()
for axes in fig_axes[len(fig_dict[fig]):]:
figo.delaxes(axes._AxesWrapper__ax)
logger.debug('Open Maps instances')
mymaps = Maps(_noadd=True)
logger.debug('Add maps to Maps instance')
for mapo, mdict in maps_dict.items():
logger.debug(' Open %s...', mapo)
for key, val in mdict.items():
logger.debug(' %s: %s', key, val)
obj = getattr(mapos, mdict.pop('class'))
try:
axes = fig_dict[mdict.pop('fig')][mdict.pop('num')-1]
if force_ax:
assert 1 == 2, 'None' # produce error to use next(ax)
except (AssertionError, TypeError, KeyError):
try:
axes = next(ax)
except StopIteration:
warn(
"Could not determine axes of mapo %s because no valid "
"axes was given! A new figure will be opened." % mapo)
axes = None
reader = rd_dict.get(mdict.pop('reader'))
if reader is None:
critical(
"Could not open mapo %s because no valid reader was given!" % (
mapo))
continue
mymaps.addmap(obj(
reader=reader, name=mdict.pop('name'), ax=axes,
**{key: val for key, val in mdict.pop('dims').items() +
mdict.items()}), plot=False, add=False)
logger.debug('Add lines to Maps instance')
for line, ldict in lines_dict.items():
logger.debug(' Open %s...', line)
for key, val in ldict.items():
logger.debug(' %s: %s', key, val)
obj = getattr(mapos, ldict.pop('class'))
try:
axes = fig_dict[ldict.pop('fig')][ldict.pop('num')-1]
if force_ax:
assert 1 == 2, 'None' # produce error to use next(ax)
except (AssertionError, TypeError, KeyError):
try:
axes = next(ax)
except StopIteration:
warn("Could not determine axes of line %s because no valid "
"axes was given! A new figure will be opened." % line)
axes = None
reader = rd_dict[ldict.pop('reader')]
if reader is None:
critical(
"Could not open line object %s because no valid reader was "
"given!" % line)
continue
mymaps.addline(obj(reader=reader, name=ldict.pop('name'),
ax=axes, **ldict['init']), plot=False, add=False)
if cbars:
mymaps.update_cbar(*cbars, plot=False, add=False)
mymaps._fmt += [mymaps.asdict('maps', 'lines', 'cbars')]
if plot:
mymaps.plot = True
logger.info("Setting up projections...")
for mapo in mymaps.maps:
mapo._setupproj()
logger.info("Making plots...")
mymaps.make_plot()
for cbar in mymaps.get_cbars():
cbar._draw_colorbar()
for fig in mymaps.get_figs():
mymaps._set_window_title(fig)
logger.debug(" Set shared settings...")
else:
mymaps.plot = False
for name, sdict in shared_dict.items():
if not sdict:
continue
try:
mapo = mymaps.get_maps(name=name)[0]
except IndexError:
warn("Could not set shared setting for mapo %s because it was "
"not found in the Maps instance!")
continue
mapo.share._draw = 0
mapo.share.shared = mapo.share._from_dict(sdict, mymaps.maps)
mapo.share._draw = 1
return mymaps
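# Hedged usage sketch (assumes a pickle file previously written by the
# Maps.save method; 'mymaps.pkl' is a hypothetical filename):
#
# import nc2map
# mymaps = nc2map.load('mymaps.pkl', ask=False, plot=False)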
|
Chilipp/nc2map
|
__init__.py
|
Python
|
gpl-2.0
| 11,535
|
"""
Revision of 008.py.
In the approach used in 008.py, int + int may overflow.
"""
INT_MAX = 2 ** 31 - 1
INT_MIN = - 2 ** 31
REMAINDER = INT_MAX % 10
class Solution:
def myAtoi(self, s):
if not s:
return 0
# remove leading whitespace
i = 0
for i_, c in enumerate(s):
if c != ' ':
i = i_
break
# check if is positive or negative
positive = True
num = 0
if s[i] == '+':
pass
elif s[i] == '-':
positive = False
elif s[i].isdigit():
num += int(s[i])
else:
return 0
for c in s[i+1:]:
if not c.isdigit():
break
# check if overflow
if num > INT_MAX // 10 or \
(num == INT_MAX//10 and int(c) >= REMAINDER + 1):
return INT_MAX if positive else INT_MIN
else:
num = num * 10 + int(c)
return num if positive else -num
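# A few hedged sanity checks (added for illustration; inputs follow the
# LeetCode "String to Integer (atoi)" clamping rules assumed above):
if __name__ == '__main__':
    s = Solution()
    assert s.myAtoi('42') == 42
    assert s.myAtoi('   -42') == -42
    assert s.myAtoi('2147483648') == INT_MAX    # clamps at 2**31 - 1
    assert s.myAtoi('-2147483649') == INT_MIN   # clamps at -2**31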
|
cosven/pat_play
|
leetcode/008_again.py
|
Python
|
gpl-3.0
| 1,027
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from airflow.providers.google.cloud.example_dags.example_gcs import (
BUCKET_1,
BUCKET_2,
PATH_TO_SAVED_FILE,
PATH_TO_TRANSFORM_SCRIPT,
PATH_TO_UPLOAD_FILE,
)
from tests.test_utils.logging_command_executor import CommandExecutor
class GcsSystemTestHelper(CommandExecutor):
@staticmethod
def create_test_file():
# Create test file for upload
with open(PATH_TO_UPLOAD_FILE, "w+") as file:
file.writelines(["This is a test file"])
# Create script for transform operator
with open(PATH_TO_TRANSFORM_SCRIPT, "w+") as file:
file.write(
"""import sys
source = sys.argv[1]
destination = sys.argv[2]
print('running script')
with open(source, "r") as src, open(destination, "w+") as dest:
lines = [l.upper() for l in src.readlines()]
print(lines)
dest.writelines(lines)
"""
)
@staticmethod
def remove_test_files():
if os.path.exists(PATH_TO_UPLOAD_FILE):
os.remove(PATH_TO_UPLOAD_FILE)
if os.path.exists(PATH_TO_SAVED_FILE):
os.remove(PATH_TO_SAVED_FILE)
if os.path.exists(PATH_TO_TRANSFORM_SCRIPT):
os.remove(PATH_TO_TRANSFORM_SCRIPT)
def remove_bucket(self):
self.execute_cmd(["gsutil", "rm", "-r", f"gs://{BUCKET_1}"])
self.execute_cmd(["gsutil", "rm", "-r", f"gs://{BUCKET_2}"])
|
apache/incubator-airflow
|
tests/providers/google/cloud/operators/test_gcs_system_helper.py
|
Python
|
apache-2.0
| 2,215
|
import os
DATASOURCE_DIR = 'datasources'
CACHE_DIR = 'cache.db'
PRICES_DATA = os.sep.join((DATASOURCE_DIR, 'us-prices-adjusted-1992-2014.zip'))
UNADJUSTED_PRICES_DATA = os.sep.join((DATASOURCE_DIR, 'us-prices-unadjusted-1992-2014.zip'))
SOURCE_US_EQUITIES = os.sep.join((DATASOURCE_DIR, 'us-equities.csv'))
SOURCE_US_FUNDS = os.sep.join((DATASOURCE_DIR, 'us-funds.csv'))
SOURCE_DIVIDENDS = os.sep.join((DATASOURCE_DIR, 'dividends.csv'))
SOURCE_BENCHMARK = os.sep.join((DATASOURCE_DIR, 'w5000.csv'))
CACHE_PERFS = os.sep.join((CACHE_DIR, 'perf-data.db'))
CACHE_VOLUMES = os.sep.join((CACHE_DIR, 'stats-volume.db'))
CACHE_SCREENING = os.sep.join((CACHE_DIR, 'tmp-cache-screening.db'))
|
chris-ch/us-equities
|
backtest/constants.py
|
Python
|
mit
| 687
|
# Work in progress
|
Konubinix/pyfilesystem
|
fs/expose/serve/__init__.py
|
Python
|
bsd-3-clause
| 18
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`test_config`
==================
Created by hbldh <henrik.blidh@nedomkull.com>
Created on 2016-02-04
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import tempfile
import unittest
import flask
import bankid
from flask_pybankid import PyBankID
class FlaskPyMongoConfigTest(unittest.TestCase):
def setUp(self):
self.certificate_file, self.key_file = bankid.create_bankid_test_server_cert_and_key(
tempfile.gettempdir()
)
self.app = flask.Flask("test")
self.context = self.app.test_request_context("/")
self.context.push()
def tearDown(self):
self.context.pop()
try:
os.remove(self.certificate_file)
os.remove(self.key_file)
except:
pass
def test_default_config_prefix(self):
self.app.config["PYBANKID_CERT_PATH"] = self.certificate_file
self.app.config["PYBANKID_KEY_PATH"] = self.key_file
self.app.config["PYBANKID_TEST_SERVER"] = True
fbid = PyBankID(self.app)
assert fbid.client.certs == (self.certificate_file, self.key_file)
assert fbid.client.api_url == "https://appapi2.test.bankid.com/rp/v4"
def test_custom_config_prefix(self):
self.app.config["CUSTOM_CERT_PATH"] = self.certificate_file
self.app.config["CUSTOM_KEY_PATH"] = self.key_file
self.app.config["CUSTOM_TEST_SERVER"] = True
fbid = PyBankID(self.app, "CUSTOM")
assert fbid.client.certs == (self.certificate_file, self.key_file)
assert fbid.client.api_url == "https://appapi2.test.bankid.com/rp/v4"
|
hbldh/flask-pybankid
|
tests/test_config.py
|
Python
|
mit
| 1,775
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CustomerProfile'
db.create_table(u'main_customerprofile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
))
db.send_create_signal(u'main', ['CustomerProfile'])
def backwards(self, orm):
# Deleting model 'CustomerProfile'
db.delete_table(u'main_customerprofile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.customerprofile': {
'Meta': {'object_name': 'CustomerProfile'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['main']
|
asmaps/as_poweradmin
|
as_poweradmin/main/migrations/0001_initial.py
|
Python
|
mit
| 4,679
|
"""
module for generating C, C++, Fortran77, Fortran90 and Octave/Matlab routines
that evaluate sympy expressions. This module is work in progress. Only the
milestones with a '+' character in the list below have been completed.
--- How is sympy.utilities.codegen different from sympy.printing.ccode? ---
We considered the idea of extending the printing routines for sympy functions
in such a way that they print complete compilable code, but this leads to a
few insurmountable issues that can only be tackled with a dedicated code
generator:
- For C, one needs both a code and a header file, while the printing routines
generate just one string. This code generator can be extended to support
.pyf files for f2py.
- SymPy functions are not concerned with programming-technical issues, such
as input, output and input-output arguments. Other examples are contiguous
or non-contiguous arrays, including headers of other libraries such as gsl
or others.
- It is highly interesting to evaluate several sympy functions in one C
routine, eventually sharing common intermediate results with the help
of the cse routine. This is more than just printing.
- From the programming perspective, expressions with constants should be
evaluated in the code generator as much as possible. This is different
for printing.
--- Basic assumptions ---
* A generic Routine data structure describes the routine that must be
translated into C/Fortran/... code. This data structure covers all
features present in one or more of the supported languages.
* Descendants from the CodeGen class transform multiple Routine instances
into compilable code. Each derived class translates into a specific
language.
* In many cases, one wants a simple workflow. The friendly functions in the
last part are a simple api on top of the Routine/CodeGen stuff. They are
easier to use, but are less powerful.
--- Milestones ---
+ First working version with scalar input arguments, generating C code,
tests
+ Friendly functions that are easier to use than the rigorous
Routine/CodeGen workflow.
+ Integer and Real numbers as input and output
+ Output arguments
+ InputOutput arguments
+ Sort input/output arguments properly
+ Contiguous array arguments (numpy matrices)
+ Also generate .pyf code for f2py (in autowrap module)
+ Isolate constants and evaluate them beforehand in double precision
+ Fortran 90
+ Octave/Matlab
- Common Subexpression Elimination
- User defined comments in the generated code
- Optional extra include lines for libraries/objects that can eval special
functions
- Test other C compilers and libraries: gcc, tcc, libtcc, gcc+gsl, ...
- Contiguous array arguments (sympy matrices)
- Non-contiguous array arguments (sympy matrices)
- ccode must raise an error when it encounters something that can not be
translated into c. ccode(integrate(sin(x)/x, x)) does not make sense.
- Complex numbers as input and output
- A default complex datatype
- Include extra information in the header: date, user, hostname, sha1
hash, ...
- Fortran 77
- C++
- Python
- ...
"""
from __future__ import print_function, division
import os
import textwrap
from sympy import __version__ as sympy_version
from sympy.core import Symbol, S, Expr, Tuple, Equality, Function
from sympy.core.compatibility import is_sequence, StringIO, string_types
from sympy.printing.codeprinter import AssignmentError
from sympy.printing.ccode import ccode, CCodePrinter
from sympy.printing.fcode import fcode, FCodePrinter
from sympy.printing.octave import octave_code, OctaveCodePrinter
from sympy.tensor import Idx, Indexed, IndexedBase
from sympy.matrices import (MatrixSymbol, ImmutableMatrix, MatrixBase,
MatrixExpr, MatrixSlice)
__all__ = [
# description of routines
"Routine", "DataType", "default_datatypes", "get_default_datatype",
"Argument", "InputArgument", "Result",
# routines -> code
"CodeGen", "CCodeGen", "FCodeGen", "OctaveCodeGen",
# friendly functions
"codegen", "make_routine",
]
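# A hedged usage sketch of the "friendly functions" listed above (mirrors the
# documented codegen() API; the symbol names and the "test" prefix are only
# illustrative). codegen returns (filename, contents) pairs, one per
# generated file:
#
# >>> from sympy import symbols
# >>> from sympy.utilities.codegen import codegen
# >>> x, y, z = symbols('x y z')
# >>> [(c_name, c_code), (h_name, h_code)] = codegen(
# ...     ("f", x + y*z), "C", "test", header=False, empty=False)
# >>> c_name
# 'test.c'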
#
# Description of routines
#
class Routine(object):
"""Generic description of evaluation routine for set of expressions.
A CodeGen class can translate instances of this class into code in a
particular language. The routine specification covers all the features
present in these languages. The CodeGen part must raise an exception
when certain features are not present in the target language. For
example, multiple return values are possible in Python, but not in C or
Fortran. Another example: Fortran and Python support complex numbers,
while C does not.
"""
def __init__(self, name, arguments, results, local_vars, global_vars):
"""Initialize a Routine instance.
Parameters
==========
name : string
Name of the routine.
arguments : list of Arguments
These are things that appear in arguments of a routine, often
appearing on the right-hand side of a function call. These are
commonly InputArguments but in some languages, they can also be
OutputArguments or InOutArguments (e.g., pass-by-reference in C
code).
results : list of Results
These are the return values of the routine, often appearing on
the left-hand side of a function call. The difference between
Results and OutputArguments and when you should use each is
language-specific.
local_vars : list of Symbols
These are used internally by the routine.
global_vars : list of Symbols
Variables which will not be passed into the function.
"""
# extract all input symbols and all symbols appearing in an expression
input_symbols = set([])
symbols = set([])
for arg in arguments:
if isinstance(arg, OutputArgument):
symbols.update(arg.expr.free_symbols)
elif isinstance(arg, InputArgument):
input_symbols.add(arg.name)
elif isinstance(arg, InOutArgument):
input_symbols.add(arg.name)
symbols.update(arg.expr.free_symbols)
else:
raise ValueError("Unknown Routine argument: %s" % arg)
for r in results:
if not isinstance(r, Result):
raise ValueError("Unknown Routine result: %s" % r)
symbols.update(r.expr.free_symbols)
# Check that all symbols in the expressions are covered by
# InputArguments/InOutArguments---subset because user could
# specify additional (unused) InputArguments or local_vars.
notcovered = symbols.difference(
input_symbols.union(local_vars).union(global_vars))
if notcovered != set([]):
raise ValueError("Symbols needed for output are not in input " +
", ".join([str(x) for x in notcovered]))
self.name = name
self.arguments = arguments
self.results = results
self.local_vars = local_vars
self.global_vars = global_vars
@property
def variables(self):
"""Returns a set of all variables possibly used in the routine.
For routines with unnamed return values, the dummies that may or
may not be used will be included in the set.
"""
v = set(self.local_vars)
for arg in self.arguments:
v.add(arg.name)
for res in self.results:
v.add(res.result_var)
return v
@property
def result_variables(self):
"""Returns a list of OutputArgument, InOutArgument and Result.
If return values are present, they are at the end of the list.
"""
args = [arg for arg in self.arguments if isinstance(
arg, (OutputArgument, InOutArgument))]
args.extend(self.results)
return args
class DataType(object):
"""Holds strings for a certain datatype in different languages."""
def __init__(self, cname, fname, pyname, octname):
self.cname = cname
self.fname = fname
self.pyname = pyname
self.octname = octname
default_datatypes = {
"int": DataType("int", "INTEGER*4", "int", ""),
"float": DataType("double", "REAL*8", "float", "")
}
def get_default_datatype(expr):
"""Derives an appropriate datatype based on the expression."""
if expr.is_integer:
return default_datatypes["int"]
elif isinstance(expr, MatrixBase):
for element in expr:
if not element.is_integer:
return default_datatypes["float"]
return default_datatypes["int"]
else:
return default_datatypes["float"]
class Variable(object):
"""Represents a typed variable."""
def __init__(self, name, datatype=None, dimensions=None, precision=None):
"""Return a new variable.
Parameters
==========
name : Symbol or MatrixSymbol
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the symbol argument.
dimensions : sequence containing tuples, optional
If present, the argument is interpreted as an array, where this
sequence of tuples specifies (lower, upper) bounds for each
index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
if not isinstance(name, (Symbol, MatrixSymbol)):
raise TypeError("The first argument must be a sympy symbol.")
if datatype is None:
datatype = get_default_datatype(name)
elif not isinstance(datatype, DataType):
raise TypeError("The (optional) `datatype' argument must be an "
"instance of the DataType class.")
if dimensions and not isinstance(dimensions, (tuple, list)):
raise TypeError(
"The dimension argument must be a sequence of tuples")
self._name = name
self._datatype = {
'C': datatype.cname,
'FORTRAN': datatype.fname,
'OCTAVE': datatype.octname,
'PYTHON': datatype.pyname
}
self.dimensions = dimensions
self.precision = precision
@property
def name(self):
return self._name
def get_datatype(self, language):
"""Returns the datatype string for the requested langage.
Examples
========
>>> from sympy import Symbol
>>> from sympy.utilities.codegen import Variable
>>> x = Variable(Symbol('x'))
>>> x.get_datatype('c')
'double'
>>> x.get_datatype('fortran')
'REAL*8'
"""
try:
return self._datatype[language.upper()]
except KeyError:
raise CodeGenError("Has datatypes for languages: %s" %
", ".join(self._datatype))
class Argument(Variable):
"""An abstract Argument data structure: a name and a data type.
This structure is refined in the descendants below.
"""
pass
class InputArgument(Argument):
pass
class ResultBase(object):
"""Base class for all "outgoing" information from a routine.
Objects of this class store a sympy expression and a sympy object
representing a result variable that will be used in the generated code
only if necessary.
"""
def __init__(self, expr, result_var):
self.expr = expr
self.result_var = result_var
class OutputArgument(Argument, ResultBase):
"""OutputArgument are always initialized in the routine."""
def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None):
"""Return a new variable.
Parameters
==========
name : Symbol, MatrixSymbol
The name of this variable. When used for code generation, this
might appear, for example, in the prototype of function in the
argument list.
result_var : Symbol, Indexed
Something that can be used to assign a value to this variable.
Typically the same as `name` but for Indexed this should be e.g.,
"y[i]" whereas `name` should be the Symbol "y".
expr : object
The expression that should be output, typically a SymPy
expression.
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the symbol argument.
dimensions : sequence containing tuples, optional
If present, the argument is interpreted as an array, where this
sequence of tuples specifies (lower, upper) bounds for each
index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
Argument.__init__(self, name, datatype, dimensions, precision)
ResultBase.__init__(self, expr, result_var)
class InOutArgument(Argument, ResultBase):
"""InOutArgument are never initialized in the routine."""
def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None):
if not datatype:
datatype = get_default_datatype(expr)
Argument.__init__(self, name, datatype, dimensions, precision)
ResultBase.__init__(self, expr, result_var)
__init__.__doc__ = OutputArgument.__init__.__doc__
class Result(Variable, ResultBase):
"""An expression for a return value.
The name result is used to avoid conflicts with the reserved word
"return" in the python language. It is also shorter than ReturnValue.
These may or may not need a name in the destination (e.g., "return(x*y)"
might return a value without ever naming it).
"""
def __init__(self, expr, name=None, result_var=None, datatype=None,
dimensions=None, precision=None):
"""Initialize a return value.
Parameters
==========
expr : SymPy expression
name : Symbol, MatrixSymbol, optional
The name of this return variable. When used for code generation,
this might appear, for example, in the prototype of function in a
list of return values. A dummy name is generated if omitted.
result_var : Symbol, Indexed, optional
Something that can be used to assign a value to this variable.
Typically the same as `name` but for Indexed this should be e.g.,
"y[i]" whereas `name` should be the Symbol "y". Defaults to
`name` if omitted.
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the symbol argument.
dimensions : sequence containing tuples, optional
If present, this variable is interpreted as an array,
where this sequence of tuples specifies (lower, upper)
bounds for each index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
if not isinstance(expr, (Expr, MatrixBase, MatrixExpr)):
raise TypeError("The first argument must be a sympy expression.")
if name is None:
name = 'result_%d' % abs(hash(expr))
if isinstance(name, string_types):
if isinstance(expr, (MatrixBase, MatrixExpr)):
name = MatrixSymbol(name, *expr.shape)
else:
name = Symbol(name)
if result_var is None:
result_var = name
Variable.__init__(self, name, datatype=datatype,
dimensions=dimensions, precision=precision)
ResultBase.__init__(self, expr, result_var)
#
# Transformation of routine objects into code
#
class CodeGen(object):
"""Abstract class for the code generators."""
def __init__(self, project="project"):
"""Initialize a code generator.
Derived classes will offer more options that affect the generated
code.
"""
self.project = project
def routine(self, name, expr, argument_sequence, global_vars):
"""Creates an Routine object that is appropriate for this language.
This implementation is appropriate for at least C/Fortran. Subclasses
can override this if necessary.
Here, we assume at most one return value (the l-value) which must be
scalar. Additional outputs are OutputArguments (e.g., pointers on
right-hand-side or pass-by-reference). Matrices are always returned
via OutputArguments. If ``argument_sequence`` is None, arguments will
be ordered alphabetically, but with all InputArguments first, and then
OutputArgument and InOutArguments.
"""
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
# local variables
local_vars = set([i.label for i in expressions.atoms(Idx)])
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
symbols = expressions.free_symbols - local_vars - global_vars
# Decide whether to use output argument or return value
return_val = []
output_args = []
for expr in expressions:
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
if isinstance(out_arg, Indexed):
dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape])
symbol = out_arg.base.label
elif isinstance(out_arg, Symbol):
dims = []
symbol = out_arg
elif isinstance(out_arg, MatrixSymbol):
dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape])
symbol = out_arg
else:
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
if expr.has(symbol):
output_args.append(
InOutArgument(symbol, out_arg, expr, dimensions=dims))
else:
output_args.append(
OutputArgument(symbol, out_arg, expr, dimensions=dims))
# avoid duplicate arguments
symbols.remove(symbol)
elif isinstance(expr, (ImmutableMatrix, MatrixSlice)):
# Create a "dummy" MatrixSymbol to use as the Output arg
out_arg = MatrixSymbol('out_%s' % abs(hash(expr)), *expr.shape)
dims = tuple([(S.Zero, dim - 1) for dim in out_arg.shape])
output_args.append(
OutputArgument(out_arg, out_arg, expr, dimensions=dims))
else:
return_val.append(Result(expr))
arg_list = []
# setup input argument list
array_symbols = {}
for array in expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
if symbol in array_symbols:
dims = []
array = array_symbols[symbol]
for dim in array.shape:
dims.append((S.Zero, dim - 1))
metadata = {'dimensions': dims}
else:
metadata = {}
arg_list.append(InputArgument(symbol, **metadata))
output_args.sort(key=lambda x: str(x.name))
arg_list.extend(output_args)
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = dict([(x.name, x) for x in arg_list])
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
new_args.append(InputArgument(symbol))
arg_list = new_args
return Routine(name, arg_list, return_val, local_vars, global_vars)
def write(self, routines, prefix, to_files=False, header=True, empty=True):
"""Writes all the source code files for the given routines.
The generated source is returned as a list of (filename, contents)
tuples, or is written to files (see below). Each filename consists
of the given prefix, appended with an appropriate extension.
Parameters
==========
routines : list
A list of Routine instances to be written
prefix : string
The prefix for the output files
to_files : bool, optional
When True, the output is written to files. Otherwise, a list
of (filename, contents) tuples is returned. [default: False]
header : bool, optional
When True, a header comment is included on top of each source
file. [default: True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default: True]
"""
if to_files:
for dump_fn in self.dump_fns:
filename = "%s.%s" % (prefix, dump_fn.extension)
with open(filename, "w") as f:
dump_fn(self, routines, f, prefix, header, empty)
else:
result = []
for dump_fn in self.dump_fns:
filename = "%s.%s" % (prefix, dump_fn.extension)
contents = StringIO()
dump_fn(self, routines, contents, prefix, header, empty)
result.append((filename, contents.getvalue()))
return result
def dump_code(self, routines, f, prefix, header=True, empty=True):
"""Write the code by calling language specific methods.
The generated file contains all the definitions of the routines in
low-level code and refers to the header file if appropriate.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix, used to refer to the proper header file.
Only the basename of the prefix is used.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
code_lines = self._preprocessor_statements(prefix)
for routine in routines:
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_opening(routine))
code_lines.extend(self._declare_arguments(routine))
code_lines.extend(self._declare_locals(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._call_printer(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_ending(routine))
code_lines = self._indent_code(''.join(code_lines))
if header:
code_lines = ''.join(self._get_header() + [code_lines])
if code_lines:
f.write(code_lines)
class CodeGenError(Exception):
pass
class CodeGenArgumentListError(Exception):
@property
def missing_args(self):
return self.args[1]
header_comment = """Code generated with sympy %(version)s
See http://www.sympy.org/ for more information.
This file is part of '%(project)s'
"""
class CCodeGen(CodeGen):
"""Generator for C code.
The .write() method inherited from CodeGen will output a code file and
an interface file, <prefix>.c and <prefix>.h respectively.
"""
code_extension = "c"
interface_extension = "h"
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("/" + "*"*78 + '\n')
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
code_lines.append(" *%s*\n" % line.center(76))
code_lines.append(" " + "*"*78 + "/\n")
return code_lines
def get_prototype(self, routine):
"""Returns a string for the function prototype of the routine.
If the routine has multiple result objects, a CodeGenError is
raised.
See: http://en.wikipedia.org/wiki/Function_prototype
"""
if len(routine.results) > 1:
raise CodeGenError("C only supports a single or no return value.")
elif len(routine.results) == 1:
ctype = routine.results[0].get_datatype('C')
else:
ctype = "void"
type_args = []
for arg in routine.arguments:
name = ccode(arg.name)
if arg.dimensions or isinstance(arg, ResultBase):
type_args.append((arg.get_datatype('C'), "*%s" % name))
else:
type_args.append((arg.get_datatype('C'), name))
arguments = ", ".join([ "%s %s" % t for t in type_args])
return "%s %s(%s)" % (ctype, routine.name, arguments)
def _preprocessor_statements(self, prefix):
code_lines = []
code_lines.append("#include \"%s.h\"\n" % os.path.basename(prefix))
code_lines.append("#include <math.h>\n")
return code_lines
def _get_routine_opening(self, routine):
prototype = self.get_prototype(routine)
return ["%s {\n" % prototype]
def _declare_arguments(self, routine):
# arguments are declared in prototype
return []
def _declare_locals(self, routine):
# loop variables are declared in loop statement
return []
def _call_printer(self, routine):
code_lines = []
# Compose a list of symbols to be dereferenced in the function
# body. These are the arguments that were passed by a reference
# pointer, excluding arrays.
dereference = []
for arg in routine.arguments:
if isinstance(arg, ResultBase) and not arg.dimensions:
dereference.append(arg.name)
return_val = None
for result in routine.result_variables:
if isinstance(result, Result):
assign_to = routine.name + "_result"
t = result.get_datatype('c')
code_lines.append("{0} {1};\n".format(t, str(assign_to)))
return_val = assign_to
else:
assign_to = result.result_var
try:
constants, not_c, c_expr = ccode(result.expr, human=False,
assign_to=assign_to, dereference=dereference)
except AssignmentError:
assign_to = result.result_var
code_lines.append(
"%s %s;\n" % (result.get_datatype('c'), str(assign_to)))
constants, not_c, c_expr = ccode(result.expr, human=False,
assign_to=assign_to, dereference=dereference)
for name, value in sorted(constants, key=str):
code_lines.append("double const %s = %s;\n" % (name, value))
code_lines.append("%s\n" % c_expr)
if return_val:
code_lines.append(" return %s;\n" % return_val)
return code_lines
def _indent_code(self, codelines):
p = CCodePrinter()
return p.indent_code(codelines)
def _get_routine_ending(self, routine):
return ["}\n"]
def dump_c(self, routines, f, prefix, header=True, empty=True):
self.dump_code(routines, f, prefix, header, empty)
dump_c.extension = code_extension
dump_c.__doc__ = CodeGen.dump_code.__doc__
def dump_h(self, routines, f, prefix, header=True, empty=True):
"""Writes the C header file.
This file contains all the function declarations.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix, used to construct the include guards.
Only the basename of the prefix is used.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
if header:
print(''.join(self._get_header()), file=f)
guard_name = "%s__%s__H" % (self.project.replace(
" ", "_").upper(), prefix.replace("/", "_").upper())
# include guards
if empty:
print(file=f)
print("#ifndef %s" % guard_name, file=f)
print("#define %s" % guard_name, file=f)
if empty:
print(file=f)
# declaration of the function prototypes
for routine in routines:
prototype = self.get_prototype(routine)
print("%s;" % prototype, file=f)
# end of include guards
if empty:
print(file=f)
print("#endif", file=f)
if empty:
print(file=f)
dump_h.extension = interface_extension
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_c, dump_h]
class FCodeGen(CodeGen):
"""Generator for Fortran 95 code
The .write() method inherited from CodeGen will output a code file and
an interface file, <prefix>.f90 and <prefix>.h respectively.
"""
code_extension = "f90"
interface_extension = "h"
def __init__(self, project='project'):
CodeGen.__init__(self, project)
def _get_symbol(self, s):
"""Returns the symbol as fcode prints it."""
return fcode(s).strip()
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("!" + "*"*78 + '\n')
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
code_lines.append("!*%s*\n" % line.center(76))
code_lines.append("!" + "*"*78 + '\n')
return code_lines
def _preprocessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""Returns the opening statements of the fortran routine."""
code_list = []
if len(routine.results) > 1:
raise CodeGenError(
"Fortran only supports a single or no return value.")
elif len(routine.results) == 1:
result = routine.results[0]
code_list.append(result.get_datatype('fortran'))
code_list.append("function")
else:
code_list.append("subroutine")
args = ", ".join("%s" % self._get_symbol(arg.name)
for arg in routine.arguments)
call_sig = "{0}({1})\n".format(routine.name, args)
# Fortran 95 requires all lines be less than 132 characters, so wrap
# this line before appending.
call_sig = ' &\n'.join(textwrap.wrap(call_sig,
width=60,
break_long_words=False)) + '\n'
code_list.append(call_sig)
code_list = [' '.join(code_list)]
code_list.append('implicit none\n')
return code_list
def _declare_arguments(self, routine):
# argument type declarations
code_list = []
array_list = []
scalar_list = []
for arg in routine.arguments:
if isinstance(arg, InputArgument):
typeinfo = "%s, intent(in)" % arg.get_datatype('fortran')
elif isinstance(arg, InOutArgument):
typeinfo = "%s, intent(inout)" % arg.get_datatype('fortran')
elif isinstance(arg, OutputArgument):
typeinfo = "%s, intent(out)" % arg.get_datatype('fortran')
else:
raise CodeGenError("Unkown Argument type: %s" % type(arg))
fprint = self._get_symbol
if arg.dimensions:
# fortran arrays start at 1
dimstr = ", ".join(["%s:%s" % (
fprint(dim[0] + 1), fprint(dim[1] + 1))
for dim in arg.dimensions])
typeinfo += ", dimension(%s)" % dimstr
array_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
else:
scalar_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
# scalars first, because they can be used in array declarations
code_list.extend(scalar_list)
code_list.extend(array_list)
return code_list
def _declare_locals(self, routine):
code_list = []
for var in sorted(routine.local_vars, key=str):
typeinfo = get_default_datatype(var)
code_list.append("%s :: %s\n" % (
typeinfo.fname, self._get_symbol(var)))
return code_list
def _get_routine_ending(self, routine):
"""Returns the closing statements of the fortran routine."""
if len(routine.results) == 1:
return ["end function\n"]
else:
return ["end subroutine\n"]
def get_interface(self, routine):
"""Returns a string for the function interface.
The routine should have a single result object, which can be None.
If the routine has multiple result objects, a CodeGenError is
raised.
See: http://en.wikipedia.org/wiki/Function_prototype
"""
prototype = [ "interface\n" ]
prototype.extend(self._get_routine_opening(routine))
prototype.extend(self._declare_arguments(routine))
prototype.extend(self._get_routine_ending(routine))
prototype.append("end interface\n")
return "".join(prototype)
def _call_printer(self, routine):
declarations = []
code_lines = []
for result in routine.result_variables:
if isinstance(result, Result):
assign_to = routine.name
elif isinstance(result, (OutputArgument, InOutArgument)):
assign_to = result.result_var
constants, not_fortran, f_expr = fcode(result.expr,
assign_to=assign_to, source_format='free', human=False)
for obj, v in sorted(constants, key=str):
t = get_default_datatype(obj)
declarations.append(
"%s, parameter :: %s = %s\n" % (t.fname, obj, v))
for obj in sorted(not_fortran, key=str):
t = get_default_datatype(obj)
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append("%s :: %s\n" % (t.fname, name))
code_lines.append("%s\n" % f_expr)
return declarations + code_lines
def _indent_code(self, codelines):
p = FCodePrinter({'source_format': 'free', 'human': False})
return p.indent_code(codelines)
def dump_f95(self, routines, f, prefix, header=True, empty=True):
# check that symbols are unique with ignorecase
for r in routines:
lowercase = set([str(x).lower() for x in r.variables])
orig_case = set([str(x) for x in r.variables])
if len(lowercase) < len(orig_case):
raise CodeGenError("Fortran ignores case. Got symbols: %s" %
(", ".join([str(var) for var in r.variables])))
self.dump_code(routines, f, prefix, header, empty)
dump_f95.extension = code_extension
dump_f95.__doc__ = CodeGen.dump_code.__doc__
def dump_h(self, routines, f, prefix, header=True, empty=True):
"""Writes the interface to a header file.
This file contains all the function declarations.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
if header:
print(''.join(self._get_header()), file=f)
if empty:
print(file=f)
# declaration of the function prototypes
for routine in routines:
prototype = self.get_interface(routine)
f.write(prototype)
if empty:
print(file=f)
dump_h.extension = interface_extension
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_f95, dump_h]
class OctaveCodeGen(CodeGen):
"""Generator for Octave code.
The .write() method inherited from CodeGen will output a code file
<prefix>.m.
Octave .m files usually contain one function. That function name should
match the filename (``prefix``). If you pass multiple ``name_expr`` pairs,
the latter ones are presumed to be private functions accessed by the
primary function.
You should only pass inputs to ``argument_sequence``: outputs are ordered
according to their order in ``name_expr``.
"""
code_extension = "m"
def routine(self, name, expr, argument_sequence, global_vars):
"""Specialized Routine creation for Octave."""
# FIXME: this is probably general enough for other high-level
# languages, perhaps it's the C/Fortran one that is specialized!
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
# local variables
local_vars = set([i.label for i in expressions.atoms(Idx)])
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
symbols = expressions.free_symbols - local_vars - global_vars
# Octave supports multiple return values
return_vals = []
for (i, expr) in enumerate(expressions):
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
symbol = out_arg
if isinstance(out_arg, Indexed):
symbol = out_arg.base.label
if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)):
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
return_vals.append(Result(expr, name=symbol, result_var=out_arg))
if not expr.has(symbol):
# this is a pure output: remove from the symbols list, so
# it doesn't become an input.
symbols.remove(symbol)
else:
# we have no name for this output
return_vals.append(Result(expr, name='out%d' % (i+1)))
# setup input argument list
arg_list = []
array_symbols = {}
for array in expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
arg_list.append(InputArgument(symbol))
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = dict([(x.name, x) for x in arg_list])
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
new_args.append(InputArgument(symbol))
arg_list = new_args
return Routine(name, arg_list, return_vals, local_vars, global_vars)
def _get_symbol(self, s):
"""Print the symbol appropriately."""
return octave_code(s).strip()
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
if line == '':
code_lines.append("%\n")
else:
code_lines.append("%% %s\n" % line)
return code_lines
def _preprocessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""Returns the opening statements of the routine."""
code_list = []
code_list.append("function ")
# Outputs
outs = []
for i, result in enumerate(routine.results):
if isinstance(result, Result):
# Note: name not result_var; want `y` not `y(i)` for Indexed
s = self._get_symbol(result.name)
else:
raise CodeGenError("unexpected object in Routine results")
outs.append(s)
if len(outs) > 1:
code_list.append("[" + (", ".join(outs)) + "]")
else:
code_list.append("".join(outs))
code_list.append(" = ")
# Inputs
args = []
for i, arg in enumerate(routine.arguments):
if isinstance(arg, (OutputArgument, InOutArgument)):
raise CodeGenError("Octave: invalid argument of type %s" %
str(type(arg)))
if isinstance(arg, InputArgument):
args.append("%s" % self._get_symbol(arg.name))
args = ", ".join(args)
code_list.append("%s(%s)\n" % (routine.name, args))
code_list = [ "".join(code_list) ]
return code_list
def _declare_arguments(self, routine):
return []
def _declare_locals(self, routine):
return []
def _get_routine_ending(self, routine):
return ["end\n"]
def _call_printer(self, routine):
declarations = []
code_lines = []
for i, result in enumerate(routine.results):
if isinstance(result, Result):
assign_to = result.result_var
else:
raise CodeGenError("unexpected object in Routine results")
constants, not_supported, oct_expr = octave_code(result.expr,
assign_to=assign_to, human=False)
for obj, v in sorted(constants, key=str):
declarations.append(
" %s = %s; %% constant\n" % (obj, v))
for obj in sorted(not_supported, key=str):
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append(
" %% unsupported: %s\n" % (name))
code_lines.append("%s\n" % (oct_expr))
return declarations + code_lines
def _indent_code(self, codelines):
# Note that indenting seems to happen twice, first
# statement-by-statement by OctavePrinter then again here.
p = OctaveCodePrinter({'human': False})
return p.indent_code(codelines)
def dump_m(self, routines, f, prefix, header=True, empty=True, inline=True):
# Note used to call self.dump_code() but we need more control for header
code_lines = self._preprocessor_statements(prefix)
for i, routine in enumerate(routines):
if i > 0:
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_opening(routine))
if i == 0:
if routine.name != prefix:
raise ValueError('Octave function name should match prefix')
if header:
code_lines.append("%" + prefix.upper() +
" Autogenerated by sympy\n")
code_lines.append(''.join(self._get_header()))
code_lines.extend(self._declare_arguments(routine))
code_lines.extend(self._declare_locals(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._call_printer(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_ending(routine))
code_lines = self._indent_code(''.join(code_lines))
if code_lines:
f.write(code_lines)
dump_m.extension = code_extension
dump_m.__doc__ = CodeGen.dump_code.__doc__
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_m]
def get_code_generator(language, project):
CodeGenClass = {"C": CCodeGen, "F95": FCodeGen,
"OCTAVE": OctaveCodeGen}.get(language.upper())
if CodeGenClass is None:
raise ValueError("Language '%s' is not supported." % language)
return CodeGenClass(project)
#
# Friendly functions
#
def codegen(name_expr, language, prefix=None, project="project",
to_files=False, header=True, empty=True, argument_sequence=None,
global_vars=None):
"""Generate source code for expressions in a given language.
Parameters
==========
name_expr : tuple, or list of tuples
A single (name, expression) tuple or a list of (name, expression)
tuples. Each tuple corresponds to a routine. If the expression is
an equality (an instance of class Equality) the left hand side is
considered an output argument. If the expression is an iterable, then
the routine will have multiple outputs.
language : string
A string that indicates the source code language. This is case
insensitive. Currently, 'C', 'F95' and 'Octave' are supported.
'Octave' generates code compatible with both Octave and Matlab.
prefix : string, optional
A prefix for the names of the files that contain the source code.
Language-dependent suffixes will be appended. If omitted, the name
of the first name_expr tuple is used.
project : string, optional
A project name, used for making unique preprocessor instructions.
[default: "project"]
to_files : bool, optional
When True, the code will be written to one or more files with the
given prefix, otherwise strings with the names and contents of
these files are returned. [default: False]
header : bool, optional
When True, a header is written on top of each source file.
[default: True]
empty : bool, optional
When True, empty lines are used to structure the code.
[default: True]
argument_sequence : iterable, optional
Sequence of arguments for the routine in a preferred order. A
CodeGenError is raised if required arguments are missing.
Redundant arguments are used without warning. If omitted,
arguments will be ordered alphabetically, but with all input
arguments first, and then output or in-out arguments.
global_vars : iterable, optional
Sequence of global variables used by the routine. Variables
listed here will not show up as function arguments.
Examples
========
>>> from sympy.utilities.codegen import codegen
>>> from sympy.abc import x, y, z
>>> [(c_name, c_code), (h_name, c_header)] = codegen(
... ("f", x+y*z), "C", "test", header=False, empty=False)
>>> print(c_name)
test.c
>>> print(c_code)
#include "test.h"
#include <math.h>
double f(double x, double y, double z) {
double f_result;
f_result = x + y*z;
return f_result;
}
>>> print(h_name)
test.h
>>> print(c_header)
#ifndef PROJECT__TEST__H
#define PROJECT__TEST__H
double f(double x, double y, double z);
#endif
Another example using Equality objects to give named outputs. Here the
filename (prefix) is taken from the first (name, expr) pair.
>>> from sympy.abc import f, g
>>> from sympy import Eq
>>> [(c_name, c_code), (h_name, c_header)] = codegen(
... [("myfcn", x + y), ("fcn2", [Eq(f, 2*x), Eq(g, y)])],
... "C", header=False, empty=False)
>>> print(c_name)
myfcn.c
>>> print(c_code)
#include "myfcn.h"
#include <math.h>
double myfcn(double x, double y) {
double myfcn_result;
myfcn_result = x + y;
return myfcn_result;
}
void fcn2(double x, double y, double *f, double *g) {
(*f) = 2*x;
(*g) = y;
}
If the generated function(s) will be part of a larger project where various
global variables have been defined, the 'global_vars' option can be used
to remove the specified variables from the function signature
>>> from sympy.utilities.codegen import codegen
>>> from sympy.abc import x, y, z
>>> [(f_name, f_code), header] = codegen(
... ("f", x+y*z), "F95", header=False, empty=False,
... argument_sequence=(x, y), global_vars=(z,))
>>> print(f_code)
REAL*8 function f(x, y)
implicit none
REAL*8, intent(in) :: x
REAL*8, intent(in) :: y
f = x + y*z
end function
"""
# Initialize the code generator.
code_gen = get_code_generator(language, project)
if isinstance(name_expr[0], string_types):
# single tuple is given, turn it into a singleton list with a tuple.
name_expr = [name_expr]
if prefix is None:
prefix = name_expr[0][0]
# Construct Routines appropriate for this code_gen from (name, expr) pairs.
routines = []
for name, expr in name_expr:
routines.append(code_gen.routine(name, expr, argument_sequence,
global_vars))
# Write the code.
return code_gen.write(routines, prefix, to_files, header, empty)
def make_routine(name, expr, argument_sequence=None,
global_vars=None, language="F95"):
"""A factory that makes an appropriate Routine from an expression.
Parameters
==========
name : string
The name of this routine in the generated code.
expr : expression or list/tuple of expressions
A SymPy expression that the Routine instance will represent. If
given a list or tuple of expressions, the routine will be
considered to have multiple return values and/or output arguments.
argument_sequence : list or tuple, optional
List arguments for the routine in a preferred order. If omitted,
the results are language dependent, for example, alphabetical order
or in the same order as the given expressions.
global_vars : iterable, optional
Sequence of global variables used by the routine. Variables
listed here will not show up as function arguments.
language : string, optional
Specify a target language. The Routine itself should be
language-agnostic but the precise way one is created, error
checking, etc depend on the language. [default: "F95"].
A decision about whether to use output arguments or return values is made
depending on both the language and the particular mathematical expressions.
For an expression of type Equality, the left hand side is typically made
into an OutputArgument (or perhaps an InOutArgument if appropriate).
Otherwise, typically, the calculated expression is made a return value of
the routine.
Examples
========
>>> from sympy.utilities.codegen import make_routine
>>> from sympy.abc import x, y, f, g
>>> from sympy import Eq
>>> r = make_routine('test', [Eq(f, 2*x), Eq(g, x + y)])
>>> [arg.result_var for arg in r.results]
[]
>>> [arg.name for arg in r.arguments]
[x, y, f, g]
>>> [arg.name for arg in r.result_variables]
[f, g]
>>> r.local_vars
set()
Another more complicated example with a mixture of specified and
automatically-assigned names. Also has Matrix output.
>>> from sympy import Matrix
>>> r = make_routine('fcn', [x*y, Eq(f, 1), Eq(g, x + g), Matrix([[x, 2]])])
>>> [arg.result_var for arg in r.results] # doctest: +SKIP
[result_5397460570204848505]
>>> [arg.expr for arg in r.results]
[x*y]
>>> [arg.name for arg in r.arguments] # doctest: +SKIP
[x, y, f, g, out_8598435338387848786]
We can examine the various arguments more closely:
>>> from sympy.utilities.codegen import (InputArgument, OutputArgument,
... InOutArgument)
>>> [a.name for a in r.arguments if isinstance(a, InputArgument)]
[x, y]
>>> [a.name for a in r.arguments if isinstance(a, OutputArgument)] # doctest: +SKIP
[f, out_8598435338387848786]
>>> [a.expr for a in r.arguments if isinstance(a, OutputArgument)]
[1, Matrix([[x, 2]])]
>>> [a.name for a in r.arguments if isinstance(a, InOutArgument)]
[g]
>>> [a.expr for a in r.arguments if isinstance(a, InOutArgument)]
[g + x]
"""
# initialize a new code generator
code_gen = get_code_generator(language, "nothingElseMatters")
return code_gen.routine(name, expr, argument_sequence, global_vars)
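# A minimal usage sketch for the Octave path described above; the expression,
# routine name, and prefix are illustrative assumptions, not part of the module:
if __name__ == "__main__":  # pragma: no cover
    from sympy.abc import x, y
    # One (name, expr) pair; prefix defaults to the routine name, so the
    # single generated file is "f.m" (Octave emits only a code file, no header).
    [(m_name, m_code)] = codegen(("f", x + y), "Octave", header=False, empty=False)
    print(m_name)   # -> f.m
    print(m_code)   # Octave source defining: function out1 = f(x, y)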
|
toolforger/sympy
|
sympy/utilities/codegen.py
|
Python
|
bsd-3-clause
| 55,923
|
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_utils import excutils
import paramiko
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts
import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant
from cinder.zonemanager import fc_san_lookup_service as fc_service
LOG = logging.getLogger(__name__)
class BrcdFCSanLookupService(fc_service.FCSanLookupService):
"""The SAN lookup service that talks to Brocade switches.
Version History:
1.0.0 - Initial version
"""
VERSION = "1.0.0"
def __init__(self, **kwargs):
"""Initializing the client."""
super(BrcdFCSanLookupService, self).__init__(**kwargs)
self.configuration = kwargs.get('configuration', None)
self.create_configuration()
self.client = self.create_ssh_client(**kwargs)
def create_configuration(self):
"""Configuration specific to SAN context values."""
config = self.configuration
fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')]
LOG.debug('Fabric Names: %s', fabric_names)
# There can be more than one SAN in the network and we need to
# get credentials for each for SAN context lookup later.
if len(fabric_names) > 0:
self.fabric_configs = fabric_opts.load_fabric_configurations(
fabric_names)
def create_ssh_client(self, **kwargs):
ssh_client = paramiko.SSHClient()
known_hosts_file = kwargs.get('known_hosts_file', None)
if known_hosts_file is None:
ssh_client.load_system_host_keys()
else:
ssh_client.load_host_keys(known_hosts_file)
missing_key_policy = kwargs.get('missing_key_policy', None)
if missing_key_policy is None:
missing_key_policy = paramiko.WarningPolicy()
ssh_client.set_missing_host_key_policy(missing_key_policy)
return ssh_client
def get_device_mapping_from_network(self,
initiator_wwn_list,
target_wwn_list):
"""Provides the initiator/target map for available SAN contexts.
Looks up nameserver of each fc SAN configured to find logged in devices
and returns a map of initiator and target port WWNs for each fabric.
:param initiator_wwn_list: List of initiator port WWN
:param target_wwn_list: List of target port WWN
:returns: List -- device wwn map in the following format
{
<San name>: {
'initiator_port_wwn_list':
('200000051e55a100', '200000051e55a121'..)
'target_port_wwn_list':
('100000051e55a100', '100000051e55a121'..)
}
}
:raises: Exception when connection to the fabric fails
"""
device_map = {}
formatted_target_list = []
formatted_initiator_list = []
fabric_map = {}
fabric_names = self.configuration.fc_fabric_names
fabrics = None
if not fabric_names:
raise exception.InvalidParameterValue(
err=_("Missing Fibre Channel SAN configuration "
"param - fc_fabric_names"))
fabrics = [x.strip() for x in fabric_names.split(',')]
LOG.debug("FC Fabric List: %s", fabrics)
if fabrics:
for t in target_wwn_list:
formatted_target_list.append(self.get_formatted_wwn(t))
for i in initiator_wwn_list:
formatted_initiator_list.append(self.
get_formatted_wwn(i))
for fabric_name in fabrics:
fabric_ip = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_address')
fabric_user = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_user')
fabric_pwd = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_password')
fabric_port = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_port')
# Get name server data from fabric and find the targets
# logged in
nsinfo = ''
try:
LOG.debug("Getting name server data for "
"fabric %s", fabric_ip)
self.client.connect(
fabric_ip, fabric_port, fabric_user, fabric_pwd)
nsinfo = self.get_nameserver_info()
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting name server info from"
" fabric %s") % fabric_ip)
except Exception as e:
msg = _("SSH connection failed "
"for %(fabric)s with error: %(err)s"
) % {'fabric': fabric_ip, 'err': e}
LOG.error(msg)
raise exception.FCSanLookupServiceException(message=msg)
finally:
self.client.close()
LOG.debug("Lookup service:nsinfo-%s", nsinfo)
LOG.debug("Lookup service:initiator list from "
"caller-%s", formatted_initiator_list)
LOG.debug("Lookup service:target list from "
"caller-%s", formatted_target_list)
visible_targets = filter(lambda x: x in formatted_target_list,
nsinfo)
visible_initiators = filter(lambda x: x in
formatted_initiator_list, nsinfo)
if visible_targets:
LOG.debug("Filtered targets is: %s", visible_targets)
# getting rid of the : before returning
for idx, elem in enumerate(visible_targets):
elem = str(elem).replace(':', '')
visible_targets[idx] = elem
else:
LOG.debug("No targets are in the nameserver for SAN %s",
fabric_name)
if visible_initiators:
# getting rid of the : before returning ~sk
for idx, elem in enumerate(visible_initiators):
elem = str(elem).replace(':', '')
visible_initiators[idx] = elem
else:
LOG.debug("No initiators are in the nameserver "
"for SAN %s", fabric_name)
fabric_map = {
'initiator_port_wwn_list': visible_initiators,
'target_port_wwn_list': visible_targets
}
device_map[fabric_name] = fabric_map
LOG.debug("Device map for SAN context: %s", device_map)
return device_map
def get_nameserver_info(self):
"""Get name server data from fabric.
This method will return the connected node port wwn list(local
and remote) for the given switch fabric
"""
cli_output = None
nsinfo_list = []
try:
cli_output = self._get_switch_data(zone_constant.NS_SHOW)
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting nsshow info for fabric"))
if cli_output:
nsinfo_list = self._parse_ns_output(cli_output)
try:
cli_output = self._get_switch_data(zone_constant.NS_CAM_SHOW)
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting nscamshow"))
if cli_output:
nsinfo_list.extend(self._parse_ns_output(cli_output))
LOG.debug("Connector returning nsinfo-%s", nsinfo_list)
return nsinfo_list
def _get_switch_data(self, cmd):
stdin, stdout, stderr = None, None, None
utils.check_ssh_injection([cmd])
try:
stdin, stdout, stderr = self.client.exec_command(cmd)
switch_data = stdout.readlines()
except paramiko.SSHException as e:
msg = (_("SSH Command failed with error '%(err)s' "
"'%(command)s'") % {'err': e,
'command': cmd})
LOG.error(msg)
raise exception.FCSanLookupServiceException(message=msg)
finally:
if (stdin):
stdin.flush()
stdin.close()
if (stdout):
stdout.close()
if (stderr):
stderr.close()
return switch_data
def _parse_ns_output(self, switch_data):
"""Parses name server data.
Parses nameserver raw data and adds the device port wwns to the list
:returns list of device port wwn from ns info
"""
nsinfo_list = []
for line in switch_data:
if not(" NL " in line or " N " in line):
continue
linesplit = line.split(';')
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
nsinfo_list.append(node_port_wwn)
else:
msg = _("Malformed nameserver string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return nsinfo_list
def get_formatted_wwn(self, wwn_str):
"""Utility API that formats WWN to insert ':'."""
if (len(wwn_str) != 16):
return wwn_str.lower()
else:
return (':'.join([wwn_str[i:i + 2]
for i in range(0, len(wwn_str), 2)])).lower()
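# A minimal sketch of the WWN formatting contract above, extracted as a free
# function so it runs without instantiating the service (which needs fabric
# configuration); the sample WWN is an illustrative assumption:
def _formatted_wwn_example(wwn_str):
    # Mirrors BrcdFCSanLookupService.get_formatted_wwn: a bare 16-hex-digit
    # WWN gains ':' separators every two characters and is lower-cased; any
    # other length is returned lower-cased unchanged.
    if len(wwn_str) != 16:
        return wwn_str.lower()
    return (':'.join([wwn_str[i:i + 2]
                      for i in range(0, len(wwn_str), 2)])).lower()
# _formatted_wwn_example('200000051E55A100') == '20:00:00:05:1e:55:a1:00'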
|
Akrog/cinder
|
cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py
|
Python
|
apache-2.0
| 10,817
|
# coding: utf-8
'''
------------------------------------------------------------------------------
Copyright 2015 - 2017 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
----------------------------------------------------------------------------
==================================================
Configuration.py
--------------------------------------------------
requirements:
* ArcGIS Desktop 10.X+ or ArcGIS Pro 1.X+
* Python 2.7 or Python 3.4
author: ArcGIS Solutions
company: Esri
==================================================
description:
Configuration flags/paths for unit tests
data URL pattern: r"http://www.arcgis.com/sharing/content/items/XXX/data", where XXX is the item GUID
==================================================
history:
10/2015 - JH - Original coding
07/2017 - CM - Reorg/refactor
==================================================
'''
import logging
import os
import sys
DEBUG = True # flag to enable extra messaging while debugging tests
#NOTE: Logger and Platform are initialized in TestRunner's main() or Configuration.GetLogger/Platform
Logger = None
LoggerFile = None
Platform = None
PLATFORM_PRO = "PRO"
PLATFORM_DESKTOP = "DESKTOP"
''' Testing paths '''
currentPath = os.path.dirname(__file__) # should go to .\solutions-geoprocessing-toolbox\utils\test
repoPath = os.path.dirname(os.path.dirname(currentPath))
''' Download path '''
testDataPath = os.path.normpath(os.path.join(currentPath, r"test_data")) # should go to .\solutions-geoprocessing-toolbox\utils\test\test_data
''' Log Path: the folder where the log files go wild and multiply '''
logPath = os.path.normpath(os.path.join(currentPath, r"log")) # should go to .\solutions-geoprocessing-toolbox\utils\test\log
'''Distance To Assets paths'''
distancetoAssetsToolboxPath = os.path.normpath(os.path.join(currentPath, r"../../distance_to_assets/Distance To Assets"))
distanceToAssetsDataPath = os.path.normpath(os.path.join(testDataPath, r"DistanceToAssets"))
distanceToAssetsInputGDB = os.path.join(distanceToAssetsDataPath, "DistancetoAssetsTestData/DistanceToAssets.gdb")
distanceToAssetsOutputGDB = os.path.join(currentPath, r"../../distance_to_assets/DistanceToAssets.gdb")
distanceToAssetsInputNDGDB=os.path.join(distanceToAssetsDataPath, "DistancetoAssetsTestData/SanFrancisco.gdb")
distanceToAssetsURL = r"http://www.arcgis.com/sharing/content/items/700e44eb3e114c098818ea71f7ad72b6/data"
''' Clearing Operations - Test Data/Paths '''
clearingOperationsToolboxPath = os.path.normpath(os.path.join(currentPath,
r"../../clearing_operations/ClearingOperationsTools.pyt"))
clearingOperationsPath = os.path.normpath(os.path.join(testDataPath, r"clearing_operations"))
clearingOperationsURL = r"http://www.arcgis.com/sharing/content/items/198f01e263474c209198c9c3c3586287/data"
clearingOperationsInputGDB = os.path.join(clearingOperationsPath, "test_clearing_operations.gdb")
''' GriddedReferenceGraphic = Test Data/Paths '''
grgToolboxPath = os.path.normpath(os.path.join(currentPath,
r"../../clearing_operations/GriddedReferenceGraphicTools.pyt"))
grgPath = os.path.normpath(os.path.join(testDataPath,r"clearing_operations"))
grgURL = r"http://www.arcgis.com/sharing/content/items/bb592332393b4443817f5986af611e3d/data"
grgInputGDB = os.path.join(grgPath, r"grg_test_data.gdb")
''' Geonames - Test Data/Paths '''
geonamesToolboxPath = os.path.normpath(os.path.join(currentPath, r"../../geonames/Geonames Tools"))
geonamesDataPath = os.path.normpath(os.path.join(testDataPath, r"geonames"))
geonamesInputGDB = os.path.join(geonamesDataPath, "Geonames.gdb")
geonamesURL = r"http://www.arcgis.com/sharing/content/items/afc766d5276648ab80aa85b819af1ffc/data"
''' Military Features - Test Data/Paths '''
militaryFeaturesToolboxPath = os.path.normpath(os.path.join(currentPath, r"../../military_features/Military Features Tools"))
militaryFeaturesDataPath = os.path.normpath(os.path.join(testDataPath, r"military_features"))
militaryFeaturesGeodatabasesPath = os.path.normpath(os.path.join(militaryFeaturesDataPath, r"data/mil2525c/testdata/geodatabases"))
militaryFeaturesMessagesPath = os.path.join(militaryFeaturesDataPath, r"data/mil2525c/testdata/messagefiles")
militaryFeaturesInputGDB = os.path.join(militaryFeaturesGeodatabasesPath, r"test_inputs.gdb")
militaryFeaturesInputGDBNonMilitaryFeatures = os.path.join(militaryFeaturesGeodatabasesPath, "test_inputs_non_military_features.gdb")
militaryFeaturesBlankMilFeaturesGDB = os.path.join(militaryFeaturesGeodatabasesPath, "MilitaryOverlay10.1.1-Blank.gdb")
militaryFeaturesURL = r"http://www.arcgis.com/sharing/content/items/3a18f91b34d14a5aa72aa67f32c97497/data"
''' Incident Analysis - Test Data/Paths '''
incidentToolboxPath = os.path.normpath(os.path.join(currentPath, r"../../incident_analysis/Incident Analysis Tools"))
incidentAnalysisDataPath = os.path.normpath(os.path.join(testDataPath, r"incident_analysis"))
incidentURL = "http://www.arcgis.com/sharing/content/items/528faf6b23154b04a8268b33196fa9ad/data"
incidentInputGDB = os.path.join(incidentAnalysisDataPath, "test_incident_analysis_tools.gdb")
incidentResultGDB = os.path.join(incidentAnalysisDataPath, "test_incident_analysis_results.gdb")
''' Sun Position Analysis - Test Data/Paths '''
sunPositionAnalysisToolboxPath = os.path.normpath(os.path.join(currentPath, r"../../sun_position_analysis/Sun Position Analysis Tools"))
sunPositionAnalysisDataPath = os.path.normpath(os.path.join(testDataPath, r"sun_position_analysis"))
sunPositionAnalysisURL = r"http://www.arcgis.com/sharing/content/items/bf6a04b4c9a3447b91e9c0b4074ca1e4/data"
sunPositionInputGDB = os.path.join(sunPositionAnalysisDataPath, "test_sun_position.gdb")
''' MAoT - Test Data/Paths '''
maotToolboxPath = os.path.normpath(os.path.join(currentPath, r"../../military_aspects_of_terrain/Military Aspects of Terrain Tools"))
maotPath = os.path.normpath(os.path.join(testDataPath, r"maot"))
maotURL = r"http://www.arcgis.com/sharing/content/items/127bff2341694342a6df884aaa51237e/data"
''' MAoW - Test Data/Paths '''
maowToolboxPath = os.path.normpath(os.path.join(currentPath, r"../../military_aspects_of_weather/Military Aspects of Weather Tools"))
maowPath = os.path.normpath(os.path.join(testDataPath, r"maow"))
maowURL = "http://www.arcgis.com/sharing/content/items/74eeb356c7dd4422bf52f36f38bb8a9b/data"
def checkTokenizeWorkaround() :
#################################################
# WORKAROUND: for Python 3 choking on reading some binary files (with nulls)
# For example in ArcPy when loading a toolbox when run from command line
# Get error like: detect_encoding...tokenize.py...find_cookie...raise SyntaxError(msg)
# ...SyntaxError: invalid or missing encoding declaration for '...XXXX.tbx'
# Workaround borrowed/used from:
# https://github.com/habnabit/passacre/commit/2ea05ba94eab2d26951ae7b4b51abf53132b20f0
# Code should work with Python 2, but only do workaround for Python 3
# Workaround needed in Versions 3.0 - 3.5.2
if sys.version_info >= (3, 0) and sys.version_info < (3, 5, 3):
import tokenize
try:
_detect_encoding = tokenize.detect_encoding
except AttributeError:
pass
else:
def detect_encoding(readline):
try:
return _detect_encoding(readline)
except SyntaxError:
return 'latin-1', []
tokenize.detect_encoding = detect_encoding
## END WORKAROUND
#################################################
def GetLogger(logLevel = logging.DEBUG) :
global Logger
if Logger is None:
import UnitTestUtilities
logName = UnitTestUtilities.getLoggerName()
Logger = UnitTestUtilities.initializeLogger(logName, logLevel)
return Logger
def GetPlatform() :
global Platform
if Platform is None :
import arcpy
Platform = PLATFORM_DESKTOP
installInfo = arcpy.GetInstallInfo()
if installInfo['ProductName'] == 'ArcGISPro':
Platform = PLATFORM_PRO
checkTokenizeWorkaround()
return Platform
def GetToolboxSuffix() :
platform = GetPlatform()
# default to ArcMap
suffix = "_arcmap.tbx"
if Platform == PLATFORM_PRO :
suffix = "_pro.tbx"
return suffix
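# A minimal usage sketch, assuming a hypothetical test module; the toolbox
# base name "MyTools" is an illustrative assumption (running it requires
# arcpy, so it is shown here as comments only):
#
#   import Configuration
#   logger = Configuration.GetLogger()          # initializes log file on first call
#   suffix = Configuration.GetToolboxSuffix()   # "_arcmap.tbx" or "_pro.tbx"
#   toolboxPath = os.path.join(Configuration.repoPath, "MyTools" + suffix)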
|
Esri/solutions-geoprocessing-toolbox
|
utils/test/Configuration.py
|
Python
|
apache-2.0
| 8,923
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.Element import Element
class Status(Element):
"""Current status information relevant to an entity.
"""
def __init__(self, reason='', remark='', value='', dateTime='', *args, **kw_args):
"""Initialises a new 'Status' instance.
@param reason: Reason code or explanation for why an object went to the current status 'value'.
@param remark: Pertinent information regarding the current 'value', as free form text.
@param value: Status value at 'dateTime'; prior status changes may have been kept in instances of ActivityRecords associated with the object to which this Status applies.
@param dateTime: Date and time for which status 'value' applies.
"""
#: Reason code or explanation for why an object went to the current status 'value'.
self.reason = reason
#: Pertinent information regarding the current 'value', as free form text.
self.remark = remark
#: Status value at 'dateTime'; prior status changes may have been kept in instances of ActivityRecords associated with the object to which this Status applies.
self.value = value
#: Date and time for which status 'value' applies.
self.dateTime = dateTime
super(Status, self).__init__(*args, **kw_args)
_attrs = ["reason", "remark", "value", "dateTime"]
_attr_types = {"reason": str, "remark": str, "value": str, "dateTime": str}
_defaults = {"reason": '', "remark": '', "value": '', "dateTime": ''}
_enums = {}
_refs = []
_many_refs = []
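# A minimal usage sketch; the attribute values below are illustrative
# assumptions, not CIM-mandated values:
#
#   s = Status(reason='scheduled maintenance', remark='breaker opened manually',
#              value='outOfService', dateTime='2011-06-01T08:00:00Z')
#   assert s.value == 'outOfService'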
|
rwl/PyCIM
|
CIM14/IEC61968/Common/Status.py
|
Python
|
mit
| 2,668
|
# import numpy as np
# import scipy as sp
# import matplotlib as mpl
# import copy
class evaluator:
def __init__(self, num):
self.num = num
self.anstag = [-1 for _ in xrange(num)]
self.restag = [-1 for _ in xrange(num)]
self.precision = 0
self.recall = 0
def load_answer_clusters(self, c):
cnum = len(c)
for curtag in xrange(cnum):
curc = c[curtag]
for item in curc:
self.anstag[item] = curtag
def load_answer_labeled(self, lb):
self.anstag = lb
def _evaluate(self):
tp = 0 # true positive
fp = 0 # false positive
fn = 0 # false negative
for i in xrange(self.num):
for j in xrange(i+1, self.num):
if (self.anstag[i] == self.anstag[j]):
if (self.restag[i] == self.restag[j]):
tp += 1
else:
fn += 1
elif (self.restag[i] == self.restag[j]):
fp += 1
self.precision = (tp + .0) / (tp + fp)
self.recall = (tp + .0) / (tp + fn)
print 'precision: ' + str(self.precision)
print 'recall: ' + str(self.recall)
def evaluate_clusters(self, c):
cnum = len(c)
for curtag in xrange(cnum):
curc = c[curtag]
for item in curc:
self.restag[item] = curtag
self._evaluate()
def evaluate_labeled(self, lb):
self.restag = lb
self._evaluate()
def get_fmeasure(self, beta):
        # F-beta: (1 + beta^2) * P * R / (beta^2 * P + R); the whole
        # denominator must be parenthesized.
        fm = (beta * beta + 1) * self.precision * self.recall / (beta * beta * self.precision + self.recall)
print 'f-' + str(beta) + ' measure: ' + str(fm)
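# A minimal usage sketch (Python 2, matching the module); the labels are
# illustrative: gold clustering {0,1},{2,3} versus a system that merges all
# four items, giving 2 true-positive pairs, 4 false positives, 0 false
# negatives, hence pairwise precision 1/3, recall 1.0, and F1 = 0.5.
if __name__ == '__main__':
    ev = evaluator(4)
    ev.load_answer_labeled([0, 0, 1, 1])  # gold labels
    ev.evaluate_labeled([0, 0, 0, 0])     # predicted labels
    ev.get_fmeasure(1)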
|
usc-isi-i2/lsh-linking
|
swoosh/evaluator.py
|
Python
|
apache-2.0
| 1,418
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
openstates/openstates.org
|
manage.py
|
Python
|
mit
| 809
|
import requests
import re
from bs4 import BeautifulSoup
from distance import levenshtein
from mtgreatest.rdb import Cursor, serialize
NUM_NORM_NAMES = 4
NORM_NAMES = ['norm_name_{}'.format(num) for num in range(NUM_NORM_NAMES)]
def fix_name_and_country(name, country):
if name is None:
return (name, country)
part = name.rpartition('[')
if len(part[0]):
return (part[0][:-1], part[1]+part[2])
else:
return (name, country)
def normalize_raw_name(raw_name):
raw_name = raw_name.upper()
sleep_in_patterns = ['ZVIP', 'ZZVIP', 'ZZZVIP', 'ZZ', 'ZZZ', 'ZZSIS', 'ZZFIX', 'ZZZ_', 'ZZZZZ', 'VIP', 'VIP_', 'AAVIP', 'AAA VIP -']
for pattern in sleep_in_patterns:
if raw_name.startswith(pattern) and not raw_name.startswith('VIPPERMAN'):
raw_name = raw_name.rpartition(pattern)[2]
elif raw_name.endswith(pattern):
raw_name = raw_name.partition(pattern)[0]
raw_name = raw_name.strip(' ()1234567890')
last_first = list(raw_name.partition(','))
last_first[0] = last_first[0].partition('[')[0].rstrip(' *').strip(' *')
last_first[2] = last_first[2].rpartition('SEE SK ')[2].strip(' *').rstrip(' *') #why?? what is this??
normalized_name = last_first[0]
if len(last_first[2]):
normalized_name += ', ' + last_first[2]
return normalized_name
def normalize_full_raw_name(full_raw_name):
return '/'.join([normalize_raw_name(name) for name in full_raw_name.split('/')])
def max_name_list(names1, names2):
ret_names = []
for name in names1:
if not any([name2.startswith(name) for name2 in names2]):
ret_names.append(name)
for name in names2:
if not any([name1.startswith(name) and len(name1)>len(name) for name1 in names1]):
ret_names.append(name)
return ret_names
def normalized_event_names(event_id):
cursor = Cursor()
num_rounds = cursor.execute("select max(round_num) from results_raw_table where event_id = '{}'".format(event_id))[0][0]
all_round_names = []
for round_num in range(num_rounds):
names = cursor.execute("select distinct p1_name_raw from results_raw_table where event_id = '{}' and round_num = {}".format(event_id, round_num))
names += cursor.execute("select distinct p2_name_raw from results_raw_table where event_id = '{}' and round_num = {}".format(event_id, round_num))
all_round_names.append(list(set([normalize_raw_name(item) for sublist in names for item in sublist if '* BYE *' not in item and 'Awarded Bye' not in item])))
cursor.close()
return reduce(max_name_list, all_round_names, [])
def populate_event_player_table(event_names, event_id):
cursor = Cursor()
cursor.execute("delete from event_player_table where event_id = {}".format(serialize(event_id)))
query = "select player_id, "
query += ', '.join(NORM_NAMES)
query += ' from player_table where '
or_ = False
for name in event_names:
if or_:
query += "or "
or_ = True
join_str = ' like {}'.format(serialize(name + '%'))
query += (join_str + ' or ').join(NORM_NAMES) + join_str
player_table_names = cursor.execute(query)
found_names = []
new_names = []
for name in event_names:
found = False
for idx, row in enumerate(player_table_names):
if name in row:
if found:
raise ValueError('two matches found for name ' + name)
found_names.append({'player_id':row[0], 'normalized_name':name, 'event_id':event_id})
found = True
if not found:
new_names.append(name)
player_id = cursor.execute("select max(player_id) from player_table")[0][0] or 1
new_players = []
for name in new_names:
player_id += 1
new_players.append({'player_id':player_id, 'norm_name_1':name, 'event_added':event_id, 'last_name':name.partition(',')[0],
'first_name':name.partition(', ')[2]})
found_names.append({'player_id':player_id, 'normalized_name':name, 'event_id':event_id})
cursor.insert('event_player_table', found_names)
cursor.insert('player_table', new_players)
cursor.close()
def remove_header_row():
query = "delete from results_raw_table where table_id like '%table%'"
cursor = Cursor()
cursor.execute(query)
cursor.close()
def combine_players(norm_name_1, norm_name_2):
query_template = "select * from player_table where "
query_template += ' or '.join([name + ' like {0}' for name in NORM_NAMES])
cursor = Cursor()
player_infos = [cursor.execute(query_template.format(serialize(name))) for name in (norm_name_1, norm_name_2)]
assert len(player_infos[0]) == 1 and len(player_infos[1]) == 1, "multiple or no matches found for a name"
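# A minimal sketch of the name normalization above; the raw strings are
# illustrative assumptions in the shape of scraped pairing names:
#
#   normalize_raw_name('ZZVIP Smith, John (3)')        -> 'SMITH, JOHN'
#   normalize_raw_name('Vipperman, Amy')                -> 'VIPPERMAN, AMY'  (VIP prefix not stripped)
#   normalize_full_raw_name('Smith, John/Doe, Jane')    -> 'SMITH, JOHN/DOE, JANE'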
|
oelarnes/mtgreatest
|
mtgreatest-py/mtgreatest/scrape/players.py
|
Python
|
mit
| 4,550
|
"""
Django settings for oauth_client project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7vmuu4^cpq(id6dm05m@*(a=0inoc0@3uihlj2)0_ef-!4=@ok'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.pointless',
'oauth2_provider',
'corsheaders',
'social.apps.django_app.default'
)
AUTHENTICATION_BACKENDS = (
#'social.backends.google.GoogleOAuth2',
'oauth_client.dsoauth2.DSOAuth2',
'django.contrib.auth.backends.ModelBackend'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'oauth_client.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect'
],
'allowed_include_roots': (BASE_DIR,)
},
},
]
WSGI_APPLICATION = 'oauth_client.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static_dsoauth/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'static_serve')
SITE_ID = 1
#TODO is this needed?
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = 'login'
CORS_ORIGIN_ALLOW_ALL = True
#Python Social Auth settings
#client redirect url: http://localhost:8000/complete/dsoauth2/
SOCIAL_AUTH_DSOAUTH2_KEY='dvJQD6aVAuG-!NspHuci4ktpw9TnldOz?skbUlt9'
SOCIAL_AUTH_DSOAUTH2_SECRET='wHO?3fX=@geWDKJt1lug@C2IF9P=Z=OKemR3Z@qpBpDJDnYqfnr@1ZkhwBqx3weM5CMyiK=U4.jcUzhc_12hqFCRvWw3WOTQrUjL-nORgJ-iQ.?FSgGu:I5IdK_cFND2'
#SOCIAL_AUTH_DSOAUTH2_KEY='shayne'
#SOCIAL_AUTH_DSOAUTH2_SECRET='wtf'
#SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'home'
#SOCIAL_AUTH_LOGIN_URL = 'login'
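# A minimal sketch of the resulting OAuth round-trip, assuming the default URL
# patterns from social.apps.django_app.urls are included in ROOT_URLCONF:
#
#   GET /login/dsoauth2/     -> redirect to the provider's authorize endpoint
#   provider -> GET /complete/dsoauth2/?code=...   (the redirect URL noted above)
#   backend exchanges the code using SOCIAL_AUTH_DSOAUTH2_KEY/SECRET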
|
brollins90/dsOauth
|
oauth_client/oauth_client/settings.py
|
Python
|
mit
| 3,891
|
from bopy.mcmctools.emceetools import emcee_general_run
__all__ = ['emceetools']
|
hypergravity/bopy
|
bopy/mcmctools/__init__.py
|
Python
|
bsd-3-clause
| 82
|
#####################################################################
# s12f18.py
#
# (c) Copyright 2021, Benjamin Parzella. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#####################################################################
"""Class for stream 12 function 18."""
from secsgem.secs.functions.base import SecsStreamFunction
from secsgem.secs.data_items import MID, IDTYP, XYPOS, BINLT
class SecsS12F18(SecsStreamFunction):
"""
map data type 3.
**Data Items**
- :class:`MID <secsgem.secs.data_items.MID>`
- :class:`IDTYP <secsgem.secs.data_items.IDTYP>`
- :class:`XYPOS <secsgem.secs.data_items.XYPOS>`
- :class:`BINLT <secsgem.secs.data_items.BINLT>`
**Structure**::
>>> import secsgem.secs
>>> secsgem.secs.functions.SecsS12F18
{
MID: A/B[80]
IDTYP: B[1]
DATA: [
{
XYPOS: I1/I2/I4/I8[2]
BINLT: U1/A
}
...
]
}
**Example**::
>>> import secsgem.secs
>>> secsgem.secs.functions.SecsS12F18({ \
"MID": "materialID", \
"IDTYP": secsgem.secs.data_items.IDTYP.WAFER, \
"DATA": [ \
{"XYPOS": [1, 2], "BINLT": [1, 2, 3, 4]}, \
{"XYPOS": [3, 4], "BINLT": [5, 6, 7, 8]}]})
S12F18
<L [3]
<A "materialID">
<B 0x0>
<L [2]
<L [2]
<I1 1 2 >
<U1 1 2 3 4 >
>
<L [2]
<I1 3 4 >
<U1 5 6 7 8 >
>
>
> .
:param value: parameters for this function (see example)
:type value: dict
"""
_stream = 12
_function = 18
_data_format = [
MID,
IDTYP,
[
[
XYPOS,
BINLT
]
]
]
_to_host = False
_to_equipment = True
_has_reply = False
_is_reply_required = False
_is_multi_block = True
|
bparzella/secsgem
|
secsgem/secs/functions/s12f18.py
|
Python
|
lgpl-2.1
| 2,601
|
#!/usr/bin/python
from scipy.stats import cauchy
import random
import math
import csv
import numpy as np
import netCDF4 as nc
import argparse
import lvDiagram
'''
parser = argparse.ArgumentParser()
parser.add_argument("numberRegions", type=int,
help="Number of HII Regions to Populate in Model")
args = parser.parse_args()
numRegions = args.numberRegions # Prompt User for number of Hii regions
'''
def defaults():
ff=nc.Dataset('/Users/Marvin/Research/data/larson_radius_hypercube.ncdf') # Import data cube from Tremblin et. al. 2014
region = 1 # Start count of regions from 1 to NumRegions
HiiList = [] # Initialize list to store Hii data
# The following definitions determine which structures will
# be present in the galaxy and what their relative proportion is.
# See Hughes et al. ApJ April 2013 for relative proportion in M51
diffuse = True
bar = True
ThreekpcArm = True
ring = True
spiral = True
diffusePercent = 25
barPercent = 10
ThreekpcArmPercent = 10
ringPercent = 10
spiralPercent = 100 - (diffusePercent + barPercent +
ThreekpcArmPercent + ringPercent)
numRegions = 10000
# Determine Structure of Galaxy
extentOfBar = 4.4 # Length of bar in kiloparsecs.
# See Benjamin et al. ApJ Sept 2005.
cutoff = 3.41 # alternate value: 4.1. Model out to (cutoff)x the bar length.
# Max value ~6.86 due to model limitation (Tremblin, below)
galRange = extentOfBar*cutoff
sunPos = 8.4 # Distance of Sun from GC
sunHeight = 0.02 # Distance Sun is above galactic plane (kpc) CITE SOURCE!
circRot = 220 # Solar circular rotation speed. Carroll & Ostlie (24.36)
v0 = 0 # Initial velocity of source. Only relevant to 3kpc arm.
galRot = 44.0*math.pi/180.0 # Rotates entire galaxy by (x) degrees.
# See Benjamin et al. ApJ Sept 2005.
random.seed( 1 ) # Seed random number generator. (ARBITRARY)
numSpirals = 4 # Determines Number of Spiral arms
pitchAngle = 11*math.pi/180 # Determines curvature of arms
# 7.3 --> See Wu et al. A&A April 2014 for pitch angle estimate in Sagitarrius arm
warpParam = math.pi/2 # Determines degree of galactic warp
# DEFINE/CONVERT TO AS ANGLE?
warpHeight = 0.08 # BY INSPECTION
maxSpiralRevolutions = 1 # Range for number of spiral revs. (ARBITRARY)
maxCluster = 10 # Maximum number of regions in a given cluster (ARBITRARY)
avgCluster = 4 # Most commonly found number of regions in cluster (ARBITRARY)
clusterRange = 20/1000 # Sets clustered regions to be within (x) pc of each other
# See Motte et al. ApJ 2002
sigma = 0.3 # Sets FWHM of spiral arms to (x) kpc
# 0.2 See Wu et al. A&A April 2014 for deviation
# from spiral estimate in Sagitarrius arm.
zmax = .15 # Sets max height in z as +/- (x) kpc
gamma = 0 # alternate value: 0.01365. Sets spread of Cauchy-Lorentz Z-distribution of regions
alpha = 0 # Sets HII region drop off as r^-alpha(after bar)
# Determine Mass of Individual Regions
# In Units of Stellar Mass. Sets lower bound for ionizing star
lowerMass = 16
upperMass = 90
barAngle=0 # THIS IS NOT USED IN THE CODE. IT DEFINES THE SUN'S STARTING POSITION
# Output parameters
(galRad,xRot,yRot,z,mass,lum,age,radius)=(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0)
(diffLum,barLum,ThreekpcLum,ringLum,sprLum,totLum)=(0.0,0.0,0.0,0.0,0.0,0.0)
(diffCount,barCount,ThreekpcCount,ringCount,sprCount,totCount)=(0,0,0,0,0,0)
def Simulate(numRegions=numRegions, alpha=alpha, pitchAngle=pitchAngle, numSpirals=numSpirals, extentOfBar=extentOfBar, \
barAngle=barAngle, scaleHeight=gamma, warpHeight=warpHeight):
'''
To display the default values for all parameters, call >> help(defaults)
'''
while region <= numRegions :
v0 = 0
i = 1
# Reset i each time to force a region to be populated
# if all requirements are met.
selectionParam = random.random()
# Determines if Hii region is kept or thrown away.
# Forces population of regions to follow linear trend
# to end of bar and power law drop-off after bar.
numCluster = 1
numClusterTot = random.randrange(1,maxCluster,1)
whereIsRegion = random.randrange(1, diffusePercent + barPercent
+ ringPercent + ThreekpcArmPercent
+ spiralPercent, 1)
# Determines location of one individual region.
# HII Region will be randomly populated in Galaxy, but will not be
# be placed in central region (within bar radius).
if (whereIsRegion <= diffusePercent) and (diffuse == True) :
while i != 0 : # This loop forces an Hii region to be populated diffusely
x = random.gauss(0,galRange/2) # Sets diffuse population to have
# FWHM of galRange/2
y = random.gauss(0,galRange/2)
theta = math.atan(x/y)
galWarp = warpHeight*math.tan(x/galRange*warpParam)*math.cos(y/galRange*warpParam)
# Models galactic warp with Tan(x)*Cos(y)
zPos = random.uniform(-zmax,zmax)
z = galWarp + zPos
# Produces Cauchy-Lorentz z distribution
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (abs(x) > extentOfBar + random.gauss(0,sigma)) \
and (galRad < galRange + random.gauss(0,sigma)) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)):
region += numClusterTot # Increase region count
i = 0 # Escape loop
elif (abs(x) < extentOfBar + random.gauss(0,sigma)) \
and (extentOfBar < galRad < galRange + random.gauss(0,sigma)) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)):
region += numClusterTot # Increase region count
i = 0 # Escape loop
# Populate in Bar
elif (whereIsRegion > diffusePercent) \
and (whereIsRegion <= (diffusePercent + barPercent)) \
and (bar == True) :
while i != 0 : # This loop forces an Hii region to be populated in bar
x = random.uniform(-extentOfBar,extentOfBar) # Returns random number between (-extentOfBar,extentOfBar)
y = random.gauss(0,sigma) # Sets thickness of bar to (sigma) kpc
theta = math.atan(x/y)
galWarp = warpHeight*math.tan(x/galRange*warpParam)*math.cos(y/galRange*warpParam)
# Models galactic warp with Tan(x)*Cos(y)
zPos = random.uniform(-zmax,zmax)
z = galWarp + zPos
# Produces Cauchy-Lorentz z distribution
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (selectionParam < galRad/extentOfBar) \
and (galRad < galRange) :
region += numClusterTot # Increase region count
i = 0 # Escape loop
# Populate in 3 Kiloparsec Arm
elif (whereIsRegion > (diffusePercent + barPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (ThreekpcArm == True) :
yRingInt = extentOfBar/2
ySign = random.randrange(-1,1)
while i != 0 : # This loop forces an Hii region to be populated in ring
xCart = random.uniform(-extentOfBar,extentOfBar)
yCart = math.copysign(yRingInt*pow(1-pow(xCart,2)/pow(extentOfBar,2),.5),ySign) # Produces ring structure
x = xCart + random.gauss(0, sigma) # Gaussian distribution around ring
y = yCart + random.gauss(0, sigma)
theta = math.atan(x/y)
zPos = random.uniform(-zmax,zmax)
galWarp = warpHeight*math.tan(x/galRange*warpParam)*math.cos(y/galRange*warpParam) # Models galactic warp with Tan(x)*Cos(y)
z = galWarp + zPos # EDIT TO Produces Cauchy-Lorentz z distribution
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (selectionParam < galRad/extentOfBar) \
and (galRad < galRange) :
v0 = 56 # Expansion of 3kpc arm
region += numClusterTot # Increase region count
i = 0 # Escape loop
# Populate in Ring
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent)) \
and (ring == True) :
yRingInt = extentOfBar
ySign = random.randrange(-1,1)
while i != 0 : # This loop forces an Hii region to be populated in ring
xCart = random.uniform(-extentOfBar,extentOfBar)
yCart = math.copysign(yRingInt*pow(1-pow(xCart,2)/pow(extentOfBar,2),.5),ySign) # Produces ring structure
x = xCart + random.gauss(0, sigma) # Gaussian distribution around ring
y = yCart + random.gauss(0, sigma)
theta = math.atan(x/y)
zPos = random.uniform(-zmax,zmax)
galWarp = warpHeight*math.tan(x/galRange*warpParam)*math.cos(y/galRange*warpParam) # Models galactic warp with Tan(x)*Cos(y)
z = galWarp + zPos
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (selectionParam < galRad/extentOfBar) \
and (galRad < galRange) :
region += numClusterTot # Increase region count
i = 0 # Escape loop
# Populate in One of Spirals
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent + spiralPercent)) \
and (spiral == True):
while i != 0 : # This loop forces an Hii region to be populated in arms
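            # Log-spiral arm: r = extentOfBar * exp(b*theta), where b is reduced
            # by 30% for the second arm pair (whichArm >= 2), and odd-numbered
            # arms are rotated by pi relative to their partners.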
whichArm = random.randint(0,numSpirals-1)
theta = random.uniform(0,2*np.pi*maxSpiralRevolutions)
            r = extentOfBar*math.exp(pitchAngle*(1 - .3*math.floor(whichArm/2))*theta) # (.3 per-arm-pair pitch reduction is arbitrary)
xCart = r*math.cos(theta + np.pi*math.fmod(whichArm,2))
yCart = r*math.sin(theta + np.pi*math.fmod(whichArm,2))
x = xCart + random.gauss(0,sigma) # Gaussian distribution around spiral
y = yCart + random.gauss(0,sigma)
galWarp = warpHeight*math.tan(x/galRange*warpParam)*math.cos(y/galRange*warpParam) # Models galactic warp with Tan(x)*Cos(y)
zPos = random.uniform(-zmax,zmax)
z = galWarp + zPos
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center in kpc
i += 1
if (galRad < galRange) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)) :
region += numClusterTot # Increase region count
i = 0 # Escape Loop
# Determine individual region parameters and write to list
while (i == 0) and (numCluster <= numClusterTot) :
# Set Time Distribution
timeParam = random.randint(0,99)
        age = timeParam*.127 # Age in Myr (12.7 Myr limit) in Tremblin model
# Set Host Star Mass Distribution
massParam = random.random() # Used in forcing powerlaw fit
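        # Rejection sampling on stellar mass: accept mass m with probability
        # proportional to IMF(m) * lifetime(m) -- a power law with the Salpeter
        # slope (-2.35) weighted by an m**-0.935 lifetime term, both normalized
        # to lowerMass.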
while massParam != 0 :
mass = random.randrange(lowerMass,upperMass)
IMF = pow(lowerMass,2.35)*pow(mass,-2.35)
lifetime = pow(lowerMass,.935)*pow(mass,-.935)
numHiiRegions = IMF*lifetime
if numHiiRegions > massParam : # Makes power law fit
massParam = 0 # Escape loop
# Set Host Star Flux Distribution
fluxMin = math.log10(pow(lowerMass,1.94))
fluxMax = math.log10(pow(upperMass,1.94))
fluxParam = int(round((math.log10(pow(mass,1.94))-fluxMin)/(fluxMax-fluxMin)*16,0)) # Use this line to access all values of Lum from 10^47 - 10^51
# fluxParam = int(round((math.log10(pow(mass,1.94))-fluxMin)/(fluxMax-fluxMin)*12,0)+4)
lum = fluxParam
# Set Electron Temperature Distribution
        # Relationship taken from Balser et al. 2011, put in the range accepted by the Tremblin model
TeParam = int(round((5756 + 303*random.uniform(-1,1)) + galRad*(299 + 31*random.uniform(-1,1)),-3)/1000 - 5)
# Set Neutral Hydrogen Density Distribution
densityParam = random.randint(0,10)
# From Distributions, Determine HII Region Radius
# Using Pascal Tremblin's hypercube data
radius = ff.variables['radius'][timeParam,fluxParam,TeParam,densityParam]
# Rotate galaxy
xRot = x*math.cos(galRot) - y*math.sin(galRot)
yRot = x*math.sin(galRot) + y*math.cos(galRot)
# Set velocity of source
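        # Flat rotation curve: vR = (omega - omega0) * R0 * sin(l), plus the
        # 3-kpc arm expansion term v0*cos(theta) (v0 = 56, set in that branch above).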
omega = circRot/galRad # Assume flat rotation curve.
omega0 = circRot/sunPos
dist = pow(pow(xRot,2)+pow(yRot-sunPos,2),0.5)
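        # Galactic longitude l from the law of cosines on the Sun / Galactic
        # center / source triangle; its sign follows the rotated x coordinate.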
l = math.copysign(math.acos((pow(dist,2)+pow(sunPos,2)-pow(galRad,2))/(2*sunPos*dist))*180/math.pi,xRot)
b = math.atan((z-sunHeight)/dist)
vR = (omega - omega0)*sunPos*math.sin(l*math.pi/180)+v0*math.cos(theta)
# This section allows the user to test various parameters for easy
# output to terminal (e.g. luminosity of various features, counts
# of regions in spiral versus bar, etc.)
if (whereIsRegion <= diffusePercent) \
and (diffuse == True) :
diffLum = diffLum + lum
diffCount += 1
regNum = 1
elif (whereIsRegion > diffusePercent) \
and (whereIsRegion <= (diffusePercent + barPercent)) \
and (bar == True) :
barLum = barLum + lum
barCount += 1
regNum = 2
elif (whereIsRegion > (diffusePercent + barPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (ThreekpcArm == True) :
ThreekpcLum = ThreekpcLum + lum
ThreekpcCount += 1
regNum = 3
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent)) \
and (ring == True) :
ringLum = ringLum + lum
ringCount += 1
regNum = 4
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent + spiralPercent)) \
and (spiral == True):
sprLum = sprLum + lum
sprCount += 1
if whichArm == 0 :
regNum = 5
elif whichArm == 1 :
regNum = 6
elif whichArm == 2 :
regNum = 7
elif whichArm == 3 :
regNum = 8
totLum = totLum + lum
        # Append region record to the output list (written to CSV below)
HiiList.append([galRad,xRot,yRot,z,mass,lum,age,radius,l,vR,regNum,b])
numCluster += 1
with open("3DHiiRegions.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(HiiList)
print "Diffuse Luminosity : " + str(diffLum*100/totLum) + "% (" + str(diffCount) + " Regions)"
print "Bar Luminosity : " + str(barLum*100/totLum) + "% (" + str(barCount) + " Regions)"
print "3 kpc Arm Luminosity : " + str(ThreekpcLum*100/totLum) + "% ("+ str(ThreekpcCount) + " Regions)"
print "Ring Luminosity : " + str(ringLum*100/totLum) + "% ("+ str(ringCount) + " Regions)"
print "Spiral Luminosity : " + str(sprLum*100/totLum) + "% (" + str(sprCount) + " Regions)"
print "Total Luminosity : " + str((barLum+ThreekpcLum+ringLum+sprLum+diffLum)*100/totLum) + "% (" + str(barCount+ThreekpcCount+ringCount+sprCount+diffCount) + " Regions)"
def main(numRegions=numRegions, alpha=alpha, pitchAngle=pitchAngle, numSpirals=numSpirals, extentOfBar=extentOfBar, \
barAngle=barAngle, scaleHeight=gamma, warpHeight=warpHeight):
defaults()
    # NOTE: this does not work as expected -- Simulate() will use all values
    # from the defaults() call above rather than the arguments passed to main().
Simulate(numRegions=numRegions, alpha=alpha, pitchAngle=pitchAngle, numSpirals=numSpirals, extentOfBar=extentOfBar, \
barAngle=barAngle, scaleHeight=gamma, warpHeight=warpHeight)
# UPDATE PLOTS
lvDiagram.lvDiagram()
main()
|
WillArmentrout/galSims
|
simulate/Simulate_Function.py
|
Python
|
gpl-2.0
| 17,243
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import dddp
import dddp.models
from dddp.migrations import TruncateOperation
class Migration(migrations.Migration):
dependencies = [
('sessions', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dddp', '0001_initial'),
]
operations = [
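        # Truncate subscriptions on the forward migration, presumably because the
        # non-nullable fields added below (preserve_default=False) cannot be
        # backfilled for existing rows.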
TruncateOperation(forwards=['subscription']),
migrations.CreateModel(
name='Connection',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('connection_id', dddp.models.AleaIdField(default=dddp.meteor_random_id, max_length=17)),
('remote_addr', models.CharField(max_length=255)),
('version', models.CharField(max_length=255)),
('session', models.ForeignKey(to='sessions.Session')),
],
options={
},
bases=(models.Model, object),
),
migrations.CreateModel(
name='SubscriptionCollection',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('collection_class', models.CharField(max_length=255)),
('subscription', models.ForeignKey(related_name='collections', to='dddp.Subscription')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='connection',
unique_together=set([('connection_id', 'session')]),
),
migrations.AddField(
model_name='subscription',
name='connection',
field=models.ForeignKey(to='dddp.Connection'),
preserve_default=False,
),
migrations.AddField(
model_name='subscription',
name='publication_class',
field=models.CharField(max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='subscription',
name='sub_id',
field=models.CharField(max_length=17),
preserve_default=False,
),
migrations.AddField(
model_name='subscription',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='subscription',
unique_together=set([('connection', 'sub_id')]),
),
migrations.RemoveField(
model_name='subscription',
name='session',
),
TruncateOperation(backwards=['subscription'])
]
|
commoncode/django-ddp
|
dddp/migrations/0002_auto_20150408_0321.py
|
Python
|
mit
| 2,926
|
#!/usr/bin/env python
# encoding: utf-8
# vim:ft=python.django:
from django.contrib import admin
from .models import Telefono, Operadora, Salto, Virtual
@admin.register(Telefono)
class TelefonoAdmin(admin.ModelAdmin):
list_display = ('numero', 'descripcion', 'es_primario', 'primario', 'operadora')
search_fields = ('numero', 'descripcion', 'primario')
list_filter = ('es_primario', 'primario', 'operadora')
list_editable = ('descripcion',)
@admin.register(Operadora)
class OperadoraAdmin(admin.ModelAdmin):
pass
class SaltoInline(admin.TabularInline):
model = Salto
@admin.register(Virtual)
class VirtualAdmin(admin.ModelAdmin):
list_display = ('numero', 'descripcion', 'operadora')
list_filter = ('operadora', )
inlines = [SaltoInline, ]
|
aaloy/curs_estiu_2015_uib
|
centralita/src/inventario/admin.py
|
Python
|
gpl-2.0
| 784
|
from __future__ import absolute_import
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
class Getch:
"""Gets a single character from standard input. Does not echo to the
screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchUnix()
except ImportError:
self.impl = _GetchMacCarbon()
def __call__(self):
try:
return self.impl()
except IOError:
return None
class _GetchUnix:
def __init__(self):
import tty, sys, termios # import termios now or else you'll get the Unix version on the Mac
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
class _GetchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0]==0: # 0x0008 is the keyDownMask
return ''
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg)
|
karrtikr/ete
|
ete3/tools/phylobuild_lib/getch.py
|
Python
|
gpl-3.0
| 3,540
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-08 02:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('constellation', '0005_event_youtubevideoid'),
]
operations = [
migrations.AlterField(
model_name='event',
name='youtubeVideoID',
field=models.CharField(blank=True, default='', max_length=20),
preserve_default=False,
),
]
|
teamazim/django_unchained
|
constellation/migrations/0006_auto_20160308_0228.py
|
Python
|
gpl-3.0
| 525
|
import os
from celery import Celery
from celery.schedules import crontab
from celery.task import periodic_task
from django.apps import AppConfig
from django.conf import settings
from django.core.management import call_command
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local") # pragma: no cover
app = Celery('velo')
class CeleryConfig(AppConfig):
name = 'velo.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
from raven import Client
from raven.contrib.celery import register_signal
client = Client(dsn=settings.RAVEN_CONFIG['DSN'])
register_signal(client)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
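# Periodic task: runs daily at 01:02 and purges expired sessions via Django's
# clearsessions management command.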
@periodic_task(run_every=crontab(minute="2", hour="1"))
def clear_old_sessions():
call_command("clearsessions")
|
Ameriks/velo.lv
|
velo/taskapp/celery.py
|
Python
|
gpl-3.0
| 1,323
|
"""
Meta is a script to access the plugins which handle meta information.
"""
from __future__ import absolute_import
from fabmetheus_utilities import archive
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_profile
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getNewRepository():
'Get new repository.'
return MetaRepository()
def getPluginFileNames():
"Get meta plugin file names."
return archive.getPluginFileNamesFromDirectoryPath( getPluginsDirectoryPath() )
def getPluginsDirectoryPath():
"Get the plugins directory path."
return archive.getSkeinforgePluginsPath('meta_plugins')
class MetaRepository(object):
"A class to handle the meta settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_utilities.skeinforge_meta.html', self)
importantFileNames = ['polyfile']
settings.getRadioPluginsAddPluginFrame( getPluginsDirectoryPath(), importantFileNames, getPluginFileNames(), self )
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/Cura/cura_sf/skeinforge_application/skeinforge_utilities/skeinforge_meta.py
|
Python
|
agpl-3.0
| 1,230
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['utorrent',
'transmission',
'deluge',
'download_station',
'rtorrent',
'qbittorrent'
]
import sickbeard
from os import sys
# Mapping error status codes to official W3C names
http_error_code = {
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: 'Switch Proxy',
307: 'Temporary Redirect',
308: 'Permanent Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
    418: "I'm a teapot",
419: 'Authentication Timeout',
420: 'Enhance Your Calm',
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
428: 'Precondition Required',
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
440: 'Login Timeout',
444: 'No Response',
449: 'Retry With',
450: 'Blocked by Windows Parental Controls',
    451: 'Unavailable For Legal Reasons',  # also used by some servers for 'Redirect'
494: 'Request Header Too Large',
495: 'Cert Error',
496: 'No Cert',
497: 'HTTP to HTTPS',
498: 'Token expired/invalid',
    499: 'Client Closed Request',  # also used by some servers for 'Token required'
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
506: 'Variant Also Negotiates',
507: 'Insufficient Storage',
508: 'Loop Detected',
509: 'Bandwidth Limit Exceeded',
510: 'Not Extended',
511: 'Network Authentication Required',
    522: 'Cloudflare connection timed out',
    524: 'Request to host timed out waiting for reply',
    598: 'Network read timeout error',
    599: 'Network connect timeout error'
}
default_host = {'utorrent': 'http://localhost:8000',
'transmission': 'http://localhost:9091',
'deluge': 'http://localhost:8112',
'download_station': 'http://localhost:5000',
'rtorrent': 'scgi://localhost:5000',
'qbittorrent': 'http://localhost:8080'
}
def getClientModule(name):
name = name.lower()
prefix = "sickbeard.clients."
return __import__(prefix + name + '_client', fromlist=__all__)
def getClientIstance(name):
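    # Each client module exposes a module-level ``api`` instance; return its
    # class so callers can construct a client themselves.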
module = getClientModule(name)
className = module.api.__class__.__name__
return getattr(module, className)
|
keen99/SickRage
|
sickbeard/clients/__init__.py
|
Python
|
gpl-3.0
| 3,669
|
import django_filters
from django_filters.widgets import BooleanWidget
from .models import Group
from guardian.shortcuts import get_objects_for_user
class GroupFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='icontains')
can_manage = django_filters.MethodFilter(widget=BooleanWidget())
def filter_can_manage(self, queryset, value):
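        # Restrict the queryset to groups whose profile the requesting user may
        # change (guardian object-level permission 'ojuser.change_groupprofile').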
if value:
profiles_can_change = get_objects_for_user(
self.user,
'ojuser.change_groupprofile',
with_superuser=True
)
queryset = queryset.filter(pk__in=profiles_can_change)
return queryset
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(GroupFilter, self).__init__(*args, **kwargs)
class Meta:
model = Group
fields = ['name', 'can_manage', ]
|
wangzitian0/BOJ-V4
|
ojuser/filters.py
|
Python
|
mit
| 893
|
#!/usr/bin/env python
"""
Your task is to complete the 'porsche_query' function and in particular the query
to find all autos where the manufacturer field matches "Porsche".
Please modify only 'porsche_query' function, as only that will be taken into account.
Your code will be run against a MongoDB instance that we have provided.
If you want to run this code locally on your machine,
you have to install MongoDB and download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials at
the following link:
https://www.udacity.com/wiki/ud032
"""
import json
import pprint
from pymongo import MongoClient
def get_db(db_name):
client = MongoClient('localhost:27017')
client.drop_database("examples")
db = client[db_name]
return db
def porsche_query():
    # Please fill in the query to find all autos manufactured by Porsche
query = {"manufacturer": "Porsche"}
return query
def find_porsche(db, query):
return db.autos.find(query)
if __name__ == "__main__":
db = get_db('examples')
db.autos.insert(json.load(open("example_car.json", "r")))
query = porsche_query()
p = find_porsche(db, query)
for doc in p:
pprint.pprint(doc)
|
krzyste/ud032
|
Lesson_4_Working_with_MongoDB/10-Finding_Porsche/find_porsche.py
|
Python
|
agpl-3.0
| 1,240
|
import logging, couchdb, oauth2, json, sys
from decorator import decorator
from pylons import config, request as r, response as res, session
from pylons.controllers.util import abort
from functools import wraps
log = logging.getLogger(__name__)
appConfig = config['app_conf']
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class BadOAuthSignature(Error):
pass
class OAuthJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, (oauth2.Consumer, oauth2.Token)):
return { "key": o.key, "secret": o.secret }
elif isinstance(o, Exception):
return {
"type": type(o).__name__,
"message": o.message }
try:
return json.JSONEncoder.default(self, o)
except Exception as e:
log.exception("Encoded Type: {0}\nrepr: {1}".format(type(o), repr(o)))
raise e
class CouchDBOAuthUtil():
def __init__(self, couchdb_dba_url=appConfig['couchdb.url.dbadmin'], users_db=appConfig['couchdb.db.users'], oauth_view=appConfig['couchdb.db.users.oauthview']):
self.server = couchdb.Server(couchdb_dba_url)
self.users = self.server[users_db]
self.oauth_view = oauth_view
def find_possible(self, consumer, token, mapper=None):
def wrap_row(row):
# log.error("wrap_row: "+json.dumps(row))
row_result = {}
if "doc" in row:
row_result["name"] = row["doc"]["name"]
row_result["consumer"] = oauth2.Consumer(key=consumer, secret=row["doc"]["oauth"]["consumer_keys"][consumer])
row_result["token"] = oauth2.Token(key=token, secret=row["doc"]["oauth"]["tokens"][token])
row_result["id"] = row["doc"]["_id"]
row_result["roles"] = row["doc"]["roles"]
if mapper:
mapper(row_result, row)
return row_result
view_opts = {
"key":[consumer, token],
"include_docs":True
}
view_results = self.users.view(self.oauth_view, wrapper=wrap_row, **view_opts)
return view_results.rows
def check_request(self, request, mapper=None):
http_method = request.method
http_url = request.host_url + request.path_info
headers = request.headers
query_string = request.query_string
info = None
parameters = None
# log.error("*** CHECK_REQUEST *** "+json.dumps({
# "query_string": query_string,
# "headers": {}.update(headers),
# "http_method": http_method,
# "http_url": http_url
# }))
oa_request = oauth2.Request.from_request(http_method, http_url, headers, query_string=query_string)
if oa_request and all([ x in oa_request for x in ['oauth_consumer_key', 'oauth_token']]):
server = oauth2.Server()
server.add_signature_method(oauth2.SignatureMethod_HMAC_SHA1())
last_exc = None
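            # Try each (consumer, token) secret pair stored for this key pair;
            # the first whose HMAC-SHA1 signature verifies wins.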
for row in self.find_possible(oa_request['oauth_consumer_key'], oa_request['oauth_token'], mapper):
try:
parameters = server.verify_request(oa_request, row["consumer"], row["token"])
except oauth2.Error as e:
last_exc = BadOAuthSignature("OAuth2 Error: %s" % e.message)
except:
import sys
log.exception("Caught Exception in CouchDBOAuthUtil")
last_exc = BadOAuthSignature(sys.exc_info()[1])
if parameters != None:
info = row
break
if parameters == None and last_exc != None:
raise last_exc
return (parameters, info)
_authobj = CouchDBOAuthUtil()
DEFAULT_SESSION_KEY = "oauth"
class status(object):
Okay = "Okay"
NoSignature = "No Signature"
BadSignature = "Bad Signature"
Error = "Error"
Unknown = "Unknown"
def authorize(session_key=DEFAULT_SESSION_KEY, service_doc=None, roles=None, mapper=None, realm=None, pre_cond=None, post_cond=None):
_roles = roles
_mapper = mapper
_session_key=session_key
_realm = realm or ""
_pre_cond = pre_cond
_post_cond = post_cond
_service_doc = service_doc
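    # Decorator: verify the request's OAuth signature, stash the outcome in the
    # session under ``session_key``, optionally check roles, and answer 401 with
    # a WWW-Authenticate header when verification fails.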
def wrapper(fn, self, *args, **kwargs):
if _service_doc:
sdoc = _service_doc()
try:
if "oauth" not in sdoc["service_auth"]["service_authz"]:
return fn(self, *args, **kwargs)
except:
raise ValueError("Missing service_document for checking if OAUTH access is enabled.")
if _pre_cond:
precond = cont = _pre_cond()
else:
precond = cont = True
if precond:
success = { "status": status.Unknown, "user": None, "parameters": None }
try:
success["parameters"], success["user"] = _authobj.check_request(r._current_obj(), _mapper)
if success["parameters"] is None:
success["status"] = status.NoSignature
else:
success["status"] = status.Okay
except BadOAuthSignature as e:
success["status"] = status.BadSignature
success["detail"] = e.message
cont = False
except:
success["status"] = status.Error
success["detail"] = repr(sys.exc_info())
log.exception("Caught Exception in authorize")
cont = False
sess = session._current_obj()
sess[_session_key] = success
# log.error("in wrap:"+repr(sess[_session_key]))
if cont and _roles:
cont = UserHasRoles(_session_key, _roles)
if _post_cond:
cont = _post_cond(cont)
if cont:
try:
return fn(self, *args, **kwargs)
finally:
pass
else:
h = {"WWW-Authenticate": "OAuth realm=\"{0}\"".format(_realm)}
log.error("Authorization Required")
res.headers.update(h)
abort(401, "OAuth Authorization Required", headers=h)
return decorator(wrapper)
def UserHasRoles(session_key, roles=[] ):
hasRoles = False
try:
s = session._current_obj()
hasRoles = all([role in s[session_key]["user"]["roles"] for role in roles])
except:
pass
return hasRoles
|
jimklo/LearningRegistry
|
LR/lr/lib/oauth.py
|
Python
|
apache-2.0
| 6,907
|
#!/usr/bin/env python3
"""Logging module for bulk uploads."""
import fcntl
import json
import os
def write_to_log(log_file_path, msg, newline=True):
"""Lock and write to a given file, creates the file if it doesn't exist."""
# create log file descriptor if it doesn't exist, if it does continue as normal
log_fd = ''
try:
# create the log file descriptor and lock it
log_fd = os.open(log_file_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
fcntl.flock(log_fd, fcntl.LOCK_EX)
# write to to the file, this will unlock when we're done with it
with os.fdopen(log_fd, 'w') as file:
file.write(msg)
except Exception:
# log file exists, open it in append mode and lock it
log_fd = os.open(log_file_path, os.O_APPEND | os.O_WRONLY)
fcntl.flock(log_fd, fcntl.LOCK_EX)
with os.fdopen(log_fd, 'a') as file:
if newline:
msg = "\n" + msg
file.write(msg)
def write_to_json(json_path, json_data):
"""Lock and update a json file, create it if doesn't exist."""
json_fd = ''
try:
json_fd = os.open(json_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
fcntl.flock(json_fd, fcntl.LOCK_EX)
with os.fdopen(json_fd, 'w') as file:
json.dump(json_data, file, sort_keys=True, indent=4)
except Exception:
json_fd = os.open(json_path, os.O_APPEND | os.O_RDWR)
fcntl.flock(json_fd, fcntl.LOCK_EX)
# json file exists, update previous data and write back
with os.fdopen(json_fd, 'r+') as file:
prev_data = json.load(file)
prev_data.update(json_data)
file.truncate(0)
json.dump(prev_data, file, sort_keys=True, indent=4)
|
RCOS-Grading-Server/HWserver
|
sbin/submitty_daemon_jobs/submitty_jobs/write_to_log.py
|
Python
|
bsd-3-clause
| 1,770
|
import numpy as np
def compute_ratio(value1, value2, args):
value1 = value1 + args['pseudocount']
value2 = value2 + args['pseudocount']
ratio = float(value1) / value2
if args['valueType'] == 'log2':
ratio = np.log2(ratio)
elif args['valueType'] == 'reciprocal_ratio':
# the reciprocal ratio of a/b
# is a/b if a/b > 1 else -1* b/a
ratio = ratio if ratio > 1 else -1.0 / ratio
return ratio
def getRatio(tileCoverage, args):
r"""
The mapreduce method calls this function
for each tile. The parameters (args) are fixed
in the main method.
>>> funcArgs= {'missingDataAsZero': True, 'valueType': 'ratio',
... 'scaleFactors': (1,1), 'pseudocount': 1}
>>> getRatio([9,19], funcArgs)
0.5
>>> getRatio([0,0], funcArgs)
1.0
>>> getRatio([np.nan,np.nan], funcArgs)
1.0
>>> getRatio([np.nan,1.0], funcArgs)
0.5
>>> funcArgs['missingDataAsZero'] = False
>>> getRatio([10,np.nan], funcArgs)
nan
>>> funcArgs['valueType'] ='subtract'
>>> getRatio([20,10], funcArgs)
10
>>> funcArgs['scaleFactors'] = (1, 0.5)
>>> getRatio([10,20], funcArgs)
0.0
>>> funcArgs['valueType'] ='reciprocal_ratio'
>>> funcArgs['scaleFactors'] = (1, 1)
>>> getRatio([19,9], funcArgs)
2.0
>>> getRatio([9,19], funcArgs)
-2.0
"""
value1 = args['scaleFactors'][0] * tileCoverage[0]
value2 = args['scaleFactors'][1] * tileCoverage[1]
if args['missingDataAsZero'] is True:
if np.isnan(value1):
value1 = 0
if np.isnan(value2):
value2 = 0
else:
# if any of the two values to compare
# is nan, return nan
if np.isnan(value1) or np.isnan(value2):
return np.nan
## ratio case
if args['valueType'] in ['ratio', 'log2', 'reciprocal_ratio']:
bin_value = compute_ratio(value1, value2, args)
# non ratio case (diff, sum etc)
else:
if args['valueType'] == 'subtract':
bin_value = value1 - value2
elif args['valueType'] == 'add':
bin_value = value1 + value2
return bin_value
|
JinfengChen/deepTools
|
deeptools/getRatio.py
|
Python
|
gpl-3.0
| 2,169
|
import chardet
import codecs
import collections
import contextlib
import datetime
import errno
import functools
import itertools
import operator
import os
import random
import re
import shutil
import time
import unicodedata
import urllib
import urlparse
import django.core.mail
from django import http
from django.conf import settings
from django.contrib import messages
from django.core import paginator
from django.core.cache import cache
from django.core.files.storage import (FileSystemStorage,
default_storage as storage)
from django.core.serializers import json
from django.core.validators import validate_slug, ValidationError
from django.forms.fields import Field
from django.http import HttpRequest
from django.template import Context, loader
from django.utils import translation
from django.utils.encoding import smart_str, smart_unicode
from django.utils.functional import Promise
from django.utils.http import urlquote
import bleach
import html5lib
import jinja2
import pytz
import tower
from babel import Locale
from cef import log_cef as _log_cef
from django_statsd.clients import statsd
from easy_thumbnails import processors
from html5lib.serializer.htmlserializer import HTMLSerializer
from jingo import env
from PIL import Image
import amo.search
from amo import ADDON_ICON_SIZES
from amo.urlresolvers import linkify_with_outgoing, reverse
from translations.models import Translation
from users.models import UserNotification
from users.utils import UnsubscribeCode
from . import logger_log as log
heka = settings.HEKA
def days_ago(n):
return datetime.datetime.now() - datetime.timedelta(days=n)
def urlparams(url_, hash=None, **query):
"""
    Add a fragment and/or query parameters to a URL.
    New query params will be appended to existing parameters, except duplicate
names, which will be replaced.
"""
url = urlparse.urlparse(url_)
fragment = hash if hash is not None else url.fragment
# Use dict(parse_qsl) so we don't get lists of values.
q = url.query
query_dict = dict(urlparse.parse_qsl(smart_str(q))) if q else {}
query_dict.update((k, v) for k, v in query.items())
query_string = urlencode([(k, v) for k, v in query_dict.items()
if v is not None])
new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,
query_string, fragment)
return new.geturl()
def partial(func, *args, **kw):
"""A thin wrapper around functools.partial which updates the wrapper
as would a decorator."""
return functools.update_wrapper(functools.partial(func, *args, **kw), func)
def isotime(t):
"""Date/Time format according to ISO 8601"""
if not hasattr(t, 'tzinfo'):
return
return _append_tz(t).astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
def epoch(t):
"""Date/Time converted to seconds since epoch"""
if not hasattr(t, 'tzinfo'):
return
return int(time.mktime(_append_tz(t).timetuple()))
def _append_tz(t):
tz = pytz.timezone(settings.TIME_ZONE)
return tz.localize(t)
def sorted_groupby(seq, key):
"""
Given a sequence, we sort it and group it by a key.
key should be a string (used with attrgetter) or a function.
"""
if not hasattr(key, '__call__'):
key = operator.attrgetter(key)
return itertools.groupby(sorted(seq, key=key), key=key)
def paginate(request, queryset, per_page=20, count=None):
"""
Get a Paginator, abstracting some common paging actions.
If you pass ``count``, that value will be used instead of calling
``.count()`` on the queryset. This can be good if the queryset would
produce an expensive count query.
"""
p = (ESPaginator if isinstance(queryset, amo.search.ES)
else paginator.Paginator)(queryset, per_page)
if count is not None:
p._count = count
# Get the page from the request, make sure it's an int.
try:
page = int(request.GET.get('page', 1))
except ValueError:
page = 1
# Get a page of results, or the first page if there's a problem.
try:
paginated = p.page(page)
except (paginator.EmptyPage, paginator.InvalidPage):
paginated = p.page(1)
paginated.url = u'%s?%s' % (request.path, request.GET.urlencode())
return paginated
def send_mail(subject, message, from_email=None, recipient_list=None,
fail_silently=False, use_blacklist=True, perm_setting=None,
manage_url=None, headers=None, cc=None, real_email=False,
html_message=None, attachments=None, async=False,
max_retries=None):
"""
A wrapper around django.core.mail.EmailMessage.
Adds blacklist checking and error logging.
"""
from amo.helpers import absolutify
from amo.tasks import send_email
import users.notifications as notifications
if not recipient_list:
return True
if isinstance(recipient_list, basestring):
raise ValueError('recipient_list should be a list, not a string.')
# Check against user notification settings
if perm_setting:
if isinstance(perm_setting, str):
perm_setting = notifications.NOTIFICATIONS_BY_SHORT[perm_setting]
perms = dict(UserNotification.objects
.filter(user__email__in=recipient_list,
notification_id=perm_setting.id)
.values_list('user__email', 'enabled'))
d = perm_setting.default_checked
recipient_list = [e for e in recipient_list
if e and perms.setdefault(e, d)]
# Prune blacklisted emails.
if use_blacklist:
white_list = []
for email in recipient_list:
if email and email.lower() in settings.EMAIL_BLACKLIST:
log.debug('Blacklisted email removed from list: %s' % email)
else:
white_list.append(email)
else:
white_list = recipient_list
if not from_email:
from_email = settings.DEFAULT_FROM_EMAIL
if cc:
# If not basestring, assume it is already a list.
if isinstance(cc, basestring):
cc = [cc]
if not headers:
headers = {}
def send(recipient, message, **options):
kwargs = {
'async': async,
'attachments': attachments,
'cc': cc,
'fail_silently': fail_silently,
'from_email': from_email,
'headers': headers,
'html_message': html_message,
'max_retries': max_retries,
'real_email': real_email,
}
kwargs.update(options)
# Email subject *must not* contain newlines
args = (recipient, ' '.join(subject.splitlines()), message)
if async:
return send_email.delay(*args, **kwargs)
else:
return send_email(*args, **kwargs)
if white_list:
if perm_setting:
html_template = loader.get_template('amo/emails/unsubscribe.html')
text_template = loader.get_template('amo/emails/unsubscribe.ltxt')
if not manage_url:
manage_url = urlparams(absolutify(
reverse('users.edit', add_prefix=False)),
'acct-notify')
for recipient in white_list:
# Add unsubscribe link to footer.
token, hash = UnsubscribeCode.create(recipient)
unsubscribe_url = absolutify(
reverse('users.unsubscribe',
args=[token, hash, perm_setting.short],
add_prefix=False))
context_options = {
'message': message,
'manage_url': manage_url,
'unsubscribe_url': unsubscribe_url,
'perm_setting': perm_setting.label,
'SITE_URL': settings.SITE_URL,
'mandatory': perm_setting.mandatory,
}
# Render this template in the default locale until
# bug 635840 is fixed.
with no_translation():
context = Context(context_options, autoescape=False)
message_with_unsubscribe = text_template.render(context)
if html_message:
context_options['message'] = html_message
with no_translation():
context = Context(context_options, autoescape=False)
html_with_unsubscribe = html_template.render(context)
result = send([recipient], message_with_unsubscribe,
html_message=html_with_unsubscribe,
attachments=attachments)
else:
result = send([recipient], message_with_unsubscribe,
attachments=attachments)
else:
result = send(recipient_list, message=message,
html_message=html_message, attachments=attachments)
else:
result = True
return result
@contextlib.contextmanager
def no_jinja_autoescape():
"""Disable Jinja2 autoescape."""
autoescape_orig = env.autoescape
env.autoescape = False
yield
env.autoescape = autoescape_orig
def send_mail_jinja(subject, template, context, *args, **kwargs):
"""Sends mail using a Jinja template with autoescaping turned off.
Jinja is especially useful for sending email since it has whitespace
control.
"""
with no_jinja_autoescape():
template = env.get_template(template)
msg = send_mail(subject, template.render(context), *args, **kwargs)
return msg
def send_html_mail_jinja(subject, html_template, text_template, context,
*args, **kwargs):
"""Sends HTML mail using a Jinja template with autoescaping turned off."""
# Get a jinja environment so we can override autoescaping for text emails.
with no_jinja_autoescape():
html_template = env.get_template(html_template)
text_template = env.get_template(text_template)
msg = send_mail(subject, text_template.render(context),
html_message=html_template.render(context), *args,
**kwargs)
return msg
class JSONEncoder(json.DjangoJSONEncoder):
def default(self, obj):
from versions.models import ApplicationsVersions
unicodable = (Translation, Promise)
if isinstance(obj, unicodable):
return unicode(obj)
if isinstance(obj, ApplicationsVersions):
return {unicode(amo.APP_IDS[obj.application].pretty): {
'min': unicode(obj.min), 'max': unicode(obj.max)}}
return super(JSONEncoder, self).default(obj)
def chunked(seq, n):
"""
Yield successive n-sized chunks from seq.
>>> for group in chunked(range(8), 3):
... print group
[0, 1, 2]
[3, 4, 5]
[6, 7]
"""
seq = iter(seq)
while 1:
rv = list(itertools.islice(seq, 0, n))
if not rv:
break
yield rv
def urlencode(items):
"""A Unicode-safe URLencoder."""
try:
return urllib.urlencode(items)
except UnicodeEncodeError:
return urllib.urlencode([(k, smart_str(v)) for k, v in items])
def randslice(qs, limit, exclude=None):
"""
Get a random slice of items from ``qs`` of size ``limit``.
There will be two queries. One to find out how many elements are in ``qs``
and another to get a slice. The count is so we don't go out of bounds.
If exclude is given, we make sure that pk doesn't show up in the slice.
This replaces qs.order_by('?')[:limit].
"""
cnt = qs.count()
# Get one extra in case we find the element that should be excluded.
if exclude is not None:
limit += 1
rand = 0 if limit > cnt else random.randint(0, cnt - limit)
slice_ = list(qs[rand:rand + limit])
if exclude is not None:
slice_ = [o for o in slice_ if o.pk != exclude][:limit - 1]
return slice_
# Extra characters outside of alphanumerics that we'll allow.
SLUG_OK = '-_~'
def slugify(s, ok=SLUG_OK, lower=True, spaces=False, delimiter='-'):
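    # e.g. slugify(u'Héllo, Wörld!') -> u'héllo-wörld': letters/numbers are kept
    # (including non-ASCII), whitespace collapses to the delimiter, rest dropped.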
# L and N signify letter/number.
# http://www.unicode.org/reports/tr44/tr44-4.html#GC_Values_Table
rv = []
for c in smart_unicode(s):
cat = unicodedata.category(c)[0]
if cat in 'LN' or c in ok:
rv.append(c)
if cat == 'Z': # space
rv.append(' ')
new = ''.join(rv).strip()
if not spaces:
        new = re.sub(r'[-\s]+', delimiter, new)
return new.lower() if lower else new
def slug_validator(s, ok=SLUG_OK, lower=True, spaces=False, delimiter='-',
message=validate_slug.message, code=validate_slug.code):
"""
Raise an error if the string has any punctuation characters.
Regexes don't work here because they won't check alnums in the right
locale.
"""
if not (s and slugify(s, ok, lower, spaces, delimiter) == s):
raise ValidationError(message, code=code)
def raise_required():
raise ValidationError(Field.default_error_messages['required'])
def clear_messages(request):
"""
Clear any messages out of the messages framework for the authenticated
user.
Docs: http://bit.ly/dEhegk
"""
for message in messages.get_messages(request):
pass
def clean_nl(string):
"""
This will clean up newlines so that nl2br can properly be called on the
cleaned text.
"""
html_blocks = ['{http://www.w3.org/1999/xhtml}blockquote',
'{http://www.w3.org/1999/xhtml}ol',
'{http://www.w3.org/1999/xhtml}li',
'{http://www.w3.org/1999/xhtml}ul']
if not string:
return string
def parse_html(tree):
# In etree, a tag may have:
# - some text content (piece of text before its first child)
# - a tail (piece of text just after the tag, and before a sibling)
# - children
# Eg: "<div>text <b>children's text</b> children's tail</div> tail".
# Strip new lines directly inside block level elements: first new lines
# from the text, and:
# - last new lines from the tail of the last child if there's children
# (done in the children loop below).
# - or last new lines from the text itself.
if tree.tag in html_blocks:
if tree.text:
tree.text = tree.text.lstrip('\n')
if not len(tree): # No children.
tree.text = tree.text.rstrip('\n')
# Remove the first new line after a block level element.
if tree.tail and tree.tail.startswith('\n'):
tree.tail = tree.tail[1:]
for child in tree: # Recurse down the tree.
if tree.tag in html_blocks:
# Strip new lines directly inside block level elements: remove
# the last new lines from the children's tails.
if child.tail:
child.tail = child.tail.rstrip('\n')
parse_html(child)
return tree
parse = parse_html(html5lib.parseFragment(string))
# Serialize the parsed tree back to html.
walker = html5lib.treewalkers.getTreeWalker('etree')
stream = walker(parse)
serializer = HTMLSerializer(quote_attr_values=True,
omit_optional_tags=False)
return serializer.render(stream)
def resize_image(src, dst, size=None, remove_src=True, locally=False):
"""Resizes and image from src, to dst. Returns width and height.
When locally is True, src and dst are assumed to reside
on the local disk (not in the default storage). When dealing
with local files it's up to you to ensure that all directories
exist leading up to the dst filename.
"""
if src == dst:
raise Exception("src and dst can't be the same: %s" % src)
open_ = open if locally else storage.open
delete = os.unlink if locally else storage.delete
with open_(src, 'rb') as fp:
im = Image.open(fp)
im = im.convert('RGBA')
if size:
im = processors.scale_and_crop(im, size)
with open_(dst, 'wb') as fp:
im.save(fp, 'png')
if remove_src:
delete(src)
return im.size
def remove_icons(destination):
for size in ADDON_ICON_SIZES:
filename = '%s-%s.png' % (destination, size)
if storage.exists(filename):
storage.delete(filename)
class ImageCheck(object):
def __init__(self, image):
self._img = image
def is_image(self):
try:
self._img.seek(0)
self.img = Image.open(self._img)
# PIL doesn't tell us what errors it will raise at this point,
# just "suitable ones", so let's catch them all.
self.img.verify()
return True
except:
log.error('Error decoding image', exc_info=True)
return False
def is_animated(self, size=100000):
if not self.is_image():
return False
if self.img.format == 'PNG':
self._img.seek(0)
data = ''
while True:
chunk = self._img.read(size)
if not chunk:
break
data += chunk
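            # APNG detection: an 'acTL' (animation control) chunk appearing
            # before the first 'IDAT' chunk marks the PNG as animated.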
acTL, IDAT = data.find('acTL'), data.find('IDAT')
if acTL > -1 and acTL < IDAT:
return True
return False
elif self.img.format == 'GIF':
# The image has been verified, and thus the file closed, we need to
# reopen. Check the "verify" method of the Image object:
# http://pillow.readthedocs.org/en/latest/reference/Image.html
self._img.seek(0)
img = Image.open(self._img)
# See the PIL docs for how this works:
# http://www.pythonware.com/library/pil/handbook/introduction.htm
try:
img.seek(1)
except EOFError:
return False
return True
class MenuItem():
"""Refinement item with nestable children for use in menus."""
url, text, selected, children = ('', '', False, [])
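    # Note: these are class attributes; ``children`` is a shared mutable list,
    # so assign each instance a fresh list before appending to it.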
def to_language(locale):
"""Like django's to_language, but en_US comes out as en-US."""
# A locale looks like en_US or fr.
if '_' in locale:
return to_language(translation.trans_real.to_language(locale))
# Django returns en-us but we want to see en-US.
elif '-' in locale:
lang, region = locale.split('-')
return '%s-%s' % (lang, region.upper())
else:
return translation.trans_real.to_language(locale)
def get_locale_from_lang(lang):
"""Pass in a language (u'en-US') get back a Locale object courtesy of
Babel. Use this to figure out currencies, bidi, names, etc."""
# Special fake language can just act like English for formatting and such
if not lang or lang == 'dbg':
lang = 'en'
return Locale(translation.to_locale(lang))
class HttpResponseSendFile(http.HttpResponse):
def __init__(self, request, path, content=None, status=None,
content_type='application/octet-stream', etag=None):
self.request = request
self.path = path
super(HttpResponseSendFile, self).__init__('', status=status,
content_type=content_type)
header_path = self.path
if isinstance(header_path, unicode):
header_path = header_path.encode('utf8')
if settings.XSENDFILE:
self[settings.XSENDFILE_HEADER] = header_path
if etag:
self['ETag'] = '"%s"' % etag
def __iter__(self):
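        # With X-Sendfile the web server streams the file itself, so the
        # response body must stay empty.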
if settings.XSENDFILE:
return iter([])
chunk = 4096
fp = open(self.path, 'rb')
if 'wsgi.file_wrapper' in self.request.META:
return self.request.META['wsgi.file_wrapper'](fp, chunk)
else:
self['Content-Length'] = os.path.getsize(self.path)
def wrapper():
while 1:
data = fp.read(chunk)
if not data:
break
yield data
return wrapper()
def redirect_for_login(request):
# We can't use urlparams here, because it escapes slashes,
# which a large number of tests don't expect
url = '%s?to=%s' % (reverse('users.login'),
urlquote(request.get_full_path()))
return http.HttpResponseRedirect(url)
def cache_ns_key(namespace, increment=False):
"""
Returns a key with namespace value appended. If increment is True, the
namespace will be incremented effectively invalidating the cache.
Memcache doesn't have namespaces, but we can simulate them by storing a
"%(key)s_namespace" value. Invalidating the namespace simply requires
editing that key. Your application will no longer request the old keys,
and they will eventually fall off the end of the LRU and be reclaimed.
"""
ns_key = 'ns:%s' % namespace
if increment:
try:
ns_val = cache.incr(ns_key)
except ValueError:
log.info('Cache increment failed for key: %s. Resetting.' % ns_key)
ns_val = epoch(datetime.datetime.now())
cache.set(ns_key, ns_val, None)
else:
ns_val = cache.get(ns_key)
if ns_val is None:
ns_val = epoch(datetime.datetime.now())
cache.set(ns_key, ns_val, None)
return '%s:%s' % (ns_val, ns_key)
def get_email_backend(real_email=False):
"""Get a connection to an email backend.
If settings.SEND_REAL_EMAIL is False, a debugging backend is returned.
"""
if real_email or settings.SEND_REAL_EMAIL:
backend = None
else:
backend = 'amo.mail.DevEmailBackend'
return django.core.mail.get_connection(backend)
class ESPaginator(paginator.Paginator):
"""A better paginator for search results."""
# The normal Paginator does a .count() query and then a slice. Since ES
# results contain the total number of results, we can take an optimistic
# slice and then adjust the count.
def page(self, number):
# Fake num_pages so it looks like we can have results.
self._num_pages = float('inf')
number = self.validate_number(number)
self._num_pages = None
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
page = paginator.Page(self.object_list[bottom:top], number, self)
# Force the search to evaluate and then attach the count.
list(page.object_list)
self._count = page.object_list.count()
return page
def smart_path(string):
"""Returns a string you can pass to path.path safely."""
if os.path.supports_unicode_filenames:
return smart_unicode(string)
return smart_str(string)
def log_cef(name, severity, env, *args, **kwargs):
"""Simply wraps the cef_log function so we don't need to pass in the config
dictionary every time. See bug 707060. env can be either a request
object or just the request.META dictionary"""
c = {'cef.product': getattr(settings, 'CEF_PRODUCT', 'AMO'),
'cef.vendor': getattr(settings, 'CEF_VENDOR', 'Mozilla'),
'cef.version': getattr(settings, 'CEF_VERSION', '0'),
'cef.device_version': getattr(settings, 'CEF_DEVICE_VERSION', '0'),
'cef.file': getattr(settings, 'CEF_FILE', 'syslog'), }
# The CEF library looks for some things in the env object like
# REQUEST_METHOD and any REMOTE_ADDR stuff. Django not only doesn't send
# half the stuff you'd expect, but it specifically doesn't implement
# readline on its FakePayload object so these things fail. I have no idea
# if that's outdated code in Django or not, but andym made this
# <strike>awesome</strike> less crappy so the tests will actually pass.
# In theory, the last part of this if() will never be hit except in the
# test runner. Good luck with that.
if isinstance(env, HttpRequest):
r = env.META.copy()
if 'PATH_INFO' in r:
r['PATH_INFO'] = env.build_absolute_uri(r['PATH_INFO'])
elif isinstance(env, dict):
r = env
else:
r = {}
if settings.USE_HEKA_FOR_CEF:
return heka.cef(name, severity, r, *args, config=c, **kwargs)
else:
return _log_cef(name, severity, r, *args, config=c, **kwargs)
@contextlib.contextmanager
def no_translation(lang=None):
"""
Activate the settings lang, or lang provided, while in context.
"""
old_lang = translation.trans_real.get_language()
if lang:
tower.activate(lang)
else:
tower.activate(settings.LANGUAGE_CODE)
yield
tower.activate(old_lang)
def escape_all(v, linkify_only_full=False):
"""Escape html in JSON value, including nested items.
Only linkify full urls, including a scheme, if "linkify_only_full" is True.
"""
if isinstance(v, basestring):
v = jinja2.escape(smart_unicode(v))
v = linkify_with_outgoing(v, only_full=linkify_only_full)
return v
elif isinstance(v, list):
for i, lv in enumerate(v):
v[i] = escape_all(lv, linkify_only_full=linkify_only_full)
elif isinstance(v, dict):
for k, lv in v.iteritems():
v[k] = escape_all(lv, linkify_only_full=linkify_only_full)
elif isinstance(v, Translation):
v = jinja2.escape(smart_unicode(v.localized_string))
return v
class LocalFileStorage(FileSystemStorage):
"""Local storage to an unregulated absolute file path.
Unregulated means that, unlike the default file storage, you can write to
any path on the system if you have access.
Unlike Django's default FileSystemStorage, this class behaves more like a
"cloud" storage system. Specifically, you never have to write defensive
code that prepares for leading directory paths to exist.
"""
def __init__(self, base_url=None):
super(LocalFileStorage, self).__init__(base_url=base_url)
def delete(self, name):
"""Delete a file or empty directory path.
Unlike the default file system storage this will also delete an empty
directory path. This behavior is more in line with other storage
systems like S3.
"""
full_path = self.path(name)
if os.path.isdir(full_path):
os.rmdir(full_path)
else:
return super(LocalFileStorage, self).delete(name)
def _open(self, name, mode='rb'):
if mode.startswith('w'):
parent = os.path.dirname(self.path(name))
try:
# Try/except to prevent race condition raising "File exists".
os.makedirs(parent)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(parent):
pass
else:
raise
return super(LocalFileStorage, self)._open(name, mode=mode)
def path(self, name):
"""Actual file system path to name without any safety checks."""
return os.path.normpath(os.path.join(self.location,
self._smart_path(name)))
def _smart_path(self, string):
if os.path.supports_unicode_filenames:
return smart_unicode(string)
return smart_str(string)
def strip_bom(data):
"""
Strip the BOM (byte order mark) from byte string `data`.
Returns a new byte string.
"""
for bom in (codecs.BOM_UTF32_BE,
codecs.BOM_UTF32_LE,
codecs.BOM_UTF16_BE,
codecs.BOM_UTF16_LE,
codecs.BOM_UTF8):
if data.startswith(bom):
data = data[len(bom):]
break
return data
def smart_decode(s):
"""Guess the encoding of a string and decode it."""
if isinstance(s, unicode):
return s
enc_guess = chardet.detect(s)
try:
return s.decode(enc_guess['encoding'])
except (UnicodeDecodeError, TypeError), exc:
msg = 'Error decoding string (encoding: %r %.2f%% sure): %s: %s'
log.error(msg % (enc_guess['encoding'],
enc_guess['confidence'] * 100.0,
exc.__class__.__name__, exc))
return unicode(s, errors='replace')
def translations_for_field(field):
"""Return all the translations for a given field.
This returns a dict of locale:localized_string, not Translation objects.
"""
if field is None:
return {}
translation_id = getattr(field, 'id')
qs = Translation.objects.filter(id=translation_id,
localized_string__isnull=False)
translations = dict(qs.values_list('locale', 'localized_string'))
return translations
def attach_trans_dict(model, objs):
"""Put all translations into a translations dict."""
# Get the ids of all the translations we need to fetch.
fields = model._meta.translated_fields
ids = [getattr(obj, f.attname) for f in fields
for obj in objs if getattr(obj, f.attname, None) is not None]
# Get translations in a dict, ids will be the keys. It's important to
# consume the result of sorted_groupby, which is an iterator.
qs = Translation.objects.filter(id__in=ids, localized_string__isnull=False)
all_translations = dict((k, list(v)) for k, v in
sorted_groupby(qs, lambda trans: trans.id))
def get_locale_and_string(translation, new_class):
"""Convert the translation to new_class (making PurifiedTranslations
and LinkifiedTranslations work) and return locale / string tuple."""
converted_translation = new_class()
converted_translation.__dict__ = translation.__dict__
return (converted_translation.locale.lower(),
unicode(converted_translation))
# Build and attach translations for each field on each object.
for obj in objs:
obj.translations = collections.defaultdict(list)
for field in fields:
t_id = getattr(obj, field.attname, None)
field_translations = all_translations.get(t_id, None)
if not t_id or field_translations is None:
continue
obj.translations[t_id] = [get_locale_and_string(t, field.rel.to)
for t in field_translations]
def rm_local_tmp_dir(path):
"""Remove a local temp directory.
This is just a wrapper around shutil.rmtree(). Use it to indicate you are
certain that your executing code is operating on a local temp dir, not a
directory managed by the Django Storage API.
"""
return shutil.rmtree(path)
def rm_local_tmp_file(path):
"""Remove a local temp file.
This is just a wrapper around os.unlink(). Use it to indicate you are
certain that your executing code is operating on a local temp file, not a
path managed by the Django Storage API.
"""
return os.unlink(path)
def timer(*func, **kwargs):
"""
Outputs statsd timings for the decorated method, ignored if not
in test suite. It will give us a name that's based on the module name.
It will work without params. Or with the params:
key: a key to override the calculated one
test_only: only time while in test suite (default is True)
"""
key = kwargs.get('key', None)
test_only = kwargs.get('test_only', True)
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if test_only and not settings.IN_TEST_SUITE:
return func(*args, **kw)
else:
name = (key if key else
'%s.%s' % (func.__module__, func.__name__))
with statsd.timer('timer.%s' % name):
return func(*args, **kw)
return wrapper
if func:
return decorator(func[0])
return decorator
def find_language(locale):
"""
Return a locale we support, or None.
"""
if not locale:
return None
LANGS = settings.AMO_LANGUAGES + settings.HIDDEN_LANGUAGES
if locale in LANGS:
return locale
# Check if locale has a short equivalent.
loc = settings.SHORTER_LANGUAGES.get(locale)
if loc:
return loc
# Check if locale is something like en_US that needs to be converted.
locale = to_language(locale)
if locale in LANGS:
return locale
return None
def has_links(html):
"""Return True if links (text or markup) are found in the given html."""
# Call bleach.linkify to transform text links to real links, and add some
# content to the ``href`` attribute. If the result is different from the
# initial string, links were found.
class LinkFound(Exception):
pass
def raise_on_link(attrs, new):
raise LinkFound
try:
bleach.linkify(html, callbacks=[raise_on_link])
except LinkFound:
return True
return False
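# e.g. has_links('visit http://example.com') is True, while
# has_links('no links here') is False.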
def walkfiles(folder, suffix=''):
"""Iterator over files in folder, recursively."""
return (os.path.join(basename, filename)
for basename, dirnames, filenames in os.walk(folder)
for filename in filenames
if filename.endswith(suffix))
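# e.g. list(walkfiles('apps', suffix='.py')) yields the path of every .py file
# found recursively under apps/.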
|
mdaif/olympia
|
apps/amo/utils.py
|
Python
|
bsd-3-clause
| 33,689
|
#!/usr/bin/env python
#from xcelip import iprecs
from netmiko.linux import LinuxSSH
from netmiko import ConnectHandler
import os
import errno
import datetime
import time
from IPy import IP
from colorama import init,Fore, Back, Style
import sys
import getpass
timestamp = time.strftime(".%H%M%S")
__author__ = "John Ng"
__copyright__ = "Copyright 2018, John Ng"
__license__ = "MIT License"
__version__ = "v0. Beta"
__status__ = "Prototype"
init(strip=False)
print('\033[91m' + 'This is a Python script to generate pre-defined commands for Cisco UC OS devices only.')
print('\033[91m' + 'Files will be generated and saved in the same folder as this app, named by IP address and timestamp.')
print('\033[91m' + 'Normal device config generation time and resource consumption still apply.')
print(Fore.CYAN + Style.BRIGHT + '[v0.4 Beta]')
init(wrap=False)
print('\033[30m')
cleanslate = sys.stdout
try:
ipaddr = input('Please enter IP Address : ') # Manual IP Prompt
#ipaddr = iprecs # Pull data through excel file from xcelip module
IP(ipaddr)
except ValueError:
print('Invalid Ip Detected')
sys.exit(1)
username = input('Please enter username : ')
password = getpass.getpass(prompt='Please enter device password : ')
#password = input('Please enter device password : ')
cisco_devices = {
'device_type': 'linux',
'ip': ipaddr,
'username': username,
'password': password,
'global_delay_factor': 7,
}
#if not os.path.exists("C:/sshoutput/"): # Test block for customized target directory
#os.mkdir("C:/sshoutput/")
#path = 'C:/sshoutput/'
"""filename = ipaddr + str(timestamp) + '.txt'"""
print('Generating.....' )
net_connect = ConnectHandler(**cisco_devices)
fileout = open(ipaddr + str(timestamp) + ".txt", "a")
sys.stdout = fileout
ucos_commands = ['show date', 'show myself', 'show status', 'show version active', 'show version inactive',
'show dscp all', 'utils service list', 'utils dbreplication status',
'utils dbreplication runtimestate', 'utils network arp list']
#cos_commands = ['show open ports all']
for command in ucos_commands:
print('=============== ' + command + ' ===============' + '\n')
output = net_connect.send_command(command + '\n', delay_factor=7)
print(output + '\n')
#fileout.write(output)
net_connect.disconnect()
sys.stdout = cleanslate
fileout.close()
#raise SystemExit
print('Command Execution Complete')
print('\n')
try:
input("Press Enter to Continue..")
except SyntaxError:
pass
|
phasedscum/python-as-a-waffle
|
Scratch Dir/Cucm_Connectorizer.py
|
Python
|
mit
| 2,522
|
# https://projecteuler.net/problem=20
#
# n! means n × (n − 1) × ... × 3 × 2 × 1
#
# For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
# and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
#
# Find the sum of the digits in the number 100!
def preCalcFactSum(limit):
ary = [1]
f = 1
for i in range(1, limit+1):
f *= i
n = f
s = 0
while n:
s, n = s + n % 10, n // 10
ary.append(s)
return ary
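# e.g. preCalcFactSum(10)[-1] == 27, the digit sum of 10! = 3628800.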
if __name__ == '__main__':
table = preCalcFactSum(100)
print(table[-1])
|
rahulsrma26/code-gems
|
ProjectEuler/Problems/problem001_025/Solution020.py
|
Python
|
mit
| 528
|
from panda3d.core import *
import string
import types
try:
language = getConfigExpress().GetString('language', 'english')
checkLanguage = getConfigExpress().GetBool('check-language', 0)
except:
language = simbase.config.GetString('language', 'english')
checkLanguage = simbase.config.GetBool('check-language', 0)
def getLanguage():
return language
print 'TTLocalizer: Running in language: %s' % language
if language == 'english':
_languageModule = 'toontown.toonbase.TTLocalizer' + language.capitalize()
else:
checkLanguage = 1
_languageModule = 'toontown.toonbase.TTLocalizer_' + language
print 'from ' + _languageModule + ' import *'
from toontown.toonbase.TTLocalizerEnglish import *
if checkLanguage:
l = {}
g = {}
englishModule = __import__('toontown.toonbase.TTLocalizerEnglish', g, l)
foreignModule = __import__(_languageModule, g, l)
for key, val in englishModule.__dict__.items():
if key not in foreignModule.__dict__:
print 'WARNING: Foreign module: %s missing key: %s' % (_languageModule, key)
locals()[key] = val
elif isinstance(val, types.DictType):
fval = foreignModule.__dict__.get(key)
for dkey, dval in val.items():
if dkey not in fval:
print 'WARNING: Foreign module: %s missing key: %s.%s' % (_languageModule, key, dkey)
fval[dkey] = dval
for dkey in fval.keys():
if dkey not in val:
print 'WARNING: Foreign module: %s extra key: %s.%s' % (_languageModule, key, dkey)
for key in foreignModule.__dict__.keys():
if key not in englishModule.__dict__:
print 'WARNING: Foreign module: %s extra key: %s' % (_languageModule, key)
|
Spiderlover/Toontown
|
toontown/toonbase/TTLocalizer.py
|
Python
|
mit
| 1,791
|
__source__ = 'https://github.com/kamyu104/LeetCode/blob/master/Python/word-squares.py'
# https://leetcode.com/problems/word-squares/#/description
# Time: O(n^2 * n!)
# Space: O(n^2)
#
# Description: 425. Word Squares
#
# Given a set of words (without duplicates), find all word squares you can build from them.
#
# A sequence of words forms a valid word square if the kth row and column read the exact same string,
# where 0 <= k < max(numRows, numColumns).
#
# For example, the word sequence ["ball","area","lead","lady"] forms a word square
# because each word reads the same both horizontally and vertically.
#
# b a l l
# a r e a
# l e a d
# l a d y
# Note:
# There are at least 1 and at most 1000 words.
# All words will have the exact same length.
# Word length is at least 1 and at most 5.
# Each word contains only lowercase English alphabet a-z.
# Example 1:
#
# Input:
# ["area","lead","wall","lady","ball"]
#
# Output:
# [
# [ "wall",
# "area",
# "lead",
# "lady"
# ],
# [ "ball",
# "area",
# "lead",
# "lady"
# ]
# ]
#
# Explanation:
# The output consists of two word squares. The order of output does not matter
# (just the order of words in each word square matters).
# Example 2:
#
# Input:
# ["abat","baba","atan","atal"]
#
# Output:
# [
# [ "baba",
# "abat",
# "baba",
# "atan"
# ],
# [ "baba",
# "abat",
# "baba",
# "atal"
# ]
# ]
#
# Explanation:
# The output consists of two word squares. The order of output does not matter
# (just the order of words in each word square matters).
#
# Hide Company Tags Google
# Hide Tags Backtracking Trie
# Hide Similar Problems (E) Valid Word Square
# I try every word for the first row. For each of them, try every fitting word for the second row.
# And so on. The first few rows determine the first few columns and thus determine how the next row's word must start.
# For example:
#
# wall Try words wall wall wall
# a... => starting => area Try words area area
# l... with "a" le.. => starting => lead Try words lead
# l... la.. with "le" lad. => starting => lady
# with "lad"
#
import unittest
# 659ms 32.52%
class TrieNode(object):
def __init__(self):
self.indices = []
self.children = [None] * 26
def insert(self, words, i):
cur = self
for c in words[i]:
if not cur.children[ord(c)-ord('a')]:
cur.children[ord(c)-ord('a')] = TrieNode()
cur = cur.children[ord(c)-ord('a')]
cur.indices.append(i)
class Solution(object):
def wordSquares(self, words):
"""
:type words: List[str]
:rtype: List[List[str]]
"""
result = []
trie = TrieNode()
for i in xrange(len(words)):
trie.insert(words, i)
curr = []
for s in words:
curr.append(s)
self.wordSquaresHelper(words, trie, curr, result)
curr.pop()
return result
def wordSquaresHelper(self, words, trie, curr, result):
if len(curr) >= len(words[0]):
return result.append(list(curr))
node = trie
for s in curr:
node = node.children[ord(s[len(curr)]) - ord('a')]
if not node:
return
for i in node.indices:
curr.append(words[i])
self.wordSquaresHelper(words, trie, curr, result)
curr.pop()
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought:
#
Java DFS+Trie 54 ms, 98% so far
By considering the word squares as a symmetric matrix, my idea is to go through the top right triangular matrix
in left-to-right and then down order.
For example, with the case of ["area","lead","wall","lady","ball"] where length = 4,
we start with 4 empty string
""
""
""
""
Next, [0,0] , "a","b", "l", "w" can be placed, we start with "a"
"a"
""
""
""
[0,1] go right, "r" can be placed after "a", but no words start with "r" at [1,0], so this DFS ends.
"ar"
""
""
""
Now, start with "b" at [0,0]
"b"
""
""
""
We can have "ba" at [0,1] and there is a word start with "a"
"ba"
"a"
""
""
Next
"bal"
"a"
"l"
""
Next
"ball"
"a"
"l"
"l"
When finish the first row, go down to next row and start at [1,1]
"ball"
"ar"
"l"
"l"
..... so on and so forth until reaching [4,4]
# https://discuss.leetcode.com/topic/63516/explained-my-java-solution-using-trie-126ms-16-16/5
A better approach is to check the validity of the word square while we build it.
Example: ["area","lead","wall","lady","ball"]
We know that the sequence contains 4 words because the length of each word is 4.
Every word can be the first word of the sequence, let's take "wall" for example.
Which word could be the second word? Must be a word start with "a" (therefore "area"),
because it has to match the second letter of word "wall".
Which word could be the third word? Must be a word start with "le" (therefore "lead"),
because it has to match the third letter of word "wall" and the third letter of word "area".
What about the last word? Must be a word start with "lad" (therefore "lady"). For the same reason above.
In order for this to work, we need to fast retrieve all the words with a given prefix. There could be 2 ways doing this:
Using a hashtable, key is prefix, value is a list of words with that prefix.
Trie, we store a list of words with the prefix on each trie node.
1. With Trie:
#101ms 45.18%
class Solution {
class TrieNode {
List<String> startWith;
TrieNode[] children;
TrieNode() {
startWith = new ArrayList<>();
children = new TrieNode[26];
}
}
class Trie {
TrieNode root;
Trie(String[] words) {
root = new TrieNode();
for (String w : words) {
TrieNode cur = root;
for (char ch : w.toCharArray()) {
int idx = ch - 'a';
if (cur.children[idx] == null)
cur.children[idx] = new TrieNode();
cur.children[idx].startWith.add(w);
cur = cur.children[idx];
}
}
}
List<String> findByPrefix(String prefix) {
List<String> ans = new ArrayList<>();
TrieNode cur = root;
for (char ch : prefix.toCharArray()) {
int idx = ch - 'a';
if (cur.children[idx] == null)
return ans;
cur = cur.children[idx];
}
ans.addAll(cur.startWith);
return ans;
}
}
public List<List<String>> wordSquares(String[] words) {
List<List<String>> ans = new ArrayList<>();
if (words == null || words.length == 0)
return ans;
int len = words[0].length();
Trie trie = new Trie(words);
List<String> ansBuilder = new ArrayList<>();
for (String w : words) {
ansBuilder.add(w);
search(len, trie, ans, ansBuilder);
ansBuilder.remove(ansBuilder.size() - 1);
}
return ans;
}
private void search(int len, Trie tr, List<List<String>> ans,
List<String> ansBuilder) {
if (ansBuilder.size() == len) {
ans.add(new ArrayList<>(ansBuilder));
return;
}
int idx = ansBuilder.size();
StringBuilder prefixBuilder = new StringBuilder();
for (String s : ansBuilder)
prefixBuilder.append(s.charAt(idx));
List<String> startWith = tr.findByPrefix(prefixBuilder.toString());
for (String sw : startWith) {
ansBuilder.add(sw);
search(len, tr, ans, ansBuilder);
ansBuilder.remove(ansBuilder.size() - 1);
}
}
}
#Improved Trie
#25ms 92.50%
class Solution {
public List<List<String>> wordSquares(String[] words) {
List<List<String>> res = new ArrayList<>();
if (words==null || words.length==0) return res;
Trie root = new Trie();
int len = words[0].length();
for (String word: words) {
root.add(root, word);
}
Trie[] rows = new Trie[len];
for (int i = 0; i < len; i++) {
rows[i] = root;
}
helper(0, 0, len, rows, res);
return res;
}
public void helper(int row, int col, int len, Trie[] rows, List<List<String>> res) {
if ( (col == row) && (row == len) ) { //last char
List<String> tmp = new ArrayList<>();
for (int i = 0; i < len; i++) {
tmp.add(new String(rows[i].word));
}
res.add(tmp);
} else { // from left to right and then go down to the next row
if (col < len) { // left to right first
Trie pre_row = rows[row];
Trie pre_col = rows[col];
for (int i = 0; i<26; i++) { // find all the possible next char
if ((rows[row].tries[i] != null) && (rows[col].tries[i] != null)) {
rows[row] = rows[row].tries[i];
if (col != row) rows[col] = rows[col].tries[i];
helper(row, col + 1, len, rows, res);
rows[row] = pre_row;
if (col != row) rows[col] = pre_col;
}
}
} else { // reach the end of column, go to the next row
helper(row + 1, row + 1, len, rows, res);
}
}
}
class Trie{
Trie[] tries;
String word;
Trie() {
this.tries = new Trie[26];
this.word = null;
}
public void add(Trie root, String word) {
Trie trie = root;
for (char c : word.toCharArray()) {
int idx = c - 'a';
if (trie.tries[idx] == null) {
trie.tries[idx] = new Trie();
}
trie = trie.tries[idx];
}
trie.word = word;
}
}
}
2. Hashtable to store index
The idea is borrowed from the discussion
(https://discuss.leetcode.com/topic/63516/explained-my-java-solution-using-trie-126ms-16-16) ,
which is to first calculating all possible prefix, then do backtracking.
We can use Trie or hashMap to store the prefix information, while I think Trie might be more hard to implement,
without saving any space. So I use hashMap to store prefix information.
#69ms 76.79%
class Solution {
public List<List<String>> wordSquares(String[] words) {
List<List<String>> ret = new ArrayList<List<String>>();
if(words.length==0 || words[0].length()==0) return ret;
Map<String, Set<String>> map = new HashMap<>();
int squareLen = words[0].length();
// create all prefix
for(int i=0;i<words.length;i++){
for(int j=-1;j<words[0].length();j++){
if(!map.containsKey(words[i].substring(0, j+1))) map.put(words[i].substring(0, j+1), new HashSet<String>());
map.get(words[i].substring(0, j+1)).add(words[i]);
}
}
helper(ret, new ArrayList<String>(), 0, squareLen, map);
return ret;
}
public void helper(List<List<String>> ret, List<String> cur, int matched, int total, Map<String, Set<String>> map){
if(matched == total) {ret.add(new ArrayList<String>(cur));return;}
// build search string
StringBuilder sb = new StringBuilder();
for(int i=0;i<=matched-1;i++) sb.append(cur.get(i).charAt(matched));
        // backtracking
Set<String> cand = map.get(sb.toString());
if(cand==null) return;
for(String str:cand){
cur.add(str);
helper(ret, cur, matched+1, total, map);
cur.remove(cur.size()-1);
}
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/WordSquares.py
|
Python
|
apache-2.0
| 12,230
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectivityIssue(Model):
"""Information about an issue encountered in the process of checking for
connectivity.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar origin: The origin of the issue. Possible values include: 'Local',
'Inbound', 'Outbound'
:vartype origin: str or ~azure.mgmt.network.v2017_10_01.models.Origin
:ivar severity: The severity of the issue. Possible values include:
'Error', 'Warning'
:vartype severity: str or ~azure.mgmt.network.v2017_10_01.models.Severity
:ivar type: The type of issue. Possible values include: 'Unknown',
'AgentStopped', 'GuestFirewall', 'DnsResolution', 'SocketBind',
'NetworkSecurityRule', 'UserDefinedRoute', 'PortThrottled', 'Platform'
:vartype type: str or ~azure.mgmt.network.v2017_10_01.models.IssueType
:ivar context: Provides additional context on the issue.
:vartype context: list[dict[str, str]]
"""
_validation = {
'origin': {'readonly': True},
'severity': {'readonly': True},
'type': {'readonly': True},
'context': {'readonly': True},
}
_attribute_map = {
'origin': {'key': 'origin', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'context': {'key': 'context', 'type': '[{str}]'},
}
def __init__(self, **kwargs):
super(ConnectivityIssue, self).__init__(**kwargs)
self.origin = None
self.severity = None
self.type = None
self.context = None
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/connectivity_issue.py
|
Python
|
mit
| 2,121
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
"""
This utility generates JSON output used to post comments in Gerrit.
INPUT: output of checkpatch.pl.
OUTPUT: JSON-formatted output that can be used to post comments in Gerrit.
"""
import os
import sys
import json
data = {}
data['comments'] = []
list_temp = {}
def update_struct(file_path, msg_output, line_number):
if file_path not in list_temp:
list_temp[file_path] = []
list_temp[file_path].append({
"robot_id" : "checkpatch",
"robot_run_id" : sys.argv[3],
"url" : sys.argv[4],
"line" : line_number,
"message" : msg_output,}
)
def parse_file(input_file):
    fp = open(input_file, "r")
for line in fp:
if line.startswith("ERROR:"):
msg_output = line.split("ERROR:")[1].strip()
elif line.startswith("WARNING:"):
msg_output = line.split("WARNING:")[1].strip()
elif ": FILE:" in line:
temp = line.split("FILE:")
file_path = temp[1].split(":")[0]
line_number = temp[1].split(":")[1]
            update_struct(file_path.strip(), msg_output, str(line_number))
else:
continue
fp.close()
def main():
if (len(sys.argv) < 5) or (sys.argv[1] == "-h"):
print("HELP:")
print(sys.argv[0] + " <input file> <output file in json> <job-id> <job-url>")
sys.exit()
print(sys.argv[1])
parse_file(sys.argv[1])
data['robot_comments'] = list_temp
print(json.dumps(data))
    out_file = open(sys.argv[2], "w")
json.dump(data, out_file, sort_keys=True, indent=4)
out_file.close()
if __name__ == "__main__":
main()
|
pcengines/coreboot
|
util/lint/checkpatch_json.py
|
Python
|
gpl-2.0
| 1,686
|
from __future__ import absolute_import
import cPickle as pickle
import os
import subprocess
import sys
from .utils import get_command_prefix, get_func_name, get_func
from . import logs
def call_in_fork(func, args=None, kwargs=None):
args = args or ()
    kwargs = kwargs or {}  # must be a mapping, since it is expanded with ** below
pid = os.fork()
if pid:
return pid
    func = get_func(func)
    func(*args, **kwargs)
    # Ensure the forked child never falls through into the caller's code path.
    os._exit(0)
def call_in_subprocess(func, args=None, kwargs=None, envvars=None, ):
cmd = get_command_prefix(envvars) if envvars else []
cmd.extend((sys.executable, '-m', 'sgevents.subprocess'))
environ = os.environ.copy()
environ.update(envvars or {})
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, env=environ)
proc.stdin.write(pickle.dumps({
'func': get_func_name(func),
'args': args,
'kwargs': kwargs,
'log_setup': logs.get_log_setup(),
'log_meta': logs.get_log_meta(),
}))
proc.stdin.close()
return proc
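# Usage sketch (hypothetical task function and env var):
#   proc = call_in_subprocess(do_work, args=(1,), envvars={'DEBUG': '1'})
#   proc.wait()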
def _main():
raw_package = sys.stdin.read()
package = pickle.loads(raw_package)
func = get_func(package['func'])
args = package.get('args') or ()
kwargs = package.get('kwargs') or {}
log_setup = package.get('log_setup')
log_meta = package.get('log_meta')
# Restore logging state.
if log_setup:
logs.setup_logs(*log_setup)
if log_meta:
logs.update_log_meta(**log_meta)
func(*args, **kwargs)
def test(*args, **kwargs):
print __name__, args, kwargs
if __name__ == '__main__':
exit(_main() or 0)
|
westernx/sgevents
|
sgevents/subprocess.py
|
Python
|
bsd-3-clause
| 1,550
|
import re
import json
import yaml
import os
import logging
import utils
from validators import Validator
from frameworkUtils import FrameworkUtils
logger = logging.getLogger(os.getenv('LOGGER_NAME', __name__))
permissions_files = os.getenv('PERMISSIONS_FILES')
class AuthorizeResult:
def __init__(self, result, messages=None):
self.result = result
self.messages = messages
def __nonzero__(self):
return self.result
class FileAuthorizer:
class __FileAuthorizer:
        '''Takes a comma-separated list of permissions file paths in the PERMISSIONS_FILES environment variable.
'''
def __init__(self, filenames):
self.filenames = filenames
self.data = self._parse_permissions_files(filenames)
def resource_check(self, request_uri, body, allowed_actions, content_type):
for item in allowed_actions:
uri = item.keys()
pattern = uri[0]
prog = re.compile("^{}$".format(pattern))
result = prog.match(request_uri)
if result:
if (item[pattern] == {}):
return True
else:
if body == "":
return False
else:
if content_type.lower() == "application/json" or content_type.lower() == "application/javascript":
try:
template_data = json.loads(body)
except (Exception) as e:
logger.error("Request body is an invalid json - {}".format(str(e)))
return False
attribute_rules = item[pattern]
valid = self.validate_request_body(attribute_rules, template_data)
return valid
else:
return False
return False
def validate_request_body(self, attribute_rules, body):
for attribute in attribute_rules.keys():
items = attribute.split('/')
temp_data = body
for item in items:
if not type(temp_data) == dict:
return False
if item in temp_data.keys():
temp_data = temp_data[item]
else:
#Attribute not available in request body
return False
prog = re.compile("^{}$".format(attribute_rules[attribute]))
if type(temp_data) == list:
for item_val in temp_data:
result = prog.match(item_val)
if not result:
return False
else: # temp_data is of type 'str'
result = prog.match(temp_data)
if not result:
return False
return True
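        # Illustrative (hypothetical rule): a permissions entry of
        #   {'/apps/.*': {'container/image': 'registry/.*'}}
        # matches request URI '/apps/myapp' only when the request body has
        # body['container']['image'] matching the pattern 'registry/.*'.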
def _get_act_as_list(self, user):
allowed_users_list = []
if user in self.data.keys():
self._get_merged_data(user, allowed_users_list, [], self.data, '')
return allowed_users_list
def authorize(self, user, act_as, resource, data, content_type, action = None):
if not action:
action = 'GET'
if not user or not act_as or not resource:
return AuthorizeResult(False)
if user not in self.data.keys() or act_as not in self.data.keys():
logger.warning("Invalid user [{}] or act as user [{}]".format(user, act_as))
return AuthorizeResult(False)
if user != act_as:
if 'can_act_as' not in self.data[user]:
logger.warning("User {} not authorized to act as {}".format(user, act_as))
return AuthorizeResult(False)
allowed_users_list = self._get_act_as_list(user)
if act_as not in allowed_users_list:
logger.warning("User {} not authorized to act as {}".format(user, act_as))
return AuthorizeResult(False)
allowed_users_list = []
allowed_actions = []
if 'action' in self.data[act_as] and self.data[act_as]['action'] != None:
for item in self.data[act_as]['action'][action]:
temp_item = {}
if type(item) == str:
temp_item = {}
temp_item[item] = {}
else:
if type(item) == dict:
temp_item = item
if not temp_item in allowed_actions:
allowed_actions.append(temp_item)
self._get_merged_data(act_as, allowed_users_list, allowed_actions, self.data, action)
result = self.resource_check(resource, data, allowed_actions, content_type)
if result == False:
logger.warning("User {} acting as {} is not authorized to access [{}]".format(user, act_as, resource))
return AuthorizeResult(False)
try:
validator = Validator()
framework = FrameworkUtils().getFramework(resource)
if not validator.validate(act_as, resource, action, data, framework):
logger.warning("Validation failed. Reasons - {}".format(validator.messages))
return AuthorizeResult(False, validator.messages)
except (Exception) as e:
logger.error("Failed in request validation - {}".format(str(e)))
return AuthorizeResult(False)
return AuthorizeResult(True)
def get_user_list(self, type=None):
if not type:
return self.data.keys()
users = []
for key, data in self.data.items():
if data.get('type', 'user') == type:
users.append(key)
return users
def get_canactas_list(self, user):
actas_users = []
for key in self._get_act_as_list(user):
if (self.data.get(key).get('type', 'user') != 'internal'):
actas_users.append(key)
return actas_users
def is_user_valid(self, user):
return user in self.data.keys()
def filter_response(self, resource, data, actas):
framework = FrameworkUtils().getFramework(resource)
allowed_namespaces = self.get_allowed_namespace_patterns(actas)
if not allowed_namespaces: #Empty list
return ""
return framework.filterResponseBody(data, allowed_namespaces, resource)
def _parse_permissions_files(self, filenames):
permissions = {}
for item in filenames.split(','):
filename = item.strip()
if filename:
with open(filename, 'r') as data_file:
permissions = utils.merge_dicts(permissions, yaml.load(data_file))
return permissions
def _get_merged_data(self, user, allowed_users, allowed_actions, data, action):
if user in allowed_users:
return
allowed_users.append(user)
if action != '' and 'action' in data[user]:
if data[user]['action'] != None and action in data[user]['action']:
for item in data[user]['action'][action]:
temp_item = {}
if type(item) == str:
temp_item = {}
temp_item[item] = {}
else:
if type(item) == dict:
temp_item = item
if not temp_item in allowed_actions:
allowed_actions.append(temp_item)
if 'can_act_as' not in data[user]:
return
for u in data[user]['can_act_as']:
self._get_merged_data(u, allowed_users, allowed_actions, data, action)
def get_allowed_namespace_patterns(self, act_as):
if not self.is_user_valid(act_as):
return []
permissions = self.data
allowed_users_list = []
self._get_merged_data(act_as, allowed_users_list, [], permissions, '')
allowed_namespace_patterns = []
for user in allowed_users_list:
if 'allowed_names' in permissions[user]:
for pattern in permissions[user]['allowed_names']:
if not pattern in allowed_namespace_patterns:
allowed_namespace_patterns.append(pattern)
return allowed_namespace_patterns
instance = None
def __init__(self):
if not FileAuthorizer.instance:
FileAuthorizer.instance = FileAuthorizer.__FileAuthorizer(permissions_files)
else:
FileAuthorizer.instance.filenames = permissions_files
|
seomoz/roger-mesos
|
aaad/authorizers.py
|
Python
|
apache-2.0
| 9,461
|
# -----------------------------------------------------------
# demonstrates how to create and use a 2d array using NumPy
# (C) 2016 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email frank.hofmann@efho.de
# -----------------------------------------------------------
# requirements:
# * Python 3.x
# * NumPy for Python 3 (http://www.numpy.org/)
# import external NumPy module
import numpy as np
class Array2D:
def __init__(self, rows, columns):
"constructor class to initiate this object"
# set array size
self.rows = rows
self.columns = columns
# define an array with zero values
self.arrayData = np.zeros((self.rows, self.columns))
return
def getSize(self):
"return the size of the 2d array"
return self.arrayData.shape
def getDimension(self):
"return the dimension of the 2d array"
return self.arrayData.ndim
def getNumberOfElements(self):
"count the number of elements in the 2d array"
return self.arrayData.size
def print(self):
"output the array 2d content"
print (self.arrayData)
return
def getElementValue(self, column, row):
"get the element value at array position x,y"
        if 0 <= row < self.rows:
arrayRow = self.arrayData[row]
            if 0 <= column < self.columns:
# return success
return self.arrayData[row][column]
else:
# return failure
return False
else:
# return failure
return False
def setElementValue(self, column, row, value):
"set the element value at array position x,y"
        if 0 <= row < self.rows:
arrayRow = self.arrayData[row]
            if 0 <= column < self.columns:
self.arrayData[row][column] = value
# return success
return True
else:
# return failure
return False
else:
# return failure
return False
# main program
# define 2d array of size 3x3
array2 = Array2D(3,3)
print ("2D array size:", array2.getSize())
print ("number of array items:", array2.getNumberOfElements())
print ("dimensions of the array:", array2.getDimension())
# output array content
array2.print()
# set value at [1,1] = 15
if array2.setElementValue(1, 1, 15) == False:
print ("array index out of range")
else:
print ("position (1,1) set to 15")
# set value at [5,0] = 7
if array2.setElementValue(0, 5, 7) == False:
print("array index (0,5) out of range")
else:
print ("position (0,5) set to 7")
# output array content
array2.print()
# output specific array value
print ("value at (2,2):", array2.getElementValue(2,2))
|
hofmannedv/training-python
|
data-structures/array2d-numpy.py
|
Python
|
gpl-2.0
| 2,471
|
# -*- coding:utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
class SendTestMail(forms.Form):
"""Отправка тестового письма для проверки настроек
почтового сервера.
"""
text = forms.CharField(label=_('Text message'))
email = forms.EmailField(label=_('Email recipient'))
required_css_class = 'required'
|
sfcl/severcart
|
service/forms/send_test_mail.py
|
Python
|
gpl-2.0
| 437
|
class Record (object) :
def __init__ (self, ** _fields) :
for _identifier, _value in _fields.iteritems () :
setattr (self, _identifier, _value)
return
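# e.g. (illustrative) r = Record(host='db1', port=5432); r.host == 'db1'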
|
cipriancraciun/extremely-simple-cluster-platform
|
components/py-tools/sources/escp/tools/records.py
|
Python
|
gpl-3.0
| 169
|
"""
07-midifile-with-mido.py - Reading a MIDI file with mido and sending the events to pyo.
This example shows how simple it is to play a MIDI file with mido and send the events
to an audio synth build with pyo.
"""
from pyo import *
# Try to import MidiFile from the mido module. You can install mido with pip:
# pip install mido
try:
from mido import MidiFile
except:
print("The `mido` module must be installed to run this example!")
exit()
s = Server().boot().start()
# A little audio synth to play the MIDI events.
mid = Notein()
amp = MidiAdsr(mid["velocity"])
pit = MToF(mid["pitch"])
osc = Osc(SquareTable(), freq=pit, mul=amp).mix(1)
rev = STRev(osc, revtime=1, cutoff=4000, bal=0.2).out()
# Opening the MIDI file...
mid = MidiFile("../snds/mapleleafrag.mid")
# ... and reading its content.
for message in mid.play():
# For each message, we convert it to integer data with the bytes()
# method and send the values to pyo's Server with the addMidiEvent()
# method. This method programmatically adds a MIDI message to the
# server's internal MIDI event buffer.
s.addMidiEvent(*message.bytes())
|
belangeo/pyo
|
pyo/examples/16-midi/07-midifile-with-mido.py
|
Python
|
lgpl-3.0
| 1,142
|
import numpy
import math
import itertools
def quad_arr_from_sample(sample):
res = []
for n in xrange(len(sample)):
for m in xrange(n, len(sample)):
res.append(sample[n]*sample[m])
return [1] + res
def cube_arr_from_sample(sample):
res = []
for n in xrange(len(sample)):
for m in xrange(n, len(sample)):
for k in xrange(m, len(sample)):
res.append(sample[n]*sample[m]*sample[k])
return [1] + res
def fourth_arr_from_sample(sample):
res = []
for n in xrange(len(sample)):
for m in xrange(n, len(sample)):
for k in xrange(m, len(sample)):
for l in xrange(k, len(sample)):
res.append(sample[n]*sample[m]*sample[k]*sample[l])
return [1] + res
def fifth_arr_from_sample(sample):
res = []
for n in xrange(len(sample)):
for m in xrange(n, len(sample)):
for k in xrange(m, len(sample)):
for l in xrange(k, len(sample)):
for o in xrange(l, len(sample)):
res.append(sample[n]*sample[m]*sample[k]*sample[l]*sample[o])
return [1] + res
def sixth_arr_from_sample(sample):
res = []
sample = [1] + sample
for n in xrange(len(sample)):
for m in xrange(n, len(sample)):
for k in xrange(m, len(sample)):
for l in xrange(k, len(sample)):
for o in xrange(l, len(sample)):
for p in xrange(o, len(sample)):
res.append(sample[n]*sample[m]*sample[k]*sample[l]*sample[o]*sample[p])
return [1] + res
def seventh_arr_from_sample(sample):
res = []
sample = [1] + sample
for n in xrange(len(sample)):
for m in xrange(n, len(sample)):
for k in xrange(m, len(sample)):
for l in xrange(k, len(sample)):
for o in xrange(l, len(sample)):
for p in xrange(o, len(sample)):
for q in xrange(p, len(sample)):
res.append(sample[n]*sample[m]*sample[k]*sample[l]*sample[o]*sample[p]*sample[q])
return [1] + res
def build_quad_x_from_set(arr_X):
order = sum(xrange( (len(arr_X[0]))+1)) + 1
l = len(arr_X)
X = numpy.empty([l, order])
for i in xrange(len(arr_X)):
sample = arr_X[i]
quad_sample = quad_arr_from_sample(sample)
X[i] = quad_sample
return X
def build_cube_x_from_set(arr_X):
l = len(arr_X)
n = len(arr_X[0]) + 3
order = n*(n**2-6*n+11)/6
print len(arr_X[0])
print order
X = numpy.empty([l, order])
for i in xrange(len(arr_X)):
sample = arr_X[i]
cube_sample = cube_arr_from_sample(sample)
X[i] = cube_sample
return X
def build_fourth_x_from_set(arr_X):
l = len(arr_X)
n = len(arr_X[0])
order = 1 + (6 + (11 + (6 + n)*n)*n)*n/24
print len(arr_X[0])
print order
X = numpy.empty([l, order])
for i in xrange(len(arr_X)):
sample = arr_X[i]
fourth_sample = fourth_arr_from_sample(sample)
X[i] = fourth_sample
return X
def build_fifth_x_from_set(arr_X):
l = len(arr_X)
n = len(arr_X[0])
order = len(fifth_arr_from_sample(arr_X[0]))
print len(arr_X[0])
print order
X = numpy.empty([l, order])
for i in xrange(len(arr_X)):
sample = arr_X[i]
fifth_sample = fifth_arr_from_sample(sample)
X[i] = fifth_sample
return X
def build_sixth_x_from_set(arr_X):
l = len(arr_X)
n = len(arr_X[0])
order = len(sixth_arr_from_sample(arr_X[0]))
print len(arr_X[0])
print order
X = numpy.empty([l, order])
for i in xrange(len(arr_X)):
sample = arr_X[i]
sixth_sample = sixth_arr_from_sample(sample)
X[i] = sixth_sample
return X
def build_seventh_x_from_set(arr_X):
l = len(arr_X)
n = len(arr_X[0])
order = len(seventh_arr_from_sample(arr_X[0]))
print len(arr_X[0])
print order
X = numpy.empty([l, order])
for i in xrange(len(arr_X)):
sample = arr_X[i]
seventh_sample = seventh_arr_from_sample(sample)
X[i] = seventh_sample
return X
def nth_arr_from_sample(sample, n=4):
    combs = itertools.combinations_with_replacement([1] + sample, n)
    return map(lambda x: reduce(lambda a, b: a*b, x, 1), combs)
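# e.g. nth_arr_from_sample([x, y], 2) multiplies out every size-2 multiset
# drawn from [1, x, y], giving [1, x, y, x*x, x*y, y*y].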
def build_nth_x_from_set(arr_X, n = 7):
l = len(arr_X)
order = len(nth_arr_from_sample(arr_X[0], n))
print "Original features:", len(arr_X[0])
print "Built features:", order
X = numpy.empty([l, order])
for i in xrange(len(arr_X)):
sample = arr_X[i]
nth_sample = nth_arr_from_sample(sample, n)
X[i] = nth_sample
return X
def build_y(arr_Y):
return numpy.array(map(lambda x: [x], arr_Y))
|
maxikov/attfocus
|
polynomial_regression/featurebuilder.py
|
Python
|
gpl-3.0
| 4,222
|
"""Module for testing session pools."""
import threading
class TestConnection(TestCase):
def __ConnectAndDrop(self):
"""Connect to the database, perform a query and drop the connection."""
connection = self.pool.acquire()
cursor = connection.cursor()
cursor.execute(u"select count(*) from TestNumbers")
count, = cursor.fetchone()
self.failUnlessEqual(count, 10)
def testPool(self):
"""test that the pool is created and has the right attributes"""
pool = cx_Oracle.SessionPool(USERNAME, PASSWORD, TNSENTRY, 2, 8, 3)
self.failUnlessEqual(pool.username, USERNAME, "user name differs")
self.failUnlessEqual(pool.tnsentry, TNSENTRY, "tnsentry differs")
self.failUnlessEqual(pool.max, 8, "max differs")
self.failUnlessEqual(pool.min, 2, "min differs")
self.failUnlessEqual(pool.increment, 3, "increment differs")
self.failUnlessEqual(pool.opened, 2, "opened differs")
self.failUnlessEqual(pool.busy, 0, "busy not 0 at start")
connection_1 = pool.acquire()
self.failUnlessEqual(pool.busy, 1, "busy not 1 after acquire")
self.failUnlessEqual(pool.opened, 2, "opened not unchanged (1)")
connection_2 = pool.acquire()
self.failUnlessEqual(pool.busy, 2, "busy not 2 after acquire")
self.failUnlessEqual(pool.opened, 2, "opened not unchanged (2)")
connection_3 = pool.acquire()
self.failUnlessEqual(pool.busy, 3, "busy not 3 after acquire")
self.failUnlessEqual(pool.opened, 5, "opened not changed (3)")
pool.release(connection_3)
self.failUnlessEqual(pool.busy, 2, "busy not 2 after release")
del connection_2
self.failUnlessEqual(pool.busy, 1, "busy not 1 after del")
def testProxyAuth(self):
"""test that proxy authentication is possible"""
pool = cx_Oracle.SessionPool(USERNAME, PASSWORD, TNSENTRY, 2, 8, 3)
self.failUnlessEqual(pool.homogeneous, 1,
"homogeneous should be 1 by default")
self.failUnlessRaises(cx_Oracle.ProgrammingError, pool.acquire,
user = "proxyuser")
pool = cx_Oracle.SessionPool(USERNAME, PASSWORD, TNSENTRY, 2, 8, 3,
homogeneous = False)
self.failUnlessEqual(pool.homogeneous, 0,
"homogeneous should be 0 after setting it in the constructor")
user = u"%s_proxy" % USERNAME
connection = pool.acquire(user = user)
cursor = connection.cursor()
cursor.execute(u'select user from dual')
result, = cursor.fetchone()
self.assertEqual(result, user.upper())
def testRollbackOnDel(self):
"connection rolls back before being destroyed"
pool = cx_Oracle.SessionPool(USERNAME, PASSWORD, TNSENTRY, 1, 8, 3)
connection = pool.acquire()
cursor = connection.cursor()
cursor.execute(u"truncate table TestExecuteMany")
cursor.execute(u"insert into TestExecuteMany (IntCol) values (1)")
pool = cx_Oracle.SessionPool(USERNAME, PASSWORD, TNSENTRY, 1, 8, 3)
connection = pool.acquire()
cursor = connection.cursor()
cursor.execute(u"select count(*) from TestExecuteMany")
count, = cursor.fetchone()
self.failUnlessEqual(count, 0)
def testRollbackOnRelease(self):
"connection rolls back before released back to the pool"
pool = cx_Oracle.SessionPool(USERNAME, PASSWORD, TNSENTRY, 1, 8, 3)
connection = pool.acquire()
cursor = connection.cursor()
cursor.execute(u"truncate table TestExecuteMany")
cursor.execute(u"insert into TestExecuteMany (IntCol) values (1)")
pool.release(connection)
pool = cx_Oracle.SessionPool(USERNAME, PASSWORD, TNSENTRY, 1, 8, 3)
connection = pool.acquire()
cursor = connection.cursor()
cursor.execute(u"select count(*) from TestExecuteMany")
count, = cursor.fetchone()
self.failUnlessEqual(count, 0)
def testThreading(self):
"""test session pool to database with multiple threads"""
self.pool = cx_Oracle.SessionPool(USERNAME, PASSWORD, TNSENTRY, 5, 20,
2, threaded = True)
threads = []
for i in range(20):
thread = threading.Thread(None, self.__ConnectAndDrop)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
|
jayceyxc/hue
|
desktop/core/ext-py/cx_Oracle-5.2.1/test/uSessionPool.py
|
Python
|
apache-2.0
| 4,475
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
'''
Copyright 2014-2015 Teppo Perä
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from pytraits.trait_composer import add_traits
def combine_class(*traits, **resolved_conflicts):
"""
This function composes new class out of any number of traits.
>>> class One:
... def first(self): return 1
...
>>> class Two:
... def second(self): return 2
...
>>> class Three:
... def third(self): return 3
...
>>> Combination = combine_class(One, Two, Three)
>>> instance = Combination()
>>> instance.first(), instance.second(), instance.third()
(1, 2, 3)
"""
NewClass = type("NewClass", (object,), {})
add_traits(NewClass, *traits)
return NewClass
if __name__ == '__main__':
import doctest
doctest.testmod()
|
Debith/py2traits
|
src/pytraits/combiner.py
|
Python
|
apache-2.0
| 1,354
|
import sys
import os
import shutil
from bento.installed_package_description \
import \
InstalledSection
from bento.errors \
import \
CommandExecutionFailure
from bento.utils.utils \
import \
cpu_count, extract_exception
import bento.errors
import yaku.task_manager
import yaku.context
import yaku.scheduler
import yaku.errors
def build_extension(bld, extension, env=None):
builder = bld.builders["pyext"]
try:
if env is None:
env = {"PYEXT_CPPPATH": extension.include_dirs}
else:
val = env.get("PYEXT_CPPPATH", [])
val.extend(extension.include_dirs)
tasks = builder.extension(extension.name, extension.sources, env)
if len(tasks) > 1:
outputs = tasks[0].gen.outputs
else:
outputs = []
return [n.bldpath() for n in outputs]
except RuntimeError:
e = extract_exception()
msg = "Building extension %s failed: %s" % \
(extension.name, str(e))
raise CommandExecutionFailure(msg)
def build_compiled_library(bld, clib, env=None):
builder = bld.builders["ctasks"]
try:
for p in clib.include_dirs:
builder.env["CPPPATH"].insert(0, p)
outputs = builder.static_library(clib.name, clib.sources, env)
return [n.bldpath() for n in outputs]
except RuntimeError:
e = extract_exception()
msg = "Building library %s failed: %s" % (clib.name, str(e))
raise CommandExecutionFailure(msg)
|
cournape/Bento
|
bento/commands/build_yaku.py
|
Python
|
bsd-3-clause
| 1,536
|
"""Overrides the built-in help formatter.
All help messages will be embed and pretty.
Most of the code stolen from
discord.ext.commands.formatter.py and
converted into embeds instead of codeblocks.
Docstr on cog class becomes category.
Docstr on command definition becomes command
summary and usage.
Use [p] in command docstr for bot prefix.
See [p]help here for example.
await bot.formatter.format_help_for(ctx, command)
to send help page for command. Optionally pass a
string as third arg to add a more descriptive
message to help page.
e.g. format_help_for(ctx, ctx.command, "Missing required arguments")
discord.py 1.0.0a
Experimental: compatibility with 0.16.8
Copyrights to logic of code belong to Rapptz (Danny)
Everything else credit to SirThane#1780"""
import inspect
import itertools
import re
import sys
import traceback
import discord
from discord.ext import commands
from discord.ext.commands import formatter
empty = u'\u200b'
_mentions_transforms = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere'
}
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))
orig_help = None
class Help(formatter.HelpFormatter):
"""Formats help for commands."""
def __init__(self, bot, *args, **kwargs):
self.bot = bot
global orig_help
orig_help = bot.get_command('help')
self.bot.remove_command('help')
self.bot.formatter = self
self.bot.help_formatter = self
super().__init__(*args, **kwargs)
# Shortcuts that allow cog to run on 0.16.8 and 1.0.0a
def pm_check(self, ctx):
return isinstance(ctx.channel, discord.DMChannel)
@property
def me(self):
return self.context.me
@property
def bot_all_commands(self):
return self.bot.all_commands
@property
def avatar(self):
return self.bot.user.avatar_url_as(format='png')
@property
def color(self):
if self.pm_check(self.context):
return 0
else:
return self.me.color
async def send(self, dest, content=None, embed=None):
await dest.send(content=content, embed=embed)
# All the other shit
@property
def author(self):
# Get author dict with username if PM and display name in guild
if self.pm_check(self.context):
name = self.bot.user.name
else:
            name = self.me.display_name or self.bot.user.name
author = {'name': '{0} Help Manual'.format(name),
'icon_url': self.avatar}
return author
@property
def destination(self):
return self.context.message.author if self.bot.pm_help else self.context.message.channel
def _add_subcommands(self, cmds):
entries = ''
for name, command in cmds:
if name in command.aliases:
# skip aliases
continue
if self.is_cog() or self.is_bot():
name = '{0}{1}'.format(self.clean_prefix, name)
entries += '**{0}** {1}\n'.format(name, command.short_doc)
return entries
def get_ending_note(self):
# command_name = self.context.invoked_with
return "Type {0}help <command> for more info on a command.\n" \
"You can also type {0}help <category> for more info on a category.".format(
self.clean_prefix)
async def cmd_format(self, ctx, command):
"""Formats command for output.
Returns a dict used to build embed"""
# All default values for embed dict
self.command = command
self.context = ctx
emb = {
'embed': {
'title': '',
'description': '',
},
'footer': {
'text': self.get_ending_note()
},
'fields': []
}
description = command.description if not self.is_cog() else inspect.getdoc(command)
if not description == '' and description is not None:
description = '*{0}*'.format(description)
if description:
# <description> portion
emb['embed']['description'] = description
if isinstance(command, discord.ext.commands.core.Command):
# <signature portion>
emb['embed']['title'] = emb['embed']['description']
emb['embed']['description'] = '`Syntax: {0}`'.format(self.get_command_signature())
# <long doc> section
if command.help:
name = '{0}'.format(command.help.split('\n\n')[0])
value = command.help[len(name):].replace('[p]', self.clean_prefix)
if value == '':
value = empty
field = {
'name': name,
'value': value,
'inline': False
}
emb['fields'].append(field)
# end it here if it's just a regular command
if not self.has_subcommands():
return emb
def category(tup):
# Turn get cog (Category) name from cog/list tuples
cog = tup[1].cog_name
return '**__{0}:__**'.format(cog) if cog is not None else '**__\u200bNo Category:__**'
# Get subcommands for bot or category
filtered = await self.filter_command_list()
if self.is_bot():
# Get list of non-hidden commands for bot.
data = sorted(filtered, key=category)
for category, commands_ in itertools.groupby(data, key=category):
# there simply is no prettier way of doing this.
field = {
'inline': False
}
commands_ = sorted(commands_)
if len(commands_) > 0:
field['name'] = category
field['value'] = self._add_subcommands(commands_) # May need paginated
emb['fields'].append(field)
else:
# Get list of commands for category
filtered = sorted(filtered)
if filtered:
field = {
'name': '**__Commands:__**' if not self.is_bot() and self.is_cog() else '**__Subcommands:__**',
'value': self._add_subcommands(filtered), # May need paginated
'inline': False
}
emb['fields'].append(field)
return emb
async def format_help_for(self, ctx, command_or_bot, reason: str=None):
"""Formats the help page and handles the actual heavy lifting of how ### WTF HAPPENED?
the help command looks like. To change the behaviour, override the
:meth:`~.HelpFormatter.format` method.
Parameters
-----------
ctx: :class:`.Context`
The context of the invoked help command.
command_or_bot: :class:`.Command` or :class:`.Bot`
The bot or command that we are getting the help of.
Returns
--------
list
A paginated output of the help command.
"""
self.context = ctx
self.command = command_or_bot
emb = await self.cmd_format(ctx, command_or_bot)
if reason:
emb['embed']['title'] = "{0}".format(reason)
embed = discord.Embed(color=self.color, **emb['embed'])
embed.set_author(**self.author)
for field in emb['fields']:
embed.add_field(**field)
embed.set_footer(**emb['footer'])
await self.send(self.destination, embed=embed)
def simple_embed(self, title=None, description=None, color=None, author=None):
# Shortcut
embed = discord.Embed(title=title, description=description, color=color)
embed.set_footer(text=self.bot.formatter.get_ending_note())
if author:
embed.set_author(**author)
return embed
def cmd_not_found(self, cmd, color=0):
# Shortcut for a shortcut. Sue me
embed = self.simple_embed(title=self.bot.command_not_found.format(cmd),
description='Commands are case sensitive. Please check your spelling and try again',
color=color, author=self.author)
return embed
@commands.command(name='help', aliases=['halp', 'h'])
async def help(self, ctx, *cmds: str):
"""Shows help documentation.
[p]**help**: Shows the help manual.
[p]**help** command: Show help for a command
[p]**help** Category: Show commands and description for a category"""
self.context = ctx
def repl(obj):
return _mentions_transforms.get(obj.group(0), '')
# help by itself just lists our own commands.
if len(cmds) == 0:
await self.bot.formatter.format_help_for(ctx, self.bot)
return
elif len(cmds) == 1:
# try to see if it is a cog name
name = _mention_pattern.sub(repl, cmds[0])
command = None
if name in self.bot.cogs:
command = self.bot.cogs[name]
else:
command = self.bot_all_commands.get(name)
if command is None:
await self.send(self.destination, embed=self.cmd_not_found(name, self.color))
return
await self.bot.formatter.format_help_for(ctx, command)
else:
name = _mention_pattern.sub(repl, cmds[0])
command = self.bot_all_commands.get(name)
if command is None:
await self.send(self.destination, embed=self.cmd_not_found(name, self.color))
return
for key in cmds[1:]:
try:
key = _mention_pattern.sub(repl, key)
command = command.all_commands.get(key)
if command is None:
await self.send(self.destination, embed=self.cmd_not_found(key, self.color))
return
except AttributeError:
await self.send(self.destination,
embed=self.simple_embed(title='Command "{0.name}" has no subcommands.'.format(command),
color=self.color,
author=self.author))
return
await self.bot.formatter.format_help_for(ctx, command)
@help.error
async def help_error(self, error, ctx):
await ctx.send('{0.__name__}: {1}'.format(type(error), error))
traceback.print_tb(error.original.__traceback__, file=sys.stderr)
def __unload(self):
print('called __unload')
self.bot.formatter = formatter.HelpFormatter()
self.bot.add_command(orig_help)
def setup(bot):
bot.add_cog(Help(bot))
|
ZetDude/KALEVBOT
|
cogs/utils/help.py
|
Python
|
mit
| 10,945
|
from apio.commands.drivers import cli as cmd_drivers
def test_drivers(clirunner, validate_cliresult, configenv):
with clirunner.isolated_filesystem():
configenv()
result = clirunner.invoke(cmd_drivers)
validate_cliresult(result)
|
Jesus89/apio
|
test/env_commands/test_drivers.py
|
Python
|
gpl-2.0
| 259
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train and Eval the MNIST network.
This version is like fully_connected_feed.py but uses data converted
to a TFRecords file containing tf.train.Example protocol buffers.
See tensorflow/g3doc/how_tos/reading_data.md#reading-from-files
for context.
YOU MUST run convert_to_records before running this (but you only need to
run it once).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('num_epochs', 2, 'Number of epochs to run trainer.')
flags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('batch_size', 100, 'Batch size.')
flags.DEFINE_string('train_dir', '/tmp/data',
'Directory with the training data.')
# Constants used for dealing with the files, matches convert_to_records.
TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'validation.tfrecords'
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
})
# Convert from a scalar string tensor (whose single string has
# length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
# [mnist.IMAGE_PIXELS].
image = tf.decode_raw(features['image_raw'], tf.uint8)
image.set_shape([mnist.IMAGE_PIXELS])
# OPTIONAL: Could reshape into a 28x28 image and apply distortions
# here. Since we are not applying any distortions in this
# example, and the next step expects the image to be flattened
# into a vector, we don't bother.
# Convert from [0, 255] -> [-0.5, 0.5] floats.
image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(features['label'], tf.int32)
return image, label
def inputs(train, batch_size, num_epochs):
"""Reads input data num_epochs times.
Args:
train: Selects between the training (True) and validation (False) data.
batch_size: Number of examples per returned batch.
num_epochs: Number of times to read the input data, or 0/None to
train forever.
Returns:
A tuple (images, labels), where:
* images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
in the range [-0.5, 0.5].
* labels is an int32 tensor with shape [batch_size] with the true label,
a number in the range [0, mnist.NUM_CLASSES).
    Note that a tf.train.QueueRunner is added to the graph, which
must be run using e.g. tf.train.start_queue_runners().
"""
if not num_epochs: num_epochs = None
filename = os.path.join(FLAGS.train_dir,
TRAIN_FILE if train else VALIDATION_FILE)
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer(
[filename], num_epochs=num_epochs)
# Even when reading in multiple threads, share the filename
# queue.
image, label = read_and_decode(filename_queue)
# Shuffle the examples and collect them into batch_size batches.
# (Internally uses a RandomShuffleQueue.)
# We run this in two threads to avoid being a bottleneck.
images, sparse_labels = tf.train.shuffle_batch(
[image, label], batch_size=batch_size, num_threads=2,
capacity=1000 + 3 * batch_size,
# Ensures a minimum amount of shuffling of examples.
min_after_dequeue=1000)
return images, sparse_labels
def run_training():
"""Train MNIST for a number of steps."""
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Input images and labels.
images, labels = inputs(train=True, batch_size=FLAGS.batch_size,
num_epochs=FLAGS.num_epochs)
# Build a Graph that computes predictions from the inference model.
logits = mnist.inference(images,
FLAGS.hidden1,
FLAGS.hidden2)
# Add to the Graph the loss calculation.
loss = mnist.loss(logits, labels)
# Add to the Graph operations that train the model.
train_op = mnist.training(loss, FLAGS.learning_rate)
# The op for initializing the variables.
init_op = tf.initialize_all_variables()
# Create a session for running operations in the Graph.
sess = tf.Session()
# Initialize the variables (the trained variables and the
# epoch counter).
sess.run(init_op)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
step = 0
while not coord.should_stop():
start_time = time.time()
# Run one step of the model. The return values are
# the activations from the `train_op` (which is
# discarded) and the `loss` op. To inspect the values
# of your ops or variables, you may include them in
# the list passed to sess.run() and the value tensors
# will be returned in the tuple from the call.
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
# Print an overview fairly often.
if step % 100 == 0:
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
duration))
step += 1
except tf.errors.OutOfRangeError:
print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
|
DailyActie/Surrogate-Model
|
01-codes/tensorflow-master/tensorflow/examples/how_tos/reading_data/fully_connected_reader.py
|
Python
|
mit
| 7,305
|
# -*- coding: utf-8 -*-
#
# This file is part of CERN Open Data Portal.
# Copyright (C) 2017 CERN.
#
# CERN Open Data Portal is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Open Data Portal is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Open Data Portal; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CAP Query factory for REST API."""
from __future__ import absolute_import, print_function
from elasticsearch_dsl.query import Q
from flask import current_app, request
from flask_login import current_user
from invenio_records_rest.errors import InvalidQueryRESTError
from invenio_records_rest.sorter import default_sorter_factory
from .facets import cap_facets_factory
# If any of these keywords appears as a parameter in the search URL, it is
# resolved to the specified query when its value is True, or to the negated
# query (~query) when False.
KEYWORD_TO_QUERY = {
'by_bot': lambda: ~Q('exists', field='created_by'),
'by_me': lambda: Q('match', **{'created_by': current_user.id}),
}
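# Example: a request such as GET /records/?q=higgs&by_me=True resolves to
# Q('query_string', query='higgs', ...) & Q('match', created_by=<current user id>).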
ESCAPE_CHAR_MAP = {
'/': r'\/',
'+': r'\+',
'-': r'\-',
'^': r'\^',
}
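# Example: a query string like 'pt +jets' is escaped to 'pt \+jets', so these
# characters are matched literally instead of acting as query_string operators.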
def cap_search_factory(self, search, query_parser=None):
"""Customize Parse query using Invenio-Query-Parser.
:param self: REST view.
:param search: Elastic search DSL search instance.
:returns: Tuple with search instance and URL arguments.
"""
def _default_parser(qstr=None, **kwargs):
"""Use of the Q() from elasticsearch_dsl."""
def _escape_qstr(qstr):
return ''.join((ESCAPE_CHAR_MAP.get(char, char) for char in qstr))
query = Q('query_string',
query=_escape_qstr(qstr),
analyzer="lowercase_whitespace_analyzer",
analyze_wildcard=True,
default_operator='AND') if qstr else Q()
# resolve keywords to queries
for k, v in kwargs.items():
if k in KEYWORD_TO_QUERY:
if v == 'True':
query = query & KEYWORD_TO_QUERY[k]()
elif v == 'False':
query = query & ~KEYWORD_TO_QUERY[k]()
return query
query_string = request.values.get('q')
# parse url params to search for keywords
query_keywords = {
k: request.values[k]
for k in KEYWORD_TO_QUERY.keys() if k in request.values
}
query_parser = query_parser or _default_parser
try:
search = search.query(query_parser(query_string, **query_keywords))
except SyntaxError:
current_app.logger.debug("Failed parsing query: {0}".format(
request.values.get('q', '')),
exc_info=True)
raise InvalidQueryRESTError()
search_index = search._index[0]
search, urlkwargs = cap_facets_factory(search, search_index)
search, sortkwargs = default_sorter_factory(search, search_index)
for key, value in sortkwargs.items():
urlkwargs.add(key, value)
urlkwargs.add('q', query_string)
return search, urlkwargs
|
cernanalysispreservation/analysis-preservation.cern.ch
|
cap/modules/search/query.py
|
Python
|
gpl-2.0
| 3,691
|
from selenium.webdriver.support.ui import Select
from keywordgroup import KeywordGroup
class _SelectElementKeywords(KeywordGroup):
# Public
def get_list_items(self, locator):
"""Returns the values in the select list identified by `locator`.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select, options = self._get_select_list_options(locator)
return self._get_labels_for_options(options)
def get_selected_list_label(self, locator):
"""Returns the visible label of the selected element from the select list identified by `locator`.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select = self._get_select_list(locator)
return select.first_selected_option.text
def get_selected_list_labels(self, locator):
"""Returns the visible labels of selected elements (as a list) from the select list identified by `locator`.
Fails if there is no selection.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select, options = self._get_select_list_options_selected(locator)
if len(options) == 0:
raise ValueError("Select list with locator '%s' does not have any selected values")
return self._get_labels_for_options(options)
def get_selected_list_value(self, locator):
"""Returns the value of the selected element from the select list identified by `locator`.
Return value is read from `value` attribute of the selected element.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select = self._get_select_list(locator)
return select.first_selected_option.get_attribute('value')
def get_selected_list_values(self, locator):
"""Returns the values of selected elements (as a list) from the select list identified by `locator`.
Fails if there is no selection.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select, options = self._get_select_list_options_selected(locator)
if len(options) == 0:
raise ValueError("Select list with locator '%s' does not have any selected values")
return self._get_values_for_options(options)
def list_selection_should_be(self, locator, *items):
"""Verifies the selection of select list identified by `locator` is exactly `*items`.
If you want to test that no option is selected, simply give no `items`.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
items_str = items and "option(s) [ %s ]" % " | ".join(items) or "no options"
self._info("Verifying list '%s' has %s selected." % (locator, items_str))
items = list(items)
self.page_should_contain_list(locator)
select, options = self._get_select_list_options_selected(locator)
if not items and len(options) == 0:
return
selected_values = self._get_values_for_options(options)
selected_labels = self._get_labels_for_options(options)
err = "List '%s' should have had selection [ %s ] but it was [ %s ]" \
% (locator, ' | '.join(items), ' | '.join(selected_labels))
for item in items:
if item not in selected_values + selected_labels:
raise AssertionError(err)
for selected_value, selected_label in zip(selected_values, selected_labels):
if selected_value not in items and selected_label not in items:
raise AssertionError(err)
def list_should_have_no_selections(self, locator):
"""Verifies select list identified by `locator` has no selections.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
self._info("Verifying list '%s' has no selection." % locator)
select, options = self._get_select_list_options_selected(locator)
if options:
selected_labels = self._get_labels_for_options(options)
items_str = " | ".join(selected_labels)
raise AssertionError("List '%s' should have had no selection "
"(selection was [ %s ])" % (locator, items_str))
def page_should_contain_list(self, locator, message='', loglevel='INFO'):
"""Verifies select list identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for lists are `id` and `name`. See `introduction` for
details about locating elements.
"""
self._page_should_contain_element(locator, 'list', message, loglevel)
def page_should_not_contain_list(self, locator, message='', loglevel='INFO'):
"""Verifies select list identified by `locator` is not found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for lists are `id` and `name`. See `introduction` for
details about locating elements.
"""
self._page_should_not_contain_element(locator, 'list', message, loglevel)
def select_all_from_list(self, locator):
"""Selects all values from multi-select list identified by `id`.
Key attributes for lists are `id` and `name`. See `introduction` for
details about locating elements.
"""
self._info("Selecting all options from list '%s'." % locator)
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Select all from list' works only for multiselect lists.")
for i in range(len(select.options)):
select.select_by_index(i)
def select_from_list(self, locator, *items):
"""Selects `*items` from list identified by `locator`
If more than one value is given for a single-selection list, the last
value will be selected. If the target list is a multi-selection list,
and `*items` is an empty list, all values of the list will be selected.
*items try to select by value then by label.
It's faster to use 'by index/value/label' functions.
An exception is raised for a single-selection list if the last
value does not exist in the list and a warning for all other non-
existing items. For a multi-selection list, an exception is raised
for any and all non-existing values.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
non_existing_items = []
items_str = items and "option(s) '%s'" % ", ".join(items) or "all options"
self._info("Selecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not items:
for i in range(len(select.options)):
select.select_by_index(i)
return
for item in items:
try:
select.select_by_value(item)
except:
try:
select.select_by_visible_text(item)
except:
non_existing_items = non_existing_items + [item]
continue
if any(non_existing_items):
if select.is_multiple:
raise ValueError("Options '%s' not in list '%s'." % (", ".join(non_existing_items), locator))
else:
                if any(non_existing_items[:-1]):
                    items_str = "Option(s) '%s'" % ", ".join(non_existing_items[:-1])
                    self._warn("%s not found within list '%s'." % (items_str, locator))
if items and items[-1] in non_existing_items:
raise ValueError("Option '%s' not in list '%s'." % (items[-1], locator))
def select_from_list_by_index(self, locator, *indexes):
"""Selects `*indexes` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not indexes:
raise ValueError("No index given.")
items_str = "index(es) '%s'" % ", ".join(indexes)
self._info("Selecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
for index in indexes:
select.select_by_index(int(index))
def select_from_list_by_value(self, locator, *values):
"""Selects `*values` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not values:
raise ValueError("No value given.")
items_str = "value(s) '%s'" % ", ".join(values)
self._info("Selecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
for value in values:
select.select_by_value(value)
def select_from_list_by_label(self, locator, *labels):
"""Selects `*labels` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not labels:
raise ValueError("No value given.")
items_str = "label(s) '%s'" % ", ".join(labels)
self._info("Selecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
for label in labels:
select.select_by_visible_text(label)
def unselect_from_list(self, locator, *items):
"""Unselects given values from select list identified by locator.
As a special case, giving empty list as `*items` will remove all
selections.
        Each item is unselected both by value and by label; the dedicated
        'by index/value/label' keywords are faster.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
items_str = items and "option(s) '%s'" % ", ".join(items) or "all options"
self._info("Unselecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
if not items:
select.deselect_all()
return
select, options = self._get_select_list_options(select)
for item in items:
select.deselect_by_value(item)
select.deselect_by_visible_text(item)
def unselect_from_list_by_index(self, locator, *indexes):
"""Unselects `*indexes` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not indexes:
raise ValueError("No index given.")
items_str = "index(es) '%s'" % ", ".join(indexes)
self._info("Unselecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
for index in indexes:
select.deselect_by_index(int(index))
def unselect_from_list_by_value(self, locator, *values):
"""Unselects `*values` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not values:
raise ValueError("No value given.")
items_str = "value(s) '%s'" % ", ".join(values)
self._info("Unselecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
for value in values:
select.deselect_by_value(value)
def unselect_from_list_by_label(self, locator, *labels):
"""Unselects `*labels` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not labels:
raise ValueError("No value given.")
items_str = "label(s) '%s'" % ", ".join(labels)
self._info("Unselecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
for label in labels:
select.deselect_by_visible_text(label)
# Private
def _get_labels_for_options(self, options):
labels = []
for option in options:
labels.append(option.text)
return labels
def _get_select_list(self, locator):
el = self._element_find(locator, True, True, 'select')
return Select(el)
def _get_select_list_options(self, select_list_or_locator):
if isinstance(select_list_or_locator, Select):
select = select_list_or_locator
else:
select = self._get_select_list(select_list_or_locator)
return select, select.options
def _get_select_list_options_selected(self, locator):
select = self._get_select_list(locator)
# TODO: Handle possible exception thrown by all_selected_options
return select, select.all_selected_options
def _get_values_for_options(self, options):
values = []
for option in options:
values.append(option.get_attribute('value'))
return values
def _is_multiselect_list(self, select):
multiple_value = select.get_attribute('multiple')
if multiple_value is not None and (multiple_value == 'true' or multiple_value == 'multiple'):
return True
return False
def _unselect_all_options_from_multi_select_list(self, select):
self._current_browser().execute_script("arguments[0].selectedIndex = -1;", select)
def _unselect_option_from_multi_select_list(self, select, options, index):
if options[index].is_selected():
options[index].click()
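# Hedged usage sketch (not part of the library): the keywords above wrap
# selenium's Select helper, which can also be driven directly. `driver` and
# the element id 'pets' are hypothetical.
#
#     from selenium.webdriver.support.ui import Select
#     select = Select(driver.find_element_by_id('pets'))
#     select.select_by_visible_text('Cat')
#     print([opt.text for opt in select.all_selected_options])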
|
jennifer0703/robotframework-selenium2library
|
src/Selenium2Library/keywords/_selectelement.py
|
Python
|
apache-2.0
| 15,937
|
"""
This module contains celery task functions for handling the sending of bulk email
to a course.
"""
import math
import re
import time
from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.mail import EmailMultiAlternatives, get_connection
from django.http import Http404
from celery import task, current_task
from celery.utils.log import get_task_logger
from django.core.urlresolvers import reverse
from statsd import statsd
from bulk_email.models import (
CourseEmail, Optout, CourseEmailTemplate,
SEND_TO_MYSELF, SEND_TO_STAFF, SEND_TO_ALL,
)
from courseware.access import _course_staff_group_name, _course_instructor_group_name
from courseware.courses import get_course_by_id, course_image_url
log = get_task_logger(__name__)
@task(default_retry_delay=10, max_retries=5) # pylint: disable=E1102
def delegate_email_batches(email_id, user_id):
"""
Delegates emails by querying for the list of recipients who should
get the mail, chopping up into batches of settings.EMAILS_PER_TASK size,
and queueing up worker jobs.
Returns the number of batches (workers) kicked off.
"""
try:
email_obj = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist as exc:
# The retry behavior here is necessary because of a race condition between the commit of the transaction
# that creates this CourseEmail row and the celery pipeline that starts this task.
# We might possibly want to move the blocking into the view function rather than have it in this task.
log.warning("Failed to get CourseEmail with id %s, retry %d", email_id, current_task.request.retries)
raise delegate_email_batches.retry(arg=[email_id, user_id], exc=exc)
to_option = email_obj.to_option
course_id = email_obj.course_id
try:
course = get_course_by_id(course_id, depth=1)
except Http404 as exc:
log.exception("get_course_by_id failed: %s", exc.args[0])
raise Exception("get_course_by_id failed: " + exc.args[0])
course_url = 'https://{}{}'.format(
settings.SITE_NAME,
reverse('course_root', kwargs={'course_id': course_id})
)
image_url = 'https://{}{}'.format(settings.SITE_NAME, course_image_url(course))
if to_option == SEND_TO_MYSELF:
recipient_qset = User.objects.filter(id=user_id)
elif to_option == SEND_TO_ALL or to_option == SEND_TO_STAFF:
staff_grpname = _course_staff_group_name(course.location)
staff_group, _ = Group.objects.get_or_create(name=staff_grpname)
staff_qset = staff_group.user_set.all()
instructor_grpname = _course_instructor_group_name(course.location)
instructor_group, _ = Group.objects.get_or_create(name=instructor_grpname)
instructor_qset = instructor_group.user_set.all()
recipient_qset = staff_qset | instructor_qset
if to_option == SEND_TO_ALL:
enrollment_qset = User.objects.filter(courseenrollment__course_id=course_id,
courseenrollment__is_active=True)
recipient_qset = recipient_qset | enrollment_qset
recipient_qset = recipient_qset.distinct()
else:
log.error("Unexpected bulk email TO_OPTION found: %s", to_option)
raise Exception("Unexpected bulk email TO_OPTION found: {0}".format(to_option))
recipient_qset = recipient_qset.order_by('pk')
total_num_emails = recipient_qset.count()
num_queries = int(math.ceil(float(total_num_emails) / float(settings.EMAILS_PER_QUERY)))
last_pk = recipient_qset[0].pk - 1
num_workers = 0
for _ in range(num_queries):
recipient_sublist = list(recipient_qset.order_by('pk').filter(pk__gt=last_pk)
.values('profile__name', 'email', 'pk')[:settings.EMAILS_PER_QUERY])
last_pk = recipient_sublist[-1]['pk']
num_emails_this_query = len(recipient_sublist)
num_tasks_this_query = int(math.ceil(float(num_emails_this_query) / float(settings.EMAILS_PER_TASK)))
chunk = int(math.ceil(float(num_emails_this_query) / float(num_tasks_this_query)))
for i in range(num_tasks_this_query):
to_list = recipient_sublist[i * chunk:i * chunk + chunk]
course_email.delay(
email_id,
to_list,
course.display_name,
course_url,
image_url,
False
)
num_workers += num_tasks_this_query
return num_workers
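# Worked example of the batching arithmetic in delegate_email_batches, with
# illustrative settings: 2500 recipients, EMAILS_PER_QUERY = 1000 and
# EMAILS_PER_TASK = 100 give num_queries = ceil(2500/1000) = 3; the queries
# yield 1000, 1000 and 500 recipients, producing 10, 10 and 5 subtasks
# (chunks of at most 100 recipients), i.e. 25 workers in total. Note the code
# assumes at least one recipient; recipient_qset[0] raises IndexError otherwise.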
@task(default_retry_delay=15, max_retries=5) # pylint: disable=E1102
def course_email(email_id, to_list, course_title, course_url, image_url, throttle=False):
"""
Takes a primary id for a CourseEmail object and a 'to_list' of recipient objects--keys are
'profile__name', 'email' (address), and 'pk' (in the user table).
course_title, course_url, and image_url are to memoize course properties and save lookups.
Sends to all addresses contained in to_list. Emails are sent multi-part, in both plain
text and html.
"""
try:
msg = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist:
log.exception("Could not find email id:{} to send.".format(email_id))
raise
# exclude optouts
optouts = (Optout.objects.filter(course_id=msg.course_id,
user__in=[i['pk'] for i in to_list])
.values_list('user__email', flat=True))
optouts = set(optouts)
num_optout = len(optouts)
to_list = [recipient for recipient in to_list if recipient['email'] not in optouts]
subject = "[" + course_title + "] " + msg.subject
course_title_no_quotes = re.sub(r'"', '', course_title)
from_addr = '"{0}" Course Staff <{1}>'.format(course_title_no_quotes, settings.DEFAULT_BULK_FROM_EMAIL)
course_email_template = CourseEmailTemplate.get_template()
try:
connection = get_connection()
connection.open()
num_sent = 0
num_error = 0
# Define context values to use in all course emails:
email_context = {
'name': '',
'email': '',
'course_title': course_title,
'course_url': course_url,
'course_image_url': image_url,
'account_settings_url': 'https://{}{}'.format(settings.SITE_NAME, reverse('dashboard')),
'platform_name': settings.PLATFORM_NAME,
}
while to_list:
# Update context with user-specific values:
email = to_list[-1]['email']
email_context['email'] = email
email_context['name'] = to_list[-1]['profile__name']
# Construct message content using templates and context:
plaintext_msg = course_email_template.render_plaintext(msg.text_message, email_context)
html_msg = course_email_template.render_htmltext(msg.html_message, email_context)
# Create email:
email_msg = EmailMultiAlternatives(
subject,
plaintext_msg,
from_addr,
[email],
connection=connection
)
email_msg.attach_alternative(html_msg, 'text/html')
# Throttle if we tried a few times and got the rate limiter
if throttle or current_task.request.retries > 0:
time.sleep(0.2)
try:
connection.send_messages([email_msg])
statsd.increment('course_email.sent', tags=[_statsd_tag(course_title)])
log.info('Email with id %s sent to %s', email_id, email)
num_sent += 1
except SMTPDataError as exc:
# According to SMTP spec, we'll retry error codes in the 4xx range. 5xx range indicates hard failure
if exc.smtp_code >= 400 and exc.smtp_code < 500:
# This will cause the outer handler to catch the exception and retry the entire task
raise exc
else:
# This will fall through and not retry the message, since it will be popped
log.warning('Email with id %s not delivered to %s due to error %s', email_id, email, exc.smtp_error)
statsd.increment('course_email.error', tags=[_statsd_tag(course_title)])
num_error += 1
to_list.pop()
connection.close()
return course_email_result(num_sent, num_error, num_optout)
except (SMTPDataError, SMTPConnectError, SMTPServerDisconnected) as exc:
        # Errors caught here cause the email to be retried. The entire task is
        # retried without popping the list, on the reasoning that all of these
        # errors may be temporary conditions.
log.warning('Email with id %d not delivered due to temporary error %s, retrying send to %d recipients',
email_id, exc, len(to_list))
raise course_email.retry(
arg=[
email_id,
to_list,
course_title,
course_url,
image_url,
current_task.request.retries > 0
],
exc=exc,
countdown=(2 ** current_task.request.retries) * 15
)
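        # countdown = (2 ** retries) * 15 backs off exponentially: successive
        # retries wait 15s, 30s, 60s, 120s and 240s before max_retries=5 is
        # exhausted.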
except:
log.exception('Email with id %d caused course_email task to fail with uncaught exception. To list: %s',
email_id,
[i['email'] for i in to_list])
# Close the connection before we exit
connection.close()
raise
# This string format code is wrapped in this function to allow mocking for a unit test
def course_email_result(num_sent, num_error, num_optout):
"""Return the formatted result of course_email sending."""
return "Sent {0}, Fail {1}, Optout {2}".format(num_sent, num_error, num_optout)
def _statsd_tag(course_title):
"""
Calculate the tag we will use for DataDog.
"""
tag = "course_email:{0}".format(course_title)
return tag[:200]
|
pdehaye/theming-edx-platform
|
lms/djangoapps/bulk_email/tasks.py
|
Python
|
agpl-3.0
| 10,209
|
"""
A component which allows you to send data to an Influx database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/influxdb/
"""
import logging
import re
import voluptuous as vol
from homeassistant.const import (
EVENT_STATE_CHANGED, STATE_UNAVAILABLE, STATE_UNKNOWN, CONF_HOST,
CONF_PORT, CONF_SSL, CONF_VERIFY_SSL, CONF_USERNAME, CONF_PASSWORD,
CONF_EXCLUDE, CONF_INCLUDE, CONF_DOMAINS, CONF_ENTITIES)
from homeassistant.helpers import state as state_helper
from homeassistant.helpers.entity_values import EntityValues
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['influxdb==3.0.0']
_LOGGER = logging.getLogger(__name__)
CONF_DB_NAME = 'database'
CONF_TAGS = 'tags'
CONF_DEFAULT_MEASUREMENT = 'default_measurement'
CONF_OVERRIDE_MEASUREMENT = 'override_measurement'
CONF_TAGS_ATTRIBUTES = 'tags_attributes'
CONF_COMPONENT_CONFIG = 'component_config'
CONF_COMPONENT_CONFIG_GLOB = 'component_config_glob'
CONF_COMPONENT_CONFIG_DOMAIN = 'component_config_domain'
DEFAULT_DATABASE = 'home_assistant'
DEFAULT_VERIFY_SSL = True
DOMAIN = 'influxdb'
TIMEOUT = 5
COMPONENT_CONFIG_SCHEMA_ENTRY = vol.Schema({
vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_HOST): cv.string,
vol.Inclusive(CONF_USERNAME, 'authentication'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'authentication'): cv.string,
vol.Optional(CONF_EXCLUDE, default={}): vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]):
vol.All(cv.ensure_list, [cv.string])
}),
vol.Optional(CONF_INCLUDE, default={}): vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]):
vol.All(cv.ensure_list, [cv.string])
}),
vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_SSL): cv.boolean,
vol.Optional(CONF_DEFAULT_MEASUREMENT): cv.string,
vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string,
vol.Optional(CONF_TAGS, default={}):
vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_TAGS_ATTRIBUTES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_COMPONENT_CONFIG, default={}):
vol.Schema({cv.entity_id: COMPONENT_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_COMPONENT_CONFIG_GLOB, default={}):
vol.Schema({cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_COMPONENT_CONFIG_DOMAIN, default={}):
vol.Schema({cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}),
}),
}, extra=vol.ALLOW_EXTRA)
RE_DIGIT_TAIL = re.compile(r'^[^\.]*\d+\.?\d+[^\.]*$')
RE_DECIMAL = re.compile(r'[^\d.]+')
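# Illustration: for an attribute value such as '23.1C', RE_DIGIT_TAIL matches
# and RE_DECIMAL.sub('', '23.1C') == '23.1', so the listener below can store
# the numeric part as float(23.1) alongside the raw '_str' field.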
def setup(hass, config):
"""Set up the InfluxDB component."""
from influxdb import InfluxDBClient, exceptions
conf = config[DOMAIN]
kwargs = {
'database': conf[CONF_DB_NAME],
'verify_ssl': conf[CONF_VERIFY_SSL],
'timeout': TIMEOUT
}
if CONF_HOST in conf:
kwargs['host'] = conf[CONF_HOST]
if CONF_PORT in conf:
kwargs['port'] = conf[CONF_PORT]
if CONF_USERNAME in conf:
kwargs['username'] = conf[CONF_USERNAME]
if CONF_PASSWORD in conf:
kwargs['password'] = conf[CONF_PASSWORD]
if CONF_SSL in conf:
kwargs['ssl'] = conf[CONF_SSL]
include = conf.get(CONF_INCLUDE, {})
exclude = conf.get(CONF_EXCLUDE, {})
whitelist_e = set(include.get(CONF_ENTITIES, []))
whitelist_d = set(include.get(CONF_DOMAINS, []))
blacklist_e = set(exclude.get(CONF_ENTITIES, []))
blacklist_d = set(exclude.get(CONF_DOMAINS, []))
tags = conf.get(CONF_TAGS)
tags_attributes = conf.get(CONF_TAGS_ATTRIBUTES)
default_measurement = conf.get(CONF_DEFAULT_MEASUREMENT)
override_measurement = conf.get(CONF_OVERRIDE_MEASUREMENT)
component_config = EntityValues(
conf[CONF_COMPONENT_CONFIG],
conf[CONF_COMPONENT_CONFIG_DOMAIN],
conf[CONF_COMPONENT_CONFIG_GLOB])
try:
influx = InfluxDBClient(**kwargs)
influx.query("SHOW SERIES LIMIT 1;", database=conf[CONF_DB_NAME])
except exceptions.InfluxDBClientError as exc:
_LOGGER.error("Database host is not accessible due to '%s', please "
"check your entries in the configuration file and that "
"the database exists and is READ/WRITE.", exc)
return False
def influx_event_listener(event):
"""Listen for new messages on the bus and sends them to Influx."""
state = event.data.get('new_state')
if state is None or state.state in (
STATE_UNKNOWN, '', STATE_UNAVAILABLE) or \
state.entity_id in blacklist_e or \
state.domain in blacklist_d:
return
try:
if (whitelist_e and state.entity_id not in whitelist_e) or \
(whitelist_d and state.domain not in whitelist_d):
return
_state = float(state_helper.state_as_number(state))
_state_key = "value"
except ValueError:
_state = state.state
_state_key = "state"
measurement = component_config.get(state.entity_id).get(
CONF_OVERRIDE_MEASUREMENT)
if measurement in (None, ''):
if override_measurement:
measurement = override_measurement
else:
measurement = state.attributes.get('unit_of_measurement')
if measurement in (None, ''):
if default_measurement:
measurement = default_measurement
else:
measurement = state.entity_id
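        # Measurement resolution order (the branches above): per-entity
        # override from component_config, then the global override_measurement,
        # then the state's unit_of_measurement attribute, then
        # default_measurement, and finally the entity_id itself.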
json_body = [
{
'measurement': measurement,
'tags': {
'domain': state.domain,
'entity_id': state.object_id,
},
'time': event.time_fired,
'fields': {
_state_key: _state,
}
}
]
for key, value in state.attributes.items():
if key in tags_attributes:
json_body[0]['tags'][key] = value
elif key != 'unit_of_measurement':
# If the key is already in fields
if key in json_body[0]['fields']:
key = key + "_"
                # Prevent column data errors in InfluxDB: try to cast each
                # value to float; if that fails, store the value as a string
                # under the field key with an "_str" suffix.
try:
json_body[0]['fields'][key] = float(value)
except (ValueError, TypeError):
new_key = "{}_str".format(key)
new_value = str(value)
json_body[0]['fields'][new_key] = new_value
if RE_DIGIT_TAIL.match(new_value):
json_body[0]['fields'][key] = float(
RE_DECIMAL.sub('', new_value))
json_body[0]['tags'].update(tags)
try:
influx.write_points(json_body)
except exceptions.InfluxDBClientError:
_LOGGER.exception("Error saving event %s to InfluxDB", json_body)
hass.bus.listen(EVENT_STATE_CHANGED, influx_event_listener)
return True
|
MungoRae/home-assistant
|
homeassistant/components/influxdb.py
|
Python
|
apache-2.0
| 7,812
|
#!/usr/bin/env python
# coding=utf8
"""
Fix permissions again
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2011 Mark Hymers <mhy@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
import psycopg2
from daklib.dak_exceptions import DBUpdateError
################################################################################
def do_update(self):
"""
Fix up permissions (again)
"""
print __doc__
try:
c = self.db.cursor()
for table in ['build_queue_policy_files',
'version_check']:
c.execute("""GRANT SELECT, UPDATE, INSERT ON %s TO ftpmaster""" % table)
c.execute("""GRANT SELECT ON %s TO public""" % table)
# Make sure all sequences are fixed up
for seq in ['architecture_id_seq',
'archive_id_seq',
'bin_associations_id_seq',
'binaries_id_seq',
'binary_acl_id_seq',
'binary_acl_map_id_seq',
'build_queue_files_id_seq',
'build_queue_id_seq',
'changelogs_text_id_seq',
'changes_id_seq',
'changes_pending_binaries_id_seq',
'changes_pending_files_id_seq',
'changes_pending_source_id_seq',
'component_id_seq',
'config_id_seq',
'dsc_files_id_seq',
'files_id_seq',
'fingerprint_id_seq',
'keyring_acl_map_id_seq',
'keyrings_id_seq',
'location_id_seq',
'maintainer_id_seq',
'metadata_keys_key_id_seq',
'new_comments_id_seq',
'override_type_id_seq',
'policy_queue_id_seq',
'priority_id_seq',
'section_id_seq',
'source_acl_id_seq',
'source_id_seq',
'src_associations_id_seq',
'src_format_id_seq',
'src_uploaders_id_seq',
'suite_id_seq',
'uid_id_seq',
'upload_blocks_id_seq']:
c.execute("""GRANT SELECT, UPDATE, USAGE ON %s TO ftpmaster""" % seq)
c.execute("""GRANT SELECT ON %s TO public""" % seq)
c.execute("UPDATE config SET value = '58' WHERE name = 'db_revision'")
self.db.commit()
except psycopg2.ProgrammingError as msg:
self.db.rollback()
        raise DBUpdateError('Unable to apply update 58, rollback issued. Error message : %s' % (str(msg)))
|
abhi11/dak
|
dak/dakdb/update58.py
|
Python
|
gpl-2.0
| 3,526
|
#NVDAObjects/IAccessible/sysTreeView32.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2007-2010 Michael Curran <mick@kulgan.net>, James Teh <jamie@jantrid.net>
from ctypes import *
from ctypes.wintypes import *
import api
import winKernel
import controlTypes
import speech
import UIAHandler
from . import IAccessible
if UIAHandler.isUIAAvailable: from ..UIA import UIA
from .. import NVDAObject
from logHandler import log
import watchdog
TV_FIRST=0x1100
TVIS_STATEIMAGEMASK=0xf000
#Window messages
TVM_GETITEMSTATE=TV_FIRST+39
TVM_GETITEM=TV_FIRST+62
TVM_MAPACCIDTOHTREEITEM=TV_FIRST+42
TVM_MAPHTREEITEMTOACCID=TV_FIRST+43
TVM_GETNEXTITEM=TV_FIRST+10
#item mask flags
TVIF_CHILDREN=0x40
#Relation codes
TVGN_ROOT=0
TVGN_NEXT=1
TVGN_PREVIOUS=2
TVGN_PARENT=3
TVGN_CHILD=4
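# TVItemStruct mirrors the Win32 TVITEM structure from commctrl.h, as passed
# with the TVM_GETITEM message.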
class TVItemStruct(Structure):
_fields_=[
('mask',c_uint),
('hItem',c_void_p),
('state',c_uint),
('stateMask',c_uint),
('pszText',LPWSTR),
('cchTextMax',c_int),
('iImage',c_int),
('iSelectedImage',c_int),
('cChildren',c_int),
('lParam',LPARAM),
]
class TreeView(IAccessible):
def _get_firstChild(self):
try:
return super(TreeView, self).firstChild
except:
# Broken commctrl 5 tree view.
return BrokenCommctrl5Item.getFirstItem(self)
class TreeViewItem(IAccessible):
def _get_role(self):
return controlTypes.ROLE_TREEVIEWITEM
def _get_treeview_hItem(self):
if not hasattr(self,'_treeview_hItem'):
self._treeview_hItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPACCIDTOHTREEITEM,self.IAccessibleChildID,0)
if not self._treeview_hItem:
# Tree views from comctl < 6.0 use the hItem as the child ID.
self._treeview_hItem=self.IAccessibleChildID
return self._treeview_hItem
def _get_treeview_level(self):
return int(self.IAccessibleObject.accValue(self.IAccessibleChildID))
def _get_states(self):
states=super(TreeViewItem,self)._get_states()
hItem=self.treeview_hItem
itemStates=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETITEMSTATE,hItem,TVIS_STATEIMAGEMASK)
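        # TVIS_STATEIMAGEMASK selects bits 12-15, the state-image index; with
        # the standard checkbox image list, 1 means unchecked, 2 checked and
        # 3 half-checked.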
ch=(itemStates>>12)&3
if ch>0:
states.add(controlTypes.STATE_CHECKABLE)
if ch==2:
states.add(controlTypes.STATE_CHECKED)
elif ch==3:
states.add(controlTypes.STATE_HALFCHECKED)
return states
def _get_value(self):
return None
def _get_parent(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_parent()
hItem=self.treeview_hItem
if not hItem:
return super(TreeViewItem,self)._get_parent()
parentItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_PARENT,hItem)
if parentItem<=0:
return super(TreeViewItem,self)._get_parent()
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,parentItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=parentItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_firstChild(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_firstChild()
hItem=self.treeview_hItem
if not hItem:
return super(TreeViewItem,self)._get_firstChild()
childItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_CHILD,hItem)
if childItem<=0:
return super(TreeViewItem,self)._get_firstChild()
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,childItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=childItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_next(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_next()
hItem=self.treeview_hItem
if not hItem:
return None
nextItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_NEXT,hItem)
if nextItem<=0:
return None
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,nextItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=nextItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_previous(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_previous()
hItem=self.treeview_hItem
if not hItem:
return None
prevItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_PREVIOUS,hItem)
if prevItem<=0:
return None
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,prevItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=prevItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_children(self):
children=[]
child=self.firstChild
while child:
children.append(child)
child=child.next
return children
def _get_childCount(self):
hItem=self.treeview_hItem
if not hItem:
return 0
childItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_CHILD,hItem)
if childItem<=0:
return 0
numItems=0
while childItem>0:
numItems+=1
childItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_NEXT,childItem)
return numItems
def _get_positionInfo(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_positionInfo()
info={}
info['level']=self.treeview_level
hItem=self.treeview_hItem
if not hItem:
return info
newItem=hItem
index=0
while newItem>0:
index+=1
newItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_PREVIOUS,newItem)
newItem=hItem
numItems=index-1
while newItem>0:
numItems+=1
newItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_NEXT,newItem)
info['indexInGroup']=index
info['similarItemsInGroup']=numItems
return info
def event_stateChange(self):
announceContains = self is api.getFocusObject() and controlTypes.STATE_EXPANDED in self.states and controlTypes.STATE_EXPANDED not in getattr(self,'_speakObjectPropertiesCache',{}).get('states',frozenset())
super(TreeViewItem,self).event_stateChange()
if announceContains:
speech.speakMessage(_("%s items")%self.childCount)
class BrokenCommctrl5Item(IAccessible):
"""Handle broken CommCtrl v5 SysTreeView32 items in 64 bit applications.
In these controls, IAccessible fails to retrieve any info, so we must retrieve it using UIA.
We do this by obtaining a UIA NVDAObject and redirecting properties to it.
We can't simply use UIA objects alone for these controls because UIA events are also broken.
"""
def __init__(self, _uiaObj=None, **kwargs):
# This class is being directly instantiated.
if not _uiaObj:
raise ValueError("Cannot instantiate directly without supplying _uiaObj")
self._uiaObj = _uiaObj
super(BrokenCommctrl5Item, self).__init__(**kwargs)
def initOverlayClass(self):
self._uiaObj = None
if UIAHandler.handler:
parent=super(BrokenCommctrl5Item, self).parent
if parent and parent.hasFocus:
try:
kwargs = {}
UIA.kwargsFromSuper(kwargs, relation="focus")
self._uiaObj = UIA(**kwargs)
except:
log.debugWarning("Retrieving UIA focus failed", exc_info=True)
def _get_role(self):
return self._uiaObj.role if self._uiaObj else controlTypes.ROLE_UNKNOWN
def _get_name(self):
return self._uiaObj.name if self._uiaObj else None
def _get_description(self):
return self._uiaObj.description if self._uiaObj else None
def _get_value(self):
return self._uiaObj.value if self._uiaObj else None
def _get_states(self):
return self._uiaObj.states if self._uiaObj else set()
def _get_positionInfo(self):
return self._uiaObj.positionInfo if self._uiaObj else {}
def _get_location(self):
return self._uiaObj.location if self._uiaObj else None
def _makeRelatedObj(self, uiaObj):
# We need to wrap related UIA objects so that the ancestry will return to IAccessible for the tree view itself.
if not uiaObj:
return None
return BrokenCommctrl5Item(IAccessibleObject=self.IAccessibleObject, IAccessibleChildID=self.IAccessibleChildID, windowHandle=self.windowHandle, _uiaObj=uiaObj)
def _get_parent(self):
if self._uiaObj:
uiaParent = self._uiaObj.parent
# If the parent is the tree view itself (root window object), just use super's parent. IAccessible isn't broken on the container itself.
if not uiaParent.UIAElement.cachedNativeWindowHandle:
return self._makeRelatedObj(uiaParent)
return super(BrokenCommctrl5Item, self).parent
def _get_next(self):
return self._makeRelatedObj(self._uiaObj.next) if self._uiaObj else None
def _get_previous(self):
return self._makeRelatedObj(self._uiaObj.previous) if self._uiaObj else None
def _get_firstChild(self):
return self._makeRelatedObj(self._uiaObj.firstChild) if self._uiaObj else None
def _get_lastChild(self):
return self._makeRelatedObj(self._uiaObj.lastChild) if self._uiaObj else None
def _get_children(self):
# Use the base algorithm, which uses firstChild and next.
return NVDAObject._get_children(self)
@classmethod
def getFirstItem(cls, treeObj):
"""Get an instance for the first item in a given tree view.
"""
if not UIAHandler.handler:
return None
# Get a UIA object for the tree view by getting the root object for the window.
try:
kwargs = {"windowHandle": treeObj.windowHandle}
UIA.kwargsFromSuper(kwargs)
uiaObj = UIA(**kwargs)
except:
log.debugWarning("Error retrieving UIA object for tree view", exc_info=True)
return None
# Get the first tree item.
uiaObj = uiaObj.firstChild
if not uiaObj:
return None
# The IAccessibleChildID for this object isn't really used.
# However, it must not be 0, as 0 is the tree view itself.
return cls(IAccessibleObject=treeObj.IAccessibleObject, IAccessibleChildID=1, windowHandle=treeObj.windowHandle, _uiaObj=uiaObj)
|
daisymax/nvda
|
source/NVDAObjects/IAccessible/sysTreeView32.py
|
Python
|
gpl-2.0
| 10,467
|
#!/usr/bin/env python
import os
import sys
import django
from django.core.management import call_command
from django.conf import settings
from django.test.utils import get_runner
def runtests():
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
django.setup()
call_command("makemigrations")
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1)
failures = test_runner.run_tests(['tests'])
sys.exit(bool(failures))
if __name__ == "__main__":
runtests()
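# Typical invocation, from the repository root (assumes the tests.settings
# module configured above exists):
#     python runtests.py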
|
aaronc-bixly/notifications
|
runtests.py
|
Python
|
mit
| 512
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fieldsight', '0052_auto_20180109_1839'),
]
operations = [
migrations.RemoveField(
model_name='userinvite',
name='project',
),
migrations.AddField(
model_name='userinvite',
name='project',
field=models.ManyToManyField(related_name='invite_project_roles', null=True, to='fieldsight.Project', blank=True),
),
migrations.RemoveField(
model_name='userinvite',
name='site',
),
migrations.AddField(
model_name='userinvite',
name='site',
field=models.ManyToManyField(related_name='invite_site_roles', null=True, to='fieldsight.Site', blank=True),
),
]
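# Note: Django ignores null=True on ManyToManyField (system check fields.W340);
# blank=True alone governs form-level validation for these fields.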
|
awemulya/fieldsight-kobocat
|
onadata/apps/fieldsight/migrations/0053_auto_20180329_1453.py
|
Python
|
bsd-2-clause
| 920
|
import json
from core.rule_core import *
from core import yapi
class YunoModule:
name = "greylist"
cfg_ver = None
config = {
"expiry": 24,
"score": 1,
"list_path": "Käyttäjä:VakauttajaBot/greylist.json"
}
list_ver = None
api = yapi.MWAPI
greylist = None
def run(self, rev):
score = 0
expiry = None
lastrev = self.api.getLatestRev(self.config["list_path"])
if not lastrev:
logger.critical("greylist not found")
return score, expiry
if lastrev != self.list_ver:
self.greylist = json.loads(self.api.getText(self.config["list_path"]))
self.list_ver = lastrev
for user in self.greylist["values"]:
if user == rev["user"]:
score = self.config["score"]
expiry = self.config["expiry"]
break
return score, expiry
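# Hedged usage sketch (the revision dict and user name are illustrative):
#
#     module = YunoModule()
#     score, expiry = module.run({"user": "ExampleUser"})
#     # -> (1, 24) if ExampleUser is on the greylist, otherwise (0, None)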
|
4shadoww/stabilizerbot
|
core/rules/greylist.py
|
Python
|
mit
| 941
|
import operator
import pytest
from bonobo.util.objects import ValueHolder, Wrapper, get_attribute_or_create, get_name
from bonobo.util.testing import optional_contextmanager
class foo:
pass
class bar:
__name__ = "baz"
def test_get_name():
assert get_name(42) == "int"
assert get_name("eat at joe.") == "str"
assert get_name(str) == "str"
assert get_name(object) == "object"
assert get_name(get_name) == "get_name"
assert get_name(foo) == "foo"
assert get_name(foo()) == "foo"
assert get_name(bar) == "bar"
assert get_name(bar()) == "baz"
def test_wrapper_name():
assert get_name(Wrapper(42)) == "int"
assert get_name(Wrapper("eat at joe.")) == "str"
assert get_name(Wrapper(str)) == "str"
assert get_name(Wrapper(object)) == "object"
assert get_name(Wrapper(foo)) == "foo"
assert get_name(Wrapper(foo())) == "foo"
assert get_name(Wrapper(bar)) == "bar"
assert get_name(Wrapper(bar())) == "baz"
assert get_name(Wrapper(get_name)) == "get_name"
def test_valueholder():
x = ValueHolder(42)
assert x == 42
x += 1
assert x == 43
assert x + 1 == 44
assert x == 43
y = ValueHolder(44)
assert y == 44
y -= 1
assert y == 43
assert y - 1 == 42
assert y == 43
assert y == x
assert y is not x
assert repr(x) == repr(y) == repr(43)
def test_valueholder_notequal():
x = ValueHolder(42)
assert x != 41
assert not (x != 42)
@pytest.mark.parametrize("rlo,rhi", [(1, 2), ("a", "b")])
def test_valueholder_ordering(rlo, rhi):
vlo, vhi = ValueHolder(rlo), ValueHolder(rhi)
for lo in (rlo, vlo):
for hi in (rhi, vhi):
assert lo < hi
assert hi > lo
assert lo <= lo
assert not (lo < lo)
assert lo >= lo
def test_valueholder_negpos():
neg, zero, pos = ValueHolder(-1), ValueHolder(0), ValueHolder(1)
assert -neg == pos
assert -pos == neg
assert -zero == zero
assert +pos == pos
assert +neg == neg
def test_valueholders_containers():
x = ValueHolder({1, 2, 3, 5, 8, 13})
assert 5 in x
assert 42 not in x
y = ValueHolder({"foo": "bar", "corp": "acme"})
assert "foo" in y
assert y["foo"] == "bar"
with pytest.raises(KeyError):
y["no"]
y["no"] = "oh, wait"
assert "no" in y
assert "oh, wait" == y["no"]
def test_get_attribute_or_create():
class X:
pass
x = X()
with pytest.raises(AttributeError):
x.foo
foo = get_attribute_or_create(x, "foo", "bar")
assert foo == "bar"
assert x.foo == "bar"
foo = get_attribute_or_create(x, "foo", "baz")
assert foo == "bar"
assert x.foo == "bar"
unsupported_operations = {
int: {operator.matmul},
str: {
operator.sub,
operator.mul,
operator.matmul,
operator.floordiv,
operator.truediv,
operator.mod,
divmod,
operator.pow,
operator.lshift,
operator.rshift,
operator.and_,
operator.xor,
operator.or_,
},
}
@pytest.mark.parametrize("x,y", [(5, 3), (0, 10), (0, 0), (1, 1), ("foo", "bar"), ("", "baz!")])
@pytest.mark.parametrize(
"operation,inplace_operation",
[
(operator.add, operator.iadd),
(operator.sub, operator.isub),
(operator.mul, operator.imul),
(operator.matmul, operator.imatmul),
(operator.truediv, operator.itruediv),
(operator.floordiv, operator.ifloordiv),
(operator.mod, operator.imod),
(divmod, None),
(operator.pow, operator.ipow),
(operator.lshift, operator.ilshift),
(operator.rshift, operator.irshift),
(operator.and_, operator.iand),
(operator.xor, operator.ixor),
(operator.or_, operator.ior),
],
)
def test_valueholder_integer_operations(x, y, operation, inplace_operation):
v = ValueHolder(x)
is_supported = operation not in unsupported_operations.get(type(x), set())
isdiv = ("div" in operation.__name__) or ("mod" in operation.__name__)
# forward...
with optional_contextmanager(pytest.raises(TypeError), ignore=is_supported):
with optional_contextmanager(pytest.raises(ZeroDivisionError), ignore=y or not isdiv):
assert operation(x, y) == operation(v, y)
# backward...
with optional_contextmanager(pytest.raises(TypeError), ignore=is_supported):
with optional_contextmanager(pytest.raises(ZeroDivisionError), ignore=x or not isdiv):
assert operation(y, x) == operation(y, v)
# in place...
if inplace_operation is not None:
with optional_contextmanager(pytest.raises(TypeError), ignore=is_supported):
with optional_contextmanager(pytest.raises(ZeroDivisionError), ignore=y or not isdiv):
inplace_operation(v, y)
assert v == operation(x, y)
|
hartym/bonobo
|
tests/util/test_objects.py
|
Python
|
apache-2.0
| 4,935
|