[dataset columns: code (string) | repo_name (string) | path (string) | language (Python only) | license (15 values) | size (int64, bytes)]
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from cms.models import Page
from menus.templatetags.menu_tags import show_menu
from django.conf import settings
from cms.tests.base import CMSTestCase
from menus.menu_pool import menu_pool
from cms.tests.util.menu_extender import TestMenu
class NavExtenderTestCase(CMSTestCase):
def setUp(self):
settings.CMS_MODERATOR = False
u = User(username="test", is_staff = True, is_active = True, is_superuser = True)
u.set_password("test")
u.save()
self.login_user(u)
menu_pool.clear(settings.SITE_ID)
if not menu_pool.discovered:
menu_pool.discover_menus()
self.old_menu = menu_pool.menus
menu_pool.menus = {'CMSMenu':self.old_menu['CMSMenu'], 'TestMenu':TestMenu()}
def tearDown(self):
menu_pool.menus = self.old_menu
def create_some_nodes(self):
self.page1 = self.create_page(parent_page=None, published=True, in_navigation=True)
self.page2 = self.create_page(parent_page=self.page1, published=True, in_navigation=True)
self.page3 = self.create_page(parent_page=self.page2, published=True, in_navigation=True)
self.page4 = self.create_page(parent_page=None, published=True, in_navigation=True)
self.page5 = self.create_page(parent_page=self.page4, published=True, in_navigation=True)
def test_01_menu_registration(self):
self.assertEqual(len(menu_pool.menus), 2)
        self.assertTrue(len(menu_pool.modifiers) >= 4)
def test_02_extenders_on_root(self):
self.create_some_nodes()
page1 = Page.objects.get(pk=self.page1.pk)
page1.navigation_extenders = "TestMenu"
page1.save()
context = self.get_context()
nodes = show_menu(context, 0, 100, 100, 100)['children']
self.assertEqual(len(nodes), 2)
self.assertEqual(len(nodes[0].children), 4)
self.assertEqual(len(nodes[0].children[3].children), 1)
page1.in_navigation = False
page1.save()
nodes = show_menu(context)['children']
self.assertEqual(len(nodes), 5)
def test_03_extenders_on_root_child(self):
self.create_some_nodes()
page4 = Page.objects.get(pk=self.page4.pk)
page4.navigation_extenders = "TestMenu"
page4.save()
context = self.get_context()
nodes = show_menu(context, 0, 100, 100, 100)['children']
self.assertEqual(len(nodes), 2)
self.assertEqual(len(nodes[1].children), 4)
def test_04_extenders_on_child(self):
self.create_some_nodes()
page1 = Page.objects.get(pk=self.page1.pk)
page1.in_navigation = False
page1.save()
page2 = Page.objects.get(pk=self.page2.pk)
page2.navigation_extenders = "TestMenu"
page2.save()
context = self.get_context()
nodes = show_menu(context, 0, 100, 100, 100)['children']
self.assertEqual(len(nodes), 2)
self.assertEqual(len(nodes[0].children), 4)
self.assertEqual(nodes[0].children[1].get_absolute_url(), "/" )
def test_05_incorrect_nav_extender_in_db(self):
self.create_some_nodes()
page2 = Page.objects.get(pk=self.page2.pk)
page2.navigation_extenders = "SomethingWrong"
page2.save()
context = self.get_context()
nodes = show_menu(context)['children']
self.assertEqual(len(nodes), 2)
[repo: dibaunaumh/tikal-corp-website | path: cms/tests/navextender.py | language: Python | license: bsd-3-clause | size: 3,535]
#!/usr/bin/env python
import rospy
from python_qt_binding.QtCore import Qt, QSize, QRegExp, Signal, Slot
from python_qt_binding.QtGui import QWidget, QRegExpValidator, QHBoxLayout, QPushButton, QComboBox, QIcon
# widget for topic selection
class QTopicWidget(QWidget):
topic_changed_signal = Signal(str)
def __init__(self, parent = None, topic_type = str(), is_action_topic = False):
QWidget.__init__(self, parent)
if is_action_topic:
self.topic_type = topic_type + "Goal"
else:
self.topic_type = topic_type
self.is_action_topic = is_action_topic
# start widget
hbox = QHBoxLayout()
hbox.setMargin(0)
hbox.setContentsMargins(0, 0, 0, 0)
# topic combo box
self.topic_combo_box = QComboBox()
self.topic_combo_box.setEnabled(False)
self.topic_combo_box.blockSignals(True)
self.topic_combo_box.setValidator(QRegExpValidator(QRegExp('((\d|\w|/)(?!//))*'), self))
self.topic_combo_box.currentIndexChanged[str].connect(self.topic_changed)
hbox.addWidget(self.topic_combo_box)
# get system icon
icon = QIcon.fromTheme("view-refresh")
size = icon.actualSize(QSize(32, 32))
# add refresh button
refresh_topics_button = QPushButton()
refresh_topics_button.clicked.connect(self.update_topic_list)
refresh_topics_button.setIcon(icon)
refresh_topics_button.setFixedSize(size.width()+2, size.height()+2)
hbox.addWidget(refresh_topics_button)
# end widget
self.setLayout(hbox)
# init widget
self.update_topic_list()
def emit_topic_name(self):
self.topic_changed_signal.emit(self.current_topic())
def set_editable(self, enable):
self.topic_combo_box.setEditable(enable)
def current_topic(self):
if self.topic_combo_box.isEnabled():
return self.topic_combo_box.currentText()
else:
return ""
@Slot(str)
def topic_changed(self, topic_name):
self.topic_changed_signal.emit(topic_name)
@Slot()
def update_topic_list(self):
self.topic_combo_box.clear()
self.topic_combo_box.setEnabled(False)
self.topic_combo_box.blockSignals(True)
self.topic_combo_box.addItem('Updating...')
# get topic list
_, _, topic_type = rospy.get_master().getTopicTypes()
topic_dict = dict(topic_type)
# filter list
topic_dict_filtered = dict()
for k, v in topic_dict.items():
            if (len(self.topic_type) == 0) or (v == self.topic_type):
if (self.is_action_topic):
topic_dict_filtered[k[:-5]] = v
else:
topic_dict_filtered[k] = v
self.topic_combo_box.clear()
self.topic_combo_box.addItems(sorted(topic_dict_filtered.keys()))
if (self.topic_combo_box.count() > 0):
self.topic_combo_box.setEnabled(True)
self.topic_combo_box.blockSignals(False)
self.topic_changed(self.topic_combo_box.currentText())
else:
self.topic_combo_box.addItem('No topics available!')
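# --- Editor's usage sketch (added for illustration, not part of the original
# file). Assumes a running ROS master; the topic type string is an example. ---
if __name__ == '__main__':
    import sys
    from python_qt_binding.QtGui import QApplication
    rospy.init_node('topic_widget_demo', anonymous=True)
    app = QApplication(sys.argv)
    # Show only topics of type std_msgs/String in the combo box.
    widget = QTopicWidget(topic_type='std_msgs/String')
    widget.topic_changed_signal.connect(lambda name: rospy.loginfo(name))
    widget.show()
    sys.exit(app.exec_())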
[repo: TRECVT/vigir_footstep_planning_basics | path: vigir_footstep_planning_lib/src/vigir_footstep_planning_lib/topic_widget.py | language: Python | license: gpl-3.0 | size: 3,225]
# -*- coding: utf-8 -*-
import json
from collections import namedtuple
from Exceptions import FieldLengthOverflow
__author__ = 'flavio@casacurta.com'
class Fixed_files(object):
def __init__(self, filejson, obj=False, dic=False, checklength=False):
self.dic = dic
self.checklength = checklength
try:
if obj:
self.lattrs = filejson
else:
filejson = filejson if filejson.endswith('.json') else '{}.json'.format(filejson)
attrs = open(filejson).readlines()
self.lattrs = [json.loads(line.decode('utf-8')) for line in attrs]
except:
self.lattrs = []
self.attr = [att['field'] for att in self.lattrs]
start = 0
for att in self.lattrs:
if att['sign']:
att['length'] = att['length'] + 1
exec ("self.{} = slice({}, {})".format(att['field'], start, (start + att['length'])))
start += att['length']
self.slices = ''
for att in self.lattrs:
if att['type'] == 'str':
self.slices += 'record[self.{}], '.format(att['field'])
elif att['type'] == 'int':
if att['decimals']:
self.slices += 'round('
if att['sign']:
self.slices += "int(record[self.{0}][:-1])*int(record[self.{0}][-1]+{1})".format(
att['field'], "'1'")
else:
self.slices += 'int(record[self.{}])'.format(att['field'])
if att['decimals']:
self.slices += ' * .{0:>0{1}}, {1})'.format('1', att['decimals'])
self.slices += ', '
fmt_out_str = ''
fmt_out_fmt = ''
for att in self.lattrs:
if att['sign']:
att['length'] = str(int(att['length']) - 1)
if att['type'] == 'str':
fmt_out_str += "{}".format('{:<' + str(att['length']) + '}')
if self.dic:
fmt_out_fmt += 'record["{}"][:{}], '.format(att['field'], att['length'])
else:
fmt_out_fmt += 'record.{}[:{}], '.format(att['field'], att['length'])
elif att['type'] == 'int':
if att['decimals']:
dec = ' * {}'.format(int('{:<0{}}'.format('1', att['decimals']+1)))
else:
dec = ''
if att['sign']:
fmt_out_str += '{}'.format('{:>0' + str(att['length']) + '}{}')
else:
fmt_out_str += '{}'.format('{:>0' + str(att['length']) + '}')
if self.dic:
if att['sign']:
fmt_out_fmt += '''str(int(round(record["{0}"]{1}, 0) * -1))[:{2}]
if record["{0}"] < 0
else str(int(round(record["{0}"]{1}, 0)))[:{2}],
'-' if record["{0}"] < 0 else '+'
'''.format(att['field'],
dec,
att['length'])
else:
fmt_out_fmt += 'str(int(round(record["{}"]{}, 0)))[:{}]'.format(att['field'],
dec,
att['length'])
else:
if att['sign']:
fmt_out_fmt += '''str(int(round(record.{0}{1}, 0) * -1))[:{2}]
if record.{0} < 0
else str(int(round(record.{0}{1}, 0)))[:{2}],
'-' if record.{0} < 0 else '+'
'''.format(att['field'],
dec,
att['length'])
else:
fmt_out_fmt += 'str(int(round(record.{}{}, 0)))[:{}]'.format(att['field'],
dec,
att['length'])
fmt_out_fmt += ', '
self.fmt_out = "'" + fmt_out_str + "\\n'.format(" + fmt_out_fmt + ")"
self.Record = namedtuple('Record', self.attr)
def parse(self, record):
nt = eval("self.Record({})".format(self.slices))
if self.dic:
return {k:nt[n] for n, k in enumerate(self.attr)}
return nt
def unparse(self, record):
return eval("{}".format(self.fmt_out))
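# --- Editor's usage sketch (added for illustration; the field spec below is a
# made-up example, not from the original repo). ---
if __name__ == '__main__':
    layout = [
        {'field': 'name', 'type': 'str', 'length': 10, 'sign': False, 'decimals': 0},
        {'field': 'amount', 'type': 'int', 'length': 8, 'sign': False, 'decimals': 2},
    ]
    ff = Fixed_files(layout, obj=True)
    rec = ff.parse('John      00012345')   # fixed-width record, 18 chars
    print rec.name.strip(), rec.amount     # -> John 123.45
    print ff.unparse(rec),                 # round-trips back to the raw line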
[repo: flavio-casacurta/File-FixedS | path: fixed_files.py | language: Python | license: mit | size: 4,849]
from nose.core import collector, main, run, run_exit, runmodule
# backwards compatibility
from nose.exc import SkipTest, DeprecatedTest
from nose.tools import with_setup
__author__ = 'Jason Pellerin'
__versioninfo__ = (1, 1, 0)
__version__ = '.'.join(map(str, __versioninfo__))
__all__ = [
'main', 'run', 'run_exit', 'runmodule', 'with_setup',
'SkipTest', 'DeprecatedTest', 'collector'
]
[repo: mozilla/sheriffs | path: vendor-local/src/python-nose/nose/__init__.py | language: Python | license: bsd-3-clause | size: 404]
def invert_index(idx):
out = {}
for k,v in idx.iteritems():
out[v] = k
return out
FILE_TYPES = {
'png':'image/png',
'jpg':'image/jpeg',
'gif':'image/gif',
'html':'text/html',
'css':'text/css',
'js':'text/javascript'
}
EXTENSION_TYPES = invert_index(FILE_TYPES)
def extension_for_file(f):
handler = f['handler']
ctype = f['content_type']
if handler in ['binary', 'passthrough']:
return EXTENSION_TYPES.get(ctype, 'txt')
elif handler == 'mqlquery':
return "mql"
elif handler == 'mjt':
return "mjt"
elif handler == 'acre_script':
return "sjs"
def type_for_extension(f, ext):
ct = FILE_TYPES.get(ext, 'text/plain')
if ct == 'text/plain' and ext == 'sjs':
return (ct, 'acre_script')
elif ct == 'text/plain' and ext == 'mql':
return (ct, 'mqlquery')
elif ct == 'text/plain' and ext == 'mjt':
return (ct, 'mjt')
elif ct == 'text/plain':
return (ct, 'passthrough')
elif ct.startswith('image'):
return (ct, 'binary')
else:
return (ct, 'passthrough')
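# --- Editor's usage sketch (added for illustration; the dict shape matches the
# 'handler'/'content_type' keys the functions above expect). ---
if __name__ == '__main__':
    f = {'handler': 'binary', 'content_type': 'image/png'}
    print extension_for_file(f)           # -> png
    print type_for_extension(f, 'sjs')    # -> ('text/plain', 'acre_script')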
[repo: gagoel/acre | path: utilities/acp/extension_map.py | language: Python | license: apache-2.0 | size: 1,124]
"""Create member database
Revision ID: 4beef05c5ec
Revises:
Create Date: 2017-02-05 15:54:06.706073
"""
# revision identifiers, used by Alembic.
revision = '4beef05c5ec'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'member',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('global_nick', sa.Unicode(255)),
sa.Column('local_nick', sa.Unicode(255)),
sa.Column('points', sa.Integer),
sa.Column('rank_id', sa.Integer),
)
def downgrade():
op.drop_table('member')
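# Editor's note (usage, not part of the migration): with the project's
# alembic.ini in place, this revision is applied with `alembic upgrade head`
# and reverted with `alembic downgrade -1`.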
[repo: dark-echo/Bay-Oh-Woolph | path: alembic/versions/4beef05c5ec_create_member_database.py | language: Python | license: agpl-3.0 | size: 629]
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
urls = {
"home": "/",
"discover": "/discover",
"login": "/login",
"register": "/register",
"forgot-password": "/forgot-password",
"change-password": "/change-password/{0}", # user.token
"change-email": "/change-email/{0}", # user.email_token
"cancel-account": "/cancel-account/{0}", # auth.token.get_token_for_user(user)
"invitation": "/invitation/{0}", # membership.token
"user": "/profile/{0}", # user.username
"project": "/project/{0}", # project.slug
"epics": "/project/{0}/epics/", # project.slug
"epic": "/project/{0}/epic/{1}", # project.slug, epic.ref
"backlog": "/project/{0}/backlog/", # project.slug
"taskboard": "/project/{0}/taskboard/{1}", # project.slug, milestone.slug
"kanban": "/project/{0}/kanban/", # project.slug
"userstory": "/project/{0}/us/{1}", # project.slug, us.ref
"task": "/project/{0}/task/{1}", # project.slug, task.ref
"issues": "/project/{0}/issues", # project.slug
"issue": "/project/{0}/issue/{1}", # project.slug, issue.ref
"wiki": "/project/{0}/wiki/{1}", # project.slug, wikipage.slug
"team": "/project/{0}/team/", # project.slug
"project-transfer": "/project/{0}/transfer/{1}", # project.slug, project.transfer_token
"project-admin": "/login?next=/project/{0}/admin/project-profile/details", # project.slug
}
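# --- Editor's usage sketch (added for illustration; the slug and ref values
# are made up). Each entry is a str.format() template keyed by view name. ---
if __name__ == "__main__":
    print(urls["epic"].format("my-project", 42))        # /project/my-project/epic/42
    print(urls["change-password"].format("sometoken"))  # /change-password/sometoken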
[repo: xdevelsistemas/taiga-back-community | path: taiga/front/urls.py | language: Python | license: agpl-3.0 | size: 2,292]
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QProcess
import re, os
import mooseutils
from peacock.base import MooseWidget
from peacock.utils import TerminalUtils
class JobRunner(QObject, MooseWidget):
"""
Actually runs the process. It will read the output and
translate any terminal color codes into html.
It will also attempt to parse the output to check to
see if we are at a new time step and emit the
timestep_updated signal.
Signals:
started: Emitted when we start running.
finished: Emitted when we are finished. Arguments are exit code and status message.
outputAdded: Emitted when there is new output.
timeStepUpdated: A new time step has started
error: Emitted when an error is encountered. Arguments are QProcess code and error description
"""
started = pyqtSignal()
finished = pyqtSignal(int, str)
outputAdded = pyqtSignal(str)
timeStepUpdated = pyqtSignal(int)
error = pyqtSignal(int, str)
def __init__(self, **kwds):
super(JobRunner, self).__init__(**kwds)
self.process = QProcess(self)
self.process.setProcessChannelMode(QProcess.MergedChannels)
self.process.readyReadStandardOutput.connect(self._readOutput)
self.process.finished.connect(self._jobFinished)
self.process.started.connect(self.started)
self.process.error.connect(self._error)
self._error_map = { QProcess.FailedToStart: "Failed to start",
QProcess.Crashed: "Crashed",
QProcess.Timedout: "Timedout",
QProcess.WriteError: "Write error",
QProcess.ReadError: "Read error",
QProcess.UnknownError: "Unknown error",
}
self.killed = False
self.setup()
def run(self, cmd, args):
"""
Start the command.
Arguments:
cmd: The command to run
args: A list of string arguments
"""
self.killed = False
self._sendMessage("Running command: %s %s" % (cmd, ' '.join(args)))
self._sendMessage("Working directory: %s" % os.getcwd())
self.process.start(cmd, args)
self.process.waitForStarted()
def _sendMessage(self, msg):
mooseutils.mooseMessage(msg, color="MAGENTA")
self.outputAdded.emit('<span style="color:magenta;">%s</span>' % msg)
@pyqtSlot(QProcess.ProcessError)
def _error(self, err):
"""
Slot called when the QProcess encounters an error.
Inputs:
err: One of the QProcess.ProcessError enums
"""
if not self.killed:
msg = self._error_map.get(err, "Unknown error")
self.error.emit(int(err), msg)
mooseutils.mooseMessage(msg, color="RED")
self.outputAdded.emit(msg)
@pyqtSlot(int, QProcess.ExitStatus)
def _jobFinished(self, code, status):
"""
Slot called when the QProcess is finished.
Inputs:
code: Exit code of the process.
status: QProcess.ExitStatus
"""
exit_status = "Finished"
if status != QProcess.NormalExit:
if self.killed:
exit_status = "Killed by user"
else:
exit_status = "Crashed"
self.finished.emit(code, exit_status)
self._sendMessage("%s: Exit code: %s" % (exit_status, code))
def kill(self):
"""
Kills the QProcess
"""
self.killed = True
mooseutils.mooseMessage("Killing")
self.process.terminate()
self.process.waitForFinished(1000)
if self.isRunning():
mooseutils.mooseMessage("Failed to terminate job cleanly. Doing a hard kill.")
self.process.kill()
self.process.waitForFinished()
@pyqtSlot()
def _readOutput(self):
"""
Slot called when the QProcess produces output.
"""
lines = []
while self.process.canReadLine():
tmp = self.process.readLine().data().decode("utf-8").rstrip()
lines.append(TerminalUtils.terminalOutputToHtml(tmp))
match = re.search(r'Time\sStep\s*([0-9]{1,})', tmp)
if match:
ts = int(match.group(1))
self.timeStepUpdated.emit(ts)
output = '<pre style="display: inline; margin: 0;">%s</pre>' % '\n'.join(lines)
self.outputAdded.emit(output)
def isRunning(self):
return self.process.state() == QProcess.Running
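# --- Editor's usage sketch (added for illustration; requires the peacock /
# mooseutils environment imported above and a Qt event loop). ---
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    runner = JobRunner()
    runner.outputAdded.connect(print)                         # echo HTML output
    runner.finished.connect(lambda code, status: app.quit())  # stop on exit
    runner.run('echo', ['hello'])
    sys.exit(app.exec_())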
[repo: Chuban/moose | path: python/peacock/Execute/JobRunner.py | language: Python | license: lgpl-2.1 | size: 4,549]
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a tuple in this format:
(view_function, function_args, function_kwargs)
"""
from __future__ import unicode_literals
import re
from threading import local
from django.http import Http404
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text, iri_to_uri
from django.utils.functional import memoize, lazy
from django.utils.http import urlquote
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils import six
from django.utils.translation import get_language
_resolver_cache = {} # Maps URLconf modules to RegexURLResolver instances.
_ns_resolver_cache = {} # Maps namespaces to RegexURLResolver instances.
_callable_cache = {} # Maps view and url pattern names to their view functions.
# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()
# Overridden URLconfs for each thread are stored here.
_urlconfs = local()
class ResolverMatch(object):
def __init__(self, func, args, kwargs, url_name=None, app_name=None, namespaces=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.app_name = app_name
if namespaces:
self.namespaces = [x for x in namespaces if x]
else:
self.namespaces = []
if not url_name:
if not hasattr(func, '__name__'):
# An instance of a callable class
url_name = '.'.join([func.__class__.__module__, func.__class__.__name__])
else:
# A function
url_name = '.'.join([func.__module__, func.__name__])
self.url_name = url_name
@property
def namespace(self):
return ':'.join(self.namespaces)
@property
def view_name(self):
return ':'.join([ x for x in [ self.namespace, self.url_name ] if x ])
def __getitem__(self, index):
return (self.func, self.args, self.kwargs)[index]
def __repr__(self):
return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name='%s', app_name='%s', namespace='%s')" % (
self.func, self.args, self.kwargs, self.url_name, self.app_name, self.namespace)
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
# Don't make this raise an error when used in a template.
silent_variable_failure = True
def get_callable(lookup_view, can_fail=False):
"""
Convert a string version of a function name to the callable object.
If the lookup_view is not an import path, it is assumed to be a URL pattern
label and the original string is returned.
If can_fail is True, lookup_view might be a URL pattern label, so errors
during the import fail and the string is returned.
"""
if not callable(lookup_view):
mod_name, func_name = get_mod_func(lookup_view)
if func_name == '':
return lookup_view
try:
mod = import_module(mod_name)
except ImportError:
parentmod, submod = get_mod_func(mod_name)
if (not can_fail and submod != '' and
not module_has_submodule(import_module(parentmod), submod)):
raise ViewDoesNotExist(
"Could not import %s. Parent module %s does not exist." %
(lookup_view, mod_name))
if not can_fail:
raise
else:
try:
lookup_view = getattr(mod, func_name)
if not callable(lookup_view):
raise ViewDoesNotExist(
"Could not import %s.%s. View is not callable." %
(mod_name, func_name))
except AttributeError:
if not can_fail:
raise ViewDoesNotExist(
"Could not import %s. View does not exist in module %s." %
(lookup_view, mod_name))
return lookup_view
get_callable = memoize(get_callable, _callable_cache, 1)
def get_resolver(urlconf):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(r'^/', urlconf)
get_resolver = memoize(get_resolver, _resolver_cache, 1)
def get_ns_resolver(ns_pattern, resolver):
# Build a namespaced resolver for the given parent urlconf pattern.
# This makes it possible to have captured parameters in the parent
# urlconf pattern.
ns_resolver = RegexURLResolver(ns_pattern,
resolver.url_patterns)
return RegexURLResolver(r'^/', [ns_resolver])
get_ns_resolver = memoize(get_ns_resolver, _ns_resolver_cache, 2)
def get_mod_func(callback):
# Converts 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot+1:]
class LocaleRegexProvider(object):
"""
A mixin to provide a default regex property which can vary by active
language.
"""
def __init__(self, regex):
# regex is either a string representing a regular expression, or a
# translatable string (using ugettext_lazy) representing a regular
# expression.
self._regex = regex
self._regex_dict = {}
@property
def regex(self):
"""
Returns a compiled regular expression, depending upon the activated
language-code.
"""
language_code = get_language()
if language_code not in self._regex_dict:
if isinstance(self._regex, six.string_types):
regex = self._regex
else:
regex = force_text(self._regex)
try:
compiled_regex = re.compile(regex, re.UNICODE)
except re.error as e:
raise ImproperlyConfigured(
'"%s" is not a valid regular expression: %s' %
(regex, six.text_type(e)))
self._regex_dict[language_code] = compiled_regex
return self._regex_dict[language_code]
class RegexURLPattern(LocaleRegexProvider):
def __init__(self, regex, callback, default_args=None, name=None):
LocaleRegexProvider.__init__(self, regex)
# callback is either a string like 'foo.views.news.stories.story_detail'
# which represents the path to a module and a view function name, or a
# callable object (view).
if callable(callback):
self._callback = callback
else:
self._callback = None
self._callback_str = callback
self.default_args = default_args or {}
self.name = name
def __repr__(self):
return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
def add_prefix(self, prefix):
"""
Adds the prefix string to a string-based callback.
"""
if not prefix or not hasattr(self, '_callback_str'):
return
self._callback_str = prefix + '.' + self._callback_str
def resolve(self, path):
match = self.regex.search(path)
if match:
# If there are any named groups, use those as kwargs, ignoring
# non-named groups. Otherwise, pass all non-named arguments as
# positional arguments.
kwargs = match.groupdict()
if kwargs:
args = ()
else:
args = match.groups()
# In both cases, pass any extra_kwargs as **kwargs.
kwargs.update(self.default_args)
return ResolverMatch(self.callback, args, kwargs, self.name)
@property
def callback(self):
if self._callback is not None:
return self._callback
self._callback = get_callable(self._callback_str)
return self._callback
class RegexURLResolver(LocaleRegexProvider):
def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
LocaleRegexProvider.__init__(self, regex)
# urlconf_name is a string representing the module containing URLconfs.
self.urlconf_name = urlconf_name
if not isinstance(urlconf_name, six.string_types):
self._urlconf_module = self.urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
def __repr__(self):
if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
# Don't bother to output the whole list, it can be huge
urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return str('<%s %s (%s:%s) %s>') % (
self.__class__.__name__, urlconf_repr, self.app_name,
self.namespace, self.regex.pattern)
def _populate(self):
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for pattern in reversed(self.url_patterns):
p_pattern = pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(pattern, RegexURLResolver):
if pattern.namespace:
namespaces[pattern.namespace] = (p_pattern, pattern)
if pattern.app_name:
apps.setdefault(pattern.app_name, []).append(pattern.namespace)
else:
parent = normalize(pattern.regex.pattern)
for name in pattern.reverse_dict:
for matches, pat, defaults in pattern.reverse_dict.getlist(name):
new_matches = []
for piece, p_args in parent:
new_matches.extend([(piece + suffix, p_args + args) for (suffix, args) in matches])
lookups.appendlist(name, (new_matches, p_pattern + pat, dict(defaults, **pattern.default_kwargs)))
for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
else:
bits = normalize(p_pattern)
lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
if pattern.name is not None:
lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
self._reverse_dict[language_code] = lookups
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
def resolve(self, path):
tried = []
match = self.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
sub_tried = e.args[0].get('tried')
if sub_tried is not None:
tried.extend([[pattern] + t for t in sub_tried])
else:
tried.append([pattern])
else:
if sub_match:
sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
sub_match_dict.update(sub_match.kwargs)
return ResolverMatch(sub_match.func, sub_match.args, sub_match_dict, sub_match.url_name, self.app_name or sub_match.app_name, [self.namespace] + sub_match.namespaces)
tried.append([pattern])
raise Resolver404({'tried': tried, 'path': new_path})
raise Resolver404({'path' : path})
@property
def urlconf_module(self):
try:
return self._urlconf_module
except AttributeError:
self._urlconf_module = import_module(self.urlconf_name)
return self._urlconf_module
@property
def url_patterns(self):
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError:
raise ImproperlyConfigured("The included urlconf %s doesn't have any patterns in it" % self.urlconf_name)
return patterns
def _resolve_special(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
if not callback:
# No handler specified in file; use default
# Lazy import, since django.urls imports this file
from django.conf import urls
callback = getattr(urls, 'handler%s' % view_type)
return get_callable(callback), {}
def resolve403(self):
return self._resolve_special('403')
def resolve404(self):
return self._resolve_special('404')
def resolve500(self):
return self._resolve_special('500')
def reverse(self, lookup_view, *args, **kwargs):
return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)
def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
text_args = [force_text(v) for v in args]
text_kwargs = dict((k, force_text(v)) for (k, v) in kwargs.items())
try:
lookup_view = get_callable(lookup_view, True)
except (ImportError, AttributeError) as e:
raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
possibilities = self.reverse_dict.getlist(lookup_view)
prefix_norm, prefix_args = normalize(urlquote(_prefix))[0]
for possibility, pattern, defaults in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params) + len(prefix_args):
continue
candidate_subs = dict(zip(prefix_args + params, text_args))
else:
if set(kwargs.keys()) | set(defaults.keys()) != set(params) | set(defaults.keys()) | set(prefix_args):
continue
matches = True
for k, v in defaults.items():
if kwargs.get(k, v) != v:
matches = False
break
if not matches:
continue
candidate_subs = text_kwargs
# WSGI provides decoded URLs, without %xx escapes, and the URL
# resolver operates on such URLs. First substitute arguments
# without quoting to build a decoded URL and look for a match.
# Then, if we have a match, redo the substitution with quoted
# arguments in order to return a properly encoded URL.
candidate_pat = prefix_norm.replace('%', '%%') + result
if re.search('^%s%s' % (prefix_norm, pattern), candidate_pat % candidate_subs, re.UNICODE):
candidate_subs = dict((k, urlquote(v)) for (k, v) in candidate_subs.items())
return candidate_pat % candidate_subs
# lookup_view can be URL label, or dotted path, or callable, Any of
# these can be passed in at the top, but callables are not friendly in
# error messages.
m = getattr(lookup_view, '__module__', None)
n = getattr(lookup_view, '__name__', None)
if m is not None and n is not None:
lookup_view_s = "%s.%s" % (m, n)
else:
lookup_view_s = lookup_view
raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
"arguments '%s' not found." % (lookup_view_s, args, kwargs))
class LocaleRegexURLResolver(RegexURLResolver):
"""
A URL resolver that always matches the active language code as URL prefix.
Rather than taking a regex argument, we just override the ``regex``
function to always return the active language-code as regex.
"""
def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
super(LocaleRegexURLResolver, self).__init__(
None, urlconf_name, default_kwargs, app_name, namespace)
@property
def regex(self):
language_code = get_language()
if language_code not in self._regex_dict:
regex_compiled = re.compile('^%s/' % language_code, re.UNICODE)
self._regex_dict[language_code] = regex_compiled
return self._regex_dict[language_code]
def resolve(path, urlconf=None):
if urlconf is None:
urlconf = get_urlconf()
return get_resolver(urlconf).resolve(path)
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None):
if urlconf is None:
urlconf = get_urlconf()
resolver = get_resolver(urlconf)
args = args or []
kwargs = kwargs or {}
if prefix is None:
prefix = get_script_prefix()
if not isinstance(viewname, six.string_types):
view = viewname
else:
parts = viewname.split(':')
parts.reverse()
view = parts[0]
path = parts[1:]
resolved_path = []
ns_pattern = ''
while path:
ns = path.pop()
# Lookup the name to see if it could be an app identifier
try:
app_list = resolver.app_dict[ns]
# Yes! Path part matches an app in the current Resolver
if current_app and current_app in app_list:
# If we are reversing for a particular app,
# use that namespace
ns = current_app
elif ns not in app_list:
# The name isn't shared by one of the instances
# (i.e., the default) so just pick the first instance
# as the default.
ns = app_list[0]
except KeyError:
pass
try:
extra, resolver = resolver.namespace_dict[ns]
resolved_path.append(ns)
ns_pattern = ns_pattern + extra
except KeyError as key:
if resolved_path:
raise NoReverseMatch(
"%s is not a registered namespace inside '%s'" %
(key, ':'.join(resolved_path)))
else:
raise NoReverseMatch("%s is not a registered namespace" %
key)
if ns_pattern:
resolver = get_ns_resolver(ns_pattern, resolver)
return iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs))
reverse_lazy = lazy(reverse, str)
def clear_url_caches():
global _resolver_cache
global _ns_resolver_cache
global _callable_cache
_resolver_cache.clear()
_ns_resolver_cache.clear()
_callable_cache.clear()
def set_script_prefix(prefix):
"""
Sets the script prefix for the current thread.
"""
if not prefix.endswith('/'):
prefix += '/'
_prefixes.value = prefix
def get_script_prefix():
"""
Returns the currently active script prefix. Useful for client code that
wishes to construct their own URLs manually (although accessing the request
instance is normally going to be a lot cleaner).
"""
return getattr(_prefixes, "value", '/')
def clear_script_prefix():
"""
Unsets the script prefix for the current thread.
"""
try:
del _prefixes.value
except AttributeError:
pass
def set_urlconf(urlconf_name):
"""
Sets the URLconf for the current thread (overriding the default one in
settings). Set to None to revert back to the default.
"""
if urlconf_name:
_urlconfs.value = urlconf_name
else:
if hasattr(_urlconfs, "value"):
del _urlconfs.value
def get_urlconf(default=None):
"""
Returns the root URLconf to use for the current thread if it has been
changed from the default one.
"""
return getattr(_urlconfs, "value", default)
def is_valid_path(path, urlconf=None):
"""
Returns True if the given path resolves against the default URL resolver,
False otherwise.
This is a convenience method to make working with "is this a match?" cases
easier, avoiding unnecessarily indented try...except blocks.
"""
try:
resolve(path, urlconf)
return True
except Resolver404:
return False
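# --- Editor's usage sketch (added for illustration; assumes a configured
# Django project whose ROOT_URLCONF defines a pattern named 'article-detail').
#
#   from django.core.urlresolvers import resolve, reverse
#   view, args, kwargs = resolve('/articles/2003/')   # ResolverMatch unpacks
#   url = reverse('article-detail', args=[2003])      # -> '/articles/2003/'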
[repo: postrational/django | path: django/core/urlresolvers.py | language: Python | license: bsd-3-clause | size: 22,114]
import re
import logging
from functools import wraps
from flask import render_template
from flask.ext.login import current_user
logger = logging.getLogger('PRG')
def requires_roles(*roles):
def wrapper(f):
@wraps(f)
def wrapped(*args, **kwargs):
            for rol in current_user.roles:
                if rol.descripcion in roles:
                    return f(*args, **kwargs)
return render_template("index.html")
return wrapped
return wrapper
def sorted_nicely(l):
    """Sort the given iterable in the way that humans expect (natural sort).
    Note: the implementation below is commented out, so this currently
    returns the input unchanged."""
    # convert = lambda text: int(text.numero) if text.numero.isdigit() else text.numero
    # alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
    # return sorted(l, key = alphanum_key)
    return l
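# --- Editor's usage sketch (added for illustration; assumes a Flask app with
# flask-login configured, as the imports above require).
#
#   @app.route('/admin')
#   @requires_roles('admin')
#   def admin_panel():
#       return render_template('admin.html')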
[repo: matibarriento/pririgardus | path: Pririgardus/helpers.py | language: Python | license: gpl-2.0 | size: 845]
# Copyright (C) 2016-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from perftest import perftest
class SkipCommand (perftest.TestCaseWithBasicMeasurements):
def __init__(self, name, step):
super (SkipCommand, self).__init__ (name)
self.step = step
def warm_up(self):
for _ in range(0, 10):
gdb.execute("step", False, True)
def _run(self, r):
for _ in range(0, r):
gdb.execute("step", False, True)
def execute_test(self):
for i in range(1, 5):
func = lambda: self._run(i * self.step)
self.measure.measure(func, i * self.step)
[repo: mattstock/binutils-bexkat1 | path: gdb/testsuite/gdb.perf/skip-command.py | language: Python | license: gpl-2.0 | size: 1,259]
# coding=utf8
# Copyright © 2015-2017 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
import ambari_helpers as helpers
from resource_management import *
class Master(Script):
def install(self, env):
print('Install the CDAP Master')
import params
# Add repository file
helpers.add_repo(
params.files_dir + params.repo_file,
params.os_repo_dir
)
# Install any global packages
self.install_packages(env)
# Workaround for CDAP-3961
helpers.package('cdap-hbase-compat-1.1')
# Install package
helpers.package('cdap-master')
self.configure(env)
def start(self, env, upgrade_type=None):
print('Start the CDAP Master')
import params
import status_params
env.set_params(params)
self.configure(env)
helpers.create_hdfs_dir(params.hdfs_namespace, params.cdap_hdfs_user, 775)
# Create user's HDFS home
helpers.create_hdfs_dir('/user/' + params.cdap_user, params.cdap_user, 775)
if params.cdap_hdfs_user != params.cdap_user:
helpers.create_hdfs_dir('/user/' + params.cdap_hdfs_user, params.cdap_hdfs_user, 775)
# Hack to work around CDAP-1967
self.remove_jackson(env)
daemon_cmd = format('/opt/cdap/master/bin/cdap master start')
no_op_test = format('ls {status_params.cdap_master_pid_file} >/dev/null 2>&1 && ps -p $(<{status_params.cdap_master_pid_file}) >/dev/null 2>&1')
Execute(
daemon_cmd,
user=params.cdap_user,
not_if=no_op_test
)
def stop(self, env, upgrade_type=None):
print('Stop the CDAP Master')
import status_params
daemon_cmd = format('service cdap-master stop')
no_op_test = format('ls {status_params.cdap_master_pid_file} >/dev/null 2>&1 && ps -p $(<{status_params.cdap_master_pid_file}) >/dev/null 2>&1')
Execute(
daemon_cmd,
only_if=no_op_test
)
def status(self, env):
import status_params
check_process_status(status_params.cdap_master_pid_file)
def configure(self, env):
print('Configure the CDAP Master')
import params
env.set_params(params)
helpers.cdap_config('master')
def upgrade(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.UpgradeTool',
label='CDAP Upgrade Tool',
arguments='upgrade force'
)
def upgrade_hbase(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.UpgradeTool',
label='CDAP HBase Coprocessor Upgrade Tool',
arguments='upgrade_hbase force'
)
def postupgrade(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.flow.FlowQueuePendingCorrector',
label='CDAP Post-Upgrade Tool'
)
def queue_debugger(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.SimpleHBaseQueueDebugger',
label='CDAP Queue Debugger Tool'
)
def jobqueue_debugger(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.JobQueueDebugger',
label='CDAP Job Queue Debugger Tool'
)
def run_class(self, env, classname, label=None, arguments=''):
if label is None:
label = classname
print('Running: ' + label)
import params
cmd = format("/opt/cdap/master/bin/cdap run %s %s" % (classname, arguments))
Execute(
cmd,
user=params.cdap_user
)
def remove_jackson(self, env):
jackson_check = format('ls -1 /opt/cdap/master/lib/org.codehaus.jackson* 2>/dev/null')
Execute(
'rm -f /opt/cdap/master/lib/org.codehaus.jackson.jackson-*',
not_if=jackson_check
)
if __name__ == "__main__":
Master().execute()
[repo: cdapio/cdap-ambari-service | path: src/main/resources/common-services/CDAP/6.0.0/package/scripts/master.py | language: Python | license: apache-2.0 | size: 4,572]
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class ContractType(models.Model):
_name = 'hr.contract.type'
_description = 'Contract Type'
name = fields.Char(required=True)
class HrPayrollStructureType(models.Model):
_name = 'hr.payroll.structure.type'
_description = 'Contract Type'
name = fields.Char('Contract Type')
default_resource_calendar_id = fields.Many2one(
'resource.calendar', 'Default Working Hours',
default=lambda self: self.env.company.resource_calendar_id)
country_id = fields.Many2one('res.country', string='Country', default=lambda self: self.env.company.country_id)
[repo: jeremiahyan/odoo | path: addons/hr_contract/models/hr_contract_type.py | language: Python | license: gpl-3.0 | size: 724]
# -*- coding:utf-8 -*-
# !/usr/bin/env python
import os
import time
from ruia import Spider, Item, TextField, AttrField
from ruia_ua import middleware
os.environ['MODE'] = 'PRO'
from owllook.database.mongodb import MotorBaseOld
class QidianNovelInfoItem(Item):
"""
定义继承自item的Item类
"""
novel_name = TextField(css_select='.book-info>h1>em')
author = TextField(css_select='a.writer')
    # When the extracted value comes from an element attribute, use AttrField
cover = AttrField(css_select='a#bookImg>img', attr='src')
abstract = TextField(css_select='div.book-intro>p')
status = TextField(css_select='p.tag>span.blue')
novels_type = TextField(css_select='p.tag>a.red')
latest_chapter = TextField(css_select='li.update>div.detail>p.cf>a')
latest_chapter_time = TextField(css_select='div.detail>p.cf>em')
async def clean_cover(self, cover):
return 'http:' + cover
async def clean_status(self, status):
"""
当目标值的对象只有一个,默认将值提取出来,否则返回list,可以在这里定义一个函数进行循环提取
:param ele_tag:
:return:
"""
return '#'.join([i.text for i in status])
async def clean_novels_type(self, novels_type):
return '#'.join([i.text for i in novels_type])
async def clean_latest_chapter_time(self, latest_chapter_time):
return latest_chapter_time.replace(u'今天', str(time.strftime("%Y-%m-%d ", time.localtime()))).replace(u'昨日', str(
time.strftime("%Y-%m-%d ", time.localtime(time.time() - 24 * 60 * 60))))
class QidianNovelInfoSpider(Spider):
request_config = {
'RETRIES': 3,
'TIMEOUT': 10
}
async def parse(self, res):
motor_db = MotorBaseOld().db
item = await QidianNovelInfoItem.get_item(html=res.html)
item_data = {
'novel_name': item.novel_name,
'author': item.author,
'cover': item.cover,
'abstract': item.abstract,
'status': item.status,
'novels_type': item.novels_type,
'latest_chapter': item.latest_chapter,
'latest_chapter_time': item.latest_chapter_time,
'spider': 'qidian',
'target_url': res.url,
'updated_at': time.strftime("%Y-%m-%d %X", time.localtime())
}
        print('Fetched info for novel {} successfully'.format(item.novel_name))
await motor_db.all_novels_info.update_one(
{'novel_name': item_data['novel_name'], 'spider': item_data['spider']},
{'$set': item_data},
upsert=True)
if __name__ == '__main__':
import random
    # More multi-item examples: https://gist.github.com/howie6879/3ef4168159e5047d42d86cb7fb706a2f
QidianNovelInfoSpider.start_urls = ['https://book.qidian.com/info/1004608738',
'https://book.qidian.com/info/3602691',
'https://book.qidian.com/info/3347595', 'https://book.qidian.com/info/1887208']
# QidianNovelInfoSpider.start_urls = all_urls
QidianNovelInfoSpider.start(middleware=middleware)
[repo: howie6879/novels-search | path: owllook/spiders/qidian_novel_info.py | language: Python | license: apache-2.0 | size: 3,167]
"""Scraper for the 1st District Court of Appeals
CourtID: ohio
Court Short Name: Ohio
Author: Andrei Chelaru
"""
from juriscraper.opinions.united_states.state import ohio
class Site(ohio.Site):
def __init__(self):
super(Site, self).__init__()
self.court_id = self.__module__
self.court_index = 10
self.url = self.make_url(self.court_index, self.year)
[repo: brianwc/juriscraper | path: opinions/united_states/state/ohioctapp_10.py | language: Python | license: bsd-2-clause | size: 390]
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base minibatch sampler module.
The job of the minibatch_sampler is to subsample a minibatch based on some
criterion.
The main function call is:
subsample(indicator, batch_size, **params).
Indicator is a 1d boolean tensor where True denotes which examples can be
sampled. It returns a boolean indicator where True denotes an example has been
sampled.
Subclasses should implement the Subsample function and can make use of the
@staticmethod SubsampleIndicator.
This is originally implemented in TensorFlow Object Detection API.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow_models.mlperf.models.rough.mask_rcnn.object_detection import ops
class MinibatchSampler(object):
"""Abstract base class for subsampling minibatches."""
__metaclass__ = ABCMeta
def __init__(self):
"""Constructs a minibatch sampler."""
pass
@abstractmethod
def subsample(self, indicator, batch_size, **params):
"""Returns subsample of entries in indicator.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size.
**params: additional keyword arguments for specific implementations of
the MinibatchSampler.
Returns:
sample_indicator: boolean tensor of shape [N] whose True entries have been
sampled. If sum(indicator) >= batch_size, sum(is_sampled) = batch_size
"""
pass
@staticmethod
def subsample_indicator(indicator, num_samples):
"""Subsample indicator vector.
Given a boolean indicator vector with M elements set to `True`, the function
assigns all but `num_samples` of these previously `True` elements to
`False`. If `num_samples` is greater than M, the original indicator vector
is returned.
Args:
indicator: a 1-dimensional boolean tensor indicating which elements
are allowed to be sampled and which are not.
num_samples: int32 scalar tensor
Returns:
a boolean tensor with the same shape as input (indicator) tensor
"""
indices = tf.where(indicator)
indices = tf.random_shuffle(indices)
indices = tf.reshape(indices, [-1])
num_samples = tf.minimum(tf.size(indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
selected_indicator = ops.indices_to_dense_vector(selected_indices,
tf.shape(indicator)[0])
return tf.equal(selected_indicator, 1)
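# --- Editor's sketch (added for illustration, not from the original module):
# a trivial concrete sampler built on the static helper above. ---
class UniformMinibatchSampler(MinibatchSampler):
  """Samples `batch_size` allowed entries uniformly at random."""

  def subsample(self, indicator, batch_size, **params):
    return self.subsample_indicator(indicator, batch_size)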
[repo: mlperf/training_results_v0.7 | path: Google/benchmarks/maskrcnn/implementations/maskrcnn-research-TF-tpu-v4-16/object_detection/minibatch_sampler.py | language: Python | license: apache-2.0 | size: 3,226]
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib
# fix can't draw figure with docker
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# candidate colors
LINE_STYLE_CANDIDATE = ['b-o', 'r-o', 'k-o', 'm-o', 'c-o', 'g-o', 'y-o',
'b-s', 'r-s', 'k-s', 'm-s', 'c-s', 'g-s', 'y-s']
def draw_line_chart(file_name, title, x_label, y_label, data_list):
"""
draw line chart and save to file.
:param file_name: abs/relative file name to save chart figure
:param title: chart title
:param x_label: x-axis label
:param y_label: y-axis label
:param data_list: a list of line data.
each line is a dict of ("x-axis": list, "y-axis": list, "label": string)
"""
plt.figure(figsize=(12, 6))
plt.grid(True)
for i, data in enumerate(data_list):
plt.plot(data["x-axis"], data["y-axis"], LINE_STYLE_CANDIDATE[i], label=data["label"])
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(fontsize=12)
plt.title(title)
plt.tight_layout(pad=3, w_pad=3, h_pad=3)
plt.savefig(file_name)
plt.close()
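# --- Editor's usage sketch (added for illustration; data values are made up). ---
if __name__ == '__main__':
    demo_data = [
        {"x-axis": [1, 2, 3], "y-axis": [10.0, 20.0, 15.0], "label": "run A"},
        {"x-axis": [1, 2, 3], "y-axis": [12.0, 18.0, 22.0], "label": "run B"},
    ]
    draw_line_chart("demo_chart.png", "Demo", "iteration", "throughput", demo_data)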
[repo: www220/esp-idf | path: tools/tiny-test-fw/Utility/LineChart.py | language: Python | license: apache-2.0 | size: 1,681]
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Bus scheduling in Google CP Solver.
Problem from Taha "Introduction to Operations Research", page 58.
This is a slightly more general model than Taha's.
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/bus_scheduling.mzn
* Comet : http://www.hakank.org/comet/bus_schedule.co
* ECLiPSe : http://www.hakank.org/eclipse/bus_schedule.ecl
* Gecode : http://www.hakank.org/gecode/bus_schedule.cpp
* Tailor/Essence' : http://www.hakank.org/tailor/bus_schedule.eprime
* SICStus: http://hakank.org/sicstus/bus_schedule.pl
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main(num_buses_check=0):
# Create the solver.
solver = pywrapcp.Solver("Bus scheduling")
# data
time_slots = 6
demands = [8, 10, 7, 12, 4, 4]
max_num = sum(demands)
# declare variables
x = [solver.IntVar(0, max_num, "x%i" % i) for i in range(time_slots)]
num_buses = solver.IntVar(0, max_num, "num_buses")
#
# constraints
#
solver.Add(num_buses == solver.Sum(x))
# Meet the demands for this and the next time slot
for i in range(time_slots - 1):
solver.Add(x[i] + x[i + 1] >= demands[i])
# The demand "around the clock"
solver.Add(x[time_slots - 1] + x[0] == demands[time_slots - 1])
if num_buses_check > 0:
solver.Add(num_buses == num_buses_check)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
solution.Add(num_buses)
collector = solver.AllSolutionCollector(solution)
cargs = [collector]
# objective
if num_buses_check == 0:
objective = solver.Minimize(num_buses, 1)
cargs.extend([objective])
solver.Solve(solver.Phase(x,
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE),
cargs)
num_solutions = collector.SolutionCount()
num_buses_check_value = 0
for s in range(num_solutions):
print "x:", [collector.Value(s, x[i]) for i in range(len(x))],
num_buses_check_value = collector.Value(s, num_buses)
print " num_buses:", num_buses_check_value
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
print
if num_buses_check == 0:
return num_buses_check_value
if __name__ == "__main__":
print "Check for minimun number of buses"
num_buses_check = main()
print "... got ", num_buses_check, "buses"
print "All solutions:"
main(num_buses_check)
[repo: pombredanne/or-tools | path: examples/python/bus_schedule.py | language: Python | license: apache-2.0 | size: 3,281]
from datetime import datetime
from itertools import product
import numpy as np
import pytest
from xarray import (
DataArray,
Dataset,
auto_combine,
combine_by_coords,
combine_nested,
concat,
)
from xarray.core import dtypes
from xarray.core.combine import (
_check_shape_tile_ids,
_combine_all_along_first_dim,
_combine_nd,
_infer_concat_order_from_coords,
_infer_concat_order_from_positions,
_new_tile_id,
)
from . import assert_equal, assert_identical, raises_regex, requires_cftime
from .test_dataset import create_test_data
def assert_combined_tile_ids_equal(dict1, dict2):
assert len(dict1) == len(dict2)
for k, v in dict1.items():
assert k in dict2.keys()
assert_equal(dict1[k], dict2[k])
class TestTileIDsFromNestedList:
def test_1d(self):
ds = create_test_data
input = [ds(0), ds(1)]
expected = {(0,): ds(0), (1,): ds(1)}
actual = _infer_concat_order_from_positions(input)
assert_combined_tile_ids_equal(expected, actual)
def test_2d(self):
ds = create_test_data
input = [[ds(0), ds(1)], [ds(2), ds(3)], [ds(4), ds(5)]]
expected = {
(0, 0): ds(0),
(0, 1): ds(1),
(1, 0): ds(2),
(1, 1): ds(3),
(2, 0): ds(4),
(2, 1): ds(5),
}
actual = _infer_concat_order_from_positions(input)
assert_combined_tile_ids_equal(expected, actual)
def test_3d(self):
ds = create_test_data
input = [
[[ds(0), ds(1)], [ds(2), ds(3)], [ds(4), ds(5)]],
[[ds(6), ds(7)], [ds(8), ds(9)], [ds(10), ds(11)]],
]
expected = {
(0, 0, 0): ds(0),
(0, 0, 1): ds(1),
(0, 1, 0): ds(2),
(0, 1, 1): ds(3),
(0, 2, 0): ds(4),
(0, 2, 1): ds(5),
(1, 0, 0): ds(6),
(1, 0, 1): ds(7),
(1, 1, 0): ds(8),
(1, 1, 1): ds(9),
(1, 2, 0): ds(10),
(1, 2, 1): ds(11),
}
actual = _infer_concat_order_from_positions(input)
assert_combined_tile_ids_equal(expected, actual)
def test_single_dataset(self):
ds = create_test_data(0)
input = [ds]
expected = {(0,): ds}
actual = _infer_concat_order_from_positions(input)
assert_combined_tile_ids_equal(expected, actual)
def test_redundant_nesting(self):
ds = create_test_data
input = [[ds(0)], [ds(1)]]
expected = {(0, 0): ds(0), (1, 0): ds(1)}
actual = _infer_concat_order_from_positions(input)
assert_combined_tile_ids_equal(expected, actual)
def test_ignore_empty_list(self):
ds = create_test_data(0)
input = [ds, []]
expected = {(0,): ds}
actual = _infer_concat_order_from_positions(input)
assert_combined_tile_ids_equal(expected, actual)
def test_uneven_depth_input(self):
# Auto_combine won't work on ragged input
# but this is just to increase test coverage
ds = create_test_data
input = [ds(0), [ds(1), ds(2)]]
expected = {(0,): ds(0), (1, 0): ds(1), (1, 1): ds(2)}
actual = _infer_concat_order_from_positions(input)
assert_combined_tile_ids_equal(expected, actual)
def test_uneven_length_input(self):
# Auto_combine won't work on ragged input
# but this is just to increase test coverage
ds = create_test_data
input = [[ds(0)], [ds(1), ds(2)]]
expected = {(0, 0): ds(0), (1, 0): ds(1), (1, 1): ds(2)}
actual = _infer_concat_order_from_positions(input)
assert_combined_tile_ids_equal(expected, actual)
def test_infer_from_datasets(self):
ds = create_test_data
input = [ds(0), ds(1)]
expected = {(0,): ds(0), (1,): ds(1)}
actual = _infer_concat_order_from_positions(input)
assert_combined_tile_ids_equal(expected, actual)
class TestTileIDsFromCoords:
def test_1d(self):
ds0 = Dataset({"x": [0, 1]})
ds1 = Dataset({"x": [2, 3]})
expected = {(0,): ds0, (1,): ds1}
actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0])
assert_combined_tile_ids_equal(expected, actual)
assert concat_dims == ["x"]
def test_2d(self):
ds0 = Dataset({"x": [0, 1], "y": [10, 20, 30]})
ds1 = Dataset({"x": [2, 3], "y": [10, 20, 30]})
ds2 = Dataset({"x": [0, 1], "y": [40, 50, 60]})
ds3 = Dataset({"x": [2, 3], "y": [40, 50, 60]})
ds4 = Dataset({"x": [0, 1], "y": [70, 80, 90]})
ds5 = Dataset({"x": [2, 3], "y": [70, 80, 90]})
expected = {
(0, 0): ds0,
(1, 0): ds1,
(0, 1): ds2,
(1, 1): ds3,
(0, 2): ds4,
(1, 2): ds5,
}
actual, concat_dims = _infer_concat_order_from_coords(
[ds1, ds0, ds3, ds5, ds2, ds4]
)
assert_combined_tile_ids_equal(expected, actual)
assert concat_dims == ["x", "y"]
def test_no_dimension_coords(self):
ds0 = Dataset({"foo": ("x", [0, 1])})
ds1 = Dataset({"foo": ("x", [2, 3])})
with raises_regex(ValueError, "Could not find any dimension"):
_infer_concat_order_from_coords([ds1, ds0])
def test_coord_not_monotonic(self):
ds0 = Dataset({"x": [0, 1]})
ds1 = Dataset({"x": [3, 2]})
with raises_regex(
ValueError,
"Coordinate variable x is neither " "monotonically increasing nor",
):
_infer_concat_order_from_coords([ds1, ds0])
def test_coord_monotonically_decreasing(self):
ds0 = Dataset({"x": [3, 2]})
ds1 = Dataset({"x": [1, 0]})
expected = {(0,): ds0, (1,): ds1}
actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0])
assert_combined_tile_ids_equal(expected, actual)
assert concat_dims == ["x"]
def test_no_concatenation_needed(self):
ds = Dataset({"foo": ("x", [0, 1])})
expected = {(): ds}
actual, concat_dims = _infer_concat_order_from_coords([ds])
assert_combined_tile_ids_equal(expected, actual)
assert concat_dims == []
def test_2d_plus_bystander_dim(self):
ds0 = Dataset({"x": [0, 1], "y": [10, 20, 30], "t": [0.1, 0.2]})
ds1 = Dataset({"x": [2, 3], "y": [10, 20, 30], "t": [0.1, 0.2]})
ds2 = Dataset({"x": [0, 1], "y": [40, 50, 60], "t": [0.1, 0.2]})
ds3 = Dataset({"x": [2, 3], "y": [40, 50, 60], "t": [0.1, 0.2]})
expected = {(0, 0): ds0, (1, 0): ds1, (0, 1): ds2, (1, 1): ds3}
actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0, ds3, ds2])
assert_combined_tile_ids_equal(expected, actual)
assert concat_dims == ["x", "y"]
def test_string_coords(self):
ds0 = Dataset({"person": ["Alice", "Bob"]})
ds1 = Dataset({"person": ["Caroline", "Daniel"]})
expected = {(0,): ds0, (1,): ds1}
actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0])
assert_combined_tile_ids_equal(expected, actual)
assert concat_dims == ["person"]
# Decided against natural sorting of string coords GH #2616
def test_lexicographic_sort_string_coords(self):
ds0 = Dataset({"simulation": ["run8", "run9"]})
ds1 = Dataset({"simulation": ["run10", "run11"]})
expected = {(0,): ds1, (1,): ds0}
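        # Lexicographically "run10" < "run11" < "run8" < "run9" (the digit
        # "1" sorts before "8"), so ds1 is expected to come first.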
actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0])
assert_combined_tile_ids_equal(expected, actual)
assert concat_dims == ["simulation"]
def test_datetime_coords(self):
ds0 = Dataset({"time": [datetime(2000, 3, 6), datetime(2001, 3, 7)]})
ds1 = Dataset({"time": [datetime(1999, 1, 1), datetime(1999, 2, 4)]})
expected = {(0,): ds1, (1,): ds0}
actual, concat_dims = _infer_concat_order_from_coords([ds0, ds1])
assert_combined_tile_ids_equal(expected, actual)
assert concat_dims == ["time"]
@pytest.fixture(scope="module")
def create_combined_ids():
return _create_combined_ids
def _create_combined_ids(shape):
tile_ids = _create_tile_ids(shape)
nums = range(len(tile_ids))
return {tile_id: create_test_data(num) for tile_id, num in zip(tile_ids, nums)}
def _create_tile_ids(shape):
tile_ids = product(*(range(i) for i in shape))
return list(tile_ids)
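# Hedged illustration (not part of the test suite): for shape (2, 3) the
# helper above yields row-major tile ids:
#   _create_tile_ids((2, 3))
#   -> [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]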
class TestNewTileIDs:
@pytest.mark.parametrize(
"old_id, new_id",
[((3, 0, 1), (0, 1)), ((0, 0), (0,)), ((1,), ()), ((0,), ()), ((1, 0), (0,))],
)
def test_new_tile_id(self, old_id, new_id):
ds = create_test_data
assert _new_tile_id((old_id, ds)) == new_id
def test_get_new_tile_ids(self, create_combined_ids):
shape = (1, 2, 3)
combined_ids = create_combined_ids(shape)
expected_tile_ids = sorted(combined_ids.keys())
actual_tile_ids = _create_tile_ids(shape)
assert expected_tile_ids == actual_tile_ids
class TestCombineND:
@pytest.mark.parametrize("concat_dim", ["dim1", "new_dim"])
def test_concat_once(self, create_combined_ids, concat_dim):
shape = (2,)
combined_ids = create_combined_ids(shape)
ds = create_test_data
result = _combine_all_along_first_dim(
combined_ids,
dim=concat_dim,
data_vars="all",
coords="different",
compat="no_conflicts",
)
expected_ds = concat([ds(0), ds(1)], dim=concat_dim)
assert_combined_tile_ids_equal(result, {(): expected_ds})
def test_concat_only_first_dim(self, create_combined_ids):
shape = (2, 3)
combined_ids = create_combined_ids(shape)
result = _combine_all_along_first_dim(
combined_ids,
dim="dim1",
data_vars="all",
coords="different",
compat="no_conflicts",
)
ds = create_test_data
partway1 = concat([ds(0), ds(3)], dim="dim1")
partway2 = concat([ds(1), ds(4)], dim="dim1")
partway3 = concat([ds(2), ds(5)], dim="dim1")
expected_datasets = [partway1, partway2, partway3]
expected = {(i,): ds for i, ds in enumerate(expected_datasets)}
assert_combined_tile_ids_equal(result, expected)
@pytest.mark.parametrize("concat_dim", ["dim1", "new_dim"])
def test_concat_twice(self, create_combined_ids, concat_dim):
shape = (2, 3)
combined_ids = create_combined_ids(shape)
result = _combine_nd(combined_ids, concat_dims=["dim1", concat_dim])
ds = create_test_data
partway1 = concat([ds(0), ds(3)], dim="dim1")
partway2 = concat([ds(1), ds(4)], dim="dim1")
partway3 = concat([ds(2), ds(5)], dim="dim1")
expected = concat([partway1, partway2, partway3], dim=concat_dim)
assert_equal(result, expected)
class TestCheckShapeTileIDs:
def test_check_depths(self):
ds = create_test_data(0)
combined_tile_ids = {(0,): ds, (0, 1): ds}
with raises_regex(ValueError, "sub-lists do not have consistent depths"):
_check_shape_tile_ids(combined_tile_ids)
def test_check_lengths(self):
ds = create_test_data(0)
combined_tile_ids = {(0, 0): ds, (0, 1): ds, (0, 2): ds, (1, 0): ds, (1, 1): ds}
with raises_regex(ValueError, "sub-lists do not have consistent lengths"):
_check_shape_tile_ids(combined_tile_ids)
class TestNestedCombine:
def test_nested_concat(self):
objs = [Dataset({"x": [0]}), Dataset({"x": [1]})]
expected = Dataset({"x": [0, 1]})
actual = combine_nested(objs, concat_dim="x")
assert_identical(expected, actual)
actual = combine_nested(objs, concat_dim=["x"])
assert_identical(expected, actual)
actual = combine_nested([actual], concat_dim=None)
assert_identical(expected, actual)
actual = combine_nested([actual], concat_dim="x")
assert_identical(expected, actual)
objs = [Dataset({"x": [0, 1]}), Dataset({"x": [2]})]
actual = combine_nested(objs, concat_dim="x")
expected = Dataset({"x": [0, 1, 2]})
assert_identical(expected, actual)
# ensure combine_nested handles non-sorted variables
objs = [
Dataset({"x": ("a", [0]), "y": ("a", [0])}),
Dataset({"y": ("a", [1]), "x": ("a", [1])}),
]
actual = combine_nested(objs, concat_dim="a")
expected = Dataset({"x": ("a", [0, 1]), "y": ("a", [0, 1])})
assert_identical(expected, actual)
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1]})]
actual = combine_nested(objs, concat_dim="x")
expected = Dataset({"x": [0, 1], "y": [0]})
assert_identical(expected, actual)
@pytest.mark.parametrize(
"join, expected",
[
("outer", Dataset({"x": [0, 1], "y": [0, 1]})),
("inner", Dataset({"x": [0, 1], "y": []})),
("left", Dataset({"x": [0, 1], "y": [0]})),
("right", Dataset({"x": [0, 1], "y": [1]})),
],
)
def test_combine_nested_join(self, join, expected):
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})]
actual = combine_nested(objs, concat_dim="x", join=join)
assert_identical(expected, actual)
def test_combine_nested_join_exact(self):
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})]
with raises_regex(ValueError, "indexes along dimension"):
combine_nested(objs, concat_dim="x", join="exact")
def test_empty_input(self):
assert_identical(Dataset(), combine_nested([], concat_dim="x"))
# Fails because of concat's weird treatment of dimension coords, see #2975
@pytest.mark.xfail
def test_nested_concat_too_many_dims_at_once(self):
objs = [Dataset({"x": [0], "y": [1]}), Dataset({"y": [0], "x": [1]})]
with pytest.raises(ValueError, match="not equal across datasets"):
combine_nested(objs, concat_dim="x", coords="minimal")
def test_nested_concat_along_new_dim(self):
objs = [
Dataset({"a": ("x", [10]), "x": [0]}),
Dataset({"a": ("x", [20]), "x": [0]}),
]
expected = Dataset({"a": (("t", "x"), [[10], [20]]), "x": [0]})
actual = combine_nested(objs, concat_dim="t")
assert_identical(expected, actual)
# Same but with a DataArray as new dim, see GH #1988 and #2647
dim = DataArray([100, 150], name="baz", dims="baz")
expected = Dataset(
{"a": (("baz", "x"), [[10], [20]]), "x": [0], "baz": [100, 150]}
)
actual = combine_nested(objs, concat_dim=dim)
assert_identical(expected, actual)
def test_nested_merge(self):
data = Dataset({"x": 0})
actual = combine_nested([data, data, data], concat_dim=None)
assert_identical(data, actual)
ds1 = Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
ds2 = Dataset({"a": ("x", [2, 3]), "x": [1, 2]})
expected = Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]})
actual = combine_nested([ds1, ds2], concat_dim=None)
assert_identical(expected, actual)
actual = combine_nested([ds1, ds2], concat_dim=[None])
assert_identical(expected, actual)
tmp1 = Dataset({"x": 0})
tmp2 = Dataset({"x": np.nan})
actual = combine_nested([tmp1, tmp2], concat_dim=None)
assert_identical(tmp1, actual)
actual = combine_nested([tmp1, tmp2], concat_dim=[None])
assert_identical(tmp1, actual)
# Single object, with a concat_dim explicitly provided
# Test the issue reported in GH #1988
objs = [Dataset({"x": 0, "y": 1})]
dim = DataArray([100], name="baz", dims="baz")
actual = combine_nested(objs, concat_dim=[dim])
expected = Dataset({"x": ("baz", [0]), "y": ("baz", [1])}, {"baz": [100]})
assert_identical(expected, actual)
        # Just making sure that combine_nested is doing what is
        # expected for non-scalar values, too.
objs = [Dataset({"x": ("z", [0, 1]), "y": ("z", [1, 2])})]
dim = DataArray([100], name="baz", dims="baz")
actual = combine_nested(objs, concat_dim=[dim])
expected = Dataset(
{"x": (("baz", "z"), [[0, 1]]), "y": (("baz", "z"), [[1, 2]])},
{"baz": [100]},
)
assert_identical(expected, actual)
def test_concat_multiple_dims(self):
objs = [
[Dataset({"a": (("x", "y"), [[0]])}), Dataset({"a": (("x", "y"), [[1]])})],
[Dataset({"a": (("x", "y"), [[2]])}), Dataset({"a": (("x", "y"), [[3]])})],
]
actual = combine_nested(objs, concat_dim=["x", "y"])
expected = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])})
assert_identical(expected, actual)
def test_concat_name_symmetry(self):
"""Inspired by the discussion on GH issue #2777"""
da1 = DataArray(name="a", data=[[0]], dims=["x", "y"])
da2 = DataArray(name="b", data=[[1]], dims=["x", "y"])
da3 = DataArray(name="a", data=[[2]], dims=["x", "y"])
da4 = DataArray(name="b", data=[[3]], dims=["x", "y"])
x_first = combine_nested([[da1, da2], [da3, da4]], concat_dim=["x", "y"])
y_first = combine_nested([[da1, da3], [da2, da4]], concat_dim=["y", "x"])
assert_identical(x_first, y_first)
def test_concat_one_dim_merge_another(self):
data = create_test_data()
data1 = data.copy(deep=True)
data2 = data.copy(deep=True)
objs = [
[data1.var1.isel(dim2=slice(4)), data2.var1.isel(dim2=slice(4, 9))],
[data1.var2.isel(dim2=slice(4)), data2.var2.isel(dim2=slice(4, 9))],
]
expected = data[["var1", "var2"]]
actual = combine_nested(objs, concat_dim=[None, "dim2"])
assert expected.identical(actual)
def test_auto_combine_2d(self):
ds = create_test_data
partway1 = concat([ds(0), ds(3)], dim="dim1")
partway2 = concat([ds(1), ds(4)], dim="dim1")
partway3 = concat([ds(2), ds(5)], dim="dim1")
expected = concat([partway1, partway2, partway3], dim="dim2")
datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4), ds(5)]]
result = combine_nested(datasets, concat_dim=["dim1", "dim2"])
assert_equal(result, expected)
def test_auto_combine_2d_combine_attrs_kwarg(self):
ds = create_test_data
partway1 = concat([ds(0), ds(3)], dim="dim1")
partway2 = concat([ds(1), ds(4)], dim="dim1")
partway3 = concat([ds(2), ds(5)], dim="dim1")
expected = concat([partway1, partway2, partway3], dim="dim2")
expected_dict = {}
expected_dict["drop"] = expected.copy(deep=True)
expected_dict["drop"].attrs = {}
expected_dict["no_conflicts"] = expected.copy(deep=True)
expected_dict["no_conflicts"].attrs = {
"a": 1,
"b": 2,
"c": 3,
"d": 4,
"e": 5,
"f": 6,
}
expected_dict["override"] = expected.copy(deep=True)
expected_dict["override"].attrs = {"a": 1}
datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4), ds(5)]]
datasets[0][0].attrs = {"a": 1}
datasets[0][1].attrs = {"a": 1, "b": 2}
datasets[0][2].attrs = {"a": 1, "c": 3}
datasets[1][0].attrs = {"a": 1, "d": 4}
datasets[1][1].attrs = {"a": 1, "e": 5}
datasets[1][2].attrs = {"a": 1, "f": 6}
with raises_regex(ValueError, "combine_attrs='identical'"):
result = combine_nested(
datasets, concat_dim=["dim1", "dim2"], combine_attrs="identical"
)
for combine_attrs in expected_dict:
result = combine_nested(
datasets, concat_dim=["dim1", "dim2"], combine_attrs=combine_attrs
)
assert_identical(result, expected_dict[combine_attrs])
def test_combine_nested_missing_data_new_dim(self):
# Your data includes "time" and "station" dimensions, and each year's
# data has a different set of stations.
datasets = [
Dataset({"a": ("x", [2, 3]), "x": [1, 2]}),
Dataset({"a": ("x", [1, 2]), "x": [0, 1]}),
]
expected = Dataset(
{"a": (("t", "x"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {"x": [0, 1, 2]}
)
actual = combine_nested(datasets, concat_dim="t")
assert_identical(expected, actual)
def test_invalid_hypercube_input(self):
ds = create_test_data
datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4)]]
with raises_regex(ValueError, "sub-lists do not have " "consistent lengths"):
combine_nested(datasets, concat_dim=["dim1", "dim2"])
datasets = [[ds(0), ds(1)], [[ds(3), ds(4)]]]
with raises_regex(ValueError, "sub-lists do not have " "consistent depths"):
combine_nested(datasets, concat_dim=["dim1", "dim2"])
datasets = [[ds(0), ds(1)], [ds(3), ds(4)]]
with raises_regex(ValueError, "concat_dims has length"):
combine_nested(datasets, concat_dim=["dim1"])
def test_merge_one_dim_concat_another(self):
objs = [
[Dataset({"foo": ("x", [0, 1])}), Dataset({"bar": ("x", [10, 20])})],
[Dataset({"foo": ("x", [2, 3])}), Dataset({"bar": ("x", [30, 40])})],
]
expected = Dataset({"foo": ("x", [0, 1, 2, 3]), "bar": ("x", [10, 20, 30, 40])})
actual = combine_nested(objs, concat_dim=["x", None], compat="equals")
assert_identical(expected, actual)
# Proving it works symmetrically
objs = [
[Dataset({"foo": ("x", [0, 1])}), Dataset({"foo": ("x", [2, 3])})],
[Dataset({"bar": ("x", [10, 20])}), Dataset({"bar": ("x", [30, 40])})],
]
actual = combine_nested(objs, concat_dim=[None, "x"], compat="equals")
assert_identical(expected, actual)
def test_combine_concat_over_redundant_nesting(self):
objs = [[Dataset({"x": [0]}), Dataset({"x": [1]})]]
actual = combine_nested(objs, concat_dim=[None, "x"])
expected = Dataset({"x": [0, 1]})
assert_identical(expected, actual)
objs = [[Dataset({"x": [0]})], [Dataset({"x": [1]})]]
actual = combine_nested(objs, concat_dim=["x", None])
expected = Dataset({"x": [0, 1]})
assert_identical(expected, actual)
objs = [[Dataset({"x": [0]})]]
actual = combine_nested(objs, concat_dim=[None, None])
expected = Dataset({"x": [0]})
assert_identical(expected, actual)
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
def test_combine_nested_fill_value(self, fill_value):
datasets = [
Dataset({"a": ("x", [2, 3]), "x": [1, 2]}),
Dataset({"a": ("x", [1, 2]), "x": [0, 1]}),
]
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value = np.nan
expected = Dataset(
{"a": (("t", "x"), [[fill_value, 2, 3], [1, 2, fill_value]])},
{"x": [0, 1, 2]},
)
actual = combine_nested(datasets, concat_dim="t", fill_value=fill_value)
assert_identical(expected, actual)
class TestCombineAuto:
def test_combine_by_coords(self):
objs = [Dataset({"x": [0]}), Dataset({"x": [1]})]
actual = combine_by_coords(objs)
expected = Dataset({"x": [0, 1]})
assert_identical(expected, actual)
actual = combine_by_coords([actual])
assert_identical(expected, actual)
objs = [Dataset({"x": [0, 1]}), Dataset({"x": [2]})]
actual = combine_by_coords(objs)
expected = Dataset({"x": [0, 1, 2]})
assert_identical(expected, actual)
        # ensure combine_by_coords handles non-sorted variables
objs = [
Dataset({"x": ("a", [0]), "y": ("a", [0]), "a": [0]}),
Dataset({"x": ("a", [1]), "y": ("a", [1]), "a": [1]}),
]
actual = combine_by_coords(objs)
expected = Dataset({"x": ("a", [0, 1]), "y": ("a", [0, 1]), "a": [0, 1]})
assert_identical(expected, actual)
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"y": [1], "x": [1]})]
actual = combine_by_coords(objs)
expected = Dataset({"x": [0, 1], "y": [0, 1]})
assert_equal(actual, expected)
objs = [Dataset({"x": 0}), Dataset({"x": 1})]
with raises_regex(ValueError, "Could not find any dimension coordinates"):
combine_by_coords(objs)
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [0]})]
with raises_regex(ValueError, "Every dimension needs a coordinate"):
combine_by_coords(objs)
def test_empty_input(self):
assert_identical(Dataset(), combine_by_coords([]))
@pytest.mark.parametrize(
"join, expected",
[
("outer", Dataset({"x": [0, 1], "y": [0, 1]})),
("inner", Dataset({"x": [0, 1], "y": []})),
("left", Dataset({"x": [0, 1], "y": [0]})),
("right", Dataset({"x": [0, 1], "y": [1]})),
],
)
def test_combine_coords_join(self, join, expected):
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})]
actual = combine_nested(objs, concat_dim="x", join=join)
assert_identical(expected, actual)
def test_combine_coords_join_exact(self):
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})]
with raises_regex(ValueError, "indexes along dimension"):
combine_nested(objs, concat_dim="x", join="exact")
@pytest.mark.parametrize(
"combine_attrs, expected",
[
("drop", Dataset({"x": [0, 1], "y": [0, 1]}, attrs={})),
(
"no_conflicts",
Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1, "b": 2}),
),
("override", Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1})),
],
)
def test_combine_coords_combine_attrs(self, combine_attrs, expected):
objs = [
Dataset({"x": [0], "y": [0]}, attrs={"a": 1}),
Dataset({"x": [1], "y": [1]}, attrs={"a": 1, "b": 2}),
]
actual = combine_nested(
objs, concat_dim="x", join="outer", combine_attrs=combine_attrs
)
assert_identical(expected, actual)
if combine_attrs == "no_conflicts":
objs[1].attrs["a"] = 2
with raises_regex(ValueError, "combine_attrs='no_conflicts'"):
actual = combine_nested(
objs, concat_dim="x", join="outer", combine_attrs=combine_attrs
)
def test_combine_coords_combine_attrs_identical(self):
objs = [
Dataset({"x": [0], "y": [0]}, attrs={"a": 1}),
Dataset({"x": [1], "y": [1]}, attrs={"a": 1}),
]
expected = Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1})
actual = combine_nested(
objs, concat_dim="x", join="outer", combine_attrs="identical"
)
assert_identical(expected, actual)
objs[1].attrs["b"] = 2
with raises_regex(ValueError, "combine_attrs='identical'"):
actual = combine_nested(
objs, concat_dim="x", join="outer", combine_attrs="identical"
)
def test_infer_order_from_coords(self):
data = create_test_data()
objs = [data.isel(dim2=slice(4, 9)), data.isel(dim2=slice(4))]
actual = combine_by_coords(objs)
expected = data
assert expected.broadcast_equals(actual)
def test_combine_leaving_bystander_dimensions(self):
# Check non-monotonic bystander dimension coord doesn't raise
# ValueError on combine (https://github.com/pydata/xarray/issues/3150)
ycoord = ["a", "c", "b"]
data = np.random.rand(7, 3)
ds1 = Dataset(
data_vars=dict(data=(["x", "y"], data[:3, :])),
coords=dict(x=[1, 2, 3], y=ycoord),
)
ds2 = Dataset(
data_vars=dict(data=(["x", "y"], data[3:, :])),
coords=dict(x=[4, 5, 6, 7], y=ycoord),
)
expected = Dataset(
data_vars=dict(data=(["x", "y"], data)),
coords=dict(x=[1, 2, 3, 4, 5, 6, 7], y=ycoord),
)
actual = combine_by_coords((ds1, ds2))
assert_identical(expected, actual)
def test_combine_by_coords_previously_failed(self):
        # In this scenario, the file containing one year's data for one
        # variable is missing.
datasets = [
Dataset({"a": ("x", [0]), "x": [0]}),
Dataset({"b": ("x", [0]), "x": [0]}),
Dataset({"a": ("x", [1]), "x": [1]}),
]
expected = Dataset({"a": ("x", [0, 1]), "b": ("x", [0, np.nan])}, {"x": [0, 1]})
actual = combine_by_coords(datasets)
assert_identical(expected, actual)
def test_combine_by_coords_still_fails(self):
# concat can't handle new variables (yet):
# https://github.com/pydata/xarray/issues/508
datasets = [Dataset({"x": 0}, {"y": 0}), Dataset({"x": 1}, {"y": 1, "z": 1})]
with pytest.raises(ValueError):
combine_by_coords(datasets, "y")
def test_combine_by_coords_no_concat(self):
objs = [Dataset({"x": 0}), Dataset({"y": 1})]
actual = combine_by_coords(objs)
expected = Dataset({"x": 0, "y": 1})
assert_identical(expected, actual)
objs = [Dataset({"x": 0, "y": 1}), Dataset({"y": np.nan, "z": 2})]
actual = combine_by_coords(objs)
expected = Dataset({"x": 0, "y": 1, "z": 2})
assert_identical(expected, actual)
def test_check_for_impossible_ordering(self):
ds0 = Dataset({"x": [0, 1, 5]})
ds1 = Dataset({"x": [2, 3]})
with raises_regex(
ValueError, "does not have monotonic global indexes" " along dimension x"
):
combine_by_coords([ds1, ds0])
def test_combine_by_coords_incomplete_hypercube(self):
# test that this succeeds with default fill_value
x1 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [0]})
x2 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [1], "x": [0]})
x3 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [1]})
actual = combine_by_coords([x1, x2, x3])
expected = Dataset(
{"a": (("y", "x"), [[1, 1], [1, np.nan]])},
coords={"y": [0, 1], "x": [0, 1]},
)
assert_identical(expected, actual)
# test that this fails if fill_value is None
with pytest.raises(ValueError):
combine_by_coords([x1, x2, x3], fill_value=None)
@pytest.mark.filterwarnings(
    "ignore:In xarray version 0.15 `auto_combine` will be deprecated"
)
@pytest.mark.filterwarnings("ignore:Also `open_mfdataset` will no longer")
@pytest.mark.filterwarnings("ignore:The datasets supplied")
class TestAutoCombineOldAPI:
"""
Set of tests which check that old 1-dimensional auto_combine behaviour is
still satisfied. #2616
"""
def test_auto_combine(self):
objs = [Dataset({"x": [0]}), Dataset({"x": [1]})]
actual = auto_combine(objs)
expected = Dataset({"x": [0, 1]})
assert_identical(expected, actual)
actual = auto_combine([actual])
assert_identical(expected, actual)
objs = [Dataset({"x": [0, 1]}), Dataset({"x": [2]})]
actual = auto_combine(objs)
expected = Dataset({"x": [0, 1, 2]})
assert_identical(expected, actual)
# ensure auto_combine handles non-sorted variables
objs = [
Dataset({"x": ("a", [0]), "y": ("a", [0])}),
Dataset({"y": ("a", [1]), "x": ("a", [1])}),
]
actual = auto_combine(objs)
expected = Dataset({"x": ("a", [0, 1]), "y": ("a", [0, 1])})
assert_identical(expected, actual)
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"y": [1], "x": [1]})]
with raises_regex(ValueError, "too many .* dimensions"):
auto_combine(objs)
objs = [Dataset({"x": 0}), Dataset({"x": 1})]
with raises_regex(ValueError, "cannot infer dimension"):
auto_combine(objs)
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [0]})]
with raises_regex(ValueError, "'y' is not present in all datasets"):
auto_combine(objs)
def test_auto_combine_previously_failed(self):
        # In this scenario, the file containing one year's data for one
        # variable is missing.
datasets = [
Dataset({"a": ("x", [0]), "x": [0]}),
Dataset({"b": ("x", [0]), "x": [0]}),
Dataset({"a": ("x", [1]), "x": [1]}),
]
expected = Dataset({"a": ("x", [0, 1]), "b": ("x", [0, np.nan])}, {"x": [0, 1]})
actual = auto_combine(datasets)
assert_identical(expected, actual)
# Your data includes "time" and "station" dimensions, and each year's
# data has a different set of stations.
datasets = [
Dataset({"a": ("x", [2, 3]), "x": [1, 2]}),
Dataset({"a": ("x", [1, 2]), "x": [0, 1]}),
]
expected = Dataset(
{"a": (("t", "x"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {"x": [0, 1, 2]}
)
actual = auto_combine(datasets, concat_dim="t")
assert_identical(expected, actual)
def test_auto_combine_with_new_variables(self):
datasets = [Dataset({"x": 0}, {"y": 0}), Dataset({"x": 1}, {"y": 1, "z": 1})]
actual = auto_combine(datasets, "y")
expected = Dataset({"x": ("y", [0, 1])}, {"y": [0, 1], "z": 1})
assert_identical(expected, actual)
def test_auto_combine_no_concat(self):
objs = [Dataset({"x": 0}), Dataset({"y": 1})]
actual = auto_combine(objs)
expected = Dataset({"x": 0, "y": 1})
assert_identical(expected, actual)
objs = [Dataset({"x": 0, "y": 1}), Dataset({"y": np.nan, "z": 2})]
actual = auto_combine(objs)
expected = Dataset({"x": 0, "y": 1, "z": 2})
assert_identical(expected, actual)
data = Dataset({"x": 0})
actual = auto_combine([data, data, data], concat_dim=None)
assert_identical(data, actual)
# Single object, with a concat_dim explicitly provided
# Test the issue reported in GH #1988
objs = [Dataset({"x": 0, "y": 1})]
dim = DataArray([100], name="baz", dims="baz")
actual = auto_combine(objs, concat_dim=dim)
expected = Dataset({"x": ("baz", [0]), "y": ("baz", [1])}, {"baz": [100]})
assert_identical(expected, actual)
# Just making sure that auto_combine is doing what is
# expected for non-scalar values, too.
objs = [Dataset({"x": ("z", [0, 1]), "y": ("z", [1, 2])})]
dim = DataArray([100], name="baz", dims="baz")
actual = auto_combine(objs, concat_dim=dim)
expected = Dataset(
{"x": (("baz", "z"), [[0, 1]]), "y": (("baz", "z"), [[1, 2]])},
{"baz": [100]},
)
assert_identical(expected, actual)
def test_auto_combine_order_by_appearance_not_coords(self):
objs = [
Dataset({"foo": ("x", [0])}, coords={"x": ("x", [1])}),
Dataset({"foo": ("x", [1])}, coords={"x": ("x", [0])}),
]
actual = auto_combine(objs)
expected = Dataset({"foo": ("x", [0, 1])}, coords={"x": ("x", [1, 0])})
assert_identical(expected, actual)
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
def test_auto_combine_fill_value(self, fill_value):
datasets = [
Dataset({"a": ("x", [2, 3]), "x": [1, 2]}),
Dataset({"a": ("x", [1, 2]), "x": [0, 1]}),
]
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value = np.nan
expected = Dataset(
{"a": (("t", "x"), [[fill_value, 2, 3], [1, 2, fill_value]])},
{"x": [0, 1, 2]},
)
actual = auto_combine(datasets, concat_dim="t", fill_value=fill_value)
assert_identical(expected, actual)
class TestAutoCombineDeprecation:
"""
Set of tests to check that FutureWarnings are correctly raised until the
deprecation cycle is complete. #2616
"""
def test_auto_combine_with_concat_dim(self):
objs = [Dataset({"x": [0]}), Dataset({"x": [1]})]
with pytest.warns(FutureWarning, match="`concat_dim`"):
auto_combine(objs, concat_dim="x")
def test_auto_combine_with_merge_and_concat(self):
objs = [Dataset({"x": [0]}), Dataset({"x": [1]}), Dataset({"z": ((), 99)})]
with pytest.warns(FutureWarning, match="require both concatenation"):
auto_combine(objs)
def test_auto_combine_with_coords(self):
objs = [
Dataset({"foo": ("x", [0])}, coords={"x": ("x", [0])}),
Dataset({"foo": ("x", [1])}, coords={"x": ("x", [1])}),
]
with pytest.warns(FutureWarning, match="supplied have global"):
auto_combine(objs)
def test_auto_combine_without_coords(self):
objs = [Dataset({"foo": ("x", [0])}), Dataset({"foo": ("x", [1])})]
with pytest.warns(FutureWarning, match="supplied do not have global"):
auto_combine(objs)
@requires_cftime
def test_combine_by_coords_distant_cftime_dates():
# Regression test for https://github.com/pydata/xarray/issues/3535
import cftime
time_1 = [cftime.DatetimeGregorian(4500, 12, 31)]
time_2 = [cftime.DatetimeGregorian(4600, 12, 31)]
time_3 = [cftime.DatetimeGregorian(5100, 12, 31)]
da_1 = DataArray([0], dims=["time"], coords=[time_1], name="a").to_dataset()
da_2 = DataArray([1], dims=["time"], coords=[time_2], name="a").to_dataset()
da_3 = DataArray([2], dims=["time"], coords=[time_3], name="a").to_dataset()
result = combine_by_coords([da_1, da_2, da_3])
expected_time = np.concatenate([time_1, time_2, time_3])
expected = DataArray(
[0, 1, 2], dims=["time"], coords=[expected_time], name="a"
).to_dataset()
assert_identical(result, expected)
| shoyer/xarray | xarray/tests/test_combine.py | Python | apache-2.0 | 38,646 |
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.middleware.csrf import get_token
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.contrib import messages
from django.views.generic import View
from deploy_board.settings import IS_PINTEREST
if IS_PINTEREST:
from deploy_board.settings import DEFAULT_PROVIDER, DEFAULT_CMP_IMAGE, \
DEFAULT_CMP_HOST_TYPE, DEFAULT_CMP_PINFO_ENVIRON, DEFAULT_CMP_ACCESS_ROLE, DEFAULT_CELL, \
DEFAULT_PLACEMENT, USER_DATA_CONFIG_SETTINGS_WIKI
import json
import logging
from helpers import baseimages_helper, hosttypes_helper, securityzones_helper, placements_helper, \
autoscaling_groups_helper, groups_helper, cells_helper
from helpers import clusters_helper, environs_helper, environ_hosts_helper
from helpers.exceptions import NotAuthorizedException, TeletraanException
import common
import traceback
log = logging.getLogger(__name__)
DEFAULT_PAGE_SIZE = 200
class EnvCapacityBasicCreateView(View):
def get(self, request, name, stage):
host_types = hosttypes_helper.get_by_provider(
request, DEFAULT_PROVIDER)
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
security_zones = securityzones_helper.get_by_provider_and_cell_name(
request, DEFAULT_PROVIDER, DEFAULT_CELL)
placements = placements_helper.get_by_provider_and_cell_name(
request, DEFAULT_PROVIDER, DEFAULT_CELL)
default_base_image = get_base_image_info_by_name(request, DEFAULT_CMP_IMAGE, DEFAULT_CELL)
env = environs_helper.get_env_by_stage(request, name, stage)
capacity_creation_info = {
'environment': env,
'hostTypes': host_types,
'securityZones': security_zones,
'placements': placements,
'baseImages': default_base_image,
'defaultCMPConfigs': get_default_cmp_configs(name, stage),
'defaultProvider': DEFAULT_PROVIDER,
'defaultHostType': DEFAULT_CMP_HOST_TYPE,
'defaultSeurityZone': DEFAULT_PLACEMENT
}
# cluster manager
return render(request, 'configs/new_capacity.html', {
'env': env,
'capacity_creation_info': json.dumps(capacity_creation_info)})
def post(self, request, name, stage):
log.info("Post to capacity with data {0}".format(request.body))
try:
cluster_name = '{}-{}'.format(name, stage)
cluster_info = json.loads(request.body)
log.info("Create Capacity in the provider")
if 'configs' in cluster_info:
                if 'spiffe_id' in cluster_info['configs']:
                    log.error("Teletraan does not allow users to set spiffe_id %s" % cluster_info['configs']['spiffe_id'])
                    raise TeletraanException("Teletraan does not allow users to set spiffe_id")
clusters_helper.create_cluster_with_env(request, cluster_name, name, stage, cluster_info)
log.info("Associate cluster_name to environment")
# Update cluster info
environs_helper.update_env_basic_config(
request, name, stage, data={"clusterName": cluster_name})
log.info("Update capacity to the environment")
# set up env and group relationship
environs_helper.add_env_capacity(
request, name, stage, capacity_type="GROUP", data=cluster_name)
return HttpResponse("{}", content_type="application/json")
except NotAuthorizedException as e:
log.error("Have an NotAuthorizedException error {}".format(e))
return HttpResponse(e, status=403, content_type="application/json")
except Exception as e:
log.error("Have an error {}".format(e))
return HttpResponse(e, status=500, content_type="application/json")
class EnvCapacityAdvCreateView(View):
def get(self, request, name, stage):
host_types = hosttypes_helper.get_by_provider(
request, DEFAULT_PROVIDER)
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
security_zones = securityzones_helper.get_by_provider_and_cell_name(
request, DEFAULT_PROVIDER, DEFAULT_CELL)
placements = placements_helper.get_by_provider_and_cell_name(
request, DEFAULT_PROVIDER, DEFAULT_CELL)
cells = cells_helper.get_by_provider(request, DEFAULT_PROVIDER)
base_images = get_base_image_info_by_name(request, DEFAULT_CMP_IMAGE, DEFAULT_CELL)
base_images_names = baseimages_helper.get_image_names(
request, DEFAULT_PROVIDER, DEFAULT_CELL)
env = environs_helper.get_env_by_stage(request, name, stage)
provider_list = baseimages_helper.get_all_providers(request)
capacity_creation_info = {
'environment': env,
'hostTypes': host_types,
'securityZones': security_zones,
'placements': placements,
'cells': cells,
'baseImages': base_images,
'baseImageNames': base_images_names,
'defaultBaseImage': DEFAULT_CMP_IMAGE,
'defaultCMPConfigs': get_default_cmp_configs(name, stage),
'defaultProvider': DEFAULT_PROVIDER,
'defaultCell': DEFAULT_CELL,
'defaultHostType': DEFAULT_CMP_HOST_TYPE,
'defaultSeurityZone': DEFAULT_PLACEMENT,
'providerList': provider_list,
'configList': get_aws_config_name_list_by_image(DEFAULT_CMP_IMAGE)
}
# cluster manager
return render(request, 'configs/new_capacity_adv.html', {
'env': env,
'capacity_creation_info': json.dumps(capacity_creation_info),
'user_data_config_settings_wiki': USER_DATA_CONFIG_SETTINGS_WIKI,
'is_pinterest': IS_PINTEREST})
def post(self, request, name, stage):
log.info("Post to capacity with data {0}".format(request.body))
try:
cluster_name = '{}-{}'.format(name, stage)
cluster_info = json.loads(request.body)
log.info("Create Capacity in the provider")
clusters_helper.create_cluster(request, cluster_name, cluster_info)
log.info("Update cluster_name to environment")
# Update environment
environs_helper.update_env_basic_config(request, name, stage,
data={"clusterName": cluster_name, "IsDocker": True})
log.info("Update capacity to the environment")
# set up env and group relationship
environs_helper.add_env_capacity(
request, name, stage, capacity_type="GROUP", data=cluster_name)
return HttpResponse("{}", content_type="application/json")
except NotAuthorizedException as e:
log.error("Have an NotAuthorizedException error {}".format(e))
return HttpResponse(e, status=403, content_type="application/json")
except Exception as e:
log.error("Have an error {}", e)
return HttpResponse(e, status=500, content_type="application/json")
class ClusterConfigurationView(View):
def get(self, request, name, stage):
cluster_name = '{}-{}'.format(name, stage)
current_cluster = clusters_helper.get_cluster(request, cluster_name)
host_types = hosttypes_helper.get_by_provider(
request, DEFAULT_PROVIDER)
current_image = baseimages_helper.get_by_id(
request, current_cluster['baseImageId'])
current_cluster['baseImageName'] = current_image['abstract_name']
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
cells = cells_helper.get_by_provider(request, current_cluster['provider'])
security_zones = securityzones_helper.get_by_provider_and_cell_name(
request, current_cluster['provider'], current_cluster['cellName'])
placements = placements_helper.get_by_provider_and_cell_name(
request, current_cluster['provider'], current_cluster['cellName'])
base_images = get_base_image_info_by_name(
request, current_image['abstract_name'], current_cluster['cellName'])
base_images_names = baseimages_helper.get_image_names(
request, current_cluster['provider'], current_cluster['cellName'])
env = environs_helper.get_env_by_stage(request, name, stage)
provider_list = baseimages_helper.get_all_providers(request)
capacity_creation_info = {
'environment': env,
'cells': cells,
'hostTypes': host_types,
'securityZones': security_zones,
'placements': placements,
'baseImages': base_images,
'baseImageNames': base_images_names,
'defaultBaseImage': DEFAULT_CMP_IMAGE,
'defaultCMPConfigs': get_default_cmp_configs(name, stage),
'defaultProvider': DEFAULT_PROVIDER,
'providerList': provider_list,
'configList': get_aws_config_name_list_by_image(DEFAULT_CMP_IMAGE),
'currentCluster': current_cluster
}
return render(request, 'clusters/cluster_configuration.html', {
'env': env,
'capacity_creation_info': json.dumps(capacity_creation_info),
'user_data_config_settings_wiki': USER_DATA_CONFIG_SETTINGS_WIKI,
'is_pinterest': IS_PINTEREST})
def post(self, request, name, stage):
try:
env = environs_helper.get_env_by_stage(request, name, stage)
cluster_name = env.get('clusterName')
cluster_info = json.loads(request.body)
log.info("Update Cluster Configuration with {}", cluster_info)
cluster_name = '{}-{}'.format(name, stage)
current_cluster = clusters_helper.get_cluster(request, cluster_name)
log.info("getting current Cluster Configuration is {}", current_cluster)
if 'configs' in current_cluster and 'configs' in cluster_info:
                if 'spiffe_id' in current_cluster['configs'] and 'spiffe_id' in cluster_info['configs']:
                    if current_cluster['configs']['spiffe_id'] != cluster_info['configs']['spiffe_id']:
                        log.error("Teletraan does not allow users to update spiffe_id %s" % cluster_info['configs']['spiffe_id'])
                        raise TeletraanException("Teletraan does not allow users to update spiffe_id")
                if 'spiffe_id' in current_cluster['configs'] and 'spiffe_id' not in cluster_info['configs']:
                    log.error("Teletraan does not allow users to remove spiffe_id %s" % current_cluster['configs']['spiffe_id'])
                    raise TeletraanException("Teletraan does not allow users to remove spiffe_id")
image = baseimages_helper.get_by_id(request, cluster_info['baseImageId'])
clusters_helper.update_cluster(request, cluster_name, cluster_info)
except NotAuthorizedException as e:
log.error("Have an NotAuthorizedException error {}".format(e))
return HttpResponse(e, status=403, content_type="application/json")
except Exception as e:
log.error("Post to cluster configuration view has an error {}", e)
return HttpResponse(e, status=500, content_type="application/json")
return HttpResponse(json.dumps(cluster_info), content_type="application/json")
class ClusterCapacityUpdateView(View):
def post(self, request, name, stage):
log.info("Update Cluster Capacity with data {}".format(request.body))
try:
settings = json.loads(request.body)
cluster_name = '{}-{}'.format(name, stage)
log.info("Update cluster {0} with {1}".format(
cluster_name, settings))
minSize = int(settings['minsize'])
maxSize = int(settings['maxsize'])
clusters_helper.update_cluster_capacity(
request, cluster_name, minSize, maxSize)
except NotAuthorizedException as e:
log.error("Have an NotAuthorizedException error {}".format(e))
return HttpResponse(e, status=403, content_type="application/json")
except Exception as e:
log.error("Post to cluster capacity view has an error {}", e)
return HttpResponse(e, status=500, content_type="application/json")
return HttpResponse(json.dumps(settings), content_type="application/json")
def create_base_image(request):
params = request.POST
base_image_info = {}
base_image_info['abstract_name'] = params['abstractName']
base_image_info['provider_name'] = params['providerName']
base_image_info['provider'] = params['provider']
base_image_info['description'] = params['description']
base_image_info['cell_name'] = params['cellName']
if 'basic' in params:
base_image_info['basic'] = True
else:
base_image_info['basic'] = False
baseimages_helper.create_base_image(request, base_image_info)
return redirect('/clouds/baseimages')
def get_base_images(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
base_images = baseimages_helper.get_all(request, index, size)
provider_list = baseimages_helper.get_all_providers(request)
cells_list = cells_helper.get_by_provider(request, DEFAULT_PROVIDER)
return render(request, 'clusters/base_images.html', {
'base_images': base_images,
'provider_list': provider_list,
'cells_list': cells_list,
'pageIndex': index,
'pageSize': DEFAULT_PAGE_SIZE,
'disablePrevious': index <= 1,
'disableNext': len(base_images) < DEFAULT_PAGE_SIZE,
})
def get_image_names_by_provider_and_cell(request, provider, cell):
image_names = baseimages_helper.get_image_names(request, provider, cell)
return HttpResponse(json.dumps(image_names), content_type="application/json")
def get_images_by_provider_and_cell(request, provider, cell):
images = baseimages_helper.get_all_by(request, provider, cell)
return HttpResponse(json.dumps(images), content_type="application/json")
def get_placements_by_provider_and_cell(request, provider, cell):
data = placements_helper.get_by_provider_and_cell_name(request, provider, cell)
return HttpResponse(json.dumps(data), content_type="application/json")
def get_security_zones_by_provider_and_cell(request, provider, cell):
data = securityzones_helper.get_by_provider_and_cell_name(request, provider, cell)
return HttpResponse(json.dumps(data), content_type="application/json")
def get_image_names(request):
params = request.GET
provider = params['provider']
env_name = params['env']
stage_name = params['stage']
cell = params.get('cell', DEFAULT_CELL)
image_names = baseimages_helper.get_image_names(request, provider, cell)
curr_image_name = None
curr_base_image = None
if 'curr_base_image' in params:
curr_base_image = params['curr_base_image']
image = baseimages_helper.get_by_id(request, curr_base_image)
curr_image_name = image.get('abstract_name')
contents = render_to_string("clusters/get_image_name.tmpl", {
'image_names': image_names,
'curr_image_name': curr_image_name,
'curr_base_image': curr_base_image,
'provider': provider,
'env_name': env_name,
'stage_name': stage_name,
})
return HttpResponse(json.dumps(contents), content_type="application/json")
def get_base_images_by_name(request):
params = request.GET
cell = params.get('cell', DEFAULT_CELL)
base_images = None
if 'name' in params:
name = params['name']
base_images = baseimages_helper.get_by_name(request, name, cell)
curr_base_image = None
if 'curr_base_image' in params:
curr_base_image = params['curr_base_image']
image = baseimages_helper.get_by_id(request, curr_base_image)
curr_image_name = image.get('abstract_name')
base_images = baseimages_helper.get_by_name(request, curr_image_name, cell)
contents = render_to_string("clusters/get_base_image.tmpl", {
'base_images': base_images,
'curr_base_image': curr_base_image,
})
return HttpResponse(json.dumps(contents), content_type="application/json")
def get_base_image_info_by_name(request, name, cell):
if name.startswith('cmp_base'):
base_images = baseimages_helper.get_acceptance_by_name(request, name, cell)
with_acceptance_rs = []
if base_images:
for image in base_images:
r = image.get('baseImage')
if r:
r['acceptance'] = image.get('acceptance', 'UNKNOWN')
with_acceptance_rs.append(r)
return with_acceptance_rs
return baseimages_helper.get_by_name(request, name, cell)
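# Hedged note on the 'cmp_base*' branch above: each returned record is the
# raw 'baseImage' dict with an extra 'acceptance' field defaulting to
# 'UNKNOWN', e.g. [{'id': 'img-1', 'acceptance': 'ACCEPTED'}] (hypothetical
# record shape, for illustration only).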
def get_base_images_by_name_json(request, name):
cell = DEFAULT_CELL
params = request.GET
if params:
cell = params.get('cell', DEFAULT_CELL)
base_images = get_base_image_info_by_name(request, name, cell)
return HttpResponse(json.dumps(base_images), content_type="application/json")
def create_host_type(request):
params = request.POST
host_type_info = {}
host_type_info['abstract_name'] = params['abstractName']
host_type_info['provider_name'] = params['providerName']
host_type_info['provider'] = params['provider']
host_type_info['description'] = params['description']
host_type_info['mem'] = float(params['mem']) * 1024
host_type_info['core'] = int(params['core'])
host_type_info['storage'] = params['storage']
if 'basic' in params:
host_type_info['basic'] = True
else:
host_type_info['basic'] = False
hosttypes_helper.create_host_type(request, host_type_info)
return redirect('/clouds/hosttypes')
def get_host_types(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
host_types = hosttypes_helper.get_all(request, index, size)
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
provider_list = baseimages_helper.get_all_providers(request)
return render(request, 'clusters/host_types.html', {
'host_types': host_types,
'provider_list': provider_list,
'pageIndex': index,
'pageSize': DEFAULT_PAGE_SIZE,
'disablePrevious': index <= 1,
'disableNext': len(host_types) < DEFAULT_PAGE_SIZE,
})
def get_host_types_by_provider(request):
params = request.GET
provider = params['provider']
curr_host_type = None
if 'curr_host_type' in params:
curr_host_type = params['curr_host_type']
host_types = hosttypes_helper.get_by_provider(request, provider)
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
contents = render_to_string("clusters/get_host_type.tmpl", {
'host_types': host_types,
'curr_host_type': curr_host_type,
})
return HttpResponse(json.dumps(contents), content_type="application/json")
def get_host_type_info(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
host_types = hosttypes_helper.get_all(request, index, size)
for host_type in host_types:
host_type['mem'] = float(host_type['mem']) / 1024
return HttpResponse(json.dumps(host_types), content_type="application/json")
def create_security_zone(request):
params = request.POST
security_zone_info = {}
security_zone_info['abstract_name'] = params['abstractName']
security_zone_info['provider_name'] = params['providerName']
security_zone_info['provider'] = params['provider']
security_zone_info['description'] = params['description']
security_zone_info['cell_name'] = params.get('cellName', DEFAULT_CELL)
if 'basic' in params:
security_zone_info['basic'] = True
else:
security_zone_info['basic'] = False
securityzones_helper.create_security_zone(request, security_zone_info)
return redirect('/clouds/securityzones')
def get_security_zones(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
security_zones = securityzones_helper.get_all(request, index, size)
provider_list = baseimages_helper.get_all_providers(request)
cells_list = cells_helper.get_by_provider(request, DEFAULT_PROVIDER)
return render(request, 'clusters/security_zones.html', {
'security_zones': security_zones,
'provider_list': provider_list,
'cells_list': cells_list,
'pageIndex': index,
'pageSize': DEFAULT_PAGE_SIZE,
'disablePrevious': index <= 1,
'disableNext': len(security_zones) < DEFAULT_PAGE_SIZE,
})
def get_security_zones_by_provider(request):
params = request.GET
provider = params['provider']
curr_security_zone = None
if 'curr_security_zone' in params:
curr_security_zone = params['curr_security_zone']
cell = params.get('cell', DEFAULT_CELL)
security_zones = securityzones_helper.get_by_provider_and_cell_name(request, provider, cell)
contents = render_to_string("clusters/get_security_zone.tmpl", {
'security_zones': security_zones,
'curr_security_zone': curr_security_zone,
})
return HttpResponse(json.dumps(contents), content_type="application/json")
def get_security_zone_info(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
security_zones = securityzones_helper.get_all(request, index, size)
return HttpResponse(json.dumps(security_zones), content_type="application/json")
def create_placement(request):
params = request.POST
placement_info = {}
placement_info['abstract_name'] = params['abstractName']
placement_info['provider_name'] = params['providerName']
placement_info['provider'] = params['provider']
placement_info['description'] = params['description']
placement_info['cell_name'] = params.get('cellName', DEFAULT_CELL)
if 'basic' in params:
placement_info['basic'] = True
else:
placement_info['basic'] = False
placements_helper.create_placement(request, placement_info)
return redirect('/clouds/placements')
def get_placements(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
placements = placements_helper.get_all(request, index, size)
provider_list = baseimages_helper.get_all_providers(request)
cells_list = cells_helper.get_by_provider(request, DEFAULT_PROVIDER)
return render(request, 'clusters/placements.html', {
'placements': placements,
'provider_list': provider_list,
'cells_list': cells_list,
'pageIndex': index,
'pageSize': DEFAULT_PAGE_SIZE,
'disablePrevious': index <= 1,
'disableNext': len(placements) < DEFAULT_PAGE_SIZE,
})
def get_placements_by_provider(request):
params = request.GET
provider = params['provider']
cell = params.get('cell', DEFAULT_CELL)
curr_placement_arrays = None
if 'curr_placement' in params:
curr_placement = params['curr_placement']
curr_placement_arrays = curr_placement.split(',')
placements = placements_helper.get_by_provider_and_cell_name(request, provider, cell)
contents = render_to_string("clusters/get_placement.tmpl", {
'placements': placements,
'curr_placement_arrays': curr_placement_arrays,
})
return HttpResponse(json.dumps(contents), content_type="application/json")
def get_placement_infos(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
placements = placements_helper.get_all(request, index, size)
return HttpResponse(json.dumps(placements), content_type="application/json")
def parse_configs(query_dict):
configs = {}
for key, value in query_dict.iteritems():
if not value:
continue
if key.startswith('TELETRAAN_'):
name = key[len('TELETRAAN_'):]
configs[name] = value
return configs
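# Hedged usage sketch for parse_configs (hypothetical input, not taken from
# a real request):
#   parse_configs({'TELETRAAN_IAM_ROLE': 'base', 'TELETRAAN_EMPTY': '', 'foo': 'x'})
#   -> {'IAM_ROLE': 'base'}   # empty values and non-TELETRAAN_ keys are skipped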
def get_default_cmp_configs(name, stage):
config_map = {}
config_map['iam_role'] = 'base'
config_map['cmp_group'] = 'CMP,{}-{}'.format(name, stage)
config_map['pinfo_environment'] = DEFAULT_CMP_PINFO_ENVIRON
config_map['pinfo_team'] = 'cloudeng'
config_map['pinfo_role'] = 'cmp_base'
config_map['access_role'] = DEFAULT_CMP_ACCESS_ROLE
return config_map
def parse_cluster_info(request, env_name, env_stage, cluster_name):
params = request.POST
cluster_info = {}
cluster_info['capacity'] = params['capacity']
cluster_info['baseImageId'] = params['baseImageId']
cluster_info['provider'] = params['provider']
cluster_info['hostType'] = params['hostTypeId']
cluster_info['securityZone'] = params['securityZoneId']
cluster_info['placement'] = ",".join(params.getlist('placementId'))
# Update cluster name and isDocker in env
env_info = {}
env_info['clusterName'] = cluster_name
if 'isDocker' in params:
env_info['isDocker'] = True
else:
env_info['isDocker'] = False
environs_helper.update_env_basic_config(
request, env_name, env_stage, data=env_info)
return cluster_info
def delete_cluster(request, name, stage):
cluster_name = common.get_cluster_name(request, name, stage)
log.info("Delete cluster {}".format(cluster_name))
clusters_helper.delete_cluster(request, cluster_name)
# Remove group and env relationship
environs_helper.remove_env_capacity(
request, name, stage, capacity_type="GROUP", data=cluster_name)
return redirect('/env/{}/{}/config/capacity/'.format(name, stage))
def clone_cluster(request, src_name, src_stage):
try:
params = request.POST
dest_name = params.get('new_environment', src_name)
dest_stage = params.get('new_stage', src_stage + '_clone')
src_cluster_name = '{}-{}'.format(src_name, src_stage)
dest_cluster_name = '{}-{}'.format(dest_name, dest_stage)
##0. teletraan service get src env buildName
src_env = environs_helper.get_env_by_stage(request, src_name, src_stage)
build_name = src_env.get('buildName', None)
external_id = src_env.get('externalId', None)
##1. teletraan service create a new env
dest_env = environs_helper.create_env(request, {
'envName': dest_name,
'stageName': dest_stage,
'buildName': build_name,
'externalId': external_id
})
log.info('clone_cluster, created a new env %s' % dest_env)
##2. rodimus service get src_cluster config
src_cluster_info = clusters_helper.get_cluster(request, src_cluster_name)
log.info('clone_cluster, src cluster info %s' % src_cluster_info)
configs = src_cluster_info.get('configs')
if configs:
cmp_group = configs.get('cmp_group')
if cmp_group:
cmp_groups_set = set(cmp_group.split(','))
cmp_groups_set.remove(src_cluster_name)
cmp_groups_set.remove('CMP')
cmp_groups_set.add(dest_cluster_name)
# CMP needs to be the first in the list
configs['cmp_group'] = ','.join(['CMP'] + list(cmp_groups_set))
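                # Hypothetical illustration: 'CMP,srcenv-srcstage,extra'
                # becomes 'CMP,extra,destenv-deststage' (order after CMP is
                # arbitrary because a set is used; only CMP must stay first).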
src_cluster_info['configs'] = configs
##3. rodimus service post create cluster
src_cluster_info['clusterName'] = dest_cluster_name
src_cluster_info['capacity'] = 0
log.info('clone_cluster, request clone cluster info %s' % src_cluster_info)
dest_cluster_info = clusters_helper.create_cluster_with_env(request, dest_cluster_name, dest_name, dest_stage, src_cluster_info)
log.info('clone_cluster, cloned cluster info %s' % dest_cluster_info)
##4. teletraan service update_env_basic_config
environs_helper.update_env_basic_config(request, dest_name, dest_stage,
data={"clusterName": dest_cluster_name}
)
##5. teletraan service set up env and group relationship
environs_helper.update_env_capacity(request, dest_name, dest_stage, capacity_type="GROUP",
data=[dest_cluster_name])
##6. get src script_config
src_script_configs = environs_helper.get_env_script_config(request, src_name, src_stage)
src_agent_configs = environs_helper.get_env_agent_config(request, src_name, src_stage)
src_alarms_configs = environs_helper.get_env_alarms_config(request, src_name, src_stage)
src_metrics_configs = environs_helper.get_env_metrics_config(request, src_name, src_stage)
src_webhooks_configs = environs_helper.get_env_hooks_config(request, src_name, src_stage)
##8. clone all the extra configs
if src_agent_configs:
environs_helper.update_env_agent_config(request, dest_name, dest_stage, src_agent_configs)
if src_script_configs:
environs_helper.update_env_script_config(request, dest_name, dest_stage, src_script_configs)
if src_alarms_configs:
environs_helper.update_env_alarms_config(request, dest_name, dest_stage, src_alarms_configs)
if src_metrics_configs:
environs_helper.update_env_metrics_config(request, dest_name, dest_stage, src_metrics_configs)
if src_webhooks_configs:
environs_helper.update_env_hooks_config(request, dest_name, dest_stage, src_webhooks_configs)
return HttpResponse(json.dumps(src_cluster_info), content_type="application/json")
except NotAuthorizedException as e:
log.error("Have an NotAuthorizedException error {}".format(e))
return HttpResponse(e, status=403, content_type="application/json")
except Exception as e:
log.error("Failed to clone cluster env_name: %s, stage_name: %s" % (src_name, src_stage))
log.error(traceback.format_exc())
return HttpResponse(e, status=500, content_type="application/json")
def get_aws_config_name_list_by_image(image_name):
config_map = {}
config_map['iam_role'] = 'base'
config_map['assign_public_ip'] = 'true'
if IS_PINTEREST:
config_map['pinfo_environment'] = 'prod'
config_map['raid'] = 'true'
config_map['raid_mount'] = '/mnt'
config_map['raid_device'] = '/dev/md0'
config_map['raid_fs'] = 'xfs'
config_map['ebs'] = 'true'
config_map['ebs_size'] = 500
config_map['ebs_mount'] = '/backup'
config_map['ebs_volume_type'] = 'gp2'
config_map['root_volume_size'] = 100
if image_name == DEFAULT_CMP_IMAGE:
config_map['pinfo_role'] = 'cmp_base'
config_map['pinfo_team'] = 'cloudeng'
else:
config_map['pinfo_role'] = ''
config_map['pinfo_team'] = ''
return config_map
def launch_hosts(request, name, stage):
params = request.POST
num = int(params['num'])
cluster_name = common.get_cluster_name(request, name, stage)
clusters_helper.launch_hosts(request, cluster_name, num)
return redirect('/env/{}/{}/'.format(name, stage))
def terminate_hosts(request, name, stage):
get_params = request.GET
post_params = request.POST
host_ids = None
if 'host_id' in get_params:
host_ids = [get_params.get('host_id')]
if 'hostIds' in post_params:
hosts_str = post_params['hostIds']
host_ids = [x.strip() for x in hosts_str.split(',')]
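        # e.g. 'i-001, i-002,i-003' -> ['i-001', 'i-002', 'i-003']
        # (whitespace around commas is tolerated; ids here are hypothetical)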
environ_hosts_helper.stop_service_on_host(request, name, stage, host_ids)
return redirect('/env/{}/{}'.format(name, stage))
def force_terminate_hosts(request, name, stage):
get_params = request.GET
post_params = request.POST
host_ids = None
if 'host_id' in get_params:
host_ids = [get_params.get('host_id')]
if 'hostIds' in post_params:
hosts_str = post_params['hostIds']
host_ids = [x.strip() for x in hosts_str.split(',')]
if 'replaceHost' in post_params:
replace_host = True
else:
replace_host = False
cluster_name = common.get_cluster_name(request, name, stage)
if not cluster_name:
groups = environs_helper.get_env_capacity(
request, name, stage, capacity_type="GROUP")
for group_name in groups:
cluster_name = group_name
clusters_helper.force_terminate_hosts(
request, cluster_name, host_ids, replace_host)
return redirect('/env/{}/{}'.format(name, stage))
def enable_cluster_replacement(request, name, stage):
cluster_name = common.get_cluster_name(request, name, stage)
clusters_helper.enable_cluster_replacement(request, cluster_name)
return redirect('/env/{}/{}/config/capacity/'.format(name, stage))
def pause_cluster_replacement(request, name, stage):
cluster_name = common.get_cluster_name(request, name, stage)
clusters_helper.pause_cluster_replacement(request, cluster_name)
return redirect('/env/{}/{}/config/capacity/'.format(name, stage))
def resume_cluster_replacement(request, name, stage):
cluster_name = common.get_cluster_name(request, name, stage)
clusters_helper.resume_cluster_replacement(request, cluster_name)
return redirect('/env/{}/{}/config/capacity/'.format(name, stage))
def cancel_cluster_replacement(request, name, stage):
cluster_name = common.get_cluster_name(request, name, stage)
clusters_helper.cancel_cluster_replacement(request, cluster_name)
return redirect('/env/{}/{}/config/capacity/'.format(name, stage))
def get_replacement_summary(request, cluster_name, event, current_capacity):
host_ids = event.get('host_ids')
state = event.get('state')
status = event.get('status')
progress_type = 'success' if status in [
'SUCCEEDING', 'SUCCEEDED'] else 'danger'
if not host_ids:
num_finished_host_ids = 0
else:
num_finished_host_ids = len(host_ids.split(','))
if state == 'COMPLETED':
if status == 'SUCCEEDED':
# successful
succeeded = num_finished_host_ids
progress_rate = 100
msg = event.get('error_message', '')
return {
'id': event.get('id'),
'state': state,
'status': status,
'startDate': event.get('start_time'),
'lastUpdateDate': event.get('last_worked_on'),
'progressType': progress_type,
'progressTip': 'Among total {} hosts, {} successfully replaced and {} are pending'.format(
succeeded, succeeded, 0),
'successRatePercentage': progress_rate,
'successRate': '{}% ({}/{})'.format(progress_rate, succeeded, succeeded),
'description': msg
}
else:
# failed
succeeded = num_finished_host_ids
progress_rate = succeeded * 100 / current_capacity
msg = event.get('error_message', '')
return {
'id': event.get('id'),
'state': state,
'status': status,
'startDate': event.get('start_time'),
'lastUpdateDate': event.get('last_worked_on'),
'progressType': progress_type,
'progressTip': 'Among total {} hosts, {} successfully replaced and {} are pending. Reason: {}'.format(
current_capacity, succeeded, current_capacity - succeeded, msg),
'successRatePercentage': progress_rate,
'successRate': '{}% ({}/{})'.format(progress_rate, succeeded, current_capacity),
'description': msg
}
else:
# on-going event
replaced_and_succeeded_hosts = groups_helper.get_replaced_and_good_hosts(
request, cluster_name)
succeeded = len(replaced_and_succeeded_hosts)
progress_rate = succeeded * 100 / current_capacity
        # not necessarily an error message; for on-going events this field carries status text
on_going_msg = event.get('error_message')
return {
'id': event.get('id'),
'state': state,
'status': status,
'startDate': event.get('start_time'),
'lastUpdateDate': event.get('last_worked_on'),
'progressType': progress_type,
'progressTip': 'Among total {} hosts, {} successfully replaced and {} are pending. {}'.format(
current_capacity, succeeded, current_capacity - succeeded, on_going_msg),
'successRatePercentage': progress_rate,
'successRate': '{}% ({}/{})'.format(progress_rate, succeeded, current_capacity)
}
def cluster_replacement_progress(request, name, stage):
env = environs_helper.get_env_by_stage(request, name, stage)
cluster_name = '{}-{}'.format(name, stage)
replacement_event = clusters_helper.get_latest_cluster_replacement_progress(
request, cluster_name)
if not replacement_event:
log.info("There is no on-going replacement event for cluster %s." %
cluster_name)
return HttpResponse("There is no on-going replacement.")
# basic_cluster_info = clusters_helper.get_cluster(request, cluster_name)
# capacity = basic_cluster_info.get("capacity")
    # Do not use the cluster capacity here; when min != max it is not the right number, so use the ASG's desired capacity instead.
asg_summary = autoscaling_groups_helper.get_autoscaling_summary(request, cluster_name)
desired_capacity = None
if asg_summary:
desired_capacity = asg_summary.get("desiredCapacity")
if not desired_capacity:
error_msg = "cluster %s has wrong desired_capacity: %s, asg_summary: %s" % \
(cluster_name, desired_capacity, asg_summary)
log.error(error_msg)
return HttpResponse(error_msg, status=500, content_type="application/json")
replacement_progress = get_replacement_summary(
request, cluster_name, replacement_event, desired_capacity)
html = render_to_string('clusters/replace_progress.tmpl', {
"env": env,
"replace_progress_report": replacement_progress
})
response = HttpResponse(html)
return response
def cluster_replacement_details(request, name, stage):
cluster_name = '{}-{}'.format(name, stage)
replacement_event = clusters_helper.get_latest_cluster_replacement_progress(
request, cluster_name)
if not replacement_event:
return HttpResponse("{}", content_type="application/json")
return HttpResponse(json.dumps(replacement_event), content_type="application/json")
def view_cluster_replacement_details(request, name, stage, replacement_id):
env = environs_helper.get_env_by_stage(request, name, stage)
cluster_name = '{}-{}'.format(name, stage)
replacement_event = clusters_helper.get_cluster_replacement_info(
request, cluster_name, replacement_id)
if not replacement_event:
raise Exception("Replacement Id: %s Not Found.")
basic_cluster_info = clusters_helper.get_cluster(request, cluster_name)
capacity = basic_cluster_info.get("capacity")
replacement_details = get_replacement_summary(
request, cluster_name, replacement_event, capacity)
config_histories = clusters_helper.get_cluster_replacement_config_histories(
request, cluster_name, replacement_id)
return render(request, 'clusters/cluster_replace_details.html', {
"replace": replacement_details,
"config_histories": config_histories,
"env": env
})
def view_cluster_replacement_scaling_activities(request, name, stage):
cluster_name = '{}-{}'.format(name, stage)
scaling_activities = autoscaling_groups_helper.get_scaling_activities(
request, cluster_name, 20, '')
activities = json.dumps(scaling_activities["activities"])
return HttpResponse(activities, content_type="application/json")
def view_cluster_replacement_schedule(request, name, stage, replacement_id):
env = environs_helper.get_env_by_stage(request, name, stage)
cluster_name = '{}-{}'.format(name, stage)
schedule = clusters_helper.get_cluster_replacement_schedule(
request, cluster_name, replacement_id)
return render(request, 'clusters/replace_schedule.html', {
"env": env,
"schedule": schedule
})
class ClusterHistoriesView(View):
def get(self, request, name, stage):
env = environs_helper.get_env_by_stage(request, name, stage)
cluster_name = '{}-{}'.format(name, stage)
page_index = request.GET.get('index')
page_size = request.GET.get('size')
histories = clusters_helper.get_cluster_replacement_histories(
request, cluster_name, page_index, page_size)
replace_summaries = []
if histories:
basic_cluster_info = clusters_helper.get_cluster(
request, cluster_name)
capacity = basic_cluster_info.get("capacity")
for history in histories:
replace_summaries.append(get_replacement_summary(
request, cluster_name, history, capacity))
data = {
"env": env,
"replace_summaries": replace_summaries
}
return render(request, 'clusters/replace_histories.html', data)
|
yongwen/teletraan
|
deploy-board/deploy_board/webapp/cluster_view.py
|
Python
|
apache-2.0
| 42,506
|
#coding=utf-8
from fontTools.pens.basePen import BasePen
class AbstractShiftPen(BasePen):
"""
Move a glyph’s x and y coordinates by xShift and yShift respectively.
    In itself, the pen doesn’t ‘draw’ anything; it just collects coordinates,
adds xShift and yShift, and prints the resulting coordinates for each contour.
"""
def __init__(self, xShift=0, yShift=0):
self.xShift = xShift
self.yShift = yShift
self.contourCounter = 0
def _moveTo(self, pt):
self.newCoordinates = []
shiftedPoint, = self.shiftCoordinates(pt)
        self.newCoordinates.append(shiftedPoint)
def _lineTo(self, pt):
shiftedPoint, = self.shiftCoordinates(pt)
        self.newCoordinates.append(shiftedPoint)
def _curveToOne(self, pt1, pt2, pt3):
for pt in [pt1, pt2, pt3]:
shiftedPoint, = self.shiftCoordinates(pt)
            self.newCoordinates.append(shiftedPoint)
def endPath(self):
"""Print shifted coordinates for the currentContour as well as its index."""
print self.contourCounter
print '\n'.join([str(coordinates) for coordinates in self.newCoordinates])
print '\n'
self.contourCounter += 1
# As there’s no drawing happening with this pen, self.closePath() can do the same as self.endPath().
closePath = endPath
def shiftCoordinates(self, *points):
"""For given (x, y) coordinates, return said coordinates shifted by (xShift, yShift)."""
return [(x + self.xShift, y + self.yShift) for x, y in points]
class ShiftPen(AbstractShiftPen):
"""
Implementation of an AbstractShiftPen that actually draws the resulting shifted glyph.
Needs to be provided an external pen to do so.
"""
def __init__(self, otherPen, xShift=0, yShift=0):
super(ShiftPen, self).__init__(xShift, yShift)
self.otherPen = otherPen
def _moveTo(self, pt):
shiftedPoint, = self.shiftCoordinates(pt)
self.otherPen.moveTo(shiftedPoint)
def _lineTo(self, pt):
shiftedPoint, = self.shiftCoordinates(pt)
self.otherPen.lineTo(shiftedPoint)
def _curveToOne(self, pt1, pt2, pt3):
self.otherPen.curveTo(*self.shiftCoordinates(pt1, pt2, pt3))
def endPath(self):
self.otherPen.endPath()
def closePath(self):
self.otherPen.closePath()
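# A minimal usage sketch (not part of the original module; assumes a
# fontTools RecordingPen, or any pen-like object, is available):
#
#   from fontTools.pens.recordingPen import RecordingPen
#   out = RecordingPen()
#   pen = ShiftPen(out, xShift=50, yShift=-20)
#   glyph.draw(pen)   # 'glyph' is any object exposing draw(pen)
#   print out.value   # the shifted contours recorded by the outer pen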
|
loicsander/RobofabPens
|
examples/shiftPen.py
|
Python
|
mit
| 2,386
|
#from sys import argv
#script, filename = argv
#txt = open(filename)
#print "Here's your file %r:" % filename
#print txt.read()
cctxt = open("CentralCorridor_text.txt")
print cctxt.read()
|
mwweinberg/classgame
|
sketchpad.py
|
Python
|
mit
| 194
|
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import json
import os
from ansible.module_utils.k8s.common import DateTimeEncoder, remove_secret_data, to_snake
from ansible.module_utils.k8s.helper import AUTH_ARG_SPEC
try:
from openshift.helper.kubernetes import KubernetesObjectHelper
from openshift.helper.openshift import OpenShiftObjectHelper
from openshift.helper.exceptions import KubernetesException
HAS_K8S_MODULE_HELPER = True
except ImportError as exc:
HAS_K8S_MODULE_HELPER = False
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
class KubernetesLookup(object):
def __init__(self):
if not HAS_K8S_MODULE_HELPER:
raise Exception(
"Requires the OpenShift Python client. Try `pip install openshift`"
)
if not HAS_YAML:
raise Exception(
"Requires PyYAML. Try `pip install PyYAML`"
)
self.kind = None
self.name = None
self.namespace = None
self.api_version = None
self.label_selector = None
self.field_selector = None
self.include_uninitialized = None
self.resource_definition = None
self.helper = None
self.connection = {}
def run(self, terms, variables=None, **kwargs):
self.mylog('Here!')
self.kind = kwargs.get('kind')
self.name = kwargs.get('resource_name')
self.namespace = kwargs.get('namespace')
self.api_version = kwargs.get('api_version', 'v1')
self.label_selector = kwargs.get('label_selector')
self.field_selector = kwargs.get('field_selector')
self.include_uninitialized = kwargs.get('include_uninitialized', False)
resource_definition = kwargs.get('resource_definition')
src = kwargs.get('src')
if src:
resource_definition = self.load_resource_definition(src)
if resource_definition:
self.params_from_resource_definition(resource_definition)
if not self.kind:
raise Exception(
"Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
"using the 'resource_definition' parameter."
)
self.kind = to_snake(self.kind)
self.helper = self.get_helper()
for arg in AUTH_ARG_SPEC:
self.connection[arg] = kwargs.get(arg)
try:
self.helper.set_client_config(**self.connection)
except Exception as exc:
raise Exception(
"Client authentication failed: {0}".format(exc.message)
)
if self.name:
self.mylog("Calling get_object()")
return self.get_object()
return self.list_objects()
def mylog(self, msg):
with open('loggit.txt', 'a') as f:
f.write(msg + '\n')
def get_helper(self):
try:
helper = KubernetesObjectHelper(api_version=self.api_version, kind=self.kind, debug=False)
helper.get_model(self.api_version, self.kind)
return helper
except KubernetesException as exc:
raise Exception("Error initializing helper: {0}".format(exc.message))
def load_resource_definition(self, src):
""" Load the requested src path """
path = os.path.normpath(src)
if not os.path.exists(path):
raise Exception("Error accessing {0}. Does the file exist?".format(path))
try:
result = yaml.safe_load(open(path, 'r'))
except (IOError, yaml.YAMLError) as exc:
raise Exception("Error loading resource_definition: {0}".format(exc))
return result
def params_from_resource_definition(self, defn):
if defn.get('apiVersion'):
self.api_version = defn['apiVersion']
if defn.get('kind'):
self.kind = defn['kind']
if defn.get('metadata', {}).get('name'):
self.name = defn['metadata']['name']
if defn.get('metadata', {}).get('namespace'):
self.namespace = defn['metadata']['namespace']
def get_object(self):
""" Fetch a named object """
try:
result = self.helper.get_object(self.name, self.namespace)
except KubernetesException as exc:
raise Exception('Failed to retrieve requested object: {0}'.format(exc.message))
self.mylog("Got restult")
response = []
if result is not None:
# Convert Datetime objects to ISO format
result_json = json.loads(json.dumps(result.to_dict(), cls=DateTimeEncoder))
if self.kind == 'secret':
remove_secret_data(result_json)
response.append(result_json)
return response
def list_objects(self):
""" Query for a set of objects """
if self.namespace:
method_name = 'list_namespaced_{0}'.format(self.kind)
try:
method = self.helper.lookup_method(method_name=method_name)
except KubernetesException:
raise Exception(
"Failed to find method {0} for API {1}".format(method_name, self.api_version)
)
else:
method_name = 'list_{0}_for_all_namespaces'.format(self.kind)
try:
method = self.helper.lookup_method(method_name=method_name)
except KubernetesException:
method_name = 'list_{0}'.format(self.kind)
try:
method = self.helper.lookup_method(method_name=method_name)
except KubernetesException:
raise Exception(
"Failed to find method for API {0} and Kind {1}".format(self.api_version, self.kind)
)
params = {}
if self.field_selector:
params['field_selector'] = self.field_selector
if self.label_selector:
params['label_selector'] = self.label_selector
params['include_uninitialized'] = self.include_uninitialized
if self.namespace:
try:
result = method(self.namespace, **params)
except KubernetesException as exc:
raise Exception(exc.message)
else:
try:
result = method(**params)
except KubernetesException as exc:
raise Exception(exc.message)
response = []
if result is not None:
# Convert Datetime objects to ISO format
result_json = json.loads(json.dumps(result.to_dict(), cls=DateTimeEncoder))
response = result_json.get('items', [])
if self.kind == 'secret':
for item in response:
remove_secret_data(item)
return response
class OpenShiftLookup(KubernetesLookup):
def get_helper(self):
try:
helper = OpenShiftObjectHelper(api_version=self.api_version, kind=self.kind, debug=False)
helper.get_model(self.api_version, self.kind)
return helper
except KubernetesException as exc:
raise Exception("Error initializing helper: {0}".format(exc.message))
|
cryptobanana/ansible
|
lib/ansible/module_utils/k8s/lookup.py
|
Python
|
gpl-3.0
| 7,977
|
import os, sys
if __name__ == '__main__':
execfile(os.path.join(sys.path[0], 'framework.py'))
from Products.COBAInternship.tests.COBAInternshipTestCase import COBAInternshipTestCase
from Products.CMFCore.WorkflowCore import WorkflowException
import transaction
class TestCOBAInternship(COBAInternshipTestCase):
"""Ensure product is properly installed"""
def createApp(self):
self.login(self._default_user)
self.portal.invokeFactory(type_name="COBAInternship", id="testapplication")
app = self.portal['testapplication']
self.fill_out_application(app)
transaction.commit()
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestCOBAInternship))
return suite
if __name__ == '__main__':
framework()
|
uwosh/COBAInternship
|
tests/testCOBAInternshipApplication.py
|
Python
|
gpl-2.0
| 833
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class JobsOperations:
"""JobsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
device_name: str,
name: str,
resource_group_name: str,
**kwargs
) -> "_models.Job":
"""Gets the details of a specified job on a Data Box Edge/Data Box Gateway device.
Gets the details of a specified job on a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param name: The job name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Job, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2019_08_01.models.Job
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/jobs/{name}'} # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2019_08_01/aio/operations/_jobs_operations.py
|
Python
|
mit
| 4,734
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
# DEBUG = False
## webserver host and port
# HOST = '0.0.0.0'
# PORT = 5000
SECRET = 'foobar'
SECRET_KEY = 'my-session-secret'
SQLALCHEMY_DATABASE_URI = 'postgresql://pybossa:tester@dbpostgres/pybossa'
##Slave configuration for DB
#SQLALCHEMY_BINDS = {
# 'slave': 'postgresql://user:password@server/db'
#}
ITSDANGEROUSKEY = 'its-dangerous-key'
## project configuration
BRAND = 'PyBossa'
TITLE = 'PyBossa'
LOGO = 'default_logo.png'
COPYRIGHT = 'Set Your Institution'
DESCRIPTION = 'Set the description in your config'
TERMSOFUSE = 'http://okfn.org/terms-of-use/'
DATAUSE = 'http://opendatacommons.org/licenses/by/'
CONTACT_EMAIL = 'info@pybossa.com'
CONTACT_TWITTER = 'PyBossa'
## Default number of projects per page
## APPS_PER_PAGE = 20
## External Auth providers
# TWITTER_CONSUMER_KEY=''
# TWITTER_CONSUMER_SECRET=''
# FACEBOOK_APP_ID=''
# FACEBOOK_APP_SECRET=''
# GOOGLE_CLIENT_ID=''
# GOOGLE_CLIENT_SECRET=''
## Supported Languages
## NOTE: You need to create a symbolic link to the translations folder, otherwise
## this won't work.
## ln -s pybossa/themes/your-theme/translations pybossa/translations
# LOCALES = ['en', 'es', 'fr', 'de', 'it']
## list of administrator emails to which error emails get sent
# ADMINS = ['me@sysadmin.org']
## CKAN URL for API calls
#CKAN_NAME = "Demo CKAN server"
#CKAN_URL = "http://demo.ckan.org"
## logging config
# Sentry configuration
# SENTRY_DSN=''
## set path to enable
# LOG_FILE = '/path/to/log/file'
## Optional log level
# import logging
# LOG_LEVEL = logging.DEBUG
## Mail setup
MAIL_SERVER = 'localhost'
MAIL_USERNAME = None
MAIL_PASSWORD = None
MAIL_PORT = 25
MAIL_FAIL_SILENTLY = False
MAIL_DEFAULT_SENDER = 'PyBossa Support <info@pybossa.com>'
## Announcement messages
## Use any combination of the next type of messages: root, user, and app owners
## ANNOUNCEMENT = {'admin': 'Root Message', 'user': 'User Message', 'owner': 'Owner Message'}
## Enforce Privacy Mode, by default is disabled
## This config variable will disable all related user pages except for admins
## Stats, top users, leaderboard, etc
ENFORCE_PRIVACY = False
## Cache setup. By default it is enabled
## Redis Sentinel
# List of Sentinel servers (IP, port)
REDIS_CACHE_ENABLED = True
REDIS_SENTINEL = [('dbredissentinel', 26379)]
REDIS_MASTER = 'mymaster'
REDIS_DB = 0
REDIS_KEYPREFIX = 'pybossa_cache'
## Allowed upload extensions
ALLOWED_EXTENSIONS = ['js', 'css', 'png', 'jpg', 'jpeg', 'gif', 'zip']
## If you want to use the local uploader configure which folder
UPLOAD_METHOD = 'local'
UPLOAD_FOLDER = 'uploads'
## If you want to use Rackspace for uploads, configure it here
# RACKSPACE_USERNAME = 'username'
# RACKSPACE_API_KEY = 'apikey'
# RACKSPACE_REGION = 'ORD'
## Default number of users shown in the leaderboard
# LEADERBOARD = 20
## Default shown presenters
# PRESENTERS = ["basic", "image", "sound", "video", "map", "pdf"]
# Default Google Docs spreadsheet template tasks URLs
TEMPLATE_TASKS = {
'image': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdHFEN29mZUF0czJWMUhIejF6dWZXdkE&usp=sharing",
'sound': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEczcWduOXRUb1JUc1VGMmJtc2xXaXc&usp=sharing",
'video': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZ2UGhxSTJjQl9YNVhfUVhGRUdoRWc&usp=sharing",
'map': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZnbjdwcnhKRVNlN1dGXy0tTnNWWXc&usp=sharing",
'pdf': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEVVamc0R0hrcjlGdXRaUXlqRXlJMEE&usp=sharing"}
# Expiration time for password protected project cookies
PASSWD_COOKIE_TIMEOUT = 60 * 30
# Expiration time for account confirmation / password recovery links
ACCOUNT_LINK_EXPIRATION = 5 * 60 * 60
## Ratelimit configuration
# LIMIT = 300
# PER = 15 * 60
# Disable new account confirmation (via email)
ACCOUNT_CONFIRMATION_DISABLED = True
# Mailchimp API key
# MAILCHIMP_API_KEY = "your-key"
# MAILCHIMP_LIST_ID = "your-list-ID"
# Flickr API key and secret
# FLICKR_API_KEY = 'your-key'
# FLICKR_SHARED_SECRET = 'your-secret'
# DROPBOX APP KEY
# DROPBOX_APP_KEY = 'your-key'
# Send emails weekly update every
# WEEKLY_UPDATE_STATS = 'Sunday'
|
littleq0903/build_pybossa_docker
|
conf/settings_local.py
|
Python
|
mit
| 4,934
|
"""Add a column user_id which will be used to persist uuid.
Revision ID: 8608c3fa6d42
Revises: 62010067944d
Create Date: 2020-12-28 18:42:36.142944
"""
# revision identifiers, used by Alembic.
revision = '8608c3fa6d42'
down_revision = '62010067944d'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
"""Upgrade the database to a newer revision."""
op.add_column('stack_analyses_request', sa.Column('user_id',
postgresql.UUID(as_uuid=True), nullable=True))
# ### end Alembic commands ###
def downgrade():
"""Downgrade the database to an older revision."""
op.drop_column('stack_analyses_request', 'user_id')
# ### end Alembic commands ###
|
fabric8-analytics/fabric8-analytics-worker
|
alembic/versions/8608c3fa6d42_add_a_column_user_id_which_will_be_used_.py
|
Python
|
gpl-3.0
| 778
|
#!/usr/bin/python
from flask import Flask, request
from flask_debugtoolbar import DebugToolbarExtension
from linode import api as l
from cloudflare import CloudFlare as CF
import os
import yaml
config = yaml.load(open('config.yml'))
app = Flask(__name__)
app.debug = config['DYN_DEBUG'] == '1'
if app.debug:
app.config['SECRET_KEY'] = config['DYN_DBG_SECRET']
toolbar = DebugToolbarExtension(app)
@app.route('/update')
def update():
ip = request.remote_addr
print('Received new ip: ' + ip)
status = update_ip(service=config['DYN_SRV_TYPE'],
ip=ip,
root=config['DYN_DOMAIN'],
name=config['DYN_RES'],
rec_type=config['DYN_TYPE'])
if status == -1:
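        # status == -1 means the update failed; currently ignored and the
        # caller still receives the IP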
pass
return ip
def update_ip(service, ip, root, name, rec_type):
status = -1
if service == 'linode':
status = linode_update_ip(ip, root, name, rec_type)
elif service == 'cf':
status = cf_update_ip(ip, root, name, rec_type)
return status
def linode_update_ip(ip, root, name, rec_type='cname'):
linode = l.Api(key=config['DYN_LINODE_KEY'])
domain_id = -1
domain_list = linode.domain_list()
for d in domain_list:
if d['DOMAIN'] == root:
domain_id = d['DOMAINID']
status = -1
if domain_id != -1:
# found a domain_id
res = linode.domain_resource_list(domainid=domain_id)
found = False
resource_id = -1
for r in res:
if r['TYPE'] == rec_type and r['NAME'] == name:
found = True
resource_id = r['RESOURCEID']
break
if found and resource_id != -1:
linode.domain_resource_update(domainid=domain_id,
resourceid=resource_id,
target=ip)
status = 0
else:
linode.domain_resource_create(domainid=domain_id,
name=name,
type=rec_type,
target=ip)
status = 0
return status
def cf_update_ip(ip, root, name, rec_type='cname'):
status = -1
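    # service_mode toggles CloudFlare proxying: A/AAAA records carrying the
    # raw dynamic IP are kept DNS-only (0), other record types stay proxied (1).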
service_mode = 1
if rec_type == 'A' or rec_type == 'AAAA':
service_mode = 0
target_name = name + '.' + root
cf = CF(config['DYN_CF_EMAIL'], config['DYN_CF_KEY'])
domain_list = cf.rec_load_all(z=root)['response']['recs']['objs']
create_new = True
for d in domain_list:
if d['name'] == target_name:
cf.rec_edit(z=root, _type=rec_type, _id=d['rec_id'], name=name, content=ip, service_mode=service_mode)
create_new = False
status = 0
break
if create_new:
cf.rec_new(zone=root, _type=rec_type, content=ip, name=target_name, service_mode=service_mode)
status = 0
return status
if __name__ == '__main__':
app.run(host='::', port=5000)
|
ashneo76/dynip
|
app.py
|
Python
|
mit
| 3,009
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import tempfile
import numpy
import h5py
from pyscf import gto, scf, ao2mo
'''
Integral transformation for irregular operators
'''
mol = gto.M(
verbose = 0,
atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'ccpvdz',
)
mf = scf.RHF(mol)
e = mf.scf()
print('E = %.15g, ref -76.0267656731' % e)
#
# Given four MOs, compute the MO-integral gradients
#
gradtmp = tempfile.NamedTemporaryFile()
nocc = mol.nelectron // 2
nvir = len(mf.mo_energy) - nocc
co = mf.mo_coeff[:,:nocc]
cv = mf.mo_coeff[:,nocc:]
# Note the AO integrals cint2e_ip1_sph have 3 components (x,y,z) and only have
# permutation symmetry k>=l.
ao2mo.kernel(mol, (co,cv,co,cv), gradtmp.name, intor='cint2e_ip1_sph',
aosym='s2kl')#, verbose=5)
feri = h5py.File(gradtmp.name, 'r')
grad = feri['eri_mo']
print('gradient integrals (d/dR i j|kl) have shape %s == (3,%dx%d,%dx%d)'
% (str(grad.shape), nocc,nvir,nocc,nvir))
#
# Hessian integrals have 9 components
# 1 d/dX d/dX
# 2 d/dX d/dY
# 3 d/dX d/dZ
# 4 d/dY d/dX
# 5 d/dY d/dY
# 6 d/dY d/dZ
# 7 d/dZ d/dX
# 8 d/dZ d/dY
# 9 d/dZ d/dZ
#
orb = mf.mo_coeff
hesstmp = tempfile.NamedTemporaryFile()
ao2mo.kernel(mol, orb, hesstmp.name, intor='cint2e_ipvip1_sph',
dataname='hessints1', aosym='s4')
with ao2mo.load(hesstmp, 'hessints1') as eri:
print('(d/dR i d/dR j| kl) have shape %s due to the 4-fold permutation '
'symmetry i >= j, k >= l' % str(eri.shape))
ao2mo.kernel(mol, orb, hesstmp.name, intor='cint2e_ipip1_sph',
dataname='hessints2', aosym='s2kl')
feri = h5py.File(hesstmp.name, 'r')
print('(d/dR d/dR i j| kl) have shape %s due to the 2-fold permutation '
'symmetry k >= l' % str(feri['hessints2'].shape))
feri.close()
with ao2mo.load(ao2mo.kernel(mol, orb, hesstmp.name, intor='cint2e_ip1ip2_sph',
aosym='s1')) as eri:
print('(d/dR i j|d/dR k l) have shape %s because there is no permutation '
'symmetry' % str(eri.shape))
|
gkc1000/pyscf
|
examples/ao2mo/20-eri_grad_hess.py
|
Python
|
apache-2.0
| 2,223
|
#!/usr/bin/python
# Copyright (C) Vladimir Prus 2005.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE.txt or copy at
# https://www.bfgroup.xyz/b2/LICENSE.txt)
# Tests that we can build a project when the current directory is outside of
# that project tree, that is 'bjam some_dir' works.
import BoostBuild
# Create a temporary working directory.
t = BoostBuild.Tester(use_test_config=False)
# Create the needed files.
t.write("p1/jamroot.jam", "exe hello : hello.cpp ;")
t.write("p1/hello.cpp", "int main() {}\n")
t.write("p2/jamroot.jam", """\
exe hello2 : hello.cpp ;
exe hello3 : hello.cpp ;
""")
t.write("p2/hello.cpp", "int main() {}\n")
t.run_build_system(["p1", "p2//hello3"])
t.expect_addition("p1/bin/$toolset/debug*/hello.exe")
t.expect_addition("p2/bin/$toolset/debug*/hello3.exe")
t.cleanup()
|
davehorton/drachtio-server
|
deps/boost_1_77_0/tools/build/test/out_of_tree.py
|
Python
|
mit
| 858
|
import cherrypy
import logging
from hqlib.sql.models import Worker
import datetime
from hqmanager import unix_time_millis
class WorkerAPIController(object):
exposed = True
def __init__(self, database):
self.logger = logging.getLogger("hq.manager.api.worker")
self.database = database
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.worker.get")
def GET(self, framework=None, target=None, datacenter=None):
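        """Return all non-deleted workers, optionally filtered by
        framework, target and datacenter."""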
workers = []
with self.database.session() as session:
worker_objects = session.query(Worker).filter(Worker.deleted == False).order_by(Worker.id.asc())
if framework is not None:
worker_objects = worker_objects.filter(Worker.framework == framework)
if target is not None:
worker_objects = worker_objects.filter(Worker.target == target)
if datacenter is not None:
worker_objects = worker_objects.filter(Worker.datacenter == datacenter)
for worker in worker_objects:
data = {'id': worker.id,
'target': worker.target,
'framework': worker.framework,
'datacenter': worker.datacenter,
'tags': worker.tags,
'created_at': unix_time_millis(worker.created_at),
'updated_at': unix_time_millis(worker.updated_at)}
workers.append(data)
return {"workers": workers}
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.worker.delete")
def DELETE(self, id):
# TODO: delete the exchange
with self.database.session() as session:
worker = session.query(Worker).filter(Worker.id == id).filter(Worker.deleted == False).first()
if worker is None:
raise cherrypy.HTTPError(404, "Unknown worker")
worker.deleted = True
worker.deleted_at = datetime.datetime.now()
session.commit()
return {str(worker.id): "deleted"}
|
herqles-io/hq-manager
|
src/hqmanager/api/worker.py
|
Python
|
mit
| 2,108
|
'''
Created on Aug 26, 2014
@author: moloyc
'''
import unittest
from sqlalchemy import exc
from jnpr.openclos.dao import Dao
from jnpr.openclos.model import Pod, Device, Interface, InterfaceLogical, InterfaceDefinition
class TestDao(unittest.TestCase):
def setUp(self):
'''Creates Dao with in-memory DB'''
self.conf = {}
self.conf['dbUrl'] = 'sqlite:///'
def tearDown(self):
pass
def testInvalidConfig(self):
with self.assertRaises(ValueError):
dao = Dao({})
def testCreateObjects(self):
from test_model import createDevice
#self.conf['debugSql'] = True
dao = Dao(self.conf)
session = dao.Session()
device = createDevice(session, "test")
ifd1 = InterfaceDefinition('ifd1', device, 'downlink')
ifd2 = InterfaceDefinition('ifd2', device, 'downlink')
ifd3 = InterfaceDefinition('ifd1', device, 'downlink')
ifd4 = InterfaceDefinition('ifd2', device, 'downlink')
dao.createObjects([ifd1, ifd2, ifd3, ifd4])
self.assertEqual(4, len(dao.getAll(InterfaceDefinition)))
self.assertEqual(2, len(dao.getObjectsByName(InterfaceDefinition, 'ifd1')))
self.assertEqual(2, len(dao.getObjectsByName(InterfaceDefinition, 'ifd2')))
def testDeleteNonExistingPod(self):
dao = Dao(self.conf)
dict = {}
pod = Pod('unknown', **dict)
with self.assertRaises(exc.InvalidRequestError):
dao.deleteObject(pod)
def testCascadeDeletePodDevice(self):
from test_model import createDevice
#self.conf['debugSql'] = True
dao = Dao(self.conf)
session = dao.Session()
device = createDevice(session, "test")
self.assertEqual(1, len(dao.getAll(Pod)))
self.assertEqual(1, len(dao.getAll(Device)))
dao.deleteObject(device.pod)
self.assertEqual(0, len(dao.getAll(Pod)))
self.assertEqual(0, len(dao.getAll(Device)))
def testCascadeDeletePodDeviceInterface(self):
from test_model import createInterface
#self.conf['debugSql'] = True
dao = Dao(self.conf)
session = dao.Session()
interface = createInterface(session, "test")
self.assertEqual(1, len(dao.getAll(Pod)))
self.assertEqual(1, len(dao.getAll(Device)))
self.assertEqual(1, len(dao.getAll(Interface)))
dao.deleteObject(interface.device.pod)
self.assertEqual(0, len(dao.getAll(Pod)))
self.assertEqual(0, len(dao.getAll(Device)))
self.assertEqual(0, len(dao.getAll(Interface)))
|
sysbot/OpenClos
|
jnpr/openclos/tests/unit/test_dao.py
|
Python
|
apache-2.0
| 2,662
|
# -*- coding: utf-8 -*-
#
# CampbellSiegert.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Campbell & Siegert approximation example
----------------------------------------
Example script that applies Campbell's theorem and Siegert's rate
approximation to an integrate-and-fire neuron.
This script calculates the firing rate of an integrate-and-fire neuron
in response to a series of Poisson generators, each specified with a
rate and a synaptic weight. The calculated rate is compared with a
simulation using the iaf_psc_alpha model.
Sven Schrader, Nov 2008, Siegert implementation by Tom Tetzlaff
'''
from scipy.special import erf
from scipy.optimize import fmin
import numpy as np
import nest
'''
We first set the parameters of neurons, noise and the simulation.
First settings are with a single Poisson source, second is with two
Poisson sources with half the rate of the single source. Both should
lead to the same results.
'''
weights = [0.1] # (mV) psp amplitudes
rates = [10000.] # (1/s) rate of Poisson sources
# weights = [0.1, 0.1] # (mV) psp amplitudes
# rates = [5000., 5000.] # (1/s) rate of Poisson sources
C_m = 250.0 # (pF) capacitance
E_L = -70.0 # (mV) resting potential
I_e = 0.0 # (nA) external current
V_reset = -70.0 # (mV) reset potential
V_th = -55.0 # (mV) firing threshold
t_ref = 2.0 # (ms) refractory period
tau_m = 10.0 # (ms) membrane time constant
tau_syn_ex = .5 # (ms) excitatory synaptic time constant
tau_syn_in = 2.0 # (ms) inhibitory synaptic time constant
simtime = 20000 # (ms) duration of simulation
n_neurons = 10 # number of simulated neurons
'''
For convenience we define some units.
'''
pF = 1e-12
ms = 1e-3
pA = 1e-12
mV = 1e-3
mu = 0.0
sigma2 = 0.0
J = []
assert(len(weights) == len(rates))
'''
In the following we analytically compute the firing rate of the
neuron based on Campbell's theorem [1] and Siegert's approximation [2].
References:
[1] Papoulis A (1991) **Probability, Random Variables, and
Stochastic Processes**, *McGraw-Hill*
[2] Siegert AJ (1951) **On the first passage time probability problem**,
*Phys Rev 81: 617-623*
'''
for rate, weight in zip(rates, weights):
if weight > 0:
tau_syn = tau_syn_ex
else:
tau_syn = tau_syn_in
t_psp = np.arange(0., 10. * (tau_m * ms + tau_syn * ms), 0.0001)
'''
We define the form of a single PSP, which allows us to match the
maximal value to our chosen weight.
'''
def psp(x):
return - ((C_m * pF) / (tau_syn * ms) * (1 / (C_m * pF)) *
(np.exp(1) / (tau_syn * ms)) *
(((-x * np.exp(-x / (tau_syn * ms))) /
(1 / (tau_syn * ms) - 1 / (tau_m * ms))) +
(np.exp(-x / (tau_m * ms)) - np.exp(-x / (tau_syn * ms))) /
((1 / (tau_syn * ms) - 1 / (tau_m * ms)) ** 2)))
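    # psp() returns the *negative* of the PSP so that fmin() below locates
    # the maximum; fudge then rescales the PSC so the PSP peak equals the weight.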
min_result = fmin(psp, [0], full_output=1, disp=0)
'''
We need to calculate the PSC amplitude (i.e., the weight we set in NEST)
from the PSP amplitude, that we have specified above.
'''
fudge = -1. / min_result[1]
J.append(C_m * weight / (tau_syn) * fudge)
'''
We now use Campbell's theorem to calculate mean and variance of
the input due to the Poisson sources. The mean and variance add up
for each Poisson source.
'''
mu += (rate * (J[-1] * pA) * (tau_syn * ms) *
np.exp(1) * (tau_m * ms) / (C_m * pF))
sigma2 += rate * (2 * tau_m * ms + tau_syn * ms) * \
(J[-1] * pA * tau_syn * ms * np.exp(1) * tau_m * ms /
(2 * (C_m * pF) * (tau_m * ms + tau_syn * ms))) ** 2
mu += (E_L * mV)
sigma = np.sqrt(sigma2)
'''
Having calculated the mean and variance of the input, we can now employ
Siegert's rate approximation.
'''
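# The loop below evaluates Siegert's first-passage-time formula numerically,
#   r = 1 / ( t_ref + tau_m * sqrt(pi) *
#             integral from (E_L - mu)/(sigma*sqrt(2))
#                      to   (V_th - mu)/(sigma*sqrt(2))
#             of exp(u**2) * (1 + erf(u)) du ),
# using a simple rectangle rule with num_iterations + 1 sample points.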
num_iterations = 100
upper = (V_th * mV - mu) / sigma / np.sqrt(2)
lower = (E_L * mV - mu) / sigma / np.sqrt(2)
interval = (upper - lower) / num_iterations
tmpsum = 0.0
for cu in range(0, num_iterations + 1):
u = lower + cu * interval
f = np.exp(u ** 2) * (1 + erf(u))
tmpsum += interval * np.sqrt(np.pi) * f
r = 1. / (t_ref * ms + tau_m * ms * tmpsum)
'''
We now simulate neurons receiving Poisson spike trains as input,
and compare the theoretical result to the empirical value.
'''
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
neurondict = {'V_th': V_th, 'tau_m': tau_m, 'tau_syn_ex': tau_syn_ex,
'tau_syn_in': tau_syn_in, 'C_m': C_m, 'E_L': E_L, 't_ref': t_ref,
'V_m': E_L, 'V_reset': E_L}
'''
Neurons and devices are instantiated. We set a very high threshold on
one extra neuron so that it never spikes and we can record its free
membrane potential. In addition we choose a small recording interval
for the voltmeter to collect good statistics.
'''
nest.SetDefaults('iaf_psc_alpha', neurondict)
n = nest.Create('iaf_psc_alpha', n_neurons)
n_free = nest.Create('iaf_psc_alpha', 1, [{'V_th': 1e12}])
pg = nest.Create('poisson_generator', len(rates),
[{'rate': float(rate_i)} for rate_i in rates])
vm = nest.Create('voltmeter', 1, [{'interval': .1}])
sd = nest.Create('spike_detector', 1)
'''
We connect devices and neurons and start the simulation.
'''
for i, currentpg in enumerate(pg):
nest.Connect([currentpg], n,
syn_spec={'weight': float(J[i]), 'delay': 0.1})
nest.Connect([currentpg], n_free,
syn_spec={'weight': J[i]})
nest.Connect(vm, n_free)
nest.Connect(n, sd)
nest.Simulate(simtime)
'''
Here we read out the recorded membrane potential. The first 500
steps are omitted so initial transients do not perturb our results.
We then print the results from theory and simulation.
'''
v_free = nest.GetStatus(vm, 'events')[0]['V_m'][500:-1]
print('mean membrane potential (actual / calculated): {0} / {1}'
.format(np.mean(v_free), mu * 1000))
print('variance (actual / calculated): {0} / {1}'
.format(np.var(v_free), sigma2 * 1e6))
print('firing rate (actual / calculated): {0} / {1}'
.format(nest.GetStatus(sd, 'n_events')[0] /
(n_neurons * simtime * ms), r))
|
mdjurfeldt/nest-simulator
|
pynest/examples/CampbellSiegert.py
|
Python
|
gpl-2.0
| 6,677
|
#Exercise 1.7: Find error(s) in a program
#Author: Andreas Solberg Sagen - University of Oslo
|
exTerEX/PrimeOnScientificProgramming
|
Chapter 1/find_errors_sin1.py
|
Python
|
mit
| 94
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import paramiko.ssh_exception as exc
from fuel_health.common.ssh import Client as SSHClient
from fuel_health import exceptions
from fuel_health import nmanager
LOG = logging.getLogger(__name__)
class SanityConfigurationTest(nmanager.SanityChecksTest):
"""TestClass contains tests for default creadentials usage.
Special requirements:
1. A controller's IP address should be specified.
2. A compute's IP address should be specified.
3. SSH user credentials for the controller and the compute
should be specified in the controller_node_ssh_user parameter
"""
@classmethod
def setUpClass(cls):
super(SanityConfigurationTest, cls).setUpClass()
cls.controllers = cls.config.compute.online_controllers
cls.computes = cls.config.compute.online_computes
cls.usr = cls.config.compute.controller_node_ssh_user
cls.pwd = cls.config.compute.controller_node_ssh_password
cls.key = cls.config.compute.path_to_private_key
cls.timeout = cls.config.compute.ssh_timeout
@classmethod
def tearDownClass(cls):
pass
def test_001_check_default_master_node_credential_usage(self):
"""Check usage of default credentials on master node
Target component: Configuration
Scenario:
1. Check user can not ssh on master node with default credentials.
Duration: 20 s.
Available since release: 2014.2-6.1
"""
ssh_client = SSHClient('localhost',
self.usr,
self.pwd,
timeout=self.timeout)
cmd = "date"
output = []
try:
output = ssh_client.exec_command(cmd)
LOG.debug(output)
        except (exceptions.SSHExecCommandFailed,
                exceptions.TimeoutException,
                exc.SSHException):
            pass
        self.verify_response_true(len(output) == 0,
                                  'Step 1 failed: Default credentials for '
                                  'ssh on master node were not changed')
def test_002_check_default_openstack_credential_usage(self):
"""Check usage of default credentials for Openstack cluster
Target component: Configuration
Scenario:
1. Check default credentials for Openstack cluster are changed.
Duration: 20 s.
Available since release: 2014.2-6.1
"""
cluster_data = {
'password': self.config.identity.admin_password,
'username': self.config.identity.admin_username,
'tenant': self.config.identity.admin_tenant_name}
for key in cluster_data:
self.verify_response_body_not_equal(
exp_content='admin',
act_content=cluster_data[key],
                msg='Default credential value for {0} is in use. '
                    'We kindly recommend changing all defaults'.format(key),
failed_step='1')
|
mcloudv/fuel-ostf
|
fuel_health/tests/configuration/test_configuration.py
|
Python
|
apache-2.0
| 4,244
|
import bpy
import arm.assets as assets
import arm.utils
import arm.log as log
import arm.make_state as state
import arm.api
callback = None
def add_world_defs():
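    """Append the shader defines implied by the render-path settings to
    wrd.world_defs (and register matching khafile defines)."""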
wrd = bpy.data.worlds['Arm']
rpdat = arm.utils.get_rp()
# Screen-space ray-traced shadows
if rpdat.arm_ssrs:
wrd.world_defs += '_SSRS'
if rpdat.arm_micro_shadowing:
wrd.world_defs += '_MicroShadowing'
if rpdat.arm_two_sided_area_light:
wrd.world_defs += '_TwoSidedAreaLight'
# Store contexts
if rpdat.rp_hdr == False:
wrd.world_defs += '_LDR'
if wrd.arm_light_ies_texture != '':
wrd.world_defs += '_LightIES'
assets.add_embedded_data('iestexture.png')
if wrd.arm_light_clouds_texture != '':
wrd.world_defs += '_LightClouds'
assets.add_embedded_data('cloudstexture.png')
if rpdat.rp_renderer == 'Deferred':
assets.add_khafile_def('arm_deferred')
wrd.world_defs += '_Deferred'
# Shadows
if rpdat.rp_shadows:
wrd.world_defs += '_ShadowMap'
if rpdat.rp_shadowmap_cascades != '1':
wrd.world_defs += '_CSM'
assets.add_khafile_def('arm_csm')
# SS
if rpdat.rp_ssgi == 'RTGI' or rpdat.rp_ssgi == 'RTAO':
if rpdat.rp_ssgi == 'RTGI':
wrd.world_defs += '_RTGI'
if rpdat.arm_ssgi_rays == '9':
wrd.world_defs += '_SSGICone9'
if rpdat.rp_autoexposure:
wrd.world_defs += '_AutoExposure'
has_voxels = arm.utils.voxel_support()
if rpdat.rp_voxelao and has_voxels and rpdat.arm_material_model == 'Full':
wrd.world_defs += '_VoxelCones' + rpdat.arm_voxelgi_cones
if rpdat.arm_voxelgi_revoxelize:
assets.add_khafile_def('arm_voxelgi_revox')
if rpdat.arm_voxelgi_camera:
wrd.world_defs += '_VoxelGICam'
if rpdat.arm_voxelgi_temporal:
assets.add_khafile_def('arm_voxelgi_temporal')
wrd.world_defs += '_VoxelGITemporal'
wrd.world_defs += '_VoxelAOvar' # Write a shader variant
if rpdat.arm_voxelgi_shadows:
wrd.world_defs += '_VoxelShadow'
if rpdat.arm_voxelgi_occ == 0.0:
wrd.world_defs += '_VoxelAONoTrace'
if arm.utils.get_legacy_shaders() or 'ios' in state.target:
wrd.world_defs += '_Legacy'
assets.add_khafile_def('arm_legacy')
# Light defines
point_lights = 0
for bo in bpy.data.objects: # TODO: temp
if bo.type == 'LIGHT':
light = bo.data
if light.type == 'AREA' and '_LTC' not in wrd.world_defs:
point_lights += 1
wrd.world_defs += '_LTC'
assets.add_khafile_def('arm_ltc')
if light.type == 'SUN' and '_Sun' not in wrd.world_defs:
wrd.world_defs += '_Sun'
if light.type == 'POINT' or light.type == 'SPOT':
point_lights += 1
if light.type == 'SPOT' and '_Spot' not in wrd.world_defs:
wrd.world_defs += '_Spot'
assets.add_khafile_def('arm_spot')
if point_lights == 1:
wrd.world_defs += '_SinglePoint'
elif point_lights > 1:
wrd.world_defs += '_Clusters'
assets.add_khafile_def('arm_clusters')
if '_Rad' in wrd.world_defs and '_Brdf' not in wrd.world_defs:
wrd.world_defs += '_Brdf'
def build():
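    """Collect khafile defines, shader passes and embedded assets for the
    configured render path."""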
rpdat = arm.utils.get_rp()
if rpdat.rp_driver != 'Armory' and arm.api.drivers[rpdat.rp_driver]['make_rpath'] != None:
arm.api.drivers[rpdat.rp_driver]['make_rpath']()
return
assets_path = arm.utils.get_sdk_path() + '/armory/Assets/'
wrd = bpy.data.worlds['Arm']
add_world_defs()
mobile_mat = rpdat.arm_material_model == 'Mobile' or rpdat.arm_material_model == 'Solid'
if not mobile_mat:
# Always include
assets.add(assets_path + 'brdf.png')
assets.add_embedded_data('brdf.png')
if rpdat.rp_hdr:
assets.add_khafile_def('rp_hdr')
assets.add_khafile_def('rp_renderer={0}'.format(rpdat.rp_renderer))
if rpdat.rp_depthprepass:
assets.add_khafile_def('rp_depthprepass')
if rpdat.rp_shadows:
assets.add_khafile_def('rp_shadowmap')
assets.add_khafile_def('rp_shadowmap_cascade={0}'.format(arm.utils.get_cascade_size(rpdat)))
assets.add_khafile_def('rp_shadowmap_cube={0}'.format(rpdat.rp_shadowmap_cube))
assets.add_khafile_def('rp_background={0}'.format(rpdat.rp_background))
if rpdat.rp_background == 'World':
assets.add_shader_pass('world_pass')
if '_EnvClouds' in wrd.world_defs:
assets.add(assets_path + 'clouds_base.raw')
assets.add_embedded_data('clouds_base.raw')
assets.add(assets_path + 'clouds_detail.raw')
assets.add_embedded_data('clouds_detail.raw')
assets.add(assets_path + 'clouds_map.png')
assets.add_embedded_data('clouds_map.png')
if rpdat.rp_renderer == 'Deferred' and not rpdat.rp_compositornodes:
assets.add_shader_pass('copy_pass')
if rpdat.rp_render_to_texture:
assets.add_khafile_def('rp_render_to_texture')
if rpdat.rp_renderer == 'Forward' and not rpdat.rp_compositornodes:
assets.add_shader_pass('copy_pass')
if rpdat.rp_compositornodes:
assets.add_khafile_def('rp_compositornodes')
compo_depth = False
if rpdat.arm_tonemap != 'Off':
wrd.compo_defs = '_CTone' + rpdat.arm_tonemap
if rpdat.rp_antialiasing == 'FXAA':
wrd.compo_defs += '_CFXAA'
if rpdat.arm_letterbox:
wrd.compo_defs += '_CLetterbox'
if rpdat.arm_grain:
wrd.compo_defs += '_CGrain'
if rpdat.arm_sharpen:
wrd.compo_defs += '_CSharpen'
if bpy.data.scenes[0].view_settings.exposure != 0.0:
wrd.compo_defs += '_CExposure'
if rpdat.arm_fog:
wrd.compo_defs += '_CFog'
compo_depth = True
focus_distance = 0.0 # TODO: deprecated
if len(bpy.data.cameras) > 0:
cam = bpy.data.cameras[0]
if hasattr(cam, 'dof'):
if cam.dof.use_dof:
focus_distance = cam.dof.focus_distance
else:
focus_distance = cam.dof_distance
if len(bpy.data.cameras) > 0 and focus_distance > 0.0:
wrd.compo_defs += '_CDOF'
compo_depth = True
if compo_depth:
wrd.compo_defs += '_CDepth'
assets.add_khafile_def('rp_compositordepth')
if rpdat.arm_lens_texture != '':
wrd.compo_defs += '_CLensTex'
assets.add_embedded_data('lenstexture.jpg')
if rpdat.arm_fisheye:
wrd.compo_defs += '_CFishEye'
if rpdat.arm_vignette:
wrd.compo_defs += '_CVignette'
if rpdat.arm_lensflare:
wrd.compo_defs += '_CGlare'
if rpdat.arm_lut_texture != '':
wrd.compo_defs += '_CLUT'
assets.add_embedded_data('luttexture.jpg')
if '_CDOF' in wrd.compo_defs or '_CFXAA' in wrd.compo_defs or '_CSharpen' in wrd.compo_defs:
wrd.compo_defs += '_CTexStep'
if '_CDOF' in wrd.compo_defs or '_CFog' in wrd.compo_defs or '_CGlare' in wrd.compo_defs:
wrd.compo_defs += '_CCameraProj'
assets.add_shader_pass('compositor_pass')
assets.add_khafile_def('rp_antialiasing={0}'.format(rpdat.rp_antialiasing))
if rpdat.rp_antialiasing == 'SMAA' or rpdat.rp_antialiasing == 'TAA':
assets.add_shader_pass('smaa_edge_detect')
assets.add_shader_pass('smaa_blend_weight')
assets.add_shader_pass('smaa_neighborhood_blend')
assets.add(assets_path + 'smaa_area.png')
assets.add(assets_path + 'smaa_search.png')
assets.add_embedded_data('smaa_area.png')
assets.add_embedded_data('smaa_search.png')
wrd.world_defs += '_SMAA'
if rpdat.rp_antialiasing == 'TAA':
assets.add_shader_pass('taa_pass')
assets.add_shader_pass('copy_pass')
if rpdat.rp_antialiasing == 'TAA' or rpdat.rp_motionblur == 'Object':
assets.add_khafile_def('arm_veloc')
wrd.world_defs += '_Veloc'
if rpdat.rp_antialiasing == 'TAA':
assets.add_khafile_def('arm_taa')
assets.add_khafile_def('rp_supersampling={0}'.format(rpdat.rp_supersampling))
if rpdat.rp_supersampling == '4':
assets.add_shader_pass('supersample_resolve')
assets.add_khafile_def('rp_ssgi={0}'.format(rpdat.rp_ssgi))
if rpdat.rp_ssgi != 'Off':
wrd.world_defs += '_SSAO'
if rpdat.rp_ssgi == 'SSAO':
assets.add_shader_pass('ssao_pass')
assets.add_shader_pass('blur_edge_pass')
else:
assets.add_shader_pass('ssgi_pass')
assets.add_shader_pass('blur_edge_pass')
if rpdat.arm_ssgi_half_res:
assets.add_khafile_def('rp_ssgi_half')
if rpdat.rp_bloom:
assets.add_khafile_def('rp_bloom')
assets.add_shader_pass('bloom_pass')
assets.add_shader_pass('blur_gaus_pass')
if rpdat.rp_ssr:
assets.add_khafile_def('rp_ssr')
assets.add_shader_pass('ssr_pass')
assets.add_shader_pass('blur_adaptive_pass')
if rpdat.arm_ssr_half_res:
assets.add_khafile_def('rp_ssr_half')
if rpdat.rp_overlays:
assets.add_khafile_def('rp_overlays')
if rpdat.rp_translucency:
assets.add_khafile_def('rp_translucency')
assets.add_shader_pass('translucent_resolve')
if rpdat.rp_stereo:
assets.add_khafile_def('rp_stereo')
assets.add_khafile_def('arm_vr')
wrd.world_defs += '_VR'
has_voxels = arm.utils.voxel_support()
if rpdat.rp_voxelao and has_voxels and rpdat.arm_material_model == 'Full':
assets.add_khafile_def('rp_voxelao')
assets.add_khafile_def('rp_voxelgi_resolution={0}'.format(rpdat.rp_voxelgi_resolution))
assets.add_khafile_def('rp_voxelgi_resolution_z={0}'.format(rpdat.rp_voxelgi_resolution_z))
if rpdat.arm_rp_resolution == 'Custom':
assets.add_khafile_def('rp_resolution_filter={0}'.format(rpdat.arm_rp_resolution_filter))
if rpdat.rp_renderer == 'Deferred':
if rpdat.arm_material_model == 'Full':
assets.add_shader_pass('deferred_light')
else: # mobile, solid
assets.add_shader_pass('deferred_light_' + rpdat.arm_material_model.lower())
assets.add_khafile_def('rp_material_' + rpdat.arm_material_model.lower())
if len(bpy.data.lightprobes) > 0:
wrd.world_defs += '_Probes'
assets.add_khafile_def('rp_probes')
assets.add_shader_pass('probe_planar')
assets.add_shader_pass('probe_cubemap')
assets.add_shader_pass('copy_pass')
if rpdat.rp_volumetriclight:
assets.add_khafile_def('rp_volumetriclight')
assets.add_shader_pass('volumetric_light')
assets.add_shader_pass('blur_bilat_pass')
assets.add_shader_pass('blur_bilat_blend_pass')
assets.add(assets_path + 'blue_noise64.png')
assets.add_embedded_data('blue_noise64.png')
if rpdat.rp_decals:
assets.add_khafile_def('rp_decals')
if rpdat.rp_water:
assets.add_khafile_def('rp_water')
assets.add_shader_pass('water_pass')
assets.add_shader_pass('copy_pass')
assets.add(assets_path + 'water_base.png')
assets.add_embedded_data('water_base.png')
assets.add(assets_path + 'water_detail.png')
assets.add_embedded_data('water_detail.png')
assets.add(assets_path + 'water_foam.png')
assets.add_embedded_data('water_foam.png')
if rpdat.rp_blending:
assets.add_khafile_def('rp_blending')
if rpdat.rp_sss:
assets.add_khafile_def('rp_sss')
wrd.world_defs += '_SSS'
assets.add_shader_pass('sss_pass')
if (rpdat.rp_ssr and rpdat.arm_ssr_half_res) or (rpdat.rp_ssgi != 'Off' and rpdat.arm_ssgi_half_res):
assets.add_shader_pass('downsample_depth')
if rpdat.rp_motionblur != 'Off':
assets.add_khafile_def('rp_motionblur={0}'.format(rpdat.rp_motionblur))
assets.add_shader_pass('copy_pass')
if rpdat.rp_motionblur == 'Camera':
assets.add_shader_pass('motion_blur_pass')
else:
assets.add_shader_pass('motion_blur_veloc_pass')
if rpdat.rp_compositornodes and rpdat.rp_autoexposure:
assets.add_khafile_def('rp_autoexposure')
assets.add_shader_pass('histogram_pass')
if rpdat.rp_dynres:
assets.add_khafile_def('rp_dynres')
gbuffer2 = '_Veloc' in wrd.world_defs
if gbuffer2:
assets.add_khafile_def('rp_gbuffer2')
wrd.world_defs += '_gbuffer2'
if callback != None:
callback()
|
luboslenco/cyclesgame
|
blender/arm/make_renderpath.py
|
Python
|
lgpl-3.0
| 13,259
|
from django.contrib import admin
from .models import Contact
# Register your models here.
class ContactModelAdmin(admin.ModelAdmin):
list_display = ('from_name', 'to_name', 'date_added')
admin.site.register(Contact, ContactModelAdmin)
|
howinator/GimmeThat
|
src/contact/admin.py
|
Python
|
gpl-3.0
| 224
|
# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
# Use, modification and distribution is subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test all_reduce() collective.
from __future__ import print_function
import mpi
from generators import *
def all_reduce_test(comm, generator, kind, op, op_kind):
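    """Reduce every rank's generated value with op via mpi.all_reduce and
    compare against a serial reduction of the same values."""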
if comm.rank == 0:
print ("Reducing to %s of %s..." % (op_kind, kind)),
my_value = generator(comm.rank)
result = mpi.all_reduce(comm, my_value, op)
    expected_result = generator(0)
for p in range(1, comm.size):
expected_result = op(expected_result, generator(p))
assert result == expected_result
if comm.rank == 0:
print ("OK.")
return
all_reduce_test(mpi.world, int_generator, "integers", lambda x,y:x + y, "sum")
all_reduce_test(mpi.world, int_generator, "integers", lambda x,y:x * y, "product")
all_reduce_test(mpi.world, string_generator, "strings", lambda x,y:x + y, "concatenation")
all_reduce_test(mpi.world, string_list_generator, "list of strings", lambda x,y:x + y, "concatenation")
|
davehorton/drachtio-server
|
deps/boost_1_77_0/libs/mpi/test/python/all_reduce_test.py
|
Python
|
mit
| 1,175
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-23 16:19
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_user_indeces'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(help_text='Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters', max_length=30, unique=True, validators=[django.core.validators.RegexValidator(re.compile('^[\\w.@+-]+$', 32), 'Enter a valid username.', 'invalid')], verbose_name='Username'),
),
]
|
ta2-1/pootle
|
pootle/apps/accounts/migrations/0006_unicode_usernames_are_valid.py
|
Python
|
gpl-3.0
| 735
|
#!/usr/bin/env python
# coding=utf-8
#
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
import httplib
import sys
def test(name,A,B):
if A != B:
print "Error :" + name
print "-----Actual--"
print A,"---Expected--"
print B,"-------------"
sys.exit(1)
else:
print "Ok:"+name
h=httplib.HTTPConnection('localhost:8080')
h.request('GET','/test')
r=h.getresponse()
body=r.read()
ref_body = \
"""\
non loaded<br>
<form action="/test" method="post" >
<p>text <span class="cppcms_form_input"><input type="text" name="_1" ></span></p>
<p>textarea <span class="cppcms_form_input"><textarea name="_2" ></textarea></span></p>
<p>int <span class="cppcms_form_input"><input type="text" name="_3" value="" ></span></p>
<p>double <span class="cppcms_form_input"><input type="text" name="_4" value="" ></span></p>
<p>pass <span class="cppcms_form_input"><input type="password" name="_5" ></span></p>
<p>pass2 <span class="cppcms_form_input"><input type="password" name="_6" ></span></p>
<p>yes or not <span class="cppcms_form_input"><input type="text" name="_7" ></span></p>
<p>E-Mail <span class="cppcms_form_input"><input type="text" name="_8" ></span></p>
<p>Checkbox <span class="cppcms_form_input"><input type="checkbox" name="_9" value="y" ></span></p>
<p>Select Multiple <span class="cppcms_form_input"><select multiple name="_10" >
<option value="0" selected >a</option>
<option value="1" selected >b</option>
<option value="2" >c</option>
<option value="id1" >tr1</option>
</select></span></p>
<p>Select <span class="cppcms_form_input"><select name="_11" >
<option value="0" >a</option>
<option value="1" >b</option>
<option value="2" >c</option>
<option value="id2" selected >tr2</option>
</select></span></p>
<p>Radio <span class="cppcms_form_input"><div class="cppcms_radio" >
<input type="radio" value="0" name="_12" checked > x<br>
<input type="radio" value="1" name="_12" > y<br>
<input type="radio" value="id3" name="_12" > tr3<br>
</div></span></p>
<p>Submit <span class="cppcms_form_input"><input type="submit" name="_13" value="Button" ></span></p>
</form>
"""
test("/test",body,ref_body)
def test_valid(name,params,ans,url='/non_empty'):
    h=httplib.HTTPConnection('localhost:8080')
h.request('GET','/test' + url + '?' + params)
r=h.getresponse()
test(name,r.read()[:len(ans)],ans)
test_valid('non_empty1','_1=1&_2=1&_3=1&_4=1&_5=1&_6=1&_7=yes&_8=a@a&_9=10&_10=1&_11=1&_12=1&_13=1','valid')
test_valid('non_empty2','_1=&_2=1&_3=1&_4=1&_5=1&_6=1&_7=yes&_8=a@a&_9=10&_10=1&_11=1&_12=1&_13=1','invalid')
test_valid('non_empty3','_1=1&_2=&_3=1&_4=1&_5=1&_6=1&_7=yes&_8=a@a&_9=10&_10=1&_11=1&_12=1&_13=1','invalid')
test_valid('non_empty4','_1=1&_2=1&_3=&_4=1&_5=1&_6=1&_7=yes&_8=a@a&_9=10&_10=1&_11=1&_12=1&_13=1','invalid')
test_valid('non_empty5','_1=1&_2=1&_3=1&_4=1&_5=&_6=1&_7=yes&_8=a@a&_9=10&_10=1&_11=1&_12=1&_13=1','invalid')
test_valid('non_empty6','_1=1&_2=1&_3=1&_4=1&_5=1&_6=&_7=yes&_8=a@a&_9=10&_10=1&_11=1&_12=1&_13=1','invalid')
test_valid('non_empty7','_1=1&_2=1&_3=1&_4=1&_5=1&_6=1&_7=&_8=a@a&_9=10&_10=1&_11=1&_12=1&_13=1','invalid')
test_valid('non_empty8','_1=1&_2=1&_3=1&_4=1&_5=1&_6=1&_7=yes&_8=&_9=10&_10=1&_11=1&_12=1&_13=1','invalid')
test_valid('non_empty9','_1=1&_2=1&_3=1&_4=1&_5=1&_6=1&_7=yes&_8=a@a&_9=&_10=1&_11=1&_12=1&_13=1','valid') # checkbox ok
test_valid('non_empty10','_1=1&_2=1&_3=1&_4=1&_5=1&_6=1&_7=yes&_8=a@a&_9=10&_10=&_11=1&_12=1&_13=1','invalid')
test_valid('non_empty11','_1=1&_2=1&_3=1&_4=1&_5=1&_6=1&_7=yes&_8=a@a&_9=10&_10=1&_11=&_12=1&_13=1','invalid')
test_valid('non_empty12','_1=1&_2=1&_3=1&_4=1&_5=1&_6=1&_7=yes&_8=a@a&_9=10&_10=1&_11=1&_12=&_13=1','invalid')
test_valid('non_empty13','_1=1&_2=1&_3=1&_4=1&_5=1&_6=1&_7=yes&_8=a@a&_9=10&_10=1&_11=1&_12=1&_13=','valid') # Submit ok
test_valid('empty','_1=&_2=&_3=&_4=&_5=&_6=&_7=yes&_8=a@a&_9=&_10=&_11=&_12=&_13=','valid','') # Empty ok only regex, email fails
test_valid('empty1','_1=&_2=&_3=&_4=&_5=&_6=&_7=yes&_8=&_9=&_10=&_11=&_12=&_13=','invalid','') # Empty ok only regex, email fails
test_valid('empty2','_1=&_2=&_3=&_4=&_5=&_6=&_7=&_8=a@a&_9=&_10=&_11=&_12=&_13=','invalid','') # Empty ok only regex, email fails
h=httplib.HTTPConnection('localhost:8080')
h.request('GET','/test/sub')
r=h.getresponse()
body=r.read()
ref_body = \
"""\
<p>pass <span class="cppcms_form_input"><input type="password" name="_5" ></span></p>
<p>pass2 <span class="cppcms_form_input"><input type="password" name="_6" ></span></p>
<p>yes or not <span class="cppcms_form_input"><input type="text" name="_7" ></span></p>
<p>E-Mail <span class="cppcms_form_input"><input type="text" name="_8" ></span></p>
<p>Checkbox <span class="cppcms_form_input"><input type="checkbox" name="_9" value="y" ></span></p>
<p>Select Multiple <span class="cppcms_form_input"><select multiple name="_10" >
<option value="0" selected >a</option>
<option value="1" selected >b</option>
<option value="2" >c</option>
<option value="id1" >tr1</option>
</select></span></p>
"""
test("subset",body,ref_body)
def test_valid(name,url,params,ans):
def get():
        h=httplib.HTTPConnection('localhost:8080')
h.request('GET','/test' + url + '?' + params)
r=h.getresponse()
test(name+' GET',r.read(),ans)
def post():
        h=httplib.HTTPConnection('localhost:8080')
headers = {"Content-type": "application/x-www-form-urlencoded"}
h.request('POST','/test' + url,params,headers)
r=h.getresponse()
test(name+' POST',r.read(),ans)
get()
post()
test_valid('text','/text','_1=','invalid\n')
test_valid('text1','/text','_1=x','invalid\nx')
test_valid('text2','/text','_1=xx','valid\nxx')
test_valid('text3','/text','_1=xxxxx','valid\nxxxxx')
test_valid('text4','/text','_1=xxxxxx','invalid\nxxxxxx')
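# The following cases exercise UTF-8 validation of the text field: properly
# percent-encoded Hebrew passes, while malformed byte sequences are rejected.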
test_valid('text5','/text','_1=%d7%a9%d6%b8%d7%9c%d7%95%d7%9d','valid\nשָלום')
test_valid('text6','/text','_1=%d7%a9%d7%9c','valid\nשל')
test_valid('text7','/text','_1=%FF%FF','invalid\n\xFF\xFF')
test_valid('text8','/text','_1=%01%01','invalid\n\x01\x01')
test_valid('text9.1','/text','_1=xx%DF%7F','invalid\nxx\xDF\x7F')
test_valid('text9.2','/text','_1=xx%C2%7F','invalid\nxx\xC2\x7F')
test_valid('text9.3','/text','_1=xx%e0%7F%80','invalid\nxx\xe0\x7F\x80')
test_valid('text9.4','/text','_1=xx%f0%7F%80%80','invalid\nxx\xf0\x7F\x80\x80')
test_valid('number','/number','_1=','invalid\n')
test_valid('number1','/number','_1=10','valid\n10')
test_valid('number2','/number','_1=10.0','valid\n10')
test_valid('number3','/number','_1=10.0e+','invalid\n')
test_valid('number5','/number','_1=10.0e1','valid\n100')
test_valid('number6','/number','_1=10.0x','invalid\n')
test_valid('number7','/number','_1=A10.0','invalid\n')
test_valid('number8','/number','_1=0','invalid\n0')
test_valid('number9','/number','_1=1000','invalid\n1000')
test_valid('number10','/number','_1=10A','invalid\n')
test_valid('pass1','/pass','_1=&_2=','invalid\n')
test_valid('pass2','/pass','_1=x&_2=x','valid\n')
test_valid('pass3','/pass','_1=x1&_2=x2','invalid\n')
test_valid('checkbox1','/checkbox','_1=n','valid\n0')
test_valid('checkbox2','/checkbox','_1=y','valid\n1')
test_valid('sm1','/sm','foo=bar','invalid\n0 0 0 0 \n\n')
test_valid('sm2','/sm','_1=1&_1=0','valid\n1 1 0 0 \n0 1 \n')
test_valid('sm3','/sm','_1=1&_1=id1','valid\n0 1 0 1 \n1 id1 \n')
test_valid('sm4','/sm','_1=0&_1=1&_1=2','invalid\n1 1 1 0 \n0 1 2 \n')
test_valid('select1','/select','foo=bar','invalid\n-1 ')
test_valid('select2','/select','_1=0','valid\n0 0')
test_valid('select3','/select','_1=0&_1=1','invalid\n-1 ')
test_valid('select4','/select','_1=10','invalid\n-1 ')
test_valid('radio1','/radio','foo=bar','invalid\n-1 ')
test_valid('radio2','/radio','_1=0','valid\n0 0')
test_valid('radio3','/radio','_1=0&_1=1','invalid\n-1 ')
test_valid('radio4','/radio','_1=10','invalid\n-1 ')
test_valid('submit1','/submit','_1=1','valid\n1')
test_valid('submit2','/submit','_2=1','valid\n0')
body='<p><label for="submit_id">message</label> <span class="cppcms_form_error">error</span> <span class="cppcms_form_input"><input type="submit" id="submit_id" name="submit_name" value="test" ></span><span class="cppcms_form_help">help</span></p>\n'
test_valid('submit3','/submitl','',body)
def test_upload(name,url,content,ans):
    h=httplib.HTTPConnection('localhost:8080')
headers = {"Content-type": "multipart/form-data; boundary=123456"}
h.request('POST','/test' + url,content,headers)
r=h.getresponse()
test(name,r.read(),ans)
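# Helper building a minimal multipart/form-data body by hand; the boundary
# string must match the one declared in the Content-Type header above.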
def make_multipart_form_data(content,mime,name='test.txt'):
return \
'--123456\r\n' + \
'Content-Type: ' + mime + '\r\n' + \
'Content-Disposition: form-data; name="file"; filename="' + name +'"\r\n' + \
'\r\n' + \
content + \
'\r\n--123456--\r\n'
test_upload('file 1','/upload',make_multipart_form_data('foo','text/plain'),'valid\n')
test_upload('file 2','/upload',make_multipart_form_data('foob','text/plain'),'valid\n')
test_upload('file 3','/upload',make_multipart_form_data('P3','text/plain'),'valid\n')
test_upload('file 4','/upload',make_multipart_form_data('P3 ' + 'x' * 17,'text/plain'),'valid\n')
test_upload('file 5','/upload_regex',make_multipart_form_data('P3','text/html'),'valid\n')
test_upload('file mime','/upload',make_multipart_form_data('foo','text/html'),'invalid\n')
test_upload('file magic 1','/upload',make_multipart_form_data('fo','text/plain'),'invalid\n')
test_upload('file magic 2','/upload',make_multipart_form_data('P','text/plain'),'invalid\n')
test_upload('file magic 3','/upload',make_multipart_form_data('','text/plain'),'invalid\n')
test_upload('file size','/upload',make_multipart_form_data('P3 ' + 'x' * 18,'text/plain'),'invalid\n')
test_upload('file regex-mime','/upload_regex',make_multipart_form_data('P3','text/xhtml'),'invalid\n')
test_upload('file encoding','/upload',make_multipart_form_data('foo','text/plain','\xFF\xFF.txt'),'invalid\n')
test_upload('file empty','/upload','--123456--\r\n','invalid\n')
|
invicnaper/MWF
|
Utils/t/form_test.py
|
Python
|
gpl-2.0
| 10,150
|
import logging, argparse, os, sys, re
import numpy as np
from collections import OrderedDict
from .utils import getWorkDirs, getEnergy4Key
from ..ccsgp.ccsgp import make_panel, make_plot
from ..ccsgp.utils import getOpts
from ..ccsgp.config import default_colors
from decimal import Decimal
import uncertainties.umath as umath
import uncertainties.unumpy as unp
from fnmatch import fnmatch
def getMeeLabel(s):
if s == 'pi0': return '{/Symbol \160}^0'
if s == 'omega': return '{/Symbol \167}'
if s == 'phi': return '{/Symbol \152}'
if s == 'jpsi': return 'J/{/Symbol \171}'
return s
def splitFileName(fn):
# energy, mee_name, mee_range, data_type
split_arr = fn.split('_')
return (
re.compile('\d+').search(split_arr[0]).group(),
split_arr[1], split_arr[2],
re.compile('(?i)[a-z]+').search(split_arr[0]).group()
)
def getSubplotTitle(mn, mr):
return ' '.join([getMeeLabel(mn), ':', mr, ' GeV/c^{2}'])
def gp_ptspec():
"""example for a 2D-panel plot etc."""
    fenergies = ['19', '27', '39', '62']  # '200' intentionally excluded
nen = len(fenergies)
mee_keys = ['pi0', 'LMR', 'omega', 'phi', 'IMR', 'jpsi']
#mee_keys = ['LMR', ]
mee_dict = OrderedDict((k,'') for k in mee_keys)
yscale = { '200': '300', '62': '5000', '39': '50', '27': '0.3', '19': '0.001' }
inDir, outDir = getWorkDirs()
data, data_avpt, dpt_dict = {}, {}, {}
yvals, yvalsPt = [], []
scale = {
'19': 1.3410566491548412, '200': 1.0, '39': 1.2719203877292842,
'27': 1.350873678084769, '62': 1.2664666321635087
}
lmr_label = None
for filename in os.listdir(inDir):
# import data
file_url = os.path.join(inDir, filename)
filebase = os.path.splitext(filename)[0] # unique
energy, mee_name, mee_range, data_type = splitFileName(filebase)
if mee_name == 'LMR':
mee_range_split = map(float, mee_range.split('-'))
lmr_label = 'LMR: %g < M_{ee} < %g GeV/c^{2}' % (
mee_range_split[0], mee_range_split[1]
)
if energy == '200': continue
if mee_name not in mee_keys: continue
mee_dict[mee_name] = mee_range
data[filebase] = np.loadtxt(open(file_url, 'rb'))
if data_type == 'data':
#print data[filebase]
data[filebase] = data[filebase][:-1] # skip mT<0.4 point
if energy == '200': data[filebase][:,(1,3,4)] /= 0.5
# calculate average pT first
mask = (data[filebase][:,0] > 0.4) & (data[filebase][:,0] < 2.2)
avpt_data = data[filebase][mask]
pTs = avpt_data[:,0]
wghts = avpt_data[:,1]
probs = unp.uarray(avpt_data[:,1], avpt_data[:,3]) # dN/pT
probs /= umath.fsum(probs) # probabilities
avpt = umath.fsum(pTs*probs)
logging.info(('%s: {} %g' % (
filebase, np.average(pTs, weights = wghts)
)).format(avpt)) # TODO: syst. uncertainties
# save datapoint for average pT and append to yvalsPt for yaxis range
dp = [ float(getEnergy4Key(energy)), avpt.nominal_value, 0., avpt.std_dev, 0. ]
avpt_key = mee_name
if data_type == 'cocktail': avpt_key += '_c'
if data_type == 'medium': avpt_key += '_m'
if data_type == 'mediumMedOnly': avpt_key += '_mMed'
if data_type == 'mediumQgpOnly': avpt_key += '_mQgp'
if avpt_key in data_avpt: data_avpt[avpt_key].append(dp)
else: data_avpt[avpt_key] = [ dp ]
yvalsPt.append(avpt.nominal_value)
# now adjust data for panel plot and append to yvals
if data_type != 'data':
data[filebase][:,(1,3,4)] /= scale[energy]
data[filebase][:,(1,3,4)] *= float(yscale[energy])
if data_type == 'cocktail' or fnmatch(data_type, '*medium*'):
data[filebase][:,2:] = 0.
yvals += [v for v in data[filebase][:,1] if v > 0]
# prepare dict for panel plot
dpt_dict_key = getSubplotTitle(mee_name, mee_range)
if dpt_dict_key not in dpt_dict:
ndsets = nen*2
# TODO: currently only 19/39/62 medium avail. w/ med/qgp/tot for each
# July14: all energies available; TODO: fix dsidx
if mee_name == 'LMR': ndsets += 4*3
dpt_dict[dpt_dict_key] = [ [None]*ndsets, [None]*ndsets, [None]*ndsets ]
enidx = fenergies.index(energy)
dsidx = enidx
if fnmatch(data_type, '*medium*'):
# 19: 0-2, 27: 3-5, 39: 6-8, 62: 9-11
dsidx = (energy=='19')*0 + (energy=='27')*3 + (energy=='39')*6 + (energy=='62')*9
dsidx += (data_type=='mediumQgpOnly')*0 + (data_type=='mediumMedOnly')*1
dsidx += (data_type=='medium')*2
else:
dsidx += int(mee_name == 'LMR') * 4 * 3 # number of medium calc avail.
dsidx += int(data_type == 'data') * len(fenergies)
dpt_dict[dpt_dict_key][0][dsidx] = data[filebase] # data
if data_type == 'data': # properties
dpt_dict[dpt_dict_key][1][dsidx] = 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[enidx]
elif data_type == 'medium':
dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt 1 lw 5 lc %s' % default_colors[enidx]
else:
dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt %d lw 5 lc %s' % (
2+(data_type=='mediumMedOnly')+(data_type=='mediumQgpOnly')*2, default_colors[enidx]
)
dpt_dict[dpt_dict_key][2][dsidx] = ' '.join([ # legend titles
getEnergy4Key(energy), 'GeV', '{/Symbol \264} %g' % (
Decimal(yscale[energy])#.as_tuple().exponent
)
]) if data_type == 'data' else ''
# use mass range in dict key to sort dpt_dict with increasing mass
plot_key_order = dpt_dict.keys()
plot_key_order.sort(key=lambda x: float(x.split(':')[1].split('-')[0]))
# sort data_avpt by energy and apply x-shift for better visibility
for k in data_avpt: data_avpt[k].sort(key=lambda x: x[0])
energies = [ dp[0] for dp in data_avpt[mee_keys[0]] ]
energies.append(215.) # TODO: think of better upper limit
linsp = {}
for start,stop in zip(energies[:-1],energies[1:]):
linsp[start] = np.linspace(start, stop, num = 4*len(mee_keys))
for k in data_avpt:
key = k.split('_')[0]
for i in xrange(len(data_avpt[k])):
data_avpt[k][i][0] = linsp[energies[i]][mee_keys.index(key)]
# make panel plot
yMin, yMax = 0.5*min(yvals), 3*max(yvals)
make_panel(
dpt_dict = OrderedDict((k,dpt_dict[k]) for k in plot_key_order),
name = os.path.join(outDir, 'ptspec'),
ylabel = '1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
xlabel = 'dielectron transverse momentum, p_{T} (GeV/c)',
ylog = True, xr = [0, 2.2], yr = [1e-9, 1e4],
#lmargin = 0.12, bmargin = 0.10, tmargin = 1., rmargin = 1.,
key = ['bottom left', 'samplen 0.5', 'width -2', 'opaque'],
arrow_bar = 0.002, layout = '3x2', size = '8in,8in'
)
#make plot for LMR spectra only
#lmr_key = getSubplotTitle('LMR', '0.4-0.76')
#if energy == '200':
# lmr_key = getSubplotTitle('LMR', '0.3-0.76')
#pseudo_point = np.array([[-1,0,0,0,0]])
#model_titles = ['Cocktail + Model', 'Cocktail', 'in-Medium', 'QGP']
#model_props = [
# 'with lines lc %s lw 5 lt %d' % (default_colors[-2], i+1)
# for i in xrange(len(model_titles))
#]
#make_plot(
# data = dpt_dict[lmr_key][0] + [ pseudo_point ] * len(model_titles),
# properties = dpt_dict[lmr_key][1] + model_props,
# titles = dpt_dict[lmr_key][2] + model_titles,
# name = os.path.join(outDir, 'ptspecLMR'),
# ylabel = '1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
# xlabel = 'dielectron transverse momentum, p_{T} (GeV/c)',
# ylog = True, xr = [0, 2.0], yr = [1e-8, 100],
# lmargin = 0.15, bmargin = 0.08, rmargin = 0.98, tmargin = 0.84,
# key = ['maxrows 4', 'samplen 0.7', 'width -2', 'at graph 1.,1.2'],
# arrow_bar = 0.005, size = '10in,13in',
# labels = {
# 'stat. errors only': [0.7,0.95,False], lmr_label: [0.05,0.03,False],
# 'STAR Preliminary': [0.05,0.07,False],
# }
#)
# make mean pt plot
#yMinPt, yMaxPt = 0.95*min(yvalsPt), 1.05*max(yvalsPt)
#make_plot(
# data = [ # cocktail
# np.array(data_avpt[k+'_c']) for k in mee_keys
# ] + [ # medium
# np.array(data_avpt['LMR_m'])
# ] + [ # data
# np.array(data_avpt[k]) for k in mee_keys
# ],
# properties = [
# 'with lines lt 1 lw 4 lc %s' % default_colors[i if i < 5 else i+1]
# for i in xrange(len(mee_keys))
# ] + [
# 'with lines lt 2 lw 4 lc %s' % default_colors[mee_keys.index('LMR')]
# ] + [
# 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[i if i < 5 else i+1]
# for i in xrange(len(mee_keys))
# ],
# titles = [ getMeeLabel(k) for k in mee_keys ] + ['']*(len(mee_keys)+1),
# name = os.path.join(outDir, 'meanPt'),
# xlabel = '{/Symbol \326}s_{NN} (GeV)',
# ylabel = '{/Symbol \341}p_{T}{/Symbol \361} in STAR Acceptance (GeV/c)',
# xlog = True, xr = [17,220], yr = [yMinPt, yMaxPt], size = '11in,9in',
# key = [ 'maxrows 1', 'at graph 1, 1.1' ],
# lmargin = 0.11, bmargin = 0.11, tmargin = 1., rmargin = 1.,
# gpcalls = [
# 'format x "%g"',
# 'xtics (20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
# ]
#)
## make mean pt plot for LMR only
#make_plot(
# data = [
# np.array(data_avpt['LMR_c']),
# np.array(data_avpt['LMR_m']),
# np.array(data_avpt['LMR'])
# ],
# properties = [
# 'with lines lt 2 lw 4 lc %s' % default_colors[0],
# 'with lines lt 1 lw 4 lc %s' % default_colors[0],
# 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[0]
# ],
# titles = [
# 'cocktail', 'HMBT', getMeeLabel('data')
# ],
# name = os.path.join(outDir, 'meanPtLMR'),
# xlabel = '{/Symbol \326}s_{NN} (GeV)',
# ylabel = 'LMR {/Symbol \341}p_{T}{/Symbol \361} in STAR Acceptance (GeV/c)',
# lmargin = 0.17, bmargin = 0.15, tmargin = 0.95, xlog = True, xr = [17,80],
# yr = [0.65,1.05], #yr = [yMinPt, yMaxPt],
# key = [ 'bottom right' ],
# gpcalls = [
# 'format x "%g"',
# 'xtics (20, 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
# ],
# labels = {
# 'stat. errors only': [0.7,0.95,False], lmr_label: [0.05,0.07,False],
# '0.4 < p_{T} < 2.2 GeV/c': [0.05,0.14,False]
# }
#)
return 'done'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
print gp_ptspec()
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_ptspec.py
|
Python
|
mit
| 10,339
|
#!/usr/bin/env python2
# This is a component of AXIS, a front-end for linuxcnc
# Copyright 2004, 2005, 2006 Jeff Epler <jepler@unpythonic.net>
# and Chris Radek <chris@timeguy.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, os
import linuxcnc, time
import rs274.options
import gettext
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
gettext.install("linuxcnc", localedir=os.path.join(BASE, "share", "locale"), unicode=True)
if len(sys.argv) > 1 and sys.argv[1] == '-ini':
ini = linuxcnc.ini(sys.argv[2])
linuxcnc.nmlfile = ini.find("EMC", "NML_FILE") or linuxcnc.nmlfile
del sys.argv[1:3]
s = linuxcnc.stat(); s.poll()
def show_mcodes(l):
return " ".join(["M%g" % i for i in l[1:] if i != -1])
def show_gcodes(l):
return " ".join(["G%g" % (i/10.) for i in l[1:] if i != -1])
def show_position(p):
return " ".join(["%-8.4f" % n for i, n in enumerate(p) if s.axis_mask & (1<<i)])
joint_position = " ".join(["%-8.4f"] * s.joints)
def show_joint_position(p):
return joint_position % p[:s.joints]
perjoint = " ".join(["%s"] * s.joints)
def show_perjoint(p):
return perjoint % p[:s.joints]
def show_float(p): return "%-8.4f" % p
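# How each linuxcnc.stat attribute is rendered: a dict maps raw values to
# labels, a callable formats the value, and None means "skip this attribute".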
maps = {
'exec_state': {linuxcnc.EXEC_ERROR: 'error',
linuxcnc.EXEC_DONE: 'done',
linuxcnc.EXEC_WAITING_FOR_MOTION: 'motion',
linuxcnc.EXEC_WAITING_FOR_MOTION_QUEUE: 'motion queue',
linuxcnc.EXEC_WAITING_FOR_IO: 'io',
linuxcnc.EXEC_WAITING_FOR_MOTION_AND_IO: 'motion and io',
linuxcnc.EXEC_WAITING_FOR_DELAY: 'delay',
linuxcnc.EXEC_WAITING_FOR_SYSTEM_CMD: 'system command'},
'motion_mode':{linuxcnc.TRAJ_MODE_FREE: 'free', linuxcnc.TRAJ_MODE_COORD: 'coord',
linuxcnc.TRAJ_MODE_TELEOP: 'teleop'},
'interp_state':{linuxcnc.INTERP_IDLE: 'idle', linuxcnc.INTERP_PAUSED: 'paused',
linuxcnc.INTERP_READING: 'reading', linuxcnc.INTERP_WAITING: 'waiting'},
'task_state': {linuxcnc.STATE_ESTOP: 'estop', linuxcnc.STATE_ESTOP_RESET: 'estop reset',
linuxcnc.STATE_ON: 'on', linuxcnc.STATE_OFF: 'off'},
'task_mode': {linuxcnc.MODE_AUTO: 'auto', linuxcnc.MODE_MDI: 'mdi',
linuxcnc.MODE_MANUAL: 'manual'},
'state': {1: 'rcs_done', 2: 'rcs_exec', 3: 'rcs_error'},
'motion_type': {0: 'none', 1: 'traverse', 2: 'feed', 3: 'arc', 4: 'toolchange', 5: 'probing'},
'program_units': {1: 'inch', 2: 'mm'},
'kinematics_type': {linuxcnc.KINEMATICS_IDENTITY: 'identity', linuxcnc.KINEMATICS_FORWARD_ONLY: 'forward_only',
linuxcnc.KINEMATICS_INVERSE_ONLY: 'inverse_only', linuxcnc.KINEMATICS_BOTH: 'both'},
'mcodes': show_mcodes, 'gcodes': show_gcodes, 'poll': None, 'tool_table': None,
'axis': None, 'joint': None, 'gettaskfile': None,
'actual_position': show_position,
'position': show_position,
'dtg': show_position,
'origin': show_position,
'rotation_xy': show_float,
'probed_position': show_position,
'tool_offset': show_position,
'g5x_offset': show_position,
'g92_offset': show_position,
'linear_units': show_float,
'max_acceleration': show_float,
'max_velocity': show_float,
'angular_units': show_float,
'distance_to_go': show_float,
'current_vel': show_float,
'limit': show_perjoint,
'homed': show_perjoint,
'joint_position': show_joint_position,
'joint_actual_position': show_joint_position,
}
if s.kinematics_type == 1:
maps['joint_position'] = None
maps['joint_actual_position'] = None
def gui():
import Tkinter
from _tkinter import TclError
root = Tkinter.Tk(className="LinuxCNCTop")
rs274.options.install(root)
root.title(_("LinuxCNC Status"))
t = Tkinter.Text()
sb = Tkinter.Scrollbar(command=t.yview)
t.configure(yscrollcommand=sb.set)
t.configure(tabs="150")
base_font = t.tk.call("set", "BASE_FONT")
fixed_font = t.tk.call("set", "FIXED_FONT")
t.tag_configure("key", foreground="blue", font=base_font)
t.tag_configure("value", foreground="black", font=fixed_font)
t.tag_configure("changedvalue", foreground="black", background="red", font=fixed_font)
t.tag_configure("sel", foreground="white")
t.tag_raise("sel")
t.bind("<KeyPress>", "break")
b = Tkinter.Button(text=_("Copy All"),
command="%s tag add sel 0.0 end; tk_textCopy %s" % (t, t))
b.pack(side="bottom", anchor="sw")
t.pack(side="left", expand=1, fill="both")
sb.pack(side="left", expand=0, fill="y")
changetime = {}
oldvalues = {}
def timer():
try:
s.poll()
except linuxcnc.error:
root.destroy()
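        # Save the widget's scroll position, selection and cursor marks so the
        # full redraw below does not visibly disturb the view.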
pos = t.yview()[0]
selection = t.tag_ranges("sel")
insert_point = t.index("insert")
insert_gravity = t.mark_gravity("insert")
try:
anchor_point = t.index("anchor")
anchor_gravity = t.mark_gravity("anchor")
except TclError:
anchor_point = None
t.delete("0.0", "end")
first = True
        for k in dir(s):
            if k.startswith("_"): continue
            if k in maps and maps[k] is None: continue
            v = getattr(s, k)
            if k in maps:
                m = maps[k]
                if callable(m):
                    v = m(v)
                else:
                    v = m.get(v, v)
            if k in oldvalues:
                changed = oldvalues[k] != v
                if changed: changetime[k] = time.time() + 2
            oldvalues[k] = v
            if k in changetime and changetime[k] >= time.time():
                vtag = "changedvalue"
            else:
                vtag = "value"
if first: first = False
else: t.insert("end", "\n")
t.insert("end", k, "key", "\t")
t.insert("end", v, vtag)
t.yview_moveto(pos)
if selection:
t.tag_add("sel", *selection)
t.mark_set("insert", insert_point)
t.mark_gravity("insert", insert_gravity)
if anchor_point is not None:
t.mark_set("anchor", anchor_point)
t.mark_gravity("anchor", anchor_gravity)
t.after(100, timer)
timer()
t.mainloop()
def text():
s.poll()
    for k in dir(s):
        if k.startswith("_"): continue
        if k in maps and maps[k] is None: continue
        v = getattr(s, k)
        if k in maps:
            m = maps[k]
            if callable(m):
                v = m(v)
            else:
                v = m.get(v, v)
print "%-20s %-.58s" % (k, v)
if len(sys.argv) > 1 and sys.argv[1] == '-t':
text()
else:
gui()
# vim:sw=4:sts=4:et
|
araisrobo/linuxcnc
|
src/emc/usr_intf/axis/scripts/linuxcnctop.py
|
Python
|
lgpl-2.1
| 7,389
|
from django.conf.urls import include, url
from django.contrib.auth.decorators import login_required
from accounts.views import DashTemplateView
urlpatterns = [
url(r'^$', login_required(DashTemplateView.as_view(template_name="accounts/dashboard.html")), name="dashboard"),
url(r'^logout/$', 'accounts.views.logout_view', name='auth_logout'),
url(r'^login/$', 'accounts.views.login_view', name='auth_login'),
url(r'^password_reset/', include('password_reset.urls')),
url(r'^signup/$', 'accounts.views.signup', name='signup'),
]
|
davogler/POSTv3
|
accounts/urls.py
|
Python
|
mit
| 676
|
#!/usr/bin/env python
import argparse
import logging
import sys
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio.SeqFeature import SeqFeature, FeatureLocation
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
def fiveColToGbk(tabIn, seqIn):
seqList = list(SeqIO.parse(seqIn, "fasta"))
seqOut = None
startLoc = 0
endLoc = -1
featList = []
featType = ""
recOut = []
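    # Expected five-column layout (inferred from the parsing below): records
    # start with ">Feature <SeqID>"; feature lines carry tab-separated
    # start/end/type; extra location lines repeat start/end; qualifier lines
    # carry key/value in columns 4 and 5.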
for line in tabIn:
if line[0] == ">":
            if featType:
                # flush the last pending feature of the previous record
                featList.append(SeqFeature(featLoc, featType, qualifiers=featQuals))
                if len(seqList) == 1:
                    seqOut = seqList[0].seq
                else:
                    for x in seqList:
                        if x.id == recID:
                            seqOut = x.seq
                if seqOut is None:
                    log.error("Unable to find associated sequence for 5-column record " + recID + ", unable to construct Genbank")
                else:
                    seqOut.alphabet = IUPAC.IUPACUnambiguousDNA()
                    recOut.append(SeqRecord(seqOut, id=recID, features=sorted(featList, key=lambda x: x.location.start)))
featType = ""
featList = []
seqOut = None
recID = line[9:].strip()
continue
fields = line.split("\t")
if len(fields) > 2 and fields[2]:
if featType:
featList.append(SeqFeature(featLoc, featType, qualifiers=featQuals))
featType = fields[2].strip()
if int(fields[0]) > int(fields[1]):
featStrand = -1
else:
featStrand = 1
featLoc = FeatureLocation(min(int(fields[0]), int(fields[1])) -1 , max(int(fields[0]), int(fields[1])), strand=featStrand)
featQuals = {}
elif fields[0].strip():
if int(fields[0]) > int(fields[1]):
featStrand = -1
else:
featStrand = 1
if featStrand == -1:
featLoc = FeatureLocation(min(int(fields[0]), int(fields[1])) - 1, max(int(fields[0]), int(fields[1])), strand=featStrand) + featLoc
else:
featLoc = featLoc + FeatureLocation(min(int(fields[0]), int(fields[1])) - 1, max(int(fields[0]), int(fields[1])), strand=featStrand)
elif fields[3].strip():
if fields[3].strip() in featQuals.keys():
featQuals[fields[3].strip()].append(fields[4].strip())
else:
featQuals[fields[3].strip()] = [fields[4].strip()]
# else blank line
    # flush the final record, including its last pending feature
    if featType:
        featList.append(SeqFeature(featLoc, featType, qualifiers=featQuals))
    if len(seqList) == 1:
        seqOut = seqList[0].seq
    else:
        for x in seqList:
            if x.id == recID:
                seqOut = x.seq
    if seqOut is None:
        log.error("Unable to find associated sequence for 5-column record " + recID + ", unable to construct Genbank")
    else:
        seqOut.alphabet = IUPAC.IUPACUnambiguousDNA()
        recOut.append(SeqRecord(seqOut, id=recID, features=sorted(featList, key=lambda x: x.location.start)))
return recOut
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert a Genbank file into five column format"
)
parser.add_argument("tabIn", type=argparse.FileType("r"), help="Five Column tabular input")
parser.add_argument("seqIn", type=argparse.FileType("r"), help="Associated Sequence(s) (Fasta input)")
args = vars(parser.parse_args())
for rec in fiveColToGbk(**args):
SeqIO.write(rec, sys.stdout, "genbank")
|
TAMU-CPT/galaxy-tools
|
tools/gbk/five_col_to_gbk.py
|
Python
|
gpl-3.0
| 3,243
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-05-21 16:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('judge', '0050_merge'),
]
operations = [
migrations.AlterModelOptions(
name='problem',
options={'ordering': ['-id'], 'permissions': (('retest_problem', 'Can start a retest'), ('change_visibility_of_problem', 'Can change the visibility of a problem'), ('see_hidden_problems', 'Can see hidden problems'), ('add_media_to_problem', 'Can upload media for a problem'), ('add_checker_to_problem', 'Can add a checker for a problem'), ('add_grader_to_problem', 'Can add a custom grader for a problem'), ('import_problem', 'Can import problems'))},
),
migrations.AddField(
model_name='problem',
name='custom_grader',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='problem',
name='grader_header',
field=models.TextField(blank=True, verbose_name='Grader header code'),
),
migrations.AddField(
model_name='problem',
name='grader_header_file_name',
field=models.CharField(blank=True, max_length=32, verbose_name='Filename for the grader header file'),
),
migrations.AddField(
model_name='problem',
name='grader_source',
field=models.TextField(blank=True, verbose_name='Grader source code'),
),
]
|
Alaxe/judgeSystem
|
judge/migrations/0051_auto_20160521_1932.py
|
Python
|
gpl-2.0
| 1,608
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: feature-scale-param.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='feature-scale-param.proto',
package='com.webank.ai.fate.core.mlmodel.buffer',
syntax='proto3',
serialized_options=_b('B\017ScaleParamProto'),
serialized_pb=_b('\n\x19\x66\x65\x61ture-scale-param.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\"\xec\x01\n\nScaleParam\x12^\n\x0f\x63ol_scale_param\x18\x01 \x03(\x0b\x32\x45.com.webank.ai.fate.core.mlmodel.buffer.ScaleParam.ColScaleParamEntry\x12\x0e\n\x06header\x18\x02 \x03(\t\x1an\n\x12\x43olScaleParamEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12G\n\x05value\x18\x02 \x01(\x0b\x32\x38.com.webank.ai.fate.core.mlmodel.buffer.ColumnScaleParam:\x02\x38\x01\"Y\n\x10\x43olumnScaleParam\x12\x14\n\x0c\x63olumn_upper\x18\x03 \x01(\x01\x12\x14\n\x0c\x63olumn_lower\x18\x04 \x01(\x01\x12\x0c\n\x04mean\x18\x05 \x01(\x01\x12\x0b\n\x03std\x18\x06 \x01(\x01\x42\x11\x42\x0fScaleParamProtob\x06proto3')
)
_SCALEPARAM_COLSCALEPARAMENTRY = _descriptor.Descriptor(
name='ColScaleParamEntry',
full_name='com.webank.ai.fate.core.mlmodel.buffer.ScaleParam.ColScaleParamEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='com.webank.ai.fate.core.mlmodel.buffer.ScaleParam.ColScaleParamEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='com.webank.ai.fate.core.mlmodel.buffer.ScaleParam.ColScaleParamEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=196,
serialized_end=306,
)
_SCALEPARAM = _descriptor.Descriptor(
name='ScaleParam',
full_name='com.webank.ai.fate.core.mlmodel.buffer.ScaleParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='col_scale_param', full_name='com.webank.ai.fate.core.mlmodel.buffer.ScaleParam.col_scale_param', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='header', full_name='com.webank.ai.fate.core.mlmodel.buffer.ScaleParam.header', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SCALEPARAM_COLSCALEPARAMENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=306,
)
_COLUMNSCALEPARAM = _descriptor.Descriptor(
name='ColumnScaleParam',
full_name='com.webank.ai.fate.core.mlmodel.buffer.ColumnScaleParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='column_upper', full_name='com.webank.ai.fate.core.mlmodel.buffer.ColumnScaleParam.column_upper', index=0,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='column_lower', full_name='com.webank.ai.fate.core.mlmodel.buffer.ColumnScaleParam.column_lower', index=1,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean', full_name='com.webank.ai.fate.core.mlmodel.buffer.ColumnScaleParam.mean', index=2,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='std', full_name='com.webank.ai.fate.core.mlmodel.buffer.ColumnScaleParam.std', index=3,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=308,
serialized_end=397,
)
_SCALEPARAM_COLSCALEPARAMENTRY.fields_by_name['value'].message_type = _COLUMNSCALEPARAM
_SCALEPARAM_COLSCALEPARAMENTRY.containing_type = _SCALEPARAM
_SCALEPARAM.fields_by_name['col_scale_param'].message_type = _SCALEPARAM_COLSCALEPARAMENTRY
DESCRIPTOR.message_types_by_name['ScaleParam'] = _SCALEPARAM
DESCRIPTOR.message_types_by_name['ColumnScaleParam'] = _COLUMNSCALEPARAM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ScaleParam = _reflection.GeneratedProtocolMessageType('ScaleParam', (_message.Message,), dict(
ColScaleParamEntry = _reflection.GeneratedProtocolMessageType('ColScaleParamEntry', (_message.Message,), dict(
DESCRIPTOR = _SCALEPARAM_COLSCALEPARAMENTRY,
__module__ = 'feature_scale_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.ScaleParam.ColScaleParamEntry)
))
,
DESCRIPTOR = _SCALEPARAM,
__module__ = 'feature_scale_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.ScaleParam)
))
_sym_db.RegisterMessage(ScaleParam)
_sym_db.RegisterMessage(ScaleParam.ColScaleParamEntry)
ColumnScaleParam = _reflection.GeneratedProtocolMessageType('ColumnScaleParam', (_message.Message,), dict(
DESCRIPTOR = _COLUMNSCALEPARAM,
__module__ = 'feature_scale_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.ColumnScaleParam)
))
_sym_db.RegisterMessage(ColumnScaleParam)
DESCRIPTOR._options = None
_SCALEPARAM_COLSCALEPARAMENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
FederatedAI/FATE
|
python/federatedml/protobuf/generated/feature_scale_param_pb2.py
|
Python
|
apache-2.0
| 7,513
|
def b36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
"""Converts an integer to a base36 string.
http://en.wikipedia.org/wiki/Base_36#Python_implementation
"""
if not isinstance(number, (int, long)):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
    if 0 <= number < len(alphabet):
        # lowercase here too, matching the multi-digit path below
        return (sign + alphabet[number]).lower()
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36.lower()
def b36decode(number):
return int(number.upper(), 36)
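# Round-trip sketch (illustrative values, not part of the original module):
#   >>> b36encode(123456789)
#   '21i3v9'
#   >>> b36decode('21i3v9')
#   123456789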
|
okffi/decisions
|
web/decisions/ahjo/utils.py
|
Python
|
bsd-3-clause
| 664
|
'''Batch index URLs to tracker items.'''
import argparse
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('file')
args = arg_parser.parse_args()
batch = set()
category = True
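    # Names are emitted in two phases: category pages (those with '?cat') are
    # flushed as "cat:" batches; once the first non-category name appears, the
    # remaining names are flushed as "index:" batches of up to 10.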
with open(args.file) as f:
for line in f:
name = line.strip().replace('http://voices.yahoo.com/', '').replace('.html', '')
if not name:
continue
assert ',' not in name
batch.add(name)
if len(batch) >= 10 or ('?cat' not in name and category):
if category:
print('cat:' + ','.join(sorted(batch)))
else:
print('index:' + ','.join(sorted(batch)))
batch.clear()
if '?cat' not in name and category:
category = False
if batch:
print('index:' + ','.join(sorted(batch)))
if __name__ == '__main__':
main()
|
ArchiveTeam/yahoo-voices-grab
|
utils/index2items.py
|
Python
|
unlicense
| 958
|
from biicode.client.exception import ClientException
from biicode.client.store.sqlite import SQLiteDB, encode_serialized_value, decode_serialized_value
from biicode.common.model.content import ContentDeserializer
from biicode.common.model.cells import CellDeserializer
from biicode.common.utils.bii_logging import logger
from biicode.common.model.id import ID
from biicode.common.model.symbolic.reference import Reference, ReferencedResources
from biicode.common.model.resource import Resource
from biicode.common.model.symbolic.block_version_table import BlockVersionTable
from biicode.client.store.blob_sqlite import BlobSQLite
from biicode.common.model.brl.cell_name import CellName
from biicode.common.utils.serializer import ListDeserializer
from biicode.common.model.block_delta import BlockDelta
import traceback
PUBLISHED_CELLS = "cells" # ID => PublishedCell
PUBLISHED_CONTENTS = "contents" # ID => PublishedContent
SNAPSHOTS = 'snapshots'
# There are more than one reference to same ID, we store ID and the
# cell/content in the previous tables
# (for not repeat contents!)
PUBLISHED_REFERENCES = "refs" # Reference => Cell ID, Content ID
DEP_TABLES = "dep_tables"
DELTAS = "deltas"
class LocalDB(BlobSQLite):
def __init__(self, dbfile):
super(LocalDB, self).__init__(dbfile)
self.connect()
self.init()
def connect(self):
SQLiteDB.connect(self)
statement = None
try:
statement = self.connection.cursor()
except Exception as e:
raise ClientException(e)
finally:
if statement:
statement.close()
def init(self):
SQLiteDB.init(self)
cursor = None
try:
cursor = self.connection.cursor()
self.create_table(cursor, PUBLISHED_CONTENTS)
self.create_table(cursor, PUBLISHED_CELLS)
self.create_table(cursor, SNAPSHOTS)
self.create_table(cursor, DEP_TABLES)
self.create_table(cursor, DELTAS)
# To avoid multiple usernames in the login table, use always "login" as id
cursor.execute("create table if not exists login (id TEXT UNIQUE, "
"username TEXT UNIQUE, token TEXT)")
cursor.execute("create table if not exists %s "
"(reference TEXT UNIQUE, cell_id TEXT, content_id TEXT)"
% PUBLISHED_REFERENCES)
cursor.execute("CREATE INDEX if not exists cell_id_index ON %s (cell_id)"
% (PUBLISHED_REFERENCES))
cursor.execute("CREATE INDEX if not exists content_id_index ON %s (content_id)"
% (PUBLISHED_REFERENCES))
except Exception as e:
message = "Could not initalize local cache"
raise ClientException(message, e)
finally:
if cursor:
cursor.close()
def get_login(self):
'''Returns login credentials.
This method is also in charge of expiring them.
'''
try:
statement = self.connection.cursor()
statement.execute('select * from login where id="login"')
rs = statement.fetchone()
if not rs:
return None, None
name = rs[1]
token = rs[2]
return name, token
except Exception:
raise ClientException("Could not retrieve login from local cache\n"
"Try removing the .biicode folder in your home folder")
def get_username(self):
return self.get_login()[0]
def set_login(self, login):
"""Login is a tuple of (login, token)"""
try:
statement = self.connection.cursor()
statement.execute("INSERT OR REPLACE INTO login (id, username, token) "
"VALUES (?, ?, ?)",
("login", login[0], login[1]))
self.connection.commit()
except Exception as e:
raise ClientException("Could not store credentials in local cache", e)
def get_dep_table(self, block_version):
ID = encode_serialized_value(block_version.serialize())
return self.read(ID, DEP_TABLES, BlockVersionTable)
def set_dep_table(self, block_version, dep_table):
assert isinstance(dep_table, BlockVersionTable)
ID = encode_serialized_value(block_version.serialize())
self.create(ID, dep_table, DEP_TABLES)
def get_cells_snapshot(self, block_version):
ID = encode_serialized_value(block_version.serialize())
return self.read(ID, SNAPSHOTS, ListDeserializer(CellName))
def create_cells_snapshot(self, block_version, snapshot):
"""Snapshot is a list with cell names for an specific block version ''' """
ID = encode_serialized_value(block_version.serialize())
self.create(ID, snapshot, SNAPSHOTS)
def get_delta_info(self, block_version):
ID = encode_serialized_value(block_version.serialize())
return self.read(ID, DELTAS, BlockDelta)
def upsert_delta_info(self, block_version, delta_info):
"""Snapshot is a list with cell names for an specific block version ''' """
# Don't store origin info in localdb (url field crashes because of ://)
delta_info.origin = None
ID = encode_serialized_value(block_version.serialize())
self.upsert(ID, delta_info, DELTAS)
def remove_dev_references(self, block_version):
ser_version = encode_serialized_value(block_version.serialize())
self.delete(ser_version, DEP_TABLES)
self.delete(ser_version, SNAPSHOTS)
self.delete(ser_version, DELTAS)
c = self.connection.cursor()
command = 'DELETE from {table} where reference LIKE (?);'.format(table=PUBLISHED_REFERENCES)
c.execute(command, ("%{}%".format(ser_version),))
self.connection.commit()
# TODO: What happens to cells & contents? Not deleted?
def get_published_resources(self, references):
'''
Parameters:
references: a References (biicode.common.model.symbolic.reference.References) object
'''
simple_refs = references.explode() # references object to reference list
        # each reference is stored as a serialized string because it is a tuple
ids = ",".join(["\"%s\"" % encode_serialized_value(v.serialize()) for v in simple_refs])
return self._read_referenced_resources(self.__query_published_references(ids), ID)
def create_published_resources(self, referenced_resources):
'''
Params:
referenced_resources = ReferencedResources (biicode.common.model.symbolic.reference)
'''
statement = self.connection.cursor()
for reference, resource in referenced_resources.explode().iteritems():
self._query_create_published_reference(reference, resource, statement)
self.connection.commit()
def _read_referenced_resources(self, query, id_type):
statement = query
ret = ReferencedResources()
rs = statement.fetchall()
cell_des = CellDeserializer(id_type)
content_des = ContentDeserializer(id_type)
for r in rs:
try:
v = Reference.deserialize(decode_serialized_value(r[0]))
scontent = decode_serialized_value(r[2]) if r[2] else None
res = Resource(cell_des.deserialize(decode_serialized_value(r[1])),
content_des.deserialize(scontent))
cell_name = v.ref
ret[v.block_version][cell_name] = res
# logger.debug("Local found: %s/%s" % (str(v.block_version), str(cell_name)))
except Exception as e:
tb = traceback.format_exc()
logger.error("Error while reading resources %s" % str(e))
logger.debug(tb)
return ret
def __query_published_references(self, urls):
q = '''SELECT %(pub_ref)s.reference as reference,
%(pub_cells)s.blob as cell,
%(pub_contents)s.blob as content
FROM %(pub_ref)s
JOIN %(pub_cells)s ON %(pub_ref)s.cell_id=%(pub_cells)s.id
LEFT JOIN %(pub_contents)s ON %(pub_ref)s.content_id=%(pub_contents)s.id
WHERE reference IN (%(refs)s)''' % {"pub_ref": PUBLISHED_REFERENCES,
"pub_cells": PUBLISHED_CELLS,
"pub_contents": PUBLISHED_CONTENTS,
"refs": urls}
statement = self.connection.cursor()
return statement.execute(q)
def _query_create_published_reference(self, reference, resource, statement):
query = ("INSERT OR REPLACE INTO %s (reference, cell_id, content_id) VALUES (?, ?, ?)"
% PUBLISHED_REFERENCES)
        content_id = repr(resource.content.ID) if resource.content else None
        statement.execute(query, (encode_serialized_value(reference.serialize()),
                                  repr(resource.cell.ID),
                                  content_id))
        query = "REPLACE INTO %s (id, blob) VALUES (?, ?)" % (PUBLISHED_CELLS)
        statement.execute(query, (repr(resource.cell.ID),
                                  encode_serialized_value(resource.cell.serialize())))
        if content_id:
            query = "REPLACE INTO %s (id, blob) VALUES (?, ?)" % (PUBLISHED_CONTENTS)
            statement.execute(query, (repr(resource.content.ID),
                                      encode_serialized_value(resource.content.serialize())))
def clean(self):
self.delete_all(PUBLISHED_CELLS)
self.delete_all(PUBLISHED_CONTENTS)
self.delete_all(PUBLISHED_REFERENCES)
self.delete_all(SNAPSHOTS)
self.delete_all(DEP_TABLES)
self.delete_all(DELTAS)
        # Never lose who the user is. Only invalidate the token
login, _ = self.get_login()
self.set_login((login, None))
self.vacuum()
|
drodri/client
|
store/localdb.py
|
Python
|
mit
| 10,218
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-15 14:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('phrasebook', '0010_auto_20170415_1417'),
]
operations = [
migrations.AlterField(
model_name='word',
name='created_on',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date created'),
),
]
|
DanCatchpole/phrasebook-django
|
phrasebook/migrations/0011_auto_20170415_1518.py
|
Python
|
mit
| 538
|
# -*- coding: utf-8 -*-
import ast
import re
from .base import BikeShareSystem, BikeShareStation
from . import utils
class Movete(BikeShareSystem):
meta = {
'system': 'Movete',
'company': ['Sistema de Transporte Metropolitano',
'Intendencia de Montevideo']
}
url = 'http://movete.montevideo.gub.uy/index.php?option=com_content&view=article&id=1&Itemid=2' # NOQA
def update(self, scraper=None):
if scraper is None:
scraper = utils.PyBikesScraper()
self.stations = []
data = scraper.request(self.url)
station_list_m = re.search(r'var paradas\s*=\s*(\[.+\]);', data,
flags=re.DOTALL)
if not station_list_m:
return
station_list = re.findall(r'\[\'.+?\'\]', station_list_m.group(1))
for data in station_list:
            data = ast.literal_eval(data)  # parse the list literal without eval()'s code-execution risk
if int(data[4]) == -1:
# Office marker
continue
name = data[0]
latitude = float(data[1])
longitude = float(data[2])
bikes = int(data[6])
slots = int(data[7])
free = slots - bikes
number = next(iter(re.findall(r'(\d+)', name)), None)
extra = {
'slots': slots,
'uid': data[3],
'number': number
}
station = BikeShareStation(name, latitude, longitude, bikes, free,
extra)
self.stations.append(station)
|
javnik36/pybikes
|
pybikes/movete.py
|
Python
|
lgpl-3.0
| 1,561
|
import logging
import pickle
import random
from gevent import Greenlet, sleep
from threading import Lock
from app import create_app
from dota_bot import DotaBot
from models import db, DynamicConfiguration, Game, GameStatus, GameVIP
from helpers.general import divide_vip_list_per_type
# Log
logging.basicConfig(format='[%(asctime)s] %(levelname)s %(message)s', level=logging.INFO)
class Credential:
"""A Steam account credentials.
Attributes:
login: Steam user login.
password: Steam user password.
"""
def __init__(self, login, password):
"""Create a user credentials.
Args:
login: user login.
password: user password.
"""
self.login = login
self.password = password
class WorkerManager(Greenlet):
"""Master class starting Dota bots to process jobs.
    The manager holds an initial pool of Steam credentials.
    It is a thread that polls jobs from the database, starting a new Dota bot whenever a job becomes available.
    After a job is processed, the Dota bot reports that its credentials are available again.
Attributes:
app: The flask application the manager is linked to, containing configuration objects and database access.
working_bots: A dictionary of all currently working Dota bots, indexed by bot login.
"""
def __init__(self):
"""Initialize the worker manager thread."""
Greenlet.__init__(self)
# Initialize
self.app = create_app()
self.working_bots = {}
self.credentials = []
self.mutex = Lock()
# Parse credentials from config
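        # STEAM_BOTS is assumed to be an '@'-separated alternation of logins
        # and passwords, e.g. 'login1@pass1@login2@pass2' (inferred from the
        # parsing loop below).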
bot_credentials_string = self.app.config['STEAM_BOTS']
bot_credentials = bot_credentials_string.split('@')
i = 0
while i < len(bot_credentials):
login = bot_credentials[i]
password = bot_credentials[i+1]
self.credentials.append(Credential(login, password))
i = i + 2
def _run(self):
"""Start the main loop of the thread, creating Dota bots to process available jobs."""
while True:
with self.app.app_context():
admins, casters = divide_vip_list_per_type(GameVIP.get_all_vips())
bot_pause = DynamicConfiguration.get('bot_pause', 'False')
for game in db.session().query(Game)\
.filter(Game.status==GameStatus.WAITING_FOR_BOT)\
.order_by(Game.id).all():
if len(self.credentials) == 0 or bot_pause == 'True':
continue
# Start a Dota bot to process the game
self.mutex.acquire()
credential = self.credentials.pop(random.randint(0, len(self.credentials) - 1))
g = DotaBot(self, credential, admins, casters, game.id, game.name, game.password,
game.team1, game.team2, game.team1_ids, game.team2_ids, game.team_choosing_first)
self.working_bots[credential.login] = g
game.status = GameStatus.CREATION_IN_PROGRESS
game.bot = credential.login
db.session().commit()
g.start()
self.mutex.release()
sleep(60)
def bot_end(self, credential):
"""Signal that a bot has finished it work and the credential is free to use again.
Args:
credential: `Credential` of the bot.
"""
self.mutex.acquire()
self.working_bots.pop(credential.login)
self.credentials.append(credential)
self.mutex.release()
# Start a Manager if this file is the main script.
if __name__ == '__main__':
g = WorkerManager()
g.start()
g.join()
|
FroggedTV/grenouilleAPI
|
backend/bot_app.py
|
Python
|
gpl-3.0
| 3,850
|
import numpy as np
import pandas as pd
from pipeline_helper_functions import *
from bag_of_words import *
# list of possible vertex metrics
vertex_metrics = ['indegree', 'outdegree', 'degree', 'd_pagerank',
'u_pagerank', 'd_closeness', 'u_closeness',
'd_betweenness', 'u_betweenness', 'authorities',
'hubs', 'd_eigen', 'u_eigen']
vertex_metrics += ['recentcite_' + str(t) for t in np.arange(100+1)]
def get_edge_data(G, edgelist, snapshot_df, columns_to_use,
tfidf_matrix=None, op_id_to_bow_id=None,
metric_normalization=None, edge_status=None):
"""
Returns a data frame for all edges from given edge list
for a given snapshot
Parameters
----------
G: graph (igraph object)
edgelist: igraph indices of edges whose data to get
snapshot_df: dictionary containing the snapshot information
columns_to_use: list of columns to use
tfidf_matrix: precomputed tfidf_matrix
op_id_to_bow_id: dict that maps CL ids to indices of tdidf matrix
edge_status: are the edges all present or absent or do we need to find out
metric_normalization: normalize the snapshot metrics
"""
# make sure columns_to_use is a list
if type(columns_to_use) == str:
columns_to_use = [columns_to_use]
num_edges = len(edgelist)
# CL ids of ed cases (indexes the snap_df rows)
ed_op_ids = [G.vs[edge[1]]['name'] for edge in edgelist]
ing_op_ids = [G.vs[edge[0]]['name'] for edge in edgelist]
# case dates
ed_year = np.array([G.vs[edge[1]]['year'] for edge in edgelist])
ing_year = np.array([G.vs[edge[0]]['year'] for edge in edgelist])
# ed metrics in ing year ordered by ed_op_ids
# note snapshot_df indices are ints
ed_metrics = snapshot_df.loc[[int(i) for i in ed_op_ids]]
# initialize edge data frame
edge_data = pd.DataFrame(index=zip(ing_op_ids, ed_op_ids))
edge_data.index.name = 'op_id' # op_id
# add columns to edge data frame
for metric in columns_to_use:
# which vertex metrics from the snapshot df to grab
# i.e. only grab vertex metric columns
        vertex_metrics_to_use = set(ed_metrics.columns).difference(['year'])
if metric in vertex_metrics_to_use:
edge_data[metric] = ed_metrics[metric].tolist()
elif metric == 'age':
edge_data[metric] = ing_year - ed_year
elif metric == 'ing_year':
edge_data[metric] = ing_year
elif metric == 'ed_year':
edge_data[metric] = ed_year
elif metric == 'similarity':
edge_data[metric] = compute_similarities(ing_op_ids, ed_op_ids,
tfidf_matrix,
op_id_to_bow_id)
# possibly normalize metrics
if metric_normalization:
# only normalize graph vertex metrics i.e. not age
metrics_to_normalize = set(columns_to_use).intersection(set(vertex_metrics))
# normalize metics that deserve it
for metric in metrics_to_normalize:
values = edge_data[metric]
scaling = get_scaling(values, metric_normalization, alpha=.05)
edge_data[metric] = values / scaling
# add edge status
if edge_status is not None:
if edge_status == 'present':
is_edge = [1] * num_edges
elif edge_status == 'absent':
is_edge = [0] * num_edges
elif edge_status == 'find':
# look up edge status
is_edge = [int(edge_is_present(G, e[0], e[1])) for e in edgelist]
edge_data['is_edge'] = is_edge
return edge_data
def get_scaling(values, scaling, alpha=.05):
    """
    Returns the scaling constant for the given values.
    alpha is a fraction in [0, 1]; np.percentile expects percentiles
    on a 0-100 scale, hence the factor of 100 below.
    """
    if scaling == 'mean':
        return np.mean(values)
    # robust means: trim extreme values before averaging
    elif scaling == 'upper trimmed mean':
        upper_bound = np.percentile(values, 100 * (1 - alpha))
        values_trimmed = values[values <= upper_bound]
        return np.mean(values_trimmed)
    elif scaling == 'lower trimmed mean':
        lower_bound = np.percentile(values, 100 * alpha)
        values_trimmed = values[values >= lower_bound]
        return np.mean(values_trimmed)
    elif scaling == 'trimmed mean':
        upper_bound = np.percentile(values, 100 * (1 - alpha))
        lower_bound = np.percentile(values, 100 * alpha)
        values_trimmed = values[(values >= lower_bound) & (values <= upper_bound)]
        return np.mean(values_trimmed)
    elif scaling == 'median':
        return np.median(values)
    elif scaling == 'max':
        return np.max(values)
    elif scaling == 'percentile':
        return np.percentile(values, 100 * alpha)
    else:
        raise ValueError('%s not implemented' % scaling)
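# A minimal illustration (not part of the original pipeline; values made up):
# normalize a metric column by its trimmed mean, as get_edge_data does when
# metric_normalization='trimmed mean'.
if __name__ == '__main__':
    demo_values = pd.Series([1.0, 2.0, 3.0, 4.0, 100.0])
    scale = get_scaling(demo_values, 'trimmed mean', alpha=.05)
    print(demo_values / scale)  # the outlier no longer dominates the scale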
|
idc9/law-net
|
vertex_metrics_experiment/code/get_edge_data.py
|
Python
|
mit
| 4,783
|
# Copyright (C) 2018-2019 Matthias Klumpp <matthias@tenstral.net>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import logging as log
from glob import glob
from laniakea import LkModule
from laniakea.dud import Dud
from laniakea.utils import get_dir_shorthand_for_uuid, random_string
from laniakea.db import session_scope, Job, JobResult, JobKind, SourcePackage
from laniakea.msgstream import EventEmitter
from .rubiconfig import RubiConfig
from .utils import safe_rename
def accept_upload(conf, dud, event_emitter):
'''
Accept the upload and move its data to the right places.
'''
job_success = dud.get('X-Spark-Success') == 'Yes'
job_id = dud.get('X-Spark-Job')
# mark job as accepted and done
with session_scope() as session:
job = session.query(Job).filter(Job.uuid == job_id).one_or_none()
if not job:
log.error('Unable to mark job \'{}\' as done: The Job was not found.'.format(job_id))
# this is a weird situation, there is no proper way to handle it as this indicates a bug
# in the Laniakea setup or some other oddity.
# The least harmful thing to do is to just leave the upload alone and try again later.
return
job.result = JobResult.SUCCESS if job_success else JobResult.FAILURE
job.latest_log_excerpt = None
# move the log file and Firehose reports to the log storage
log_target_dir = os.path.join(conf.log_storage_dir, get_dir_shorthand_for_uuid(job_id))
firehose_target_dir = os.path.join(log_target_dir, 'firehose')
for fname in dud.get_files():
if fname.endswith('.log'):
os.makedirs(log_target_dir, exist_ok=True)
# move the logfile to its destination and ensure it is named correctly
target_fname = os.path.join(log_target_dir, job_id + '.log')
safe_rename(fname, target_fname)
elif fname.endswith('.firehose.xml'):
os.makedirs(firehose_target_dir, exist_ok=True)
# move the firehose report to its own directory and rename it
fh_target_fname = os.path.join(firehose_target_dir, job_id + '.firehose.xml')
safe_rename(fname, fh_target_fname)
# handle different job data
if job.module == LkModule.ISOTOPE:
from .import_isotope import handle_isotope_upload
handle_isotope_upload(session,
success=job_success,
conf=conf,
dud=dud,
job=job,
event_emitter=event_emitter)
elif job.kind == JobKind.PACKAGE_BUILD:
# the package has been imported by Dak, so we just announce this
# event to the world
spkg = session.query(SourcePackage) \
.filter(SourcePackage.source_uuid == job.trigger) \
.filter(SourcePackage.version == job.version) \
.one_or_none()
if spkg:
suite_target_name = '?'
if job.data:
suite_target_name = job.data.get('suite', '?')
event_data = {'pkgname': spkg.name,
'version': job.version,
'architecture': job.architecture,
'suite': suite_target_name,
'job_id': job_id}
if job_success:
event_emitter.submit_event_for_mod(LkModule.ARCHIVE, 'package-build-success', event_data)
else:
event_emitter.submit_event_for_mod(LkModule.ARCHIVE, 'package-build-failed', event_data)
else:
event_emitter.submit_event('upload-accepted', {'job_id': job_id, 'job_failed': not job_success})
# remove the upload description file from incoming
os.remove(dud.get_dud_file())
log.info("Upload {} accepted.", dud.get_filename())
def reject_upload(conf, dud, reason='Unknown', event_emitter=None):
'''
If a file has issues, we reject it and put it into the rejected queue.
'''
os.makedirs(conf.rejected_dir, exist_ok=True)
# move the files referenced by the .dud file
random_suffix = random_string(4)
for fname in dud.get_files():
target_fname = os.path.join(conf.rejected_dir, os.path.basename(fname))
if os.path.isfile(target_fname):
target_fname = target_fname + '+' + random_suffix
# move the file to the rejected dir
safe_rename(fname, target_fname)
# move the .dud file itself
target_fname = os.path.join(conf.rejected_dir, dud.get_filename())
if os.path.isfile(target_fname):
target_fname = target_fname + '+' + random_suffix
safe_rename(dud.get_dud_file(), target_fname)
# also store the reject reason for future reference
with open(target_fname + '.reason', 'w') as f:
f.write(reason + '\n')
    log.info('Upload %s rejected.', dud.get_filename())
if event_emitter:
event_emitter.submit_event('upload-rejected', {'dud_filename': dud.get_filename(), 'reason': reason})
def import_files_from(conf, incoming_dir):
'''
Import files from an untrusted incoming source.
IMPORTANT: We assume that the uploader can not edit their files post-upload.
If they could, we would be vulnerable to timing attacks here.
'''
emitter = EventEmitter(LkModule.RUBICON)
for dud_file in glob(os.path.join(incoming_dir, '*.dud')):
dud = Dud(dud_file)
try:
dud.validate(keyrings=conf.trusted_gpg_keyrings)
except Exception as e:
reason = 'Signature validation failed: {}'.format(str(e))
reject_upload(conf, dud, reason, emitter)
continue
# if we are here, the file is good to go
accept_upload(conf, dud, emitter)
def import_files(options):
conf = RubiConfig()
if not options.incoming_dir:
        print('No incoming directory set. Cannot process any files.')
sys.exit(1)
import_files_from(conf, options.incoming_dir)
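# Illustrative only: a minimal driver for this module. import_files() above
# expects an options object with an `incoming_dir` attribute; the path used
# here is made up.
#
#   class _DemoOptions:
#       incoming_dir = '/srv/laniakea/incoming'
#   import_files(_DemoOptions())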
|
lkorigin/laniakea
|
src/rubicon/rubicon/fileimport.py
|
Python
|
gpl-3.0
| 6,906
|
import pandas as pd
import datetime
def createTW(df):
def add_tw(x):
minute = x.minute
tw_minute = -1
if minute < 20:
tw_minute = 0
elif minute < 40:
tw_minute = 20
elif minute <= 60:
tw_minute = 40
return x.replace(minute=tw_minute, second=0)
# add tw
df['starting_time'] = pd.to_datetime(df['starting_time'])
df['tw'] = df['starting_time'].apply(lambda x: add_tw(x))
# get daterange
date_start = df['starting_time'].min()
date_end = df['starting_time'].max()
# enddate is next day midnight #normalize=True sets it to midnight
date_end = pd.to_datetime(date_end) + datetime.timedelta(days=1)
daterange = pd.date_range(start=date_start, end=date_end, normalize=True, closed='left', freq='2h')
return df, daterange
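# A minimal sketch (not part of the original module; values made up) showing
# the expected input: a data frame with a 'starting_time' column.
if __name__ == '__main__':
    demo = pd.DataFrame({'starting_time': ['2016-10-01 08:05:00',
                                           '2016-10-01 08:25:00',
                                           '2016-10-02 23:55:00']})
    demo, tw_range = createTW(demo)
    print(demo[['starting_time', 'tw']])  # each row mapped to its 20-minute window
    print(tw_range)  # 2-hour grid covering the observed dates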
|
Superchicken1/SambaFlow
|
python/traffic-prediction/src/vector_gen/createTW.py
|
Python
|
apache-2.0
| 847
|
import urllib
import json
import requests
import time
from collections import namedtuple
from config import CONFIG
class GoogleStreetView(object):
    # For passing the parameters to the API
    StreetViewParam = namedtuple("StreetViewParam", ["lat", "lng", "heading", "fov", "pitch"])
    # the api request address; "fov" (field of view) matches makeParameterDict below
    GOOGLE_API_KEY = CONFIG["gmap"]["apiKey"]
    STREET_IMAGE_API = "https://maps.googleapis.com/maps/api/streetview?" \
                       "size=640x640&" \
                       "location=%f,%f&" \
                       "heading=%f&" \
                       "fov=%f&" \
                       "pitch=%f&" \
                       "key=" + GOOGLE_API_KEY
# the metadata request address
METADATA_API = "https://maps.googleapis.com/maps/api/streetview/metadata?"
    # Google API query limit: pause after this many queries to stay under the per-second cap
TIME_TO_PAUSE_REQUEST = 9
queryTimes = 0
# Google API Error code
OVER_QUERY_LIMIT = "OVER_QUERY_LIMIT"
OK = "OK"
@classmethod
def isValidPoint(cls, params):
data = cls.getMetadata(params)
if data["status"] == cls.OVER_QUERY_LIMIT:
print "OVER_QUERY_LIMIT!!!"
exit(0)
return data["status"] == cls.OK
@classmethod
def getMetadata(cls, params):
cls.timeToPause()
response = requests.get(url=cls.METADATA_API, params=params)
return json.loads(response.text)
@classmethod
def getStreetViewLink(cls, params):
"""
get
:param params: (tuple) lat, lng, heading, pov
:param outputName: The link to the street view
"""
cls.timeToPause()
requestUrl = cls.STREET_IMAGE_API % params
return requestUrl
@classmethod
def timeToPause(cls):
cls.queryTimes += 1
if cls.queryTimes == cls.TIME_TO_PAUSE_REQUEST:
time.sleep(1)
cls.queryTimes = 0
@classmethod
def makeParameterDict(cls, lat, lng, heading, fov=90, pitch=0):
params = dict(
size="640x640",
location=str(lat) + "," + str(lng),
heading=str(heading),
fov=str(fov),
pitch=str(pitch),
key=cls.GOOGLE_API_KEY
)
return params
@classmethod
def makeParameter(cls, lat, lng, heading, fov=90, pitch=0):
"""
Make a StreetViewParam tuple according to the given values.
        There are default values for fov and pitch; pass explicit
        values to override them.
:param lat: (float) latitude
:param lng: (float) longitude
:param heading: (float) the direction:
0 or 360 = North; 90 = East; 180 = South; 270 = West
:param fov: (float) width of the view (0 - 120)
:param pitch: (float) up or down angle (0 - 90)
:return: a StreetViewParam tuple
"""
return cls.StreetViewParam(lat, lng, heading, fov, pitch)
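# Illustrative usage (coordinates made up; requires a valid API key in CONFIG).
# Kept as a comment sketch so that importing this module issues no requests:
#
#   params = GoogleStreetView.makeParameter(lat=40.748, lng=-73.986, heading=90)
#   link = GoogleStreetView.getStreetViewLink(params)
#   meta_params = GoogleStreetView.makeParameterDict(40.748, -73.986, heading=90)
#   if GoogleStreetView.isValidPoint(meta_params):
#       print link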
|
jasonlingo/StreetViewRoadSafety
|
src/googleStreetView.py
|
Python
|
mit
| 2,942
|
#!/usr/bin/python
#
#Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import argparse
import ConfigParser
from provision_dns import DnsProvisioner
from requests.exceptions import ConnectionError
class AddVirtualDns(object):
def __init__(self, args_str = None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
if not self._args.ttl:
self._args.ttl = 86400
if self._args.ttl < 0 or self._args.ttl > 2147483647:
print 'Invalid ttl value ' , self._args.ttl
return
if not DnsProvisioner.is_valid_ipv4_address(self._args.api_server_ip):
print 'Invalid IPv4 address ', self._args.api_server_ip
return
if not DnsProvisioner.is_valid_dns_name(self._args.dns_domain):
print 'Domain name does not satisfy DNS name requirements: ', self._args.dns_domain
return
try:
dp_obj = DnsProvisioner(self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip, self._args.api_server_port)
except ConnectionError:
print 'Connection to API server failed '
return
if self._args.dyn_updates:
dyn_updates = 'true'
else:
dyn_updates = 'false'
dp_obj.add_virtual_dns(self._args.name, self._args.domain_name,
self._args.dns_domain, dyn_updates,
self._args.record_order, self._args.ttl,
self._args.next_vdns)
#end __init__
def _parse_args(self, args_str):
'''
Eg. python add_virtual_dns.py --name vdns1 --domain_name default-domain
                                        --dns_domain example.com --dyn_updates
--record_order fixed --ttl 20000
--next_vdns default-domain:vdns2
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help = False)
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'api_server_ip' : '127.0.0.1',
'api_server_port' : '8082',
'admin_user': None,
'admin_password': None,
'admin_tenant_name': None
}
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**defaults)
parser.add_argument("--name", help = "Virtual DNS Name")
parser.add_argument("--domain_name", help = "Domain Name")
parser.add_argument("--dns_domain", help = "DNS Domain name")
parser.add_argument("--dyn_updates", help = "Enable Dynamic DNS updates", action="store_true")
parser.add_argument("--record_order", choices=['fixed', 'random', 'round-robin'], help = "Order used for DNS resolution")
parser.add_argument("--ttl", type = int, help = "Time to Live for DNS records")
parser.add_argument("--next_vdns", help = "Next Virtual DNS Server")
parser.add_argument("--api_server_ip", help = "IP address of api server")
parser.add_argument("--api_server_port", type = int, help = "Port of api server")
parser.add_argument("--admin_user", help = "Name of keystone admin user")
parser.add_argument("--admin_password", help = "Password of keystone admin user")
parser.add_argument("--admin_tenant_name", help = "Tenamt name for keystone admin user")
self._args = parser.parse_args(remaining_argv)
#end _parse_args
# end class AddVirtualDns
def main(args_str = None):
AddVirtualDns(args_str)
#end main
if __name__ == "__main__":
main()
|
Juniper/contrail-controller-test
|
src/dns/scripts/add_virtual_dns.py
|
Python
|
apache-2.0
| 4,324
|
""" JSON service providing IEMRE data for a given point """
import os
import datetime
import json
import numpy as np
from paste.request import parse_formvars
from pyiem import iemre
from pyiem.util import ncopen, convert_value
import pyiem.prism as prismutil
def myrounder(val, precision):
"""round a float or give back None"""
if val is None or np.isnan(val) or np.ma.is_masked(val):
return None
return round(val, precision)
def application(environ, start_response):
"""Do Something Fun!"""
form = parse_formvars(environ)
ts = datetime.datetime.strptime(form.get("date", "2019-03-01"), "%Y-%m-%d")
lat = float(form.get("lat", 41.99))
lon = float(form.get("lon", -95.1))
fmt = form.get("format", "json")
if fmt != "json":
headers = [("Content-type", "text/plain")]
start_response("200 OK", headers)
return [b"ERROR: Service only emits json at this time"]
i, j = iemre.find_ij(lon, lat)
offset = iemre.daily_offset(ts)
res = {"data": []}
fn = iemre.get_daily_ncname(ts.year)
headers = [("Content-type", "application/json")]
start_response("200 OK", headers)
if not os.path.isfile(fn):
return [json.dumps(res).encode("ascii")]
if i is None or j is None:
data = {"error": "Coordinates outside of domain"}
return [json.dumps(data).encode("ascii")]
if ts.year > 1980:
ncfn = "/mesonet/data/prism/%s_daily.nc" % (ts.year,)
if not os.path.isfile(ncfn):
prism_precip = None
else:
i2, j2 = prismutil.find_ij(lon, lat)
with ncopen(ncfn) as nc:
prism_precip = nc.variables["ppt"][offset, j2, i2] / 25.4
else:
prism_precip = None
if ts.year > 2000:
ncfn = iemre.get_daily_mrms_ncname(ts.year)
if not os.path.isfile(ncfn):
mrms_precip = None
else:
j2 = int((lat - iemre.SOUTH) * 100.0)
i2 = int((lon - iemre.WEST) * 100.0)
with ncopen(ncfn) as nc:
mrms_precip = nc.variables["p01d"][offset, j2, i2] / 25.4
else:
mrms_precip = None
c2000 = ts.replace(year=2000)
coffset = iemre.daily_offset(c2000)
with ncopen(fn) as nc:
with ncopen(iemre.get_dailyc_ncname()) as cnc:
res["data"].append(
{
"prism_precip_in": myrounder(prism_precip, 2),
"mrms_precip_in": myrounder(mrms_precip, 2),
"daily_high_f": myrounder(
convert_value(
nc.variables["high_tmpk"][offset, j, i],
"degK",
"degF",
),
1,
),
"12z_high_f": myrounder(
convert_value(
nc.variables["high_tmpk_12z"][offset, j, i],
"degK",
"degF",
),
1,
),
"climate_daily_high_f": myrounder(
convert_value(
cnc.variables["high_tmpk"][coffset, j, i],
"degK",
"degF",
),
1,
),
"daily_low_f": myrounder(
convert_value(
nc.variables["low_tmpk"][offset, j, i],
"degK",
"degF",
),
1,
),
"12z_low_f": myrounder(
convert_value(
nc.variables["low_tmpk_12z"][offset, j, i],
"degK",
"degF",
),
1,
),
"avg_dewpoint_f": myrounder(
convert_value(
nc.variables["avg_dwpk"][offset, j, i],
"degK",
"degF",
),
1,
),
"climate_daily_low_f": myrounder(
convert_value(
cnc.variables["low_tmpk"][coffset, j, i],
"degK",
"degF",
),
1,
),
"daily_precip_in": myrounder(
nc.variables["p01d"][offset, j, i] / 25.4, 2
),
"12z_precip_in": myrounder(
nc.variables["p01d_12z"][offset, j, i] / 25.4, 2
),
"climate_daily_precip_in": myrounder(
cnc.variables["p01d"][coffset, j, i] / 25.4, 2
),
"srad_mj": myrounder(
nc.variables["rsds"][offset, j, i]
* 86400.0
/ 1000000.0,
2,
),
"avg_windspeed_mps": myrounder(
nc.variables["wind_speed"][offset, j, i], 2
),
}
)
return [json.dumps(res).encode("ascii")]
|
akrherz/iem
|
htdocs/iemre/daily.py
|
Python
|
mit
| 5,523
|
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: redis_kv
author: Jan-Piet Mens <jpmens(at)gmail.com>
version_added: "0.9"
short_description: fetch data from Redis
description:
        - this lookup returns the values stored in Redis for the given keys
requirements:
- redis (python library https://github.com/andymccurdy/redis-py/)
options:
_terms:
        description: Two-element comma-separated strings composed of the URL of the Redis server and the key to query
options:
_url:
description: location of redis host in url format
default: 'redis://localhost:6379'
_key:
description: key to query
required: True
"""
EXAMPLES = """
- name: query redis for somekey
debug: msg="{{ lookup('redis_kv', 'redis://localhost:6379,somekey') }} is value in Redis for somekey"
"""
RETURN = """
_raw:
description: values stored in Redis
"""
import os
import re
HAVE_REDIS = False
try:
import redis
HAVE_REDIS = True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not HAVE_REDIS:
raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
ret = []
for term in terms:
(url, key) = term.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
ret.append(res)
            except Exception:
                ret.append("")  # connection to Redis failed
return ret
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/lookup/redis_kv.py
|
Python
|
bsd-3-clause
| 2,846
|
# coding=utf-8
# Copyright 2022 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
google-research/tensor2robot
|
utils/__init__.py
|
Python
|
apache-2.0
| 605
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
# Copyright (c) 2011 Neal H. Walfield
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# util.py -- Misc utility functions
# Thomas Perl <thp@perli.net> 2007-08-04
#
"""Miscellaneous helper functions for gPodder
This module provides helper and utility functions for gPodder that
are not tied to any specific part of gPodder.
"""
import gpodder
import logging
logger = logging.getLogger(__name__)
import os
import os.path
import platform
import glob
import stat
import shlex
import shutil
import socket
import sys
import string
import re
import subprocess
from htmlentitydefs import entitydefs
import time
import gzip
import datetime
import threading
import urlparse
import urllib
import urllib2
import httplib
import webbrowser
import mimetypes
import itertools
import feedparser
import StringIO
import xml.dom.minidom
if gpodder.ui.win32:
try:
import win32file
except ImportError:
logger.warn('Running on Win32 but win32api/win32file not installed.')
win32file = None
_ = gpodder.gettext
N_ = gpodder.ngettext
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except Exception, e:
logger.warn('Cannot set locale (%s)', e, exc_info=True)
# Native filesystem encoding detection
encoding = sys.getfilesystemencoding()
if encoding is None:
if 'LANG' in os.environ and '.' in os.environ['LANG']:
lang = os.environ['LANG']
(language, encoding) = lang.rsplit('.', 1)
logger.info('Detected encoding: %s', encoding)
elif gpodder.ui.harmattan:
encoding = 'utf-8'
elif gpodder.ui.win32:
# To quote http://docs.python.org/howto/unicode.html:
# ,,on Windows, Python uses the name "mbcs" to refer
# to whatever the currently configured encoding is``
encoding = 'mbcs'
else:
encoding = 'iso-8859-15'
logger.info('Assuming encoding: ISO-8859-15 ($LANG not set).')
# Filename / folder name sanitization
def _sanitize_char(c):
if c in string.whitespace:
return ' '
elif c in ',-.()':
return c
elif c in string.punctuation or ord(c) <= 31:
return '_'
return c
SANITIZATION_TABLE = ''.join(map(_sanitize_char, map(chr, range(256))))
del _sanitize_char
_MIME_TYPE_LIST = [
('.aac', 'audio/aac'),
('.axa', 'audio/annodex'),
('.flac', 'audio/flac'),
('.m4b', 'audio/m4b'),
('.m4a', 'audio/mp4'),
('.mp3', 'audio/mpeg'),
('.spx', 'audio/ogg'),
('.oga', 'audio/ogg'),
('.ogg', 'audio/ogg'),
('.wma', 'audio/x-ms-wma'),
('.3gp', 'video/3gpp'),
('.axv', 'video/annodex'),
('.divx', 'video/divx'),
('.m4v', 'video/m4v'),
('.mp4', 'video/mp4'),
('.ogv', 'video/ogg'),
('.mov', 'video/quicktime'),
('.flv', 'video/x-flv'),
('.mkv', 'video/x-matroska'),
('.wmv', 'video/x-ms-wmv'),
('.opus', 'audio/opus'),
]
_MIME_TYPES = dict((k, v) for v, k in _MIME_TYPE_LIST)
_MIME_TYPES_EXT = dict(_MIME_TYPE_LIST)
def make_directory( path):
"""
Tries to create a directory if it does not exist already.
Returns True if the directory exists after the function
call, False otherwise.
"""
if os.path.isdir( path):
return True
try:
os.makedirs( path)
except:
logger.warn('Could not create directory: %s', path)
return False
return True
def normalize_feed_url(url):
"""
Converts any URL to http:// or ftp:// so that it can be
used with "wget". If the URL cannot be converted (invalid
or unknown scheme), "None" is returned.
This will also normalize feed:// and itpc:// to http://.
>>> normalize_feed_url('itpc://example.org/podcast.rss')
'http://example.org/podcast.rss'
If no URL scheme is defined (e.g. "curry.com"), we will
simply assume the user intends to add a http:// feed.
>>> normalize_feed_url('curry.com')
'http://curry.com/'
There are even some more shortcuts for advanced users
and lazy typists (see the source for details).
>>> normalize_feed_url('fb:43FPodcast')
'http://feeds.feedburner.com/43FPodcast'
It will also take care of converting the domain name to
all-lowercase (because domains are not case sensitive):
>>> normalize_feed_url('http://Example.COM/')
'http://example.com/'
Some other minimalistic changes are also taken care of,
e.g. a ? with an empty query is removed:
>>> normalize_feed_url('http://example.org/test?')
'http://example.org/test'
Username and password in the URL must not be affected
by URL normalization (see gPodder bug 1942):
>>> normalize_feed_url('http://UserName:PassWord@Example.com/')
'http://UserName:PassWord@example.com/'
"""
if not url or len(url) < 8:
return None
# This is a list of prefixes that you can use to minimize the amount of
# keystrokes that you have to use.
# Feel free to suggest other useful prefixes, and I'll add them here.
PREFIXES = {
'fb:': 'http://feeds.feedburner.com/%s',
'yt:': 'http://www.youtube.com/rss/user/%s/videos.rss',
'sc:': 'http://soundcloud.com/%s',
# YouTube playlists. To get a list of playlists per-user, use:
# https://gdata.youtube.com/feeds/api/users/<username>/playlists
'ytpl:': 'http://gdata.youtube.com/feeds/api/playlists/%s',
}
for prefix, expansion in PREFIXES.iteritems():
if url.startswith(prefix):
url = expansion % (url[len(prefix):],)
break
# Assume HTTP for URLs without scheme
    if '://' not in url:
url = 'http://' + url
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
# Domain name is case insensitive, but username/password is not (bug 1942)
if '@' in netloc:
authentication, netloc = netloc.rsplit('@', 1)
netloc = '@'.join((authentication, netloc.lower()))
else:
netloc = netloc.lower()
# Schemes and domain names are case insensitive
scheme = scheme.lower()
# Normalize empty paths to "/"
if path == '':
path = '/'
# feed://, itpc:// and itms:// are really http://
if scheme in ('feed', 'itpc', 'itms'):
scheme = 'http'
if scheme not in ('http', 'https', 'ftp', 'file'):
return None
# urlunsplit might return "a slighty different, but equivalent URL"
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def username_password_from_url(url):
r"""
Returns a tuple (username,password) containing authentication
data from the specified URL or (None,None) if no authentication
data can be found in the URL.
See Section 3.1 of RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt)
>>> username_password_from_url('https://@host.com/')
('', None)
>>> username_password_from_url('telnet://host.com/')
(None, None)
>>> username_password_from_url('ftp://foo:@host.com/')
('foo', '')
>>> username_password_from_url('http://a:b@host.com/')
('a', 'b')
>>> username_password_from_url(1)
Traceback (most recent call last):
...
ValueError: URL has to be a string or unicode object.
>>> username_password_from_url(None)
Traceback (most recent call last):
...
ValueError: URL has to be a string or unicode object.
>>> username_password_from_url('http://a@b:c@host.com/')
('a@b', 'c')
>>> username_password_from_url('ftp://a:b:c@host.com/')
('a', 'b:c')
>>> username_password_from_url('http://i%2Fo:P%40ss%3A@host.com/')
('i/o', 'P@ss:')
>>> username_password_from_url('ftp://%C3%B6sterreich@host.com/')
('\xc3\xb6sterreich', None)
>>> username_password_from_url('http://w%20x:y%20z@example.org/')
('w x', 'y z')
>>> username_password_from_url('http://example.com/x@y:z@test.com/')
(None, None)
"""
if type(url) not in (str, unicode):
raise ValueError('URL has to be a string or unicode object.')
(username, password) = (None, None)
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if '@' in netloc:
(authentication, netloc) = netloc.rsplit('@', 1)
if ':' in authentication:
(username, password) = authentication.split(':', 1)
# RFC1738 dictates that we should not allow ['/', '@', ':']
# characters in the username and password field (Section 3.1):
#
# 1. The "/" can't be in there at this point because of the way
# urlparse (which we use above) works.
# 2. Due to gPodder bug 1521, we allow "@" in the username and
# password field. We use netloc.rsplit('@', 1), which will
# make sure that we split it at the last '@' in netloc.
# 3. The colon must be excluded (RFC2617, Section 2) in the
# username, but is apparently allowed in the password. This
# is handled by the authentication.split(':', 1) above, and
# will cause any extraneous ':'s to be part of the password.
username = urllib.unquote(username)
password = urllib.unquote(password)
else:
username = urllib.unquote(authentication)
return (username, password)
def directory_is_writable(path):
"""
Returns True if the specified directory exists and is writable
by the current user.
"""
return os.path.isdir(path) and os.access(path, os.W_OK)
def calculate_size( path):
"""
Tries to calculate the size of a directory, including any
subdirectories found. The returned value might not be
correct if the user doesn't have appropriate permissions
to list all subdirectories of the given path.
"""
if path is None:
return 0L
if os.path.dirname( path) == '/':
return 0L
if os.path.isfile( path):
return os.path.getsize( path)
if os.path.isdir( path) and not os.path.islink( path):
sum = os.path.getsize( path)
try:
for item in os.listdir(path):
try:
sum += calculate_size(os.path.join(path, item))
except:
logger.warn('Cannot get size for %s', path, exc_info=True)
except:
logger.warn('Cannot access %s', path, exc_info=True)
return sum
return 0L
def file_modification_datetime(filename):
"""
Returns the modification date of the specified file
as a datetime.datetime object or None if the modification
date cannot be determined.
"""
if filename is None:
return None
if not os.access(filename, os.R_OK):
return None
try:
s = os.stat(filename)
timestamp = s[stat.ST_MTIME]
return datetime.datetime.fromtimestamp(timestamp)
except:
logger.warn('Cannot get mtime for %s', filename, exc_info=True)
return None
def file_age_in_days(filename):
"""
Returns the age of the specified filename in days or
zero if the modification date cannot be determined.
"""
dt = file_modification_datetime(filename)
if dt is None:
return 0
else:
return (datetime.datetime.now()-dt).days
def file_modification_timestamp(filename):
"""
Returns the modification date of the specified file as a number
or -1 if the modification date cannot be determined.
"""
if filename is None:
return -1
try:
s = os.stat(filename)
return s[stat.ST_MTIME]
except:
logger.warn('Cannot get modification timestamp for %s', filename)
return -1
def file_age_to_string(days):
"""
Converts a "number of days" value to a string that
can be used in the UI to display the file age.
>>> file_age_to_string(0)
''
>>> file_age_to_string(1)
u'1 day ago'
>>> file_age_to_string(2)
u'2 days ago'
"""
if days < 1:
return ''
else:
return N_('%(count)d day ago', '%(count)d days ago', days) % {'count':days}
def is_system_file(filename):
"""
Checks to see if the given file is a system file.
"""
if gpodder.ui.win32 and win32file is not None:
result = win32file.GetFileAttributes(filename)
#-1 is returned by GetFileAttributes when an error occurs
#0x4 is the FILE_ATTRIBUTE_SYSTEM constant
return result != -1 and result & 0x4 != 0
else:
return False
def get_free_disk_space_win32(path):
"""
Win32-specific code to determine the free disk space remaining
for a given path. Uses code from:
http://mail.python.org/pipermail/python-list/2003-May/203223.html
"""
if win32file is None:
# Cannot determine free disk space
return 0
drive, tail = os.path.splitdrive(path)
userFree, userTotal, freeOnDisk = win32file.GetDiskFreeSpaceEx(drive)
return userFree
def get_free_disk_space(path):
"""
Calculates the free disk space available to the current user
on the file system that contains the given path.
If the path (or its parent folder) does not yet exist, this
function returns zero.
"""
if not os.path.exists(path):
return 0
if gpodder.ui.win32:
return get_free_disk_space_win32(path)
s = os.statvfs(path)
return s.f_bavail * s.f_bsize
def format_date(timestamp):
"""
Converts a UNIX timestamp to a date representation. This
function returns "Today", "Yesterday", a weekday name or
the date in %x format, which (according to the Python docs)
is the "Locale's appropriate date representation".
Returns None if there has been an error converting the
timestamp to a string representation.
"""
if timestamp is None:
return None
seconds_in_a_day = 60*60*24
today = time.localtime()[:3]
yesterday = time.localtime(time.time() - seconds_in_a_day)[:3]
try:
timestamp_date = time.localtime(timestamp)[:3]
except ValueError, ve:
logger.warn('Cannot convert timestamp', exc_info=True)
return None
if timestamp_date == today:
return _('Today')
elif timestamp_date == yesterday:
return _('Yesterday')
try:
diff = int( (time.time() - timestamp)/seconds_in_a_day )
except:
logger.warn('Cannot convert "%s" to date.', timestamp, exc_info=True)
return None
try:
timestamp = datetime.datetime.fromtimestamp(timestamp)
except:
return None
if diff < 7:
# Weekday name
return str(timestamp.strftime('%A').decode(encoding))
else:
# Locale's appropriate date representation
return str(timestamp.strftime('%x'))
def format_filesize(bytesize, use_si_units=False, digits=2):
"""
Formats the given size in bytes to be human-readable,
Returns a localized "(unknown)" string when the bytesize
has a negative value.
"""
si_units = (
( 'kB', 10**3 ),
( 'MB', 10**6 ),
( 'GB', 10**9 ),
)
binary_units = (
( 'KiB', 2**10 ),
( 'MiB', 2**20 ),
( 'GiB', 2**30 ),
)
try:
bytesize = float( bytesize)
except:
return _('(unknown)')
if bytesize < 0:
return _('(unknown)')
if use_si_units:
units = si_units
else:
units = binary_units
( used_unit, used_value ) = ( 'B', bytesize )
for ( unit, value ) in units:
if bytesize >= value:
used_value = bytesize / float(value)
used_unit = unit
return ('%.'+str(digits)+'f %s') % (used_value, used_unit)
def delete_file(filename):
"""Delete a file from the filesystem
Errors (permissions errors or file not found)
are silently ignored.
"""
try:
os.remove(filename)
except:
pass
def remove_html_tags(html):
"""
Remove HTML tags from a string and replace numeric and
named entities with the corresponding character, so the
HTML text can be displayed in a simple text view.
"""
if html is None:
return None
# If we would want more speed, we could make these global
re_strip_tags = re.compile('<[^>]*>')
re_unicode_entities = re.compile('&#(\d{2,4});')
re_html_entities = re.compile('&(.{2,8});')
re_newline_tags = re.compile('(<br[^>]*>|<[/]?ul[^>]*>|</li>)', re.I)
re_listing_tags = re.compile('<li[^>]*>', re.I)
result = html
# Convert common HTML elements to their text equivalent
result = re_newline_tags.sub('\n', result)
result = re_listing_tags.sub('\n * ', result)
result = re.sub('<[Pp]>', '\n\n', result)
# Remove all HTML/XML tags from the string
result = re_strip_tags.sub('', result)
# Convert numeric XML entities to their unicode character
result = re_unicode_entities.sub(lambda x: unichr(int(x.group(1))), result)
# Convert named HTML entities to their unicode character
result = re_html_entities.sub(lambda x: unicode(entitydefs.get(x.group(1),''), 'iso-8859-1'), result)
# Convert more than two newlines to two newlines
result = re.sub('([\r\n]{2})([\r\n])+', '\\1', result)
return result.strip()
def wrong_extension(extension):
"""
Determine if a given extension looks like it's
wrong (e.g. empty, extremely long or spaces)
Returns True if the extension most likely is a
wrong one and should be replaced.
>>> wrong_extension('.mp3')
False
>>> wrong_extension('.divx')
False
>>> wrong_extension('mp3')
True
>>> wrong_extension('')
True
>>> wrong_extension('.12 - Everybody')
True
>>> wrong_extension('.mp3 ')
True
>>> wrong_extension('.')
True
>>> wrong_extension('.42')
True
"""
if not extension:
return True
elif len(extension) > 5:
return True
elif ' ' in extension:
return True
elif extension == '.':
return True
elif not extension.startswith('.'):
return True
else:
try:
# ".<number>" is an invalid extension
float(extension)
return True
except:
pass
return False
def extension_from_mimetype(mimetype):
"""
Simply guesses what the file extension should be from the mimetype
>>> extension_from_mimetype('audio/mp4')
'.m4a'
>>> extension_from_mimetype('audio/ogg')
'.ogg'
>>> extension_from_mimetype('audio/mpeg')
'.mp3'
>>> extension_from_mimetype('video/x-matroska')
'.mkv'
>>> extension_from_mimetype('wrong-mimetype')
''
"""
if mimetype in _MIME_TYPES:
return _MIME_TYPES[mimetype]
return mimetypes.guess_extension(mimetype) or ''
def mimetype_from_extension(extension):
"""
Simply guesses what the mimetype should be from the file extension
>>> mimetype_from_extension('.m4a')
'audio/mp4'
>>> mimetype_from_extension('.ogg')
'audio/ogg'
>>> mimetype_from_extension('.mp3')
'audio/mpeg'
>>> mimetype_from_extension('.mkv')
'video/x-matroska'
>>> mimetype_from_extension('._invalid_file_extension_')
''
"""
if extension in _MIME_TYPES_EXT:
return _MIME_TYPES_EXT[extension]
# Need to prepend something to the extension, so guess_type works
type, encoding = mimetypes.guess_type('file'+extension)
return type or ''
def extension_correct_for_mimetype(extension, mimetype):
"""
Check if the given filename extension (e.g. ".ogg") is a possible
extension for a given mimetype (e.g. "application/ogg") and return
    a boolean value (True if it's possible, False if not). Also does
    basic sanity checks, raising ValueError for malformed input:
>>> extension_correct_for_mimetype('.ogg', 'application/ogg')
True
>>> extension_correct_for_mimetype('.ogv', 'video/ogg')
True
>>> extension_correct_for_mimetype('.ogg', 'audio/mpeg')
False
>>> extension_correct_for_mimetype('.m4a', 'audio/mp4')
True
>>> extension_correct_for_mimetype('mp3', 'audio/mpeg')
Traceback (most recent call last):
...
ValueError: "mp3" is not an extension (missing .)
>>> extension_correct_for_mimetype('.mp3', 'audio mpeg')
Traceback (most recent call last):
...
ValueError: "audio mpeg" is not a mimetype (missing /)
"""
    if '/' not in mimetype:
raise ValueError('"%s" is not a mimetype (missing /)' % mimetype)
if not extension.startswith('.'):
raise ValueError('"%s" is not an extension (missing .)' % extension)
if (extension, mimetype) in _MIME_TYPE_LIST:
return True
# Create a "default" extension from the mimetype, e.g. "application/ogg"
# becomes ".ogg", "audio/mpeg" becomes ".mpeg", etc...
default = ['.'+mimetype.split('/')[-1]]
return extension in default+mimetypes.guess_all_extensions(mimetype)
def filename_from_url(url):
"""
Extracts the filename and (lowercase) extension (with dot)
from a URL, e.g. http://server.com/file.MP3?download=yes
will result in the string ("file", ".mp3") being returned.
This function will also try to best-guess the "real"
extension for a media file (audio, video) by
trying to match an extension to these types and recurse
into the query string to find better matches, if the
original extension does not resolve to a known type.
http://my.net/redirect.php?my.net/file.ogg => ("file", ".ogg")
http://server/get.jsp?file=/episode0815.MOV => ("episode0815", ".mov")
http://s/redirect.mp4?http://serv2/test.mp4 => ("test", ".mp4")
"""
(scheme, netloc, path, para, query, fragid) = urlparse.urlparse(url)
(filename, extension) = os.path.splitext(os.path.basename( urllib.unquote(path)))
if file_type_by_extension(extension) is not None and not \
query.startswith(scheme+'://'):
# We have found a valid extension (audio, video)
# and the query string doesn't look like a URL
return ( filename, extension.lower() )
# If the query string looks like a possible URL, try that first
if len(query.strip()) > 0 and query.find('/') != -1:
query_url = '://'.join((scheme, urllib.unquote(query)))
(query_filename, query_extension) = filename_from_url(query_url)
if file_type_by_extension(query_extension) is not None:
return os.path.splitext(os.path.basename(query_url))
# No exact match found, simply return the original filename & extension
return ( filename, extension.lower() )
def file_type_by_extension(extension):
"""
Tries to guess the file type by looking up the filename
extension from a table of known file types. Will return
"audio", "video" or None.
>>> file_type_by_extension('.aif')
'audio'
>>> file_type_by_extension('.3GP')
'video'
>>> file_type_by_extension('.m4a')
'audio'
>>> file_type_by_extension('.txt') is None
True
>>> file_type_by_extension(None) is None
True
>>> file_type_by_extension('ogg')
Traceback (most recent call last):
...
ValueError: Extension does not start with a dot: ogg
"""
if not extension:
return None
if not extension.startswith('.'):
raise ValueError('Extension does not start with a dot: %s' % extension)
extension = extension.lower()
if extension in _MIME_TYPES_EXT:
return _MIME_TYPES_EXT[extension].split('/')[0]
# Need to prepend something to the extension, so guess_type works
type, encoding = mimetypes.guess_type('file'+extension)
if type is not None and '/' in type:
filetype, rest = type.split('/', 1)
if filetype in ('audio', 'video', 'image'):
return filetype
return None
def get_first_line( s):
"""
Returns only the first line of a string, stripped so
that it doesn't have whitespace before or after.
"""
return s.strip().split('\n')[0].strip()
def object_string_formatter(s, **kwargs):
"""
Makes attributes of object passed in as keyword
arguments available as {OBJECTNAME.ATTRNAME} in
the passed-in string and returns a string with
the above arguments replaced with the attribute
values of the corresponding object.
>>> class x: pass
>>> a = x()
>>> a.title = 'Hello world'
>>> object_string_formatter('{episode.title}', episode=a)
'Hello world'
>>> class x: pass
>>> a = x()
>>> a.published = 123
>>> object_string_formatter('Hi {episode.published} 456', episode=a)
'Hi 123 456'
"""
result = s
for key, o in kwargs.iteritems():
matches = re.findall(r'\{%s\.([^\}]+)\}' % key, s)
for attr in matches:
if hasattr(o, attr):
try:
from_s = '{%s.%s}' % (key, attr)
to_s = str(getattr(o, attr))
result = result.replace(from_s, to_s)
except:
logger.warn('Replace of "%s" failed for "%s".', attr, s)
return result
def format_desktop_command(command, filenames, start_position=None):
"""
Formats a command template from the "Exec=" line of a .desktop
file to a string that can be invoked in a shell.
Handled format strings: %U, %u, %F, %f and a fallback that
appends the filename as first parameter of the command.
Also handles non-standard %p which is replaced with the start_position
(probably only makes sense if starting a single file). (see bug 1140)
See http://standards.freedesktop.org/desktop-entry-spec/1.0/ar01s06.html
Returns a list of commands to execute, either one for
each filename if the application does not support multiple
file names or one for all filenames (%U, %F or unknown).
"""
# Replace backslashes with slashes to fix win32 issues
# (even on win32, "/" works, but "\" does not)
command = command.replace('\\', '/')
if start_position is not None:
command = command.replace('%p', str(start_position))
command = shlex.split(command)
command_before = command
command_after = []
multiple_arguments = True
for fieldcode in ('%U', '%F', '%u', '%f'):
if fieldcode in command:
command_before = command[:command.index(fieldcode)]
command_after = command[command.index(fieldcode)+1:]
multiple_arguments = fieldcode in ('%U', '%F')
break
if multiple_arguments:
return [command_before + filenames + command_after]
commands = []
for filename in filenames:
commands.append(command_before+[filename]+command_after)
return commands
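# Illustrative only (file names made up): expanding a .desktop Exec line.
# %U keeps all files in a single invocation; %f spawns one command per file.
#
#   format_desktop_command('vlc %U', ['/tmp/a.mp3', '/tmp/b.mp3'])
#   => [['vlc', '/tmp/a.mp3', '/tmp/b.mp3']]
#   format_desktop_command('player %f', ['/tmp/a.mp3', '/tmp/b.mp3'])
#   => [['player', '/tmp/a.mp3'], ['player', '/tmp/b.mp3']]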
def url_strip_authentication(url):
"""
Strips authentication data from an URL. Returns the URL with
the authentication data removed from it.
>>> url_strip_authentication('https://host.com/')
'https://host.com/'
>>> url_strip_authentication('telnet://foo:bar@host.com/')
'telnet://host.com/'
>>> url_strip_authentication('ftp://billy@example.org')
'ftp://example.org'
>>> url_strip_authentication('ftp://billy:@example.org')
'ftp://example.org'
>>> url_strip_authentication('http://aa:bc@localhost/x')
'http://localhost/x'
>>> url_strip_authentication('http://i%2Fo:P%40ss%3A@blubb.lan/u.html')
'http://blubb.lan/u.html'
>>> url_strip_authentication('http://c:d@x.org/')
'http://x.org/'
>>> url_strip_authentication('http://P%40%3A:i%2F@cx.lan')
'http://cx.lan'
>>> url_strip_authentication('http://x@x.com:s3cret@example.com/')
'http://example.com/'
"""
url_parts = list(urlparse.urlsplit(url))
# url_parts[1] is the HOST part of the URL
# Remove existing authentication data
if '@' in url_parts[1]:
url_parts[1] = url_parts[1].rsplit('@', 1)[1]
return urlparse.urlunsplit(url_parts)
def url_add_authentication(url, username, password):
"""
Adds authentication data (username, password) to a given
URL in order to construct an authenticated URL.
>>> url_add_authentication('https://host.com/', '', None)
'https://host.com/'
>>> url_add_authentication('http://example.org/', None, None)
'http://example.org/'
>>> url_add_authentication('telnet://host.com/', 'foo', 'bar')
'telnet://foo:bar@host.com/'
>>> url_add_authentication('ftp://example.org', 'billy', None)
'ftp://billy@example.org'
>>> url_add_authentication('ftp://example.org', 'billy', '')
'ftp://billy:@example.org'
>>> url_add_authentication('http://localhost/x', 'aa', 'bc')
'http://aa:bc@localhost/x'
>>> url_add_authentication('http://blubb.lan/u.html', 'i/o', 'P@ss:')
'http://i%2Fo:P@ss:@blubb.lan/u.html'
>>> url_add_authentication('http://a:b@x.org/', 'c', 'd')
'http://c:d@x.org/'
>>> url_add_authentication('http://i%2F:P%40%3A@cx.lan', 'P@x', 'i/')
'http://P@x:i%2F@cx.lan'
>>> url_add_authentication('http://x.org/', 'a b', 'c d')
'http://a%20b:c%20d@x.org/'
"""
if username is None or username == '':
return url
# Relaxations of the strict quoting rules (bug 1521):
# 1. Accept '@' in username and password
    # 2. Accept ':' in password only
username = urllib.quote(username, safe='@')
if password is not None:
password = urllib.quote(password, safe='@:')
auth_string = ':'.join((username, password))
else:
auth_string = username
url = url_strip_authentication(url)
url_parts = list(urlparse.urlsplit(url))
# url_parts[1] is the HOST part of the URL
url_parts[1] = '@'.join((auth_string, url_parts[1]))
return urlparse.urlunsplit(url_parts)
def urlopen(url, headers=None, data=None, timeout=None):
"""
An URL opener with the User-agent set to gPodder (with version)
"""
username, password = username_password_from_url(url)
if username is not None or password is not None:
url = url_strip_authentication(url)
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, url, username, password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
else:
opener = urllib2.build_opener()
if headers is None:
headers = {}
else:
headers = dict(headers)
headers.update({'User-agent': gpodder.user_agent})
request = urllib2.Request(url, data=data, headers=headers)
if timeout is None:
return opener.open(request)
else:
return opener.open(request, timeout=timeout)
def get_real_url(url):
"""
Gets the real URL of a file and resolves all redirects.
"""
try:
return urlopen(url).geturl()
except:
logger.error('Getting real url for %s', url, exc_info=True)
return url
def find_command(command):
"""
Searches the system's PATH for a specific command that is
    executable by the user. Returns the first occurrence of an
executable binary in the PATH, or None if the command is
not available.
On Windows, this also looks for "<command>.bat" and
"<command>.exe" files if "<command>" itself doesn't exist.
"""
if 'PATH' not in os.environ:
return None
for path in os.environ['PATH'].split(os.pathsep):
command_file = os.path.join(path, command)
if gpodder.ui.win32 and not os.path.exists(command_file):
for extension in ('.bat', '.exe'):
cmd = command_file + extension
if os.path.isfile(cmd):
command_file = cmd
break
if os.path.isfile(command_file) and os.access(command_file, os.X_OK):
return command_file
return None
idle_add_handler = None
def idle_add(func, *args):
"""Run a function in the main GUI thread
    This is a wrapper function that does the Right Thing depending on
    whether we are running on Gtk+, Qt or the CLI.
You should use this function if you are calling from a Python thread and
modify UI data, so that you make sure that the function is called as soon
as possible from the main UI thread.
"""
if gpodder.ui.gtk:
import gobject
gobject.idle_add(func, *args)
elif gpodder.ui.qml:
from PySide.QtCore import Signal, QTimer, QThread, Qt, QObject
class IdleAddHandler(QObject):
signal = Signal(object)
def __init__(self):
QObject.__init__(self)
self.main_thread_id = QThread.currentThreadId()
self.signal.connect(self.run_func)
def run_func(self, func):
assert QThread.currentThreadId() == self.main_thread_id, \
("Running in %s, not %s"
% (str(QThread.currentThreadId()),
str(self.main_thread_id)))
func()
def idle_add(self, func, *args):
def doit():
try:
func(*args)
except Exception, e:
logger.exception("Running %s%s: %s",
func, str(tuple(args)), str(e))
if QThread.currentThreadId() == self.main_thread_id:
# If we emit the signal in the main thread,
# then the function will be run immediately.
# Instead, use a single shot timer with a 0
# timeout: this will run the function when the
# event loop next iterates.
QTimer.singleShot(0, doit)
else:
self.signal.emit(doit)
global idle_add_handler
if idle_add_handler is None:
idle_add_handler = IdleAddHandler()
idle_add_handler.idle_add(func, *args)
else:
func(*args)
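# A minimal sketch (hypothetical names) of calling idle_add from a worker
# thread so that UI state is only touched on the main thread:
#
#   def worker(label):
#       result = do_long_running_task()        # hypothetical helper
#       idle_add(label.set_text, str(result))  # marshalled to the UI thread
#   threading.Thread(target=worker, args=(label,)).start()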
def bluetooth_available():
"""
Returns True or False depending on the availability
of bluetooth functionality on the system.
"""
if find_command('bluetooth-sendto') or \
find_command('gnome-obex-send'):
return True
else:
return False
def bluetooth_send_file(filename):
"""
Sends a file via bluetooth.
This function tries to use "bluetooth-sendto", and if
it is not available, it also tries "gnome-obex-send".
"""
command_line = None
if find_command('bluetooth-sendto'):
command_line = ['bluetooth-sendto']
elif find_command('gnome-obex-send'):
command_line = ['gnome-obex-send']
if command_line is not None:
command_line.append(filename)
return (subprocess.Popen(command_line).wait() == 0)
else:
logger.error('Cannot send file. Please install "bluetooth-sendto" or "gnome-obex-send".')
return False
def format_time(value):
"""Format a seconds value to a string
>>> format_time(0)
'00:00'
>>> format_time(20)
'00:20'
>>> format_time(3600)
'01:00:00'
>>> format_time(10921)
'03:02:01'
"""
dt = datetime.datetime.utcfromtimestamp(value)
if dt.hour == 0:
return dt.strftime('%M:%S')
else:
return dt.strftime('%H:%M:%S')
def parse_time(value):
"""Parse a time string into seconds
>>> parse_time('00:00')
0
>>> parse_time('00:00:00')
0
>>> parse_time('00:20')
20
>>> parse_time('00:00:20')
20
>>> parse_time('01:00:00')
3600
>>> parse_time('03:02:01')
10921
>>> parse_time('61:08')
3668
>>> parse_time('25:03:30')
90210
>>> parse_time('25:3:30')
90210
>>> parse_time('61.08')
3668
"""
if value == '':
return 0
if not value:
raise ValueError('Invalid value: %s' % (str(value),))
m = re.match(r'(\d+)[:.](\d\d?)[:.](\d\d?)', value)
if m:
hours, minutes, seconds = m.groups()
return (int(hours) * 60 + int(minutes)) * 60 + int(seconds)
m = re.match(r'(\d+)[:.](\d\d?)', value)
if m:
minutes, seconds = m.groups()
return int(minutes) * 60 + int(seconds)
return int(value)
def format_seconds_to_hour_min_sec(seconds):
"""
Take the number of seconds and format it into a
human-readable string (duration).
>>> format_seconds_to_hour_min_sec(3834)
u'1 hour, 3 minutes and 54 seconds'
>>> format_seconds_to_hour_min_sec(3600)
u'1 hour'
>>> format_seconds_to_hour_min_sec(62)
u'1 minute and 2 seconds'
"""
if seconds < 1:
return N_('%(count)d second', '%(count)d seconds', seconds) % {'count':seconds}
result = []
seconds = int(seconds)
hours = seconds/3600
seconds = seconds%3600
minutes = seconds/60
seconds = seconds%60
if hours:
result.append(N_('%(count)d hour', '%(count)d hours', hours) % {'count':hours})
if minutes:
result.append(N_('%(count)d minute', '%(count)d minutes', minutes) % {'count':minutes})
if seconds:
result.append(N_('%(count)d second', '%(count)d seconds', seconds) % {'count':seconds})
if len(result) > 1:
return (' '+_('and')+' ').join((', '.join(result[:-1]), result[-1]))
else:
return result[0]
def http_request(url, method='HEAD'):
(scheme, netloc, path, parms, qry, fragid) = urlparse.urlparse(url)
conn = httplib.HTTPConnection(netloc)
start = len(scheme) + len('://') + len(netloc)
conn.request(method, url[start:])
return conn.getresponse()
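# Example (hypothetical URL): issue a HEAD request to inspect a file
# without downloading its body:
#
#   response = http_request('http://example.com/episode.mp3')
#   print response.status, response.getheader('content-type')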
def gui_open(filename):
"""
Open a file or folder with the default application set
    by the desktop environment. This uses "xdg-open" on all
    systems, with two exceptions: on Win32, os.startfile() is
    used, and on Mac OS X, "open" is used.
"""
try:
if gpodder.ui.win32:
os.startfile(filename)
elif gpodder.ui.osx:
subprocess.Popen(['open', filename])
else:
subprocess.Popen(['xdg-open', filename])
return True
except:
logger.error('Cannot open file/folder: "%s"', filename, exc_info=True)
return False
def open_website(url):
"""
Opens the specified URL using the default system web
browser. This uses Python's "webbrowser" module, so
make sure your system is set up correctly.
"""
run_in_background(lambda: webbrowser.open(url))
def convert_bytes(d):
"""
Convert byte strings to unicode strings
This function will decode byte strings into unicode
strings. Any other data types will be left alone.
>>> convert_bytes(None)
>>> convert_bytes(1)
1
>>> convert_bytes(4711L)
4711L
>>> convert_bytes(True)
True
>>> convert_bytes(3.1415)
3.1415
>>> convert_bytes('Hello')
u'Hello'
>>> convert_bytes(u'Hey')
u'Hey'
"""
if d is None:
return d
if any(isinstance(d, t) for t in (int, long, bool, float)):
return d
elif not isinstance(d, unicode):
return d.decode('utf-8', 'ignore')
return d
def sanitize_encoding(filename):
r"""
Generate a sanitized version of a string (i.e.
remove invalid characters and encode in the
detected native language encoding).
>>> sanitize_encoding('\x80')
''
>>> sanitize_encoding(u'unicode')
'unicode'
"""
# The encoding problem goes away in Python 3.. hopefully!
if sys.version_info >= (3, 0):
return filename
global encoding
if not isinstance(filename, unicode):
filename = filename.decode(encoding, 'ignore')
return filename.encode(encoding, 'ignore')
def sanitize_filename(filename, max_length=0, use_ascii=False):
"""
Generate a sanitized version of a filename that can
be written on disk (i.e. remove/replace invalid
characters and encode in the native language) and
trim filename if greater than max_length (0 = no limit).
If use_ascii is True, don't encode in the native language,
but use only characters from the ASCII character set.
"""
if not isinstance(filename, unicode):
filename = filename.decode(encoding, 'ignore')
if max_length > 0 and len(filename) > max_length:
logger.info('Limiting file/folder name "%s" to %d characters.',
filename, max_length)
filename = filename[:max_length]
filename = filename.encode('ascii' if use_ascii else encoding, 'ignore')
filename = filename.translate(SANITIZATION_TABLE)
filename = filename.strip('.' + string.whitespace)
return filename
def find_mount_point(directory):
"""
Try to find the mount point for a given directory.
If the directory is itself a mount point, return
it. If not, remove the last part of the path and
re-check if it's a mount point. If the directory
resides on your root filesystem, "/" is returned.
>>> find_mount_point('/')
'/'
>>> find_mount_point(u'/something')
Traceback (most recent call last):
...
ValueError: Convert unicode objects to str first.
>>> find_mount_point(None)
Traceback (most recent call last):
...
ValueError: Directory names should be of type str.
>>> find_mount_point(42)
Traceback (most recent call last):
...
ValueError: Directory names should be of type str.
>>> from minimock import mock, restore
>>> mocked_mntpoints = ('/', '/home', '/media/usbdisk', '/media/cdrom')
>>> mock('os.path.ismount', returns_func=lambda x: x in mocked_mntpoints)
>>>
>>> # For mocking os.getcwd(), we simply use a lambda to avoid the
>>> # massive output of "Called os.getcwd()" lines in this doctest
>>> os.getcwd = lambda: '/home/thp'
>>>
>>> find_mount_point('.')
Called os.path.ismount('/home/thp')
Called os.path.ismount('/home')
'/home'
>>> find_mount_point('relativity')
Called os.path.ismount('/home/thp/relativity')
Called os.path.ismount('/home/thp')
Called os.path.ismount('/home')
'/home'
>>> find_mount_point('/media/usbdisk/')
Called os.path.ismount('/media/usbdisk')
'/media/usbdisk'
>>> find_mount_point('/home/thp/Desktop')
Called os.path.ismount('/home/thp/Desktop')
Called os.path.ismount('/home/thp')
Called os.path.ismount('/home')
'/home'
>>> find_mount_point('/media/usbdisk/Podcasts/With Spaces')
Called os.path.ismount('/media/usbdisk/Podcasts/With Spaces')
Called os.path.ismount('/media/usbdisk/Podcasts')
Called os.path.ismount('/media/usbdisk')
'/media/usbdisk'
>>> find_mount_point('/home/')
Called os.path.ismount('/home')
'/home'
>>> find_mount_point('/media/cdrom/../usbdisk/blubb//')
Called os.path.ismount('/media/usbdisk/blubb')
Called os.path.ismount('/media/usbdisk')
'/media/usbdisk'
>>> restore()
"""
if isinstance(directory, unicode):
# XXX: This is only valid for Python 2 - misleading error in Python 3?
# We do not accept unicode strings, because they could fail when
# trying to be converted to some native encoding, so fail loudly
# and leave it up to the callee to encode into the proper encoding.
raise ValueError('Convert unicode objects to str first.')
if not isinstance(directory, str):
# In Python 2, we assume it's a byte str; in Python 3, we assume
# that it's a unicode str. The abspath/ismount/split functions of
# os.path work with unicode str in Python 3, but not in Python 2.
raise ValueError('Directory names should be of type str.')
directory = os.path.abspath(directory)
while directory != '/':
if os.path.ismount(directory):
return directory
else:
(directory, tail_data) = os.path.split(directory)
return '/'
# matches protocol prefixes such as http:// and ftp://
protocolPattern = re.compile(r'^\w+://')
def isabs(string):
    """
    @return True if string is an absolute path or a protocol address.
    Addresses beginning with a scheme such as http://, ftp:// or
    ldap:// are considered "absolute" paths.
    Source: http://code.activestate.com/recipes/208993/
    """
    if protocolPattern.match(string): return True
    return os.path.isabs(string)
def commonpath(l1, l2, common=None):
    """
    Helper function for relpath: recursively split off the common
    prefix of two path component lists.
    Source: http://code.activestate.com/recipes/208993/
    """
    if common is None: common = []
    if len(l1) < 1: return (common, l1, l2)
    if len(l2) < 1: return (common, l1, l2)
    if l1[0] != l2[0]: return (common, l1, l2)
    return commonpath(l1[1:], l2[1:], common+[l1[0]])
def relpath(p1, p2):
"""
Finds relative path from p1 to p2
Source: http://code.activestate.com/recipes/208993/
"""
pathsplit = lambda s: s.split(os.path.sep)
(common,l1,l2) = commonpath(pathsplit(p1), pathsplit(p2))
p = []
if len(l1) > 0:
p = [ ('..'+os.sep) * len(l1) ]
p = p + l2
    if len(p) == 0:
return "."
return os.path.join(*p)
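# Hedged illustration (not part of the original module) of the two helpers
# above, assuming POSIX path separators:
#   commonpath(['', 'a', 'b'], ['', 'a', 'd'])  ->  (['', 'a'], ['b'], ['d'])
#   relpath('/a/b/c', '/a/d')                   ->  '../../d'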
def get_hostname():
"""Return the hostname of this computer
This can be implemented in a different way on each
platform and should yield a unique-per-user device ID.
"""
nodename = platform.node()
if nodename:
return nodename
# Fallback - but can this give us "localhost"?
return socket.gethostname()
def detect_device_type():
"""Device type detection for gpodder.net
This function tries to detect on which
kind of device gPodder is running on.
Possible return values:
desktop, laptop, mobile, server, other
"""
if gpodder.ui.harmattan:
return 'mobile'
elif glob.glob('/proc/acpi/battery/*'):
# Linux: If we have a battery, assume Laptop
return 'laptop'
return 'desktop'
def write_m3u_playlist(m3u_filename, episodes, extm3u=True):
"""Create an M3U playlist from a episode list
If the parameter "extm3u" is False, the list of
episodes should be a list of filenames, and no
extended information will be written into the
M3U files (#EXTM3U / #EXTINF).
If the parameter "extm3u" is True (default), then the
list of episodes should be PodcastEpisode objects,
as the extended metadata will be taken from them.
"""
f = open(m3u_filename, 'w')
if extm3u:
# Mandatory header for extended playlists
f.write('#EXTM3U\n')
for episode in episodes:
if not extm3u:
            # In this mode, the "episodes" list contains plain file name strings
f.write(episode+'\n')
continue
if episode.was_downloaded(and_exists=True):
filename = episode.local_filename(create=False)
assert filename is not None
if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
f.write('#EXTINF:0,'+episode.playlist_title()+'\n')
f.write(filename+'\n')
f.close()
def generate_names(filename):
basename, ext = os.path.splitext(filename)
for i in itertools.count():
if i:
yield '%s (%d)%s' % (basename, i+1, ext)
else:
yield filename
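# Hedged illustration (not part of the original module):
#   itertools.islice(generate_names('file.mp3'), 3) yields
#   'file.mp3', 'file (2).mp3', 'file (3).mp3'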
def is_known_redirecter(url):
"""Check if a URL redirect is expected, and no filenames should be updated
We usually honor URL redirects, and update filenames accordingly.
In some cases (e.g. Soundcloud) this results in a worse filename,
so we hardcode and detect these cases here to avoid renaming files
for which we know that a "known good default" exists.
The problem here is that by comparing the currently-assigned filename
with the new filename determined by the URL, we cannot really determine
which one is the "better" URL (e.g. "n5rMSpXrqmR9.128.mp3" for Soundcloud).
"""
# Soundcloud-hosted media downloads (we take the track name as filename)
if url.startswith('http://ak-media.soundcloud.com/'):
return True
return False
def atomic_rename(old_name, new_name):
"""Atomically rename/move a (temporary) file
This is usually used when updating a file safely by writing
the new contents into a temporary file and then moving the
temporary file over the original file to replace it.
"""
    if gpodder.ui.win32:
        # os.rename() cannot atomically replace an existing target on
        # Win32, so fall back to shutil.move() (which is not atomic)
        shutil.move(old_name, new_name)
else:
os.rename(old_name, new_name)
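# Hedged usage sketch (not part of the original module) of the safe-update
# pattern described in the docstring above; "target" and "new_contents"
# are hypothetical names:
#   fd, tmp_name = tempfile.mkstemp(dir=os.path.dirname(target))
#   os.write(fd, new_contents)
#   os.close(fd)
#   atomic_rename(tmp_name, target)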
def check_command(cmd):
"""Check if a command line command/program exists"""
# Prior to Python 2.7.3, this module (shlex) did not support Unicode input.
cmd = sanitize_encoding(cmd)
program = shlex.split(cmd)[0]
return (find_command(program) is not None)
def rename_episode_file(episode, filename):
"""Helper method to update a PodcastEpisode object
Useful after renaming/converting its download file.
"""
if not os.path.exists(filename):
raise ValueError('Target filename does not exist.')
basename, extension = os.path.splitext(filename)
episode.download_filename = os.path.basename(filename)
episode.file_size = os.path.getsize(filename)
episode.mime_type = mimetype_from_extension(extension)
episode.save()
episode.db.commit()
def get_update_info(url='http://gpodder.org/downloads'):
"""
Get up to date release information from gpodder.org.
Returns a tuple: (up_to_date, latest_version, release_date, days_since)
Example result (up to date version, 20 days after release):
(True, '3.0.4', '2012-01-24', 20)
Example result (outdated version, 10 days after release):
(False, '3.0.5', '2012-02-29', 10)
"""
data = urlopen(url).read()
id_field_re = re.compile(r'<([a-z]*)[^>]*id="([^"]*)"[^>]*>([^<]*)</\1>')
info = dict((m.group(2), m.group(3)) for m in id_field_re.finditer(data))
latest_version = info['latest-version']
release_date = info['release-date']
release_parsed = datetime.datetime.strptime(release_date, '%Y-%m-%d')
days_since_release = (datetime.datetime.today() - release_parsed).days
convert = lambda s: tuple(int(x) for x in s.split('.'))
up_to_date = (convert(gpodder.__version__) >= convert(latest_version))
return up_to_date, latest_version, release_date, days_since_release
def run_in_background(function, daemon=False):
logger.debug('run_in_background: %s (%s)', function, str(daemon))
thread = threading.Thread(target=function)
thread.setDaemon(daemon)
thread.start()
return thread
def linux_get_active_interfaces():
"""Get active network interfaces using 'ip link'
Returns a list of active network interfaces or an
empty list if the device is offline. The loopback
interface is not included.
"""
process = subprocess.Popen(['ip', 'link'], stdout=subprocess.PIPE)
data, _ = process.communicate()
for interface, _ in re.findall(r'\d+: ([^:]+):.*state (UP|UNKNOWN)', data):
if interface != 'lo':
yield interface
def osx_get_active_interfaces():
"""Get active network interfaces using 'ifconfig'
Returns a list of active network interfaces or an
empty list if the device is offline. The loopback
interface is not included.
"""
process = subprocess.Popen(['ifconfig'], stdout=subprocess.PIPE)
stdout, _ = process.communicate()
for i in re.split('\n(?!\t)', stdout, re.MULTILINE):
b = re.match('(\\w+):.*status: (active|associated)$', i, re.MULTILINE | re.DOTALL)
if b:
yield b.group(1)
def unix_get_active_interfaces():
"""Get active network interfaces using 'ifconfig'
Returns a list of active network interfaces or an
empty list if the device is offline. The loopback
interface is not included.
"""
process = subprocess.Popen(['ifconfig'], stdout=subprocess.PIPE)
stdout, _ = process.communicate()
for i in re.split('\n(?!\t)', stdout, re.MULTILINE):
b = re.match('(\\w+):.*status: active$', i, re.MULTILINE | re.DOTALL)
if b:
yield b.group(1)
def connection_available():
"""Check if an Internet connection is available
Returns True if a connection is available (or if there
is no way to determine the connection). Returns False
if no network interfaces are up (i.e. no connectivity).
"""
try:
if gpodder.ui.win32:
# FIXME: Implement for Windows
return True
elif gpodder.ui.osx:
return len(list(osx_get_active_interfaces())) > 0
else:
# By default, we assume we're not offline (bug 1730)
offline = False
if find_command('ifconfig') is not None:
# If ifconfig is available, and it says we don't have
# any active interfaces, assume we're offline
if len(list(unix_get_active_interfaces())) == 0:
offline = True
# If we assume we're offline, try the "ip" command as fallback
if offline and find_command('ip') is not None:
if len(list(linux_get_active_interfaces())) == 0:
offline = True
else:
offline = False
return not offline
return False
except Exception, e:
logger.warn('Cannot get connection status: %s', e, exc_info=True)
# When we can't determine the connection status, act as if we're online (bug 1730)
return True
def website_reachable(url):
"""
Check if a specific website is available.
"""
if not connection_available():
# No network interfaces up - assume website not reachable
return (False, None)
try:
response = urllib2.urlopen(url, timeout=1)
return (True, response)
except urllib2.URLError as err:
pass
return (False, None)
def delete_empty_folders(top):
for root, dirs, files in os.walk(top, topdown=False):
for name in dirs:
dirname = os.path.join(root, name)
if not os.listdir(dirname):
os.rmdir(dirname)
|
somini/gpodder
|
src/gpodder/util.py
|
Python
|
gpl-3.0
| 54,845
|
import greengraph
if __name__ == '__main__':
from matplotlib import pyplot as plt
mygraph = greengraph.Greengraph('New York','Chicago')
data = mygraph.green_between(20)
plt.plot(data)
plt.show()
|
padraic-padraic/MPHYSG001_CW1
|
example.py
|
Python
|
gpl-2.0
| 216
|
import numpy as np
class Conversions:
MPH_TO_MS = 1.609/3.6
MS_TO_MPH = 3.6/1.609
KPH_TO_MS = 1./3.6
MS_TO_KPH = 3.6
MPH_TO_KPH = 1.609
KPH_TO_MPH = 1./1.609
KNOTS_TO_MS = 1/1.9438
MS_TO_KNOTS = 1.9438
  # Decode ddmm.mmmm (decimal minutes) into decimal degrees; works with
  # numpy arrays as input
  @staticmethod
  def dm2d(dm):
    # Truncate toward zero: np.round would push minute values >= 50 into
    # the next degree and corrupt the result, in both hemispheres
    degs = np.trunc(dm/100.)
    mins = dm - degs*100.
    return degs + mins/60.
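# Hedged illustration (not part of the original file): 4530.5 is the
# ddmm.mmmm encoding of 45 degrees 30.5 minutes, so
#   Conversions.dm2d(4530.5)  ->  45.5083...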
# Car button codes
class CruiseButtons:
RES_ACCEL = 4
DECEL_SET = 3
CANCEL = 2
MAIN = 1
# Image params for color cam on acura, calibrated on pre las vegas drive (2016-05-21)
class ImageParams:
def __init__(self):
self.SX_R = 160 # top left corner pixel shift of the visual region considered by the model
self.SY_R = 180 # top left corner pixel shift of the visual region considered by the model
self.VPX_R = 319 # vanishing point reference, as calibrated in Vegas drive
self.VPY_R = 201 # vanishing point reference, as calibrated in Vegas drive
    self.X = 320 # pixel width of the image for the model
    self.Y = 160 # pixel height of the image for the model
self.SX = self.SX_R # current visual region with shift
self.SY = self.SY_R # current visual region with shift
self.VPX = self.VPX_R # current vanishing point with shift
self.VPY = self.VPY_R # current vanishing point with shift
def shift(self, shift):
def to_int(fl):
return int(round(fl))
    # shift comes from calibration and says how much to shift the visual region
self.SX = self.SX_R + to_int(shift[0]) # current visual region with shift
self.SY = self.SY_R + to_int(shift[1]) # current visual region with shift
self.VPX = self.VPX_R + to_int(shift[0]) # current vanishing point with shift
self.VPY = self.VPY_R + to_int(shift[1]) # current vanishing point with shift
class UIParams:
lidar_x, lidar_y, lidar_zoom = 384, 960, 8
lidar_car_x, lidar_car_y = lidar_x/2., lidar_y/1.1
car_hwidth = 1.7272/2 * lidar_zoom
car_front = 2.6924 * lidar_zoom
car_back = 1.8796 * lidar_zoom
car_color = 110
class VehicleParams:
def __init__(self, civic, brake_only=False, torque_mod=False):
if civic:
self.wheelbase = 2.67
self.steer_ratio = 15.3
self.slip_factor = 0.0014
self.civic = True
else:
self.wheelbase = 2.67 # from http://www.edmunds.com/acura/ilx/2016/sedan/features-specs/
self.steer_ratio = 15.3 # from http://www.edmunds.com/acura/ilx/2016/road-test-specs/
self.slip_factor = 0.0014
self.civic = False
self.brake_only = brake_only
self.torque_mod = torque_mod
self.ui_speed_fudge = 1.01 if self.civic else 1.025
|
damienstanton/nanodegree
|
selfdriving_vehicle/openpilot/selfdrive/config.py
|
Python
|
mit
| 2,706
|
import random
random.seed()
randint = random.randint
def roll2d6():
"""Returns a tuple containing the results of the two d6 rolls"""
return (randint(1,6), randint(1,6))
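# Hedged illustration (not part of the original module):
#   total = sum(roll2d6())   # a 2d6 result in the range [2, 12]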
|
ddossett/python-tutorial
|
scripts/dungeonworld/dwtools.py
|
Python
|
mit
| 178
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
dacs = {
0: 'DACA',
1: 'DACB',
2: 'DACC',
3: 'DACD',
}
class Decoder(srd.Decoder):
api_version = 2
id = 'tlc5620'
name = 'TI TLC5620'
longname = 'Texas Instruments TLC5620'
desc = 'Texas Instruments TLC5620 8-bit quad DAC.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['tlc5620']
channels = (
{'id': 'clk', 'name': 'CLK', 'desc': 'Serial interface clock'},
{'id': 'data', 'name': 'DATA', 'desc': 'Serial interface data'},
)
optional_channels = (
{'id': 'load', 'name': 'LOAD', 'desc': 'Serial interface load control'},
{'id': 'ldac', 'name': 'LDAC', 'desc': 'Load DAC'},
)
annotations = (
('dac-select', 'DAC select'),
('gain', 'Gain'),
('value', 'DAC value'),
('data-latch', 'Data latch point'),
('ldac-fall', 'LDAC falling edge'),
)
def __init__(self, **kwargs):
self.oldpins = self.oldclk = self.oldload = self.oldldac = None
self.datapin = None
self.bits = []
self.ss_dac = self.es_dac = 0
self.ss_gain = self.es_gain = 0
self.ss_value = self.es_value = 0
self.dac_select = self.gain = self.dac_value = None
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def handle_11bits(self):
s = ''.join(str(i) for i in self.bits[:2])
self.dac_select = s = dacs[int(s, 2)]
self.put(self.ss_dac, self.es_dac, self.out_ann,
[0, ['DAC select: %s' % s, 'DAC sel: %s' % s,
'DAC: %s' % s, 'D: %s' % s, s, s[3]]])
self.gain = g = 1 + self.bits[2]
self.put(self.ss_gain, self.es_gain, self.out_ann,
[1, ['Gain: x%d' % g, 'G: x%d' % g, 'x%d' % g]])
s = ''.join(str(i) for i in self.bits[3:])
self.dac_value = v = int(s, 2)
self.put(self.ss_value, self.es_value, self.out_ann,
[2, ['DAC value: %d' % v, 'Value: %d' % v, 'Val: %d' % v,
'V: %d' % v, '%d' % v]])
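    # Hedged illustration (not part of the original decoder): for the bit
    # sequence 0,1, 1, 1,0,0,0,0,0,0,0 the method above reports DAC select
    # DACB ('01'), gain x2 (1 + bits[2]) and DAC value 128 ('10000000').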
def handle_falling_edge_load(self):
s, v, g = self.dac_select, self.dac_value, self.gain
self.put(self.samplenum, self.samplenum, self.out_ann,
[3, ['Setting %s value to %d (x%d gain)' % (s, v, g),
'%s=%d (x%d gain)' % (s, v, g)]])
def handle_falling_edge_ldac(self):
self.put(self.samplenum, self.samplenum, self.out_ann,
[4, ['Falling edge on LDAC pin', 'LDAC fall', 'LDAC']])
def handle_new_dac_bit(self):
self.bits.append(self.datapin)
# Wait until we have read 11 bits, then parse them.
l, s = len(self.bits), self.samplenum
if l == 1:
self.ss_dac = s
elif l == 2:
self.es_dac = self.ss_gain = s
elif l == 3:
self.es_gain = self.ss_value = s
elif l == 11:
self.es_value = s
self.handle_11bits()
self.bits = []
def decode(self, ss, es, data):
for (self.samplenum, pins) in data:
# Ignore identical samples early on (for performance reasons).
if self.oldpins == pins:
continue
self.oldpins, (clk, self.datapin, load, ldac) = pins, pins
# DATA is shifted in the DAC on the falling CLK edge (MSB-first).
# A falling edge of LOAD will latch the data.
if self.oldload == 1 and load == 0:
self.handle_falling_edge_load()
if self.oldldac == 1 and ldac == 0:
self.handle_falling_edge_ldac()
if self.oldclk == 1 and clk == 0:
self.handle_new_dac_bit()
self.oldclk = clk
self.oldload = load
self.oldldac = ldac
|
robacklin/sigrok
|
libsigrokdecode/decoders/tlc5620/pd.py
|
Python
|
gpl-3.0
| 4,650
|
from itertools import cycle, chain
def fence_pattern(rails, size):
zig_zag = cycle(chain(range(rails), range(rails - 2, 0, -1)))
return zip(zig_zag, range(size))
def encode(msg, rails):
fence = fence_pattern(rails, len(msg))
return ''.join(msg[i] for _, i in sorted(fence))
def decode(msg, rails):
fence = fence_pattern(rails, len(msg))
fence_msg = zip(msg, sorted(fence))
return ''.join(char for char, _ in sorted(fence_msg, key=lambda item: item[1][1]))
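# Hedged illustration (not part of the original solution), using the classic
# rail fence example text with 3 rails:
#   encode('WEAREDISCOVERED', 3)  ->  'WECRERDSOEEAIVD'
#   decode('WECRERDSOEEAIVD', 3)  ->  'WEAREDISCOVERED'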
|
rootulp/xpython
|
exercises/rail-fence-cipher/example.py
|
Python
|
mit
| 490
|
#!/usr/bin/env python
# Experiments with coefficients of a geometric plane
# Resources:
# http://www.wolframalpha.com/input/?i=plane+through+(1,-2,0),(4,-2,-2),(4,1,4)&lk=3
# ==> 2 x - 6 y + 3 z - 14 == 0
# Translate a point relative to some origin
from __future__ import print_function
def translate(point, origin):
return tuple([a - b for a, b in zip(point, origin)])
# Given two points in 3d space, define a vector
def vector(p1, p2):
return tuple([b - a for a, b in zip(p1, p2)])
# Given two vectors in a plane, find the normal vector
def normal(u, v):
# A normal vector is the cross-product of two coplanar vectors
return tuple(
[
u[1] * v[2] - u[2] * v[1],
u[2] * v[0] - u[0] * v[2],
u[0] * v[1] - u[1] * v[0],
]
)
def plane_from_three_points(P, Q, R):
u = vector(P, Q)
v = vector(P, R)
n = normal(u, v)
# Find the coefficients
(A, B, C) = n
# The equation of the plane is thus Ax+By+Cz+K=0.
# Solve for K to get the final coefficient
(x, y, z) = P
K = -(A * x + B * y + C * z)
return (A, B, C, K)
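# Hedged illustration (not part of the original file), using the canonical
# points from the header comment:
#   plane_from_three_points((1, -2, 0), (4, -2, -2), (4, 1, 4))
#   ->  (6, -18, 9, -42), i.e. a multiple of 2x - 6y + 3z - 14 == 0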
# find the Z offset for any x,y
# z = -(Ax + By + K) / C
def calcz(x, y, plane, translation=(0, 0, 0)):
(A, B, C, K) = plane
(tx, ty, tz) = translation
return -(A * (x - tx) + B * (y - ty) + K) / C + tz
# Verify a point is on this plane
def validate(plane, point):
(A, B, C, K) = plane
(x, y, z) = point
return z == calcz(x, y, plane)
def verify_plane(points):
print(" ", "\n ".join([str(p) for p in points]))
plane = plane_from_three_points(*points)
print("Plane coordinates: ", plane)
if plane[2] == 0:
print(" Error: points are colinear")
return
valid = True
for p in points:
if not validate(plane, p):
print("Failed: sample point not on plane, ", p)
valid = False
print("Validation:", "Failed" if not valid else "Passed")
samples = [
# canonical example
[(1, -2, 0), (4, -2, -2), (4, 1, 4)],
    # three collinear points (infinitely many planes fit, so no unique solution)
[(2, 2, 2), (4, 4, 4), (10, 10, 10)],
# Extreme tilt example in mm
[(57, 123, -5), (200, 0, 35), (0, 207, 2)],
# Some more examples in um
[(0, 0, 1300), (200000, 200000, 3500), (0, 150000, -1000)],
[(20000, 20000, -300), (220000, 120000, -1700), (120000, 220000, -700)],
# some example in tenths of mm
[(200, 200, -300), (2200, 1200, -1700), (1200, 2200, -700)],
[(20000, 20000, -300), (220000, 120000, -1700), (120000, 220000, -700)],
[(200, 200, -300), (2200, 1200, -1700), (1200, 2200, -700)],
]
for points in samples:
verify_plane(points)
print("====[Translated]=========")
# Translate plane to origin at P (simplifies by removing K coefficient)
# A*x' + B*y' + C*z' = 0
P = points[0]
T = translate((0, 0, 0), P)
xpoints = [translate(p, P) for p in points]
verify_plane(xpoints)
print("=========================\n")
|
Traumflug/Teacup_Firmware
|
research/planes.py
|
Python
|
gpl-2.0
| 2,998
|
from enigma import eDVBFrontendParametersSatellite, eDVBFrontendParametersCable, eDVBFrontendParametersTerrestrial, eDVBFrontendParametersATSC
from Components.NimManager import nimmanager
def orbpos(pos):
return pos > 3600 and "N/A" or "%d.%d\xc2\xb0%s" % (pos > 1800 and ((3600 - pos) / 10, (3600 - pos) % 10, "W") or (pos / 10, pos % 10, "E"))
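# Hedged illustration (not part of the original module): positions are
# encoded in tenths of a degree east of Greenwich, e.g.
#   orbpos(192)   ->  "19.2\xc2\xb0E"  (Astra 19.2 East)
#   orbpos(3070)  ->  "53.0\xc2\xb0W"  (3600 - 3070 = 530 tenths west)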
def getTunerDescription(nim):
try:
return nimmanager.getTerrestrialDescription(nim)
except:
print "[ChannelNumber] nimmanager.getTerrestrialDescription(nim) failed, nim:", nim
return ""
def getMHz(frequency):
if str(frequency).endswith('MHz'):
return float(frequency.split()[0])
return (frequency+50000)/100000/10.
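# Hedged illustration (not part of the original module): getMHz() rounds an
# integer frequency in Hz to the nearest 0.1 MHz, and parses values already
# given as strings ending in 'MHz':
#   getMHz(474000000)  ->  474.0
#   getMHz('474 MHz')  ->  474.0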
def getChannelNumber(frequency, nim):
if nim == "DVB-T":
for n in nimmanager.nim_slots:
if n.isCompatible("DVB-T"):
nim = n.slot
break
f = getMHz(frequency)
descr = getTunerDescription(nim)
if "DVB-T" in descr:
if "Europe" in descr:
if 174 < f < 230: # III
d = (f + 1) % 7
return str(int(f - 174)/7 + 5) + (d < 3 and "-" or d > 4 and "+" or "")
elif 470 <= f < 863: # IV,V
d = (f + 2) % 8
return str(int(f - 470) / 8 + 21) + (d < 3.5 and "-" or d > 4.5 and "+" or "")
elif "Australia" in descr:
d = (f + 1) % 7
ds = (d < 3 and "-" or d > 4 and "+" or "")
if 174 < f < 202: # CH6-CH9
return str(int(f - 174)/7 + 6) + ds
elif 202 <= f < 209: # CH9A
return "9A" + ds
elif 209 <= f < 230: # CH10-CH12
return str(int(f - 209)/7 + 10) + ds
elif 526 < f < 820: # CH28-CH69
d = (f - 1) % 7
return str(int(f - 526)/7 + 28) + (d < 3 and "-" or d > 4 and "+" or "")
return ""
def supportedChannels(nim):
descr = getTunerDescription(nim)
return "Europe" in descr and "DVB-T" in descr
def channel2frequency(channel, nim):
descr = getTunerDescription(nim)
if "Europe" in descr and "DVB-T" in descr:
if 5 <= channel <= 12:
return (177500 + 7000*(channel- 5))*1000
elif 21 <= channel <= 69:
return (474000 + 8000*(channel-21))*1000
return 474000000
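# Hedged illustration (not part of the original module): for the European
# DVB-T UHF band, channel 21 is centred at 474 MHz with 8 MHz spacing, so
# the two helpers above round-trip (tuner description permitting):
#   channel2frequency(21, nim)            ->  474000000
#   getChannelNumber(474000000, "DVB-T")  ->  "21"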
def ConvertToHumanReadable(tp, tunertype = None):
ret = { }
if tunertype is None:
tunertype = tp.get("tuner_type", "None")
if tunertype == "DVB-S":
ret["tuner_type"] = _("Satellite")
ret["inversion"] = {
eDVBFrontendParametersSatellite.Inversion_Unknown : _("Auto"),
eDVBFrontendParametersSatellite.Inversion_On : _("On"),
eDVBFrontendParametersSatellite.Inversion_Off : _("Off")}.get(tp.get("inversion"))
ret["fec_inner"] = {
eDVBFrontendParametersSatellite.FEC_None : _("None"),
eDVBFrontendParametersSatellite.FEC_Auto : _("Auto"),
eDVBFrontendParametersSatellite.FEC_1_2 : "1/2",
eDVBFrontendParametersSatellite.FEC_2_3 : "2/3",
eDVBFrontendParametersSatellite.FEC_3_4 : "3/4",
eDVBFrontendParametersSatellite.FEC_5_6 : "5/6",
eDVBFrontendParametersSatellite.FEC_6_7 : "6/7",
eDVBFrontendParametersSatellite.FEC_7_8 : "7/8",
eDVBFrontendParametersSatellite.FEC_3_5 : "3/5",
eDVBFrontendParametersSatellite.FEC_4_5 : "4/5",
eDVBFrontendParametersSatellite.FEC_8_9 : "8/9",
eDVBFrontendParametersSatellite.FEC_9_10 : "9/10"}.get(tp.get("fec_inner"))
ret["modulation"] = {
eDVBFrontendParametersSatellite.Modulation_Auto : _("Auto"),
eDVBFrontendParametersSatellite.Modulation_QPSK : "QPSK",
eDVBFrontendParametersSatellite.Modulation_QAM16 : "QAM16",
eDVBFrontendParametersSatellite.Modulation_8PSK : "8PSK",
eDVBFrontendParametersSatellite.Modulation_16APSK : "16APSK",
eDVBFrontendParametersSatellite.Modulation_32APSK : "32APSK"}.get(tp.get("modulation"))
ret["orbital_position"] = nimmanager.getSatName(int(tp.get("orbital_position")))
ret["orb_pos"] = orbpos(int(tp.get("orbital_position")))
ret["polarization"] = {
eDVBFrontendParametersSatellite.Polarisation_Horizontal : _("Horizontal"),
eDVBFrontendParametersSatellite.Polarisation_Vertical : _("Vertical"),
eDVBFrontendParametersSatellite.Polarisation_CircularLeft : _("Circular left"),
eDVBFrontendParametersSatellite.Polarisation_CircularRight : _("Circular right")}.get(tp.get("polarization"))
ret["polarization_abbreviation"] = {
eDVBFrontendParametersSatellite.Polarisation_Horizontal : "H",
eDVBFrontendParametersSatellite.Polarisation_Vertical : "V",
eDVBFrontendParametersSatellite.Polarisation_CircularLeft : "L",
eDVBFrontendParametersSatellite.Polarisation_CircularRight : "R"}.get(tp.get("polarization"))
ret["system"] = {
eDVBFrontendParametersSatellite.System_DVB_S : "DVB-S",
eDVBFrontendParametersSatellite.System_DVB_S2 : "DVB-S2"}.get(tp.get("system"))
if ret["system"] == "DVB-S2":
ret["rolloff"] = {
eDVBFrontendParametersSatellite.RollOff_alpha_0_35 : "0.35",
eDVBFrontendParametersSatellite.RollOff_alpha_0_25 : "0.25",
eDVBFrontendParametersSatellite.RollOff_alpha_0_20 : "0.20",
eDVBFrontendParametersSatellite.RollOff_auto : _("Auto")}.get(tp.get("rolloff"))
ret["pilot"] = {
eDVBFrontendParametersSatellite.Pilot_Unknown : _("Auto"),
eDVBFrontendParametersSatellite.Pilot_On : _("On"),
eDVBFrontendParametersSatellite.Pilot_Off : _("Off")}.get(tp.get("pilot"))
ret["frequency"] = (tp.get("frequency") and str(tp.get("frequency")/1000) + ' MHz') or '0 MHz'
ret["symbol_rate"] = (tp.get("symbol_rate") and tp.get("symbol_rate")/1000) or 0
elif tunertype == "DVB-C":
ret["tuner_type"] = _("Cable")
ret["modulation"] = {
eDVBFrontendParametersCable.Modulation_Auto: _("Auto"),
eDVBFrontendParametersCable.Modulation_QAM16 : "QAM16",
eDVBFrontendParametersCable.Modulation_QAM32 : "QAM32",
eDVBFrontendParametersCable.Modulation_QAM64 : "QAM64",
eDVBFrontendParametersCable.Modulation_QAM128 : "QAM128",
eDVBFrontendParametersCable.Modulation_QAM256 : "QAM256"}.get(tp.get("modulation"))
ret["inversion"] = {
eDVBFrontendParametersCable.Inversion_Unknown : _("Auto"),
eDVBFrontendParametersCable.Inversion_On : _("On"),
eDVBFrontendParametersCable.Inversion_Off : _("Off")}.get(tp.get("inversion"))
ret["fec_inner"] = {
eDVBFrontendParametersCable.FEC_None : _("None"),
eDVBFrontendParametersCable.FEC_Auto : _("Auto"),
eDVBFrontendParametersCable.FEC_1_2 : "1/2",
eDVBFrontendParametersCable.FEC_2_3 : "2/3",
eDVBFrontendParametersCable.FEC_3_4 : "3/4",
eDVBFrontendParametersCable.FEC_5_6 : "5/6",
eDVBFrontendParametersCable.FEC_7_8 : "7/8",
eDVBFrontendParametersCable.FEC_8_9 : "8/9",
eDVBFrontendParametersCable.FEC_3_5 : "3/5",
eDVBFrontendParametersCable.FEC_4_5 : "4/5",
eDVBFrontendParametersCable.FEC_9_10 : "9/10"}.get(tp.get("fec_inner"))
ret["system"] = {
eDVBFrontendParametersCable.System_DVB_C_ANNEX_A : "DVB-C",
eDVBFrontendParametersCable.System_DVB_C_ANNEX_C : "DVB-C ANNEX C"}.get(tp.get("system"))
ret["frequency"] = (tp.get("frequency") and ('%s MHz' % str(tp.get("frequency")/1000.))) or '0 MHz'
ret["symbol_rate"] = (tp.get("symbol_rate") and tp.get("symbol_rate")/1000) or 0
elif tunertype == "DVB-T":
ret["tuner_type"] = _("Terrestrial")
ret["bandwidth"] = {
0 : _("Auto"),
10000000 : "10 MHz",
8000000 : "8 MHz",
7000000 : "7 MHz",
6000000 : "6 MHz",
5000000 : "5 MHz",
1712000 : "1.712 MHz"}.get(tp.get("bandwidth"))
#print 'bandwidth:',tp.get("bandwidth")
ret["code_rate_lp"] = {
eDVBFrontendParametersTerrestrial.FEC_Auto : _("Auto"),
eDVBFrontendParametersTerrestrial.FEC_1_2 : "1/2",
eDVBFrontendParametersTerrestrial.FEC_2_3 : "2/3",
eDVBFrontendParametersTerrestrial.FEC_3_4 : "3/4",
eDVBFrontendParametersTerrestrial.FEC_5_6 : "5/6",
eDVBFrontendParametersTerrestrial.FEC_6_7 : "6/7",
eDVBFrontendParametersTerrestrial.FEC_7_8 : "7/8",
eDVBFrontendParametersTerrestrial.FEC_8_9 : "8/9"}.get(tp.get("code_rate_lp"))
#print 'code_rate_lp:',tp.get("code_rate_lp")
ret["code_rate_hp"] = {
eDVBFrontendParametersTerrestrial.FEC_Auto : _("Auto"),
eDVBFrontendParametersTerrestrial.FEC_1_2 : "1/2",
eDVBFrontendParametersTerrestrial.FEC_2_3 : "2/3",
eDVBFrontendParametersTerrestrial.FEC_3_4 : "3/4",
eDVBFrontendParametersTerrestrial.FEC_5_6 : "5/6",
eDVBFrontendParametersTerrestrial.FEC_6_7 : "6/7",
eDVBFrontendParametersTerrestrial.FEC_7_8 : "7/8",
eDVBFrontendParametersTerrestrial.FEC_8_9 : "8/9"}.get(tp.get("code_rate_hp"))
#print 'code_rate_hp:',tp.get("code_rate_hp")
ret["constellation"] = {
eDVBFrontendParametersTerrestrial.Modulation_Auto : _("Auto"),
eDVBFrontendParametersTerrestrial.Modulation_QPSK : "QPSK",
eDVBFrontendParametersTerrestrial.Modulation_QAM16 : "QAM16",
eDVBFrontendParametersTerrestrial.Modulation_QAM64 : "QAM64",
eDVBFrontendParametersTerrestrial.Modulation_QAM256 : "QAM256"}.get(tp.get("constellation"))
#print 'constellation:',tp.get("constellation")
ret["transmission_mode"] = {
eDVBFrontendParametersTerrestrial.TransmissionMode_Auto : _("Auto"),
eDVBFrontendParametersTerrestrial.TransmissionMode_1k : "1k",
eDVBFrontendParametersTerrestrial.TransmissionMode_2k : "2k",
eDVBFrontendParametersTerrestrial.TransmissionMode_4k : "4k",
eDVBFrontendParametersTerrestrial.TransmissionMode_8k : "8k",
eDVBFrontendParametersTerrestrial.TransmissionMode_16k : "16k",
eDVBFrontendParametersTerrestrial.TransmissionMode_32k : "32k"}.get(tp.get("transmission_mode"))
#print 'transmission_mode:',tp.get("transmission_mode")
ret["guard_interval"] = {
eDVBFrontendParametersTerrestrial.GuardInterval_Auto : _("Auto"),
eDVBFrontendParametersTerrestrial.GuardInterval_19_256 : "19/256",
eDVBFrontendParametersTerrestrial.GuardInterval_19_128 : "19/128",
eDVBFrontendParametersTerrestrial.GuardInterval_1_128 : "1/128",
eDVBFrontendParametersTerrestrial.GuardInterval_1_32 : "1/32",
eDVBFrontendParametersTerrestrial.GuardInterval_1_16 : "1/16",
eDVBFrontendParametersTerrestrial.GuardInterval_1_8 : "1/8",
eDVBFrontendParametersTerrestrial.GuardInterval_1_4 : "1/4"}.get(tp.get("guard_interval"))
#print 'guard_interval:',tp.get("guard_interval")
ret["hierarchy_information"] = {
eDVBFrontendParametersTerrestrial.Hierarchy_Auto : _("Auto"),
eDVBFrontendParametersTerrestrial.Hierarchy_None : _("None"),
eDVBFrontendParametersTerrestrial.Hierarchy_1 : "1",
eDVBFrontendParametersTerrestrial.Hierarchy_2 : "2",
eDVBFrontendParametersTerrestrial.Hierarchy_4 : "4"}.get(tp.get("hierarchy_information"))
#print 'hierarchy_information:',tp.get("hierarchy_information")
ret["inversion"] = {
eDVBFrontendParametersTerrestrial.Inversion_Unknown : _("Auto"),
eDVBFrontendParametersTerrestrial.Inversion_On : _("On"),
eDVBFrontendParametersTerrestrial.Inversion_Off : _("Off")}.get(tp.get("inversion"))
#print 'inversion:',tp.get("inversion")
ret["system"] = {
eDVBFrontendParametersTerrestrial.System_DVB_T_T2 : "DVB-T/T2",
eDVBFrontendParametersTerrestrial.System_DVB_T : "DVB-T",
eDVBFrontendParametersTerrestrial.System_DVB_T2 : "DVB-T2"}.get(tp.get("system"))
# print 'system:',tp.get("system")
ret["frequency"] = (tp.get("frequency") and ('%s MHz' % str(tp.get("frequency")/1000000.))) or '0 MHz'
# print 'frequency:',tp.get("frequency")
ret["channel"] = _("CH%s") % getChannelNumber(tp.get("frequency"), "DVB-T")
elif tunertype == "ATSC":
ret["tuner_type"] = "ATSC"
ret["modulation"] = {
eDVBFrontendParametersATSC.Modulation_Auto: _("Auto"),
eDVBFrontendParametersATSC.Modulation_QAM16 : "QAM16",
eDVBFrontendParametersATSC.Modulation_QAM32 : "QAM32",
eDVBFrontendParametersATSC.Modulation_QAM64 : "QAM64",
eDVBFrontendParametersATSC.Modulation_QAM128 : "QAM128",
eDVBFrontendParametersATSC.Modulation_QAM256 : "QAM256",
eDVBFrontendParametersATSC.Modulation_VSB_8 : "8VSB",
eDVBFrontendParametersATSC.Modulation_VSB_16 : "16VSB"}.get(tp.get("modulation"))
ret["inversion"] = {
eDVBFrontendParametersATSC.Inversion_Unknown : _("Auto"),
eDVBFrontendParametersATSC.Inversion_On : _("On"),
eDVBFrontendParametersATSC.Inversion_Off : _("Off")}.get(tp.get("inversion"))
ret["system"] = {
eDVBFrontendParametersATSC.System_ATSC : "ATSC",
eDVBFrontendParametersATSC.System_DVB_C_ANNEX_B : "DVB-C ANNEX B"}.get(tp.get("system"))
elif tunertype != "None":
print "ConvertToHumanReadable: no or unknown tunertype in tpdata dict for tunertype:", tunertype
for k,v in tp.items():
if k not in ret:
ret[k] = v
return ret
|
BlackHole/enigma2-1
|
lib/python/Tools/Transponder.py
|
Python
|
gpl-2.0
| 12,359
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ctypes
import socket
import sys
if sys.platform == 'win32':
import wmi
class HostUtils(object):
def __init__(self):
if sys.platform == 'win32':
self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
def get_cpus_info(self):
cpus = self._conn_cimv2.query("SELECT * FROM Win32_Processor "
"WHERE ProcessorType = 3")
cpus_list = []
for cpu in cpus:
cpu_info = {'Architecture': cpu.Architecture,
'Name': cpu.Name,
'Manufacturer': cpu.Manufacturer,
'NumberOfCores': cpu.NumberOfCores,
'NumberOfLogicalProcessors':
cpu.NumberOfLogicalProcessors}
cpus_list.append(cpu_info)
return cpus_list
def is_cpu_feature_present(self, feature_key):
return ctypes.windll.kernel32.IsProcessorFeaturePresent(feature_key)
def get_memory_info(self):
"""
Returns a tuple with total visible memory and free physical memory
expressed in kB.
"""
mem_info = self._conn_cimv2.query("SELECT TotalVisibleMemorySize, "
"FreePhysicalMemory "
"FROM win32_operatingsystem")[0]
return (long(mem_info.TotalVisibleMemorySize),
long(mem_info.FreePhysicalMemory))
def get_volume_info(self, drive):
"""
Returns a tuple with total size and free space
expressed in bytes.
"""
logical_disk = self._conn_cimv2.query("SELECT Size, FreeSpace "
"FROM win32_logicaldisk "
"WHERE DeviceID='%s'"
% drive)[0]
return (long(logical_disk.Size), long(logical_disk.FreeSpace))
def get_windows_version(self):
return self._conn_cimv2.Win32_OperatingSystem()[0].Version
def get_local_ips(self):
addr_info = socket.getaddrinfo(socket.gethostname(), None, 0, 0, 0)
# Returns IPv4 and IPv6 addresses, ordered by protocol family
addr_info.sort()
return [a[4][0] for a in addr_info]
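# Hedged usage sketch (not part of the original module; assumes a Windows
# host where the wmi module is importable):
#   hu = HostUtils()
#   total_kb, free_kb = hu.get_memory_info()
#   size_bytes, free_bytes = hu.get_volume_info('C:')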
|
shootstar/novatest
|
nova/virt/hyperv/hostutils.py
|
Python
|
apache-2.0
| 2,964
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SingleSidedBuffer.py
--------------------
Date : August 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'August 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsGeometry,
QgsWkbTypes,
QgsProcessing,
QgsProcessingParameterDistance,
QgsProcessingParameterNumber,
QgsProcessingParameterEnum,
QgsProcessingException)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class SingleSidedBuffer(QgisFeatureBasedAlgorithm):
DISTANCE = 'DISTANCE'
SIDE = 'SIDE'
SEGMENTS = 'SEGMENTS'
JOIN_STYLE = 'JOIN_STYLE'
MITER_LIMIT = 'MITER_LIMIT'
def group(self):
return self.tr('Vector geometry')
def groupId(self):
return 'vectorgeometry'
def __init__(self):
super().__init__()
self.distance = None
self.segments = None
self.join_style = None
self.side = None
self.miter_limit = None
        self.sides = [self.tr('Left'),
                      self.tr('Right')]
        self.join_styles = [self.tr('Round'),
                            self.tr('Miter'),
                            self.tr('Bevel')]
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterDistance(self.DISTANCE,
self.tr('Distance'), parentParameterName='INPUT',
defaultValue=10.0))
self.addParameter(QgsProcessingParameterEnum(
self.SIDE,
self.tr('Side'),
options=self.sides))
self.addParameter(QgsProcessingParameterNumber(self.SEGMENTS,
self.tr('Segments'), QgsProcessingParameterNumber.Integer,
minValue=1, defaultValue=8))
self.addParameter(QgsProcessingParameterEnum(
self.JOIN_STYLE,
self.tr('Join style'),
options=self.join_styles))
self.addParameter(QgsProcessingParameterNumber(self.MITER_LIMIT,
self.tr('Miter limit'), QgsProcessingParameterNumber.Double,
minValue=1, defaultValue=2))
def name(self):
return 'singlesidedbuffer'
def displayName(self):
return self.tr('Single sided buffer')
def outputName(self):
return self.tr('Buffer')
def inputLayerTypes(self):
return [QgsProcessing.TypeVectorLine]
def outputType(self):
return QgsProcessing.TypeVectorPolygon
def outputWkbType(self, input_wkb_type):
return QgsWkbTypes.Polygon
def prepareAlgorithm(self, parameters, context, feedback):
self.distance = self.parameterAsDouble(parameters, self.DISTANCE, context)
self.segments = self.parameterAsInt(parameters, self.SEGMENTS, context)
self.join_style = self.parameterAsEnum(parameters, self.JOIN_STYLE, context) + 1
if self.parameterAsEnum(parameters, self.SIDE, context) == 0:
self.side = QgsGeometry.SideLeft
else:
self.side = QgsGeometry.SideRight
self.miter_limit = self.parameterAsDouble(parameters, self.MITER_LIMIT, context)
return True
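    # Note (editorial, hedged): the "+ 1" on the join style above maps the
    # 0-based enum index (Round/Miter/Bevel) onto the 1-based join-style
    # values used by the buffer engine (round=1, miter=2, bevel=3).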
def processFeature(self, feature, context, feedback):
input_geometry = feature.geometry()
if input_geometry:
output_geometry = input_geometry.singleSidedBuffer(self.distance, self.segments,
self.side, self.join_style, self.miter_limit)
if not output_geometry:
raise QgsProcessingException(
self.tr('Error calculating single sided buffer'))
feature.setGeometry(output_geometry)
return [feature]
|
geopython/QGIS
|
python/plugins/processing/algs/qgis/SingleSidedBuffer.py
|
Python
|
gpl-2.0
| 4,922
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
DATE_FORMAT = 'j בF Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j בF Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j בF'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i:s'
# FIRST_DAY_OF_WEEK =
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.2/django/conf/locale/he/formats.py
|
Python
|
bsd-3-clause
| 470
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('branches', '0008_project_change_branch_task_id'),
]
operations = [
migrations.RemoveField(
model_name='project',
name='change_branch_task_id',
),
migrations.AddField(
model_name='changebranchlog',
name='task_id',
field=models.CharField(max_length=255, null=True, blank=True),
),
]
|
codedbyjay/django-branches
|
branches/migrations/0009_auto_20151225_0235.py
|
Python
|
gpl-2.0
| 565
|
from time import clock
start_time = 0
def start(string):
    # Without the global declaration the assignment below would only bind a
    # function-local variable, and finish() would always measure from 0.
    global start_time
    start_time = clock()
    print string
def finish(string):
    print string, "\nTime Taken : " + str(clock() - start_time)
|
saurv4u/Deep-Learning-for-Content-Retrieval
|
Code/time_logging.py
|
Python
|
mit
| 187
|
import lsst.meas.modelfit
import lsst.shapelet
# ==> WARNING <== CModel cannot be run on forcedCcd at the moment
#config.measurement.plugins.names |= ["modelfit_DoubleShapeletPsfApprox", "modelfit_CModel"]
#config.measurement.slots.modelFlux = "modelfit_CModel"
# modelfit_CModel is the default to compute source extendedness, so we have to redefine the modelFlux
config.measurement.slots.modelFlux = "base_GaussianFlux"
import lsst.meas.extensions.shapeHSM
config.measurement.plugins.names |= ["ext_shapeHSM_HsmShapeRegauss", "ext_shapeHSM_HsmSourceMoments",
"ext_shapeHSM_HsmPsfMoments"]
config.measurement.plugins['ext_shapeHSM_HsmShapeRegauss'].deblendNChild=''
config.measurement.slots.shape = "ext_shapeHSM_HsmSourceMoments"
config.doApCorr=True
#config.measurement.plugins['base_PixelFlags'].masksFpAnywhere.append('CLIPPED')
#config.measurement.plugins['base_PixelFlags'].masksFpCenter.append('BRIGHT_OBJECT')
#config.measurement.plugins['base_PixelFlags'].masksFpAnywhere.append('BRIGHT_OBJECT')
|
LSSTDESC/ReprocessingTaskForce
|
config/w_2018_07/cfht/forcedPhotCcdConfig.py
|
Python
|
gpl-2.0
| 1,047
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Type
from rl_coach.base_parameters import NetworkComponentParameters
class HeadParameters(NetworkComponentParameters):
def __init__(self, parameterized_class_name: str, activation_function: str = 'relu', name: str= 'head',
num_output_head_copies: int=1, rescale_gradient_from_head_by_factor: float=1.0,
loss_weight: float=1.0, dense_layer=None, is_training=False):
super().__init__(dense_layer=dense_layer)
self.activation_function = activation_function
self.name = name
self.num_output_head_copies = num_output_head_copies
self.rescale_gradient_from_head_by_factor = rescale_gradient_from_head_by_factor
self.loss_weight = loss_weight
self.parameterized_class_name = parameterized_class_name
self.is_training = is_training
@property
def path(self):
return 'rl_coach.architectures.tensorflow_components.heads:' + self.parameterized_class_name
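# Hedged illustration (not part of the original file): each subclass below
# only pins parameterized_class_name, which the path property turns into a
# "module:Class" locator, e.g.
#   PPOHeadParameters().path
#   ->  'rl_coach.architectures.tensorflow_components.heads:PPOHead'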
class PPOHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='tanh', name: str='ppo_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="PPOHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
class VHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='v_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None, initializer='normalized_columns',
output_bias_initializer=None):
super().__init__(parameterized_class_name="VHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
self.initializer = initializer
self.output_bias_initializer = output_bias_initializer
class DDPGVHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='ddpg_v_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None, initializer='normalized_columns',
output_bias_initializer=None):
super().__init__(parameterized_class_name="DDPGVHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
self.initializer = initializer
self.output_bias_initializer = output_bias_initializer
class CategoricalQHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='categorical_q_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None,
output_bias_initializer=None):
super().__init__(parameterized_class_name="CategoricalQHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
self.output_bias_initializer = output_bias_initializer
class RegressionHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='q_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None, scheme=None,
output_bias_initializer=None):
super().__init__(parameterized_class_name="RegressionHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
self.output_bias_initializer = output_bias_initializer
class DDPGActorHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='tanh', name: str='policy_head_params', batchnorm: bool=True,
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="DDPGActor", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
self.batchnorm = batchnorm
class WolpertingerActorHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='tanh', name: str='policy_head_params', batchnorm: bool=True,
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="WolpertingerActorHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
self.batchnorm = batchnorm
class DNDQHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='dnd_q_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="DNDQHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
class DuelingQHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='dueling_q_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="DuelingQHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
class MeasurementsPredictionHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='measurements_prediction_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="MeasurementsPredictionHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
class NAFHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='tanh', name: str='naf_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="NAFHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
class PolicyHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='tanh', name: str='policy_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="PolicyHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
class PPOVHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='ppo_v_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None, output_bias_initializer=None):
super().__init__(parameterized_class_name="PPOVHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
self.output_bias_initializer = output_bias_initializer
class QHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='q_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None, output_bias_initializer=None):
super().__init__(parameterized_class_name="QHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
self.output_bias_initializer = output_bias_initializer
class ClassificationHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='classification_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="ClassificationHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
class QuantileRegressionQHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='quantile_regression_q_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None, output_bias_initializer=None):
super().__init__(parameterized_class_name="QuantileRegressionQHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
self.output_bias_initializer = output_bias_initializer
class RainbowQHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='rainbow_q_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="RainbowQHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
class ACERPolicyHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='acer_policy_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None):
super().__init__(parameterized_class_name="ACERPolicyHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
class SACPolicyHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='sac_policy_head_params', dense_layer=None):
super().__init__(parameterized_class_name='SACPolicyHead', activation_function=activation_function, name=name,
dense_layer=dense_layer)
class SACQHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='sac_q_head_params', dense_layer=None,
layers_sizes: tuple = (256, 256), output_bias_initializer=None):
super().__init__(parameterized_class_name='SACQHead', activation_function=activation_function, name=name,
dense_layer=dense_layer)
self.network_layers_sizes = layers_sizes
self.output_bias_initializer = output_bias_initializer
class TD3VHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='relu', name: str='td3_v_head_params',
num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
loss_weight: float = 1.0, dense_layer=None, initializer='xavier',
output_bias_initializer=None):
super().__init__(parameterized_class_name="TD3VHead", activation_function=activation_function, name=name,
dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
loss_weight=loss_weight)
self.initializer = initializer
self.output_bias_initializer = output_bias_initializer
class RNDHeadParameters(HeadParameters):
def __init__(self, name: str = 'rnd_head_params', dense_layer=None, is_predictor=False):
super().__init__(parameterized_class_name="RNDHead", name=name, dense_layer=dense_layer)
self.is_predictor = is_predictor
|
NervanaSystems/coach
|
rl_coach/architectures/head_parameters.py
|
Python
|
apache-2.0
| 16,531
|
"""activities URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from events.views import Agenda, Calendar, Monitor, \
EventsApprovationView, EventsApprovationConfirmView
app_name = "events"
urlpatterns = [
url(r'^agenda/(?P<page>\d+)$', Agenda.as_view(), name="agenda"),
url(r'^agenda/(?P<clas>[A-Z]+)$', Agenda.as_view(), name="agenda"),
url(r'^agenda/(?P<clas>[A-Z]+)/(?P<page>\d+)$', Agenda.as_view(), name="agenda"),
url(r'^agenda$', Agenda.as_view(), name="agenda"),
url(r'^calendar/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})$', Calendar.as_view(),
name="calendar"),
url(r'^calendar$', Calendar.as_view(), name="calendar"),
url(r'^monitor$', Monitor.as_view(), name="monitor"),
url(r'^approvation$', EventsApprovationView.as_view(), name="approvation"),
url(r'^approvation/(?P<id>\d+)/(?P<action>\d{1})$',
EventsApprovationConfirmView.as_view(), name="approvationConfirm"),
]
|
studentisgss/booking
|
events/urls.py
|
Python
|
gpl-3.0
| 1,541
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Parameter.is_input'
db.delete_column(u'damis_parameter', 'is_input')
# Deleting field 'Parameter.is_output'
db.delete_column(u'damis_parameter', 'is_output')
def backwards(self, orm):
# Adding field 'Parameter.is_input'
db.add_column(u'damis_parameter', 'is_input',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Parameter.is_output'
db.add_column(u'damis_parameter', 'is_output',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'damis.algorithm': {
'Meta': {'object_name': 'Algorithm'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'executable_file': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'damis.dataset': {
'Meta': {'object_name': 'Dataset'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'damis.experiment': {
'Meta': {'object_name': 'Experiment'},
'cluster': ('django.db.models.fields.CharField', [], {'default': "'MII-CLUSTER'", 'max_length': '255', 'null': 'True'}),
'finish': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_calc_time': ('django.db.models.fields.TimeField', [], {'default': "'2:00'", 'null': 'True'}),
'p': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'SAVED'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiments'", 'null': 'True', 'to': u"orm['auth.User']"}),
'workflow_state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'damis.parameter': {
'Meta': {'object_name': 'Parameter'},
'algorithm': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parameters'", 'null': 'True', 'to': u"orm['damis.Algorithm']"}),
'connection_type': ('django.db.models.fields.CharField', [], {'default': "'INPUT_VALUE'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'damis.parametervalue': {
'Meta': {'object_name': 'ParameterValue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['damis.Parameter']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['damis.ParameterValue']", 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parameter_values'", 'to': u"orm['damis.Task']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'damis.task': {
'Meta': {'object_name': 'Task'},
'algorithm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['damis.Algorithm']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'null': 'True', 'to': u"orm['damis.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['damis']
|
InScience/DAMIS-old
|
src/damis/migrations/0029_auto__del_field_parameter_is_input__del_field_parameter_is_output.py
|
Python
|
agpl-3.0
| 9,408
|
# -*- coding:utf-8 -*-
# @author xupingmao
# @since 2021/12/04 15:36:42
# @modified 2021/12/11 11:10:20
# @filename dbutil.py
from xutils.dbutil_base import *
from xutils.dbutil_table import *
from xutils.dbutil_hash import *
from xutils.dbutil_sortedset import *
def _get_table_no_lock(table_name):
table = LDB_TABLE_DICT.get(table_name)
if table is None:
table = LdbTable(table_name)
return table
def get_table_old(table_name, type = "rdb"):
"""获取table对象
@param {str} table_name 表名
@return {LdbTable}
"""
check_table_name(table_name)
if type == "hash":
return get_hash_table(table_name)
table = LDB_TABLE_DICT.get(table_name)
if table is None:
with READ_LOCK:
table = _get_table_no_lock(table_name)
LDB_TABLE_DICT[table_name] = table
return table
def get_table(table_name, type = "rdb", user_name = None):
"""获取table对象
@param {str} table_name 表名
@return {LdbTable|LdbHashTable}
"""
check_table_name(table_name)
if type == "hash":
return get_hash_table(table_name)
return LdbTable(table_name, user_name = user_name)
def get_hash_table(table_name, user_name = None):
return LdbHashTable(table_name, user_name = user_name)
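# Hedged usage sketch (illustrative only; assumes the LevelDB backend from
# xutils.dbutil_base has been initialised and that the table names below have
# been registered, since check_table_name() may reject unknown names):
#
#   note_table = get_table("note_index")              # LdbTable
#   stat_table = get_table("user_stat", type="hash")  # LdbHashTable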
|
xupingmao/xnote
|
xutils/dbutil.py
|
Python
|
gpl-3.0
| 1,292
|
#!/usr/bin/python3
# This file is part of NDR.
#
# NDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NDR. If not, see <http://www.gnu.org/licenses/>.
'''Processes an incoming message from uux'''
import os
import argparse
import shutil
import tempfile
import subprocess
import ndr
def main():
# Get our list of log files
parser = argparse.ArgumentParser(
description="Process a remote message")
    parser.add_argument('-k', "--keep", help='keep message files instead of deleting them after processing', action='store_true')
parser.add_argument('messages', nargs='+',
help='remote messages to process')
parser.add_argument('-c', '--config',
default='/etc/ndr/config.yml',
help='NDR Configuration File')
args = parser.parse_args()
ndr_config = ndr.Config(args.config)
logger = ndr_config.logger
validated_messages = []
for message_file in args.messages:
# DEBUG, write message to file
if not os.path.isdir("/tmp/incoming_messages"):
os.makedirs("/tmp/incoming_messages")
logger.info("Processing %s", message_file)
shutil.copy(
message_file, "/tmp/incoming_messages/" + os.path.basename(message_file))
# If we're operating in UUCP mode, then we only accept messages signed
# by our ingest
accepted_cns = None
if ndr_config.upload_method == 'uucp':
accepted_cns = ndr_config.ingest_uucp_host
else:
logger.warning("accepting any signed messages due to local mode!")
message = ndr.IngestMessage.verify_and_load_message(
ndr_config, message_file, only_accept_cn=accepted_cns)
if message is not None:
logger.info("Successfully parsed and validated the message!")
# Now some magic is required. If we're running via uux, we're running in the context of
# the uucp user and not under NDR. The UUCP user can run ndr-process-message as sudo so
# we need to copy the message over to a temporary directory if we're not root, and then
# recurse into ourselves. We'll do this for all the messages in the stack.
validated_messages.append(message_file)
# If we're root, just process it
if os.getuid() == 0:
logger.info("Running in root context")
# Now do things with it based on the type of message it is
if message.message_type == ndr.IngestMessageTypes.CERTIFICATE_REQUEST:
logger.info("Got a certificate request message ...")
cert_request_msg = ndr.CertificateRequest()
cert_request_msg.from_message(message)
# Write out the signed certificates to the root filesystem
if cert_request_msg.certificate is not None:
logger.info("Writing out signed certificate ...")
with open(ndr_config.ssl_certfile, 'w') as f:
f.write(cert_request_msg.certificate)
if cert_request_msg.certificate_chain is not None:
logger.info("Writing out certificate chain ...")
with open(ndr_config.ssl_bundle, 'w') as f:
f.write(cert_request_msg.certificate_chain)
logger.info("Updated certificate chain for device")
elif message.message_type == ndr.IngestMessageTypes.REBOOT_REQUEST:
logger.info("Got a reboot request")
shutdown_args = [ "shutdown", "-r", "now"]
shutdown_process = subprocess.run(
args=shutdown_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
check=False)
if shutdown_process.returncode != 0:
logger.error("shutdown failed: %s", str(shutdown_process.stderr, 'utf-8'))
return
elif message.message_type == ndr.IngestMessageTypes.FILE_UPDATE:
logger.info("Got a file update message")
file_update_message = ndr.FileUpdateMessage(ndr_config)
file_update_message.from_message(message)
file_update_message.write_updates()
else:
logger.error("Got non-client accepted %s message", message.message_type.value)
else:
logger.info("Not running as root, saving messages to run in sudo")
# Non-root fall through. We should have a pile of validated messages
if os.getuid() != 0:
files_for_sudo = []
for valid_message in validated_messages:
# The messages will be revalidated on the second go around
msg_fd, vm_file = tempfile.mkstemp()
os.close(msg_fd)
shutil.copy(valid_message, vm_file)
files_for_sudo.append(vm_file)
# Attempt to call ourselves via sudo
sudo_process_message = [ "sudo", "ndr-process-message"]
sudo_process_message += files_for_sudo
# Here goes nothing
sudo_proc = subprocess.run(
args=sudo_process_message, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
check=False)
if sudo_proc.returncode != 0:
logger.error("sudo run failed: %s", str(sudo_proc.stderr, 'utf-8'))
return
# And done (we log this here so it only prints once)
logger.info("Finished processing messages")
# Make a final loop through to clean up our files
if args.keep is False:
for message in args.messages:
os.remove(message)
return
if __name__ == "__main__":
main()
|
SecuredByTHEM/ndr
|
ndr/tools/process_message.py
|
Python
|
gpl-3.0
| 6,145
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic utils."""
import codecs
import logging
import os
import pipes
import Queue
import re
import stat
import subprocess
import sys
import tempfile
import threading
import time
import urlparse
import subprocess2
RETRY_MAX = 3
RETRY_INITIAL_SLEEP = 0.5
class Error(Exception):
"""gclient exception class."""
def __init__(self, msg, *args, **kwargs):
index = getattr(threading.currentThread(), 'index', 0)
if index:
msg = '\n'.join('%d> %s' % (index, l) for l in msg.splitlines())
super(Error, self).__init__(msg, *args, **kwargs)
def SplitUrlRevision(url):
"""Splits url and returns a two-tuple: url, rev"""
if url.startswith('ssh:'):
# Make sure ssh://user-name@example.com/~/test.git@stable works
regex = r'(ssh://(?:[-.\w]+@)?[-\w:\.]+/[-~\w\./]+)(?:@(.+))?'
components = re.search(regex, url).groups()
else:
components = url.split('@', 1)
if len(components) == 1:
components += [None]
return tuple(components)
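# Illustrative behaviour of SplitUrlRevision (a sketch, not a doctest):
#   SplitUrlRevision('http://example.com/repo@deadbeef')
#     -> ('http://example.com/repo', 'deadbeef')
#   SplitUrlRevision('ssh://user@example.com/~/test.git@stable')
#     -> ('ssh://user@example.com/~/test.git', 'stable')
#   SplitUrlRevision('http://example.com/repo')
#     -> ('http://example.com/repo', None)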
def IsDateRevision(revision):
"""Returns true if the given revision is of the form "{ ... }"."""
return bool(revision and re.match(r'^\{.+\}$', str(revision)))
def MakeDateRevision(date):
"""Returns a revision representing the latest revision before the given
date."""
return "{" + date + "}"
def SyntaxErrorToError(filename, e):
"""Raises a gclient_utils.Error exception with the human readable message"""
try:
# Try to construct a human readable error message
if filename:
error_message = 'There is a syntax error in %s\n' % filename
else:
error_message = 'There is a syntax error\n'
error_message += 'Line #%s, character %s: "%s"' % (
e.lineno, e.offset, re.sub(r'[\r\n]*$', '', e.text))
except:
# Something went wrong, re-raise the original exception
raise e
else:
raise Error(error_message)
class PrintableObject(object):
def __str__(self):
output = ''
for i in dir(self):
if i.startswith('__'):
continue
output += '%s = %s\n' % (i, str(getattr(self, i, '')))
return output
def FileRead(filename, mode='rU'):
with open(filename, mode=mode) as f:
# codecs.open() has different behavior than open() on python 2.6 so use
# open() and decode manually.
s = f.read()
try:
return s.decode('utf-8')
except UnicodeDecodeError:
return s
def FileWrite(filename, content, mode='w'):
with codecs.open(filename, mode=mode, encoding='utf-8') as f:
f.write(content)
def safe_rename(old, new):
"""Renames a file reliably.
Sometimes os.rename does not work because a dying git process keeps a handle
  on it for a few seconds. An exception is then thrown, which makes the program
give up what it was doing and remove what was deleted.
The only solution is to catch the exception and try again until it works.
"""
# roughly 10s
retries = 100
for i in range(retries):
try:
os.rename(old, new)
break
except OSError:
if i == (retries - 1):
# Give up.
raise
# retry
logging.debug("Renaming failed from %s to %s. Retrying ..." % (old, new))
time.sleep(0.1)
def rmtree(path):
"""shutil.rmtree() on steroids.
Recursively removes a directory, even if it's marked read-only.
shutil.rmtree() doesn't work on Windows if any of the files or directories
are read-only, which svn repositories and some .svn files are. We need to
be able to force the files to be writable (i.e., deletable) as we traverse
the tree.
Even with all this, Windows still sometimes fails to delete a file, citing
a permission error (maybe something to do with antivirus scans or disk
indexing). The best suggestion any of the user forums had was to wait a
bit and try again, so we do that too. It's hand-waving, but sometimes it
works. :/
On POSIX systems, things are a little bit simpler. The modes of the files
to be deleted doesn't matter, only the modes of the directories containing
them are significant. As the directory tree is traversed, each directory
has its mode set appropriately before descending into it. This should
result in the entire tree being removed, with the possible exception of
*path itself, because nothing attempts to change the mode of its parent.
Doing so would be hazardous, as it's not a directory slated for removal.
In the ordinary case, this is not a problem: for our purposes, the user
will never lack write permission on *path's parent.
"""
if not os.path.exists(path):
return
if os.path.islink(path) or not os.path.isdir(path):
raise Error('Called rmtree(%s) in non-directory' % path)
if sys.platform == 'win32':
# Give up and use cmd.exe's rd command.
path = os.path.normcase(path)
for _ in xrange(3):
exitcode = subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', path])
if exitcode == 0:
return
else:
print >> sys.stderr, 'rd exited with code %d' % exitcode
time.sleep(3)
raise Exception('Failed to remove path %s' % path)
# On POSIX systems, we need the x-bit set on the directory to access it,
# the r-bit to see its contents, and the w-bit to remove files from it.
# The actual modes of the files within the directory is irrelevant.
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
def remove(func, subpath):
func(subpath)
for fn in os.listdir(path):
# If fullpath is a symbolic link that points to a directory, isdir will
# be True, but we don't want to descend into that as a directory, we just
# want to remove the link. Check islink and treat links as ordinary files
# would be treated regardless of what they reference.
fullpath = os.path.join(path, fn)
if os.path.islink(fullpath) or not os.path.isdir(fullpath):
remove(os.remove, fullpath)
else:
# Recurse.
rmtree(fullpath)
remove(os.rmdir, path)
def safe_makedirs(tree):
"""Creates the directory in a safe manner.
  Because multiple threads can create these directories concurrently, trap the
exception and pass on.
"""
count = 0
while not os.path.exists(tree):
count += 1
try:
os.makedirs(tree)
except OSError, e:
# 17 POSIX, 183 Windows
if e.errno not in (17, 183):
raise
if count > 40:
# Give up.
raise
def CommandToStr(args):
"""Converts an arg list into a shell escaped string."""
return ' '.join(pipes.quote(arg) for arg in args)
def CheckCallAndFilterAndHeader(args, always=False, header=None, **kwargs):
"""Adds 'header' support to CheckCallAndFilter.
If |always| is True, a message indicating what is being done
  is printed to stdout all the time even if no output is generated. Otherwise
  the message header is printed only if the call generated any output.
"""
stdout = kwargs.setdefault('stdout', sys.stdout)
if header is None:
header = "\n________ running '%s' in '%s'\n" % (
' '.join(args), kwargs.get('cwd', '.'))
if always:
stdout.write(header)
else:
filter_fn = kwargs.get('filter_fn')
def filter_msg(line):
if line is None:
stdout.write(header)
elif filter_fn:
filter_fn(line)
kwargs['filter_fn'] = filter_msg
kwargs['call_filter_on_first_line'] = True
# Obviously.
kwargs.setdefault('print_stdout', True)
return CheckCallAndFilter(args, **kwargs)
class Wrapper(object):
"""Wraps an object, acting as a transparent proxy for all properties by
default.
"""
def __init__(self, wrapped):
self._wrapped = wrapped
def __getattr__(self, name):
return getattr(self._wrapped, name)
class AutoFlush(Wrapper):
"""Creates a file object clone to automatically flush after N seconds."""
def __init__(self, wrapped, delay):
super(AutoFlush, self).__init__(wrapped)
if not hasattr(self, 'lock'):
self.lock = threading.Lock()
self.__last_flushed_at = time.time()
self.delay = delay
@property
def autoflush(self):
return self
def write(self, out, *args, **kwargs):
self._wrapped.write(out, *args, **kwargs)
should_flush = False
self.lock.acquire()
try:
if self.delay and (time.time() - self.__last_flushed_at) > self.delay:
should_flush = True
self.__last_flushed_at = time.time()
finally:
self.lock.release()
if should_flush:
self.flush()
class Annotated(Wrapper):
"""Creates a file object clone to automatically prepends every line in worker
threads with a NN> prefix.
"""
def __init__(self, wrapped, include_zero=False):
super(Annotated, self).__init__(wrapped)
if not hasattr(self, 'lock'):
self.lock = threading.Lock()
self.__output_buffers = {}
self.__include_zero = include_zero
@property
def annotated(self):
return self
def write(self, out):
index = getattr(threading.currentThread(), 'index', 0)
if not index and not self.__include_zero:
# Unindexed threads aren't buffered.
return self._wrapped.write(out)
self.lock.acquire()
try:
      # Use a dummy array to hold the string so the code can be lockless.
      # Strings are immutable, so without the array a lock would have to be
      # held for the whole dictionary. Using an array is faster than using a
      # dummy object.
if not index in self.__output_buffers:
obj = self.__output_buffers[index] = ['']
else:
obj = self.__output_buffers[index]
finally:
self.lock.release()
# Continue lockless.
obj[0] += out
while '\n' in obj[0]:
line, remaining = obj[0].split('\n', 1)
if line:
self._wrapped.write('%d>%s\n' % (index, line))
obj[0] = remaining
def flush(self):
"""Flush buffered output."""
orphans = []
self.lock.acquire()
try:
# Detect threads no longer existing.
indexes = (getattr(t, 'index', None) for t in threading.enumerate())
indexes = filter(None, indexes)
for index in self.__output_buffers:
if not index in indexes:
orphans.append((index, self.__output_buffers[index][0]))
for orphan in orphans:
del self.__output_buffers[orphan[0]]
finally:
self.lock.release()
    # Don't keep the lock while writing. Will append \n when it shouldn't.
for orphan in orphans:
if orphan[1]:
self._wrapped.write('%d>%s\n' % (orphan[0], orphan[1]))
return self._wrapped.flush()
def MakeFileAutoFlush(fileobj, delay=10):
autoflush = getattr(fileobj, 'autoflush', None)
if autoflush:
autoflush.delay = delay
return fileobj
return AutoFlush(fileobj, delay)
def MakeFileAnnotated(fileobj, include_zero=False):
if getattr(fileobj, 'annotated', None):
return fileobj
  return Annotated(fileobj, include_zero)
GCLIENT_CHILDREN = []
GCLIENT_CHILDREN_LOCK = threading.Lock()
class GClientChildren(object):
@staticmethod
def add(popen_obj):
with GCLIENT_CHILDREN_LOCK:
GCLIENT_CHILDREN.append(popen_obj)
@staticmethod
def remove(popen_obj):
with GCLIENT_CHILDREN_LOCK:
GCLIENT_CHILDREN.remove(popen_obj)
@staticmethod
def _attemptToKillChildren():
global GCLIENT_CHILDREN
with GCLIENT_CHILDREN_LOCK:
zombies = [c for c in GCLIENT_CHILDREN if c.poll() is None]
for zombie in zombies:
try:
zombie.kill()
except OSError:
pass
with GCLIENT_CHILDREN_LOCK:
GCLIENT_CHILDREN = [k for k in GCLIENT_CHILDREN if k.poll() is not None]
@staticmethod
def _areZombies():
with GCLIENT_CHILDREN_LOCK:
return bool(GCLIENT_CHILDREN)
@staticmethod
def KillAllRemainingChildren():
GClientChildren._attemptToKillChildren()
if GClientChildren._areZombies():
time.sleep(0.5)
GClientChildren._attemptToKillChildren()
with GCLIENT_CHILDREN_LOCK:
if GCLIENT_CHILDREN:
print >> sys.stderr, 'Could not kill the following subprocesses:'
for zombie in GCLIENT_CHILDREN:
print >> sys.stderr, ' ', zombie.pid
def CheckCallAndFilter(args, stdout=None, filter_fn=None,
print_stdout=None, call_filter_on_first_line=False,
retry=False, **kwargs):
"""Runs a command and calls back a filter function if needed.
Accepts all subprocess2.Popen() parameters plus:
print_stdout: If True, the command's stdout is forwarded to stdout.
filter_fn: A function taking a single string argument called with each line
of the subprocess2's output. Each line has the trailing newline
character trimmed.
stdout: Can be any bufferable output.
retry: If the process exits non-zero, sleep for a brief interval and try
again, up to RETRY_MAX times.
stderr is always redirected to stdout.
"""
assert print_stdout or filter_fn
stdout = stdout or sys.stdout
filter_fn = filter_fn or (lambda x: None)
sleep_interval = RETRY_INITIAL_SLEEP
run_cwd = kwargs.get('cwd', os.getcwd())
for _ in xrange(RETRY_MAX + 1):
kid = subprocess2.Popen(
args, bufsize=0, stdout=subprocess2.PIPE, stderr=subprocess2.STDOUT,
**kwargs)
GClientChildren.add(kid)
# Do a flush of stdout before we begin reading from the subprocess2's stdout
stdout.flush()
# Also, we need to forward stdout to prevent weird re-ordering of output.
# This has to be done on a per byte basis to make sure it is not buffered:
# normally buffering is done for each line, but if svn requests input, no
# end-of-line character is output after the prompt and it would not show up.
try:
in_byte = kid.stdout.read(1)
if in_byte:
if call_filter_on_first_line:
filter_fn(None)
in_line = ''
while in_byte:
if in_byte != '\r':
if print_stdout:
stdout.write(in_byte)
if in_byte != '\n':
in_line += in_byte
else:
filter_fn(in_line)
in_line = ''
else:
filter_fn(in_line)
in_line = ''
in_byte = kid.stdout.read(1)
# Flush the rest of buffered output. This is only an issue with
# stdout/stderr not ending with a \n.
if len(in_line):
filter_fn(in_line)
rv = kid.wait()
# Don't put this in a 'finally,' since the child may still run if we get
# an exception.
GClientChildren.remove(kid)
except KeyboardInterrupt:
print >> sys.stderr, 'Failed while running "%s"' % ' '.join(args)
raise
if rv == 0:
return 0
if not retry:
break
print ("WARNING: subprocess '%s' in %s failed; will retry after a short "
'nap...' % (' '.join('"%s"' % x for x in args), run_cwd))
time.sleep(sleep_interval)
sleep_interval *= 2
raise subprocess2.CalledProcessError(
rv, args, kwargs.get('cwd', None), None, None)
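# Hedged usage sketch (assumes 'git' is available on PATH; illustrative only):
#   CheckCallAndFilter(['git', 'status'], print_stdout=True)
# This streams the subprocess output to sys.stdout as it arrives, returns 0 on
# success and raises subprocess2.CalledProcessError on persistent failure.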
def FindGclientRoot(from_dir, filename='.gclient'):
"""Tries to find the gclient root."""
real_from_dir = os.path.realpath(from_dir)
path = real_from_dir
while not os.path.exists(os.path.join(path, filename)):
split_path = os.path.split(path)
if not split_path[1]:
return None
path = split_path[0]
# If we did not find the file in the current directory, make sure we are in a
# sub directory that is controlled by this configuration.
if path != real_from_dir:
entries_filename = os.path.join(path, filename + '_entries')
if not os.path.exists(entries_filename):
# If .gclient_entries does not exist, a previous call to gclient sync
# might have failed. In that case, we cannot verify that the .gclient
      # is the one we want to use. In order not to cause too much trouble,
# just issue a warning and return the path anyway.
print >> sys.stderr, ("%s file in parent directory %s might not be the "
"file you want to use" % (filename, path))
return path
scope = {}
try:
exec(FileRead(entries_filename), scope)
except SyntaxError, e:
SyntaxErrorToError(filename, e)
all_directories = scope['entries'].keys()
path_to_check = real_from_dir[len(path)+1:]
while path_to_check:
if path_to_check in all_directories:
return path
path_to_check = os.path.dirname(path_to_check)
return None
logging.info('Found gclient root at ' + path)
return path
def PathDifference(root, subpath):
"""Returns the difference subpath minus root."""
root = os.path.realpath(root)
subpath = os.path.realpath(subpath)
if not subpath.startswith(root):
return None
# If the root does not have a trailing \ or /, we add it so the returned
  # path starts immediately after the separator regardless of whether it is
# provided.
root = os.path.join(root, '')
return subpath[len(root):]
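# Illustrative behaviour (a sketch, not a doctest):
#   PathDifference('/a/b', '/a/b/c/d') -> 'c/d'
#   PathDifference('/a/b', '/other')   -> None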
def FindFileUpwards(filename, path=None):
"""Search upwards from the a directory (default: current) to find a file.
Returns nearest upper-level directory with the passed in file.
"""
if not path:
path = os.getcwd()
path = os.path.realpath(path)
while True:
file_path = os.path.join(path, filename)
if os.path.exists(file_path):
return path
(new_path, _) = os.path.split(path)
if new_path == path:
return None
path = new_path
def GetGClientRootAndEntries(path=None):
"""Returns the gclient root and the dict of entries."""
config_file = '.gclient_entries'
root = FindFileUpwards(config_file, path)
if not root:
print "Can't find %s" % config_file
return None
config_path = os.path.join(root, config_file)
env = {}
execfile(config_path, env)
config_dir = os.path.dirname(config_path)
return config_dir, env['entries']
def lockedmethod(method):
"""Method decorator that holds self.lock for the duration of the call."""
def inner(self, *args, **kwargs):
try:
try:
self.lock.acquire()
except KeyboardInterrupt:
print >> sys.stderr, 'Was deadlocked'
raise
return method(self, *args, **kwargs)
finally:
self.lock.release()
return inner
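# Hedged usage sketch for @lockedmethod (Counter below is hypothetical and not
# part of depot_tools):
#
#   class Counter(object):
#     lock = threading.Lock()
#     def __init__(self):
#       self.value = 0
#     @lockedmethod
#     def increment(self):
#       self.value += 1  # body runs with self.lock held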
class WorkItem(object):
"""One work item."""
  # On cygwin, creating a lock throws randomly when nearing ~100 locks.
# As a workaround, use a single lock. Yep you read it right. Single lock for
# all the 100 objects.
lock = threading.Lock()
def __init__(self, name):
# A unique string representing this work item.
self._name = name
def run(self, work_queue):
"""work_queue is passed as keyword argument so it should be
the last parameters of the function when you override it."""
pass
@property
def name(self):
return self._name
class ExecutionQueue(object):
"""Runs a set of WorkItem that have interdependencies and were WorkItem are
added as they are processed.
In gclient's case, Dependencies sometime needs to be run out of order due to
From() keyword. This class manages that all the required dependencies are run
before running each one.
Methods of this class are thread safe.
"""
def __init__(self, jobs, progress, ignore_requirements):
"""jobs specifies the number of concurrent tasks to allow. progress is a
Progress instance."""
# Set when a thread is done or a new item is enqueued.
self.ready_cond = threading.Condition()
# Maximum number of concurrent tasks.
self.jobs = jobs
# List of WorkItem, for gclient, these are Dependency instances.
self.queued = []
# List of strings representing each Dependency.name that was run.
self.ran = []
# List of items currently running.
self.running = []
# Exceptions thrown if any.
self.exceptions = Queue.Queue()
# Progress status
self.progress = progress
if self.progress:
self.progress.update(0)
self.ignore_requirements = ignore_requirements
def enqueue(self, d):
"""Enqueue one Dependency to be executed later once its requirements are
satisfied.
"""
assert isinstance(d, WorkItem)
self.ready_cond.acquire()
try:
self.queued.append(d)
total = len(self.queued) + len(self.ran) + len(self.running)
logging.debug('enqueued(%s)' % d.name)
if self.progress:
self.progress._total = total + 1
self.progress.update(0)
self.ready_cond.notifyAll()
finally:
self.ready_cond.release()
def flush(self, *args, **kwargs):
"""Runs all enqueued items until all are executed."""
kwargs['work_queue'] = self
self.ready_cond.acquire()
try:
while True:
# Check for task to run first, then wait.
while True:
if not self.exceptions.empty():
            # Systematically flush the queue when an exception is logged.
self.queued = []
self._flush_terminated_threads()
if (not self.queued and not self.running or
self.jobs == len(self.running)):
logging.debug('No more worker threads or can\'t queue anything.')
break
# Check for new tasks to start.
for i in xrange(len(self.queued)):
# Verify its requirements.
if (self.ignore_requirements or
not (set(self.queued[i].requirements) - set(self.ran))):
# Start one work item: all its requirements are satisfied.
self._run_one_task(self.queued.pop(i), args, kwargs)
break
else:
            # Couldn't find an item that could run. Break out of the outer loop.
break
if not self.queued and not self.running:
# We're done.
break
# We need to poll here otherwise Ctrl-C isn't processed.
try:
self.ready_cond.wait(10)
except KeyboardInterrupt:
# Help debugging by printing some information:
print >> sys.stderr, (
('\nAllowed parallel jobs: %d\n# queued: %d\nRan: %s\n'
'Running: %d') % (
self.jobs,
len(self.queued),
', '.join(self.ran),
len(self.running)))
for i in self.queued:
print >> sys.stderr, '%s: %s' % (i.name, ', '.join(i.requirements))
raise
# Something happened: self.enqueue() or a thread terminated. Loop again.
finally:
self.ready_cond.release()
assert not self.running, 'Now guaranteed to be single-threaded'
if not self.exceptions.empty():
# To get back the stack location correctly, the raise a, b, c form must be
      # used; passing a tuple as the first argument doesn't work.
e = self.exceptions.get()
raise e[0], e[1], e[2]
if self.progress:
self.progress.end()
def _flush_terminated_threads(self):
"""Flush threads that have terminated."""
running = self.running
self.running = []
for t in running:
if t.isAlive():
self.running.append(t)
else:
t.join()
sys.stdout.flush()
if self.progress:
self.progress.update(1, t.item.name)
if t.item.name in self.ran:
raise Error(
'gclient is confused, "%s" is already in "%s"' % (
t.item.name, ', '.join(self.ran)))
if not t.item.name in self.ran:
self.ran.append(t.item.name)
def _run_one_task(self, task_item, args, kwargs):
if self.jobs > 1:
# Start the thread.
index = len(self.ran) + len(self.running) + 1
new_thread = self._Worker(task_item, index, args, kwargs)
self.running.append(new_thread)
new_thread.start()
else:
# Run the 'thread' inside the main thread. Don't try to catch any
# exception.
task_item.run(*args, **kwargs)
self.ran.append(task_item.name)
if self.progress:
self.progress.update(1, ', '.join(t.item.name for t in self.running))
class _Worker(threading.Thread):
"""One thread to execute one WorkItem."""
def __init__(self, item, index, args, kwargs):
threading.Thread.__init__(self, name=item.name or 'Worker')
logging.info('_Worker(%s) reqs:%s' % (item.name, item.requirements))
self.item = item
self.index = index
self.args = args
self.kwargs = kwargs
self.daemon = True
def run(self):
"""Runs in its own thread."""
logging.debug('_Worker.run(%s)' % self.item.name)
work_queue = self.kwargs['work_queue']
try:
self.item.run(*self.args, **self.kwargs)
except KeyboardInterrupt:
logging.info('Caught KeyboardInterrupt in thread %s', self.item.name)
logging.info(str(sys.exc_info()))
work_queue.exceptions.put(sys.exc_info())
raise
except Exception:
# Catch exception location.
logging.info('Caught exception in thread %s', self.item.name)
logging.info(str(sys.exc_info()))
work_queue.exceptions.put(sys.exc_info())
finally:
logging.info('_Worker.run(%s) done', self.item.name)
work_queue.ready_cond.acquire()
try:
work_queue.ready_cond.notifyAll()
finally:
work_queue.ready_cond.release()
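# Hedged usage sketch for ExecutionQueue (PrintItem below is hypothetical and
# not part of depot_tools; flush() reads item.requirements, so items must
# provide that attribute):
#
#   class PrintItem(WorkItem):
#     requirements = []
#     def run(self, work_queue=None):
#       print self.name
#
#   queue = ExecutionQueue(jobs=2, progress=None, ignore_requirements=False)
#   queue.enqueue(PrintItem('hello'))
#   queue.flush()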
def GetEditor(git, git_editor=None):
"""Returns the most plausible editor to use.
In order of preference:
- GIT_EDITOR/SVN_EDITOR environment variable
- core.editor git configuration variable (if supplied by git-cl)
- VISUAL environment variable
- EDITOR environment variable
- vim (non-Windows) or notepad (Windows)
In the case of git-cl, this matches git's behaviour, except that it does not
include dumb terminal detection.
In the case of gcl, this matches svn's behaviour, except that it does not
accept a command-line flag or check the editor-cmd configuration variable.
"""
if git:
editor = os.environ.get('GIT_EDITOR') or git_editor
else:
editor = os.environ.get('SVN_EDITOR')
if not editor:
editor = os.environ.get('VISUAL')
if not editor:
editor = os.environ.get('EDITOR')
if not editor:
if sys.platform.startswith('win'):
editor = 'notepad'
else:
editor = 'vim'
return editor
def RunEditor(content, git, git_editor=None):
"""Opens up the default editor in the system to get the CL description."""
file_handle, filename = tempfile.mkstemp(text=True, prefix='cl_description')
# Make sure CRLF is handled properly by requiring none.
if '\r' in content:
print >> sys.stderr, (
'!! Please remove \\r from your change description !!')
fileobj = os.fdopen(file_handle, 'w')
# Still remove \r if present.
fileobj.write(re.sub('\r?\n', '\n', content))
fileobj.close()
try:
editor = GetEditor(git, git_editor=git_editor)
if not editor:
return None
cmd = '%s %s' % (editor, filename)
if sys.platform == 'win32' and os.environ.get('TERM') == 'msys':
# Msysgit requires the usage of 'env' to be present.
cmd = 'env ' + cmd
try:
# shell=True to allow the shell to handle all forms of quotes in
# $EDITOR.
subprocess2.check_call(cmd, shell=True)
except subprocess2.CalledProcessError:
return None
return FileRead(filename)
finally:
os.remove(filename)
def UpgradeToHttps(url):
"""Upgrades random urls to https://.
Do not touch unknown urls like ssh:// or git://.
  Do not touch http:// urls with a port number.
  Fixes invalid GAE urls.
"""
if not url:
return url
if not re.match(r'[a-z\-]+\://.*', url):
# Make sure it is a valid uri. Otherwise, urlparse() will consider it a
# relative url and will use http:///foo. Note that it defaults to http://
# for compatibility with naked url like "localhost:8080".
url = 'http://%s' % url
parsed = list(urlparse.urlparse(url))
# Do not automatically upgrade http to https if a port number is provided.
if parsed[0] == 'http' and not re.match(r'^.+?\:\d+$', parsed[1]):
parsed[0] = 'https'
return urlparse.urlunparse(parsed)
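# Expected behaviour per the rules above (a sketch, not a doctest):
#   UpgradeToHttps('codereview.example.org')  -> 'https://codereview.example.org'
#   UpgradeToHttps('http://example.org:8080') -> 'http://example.org:8080'
#   UpgradeToHttps('git://example.org/repo')  -> 'git://example.org/repo'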
def ParseCodereviewSettingsContent(content):
"""Process a codereview.settings file properly."""
lines = (l for l in content.splitlines() if not l.strip().startswith("#"))
try:
keyvals = dict([x.strip() for x in l.split(':', 1)] for l in lines if l)
except ValueError:
raise Error(
'Failed to process settings, please fix. Content:\n\n%s' % content)
def fix_url(key):
if keyvals.get(key):
keyvals[key] = UpgradeToHttps(keyvals[key])
fix_url('CODE_REVIEW_SERVER')
fix_url('VIEW_VC')
return keyvals
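# Illustrative input/output (a sketch, not a doctest):
#   content = 'CODE_REVIEW_SERVER: codereview.example.org\n# a comment\n'
#   ParseCodereviewSettingsContent(content)
#     -> {'CODE_REVIEW_SERVER': 'https://codereview.example.org'}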
def NumLocalCpus():
"""Returns the number of processors.
Python on OSX 10.6 raises a NotImplementedError exception.
"""
try:
import multiprocessing
return multiprocessing.cpu_count()
except: # pylint: disable=W0702
# Mac OS 10.6 only
# pylint: disable=E1101
return int(os.sysconf('SC_NPROCESSORS_ONLN'))
|
coreos/depot_tools
|
gclient_utils.py
|
Python
|
bsd-3-clause
| 28,957
|
import json
import logging
from collections import namedtuple
from typing import TYPE_CHECKING, List, Dict
import arrow
import requests
from six import StringIO, BytesIO
from pyEchosign.classes.documents import AgreementDocument
from pyEchosign.exceptions.internal import ApiError
from pyEchosign.utils.utils import find_user_in_list
from .users import User
from pyEchosign.utils.request_parameters import get_headers
from pyEchosign.utils.handle_response import check_error, response_success
log = logging.getLogger('pyEchosign.' + __name__)
if TYPE_CHECKING:
from .account import EchosignAccount
__all__ = ['Agreement']
class Agreement(object):
""" Represents either a created agreement in Echosign, or one built in Python which can be sent through, and created
in Echosign.
Args:
account (EchosignAccount): An instance of :class:`EchosignAccount <pyEchosign.classes.account.EchosignAccount>`.
All Agreement actions will be conducted under this account.
Keyword Args:
fully_retrieved (bool): Whether or not the agreement has all information retrieved,
or if only the basic information was pulled (such as when getting all agreements instead
of requesting the specific agreement)
echosign_id (str): The ID assigned to the agreement by Echosign, used to identify the agreement via the API
name (str): The name of the document as specified by the sender
status (Agreement.Status): The current status of the document (OUT_FOR_SIGNATURE, SIGNED, APPROVED, etc)
users (list[DisplayUser]): The users associated with this agreement, represented by
:class:`EchosignAccount <pyEchosign.classes.account.EchosignAccount>`
files (list): A list of :class:`TransientDocument <pyEchosign.classes.documents.TransientDocument>` instances
which will become the documents within the agreement. This information is not provided when retrieving
agreements from Echosign.
Attributes:
account (EchosignAccount): An instance of :class:`EchosignAccount <pyEchosign.classes.account.EchosignAccount>`.
All Agreement actions will be conducted under this account.
fully_retrieved (bool): Whether or not the agreement has all information retrieved,
or if only the basic information was pulled (such as when getting all agreements instead
of requesting the specific agreement)
echosign_id (str): The ID assigned to the agreement by Echosign, used to identify the agreement via the API
name (str): The name of the document as specified by the sender
status (Agreement.Status): The current status of the document (OUT_FOR_SIGNATURE, SIGNED, APPROVED, etc)
users (list[DisplayUser]): The users associated with this agreement, represented by
:class:`EchosignAccount <pyEchosign.classes.account.EchosignAccount>`
files (list): A list of :class:`TransientDocument <pyEchosign.classes.documents.TransientDocument>` instances
which will become the documents within the agreement. This information is not provided when retrieving
agreements from Echosign.
"""
def __init__(self, account, **kwargs):
# type: (EchosignAccount) -> None
self.account = account
self.fully_retrieved = kwargs.pop('fully_retrieved', None)
self.echosign_id = kwargs.pop('echosign_id', None)
self.name = kwargs.pop('name', None)
self.date = kwargs.pop('date', None)
self.users = kwargs.pop('users', [])
status = kwargs.pop('status', None)
if status is not None:
            self.status = getattr(self.Status, status)
# Used for the creation of Agreements in Echosign
self.files = kwargs.pop('files', [])
self._documents = None
self._signing_url = None
def __str__(self):
if self.name is not None:
return 'Echosign Agreement: {}'.format(self.name)
elif self.echosign_id is not None:
return 'Echosign Agreement: {}'.format(self.echosign_id)
else:
return super(Agreement, self).__str__()
def __repr__(self):
return str(self)
class Status(object):
""" Possible status of agreements
Note:
Echosign provides 'WAITING_FOR_FAXIN' in their API documentation, so pyEchosign has also included
'WAITING_FOR_FAXING' in case that's just a typo in their documentation. Once it's determined
which is used, the other will be removed.
"""
WAITING_FOR_MY_SIGNATURE = 'WAITING_FOR_MY_SIGNATURE'
WAITING_FOR_MY_APPROVAL = 'WAITING_FOR_MY_APPROVAL'
WAITING_FOR_MY_DELEGATION = 'WAITING_FOR_MY_DELEGATION'
WAITING_FOR_MY_ACKNOWLEDGEMENT = 'WAITING_FOR_MY_ACKNOWLEDGEMENT'
WAITING_FOR_MY_ACCEPTANCE = 'WAITING_FOR_MY_ACCEPTANCE'
WAITING_FOR_MY_FORM_FILLING = 'WAITING_FOR_MY_FORM_FILLING'
OUT_FOR_SIGNATURE = 'OUT_FOR_SIGNATURE'
OUT_FOR_APPROVAL = 'OUT_FOR_APPROVAL'
OUT_FOR_DELIVERY = 'OUT_FOR_DELIVERY'
OUT_FOR_ACCEPTANCE = 'OUT_FOR_ACCEPTANCE'
OUT_FOR_FORM_FILLING = 'OUT_FOR_FORM_FILLING'
SIGNED = 'SIGNED'
APPROVED = 'APPROVED'
DELIVERED = 'DELIVERED'
ACCEPTED = 'ACCEPTED'
FORM_FILLED = 'FORM_FILLED'
RECALLED = 'RECALLED'
# This was directly taken from Echosign
# not sure if the typo is only in their documentation or also in response. Adding both in case.
WAITING_FOR_FAXIN = 'WAITING_FOR_FAXIN'
WAITING_FOR_FAXING = 'WAITING_FOR_FAXING'
ARCHIVED = 'ARCHIVED'
FORM = 'FORM'
EXPIRED = 'EXPIRED'
WIDGET = 'WIDGET'
WAITING_FOR_AUTHORING = 'WAITING_FOR_AUTHORING'
OTHER = 'OTHER'
@classmethod
def json_to_agreement(cls, account, json_data):
echosign_id = json_data.get('agreementId', None)
name = json_data.get('name', None)
status = json_data.get('status', None)
user_set = json_data.get('displayUserSetInfos', None)[0]
user_set = user_set.get('displayUserSetMemberInfos', None)
users = User.json_to_users(user_set)
date = json_data.get('displayDate', None)
if date is not None:
date = arrow.get(date)
new_agreement = Agreement(echosign_id=echosign_id, name=name, account=account, status=status, date=date)
new_agreement.users = users
return new_agreement
@classmethod
def json_to_agreements(cls, account, json_data):
json_data = json_data.get('userAgreementList')
return [cls.json_to_agreement(account, agreement_data) for agreement_data in json_data]
@property
def documents(self):
""" Retrieve the :class:`AgreementDocuments <pyEchosign.classes.documents.AgreementDocument>` associated with
this agreement. If the files have not already been retrieved, this will result in an additional request to
the API.
Returns: A list of :class:`AgreementDocument <pyEchosign.classes.documents.AgreementDocument>`
"""
# If _documents is None, no (successful) API call has been made to retrieve them
if self._documents is None:
url = self.account.api_access_point + 'agreements/{}/documents'.format(self.echosign_id)
r = requests.get(url, headers=get_headers(self.account.access_token))
# Raise Exception if there was an error
check_error(r)
try:
data = r.json()
except ValueError:
raise ApiError('Unexpected response from Echosign API: Status {} - {}'.format(r.status_code, r.content))
else:
self._documents = []
# Take both sections of documents from the response and turn into AgreementDocuments
documents = self._document_data_to_document(data.get('documents', []))
supporting_documents = self._document_data_to_document(data.get('supportingDocuments', []))
self._documents = documents + supporting_documents
return self._documents
@property
def combined_document(self):
# type: () -> BytesIO
""" The PDF file containing all documents within this agreement."""
endpoint = '{}agreements/{}/combinedDocument'.format(self.account.api_access_point, self.echosign_id)
response = requests.get(endpoint, headers=get_headers(self.account.access_token))
check_error(response)
return BytesIO(response.content)
@property
def audit_trail_file(self):
# type: () -> BytesIO
""" The PDF file of the audit trail."""
endpoint = '{}agreements/{}/auditTrail'.format(self.account.api_access_point, self.echosign_id)
response = requests.get(endpoint, headers=get_headers(self.account.access_token))
check_error(response)
return BytesIO(response.content)
@staticmethod
def _document_data_to_document(json_data):
# type: (dict) -> list
""" Coverts JSON received from API into an AgreementDocument and appends to Agreement.documents """
documents = []
        for document_data in json_data:
            # Documents and Supporting Documents are not mixed together - we could get either ID.
            # dict.get() never raises KeyError, so fall back explicitly when 'documentId' is absent.
            echosign_id = document_data.get('documentId')
            if echosign_id is None:
                echosign_id = document_data.get('supportingDocumentId')
mime_type = document_data.get('mimeType')
name = document_data.get('name')
page_count = document_data.get('numPages')
document = AgreementDocument(echosign_id, mime_type, name, page_count)
# If this is a supporting document, there will be a field name
field_name = document_data.get('fieldName', None)
if field_name is not None:
document.field_name = field_name
documents.append(document)
return documents
@staticmethod
def __construct_recipient_agreement_request(recipients):
# type: (List[User]) -> list
""" Takes a list of :class:`Recipients <pyEchosign.classes.users.Recipient>` and returns the JSON required by
the Echosign API.
Args:
recipients: A list of :class:`Recipients <pyEchosign.classes.users.Recipient>`
"""
recipient_set = []
for recipient in recipients:
recipient_info = dict(email=recipient.email)
recipient_set_info = dict(recipientSetMemberInfos=recipient_info,
securityOptions=[dict(authenticationMethod="", password="CONTENT FILTERED",
phoneInfos=[dict(phone="", countryCode="")])],
recipientSetRole="SIGNER")
recipient_set.append(recipient_set_info)
return recipient_set
def cancel(self):
""" Cancels the agreement on Echosign. Agreement will still be visible in the Manage page. """
url = '{}agreements/{}/status'.format(self.account.api_access_point, self.echosign_id)
body = dict(value='CANCEL')
r = requests.put(url, headers=get_headers(self.account.access_token), data=json.dumps(body))
if response_success(r):
log.debug('Request to cancel agreement {} successful.'.format(self.echosign_id))
else:
try:
log.error('Error encountered cancelling agreement {}. Received message: {}'.format(self.echosign_id,
r.content))
finally:
check_error(r)
def delete(self):
""" Deletes the agreement on Echosign. Agreement will not be visible in the Manage page.
Note:
This action requires the 'agreement_retention' scope, which doesn't appear
to be actually available via OAuth
"""
url = self.account.api_access_point + 'agreements/' + self.echosign_id
r = requests.delete(url, headers=get_headers(self.account.access_token))
if response_success(r):
log.debug('Request to delete agreement {} successful.'.format(self.echosign_id))
else:
try:
log.error('Error encountered deleting agreement {}. Received message:{}'.format(self.echosign_id,
r.content))
finally:
check_error(r)
class SignatureFlow(object):
SEQUENTIAL = 'SEQUENTIAL'
PARALLEL = 'PARALLEL'
SENDER_SIGNS_ONLY = 'SENDER_SIGNS_ONLY'
def send(self, recipients, agreement_name=None, ccs=None, days_until_signing_deadline=0,
external_id='', signature_flow=SignatureFlow.SEQUENTIAL, message='',
merge_fields=None):
# type: (List[User], str, list, int, str, Agreement.SignatureFlow, str, List[Dict[str, str]]) -> None
""" Sends this agreement to Echosign for signature
Args:
agreement_name: A string for the document name which will appear in the Echosign Manage page, the email
to recipients, etc. Defaults to the name for the Agreement.
recipients: A list of :class:`Users <pyEchosign.classes.users.User>`.
The order which they are provided in the list determines the order in which they sign.
ccs: (optional) A list of email addresses to be CC'd on the Echosign agreement emails
(document sent, document fully signed, etc)
days_until_signing_deadline: (optional) "The number of days that remain before the document expires.
You cannot sign the document after it expires" Defaults to 0, for no expiration.
external_id: (optional) "A unique identifier for your transaction...
You can use the ExternalID to search for your transaction through [the] API"
signature_flow: (optional) (SignatureFlow): The routing style of this agreement, defaults to Sequential.
merge_fields: (optional) A list of dictionaries, with each one providing the 'field_name' and
'default_value' keys. The field name maps to the field on the document, and the default value is
what will be placed inside.
message: (optional) A message which will be displayed to recipients of the agreement
Returns:
A namedtuple representing the information received back from the API. Contains the following attributes
`agreement_id`
*"The unique identifier that can be used to query status and download signed documents"*
`embedded_code`
*"Javascript snippet suitable for an embedded page taking a user to a URL"*
`expiration`
*"Expiration date for autologin. This is based on the user setting, API_AUTO_LOGIN_LIFETIME"*
`url`
*"Standalone URL to direct end users to"*
Raises:
ApiError: If the API returns an error, such as a 403. The exact response from the API is provided.
"""
if agreement_name is None:
agreement_name = self.name
if ccs is None:
ccs = []
security_options = dict(passwordProtection="NONE", kbaProtection="NONE", webIdentityProtection="NONE",
protectOpen=False, internalPassword="", externalPassword="", openPassword="")
files_data = [{'transientDocumentId': file.document_id} for file in self.files]
if merge_fields is None:
merge_fields = []
converted_merge_fields = [dict(fieldName=field['field_name'], defaultValue=field['default_value']) for field in
merge_fields]
recipients_data = self.__construct_recipient_agreement_request(recipients)
document_creation_info = dict(signatureType="ESIGN", name=agreement_name, callbackInfo="",
securityOptions=security_options, locale="", ccs=ccs,
externalId=external_id, signatureFlow=signature_flow,
fileInfos=files_data, mergeFieldInfo=converted_merge_fields,
recipientSetInfos=recipients_data, message=message,
daysUntilSigningDeadline=days_until_signing_deadline, )
request_data = dict(documentCreationInfo=document_creation_info)
url = self.account.api_access_point + 'agreements'
api_response = requests.post(url, headers=self.account.headers(), data=json.dumps(request_data))
if response_success(api_response):
            Response = namedtuple('Response', ('agreement_id', 'embedded_code', 'expiration', 'url'))
            response_data = api_response.json()
            embedded_code = response_data.get('embeddedCode', None)
            expiration = response_data.get('expiration', None)
            url = response_data.get('url', None)
            return Response(response_data['agreementId'], embedded_code, expiration, url)
else:
check_error(api_response)
def get_signing_urls(self):
""" Associate the signing URLs for this agreement with its
:class:`recipients <pyEchosign.classes.users.User>` """
endpoint = '{}agreements/{}/signingUrls'.format(self.account.api_access_point, self.echosign_id)
headers = get_headers(self.account.access_token)
r = requests.get(endpoint, headers=headers)
if response_success(r):
data = r.json()
url_sets = data['signingUrlSetInfos']
# Each signing set will have its own URLs
            for url_set in url_sets:
                urls = url_set['signingUrls']
for url in urls:
try:
email = url['email']
# Find the user in this Agreement's list of users that has a matching email
matching_user = find_user_in_list(self.users, 'email', email)
except KeyError:
continue
# Set the signing URL for that recipient
matching_user._signing_url = url['esignUrl']
def send_reminder(self, comment=''):
""" Send a reminder for an agreement to the participants.
Args:
comment: An optional comment that will be sent with the reminder
"""
url = self.account.api_access_point + 'reminders'
payload = dict(agreementId=self.echosign_id, comment=comment)
r = requests.post(url, data=json.dumps(payload), headers=self.account.headers())
check_error(r)
def get_form_data(self):
""" Retrieves the form data for this agreement as CSV.
Returns: StringIO
"""
url = '{}agreements/{}/formData'.format(self.account.api_access_point, self.echosign_id)
r = requests.get(url, headers=self.account.headers())
check_error(r)
return StringIO(r.text)
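# Hedged usage sketch (not part of the original module). The EchosignAccount,
# User and TransientDocument constructor calls below are assumptions made for
# illustration; consult the pyEchosign documentation for the exact signatures.
#
#   account = EchosignAccount('access-token-here')
#   agreement = Agreement(account, name='NDA', files=[transient_document])
#   response = agreement.send([User('signer@example.com')],
#                             message='Please sign this NDA.')
#   print(response.agreement_id)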
|
JensAstrup/pyEchosign
|
pyEchosign/classes/agreement.py
|
Python
|
mit
| 19,895
|
'''
Created on Apr 6, 2016
@author: Alex Ip, Geoscience Australia
'''
import sys
import netCDF4
import subprocess
import re
from geophys2netcdf import ERS2NetCDF
def main():
assert len(
sys.argv) == 4, 'Usage: %s <root_dir> <file_template> <start_id>' % sys.argv[0]
root_dir = sys.argv[1]
file_template = sys.argv[2]
start_id = int(sys.argv[3]) # This will fail for invalid integers
nc_path_list = sorted([filename for filename in subprocess.check_output(
['find', root_dir, '-name', file_template]).split('\n') if re.search('\.nc$', filename)])
ecat_id = start_id
for nc_path in nc_path_list:
print 'Setting eCat ID in %s' % nc_path
nc_dataset = netCDF4.Dataset(nc_path, 'r+')
try:
existing_id = int(nc_dataset.ecat_id)
print '%s already has existing eCat ID %d' % (nc_path, existing_id)
except Exception as e:
nc_dataset.ecat_id = ecat_id
print '%s now has eCat ID set to %d' % (nc_path, ecat_id)
ecat_id += 1
nc_dataset.close()
# print 'Updating metadata in %s' % nc_path
# try:
# g2n_object = ERS2NetCDF()
# g2n_object.update_nc_metadata(nc_path, do_stats=True)
# # Kind of redundant, but possibly useful for debugging
# g2n_object.check_json_metadata()
# except Exception as e:
# print 'Metadata update failed: %s' % e.message
if __name__ == '__main__':
main()
|
alex-ip/geophys2netcdf
|
utils/set_ecat_ids.py
|
Python
|
apache-2.0
| 1,510
|
#!~/python
import fluidity_tools
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
import myfun
import numpy as np
from scipy import optimize
import os
import scipy.stats as sp
import scipy
import lagrangian_stats
import advect_functions
import csv
# read offline
print 'reading offline'
#exp = 'm_25_1_tracer'
#label = 'm_25_1_tracer'
#filename2D = 'traj_m_25_1_tracer_0_640_2D.csv'
#filename3D = 'traj_m_25_1_tracer_0_640_3D.csv'
#tt = 640 # IC + 24-48 included
label = 'm_25_1b_particles'
dim = '2D'
filename2D_B = './csv/RD_'+dim+'_'+label+'.csv'
time2D_B, RD_2D_B = lagrangian_stats.read_dispersion(filename2D_B)
time = time2D_B[:] - time2D_B[0]
depths = [5, 10, 15]
depthid = [1, 2, 3]
nl = len(depths)
# relative D
fig, ax = plt.subplots()
RD_2D = np.log10(RD_2D_B)
RD_2D = RD_2D[1:]
x = np.log10(time)
x = x[1:]
if dim == '2D' and label == 'm_25_1b_particles':
print dim
new_x = np.linspace(np.log10(40000),5.35,10)
y = 3.7 -3.*new_x[0] + 3.*new_x
plt.plot(new_x,y,'k',linewidth=1.5)
plt.text(5.0,y[-4],'$t^3$',fontsize=16)
if dim == '2D' and label == 'm_25_2b_particles':
new_x = np.linspace(np.log10(50000),5.35,10)
y = 5.1 -3.*new_x[0] + 3.*new_x
plt.plot(new_x,y,'k',linewidth=1.5)
plt.text(5.0,y[-4],'$t^3$',fontsize=16)
if dim == '3D' and label == 'm_25_1b_particles':
print dim
new_x = np.linspace(np.log10(50000),5.35,10)
y = 4.3 -3.*new_x[0] + 3.*new_x
plt.plot(new_x,y,'k',linewidth=1.5)
plt.text(5.0,y[-4],'$t^3$',fontsize=16)
if dim == '3D' and label == 'm_25_2b_particles':
new_x = np.linspace(np.log10(85000),5.35,10)
y = 6.2 -3.*new_x[0] + 3.*new_x
plt.plot(new_x,y,'k',linewidth=1.5)
plt.text(5.1,y[-4],'$t^3$',fontsize=16)
if dim == '2D':
p_2D, = plt.plot(x,RD_2D[:,0],color=[0,0,1],linewidth=2)
z = 1
p_2D5, = plt.plot(x,RD_2D[:,z],'--',color=[0,0,1],linewidth=2)
z = 2
p_2D17, = plt.plot(x,RD_2D[:,z],'-.',color=[0,0,1],linewidth=2)
plt.legend((p_2D,p_2D5,p_2D17),(dim+' 5m',dim+' 10m',dim+' 15m'),bbox_to_anchor=(1., 0.023),loc=4,fontsize=14)
else:
p_2D, = plt.plot(x,RD_2D[:,0],color=[1,0,0],linewidth=2,label=str(dim+' 5m'))
plt.legend(bbox_to_anchor=(1., 0.023),loc=4,fontsize=14)
# z = 1
# p_2D5, = plt.plot(x,RD_2D[:,z],'--',color=[1,0,0],linewidth=2)
# z = 2
# p_2D17, = plt.plot(x,RD_2D[:,z],'-.',color=[1,0,0],linewidth=2)
plt.xlabel(r'Time $[hr]$',fontsize=20)
plt.ylabel(r'$\sigma^2_D$ $[m^2]$',fontsize=20)
plt.xlim((np.log10(1440),np.log10(24*3600*3)))
plt.ylim((2-.5,8.5))
ind = []
vind = []
M_xticks = []
M_xticks_labels = []
m_xticks = []
for i in range(0,24*3+2,2):
m_xticks.append(np.log10(i*3600.))
if i%24==0:
M_xticks_labels.append(i+72)
M_xticks.append(np.log10(i*3600))
m_xticks[0] = np.log10(1440)
M_xticks_labels[0] = 72.4
M_xticks[0] = np.log10(1440)
# Specify tick label size
ax.tick_params(axis = 'both', which = 'major', labelsize = 16, length=10)
ax.tick_params(axis = 'both', which = 'minor', labelsize = 0, length=7)
# Suppress minor tick labels
ax.set_xticks(M_xticks)
ax.set_xticks(m_xticks, minor = True)
ax.set_xticklabels(M_xticks_labels, minor=False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
#plt.yticks(fontsize=16)
M_yticks = []
M_yticks_labels = []
m_yticks = []
for i in range(2,9,1):
M_yticks_labels.append('$10^{'+str(i)+'}$')
M_yticks.append(i)
if i < 9:
for j in range(1,10,1):
m_yticks.append(np.log10(np.power(10.,i)*j))
ax.set_yticks(M_yticks)
ax.set_yticks(m_yticks, minor = True)
ax.set_yticklabels(M_yticks_labels, minor=False)
#ax.ticklabel_format(axis = 'y', style='sci', scilimits=(0,0))
############################################################### INSET
for d in range(3):
RD_2D_B[:,d] = RD_2D_B[:,d]/(time**3)
RD_2D = np.log10(RD_2D_B)
RD_2D = RD_2D[1:]
a = plt.axes([.29, .59, .4, .35])
if dim == '2D':
print dim
p_2D, = plt.plot(x,RD_2D[:,0],color=[0,0,1],linewidth=2)
z = 1
p_2D5, = plt.plot(x,RD_2D[:,z],'--',color=[0,0,1],linewidth=2)
z = 2
p_2D17, = plt.plot(x,RD_2D[:,z],'-.',color=[0,0,1],linewidth=2)
else:
p_2D, = plt.plot(x,RD_2D[:,0],color=[1,0,0],linewidth=2,label=str(dim+' 5m'))
plt.xlabel(r'Time $[hr]$',fontsize=13)
plt.ylabel(r'$log(\sigma^2_D\,t^{-3})$ $[m^2s^{-3}]$',fontsize=13)
plt.xlim((np.log10(1440),np.log10(24*3*3600)))
plt.ylim(-11.5,-7)
M_xticks = []
M_xticks_labels = []
m_xticks = []
for i in range(0,24*3+2,2):
m_xticks.append(np.log10(i*3600.))
if i%24==0:
M_xticks.append(np.log10(i*3600))
M_xticks_labels.append(i+72)
m_xticks[0] = np.log10(1440)
M_xticks_labels[0] = 72.4
#M_xticks_labels = [72.4, 96, '', 144, '', 192, '']
M_xticks[0] = np.log10(1440)
# Specify tick label size
a.tick_params(axis = 'both', which = 'major', labelsize = 10, length=10)
a.tick_params(axis = 'both', which = 'minor', labelsize = 0, length=7)
# Suppress minor tick labels
a.set_xticks(M_xticks)
a.set_xticks(m_xticks, minor = True)
a.set_xticklabels(M_xticks_labels, minor=False)
a.yaxis.set_ticks_position('left')
a.xaxis.set_ticks_position('bottom')
plt.yticks(fontsize=10)
plt.tight_layout()
plt.savefig('./plot/'+label+'/RD_'+dim+'_'+label+'.eps')
print './plot/'+label+'/RD_'+dim+'_'+label+'.eps'
plt.close()
|
jungla/ICOM-fluidity-toolbox
|
Detectors/offline_advection/plot_RDinset_fast.py
|
Python
|
gpl-2.0
| 5,228
|
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config Drive v2 helper."""
import os
import shutil
from oslo_utils import fileutils
from oslo_utils import units
import nova.conf
from nova import exception
from nova.objects import fields
from nova import utils
from nova import version
CONF = nova.conf.CONF
# Config drives are fixed at 64mb, since we can't size the image to the
# exact size of the data
CONFIGDRIVESIZE_BYTES = 64 * units.Mi
class ConfigDriveBuilder(object):
"""Build config drives, optionally as a context manager."""
def __init__(self, instance_md=None):
self.imagefile = None
self.mdfiles = []
if instance_md is not None:
self.add_instance_metadata(instance_md)
def __enter__(self):
return self
def __exit__(self, exctype, excval, exctb):
if exctype is not None:
# NOTE(mikal): this means we're being cleaned up because an
# exception was thrown. All bets are off now, and we should not
# swallow the exception
return False
self.cleanup()
def _add_file(self, basedir, path, data):
filepath = os.path.join(basedir, path)
dirname = os.path.dirname(filepath)
fileutils.ensure_tree(dirname)
with open(filepath, 'wb') as f:
f.write(data)
def add_instance_metadata(self, instance_md):
for (path, data) in instance_md.metadata_for_config_drive():
self.mdfiles.append((path, data))
def _write_md_files(self, basedir):
for data in self.mdfiles:
self._add_file(basedir, data[0], data[1])
def _make_iso9660(self, path, tmpdir):
publisher = "%(product)s %(version)s" % {
'product': version.product_string(),
'version': version.version_string_with_package()
}
utils.execute(CONF.mkisofs_cmd,
'-o', path,
'-ldots',
'-allow-lowercase',
'-allow-multidot',
'-l',
'-publisher',
publisher,
'-quiet',
'-J',
'-r',
'-V', 'config-2',
tmpdir,
attempts=1,
run_as_root=False)
def _make_vfat(self, path, tmpdir):
# NOTE(mikal): This is a little horrible, but I couldn't find an
# equivalent to genisoimage for vfat filesystems.
with open(path, 'wb') as f:
f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
with utils.tempdir() as mountdir:
mounted = False
try:
_, err = utils.trycmd(
'mount', '-o', 'loop,uid=%d,gid=%d' % (os.getuid(),
os.getgid()),
path,
mountdir,
run_as_root=True)
if err:
raise exception.ConfigDriveMountFailed(operation='mount',
error=err)
mounted = True
                # NOTE(mikal): I can't just use shutil.copytree here,
# because the destination directory already
# exists. This is annoying.
for ent in os.listdir(tmpdir):
shutil.copytree(os.path.join(tmpdir, ent),
os.path.join(mountdir, ent))
finally:
if mounted:
utils.execute('umount', mountdir, run_as_root=True)
def make_drive(self, path):
"""Make the config drive.
:param path: the path to place the config drive image at
        :raises ProcessExecutionError if a helper process has failed.
"""
with utils.tempdir() as tmpdir:
self._write_md_files(tmpdir)
if CONF.config_drive_format == 'iso9660':
self._make_iso9660(path, tmpdir)
elif CONF.config_drive_format == 'vfat':
self._make_vfat(path, tmpdir)
else:
raise exception.ConfigDriveUnknownFormat(
format=CONF.config_drive_format)
def cleanup(self):
if self.imagefile:
fileutils.delete_if_exists(self.imagefile)
def __repr__(self):
return "<ConfigDriveBuilder: " + str(self.mdfiles) + ">"
def required_by(instance):
image_prop = instance.image_meta.properties.get(
"img_config_drive",
fields.ConfigDrivePolicy.OPTIONAL)
return False
# return (instance.config_drive or
# CONF.force_config_drive or
# image_prop == fields.ConfigDrivePolicy.MANDATORY
# )
#
def update_instance(instance):
"""Update the instance config_drive setting if necessary
The image or configuration file settings may override the default instance
setting. In this case the instance needs to mirror the actual
virtual machine configuration.
"""
if not instance.config_drive and required_by(instance):
instance.config_drive = True
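# Minimal usage sketch (instance_md is assumed to expose
# metadata_for_config_drive(); illustrative only):
#
#   with configdrive.ConfigDriveBuilder(instance_md=instance_md) as cdb:
#       cdb.make_drive('/tmp/disk.config')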
|
xuweiliang/Codelibrary
|
nova/virt/configdrive.py
|
Python
|
apache-2.0
| 5,826
|
import paho.mqtt.client as mqtt
import time, sys, rrdtool, os
last_topic = ""
last_payload = ""
# main
def on_connect(client, userdata, flags, rc):
print("Connected")
client.is_connected = True
def on_message(client, userdata, message):
    ''' note: message is an MQTTMessage with topic, payload, qos and retain attributes '''
global last_topic, last_payload
last_topic = message.topic
last_payload = message.payload
if "Factor" in last_payload:
newpayload = last_payload.split(':')
#newpayload = map(float, last_payload.split(':'))
newpayload = newpayload[9]
newpayload = newpayload[:-9]
print(newpayload)
try:
Update = 'N:'+ (newpayload)
rrdtool.update(
"%s/sonoffPwrs.rrd" % (os.path.dirname(os.path.abspath(__file__))),
Update)
        except rrdtool.error:
            pass
else:
pass
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.is_connected = False
client.loop_start()
client.connect("localhost")
time.sleep(6)
if not client.is_connected:
print("problem connecting to the MQTT server; please check your settings")
sys.exit(1)
client.subscribe("stat/sonoff/POWER")
# wait a little bit
time.sleep(15)
# ask for system status
time.sleep(1)
# now wait for a time stamp from the sonoff; this could take an hour
client.subscribe("tele/sonoff/+")
while 1:
    if last_topic.startswith("tele/") and last_topic.endswith("STATE"):
        locate_time = last_payload.find('"Time":')
        the_time = last_payload[locate_time+8:locate_time+8+19]
        print("the sonoff thinks the time is: "+the_time)
        # clear the topic so the timestamp is reported once per message and
        # the loop keeps sleeping instead of busy-spinning
        last_topic = ""
    time.sleep(5)
client.loop_stop()
client.disconnect()
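# The script assumes sonoffPwrs.rrd already exists next to it; a round-robin
# database with a single data source could be created with something like
# (parameters are illustrative):
#   rrdtool create sonoffPwrs.rrd --step 300 DS:power:GAUGE:600:0:U RRA:AVERAGE:0.5:1:288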
|
raspberrypisig/raspberrypisig.github.io
|
assets/files/mqttSonoff.py
|
Python
|
mit
| 1,670
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import logging
from pathlib import Path
import subprocess
import synthtool as s
from synthtool.languages import php
from synthtool import _tracked_paths
logging.basicConfig(level=logging.DEBUG)
src = Path(f"../{php.STAGING_DIR}/DataCatalog").resolve()
dest = Path().resolve()
# Added so that we can pass copy_excludes in the owlbot_main() call
_tracked_paths.add(src)
php.owlbot_main(src=src, dest=dest)
# fix namespace casing
s.replace(
"**/*.php",
r"(namespace|use) Google\\Cloud\\Datacatalog",
r"\1 Google\\Cloud\\DataCatalog",
)
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# V1 is GA, so remove @experimental tags
s.replace(
'src/V1/**/*Client.php',
r'^(\s+\*\n)?\s+\*\s@experimental\n',
'')
# Change the wording for the deprecation warning.
s.replace(
'src/*/*_*.php',
r'will be removed in the next major release',
'will be removed in a future release')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
"src/**/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
|
googleapis/google-cloud-php-data-catalog
|
owlbot.py
|
Python
|
apache-2.0
| 2,935
|
"""
django-helpdesk - A Django powered ticket tracker for small enterprise.
The is_helpdesk_staff template filter returns True if the user qualifies as Helpdesk staff.
templatetags/helpdesk_staff.py
"""
import logging
from django.template import Library
from helpdesk.decorators import is_helpdesk_staff
logger = logging.getLogger(__name__)
register = Library()
@register.filter(name='is_helpdesk_staff')
def helpdesk_staff(user):
try:
return is_helpdesk_staff(user)
except Exception:
logger.exception("'helpdesk_staff' template tag (django-helpdesk) crashed")
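# Template usage (illustrative):
#   {% load helpdesk_staff %}
#   {% if request.user|is_helpdesk_staff %}...{% endif %}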
|
rossp/django-helpdesk
|
helpdesk/templatetags/helpdesk_staff.py
|
Python
|
bsd-3-clause
| 591
|
"""
Django settings for openhack project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ih11*lcf4-$d$(pq!c@%7@01issx-v&zgav0xiv+5*@ck*w=!e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['172.4.160.211', '127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_filters',
'corsheaders',
'pinguin.apps.PinguinConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'openhack.urls'
CORS_ORIGIN_WHITELIST = (
'localhost:8000',
'127.0.0.1:8000'
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'openhack.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
horken7/openhack
|
openhack/settings.py
|
Python
|
mit
| 3,386
|
from webapp2_extras import i18n
from webapp2_extras.i18n import gettext as _
known_locales = ["en_US", "es"]
def get_preferred_locale(request):
preferred_locale = "en_US"
if request.get("lang"):
preferred_locale = request.get("lang")
    elif "language" in request.cookies:
preferred_locale = request.cookies["language"]
elif request.headers.get("accept_language"):
preferred_locale = request.headers.get("accept_language")
if preferred_locale not in known_locales:
if preferred_locale.split("_")[0] not in known_locales:
# this locale has no known match
preferred_locale = "en_US"
else:
# this locale has a similar match
preferred_locale = preferred_locale.split("_")[0]
return preferred_locale
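# Illustrative fallbacks given known_locales above: "es_MX" falls back to "es"
# (language-only match), "fr_FR" falls back to "en_US" (no match), and
# "en_US" is returned as-is (exact match).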
def get_client_side_translations():
return {
"disclaimer": _("disclaimer"),
"Error!": _("Error!"),
"We couldn't find that document": _("We couldn't find that document"),
"Registered in our servers since:": _("Registered in our servers since:"),
"Registered in the bitcoin blockchain since:": _("Registered in the bitcoin blockchain since:"),
"transaction timestamp": _("transaction timestamp"),
"Document proof embedded in the Bitcoin blockchain!": _("Document proof embedded in the Bitcoin blockchain!"),
"Document proof not yet embedded in the bitcoin blockchain.": _("Document proof not yet embedded in the bitcoin blockchain."),
"Payment being processed. Please wait while the bitcoin transaction is confirmed by the network.": _("Payment being processed. Please wait while the bitcoin transaction is confirmed by the network."),
"Transaction": _("Transaction"),
"Must select a file to upload": _("Must select a file to upload"),
"File already exists in the system since %s. Redirecting...": _("File already exists in the system since %s. Redirecting..."),
"File successfully added to system. Redirecting...": _("File successfully added to system. Redirecting..."),
"Document Digest": _("Document Digest"),
"Document Hash": _("Document hash: "),
"Timestamp": _("Timestamp"),
"Initializing": _("Initializing"),
"Now hashing... ": _("Now hashing... "),
"Loading document...": _("Loading document..."),
"Preparing to hash ": _("Preparing to hash "),
" bytes, last modified: ": _(" bytes, last modified: "),
"n/a": _("n/a")
}
|
tuomassiren/proofofexistence
|
translation.py
|
Python
|
mit
| 2,309
|
short_name = "godot"
name = "Godot Engine"
major = 3
minor = 0
status = "alpha"
|
morrow1nd/godot
|
version.py
|
Python
|
mit
| 80
|
# Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import base64
import inspect
import json
import os
import unittest
from feaas import api, plugin, storage
from feaas.managers import ec2
from . import managers
class APITestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.manager = managers.FakeManager()
api.get_manager = lambda: cls.manager
cls.api = api.api.test_client()
def setUp(self):
self.manager.reset()
def test_start_instance(self):
resp = self.api.post("/resources", data={"name": "someapp"})
self.assertEqual(201, resp.status_code)
self.assertEqual("someapp", self.manager.instances[0].name)
def test_start_instance_without_name(self):
resp = self.api.post("/resources", data={"names": "someapp"})
self.assertEqual(400, resp.status_code)
self.assertEqual("name is required", resp.data)
self.assertEqual([], self.manager.instances)
def test_start_instance_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources", method="POST",
data={"names": "someapp"},
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_remove_instance(self):
self.manager.new_instance("someapp")
resp = self.api.delete("/resources/someapp")
self.assertEqual(200, resp.status_code)
self.assertEqual("", resp.data)
self.assertEqual([], self.manager.instances)
def test_remove_instance_not_found(self):
resp = self.api.delete("/resources/someapp")
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
self.assertEqual([], self.manager.instances)
def test_remove_instance_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp", method="DELETE",
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_bind_app(self):
self.manager.new_instance("someapp")
resp = self.api.post("/resources/someapp/bind-app",
data={"app-host": "someapp.cloud.tsuru.io"})
self.assertEqual(201, resp.status_code)
self.assertEqual("null", resp.data)
self.assertEqual("application/json", resp.mimetype)
bind = self.manager.instances[0].bound[0]
self.assertEqual("someapp.cloud.tsuru.io", bind)
def test_bind_without_app_host(self):
resp = self.api.post("/resources/someapp/bind-app",
data={"app_hooost": "someapp.cloud.tsuru.io"})
self.assertEqual(400, resp.status_code)
self.assertEqual("app-host is required", resp.data)
def test_bind_instance_not_found(self):
resp = self.api.post("/resources/someapp/bind-app",
data={"app-host": "someapp.cloud.tsuru.io"})
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
def test_bind_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp/bind-app", method="POST",
data={"app-host": "someapp.cloud.tsuru.io"},
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_unbind(self):
self.manager.new_instance("someapp")
self.manager.bind("someapp", "someapp.cloud.tsuru.io")
resp = self.api.delete("/resources/someapp/bind-app",
data={"app-host": "someapp.cloud.tsuru.io"},
headers={"Content-Type": "application/x-www-form-urlencoded"})
self.assertEqual(200, resp.status_code)
self.assertEqual("", resp.data)
self.assertEqual([], self.manager.instances[0].bound)
def test_unbind_instance_not_found(self):
resp = self.api.delete("/resources/someapp/bind-app",
data={"app-host": "someapp.cloud.tsuru.io"})
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
def test_unbind_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp/bind-app",
method="DELETE",
data={"app-host": "someapp.cloud.tsuru.io"},
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_info(self):
self.manager.new_instance("someapp")
resp = self.api.get("/resources/someapp")
self.assertEqual(200, resp.status_code)
self.assertEqual("application/json", resp.mimetype)
data = json.loads(resp.data)
self.assertEqual({"name": "someapp"}, data)
def test_info_instance_not_found(self):
resp = self.api.get("/resources/someapp")
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
def test_info_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp", method="GET",
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_status_started(self):
self.manager.new_instance("someapp", state="started")
resp = self.api.get("/resources/someapp/status")
self.assertEqual(204, resp.status_code)
def test_status_pending(self):
self.manager.new_instance("someapp", state="pending")
resp = self.api.get("/resources/someapp/status")
self.assertEqual(202, resp.status_code)
def test_status_error(self):
self.manager.new_instance("someapp", state="error")
resp = self.api.get("/resources/someapp/status")
self.assertEqual(500, resp.status_code)
def test_status_scaling(self):
self.manager.new_instance("someapp", state="scaling")
resp = self.api.get("/resources/someapp/status")
self.assertEqual(204, resp.status_code)
def test_status_not_found(self):
resp = self.api.get("/resources/someapp/status")
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
def test_status_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp/status", method="GET",
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_scale_instance(self):
self.manager.new_instance("someapp")
resp = self.api.post("/resources/someapp/scale",
data={"quantity": "3"})
self.assertEqual(201, resp.status_code)
_, instance = self.manager.find_instance("someapp")
self.assertEqual(3, instance.units)
def test_scale_instance_invalid_quantity(self):
self.manager.new_instance("someapp")
resp = self.api.post("/resources/someapp/scale",
data={"quantity": "chico"})
self.assertEqual(400, resp.status_code)
self.assertEqual("invalid quantity: chico", resp.data)
def test_scale_instance_negative_quantity(self):
self.manager.new_instance("someapp")
resp = self.api.post("/resources/someapp/scale",
data={"quantity": "-2"})
self.assertEqual(400, resp.status_code)
self.assertEqual("invalid quantity: -2", resp.data)
def test_scale_instance_missing_quantity(self):
self.manager.new_instance("someapp")
resp = self.api.post("/resources/someapp/scale",
data={"quality": "-2"})
self.assertEqual(400, resp.status_code)
self.assertEqual("missing quantity", resp.data)
def test_scale_instance_not_found(self):
resp = self.api.post("/resources/someapp/scale",
data={"quantity": "2"})
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
def test_scale_instance_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp/scale", method="POST",
data={"quantity": "2"},
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_plugin(self):
expected = inspect.getsource(plugin)
resp = self.api.get("/plugin")
self.assertEqual(200, resp.status_code)
self.assertEqual(expected, resp.data)
def test_plugin_does_not_require_authentication(self):
expected = inspect.getsource(plugin)
resp = self.api.get("/plugin")
self.assertEqual(200, resp.status_code)
self.assertEqual(expected, resp.data)
def open_with_auth(self, url, method, user, password, data=None, headers=None):
encoded = base64.b64encode(user + ":" + password)
if not headers:
headers = {}
headers["Authorization"] = "Basic " + encoded
return self.api.open(url, method=method, headers=headers, data=data)
def set_auth_env(self, user, password):
os.environ["API_USERNAME"] = user
os.environ["API_PASSWORD"] = password
def delete_auth_env(self):
del os.environ["API_USERNAME"], os.environ["API_PASSWORD"]
class ManagerTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
reload(api)
def setUp(self):
if "waaat" in api.managers:
del api.managers["waaat"]
def tearDown(self):
if "API_MANAGER" in os.environ:
del os.environ["API_MANAGER"]
def test_register_manager(self):
manager = lambda x: x
api.register_manager("waaat", manager)
self.assertEqual(manager, api.managers["waaat"])
def test_register_manager_override(self):
first_manager = lambda x: x
second_manager = lambda x, y: x + y
api.register_manager("waaat", first_manager)
api.register_manager("waaat", second_manager, override=True)
self.assertEqual(second_manager, api.managers["waaat"])
def test_register_manager_without_override(self):
first_manager = lambda x: x
second_manager = lambda x, y: x + y
api.register_manager("waaat", first_manager)
with self.assertRaises(ValueError) as cm:
api.register_manager("waaat", second_manager, override=False)
exc = cm.exception
self.assertEqual(("Manager already registered",), exc.args)
def test_get_manager(self):
os.environ["API_MANAGER"] = "ec2"
os.environ["API_MONGODB_URI"] = "mongodb://localhost:27017"
manager = api.get_manager()
self.assertIsInstance(manager, ec2.EC2Manager)
self.assertIsInstance(manager.storage, storage.MongoDBStorage)
def test_get_manager_unknown(self):
os.environ["API_MANAGER"] = "ec3"
with self.assertRaises(ValueError) as cm:
api.get_manager()
exc = cm.exception
self.assertEqual(("ec3 is not a valid manager",),
exc.args)
def test_get_manager_default(self):
os.environ["API_MONGODB_URI"] = "mongodb://localhost:27017"
manager = api.get_manager()
self.assertIsInstance(manager, ec2.EC2Manager)
self.assertIsInstance(manager.storage, storage.MongoDBStorage)
|
tsuru/varnishapi
|
tests/test_api.py
|
Python
|
bsd-3-clause
| 12,846
|
import datetime
import pytz
from app.models import Match
from app.business.slot_business import create_slot_from_json
from app.util.dota_util import GAME_MODES, LOBBY_TYPES
def create_from_json(match_json):
utc = pytz.UTC
match = Match(
match_id=match_json['match_id'],
match_seq_num=match_json['match_seq_num'],
radiant_win=match_json['radiant_win'],
duration=match_json['duration'],
#pylint: disable=no-value-for-parameter
start_time=utc.localize(datetime.datetime.fromtimestamp(match_json['start_time'])),
patch=match_json['patch'],
tower_status_radiant=match_json['tower_status_radiant'],
tower_status_dire=match_json['tower_status_dire'],
barracks_status_radiant=match_json['barracks_status_radiant'],
barracks_status_dire=match_json['barracks_status_dire'],
cluster=match_json['cluster'],
first_blood_time=match_json['first_blood_time'],
lobby_type=LOBBY_TYPES[match_json['lobby_type']],
human_players=match_json['human_players'],
leagueid=match_json['leagueid'],
game_mode=GAME_MODES[match_json['game_mode']],
flags=match_json['flags'],
engine=match_json['engine'],
radiant_score=match_json['radiant_score'],
dire_score=match_json['dire_score'],
skill=match_json['skill']
)
match.save()
for slot_json in match_json['players']:
create_slot_from_json(slot_json, match)
return match
def get_heroes_list(match):
return [slot.hero_id for slot in match.slots.all()]
#pylint: disable=invalid-name
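# Heroes on the winning team come back with negated ids; heroes on the losing
# team keep their positive ids.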
def get_heroes_with_winning_team_info(match):
team = 'radiant' if match.radiant_win else 'dire'
return [(-slot.hero_id if slot.team == team else slot.hero_id)
for slot in match.slots.all()]
def get_teams_heroes_list(match):
teams = []
for team in ['radiant', 'dire']:
teams.append(
sorted([slot.hero_id for slot in match.slots.filter(team=team)]))
return teams
def get_winning_team_heroes_list(match):
team = 'radiant' if match.radiant_win else 'dire'
return sorted([slot.hero_id for slot in match.slots.filter(team=team)])
|
lucashanke/houseofdota
|
app/business/match_business.py
|
Python
|
mit
| 2,203
|
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
import sys
if sys.version_info < (3, 0):
from wsgiserver2 import *
else:
# Le sigh. Boo for backward-incompatible syntax.
exec('from .wsgiserver3 import *')
|
enddo/HatKey
|
Lib/web/wsgiserver/__init__.py
|
Python
|
gpl-3.0
| 595
|
def linear_search(lst,size,value):
i = 0
while i < size:
if lst[i] == value:
return i
i = i + 1
return -1
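# Example: linear_search([-31, 0, 1, 2], 4, 1) returns index 2, while a value
# that is absent (e.g. 7) returns -1.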
def main():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
print(str(value) + " was not found in that array\n")
else:
print(str(value) + " was found at index " + str(index))
if __name__ == '__main__':
main()
|
EverythingAbout/Python
|
Searches/linear_search.py
|
Python
|
mit
| 775
|
"""
Tools for generating forms based on SQLAlchemy models.
"""
import inspect
from wtforms import fields as f
from wtforms import validators
from wtforms.form import Form
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField
from wtforms.ext.sqlalchemy.validators import Unique
__all__ = (
'model_fields', 'model_form',
)
def converts(*args):
def _inner(func):
func._converter_for = frozenset(args)
return func
return _inner
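# Illustrative: a method decorated with @converts('String', 'Unicode') is
# registered under both keys in self.converters by ModelConverterBase.__init__.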
class ModelConverterBase(object):
def __init__(self, converters, use_mro=True):
self.use_mro = use_mro
if not converters:
converters = {}
for name in dir(self):
obj = getattr(self, name)
if hasattr(obj, '_converter_for'):
for classname in obj._converter_for:
converters[classname] = obj
self.converters = converters
def convert(self, model, mapper, prop, field_args, db_session=None):
if not hasattr(prop, 'columns') and not hasattr(prop, 'direction'):
return
elif not hasattr(prop, 'direction') and len(prop.columns) != 1:
raise TypeError('Do not know how to convert multiple-column '
+ 'properties currently')
kwargs = {
'validators': [],
'filters': [],
'default': None,
}
converter = None
column = None
if not hasattr(prop, 'direction'):
column = prop.columns[0]
# Support sqlalchemy.schema.ColumnDefault, so users can benefit
# from setting defaults for fields, e.g.:
# field = Column(DateTimeField, default=datetime.utcnow)
default = getattr(column, 'default', None)
if default is not None:
# Only actually change default if it has an attribute named
# 'arg' that's callable.
callable_default = getattr(default, 'arg', None)
if callable_default and callable(callable_default):
default = callable_default(None)
kwargs['default'] = default
if column.nullable:
kwargs['validators'].append(validators.Optional())
else:
kwargs['validators'].append(validators.Required())
if db_session and column.unique:
kwargs['validators'].append(Unique(lambda: db_session, model,
column))
if self.use_mro:
types = inspect.getmro(type(column.type))
else:
types = [type(column.type)]
for col_type in types:
type_string = '%s.%s' % (col_type.__module__,
col_type.__name__)
if type_string.startswith('sqlalchemy'):
type_string = type_string[11:]
if type_string in self.converters:
converter = self.converters[type_string]
break
else:
for col_type in types:
if col_type.__name__ in self.converters:
converter = self.converters[col_type.__name__]
break
else:
return
if db_session and hasattr(prop, 'direction'):
foreign_model = prop.mapper.class_
nullable = True
for pair in prop.local_remote_pairs:
if not pair[0].nullable:
nullable = False
kwargs.update({
'allow_blank': nullable,
'query_factory': lambda: db_session.query(foreign_model).all()
})
converter = self.converters[prop.direction.name]
if field_args:
kwargs.update(field_args)
return converter(model=model, mapper=mapper, prop=prop, column=column,
field_args=kwargs)
class ModelConverter(ModelConverterBase):
def __init__(self, extra_converters=None):
super(ModelConverter, self).__init__(extra_converters)
@classmethod
def _string_common(cls, column, field_args, **extra):
if column.type.length:
field_args['validators'].append(validators.Length(max=column.type.length))
@converts('String', 'Unicode')
def conv_String(self, field_args, **extra):
self._string_common(field_args=field_args, **extra)
return f.TextField(**field_args)
@converts('Text', 'UnicodeText', 'types.LargeBinary', 'types.Binary')
def conv_Text(self, field_args, **extra):
self._string_common(field_args=field_args, **extra)
return f.TextAreaField(**field_args)
@converts('Boolean')
def conv_Boolean(self, field_args, **extra):
return f.BooleanField(**field_args)
@converts('Date')
def conv_Date(self, field_args, **extra):
return f.DateField(**field_args)
@converts('DateTime')
def conv_DateTime(self, field_args, **extra):
return f.DateTimeField(**field_args)
@converts('Integer', 'SmallInteger')
def handle_integer_types(self, column, field_args, **extra):
unsigned = getattr(column.type, 'unsigned', False)
if unsigned:
field_args['validators'].append(validators.NumberRange(min=0))
return f.IntegerField(**field_args)
@converts('Numeric', 'Float')
def handle_decimal_types(self, column, field_args, **extra):
places = getattr(column.type, 'scale', 2)
if places is not None:
field_args['places'] = places
return f.DecimalField(**field_args)
@converts('databases.mysql.MSYear')
def conv_MSYear(self, field_args, **extra):
field_args['validators'].append(validators.NumberRange(min=1901, max=2155))
return f.TextField(**field_args)
@converts('databases.postgres.PGInet', 'dialects.postgresql.base.INET')
def conv_PGInet(self, field_args, **extra):
field_args.setdefault('label', u'IP Address')
field_args['validators'].append(validators.IPAddress())
return f.TextField(**field_args)
@converts('dialects.postgresql.base.MACADDR')
def conv_PGMacaddr(self, field_args, **extra):
field_args.setdefault('label', u'MAC Address')
field_args['validators'].append(validators.MacAddress())
return f.TextField(**field_args)
@converts('dialects.postgresql.base.UUID')
def conv_PGUuid(self, field_args, **extra):
field_args.setdefault('label', u'UUID')
field_args['validators'].append(validators.UUID())
return f.TextField(**field_args)
@converts('MANYTOONE')
def conv_ManyToOne(self, field_args, **extra):
return QuerySelectField(**field_args)
@converts('MANYTOMANY', 'ONETOMANY')
def conv_ManyToMany(self, field_args, **extra):
return QuerySelectMultipleField(**field_args)
def model_fields(model, db_session=None, only=None, exclude=None,
field_args=None, converter=None):
"""
Generate a dictionary of fields for a given SQLAlchemy model.
See `model_form` docstring for description of parameters.
"""
if not hasattr(model, '_sa_class_manager'):
raise TypeError('model must be a sqlalchemy mapped model')
mapper = model._sa_class_manager.mapper
converter = converter or ModelConverter()
field_args = field_args or {}
properties = ((p.key, p) for p in mapper.iterate_properties)
if only:
properties = (x for x in properties if x[0] in only)
elif exclude:
properties = (x for x in properties if x[0] not in exclude)
field_dict = {}
for name, prop in properties:
field = converter.convert(model, mapper, prop,
field_args.get(name), db_session)
if field is not None:
field_dict[name] = field
return field_dict
def model_form(model, db_session=None, base_class=Form, only=None,
exclude=None, field_args=None, converter=None, exclude_pk=True,
exclude_fk=True, type_name=None):
"""
Create a wtforms Form for a given SQLAlchemy model class::
from wtalchemy.orm import model_form
from myapp.models import User
UserForm = model_form(User)
:param model:
A SQLAlchemy mapped model class.
:param db_session:
An optional SQLAlchemy Session.
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments used
to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
:param exclude_pk:
An optional boolean to force primary key exclusion.
:param exclude_fk:
An optional boolean to force foreign keys exclusion.
:param type_name:
An optional string to set returned type name.
"""
class ModelForm(base_class):
"""Sets object as form attribute."""
def __init__(self, *args, **kwargs):
if 'obj' in kwargs:
self._obj = kwargs['obj']
super(ModelForm, self).__init__(*args, **kwargs)
if not exclude:
exclude = []
model_mapper = model.__mapper__
for prop in model_mapper.iterate_properties:
if not hasattr(prop, 'direction') and prop.columns[0].primary_key:
if exclude_pk:
exclude.append(prop.key)
if hasattr(prop, 'direction') and exclude_fk and \
prop.direction.name != 'MANYTOMANY':
for pair in prop.local_remote_pairs:
exclude.append(pair[0].key)
type_name = type_name or model.__name__ + 'Form'
field_dict = model_fields(model, db_session, only, exclude, field_args,
converter)
return type(type_name, (ModelForm, ), field_dict)
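# Usage sketch beyond the docstring example (model and session are hypothetical):
#   PostForm = model_form(Post, db_session=session, only=['title', 'body'])
#   form = PostForm(obj=some_post)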
|
hnakamur/site-alive-checker
|
wtforms/ext/sqlalchemy/orm.py
|
Python
|
mit
| 10,723
|
import bpy, inspect, re
from collections import defaultdict
class Documentation:
def __init__(self):
self.is_build = False
self.reset()
def build_if_necessary(self):
if not self.is_build:
self.build()
def build(self):
self.reset()
all_bpy_types = inspect.getmembers(bpy.types)
self.build_type_documentation(all_bpy_types)
self.build_attribute_documentation(all_bpy_types)
self.build_operator_documentation()
self.add_custom_properties()
self.find_registered_menu_names()
self.load_modules()
self.categorize_data()
self.is_build = True
def reset(self):
self.types = defaultdict(TypeDocumentation)
self.functions = []
self.functions_by_name = defaultdict(list)
self.functions_by_owner = defaultdict(list)
self.properties = []
self.properties_by_name = defaultdict(list)
self.properties_by_owner = defaultdict(list)
self.operators = []
self.operators_by_container = defaultdict(list)
self.operators_by_full_name = {}
self.menu_names = []
self.is_build = False
def build_type_documentation(self, bpy_types):
for type in bpy_types:
type_doc = self.get_documentation_of_type(type[1].bl_rna)
self.types[type_doc.name] = type_doc
def get_documentation_of_type(self, type):
type_doc = TypeDocumentation(type.identifier)
type_doc.description = type.description
return type_doc
def build_attribute_documentation(self, bpy_types):
for type in bpy_types:
self.build_attribute_lists_of_type(type[1])
def build_attribute_lists_of_type(self, type):
identifier = type.bl_rna.identifier
for function in type.bl_rna.functions:
function_doc = self.get_documentation_of_function(function, identifier)
self.functions.append(function_doc)
for property in type.bl_rna.properties:
property_doc = self.get_documentation_of_property(property, identifier)
self.properties.append(property_doc)
def get_documentation_of_function(self, function, owner):
function_doc = FunctionDocumentation(function.identifier)
function_doc.description = function.description
function_doc.owner = owner
function_doc.inputs, function_doc.outputs = self.get_function_parameters(function)
return function_doc
def get_function_parameters(self, function):
inputs = []
outputs = []
for parameter in function.parameters:
parameter_doc = self.get_documentation_of_property(parameter, function.identifier)
if parameter.is_output: outputs.append(parameter_doc)
else: inputs.append(parameter_doc)
return inputs, outputs
def build_operator_documentation(self):
container_names = dir(bpy.ops)
for container_name in container_names:
self.build_docs_for_container_name(container_name)
def build_docs_for_container_name(self, container_name):
container = getattr(bpy.ops, container_name)
operator_names = dir(container)
for operator_name in operator_names:
self.build_doc_for_operator_name(container_name, container, operator_name)
def build_doc_for_operator_name(self, container_name, container, operator_name):
operator = getattr(container, operator_name)
operator_rna = operator.get_rna().bl_rna
inputs = self.get_operator_inputs(operator_rna)
operator_doc = OperatorDocumentation(container_name, operator_name, operator_rna.description, inputs)
self.operators.append(operator_doc)
def get_operator_inputs(self, operator_rna):
inputs = []
for property in operator_rna.properties:
if property.identifier != "rna_type":
inputs.append(self.get_documentation_of_property(property, None))
return inputs
def get_documentation_of_property(self, property, owner):
property_doc = PropertyDocumentation(property.identifier)
property_doc.type = self.get_property_type(property)
if property_doc.type == "Enum":
property_doc.enum_items = self.get_enum_items(property)
property_doc.description = property.description
property_doc.is_readonly = property.is_readonly
property_doc.owner = owner
return property_doc
def get_property_type(self, property):
type = property.type
if type == "POINTER":
return property.fixed_type.identifier
if type == "COLLECTION":
srna = getattr(property, "srna", None)
if srna is None: return "bpy_prop_collection"
else: return srna.identifier
if type in ["FLOAT", "INT", "STRING", "BOOLEAN"]:
array_length = getattr(property, "array_length", 0)
type_name = self.convert_to_nicer_type(type)
if array_length <= 1: return type_name
elif array_length <= 3: return type_name + " Vector " + str(array_length)
else: return type_name + " Array " + str(array_length)
if type == "ENUM":
return "Enum"
return None
def get_enum_items(self, enum_property):
items = []
for item in enum_property.enum_items:
items.append(item.identifier)
return items
def convert_to_nicer_type(self, type):
if type == "INT": return "Integer"
if type == "FLOAT": return "Float"
if type == "BOOLEAN": return "Boolean"
if type == "STRING": return "String"
def load_modules(self):
for name in ["bgl", "blf", "re", "bmesh", "mathutils", "random"]:
self.load_module(name)
def load_module(self, module_name):
names = self.get_attribute_names_in_module(module_name)
for name in names:
self.properties.append(PropertyDocumentation(name, type = "", owner = module_name))
self.properties.append(PropertyDocumentation(module_name, type = module_name, owner = None))
def get_attribute_names_in_module(self, module_name):
code = "import " + module_name + "\n"
code += "names = dir(" + module_name + ")"
names = []
try:
dic = {}
exec(code, dic)
names = [name for name in dic["names"] if not name.startswith("_")]
except: pass
return names
# have to do this manually, because these properties aren't available everywhere
def add_custom_properties(self):
props = self.properties
props.append(PropertyDocumentation("data", type = "BlendData", is_readonly = True, owner = None))
props.append(PropertyDocumentation("kmi", type = "KeyMapItem", owner = None))
props.append(PropertyDocumentation("context", type = "Context", is_readonly = True, owner = None))
context_prop_data = [
("visible_objects", "Object Sequence"),
("visible_bases", "ObjectBase Sequence"),
("selectable_objects", "Object Sequence"),
("selectable_bases", "ObjectBase Sequence"),
("selected_objects", "Object Sequence"),
("selected_editable_objects", "Object Sequence"),
("selected_editable_bases", "ObjectBase Sequence"),
("visible_bones", "EditBone Sequence"),
("editable_bones", "EditBone Sequence"),
("selected_bones", "EditBone Sequence"),
("selected_editable_bones", "EditBone Sequence"),
("visible_pose_bones", "PoseBone Sequence"),
("selected_pose_bones", "PoseBone Sequence"),
("active_bone", "EditBone"),
("active_pose_bone", "PoseBone"),
("active_base", "ObjectBase"),
("active_object", "Object"),
("object", "Object"),
("edit_object", "Object"),
("sculpt_object", "Object"),
("vertex_paint_object", "Object"),
("weight_paint_object", "Object"),
("image_paint_object", "Object"),
("particle_edit_object", "Object"),
("sequences", "Sequence Sequence"),
("selected_sequences", "Sequence Sequence"),
("selected_editable_sequences", "Sequence Sequence"),
("gpencil_data", "GreasePencil"),
("gpencil_data_owner", "ID"),
("visible_gpencil_layers", "GPencilLayer Sequence"),
("editable_gpencil_layers", "GPencilLayer Sequence"),
("editable_gpencil_strokes", "GPencilStroke Sequence"),
("active_gpencil_layer", "GPencilLayer Sequence"),
("active_gpencil_frame", "GPencilLayer Sequence"),
("active_operator", "Operator"),
("texture_slot", "MaterialTextureSlot"),
("world", "World"),
("mesh", "Mesh"),
("armature", "Armature"),
("lattice", "Lattice"),
("curve", "Curve"),
("meta_ball", "MetaBall"),
("lamp", "Lamp"),
("speaker", "Speaker"),
("camera", "Camera"),
("material", "Material"),
("material_slot", "MaterialSlot"),
("texture", "Texture"),
("texture_user", "ID"),
("texture_user_property", "Property"),
("bone", "Bone"),
("edit_bone", "EditBone"),
("pose_bone", "Posebone"),
("particle_system", "ParticleSystem"),
("particle_system_editable", "ParticleSystem"),
("particle_settings", "ParticleSettings"),
("cloth", "ClothModifier"),
("soft_body", "SoftBodyModifier"),
("fluid", "FluidSimulationModifier"),
("smoke", "SmokeModifier"),
("collision", "CollisionModifier"),
("brush", "Brush"),
("dynamic_paint", "DynamicPaintModifier"),
("line_style", "FreestyleLineStyle"),
("edit_image", "Image"),
("edit_mask", "Mask"),
("selected_nodes", "Node Sequence"),
("active_node", "Node"),
("edit_text", "Text"),
("edit_movie_clip", "MovieClip"),
("edit_mask", "Mask") ]
for prop_name, prop_type in context_prop_data:
props.append(PropertyDocumentation(prop_name, type = prop_type, owner = "Context", is_readonly = True))
space_subclass_names = [subclass.__name__ for subclass in bpy.types.Space.__subclasses__()]
for space_name in space_subclass_names:
props.append(PropertyDocumentation("space", type = space_name))
props.append(PropertyDocumentation("space_data", type = space_name))
props.append(PropertyDocumentation("event", type = "Event", is_readonly = True))
for element in ("row", "col", "box", "subrow", "subcol", "subbox", "pie"):
props.append(PropertyDocumentation(element, type = "UILayout"))
def find_registered_menu_names(self):
classes = bpy.types.Menu.__subclasses__()
for cl in classes:
self.menu_names.append(self.get_name_of_menu_class(cl))
def get_name_of_menu_class(self, menu_class):
text = str(menu_class)
        match = re.search(r"\.(?!.*\.)(\w*)", text)
return match.group(1)
def categorize_data(self):
for property in self.properties:
self.properties_by_name[property.name].append(property)
self.properties_by_owner[property.owner].append(property)
for functions in self.functions:
self.functions_by_name[functions.name].append(functions)
self.functions_by_owner[functions.owner].append(functions)
for operator in self.operators:
self.operators_by_container[operator.container_name].append(operator)
self.operators_by_full_name[operator.container_name + "." + operator.name] = operator
# attribute methods
def get_possible_subattributes_of_property(self, property_name):
attributes = []
attributes.extend(self.get_possible_subproperties_of_property(property_name))
attributes.extend(self.get_possible_subfunctions_of_property(property_name))
return attributes
def get_best_matching_subattributes_of_path(self, path):
types = self.get_best_matching_types_of_path(path)
attributes = []
for type in types:
attributes.extend(self.get_attributes_of_type(type))
return attributes
def get_best_matching_types_of_path(self, path):
attributes = self.get_best_matching_attributes_of_path(path)
return list(set([attribute.type for attribute in attributes]))
# "context.active_object.modifiers" -> Object.modifiers (instead of SequenceModifiers, etc.)
def get_best_matching_attributes_of_path(self, path):
attribute_names = path.split(".")
best_attributes = set(self.get_properties_by_name(attribute_names[-1]))
for i in range(len(attribute_names)):
first_name = attribute_names[i]
attributes_behind = attribute_names[i+1:]
attributes = set()
for attribute in self.get_attributes_by_name(first_name):
attributes.update(self.get_matching_attributes_for_child(attribute, attributes_behind))
if len(attributes) > 0:
best_attributes = attributes
break
return list(best_attributes)
# this is recursive
def get_matching_attributes_for_child(self, attribute, attribute_names_behind):
if len(attribute_names_behind) == 0:
return [attribute]
else:
if isinstance(attribute, FunctionDocumentation): return []
attributes = []
property = attribute
type = property.type
first_name = attribute_names_behind[0]
attributes_behind = attribute_names_behind[1:]
for attr in self.get_attributes_of_type(type):
if attr.name == first_name:
attributes.extend(self.get_matching_attributes_for_child(attr, attributes_behind))
return attributes
def get_attributes_by_name(self, attribute_name):
return self.get_properties_by_name(attribute_name) + self.get_functions_by_name(attribute_name)
def get_attributes_of_type(self, attribute_name):
return self.get_properties_of_type(attribute_name) + self.get_functions_of_type(attribute_name)
# property methods
def get_possible_subproperty_names_of_property(self, property_name):
return list(set([property.name for property in self.get_subproperties_of_property(property_name)]))
def get_possible_subproperties_of_property(self, property_name):
properties = []
for type in self.get_possible_type_names_for_property(property_name):
properties.extend(self.get_properties_of_type(type))
return properties
def get_types_with_property(self, property_name):
return [property.owner for property in self.properties_by_name[property_name]]
def get_property_names_of_type(self, type_name):
return [property.name for property in self.get_properties_of_type(type_name)]
def get_properties_of_type(self, type_name):
return self.properties_by_owner[type_name]
def get_type_description(self, type_name):
return self.types[type_name].description
def get_possible_type_names_for_property(self, property_name):
return list(set([property.type for property in self.get_properties_by_name(property_name)]))
def get_descriptions_for_property(self, property_name):
return list(set(property.description for property in self.get_properties_by_name(property_name)))
def get_properties_by_name(self, property_name):
return self.properties_by_name[property_name]
# function methods
def get_possible_subfunctions_of_property(self, property_name):
functions = []
for type in self.get_possible_type_names_for_property(property_name):
functions.extend(self.get_functions_of_type(type))
return functions
def get_function_names_of_type(self, type_name):
return [function.name for function in self.get_functions_of_type(type_name)]
def get_functions_of_type(self, type_name):
return self.functions_by_owner[type_name]
def get_functions_by_name(self, function_name):
return self.functions_by_name[function_name]
# operator methods
def get_operator_container_names(self):
return [container_name for container_name in self.operators_by_container.keys()]
def get_operator_names_in_container(self, container_name):
return [operator.name for operator in self.get_operators_in_container(container_name)]
def get_operators_in_container(self, container_name):
return self.operators_by_container[container_name]
def get_operator_by_full_name(self, full_name):
if "bpy.ops." in full_name:
full_name = full_name[8:]
return self.operators_by_full_name.get(full_name, None)
# menu methods
def get_menu_names(self):
if len(self.menu_names) == 0:
self.find_registered_menu_names()
return self.menu_names
class PropertyDocumentation:
    def __init__(self, name = "", description = "", type = None, owner = None, is_readonly = False, enum_items = None):
        self.name = name
        self.description = description
        self.type = type
        self.owner = owner
        self.is_readonly = is_readonly
        self.enum_items = enum_items if enum_items is not None else []
def __repr__(self):
if self.owner is None: return self.name
return self.owner + "." + self.name
class FunctionDocumentation:
type = "Function"
def __init__(self, name = "", description = "", owner = None, inputs = [], outputs = []):
self.name = name
self.description = description
self.owner = owner
self.inputs = inputs
self.outputs = outputs
def get_input_names(self):
return [input.name for input in self.inputs]
def get_output_names(self):
return [output.name for output in self.outputs]
def __repr__(self):
output_names = ", ".join(self.get_output_names())
if output_names != "": output_names = " -> " + output_names
function_string = self.name + "(" + ", ".join(self.get_input_names()) + ")" + output_names
if self.owner is None: return function_string
else: return self.owner + "." + function_string
class TypeDocumentation:
def __init__(self, name = "", description = ""):
self.name = name
self.description = description
def __repr__(self):
return self.name
class OperatorDocumentation:
    def __init__(self, container_name = "", name = "", description = "", inputs = None):
        self.container_name = container_name
        self.name = name
        self.description = description
        # default to None so instances do not share one mutable list
        self.inputs = inputs if inputs is not None else []
def get_input_names(self):
return [input.name for input in self.inputs]
def __repr__(self):
return self.container_name + "." + self.name
class WordDescription:
def __init__(self, word, description):
self.word = word
self.description = description
documentation = Documentation()
def get_documentation():
global documentation
return documentation
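# --- Illustrative usage sketch, not part of the original module ---
# Querying the shared Documentation instance; "Object" and the operator
# name below are hypothetical and depend on the API data actually loaded:
#
#     doc = get_documentation()
#     doc.get_property_names_of_type("Object")
#     doc.get_operator_by_full_name("bpy.ops.mesh.primitive_cube_add")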
|
kinoraw/kinoraw_repo
|
external/script_auto_complete/documentation.py
|
Python
|
gpl-3.0
| 20,186
|
from troy.interface.base import iBase
import troy.exception
########################################################################
#
#
#
class iScheduler (iBase) :
""" :class:`troy.Scheduler` interface """
############################################################################
#
def __init__ (self, obj, adaptor) :
""" Create a Scheduler """
raise troy.Exception (troy.Error.NotImplemented, "interface not implemented!")
############################################################################
#
def schedule (self, t, ud) :
""" Schedule a work unit on Troy """
raise troy.Exception (troy.Error.NotImplemented, "method not implemented!")
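
# --- Illustrative sketch, not part of the original interface module ---
# A hypothetical adaptor could satisfy the interface by subclassing it and
# overriding both methods; the simple FIFO queue below is invented for
# illustration only.
class _ExampleScheduler (iScheduler) :
    def __init__ (self, obj, adaptor) :
        # intentionally skip the base __init__, which always raises
        self._queue = []
    def schedule (self, t, ud) :
        # record the work unit and its unit description for later dispatch
        self._queue.append ((t, ud))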
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
andre-merzky/troy_old
|
troy/interface/scheduler.py
|
Python
|
gpl-3.0
| 783
|
# -*- coding: utf-8 -*-
from engine import component
class Collidable(component.Component):
""" Collidable component. """
def __init__(self, *args, **kwargs):
super(Collidable, self).__init__(*args, **kwargs)
self.type = None
self.collided = False
self.collision_distance = 0
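# --- Illustrative usage sketch, not part of the original module ---
# A collision system might mark components like this; the entity argument
# and the Component constructor signature are assumptions about the engine
# package, which is not shown here:
#
#     collidable = Collidable(entity)
#     collidable.type = 'circle'
#     collidable.collision_distance = 16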
|
eeneku/baller
|
src/components/collidable.py
|
Python
|
gpl-3.0
| 328
|
# -*- coding: utf8 -*-
from pycraft.common import ImmutableMeta
from .vector import Vector
class Face(metaclass=ImmutableMeta):
DIRECTION = (
Vector(0,0,-1), Vector(0,0,1),
Vector(0,-1,0), Vector(0,1,0),
Vector(-1,0,0), Vector(1,0,0))
properties = 'h v mix'
@staticmethod
def _vertical(pitch):
return 1 if pitch < 0 else 0
@staticmethod
def _horizontal(yaw):
if 45 <= yaw <= 135:
return 4
if 225 <= yaw <= 315:
return 5
if 135 < yaw < 225:
return 2
else:
return 3
@staticmethod
def _mix(yaw, pitch):
if pitch > 45:
return 0
if pitch < -45:
return 1
return Face._horizontal(yaw)
@classmethod
def by_angle(cls, yaw, pitch):
"""角度から向きをえる
vertical:
1 : 上面(下向き)
0 : 下面(上向き)
horizontal:
(0)
3
(270) 5 4 (90)
2
(180)
mix:
1 : (< -45)
0 : (> +45)
horizontal : (other)
"""
return Face(
cls._horizontal(yaw), cls._vertical(pitch), cls._mix(yaw, pitch))
def inverse(self):
return Face(self.h^1, self.v^1, self.mix^1)
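# --- Illustrative usage sketch, not part of the original module ---
# Assuming ImmutableMeta exposes the declared 'h v mix' properties,
# by_angle() maps view angles to face indices, e.g. for yaw=90, pitch=60:
#
#     face = Face.by_angle(90, 60)
#     face.h    # 4 (yaw falls in the 45..135 band)
#     face.v    # 0 (pitch >= 0)
#     face.mix  # 0 (pitch > 45 overrides the horizontal direction)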
|
nosix/PyCraft
|
src/pycraft/service/primitive/geometry/face.py
|
Python
|
lgpl-3.0
| 1,380
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from homecon.core.database import get_database, Field
from homecon.core.plugin import Plugin
from homecon.plugins.states import States
from homecon.plugins.websocket import Websocket
from homecon.plugins.pages import Pages
from homecon.plugins.scheduler import Scheduler
from homecon.plugins.knx import Knx
logger = logging.getLogger(__name__)
def get_plugins_table():
db = get_database()
if 'plugins' in db:
table = db.plugins
else:
table = db.define_table(
'plugins',
Field('name', type='string', default='', unique=True),
            Field('info', type='string', default=''),  # description read by get_available_plugins_list
Field('package', type='string'),
Field('active', type='boolean'),
)
return table
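# --- Illustrative sketch, not part of the original module ---
# Registering an optional plugin row; the plugin name and package are
# hypothetical, and a pydal-style insert() is assumed to be provided by
# the database wrapper:
#
#     table = get_plugins_table()
#     table.insert(name='mqtt', info='MQTT bridge',
#                  package='homecon.plugins', active=False)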
class Plugins(Plugin):
"""
A class to manage plugins dynamically
"""
def __init__(self):
super().__init__()
self._plugin_folder = 'plugins'
self._core_plugins = {
'plugins': self,
'states': States(),
'websocket': Websocket(),
'pages': Pages(),
'scheduler': Scheduler(),
'knx': Knx()
}
self._active_plugins = {}
# initialize plugins
db_entries = get_database()(get_plugins_table()).select()
for db_entry in db_entries:
if db_entry['active']:
plugin_class = self._import(db_entry['name'], db_entry['package'])
self._active_plugins[db_entry['name']] = plugin_class()
def initialize(self):
logger.debug('Plugins plugin initialized')
def get_available_plugins_list(self):
"""
Generate a list of all available optional plugins and those that are active
"""
db_entries = get_database()(get_plugins_table()).select()
plugins = []
for db_entry in db_entries:
plugins.append({'name': db_entry['name'], 'info': db_entry['info'], 'active': db_entry['active']})
return plugins
def get_active_plugins_list(self):
"""
Generate a list of all active plugins, excluding core plugins
"""
        return sorted(self._active_plugins.keys())
def activate(self, name):
"""
Activate an available plugin by name
"""
db_entries = get_plugins_table().get(name=name)
if len(db_entries) == 1:
db_entry = db_entries[0]
plugin_class = self._import(name, package=db_entry['package'])
if plugin_class is not None:
self._active_plugins[name] = plugin_class()
self._active_plugins[name].start()
get_plugins_table().put(active=True, where='id=\'{}\''.format(db_entry['id']))
logger.debug("plugin {} activated".format(name))
def deactivate(self, name):
pass
def install(self, url):
        logger.debug('installing plugin from {}'.format(url))
def _import(self, name, package=None):
"""
Imports a plugin module
this attempts to load the plugin with the correct format by name from
the plugins folder
Parameters
----------
name: string
The module name of the plugin
package: string
Package where to find the plugin, defaults to the default _plugin_folder
returns
-------
pluginclass: class
The plugin class if defined in the module otherwise ``None``
"""
if package is None or package == '':
plugin_module = __import__(name, fromlist=[name])
else:
plugin_module = __import__('{}.{}'.format(package, name), fromlist=[name])
plugin_class = None
plugin_class_name = name.capitalize()
if plugin_class_name in dir(plugin_module):
plugin_class = getattr(plugin_module, plugin_class_name)
return plugin_class
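    # Illustrative: _import('myplugin', package='homecon.plugins') would
    # import homecon.plugins.myplugin and return its Myplugin class (the
    # module name capitalized), or None if no such class is defined.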
def listen_plugins_settings(self, event):
sections = []
for key, plugin in self.items():
section = plugin.settings_sections
if section is not None:
sections += section
event.reply({'plugin': event.data['plugin'], 'value': sections})
# def get_state_config_keys(self):
#
# keyslist = []
#
# keyslist.append({'name': 'states', 'keys': ['type', 'quantity', 'unit', 'label', 'description']})
# keyslist.append({'name': 'permissions', 'keys': ['readusers', 'writeusers', 'readgroups', 'writegroups']})
#
# for name,plugin in core.plugins.items():
# keys = plugin.config_keys
# if len(config)>0:
# keyslist.append({'name':name, 'keys':keys})
#
# return keyslist
#
# def get_components(self):
#
# keyslist = []
#
# keyslist.append({'name':'building', 'components':[
# {
# 'name': 'relay',
# 'states': [
# 'value',
# ]
# },
# ]})
#
# #for name,plugin in self._plugins.items():
# # keys = plugin.components()
# # if len(config)>0:
# # keyslist.append({'name':name, 'keys':keys})
#
# return keyslist
#
# def listen_list_optionalplugins(self,event):
# core.websocket.send({'event':'list_optionalplugins', 'path':'', 'value':self.get_optionalplugins_list()}, clients=[event.client])
#
# def listen_list_activeplugins(self,event):
# core.websocket.send({'event':'list_activeplugins', 'path':'', 'value':self.get_activeplugins_list()}, clients=[event.client])
#
# def listen_list_state_config_keys(self,event):
# core.websocket.send({'event':'list_state_config_keys', 'path':'', 'value':self.get_state_config_keys()}, clients=[event.client])
#
# def listen_activate_plugin(self,event):
# self.activate(event.data['plugin'])
# core.websocket.send({'event':'list_optionalplugins', 'path':'', 'value':self.get_optionalplugins_list()}, clients=[event.client]) # FIXME should all clients be notified of plugin changes?
# core.websocket.send({'event':'list_activeplugins', 'path':'', 'value':self.get_activeplugins_list()}, clients=[event.client])
# core.event.fire('component_types',{},client=event.client)
#
# def listen_deactivate_plugin(self,event):
# self.deactivate(event.data['plugin'])
# core.websocket.send({'event':'list_optionalplugins', 'path':'', 'value':self.get_optionalplugins_list()}, clients=[event.client])
# core.websocket.send({'event':'list_activeplugins', 'path':'', 'value':self.get_activeplugins_list()}, clients=[event.client])
# #FIXME components defined in a plugin should be unregisterd and component instances deleted from the active list
# core.event.fire('component_types',{},client=event.client)
#
# def listen_install_plugin(self,event):
# if self.install(event.data['url']):
# core.websocket.send({'event':'list_optionalplugins', 'path':'', 'value':self.get_optionalplugins_list()}, clients=[event.client])
# core.websocket.send({'event':'list_activeplugins', 'path':'', 'value':self.get_optionalplugins_list()}, clients=[event.client])
def __getitem__(self,path):
return None
def __iter__(self):
return iter([])
def __contains__(self,path):
return False
    def keys(self):
        plugins = {}
        for key, val in self._core_plugins.items():
            plugins[key] = val
        for key, val in self._active_plugins.items():
            plugins[key] = val
        return plugins.keys()
def items(self):
plugins = {}
for key, val in self._core_plugins.items():
plugins[key] = val
for key, val in self._active_plugins.items():
plugins[key] = val
return plugins.items()
def values(self):
plugins = {}
for key, val in self._core_plugins.items():
plugins[key] = val
for key, val in self._active_plugins.items():
plugins[key] = val
return plugins.values()
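# --- Illustrative usage sketch, not part of the original module ---
# Activating an optional plugin by name; 'mqtt' is hypothetical and assumes
# a matching, previously installed row in the plugins table:
#
#     plugins = Plugins()
#     plugins.activate('mqtt')                        # imports and starts it
#     'mqtt' in plugins.get_active_plugins_list()     # -> True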
|
BrechtBa/homeconn
|
homecon/plugins/plugins.py
|
Python
|
gpl-3.0
| 8,522
|